hotfix: kwargs in extra_body #188

Merged: 2 commits, Jul 25, 2024
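Every file in this diff makes the same substitution: keyword arguments that callers pass beyond the explicitly modelled parameters are no longer splatted into the underlying OpenAI SDK call as `**kwargs`; they are forwarded through the SDK's `extra_body` argument instead, so they end up merged into the JSON request body. A minimal before/after sketch of the pattern, assuming an illustrative `openai` client and a pass-through `top_k` parameter (neither is taken from this PR):

```python
from openai import OpenAI

client = OpenAI()        # illustrative client; portkey_ai wires up its own instance
kwargs = {"top_k": 40}   # a parameter the SDK's typed create() signature does not define

# Before: splatting unknown kwargs fails inside the Python SDK itself, e.g.
#   client.chat.completions.create(model=..., messages=..., **kwargs)
#   -> TypeError: create() got an unexpected keyword argument 'top_k'

# After: the same keys ride along in extra_body and are merged into the request body,
# so the gateway/provider still receives them.
response = client.chat.completions.create(
    model="gpt-4o",      # illustrative model name
    messages=[{"role": "user", "content": "Hello"}],
    extra_body=kwargs,
)
```

The hunks below apply this change to the audio, batches, chat completions, text completions, embeddings, fine-tuning, and images wrappers.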
12 changes: 6 additions & 6 deletions portkey_ai/api_resources/apis/audio.py
@@ -43,7 +43,7 @@ def create(
response_format=response_format,
temperature=temperature,
timestamp_granularities=timestamp_granularities,
- **kwargs
+ extra_body=kwargs,
)
data = Transcription(**json.loads(response.text))
data._headers = response.headers
@@ -72,7 +72,7 @@ def create(
prompt=prompt,
response_format=response_format,
temperature=temperature,
- **kwargs
+ extra_body=kwargs,
)
data = Translation(**json.loads(response.text))
data._headers = response.headers
@@ -102,7 +102,7 @@ def create(
voice=voice,
response_format=response_format,
speed=speed,
- **kwargs
+ extra_body=kwargs,
)

return response
@@ -144,7 +144,7 @@ async def create(
response_format=response_format,
temperature=temperature,
timestamp_granularities=timestamp_granularities,
- **kwargs
+ extra_body=kwargs,
)
)
data = Transcription(**json.loads(response.text))
@@ -174,7 +174,7 @@ async def create(
prompt=prompt,
response_format=response_format,
temperature=temperature,
- **kwargs
+ extra_body=kwargs,
)
data = Translation(**json.loads(response.text))
data._headers = response.headers
@@ -204,7 +204,7 @@ async def create(
voice=voice,
response_format=response_format,
speed=speed,
- **kwargs
+ extra_body=kwargs,
)

data = response
16 changes: 8 additions & 8 deletions portkey_ai/api_resources/apis/batches.py
@@ -28,7 +28,7 @@ def create(
endpoint=endpoint,
input_file_id=input_file_id,
metadata=metadata,
- **kwargs
+ extra_body=kwargs,
)
data = Batch(**json.loads(response.text))
data._headers = response.headers
@@ -37,7 +37,7 @@

def retrieve(self, batch_id, **kwargs) -> Batch:
response = self.openai_client.with_raw_response.batches.retrieve(
- batch_id=batch_id, **kwargs
+ batch_id=batch_id, extra_body=kwargs
)
data = Batch(**json.loads(response.text))
data._headers = response.headers
@@ -52,7 +52,7 @@ def list(
**kwargs
) -> BatchList:
response = self.openai_client.with_raw_response.batches.list(
- after=after, limit=limit, **kwargs
+ after=after, limit=limit, extra_body=kwargs
)
data = BatchList(**json.loads(response.text))
data._headers = response.headers
@@ -61,7 +61,7 @@

def cancel(self, batch_id: str, **kwargs) -> Batch:
response = self.openai_client.with_raw_response.batches.cancel(
- batch_id=batch_id, **kwargs
+ batch_id=batch_id, extra_body=kwargs
)
data = Batch(**json.loads(response.text))
data._headers = response.headers
@@ -89,7 +89,7 @@ async def create(
endpoint=endpoint,
input_file_id=input_file_id,
metadata=metadata,
- **kwargs
+ extra_body=kwargs,
)
data = Batch(**json.loads(response.text))
data._headers = response.headers
@@ -98,7 +98,7 @@ async def create(

async def retrieve(self, batch_id, **kwargs) -> Batch:
response = await self.openai_client.with_raw_response.batches.retrieve(
- batch_id=batch_id, **kwargs
+ batch_id=batch_id, extra_body=kwargs
)
data = Batch(**json.loads(response.text))
data._headers = response.headers
@@ -113,7 +113,7 @@ async def list(
**kwargs
) -> BatchList:
response = await self.openai_client.with_raw_response.batches.list(
- after=after, limit=limit, **kwargs
+ after=after, limit=limit, extra_body=kwargs
)
data = BatchList(**json.loads(response.text))
data._headers = response.headers
@@ -122,7 +122,7 @@

async def cancel(self, batch_id: str, **kwargs) -> Batch:
response = await self.openai_client.with_raw_response.batches.cancel(
- batch_id=batch_id, **kwargs
+ batch_id=batch_id, extra_body=kwargs
)
data = Batch(**json.loads(response.text))
data._headers = response.headers
8 changes: 4 additions & 4 deletions portkey_ai/api_resources/apis/chat_complete.py
@@ -53,7 +53,7 @@ def stream_create( # type: ignore[return]
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
- **kwargs,
+ extra_body=kwargs,
) as response:
for line in response.iter_lines():
json_string = line.replace("data: ", "")
@@ -79,7 +79,7 @@ def normal_create(
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
- **kwargs,
+ extra_body=kwargs,
)
data = ChatCompletions(**json.loads(response.text))
data._headers = response.headers
@@ -133,7 +133,7 @@ async def stream_create(
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
- **kwargs,
+ extra_body=kwargs,
) as response:
async for line in response.iter_lines():
json_string = line.replace("data: ", "")
@@ -159,7 +159,7 @@ async def normal_create(
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
- **kwargs,
+ extra_body=kwargs,
)
data = ChatCompletions(**json.loads(response.text))
data._headers = response.headers
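From the caller's side, the practical effect is that provider-specific parameters passed to the Portkey wrappers no longer have to match the OpenAI SDK's named arguments. A hypothetical usage sketch (the credentials, model name, and `top_k` value are placeholders, not taken from this PR): the unmodelled `top_k` is collected into `**kwargs` by the wrapper and, with this fix, reaches the provider via `extra_body`.

```python
from portkey_ai import Portkey

# Placeholder credentials; substitute real values.
portkey = Portkey(api_key="PORTKEY_API_KEY", virtual_key="openai-virtual-key")

# top_k is not a named parameter of the wrapper's create(); it lands in **kwargs
# and is now forwarded to the OpenAI SDK call as extra_body={"top_k": 40}.
completion = portkey.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello."}],
    top_k=40,
)
print(completion.choices[0].message.content)
```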
8 changes: 4 additions & 4 deletions portkey_ai/api_resources/apis/complete.py
@@ -27,7 +27,7 @@ def stream_create( # type: ignore[return]
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
- **kwargs,
+ extra_body=kwargs,
) as response:
for line in response.iter_lines():
json_string = line.replace("data: ", "")
@@ -53,7 +53,7 @@ def normal_create(
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
- **kwargs,
+ extra_body=kwargs,
)
data = TextCompletion(**json.loads(response.text))
data._headers = response.headers
@@ -107,7 +107,7 @@ async def stream_create(
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
- **kwargs,
+ extra_body=kwargs,
) as response:
async for line in response.iter_lines():
json_string = line.replace("data: ", "")
@@ -133,7 +133,7 @@ async def normal_create(
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
- **kwargs,
+ extra_body=kwargs,
)
data = TextCompletion(**json.loads(response.text))
data._headers = response.headers
4 changes: 2 additions & 2 deletions portkey_ai/api_resources/apis/embeddings.py
@@ -29,7 +29,7 @@ def create(
dimensions=dimensions,
encoding_format=encoding_format,
user=user,
- **kwargs
+ extra_body=kwargs,
)

data = CreateEmbeddingResponse(**json.loads(response.text))
@@ -60,7 +60,7 @@ async def create(
dimensions=dimensions,
encoding_format=encoding_format,
user=user,
- **kwargs
+ extra_body=kwargs,
)
data = CreateEmbeddingResponse(**json.loads(response.text))
data._headers = response.headers
27 changes: 15 additions & 12 deletions portkey_ai/api_resources/apis/fine_tuning.py
@@ -48,7 +48,7 @@ def create(
seed=seed,
suffix=suffix,
validation_file=validation_file,
- **kwargs,
+ extra_body=kwargs,
)
data = FineTuningJob(**json.loads(response.text))
data._headers = response.headers
@@ -57,7 +57,7 @@

def retrieve(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob:
response = self.openai_client.with_raw_response.fine_tuning.jobs.retrieve(
- fine_tuning_job_id=fine_tuning_job_id, **kwargs
+ fine_tuning_job_id=fine_tuning_job_id, extra_body=kwargs
)
data = FineTuningJob(**json.loads(response.text))
data._headers = response.headers
@@ -72,7 +72,7 @@ def list(
**kwargs,
) -> FineTuningJobList:
response = self.openai_client.with_raw_response.fine_tuning.jobs.list(
- after=after, limit=limit, **kwargs
+ after=after, limit=limit, extra_body=kwargs
)
data = FineTuningJobList(**json.loads(response.text))
data._headers = response.headers
@@ -81,7 +81,7 @@

def cancel(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob:
response = self.openai_client.with_raw_response.fine_tuning.jobs.cancel(
- fine_tuning_job_id=fine_tuning_job_id, **kwargs
+ fine_tuning_job_id=fine_tuning_job_id, extra_body=kwargs
)
data = FineTuningJob(**json.loads(response.text))
data._headers = response.headers
@@ -97,7 +97,10 @@ def list_events(
**kwargs,
) -> FineTuningJobEventList:
response = self.openai_client.with_raw_response.fine_tuning.jobs.list_events(
- fine_tuning_job_id=fine_tuning_job_id, after=after, limit=limit, **kwargs
+ fine_tuning_job_id=fine_tuning_job_id,
+ after=after,
+ limit=limit,
+ extra_body=kwargs,
)
data = FineTuningJobEventList(**json.loads(response.text))
data._headers = response.headers
@@ -123,7 +126,7 @@ def list(
fine_tuning_job_id=fine_tuning_job_id,
after=after,
limit=limit,
- **kwargs,
+ extra_body=kwargs,
)
)

@@ -168,7 +171,7 @@ async def create(
seed=seed,
suffix=suffix,
validation_file=validation_file,
- **kwargs,
+ extra_body=kwargs,
)
data = FineTuningJob(**json.loads(response.text))
data._headers = response.headers
@@ -177,7 +180,7 @@ async def create(

async def retrieve(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob:
response = await self.openai_client.with_raw_response.fine_tuning.jobs.retrieve(
- fine_tuning_job_id=fine_tuning_job_id, **kwargs
+ fine_tuning_job_id=fine_tuning_job_id, extra_body=kwargs
)
data = FineTuningJob(**json.loads(response.text))
data._headers = response.headers
@@ -192,7 +195,7 @@ async def list(
**kwargs,
) -> FineTuningJobList:
response = await self.openai_client.with_raw_response.fine_tuning.jobs.list(
- after=after, limit=limit, **kwargs
+ after=after, limit=limit, extra_body=kwargs
)
data = FineTuningJobList(**json.loads(response.text))
data._headers = response.headers
@@ -201,7 +204,7 @@ async def cancel(

async def cancel(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob:
response = await self.openai_client.with_raw_response.fine_tuning.jobs.cancel(
- fine_tuning_job_id, **kwargs
+ fine_tuning_job_id, extra_body=kwargs
)
data = FineTuningJob(**json.loads(response.text))
data._headers = response.headers
@@ -221,7 +224,7 @@ async def list_events(
fine_tuning_job_id=fine_tuning_job_id,
after=after,
limit=limit,
- **kwargs,
+ extra_body=kwargs,
)
)
data = FineTuningJobEventList(**json.loads(response.text))
@@ -247,7 +250,7 @@ async def list(
fine_tuning_job_id=fine_tuning_job_id,
after=after,
limit=limit,
- **kwargs,
+ extra_body=kwargs,
)

data = FineTuningJobCheckpointList(**json.loads(response.text))
16 changes: 8 additions & 8 deletions portkey_ai/api_resources/apis/images.py
@@ -33,9 +33,9 @@ def generate(
quality=quality,
response_format=response_format,
size=size,
- user=user,
style=style,
- **kwargs
+ user=user,
+ extra_body=kwargs,
)
data = ImagesResponse(**json.loads(response.text))
data._headers = response.headers
@@ -65,7 +65,7 @@ def edit(
response_format=response_format,
size=size,
user=user,
- **kwargs
+ extra_body=kwargs,
)
data = ImagesResponse(**json.loads(response.text))
data._headers = response.headers
@@ -89,7 +89,7 @@ def create_variation(
response_format=response_format,
size=size,
user=user,
- **kwargs
+ extra_body=kwargs,
)
data = ImagesResponse(**json.loads(response.text))
data._headers = response.headers
@@ -123,9 +123,9 @@ async def generate(
quality=quality,
response_format=response_format,
size=size,
- user=user,
style=style,
- **kwargs
+ user=user,
+ extra_body=kwargs,
)
data = ImagesResponse(**json.loads(response.text))
data._headers = response.headers
@@ -155,7 +155,7 @@ async def edit(
response_format=response_format,
size=size,
user=user,
- **kwargs
+ extra_body=kwargs,
)
data = ImagesResponse(**json.loads(response.text))
data._headers = response.headers
@@ -179,7 +179,7 @@ async def create_variation(
response_format=response_format,
size=size,
user=user,
- **kwargs
+ extra_body=kwargs,
)
data = ImagesResponse(**json.loads(response.text))
data._headers = response.headers