From 685f4ba0bfc739b1dca9798aea1daaa0f6db37f2 Mon Sep 17 00:00:00 2001
From: rex <1073853456@qq.com>
Date: Wed, 30 Aug 2023 00:29:35 +0800
Subject: [PATCH 1/3] add cost calculator

---
 openai_api_call/response.py | 37 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 36 insertions(+), 1 deletion(-)

diff --git a/openai_api_call/response.py b/openai_api_call/response.py
index 2b61aab..4ac6214 100644
--- a/openai_api_call/response.py
+++ b/openai_api_call/response.py
@@ -2,6 +2,22 @@
 
 from typing import Dict
 
+# model cost ($ per 1K tokens)
+## https://openai.com/pricing
+## model | input | output
+model_cost_perktoken = {
+    "gpt-3.5-turbo": (0.0015, 0.002),
+    "gpt-3.5-turbo-0613": (0.0015, 0.002),
+    "gpt-3.5-turbo-0301": (0.0015, 0.002),
+    "gpt-3.5-turbo-16k-0613": (0.003, 0.004),
+    "gpt-3.5-turbo-16k": (0.003, 0.004),
+    "gpt-4": (0.03, 0.06),
+    "gpt-4-0613": (0.03, 0.06),
+    "gpt-4-0301": (0.03, 0.06),
+    "gpt-4-32k-0613": (0.06, 0.12),
+    "gpt-4-32k": (0.06, 0.12),
+}
+
 class Resp():
 
     def __init__(self, response:Dict) -> None:
@@ -11,6 +27,10 @@ def is_valid(self):
         """Check if the response is an error"""
         return 'error' not in self.response
 
+    def cost(self):
+        """Calculate the cost of the response"""
+        return response_cost(self.model, self.prompt_tokens, self.completion_tokens)
+
     def __repr__(self) -> str:
         return f"`Resp`: {self.content}"
 
@@ -91,4 +111,19 @@ def error_code(self):
     @property
     def finish_reason(self):
         """Finish reason"""
-        return self.response['choices'][0]['finish_reason']
\ No newline at end of file
+        return self.response['choices'][0]['finish_reason']
+
+def response_cost(model:str, prompt_tokens:int, completion_tokens:int):
+    """Calculate the cost of the response
+
+    Args:
+        model (str): model name
+        prompt_tokens (int): number of tokens in the prompt
+        completion_tokens (int): number of tokens of the response
+
+    Returns:
+        float: cost of the response
+    """
+    assert model in model_cost_perktoken, f"Model {model} is not known!"
+    input_price, output_price = model_cost_perktoken[model]
+    return (input_price * prompt_tokens + output_price * completion_tokens) / 1000
\ No newline at end of file
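
A quick illustration of the pricing rule introduced above (not part of the
patch itself): each entry in model_cost_perktoken is an (input, output) pair in
dollars per 1K tokens, so a call is priced as
(input_rate * prompt_tokens + output_rate * completion_tokens) / 1000.
A minimal sketch, assuming the module layout shown in this diff:

    from openai_api_call.response import response_cost

    # 8 prompt tokens + 10 completion tokens on gpt-3.5-turbo:
    # (0.0015 * 8 + 0.002 * 10) / 1000 = 3.2e-05 dollars
    cost = response_cost("gpt-3.5-turbo", prompt_tokens=8, completion_tokens=10)
    print(f"${cost:.6f}")  # -> $0.000032
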
From 67a458d20505d3ea05b29a6004f103efc7c8c248 Mon Sep 17 00:00:00 2001
From: rex <1073853456@qq.com>
Date: Wed, 30 Aug 2023 00:40:17 +0800
Subject: [PATCH 2/3] fix tests

---
 openai_api_call/asynctool.py  | 2 +-
 tests/test_async.py           | 2 ++
 tests/test_openai_api_call.py | 1 +
 3 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/openai_api_call/asynctool.py b/openai_api_call/asynctool.py
index 21592e9..452afc3 100644
--- a/openai_api_call/asynctool.py
+++ b/openai_api_call/asynctool.py
@@ -110,7 +110,7 @@ async def chat_complete(ind, locker, chatlog, chkpoint, **options):
             , chatlog=chatlog
             , chkpoint=chkpoint
             , **options)))
-    responses = await tqdm.gather(*tasks)
+    responses = await tqdm.gather(tasks)
     return responses
 
 def async_chat_completion( chatlogs:List[List[Dict]]
diff --git a/tests/test_async.py b/tests/test_async.py
index de0f319..2a03fe2 100644
--- a/tests/test_async.py
+++ b/tests/test_async.py
@@ -27,3 +27,5 @@ def data2chat(data):
     t = time.time()
     process_chats(chatlogs, data2chat, chkpoint, clearfile=True)
     print(f"Time elapsed: {time.time() - t:.2f}s")
+
+test_async_process()
\ No newline at end of file
diff --git a/tests/test_openai_api_call.py b/tests/test_openai_api_call.py
index db8421a..23dbb43 100644
--- a/tests/test_openai_api_call.py
+++ b/tests/test_openai_api_call.py
@@ -194,6 +194,7 @@ def test_usage():
     assert resp.total_tokens == 18
     assert resp.prompt_tokens == 8
     assert resp.completion_tokens == 10
+    print(resp.cost())
 
 def test_content():
     resp = Resp(response=response)

From f8c63fd960b871e75631f6784234924e2118d615 Mon Sep 17 00:00:00 2001
From: rex <1073853456@qq.com>
Date: Wed, 30 Aug 2023 00:42:19 +0800
Subject: [PATCH 3/3] update patch version

---
 .github/workflows/test.yml  | 2 --
 openai_api_call/__init__.py | 2 +-
 setup.py                    | 2 +-
 tests/test_async.py         | 4 +---
 4 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 9d2d263..bb77451 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -20,8 +20,6 @@ jobs:
             os: ubuntu-latest
           - python-version: 3.9
             os: ubuntu-latest
-          - python-version: '3.10'
-            os: ubuntu-latest
 
     steps:
       - uses: actions/checkout@v2
diff --git a/openai_api_call/__init__.py b/openai_api_call/__init__.py
index caf48e3..ad14275 100644
--- a/openai_api_call/__init__.py
+++ b/openai_api_call/__init__.py
@@ -2,7 +2,7 @@
 
 __author__ = """Rex Wang"""
 __email__ = '1073853456@qq.com'
-__version__ = '1.0.0'
+__version__ = '1.0.1'
 
 import os, requests
 from .chattool import Chat, Resp
diff --git a/setup.py b/setup.py
index d07bb15..152d48e 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
 with open('README.md') as readme_file:
     readme = readme_file.read()
 
-VERSION = '1.0.0'
+VERSION = '1.0.1'
 
 requirements = ['Click>=7.0', 'requests>=2.20', 'tqdm>=4.60', 'docstring_parser>=0.10', 'aiohttp>=3.8']
 test_requirements = ['pytest>=3', 'unittest']
diff --git a/tests/test_async.py b/tests/test_async.py
index 2a03fe2..a44122e 100644
--- a/tests/test_async.py
+++ b/tests/test_async.py
@@ -26,6 +26,4 @@ def data2chat(data):
         return chat
     t = time.time()
     process_chats(chatlogs, data2chat, chkpoint, clearfile=True)
-    print(f"Time elapsed: {time.time() - t:.2f}s")
-
-test_async_process()
\ No newline at end of file
+    print(f"Time elapsed: {time.time() - t:.2f}s")
\ No newline at end of file
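
For context on the print(resp.cost()) line added to test_usage, here is a
self-contained sketch of pricing a stubbed chat-completion payload through the
package API. The stub below follows the standard OpenAI response shape; the
actual fixture used by the test suite is not shown in this diff, so treat the
field values as an assumption rather than the project's real test data:

    from openai_api_call import Resp

    # hypothetical response payload; the real test fixture may differ
    stub = {
        "model": "gpt-3.5-turbo",
        "choices": [{"index": 0,
                     "finish_reason": "stop",
                     "message": {"role": "assistant", "content": "Hello!"}}],
        "usage": {"prompt_tokens": 8, "completion_tokens": 10, "total_tokens": 18},
    }

    resp = Resp(response=stub)
    print(resp.cost())  # (0.0015 * 8 + 0.002 * 10) / 1000 = 3.2e-05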