diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1129af5..2c6e36e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -10,21 +10,16 @@ on: jobs: build: - runs-on: ubuntu-latest - strategy: matrix: - python-version: [3.8, 3.9] - + python-version: [3.7, 3.8, 3.9] steps: - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - - name: Install dependencies run: | python -m pip install --upgrade pip @@ -32,6 +27,9 @@ jobs: python -m pip install -r requirements_dev.txt - name: Test with pytest and coverage + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + OPENAI_API_BASE_URL: ${{ secrets.OPENAI_API_BASE_URL }} run: | pip install coverage coverage run -m pytest tests/ diff --git a/.gitignore b/.gitignore index 05c7b3b..eb3c126 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,9 @@ +##### user defined ###### +*.jsonl + + +##### separated line ###### + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] diff --git a/README.md b/README.md index 4ef53ca..0bf527b 100644 --- a/README.md +++ b/README.md @@ -136,18 +136,6 @@ chat.save("chat_history.log", mode="w") # default to "a" chat.print_log() ``` -Moreover, you can check the usage status of the API key: - -```py -# show usage status of the default API key -chat = Chat() -chat.show_usage_status() - -# show usage status of the specified API key -chat.api_key = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" -chat.show_usage_status() -``` - ### Advance usage Save the chat history to a file: @@ -171,14 +159,6 @@ Load the chat history from a file: # load chats(default) chats = load_chats(checkpoint) assert chats == [Chat(log) for log in chat_logs] -# load chat log only -chat_logs = load_chats(checkpoint, chat_log_only=True) -assert chat_logs == [[], [{'role': 'user', 'content': 'hello!'}], - [{'role': 'user', 'content': 'hello!'}, - {'role': 'assistant', 
'content': '你好, how can I assist you today?'}]] -# load the last message only -chat_msgs = load_chats(checkpoint, last_message_only=True) -assert chat_msgs == ["", "hello!", "你好, how can I assist you today?"] ``` In general, one can create a function `msg2chat` and use `process_chats` to process the data: diff --git a/README_zh_CN.md b/README_zh_CN.md index ea526c9..8a398d1 100644 --- a/README_zh_CN.md +++ b/README_zh_CN.md @@ -133,18 +133,6 @@ chat.save("chat_history.log", mode="w") # 默认为 "a" chat.print_log() ``` -此外,你可以使用 `Chat` 类的 `show_usage_status` 方法来查看 API 的使用情况: - -```py -# 查看默认 API 的使用情况 -chat = Chat() -chat.show_usage_status() - -# 查看指定 API 的使用情况 -chat.api_key = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" -chat.show_usage_status() -``` - ### 进阶用法 将对话历史保存到文件中: @@ -167,11 +155,6 @@ chat.save(checkpoint) # 加载 Chat 对象(默认) chats = load_chats(checkpoint) assert chats == [Chat(log) for log in chat_logs] -# 仅加载对话历史 -chat_logs = load_chats(checkpoint, chat_log_only=True) -# 仅加载最后一条消息 -chat_msgs = load_chats(checkpoint, last_message_only=True) -assert chat_msgs == ["", "hello!", "你好, how can I assist you today?"] ``` 一般来说,你可以定义函数 `msg2chat` 并使用 `process_chats` 来处理数据: diff --git a/openai_api_call/__init__.py b/openai_api_call/__init__.py index 7b4b797..f2b8df2 100644 --- a/openai_api_call/__init__.py +++ b/openai_api_call/__init__.py @@ -5,7 +5,7 @@ __version__ = '0.6.0' import os, requests -from .chattool import Chat, Resp, chat_completion, usage_status +from .chattool import Chat, Resp, chat_completion from .checkpoint import load_chats, process_chats from .proxy import proxy_on, proxy_off, proxy_status from . import request @@ -44,7 +44,7 @@ def show_base_url(): def debug_log( net_url:str="https://www.baidu.com" , timeout:int=5 , message:str="hello world! 你好!" 
- , test_usage:bool=True + , test_apikey:bool=True , test_response:bool=True , test_model:bool=True): """Debug the API call @@ -58,41 +58,32 @@ def debug_log( net_url:str="https://www.baidu.com" Returns: bool: True if the debug is finished. """ - # 1. Test whether the network is available + # Network test try: requests.get(net_url, timeout=timeout) except: print("Warning: Network is not available.") return False - print("Your network is available.") - - # 2. Check the API key - print("\nPlease verify the API key:") - show_apikey() - - # 3. Check the proxy status - print("\nYour proxy status:") + ## Check the proxy status + print("\nPlease check your proxy:") proxy_status() - print("Note that, you don't need to set proxy if your `base_url` has done it!") - # 4. Base url + ## Base url print("\nCheck your base url:") show_base_url() - if request.url is not None: - print("Warning: the `url` parameter is deprecated, please use `base_url` instead.") - - # 5. Get usage status - if test_usage: - print("\nThe usage status of your API key:") - Chat().show_usage_status(recent=3) + + ## Please check your API key + if test_apikey: + print("\nPlease verify your API key:") + show_apikey() - # 6. Get model list + # Get model list if test_model: print("\nThe model list:") print(Chat().get_valid_models()) - # 7. 
Test hello world + # Test hello world if test_response: print("\nTest message:", message) chat = Chat(message) diff --git a/openai_api_call/chattool.py b/openai_api_call/chattool.py index 445629a..a25c431 100644 --- a/openai_api_call/chattool.py +++ b/openai_api_call/chattool.py @@ -1,19 +1,16 @@ # The object that stores the chat log -from typing import List, Dict, Union, Callable +from typing import List, Dict, Union import openai_api_call from .response import Resp -from .request import chat_completion, usage_status, valid_models -import signal, time, random, datetime, json, warnings, docstring_parser +from .request import chat_completion, valid_models +import signal, time, random +import json # timeout handler def handler(signum, frame): raise Exception("API call timed out!") -# TODO: Generate a function description(Json Schema) -def func2desc(func:Callable) -> str: - pass - class Chat(): def __init__( self , msg:Union[List[Dict], None, str]=None @@ -42,9 +39,6 @@ def __init__( self raise ValueError("msg should be a list of dict, a string or None") self._api_key = openai_api_call.api_key if api_key is None else api_key self._chat_url = chat_url - self._function_call = None - self._functions = None - self._available_functions = None @property def api_key(self): @@ -70,151 +64,31 @@ def chat_url(self, chat_url:str): def chat_log(self): """Chat history""" return self._chat_log - - @property - def function_call(self): - """Function call - - Control the behavior of the model. 
Can be "auto", "none" or a dict with only one key "name" - - Explanation: - "auto": the model will automatically call the function if it thinks it is necessary - "none": the model will never call the function - {"name": "get_current_weather"}: the model will be forced to call the function "get_current_weather" - """ - return self._function_call - - @function_call.setter - def function_call(self, para:Union[None, str, Dict]): - """Set value of function call - - Args: - para (Union[None, str, Dict]): function call. Can be "auto", "none" or a dict with only one key "name" - - Examples: - >>> chat = Chat() - >>> chat.function_call = None - >>> chat.function_call = "auto" - >>> chat.function_call = "none" - >>> chat.function_call = {"name": "get_current_weather"} - """ - if para is not None: - if isinstance(para, str): - assert para in ["auto", "none"], "Function call should be either 'auto' or 'none'!" - elif isinstance(para, dict): - assert 'name' in para.keys() and len(para) == 1, "Function call should be a dict with only one key 'name'!" - else: - raise ValueError("Function call should be either 'auto', 'none' or a dict!") - self._function_call = para - - @property - def functions(self): - """function list for the function calling feature""" - return self._functions - - @functions.setter - def functions(self, para:Union[None, List[Dict]]): - """Set function list - - Args: - para (Union[None, List[Dict]]): function list. Defaults to None. - - Examples: - >>> chat = Chat() - >>> chat.functions = None - >>> chat.functions = [{ - "name": "get_current_weather", - "description": "Get the current weather", - "arguments": { - "location": { - "type": "string", - "description": "The location to get the weather for" - }, - "time": { - "type": "string", - "description": "The time to get the weather for" - } - } - }] - """ - if para is not None: - assert isinstance(para, list), "Functions should be a list!" - assert len(para), "Functions should not be empty!" 
- self._functions = para - - @property - def available_functions(self): - """Available functions""" - return self._available_functions - - @available_functions.setter - def available_functions(self, funcs:Union[list, dict, None]): - """Set available functions - - Args: - funcs (Union[list, dict, None]): available functions. Defaults to None. - If it is a list, the function name will be used as the key. - """ - assert funcs is None or isinstance(funcs, (list, dict)), "Available functions should be a list or a dict!" - - if funcs is None: - self._available_functions = None - elif not len(funcs): # empty list - warnings.warn("No available functions!") - self._available_functions = {} - elif isinstance(funcs, list): - # use function name as key - self._available_functions = {func.__name__: func for func in funcs} - else: - self._available_functions = funcs - - def eval_func(self, call:dict, update:bool=True): - name = call['name'] - func = self.available_functions.get(name) - assert func is not None, f"Function {name} is not available!" - args = json.loads(call['arguments']) - result = func(**args) - if update: - self.function(name, result) - return result def getresponse( self , max_requests:int=1 - , strip:bool=True - , update:bool = True , timeout:int = 0 , timeinterval:int = 0 , api_key:Union[str, None]=None - , function_call:Union[None, str, Dict]=None - , evalfunc:bool=False , model:str = "gpt-3.5-turbo" + , update:bool = True , **options)->Resp: """Get the API response Args: max_requests (int, optional): maximum number of requests to make. Defaults to 1. - strip (bool, optional): whether to strip the prompt message. Defaults to True. - update (bool, optional): whether to update the chat log. Defaults to True. timeout (int, optional): timeout for the API call. Defaults to 0(no timeout). timeinterval (int, optional): time interval between two API calls. Defaults to 0. model (str, optional): model to use. Defaults to "gpt-3.5-turbo". 
+ update (bool, optional): whether to update the chat log. Defaults to True. **options : options inherited from the `openai.ChatCompletion.create` function. Returns: Resp: API response """ - # Two optional keys: "api_key" and "function_call" if api_key is None: api_key = self.api_key assert api_key is not None, "API key is not set!" - if function_call is None: - function_call = self.function_call - # functions(Json schemas) - functions = self.functions - if function_call is not None: - assert functions is not None, "`function_call` is only allowed when `functions` are specified." - # available functions(dict) - available_functions = self.available_functions # initialize prompt message msg = self.chat_log @@ -231,77 +105,25 @@ def getresponse( self signal.alarm(timeout) # Make the API call response = chat_completion( - api_key=api_key, messages=msg, model=model, chat_url=self.chat_url, - function_call=self.function_call, functions=self.functions, **options) + api_key=api_key, messages=msg, model=model, chat_url=self.chat_url, **options) time.sleep(random.random() * timeinterval) - resp = Resp(response, strip=strip) + resp = Resp(response) assert resp.is_valid(), "Invalid response with message: " + resp.error_message break except Exception as e: max_requests -= 1 numoftries += 1 - print(f"API call failed with message: {e}\nTry again ({numoftries})") + print(f"Try again ({numoftries}):{e}\n") finally: # Disable the alarm after execution signal.alarm(0) else: - raise Exception("Failed to get the response!\nYou can try to update the API key" - + ", increase `max_requests` or set proxy.") + raise Exception("Request failed! 
Try using `debug_log()` to find out the problem " + + "or increase the `max_requests`.") if update: # update the chat log - # The following is equivalent to `self.chat_log.append(resp.message))` - if not resp.is_function_call(): - self.assistant(resp.content) - else: # function call - self.assistant(resp.content, call=resp.function_call) - if evalfunc: - assert available_functions is not None, "Please specify the available functions!" - self.eval_func(resp.function_call, update=True) + self.assistant(resp.content) return resp - - def get_usage_status(self, recent:int=10, duration:int=99): - """Get the usage status - - Args: - recent (int, optional): number of the usage of recent days. Defaults to 10. - duration (int, optional): duration of the usage. Defaults to 99. - - Returns: - str: usage status - """ - storage, usage, dailyusage = usage_status(self.api_key, duration=duration) - status = [storage, usage, storage-usage, {}] - if recent <= 0 or len(dailyusage) == 0: # no need to print the usage of recent days - return status - recent = min(recent, len(dailyusage)) # number of recent days - dailyusage = dailyusage[-recent:] - for day in dailyusage: - date = datetime.datetime.fromtimestamp(day.get("timestamp")).strftime("%Y-%m-%d") - line_items = day.get("line_items") - cost = sum([item.get("cost") for item in line_items]) / 100 - status[-1].update({date: cost}) - return status - def show_usage_status(self, thismonth:bool=True, recent:int=10, duration:int=99): - """Show the usage status - - Args: - thismonth (bool): - recent (int, optional): number of the usage of recent days. Defaults to 10. - duration (int, optional): duration of the usage. Defaults to 99. 
- """ - if thismonth: - duration = datetime.datetime.now().day - 1 - storage, usage, rem, recent_usage = self.get_usage_status(recent=recent, duration=duration) - print(f"Amount: {storage:.4f}$") - if thismonth: - print(f"Usage(this month): {usage:.4f}$") - print(f"Remaining(this month): {rem:.4f}$") - if len(recent_usage) > 0: - usage = sum(recent_usage.values()) - print(f"Usage(the last {len(recent_usage)} days): {usage:.4f}$") - for date, cost in recent_usage.items(): - print(f"{date}: {cost:.4f}$") - def get_valid_models(self, gpt_only:bool=True)->List[str]: """Get the valid models @@ -313,53 +135,23 @@ def get_valid_models(self, gpt_only:bool=True)->List[str]: """ return valid_models(self.api_key, gpt_only=gpt_only) - def add( self - , role:str - , content:Union[None, str]=None - , function_call:Union[None, str]=None - , name:Union[None, str]=None): - """Add a role message to the chat log - - This is the wrapper of the `message` part of the `openai.ChatCompletion.create` function. - - Args: - role (str): role of the message, should be 'user', 'assistant', 'system' or 'function'. - content (object): content of the message, it can be None, str, or object of function response. - function_call (Union[None, dict], optional): function call, arguments of the function. Defaults to None. - name (Union[None, str], optional): name of the function. Defaults to None. - """ - assert role in ['user', 'assistant', 'system', 'function'], "role should be 'user', 'assistant', 'system' or 'function'!" - # same as before - if role == 'system' or role == 'user' \ - or (role == 'assistant' and function_call is None): - assert content is not None, "Invalid format: The content should not be None!" 
- self._chat_log.append({"role": role, "content": content}) - # assistant with function call - elif role == 'assistant': - if content is not None: - warnings.warn("The content will be ignored when `function_call` is specified!") - self._chat_log.append({"role": role, "content": content, "function_call": function_call}) - # function call - elif role == 'function': - assert name is not None, "Invalid format: The name of fucntion should be specified!" - self._chat_log.append({"role": role, "name": name, "content": content}) + def add(self, role:str, msg:str): + """Add a message to the chat log""" + assert role in ['user', 'assistant', 'system'], "role should be 'user', 'assistant' or 'system'" + self._chat_log.append({"role": role, "content": msg}) return self - def user(self, content:str): + def user(self, msg:str): """User message""" - return self.add('user', content) + return self.add('user', msg) - def assistant(self, content:Union[None, str], call:Union[None, str]=None): + def assistant(self, msg:str): """Assistant message""" - return self.add('assistant', content, function_call=call) + return self.add('assistant', msg) - def system(self, content:str): + def system(self, msg:str): """System message""" - return self.add('system', content) - - def function(self, funcname:str, funcresp:str): - """Function call""" - return self.add('function', name=funcname, content=funcresp) + return self.add('system', msg) def clear(self): """Clear the chat log""" @@ -368,45 +160,45 @@ def clear(self): def copy(self): """Copy the chat log""" return Chat(self._chat_log) + + def last(self): + """Get the last message""" + return self._chat_log[-1]['content'] - def save(self, path:str, mode:str='a', end:str='\n', chatid:int=-1): + def save(self, path:str, mode:str='a'): """ - Save the chat log to a file + Save the chat log to a file. Each line is a json string. Args: path (str): path to the file mode (str, optional): mode to open the file. Defaults to 'a'. 
- end (str, optional): end of each line. Defaults to '\n'. - chatid (int, optional): chat id. Defaults to -1. """ - assert mode in ['a', 'w'], "mode should be 'a' or 'w'" + assert mode in ['a', 'w'], "saving mode should be 'a' or 'w'" data = self.chat_log - if chatid >= 0: - data = {'chatid': chatid, 'chatlog': data} with open(path, mode, encoding='utf-8') as f: - f.write(json.dumps(data, ensure_ascii=False) + end) + f.write(json.dumps(data, ensure_ascii=False) + '\n') return - + + def savewithid(self, path:str, chatid:int, mode:str='a'): + """Save the chat log with chat id. Each line is a json string. + + Args: + path (str): path to the file + chatid (int): chat id + mode (str, optional): mode to open the file. Defaults to 'a'. + """ + assert mode in ['a', 'w'], "saving mode should be 'a' or 'w'" + data = {"chatid": chatid, "chatlog": self.chat_log} + with open(path, mode, encoding='utf-8') as f: + f.write(json.dumps(data, ensure_ascii=False) + '\n') + return + def print_log(self, sep: Union[str, None]=None): """Print the chat log""" if sep is None: sep = '\n' + '-'*15 + '\n' - for data in self._chat_log: - role = data['role'] - if role == 'user' or role == 'system'\ - or (role == 'assistant' and 'function_call' not in data): - print(sep, role, sep, end='') - print(data['content']) - elif role == 'assistant': - print(sep, "assistant(with function call)", sep, end='') - print(data['function_call']) - elif role == 'function': - print(sep, "function", sep, end='') - print(data['name']) - print(data['content']) - else: - print("invalid role!") - return + for d in self._chat_log: + print(sep, d['role'], sep, d['content']) def pop(self, ind:int=-1): """Pop the last message""" @@ -429,4 +221,4 @@ def __eq__(self, chat: object) -> bool: def __getitem__(self, index): """Get the message at index""" - return self._chat_log[index] + return self._chat_log[index] \ No newline at end of file diff --git a/openai_api_call/checkpoint.py b/openai_api_call/checkpoint.py index 
5d03abc..3788539 100644 --- a/openai_api_call/checkpoint.py +++ b/openai_api_call/checkpoint.py @@ -4,16 +4,12 @@ import tqdm def load_chats( checkpoint:str - , sep:str='\n' - , last_message_only:bool=False - , chat_log_only:bool=False): + , withid:bool=False): """Load chats from a checkpoint file Args: checkpoint (str): path to the checkpoint file - sep (str, optional): separator of chats. Defaults to '\n'. - last_message_only (bool, optional): whether to return the last message of each chat. Defaults to False. - chat_log_only (bool, optional): whether to return the chat log only. Defaults to False. + withid (bool, optional): whether the checkpoint file contains chatid. Defaults to False. Returns: list: chats @@ -24,49 +20,39 @@ def load_chats( checkpoint:str return [] # load chats from the checkpoint file with open(checkpoint, 'r', encoding='utf-8') as f: - txts = f.read().strip().split(sep) + txts = f.read().strip().split('\n') + ## empty file if len(txts) == 1 and txts[0] == '': return [] - chats = [json.loads(txt) for txt in txts] - if 'chatid' in chats[0]: # chats with chatid - chat_size = chats[-1]['chatid'] - chatlogs = [None] * chat_size - for chat in chats: - idx = chat['chatid'] - if idx >= chat_size: + + # get the chatlogs + logs = [json.loads(txt) for txt in txts] + ## chatlogs with chatid + if withid: + chat_size, chatlogs = 1, [None] + for log in logs: + idx = log['chatid'] + if idx >= chat_size: # extend chatlogs chatlogs.extend([None] * (idx - chat_size + 1)) chat_size = idx + 1 - chatlogs[idx] = chat['chatlog'] - else: # chats without chatid - chatlogs = chats - # last message of chats only - if last_message_only: - data = [None] * len(chatlogs) - for i, chat in enumerate(chatlogs): - if chat is None: continue - data[i] = chat[-1]['content'] if len(chat) else "" - return data - # chat log only - if chat_log_only: return chatlogs + chatlogs[idx] = log['chatlog'] + else: ## logs without chatid + chatlogs = logs # return Chat class return 
[Chat(chatlog) if chatlog is not None else None for chatlog in chatlogs] def process_chats( data:List[Any] , data2chat:Callable[[Any], Chat] , checkpoint:str - , sep:str='\n' - , last_message_only:bool=False , clearfile:bool=False - , notebook:bool=False): - """Process chats and save to a checkpoint file + , isjupyter:bool=False): + """Process chats and save to a checkpoint file (non-asyncio version) Args: data (List[Any]): data to be processed data2chat (Callable[[Any], Chat]): function to convert data to Chat checkpoint (str): path to the checkpoint file - sep (str, optional): separator of chats. Defaults to '\n'. - last_message_only (bool, optional): whether to return the last message of each chat. Defaults to False. clearfile (bool, optional): whether to clear the checkpoint file. Defaults to False. - notebook (bool, optional): whether to use tqdm in Jupiter Notebook. Defaults to False. + isjupyter (bool, optional): whether to use tqdm in Jupyter Notebook. Defaults to False. Returns: list: chats or last messages of chats @@ -75,20 +61,17 @@ # Warning: You are about to delete the checkpoint file os.system(f"rm {checkpoint}") ## load chats from the checkpoint file - chats = load_chats(checkpoint, sep=sep) + chats = load_chats(checkpoint) if len(chats) > len(data): - warnings.warn(f"checkpoint file {checkpoint} has more chats than the messages") - chats = chats[:len(data)] - return [chat[-1]['content'] for chat in chats] if last_message_only else chats - + warnings.warn(f"checkpoint file {checkpoint} has more chats than the data to be processed") + return chats[:len(data)] + chats.extend([None] * (len(data) - len(chats))) ## process chats - tq = tqdm.tqdm if not notebook else tqdm.notebook.tqdm + tq = tqdm.tqdm if not isjupyter else tqdm.notebook.tqdm for i in tq(range(len(data))): if chats[i] is not None: continue chat = data2chat(data[i]) - chat.save(checkpoint, mode='a', end=sep) + chat.save(checkpoint, mode='a') chats[i] = chat 
- if last_message_only: - return [chat[-1]['content'] for chat in chats] return chats \ No newline at end of file diff --git a/openai_api_call/proxy.py b/openai_api_call/proxy.py index 651ef75..8424532 100644 --- a/openai_api_call/proxy.py +++ b/openai_api_call/proxy.py @@ -39,8 +39,8 @@ def proxy_status(): print(f"https_proxy:\t{https}") def proxy_test(url:str="www.facebook.com"): - url = url.replace("http://", "").replace("https://", "") - if os.system("curl -I https://"+url) != 0: - print("Https: Curl to "+url+" failed!") - if os.system("curl -I http://"+url) != 0: - print("Http: Curl to "+url+" failed!") \ No newline at end of file + rawurl = url.replace("http://", "").replace("https://", "") + if os.system("curl -I https://" + rawurl) != 0: + print("Https: Curl to " + rawurl + " failed!") + if os.system("curl -I http://" + rawurl) != 0: + print("Http: Curl to " + rawurl + " failed!") \ No newline at end of file diff --git a/openai_api_call/request.py b/openai_api_call/request.py index 08c05cd..6481d7f 100644 --- a/openai_api_call/request.py +++ b/openai_api_call/request.py @@ -2,11 +2,9 @@ from typing import List, Dict, Union import requests, json -import datetime, os, warnings +import os from urllib.parse import urlparse, urlunparse -url = None # Deprecated - # Read base_url from the environment if os.environ.get('OPENAI_BASE_URL') is not None: base_url = os.environ.get("OPENAI_BASE_URL") @@ -50,12 +48,12 @@ def normalize_url(url: str) -> str: parsed_url = parsed_url._replace(scheme="https") return urlunparse(parsed_url).replace("///", "//") +base_url = normalize_url(base_url) # normalize base_url + def chat_completion( api_key:str , messages:List[Dict] , model:str , chat_url:Union[str, None]=None - , function_call:Union[str, None]=None - , functions:Union[List[str], None]=None , **options) -> Dict: """Chat completion API call @@ -64,8 +62,6 @@ def chat_completion( api_key:str messages (List[Dict]): prompt message model (str): model to use chat_url 
(Union[str, None], optional): chat url. Defaults to None. - function_call (Union[str, None], optional): function call. Defaults to None. - functions (Union[List[str], None], optional): functions. Defaults to None. **options : options inherited from the `openai.ChatCompletion.create` function. Returns: @@ -76,10 +72,6 @@ def chat_completion( api_key:str "model": model, "messages": messages } - if function_call is not None: - payload.update({"function_call": function_call}) - if functions is not None: - payload.update({"functions": functions}) # inherit options payload.update(options) # request headers @@ -88,12 +80,8 @@ def chat_completion( api_key:str 'Authorization': 'Bearer ' + api_key } # initialize chat url - if not chat_url: - if url is not None: # deprecated warning - warnings.warn("The `url` parameter is deprecated. Please use `base_url` instead.", DeprecationWarning) - chat_url = url - else: - chat_url = os.path.join(base_url, "v1/chat/completions") + if chat_url is None: + chat_url = os.path.join(base_url, "v1/chat/completions") chat_url = normalize_url(chat_url) # get response @@ -102,47 +90,6 @@ def chat_completion( api_key:str raise Exception(response.text) return response.json() -def usage_status(api_key:str, duration:int=99, url:Union[str, None]=None): - """Get usage status - - Args: - api_key (str): API key - duration (int, optional): duration to check. Defaults to 99, which is the maximum duration. - url (Union[str, None], optional): base url. Defaults to None. 
- - Returns: - Tuple[float, float, List[float]]: total storage, total usage, daily costs - """ - headers = { - "Authorization": "Bearer " + api_key, - "Content-Type": "application/json" - } - if url is None: url = base_url - url = normalize_url(base_url) - # Get storage limit - subscription_url = os.path.join(url, "v1/dashboard/billing/subscription") - subscription_response = requests.get(subscription_url, headers=headers) - if subscription_response.status_code == 200: - data = subscription_response.json() - total_storage = data.get("hard_limit_usd") - else: - raise Exception(subscription_response.text) - # start_date - today = datetime.datetime.now() - start_date = (today - datetime.timedelta(days=duration)).strftime("%Y-%m-%d") - # end_date = today + 1 - end_date = (today + datetime.timedelta(days=1)).strftime("%Y-%m-%d") - billing_url = os.path.join(url, f"v1/dashboard/billing/usage?start_date={start_date}&end_date={end_date}") - billing_response = requests.get(billing_url, headers=headers) - # Get usage status - if billing_response.status_code == 200: - data = billing_response.json() - total_usage = data.get("total_usage") / 100 - daily_costs = data.get("daily_costs") - return total_storage, total_usage, daily_costs - else: - raise Exception(billing_response.text) - def valid_models(api_key:str, gpt_only:bool=True, url:Union[str, None]=None): """Get valid models Request url: https://api.openai.com/v1/models diff --git a/openai_api_call/response.py b/openai_api_call/response.py index b2e0154..2b61aab 100644 --- a/openai_api_call/response.py +++ b/openai_api_call/response.py @@ -1,25 +1,18 @@ # Response class for OpenAI API call from typing import Dict -import json class Resp(): - def __init__(self, response:Dict, strip:bool=True) -> None: + def __init__(self, response:Dict) -> None: self.response = response - if strip and self.is_valid() and self.content is not None: - self._strip_content() - def _strip_content(self): - """Strip the content""" - 
self.response['choices'][0]['message']['content'] = \ - self.content.strip() + def is_valid(self): + """Check if the response is an error""" + return 'error' not in self.response def __repr__(self) -> str: - if self.finish_reason == "function_call": - return f"`Resp(call)`: {self.function_call}" - else: - return f"`Resp`: {self.content}" + return f"`Resp`: {self.content}" def __str__(self) -> str: return self.content @@ -28,10 +21,6 @@ def __str__(self) -> str: def id(self): return self.response['id'] - @property - def object(self): - return self.response['object'] - @property def model(self): return self.response['model'] @@ -60,11 +49,6 @@ def completion_tokens(self): """Number of tokens of the response""" return self.usage['completion_tokens'] - @property - def finish_reason(self): - """Finish reason""" - return self.response['choices'][0]['finish_reason'] - @property def message(self): """Message""" @@ -74,26 +58,10 @@ def message(self): def content(self): """Content of the response""" return self.message['content'] - - @property - def function_call(self): - """Function call""" - if self.is_function_call: - args = {} - args['name'] = self.message['function_call']['name'] - args['arguments'] = self.message['function_call']['arguments'] - return args - else: - return None - - def is_function_call(self): - """Check if the response is a function call""" - return self.finish_reason == 'function_call' and \ - self.content is None - def is_valid(self): - """Check if the response is an error""" - return 'error' not in self.response and 'choices' in self.response + @property + def object(self): + return self.response['object'] @property def error(self): @@ -120,4 +88,7 @@ def error_code(self): """Error code""" return self.error['code'] - \ No newline at end of file + @property + def finish_reason(self): + """Finish reason""" + return self.response['choices'][0]['finish_reason'] \ No newline at end of file diff --git a/setup.py b/setup.py index 9d2a11e..d9d5a98 100644 
--- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ setup( author="Rex Wang", author_email='1073853456@qq.com', - python_requires='>=3.8', + python_requires='>=3.7', classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', diff --git a/tests/__init__.py b/tests/__init__.py index b57ff7c..1dd40e0 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,26 +1,13 @@ """Unit test package for openai_api_call.""" -# import pdb -import responses -import requests +from openai_api_call import Chat -mock_resp = { - "choices": [{ - "message": { - "content": "Response from GPT-3" - } - }], - "usage": { - "total_tokens": 100 - } -} - -@responses.activate def test_simple(): - responses.add(responses.GET, 'https://api.openai.com/v1/chat/completions', - json=mock_resp, status=200) - resp = requests.get('https://api.openai.com/v1/chat/completions') - assert resp.json() == mock_resp - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'https://api.openai.com/v1/chat/completions' - assert responses.calls[0].response.text == '{"choices": [{"message": {"content": "Response from GPT-3"}}], "usage": {"total_tokens": 100}}' \ No newline at end of file + # set api_key in the environment variable + chat = Chat() + chat.user("Hello!") + chat.getresponse() + chat.print_log() + assert chat.chat_log[0] == {"role": "user", "content": "Hello!"} + assert len(chat.chat_log) == 2 + \ No newline at end of file diff --git a/tests/test_chat.py b/tests/test_chat.py deleted file mode 100644 index bee0de7..0000000 --- a/tests/test_chat.py +++ /dev/null @@ -1,159 +0,0 @@ -# Test for the Chat class -from openai_api_call import Chat, Resp -import openai_api_call, json - -# test for the chat class -def test_chat(): - # initialize - chat = Chat() - assert chat.chat_log == [] - chat = Chat([{"role": "user", "content": "hello!"}]) - assert chat.chat_log == [{"role": "user", "content": "hello!"}] - chat = Chat("hello!") - assert chat.chat_log == [{"role": 
"user", "content": "hello!"}] - - # general usage - chat = Chat() - chat.user("hello!") - assert chat.chat_log == [{"role": "user", "content": "hello!"}] - chat.assistant("Hello, how can I assist you today?") - assert chat.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"} - ] - chat.system("I am a system message") - assert chat.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"}, - {"role": "system", "content": "I am a system message"} - ] - chat.clear() - assert chat.chat_log == [] - - # user/assistant/system - chat.user("hello!") - assert chat.chat_log == [{"role": "user", "content": "hello!"}] - chat.assistant("Hello, how can I assist you today?") - assert chat.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"} - ] - chat.system("I am a system message") - assert chat.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"}, - {"role": "system", "content": "I am a system message"} - ] - # get index - assert chat[0]['content'] == "hello!" - assert chat[1]['content'] == "Hello, how can I assist you today?" 
- assert chat[2]['content'] == "I am a system message" - assert chat[-1]['content'] == "I am a system message" - # pop/copy/clear - chat.pop() - assert chat.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"} - ] - chat.pop() - assert chat.chat_log == [ - {"role": "user", "content": "hello!"} - ] - chat.pop() - assert chat.chat_log == [] - chat.user("hello!") - assert chat.chat_log == [{"role": "user", "content": "hello!"}] - chat.assistant("Hello, how can I assist you today?") - assert chat.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"}] - chat2 = chat.copy() - assert chat2.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"}] - chat2.user("hello!") - assert chat2.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"}, - {"role": "user", "content": "hello!"}] - assert chat.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"}] - chat2.pop() - assert chat2.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"}] - assert chat.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"}] - chat2.clear() - assert chat2.chat_log == [] - assert chat.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"}] - - # save json - chat.save("test.log", mode="w") - with open("test.log", "r") as f: - data = json.loads(f.read()) - chat2 = Chat(data) - assert chat2.chat_log == chat.chat_log - - # print log - chat.print_log() - chat.print_log(sep='\n') - assert True - print(chat) - repr(chat) - assert 
True - - # len - assert len(chat) == 2 - chat.pop() - assert len(chat) == 1 - -# test for long chatting -response = { - "id":"chatcmpl-6wXDUIbYzNkmqSF9UnjPuKLP1hHls", - "object":"chat.completion", - "created":1679408728, - "model":"gpt-3.5-turbo-0301", - "usage":{ - "prompt_tokens":8, - "completion_tokens":10, - "total_tokens":18 - }, - "choices":[ - { - "message":{ - "role":"assistant", - "content":"Hello, how can I assist you today?" - }, - "finish_reason":"stop", - "index":0 - } - ] -} - -def test_long_talk(): - resp = Resp(response=response) - msg = "hello!" - chat = Chat(msg) - chat.assistant(resp.content) - assert chat.chat_log == [ - {"role": "user", "content": "hello!"}, - {"role": "assistant", "content": "Hello, how can I assist you today?"}] - -def test_with_template(): - chat = Chat("hello!") - assert chat.chat_log == [{"role": "user", "content": "hello!"}] - openai_api_call.default_prompt = lambda msg: [ - {"role": "system", "content": "I am a system message"}, - {"role": "user", "content": msg}] - chat = Chat("hello!") - assert chat.chat_log == [ - {"role": "system", "content": "I am a system message"}, - {"role": "user", "content": "hello!"}] - openai_api_call.default_prompt = None - chat = Chat("hello!") - assert chat.chat_log == [{"role": "user", "content": "hello!"}] diff --git a/tests/test_checkpoint.py b/tests/test_checkpoint.py index 61ba235..e0ec209 100644 --- a/tests/test_checkpoint.py +++ b/tests/test_checkpoint.py @@ -1,73 +1,60 @@ import os, responses -from openai_api_call import Chat, load_chats, process_chats +from openai_api_call import Chat, load_chats, process_chats, api_key def test_with_checkpoint(): # save chats without chatid chat = Chat() - checkpath = "tmp.log" + checkpath = "tmp.jsonl" chat.save(checkpath, mode="w") chat = Chat("hello!") chat.save(checkpath) # append chat.assistant("你好, how can I assist you today?") chat.save(checkpath) # append ## load chats - chat_logs = load_chats(checkpath, chat_log_only=True) - assert 
chat_logs == [[], [{'role': 'user', 'content': 'hello!'}], - [{'role': 'user', 'content': 'hello!'}, - {'role': 'assistant', 'content': '你好, how can I assist you today?'}]] - chat_msgs = load_chats(checkpath, last_message_only=True) - assert chat_msgs == ["", "hello!", "你好, how can I assist you today?"] chats = load_chats(checkpath) + chat_logs = [ + [], + [{"role": "user", "content": "hello!"}], + [{"role": "user", "content": "hello!"}, {"role": "assistant", "content": "你好, how can I assist you today?"}], + ] assert chats == [Chat(log) for log in chat_logs] # save chats with chatid chat = Chat() - checkpath = "tmp.log" - chat.save(checkpath, mode="w", chatid=0) + checkpath = "tmp_withid.jsonl" + chat.savewithid(checkpath, mode="w", chatid=0) chat = Chat("hello!") - chat.save(checkpath, chatid=3) + chat.savewithid(checkpath, chatid=3) chat.assistant("你好, how can I assist you today?") - chat.save(checkpath, chatid=2) + chat.savewithid(checkpath, chatid=2) ## load chats - chat_logs = load_chats(checkpath, chat_log_only=True) - assert chat_logs == [[], None, - [{'role': 'user', 'content': 'hello!'}, {'role': 'assistant', 'content': '你好, how can I assist you today?'}], - [{'role': 'user', 'content': 'hello!'}]] - chat_msgs = load_chats(checkpath, last_message_only=True) - assert chat_msgs == ["", None, "你好, how can I assist you today?", "hello!"] - chats = load_chats(checkpath) + chats = load_chats(checkpath, withid=True) + chat_logs = [ + [], + None, + [{"role": "user", "content": "hello!"}, {"role": "assistant", "content": "你好, how can I assist you today?"}], + [{"role": "user", "content": "hello!"}], + ] assert chats == [Chat(log) if log is not None else None for log in chat_logs] def test_process_chats(): - api_key = os.environ.get("OPENAI_API_KEY") - # assert api_key is not None # TODO: Add the key to the environment variables def msg2chat(msg): - chat = Chat(api_key=api_key) + chat = Chat() chat.system("You are a helpful translator for numbers.") 
chat.user(f"Please translate the digit to Roman numerals: {msg}") # chat.getresponse() chat.assistant("III") return chat - checkpath = "tmp.log" - # first call - msgs = ["1", "2", "3"] - chats = process_chats(msgs, msg2chat, checkpath, clearfile=True) + checkpath = "tmp_process.jsonl" + # process part of the data + msgs = [str(i) for i in range(6)] + chats = process_chats(msgs[:3], msg2chat, checkpath, clearfile=True) for chat in chats: print(chat[-1]) assert len(chats) == 3 assert all([len(chat) == 3 for chat in chats]) - # continue call - msgs = msgs + ["4", "5", "6"] - continue_chats = process_chats(msgs, msg2chat, checkpath, clearfile=False) + # continue processing the rest of the data + continue_chats = process_chats(msgs, msg2chat, checkpath) assert len(continue_chats) == 6 assert all(c1 == c2 for c1, c2 in zip(chats, continue_chats[:3])) - assert all([len(chat) == 3 for chat in continue_chats]) - - # get the last message only - last_msgs = process_chats(msgs, msg2chat, checkpath, clearfile=False, last_message_only=True) - assert last_msgs == [chat[-1]['content'] for chat in continue_chats] - last_msgs = process_chats(msgs[:3], msg2chat, checkpath, clearfile=False, last_message_only=True) - assert last_msgs == [chat[-1]['content'] for chat in continue_chats[:3]] - - - \ No newline at end of file + assert all([len(chat) == 3 for chat in continue_chats]) \ No newline at end of file diff --git a/tests/test_openai_api_call.py b/tests/test_openai_api_call.py index 992e5e1..db8421a 100644 --- a/tests/test_openai_api_call.py +++ b/tests/test_openai_api_call.py @@ -2,24 +2,10 @@ """Tests for `openai_api_call` package.""" -import pytest from click.testing import CliRunner +import openai_api_call, json from openai_api_call import cli - -@pytest.fixture -def response(): - """Sample pytest fixture. 
- - See more at: http://doc.pytest.org/en/latest/fixture.html - """ - # import requests - # return requests.get('https://github.com/audreyr/cookiecutter-pypackage') - - -def test_content(response): - """Sample pytest test function with the pytest fixture as an argument.""" - # from bs4 import BeautifulSoup - # assert 'GitHub' in BeautifulSoup(response.content).title.string +from openai_api_call import Chat, Resp def test_command_line_interface(): @@ -31,3 +17,197 @@ def test_command_line_interface(): help_result = runner.invoke(cli.main, ['--help']) assert help_result.exit_code == 0 assert '--help Show this message and exit.' in help_result.output + +# test for the chat class +def test_chat(): + # initialize + chat = Chat() + assert chat.chat_log == [] + chat = Chat([{"role": "user", "content": "hello!"}]) + assert chat.chat_log == [{"role": "user", "content": "hello!"}] + chat = Chat("hello!") + assert chat.chat_log == [{"role": "user", "content": "hello!"}] + + # general usage + chat = Chat() + chat.user("hello!") + assert chat.chat_log == [{"role": "user", "content": "hello!"}] + chat.assistant("Hello, how can I assist you today?") + assert chat.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"} + ] + chat.system("I am a system message") + assert chat.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"}, + {"role": "system", "content": "I am a system message"} + ] + chat.clear() + assert chat.chat_log == [] + + # user/assistant/system + chat.user("hello!") + assert chat.chat_log == [{"role": "user", "content": "hello!"}] + chat.assistant("Hello, how can I assist you today?") + assert chat.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"} + ] + chat.system("I am a system message") + assert chat.chat_log == [ + {"role": "user", "content": 
"hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"}, + {"role": "system", "content": "I am a system message"} + ] + # get index + assert all(chat[i]== chat.chat_log[i] for i in range(len(chat))) + # pop/copy/clear + chat.pop() + assert chat.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"} + ] + chat.pop() + assert chat.chat_log == [ + {"role": "user", "content": "hello!"} + ] + chat.pop() + assert chat.chat_log == [] + chat.user("hello!") + assert chat.chat_log == [{"role": "user", "content": "hello!"}] + chat.assistant("Hello, how can I assist you today?") + assert chat.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"}] + chat2 = chat.copy() + assert chat2.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"}] + chat2.user("hello!") + assert chat2.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"}, + {"role": "user", "content": "hello!"}] + assert chat.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"}] + chat2.pop() + assert chat2.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"}] + assert chat.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"}] + chat2.clear() + assert chat2.chat_log == [] + assert chat.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"}] + + # save json + chat.save("test.log", mode="w") + with open("test.log", "r") as f: + data = json.loads(f.read()) + chat2 = Chat(data) + assert chat2.chat_log == 
chat.chat_log + + # print log + chat.print_log() + chat.print_log(sep='\n') + assert True + print(chat) + repr(chat) + assert True + + # len + assert len(chat) == 2 + chat.pop() + assert len(chat) == 1 + +# test for long chatting +response = { + "id":"chatcmpl-6wXDUIbYzNkmqSF9UnjPuKLP1hHls", + "object":"chat.completion", + "created":1679408728, + "model":"gpt-3.5-turbo-0301", + "usage":{ + "prompt_tokens":8, + "completion_tokens":10, + "total_tokens":18 + }, + "choices":[ + { + "message":{ + "role":"assistant", + "content":"Hello, how can I assist you today?" + }, + "finish_reason":"stop", + "index":0 + } + ] +} + +err_api_key_resp = { + "error": { + "message": "Incorrect API key provided: sk-132. You can find your API key at https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": None, + "code": "invalid_api_key" + } +} + +def test_long_talk(): + resp = Resp(response=response) + msg = "hello!" + chat = Chat(msg) + chat.assistant(resp.content) + assert chat.chat_log == [ + {"role": "user", "content": "hello!"}, + {"role": "assistant", "content": "Hello, how can I assist you today?"}] + +def test_with_template(): + chat = Chat("hello!") + assert chat.chat_log == [{"role": "user", "content": "hello!"}] + openai_api_call.default_prompt = lambda msg: [ + {"role": "system", "content": "I am a system message"}, + {"role": "user", "content": msg}] + chat = Chat("hello!") + assert chat.chat_log == [ + {"role": "system", "content": "I am a system message"}, + {"role": "user", "content": "hello!"}] + openai_api_call.default_prompt = None + chat = Chat("hello!") + assert chat.chat_log == [{"role": "user", "content": "hello!"}] + +def test_error_message(): + resp = Resp(response=err_api_key_resp) + assert resp.error_message == "Incorrect API key provided: sk-132. You can find your API key at https://platform.openai.com/account/api-keys." 
+ assert resp.error_type == "invalid_request_error" + assert resp.error_param == None + assert resp.error_code == "invalid_api_key" + assert resp.is_valid() == False + + +def test_usage(): + resp = Resp(response=response) + assert resp.total_tokens == 18 + assert resp.prompt_tokens == 8 + assert resp.completion_tokens == 10 + +def test_content(): + resp = Resp(response=response) + assert resp.content == "Hello, how can I assist you today?" + +def test_valid(): + resp = Resp(response=response) + assert resp.id == "chatcmpl-6wXDUIbYzNkmqSF9UnjPuKLP1hHls" + assert resp.model == "gpt-3.5-turbo-0301" + assert resp.created == 1679408728 + assert resp.is_valid() == True + +def test_show(): + resp = Resp(response=response) + assert str(resp) == resp.content + assert repr(resp) == f"`Resp`: {resp.content}" + \ No newline at end of file diff --git a/tests/test_request.py b/tests/test_request.py index 4bb269d..3b1da5d 100644 --- a/tests/test_request.py +++ b/tests/test_request.py @@ -1,266 +1,34 @@ -import responses, json -from openai_api_call import chat_completion, usage_status, debug_log, Chat +from openai_api_call import debug_log, Resp from openai_api_call.request import normalize_url, is_valid_url, valid_models import openai_api_call +openai_api_call.api_key="free-123" +openai_api_call.request.base_url = "api.wzhecnu.cn" +api_key = openai_api_call.api_key -mock_resp = { - "id":"chatcmpl-6wXDUIbYzNkmqSF9UnjPuKLP1hHls", - "object":"chat.completion", - "created":1679408728, - "model":"gpt-3.5-turbo-0301", - "usage":{ - "prompt_tokens":8, - "completion_tokens":10, - "total_tokens":18 - }, - "choices":[ - { - "message":{ - "role":"assistant", - "content":"Hello, how can I assist you today?" 
- }, - "finish_reason":"stop", - "index":0 - } - ] -} - -@responses.activate -def test_chat_completion(): - responses.add(responses.POST, 'https://api.openai.com/v1/chat/completions', - json=mock_resp, status=200) - resp = chat_completion(api_key="sk-123", messages=[{"role": "user", "content": "hello"}], model="gpt-3.5-turbo") - assert resp == mock_resp - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'https://api.openai.com/v1/chat/completions' - assert responses.calls[0].response.text == '{"id": "chatcmpl-6wXDUIbYzNkmqSF9UnjPuKLP1hHls", "object": "chat.completion", "created": 1679408728, "model": "gpt-3.5-turbo-0301", "usage": {"prompt_tokens": 8, "completion_tokens": 10, "total_tokens": 18}, "choices": [{"message": {"role": "assistant", "content": "Hello, how can I assist you today?"}, "finish_reason": "stop", "index": 0}]}' - -# test for usage status -mock_usage = { - "object": "billing_subscription", - "has_payment_method": False, - "canceled": False, - "canceled_at": None, - "delinquent": None, - "access_until": 1690848000, - "soft_limit": 66667, - "hard_limit": 83334, - "system_hard_limit": 83334, - "soft_limit_usd": 4.00002, - "hard_limit_usd": 5.00004, - "system_hard_limit_usd": 5.00004, - "plan": { - "title": "Explore", - "id": "free" - }, - "account_name": "apafa renor", - "po_number": None, - "billing_email": None, - "tax_ids": None, - "billing_address": None, - "business_address": None -} - -mock_billing = { - "object": "list", - "daily_costs": [ - { - "timestamp": 1681171200.0, - "line_items": [ - { - "name": "Instruct models", - "cost": 0.0 - }, - { - "name": "Chat models", - "cost": 106.619 - }, - { - "name": "GPT-4", - "cost": 0.0 - }, - { - "name": "Fine-tuned models", - "cost": 0.0 - }, - { - "name": "Embedding models", - "cost": 0.0 - }, - { - "name": "Image models", - "cost": 0.0 - }, - { - "name": "Audio models", - "cost": 0.0 - } - ] - } - ], - "total_usage": 106.619 -} - -@responses.activate -def 
test_usage_status(): - responses.add(responses.GET, 'https://api.openai.com/v1/dashboard/billing/subscription', - json=mock_usage, status=200) - responses.add(responses.GET, 'https://api.openai.com/v1/dashboard/billing/usage', - json=mock_billing, status=200) - storage, usage, daily = usage_status(api_key="sk-123") - assert storage == 5.00004 - assert usage == 106.619 / 100 - assert len(daily) == 1 - assert daily[0]["timestamp"] == 1681171200.0 - assert sum([item["cost"] for item in daily[0]["line_items"]]) == 106.619 - -# test for valid models response -with open("tests/assets/model_response.json", "r") as f: - valid_models_response = json.load(f) - -@responses.activate def test_valid_models(): - openai_api_call.api_key = "sk-123" - responses.add(responses.GET, 'https://api.openai.com/v1/models', - json=valid_models_response, status=200) - models = valid_models(api_key="sk-123", gpt_only=False) - assert len(models) == 53 - models = valid_models(api_key="sk-123", gpt_only=True) - assert len(models) == 5 - assert models == ['gpt-3.5-turbo-0613', 'gpt-3.5-turbo', - 'gpt-3.5-turbo-0301', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-16k'] + models = valid_models(api_key=api_key, gpt_only=False) + assert len(models) >= 1 + models = valid_models(api_key=api_key, gpt_only=True) + assert len(models) >= 1 + assert 'gpt-3.5-turbo' in models -@responses.activate def test_debug_log(): """Test the debug log""" - responses.add(responses.GET, 'https://api.openai.com/v1/models', - json=valid_models_response, status=200) - responses.add(responses.GET, 'https://api.openai.com/v1/dashboard/billing/subscription', - json=mock_usage, status=200) - responses.add(responses.GET, 'https://api.openai.com/v1/dashboard/billing/usage', - json=mock_billing, status=200) - responses.add(responses.POST, 'https://api.openai.com/v1/chat/completions', - json=mock_resp, status=200) - responses.add(responses.GET, 'https://www.google.com', status=200) - assert debug_log(net_url="https://www.google.com") + 
assert debug_log(net_url="https://www.baidu.com") or debug_log(net_url="https://www.google.com") assert not debug_log(net_url="https://baidu123.com") # invalid url -# test for function call -function_response = { - "id": "chatcmpl-7X2vF57BKsEuzaSen0wFSI30Y2mJX", - "object": "chat.completion", - "created": 1688110413, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": None, - "function_call": { - "name": "get_current_weather", - "arguments": "{\n \"location\": \"Boston, MA\"\n}" - } - }, - "finish_reason": "function_call" - } - ], - "usage": { - "prompt_tokens": 88, - "completion_tokens": 18, - "total_tokens": 106 - } -} - -functions = [{ - "name": "get_current_weather", - "description": "Get the current weather", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. San Francisco, CA", - }, - "unit": { - "type": "string", - "enum": ["celsius", "fahrenheit"], - "description": "The temperature unit to use. 
Infer this from the users location.", - }, - }, - "required": ["location"], - }, -}] - -@responses.activate -def test_functions(): - responses.add(responses.POST, 'https://api.openai.com/v1/chat/completions', - json=function_response, status=200) - chat = Chat("What is the weather in Boston?") - chat.functions = functions - resp = chat.getresponse() - assert resp.finish_reason == "function_call" - assert resp.function_call['name'] == "get_current_weather" - assert resp.function_call['arguments'] == "{\n \"location\": \"Boston, MA\"\n}" - # normalize base url def test_is_valid_url(): - assert is_valid_url("http://api.wzhecnu.cn") == True + assert is_valid_url("http://api.openai.com") == True assert is_valid_url("https://www.google.com/") == True assert is_valid_url("ftp://ftp.debian.org/debian/") == True - assert is_valid_url("api.wzhecnu.cn") == False + assert is_valid_url("api.openai.com") == False assert is_valid_url("example.com") == False def test_normalize_url(): - assert normalize_url("http://api.wzhecnu.cn/") == "http://api.wzhecnu.cn/" + assert normalize_url("http://api.openai.com") == "http://api.openai.com" assert normalize_url("https://www.google.com") == "https://www.google.com" assert normalize_url("ftp://ftp.debian.org/debian/dists/stable/main/installer-amd64/current/images/cdrom/boot.img.gz") == "ftp://ftp.debian.org/debian/dists/stable/main/installer-amd64/current/images/cdrom/boot.img.gz" - assert normalize_url("api.wzhecnu.cn") == "https://api.wzhecnu.cn" + assert normalize_url("api.openai.com") == "https://api.openai.com" assert normalize_url("example.com/foo/bar") == "https://example.com/foo/bar" -def get_current_weather(location, unit="fahrenheit"): - """Get the current weather in a given location""" - weather_info = { - "location": location, - "temperature": "72", - "unit": unit, - "forecast": ["sunny", "windy"], - } - return json.dumps(weather_info) - -@responses.activate -def test_run_conversation(): - """test case from openai""" - 
responses.add(responses.POST, 'https://api.openai.com/v1/chat/completions', - json=function_response, status=200) - # send the conversation and available functions to GPT - messages = [{"role": "user", "content": "What's the weather like in Boston?"}] - chat = Chat(messages) - chat.functions = functions - available_functions = { - "get_current_weather": get_current_weather, - } - chat.available_functions = available_functions - response = chat.getresponse() - if response.is_function_call(): - # call the function - function_name = response.function_call['name'] - fuction_to_call = available_functions[function_name] - function_args = json.loads(response.function_call['arguments']) - function_result = fuction_to_call(**function_args) - # Step 4: send the info on the function call and function response to GPT - chat.function(function_name, function_result) - response = chat.getresponse() - chat.print_log() - - ## use Chat object directly - chat = Chat() - chat.user("What's the weather like in Boston?") - chat.functions = functions - chat.function_call = 'auto' - chat.available_functions = { - "get_current_weather": get_current_weather, - } - chat.getresponse(update=True, funceval=True) - - \ No newline at end of file diff --git a/tests/test_response.py b/tests/test_response.py deleted file mode 100644 index 8c6a400..0000000 --- a/tests/test_response.py +++ /dev/null @@ -1,69 +0,0 @@ - -# test for error response -from openai_api_call import Resp - -err_api_key_resp = { - "error": { - "message": "Incorrect API key provided: sk-132. You can find your API key at https://platform.openai.com/account/api-keys.", - "type": "invalid_request_error", - "param": None, - "code": "invalid_api_key" - } -} - -def test_error_message(): - resp = Resp(response=err_api_key_resp) - assert resp.error_message == "Incorrect API key provided: sk-132. You can find your API key at https://platform.openai.com/account/api-keys." 
- assert resp.error_type == "invalid_request_error" - assert resp.error_param == None - assert resp.error_code == "invalid_api_key" - -def test_is_valid(): - resp = Resp(response=err_api_key_resp) - assert resp.is_valid() == False - -# test for valid response - -valid_response = { - "id":"chatcmpl-6wXDUIbYzNkmqSF9UnjPuKLP1hHls", - "object":"chat.completion", - "created":1679408728, - "model":"gpt-3.5-turbo-0301", - "usage":{ - "prompt_tokens":8, - "completion_tokens":10, - "total_tokens":18 - }, - "choices":[ - { - "message":{ - "role":"assistant", - "content":"Hello, how can I assist you today?" - }, - "finish_reason":"stop", - "index":0 - } - ] -} - -def test_usage(): - resp = Resp(response=valid_response) - assert resp.total_tokens == 18 - assert resp.prompt_tokens == 8 - assert resp.completion_tokens == 10 - -def test_content(): - resp = Resp(response=valid_response) - assert resp.content == "Hello, how can I assist you today?" - -def test_valid(): - resp = Resp(response=valid_response) - assert resp.id == "chatcmpl-6wXDUIbYzNkmqSF9UnjPuKLP1hHls" - assert resp.model == "gpt-3.5-turbo-0301" - assert resp.created == 1679408728 - assert resp.is_valid() == True - -def test_show(): - resp = Resp(response=valid_response) - assert str(resp) == resp.content - assert repr(resp) == f"`Resp`: {resp.content}" \ No newline at end of file