fix coverage
RexWzh committed Oct 8, 2023
1 parent c0ccc0c commit c01b969
Showing 11 changed files with 101 additions and 41 deletions.
4 changes: 3 additions & 1 deletion chattool/__init__.py
@@ -11,7 +11,7 @@
from . import request
from .tokencalc import num_tokens_from_messages, model_cost_perktoken, findcost
from .asynctool import async_chat_completion
from .functioncall import generate_json_schema
from .functioncall import generate_json_schema, exec_python_code

# read API key from the environment variable
api_key = os.environ.get('OPENAI_API_KEY')
@@ -38,8 +38,10 @@
def show_apikey():
if api_key is not None:
print(f"API key:\t{api_key}")
return True
else:
print("API key is not set!")
return False

def default_prompt(msg:str):
"""Default prompt message for the API call
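Note: show_apikey now returns a boolean in addition to printing the status. A minimal usage sketch (the placeholder key string is hypothetical; normally the key is read from the OPENAI_API_KEY environment variable):

import chattool

if not chattool.show_apikey():       # prints "API key is not set!" and returns False
    chattool.api_key = "sk-..."      # hypothetical placeholder key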
14 changes: 12 additions & 2 deletions chattool/chattool.py
@@ -290,15 +290,25 @@ def autoresponse( self
, maxturns:int=3
, capturerr:bool=True
, **options):
"""Get the response automatically"""
"""Get the response automatically
Args:
display (bool, optional): whether to display the response. Defaults to False.
maxturns (int, optional): maximum number of turns. Defaults to 3.
capturerr (bool, optional): if True, use the error message as the response. Defaults to True.
options (dict, optional): other options like `temperature`, `top_p`, etc.
Returns:
bool: whether the response is finished
"""
options['functions'], options['function_call'] = self.functions, self.function_call
show = lambda msg: print(self.display_role_content(msg))
resp = self.getresponse(**options)
if display: show(resp.message)
while self.iswaiting() and maxturns != 0:
# call api and update the result
status, msg = self.callfunction()
if not status: # update the error msg instead
if not status: # update the error msg
if not capturerr: return False
self.function(msg, 'error')
if display: show(self[-1])
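The expanded docstring documents the autoresponse arguments. A minimal usage sketch, mirroring test_auto_response in tests/test_function.py below (assumes OPENAI_API_KEY is configured and that functions and name2func are defined as in that test):

from chattool import Chat

chat = Chat("What's the weather like in Boston?")
chat.functions, chat.function_call = functions, 'auto'   # JSON schemas and call mode
chat.name2func = name2func                                # map function names to callables
finished = chat.autoresponse(display=True, maxturns=3, capturerr=True)
# finished: bool, whether the response is finished (per the Returns section above)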
21 changes: 20 additions & 1 deletion chattool/functioncall.py
@@ -73,4 +73,23 @@ def delete_dialogue_assist(chat_log:List[Dict]):
chat_log.pop(ind)
else:
ind += 1
return chat_log
return chat_log

def exec_python_code(code:str)->dict:
"""Execute the code and return the namespace or error message
Args:
code (str): code to execute
Returns:
dict: namespace or error message
"""
try:
globalspace, currentspace, newspace = globals().copy(), globals().copy(), {}
exec(code, globalspace)
for key, val in globalspace.items():
if key not in currentspace:
newspace[key] = str(val)
return newspace
except Exception as e:
return {'error': str(e)}

Codecov / codecov/patch: added lines chattool/functioncall.py#L94-L95 were not covered by tests.
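A short sketch of how the new exec_python_code helper behaves, based on the implementation above: names created by the snippet are returned with stringified values, and failures come back under an 'error' key.

from chattool import exec_python_code

print(exec_python_code("result = 12 * 12"))  # {'result': '144'}
print(exec_python_code("1/0"))               # {'error': 'division by zero'}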
9 changes: 1 addition & 8 deletions chattool/proxy.py
@@ -36,11 +36,4 @@ def proxy_status():
if https is None:
print("`https_proxy` is not set!")
else:
print(f"https_proxy:\t{https}")

def proxy_test(url:str="www.facebook.com"):
rawurl = url.replace("http://", "").replace("https://", "")
if os.system("curl -I https://" + rawurl) != 0:
print("Https: Curl to " + rawurl + " failed!")
if os.system("curl -I http://" + rawurl) != 0:
print("Http: Curl to " + rawurl + " failed!")
print(f"https_proxy:\t{https}")
3 changes: 2 additions & 1 deletion tests/test_async.py
@@ -1,7 +1,7 @@
import chattool, time, os
from chattool import Chat, process_chats, num_tokens_from_messages
from chattool.asynctool import async_chat_completion
import asyncio
import asyncio, pytest

# langs = ["Python", "Julia", "C++", "C", "Java", "JavaScript", "C#", "Go", "R", "Ruby"]
langs = ["Python", "Julia", "C++"]
@@ -37,6 +37,7 @@ async def show_resp(chat):
def test_async_process():
chkpoint = testpath + "test_async.jsonl"
t = time.time()
resp = async_chat_completion(chatlogs[:1], chkpoint, clearfile=True, ncoroutines=3)
resp = async_chat_completion(chatlogs, chkpoint, clearfile=True, ncoroutines=3)
assert all(resp)
print(f"Time elapsed: {time.time() - t:.2f}s")
24 changes: 21 additions & 3 deletions tests/test_chattool.py
@@ -5,7 +5,7 @@
from click.testing import CliRunner
import chattool, json
from chattool import cli
from chattool import Chat, Resp
from chattool import Chat, Resp, findcost
import pytest
testpath = 'tests/testfiles/'

@@ -105,7 +105,11 @@ def test_chat():
assert chat.chat_log == [
{"role": "user", "content": "hello!"},
{"role": "assistant", "content": "Hello, how can I assist you today?"}]

# deepcopy
copychat = Chat(model='gpt-4')
copychat.setfuncs([findcost])
newchat = copychat.deepcopy()
assert newchat.functions == copychat.functions
# save json
chat.save(testpath + "test.log", mode="w")
with open(testpath + "test.log", "r") as f:
@@ -218,4 +222,18 @@ def test_show():
resp = Resp(response=response)
assert str(resp) == resp.content
assert repr(resp) == "<Resp with finished reason: stop>"


def test_token():
chat = Chat()
chat.user("hello!")
chat.assistant("Hello, how can I assist you today?")
print(f'gpt-3.5-cost: {findcost(chat.model, chat.prompt_token())}')
chat.model = "gpt-3.5-turbo-16k"
print(f'gpt-3.5 16k cost: {findcost(chat.model, chat.prompt_token())}')
chat.model = "gpt-4"
print(f'gpt-4 cost: {findcost(chat.model, chat.prompt_token())}')
chat.model = "gpt-4-32k"
print(f'gpt-4 32k cost: {findcost(chat.model, chat.prompt_token())}')
chat.model = "ft:gpt-3.5-turbo-0613:personal:recipe-ner:819klqSI"
with pytest.raises(AssertionError):
findcost("test-model", 100)
2 changes: 2 additions & 0 deletions tests/test_checkpoint.py
@@ -49,12 +49,14 @@ def msg2chat(msg):
checkpath = testpath + "tmp_process.jsonl"
# process part of the data
msgs = [str(i) for i in range(6)]
chats = process_chats(msgs[:1], msg2chat, checkpath, clearfile=True)
chats = process_chats(msgs[:3], msg2chat, checkpath, clearfile=True)
for chat in chats:
print(chat[-1])
assert len(chats) == 3
assert all([len(chat) == 3 for chat in chats])
# continue processing the rest of the data
continue_chats = process_chats(msgs[:2], msg2chat, checkpath)
continue_chats = process_chats(msgs, msg2chat, checkpath)
assert len(continue_chats) == 6
assert all(c1 == c2 for c1, c2 in zip(chats, continue_chats[:3]))
3 changes: 2 additions & 1 deletion tests/test_finetune.py
@@ -39,7 +39,8 @@ def test_finetune():
# default repl
print(ft)
# get model list
models = ft.list_models()
print(ft.list_models())
print(ft.model)
# delete files
for file in files:
if file['filename'] in ["githubtest_training.jsonl",
53 changes: 31 additions & 22 deletions tests/test_function.py
@@ -1,6 +1,6 @@
# tests for function call

from chattool import Chat, generate_json_schema
from chattool import Chat, generate_json_schema, exec_python_code
import json

# schema of functions
@@ -31,9 +31,9 @@
'get_current_weather': lambda *kargs, **kwargs: weatherinfo
}

def test_function_call():
def test_call_weather():
chat = Chat("What's the weather like in Boston?")
resp = chat.getresponse(functions=functions, function_call='auto')
resp = chat.getresponse(functions=functions, function_call='auto', max_requests=3)
# TODO: wrap the response
if resp.finish_reason == 'function_call':
# test response from chat api
@@ -50,12 +50,16 @@ def test_function_call():
print("No function call found.")
assert True

def test_function_call2():
def test_auto_response():
chat = Chat("What's the weather like in Boston?")
chat.functions, chat.function_call = functions, 'auto'
chat.name2func = name2func
chat.autoresponse(max_requests=2)
chat.print_log()
chat.clear()
# response with nonempty content
chat.user("what is the result of 1+1, and What's the weather like in Boston?")
chat.autoresponse(max_requests=2)

# generate docstring from functions
def add(a: int, b: int) -> int:
@@ -85,15 +89,15 @@ def mult(a:int, b:int=1) -> int:
"""
return a * b

def test_generate_docstring():
def test_add_and_mult():
functions = [generate_json_schema(add)]
chat = Chat("find the sum of 784359345 and 345345345")
chat.functions = functions
chat.function_call = None # unset keyword equivalent to "auto"
chat.function_call = 'none'
chat.function_call = {'name':'add'}
chat.function_call = 'add' # specify the function(convert to dict)
chat.name2func = {'add': add}
chat.name2func = {'add': add} # dictionary of functions
chat.function_call = 'auto' # auto decision
# run until success: maxturns=-1
chat.autoresponse(max_requests=2, display=True, maxturns=-1)
@@ -106,27 +110,32 @@ def test_generate_docstring():
chat.autoresponse()
chat.simplify() # simplify the chat log
chat.print_log()
# test multichoice
chat.clear()
chat.user("find the value of 23723 + 12312, and 23723 * 12312")
chat.autoresponse()

def exec_python_code(code:str)->dict:
"""Execute the code and return the namespace or error message
def test_mock_resp():
chat = Chat("find the sum of 1235 and 3423")
chat.setfuncs([add, mult])
# mock result of the resp
para = {'name': 'add', 'arguments': '{\n "a": 1235,\n "b": 3423\n}'}
chat.assistant(content=None, function_call=para)
chat.callfunction()
chat.getresponse(max_requests=2)

def test_use_exec_function():
chat = Chat("find the result of sqrt(121314)")
chat.setfuncs([exec_python_code])
chat.autoresponse(max_requests=2)

Args:
code (str): code to execute
def test_find_permutation_group():
pass

Returns:
dict: namespace or error message
"""
try:
namespace = {}
exec(code, globals(), namespace)
for key, val in namespace.items():
namespace[key] = str(val)
return namespace
except Exception as e:
return {'error': str(e)}
def test_interact_with_leandojo():
pass

# debug area
# test_generate_docstring()
# test_function_call()
# test_function_call2()
# test_find_permutation_group()
4 changes: 3 additions & 1 deletion tests/test_proxy.py
@@ -3,13 +3,15 @@

def test_proxy():
proxy_off()
proxy_status()
assert os.environ.get('http_proxy') is None
assert os.environ.get('https_proxy') is None
proxy_on(http="127.0.0.1:7890", https="socks://127.0.0.1:7891")
proxy_status()
assert os.environ.get('http_proxy') == "127.0.0.1:7890"
assert os.environ.get('https_proxy') == "socks://127.0.0.1:7891"
proxy_off()
assert os.environ.get('http_proxy') is None
assert os.environ.get('https_proxy') is None
proxy_status()
assert True
assert True
5 changes: 4 additions & 1 deletion tests/test_request.py
@@ -5,7 +5,7 @@
create_finetune_job, list_finetune_job, retrievejob,
listevents, canceljob, deletemodel
)
import pytest, chattool, os
import pytest, chattool
api_key, base_url = chattool.api_key, chattool.base_url
testpath = 'tests/testfiles/'

@@ -20,6 +20,9 @@ def test_debug_log():
"""Test the debug log"""
assert debug_log(net_url="https://www.baidu.com") or debug_log(net_url="https://www.google.com")
assert not debug_log(net_url="https://baidu123.com") # invalid url
chattool.api_key = None
chattool.show_apikey()
chattool.api_key = api_key

# normalize base url
def test_is_valid_url():
