Is it possible to make the chatbot remember the previous chat? #1921
-
Most advanced chatbots can remember what the user said and what they previously responded, so that new responses stay relevant to the conversation. Is this possible with g4f? |
Beta Was this translation helpful? Give feedback.
Replies: 2 comments 2 replies
-
# Yes, it is possible. For example:
from g4f.client import Client

client = Client()
messages = []  # running conversation history shared across turns

while True:
    # Record the user's message so the model sees the full dialog each turn.
    messages.append({"role": "user", "content": input("Enter message to ChatGPT: ")})
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    # Record the assistant's reply too, preserving context for the next turn.
    messages.append({"role": "assistant", "content": response.choices[0].message.content})
    print(response.choices[0].message.content)
Beta Was this translation helpful? Give feedback.
-
Yes! Here is code that limits the overall dialog length in characters and automatically trims the oldest messages: import g4f
from g4f.client import Client
from g4f.Provider import RetryProvider, Aichatos, FreeGpt
from enum import StrEnum
from IPython.display import display, Markdown, HTML, Pretty
class AssistantRole(StrEnum):
default = 'You will be a helpful assistant. Answer me in Russian and in Markdown format'
poet = 'You will be a poet who writes poems and is well versed in rhymes in Russian and responds in Markdown format'
mathan = 'You will be a helpful assistant that help me with my math homework! Answer me in Russian and in Markdown format'
class GPTChat():
    """A chat session that remembers previous messages.

    The running dialog is kept in ``self.context`` (system prompt first)
    and is trimmed oldest-first whenever its total size in characters
    exceeds ``context_max_len``.
    """

    def __init__(self, model: str = 'gpt-3.5-turbo-16k-0613', assistant_role=AssistantRole.default,
                 provider=None, context_max_len: int | None = None,
                 api_key: str | None = None, proxies: str | None = None):
        # Fall back to a retrying provider chain when none is supplied.
        self.client = Client(
            provider=provider or RetryProvider([Aichatos, FreeGpt], shuffle=False),
            api_key=api_key, proxies=proxies
        )
        # The system prompt always stays at index 0 and is never trimmed.
        self.context = [{'role': 'system', 'content': str(assistant_role)}]
        self.context_max_len = context_max_len or 1024 * 10
        self.model = model

    @property
    def context_len(self) -> int:
        """Total character length of the dialog, excluding the system prompt."""
        return sum(len(m['content']) for m in self.context[1:])

    def reduce_context(self) -> None:
        """Reduce overall length of context to the configured context_max_len."""
        while self.context_len > self.context_max_len:
            # Drop the oldest exchange (user + assistant pair); keep the system prompt.
            del self.context[1:3]

    def reset_context(self) -> None:
        """Forget the whole dialog, keeping only the system prompt."""
        self.context = self.context[:1]

    def request(self, request: str) -> tuple[str, str, str]:
        """Send *request* with full history; return (answer, provider, model).

        On failure the pending user message is rolled back so the context
        stays consistent, and the original exception is re-raised.
        """
        self.context.append({'role': 'user', 'content': request})
        try:
            response = self.client.chat.completions.create(
                messages=self.context,
                model=self.model,
                stream=False
            )
        except Exception:
            self.context.pop()  # roll back the unanswered user message
            raise               # preserve the original exception type and traceback
        self.context.append({'role': 'assistant', 'content': response.choices[0].message.content})
        self.reduce_context()
        # NOTE(review): response.provider/model are assumed to be strings — confirm against g4f.
        return (self.context[-1]['content'], response.provider, response.model)


# Example call:
chat = GPTChat(assistant_role=AssistantRole.mathan, context_max_len=1024*5)
response = chat.request('''
Who are you?
''')
# Inspect the trimming state if needed:
# print(chat.context_max_len, chat.context_len, chat.context)
display(
    Markdown(response[0])
)
Beta Was this translation helpful? Give feedback.
Yes, it is possible. For example,
[{"role":"user","content":"hi"},{"role":"assistant","content":"Hello there! How can I assist you today?"},{"role":"user","content":"who are u?"}]
means that the user sent the model a message with content "hi",
the model replied to the user with content "Hello there! How can I assist you today?",
and then the user sent the model a message with content "who are u?".
Here is an example: