Skip to content

Commit

Permalink
Fix issue where json_mode was forcefully enabled in chat_completion when the messages parameter was set
Browse files Browse the repository at this point in the history
  • Loading branch information
nonprofittechy committed Aug 28, 2024
1 parent 892107b commit 4e0db03
Showing 1 changed file with 38 additions and 8 deletions.
46 changes: 38 additions & 8 deletions docassemble/ALToolbox/llms.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,14 +159,17 @@ def chat_completion(
f"Warning: { system_message } does not contain the word 'json' but json_mode is set to True. Adding 'json' silently"
)
system_message = f"{ system_message }\n\nRespond only with a JSON object"
elif messages:
if not any("json" in message["content"].lower() for message in messages):
log(
f"Warning: None of the messages contain the word 'json' but json_mode is set to True. Adding 'json' silently"
)
messages.append(
{"role": "system", "content": "Respond only with a JSON object"}
)
elif (
messages
and json_mode
and not any("json" in message["content"].lower() for message in messages)
):
log(
f"Warning: None of the messages contain the word 'json' but json_mode is set to True. Adding 'json' silently"
)
messages.append(
{"role": "system", "content": "Respond only with a JSON object"}
)

if not messages:
assert isinstance(system_message, str)
Expand Down Expand Up @@ -774,6 +777,33 @@ def synthesize_draft_response(self):
model=self.model,
)

def provide_feedback(self, feedback_prompt: str = ""):
    """Return LLM-generated feedback on the user's reflection.

    Reconstructs the conversation so far — the initial question and draft,
    then each follow-up question/response pair — appends the grading
    instructions, and hands the whole transcript to ``chat_completion``.

    Args:
        feedback_prompt: Instructions for how the model should grade the
            reflection; a generic instructor prompt is used when empty.

    Returns:
        Whatever ``chat_completion`` returns for the assembled transcript.
    """
    # Fall back to a generic instructor prompt when the caller gave none.
    if not feedback_prompt:
        feedback_prompt = """
        You are a helpful instructor who is providing feedback to a student
        based on their reflection and response to any questions you asked.
        Review the student's response and provide feedback on how well they
        addressed the goals you set out for them. If they met the goals but
        could dig deeper, offer specific feedback on how they could do so
        in their next reflection.
        """

    # Rebuild the dialogue in order: opening exchange, then each follow-up.
    transcript = [
        {"role": "assistant", "content": self.initial_question},
        {"role": "user", "content": self.initial_draft},
    ]
    for followup in self.elements:
        transcript.extend(
            [
                {"role": "assistant", "content": followup.question},
                {"role": "user", "content": followup.response},
            ]
        )

    # The grading instructions go last so the model responds to them.
    transcript.append({"role": "assistant", "content": feedback_prompt})

    return chat_completion(messages=transcript, model=self.model)


class IntakeQuestion(DAObject):
"""A class to represent a question in an LLM-assisted intake questionnaire.
Expand Down

0 comments on commit 4e0db03

Please sign in to comment.