Skip to content

Commit

Permalink
Completed demo for intake module
Browse files Browse the repository at this point in the history
  • Loading branch information
nonprofittechy committed Jun 20, 2024
1 parent 0152f2a commit b25d437
Show file tree
Hide file tree
Showing 3 changed files with 172 additions and 36 deletions.
53 changes: 53 additions & 0 deletions docassemble/ALToolbox/data/questions/intake.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
# Reusable docassemble blocks for an LLM-driven intake screener.
# They are written as "generic object" blocks so they apply to any
# IntakeQuestionList (defined in the .llms module of this package).
---
modules:
  - .llms
---
# Start gathering only if the screener still needs information.
generic object: IntakeQuestionList
code: |
  x.there_are_any = x.need_more_questions()
---
# After each answer, ask the screener whether another follow-up is needed.
generic object: IntakeQuestionList
code: |
  x.there_is_another = x.need_more_questions()
---
# The text of each follow-up comes from x.next_question, which
# need_more_questions() sets as a side effect.
generic object: IntakeQuestionList
code: |
  x[i].question = x.next_question
---
# Screen that poses the current follow-up question and records the answer.
# The debug-only "right" column surfaces the classified problem type.
generic object: IntakeQuestionList
question: |
  ${ x[i].question }
fields:
  - ${ x[i].question }: x[i].response
    datatype: area
right: |
  % if get_config("debug"):
  ${ x.problem_type }
  % endif
---
generic object: IntakeQuestionList
code: |
  x.initial_question = "What is the problem you are trying to solve?"
---
# Opening screen: free-text description of the user's problem.
# NOTE(review): "looking help with" reads as a typo for "looking for help
# with" — this is user-facing text, fix in a content pass.
generic object: IntakeQuestionList
question: |
  What is the problem you are trying to solve?
subquestion: |
  Explain what you are looking help with. Do not include any personal information
  such as your name, address, or phone number.
fields:
  - I need help with: x.initial_problem_description
    datatype: area
---
# Results screen shown once gathering is complete. x.qualifies is
# three-valued: True, False, or None (undetermined).
generic object: IntakeQuestionList
continue button field: x.intake_results
question: |
  % if x.qualifies:
  You may qualify for help
  % elif x.qualifies is not None:
  You probably do not qualify for help
  % else:
  We need more information to determine if you qualify for help
  % endif
subquestion: |
  ${ x.next_question }
56 changes: 56 additions & 0 deletions docassemble/ALToolbox/data/questions/intake_demo.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
# Demo interview exercising the intake module: pulls in the generic
# intake blocks and hard-codes a "housing" problem type.
---
include:
  - intake.yml # docassemble.ALToolbox:data/questions/intake.yml
---
# The screener is configured with a single criteria entry keyed by
# problem type; intake_criteria is the template defined below.
objects:
  - intake_screener: |
      IntakeQuestionList.using(
        criteria={
          "housing": str(intake_criteria),
        },
        problem_type = "housing", # Hardcoding for this simple demo
        model="gpt-4-turbo"
      )
---
# Interview order: gather the question/answer list, then show results.
mandatory: True
code: |
  intake_screener.gather()
  intake_screener.intake_results
# Example intake criteria
template: intake_criteria
content: |
PROTOCOLS FOR HOUSING INTAKES AUGUST 2023
Housing is focusing on eviction lawsuits. Thus, we ask workers to do intakes on court
matters only. (There are a few exceptions listed below.)
Court Summons The court matters we will mostly take are cases with summons dates,
(first court date, non-trial date). This includes subsidized housing and private
landlord/tenant as usual.
Other Court Matters
We will also take default judgments, consent judgments, cases
already set for trial, and sheriff evictions with the following limitations.
Default Judgment: If caller contacts us within Ten (10) calendar days from the
date of the default judgment, then please do the intake.
Cases Already set for Trial: If the caller contacts LSEM at least Ten (10) calendar
days from the date of the trial, then it is OK to do the intake.
Sheriff Evictions Posted and Consent Judgments: Call and/or email me before
doing the intake. We will take these, but are limited based on the specific facts of
the case, which I have to look at before agreeing to open a case.
Section 8
Please do an intake for a caller facing a proposed termination of Section 8, i.e. if the
caller has received a proposed termination of Section 8 notice from one of the following:
the St. Louis Housing Authority (SLHA); the Housing Authority of St. Louis County
(HASLC); JFCAC in Jefferson County; or NECAC in St. Charles County.
Domestic Violence
If domestic violence is present, please call or email me. We may be able to take the
case even if there is no court date.
Other Housing Matters
For other Housing matters, please refer the caller to the Housing section of the LSEM
website and/or to the new website: "motenanthelp.org"
99 changes: 63 additions & 36 deletions docassemble/ALToolbox/llms.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
"synthesize_user_responses",
"define_fields_from_dict",
"GoalSatisfactionList",
"IntakeQuestionList",
]

if os.getenv("OPENAI_API_KEY"):
Expand Down Expand Up @@ -807,19 +808,23 @@ class IntakeQuestionList(DAList):
initial_problem_description (str): The initial description of the problem from the user
initial_question (str): The original question posed in the interview
question_limit (int): The maximum number of follow-up questions to ask the user. Defaults to 10.
model (str): The model to use for the GPT API. Defaults to gpt-3.5-turbo. gpt-4-turbo may perform better.
model (str): The model to use for the GPT API. Defaults to gpt-4-turbo. gpt-3.5 is not smart enough
llm_role (str): The role the LLM should play. Allows you to customize the script the LLM uses to guide the user.
We have provided a default script that should work for most intake questionnaires.
llm_user_qualifies_prompt (str): The prompt to use to determine if the user qualifies. We have provided a default prompt.
out_of_questions (bool): Whether the user has run out of questions to answer
qualifies (bool): Whether the user qualifies based on the criteria
"""

def init(self, *pargs, **kwargs):
super().init(*pargs, **kwargs)
self.object_type = IntakeQuestion
self.complete_attribute = "complete"
self.out_of_questions = False
self.qualifies = None

if not hasattr(self, "model"):
self.model = "gpt-3.5-turbo"
self.model = "gpt-4-turbo"

if not hasattr(self, "question_limit"):
self.question_limit = 10
Expand Down Expand Up @@ -861,24 +866,34 @@ def _classify_problem_type(self):
model=self.model,
)

def keep_going(self):
"""Returns True if the user needs to answer more questions, False otherwise."""
if not self._get_next_question():
return False
return len(self.elements) < self.question_limit
def _keep_going(self):
    """Return True if the user can and needs to answer more questions, False otherwise.

    Respects the cap defined by self.question_limit. As a side effect,
    records whether the question budget is exhausted in
    self.out_of_questions and, when it is, swaps self.next_question for a
    closing message instead of another follow-up.
    """
    ran_out = len(self.elements) >= self.question_limit
    self.out_of_questions = ran_out
    if not ran_out:
        return True
    # Budget exhausted: replace the follow-up with a wrap-up explanation.
    self.next_question = self._ran_out_of_questions_message()
    return False

def need_more_questions(self):
"""Returns True if the user needs to answer more questions, False otherwise.
Also has the side effect of checking the user's most recent response to see if it satisfies the criteria
and updating the next question to be asked.
and updating both the next question to be asked and the current qualification status.
"""
status = self._get_next_question()
if not status:
status = self._current_qualification_status()
self.qualifies = status["qualifies"]
self.next_question = status["narrative"]
if not (status["qualifies"] is None):
return False
return self.keep_going()
return self._keep_going()

def _user_qualifies_on_current_thread(self):
def _current_qualification_status(self):
"""Returns a dictionary with the user's current qualification status"""
if not hasattr(self, "problem_type"):
self.problem_type = self._classify_problem_type()
Expand All @@ -888,7 +903,7 @@ def _user_qualifies_on_current_thread(self):
return False

qualification_prompt = f"""
You are an expert intake worker at a law firm doing initial screening. Based on the qualification criteria,
Based on the qualification criteria,
assess whether the user meets at least the *minimum* criteria for the following problem type:
`{ self.problem_type }`.
Expand Down Expand Up @@ -923,35 +938,47 @@ def _user_qualifies_on_current_thread(self):

results = chat_completion(
messages = [
{"role": "system", "content": self.llm_role},
{"role": "system", "content": qualification_prompt},
{"role": "system", "content": criteria_prompt},
] + self._get_thread(),
model=self.model,
json_mode=True,
)


def draft_summary(self):
"""Returns a draft summary of the user's responses."""
return synthesize_user_responses(
custom_instructions="",
messages=self._get_thread(),
model=self.model,
)

def _get_next_question(self):
"""Returns the next question to ask the user."""
if not hasattr(self, "problem_type"):
self.problem_type = self._classify_problem_type()
if isinstance(results, dict):
return results

criteria = self.criteria.get(self.problem_type, None)
if not criteria:
return None
raise Exception(f"Unexpected response from LLM: { results }")


if not hasattr(self, "next_question"):
self.next_question = chat_completion(
system_message=self.llm_role,
user_message=self.initial_problem_description,
model=self.model,
)
return self.next_question
def _get_thread(self):
    """Return the conversation so far as a list of role/content message dicts.

    The thread starts with the interview's opening question and the user's
    initial problem description, followed by each gathered follow-up
    question and its response, in order.
    """
    thread = [
        {"role": "assistant", "content": self.initial_question},
        {"role": "user", "content": self.initial_problem_description},
    ]
    for item in self.elements:
        thread.append({"role": "assistant", "content": item.question})
        thread.append({"role": "user", "content": item.response})
    return thread

def _ran_out_of_questions_message(self):
    """Returns a message to display when the user has run out of questions to answer.

    Sends the LLM role, the wrap-up instruction, and the full conversation
    thread to the model and returns its plain-text reply.
    """
    # Instruction appended as a trailing system message so it is applied
    # after the whole conversation context.
    summary_prompt = """
    Explain to the user that you have asked all the questions you need to determine if they qualify for services
    and you still do not have a response. Explain why the answer that they gave was still incomplete.
    """
    # json_mode=False: the result is displayed to the user, so we want
    # prose rather than structured JSON.
    return chat_completion(
        messages = [
            {"role": "system", "content": self.llm_role},
        ] + self._get_thread() +
        [
            {"role": "system", "content": summary_prompt},
        ],
        model=self.model,
        json_mode=False,
    )

0 comments on commit b25d437

Please sign in to comment.