Spaces:
Running
on
Zero
Running
on
Zero
Thanush
committed on
Commit
·
a985489
1
Parent(s):
000ab02
Enhance user information collection in app.py by emphasizing follow-up questions and refining response generation logic based on actual information turns.
Browse files
app.py
CHANGED
@@ -13,7 +13,7 @@ SYSTEM_PROMPT = """You are a professional virtual doctor. Your goal is to collec
|
|
13 |
|
14 |
Always begin by asking for the user's name and age if not already provided.
|
15 |
|
16 |
-
Ask 1-2 follow-up questions at a time to gather more details about:
|
17 |
- Detailed description of symptoms
|
18 |
- Duration (when did it start?)
|
19 |
- Severity (scale of 1-10)
|
@@ -99,10 +99,10 @@ def extract_name_age(messages):
|
|
99 |
for msg in messages:
|
100 |
if msg.type == "human":
|
101 |
age_match = re.search(r"(?:I am|I'm|age is|aged|My age is)\s*(\d{1,3})", msg.content, re.IGNORECASE)
|
102 |
-
if age_match:
|
103 |
age = age_match.group(1)
|
104 |
name_match = re.search(r"(?:my name is|I'm|I am)\s*([A-Za-z]+)", msg.content, re.IGNORECASE)
|
105 |
-
if name_match:
|
106 |
name = name_match.group(1)
|
107 |
return name, age
|
108 |
|
@@ -125,10 +125,17 @@ def generate_response(message, history):
|
|
125 |
ask = "Before we continue, could you please tell me " + " and ".join(missing_info) + "?"
|
126 |
return ask
|
127 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
128 |
prompt = build_llama2_prompt(SYSTEM_PROMPT, messages, message)
|
129 |
-
|
130 |
-
|
131 |
-
if num_user_turns == 4:
|
132 |
prompt = prompt.replace("[/INST] ", "[/INST] Now summarize what you've learned and suggest when professional care may be needed. ")
|
133 |
|
134 |
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
|
@@ -149,9 +156,11 @@ def generate_response(message, history):
|
|
149 |
full_response = tokenizer.decode(outputs[0], skip_special_tokens=False)
|
150 |
llama_response = full_response.split('[/INST]')[-1].split('</s>')[0].strip()
|
151 |
|
152 |
-
# After 4 turns, add medicine suggestions from Meditron, but only once
|
153 |
-
if
|
154 |
-
full_patient_info = "\n".join([
|
|
|
|
|
155 |
medicine_suggestions = get_meditron_suggestions(full_patient_info)
|
156 |
final_response = (
|
157 |
f"{llama_response}\n\n"
|
|
|
13 |
|
14 |
Always begin by asking for the user's name and age if not already provided.
|
15 |
|
16 |
+
**IMPORTANT** Ask 1-2 follow-up questions at a time to gather more details about:
|
17 |
- Detailed description of symptoms
|
18 |
- Duration (when did it start?)
|
19 |
- Severity (scale of 1-10)
|
|
|
99 |
for msg in messages:
|
100 |
if msg.type == "human":
|
101 |
age_match = re.search(r"(?:I am|I'm|age is|aged|My age is)\s*(\d{1,3})", msg.content, re.IGNORECASE)
|
102 |
+
if age_match and not age:
|
103 |
age = age_match.group(1)
|
104 |
name_match = re.search(r"(?:my name is|I'm|I am)\s*([A-Za-z]+)", msg.content, re.IGNORECASE)
|
105 |
+
if name_match and not name:
|
106 |
name = name_match.group(1)
|
107 |
return name, age
|
108 |
|
|
|
125 |
ask = "Before we continue, could you please tell me " + " and ".join(missing_info) + "?"
|
126 |
return ask
|
127 |
|
128 |
+
# Count how many user turns have actually provided new info (not just name/age)
|
129 |
+
info_turns = 0
|
130 |
+
for msg in messages:
|
131 |
+
if msg.type == "human":
|
132 |
+
# Ignore turns that only provide name/age
|
133 |
+
if not re.fullmatch(r".*(name|age|years? old|I'm|I am|my name is).*", msg.content, re.IGNORECASE):
|
134 |
+
info_turns += 1
|
135 |
+
|
136 |
prompt = build_llama2_prompt(SYSTEM_PROMPT, messages, message)
|
137 |
+
# Only add summarization ONCE, not on every turn after 4 info turns
|
138 |
+
if info_turns == 4:
|
|
|
139 |
prompt = prompt.replace("[/INST] ", "[/INST] Now summarize what you've learned and suggest when professional care may be needed. ")
|
140 |
|
141 |
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
|
|
|
156 |
full_response = tokenizer.decode(outputs[0], skip_special_tokens=False)
|
157 |
llama_response = full_response.split('[/INST]')[-1].split('</s>')[0].strip()
|
158 |
|
159 |
+
# After 4 info turns, add medicine suggestions from Meditron, but only once
|
160 |
+
if info_turns == 4:
|
161 |
+
full_patient_info = "\n".join([
|
162 |
+
m.content for m in messages if m.type == "human" and not re.fullmatch(r".*(name|age|years? old|I'm|I am|my name is).*", m.content, re.IGNORECASE)
|
163 |
+
] + [message]) + "\n\nSummary: " + llama_response
|
164 |
medicine_suggestions = get_meditron_suggestions(full_patient_info)
|
165 |
final_response = (
|
166 |
f"{llama_response}\n\n"
|