Thanush committed
Commit 736646e · 1 Parent(s): d194416

Update max_new_tokens in generate method and enhance consultation prompt for clarity

Files changed (2)
  1. medbot/model.py +1 -1
  2. medbot/prompts.py +1 -1
medbot/model.py CHANGED
@@ -28,7 +28,7 @@ class ModelManager:
             device_map="auto"
         )
 
-    def generate(self, prompt, max_new_tokens=500, temperature=0.5, top_p=0.9):
+    def generate(self, prompt, max_new_tokens=400, temperature=0.5, top_p=0.9):
         self.load()
         inputs = self.tokenizer(prompt, return_tensors="pt")
         if torch.cuda.is_available():
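For context, here is a minimal sketch of how ModelManager.generate likely looks after this commit, assuming a standard Hugging Face transformers setup. Only the lines shown in the diff above come from the repo; the model name, the load() internals, and the decoding step are assumptions.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

class ModelManager:
    def __init__(self, model_name="some-causal-lm"):  # placeholder model name (assumption)
        self.model_name = model_name
        self.model = None
        self.tokenizer = None

    def load(self):
        # Lazy-load the tokenizer and model on first use (assumed behavior).
        if self.model is None:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                device_map="auto"
            )

    def generate(self, prompt, max_new_tokens=400, temperature=0.5, top_p=0.9):
        self.load()
        inputs = self.tokenizer(prompt, return_tensors="pt")
        if torch.cuda.is_available():
            inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
        output_ids = self.model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,  # default lowered from 500 to 400 by this commit
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
        )
        # Strip the prompt tokens and decode only the newly generated text.
        new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True)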
medbot/prompts.py CHANGED
@@ -133,7 +133,7 @@
 # Simple
 
 
-CONSULTATION_PROMPT = '''Medical assistant collecting health info. Ask 1-2 questions per response about:
+CONSULTATION_PROMPT = '''You are a professional virtual medical assistant conducting a preliminary health assessment. Your role is to gather information systematically and safely. Ask 1-2 questions per response about:
 **IMPORTANT** Ask for name and age first with a greeting.
 - Age, symptoms, duration, severity (1-10)
 - What helps/worsens, medical history, medications
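A hypothetical usage sketch tying the two changed files together; the actual wiring inside medbot is not part of this diff, so the prompt assembly and variable names below are illustrative only.

from medbot.model import ModelManager
from medbot.prompts import CONSULTATION_PROMPT

manager = ModelManager()
user_message = "I have had a headache for two days."
# Assumed prompt format: system instructions followed by the patient's turn.
prompt = f"{CONSULTATION_PROMPT}\n\nPatient: {user_message}\nAssistant:"
reply = manager.generate(prompt)  # defaults now cap output at 400 new tokens
print(reply)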