Abaryan committed
Commit eb9f7d3 · verified · 1 Parent(s): ffe13aa

Update app.py

Files changed (1): app.py +3 -3
app.py CHANGED
@@ -49,7 +49,7 @@ def extract_answer(prediction: str) -> tuple:
 
 def predict(question: str, option_a: str, option_b: str, option_c: str, option_d: str,
             correct_option: int = None, explanation: str = None,
-            temperature: float = 0.6, top_p: float = 0.9, max_tokens: int = 256):
+            temperature: float = 0.6, top_p: float = 0.9, max_tokens: int = 10):
     # Format the prompt
     prompt = f"Question: {question}\n\nOptions:\nA. {option_a}\nB. {option_b}\nC. {option_c}\nD. {option_d}\n\nAnswer:"
 
@@ -64,7 +64,7 @@ def predict(question: str, option_a: str, option_b: str, option_c: str, option_d
         temperature=temperature,
         top_p=top_p,
         do_sample=True,
-        pad_token_id=tokenizer.eos_token_id
+        # pad_token_id=tokenizer.eos_token_id
     )
 
     # Get prediction
@@ -118,7 +118,7 @@ with gr.Blocks(title="Medical MCQ Predictor") as demo:
         info="Higher values allow more diverse tokens, lower values more focused"
     )
     max_tokens = gr.Slider(
-        minimum=32,
+        minimum=10,
         maximum=512,
         value=256,
         step=32,
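
Net effect of the commit: the predict() default for max_tokens drops from 256 to 10 (enough to emit a single option letter), the Gradio slider's minimum drops from 32 to 10 to match, and the explicit pad_token_id is commented out of the generate call. Below is a minimal, self-contained sketch of how these settings would plug into a transformers generate() call; the model name, the decode step, and the assumption that max_tokens maps to max_new_tokens are illustrative stand-ins, not taken from app.py.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # hypothetical stand-in; app.py loads its own model elsewhere
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

prompt = "Question: ...\n\nOptions:\nA. ...\nB. ...\nC. ...\nD. ...\n\nAnswer:"
inputs = tokenizer(prompt, return_tensors="pt")

with torch.no_grad():
    output = model.generate(
        **inputs,
        max_new_tokens=10,   # the new default: room for one option letter
        temperature=0.6,
        top_p=0.9,
        do_sample=True,
        # pad_token_id=tokenizer.eos_token_id  # commented out in this commit;
        # transformers falls back to eos_token_id (with a warning) when padding is needed
    )

# Decode only the tokens generated after the prompt
answer = tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(answer.strip())
```

Note that with do_sample=True and temperature=0.6 the output is stochastic, so repeated calls can yield different letters; the tight 10-token budget simply cuts generation off right after the option letter instead of letting the model ramble.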