Abaryan committed · Commit 1b0f88d · verified · 1 Parent(s): eb9f7d3

Update app.py

Files changed (1): app.py (+4, -3)
app.py CHANGED

@@ -6,7 +6,8 @@ import random
 import re
 
 # Load model and tokenizer
-model_name = "rgb2gbr/GRPO_BioMedmcqa_Qwen2.5-0.5B"
+# model_name = "rgb2gbr/GRPO_BioMedmcqa_Qwen2.5-0.5B"
+model_name = "rgb2gbr/BioXP-0.5B-MedMCQA"
 model = AutoModelForCausalLM.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
@@ -49,7 +50,7 @@ def extract_answer(prediction: str) -> tuple:
 
 def predict(question: str, option_a: str, option_b: str, option_c: str, option_d: str,
             correct_option: int = None, explanation: str = None,
-            temperature: float = 0.6, top_p: float = 0.9, max_tokens: int = 10):
+            temperature: float = 0.6, top_p: float = 0.9, max_tokens: int = 20):
     # Format the prompt
     prompt = f"Question: {question}\n\nOptions:\nA. {option_a}\nB. {option_b}\nC. {option_c}\nD. {option_d}\n\nAnswer:"
 
@@ -120,7 +121,7 @@ with gr.Blocks(title="Medical MCQ Predictor") as demo:
     max_tokens = gr.Slider(
         minimum=10,
         maximum=512,
-        value=256,
+        value=20,
         step=32,
         label="Max Tokens",
         info="Maximum length of the generated response"