Update app.py
Browse files
app.py
CHANGED
@@ -14,11 +14,11 @@ client = OpenAI(
|
|
14 |
|
15 |
|
16 |
# Load model and tokenizer
|
17 |
-
our_model_path = "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B"
|
18 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
19 |
|
20 |
our_model = AutoModelForCausalLM.from_pretrained(our_model_path, device_map="auto", torch_dtype="auto")
|
21 |
-
our_tokenizer = AutoTokenizer.from_pretrained(our_model_path)
|
22 |
|
23 |
def format_math(text):
|
24 |
text = re.sub(r"\[(.*?)\]", r"$$\1$$", text, flags=re.DOTALL)
|
@@ -184,7 +184,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
184 |
|
185 |
Try the example problems below from JEE Main 2025 or type in your own problems to see how our model breaks down complex reasoning problems.
|
186 |
|
187 |
-
|
188 |
"""
|
189 |
)
|
190 |
|
|
|
14 |
|
15 |
|
16 |
# Load model and tokenizer
|
17 |
+
'''our_model_path = "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B"
|
18 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
19 |
|
20 |
our_model = AutoModelForCausalLM.from_pretrained(our_model_path, device_map="auto", torch_dtype="auto")
|
21 |
+
our_tokenizer = AutoTokenizer.from_pretrained(our_model_path)'''
|
22 |
|
23 |
def format_math(text):
|
24 |
text = re.sub(r"\[(.*?)\]", r"$$\1$$", text, flags=re.DOTALL)
|
|
|
184 |
|
185 |
Try the example problems below from JEE Main 2025 or type in your own problems to see how our model breaks down complex reasoning problems.
|
186 |
|
187 |
+
NOTE: once you close this demo window, all currently saved conversations will be lost.
|
188 |
"""
|
189 |
)
|
190 |
|