Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -2,34 +2,60 @@ import os
|
|
2 |
import gradio as gr
|
3 |
import requests
|
4 |
import pandas as pd
|
5 |
-
from smolagents import CodeAgent, DuckDuckGoSearchTool
|
|
|
|
|
6 |
|
7 |
# --- Constants ---
|
8 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
# --- Define Agent ---
|
12 |
class SmolAgentWrapper:
|
13 |
def __init__(self):
|
14 |
-
# Use
|
15 |
-
|
16 |
-
self.model = TransformersModel(
|
17 |
-
model_id="gpt2",
|
18 |
-
generation_kwargs={
|
19 |
-
"do_sample": True,
|
20 |
-
"max_new_tokens": 256,
|
21 |
-
"temperature": 0.7,
|
22 |
-
"chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\nUser: {{ message['content'] }}\n{% elif message['role'] == 'assistant' %}\nAssistant: {{ message['content'] }}\n{% elif message['role'] == 'system' %}\nSystem: {{ message['content'] }}\n{% endif %}\n{% endfor %}\n{% if add_generation_prompt %}\nAssistant: {% endif %}"
|
23 |
-
}
|
24 |
-
)
|
25 |
-
|
26 |
-
# Alternative options if the above doesn't work:
|
27 |
-
# Option 1: Using a different GPT model that might handle chat better
|
28 |
-
# self.model = TransformersModel(model_id="facebook/opt-350m")
|
29 |
-
|
30 |
-
# Option 2: Using a model with better instruction following
|
31 |
-
# self.model = TransformersModel(model_id="databricks/dolly-v2-3b")
|
32 |
-
|
33 |
self.tools = [DuckDuckGoSearchTool()]
|
34 |
self.agent = CodeAgent(model=self.model, tools=self.tools)
|
35 |
|
@@ -116,14 +142,14 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
|
|
116 |
|
117 |
# --- Gradio Interface ---
|
118 |
with gr.Blocks() as demo:
|
119 |
-
gr.Markdown("# SmolAgent Evaluation Runner")
|
120 |
gr.Markdown(
|
121 |
"""
|
122 |
**Instructions:**
|
123 |
1. Log in to Hugging Face with the button below.
|
124 |
2. Click the button to run all GAIA questions through the SmolAgent.
|
125 |
3. Results will be submitted automatically and your score will be shown.
|
126 |
-
**Note:**
|
127 |
"""
|
128 |
)
|
129 |
|
|
|
2 |
import gradio as gr
|
3 |
import requests
|
4 |
import pandas as pd
|
5 |
+
from smolagents import CodeAgent, DuckDuckGoSearchTool
|
6 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
7 |
+
import torch
|
8 |
|
9 |
# --- Constants ---
|
10 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
11 |
|
12 |
+
# Create our own model wrapper that handles the chat template properly
class CustomTransformersModel:
    """Minimal causal-LM wrapper that injects a plain User/Assistant chat
    template, since base GPT-2 ships without one.

    Construct with a Hugging Face model id, then call the instance with a
    prompt string to get just the assistant's generated reply.
    """

    def __init__(self, model_id="gpt2"):
        self.model_id = model_id
        # Create the tokenizer and explicitly set the chat template —
        # gpt2's tokenizer has none, so apply_chat_template would otherwise fail.
        self.tokenizer = AutoTokenizer.from_pretrained(model_id)

        # Set the chat template directly on the tokenizer.
        simple_template = "{% for message in messages %}\n{% if message['role'] == 'user' %}\nUser: {{ message['content'] }}\n{% elif message['role'] == 'assistant' %}\nAssistant: {{ message['content'] }}\n{% elif message['role'] == 'system' %}\nSystem: {{ message['content'] }}\n{% endif %}\n{% endfor %}\n{% if add_generation_prompt %}\nAssistant: {% endif %}"
        self.tokenizer.chat_template = simple_template

        # GPT-2 has no pad token; reuse EOS so generate() can pad cleanly.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        # Load the model.
        self.model = AutoModelForCausalLM.from_pretrained(model_id)

    def __call__(self, prompt, **kwargs):
        """Generate a reply for *prompt* and return only the new text.

        Extra keyword arguments are forwarded to ``model.generate``.
        """
        messages = [{"role": "user", "content": prompt}]
        # add_generation_prompt=True appends the trailing "Assistant: " cue so
        # the model continues as the assistant. (Bug fix: the original omitted
        # it, so the reply extraction found no marker and returned everything.)
        formatted_prompt = self.tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )

        # Tokenize the prompt.
        inputs = self.tokenizer(formatted_prompt, return_tensors="pt")

        # Generate the response. Pass the attention mask explicitly and give
        # generate() a pad token id (GPT-2 defines none) to avoid warnings
        # and incorrect masking.
        outputs = self.model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            pad_token_id=self.tokenizer.eos_token_id,
            max_new_tokens=256,
            do_sample=True,
            temperature=0.7,
            **kwargs,
        )

        # Decode only the newly generated tokens. This is more robust than
        # the original's decode-everything-then-split-on-"Assistant: "
        # approach (whose bare try/except around str.split was dead code —
        # str.split never raises).
        new_tokens = outputs[0][inputs.input_ids.shape[1]:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True)
|
53 |
|
54 |
# --- Define Agent ---
class SmolAgentWrapper:
    """Wires the custom GPT-2 chat model and a web-search tool into a
    smolagents CodeAgent."""

    def __init__(self):
        # The custom wrapper supplies the chat template that stock GPT-2 lacks.
        gpt2_model = CustomTransformersModel(model_id="gpt2")
        search_tools = [DuckDuckGoSearchTool()]

        self.model = gpt2_model
        self.tools = search_tools
        self.agent = CodeAgent(model=gpt2_model, tools=search_tools)
|
61 |
|
|
|
142 |
|
143 |
# --- Gradio Interface ---
|
144 |
with gr.Blocks() as demo:
|
145 |
+
gr.Markdown("# SmolAgent Evaluation Runner (Custom GPT-2 Implementation)")
|
146 |
gr.Markdown(
|
147 |
"""
|
148 |
**Instructions:**
|
149 |
1. Log in to Hugging Face with the button below.
|
150 |
2. Click the button to run all GAIA questions through the SmolAgent.
|
151 |
3. Results will be submitted automatically and your score will be shown.
|
152 |
+
**Note:** Using a custom implementation to handle chat templates properly.
|
153 |
"""
|
154 |
)
|
155 |
|