Update app.py
app.py CHANGED
@@ -11,7 +11,7 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 # Create our own model wrapper that handles the chat template properly
 class CustomTransformersModel:
-    def __init__(self, model_id="
+    def __init__(self, model_id="EleutherAI/gpt-neo-125m"):
         self.model_id = model_id
         # Create the tokenizer and explicitly set the chat template
         self.tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -50,12 +50,43 @@ class CustomTransformersModel:
             assistant_response = response
 
         return assistant_response
+
+    # Add generate method to match the interface expected by CodeAgent
+    def generate(self, prompt, **kwargs):
+        return self(prompt, **kwargs)
+    def __call__(self, prompt, **kwargs):
+        # Format the prompt using our chat template
+        messages = [{"role": "user", "content": prompt}]
+        formatted_prompt = self.tokenizer.apply_chat_template(messages, tokenize=False)
+
+        # Tokenize the prompt
+        inputs = self.tokenizer(formatted_prompt, return_tensors="pt")
+
+        # Generate the response
+        outputs = self.model.generate(
+            inputs.input_ids,
+            max_new_tokens=256,
+            do_sample=True,
+            temperature=0.7,
+            **kwargs
+        )
+
+        # Decode the response
+        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        # Extract just the assistant's response
+        try:
+            assistant_response = response.split("Assistant: ")[-1]
+        except:
+            assistant_response = response
+
+        return assistant_response
 
 # --- Define Agent ---
 class SmolAgentWrapper:
     def __init__(self):
-        # Use our custom model wrapper
-        self.model = CustomTransformersModel(model_id="
+        # Use our custom model wrapper with GPT-Neo
+        self.model = CustomTransformersModel(model_id="EleutherAI/gpt-neo-125m")
         self.tools = [DuckDuckGoSearchTool()]
         self.agent = CodeAgent(model=self.model, tools=self.tools)
 
@@ -142,14 +173,14 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
 
 # --- Gradio Interface ---
 with gr.Blocks() as demo:
-    gr.Markdown("# SmolAgent Evaluation Runner (
+    gr.Markdown("# SmolAgent Evaluation Runner (GPT-Neo Implementation)")
     gr.Markdown(
         """
         **Instructions:**
         1. Log in to Hugging Face with the button below.
         2. Click the button to run all GAIA questions through the SmolAgent.
         3. Results will be submitted automatically and your score will be shown.
-        **Note:** Using
+        **Note:** Using GPT-Neo 125M with custom chat template implementation.
         """
     )
 
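For reference, a minimal, self-contained sketch of what the wrapper plausibly looks like after this change. The diff does not show lines 18-49 of app.py, where the model itself is loaded and the chat template is assigned, so the AutoModelForCausalLM loading, the template string, and the pad_token_id argument below are assumptions rather than the committed code:

from transformers import AutoModelForCausalLM, AutoTokenizer

class CustomTransformersModel:
    def __init__(self, model_id="EleutherAI/gpt-neo-125m"):
        self.model_id = model_id
        # GPT-Neo's tokenizer ships without a chat template, so one is set
        # explicitly (the template below is an assumed stand-in for whatever
        # the hidden lines of app.py actually assign).
        self.tokenizer = AutoTokenizer.from_pretrained(model_id)
        self.tokenizer.chat_template = (
            "{% for message in messages %}"
            "{{ message['role'] }}: {{ message['content'] }}\n"
            "{% endfor %}"
            "Assistant: "
        )
        self.model = AutoModelForCausalLM.from_pretrained(model_id)

    # CodeAgent may call generate(); delegate to __call__, as the commit does.
    def generate(self, prompt, **kwargs):
        return self(prompt, **kwargs)

    def __call__(self, prompt, **kwargs):
        # Render the chat template, tokenize, and generate a completion.
        messages = [{"role": "user", "content": prompt}]
        formatted_prompt = self.tokenizer.apply_chat_template(messages, tokenize=False)
        inputs = self.tokenizer(formatted_prompt, return_tensors="pt")
        outputs = self.model.generate(
            inputs.input_ids,
            max_new_tokens=256,
            do_sample=True,
            temperature=0.7,
            pad_token_id=self.tokenizer.eos_token_id,  # GPT-Neo has no pad token
            **kwargs,
        )
        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Keep only the text after the final "Assistant: " marker.
        return response.split("Assistant: ")[-1]

One small note on the committed __call__: str.split never raises, so the try/except around response.split("Assistant: ")[-1] is harmless but unnecessary; when the marker is missing, the split simply returns the full decoded text.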
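A sketch of how the wrapper is wired into the agent, mirroring SmolAgentWrapper.__init__ in the second hunk (it assumes CustomTransformersModel from above is in scope). The imports are assumed to come from smolagents, the library behind CodeAgent and DuckDuckGoSearchTool, and whether CodeAgent accepts a plain string-returning model like this wrapper depends on the installed smolagents version:

from smolagents import CodeAgent, DuckDuckGoSearchTool

# Mirror SmolAgentWrapper.__init__ from the diff: wrap the local GPT-Neo model
# and hand it to a CodeAgent with a web-search tool.
model = CustomTransformersModel(model_id="EleutherAI/gpt-neo-125m")
agent = CodeAgent(model=model, tools=[DuckDuckGoSearchTool()])

# run() is smolagents' usual entry point for a task; the diff does not show how
# SmolAgentWrapper itself is called, so this invocation is illustrative only.
answer = agent.run("What is the capital of France?")
print(answer)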
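Finally, a rough sketch of how the Blocks interface in the last hunk might be wired up. Only the run_and_submit_all signature and the two gr.Markdown calls appear in this diff, so the login button, run button, output components, and assumed return values below are hypothetical:

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("# SmolAgent Evaluation Runner (GPT-Neo Implementation)")
    # Hugging Face OAuth login; Gradio injects a gr.OAuthProfile (or None)
    # into run_and_submit_all because of its type-hinted profile parameter.
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")  # assumed label
    status_box = gr.Textbox(label="Run Status / Submission Result", lines=5)
    results_table = gr.Dataframe(label="Questions and Agent Answers")
    # Assumes run_and_submit_all returns (status_text, results_dataframe).
    run_button.click(fn=run_and_submit_all, outputs=[status_box, results_table])

if __name__ == "__main__":
    demo.launch()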