import os
import gradio as gr
import requests
import pandas as pd
from smolagents import CodeAgent, DuckDuckGoSearchTool, TransformersModel

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
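# The scoring service exposes GET /questions and POST /submit; both endpoints are used below.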


# --- Define Agent ---
class SmolAgentWrapper:
    def __init__(self):
        # Use a model that's compatible with AutoModelForCausalLM
        # GPT-2 should work, but we need to properly handle the chat template issue
        self.model = TransformersModel(
            model_id="gpt2",
            generation_kwargs={
                "do_sample": True,
                "max_new_tokens": 256,
                "temperature": 0.7,
                "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\nUser: {{ message['content'] }}\n{% elif message['role'] == 'assistant' %}\nAssistant: {{ message['content'] }}\n{% elif message['role'] == 'system' %}\nSystem: {{ message['content'] }}\n{% endif %}\n{% endfor %}\n{% if add_generation_prompt %}\nAssistant: {% endif %}"
            }
        )
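        # GPT-2's tokenizer ships without a chat template, so a Jinja template is
        # passed above as a workaround for chat-style prompt formatting.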
        # Alternative options if the above doesn't work:
        # Option 1: Using a different GPT model that might handle chat better
        # self.model = TransformersModel(model_id="facebook/opt-350m")
        # Option 2: Using a model with better instruction following
        # self.model = TransformersModel(model_id="databricks/dolly-v2-3b")
        self.tools = [DuckDuckGoSearchTool()]
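        # CodeAgent plans by generating and executing Python code, and can call the
        # DuckDuckGo search tool when it needs information from the web.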
        self.agent = CodeAgent(model=self.model, tools=self.tools)

    def __call__(self, question: str) -> str:
        return self.agent.run(question)


# --- Evaluation Logic ---
def run_and_submit_all(profile: gr.OAuthProfile | None):
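    # Gradio fills the OAuthProfile parameter automatically (via the type hint) when a
    # LoginButton is present, which is why the click handler below passes no inputs.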
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please log in to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # Create the agent
    try:
        agent = SmolAgentWrapper()
    except Exception as e:
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

    # Fetch questions
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
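        # Expected shape: a list of {"task_id": ..., "question": ...} dicts (see the loop below).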
        if not questions_data:
            return "Fetched questions list is empty or in an invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        return f"Error fetching questions: {e}", None

    # Run agent
    results_log = []
    answers_payload = []

    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # Submit answers
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload
    }

    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        return f"Submission Failed: {e}", pd.DataFrame(results_log)


# --- Gradio Interface ---
with gr.Blocks() as demo:
gr.Markdown("# SmolAgent Evaluation Runner")
gr.Markdown(
"""
**Instructions:**
1. Log in to Hugging Face with the button below.
2. Click the button to run all GAIA questions through the SmolAgent.
3. Results will be submitted automatically and your score will be shown.
**Note:** Model runs on Hugging Face Inference API.
"""
)
gr.LoginButton()
run_button = gr.Button("Run Evaluation & Submit All Answers")
status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
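
    # run_and_submit_all returns (status string, results DataFrame), wired to the two outputs below.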
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )


if __name__ == "__main__":
    print("-" * 60)
    print("Launching SmolAgent Space...")
    print("-" * 60)
    demo.launch(debug=True, share=False)