import os
import gradio as gr
import requests
import pandas as pd
from smolagents import CodeAgent, DuckDuckGoSearchTool
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
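# The scoring service exposes GET /questions to fetch the task list and
# POST /submit to submit answers; both URLs are built from this base below.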

# GPT-Neo's tokenizer has no built-in chat template, so we wrap the model
# and set a simple template ourselves so prompts format correctly
class CustomTransformersModel:
    def __init__(self, model_id="EleutherAI/gpt-neo-125m"):
        self.model_id = model_id
        # Create the tokenizer and explicitly set the chat template
        self.tokenizer = AutoTokenizer.from_pretrained(model_id)
        
        # Set the chat template directly on the tokenizer
        simple_template = "{% for message in messages %}\n{% if message['role'] == 'user' %}\nUser: {{ message['content'] }}\n{% elif message['role'] == 'assistant' %}\nAssistant: {{ message['content'] }}\n{% elif message['role'] == 'system' %}\nSystem: {{ message['content'] }}\n{% endif %}\n{% endfor %}\n{% if add_generation_prompt %}\nAssistant: {% endif %}"
        self.tokenizer.chat_template = simple_template
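        # For illustration: with this template, apply_chat_template(
        #   [{"role": "user", "content": "Hi"}], add_generation_prompt=True)
        # renders to roughly "User: Hi\nAssistant: " (exact whitespace
        # depends on transformers' Jinja trim settings)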
        
        # Load the model
        self.model = AutoModelForCausalLM.from_pretrained(model_id)
        
    def __call__(self, prompt, **kwargs):
        # Extract and handle stop_sequences if present
        stop_sequences = kwargs.pop('stop_sequences', None)
        
        # Format the prompt using our chat template
        messages = [{"role": "user", "content": prompt}]
        formatted_prompt = self.tokenizer.apply_chat_template(messages, tokenize=False)
        
        # Tokenize the prompt
        inputs = self.tokenizer(formatted_prompt, return_tensors="pt")
        
        # Generate the response. GPT-Neo has no pad token, so reuse EOS as
        # pad_token_id to avoid the generation warning, and pass the
        # attention mask explicitly.
        outputs = self.model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            pad_token_id=self.tokenizer.eos_token_id,
            max_new_tokens=256,
            do_sample=True,
            temperature=0.7,
            **kwargs
        )
        
        # Decode the response
        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        
        # Apply stop sequences manually if provided
        if stop_sequences:
            for stop_seq in stop_sequences:
                if stop_seq in response:
                    response = response.split(stop_seq)[0]
        
        # Keep only the text after the final "Assistant: " marker; str.split
        # never raises here, so no try/except is needed
        assistant_response = response.split("Assistant: ")[-1]
            
        return assistant_response
        
    # Add generate method to match the interface expected by CodeAgent
    def generate(self, prompt, **kwargs):
        return self(prompt, **kwargs)
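
# Minimal usage sketch for the wrapper (illustrative only; the first call
# downloads the checkpoint, and a 125M-parameter model gives weak answers):
#
#   model = CustomTransformersModel()
#   print(model("What is the capital of France?", stop_sequences=["\nUser:"]))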

# --- Define Agent ---
class SmolAgentWrapper:
    def __init__(self):
        # Use our custom model wrapper with GPT-Neo
        self.model = CustomTransformersModel(model_id="EleutherAI/gpt-neo-125m")
        self.tools = [DuckDuckGoSearchTool()]
        self.agent = CodeAgent(model=self.model, tools=self.tools)

    def __call__(self, question: str) -> str:
        return self.agent.run(question)
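
# Hypothetical direct use, outside the Gradio flow:
#   agent = SmolAgentWrapper()
#   print(agent("Who wrote 'The Old Man and the Sea'?"))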


# --- Evaluation Logic ---
def run_and_submit_all(profile: gr.OAuthProfile | None):
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # Create the agent
    try:
        agent = SmolAgentWrapper()
    except Exception as e:
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

    # Fetch questions
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        return f"Error fetching questions: {e}", None

    # Run agent
    results_log = []
    answers_payload = []

    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # Submit answers
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload
    }
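    # For reference, the serialized payload has roughly this shape (values
    # are illustrative):
    # {"username": "alice", "agent_code": "https://huggingface.co/spaces/...",
    #  "answers": [{"task_id": "abc123", "submitted_answer": "Paris"}]}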

    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        return f"Submission Failed: {e}", pd.DataFrame(results_log)


# --- Gradio Interface ---
with gr.Blocks() as demo:
    gr.Markdown("# SmolAgent Evaluation Runner (GPT-Neo Implementation)")
    gr.Markdown(
        """
        **Instructions:**

        1. Log in to Hugging Face with the button below.
        2. Click the button to run all GAIA questions through the SmolAgent.
        3. Results will be submitted automatically and your score will be shown.

        **Note:** This Space uses GPT-Neo 125M with a custom chat-template implementation.
        """
    )

    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )

if __name__ == "__main__":
    print("-" * 60)
    print("Launching SmolAgent Space...")
    print("-" * 60)
    demo.launch(debug=True, share=False)