File size: 8,139 Bytes
d59e1c2
d6fbb7e
08f3bff
d59e1c2
 
 
d6fbb7e
 
10e9b7d
08f3bff
10e9b7d
eccf8e4
3c4371f
d59e1c2
 
10e9b7d
08f3bff
d6fbb7e
e80aab9
d59e1c2
3db6293
e80aab9
d59e1c2
d6fbb7e
 
08f3bff
d6fbb7e
08f3bff
d6fbb7e
 
d59e1c2
 
 
 
 
 
 
d6fbb7e
 
 
31243f4
 
d6fbb7e
 
 
 
 
 
d59e1c2
 
d6fbb7e
d59e1c2
 
 
 
 
 
 
 
d6fbb7e
 
 
 
08f3bff
d6fbb7e
31243f4
d80eabc
31243f4
d6fbb7e
3c4371f
08f3bff
31243f4
eccf8e4
d6fbb7e
7d65c66
31243f4
 
7d65c66
08f3bff
e80aab9
08f3bff
 
3c4371f
f9b5dc1
d6fbb7e
31243f4
 
f9b5dc1
 
d6fbb7e
 
 
08f3bff
f9b5dc1
d59e1c2
 
f97d9bf
08f3bff
f97d9bf
 
 
 
08f3bff
f97d9bf
08f3bff
31243f4
d59e1c2
 
 
 
 
d6fbb7e
 
 
 
 
 
7d65c66
f9b5dc1
 
 
 
 
 
 
31243f4
f9b5dc1
 
 
 
 
 
 
 
 
31243f4
 
 
 
d59e1c2
d6fbb7e
 
e80aab9
7d65c66
e80aab9
 
f9b5dc1
 
 
d6fbb7e
7d65c66
d6fbb7e
31243f4
d6fbb7e
e80aab9
d59e1c2
f9b5dc1
 
 
 
d59e1c2
d6fbb7e
f9b5dc1
 
 
 
 
d59e1c2
 
 
f9b5dc1
 
d59e1c2
 
 
 
 
 
 
 
 
 
e80aab9
 
f9b5dc1
d59e1c2
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
# app.py
"""
This script provides the Gradio web interface to run the evaluation.
## MODIFICATION: This version is simplified to work with the new agent architecture.
It no longer performs file-type detection or prompt enhancement, as that responsibility
has been moved into the agent's 'multimodal_router'.
"""

import os
import re
import gradio as gr
import requests
import pandas as pd
# --- Import HumanMessage ---
from langchain_core.messages import HumanMessage

from agent import create_agent_executor

# --- Constants ---
# Ensure the URL is correctly formatted (remove trailing spaces)
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Helper function to parse the agent's output (remains the same) ---
def parse_final_answer(agent_response: str) -> str:
    """Extract the agent's final answer from its raw response text.

    Searches for a 'FINAL ANSWER:' marker (case-insensitive; everything
    after the marker is captured, including later lines). If the marker
    is absent, falls back to the last non-empty line of the response.
    """
    marker = re.search(r"FINAL ANSWER:\s*(.*)", agent_response, re.IGNORECASE | re.DOTALL)
    if marker is not None:
        return marker.group(1).strip()
    # Fallback: scan from the bottom for the first non-blank line.
    for line in reversed(agent_response.split('\n')):
        stripped = line.strip()
        if stripped:
            return stripped
    return "Could not parse a final answer."

## MODIFICATION: The `detect_file_type` function has been removed.
## It is now redundant as this logic is handled inside the agent.

## MODIFICATION: The `create_enhanced_prompt` function has been removed.
## It was causing errors by trying to instruct the agent to use tools that no longer exist.
## The agent is now responsible for handling the raw input itself.

def _build_prompt(question_text: str, file_url: str | None) -> str:
    """Combine a question and an optional file URL into one prompt string.

    File-type detection and prompt enhancement are intentionally NOT done
    here: the agent's multimodal_router handles the raw input itself.
    """
    if file_url:
        return f"{question_text}\n\nHere is the relevant file: {file_url}"
    return question_text


def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the agent on them, submits all answers,
    and displays the results.

    Args:
        profile: The logged-in Hugging Face OAuth profile, or None if the
            user has not logged in via the LoginButton.

    Returns:
        A ``(status_message, results)`` tuple where ``results`` is a pandas
        DataFrame of per-task outcomes. Every failure path now returns an
        empty DataFrame so downstream consumers always receive the same
        type (previously some early exits returned None while others
        returned a DataFrame).
    """
    if not profile:
        return "Please log in to Hugging Face with the button above to submit.", pd.DataFrame()

    username = profile.username
    print(f"User logged in: {username}")

    # SPACE_ID is set automatically inside a Hugging Face Space; fall back
    # to a placeholder (e.g. when running locally) so the submission link
    # is still well-formed. Adjust the placeholder for your deployment.
    space_id = os.getenv("SPACE_ID")
    if not space_id:
        space_id = "your-username/your-space-name"  # Example placeholder
        print(f"Warning: SPACE_ID environment variable not found. Using placeholder: {space_id}")
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    questions_url = f"{DEFAULT_API_URL}/questions"
    submit_url = f"{DEFAULT_API_URL}/submit"

    # 1. Instantiate the agent.
    print("Initializing your custom agent...")
    try:
        agent_executor = create_agent_executor(provider="groq")
    except Exception as e:
        return f"Fatal Error: Could not initialize agent. Check logs. Details: {e}", pd.DataFrame()

    # 2. Fetch the questions from the scoring service.
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=20)
        response.raise_for_status()
        questions_data = response.json()
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        return f"Error fetching questions: {e}", pd.DataFrame()

    # 3. Run the agent on every question. Agent failures are recorded as
    # answers rather than aborting the whole run.
    results_log, answers_payload = [], []
    print(f"Running agent on {len(questions_data)} questions...")

    for i, item in enumerate(questions_data):
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue

        print(f"\n--- Running Task {i+1}/{len(questions_data)} (ID: {task_id}) ---")

        file_url = item.get("file_url")
        full_question_text = _build_prompt(question_text, file_url)
        if file_url:
            print(f"File provided: {file_url}")

        print(f"Raw Prompt for Agent:\n{full_question_text}")

        try:
            # The agent expects MessagesState["messages"] to be a list of
            # BaseMessage objects, so wrap the prompt in a HumanMessage.
            input_state = {"messages": [HumanMessage(content=full_question_text)]}
            result = agent_executor.invoke(input_state)

            raw_answer = result['messages'][-1].content
            submitted_answer = parse_final_answer(raw_answer)

            print(f"Raw LLM Response: '{raw_answer}'")
            print(f"PARSED FINAL ANSWER: '{submitted_answer}'")
        except Exception as e:
            print(f"!! AGENT ERROR on task {task_id}: {e}")
            submitted_answer = f"AGENT RUNTIME ERROR: {e}"

        # Single recording path for both success and error results
        # (the original duplicated these appends in each branch).
        answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
        results_log.append({
            "Task ID": task_id,
            "Question": question_text,
            "File URL": file_url or "None",
            "Submitted Answer": submitted_answer
        })

    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare and 5. submit the full answer payload in one request.
    submission_data = {"username": username, "agent_code": agent_code, "answers": answers_payload}
    print(f"\nSubmitting {len(answers_payload)} answers for user '{username}'...")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (f"Submission Successful!\nUser: {result_data.get('username')}\n"
                       f"Overall Score: {result_data.get('score', 'N/A')}%\n"
                       f"Processed {len([r for r in results_log if 'ERROR' not in r['Submitted Answer']])} successful tasks")
        return final_status, pd.DataFrame(results_log)
    except Exception as e:
        status_message = f"Submission Failed: {e}"
        print(status_message)
        return status_message, pd.DataFrame(results_log)

# --- Gradio UI (remains largely the same) ---
# Build the Gradio UI. Components are registered in declaration order
# inside the Blocks context.
with gr.Blocks(title="Multimodal Agent Evaluation") as demo:
    gr.Markdown("# Multimodal Agent Evaluation Runner")
    gr.Markdown("This agent can process images, YouTube videos, audio files, and perform web searches.")
    
    # OAuth login supplies the gr.OAuthProfile consumed by run_and_submit_all.
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers", variant="primary")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=6, interactive=False)
    results_table = gr.DataFrame(
        label="Questions and Agent Answers", 
        wrap=True, 
        row_count=10,
        # MODIFICATION: Removed the 'File Type' column as it's no longer detected here.
        # Adjust column widths if necessary based on actual content/columns
        # column_widths=[80, 250, 200, 250] 
    )
    
    # We also remove "File Type" from the results_log being displayed
    # (Though it's not in the log anymore, this is a safe check)
    def display_wrapper(profile):
        """Run the evaluation and sanitize the result table for display.

        Drops the legacy 'File Type' column if present before handing the
        DataFrame to the gr.DataFrame component.
        """
        status, df = run_and_submit_all(profile)
        # Ensure df is a DataFrame before attempting operations
        if isinstance(df, pd.DataFrame) and "File Type" in df.columns:
            df = df.drop(columns=["File Type"])
        return status, df

    # No explicit inputs: Gradio injects the OAuth profile automatically
    # when the wrapped function's parameter is the profile argument.
    run_button.click(fn=display_wrapper, outputs=[status_output, results_table])

if __name__ == "__main__":
    # Banner makes the app start easy to spot in the Space logs.
    rule = "-" * 30
    print(f"\n{rule} Multimodal App Starting {rule}")
    demo.launch()