# app.py
"""
This script provides the Gradio web interface to run the evaluation.

## MODIFICATION: This version is simplified to work with the new agent architecture.
It no longer performs file-type detection or prompt enhancement, as that responsibility
has been moved into the agent's 'multimodal_router'.
"""
# Standard library
import os
import re

# Third-party
import gradio as gr
import pandas as pd
import requests
# The agent expects conversation state to hold BaseMessage objects.
from langchain_core.messages import HumanMessage

# Local
from agent import create_agent_executor

# --- Constants ---
# Base URL of the scoring service (no trailing slash or spaces).
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# --- Helper: pull the agent's final answer out of its raw text response ---
def parse_final_answer(agent_response: str) -> str:
    """Extract the answer following a 'FINAL ANSWER:' marker.

    The marker match is case-insensitive and, because of DOTALL, captures
    everything (including newlines) after it. If no marker is present, the
    last non-blank line is used; if the response is empty, a fixed
    placeholder string is returned.
    """
    marker = re.search(r"FINAL ANSWER:\s*(.*)", agent_response, re.IGNORECASE | re.DOTALL)
    if marker is not None:
        return marker.group(1).strip()
    # Fallback: the last line that contains any non-whitespace character.
    candidates = [segment for segment in agent_response.split('\n') if segment.strip()]
    if candidates:
        return candidates[-1].strip()
    return "Could not parse a final answer."
## MODIFICATION: The `detect_file_type` function has been removed.
## It is now redundant, as this logic is handled inside the agent.
## MODIFICATION: The `create_enhanced_prompt` function has been removed.
## It was causing errors by trying to instruct the agent to use tools that no longer exist.
## The agent is now responsible for handling the raw input itself.
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the agent on them, submits all answers,
    and displays the results.

    Args:
        profile: OAuth profile injected by Gradio's login button, or None
            when no user is logged in.

    Returns:
        A (status_message, results) tuple: a human-readable status string
        and a pandas DataFrame of per-task results. The second element is
        None when login is missing or agent initialization fails before
        any question is processed.
    """
    # Guard clause: submission requires an authenticated Hugging Face user.
    if not profile:
        return "Please log in to Hugging Face with the button above to submit.", None
    username = profile.username
    print(f"User logged in: {username}")
    # --- SPACE_ID retrieval and URL construction ---
    # SPACE_ID is set automatically inside a Hugging Face Space; it may be
    # absent when running locally, hence the placeholder fallback below.
    space_id = os.getenv("SPACE_ID")
    if not space_id:
        # Fallback when SPACE_ID is not set (e.g. local runs).
        # This is a placeholder; adjust as needed for your deployment.
        space_id = "your-username/your-space-name" # Example placeholder
        print(f"Warning: SPACE_ID environment variable not found. Using placeholder: {space_id}")
    # Link submitted alongside the answers so graders can inspect the code.
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    questions_url = f"{DEFAULT_API_URL}/questions"
    submit_url = f"{DEFAULT_API_URL}/submit"
    # 1. Instantiate Agent
    print("Initializing your custom agent...")
    try:
        agent_executor = create_agent_executor(provider="groq")
    except Exception as e:
        # Abort the whole run: nothing can proceed without an agent.
        return f"Fatal Error: Could not initialize agent. Check logs. Details: {e}", None
    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=20)
        response.raise_for_status()
        questions_data = response.json()
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        return f"Error fetching questions: {e}", pd.DataFrame()
    # 3. Run the agent over every fetched question.
    results_log, answers_payload = [], []
    print(f"Running agent on {len(questions_data)} questions...")
    for i, item in enumerate(questions_data):
        task_id = item.get("task_id")
        question_text = item.get("question")
        # Skip malformed entries rather than aborting the whole run.
        if not task_id or question_text is None:
            continue
        print(f"\n--- Running Task {i+1}/{len(questions_data)} (ID: {task_id}) ---")
        file_url = item.get("file_url")
        ## MODIFICATION: Prompt creation is now much simpler.
        # We just combine the question and the URL into one string;
        # the agent's multimodal_router handles the rest.
        if file_url:
            full_question_text = f"{question_text}\n\nHere is the relevant file: {file_url}"
            print(f"File provided: {file_url}")
        else:
            full_question_text = question_text
        print(f"Raw Prompt for Agent:\n{full_question_text}")
        try:
            # --- FIX: Pass a list of HumanMessage objects ---
            # The agent expects MessagesState["messages"] to be a list of BaseMessage objects.
            input_state = {"messages": [HumanMessage(content=full_question_text)]}
            result = agent_executor.invoke(input_state)
            # The last message in the returned state holds the agent's reply.
            raw_answer = result['messages'][-1].content
            submitted_answer = parse_final_answer(raw_answer)
            print(f"Raw LLM Response: '{raw_answer}'")
            print(f"PARSED FINAL ANSWER: '{submitted_answer}'")
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "File URL": file_url or "None",
                "Submitted Answer": submitted_answer
            })
        except Exception as e:
            # Record the failure as this task's answer so the run continues
            # and the error is visible in the results table.
            print(f"!! AGENT ERROR on task {task_id}: {e}")
            error_msg = f"AGENT RUNTIME ERROR: {e}"
            answers_payload.append({"task_id": task_id, "submitted_answer": error_msg})
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "File URL": file_url or "None",
                "Submitted Answer": error_msg
            })
    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
    # 4. Prepare and 5. Submit all answers in a single POST.
    submission_data = {"username": username, "agent_code": agent_code, "answers": answers_payload}
    print(f"\nSubmitting {len(answers_payload)} answers for user '{username}'...")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (f"Submission Successful!\nUser: {result_data.get('username')}\n"
                        f"Overall Score: {result_data.get('score', 'N/A')}%\n"
                        f"Processed {len([r for r in results_log if 'ERROR' not in r['Submitted Answer']])} successful tasks")
        return final_status, pd.DataFrame(results_log)
    except Exception as e:
        status_message = f"Submission Failed: {e}"
        print(status_message)
        return status_message, pd.DataFrame(results_log)
# --- Gradio UI (remains largely the same) ---
with gr.Blocks(title="Multimodal Agent Evaluation") as demo:
    gr.Markdown("# Multimodal Agent Evaluation Runner")
    gr.Markdown("This agent can process images, YouTube videos, audio files, and perform web searches.")
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers", variant="primary")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=6, interactive=False)
    results_table = gr.DataFrame(
        label="Questions and Agent Answers",
        wrap=True,
        row_count=10,
        # MODIFICATION: Removed the 'File Type' column as it's no longer detected here.
        # Adjust column widths if necessary based on actual content/columns
        # column_widths=[80, 250, 200, 250]
    )

    # FIX: annotate the parameter with `gr.OAuthProfile | None` — Gradio
    # injects the logged-in user's profile into event handlers based on this
    # type annotation. Without it, the click handler (registered with no
    # `inputs=`) never receives the login state.
    def display_wrapper(profile: gr.OAuthProfile | None):
        """Run the full evaluation and sanitize the results for display."""
        status, df = run_and_submit_all(profile)
        # Defensive: drop the legacy 'File Type' column if it ever reappears.
        if isinstance(df, pd.DataFrame) and "File Type" in df.columns:
            df = df.drop(columns=["File Type"])
        return status, df

    run_button.click(fn=display_wrapper, outputs=[status_output, results_table])

if __name__ == "__main__":
    print("\n" + "-"*30 + " Multimodal App Starting " + "-"*30)
    demo.launch()