Update app.py
app.py CHANGED
@@ -1,8 +1,9 @@
+# app.py
 """
-app.py
 This script provides the Gradio web interface to run the evaluation.
-This version is simplified to work with the new agent architecture
-
+## MODIFICATION: This version is simplified to work with the new agent architecture.
+It no longer performs file-type detection or prompt enhancement, as that responsibility
+has been moved into the agent's 'multimodal_router'.
 """
 
 import os
@@ -10,14 +11,16 @@ import re
 import gradio as gr
 import requests
 import pandas as pd
-
+# --- Import HumanMessage ---
+from langchain_core.messages import HumanMessage
 
 from agent import create_agent_executor
 
 # --- Constants ---
+# Ensure the URL is correctly formatted (remove trailing spaces)
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
-# --- Helper function to parse the agent's output ---
+# --- Helper function to parse the agent's output (remains the same) ---
 def parse_final_answer(agent_response: str) -> str:
     match = re.search(r"FINAL ANSWER:\s*(.*)", agent_response, re.IGNORECASE | re.DOTALL)
     if match: return match.group(1).strip()
@@ -25,6 +28,13 @@ def parse_final_answer(agent_response: str) -> str:
     if lines: return lines[-1].strip()
     return "Could not parse a final answer."
 
+## MODIFICATION: The `detect_file_type` function has been removed.
+## It is now redundant as this logic is handled inside the agent.
+
+## MODIFICATION: The `create_enhanced_prompt` function has been removed.
+## It was causing errors by trying to instruct the agent to use tools that no longer exist.
+## The agent is now responsible for handling the raw input itself.
+
 def run_and_submit_all(profile: gr.OAuthProfile | None):
     """
     Fetches all questions, runs the agent on them, submits all answers,
@@ -36,7 +46,17 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     username = profile.username
     print(f"User logged in: {username}")
 
+    # --- Fix SPACE_ID retrieval and URL construction ---
+    # Ensure SPACE_ID environment variable is set correctly in your Hugging Face Space.
     space_id = os.getenv("SPACE_ID")
+    if not space_id:
+        # Fallback or error handling if SPACE_ID is not set
+        # You might need to adjust this based on how your space is configured
+        # For example, if running locally, you might not have SPACE_ID.
+        # This is a placeholder; adjust as needed.
+        # Consider using a default or making it configurable.
+        space_id = "your-username/your-space-name"  # Example placeholder
+        print(f"Warning: SPACE_ID environment variable not found. Using placeholder: {space_id}")
     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
     questions_url = f"{DEFAULT_API_URL}/questions"
     submit_url = f"{DEFAULT_API_URL}/submit"
@@ -72,7 +92,8 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
 
         file_url = item.get("file_url")
 
-
+        ## MODIFICATION: Prompt creation is now much simpler.
+        # We just combine the question and the URL into one string.
         # The agent's multimodal_router will handle the rest.
         if file_url:
             full_question_text = f"{question_text}\n\nHere is the relevant file: {file_url}"
@@ -83,7 +104,11 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
         print(f"Raw Prompt for Agent:\n{full_question_text}")
 
         try:
-
+            # --- FIX: Pass a list of HumanMessage objects ---
+            # The agent expects MessagesState["messages"] to be a list of BaseMessage objects.
+            input_state = {"messages": [HumanMessage(content=full_question_text)]}
+            result = agent_executor.invoke(input_state)
+
             raw_answer = result['messages'][-1].content
             submitted_answer = parse_final_answer(raw_answer)
 
@@ -91,7 +116,6 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
             print(f"PARSED FINAL ANSWER: '{submitted_answer}'")
 
             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
-            # The log for the DataFrame no longer includes a 'File Type' column
             results_log.append({
                 "Task ID": task_id,
                 "Question": question_text,
@@ -113,7 +137,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     if not answers_payload:
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare and 5. Submit
+    # 4. Prepare and 5. Submit (remains the same)
     submission_data = {"username": username, "agent_code": agent_code, "answers": answers_payload}
     print(f"\nSubmitting {len(answers_payload)} answers for user '{username}'...")
     try:
@@ -129,31 +153,34 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
     print(status_message)
     return status_message, pd.DataFrame(results_log)
 
-# --- Gradio UI ---
+# --- Gradio UI (remains largely the same) ---
 with gr.Blocks(title="Multimodal Agent Evaluation") as demo:
     gr.Markdown("# Multimodal Agent Evaluation Runner")
     gr.Markdown("This agent can process images, YouTube videos, audio files, and perform web searches.")
 
-
-    login_button = gr.LoginButton()
-
+    gr.LoginButton()
     run_button = gr.Button("Run Evaluation & Submit All Answers", variant="primary")
     status_output = gr.Textbox(label="Run Status / Submission Result", lines=6, interactive=False)
     results_table = gr.DataFrame(
         label="Questions and Agent Answers",
         wrap=True,
         row_count=10,
-
+        # MODIFICATION: Removed the 'File Type' column as it's no longer detected here.
+        # Adjust column widths if necessary based on actual content/columns
+        # column_widths=[80, 250, 200, 250]
     )
 
-
-    #
-
-
-
-
-
+    # We also remove "File Type" from the results_log being displayed
+    # (Though it's not in the log anymore, this is a safe check)
+    def display_wrapper(profile):
+        status, df = run_and_submit_all(profile)
+        # Ensure df is a DataFrame before attempting operations
+        if isinstance(df, pd.DataFrame) and "File Type" in df.columns:
+            df = df.drop(columns=["File Type"])
+        return status, df
+
+    run_button.click(fn=display_wrapper, outputs=[status_output, results_table])
 
 if __name__ == "__main__":
     print("\n" + "-"*30 + " Multimodal App Starting " + "-"*30)
-    demo.launch()
+    demo.launch()
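The heart of the change is the invocation path: the question text (plus the file URL, when present) is wrapped in a single HumanMessage, handed to the executor as a `{"messages": [...]}` state, and the content of the last returned message is scanned for the `FINAL ANSWER:` marker. The sketch below isolates that round trip; it assumes `create_agent_executor()` takes no arguments and returns a LangGraph-style executor whose `invoke` accepts and returns that messages state (the diff only shows the import, not the call), and the `lines` fallback fills in a line of `parse_final_answer` that is not visible in the hunks.

```python
import re

from langchain_core.messages import HumanMessage

from agent import create_agent_executor  # assumed to take no arguments


def parse_final_answer(agent_response: str) -> str:
    # Same rule as app.py: prefer an explicit "FINAL ANSWER:" marker,
    # otherwise fall back to the last non-empty line (assumed from the elided code).
    match = re.search(r"FINAL ANSWER:\s*(.*)", agent_response, re.IGNORECASE | re.DOTALL)
    if match:
        return match.group(1).strip()
    lines = [line for line in agent_response.splitlines() if line.strip()]
    if lines:
        return lines[-1].strip()
    return "Could not parse a final answer."


def answer_one_question(agent_executor, question_text: str, file_url: str | None = None) -> str:
    """Run a single question through the agent the way run_and_submit_all does."""
    if file_url:
        question_text = f"{question_text}\n\nHere is the relevant file: {file_url}"
    # The executor expects "messages" to be a list of BaseMessage objects.
    state = {"messages": [HumanMessage(content=question_text)]}
    result = agent_executor.invoke(state)
    return parse_final_answer(result["messages"][-1].content)


if __name__ == "__main__":
    executor = create_agent_executor()
    print(answer_one_question(executor, "What is the capital of France?"))
```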
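The scoring-API round trip that `run_and_submit_all` performs can be isolated into two small helpers. This is a sketch under assumptions: `/questions` is taken to return a JSON list of task dicts (app.py reads `file_url` from each item; the other field names are not visible in the diff), and the `/submit` response schema is not shown either, so the parsed JSON is returned untouched.

```python
import requests

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


def fetch_questions() -> list[dict]:
    # Assumed: GET /questions returns a JSON list of task dicts,
    # each carrying at least a task id, the question text, and an optional "file_url".
    response = requests.get(f"{DEFAULT_API_URL}/questions", timeout=30)
    response.raise_for_status()
    return response.json()


def submit_answers(username: str, agent_code: str, answers: list[dict]):
    # Payload shape mirrors app.py: answers is a list of
    # {"task_id": ..., "submitted_answer": ...} dicts.
    submission_data = {"username": username, "agent_code": agent_code, "answers": answers}
    response = requests.post(f"{DEFAULT_API_URL}/submit", json=submission_data, timeout=60)
    response.raise_for_status()
    return response.json()  # response schema not shown in the diff
```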
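On the UI side, the commit drops the old `login_button` variable and wires `run_button.click` to a thin `display_wrapper` around `run_and_submit_all`. For the profile argument to be populated on a Space, Gradio injects the logged-in user into handler parameters annotated with `gr.OAuthProfile` (or `gr.OAuthProfile | None` to allow anonymous runs), so the sketch below adds that annotation to the wrapper; the stub `run_and_submit_all` is a stand-in that only demonstrates the wiring.

```python
import gradio as gr
import pandas as pd


def run_and_submit_all(profile: gr.OAuthProfile | None):
    # Stand-in for app.py's real function; only the signature and return shape matter here.
    if profile is None:
        return "Please log in with the Hugging Face button first.", pd.DataFrame()
    return f"Logged in as {profile.username}.", pd.DataFrame()


with gr.Blocks(title="Multimodal Agent Evaluation") as demo:
    gr.Markdown("# Multimodal Agent Evaluation Runner")
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers", variant="primary")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=6, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    def display_wrapper(profile: gr.OAuthProfile | None):
        # Gradio fills `profile` in because of the type annotation;
        # it does not need to appear in `inputs`.
        status, df = run_and_submit_all(profile)
        if isinstance(df, pd.DataFrame) and "File Type" in df.columns:
            df = df.drop(columns=["File Type"])
        return status, df

    run_button.click(fn=display_wrapper, outputs=[status_output, results_table])

if __name__ == "__main__":
    demo.launch()
```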