naman1102 committed on
Commit
2757b9c
·
1 Parent(s): 108e6be

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -19
app.py CHANGED
@@ -223,23 +223,79 @@ class BasicAgent:
223
  # as a last resort, strip everything before the first colon
224
  return raw.split(':', 1)[-1].strip()
225
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
  def _generate_answer(self, state: AgentState) -> AgentState:
227
  if state["file_url"]:
228
  try:
 
229
  kind = mimetypes.guess_type(state["file_url"])[0] or ""
230
- data = requests.get(state["file_url"]).content
 
 
 
 
 
 
 
 
 
 
 
 
231
 
232
  if "image" in kind:
 
233
  answer = image_qa_bytes(data, state["question"])
234
  elif "video" in kind:
 
235
  answer = video_label_bytes(data)
236
  elif kind.endswith("spreadsheet") or state["file_url"].endswith((".xlsx", ".csv")):
 
237
  answer = sheet_answer_bytes(data, state["question"])
238
  elif state["file_url"].endswith(".py"):
 
239
  answer = run_python(data.decode())
240
  else:
241
- answer = "Unsupported file type"
 
242
 
 
243
  state["final_answer"] = answer
244
  state["current_step"] = "done"
245
  return state
@@ -250,6 +306,7 @@ class BasicAgent:
250
  return state
251
 
252
  # For text-only questions, use the LLM
 
253
  prompt = f"""
254
  Answer this question using the materials provided.
255
 
@@ -262,6 +319,7 @@ Return ONLY this exact JSON object:
262
  try:
263
  raw = self._call_llm(prompt, 300)
264
  answer = self._safe_parse(raw)
 
265
  state["final_answer"] = answer
266
  except Exception as e:
267
  print(f"\nLLM Error in answer generation: {str(e)}")
@@ -277,23 +335,6 @@ Return ONLY this exact JSON object:
277
  sg.set_finish_point("answer")
278
  return sg.compile()
279
 
280
- def __call__(self, question: str, task_id: str = "unknown") -> str:
281
- state: AgentState = {
282
- "question": question,
283
- "current_step": "answer",
284
- "final_answer": "",
285
- "history": [],
286
- "needs_search": False,
287
- "search_query": "",
288
- "task_id": task_id,
289
- "logs": {},
290
- "file_url": "",
291
- "code_blocks": []
292
- }
293
-
294
- final_state = self.workflow.invoke(state)
295
- return final_state["final_answer"]
296
-
297
  # ----------------------------------------------------------------------------------
298
  # Gradio Interface & Submission Routines
299
  # ----------------------------------------------------------------------------------
 
223
  # as a last resort, strip everything before the first colon
224
  return raw.split(':', 1)[-1].strip()
225
 
226
+ def __call__(self, question: str, task_id: str = "unknown") -> str:
227
+ # Parse question to get both text and file_url
228
+ try:
229
+ question_data = json.loads(question)
230
+ state: AgentState = {
231
+ "question": question_data.get("question", ""),
232
+ "current_step": "answer",
233
+ "final_answer": "",
234
+ "history": [],
235
+ "needs_search": False,
236
+ "search_query": "",
237
+ "task_id": task_id,
238
+ "logs": {},
239
+ "file_url": question_data.get("file_url", ""),
240
+ "code_blocks": question_data.get("code_blocks", [])
241
+ }
242
+ print(f"\nProcessing task {task_id}")
243
+ print(f"Question: {state['question']}")
244
+ print(f"File URL: {state['file_url']}")
245
+ except (json.JSONDecodeError, KeyError) as e:
246
+ print(f"Error parsing question data: {e}")
247
+ state: AgentState = {
248
+ "question": question,
249
+ "current_step": "answer",
250
+ "final_answer": "",
251
+ "history": [],
252
+ "needs_search": False,
253
+ "search_query": "",
254
+ "task_id": task_id,
255
+ "logs": {},
256
+ "file_url": "",
257
+ "code_blocks": []
258
+ }
259
+
260
+ final_state = self.workflow.invoke(state)
261
+ return final_state["final_answer"]
262
+
263
  def _generate_answer(self, state: AgentState) -> AgentState:
264
  if state["file_url"]:
265
  try:
266
+ print(f"\nProcessing file: {state['file_url']}")
267
  kind = mimetypes.guess_type(state["file_url"])[0] or ""
268
+ print(f"Detected file type: {kind}")
269
+
270
+ # Download file with timeout and error handling
271
+ try:
272
+ response = requests.get(state["file_url"], timeout=30)
273
+ response.raise_for_status()
274
+ data = response.content
275
+ print(f"Successfully downloaded file, size: {len(data)} bytes")
276
+ except requests.exceptions.RequestException as e:
277
+ print(f"Error downloading file: {e}")
278
+ state["final_answer"] = f"Error downloading file: {str(e)}"
279
+ state["current_step"] = "done"
280
+ return state
281
 
282
  if "image" in kind:
283
+ print("Processing as image...")
284
  answer = image_qa_bytes(data, state["question"])
285
  elif "video" in kind:
286
+ print("Processing as video...")
287
  answer = video_label_bytes(data)
288
  elif kind.endswith("spreadsheet") or state["file_url"].endswith((".xlsx", ".csv")):
289
+ print("Processing as spreadsheet...")
290
  answer = sheet_answer_bytes(data, state["question"])
291
  elif state["file_url"].endswith(".py"):
292
+ print("Processing as Python file...")
293
  answer = run_python(data.decode())
294
  else:
295
+ print(f"Unsupported file type: {kind}")
296
+ answer = f"Unsupported file type: {kind}"
297
 
298
+ print(f"Generated answer: {answer}")
299
  state["final_answer"] = answer
300
  state["current_step"] = "done"
301
  return state
 
306
  return state
307
 
308
  # For text-only questions, use the LLM
309
+ print("\nProcessing as text-only question...")
310
  prompt = f"""
311
  Answer this question using the materials provided.
312
 
 
319
  try:
320
  raw = self._call_llm(prompt, 300)
321
  answer = self._safe_parse(raw)
322
+ print(f"Generated answer: {answer}")
323
  state["final_answer"] = answer
324
  except Exception as e:
325
  print(f"\nLLM Error in answer generation: {str(e)}")
 
335
  sg.set_finish_point("answer")
336
  return sg.compile()
337
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
338
  # ----------------------------------------------------------------------------------
339
  # Gradio Interface & Submission Routines
340
  # ----------------------------------------------------------------------------------