Coool2 committed on
Commit adface3 · verified · 1 Parent(s): 7f53df8

Update agent.py

Files changed (1)
  1. agent.py +11 -26
agent.py CHANGED
@@ -463,38 +463,23 @@ class EnhancedGAIAAgent:
             tools=[analysis_tool, research_tool, code_tool]
         )
 
-    def solve_gaia_question(self, question_data: Dict[str, Any]) -> str:
+    async def solve_gaia_question(self, question_data: Dict[str, Any]) -> str:
         question = question_data.get("Question", "")
         task_id = question_data.get("task_id", "")
-
         context_prompt = f"""
-        GAIA Task ID: {task_id}
-        Question: {question}
-        {f"Associated files: {question_data.get('file_name', '')}" if 'file_name' in question_data else 'No files provided'}
-
-        Instructions:
-        1. Analyze this GAIA question using ReAct reasoning
-        2. Use specialist tools ONLY when their specific expertise is needed
-        3. Provide a precise, exact answer in GAIA format
-
-        Begin your reasoning process:
-        """
-
+        GAIA Task ID: {task_id}
+        Question: {question}
+        {f"Associated files: {question_data.get('file_name', '')}" if 'file_name' in question_data else 'No files provided'}
+        Instructions:
+        1. Analyze this GAIA question using ReAct reasoning
+        2. Use specialist tools ONLY when their specific expertise is needed
+        3. Provide a precise, exact answer in GAIA format
+        Begin your reasoning process:
+        """
         try:
-            import asyncio
             from llama_index.core.workflow import Context
-
-            # Create the context
             ctx = Context(self.coordinator)
-
-            # Async function to run the agent
-            async def run_agent():
-                response = await self.coordinator.run(ctx=ctx, user_msg=context_prompt)
-                return response
-
-            # Run asynchronously
-            response = asyncio.run(run_agent())
+            response = await self.coordinator.run(ctx=ctx, input=context_prompt)
             return str(response)
-
         except Exception as e:
             return f"Error processing question: {str(e)}"