Update agent.py
agent.py CHANGED
@@ -23,25 +23,23 @@ from llama_index.core.callbacks.base import CallbackManager
 from llama_index.core.callbacks.llama_debug import LlamaDebugHandler
 from llama_index.core import ServiceContext
 
+proj_llm = OpenRouter(
+    model="mistralai/mistral-small-3.1-24b-instruct:free",
+    api_key=os.getenv("OPENROUTER_API_KEY"),
+)
+
 wandb.init(project="gaia-llamaindex-agents")  # Choose your own project name
 wandb_callback = WandbCallbackHandler(run_args={"project": "gaia-llamaindex-agents"})
 llama_debug = LlamaDebugHandler(print_trace_on_end=True)
 callback_manager = CallbackManager([wandb_callback, llama_debug])
 
 service_context = ServiceContext.from_defaults(
-    llm=
+    llm=proj_llm,
     embed_model=HuggingFaceEmbedding("BAAI/bge-small-en-v1.5"),
     callback_manager=callback_manager
 )
-# Then pass service_context=service_context to your agents or query engines
 
 
-text_llm = OpenRouter(
-    model="mistralai/mistral-small-3.1-24b-instruct:free",
-    api_key=os.getenv("OPENROUTER_API_KEY"),
-)
-multimodal_llm = text_llm
-
 
 class EnhancedRAGQueryEngine:
     def __init__(self, task_context: str = ""):
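Net effect of this hunk: the dangling `llm=` is fixed by hoisting a single OpenRouter client, `proj_llm`, above the W&B setup and passing it to `ServiceContext.from_defaults`, replacing the old `text_llm`/`multimodal_llm` pair. A minimal smoke test of that wiring (a sketch assuming `llama-index-llms-openrouter` is installed and `OPENROUTER_API_KEY` is exported; the prompt is a stand-in):

    import os
    from llama_index.llms.openrouter import OpenRouter

    # Same model and key wiring as the hunk above; free-tier OpenRouter
    # models can be rate-limited, so expect occasional slow or refused calls.
    proj_llm = OpenRouter(
        model="mistralai/mistral-small-3.1-24b-instruct:free",
        api_key=os.getenv("OPENROUTER_API_KEY"),
    )

    # complete() returns a CompletionResponse; .text holds the raw model output.
    print(proj_llm.complete("Reply with the single word: ready").text)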
@@ -131,7 +129,7 @@ class EnhancedRAGQueryEngine:
         query_engine = RetrieverQueryEngine(
             retriever=retriever,
             node_postprocessors=[self.reranker],
-            llm=
+            llm=proj_llm,
             service_context=service_context
         )
 
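One caveat: in current llama-index releases, `RetrieverQueryEngine.__init__` does not accept `llm=` or `service_context=` keywords, so this constructor call may only work on the version pinned by this repo. The documented route is the `from_args` factory, which builds the response synthesizer from the LLM internally. A drop-in sketch for the method body above, assuming llama-index 0.10+ and the `retriever`/`self.reranker` objects already defined in the class:

    from llama_index.core.query_engine import RetrieverQueryEngine

    # Hypothetical equivalent of the constructor call for newer releases.
    query_engine = RetrieverQueryEngine.from_args(
        retriever=retriever,
        llm=proj_llm,
        node_postprocessors=[self.reranker],
    )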
@@ -234,7 +232,7 @@ analysis_agent = FunctionAgent(
 
     Always consider the GAIA task context and provide precise, well-sourced answers.
     """,
-    llm=
+    llm=proj_llm,
     tools=[enhanced_rag_tool, cross_document_tool],
     max_steps=5,
     service_context=service_context
@@ -294,7 +292,7 @@ class IntelligentSourceRouter:
         Respond with ONLY "arxiv" or "web_search".
         """
 
-        response =
+        response = proj_llm.complete(intent_prompt)
         selected_source = response.text.strip().lower()
 
         # Execute search and extract content
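The filled-in line sends `intent_prompt` through the shared `proj_llm` and lowercases the reply. Since a chat model will not always answer with exactly `arxiv` or `web_search`, a defensive variant of the parsing step could look like this (hypothetical helper, not part of the commit):

    def route_source(llm, intent_prompt: str) -> str:
        # Normalize the model's reply, then validate it against the two
        # expected tokens; fall back to web search on anything else.
        raw = llm.complete(intent_prompt).text.strip().lower()
        return raw if raw in ("arxiv", "web_search") else "web_search"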
@@ -380,7 +378,7 @@ code_agent = ReActAgent(
 
     Always show your reasoning process clearly and provide exact answers as required by GAIA.
     """,
-    llm=
+    llm=proj_llm,
     tools=[code_execution_tool],
    max_steps = 5,
     service_context=service_context
@@ -440,7 +438,7 @@ class EnhancedGAIAAgent:
         - For lists: use comma separation (e.g., "apple, banana, orange")
         - NO explanations, NO additional text, ONLY the precise answer
         """,
-        llm=
+        llm=proj_llm,
         tools=[analysis_tool, research_tool, code_tool],
         max_steps = 10,
         service_context=service_context
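A final note on the API pinned here: `ServiceContext` is deprecated in recent llama-index releases in favor of the global `Settings` object, so the same defaults would now be set once instead of threaded through every agent and query engine. A migration sketch, not part of this commit:

    from llama_index.core import Settings
    from llama_index.embeddings.huggingface import HuggingFaceEmbedding

    # Global defaults replace the per-object service_context plumbing.
    Settings.llm = proj_llm
    Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
    Settings.callback_manager = callback_manager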
|