no_pydantic
Browse files
app.py
CHANGED
@@ -3,10 +3,9 @@ import gradio as gr
|
|
3 |
import requests
|
4 |
import inspect
|
5 |
import pandas as pd
|
6 |
-
from typing import Dict, Any, List, TypedDict
|
7 |
from langgraph.graph import Graph, StateGraph
|
8 |
from langgraph.prebuilt import ToolNode
|
9 |
-
from pydantic import BaseModel, Field
|
10 |
from tools import create_calculator_tool, create_search_tool
|
11 |
print("trial")
|
12 |
# (Keep Constants as is)
|
@@ -15,21 +14,6 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
|
15 |
MODEL_API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
|
16 |
HF_TOKEN = os.getenv("HF_TOKEN") # Make sure to set this environment variable
|
17 |
|
18 |
-
# Define the state type
|
19 |
-
StateType = TypeVar("StateType", bound=BaseModel)
|
20 |
-
|
21 |
-
class AgentState(BaseModel):
|
22 |
-
"""Schema for the agent's state."""
|
23 |
-
question: str = Field(..., description="The original question")
|
24 |
-
current_step: str = Field(default="analyze", description="Current step in the workflow")
|
25 |
-
tool_output: str = Field(default="", description="Output from the last tool used")
|
26 |
-
final_answer: str = Field(default="", description="The final answer to be returned")
|
27 |
-
history: List[Dict[str, str]] = Field(default_factory=list, description="History of operations performed")
|
28 |
-
needs_more_info: bool = Field(default=False, description="Whether more information is needed")
|
29 |
-
search_query: str = Field(default="", description="Current search query if any")
|
30 |
-
|
31 |
-
# --- Basic Agent Definition ---
|
32 |
-
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
|
33 |
class BasicAgent:
|
34 |
def __init__(self):
|
35 |
print("Initializing BasicAgent with Qwen2.5-Coder-32B-Instruct API...")
|
@@ -46,14 +30,14 @@ class BasicAgent:
|
|
46 |
"Content-Type": "application/json"
|
47 |
}
|
48 |
|
49 |
-
# Create the agent workflow
|
50 |
self.workflow = self._create_workflow()
|
51 |
print("BasicAgent initialization complete.")
|
52 |
|
53 |
def _create_workflow(self) -> Graph:
|
54 |
"""Create the agent workflow using LangGraph."""
|
55 |
-
# Create the workflow with state
|
56 |
-
workflow = StateGraph(
|
57 |
|
58 |
# Add nodes
|
59 |
workflow.add_node("analyze", self._analyze_question)
|
@@ -69,12 +53,12 @@ class BasicAgent:
|
|
69 |
workflow.add_edge("search", "final_answer")
|
70 |
|
71 |
# Define conditional edges
|
72 |
-
def router(state:
|
73 |
-
if state.current_step == 'calculator':
|
74 |
return 'calculator'
|
75 |
-
elif state.current_step == 'search':
|
76 |
return 'search'
|
77 |
-
elif state.current_step == 'final_answer':
|
78 |
return 'final_answer'
|
79 |
return 'analyze'
|
80 |
|
@@ -108,9 +92,9 @@ class BasicAgent:
|
|
108 |
print(f"Error calling LLM API: {e}")
|
109 |
return f"Error getting response from LLM: {str(e)}"
|
110 |
|
111 |
-
def _analyze_question(self, state:
|
112 |
"""Analyze the question and determine the next step."""
|
113 |
-
prompt = f"""Analyze this question and determine what needs to be done: {state
|
114 |
Return your analysis in this format:
|
115 |
{{
|
116 |
"needs_calculation": true/false,
|
@@ -124,67 +108,67 @@ class BasicAgent:
|
|
124 |
"""
|
125 |
|
126 |
analysis = eval(self._call_llm_api(prompt))
|
127 |
-
state
|
128 |
-
state
|
129 |
|
130 |
if analysis.get('needs_calculation', False):
|
131 |
-
state
|
132 |
-
state
|
133 |
elif analysis.get('needs_search', False):
|
134 |
-
state
|
135 |
else:
|
136 |
-
state
|
137 |
|
138 |
return state
|
139 |
|
140 |
-
def _use_calculator(self, state:
|
141 |
"""Use the calculator tool."""
|
142 |
try:
|
143 |
-
result = self.calculator.invoke({"input": eval(state
|
144 |
-
state
|
145 |
'step': 'calculator',
|
146 |
-
'input': state
|
147 |
'output': str(result['output'].result)
|
148 |
})
|
149 |
-
state
|
150 |
except Exception as e:
|
151 |
-
state
|
152 |
'step': 'calculator_error',
|
153 |
'error': str(e)
|
154 |
})
|
155 |
-
state
|
156 |
return state
|
157 |
|
158 |
-
def _use_search(self, state:
|
159 |
"""Use the search tool."""
|
160 |
try:
|
161 |
result = self.search_tool.invoke({
|
162 |
"input": {
|
163 |
-
"query": state
|
164 |
"max_results": 3
|
165 |
}
|
166 |
})
|
167 |
-
state
|
168 |
'step': 'search',
|
169 |
-
'query': state
|
170 |
'results': [str(r) for r in result['output'].results]
|
171 |
})
|
172 |
-
state
|
173 |
-
state
|
174 |
except Exception as e:
|
175 |
-
state
|
176 |
'step': 'search_error',
|
177 |
'error': str(e)
|
178 |
})
|
179 |
-
state
|
180 |
return state
|
181 |
|
182 |
-
def _generate_final_answer(self, state:
|
183 |
"""Generate the final answer based on all gathered information."""
|
184 |
history_str = "\n".join([f"{h['step']}: {h.get('output', h.get('results', h.get('error', '')))}"
|
185 |
-
for h in state
|
186 |
|
187 |
-
prompt = f"""Based on the following information and history, provide a final answer to the question: {state
|
188 |
|
189 |
History of steps taken:
|
190 |
{history_str}
|
@@ -192,7 +176,7 @@ class BasicAgent:
|
|
192 |
Provide a clear, concise answer that addresses the original question.
|
193 |
"""
|
194 |
|
195 |
-
state
|
196 |
return state
|
197 |
|
198 |
def __call__(self, question: str) -> str:
|
@@ -201,19 +185,19 @@ class BasicAgent:
|
|
201 |
|
202 |
try:
|
203 |
# Initialize the state
|
204 |
-
initial_state =
|
205 |
-
question
|
206 |
-
current_step
|
207 |
-
tool_output
|
208 |
-
final_answer
|
209 |
-
history
|
210 |
-
needs_more_info
|
211 |
-
search_query
|
212 |
-
|
213 |
|
214 |
# Run the workflow
|
215 |
final_state = self.workflow.invoke(initial_state)
|
216 |
-
return final_state
|
217 |
|
218 |
except Exception as e:
|
219 |
print(f"Error in agent processing: {e}")
|
@@ -285,15 +269,15 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
|
|
285 |
|
286 |
try:
|
287 |
# Initialize the state for this question
|
288 |
-
initial_state =
|
289 |
-
question
|
290 |
-
current_step
|
291 |
-
tool_output
|
292 |
-
final_answer
|
293 |
-
history
|
294 |
-
needs_more_info
|
295 |
-
search_query
|
296 |
-
|
297 |
|
298 |
# Run the workflow for this question
|
299 |
print(f"\nProcessing question {task_id}: {question_text[:50]}...")
|
@@ -304,11 +288,11 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
|
|
304 |
f"Step: {h['step']}\n" +
|
305 |
f"Input: {h.get('input', h.get('query', ''))}\n" +
|
306 |
f"Output: {h.get('output', h.get('results', h.get('error', '')))}"
|
307 |
-
for h in final_state
|
308 |
])
|
309 |
|
310 |
# Add to results
|
311 |
-
submitted_answer = final_state
|
312 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
313 |
results_log.append({
|
314 |
"Task ID": task_id,
|
@@ -317,7 +301,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
|
|
317 |
"Workflow History": workflow_history
|
318 |
})
|
319 |
|
320 |
-
print(f"Completed question {task_id} with {len(final_state
|
321 |
|
322 |
except Exception as e:
|
323 |
print(f"Error running agent workflow on task {task_id}: {e}")
|
|
|
3 |
import requests
|
4 |
import inspect
|
5 |
import pandas as pd
|
6 |
+
from typing import Dict, Any, List, TypedDict
|
7 |
from langgraph.graph import Graph, StateGraph
|
8 |
from langgraph.prebuilt import ToolNode
|
|
|
9 |
from tools import create_calculator_tool, create_search_tool
|
10 |
print("trial")
|
11 |
# (Keep Constants as is)
|
|
|
14 |
MODEL_API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
|
15 |
HF_TOKEN = os.getenv("HF_TOKEN") # Make sure to set this environment variable
|
16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
class BasicAgent:
|
18 |
def __init__(self):
|
19 |
print("Initializing BasicAgent with Qwen2.5-Coder-32B-Instruct API...")
|
|
|
30 |
"Content-Type": "application/json"
|
31 |
}
|
32 |
|
33 |
+
# Create the agent workflow
|
34 |
self.workflow = self._create_workflow()
|
35 |
print("BasicAgent initialization complete.")
|
36 |
|
37 |
def _create_workflow(self) -> Graph:
|
38 |
"""Create the agent workflow using LangGraph."""
|
39 |
+
# Create the workflow with simple state
|
40 |
+
workflow = StateGraph(dict)
|
41 |
|
42 |
# Add nodes
|
43 |
workflow.add_node("analyze", self._analyze_question)
|
|
|
53 |
workflow.add_edge("search", "final_answer")
|
54 |
|
55 |
# Define conditional edges
|
56 |
+
def router(state: Dict[str, Any]) -> str:
    """Return the name of the next workflow node.

    Routes to whatever node state['current_step'] names when it is one of
    the known tool/answer nodes; otherwise falls back to 'analyze'.
    """
    step = state.get('current_step')
    # Only these three targets are valid edges out of the analyze node.
    if step in ('calculator', 'search', 'final_answer'):
        return step
    return 'analyze'
|
64 |
|
|
|
92 |
print(f"Error calling LLM API: {e}")
|
93 |
return f"Error getting response from LLM: {str(e)}"
|
94 |
|
95 |
+
def _analyze_question(self, state: Dict[str, Any]) -> Dict[str, Any]:
|
96 |
"""Analyze the question and determine the next step."""
|
97 |
+
prompt = f"""Analyze this question and determine what needs to be done: {state['question']}
|
98 |
Return your analysis in this format:
|
99 |
{{
|
100 |
"needs_calculation": true/false,
|
|
|
108 |
"""
|
109 |
|
110 |
analysis = eval(self._call_llm_api(prompt))
|
111 |
+
state['needs_more_info'] = analysis.get('needs_search', False)
|
112 |
+
state['search_query'] = analysis.get('search_query', '')
|
113 |
|
114 |
if analysis.get('needs_calculation', False):
|
115 |
+
state['current_step'] = 'calculator'
|
116 |
+
state['tool_output'] = str(analysis['calculation'])
|
117 |
elif analysis.get('needs_search', False):
|
118 |
+
state['current_step'] = 'search'
|
119 |
else:
|
120 |
+
state['current_step'] = 'final_answer'
|
121 |
|
122 |
return state
|
123 |
|
124 |
+
def _use_calculator(self, state: Dict[str, Any]) -> Dict[str, Any]:
|
125 |
"""Use the calculator tool."""
|
126 |
try:
|
127 |
+
result = self.calculator.invoke({"input": eval(state['tool_output'])})
|
128 |
+
state['history'].append({
|
129 |
'step': 'calculator',
|
130 |
+
'input': state['tool_output'],
|
131 |
'output': str(result['output'].result)
|
132 |
})
|
133 |
+
state['current_step'] = 'final_answer'
|
134 |
except Exception as e:
|
135 |
+
state['history'].append({
|
136 |
'step': 'calculator_error',
|
137 |
'error': str(e)
|
138 |
})
|
139 |
+
state['current_step'] = 'final_answer'
|
140 |
return state
|
141 |
|
142 |
+
def _use_search(self, state: Dict[str, Any]) -> Dict[str, Any]:
|
143 |
"""Use the search tool."""
|
144 |
try:
|
145 |
result = self.search_tool.invoke({
|
146 |
"input": {
|
147 |
+
"query": state['search_query'],
|
148 |
"max_results": 3
|
149 |
}
|
150 |
})
|
151 |
+
state['history'].append({
|
152 |
'step': 'search',
|
153 |
+
'query': state['search_query'],
|
154 |
'results': [str(r) for r in result['output'].results]
|
155 |
})
|
156 |
+
state['needs_more_info'] = False
|
157 |
+
state['current_step'] = 'final_answer'
|
158 |
except Exception as e:
|
159 |
+
state['history'].append({
|
160 |
'step': 'search_error',
|
161 |
'error': str(e)
|
162 |
})
|
163 |
+
state['current_step'] = 'final_answer'
|
164 |
return state
|
165 |
|
166 |
+
def _generate_final_answer(self, state: Dict[str, Any]) -> Dict[str, Any]:
|
167 |
"""Generate the final answer based on all gathered information."""
|
168 |
history_str = "\n".join([f"{h['step']}: {h.get('output', h.get('results', h.get('error', '')))}"
|
169 |
+
for h in state['history']])
|
170 |
|
171 |
+
prompt = f"""Based on the following information and history, provide a final answer to the question: {state['question']}
|
172 |
|
173 |
History of steps taken:
|
174 |
{history_str}
|
|
|
176 |
Provide a clear, concise answer that addresses the original question.
|
177 |
"""
|
178 |
|
179 |
+
state['final_answer'] = self._call_llm_api(prompt)
|
180 |
return state
|
181 |
|
182 |
def __call__(self, question: str) -> str:
|
|
|
185 |
|
186 |
try:
|
187 |
# Initialize the state
|
188 |
+
initial_state = {
|
189 |
+
'question': question,
|
190 |
+
'current_step': 'analyze',
|
191 |
+
'tool_output': '',
|
192 |
+
'final_answer': '',
|
193 |
+
'history': [],
|
194 |
+
'needs_more_info': False,
|
195 |
+
'search_query': ''
|
196 |
+
}
|
197 |
|
198 |
# Run the workflow
|
199 |
final_state = self.workflow.invoke(initial_state)
|
200 |
+
return final_state['final_answer']
|
201 |
|
202 |
except Exception as e:
|
203 |
print(f"Error in agent processing: {e}")
|
|
|
269 |
|
270 |
try:
|
271 |
# Initialize the state for this question
|
272 |
+
initial_state = {
|
273 |
+
'question': question_text,
|
274 |
+
'current_step': 'analyze',
|
275 |
+
'tool_output': '',
|
276 |
+
'final_answer': '',
|
277 |
+
'history': [],
|
278 |
+
'needs_more_info': False,
|
279 |
+
'search_query': ''
|
280 |
+
}
|
281 |
|
282 |
# Run the workflow for this question
|
283 |
print(f"\nProcessing question {task_id}: {question_text[:50]}...")
|
|
|
288 |
f"Step: {h['step']}\n" +
|
289 |
f"Input: {h.get('input', h.get('query', ''))}\n" +
|
290 |
f"Output: {h.get('output', h.get('results', h.get('error', '')))}"
|
291 |
+
for h in final_state['history']
|
292 |
])
|
293 |
|
294 |
# Add to results
|
295 |
+
submitted_answer = final_state['final_answer']
|
296 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
297 |
results_log.append({
|
298 |
"Task ID": task_id,
|
|
|
301 |
"Workflow History": workflow_history
|
302 |
})
|
303 |
|
304 |
+
print(f"Completed question {task_id} with {len(final_state['history'])} workflow steps")
|
305 |
|
306 |
except Exception as e:
|
307 |
print(f"Error running agent workflow on task {task_id}: {e}")
|