naman1102 committed on
Commit
399fe38
·
1 Parent(s): 2683234

pydanticc_remove

Browse files
Files changed (2) hide show
  1. app.py +70 -72
  2. tools.py +43 -54
app.py CHANGED
@@ -3,10 +3,9 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
6
- from typing import Dict, Any, List, TypedDict
7
  from langgraph.graph import Graph, StateGraph
8
  from langgraph.prebuilt import ToolNode
9
- from pydantic import BaseModel, Field
10
  from tools import create_calculator_tool, create_search_tool
11
  print("trial")
12
  # (Keep Constants as is)
@@ -15,15 +14,14 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
15
  MODEL_API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
16
  HF_TOKEN = os.getenv("HF_TOKEN") # Make sure to set this environment variable
17
 
18
- class AgentState(BaseModel):
19
- """Schema for the agent's state."""
20
- question: str = Field(..., description="The original question")
21
- current_step: str = Field(default="analyze", description="Current step in the workflow")
22
- tool_output: str = Field(default="", description="Output from the last tool used")
23
- final_answer: str = Field(default="", description="The final answer to be returned")
24
- history: List[Dict[str, str]] = Field(default_factory=list, description="History of operations performed")
25
- needs_more_info: bool = Field(default=False, description="Whether more information is needed")
26
- search_query: str = Field(default="", description="Current search query if any")
27
 
28
  class BasicAgent:
29
  def __init__(self):
@@ -52,7 +50,7 @@ class BasicAgent:
52
  """Create the agent workflow using LangGraph."""
53
  # Create the workflow with state schema
54
  print("Creating Stategraph : error happens here?")
55
- workflow = StateGraph(state_schema= AgentState)
56
  print("Stategraph created")
57
  # Add nodes
58
  workflow.add_node("analyze", self._analyze_question)
@@ -68,12 +66,12 @@ class BasicAgent:
68
  workflow.add_edge("search", "final_answer")
69
 
70
  # Define conditional edges
71
- def router(state: AgentState) -> str:
72
- if state.current_step == 'calculator':
73
  return 'calculator'
74
- elif state.current_step == 'search':
75
  return 'search'
76
- elif state.current_step == 'final_answer':
77
  return 'final_answer'
78
  return 'analyze'
79
 
@@ -107,9 +105,9 @@ class BasicAgent:
107
  print(f"Error calling LLM API: {e}")
108
  return f"Error getting response from LLM: {str(e)}"
109
 
110
- def _analyze_question(self, state: AgentState) -> AgentState:
111
  """Analyze the question and determine the next step."""
112
- prompt = f"""Analyze this question and determine what needs to be done: {state.question}
113
  Return your analysis in this format:
114
  {{
115
  "needs_calculation": true/false,
@@ -123,71 +121,71 @@ class BasicAgent:
123
  """
124
 
125
  analysis = eval(self._call_llm_api(prompt))
126
- state.needs_more_info = analysis.get('needs_search', False)
127
- state.search_query = analysis.get('search_query', '')
128
 
129
  if analysis.get('needs_calculation', False):
130
- state.current_step = 'calculator'
131
- state.tool_output = str(analysis['calculation'])
132
  elif analysis.get('needs_search', False):
133
- state.current_step = 'search'
134
  else:
135
- state.current_step = 'final_answer'
136
 
137
  return state
138
 
139
- def _use_calculator(self, state: AgentState) -> AgentState:
140
  """Use the calculator tool."""
141
  try:
142
  # Create calculator state with input from tool_output
143
- calc_input = eval(state.tool_output)
144
- result = self.calculator.invoke(CalculatorState(input=calc_input))
145
 
146
- state.history.append({
147
  'step': 'calculator',
148
- 'input': state.tool_output,
149
- 'output': str(result['output'].result)
150
  })
151
- state.current_step = 'final_answer'
152
  except Exception as e:
153
- state.history.append({
154
  'step': 'calculator_error',
155
  'error': str(e)
156
  })
157
- state.current_step = 'final_answer'
158
  return state
159
 
160
- def _use_search(self, state: AgentState) -> AgentState:
161
  """Use the search tool."""
162
  try:
163
  # Create search state with input from search_query
164
- search_input = SearchInput(
165
- query=state.search_query,
166
- max_results=3
167
- )
168
- result = self.search_tool.invoke(SearchState(input=search_input))
169
 
170
- state.history.append({
171
  'step': 'search',
172
- 'query': state.search_query,
173
- 'results': [str(r) for r in result['output'].results]
174
  })
175
- state.needs_more_info = False
176
- state.current_step = 'final_answer'
177
  except Exception as e:
178
- state.history.append({
179
  'step': 'search_error',
180
  'error': str(e)
181
  })
182
- state.current_step = 'final_answer'
183
  return state
184
 
185
- def _generate_final_answer(self, state: AgentState) -> AgentState:
186
  """Generate the final answer based on all gathered information."""
187
  history_str = "\n".join([f"{h['step']}: {h.get('output', h.get('results', h.get('error', '')))}"
188
- for h in state.history])
189
 
190
- prompt = f"""Based on the following information and history, provide a final answer to the question: {state.question}
191
 
192
  History of steps taken:
193
  {history_str}
@@ -195,7 +193,7 @@ class BasicAgent:
195
  Provide a clear, concise answer that addresses the original question.
196
  """
197
 
198
- state.final_answer = self._call_llm_api(prompt)
199
  return state
200
 
201
  def __call__(self, question: str) -> str:
@@ -204,19 +202,19 @@ class BasicAgent:
204
 
205
  try:
206
  # Initialize the state
207
- initial_state = AgentState(
208
- question=question,
209
- current_step="analyze",
210
- tool_output="",
211
- final_answer="",
212
- history=[],
213
- needs_more_info=False,
214
- search_query=""
215
- )
216
 
217
  # Run the workflow
218
  final_state = self.workflow.invoke(initial_state)
219
- return final_state.final_answer
220
 
221
  except Exception as e:
222
  print(f"Error in agent processing: {e}")
@@ -289,15 +287,15 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
289
 
290
  try:
291
  # Initialize the state for this question
292
- initial_state = AgentState(
293
- question=question_text,
294
- current_step="analyze",
295
- tool_output="",
296
- final_answer="",
297
- history=[],
298
- needs_more_info=False,
299
- search_query=""
300
- )
301
 
302
  # Run the workflow for this question
303
  print(f"\nProcessing question {task_id}: {question_text[:50]}...")
@@ -308,11 +306,11 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
308
  f"Step: {h['step']}\n" +
309
  f"Input: {h.get('input', h.get('query', ''))}\n" +
310
  f"Output: {h.get('output', h.get('results', h.get('error', '')))}"
311
- for h in final_state.history
312
  ])
313
 
314
  # Add to results
315
- submitted_answer = final_state.final_answer
316
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
317
  results_log.append({
318
  "Task ID": task_id,
@@ -321,7 +319,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
321
  "Workflow History": workflow_history
322
  })
323
 
324
- print(f"Completed question {task_id} with {len(final_state.history)} workflow steps")
325
 
326
  except Exception as e:
327
  print(f"Error running agent workflow on task {task_id}: {e}")
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ from typing import Dict, Any, List, TypedDict, Optional
7
  from langgraph.graph import Graph, StateGraph
8
  from langgraph.prebuilt import ToolNode
 
9
  from tools import create_calculator_tool, create_search_tool
10
  print("trial")
11
  # (Keep Constants as is)
 
14
  MODEL_API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
15
  HF_TOKEN = os.getenv("HF_TOKEN") # Make sure to set this environment variable
16
 
17
+ class AgentState(TypedDict):
18
+ question: str
19
+ current_step: str
20
+ tool_output: str
21
+ final_answer: str
22
+ history: List[Dict[str, str]]
23
+ needs_more_info: bool
24
+ search_query: str
 
25
 
26
  class BasicAgent:
27
  def __init__(self):
 
50
  """Create the agent workflow using LangGraph."""
51
  # Create the workflow with state schema
52
  print("Creating Stategraph : error happens here?")
53
+ workflow = StateGraph(state_schema=Dict[str, Any])
54
  print("Stategraph created")
55
  # Add nodes
56
  workflow.add_node("analyze", self._analyze_question)
 
66
  workflow.add_edge("search", "final_answer")
67
 
68
  # Define conditional edges
69
+ def router(state: Dict[str, Any]) -> str:
70
+ if state["current_step"] == 'calculator':
71
  return 'calculator'
72
+ elif state["current_step"] == 'search':
73
  return 'search'
74
+ elif state["current_step"] == 'final_answer':
75
  return 'final_answer'
76
  return 'analyze'
77
 
 
105
  print(f"Error calling LLM API: {e}")
106
  return f"Error getting response from LLM: {str(e)}"
107
 
108
+ def _analyze_question(self, state: Dict[str, Any]) -> Dict[str, Any]:
109
  """Analyze the question and determine the next step."""
110
+ prompt = f"""Analyze this question and determine what needs to be done: {state['question']}
111
  Return your analysis in this format:
112
  {{
113
  "needs_calculation": true/false,
 
121
  """
122
 
123
  analysis = eval(self._call_llm_api(prompt))
124
+ state["needs_more_info"] = analysis.get('needs_search', False)
125
+ state["search_query"] = analysis.get('search_query', '')
126
 
127
  if analysis.get('needs_calculation', False):
128
+ state["current_step"] = 'calculator'
129
+ state["tool_output"] = str(analysis['calculation'])
130
  elif analysis.get('needs_search', False):
131
+ state["current_step"] = 'search'
132
  else:
133
+ state["current_step"] = 'final_answer'
134
 
135
  return state
136
 
137
+ def _use_calculator(self, state: Dict[str, Any]) -> Dict[str, Any]:
138
  """Use the calculator tool."""
139
  try:
140
  # Create calculator state with input from tool_output
141
+ calc_input = eval(state["tool_output"])
142
+ result = self.calculator.invoke({"input": calc_input})
143
 
144
+ state["history"].append({
145
  'step': 'calculator',
146
+ 'input': state["tool_output"],
147
+ 'output': str(result['output']['result'])
148
  })
149
+ state["current_step"] = 'final_answer'
150
  except Exception as e:
151
+ state["history"].append({
152
  'step': 'calculator_error',
153
  'error': str(e)
154
  })
155
+ state["current_step"] = 'final_answer'
156
  return state
157
 
158
+ def _use_search(self, state: Dict[str, Any]) -> Dict[str, Any]:
159
  """Use the search tool."""
160
  try:
161
  # Create search state with input from search_query
162
+ search_input = {
163
+ "query": state["search_query"],
164
+ "max_results": 3
165
+ }
166
+ result = self.search_tool.invoke({"input": search_input})
167
 
168
+ state["history"].append({
169
  'step': 'search',
170
+ 'query': state["search_query"],
171
+ 'results': [str(r) for r in result['output']['results']]
172
  })
173
+ state["needs_more_info"] = False
174
+ state["current_step"] = 'final_answer'
175
  except Exception as e:
176
+ state["history"].append({
177
  'step': 'search_error',
178
  'error': str(e)
179
  })
180
+ state["current_step"] = 'final_answer'
181
  return state
182
 
183
+ def _generate_final_answer(self, state: Dict[str, Any]) -> Dict[str, Any]:
184
  """Generate the final answer based on all gathered information."""
185
  history_str = "\n".join([f"{h['step']}: {h.get('output', h.get('results', h.get('error', '')))}"
186
+ for h in state["history"]])
187
 
188
+ prompt = f"""Based on the following information and history, provide a final answer to the question: {state['question']}
189
 
190
  History of steps taken:
191
  {history_str}
 
193
  Provide a clear, concise answer that addresses the original question.
194
  """
195
 
196
+ state["final_answer"] = self._call_llm_api(prompt)
197
  return state
198
 
199
  def __call__(self, question: str) -> str:
 
202
 
203
  try:
204
  # Initialize the state
205
+ initial_state = {
206
+ "question": question,
207
+ "current_step": "analyze",
208
+ "tool_output": "",
209
+ "final_answer": "",
210
+ "history": [],
211
+ "needs_more_info": False,
212
+ "search_query": ""
213
+ }
214
 
215
  # Run the workflow
216
  final_state = self.workflow.invoke(initial_state)
217
+ return final_state["final_answer"]
218
 
219
  except Exception as e:
220
  print(f"Error in agent processing: {e}")
 
287
 
288
  try:
289
  # Initialize the state for this question
290
+ initial_state = {
291
+ "question": question_text,
292
+ "current_step": "analyze",
293
+ "tool_output": "",
294
+ "final_answer": "",
295
+ "history": [],
296
+ "needs_more_info": False,
297
+ "search_query": ""
298
+ }
299
 
300
  # Run the workflow for this question
301
  print(f"\nProcessing question {task_id}: {question_text[:50]}...")
 
306
  f"Step: {h['step']}\n" +
307
  f"Input: {h.get('input', h.get('query', ''))}\n" +
308
  f"Output: {h.get('output', h.get('results', h.get('error', '')))}"
309
+ for h in final_state["history"]
310
  ])
311
 
312
  # Add to results
313
+ submitted_answer = final_state["final_answer"]
314
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
315
  results_log.append({
316
  "Task ID": task_id,
 
319
  "Workflow History": workflow_history
320
  })
321
 
322
+ print(f"Completed question {task_id} with {len(final_state['history'])} workflow steps")
323
 
324
  except Exception as e:
325
  print(f"Error running agent workflow on task {task_id}: {e}")
tools.py CHANGED
@@ -1,71 +1,60 @@
1
- from typing import Dict, Any, List
2
  from langgraph.graph import Graph, StateGraph
3
  from langgraph.prebuilt import ToolNode
4
- from pydantic import BaseModel, Field
5
  from duckduckgo_search import DDGS
6
 
7
- class CalculatorInput(BaseModel):
8
- operation: str = Field(..., description="The operation to perform (add, subtract, multiply, divide)")
9
- numbers: List[float] = Field(..., description="List of numbers to perform the operation on")
10
 
11
- class CalculatorOutput(BaseModel):
12
- result: float = Field(..., description="The result of the calculation")
13
- operation: str = Field(..., description="The operation that was performed")
14
 
15
- class CalculatorState(BaseModel):
16
- input: CalculatorInput
17
- output: CalculatorOutput = None
 
18
 
19
- class SearchInput(BaseModel):
20
- query: str = Field(..., description="The search query to look up")
21
- max_results: int = Field(default=3, description="Maximum number of results to return")
22
-
23
- class SearchResult(BaseModel):
24
- title: str = Field(..., description="Title of the search result")
25
- link: str = Field(..., description="URL of the search result")
26
- snippet: str = Field(..., description="Brief description of the search result")
27
-
28
- class SearchOutput(BaseModel):
29
- results: List[SearchResult] = Field(..., description="List of search results")
30
- query: str = Field(..., description="The original search query")
31
-
32
- class SearchState(BaseModel):
33
- input: SearchInput
34
- output: SearchOutput = None
35
 
36
  def create_calculator_tool() -> Graph:
37
  """Creates a calculator tool using LangGraph that can perform basic arithmetic operations."""
38
  print("Creating calculator tool")
39
- def calculator_function(state: CalculatorState) -> dict:
 
40
  print("Calculator function called")
41
- if len(state.input.numbers) < 2:
 
42
  raise ValueError("At least two numbers are required for calculation")
43
 
44
- result = state.input.numbers[0]
45
 
46
- for num in state.input.numbers[1:]:
47
- if state.input.operation == "add":
48
  result += num
49
- elif state.input.operation == "subtract":
50
  result -= num
51
- elif state.input.operation == "multiply":
52
  result *= num
53
- elif state.input.operation == "divide":
54
  if num == 0:
55
  raise ValueError("Cannot divide by zero")
56
  result /= num
57
  else:
58
- raise ValueError(f"Unsupported operation: {state.input.operation}")
59
 
60
  return {
61
- "output": CalculatorOutput(
62
- result=result,
63
- operation=state.input.operation
64
- )
65
  }
66
 
67
  # Create the graph with state schema
68
- workflow = StateGraph(state_schema=CalculatorState)
69
  print("Calculator graph for workflow created")
70
  # Add the calculator tool node
71
  workflow.add_node("calculator", ToolNode(calculator_function))
@@ -79,34 +68,34 @@ def create_calculator_tool() -> Graph:
79
  def create_search_tool() -> Graph:
80
  """Creates a search tool using DuckDuckGo that can search for information online."""
81
 
82
- def search_function(state: SearchState) -> dict:
83
  with DDGS() as ddgs:
84
  # Run search
85
  raw_results = list(ddgs.text(
86
- state.input.query,
87
- max_results=state.input.max_results
88
  ))
89
 
90
  results = []
91
  for r in raw_results:
92
  try:
93
- results.append(SearchResult(
94
- title=r.get("title", ""),
95
- link=r.get("href", r.get("link", "")), # DuckDuckGo sometimes uses "href"
96
- snippet=r.get("body", r.get("snippet", ""))
97
- ))
98
  except Exception as e:
99
  print("Skipping malformed search result:", r, "Error:", e)
100
 
101
  return {
102
- "output": SearchOutput(
103
- results=results,
104
- query=state.input.query
105
- )
106
  }
107
 
108
  # Create the graph with state schema
109
- workflow = StateGraph(state_schema=SearchState)
110
 
111
  # Add the search tool node
112
  workflow.add_node("search", ToolNode(search_function))
 
1
+ from typing import Dict, Any, List, TypedDict, Optional
2
  from langgraph.graph import Graph, StateGraph
3
  from langgraph.prebuilt import ToolNode
 
4
  from duckduckgo_search import DDGS
5
 
6
+ class CalculatorInput(TypedDict):
7
+ operation: str
8
+ numbers: List[float]
9
 
10
+ class CalculatorOutput(TypedDict):
11
+ result: float
12
+ operation: str
13
 
14
+ class SearchResult(TypedDict):
15
+ title: str
16
+ link: str
17
+ snippet: str
18
 
19
+ class SearchOutput(TypedDict):
20
+ results: List[SearchResult]
21
+ query: str
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
  def create_calculator_tool() -> Graph:
24
  """Creates a calculator tool using LangGraph that can perform basic arithmetic operations."""
25
  print("Creating calculator tool")
26
+
27
+ def calculator_function(state: Dict[str, Any]) -> dict:
28
  print("Calculator function called")
29
+ input_data = state["input"]
30
+ if len(input_data["numbers"]) < 2:
31
  raise ValueError("At least two numbers are required for calculation")
32
 
33
+ result = input_data["numbers"][0]
34
 
35
+ for num in input_data["numbers"][1:]:
36
+ if input_data["operation"] == "add":
37
  result += num
38
+ elif input_data["operation"] == "subtract":
39
  result -= num
40
+ elif input_data["operation"] == "multiply":
41
  result *= num
42
+ elif input_data["operation"] == "divide":
43
  if num == 0:
44
  raise ValueError("Cannot divide by zero")
45
  result /= num
46
  else:
47
+ raise ValueError(f"Unsupported operation: {input_data['operation']}")
48
 
49
  return {
50
+ "output": {
51
+ "result": result,
52
+ "operation": input_data["operation"]
53
+ }
54
  }
55
 
56
  # Create the graph with state schema
57
+ workflow = StateGraph(state_schema=Dict[str, Any])
58
  print("Calculator graph for workflow created")
59
  # Add the calculator tool node
60
  workflow.add_node("calculator", ToolNode(calculator_function))
 
68
  def create_search_tool() -> Graph:
69
  """Creates a search tool using DuckDuckGo that can search for information online."""
70
 
71
+ def search_function(state: Dict[str, Any]) -> dict:
72
  with DDGS() as ddgs:
73
  # Run search
74
  raw_results = list(ddgs.text(
75
+ state["input"]["query"],
76
+ max_results=state["input"].get("max_results", 3)
77
  ))
78
 
79
  results = []
80
  for r in raw_results:
81
  try:
82
+ results.append({
83
+ "title": r.get("title", ""),
84
+ "link": r.get("href", r.get("link", "")), # DuckDuckGo sometimes uses "href"
85
+ "snippet": r.get("body", r.get("snippet", ""))
86
+ })
87
  except Exception as e:
88
  print("Skipping malformed search result:", r, "Error:", e)
89
 
90
  return {
91
+ "output": {
92
+ "results": results,
93
+ "query": state["input"]["query"]
94
+ }
95
  }
96
 
97
  # Create the graph with state schema
98
+ workflow = StateGraph(state_schema=Dict[str, Any])
99
 
100
  # Add the search tool node
101
  workflow.add_node("search", ToolNode(search_function))