naman1102 committed
Commit ebec9e2 · 1 Parent(s): 42c961f
Files changed (4)
  1. app.py +201 -5
  2. requirements.txt +4 -1
  3. retriever.py +0 -0
  4. tools.py +130 -0
app.py CHANGED
@@ -3,21 +3,217 @@ import gradio as gr
 import requests
 import inspect
 import pandas as pd
+from typing import Dict, Any, List, TypedDict, Annotated
+from langgraph.graph import Graph, StateGraph
+from langgraph.prebuilt import ToolNode
+from pydantic import BaseModel, Field
+from tools import create_calculator_tool, create_search_tool
 print("trial")
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+MODEL_API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
+HF_TOKEN = os.getenv("HF_TOKEN")  # Make sure to set this environment variable
+
+class AgentState(TypedDict):
+    """Type definition for the agent's state."""
+    question: str
+    current_step: str
+    tool_output: str
+    final_answer: str
+    history: List[Dict[str, str]]
+    needs_more_info: bool
+    search_query: str
 
 # --- Basic Agent Definition ---
 # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
 class BasicAgent:
     def __init__(self):
-        print("BasicAgent initialized.")
+        print("Initializing BasicAgent with Qwen2.5-Coder-32B-Instruct API...")
+        if not HF_TOKEN:
+            raise ValueError("HF_TOKEN environment variable not set. Please set your Hugging Face API token.")
+
+        # Initialize tools
+        self.calculator = create_calculator_tool()
+        self.search_tool = create_search_tool()
+
+        # Set up headers for API calls
+        self.headers = {
+            "Authorization": f"Bearer {HF_TOKEN}",
+            "Content-Type": "application/json"
+        }
+
+        # Create the agent workflow
+        self.workflow = self._create_workflow()
+        print("BasicAgent initialization complete.")
+
+    def _call_llm_api(self, prompt: str) -> str:
+        """Call the Qwen model through the Hugging Face API."""
+        try:
+            response = requests.post(
+                MODEL_API_URL,
+                headers=self.headers,
+                json={"inputs": prompt, "parameters": {"max_length": 200}}
+            )
+            response.raise_for_status()
+            return response.json()[0]["generated_text"]
+        except Exception as e:
+            print(f"Error calling LLM API: {e}")
+            return f"Error getting response from LLM: {str(e)}"
+
+    def _analyze_question(self, state: AgentState) -> AgentState:
+        """Analyze the question and determine the next step."""
+        prompt = f"""Analyze this question and determine what needs to be done: {state['question']}
+        Return your analysis in this format:
+        {{
+            "needs_calculation": true/false,
+            "needs_search": true/false,
+            "search_query": "query if needed",
+            "calculation": {{
+                "operation": "add/subtract/multiply/divide",
+                "numbers": [numbers if needed]
+            }}
+        }}
+        """
+
+        analysis = eval(self._call_llm_api(prompt))
+        state['needs_more_info'] = analysis.get('needs_search', False)
+        state['search_query'] = analysis.get('search_query', '')
+
+        if analysis.get('needs_calculation', False):
+            state['current_step'] = 'calculator'
+            state['tool_output'] = str(analysis['calculation'])
+        elif analysis.get('needs_search', False):
+            state['current_step'] = 'search'
+        else:
+            state['current_step'] = 'final_answer'
+
+        return state
+
+    def _use_calculator(self, state: AgentState) -> AgentState:
+        """Use the calculator tool."""
+        try:
+            result = self.calculator.invoke({"input": eval(state['tool_output'])})
+            state['history'].append({
+                'step': 'calculator',
+                'input': state['tool_output'],
+                'output': str(result['output'].result)
+            })
+            state['current_step'] = 'final_answer'
+        except Exception as e:
+            state['history'].append({
+                'step': 'calculator_error',
+                'error': str(e)
+            })
+            state['current_step'] = 'final_answer'
+        return state
+
+    def _use_search(self, state: AgentState) -> AgentState:
+        """Use the search tool."""
+        try:
+            result = self.search_tool.invoke({
+                "input": {
+                    "query": state['search_query'],
+                    "max_results": 3
+                }
+            })
+            state['history'].append({
+                'step': 'search',
+                'query': state['search_query'],
+                'results': [str(r) for r in result['output'].results]
+            })
+            state['needs_more_info'] = False
+            state['current_step'] = 'final_answer'
+        except Exception as e:
+            state['history'].append({
+                'step': 'search_error',
+                'error': str(e)
+            })
+            state['current_step'] = 'final_answer'
+        return state
+
+    def _generate_final_answer(self, state: AgentState) -> AgentState:
+        """Generate the final answer based on all gathered information."""
+        history_str = "\n".join([f"{h['step']}: {h.get('output', h.get('results', h.get('error', '')))}"
+                                 for h in state['history']])
+
+        prompt = f"""Based on the following information and history, provide a final answer to the question: {state['question']}
+
+        History of steps taken:
+        {history_str}
+
+        Provide a clear, concise answer that addresses the original question.
+        """
+
+        state['final_answer'] = self._call_llm_api(prompt)
+        return state
+
+    def _create_workflow(self) -> Graph:
+        """Create the agent workflow using LangGraph."""
+        workflow = StateGraph(AgentState)
+
+        # Add nodes
+        workflow.add_node("analyze", self._analyze_question)
+        workflow.add_node("calculator", self._use_calculator)
+        workflow.add_node("search", self._use_search)
+        workflow.add_node("final_answer", self._generate_final_answer)
+
+        # Define edges
+        workflow.add_edge("analyze", "calculator")
+        workflow.add_edge("analyze", "search")
+        workflow.add_edge("analyze", "final_answer")
+        workflow.add_edge("calculator", "final_answer")
+        workflow.add_edge("search", "final_answer")
+
+        # Define conditional edges
+        def router(state: AgentState) -> str:
+            if state['current_step'] == 'calculator':
+                return 'calculator'
+            elif state['current_step'] == 'search':
+                return 'search'
+            elif state['current_step'] == 'final_answer':
+                return 'final_answer'
+            return 'analyze'
+
+        workflow.add_conditional_edges(
+            "analyze",
+            router,
+            {
+                "calculator": "calculator",
+                "search": "search",
+                "final_answer": "final_answer"
+            }
+        )
+
+        # Set entry and exit points
+        workflow.set_entry_point("analyze")
+        workflow.set_finish_point("final_answer")
+
+        return workflow.compile()
+
     def __call__(self, question: str) -> str:
-        print(f"Agent received question (first 50 chars): {question[:50]}...")
-        fixed_answer = "This is a default answer."
-        print(f"Agent returning fixed answer: {fixed_answer}")
-        return fixed_answer
+        """Process a question through the agent workflow."""
+        print(f"Agent received question: {question[:50]}...")
+
+        try:
+            # Initialize the state
+            initial_state = {
+                "question": question,
+                "current_step": "analyze",
+                "tool_output": "",
+                "final_answer": "",
+                "history": [],
+                "needs_more_info": False,
+                "search_query": ""
+            }
+
+            # Run the workflow
+            final_state = self.workflow.invoke(initial_state)
+            return final_state['final_answer']
+
+        except Exception as e:
+            print(f"Error in agent processing: {e}")
+            return f"I encountered an error while processing your question: {str(e)}"
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
requirements.txt CHANGED
@@ -1,2 +1,5 @@
 gradio
-requests
+requests
+langgraph>=0.0.10
+pydantic>=2.0.0
+duckduckgo-search>=4.1.1
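
Since langgraph, pydantic, and duckduckgo-search are new dependencies and app.py now refuses to start without `HF_TOKEN`, a quick environment check before launching the Space can save a failed build. A small sketch, not part of the commit; the module names are simply the import names of the packages listed above:

import importlib
import os

# Fail fast if a pinned dependency is missing or the token is not configured.
for module in ("gradio", "requests", "langgraph", "pydantic", "duckduckgo_search"):
    importlib.import_module(module)

assert os.getenv("HF_TOKEN"), "HF_TOKEN must be set before BasicAgent is constructed"
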
retriever.py ADDED
File without changes
tools.py ADDED
@@ -0,0 +1,130 @@
+from typing import Dict, Any, List
+from langgraph.graph import Graph, StateGraph
+from langgraph.prebuilt import ToolNode
+from pydantic import BaseModel, Field
+from duckduckgo_search import DDGS
+
+class CalculatorInput(BaseModel):
+    operation: str = Field(..., description="The operation to perform (add, subtract, multiply, divide)")
+    numbers: List[float] = Field(..., description="List of numbers to perform the operation on")
+
+class CalculatorOutput(BaseModel):
+    result: float = Field(..., description="The result of the calculation")
+    operation: str = Field(..., description="The operation that was performed")
+
+class SearchInput(BaseModel):
+    query: str = Field(..., description="The search query to look up")
+    max_results: int = Field(default=3, description="Maximum number of results to return")
+
+class SearchResult(BaseModel):
+    title: str = Field(..., description="Title of the search result")
+    link: str = Field(..., description="URL of the search result")
+    snippet: str = Field(..., description="Brief description of the search result")
+
+class SearchOutput(BaseModel):
+    results: List[SearchResult] = Field(..., description="List of search results")
+    query: str = Field(..., description="The original search query")
+
+def create_calculator_tool() -> Graph:
+    """Creates a calculator tool using LangGraph that can perform basic arithmetic operations."""
+
+    def calculator_function(state: Dict[str, Any]) -> Dict[str, Any]:
+        input_data = CalculatorInput(**state["input"])
+
+        if len(input_data.numbers) < 2:
+            raise ValueError("At least two numbers are required for calculation")
+
+        result = input_data.numbers[0]
+
+        for num in input_data.numbers[1:]:
+            if input_data.operation == "add":
+                result += num
+            elif input_data.operation == "subtract":
+                result -= num
+            elif input_data.operation == "multiply":
+                result *= num
+            elif input_data.operation == "divide":
+                if num == 0:
+                    raise ValueError("Cannot divide by zero")
+                result /= num
+            else:
+                raise ValueError(f"Unsupported operation: {input_data.operation}")
+
+        output = CalculatorOutput(
+            result=result,
+            operation=input_data.operation
+        )
+
+        return {"output": output}
+
+    # Create the graph
+    workflow = StateGraph()
+
+    # Add the calculator tool node
+    workflow.add_node("calculator", ToolNode(calculator_function))
+
+    # Set the entry and exit points
+    workflow.set_entry_point("calculator")
+    workflow.set_finish_point("calculator")
+
+    return workflow.compile()
+
+def create_search_tool() -> Graph:
+    """Creates a search tool using DuckDuckGo that can search for information online."""
+
+    def search_function(state: Dict[str, Any]) -> Dict[str, Any]:
+        input_data = SearchInput(**state["input"])
+
+        # Initialize DuckDuckGo search
+        with DDGS() as ddgs:
+            # Perform the search
+            search_results = list(ddgs.text(
+                input_data.query,
+                max_results=input_data.max_results
+            ))
+
+        # Convert results to our model
+        results = [
+            SearchResult(
+                title=result["title"],
+                link=result["link"],
+                snippet=result["body"]
+            )
+            for result in search_results
+        ]
+
+        output = SearchOutput(
+            results=results,
+            query=input_data.query
+        )
+
+        return {"output": output}
+
+    # Create the graph
+    workflow = StateGraph()
+
+    # Add the search tool node
+    workflow.add_node("search", ToolNode(search_function))
+
+    # Set the entry and exit points
+    workflow.set_entry_point("search")
+    workflow.set_finish_point("search")
+
+    return workflow.compile()
+
+# Example usage:
+# if __name__ == "__main__":
+#     # Create the calculator tool
+#     calculator = create_calculator_tool()
+
+#     # Example calculation
+#     result = calculator.invoke({
+#         "input": {
+#             "operation": "add",
+#             "numbers": [1, 2, 3, 4]
+#         }
+#     })
+
+#     print(f"Result: {result['output'].result}")
+
+
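
A caveat on the two graph builders above: `StateGraph()` is instantiated without a state schema, and `ToolNode` is wrapped around plain Python functions. In the LangGraph versions this requirements pin allows, `StateGraph` expects a state type and `ToolNode` is built for LangChain tool objects driving tool-calling messages, so both calls are likely to raise before the graphs compile. A minimal sketch of how the calculator graph could be built under those assumptions; `ToolState` and `calculator_node` are illustrative names, with `calculator_node` standing in for the `calculator_function` defined above:

from typing import Any, Dict, TypedDict

from langgraph.graph import StateGraph

class ToolState(TypedDict):
    # One input payload in, one result out, mirroring the {"input": ..., "output": ...} shape used above.
    input: Dict[str, Any]
    output: Any

def calculator_node(state: ToolState) -> Dict[str, Any]:
    # Stand-in for calculator_function: handles only the "add" case, for brevity.
    numbers = state["input"]["numbers"]
    return {"output": sum(numbers)}

workflow = StateGraph(ToolState)                  # a state schema is required
workflow.add_node("calculator", calculator_node)  # plain callables can be added as nodes directly
workflow.set_entry_point("calculator")
workflow.set_finish_point("calculator")
calculator = workflow.compile()

print(calculator.invoke({"input": {"operation": "add", "numbers": [1, 2, 3, 4]}})["output"])  # 10

The search graph can be exercised the same way once it compiles, by invoking it with {"input": {"query": "...", "max_results": 3}}, which matches what `_use_search` in app.py passes in.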