naman1102 committed on
Commit
323f26e
·
1 Parent(s): 035834c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -17
app.py CHANGED
@@ -6,6 +6,8 @@ import pandas as pd
6
  import ast
7
  import operator
8
  import time
 
 
9
  from typing import List, Dict, Any, Optional, Annotated
10
  from langgraph.graph import Graph, StateGraph
11
  from langgraph.prebuilt import ToolNode
@@ -21,6 +23,26 @@ print("trial")
21
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
22
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") # Make sure to set this environment variable
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  class AgentState(TypedDict):
26
  question: Annotated[str, override]
@@ -30,6 +52,8 @@ class AgentState(TypedDict):
30
  history: Annotated[List[Dict[str, str]], operator.add]
31
  needs_more_info: Annotated[bool, override]
32
  search_query: Annotated[str, override]
 
 
33
 
34
  class BasicAgent:
35
  def __init__(self):
@@ -112,14 +136,11 @@ class BasicAgent:
112
  """Analyze the question and determine the next step."""
113
  prompt = f"""Analyze this question and determine what needs to be done: {state['question']}
114
 
115
- Return only a valid Python dictionary in this exact format:
116
  {{
117
  "needs_search": true/false,
118
  "search_query": "query if needed"
119
- }}
120
-
121
- Do not include any other text or explanation. Only return the dictionary.
122
- """
123
 
124
  try:
125
  llm_response = self._call_llm_api(prompt)
@@ -127,6 +148,13 @@ Do not include any other text or explanation. Only return the dictionary.
127
  print(f"Input: {state['question']}")
128
  print(f"LLM Response: {llm_response}")
129
 
 
 
 
 
 
 
 
130
  analysis = ast.literal_eval(llm_response)
131
  state["needs_more_info"] = analysis.get('needs_search', False)
132
  state["search_query"] = analysis.get('search_query', '')
@@ -141,6 +169,12 @@ Do not include any other text or explanation. Only return the dictionary.
141
  state["needs_more_info"] = True
142
  state["search_query"] = state["question"]
143
  state["current_step"] = 'search'
 
 
 
 
 
 
144
 
145
  return state
146
 
@@ -162,6 +196,13 @@ Do not include any other text or explanation. Only return the dictionary.
162
  for i, result in enumerate(search_results, 1):
163
  print(f"{i}. {result}")
164
 
 
 
 
 
 
 
 
165
  state["history"].append({
166
  'step': 'search',
167
  'query': state["search_query"],
@@ -176,6 +217,12 @@ Do not include any other text or explanation. Only return the dictionary.
176
  'error': str(e)
177
  })
178
  state["current_step"] = 'final_answer'
 
 
 
 
 
 
179
  return state
180
 
181
  def _generate_final_answer(self, state: AgentState) -> AgentState:
@@ -183,13 +230,12 @@ Do not include any other text or explanation. Only return the dictionary.
183
  history_str = "\n".join([f"{h['step']}: {h.get('output', h.get('results', h.get('error', '')))}"
184
  for h in state["history"]])
185
 
186
- prompt = f"""Based on the following information and history, provide a final answer to the question: {state['question']}
187
-
188
- History of steps taken:
189
- {history_str}
190
-
191
- Provide a clear, concise answer that addresses the original question.
192
- """
193
 
194
  print("\n=== Generate Final Answer ===")
195
  print(f"Question: {state['question']}")
@@ -200,27 +246,45 @@ Do not include any other text or explanation. Only return the dictionary.
200
  print("\nFinal Answer:")
201
  print(llm_response)
202
 
 
 
 
 
 
 
 
 
203
  state["final_answer"] = llm_response
204
  return state
205
 
206
- def __call__(self, question: str) -> str:
207
  """Process a question through the agent workflow."""
208
  print(f"Agent received question: {question[:50]}...")
209
 
210
  try:
211
  # Initialize the state
212
  initial_state: AgentState = {
213
- "question": question, # Now a string, not a list
214
  "current_step": "analyze",
215
  "tool_output": "",
216
  "final_answer": "",
217
  "history": [],
218
  "needs_more_info": False,
219
- "search_query": ""
 
 
220
  }
221
 
222
  # Run the workflow
223
  final_state = self.workflow.invoke(initial_state)
 
 
 
 
 
 
 
 
224
  return final_state["final_answer"]
225
 
226
  except Exception as e:
@@ -295,13 +359,15 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
295
  try:
296
  # Initialize the state for this question
297
  initial_state = {
298
- "question": question_text, # Now a string, not a list
299
  "current_step": "analyze",
300
  "tool_output": "",
301
  "final_answer": "",
302
  "history": [],
303
  "needs_more_info": False,
304
- "search_query": ""
 
 
305
  }
306
 
307
  # Run the workflow for this question
 
6
  import ast
7
  import operator
8
  import time
9
+ import json
10
+ from datetime import datetime
11
  from typing import List, Dict, Any, Optional, Annotated
12
  from langgraph.graph import Graph, StateGraph
13
  from langgraph.prebuilt import ToolNode
 
23
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
24
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") # Make sure to set this environment variable
25
 
26
+ # Create logs directory if it doesn't exist
27
+ LOGS_DIR = "question_logs"
28
+ os.makedirs(LOGS_DIR, exist_ok=True)
29
+
30
+ def log_to_file(task_id: str, question: str, log_data: Dict[str, Any]):
31
+ """Store logs for a question in a JSON file."""
32
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
33
+ filename = f"{LOGS_DIR}/question_{task_id}_{timestamp}.json"
34
+
35
+ log_entry = {
36
+ "task_id": task_id,
37
+ "question": question,
38
+ "timestamp": timestamp,
39
+ "logs": log_data
40
+ }
41
+
42
+ with open(filename, 'w', encoding='utf-8') as f:
43
+ json.dump(log_entry, f, indent=2, ensure_ascii=False)
44
+
45
+ print(f"Logs saved to {filename}")
46
 
47
  class AgentState(TypedDict):
48
  question: Annotated[str, override]
 
52
  history: Annotated[List[Dict[str, str]], operator.add]
53
  needs_more_info: Annotated[bool, override]
54
  search_query: Annotated[str, override]
55
+ task_id: Annotated[str, override] # Add task_id to state
56
+ logs: Annotated[Dict[str, Any], operator.add] # Add logs to state
57
 
58
  class BasicAgent:
59
  def __init__(self):
 
136
  """Analyze the question and determine the next step."""
137
  prompt = f"""Analyze this question and determine what needs to be done: {state['question']}
138
 
139
+ Return ONLY a Python dictionary in this exact format, with no other text or explanation:
140
  {{
141
  "needs_search": true/false,
142
  "search_query": "query if needed"
143
+ }}"""
 
 
 
144
 
145
  try:
146
  llm_response = self._call_llm_api(prompt)
 
148
  print(f"Input: {state['question']}")
149
  print(f"LLM Response: {llm_response}")
150
 
151
+ # Log the analysis step
152
+ state["logs"]["analyze"] = {
153
+ "prompt": prompt,
154
+ "response": llm_response,
155
+ "timestamp": datetime.now().isoformat()
156
+ }
157
+
158
  analysis = ast.literal_eval(llm_response)
159
  state["needs_more_info"] = analysis.get('needs_search', False)
160
  state["search_query"] = analysis.get('search_query', '')
 
169
  state["needs_more_info"] = True
170
  state["search_query"] = state["question"]
171
  state["current_step"] = 'search'
172
+
173
+ # Log the error
174
+ state["logs"]["analyze_error"] = {
175
+ "error": str(e),
176
+ "timestamp": datetime.now().isoformat()
177
+ }
178
 
179
  return state
180
 
 
196
  for i, result in enumerate(search_results, 1):
197
  print(f"{i}. {result}")
198
 
199
+ # Log the search step
200
+ state["logs"]["search"] = {
201
+ "query": state["search_query"],
202
+ "results": search_results,
203
+ "timestamp": datetime.now().isoformat()
204
+ }
205
+
206
  state["history"].append({
207
  'step': 'search',
208
  'query': state["search_query"],
 
217
  'error': str(e)
218
  })
219
  state["current_step"] = 'final_answer'
220
+
221
+ # Log the error
222
+ state["logs"]["search_error"] = {
223
+ "error": str(e),
224
+ "timestamp": datetime.now().isoformat()
225
+ }
226
  return state
227
 
228
  def _generate_final_answer(self, state: AgentState) -> AgentState:
 
230
  history_str = "\n".join([f"{h['step']}: {h.get('output', h.get('results', h.get('error', '')))}"
231
  for h in state["history"]])
232
 
233
+ prompt = f"""Question: {state['question']}
234
+
235
+ History of steps taken:
236
+ {history_str}
237
+
238
+ Return ONLY the direct answer to the question. Do not include any explanations, introductions, or formatting. Just the answer."""
 
239
 
240
  print("\n=== Generate Final Answer ===")
241
  print(f"Question: {state['question']}")
 
246
  print("\nFinal Answer:")
247
  print(llm_response)
248
 
249
+ # Log the final answer generation
250
+ state["logs"]["final_answer"] = {
251
+ "prompt": prompt,
252
+ "response": llm_response,
253
+ "history": history_str,
254
+ "timestamp": datetime.now().isoformat()
255
+ }
256
+
257
  state["final_answer"] = llm_response
258
  return state
259
 
260
+ def __call__(self, question: str, task_id: str = "unknown") -> str:
261
  """Process a question through the agent workflow."""
262
  print(f"Agent received question: {question[:50]}...")
263
 
264
  try:
265
  # Initialize the state
266
  initial_state: AgentState = {
267
+ "question": question,
268
  "current_step": "analyze",
269
  "tool_output": "",
270
  "final_answer": "",
271
  "history": [],
272
  "needs_more_info": False,
273
+ "search_query": "",
274
+ "task_id": task_id,
275
+ "logs": {}
276
  }
277
 
278
  # Run the workflow
279
  final_state = self.workflow.invoke(initial_state)
280
+
281
+ # Save logs to file
282
+ log_to_file(
283
+ task_id=final_state["task_id"],
284
+ question=final_state["question"],
285
+ log_data=final_state["logs"]
286
+ )
287
+
288
  return final_state["final_answer"]
289
 
290
  except Exception as e:
 
359
  try:
360
  # Initialize the state for this question
361
  initial_state = {
362
+ "question": question_text,
363
  "current_step": "analyze",
364
  "tool_output": "",
365
  "final_answer": "",
366
  "history": [],
367
  "needs_more_info": False,
368
+ "search_query": "",
369
+ "task_id": task_id,
370
+ "logs": {}
371
  }
372
 
373
  # Run the workflow for this question