naman1102 committed on
Commit
5e54175
·
1 Parent(s): b6654dc
Files changed (2) hide show
  1. app.py +14 -6
  2. tools.py +4 -3
app.py CHANGED
@@ -125,16 +125,24 @@ class BasicAgent:
125
  return text.strip()
126
 
127
  def _generate_answer(self, state: AgentState) -> AgentState:
128
- history_text = "\n".join(str(item) for item in state["history"])
 
 
 
 
 
 
 
 
129
  prompt = (
130
- f"Answer the user question as directly as possible. If sources were retrieved, incorporate them.\n"
131
- f"Question: {state['question']}\n\nContext:\n{history_text}\n\n"
 
132
  "Give ONLY the final answer without extra formatting or explanation.\n"
133
- "Put your answer in a box using [box] and [/box] tags.\n"
134
- "If you cannot find a definitive answer, say 'I cannot find a definitive answer to this question.'"
135
  )
136
  answer = self._call_llm(prompt, max_tokens=150)
137
- state["final_answer"] = self._extract_boxed_answer(answer)
138
  state["history"].append({"step": "answer", "output": answer})
139
  state["logs"]["final_answer"] = {"prompt": prompt, "response": answer}
140
  state["current_step"] = "done"
 
125
  return text.strip()
126
 
127
  def _generate_answer(self, state: AgentState) -> AgentState:
128
+ # Format search results for better LLM consumption
129
+ search_results = []
130
+ for item in state["history"]:
131
+ if item.get("step") == "search" and "results" in item:
132
+ for i, result in enumerate(item["results"], 1):
133
+ search_results.append(f"Result {i}:\n{result}\n")
134
+
135
+ history_text = "\n".join(search_results) if search_results else "No search results found."
136
+
137
  prompt = (
138
+ f"Answer the user question as directly as possible using the search results below.\n"
139
+ f"Question: {state['question']}\n\n"
140
+ f"Search Results:\n{history_text}\n\n"
141
  "Give ONLY the final answer without extra formatting or explanation.\n"
142
+ "If you cannot find a definitive answer in the search results, say 'I cannot find a definitive answer to this question.'"
 
143
  )
144
  answer = self._call_llm(prompt, max_tokens=150)
145
+ state["final_answer"] = answer.strip()
146
  state["history"].append({"step": "answer", "output": answer})
147
  state["logs"]["final_answer"] = {"prompt": prompt, "response": answer}
148
  state["current_step"] = "done"
tools.py CHANGED
@@ -17,9 +17,10 @@ def _raw_search(query: str, max_results: int = 5) -> List[str]:
17
  out = []
18
  for r in raw:
19
  try:
20
- title = r.get("title", "")
21
- link = r.get("href") or r.get("link", "")
22
- out.append(f"{title} {link}")
 
23
  except Exception:
24
  pass
25
  return out
 
17
  out = []
18
  for r in raw:
19
  try:
20
+ title = r.get("title", "")
21
+ link = r.get("href") or r.get("link", "")
22
+ snippet = r.get("body") or r.get("snippet", "")
23
+ out.append(f"{title} – {link}\n{snippet}")
24
  except Exception:
25
  pass
26
  return out