import asyncio
import os

from dotenv import load_dotenv
from duckduckgo_search import DDGS
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

# Load environment variables
load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise ValueError("OPENAI_API_KEY environment variable not set")

@tool
async def web_search(query: str) -> str:
    """
    Performs a web search using DuckDuckGo and returns a string of results.

    Args:
        query (str): The search query string.

    Returns:
        str: A string containing the search results.
    """
    try:
        with DDGS() as ddgs:
            # DDGS.text() is synchronous; run it in a worker thread so this
            # async tool does not block the event loop.
            results = await asyncio.to_thread(
                ddgs.text, keywords=query, max_results=5
            )
        return "\n".join(f"{r['title']}: {r['body']}" for r in results)
    except Exception as e:
        return f"Error performing web search: {str(e)}"


search_tool = web_search
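# Usage sketch (illustrative only): the tool accepts a dict payload, e.g.
#   result = await search_tool.ainvoke({"query": "LangChain structured tools"})
# The query string above is a made-up example.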


class MultiHopSearchTool:
    def __init__(self):
        self.name = "multi_hop_search"
        self.description = "Performs iterative web searches to refine results for complex queries."
        self.inputs = {
            "query": {"type": "string", "description": "Initial search query"},
            "steps": {"type": "integer", "description": "Number of search iterations (default: 3)"},
        }
        self.output_type = str
        self.llm = ChatOpenAI(
            model="gpt-4o",
            api_key=api_key,
            temperature=0,
            http_client=None,  # Fall back to the default HTTP client so no custom proxy settings are applied
        )

    async def aparse(self, query: str, steps: int = 3) -> str:
        try:
            current_query = query
            results = []
            for _ in range(steps):
                # Async tools must be awaited via ainvoke(), not invoke().
                search_result = await web_search.ainvoke({"query": current_query})
                results.append(search_result)
                # Refine the query with the LLM before the next iteration.
                prompt = (
                    f"Based on the query: {current_query}\n"
                    f"And the search results: {search_result}\n"
                    "Generate a refined search query to get more precise results."
                )
                response = await self.llm.ainvoke(prompt)
                current_query = response.content
            return "\n\n".join(results)
        except Exception as e:
            return f"Error in multi-hop search: {str(e)}"


multi_hop_search_tool = MultiHopSearchTool()
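

# Minimal usage sketch. Assumptions: this file is run directly as a script and a
# valid OPENAI_API_KEY is set; the example queries below are illustrative only.
if __name__ == "__main__":
    async def _demo() -> None:
        # Single web search via the plain tool.
        single = await search_tool.ainvoke({"query": "latest LangChain release"})
        print(single)
        # Iterative, LLM-refined search via the multi-hop tool.
        multi = await multi_hop_search_tool.aparse("history of DuckDuckGo", steps=2)
        print(multi)

    asyncio.run(_demo())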