"""
agent.py
This file defines the core logic for a sophisticated AI agent using LangGraph.
## MODIFICATION: This version has been refactored for an integrated vision model.
The primary LLM now processes images directly, removing the need for a separate 'describe_image' tool.
This allows for more direct and less "lossy" multimodal reasoning.
"""
# ----------------------------------------------------------
# Section 1: Imports and Configuration
# ----------------------------------------------------------
import json
import os
import pickle
import re
import subprocess
import textwrap
import base64
import functools
from io import BytesIO
from pathlib import Path
import tempfile
import yt_dlp
from pydub import AudioSegment
import speech_recognition as sr
import requests
from cachetools import TTLCache
from PIL import Image
from langchain.schema import Document
from langchain.tools.retriever import create_retriever_tool
from langchain_community.vectorstores import FAISS
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_core.tools import Tool, tool
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import HuggingFaceEmbeddings
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import ToolNode, tools_condition
from dotenv import load_dotenv
load_dotenv()
# --- Configuration and Caching ---
JSONL_PATH, FAISS_CACHE, EMBED_MODEL = Path("metadata.jsonl"), Path("faiss_index.pkl"), "sentence-transformers/all-mpnet-base-v2"
RETRIEVER_K, CACHE_TTL = 5, 600
API_CACHE = TTLCache(maxsize=256, ttl=CACHE_TTL)
def cached_get(key: str, fetch_fn):
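    """Return API_CACHE[key] if present; otherwise compute it via fetch_fn, cache it, and return it."""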
if key in API_CACHE: return API_CACHE[key]
val = fetch_fn()
API_CACHE[key] = val
return val
# ----------------------------------------------------------
# Section 2: Standalone Tool Functions
# ----------------------------------------------------------
@tool
def python_repl(code: str) -> str:
"""Executes a string of Python code and returns the stdout/stderr."""
# (Implementation remains the same)
code = textwrap.dedent(code).strip()
try:
result = subprocess.run(["python", "-c", code], capture_output=True, text=True, timeout=10, check=False)
if result.returncode == 0: return f"Execution successful.\nSTDOUT:\n```\n{result.stdout}\n```"
else: return f"Execution failed.\nSTDOUT:\n```\n{result.stdout}\n```\nSTDERR:\n```\n{result.stderr}\n```"
except subprocess.TimeoutExpired: return "Execution timed out (>10s)."
## MODIFICATION: The 'describe_image_func' has been removed. Its functionality is now
## handled by the 'preprocess_image_node' in the graph.
@tool
def process_youtube_video(url: str) -> str:
"""Downloads and processes a YouTube video, extracting audio and converting to text."""
# (Implementation remains the same)
try:
print(f"Processing YouTube video: {url}")
with tempfile.TemporaryDirectory() as temp_dir:
ydl_opts = {
'format': 'bestaudio/best', 'outtmpl': f'{temp_dir}/%(title)s.%(ext)s',
'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'wav'}],
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info(url, download=True)
title = info.get('title', 'Unknown')
audio_files = list(Path(temp_dir).glob("*.wav"))
if not audio_files: return "Error: Could not download audio from YouTube video"
r, transcript_parts = sr.Recognizer(), []
audio = AudioSegment.from_wav(str(audio_files[0])).set_channels(1).set_frame_rate(16000)
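            # Split the audio into 30-second (30,000 ms) chunks; only the first 10 (~5 minutes) are transcribed below.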
chunks = [audio[i:i + 30000] for i in range(0, len(audio), 30000)]
for i, chunk in enumerate(chunks[:10]):
chunk_file = Path(temp_dir) / f"chunk_{i}.wav"
chunk.export(chunk_file, format="wav")
try:
with sr.AudioFile(str(chunk_file)) as source:
text = r.recognize_google(r.record(source))
transcript_parts.append(text)
except (sr.UnknownValueError, sr.RequestError) as e:
transcript_parts.append(f"[Speech recognition error or unintelligible audio: {e}]")
return f"YouTube Video: {title}\n\nTranscript (first 5 minutes):\n{' '.join(transcript_parts)}"
except Exception as e:
print(f"Error processing YouTube video: {e}")
return f"Error processing YouTube video: {e}"
@tool
def process_audio_file(file_url: str) -> str:
"""Downloads and processes an audio file (MP3, WAV, etc.) and converts to text."""
# (Implementation remains the same)
try:
print(f"Processing audio file: {file_url}")
with tempfile.TemporaryDirectory() as temp_dir:
response = requests.get(file_url, timeout=30)
response.raise_for_status()
ext = os.path.splitext(file_url)[1][1:] or 'mp3'
audio_file = Path(temp_dir) / f"audio.{ext}"
with open(audio_file, 'wb') as f: f.write(response.content)
wav_file = Path(temp_dir) / "audio.wav"
AudioSegment.from_file(str(audio_file)).export(wav_file, format="wav")
r, transcript_parts = sr.Recognizer(), []
audio = AudioSegment.from_wav(str(wav_file)).set_channels(1).set_frame_rate(16000)
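            # Split the audio into 30-second (30,000 ms) chunks; only the first 20 (~10 minutes) are transcribed below.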
chunks = [audio[i:i + 30000] for i in range(0, len(audio), 30000)]
for i, chunk in enumerate(chunks[:20]):
chunk_file = Path(temp_dir) / f"chunk_{i}.wav"
chunk.export(chunk_file, format="wav")
try:
with sr.AudioFile(str(chunk_file)) as source:
text = r.recognize_google(r.record(source))
transcript_parts.append(text)
except (sr.UnknownValueError, sr.RequestError) as e:
transcript_parts.append(f"[Speech recognition error or unintelligible audio: {e}]")
return f"Audio file transcript:\n{' '.join(transcript_parts)}"
except Exception as e:
print(f"Error processing audio file: {e}")
return f"Error processing audio file: {e}"
def web_search_func(query: str, cache_func) -> str:
"""Performs a web search using Tavily and returns a compilation of results."""
# (Implementation remains the same)
key = f"web:{query}"
results = cache_func(key, lambda: TavilySearchResults(max_results=5).invoke(query))
return "\n\n---\n\n".join([f"Source: {res['url']}\nContent: {res['content']}" for res in results])
def wiki_search_func(query: str, cache_func) -> str:
"""Searches Wikipedia and returns the top 2 results."""
# (Implementation remains the same)
key = f"wiki:{query}"
docs = cache_func(key, lambda: WikipediaLoader(query=query, load_max_docs=2, doc_content_chars_max=2000).load())
return "\n\n---\n\n".join([f"Source: {d.metadata['source']}\n\n{d.page_content}" for d in docs])
def arxiv_search_func(query: str, cache_func) -> str:
"""Searches Arxiv for scientific papers and returns the top 2 results."""
# (Implementation remains the same)
key = f"arxiv:{query}"
docs = cache_func(key, lambda: ArxivLoader(query=query, load_max_docs=2).load())
return "\n\n---\n\n".join([f"Source: {d.metadata['source']}\nPublished: {d.metadata['Published']}\nTitle: {d.metadata['Title']}\n\nSummary:\n{d.page_content}" for d in docs])
# ----------------------------------------------------------
# Section 3: DYNAMIC SYSTEM PROMPT
# ----------------------------------------------------------
## MODIFICATION: The system prompt is updated to reflect the new workflow.
## It no longer mentions 'describe_image' but instructs the model that it can
## directly see and reason about images provided in the prompt.
SYSTEM_PROMPT_TEMPLATE = (
"""You are an expert-level multimodal research assistant. Your goal is to answer the user's question accurately using all available tools and your own vision capabilities.
**CRITICAL INSTRUCTIONS:**
1. **INTEGRATED VISION:** You can directly see and understand images provided in the user's prompt. Reason about the image content directly to answer questions.
2. **MULTIMODAL TOOL USE:** When you encounter URLs for other media types, use the appropriate tool:
- For YouTube URLs: Use the `process_youtube_video` tool
- For audio files (mp3, wav, etc.): Use the `process_audio_file` tool
3. **SEARCH & RETRIEVAL:** For information not in the prompt, use the search tools (`web_search`, `wiki_search`, `arxiv_search`) or retrieve past examples. Do not make up answers.
4. **AVAILABLE TOOLS:** Here is the exact list of tools you have access to for non-image tasks:
{tools}
5. **REASONING:** Think step-by-step. First, analyze the user's question and any attached text or images. Second, if the answer requires external data, decide which tool is appropriate. Third, call the tools with correct parameters. Finally, synthesize all information into a final answer.
6. **FINAL ANSWER FORMAT:** Your final response MUST strictly follow this format:
`FINAL ANSWER: [Your comprehensive answer incorporating all tool results and image analysis]`
"""
)
# ----------------------------------------------------------
# Section 4: Factory Function for Agent Executor
# ----------------------------------------------------------
def create_agent_executor(provider: str = "groq"):
"""
Factory function to create and compile the LangGraph agent executor.
"""
print(f"Initializing agent with provider: {provider}")
# Step 1: Build LLM
## MODIFICATION: We now only need one primary, vision-capable LLM. The 'vision_llm' is removed.
if provider == "google":
llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro-latest", temperature=0)
elif provider == "groq":
        # The integrated-vision design needs a model that accepts image input directly.
        # Llama 4 Maverick is Meta's natively multimodal model and is served by Groq,
        # so it can reason over the base64 images embedded by 'preprocess_image_node'.
        llm = ChatGroq(model_name="meta-llama/llama-4-maverick-17b-128e-instruct", temperature=0)
else:
raise ValueError(f"Provider '{provider}' not supported for integrated vision yet.")
# Step 2: Build Retriever (remains the same)
embeddings = HuggingFaceEmbeddings(model_name=EMBED_MODEL)
if FAISS_CACHE.exists():
with open(FAISS_CACHE, "rb") as f: vector_store = pickle.load(f)
else:
        docs = []
        if JSONL_PATH.exists():
            with open(JSONL_PATH, "rt", encoding="utf-8") as f:
                records = (json.loads(line) for line in f if line.strip())
                docs = [Document(page_content=f"Question: {rec['Question']}\n\nFinal answer: {rec['Final answer']}", metadata={"source": rec["task_id"]}) for rec in records]
if not docs:
docs = [Document(page_content="Sample document", metadata={"source": "sample"})]
vector_store = FAISS.from_documents(docs, embeddings)
with open(FAISS_CACHE, "wb") as f: pickle.dump(vector_store, f)
retriever = vector_store.as_retriever(search_kwargs={"k": RETRIEVER_K})
# Step 3: Create the final list of tools
## MODIFICATION: The 'describe_image' tool has been removed from the list.
tools_list = [
python_repl,
process_youtube_video,
process_audio_file,
Tool(name="web_search", func=functools.partial(web_search_func, cache_func=cached_get), description="Performs a web search using Tavily."),
Tool(name="wiki_search", func=functools.partial(wiki_search_func, cache_func=cached_get), description="Searches Wikipedia."),
Tool(name="arxiv_search", func=functools.partial(arxiv_search_func, cache_func=cached_get), description="Searches Arxiv for scientific papers."),
create_retriever_tool(retriever=retriever, name="retrieve_examples", description="Retrieve solved questions similar to the user's query."),
]
# Step 4: Format the tool list and create the final system prompt
tool_definitions = "\n".join([f"- `{tool.name}`: {tool.description}" for tool in tools_list])
final_system_prompt = SYSTEM_PROMPT_TEMPLATE.format(tools=tool_definitions)
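    # bind_tools attaches the tools' schemas to the LLM so it can emit structured tool calls.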
llm_with_tools = llm.bind_tools(tools_list)
# Step 5: Define Graph Nodes
## MODIFICATION: New node to pre-process images before they reach the assistant.
def preprocess_image_node(state: MessagesState):
"""
Checks the last human message for an image URL. If found, it downloads
the image, converts it to base64, and reformats the message content
for a multimodal LLM.
"""
last_message = state["messages"][-1]
if not isinstance(last_message, HumanMessage) or not isinstance(last_message.content, str):
return state
# Regex to find image URLs
image_url_match = re.search(r'(https?://[^\s]+\.(?:png|jpg|jpeg|gif|webp))', last_message.content)
if image_url_match:
image_url = image_url_match.group(0)
print(f"--- Found image URL: {image_url} ---")
try:
# Download and process the image
response = requests.get(image_url, timeout=10)
response.raise_for_status()
img = Image.open(BytesIO(response.content))
# Convert to base64
buffered = BytesIO()
img.convert("RGB").save(buffered, format="JPEG")
b64_string = base64.b64encode(buffered.getvalue()).decode()
# Create the multimodal message content
new_content = [
{"type": "text", "text": last_message.content},
{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64_string}"}}
]
                # Replace the text-only message with the multimodal one. Reusing the
                # original message ID lets MessagesState's add_messages reducer
                # overwrite that message instead of appending a duplicate.
                state["messages"][-1] = HumanMessage(content=new_content, id=last_message.id)
print("--- Image pre-processed and embedded into the message ---")
except Exception as e:
print(f"Error processing image URL: {e}")
# Optional: You could modify the message to inform the LLM of the failure
# For now, we just pass it along without the image.
return state
def retriever_node(state: MessagesState):
# (Implementation remains the same)
user_query = state["messages"][-1].content
docs = retriever.invoke(user_query)
messages = [SystemMessage(content=final_system_prompt)]
if docs:
example_text = "\n\n---\n\n".join(d.page_content for d in docs)
messages.append(AIMessage(content=f"I have found {len(docs)} similar solved examples:\n\n{example_text}", name="ExampleRetriever"))
messages.extend(state["messages"])
return {"messages": messages}
def assistant_node(state: MessagesState):
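        """Invoke the tool-bound LLM on the current message history and return its response."""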
result = llm_with_tools.invoke(state["messages"])
return {"messages": [result]}
# Step 6: Build Graph
## MODIFICATION: The graph flow is updated to include the new pre-processing node.
builder = StateGraph(MessagesState)
builder.add_node("retriever", retriever_node)
builder.add_node("preprocess_image", preprocess_image_node) # New node
builder.add_node("assistant", assistant_node)
builder.add_node("tools", ToolNode(tools_list))
builder.add_edge(START, "retriever")
builder.add_edge("retriever", "preprocess_image") # New edge
builder.add_edge("preprocess_image", "assistant") # New edge
builder.add_conditional_edges("assistant", tools_condition, {"tools": "tools", "__end__": "__end__"})
builder.add_edge("tools", "assistant")
agent_executor = builder.compile()
print("Agent Executor with integrated vision created successfully.")
return agent_executor
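# ----------------------------------------------------------
# Section 5: Local Smoke Test
# ----------------------------------------------------------
# Minimal usage sketch, assuming the relevant API keys (e.g. GROQ_API_KEY and
# TAVILY_API_KEY) are set in the environment or in a local .env file. The
# question below is only a placeholder; an image URL in the text would be
# picked up automatically by the preprocess_image node.
if __name__ == "__main__":
    executor = create_agent_executor(provider="groq")
    demo_question = "What is the tallest mountain on Earth?"
    final_state = executor.invoke({"messages": [HumanMessage(content=demo_question)]})
    print(final_state["messages"][-1].content)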