import gradio as gr
import asyncio
import os
from typing import List, Tuple, Optional, Dict, Any
from datetime import datetime
import logging
import signal
import sys
import json

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

try:
    from mcp_use import MCPClient
    from langchain_mcp_adapters.client import MultiServerMCPClient
    from langchain_community.tools.sleep.tool import SleepTool
    from langchain_mcp_adapters.tools import load_mcp_tools
    from langchain.agents import AgentExecutor, create_tool_calling_agent
    from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
    from langchain_mistralai import ChatMistralAI
except ImportError as e:
    logger.error(f"Import error: {e}")
    raise


class ConversationManager:
    """Manages conversation history with token optimization"""

    def __init__(self, max_history_pairs: int = 3, max_context_chars: int = 2000):
        self.max_history_pairs = max_history_pairs
        self.max_context_chars = max_context_chars
        self.session_context = {}  # Browser state context

    def update_session_context(self, action: str, result: str):
        """Update browser session context (current page, last actions, etc.)"""
        self.session_context.update({
            'last_action': action,
            'last_result': result[:500],  # Truncate long results
            'timestamp': datetime.now().isoformat()
        })

    def get_optimized_history(self, full_history: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        """Get optimized history with recent messages + session context"""
        # Take only the last N conversation pairs
        recent_history = full_history[-self.max_history_pairs:] if full_history else []

        # Add session context as first "message" if we have browser state
        if self.session_context:
            context_msg = (
                "[SESSION_CONTEXT] Browser session active. "
                f"Last action: {self.session_context.get('last_action', 'none')}"
            )
            recent_history.insert(0, ("system", context_msg))

        return recent_history

    def get_context_summary(self) -> str:
        """Get a summary of current browser session state"""
        if not self.session_context:
            return "Browser session not active."
        return (
            "Browser session active. "
            f"Last action: {self.session_context.get('last_action', 'none')} "
            f"at {self.session_context.get('timestamp', 'unknown')}"
        )
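# For example, with the defaults above (max_history_pairs=3), a 10-pair chat
# history passed to get_optimized_history() is trimmed to its last 3
# (human, ai) pairs, plus one prepended ("system", "[SESSION_CONTEXT] ...")
# entry once a browser action has already been recorded.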
class BrowserAgent:
    def __init__(self, api_key: str):
        self.api_key = api_key
        self.client = None
        self.session = None
        self.session_context = None
        self.agent_executor = None
        self.model = None
        self.initialized = False
        self.available_tools = {}
        self.system_prompt = ""
        # Add conversation manager for token optimization
        self.conversation_manager = ConversationManager(
            max_history_pairs=3,    # Only keep last 3 exchanges
            max_context_chars=2000  # Limit context size
        )

    def generate_tools_prompt(self):
        """Generate a detailed prompt section about available tools"""
        try:
            tools_prompt = "\n## 🛠️ AVAILABLE TOOLS\n"
            tools_prompt += "You have access to the following browser automation tools via MCP:\n\n"

            for tool_name, tool_info in self.available_tools.items():
                tools_prompt += f"### {tool_name}\n"

                # Add description from StructuredTool object
                description = getattr(tool_info, 'description', 'No description available')
                tools_prompt += f"**Description**: {description}\n"

                # Add parameters from args_schema if available
                if hasattr(tool_info, 'args_schema') and tool_info.args_schema:
                    try:
                        schema = tool_info.args_schema.model_json_schema()
                        if 'properties' in schema:
                            tools_prompt += "**Parameters**:\n"
                            for param_name, param_info in schema['properties'].items():
                                param_type = param_info.get('type', 'unknown')
                                param_desc = param_info.get('description', 'No description')
                                required = param_name in schema.get('required', [])
                                required_mark = " (required)" if required else " (optional)"
                                tools_prompt += f"- `{param_name}` ({param_type}){required_mark}: {param_desc}\n"
                    except Exception as schema_error:
                        logger.debug(f"Could not parse schema for {tool_name}: {schema_error}")
                    tools_prompt += "**Usage**: Call this tool when you need to perform this browser action\n"
                else:
                    tools_prompt += "**Usage**: Call this tool when you need to perform this browser action\n"

                tools_prompt += "\n"

            tools_prompt += """
🎯 Multi-Step Workflow
1. Navigate & Snapshot
   - Load the target page
   - Capture a snapshot
   - Assess whether further steps are needed; if so, proceed to the next action
2. Perform Action & Validate
   - If needed, close ads or pop-ups
   - Capture a snapshot
   - Verify results before moving on
3. Keep Browser Open
   - Never close the session unless explicitly instructed
4. Avoid Redundancy
   - Don't repeat actions (e.g., clicking) when data is already collected

## 🚨 SESSION PERSISTENCE RULES
- Browser stays open for the entire conversation
- Each action builds on previous state
- Context is maintained between requests
"""
            return tools_prompt
        except Exception as e:
            logger.error(f"Failed to generate tools prompt: {e}")
            return "\n## 🛠️ TOOLS\nBrowser automation tools available but not detailed.\n"
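    # For illustration, one generated entry renders roughly like the block below.
    # The tool names, descriptions and parameters come from the Playwright MCP
    # server at runtime, so the values here are only an example:
    #
    #   ### browser_navigate
    #   **Description**: Navigate to a URL
    #   **Parameters**:
    #   - `url` (string) (required): The URL to navigate to
    #   **Usage**: Call this tool when you need to perform this browser action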
    def get_system_prompt_with_tools(self):
        base = """🌐 Browser Agent - Persistent Session & Optimized Memory

You are an intelligent browser automation agent (Playwright via MCP) tasked with keeping a lightweight, ongoing session:

🎯 Mission
- Navigate pages, extract and analyze data without closing the browser
- Handle pop-ups and capture snapshots to validate each step

🔄 Session Management
- Browser remains open across user requests
- Only recent chat history is provided to save tokens
- Session context (current page, recent actions) is maintained separately

⚡ Response Structure
For each action:
- State → tool call
- Snapshot → confirmation
- Next plan (if needed)

💡 Best Practices
- Use text selectors and wait for content
- Pause 2 seconds between tool calls
- Be concise and focused on the current task
- Important: as soon as you have the information you came for, return it
- If earlier context is needed, ask the user to clarify
"""
        tools_section = self.generate_tools_prompt()
        return base + tools_section

    def initialize(self):
        """Initialize MCP client, model, session and agent"""
        try:
            logger.info("🚀 Initializing Browser Agent...")

            # LLM
            mistral_key = os.getenv("mistralkey")
            if not mistral_key:
                raise ValueError("Mistral API key is required")

            self.model = ChatMistralAI(
                model="devstral-small-latest",
                api_key=mistral_key,
            )
            logger.info("✅ Mistral LLM initialized with optimized settings")

            # Create event loop for MCP operations
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            # MCP client setup (async operations in sync wrapper)
            self.client = MultiServerMCPClient({
                "browser": {
                    "command": "npx",
                    "args": [
                        "@playwright/mcp@latest",
                        "--browser", "chromium",
                        "--user-agent",
                        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",
                    ],
                    "transport": "stdio"
                }
            })
            logger.info("✅ MCP client created")

            # Start persistent session (run async operation in sync context)
            self.session_context = self.client.session("browser")
            self.session = loop.run_until_complete(self.session_context.__aenter__())
            logger.info("✅ MCP session opened")

            # Load tools (async operation)
            tools = loop.run_until_complete(load_mcp_tools(self.session))
            tools.append(SleepTool(description="Wait 2 seconds between two calls"))
            logger.info(f"📥 Loaded {len(tools)} tools")
            self.available_tools = {t.name: t for t in tools}

            # Install browser if needed
            install_tool = self.available_tools.get("browser_install")
            if install_tool:
                try:
                    result = loop.run_until_complete(install_tool.arun({}))
                    logger.info(f"📥 Browser install: {result}")
                except Exception as e:
                    logger.warning(f"⚠️ Browser install failed: {e}, continuing.")

            # System prompt
            self.system_prompt = self.get_system_prompt_with_tools()

            # Create agent
            prompt = ChatPromptTemplate.from_messages([
                ("system", self.system_prompt),
                MessagesPlaceholder(variable_name="chat_history"),
                ("human", "{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ])

            agent = create_tool_calling_agent(
                llm=self.model,
                tools=tools,
                prompt=prompt
            )

            self.agent_executor = AgentExecutor(
                agent=agent,
                tools=tools,
                verbose=True,
                max_iterations=15,  # Reduced from 30
                early_stopping_method="generate",
                handle_parsing_errors=True,
                return_intermediate_steps=True,
                max_execution_time=180  # Reduced from 300
            )

            self.initialized = True
            logger.info("✅ Agent initialized with persistent session and optimized memory")
            return True

        except Exception as e:
            logger.error(f"❌ Initialization failed: {e}")
            self.cleanup()
            raise
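    # Note on the sync/async bridge: initialize() creates a fresh event loop and
    # registers it with asyncio.set_event_loop(), so process_query() and cleanup()
    # can fetch the same loop via asyncio.get_event_loop() and drive the async
    # MCP/agent calls with run_until_complete(). This only works while all of
    # these methods execute on the thread that owns that loop.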
    def process_query(self, query: str, chat_history: List[Tuple[str, str]]) -> str:
        if not self.initialized:
            return "❌ Agent not initialized. Please restart the application."

        try:
            # ✅ KEY OPTIMIZATION: Use only recent history instead of full history
            optimized_history = self.conversation_manager.get_optimized_history(chat_history)

            # Convert to message format
            history_messages = []
            for human, ai in optimized_history:
                if human:
                    history_messages.append(("human", human))
                if ai:
                    history_messages.append(("ai", ai))

            # Add session context
            context_summary = self.conversation_manager.get_context_summary()
            enhanced_query = f"{query}\n\n[SESSION_INFO]: {context_summary}"

            # Log token savings
            original_pairs = len(chat_history)
            optimized_pairs = len(optimized_history)
            logger.info(f"💰 Token optimization: {original_pairs} → {optimized_pairs} history pairs")

            # Execute with optimized history (run async operation in sync context)
            loop = asyncio.get_event_loop()
            resp = loop.run_until_complete(self.agent_executor.ainvoke({
                "input": enhanced_query,
                "chat_history": history_messages
            }))

            # Update session context with this interaction
            self.conversation_manager.update_session_context(
                action=query,
                result=resp["output"]
            )

            return resp["output"]

        except Exception as e:
            logger.error(f"Error processing query: {e}")
            return f"❌ Error: {e}\n💡 Ask for a screenshot to diagnose."

    def cleanup(self):
        """Cleanup resources properly"""
        try:
            if self.session_context:
                loop = asyncio.get_event_loop()
                loop.run_until_complete(self.session_context.__aexit__(None, None, None))
                logger.info("✅ MCP session closed")
                self.session_context = None
                self.session = None

            if self.client:
                loop = asyncio.get_event_loop()
                loop.run_until_complete(self.client.close())
                logger.info("✅ MCP client closed")
                self.client = None

            self.initialized = False
        except Exception as e:
            logger.error(f"Cleanup error: {e}")

    def get_token_usage_stats(self, full_history: List[Tuple[str, str]]) -> Dict[str, Any]:
        """Get statistics about token usage optimization"""
        original_pairs = len(full_history)
        optimized_pairs = len(self.conversation_manager.get_optimized_history(full_history))

        # Rough token estimation (1 token ≈ 4 characters)
        def estimate_tokens(text: str) -> int:
            return len(text) // 4

        original_tokens = sum(estimate_tokens(msg[0] + msg[1]) for msg in full_history)
        optimized_tokens = sum(
            estimate_tokens(msg[0] + msg[1])
            for msg in self.conversation_manager.get_optimized_history(full_history)
        )

        return {
            "original_pairs": original_pairs,
            "optimized_pairs": optimized_pairs,
            "pairs_saved": original_pairs - optimized_pairs,
            "estimated_original_tokens": original_tokens,
            "estimated_optimized_tokens": optimized_tokens,
            "estimated_tokens_saved": original_tokens - optimized_tokens,
            "savings_percentage": ((original_tokens - optimized_tokens) / original_tokens * 100) if original_tokens > 0 else 0
        }


# Global agent instance
agent: Optional[BrowserAgent] = None


def initialize_agent(api_key: str) -> str:
    """Initialize the agent"""
    global agent

    if not api_key.strip():
        return "❌ Please provide a Mistral API key"

    try:
        # Cleanup existing agent
        if agent:
            agent.cleanup()

        # Create new agent
        agent = BrowserAgent(api_key)
        agent.initialize()

        info = agent.get_system_prompt_with_tools()
        return f"✅ Agent Initialized Successfully with Token Optimization!\n\n{info[:1000]}..."
    except Exception as e:
        logger.error(f"Initialization error: {e}")
        return f"❌ Failed to initialize agent: {e}"
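# Gradio's Chatbot passes history as a list of [user, assistant] pairs; the
# helpers below convert it to the list of (human, ai) tuples that
# BrowserAgent.process_query() and get_token_usage_stats() expect, e.g.
# [["open example.com", "Done"]] -> [("open example.com", "Done")].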
def process_message(message: str, history: List[List[str]]) -> List[List[str]]:
    """Process message and return updated history"""
    global agent

    if not agent or not agent.initialized:
        error_msg = "❌ Agent not initialized. Please initialize first with your API key."
        history.append([message, error_msg])
        return history

    if not message.strip():
        error_msg = "Please enter a message"
        history.append([message, error_msg])
        return history

    try:
        # Convert history format for the agent
        agent_history = [(msg[0], msg[1]) for msg in history]

        # Get token usage stats before processing
        stats = agent.get_token_usage_stats(agent_history)

        # Process the query with optimized history
        response = agent.process_query(message, agent_history)

        # Add token savings info to response if significant savings
        if stats["savings_percentage"] > 50:
            response += f"\n\n💰 Token savings: {stats['savings_percentage']:.1f}% ({stats['estimated_tokens_saved']} tokens saved)"

        # Add to history
        history.append([message, response])
        return history

    except Exception as e:
        logger.error(f"Message processing error: {e}")
        error_msg = f"❌ Error: {e}\n💡 Try asking for a screenshot to diagnose."
        history.append([message, error_msg])
        return history


def get_token_stats(history: List[List[str]]) -> str:
    """Get token usage statistics"""
    global agent

    if not agent or not agent.initialized:
        return "Agent not initialized"

    agent_history = [(msg[0], msg[1]) for msg in history]
    stats = agent.get_token_usage_stats(agent_history)

    return f"""📊 Token Usage Statistics:
• Original conversation pairs: {stats['original_pairs']}
• Optimized conversation pairs: {stats['optimized_pairs']}
• Pairs saved: {stats['pairs_saved']}
• Estimated original tokens: {stats['estimated_original_tokens']:,}
• Estimated optimized tokens: {stats['estimated_optimized_tokens']:,}
• Estimated tokens saved: {stats['estimated_tokens_saved']:,}
• Savings percentage: {stats['savings_percentage']:.1f}%"""


def screenshot_quick(history: List[List[str]]) -> List[List[str]]:
    """Quick screenshot function"""
    return process_message("Take a screenshot of the current page", history)


with gr.Blocks(
    title="MCP Browser Agent - Token Optimized",
    theme=gr.themes.Soft()
) as interface:
    gr.HTML("""
        AI-powered web browsing with persistent sessions and optimized token usage