Entz committed on
Commit
be4a1b4
·
verified ·
1 Parent(s): 249bfd7

Delete backend.py

Browse files
Files changed (1) hide show
  1. backend.py +0 -258
backend.py DELETED
@@ -1,258 +0,0 @@
1
- # backend.py
2
- """
3
- Backend module for MCP Agent
4
- Handles all the MCP server connections, LLM setup, and agent logic
5
- """
6
- import sys
7
- import os
8
- import re
9
- import asyncio
10
- from dotenv import load_dotenv
11
- from typing import Optional, Dict, List, Any
12
-
13
from pathlib import Path  # already imported
# Directory containing this file; used below to locate the MCP server scripts
# (arithmetic_server.py / stock_server.py) regardless of the process CWD.
here = Path(__file__).parent.resolve()
15
-
16
-
17
-
18
################### --- Auth setup --- ###################
##########################################################
# Read the Hugging Face API token from either env var name the ecosystem
# uses, then normalize it into BOTH so every downstream library finds it.
HF = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not HF:
    print("WARNING: HF_TOKEN not set. The app will start, but model calls may fail.")
else:
    os.environ["HF_TOKEN"] = HF
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = HF
    try:
        # Best-effort hub login; failures are deliberately swallowed so the
        # app can still start even if huggingface_hub is missing or offline.
        from huggingface_hub import login
        login(token=HF)
    except Exception:
        pass
31
-
32
-
33
- # --- LangChain / MCP ---
34
- from langgraph.prebuilt import create_react_agent
35
- from langchain_mcp_adapters.client import MultiServerMCPClient
36
- from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
37
- from langchain_core.messages import HumanMessage
38
-
39
# First choice, then free/tiny fallbacks
# Ordered list of model repo ids tried by build_chat_llm_with_fallback();
# the first entry can be overridden via the HF_MODEL_ID env var.
CANDIDATE_MODELS = [
    os.getenv("HF_MODEL_ID", "Qwen/Qwen2.5-7B-Instruct"),
    "HuggingFaceTB/SmolLM3-3B-Instruct",
    "Qwen/Qwen2.5-1.5B-Instruct",
    "microsoft/Phi-3-mini-4k-instruct",
]
46
-
47
# System prompt prepended to every agent conversation. The tool names listed
# here must match the tools exposed by the MCP servers spawned in
# MCPAgent.initialize() — keep them in sync if the servers change.
SYSTEM_PROMPT = (
    "You are an AI assistant with tools.\n"
    "- Use the arithmetic tools (`add`, `minus`, `multiply`, `divide`) for arithmetic or multi-step calculations.\n"
    "- Use the stock tools (`get_stock_price`, `get_market_summary`, `get_company_news`) for financial/market queries.\n"
    "- Otherwise, answer directly with your own knowledge.\n"
    "Be concise and accurate. Only call tools when they clearly help."
)
54
-
55
################### --- ROUTER helpers --- ###################
##############################################################

def is_stock_query(q: str) -> bool:
    """Heuristically decide whether *q* concerns stocks, markets, or finance.

    Matching is case-insensitive and purely regex based; no network calls.
    """
    finance_regexes = (
        r"\b(stock|share|price|ticker|market|nasdaq|dow|s&p|spy|qqq)\b",
        r"\b(AAPL|GOOGL|MSFT|TSLA|AMZN|META|NVDA|AMD)\b",  # Common tickers
        r"\$[A-Z]{1,5}\b",  # $SYMBOL format
        r"\b(trading|invest|portfolio|earnings|dividend)\b",
        r"\b(bull|bear|rally|crash|volatility)\b",
    )
    for rx in finance_regexes:
        if re.search(rx, q, re.I):
            return True
    return False
69
-
70
# Extract ticker symbol from query
def extract_ticker(q: str) -> Optional[str]:
    """Extract a stock ticker symbol from *q*.

    Returns the upper-cased symbol, or ``None`` when no candidate is found
    (the previous ``-> str`` annotation was wrong about that).
    Checks, in order: a ``$SYMBOL`` form, a few phrase patterns, then any
    standalone 2-5 character all-uppercase word.
    """
    # Check for $SYMBOL format first
    dollar_match = re.search(r"\$([A-Z]{1,5})\b", q, re.I)
    if dollar_match:
        return dollar_match.group(1).upper()

    # Check for common patterns like "price of AAPL" or "AAPL stock"
    # NOTE(review): these run with re.I, so a lowercase word after
    # "what is"/"get" (e.g. "the") can be mistaken for a ticker — confirm
    # that callers gate this behind is_stock_query() before relying on it.
    patterns = [
        r"(?:price of|stock price of|quote for)\s+([A-Z]{1,5})\b",
        r"\b([A-Z]{1,5})\s+(?:stock|share|price|quote)",
        r"(?:what is|what's|get)\s+([A-Z]{1,5})\b",
    ]
    for pattern in patterns:
        match = re.search(pattern, q, re.I)
        if match:
            return match.group(1).upper()

    # Fall back to any standalone uppercase token that looks like a ticker.
    for word in q.split():
        clean_word = word.strip(".,!?")
        if 2 <= len(clean_word) <= 5 and clean_word.isupper():
            return clean_word

    return None
98
-
99
# Check if asking for market summary
def wants_market_summary(q: str) -> bool:
    """Return True when *q* asks for an overall market summary/overview."""
    summary_regexes = (
        r"\bmarket\s+(?:summary|overview|today|status)\b",
        r"\bhow(?:'s| is) the market\b",
        r"\b(?:dow|nasdaq|s&p)\s+(?:today|now)\b",
        r"\bmarket indices\b",
    )
    return any(re.search(rx, q, re.I) is not None for rx in summary_regexes)
109
-
110
# Check if asking for news
def wants_news(q: str) -> bool:
    """Return True when *q* asks for company news/headlines/updates."""
    match = re.search(r"\b(news|headline|announcement|update)\b", q, re.I)
    return match is not None
114
-
115
def build_tool_map(tools):
    """Map each tool's ``name`` attribute to the tool object for O(1) lookup."""
    return {tool.name: tool for tool in tools}
118
-
119
def find_tool(name: str, tool_map: dict):
    """Look up a tool by name, case-insensitively.

    Accepts either an exact key match or a namespaced key ending in
    ``/<name>`` (some MCP adapters prefix tool names with the server name).
    Returns the tool object, or None when nothing matches.
    """
    wanted = name.lower()
    for key, tool in tool_map.items():
        lowered = key.lower()
        if lowered == wanted or lowered.endswith("/" + wanted):
            return tool
    return None
126
-
127
async def build_chat_llm_with_fallback():
    """
    Try each candidate model in CANDIDATE_MODELS. For each:
      - create HuggingFaceEndpoint + ChatHuggingFace
      - do a tiny 'ping' with a proper LC message to trigger routing
    On 402/Payment Required (or any other error), fall through to the next.
    Raises RuntimeError when no candidate initializes.
    """
    last_err = None
    for mid in CANDIDATE_MODELS:
        try:
            endpoint = HuggingFaceEndpoint(
                repo_id=mid,
                huggingfacehub_api_token=HF,
                temperature=0.1,
                max_new_tokens=256,  # Increased for better responses
            )
            chat = ChatHuggingFace(llm=endpoint)
            # PROBE with a valid message type
            _ = await chat.ainvoke([HumanMessage(content="ping")])
            print(f"[LLM] Using: {mid}")
            return chat
        except Exception as e:
            last_err = e
            if "402" in str(e) or "Payment Required" in str(e):
                print(f"[LLM] {mid} requires payment; trying next...")
            else:
                print(f"[LLM] {mid} error: {e}; trying next...")
    raise RuntimeError(f"Could not initialize any candidate model. Last error: {last_err}")
158
-
159
# NEW: Class to manage the MCP Agent (moved from main function)
class MCPAgent:
    """Owns the MCP client, its tools, the chat model, and the ReAct agent.

    Lifecycle: initialize() -> process_message() (many times) -> cleanup().
    Instances are normally obtained through the module-level get_agent().
    """

    def __init__(self):
        # All heavy setup is deferred to initialize(); the constructor only
        # declares the slots so attribute access never raises AttributeError.
        self.client = None      # MultiServerMCPClient once initialized
        self.agent = None       # LangGraph ReAct agent
        self.tool_map = None    # dict: tool name -> tool object
        self.tools = None       # raw tool list from the MCP client
        self.model = None       # chat model chosen by the fallback builder
        self.initialized = False

    async def initialize(self):
        """Initialize the MCP client and agent"""
        # Idempotent: calls after the first are no-ops.
        # NOTE(review): the early return yields None while the first call
        # returns the tool-name list — callers should not rely on the value.
        if self.initialized:
            return

        # Start the Stock server separately first: `python stockserver.py`
        # Both MCP servers are spawned as stdio subprocesses using the same
        # Python interpreter that runs this process, located next to this file.
        self.client = MultiServerMCPClient(
            {
                "arithmetic": {
                    "command": sys.executable,
                    "args": [str(here / "arithmetic_server.py")],
                    "transport": "stdio",
                },
                "stocks": {
                    "command": sys.executable,
                    "args": [str(here / "stock_server.py")],
                    "transport": "stdio",
                },
            }
        )

        # 1. MCP client + tools
        self.tools = await self.client.get_tools()
        self.tool_map = build_tool_map(self.tools)

        # 2. Build LLM with auto-fallback
        self.model = await build_chat_llm_with_fallback()

        # Build the ReAct agent with MCP tools
        self.agent = create_react_agent(self.model, self.tools)

        self.initialized = True
        return list(self.tool_map.keys())  # Return available tools

    async def process_message(self, user_text: str, history: List[Dict]) -> str:
        """Process a single message with the agent"""
        # Lazily initialize on first use.
        if not self.initialized:
            await self.initialize()

        # Try direct stock tool routing first: cheap regex routing that
        # skips the LLM entirely when the intent maps 1:1 onto a stock tool.
        if is_stock_query(user_text):
            if wants_market_summary(user_text):
                market_tool = find_tool("get_market_summary", self.tool_map)
                if market_tool:
                    return await market_tool.ainvoke({})

            elif wants_news(user_text):
                ticker = extract_ticker(user_text)
                if ticker:
                    news_tool = find_tool("get_company_news", self.tool_map)
                    if news_tool:
                        return await news_tool.ainvoke({"symbol": ticker, "limit": 3})

            else:
                ticker = extract_ticker(user_text)
                if ticker:
                    price_tool = find_tool("get_stock_price", self.tool_map)
                    if price_tool:
                        return await price_tool.ainvoke({"symbol": ticker})

        # Fall back to agent for everything else (including stock queries
        # where no ticker or matching tool could be resolved above).
        messages = [{"role": "system", "content": SYSTEM_PROMPT}] + history + [
            {"role": "user", "content": user_text}
        ]
        result = await self.agent.ainvoke({"messages": messages})
        return result["messages"][-1].content

    async def cleanup(self):
        """Clean up resources"""
        # close() may be sync or async depending on the adapter version, so
        # detect a coroutine result and await it only when needed.
        if self.client:
            close = getattr(self.client, "close", None)
            if callable(close):
                res = close()
                if asyncio.iscoroutine(res):
                    await res
244
-
245
-
246
-
247
-
248
-
249
-
250
# NEW: Singleton instance for the agent
_agent_instance = None  # lazily created on first get_agent() call


def get_agent() -> MCPAgent:
    """Return the process-wide MCPAgent singleton, creating it on first use."""
    global _agent_instance
    if _agent_instance is None:
        _agent_instance = MCPAgent()
    return _agent_instance