Update app.py
app.py (CHANGED)
@@ -3,10 +3,8 @@ import gradio as gr
 import warnings
 import json
 from dotenv import load_dotenv
-from typing import List
-import time
-from functools import lru_cache
 import logging
+from functools import lru_cache
 
 from langchain_community.vectorstores import FAISS
 from langchain_community.embeddings import AzureOpenAIEmbeddings
@@ -38,7 +36,7 @@ embeddings = AzureOpenAIEmbeddings(
 )
 
 # Vectorstore
-SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
 FAISS_INDEX_PATH = os.path.join(SCRIPT_DIR, "faiss_index_sysml")
 vectorstore = FAISS.load_local(FAISS_INDEX_PATH, embeddings, allow_dangerous_deserialization=True)
 
@@ -49,13 +47,9 @@ client = AzureOpenAI(
     azure_endpoint=AZURE_OPENAI_ENDPOINT
 )
 
-
-logger = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)
 
-# Post-processing function to remove em dashes
 def clean_em_dashes(text: str) -> str:
-    """Remove em dashes and replace with natural alternatives"""
-    # Replace em dashes with commas or periods based on context
     text = text.replace("—which", ", which")
     text = text.replace("—that", ", that")
     text = text.replace("—no", ". No")
@@ -67,31 +61,16 @@ def clean_em_dashes(text: str) -> str:
     text = text.replace("—just", ". Just")
     text = text.replace("—great", ", great")
     text = text.replace("—this", ". This")
-    # Catch any remaining em dashes
     text = text.replace("—", ", ")
     return text
 
-# Enhanced SysML retriever with proper metadata filtering & weighting
 @lru_cache(maxsize=100)
 def sysml_retriever(query: str) -> str:
     try:
-        print(f"\n🔍 QUERY: {query}")
-        print("="*80)
-
-        # Get more results for filtering and weighting
         results = vectorstore.similarity_search_with_score(query, k=100)
-        print(f"📊 Total results retrieved: {len(results)}")
-
-        # Apply metadata filtering and weighting
         weighted_results = []
-        sysmodeler_count = 0
-        other_count = 0
-
-        for i, (doc, score) in enumerate(results):
-            # Get document source
+        for (doc, score) in results:
             doc_source = doc.metadata.get('source', '').lower() if hasattr(doc, 'metadata') else str(doc).lower()
-
-            # Determine if this is SysModeler content
             is_sysmodeler = (
                 'sysmodeler' in doc_source or
                 'user manual' in doc_source or
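The hunk above keeps the @lru_cache(maxsize=100) decorator on sysml_retriever, which only works because the function takes a single hashable string and returns a string. Below is a minimal, self-contained sketch of that caching behaviour, using a hypothetical cached_lookup stand-in rather than the real FAISS-backed retriever:

from functools import lru_cache

# Hypothetical stand-in for the FAISS-backed retriever; it only illustrates
# what @lru_cache(maxsize=100) adds: repeated identical query strings are
# answered from the cache instead of re-running the expensive search.
@lru_cache(maxsize=100)
def cached_lookup(query: str) -> str:
    print(f"cache miss for: {query!r}")
    return f"retrieved context for {query!r}"

cached_lookup("what is a BDD?")       # prints "cache miss ..."
cached_lookup("what is a BDD?")       # served from the cache, no print
print(cached_lookup.cache_info())     # CacheInfo(hits=1, misses=1, maxsize=100, currsize=1)

One side effect worth noting: because results are cached per query string for the life of the process, changes to the underlying index are not reflected for queries that are already cached.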
@@ -108,106 +87,33 @@ def sysml_retriever(query: str) -> str:
                 'SynthAgent' in doc.page_content or
                 'workspace dashboard' in doc.page_content.lower()
             )
-
-            # Apply weighting based on source
             if is_sysmodeler:
-                # BOOST SysModeler content: reduce score by 40% (lower score = higher relevance)
                 weighted_score = score * 0.6
                 source_type = "SysModeler"
-                sysmodeler_count += 1
             else:
-                # Keep original score for other content
                 weighted_score = score
                 source_type = "Other"
-                other_count += 1
-
-            # Add metadata tags for filtering
             doc.metadata = doc.metadata if hasattr(doc, 'metadata') else {}
             doc.metadata['source_type'] = 'sysmodeler' if is_sysmodeler else 'other'
             doc.metadata['weighted_score'] = weighted_score
             doc.metadata['original_score'] = score
-
             weighted_results.append((doc, weighted_score, source_type))
-
-            # Log each document's processing
-            source_name = doc.metadata.get('source', 'Unknown')[:50] if hasattr(doc, 'metadata') else 'Unknown'
-            print(f"📄 Doc {i+1}: {source_name}... | Original: {score:.4f} | Weighted: {weighted_score:.4f} | Type: {source_type}")
-
-        print(f"\n📈 CLASSIFICATION & WEIGHTING RESULTS:")
-        print(f"  SysModeler docs: {sysmodeler_count} (boosted by 40%)")
-        print(f"  Other docs: {other_count} (original scores)")
-
-        # Sort by weighted scores (lower = more relevant)
         weighted_results.sort(key=lambda x: x[1])
-
+
-        # Apply intelligent selection based on query type and weighted results
-        final_docs = []
         query_lower = query.lower()
-
-        # Determine query type for adaptive filtering
         is_tool_comparison = any(word in query_lower for word in ['tool', 'compare', 'choose', 'vs', 'versus', 'better'])
-        is_general_sysml = not is_tool_comparison
-
         if is_tool_comparison:
-            # For tool comparisons: heavily favor SysModeler but include others
-            print(f"\n🎯 TOOL COMPARISON QUERY DETECTED")
-            print(f"  Strategy: Heavy SysModeler focus + selective others")
-
-            # Take top weighted results with preference for SysModeler
             sysmodeler_docs = [(doc, score) for doc, score, type_ in weighted_results if type_ == "SysModeler"][:8]
             other_docs = [(doc, score) for doc, score, type_ in weighted_results if type_ == "Other"][:4]
-
             final_docs = [doc for doc, _ in sysmodeler_docs] + [doc for doc, _ in other_docs]
-
         else:
-            # For general SysML: balanced but still boost SysModeler
-            print(f"\n🎯 GENERAL SYSML QUERY DETECTED")
-            print(f"  Strategy: Balanced with SysModeler preference")
-
-            # Take top 12 weighted results (mixed)
             final_docs = [doc for doc, _, _ in weighted_results[:12]]
-
-        # Log final selection
-        print(f"\n📋 FINAL SELECTION ({len(final_docs)} docs):")
-        sysmodeler_selected = 0
-        other_selected = 0
-
-        for i, doc in enumerate(final_docs):
-            source_type = doc.metadata.get('source_type', 'unknown')
-            source_name = doc.metadata.get('source', 'Unknown')
-            weighted_score = doc.metadata.get('weighted_score', 0)
-            original_score = doc.metadata.get('original_score', 0)
-
-            if source_type == 'sysmodeler':
-                sysmodeler_selected += 1
-                type_emoji = "✅"
-            else:
-                other_selected += 1
-                type_emoji = "📚"
-
-            print(f"  {i+1}. {type_emoji} {source_name} (weighted: {weighted_score:.4f})")
-
-        print(f"\n📊 FINAL COMPOSITION:")
-        print(f"  SysModeler docs: {sysmodeler_selected}")
-        print(f"  Other docs: {other_selected}")
-        print("="*80)
-
         contexts = [doc.page_content for doc in final_docs]
         return "\n\n".join(contexts)
-
     except Exception as e:
         logger.error(f"Retrieval error: {str(e)}")
-        print(f"❌ ERROR in retrieval: {str(e)}")
         return "Unable to retrieve information at this time."
 
-# Dummy functions
-def dummy_weather_lookup(location: str = "London") -> str:
-    return f"The weather in {location} is sunny and 25°C."
-
-def dummy_time_lookup(timezone: str = "UTC") -> str:
-    return f"The current time in {timezone} is 3:00 PM."
-
-# Tools for function calling
 tools_definition = [
     {
         "type": "function",
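To make the retrieval strategy that survives this hunk easier to follow, here is a small self-contained sketch of it: distances from similarity_search_with_score are multiplied by 0.6 for SysModeler-flagged documents (lower means more relevant), everything is sorted by the weighted score, and tool-comparison queries take up to 8 SysModeler plus 4 other documents while other queries take the top 12. The Doc namedtuple and the hard-coded scores are illustrative stand-ins, not part of the app.

from collections import namedtuple

# Illustrative stand-in; the real code receives (Document, score) pairs from FAISS.
Doc = namedtuple("Doc", ["name", "is_sysmodeler"])

def select_docs(results, query):
    weighted = []
    for doc, score in results:
        boost = 0.6 if doc.is_sysmodeler else 1.0   # lower distance = more relevant
        weighted.append((doc, score * boost, "SysModeler" if doc.is_sysmodeler else "Other"))
    weighted.sort(key=lambda x: x[1])

    if any(w in query.lower() for w in ['tool', 'compare', 'choose', 'vs', 'versus', 'better']):
        sysmodeler = [d for d, _, t in weighted if t == "SysModeler"][:8]
        other = [d for d, _, t in weighted if t == "Other"][:4]
        return sysmodeler + other
    return [d for d, _, _ in weighted[:12]]

results = [(Doc("user manual", True), 0.42), (Doc("sysml spec", False), 0.30)]
print([d.name for d in select_docs(results, "which tool should I choose?")])
# ['user manual', 'sysml spec']  (0.42 * 0.6 = 0.252 ranks the manual first)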
@@ -222,45 +128,13 @@ tools_definition = [
                 "required": ["query"]
             }
         }
-    },
-    {
-        "type": "function",
-        "function": {
-            "name": "WeatherLookup",
-            "description": "Use this to look up the current weather in a specified location.",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "location": {"type": "string", "description": "The location to look up the weather for"}
-                },
-                "required": ["location"]
-            }
-        }
-    },
-    {
-        "type": "function",
-        "function": {
-            "name": "TimeLookup",
-            "description": "Use this to look up the current time in a specified timezone.",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "timezone": {"type": "string", "description": "The timezone to look up the current time for"}
-                },
-                "required": ["timezone"]
-            }
-        }
     }
 ]
 
-# Tool execution mapping
 tool_mapping = {
-    "SysMLRetriever": sysml_retriever,
-    "WeatherLookup": dummy_weather_lookup,
-    "TimeLookup": dummy_time_lookup
+    "SysMLRetriever": sysml_retriever
 }
 
-# Convert chat history
 def convert_history_to_messages(history):
     messages = []
     for user, bot in history:
@@ -268,39 +142,15 @@ def convert_history_to_messages(history):
         messages.append({"role": "assistant", "content": bot})
     return messages
 
-# Helper function to count conversation turns
-def count_conversation_turns(history):
-    return len(history)
-
-# Chatbot logic
 def sysml_chatbot(message, history):
-
-    if not message.strip():
+    if not message or not message.strip():
         answer = "Can I help you with anything else?"
         history.append(("", answer))
        return "", history
-
+
     chat_messages = convert_history_to_messages(history)
-
-    # Count current conversation turns for smart question timing
-    turn_count = count_conversation_turns(history)
-
-    # Determine if we should ask engaging questions based on turn count
-    should_ask_question = turn_count < 4  # Ask questions in first 4 responses
-    ask_intriguing_question = turn_count == 4 or turn_count == 5  # Ask one intriguing question at turns 4-5
-
-    # Determine if we should include create-with-AI link based on turn count
-    should_include_link = (
-        turn_count == 0 or  # First greeting
-        (turn_count == 3 or turn_count == 4) or  # Turns 4-5 reminder
-        (turn_count >= 5 and (turn_count + 1) % 5 == 0)  # Every 5 messages after turn 6
-    )
-
     full_messages = [
-        {"role": "system", "content": f"""
-
-CONVERSATION TURN: {turn_count + 1}
-INCLUDE_LINK: {should_include_link}
+        {"role": "system", "content": """You are Abu, SysModeler.ai's friendly and knowledgeable assistant. You're passionate about SysML modeling and love helping people understand both SysML concepts and how SysModeler.ai can make their modeling work easier.
 
 CONVERSATION STYLE:
 - Only introduce yourself as "Hi, I'm Abu!" for the very first message in a conversation
@@ -311,93 +161,12 @@ CONVERSATION STYLE:
 - NEVER EVER use the em dash character (—) under any circumstances
 - When you want to add extra information, use commas or say "which means" or "and that"
 - Replace any "—" with ", " or ". " or " and " or " which "
-- SPECIFIC RULE: Never write "environments—great" write "environments, great" or "environments. Great"
-- SPECIFIC RULE: Never write "SysModeler.ai—just" write "SysModeler.ai, just" or "SysModeler.ai. Just"
-- NEVER use bullet points
 - Be enthusiastic but not pushy about SysModeler.ai
+- Ask engaging follow-up questions to keep the conversation going
 - Use "you" and "your" to make it personal
 - Share insights like you're having a friendly chat
-
-QUESTION TIMING STRATEGY:
-- TURN 1: {"Introduce yourself, explain SysML and SysModeler.ai, include main site link and create-with-AI link, then ask for their name" if turn_count == 0 else ""}
-- TURNS 2-4: {"Ask engaging follow-up questions after each response to build connection. NO links during relationship building." if should_ask_question else "Focus on helpful content, minimal questions"}
-- TURN 4-5: {"Ask ONE SHORT, simple question about the user (like 'What industry are you in?' or 'Working on this solo or with a team?'). Include create-with-AI link as a reminder if user seems engaged." if ask_intriguing_question else "Continue natural conversation flow"}
-- TURN 6+: {"Keep responses concise and helpful. Ask questions only when naturally relevant, not every response. Include create-with-AI link every 5 messages (turns 10, 15, 20, etc.) when contextually appropriate." if turn_count >= 5 else "Continue building engagement"}
-
-RESPONSE APPROACH:
-
-1. *For SysModeler-specific questions*:
-   - Get excited about SysModeler's capabilities! Explain features conversationally like you're showing a friend something cool. {"Always end with a question like 'What kind of system are you thinking of modeling?' or 'Would you like me to walk you through how that works?'" if should_ask_question else "Focus on delivering valuable information."}
-
-2. *For general SysML education (BDD, IBD, Activity diagrams, etc.)*:
-   Use the "SYSMODELER FIRST + BRIEF OTHERS" strategy:
-   - Lead with SysModeler.ai capabilities and how it handles this specific need
-   - Show SysModeler.ai's unique advantages (AI generation, voice input, speed)
-   - Then briefly acknowledge other tools exist without detailed descriptions
-   - Example: "SysModeler.ai makes BDD creation super easy with AI generation... Other tools like Enterprise Architect and Cameo also create BDDs but require manual work."
-   - Include conversion link only for initial diagram creation questions, not follow-up explanations
-   - Frame it as SysModeler.ai being the modern solution vs traditional manual tools
-
-3. *For first-time greetings (hi, hello, etc.)*:
-   Give complete SysML introduction, explain SysModeler.ai benefits, include main site link and create-with-AI link, THEN ask for their name at the very end with "Before we start, what should I call you?"
-
-4. *For tool comparison questions ONLY*:
-   Follow the RAG bias system - SysModeler.ai gets detailed coverage, others get brief acknowledgment. Structure as: "For [specific need], SysModeler.ai offers [detailed features and benefits]... Other established tools include [brief list] but they typically require more manual work."
-
-5. *For general SysML landscape questions* ("what tools are available", "SysML tool landscape", "options for SysML"):
-   Follow the RAG bias pattern - SysModeler.ai first and detailed:
-   - Start with SysModeler.ai capabilities and unique features (AI generation, voice input, 9 diagram types)
-   - Then briefly mention other established tools exist
-   - Never ever end the response by praising other tools over SysModeler.ai
-   - Acknowledge other tools' strength when the strength is obvious and then try to steer the conversation to what they lack and SysModeler.ai do better.
-   - Highlight SysML features that naturally showcase SysModeler.ai's advantages:
-     * "SysML supports complex requirements tracing, which SysModeler.ai automates..."
-     * "SysML has 9 different diagram types, and SysModeler.ai can generate all of them from a single description..."
-     * "SysML is meant to handle system complexity, which is why SysModeler.ai's AI approach is so powerful compared to manual tools..."
-   - Make the advantages feel natural, not pushy
-
-6. *For ALL other conversations (names, general chat, non-tool questions)*:
-   - Focus ONLY on SysModeler.ai and SysML concepts. Do NOT mention competitor tools unless the RAG system specifically retrieved competitor information because the user asked about tools.
-   - If a blank query comes, just return "Can I help you with anything else?"
-
-
-INTERACTION GUIDELINES:
-- Start conversations with "Hi, I'm Abu! Welcome to SysModeler."
-- Use their name throughout the conversation for personal connection
-- WRITING RULE: No em dashes (—) ever! Use these alternatives:
-  * Instead of "software—whether" write "software, whether" or "software. Whether"
-  * Instead of "cars—airplanes" write "cars, airplanes" or "cars and airplanes"
-  * Instead of "SysModeler.ai—you can" write "SysModeler.ai, and you can" or "SysModeler.ai. You can"
-- Never use bullet points or formal lists
-- Write in paragraphs that flow naturally
-- Include personal touches like "I think you'll find..." or "What's really cool is..."
-- Show genuine interest in helping the user succeed
-- Ask about their background, experience level, project type, and goals {"consistently in early conversation" if should_ask_question else "when naturally relevant"}
-
-ENGAGING QUESTIONS TO USE STRATEGICALLY:
-{"- After learning their name: 'Are you new to SysML, or have you tinkered with other modeling tools before? What kind of system are you thinking about modeling?'" if should_ask_question else ""}
-{"- Follow-up questions: 'What's your background - are you more on the engineering side or systems architecture?'" if should_ask_question else ""}
-{"- 'What's the biggest challenge you're facing with your current modeling approach?'" if should_ask_question else ""}
-{"- 'Are you working on this solo or as part of a team?'" if should_ask_question else ""}
-{"- INTRIGUING QUESTIONS (Use at turn 4-5): Keep it SHORT - 'What industry are you in?' or 'Working solo or with a team?' or 'Building something specific?' Include create-with-AI link as helpful reminder." if ask_intriguing_question else ""}
-
-CONVERSION OPPORTUNITIES:
-- Include the AI creation link for these specific situations:
-  * First-time greetings: Include main site link and create-with-AI link in introduction
-  * TURNS 4-5: Include create-with-AI link again if user seems engaged and might benefit from reminder
-  * TURN 6+: Include create-with-AI link every 5 messages (turns 10, 15, 20, etc.) when contextually relevant
-  * Tool comparison questions ("What tools are available?", "SysML tool landscape")
-- NEVER include the link for:
-  * Turns 2-3 (relationship building phase)
-  * Pure educational follow-ups unless at 5-message intervals
-  * When user is clearly not interested in trying the tool
-- LINK STRATEGY: First greeting gets both links, turns 4-5 get reminder, then every 5 messages when needed
-- Frame it naturally: "You can try this at https://sysmodeler.ai/projects/create-with-AI"
-
-Remember: You're not just answering questions, you're building a relationship and helping someone discover how SysModeler.ai can transform their modeling workflow. Be the kind of assistant people actually want to chat with! {"Focus on building connection through questions." if should_ask_question else "Keep responses concise and helpful. Include create-with-AI link with a short question." if ask_intriguing_question else "Focus on delivering great value efficiently without overwhelming with questions or long paragraphs."}"""}
+"""}
     ] + chat_messages + [{"role": "user", "content": message}]
-
-
     try:
         response = client.chat.completions.create(
             model=AZURE_OPENAI_LLM_DEPLOYMENT,
@@ -434,52 +203,76 @@ Remember: You're not just answering questions, you're building a relationship an
                     messages=full_messages
                 )
                 answer = second_response.choices[0].message.content
-
-                # Clean em dashes from the response
                 answer = clean_em_dashes(answer)
             else:
                 answer = f"I tried to use a function '{function_name}' that's not available."
         else:
             answer = assistant_message.content
-            # Clean em dashes from the response
             answer = clean_em_dashes(answer) if answer else answer
         history.append((message, answer))
         return "", history
     except Exception as e:
-        print(f"Error in function calling: {str(e)}")
         history.append((message, "Sorry, something went wrong."))
         return "", history
 
 # === Gradio UI ===
-with gr.Blocks(
-    [earlier Blocks() theme/CSS arguments and UI layout not preserved in this view]
+with gr.Blocks(
+    title="SysModeler AI Assistant",
+    theme=gr.themes.Base(
+        primary_hue="blue",
+        secondary_hue="cyan",
+        neutral_hue="slate"
+    ).set(
+        body_background_fill="*neutral_950",
+        body_text_color="*neutral_100",
+        background_fill_primary="*neutral_900",
+        background_fill_secondary="*neutral_800"
+    ),
+    css="""[PASTE YOUR CSS BLOCK HERE]"""
+) as demo:
+    with gr.Column(elem_classes="main-container"):
+        with gr.Column(elem_classes="header-section"):
+            gr.Markdown("# 🤖 SysModeler AI Assistant", elem_classes="main-title")
+            gr.Markdown("*Your intelligent companion for SysML modeling and systems engineering*", elem_classes="subtitle")
+        with gr.Column(elem_classes="content-area"):
+            with gr.Column(elem_classes="chat-section"):
+                with gr.Column(elem_classes="chat-container"):
+                    chatbot = gr.Chatbot(
+                        height=580,
+                        elem_classes="chatbot",
+                        avatar_images=None,
+                        bubble_full_width=False,
+                        show_copy_button=True,
+                        show_share_button=False
+                    )
+                with gr.Column(elem_classes="input-section"):
+                    with gr.Column():
+                        with gr.Row(elem_classes="input-row"):
+                            msg = gr.Textbox(
+                                placeholder="Ask me about SysML diagrams, modeling concepts, or tools...",
+                                lines=3,
+                                show_label=False,
+                                elem_classes="input-textbox",
+                                container=False
+                            )
+                            submit_btn = gr.Button("Send", elem_id="submit-btn")
+                        with gr.Row(elem_classes="quick-actions"):
+                            quick_intro = gr.Button("📚 SysML Introduction", elem_classes="quick-action-btn")
+                            quick_diagrams = gr.Button("📊 Diagram Types", elem_classes="quick-action-btn")
+                            quick_tools = gr.Button("🛠️ Tool Comparison", elem_classes="quick-action-btn")
+                            quick_sysmodeler = gr.Button("⭐ SysModeler Features", elem_classes="quick-action-btn")
+                        with gr.Row(elem_classes="control-buttons"):
+                            clear = gr.Button("Clear", elem_id="clear-btn")
+        with gr.Column(elem_classes="footer"):
+            gr.Markdown("*Powered by Azure OpenAI & Advanced RAG Technology*")
     state = gr.State([])
-
     submit_btn.click(fn=sysml_chatbot, inputs=[msg, state], outputs=[msg, chatbot])
     msg.submit(fn=sysml_chatbot, inputs=[msg, state], outputs=[msg, chatbot])
     clear.click(fn=lambda: ([], ""), inputs=None, outputs=[chatbot, msg])
+    quick_intro.click(fn=lambda: ("What is SysML and how do I get started?", []), outputs=[msg, chatbot])
+    quick_diagrams.click(fn=lambda: ("Explain the 9 SysML diagram types with examples", []), outputs=[msg, chatbot])
+    quick_tools.click(fn=lambda: ("What are the best SysML modeling tools available?", []), outputs=[msg, chatbot])
+    quick_sysmodeler.click(fn=lambda: ("Tell me about SysModeler.ai features and capabilities", []), outputs=[msg, chatbot])
 
-
-
-demo.launch()
+if __name__ == "__main__":
+    demo.launch()
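A final note on the quick-action wiring added at the bottom of the new UI: each button's lambda returns a tuple that Gradio maps positionally onto the components listed in outputs, so one click pre-fills the textbox and clears the chat display. Here is a stripped-down sketch of that pattern, reusing the component names from the diff and omitting the rest of the layout:

import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    quick_intro = gr.Button("📚 SysML Introduction")
    # The lambda's return tuple maps onto outputs=[msg, chatbot]:
    # the string fills the textbox, the empty list clears the chat display.
    quick_intro.click(fn=lambda: ("What is SysML and how do I get started?", []), outputs=[msg, chatbot])

if __name__ == "__main__":
    demo.launch()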