Commit 9c68b68 (1 parent: 8b0f083)
Update the backend url
main.py CHANGED
@@ -67,9 +67,9 @@ class HealthcareChatbot:
         self.endpoints_documentation = endpoints_documentation
         self.ollama_base_url = "http://localhost:11434"
         self.model_name = "gemma3"
-        self.BASE_URL = 'https://
+        self.BASE_URL = 'https://fcc0-197-54-54-66.ngrok-free.app'
         self.headers = {'Content-type': 'application/json'}
-        self.user_id = '
+        self.user_id = '31d82b01-4f9b-4abd-9505-a0ca76fc4e39'
         self.max_retries = 3
         self.retry_delay = 2
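
The hunk above swaps in a new ngrok tunnel URL and a hardcoded user id. As an illustrative aside (not part of this commit), a minimal sketch of reading both values from environment variables so that a rotated tunnel URL does not require another code change; the variable names BACKEND_BASE_URL and DEFAULT_USER_ID are assumptions, not names used by this repository:

import os

# Hypothetical environment-driven configuration (variable names are illustrative).
# Falls back to the values introduced by this commit when the variables are unset.
BASE_URL = os.environ.get("BACKEND_BASE_URL", "https://fcc0-197-54-54-66.ngrok-free.app")
USER_ID = os.environ.get("DEFAULT_USER_ID", "31d82b01-4f9b-4abd-9505-a0ca76fc4e39")
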
new.py ADDED
@@ -0,0 +1,1052 @@
import re
import json
import requests
import traceback
import time
import os
import asyncio
from typing import Dict, Any, List, Optional, Tuple
from datetime import datetime, timedelta
from functools import lru_cache
from concurrent.futures import ThreadPoolExecutor

# Updated imports for pydantic
from pydantic import BaseModel, Field

# Updated imports for LangChain
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_ollama import OllamaLLM
from langchain.chains import LLMChain
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_huggingface.embeddings import HuggingFaceEmbeddings

# Enhanced HuggingFace imports for improved functionality
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
import numpy as np

# FastAPI and async HTTP client imports
from fastapi import FastAPI, HTTPException, BackgroundTasks, Depends
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
import aiohttp
import httpx
from starlette.requests import Request
from starlette.responses import Response

# Import endpoints documentation
from endpoints_documentation import endpoints_documentation

# Set environment variables for HuggingFace
os.environ["HF_HOME"] = "/tmp/huggingface"
os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"

# Global thread pool for CPU-bound operations
thread_pool = ThreadPoolExecutor(max_workers=4)

# Global HTTP client session for async requests
http_client = None

# Rate limiting settings
RATE_LIMIT_PER_MINUTE = 60
rate_limit_counter = 0
rate_limit_reset_time = time.time()


class ChatMessage(BaseModel):
    """Data model for chat messages"""
    message_id: str = Field(..., description="Unique identifier for the message")
    user_id: str = Field(..., description="User identifier")
    message: str = Field(..., description="The user's message")
    timestamp: datetime = Field(default_factory=datetime.now, description="When the message was sent")
    language: str = Field(default="english", description="Detected language of the message")


class ChatResponse(BaseModel):
    """Data model for chatbot responses"""
    response_id: str = Field(..., description="Unique identifier for the response")
    response_type: str = Field(..., description="Type of response: 'conversation' or 'api_action'")
    message: str = Field(..., description="The chatbot's response message")
    api_call_made: bool = Field(default=False, description="Whether an API call was made")
    api_data: Optional[Dict[str, Any]] = Field(default=None, description="API response data if applicable")
    language: str = Field(default="english", description="Language of the response")
    timestamp: datetime = Field(default_factory=datetime.now, description="When the response was generated")


class RouterResponse(BaseModel):
    """Data model for router chain response"""
    intent: str = Field(..., description="Either 'API_ACTION' or 'CONVERSATION'")
    confidence: float = Field(..., description="Confidence score between 0.0 and 1.0")
    reasoning: str = Field(..., description="Explanation of the decision")
    endpoint: Optional[str] = Field(default=None, description="API endpoint if intent is API_ACTION")
    method: Optional[str] = Field(default=None, description="HTTP method if intent is API_ACTION")
    params: Dict[str, Any] = Field(default_factory=dict, description="Parameters for API call")
    missing_required: List[str] = Field(default_factory=list, description="Missing required parameters")


class HealthcareChatbot:
    def __init__(self):
        self.endpoints_documentation = endpoints_documentation
        self.ollama_base_url = "http://localhost:11434"
        self.model_name = "gemma3"
        self.BASE_URL = 'https://8ac0-197-54-54-66.ngrok-free.app'
        self.headers = {'Content-type': 'application/json'}
        self.user_id = '9e889485-3db4-4f70-a7a2-e219beae6578'
        self.max_retries = 3
        self.retry_delay = 2

        # Store conversation history with user-specific sessions
        self.conversation_sessions = {}
        self.max_history_length = 10

        # Initialize components
        self._initialize_language_tools()
        self._initialize_llm()
        self._initialize_parsers_and_chains()
        self._initialize_date_parser()

        # Initialize async HTTP client
        self._initialize_http_client()

        print("Healthcare Chatbot initialized successfully!")
        self._print_welcome_message()

    def _initialize_http_client(self):
        """Initialize async HTTP client with connection pooling"""
        global http_client
        if http_client is None:
            http_client = httpx.AsyncClient(
                timeout=30.0,
                limits=httpx.Limits(max_keepalive_connections=100, max_connections=1000),
                transport=httpx.AsyncHTTPTransport(retries=3)
            )

    async def _close_http_client(self):
        """Close the HTTP client"""
        global http_client
        if http_client:
            await http_client.aclose()
            http_client = None

    def _get_user_session(self, user_id: str) -> List[Dict]:
        """Get or create user conversation session"""
        if user_id not in self.conversation_sessions:
            self.conversation_sessions[user_id] = []
        return self.conversation_sessions[user_id]

    async def _check_rate_limit(self) -> bool:
        """Check and update rate limiting"""
        global rate_limit_counter, rate_limit_reset_time
        current_time = time.time()

        # Reset counter if a minute has passed
        if current_time - rate_limit_reset_time >= 60:
            rate_limit_counter = 0
            rate_limit_reset_time = current_time

        # Check if we're over the limit
        if rate_limit_counter >= RATE_LIMIT_PER_MINUTE:
            return False

        rate_limit_counter += 1
        return True

    def _print_welcome_message(self):
        """Print welcome message in both languages"""
        print("\n" + "="*60)
        print("🏥 HEALTHCARE CHATBOT READY")
        print("="*60)
        print("English: Hello! I'm your healthcare assistant. I can help you with:")
        print("• Booking and managing appointments")
        print("• Finding hospital information")
        print("• Viewing your medical records")
        print("• General healthcare questions")
        print()
        print("Arabic: مرحباً! أنا مساعدك الطبي. يمكنني مساعدتك في:")
        print("• حجز وإدارة المواعيد")
        print("• العثور على معلومات المستشفى")
        print("• عرض سجلاتك الطبية")
        print("• الأسئلة الطبية العامة")
        print("="*60)
        print("Type 'quit' or 'خروج' to exit\n")

    def _initialize_language_tools(self):
        """Initialize language processing tools"""
        try:
            self.embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-large")
            self.language_classifier = pipeline(
                "text-classification",
                model="papluca/xlm-roberta-base-language-detection",
                top_k=1
            )
            self.sentiment_analyzer = pipeline(
                "sentiment-analysis",
                model="cardiffnlp/twitter-xlm-roberta-base-sentiment"
            )
            print("✓ Language processing models loaded successfully")
        except Exception as e:
            print(f"⚠ Warning: Some language models failed to load: {e}")
            self.language_classifier = None
            self.sentiment_analyzer = None

    def _initialize_date_parser(self):
        """Initialize date parsing model"""
        try:
            self.date_parser = pipeline(
                "token-classification",
                model="Jean-Baptiste/roberta-large-ner-english",
                aggregation_strategy="simple"
            )
        except Exception as e:
            print(f"⚠ Warning: Date parsing model failed to load: {e}")
            self.date_parser = None

    def _initialize_llm(self):
        """Initialize the LLM"""
        callbacks = [StreamingStdOutCallbackHandler()]
        self.llm = OllamaLLM(
            model=self.model_name,
            base_url=self.ollama_base_url,
            callbacks=callbacks,
            temperature=0.7,
            num_ctx=8192,
            top_p=0.9,
            request_timeout=60,
        )

    def _initialize_parsers_and_chains(self):
        """Initialize all prompt templates and chains - REVAMPED to 3 chains only"""
        self.json_parser = JsonOutputParser(pydantic_object=RouterResponse)

        # UNIFIED ROUTER CHAIN - Handles both intent classification AND API routing
        self.router_prompt_template = PromptTemplate(
            template="""
You are a routing system. Your job is simple:
1. Understand what the user wants
2. Handle any dates/times in their request with PRECISE calculations
3. Check if any endpoint can do what they want
4. If yes = API_ACTION, if no = CONVERSATION

## Available API Endpoints Documentation
{endpoints_documentation}

## User Query to Analyze
Query: "{user_query}"
Language: {detected_language}
Current Context:
- DateTime: {current_datetime}
- Timezone: {timezone}
- Current Day of Week: {current_day_name}

## Step-by-Step Analysis

**STEP 1: What does the user want?**
- If query is in Arabic, translate it to English first
- Identify the exact action or information the user is requesting
- Focus on understanding their underlying need, not just the words

**STEP 2: Handle Date/Time Processing with PRECISE Calculations**
IMPORTANT: Use the current datetime ({current_datetime}) and timezone ({timezone}) for ALL calculations.

### Current Date Reference Points:
- Today is: {current_datetime}
- Current day of week: {current_day_name}
- Current timezone: {timezone}

### Arabic Date/Time Expressions Processing:
**Basic Relative Dates:**
- "اليوم" (today) = {current_datetime} date portion
- "غدا" (tomorrow) = current date + 1 day
- "أمس" (yesterday) = current date - 1 day
- "بعد غد" (day after tomorrow) = current date + 2 days

**Weekly Expressions - CALCULATE PRECISELY:**
- "الأسبوع القادم" (next week) = current date + 7 days
- "الأسبوع الماضي" (last week) = current date - 7 days

**Specific Weekday Calculations - MOST IMPORTANT:**
For expressions like "يوم [weekday] القادم" (next [weekday]):
1. Identify the target weekday from Arabic names:
   - الأحد (Sunday) = 0
   - الاثنين (Monday) = 1
   - الثلاثاء (Tuesday) = 2
   - الأربعاء (Wednesday) = 3
   - الخميس (Thursday) = 4
   - الجمعة (Friday) = 5
   - السبت (Saturday) = 6

2. Calculate days to add:
   - Get current weekday number (0=Sunday, 1=Monday, etc.)
   - Target weekday number
   - If target > current: days_to_add = target - current
   - If target <= current: days_to_add = 7 - (current - target)
   - Final date = current_date + days_to_add

**Example Calculation:**
If today is Sunday (June 1, 2025) and user says "يوم الاربع القادم" (next Wednesday):
- Current weekday: 0 (Sunday)
- Target weekday: 3 (Wednesday)
- Days to add: 3 - 0 = 3
- Result: June 1 + 3 days = June 4, 2025

**Monthly/Yearly Expressions:**
- "الشهر القادم" (next month) = add 1 month to current date
- "الشهر الماضي" (last month) = subtract 1 month from current date
- "السنة القادمة" (next year) = add 1 year to current date

**Time Expressions:**
- "صباحًا" (morning/AM) = 09:00 if no specific time given
- "مساءً" (evening/PM) = 18:00 if no specific time given
- "ظهرًا" (noon) = 12:00
- "منتصف الليل" (midnight) = 00:00
- "بعد ساعتين" (in 2 hours) = current time + 2 hours
- "قبل ساعة" (1 hour ago) = current time - 1 hour

**Date Format Output:**
- Always convert final calculated date to ISO 8601 format: YYYY-MM-DDTHH:MM:SS
- Include timezone offset if available
- For date-only expressions, use 00:00:00 as default time

**STEP 3: Find matching endpoint**
- Read each endpoint description in the documentation
- Check if any endpoint's purpose can fulfill what the user wants
- Match based on functionality, not keywords

**STEP 4: Decision**
- Found matching endpoint = "API_ACTION"
- No matching endpoint = "CONVERSATION"

**STEP 5: Parameter Extraction (only if API_ACTION)**
- Extract parameter values from user query
- Use the CALCULATED dates/times from Step 2
- Convert all dates/times to ISO 8601 format (YYYY-MM-DDTHH:MM:SS)
- List any missing required parameters
- **CRITICAL: All parameters must be in English**
  - Translate any Arabic text to English
  - Convert names to English equivalents (e.g., "دكتور احمد" → "Dr. Ahmed")
  - Use standard English terms for all parameters

## Output Format
{{
    "intent": "CONVERSATION|API_ACTION",
    "confidence": 0.8,
    "reasoning": "User wants: [what user actually needs]. Date/time processing: [show exact calculation: current date + X days = final date]. Found endpoint: [endpoint path and why it matches] OR No endpoint matches this need",
    "endpoint": "/exact/endpoint/path",
    "method": "GET|POST|PUT|DELETE",
    "params": {{
        // ALL VALUES MUST BE IN ENGLISH
        // Arabic terms must be translated to English equivalents
    }},
    "missing_required": [],
    "calculated_datetime": "YYYY-MM-DDTHH:MM:SS (if date/time was processed)"
}}

## CRITICAL REMINDERS:
1. ALWAYS use the provided current_datetime ({current_datetime}) as your base for calculations
2. For "next weekday" expressions, calculate the exact number of days to add
3. Show your calculation work in the reasoning field
4. Double-check weekday numbers: Sunday=0, Monday=1, Tuesday=2, Wednesday=3, Thursday=4, Friday=5, Saturday=6
5. **ALL PARAMETERS MUST BE IN ENGLISH** - translate any Arabic text before output

**FINAL CHECK BEFORE OUTPUTTING:**
🔍 **MANDATORY LANGUAGE CHECK:**
1. Examine every value in the params object
2. If ANY value contains Arabic characters (ا-ي), you MUST:
   - Translate it to English
   - Convert names to English equivalents
   - Replace Arabic terms with English counterparts
3. Only output JSON when ALL parameters are in English

Now analyze the user query step by step and give me the JSON response.
""",
            input_variables=["user_query", "detected_language", "extracted_keywords",
                             "sentiment_analysis", "endpoints_documentation", "current_datetime",
                             "timezone", "current_day_name"]
        )

        # CONVERSATION CHAIN - Handles conversational responses
        self.conversation_template = PromptTemplate(
            template="""
You are a friendly and professional healthcare chatbot assistant.

=== RESPONSE GUIDELINES ===
- Respond ONLY in {detected_language}
- Be helpful, empathetic, and professional
- Keep responses concise but informative
- Use appropriate medical terminology when needed
- Maintain a caring and supportive tone

=== CONTEXT ===
User Message: {user_query}
Language: {detected_language}
Sentiment: {sentiment_analysis}
Conversation History: {conversation_history}

=== LANGUAGE-SPECIFIC INSTRUCTIONS ===

FOR ARABIC RESPONSES:
- Use Modern Standard Arabic (الفصحى)
- Be respectful and formal as appropriate in Arabic culture
- Use proper Arabic medical terminology
- Keep sentences clear and grammatically correct

FOR ENGLISH RESPONSES:
- Use clear, professional English
- Be warm and approachable
- Use appropriate medical terminology

=== RESPONSE RULES ===
1. Address the user's question or comment directly
2. Provide helpful information when possible
3. If you cannot help with something specific, explain what you CAN help with
4. Never provide specific medical advice - always recommend consulting healthcare professionals
5. Be encouraging and supportive
6. Do NOT mix languages in your response
7. End responses naturally without asking multiple questions

Generate a helpful conversational response:""",
            input_variables=["user_query", "detected_language", "sentiment_analysis", "conversation_history"]
        )

        # API RESPONSE CHAIN - Formats API responses for users
        self.api_response_template = PromptTemplate(
            template="""
You are a professional healthcare assistant. Generate a natural language response to the user's query using ONLY the provided API data.

User Query: {user_query}
User Sentiment: {sentiment_analysis}
Response Language: {detected_language}

API Response Data:
{api_response}

=== CORE INSTRUCTIONS ===

1. Analyze the API response structure and extract relevant data points
2. Cross-reference with the user's query to determine what information to include
3. Respond in {detected_language} using a warm, conversational tone
4. Convert technical data into natural language appropriate for healthcare communication

=== DATE/TIME HANDLING ===

1. Identify all date/time fields in the API response (look for ISO 8601 format: YYYY-MM-DDTHH:MM:SS)
2. For English responses:
   - Format dates as "Month Day, Year at HH:MM AM/PM"
   - Convert times to 12-hour format with proper AM/PM
3. For Arabic responses:
   - Format dates as "Day Month Year الساعة HH:MM صباحاً/مساءً"
   - Use Arabic numerals (٠١٢٣٤٥٦٧٨٩)
   - Use Arabic month names
4. Preserve all original date/time values - only change the formatting

=== RESPONSE GUIDELINES ===

1. Use ONLY data present in the API response
2. Maintain a professional yet friendly healthcare tone
3. Adapt to the user's sentiment:
   - Positive: reinforce with encouraging language
   - Neutral: provide clear, factual information
   - Negative: show empathy and offer assistance
4. Structure the response to directly answer the user's query
5. Include relevant details from the API response that address the user's needs

=== CRITICAL RULES ===

1. Never invent or hallucinate information not present in the API response
2. If the API response doesn't contain requested information, say so politely
3. All dates/times must exactly match the API data
4. Maintain strict language consistency (respond only in {detected_language})
5. Format all technical data (IDs, codes, etc.) for easy understanding

Generate a helpful response that addresses the user's query using the API data.
""",
            input_variables=["user_query", "api_response", "detected_language", "sentiment_analysis"]
        )

        # Create the 3 chains
        self.router_chain = LLMChain(llm=self.llm, prompt=self.router_prompt_template)
        self.conversation_chain = LLMChain(llm=self.llm, prompt=self.conversation_template)
        self.api_response_chain = LLMChain(llm=self.llm, prompt=self.api_response_template)

    def detect_language(self, text):
        """Detect language of the input text"""
        if self.language_classifier and len(text.strip()) > 3:
            try:
                result = self.language_classifier(text)
                detected_lang = result[0][0]['label']
                confidence = result[0][0]['score']

                if detected_lang in ['ar', 'arabic']:
                    return "arabic"
                elif detected_lang in ['en', 'english']:
                    return "english"
                elif confidence > 0.8:
                    return "english"  # Default to English for unsupported languages
            except:
                pass

        # Fallback: Basic Arabic detection
        arabic_pattern = re.compile(r'[\u0600-\u06FF\u0750-\u077F\u08A0-\u08FF]+')
        if arabic_pattern.search(text):
            return "arabic"

        return "english"

    def analyze_sentiment(self, text):
        """Analyze sentiment of the text"""
        if self.sentiment_analyzer and len(text.strip()) > 3:
            try:
                result = self.sentiment_analyzer(text)
                return {
                    "sentiment": result[0]['label'],
                    "score": result[0]['score']
                }
            except:
                pass

        return {"sentiment": "NEUTRAL", "score": 0.5}

    def extract_keywords(self, text):
        """Extract keywords from text"""
        # Simple keyword extraction
        words = re.findall(r'\b\w+\b', text.lower())
        # Filter out common words and keep meaningful ones
        stopwords = {'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by', 'is', 'are', 'was', 'were'}
        keywords = [w for w in words if len(w) > 3 and w not in stopwords]
        return list(set(keywords))[:5]  # Return top 5 unique keywords

    def get_conversation_context(self, user_id: str) -> str:
        """Get recent conversation history as context"""
        history = self._get_user_session(user_id)
        if not history:
            return "No previous conversation"

        context = []
        for item in history[-3:]:  # Last 3 exchanges
            context.append(f"User: {item['user_message']}")
            context.append(f"Bot: {item['bot_response'][:100]}...")  # Truncate long responses

        return " | ".join(context)

    def add_to_history(self, user_id: str, user_message: str, bot_response: str, response_type: str):
        """Add exchange to conversation history"""
        history = self._get_user_session(user_id)
        history.append({
            'timestamp': datetime.now(),
            'user_message': user_message,
            'bot_response': bot_response,
            'response_type': response_type
        })

        # Keep only recent history
        if len(history) > self.max_history_length:
            self.conversation_sessions[user_id] = history[-self.max_history_length:]

    def parse_relative_date(self, text, detected_language):
        """Parse relative dates from text using a combination of methods"""
        today = datetime.now()

        # Handle common relative date patterns in English and Arabic
        tomorrow_patterns = {
            'english': [r'\btomorrow\b', r'\bnext day\b'],
            'arabic': [r'\bغدا\b', r'\bبكرة\b', r'\bغدًا\b', r'\bالغد\b']
        }

        next_week_patterns = {
            'english': [r'\bnext week\b'],
            'arabic': [r'\bالأسبوع القادم\b', r'\bالأسبوع المقبل\b', r'\bالاسبوع الجاي\b']
        }

        # Check for "tomorrow" patterns
        for pattern in tomorrow_patterns.get(detected_language, []) + tomorrow_patterns.get('english', []):
            if re.search(pattern, text, re.IGNORECASE):
                return (today + timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S')

        # Check for "next week" patterns
        for pattern in next_week_patterns.get(detected_language, []) + next_week_patterns.get('english', []):
            if re.search(pattern, text, re.IGNORECASE):
                return (today + timedelta(days=7)).strftime('%Y-%m-%dT%H:%M:%S')

        # If NER model is available, use it to extract date entities
        if self.date_parser and detected_language == 'english':
            try:
                date_entities = self.date_parser(text)
                for entity in date_entities:
                    if entity['entity_group'] == 'DATE':
                        print(f"Found date entity: {entity['word']}")
                        # Default to tomorrow if we detect any date
                        return (today + timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S')
            except Exception as e:
                print(f"Error in date parsing: {e}")

        # Default return None if no date pattern is recognized
        return None

    def parse_router_response(self, router_text):
        """Parse the router chain response into structured data"""
        try:
            # Clean the response text
            cleaned_response = router_text

            # Remove any comments (both single-line and multi-line)
            cleaned_response = re.sub(r'//.*?$', '', cleaned_response, flags=re.MULTILINE)
            cleaned_response = re.sub(r'/\*.*?\*/', '', cleaned_response, flags=re.DOTALL)

            # Remove any trailing commas
            cleaned_response = re.sub(r',(\s*[}\]])', r'\1', cleaned_response)

            # Try different methods to parse the JSON response
            try:
                # First attempt: direct JSON parsing of cleaned response
                parsed_response = json.loads(cleaned_response)
            except json.JSONDecodeError:
                try:
                    # Second attempt: extract JSON from markdown code block
                    json_match = re.search(r'```(?:json)?\s*(\{.*?\})\s*```', cleaned_response, re.DOTALL)
                    if json_match:
                        parsed_response = json.loads(json_match.group(1))
                    else:
                        raise ValueError("No JSON found in code block")
                except (json.JSONDecodeError, ValueError):
                    try:
                        # Third attempt: find JSON-like content using regex
                        json_pattern = r'\{\s*"intent"\s*:.*?\}'
                        json_match = re.search(json_pattern, cleaned_response, re.DOTALL)
                        if json_match:
                            json_str = json_match.group(0)
                            # Additional cleaning for the extracted JSON
                            json_str = re.sub(r'//.*?$', '', json_str, flags=re.MULTILINE)
                            json_str = re.sub(r',(\s*[}\]])', r'\1', json_str)
                            parsed_response = json.loads(json_str)
                        else:
                            raise ValueError("Could not extract JSON using regex")
                    except (json.JSONDecodeError, ValueError):
                        print(f"Failed to parse JSON. Raw response: {router_text}")
                        print(f"Cleaned response: {cleaned_response}")
                        # Return default conversation response on parse failure
                        return {
                            "intent": "CONVERSATION",
                            "confidence": 0.5,
                            "reasoning": "Failed to parse router response - defaulting to conversation",
                            "endpoint": None,
                            "method": None,
                            "params": {},
                            "missing_required": []
                        }

            # Validate required fields and set defaults
            validated_response = {
                "intent": parsed_response.get("intent", "CONVERSATION"),
                "confidence": parsed_response.get("confidence", 0.5),
                "reasoning": parsed_response.get("reasoning", "Router decision"),
                "endpoint": parsed_response.get("endpoint"),
                "method": parsed_response.get("method"),
                "params": parsed_response.get("params", {}),
                "missing_required": parsed_response.get("missing_required", [])
            }

            return validated_response

        except Exception as e:
            print(f"Error parsing router response: {e}")
            return {
                "intent": "CONVERSATION",
                "confidence": 0.5,
                "reasoning": f"Parse error: {str(e)}",
                "endpoint": None,
                "method": None,
                "params": {},
                "missing_required": []
            }

    def handle_conversation(self, user_query, detected_language, sentiment_result):
        """Handle conversational responses"""
        try:
            result = self.conversation_chain.invoke({
                "user_query": user_query,
                "detected_language": detected_language,
                "sentiment_analysis": json.dumps(sentiment_result),
                "conversation_history": self.get_conversation_context(self.user_id)
            })

            return result["text"].strip()

        except Exception as e:
            # Fallback response
            if detected_language == "arabic":
                return "أعتذر، واجهت مشكلة في المعالجة. كيف يمكنني مساعدتك؟"
            else:
                return "I apologize, I encountered a processing issue. How can I help you?"

    async def backend_call(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Make async API call to backend with retry logic"""
        endpoint_url = data.get('endpoint')
        endpoint_method = data.get('method')
        endpoint_params = data.get('params', {}).copy()

        print(f"🔗 Making API call to {endpoint_method} {self.BASE_URL + endpoint_url} with params: {endpoint_params}")

        # Inject patient_id if needed
        if 'patient_id' in endpoint_params:
            endpoint_params['patient_id'] = self.user_id

        retries = 0
        while retries < self.max_retries:
            try:
                if endpoint_method.upper() == 'GET':
                    response = await http_client.get(
                        self.BASE_URL + endpoint_url,
                        params=endpoint_params,
                        headers=self.headers
                    )
                else:
                    response = await http_client.request(
                        endpoint_method.upper(),
                        self.BASE_URL + endpoint_url,
                        json=endpoint_params,
                        headers=self.headers
                    )

                response.raise_for_status()
                return response.json()

            except httpx.HTTPError as e:
                retries += 1
                if retries >= self.max_retries:
                    return {
                        "error": "Backend API call failed after multiple retries",
                        "details": str(e),
                        "status_code": getattr(e.response, 'status_code', None) if hasattr(e, 'response') else None
                    }

                await asyncio.sleep(self.retry_delay)

    async def handle_api_action(self, user_query: str, detected_language: str,
                                sentiment_result: Dict, keywords: List[str],
                                router_data: Dict) -> Dict[str, Any]:
        """Handle API-based actions using router data"""
        try:
            # Inject patient_id if needed
            if 'patient_id' in router_data['params']:
                router_data['params']['patient_id'] = self.user_id
            else:
                router_data['params']['patient_id'] = self.user_id

            print(f"🔍 Final API call data: {router_data}")

            # Make backend API call
            api_response = await self.backend_call(router_data)

            print("🔗 API response received:", api_response)

            # Generate user-friendly response using thread pool for CPU-bound LLM operation
            loop = asyncio.get_event_loop()
            user_response_result = await loop.run_in_executor(
                thread_pool,
                lambda: self.api_response_chain.invoke({
                    "user_query": user_query,
                    "api_response": json.dumps(api_response, indent=2),
                    "detected_language": detected_language,
                    "sentiment_analysis": json.dumps(sentiment_result),
                })
            )

            print("🔗 Final user response:", user_response_result["text"].strip())

            return {
                "response": user_response_result["text"].strip(),
                "api_data": api_response,
                "routing_info": router_data
            }

        except Exception as e:
            # Fallback error response
            if detected_language == "arabic":
                error_msg = "أعتذر، لم أتمكن من معالجة طلبك. يرجى المحاولة مرة أخرى أو صياغة السؤال بطريقة مختلفة."
            else:
                error_msg = "I apologize, I couldn't process your request. Please try again or rephrase your question."

            return {
                "response": error_msg,
                "api_data": {"error": str(e)},
                "routing_info": None
            }

    async def chat(self, user_message: str, user_id: str = None) -> ChatResponse:
        """Main chat method that handles user messages with async support"""
        start_time = time.time()

        # Use provided user_id or default
        user_id = user_id or self.user_id

        # Check rate limiting
        if not await self._check_rate_limit():
            return ChatResponse(
                response_id=str(time.time()),
                response_type="conversation",
                message="I'm currently processing too many requests. Please try again in a moment.",
                api_call_made=False,
                language="english"
            )

        # Check for exit commands
        if user_message.lower().strip() in ['quit', 'exit', 'خروج', 'bye', 'goodbye']:
            if self.detect_language(user_message) == "arabic":
                return ChatResponse(
                    response_id=str(time.time()),
                    response_type="conversation",
                    message="مع السلامة! أتمنى لك يوماً سعيداً. 👋",
                    language="arabic"
                )
            else:
                return ChatResponse(
                    response_id=str(time.time()),
                    response_type="conversation",
                    message="Goodbye! Have a great day! 👋",
                    language="english"
                )

        try:
            print(f"\n{'='*50}")
            print(f"🔍 Processing: '{user_message}'")
            print(f"{'='*50}")

            # Step 1: Language and sentiment analysis (CPU-bound operations in thread pool)
            loop = asyncio.get_event_loop()
            detected_language = await loop.run_in_executor(
                thread_pool, self.detect_language, user_message
            )
            sentiment_result = await loop.run_in_executor(
                thread_pool, self.analyze_sentiment, user_message
            )
            keywords = await loop.run_in_executor(
                thread_pool, self.extract_keywords, user_message
            )

            print(f"🌐 Detected Language: {detected_language}")
            print(f"😊 Sentiment: {sentiment_result}")
            print(f"🔑 Keywords: {keywords}")

            # Step 2: Router Chain (CPU-bound LLM operation in thread pool)
            print(f"\n🤖 Running Router Chain...")
            router_result = await loop.run_in_executor(
                thread_pool,
                lambda: self.router_chain.invoke({
                    "user_query": user_message,
                    "detected_language": detected_language,
                    "extracted_keywords": json.dumps(keywords),
                    "sentiment_analysis": json.dumps(sentiment_result),
                    "conversation_history": self.get_conversation_context(user_id),
                    "endpoints_documentation": json.dumps(self.endpoints_documentation, indent=2),
                    "current_datetime": datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
                    "timezone": "UTC",
                    "current_day_name": datetime.now().strftime('%A'),
                })
            )

            # Parse router response
            router_data = await loop.run_in_executor(
                thread_pool, self.parse_router_response, router_result["text"]
            )
            print(f"🎯 Router Decision: {router_data}")

            # Step 3: Handle based on intent
            if router_data["intent"] == "CONVERSATION" and router_data['endpoint'] == '':
                print(f"\n💬 Handling as CONVERSATION")
                response_text = await loop.run_in_executor(
                    thread_pool,
                    lambda: self.handle_conversation(user_message, detected_language, sentiment_result)
                )

                # Add to conversation history
                self.add_to_history(user_id, user_message, response_text, "conversation")

                return ChatResponse(
                    response_id=str(time.time()),
                    response_type="conversation",
                    message=response_text,
                    api_call_made=False,
                    language=detected_language,
                    api_data=None
                )

            elif router_data["intent"] == "API_ACTION":
                print(f"\n🔗 Handling as API_ACTION")

                # Handle API action
                api_result = await self.handle_api_action(
                    user_message, detected_language, sentiment_result, keywords, router_data
                )

                # Add to conversation history
                self.add_to_history(user_id, user_message, api_result["response"], "api_action")

                return ChatResponse(
                    response_id=str(time.time()),
                    response_type="api_action",
                    message=api_result["response"],
                    api_call_made=True,
                    language=detected_language
                )

            else:
                # Fallback for unknown intent
                print(f"⚠️ Unknown intent: {router_data['intent']}")
                fallback_response = await loop.run_in_executor(
                    thread_pool,
                    lambda: self.handle_conversation(user_message, detected_language, sentiment_result)
                )

                return ChatResponse(
                    response_id=str(time.time()),
                    response_type="conversation",
                    message=fallback_response,
                    api_call_made=False,
                    language=detected_language
                )

        except Exception as e:
            print(f"❌ Error in chat method: {str(e)}")
            print(f"❌ Traceback: {traceback.format_exc()}")

            # Fallback error response
            if self.detect_language(user_message) == "arabic":
                error_message = "أعتذر، حدث خطأ في معالجة رسالتك. يرجى المحاولة مرة أخرى."
            else:
                error_message = "I apologize, there was an error processing your message. Please try again."

            return ChatResponse(
                response_id=str(time.time()),
                response_type="conversation",
                message=error_message,
                api_call_made=False,
                language=self.detect_language(user_message)
            )

        finally:
            end_time = time.time()
            print(f"⏱️ Processing time: {end_time - start_time:.2f} seconds")

    async def run_interactive_chat(self):
        """Run the interactive chat interface"""
        try:
            while True:
                try:
                    # Get user input
                    user_input = input("\n👤 You: ").strip()

                    if not user_input:
                        continue

                    # Process the message
                    response = await self.chat(user_input)

                    # Display the response
                    print(f"\n🤖 Bot: {response.message}")

                    # Check for exit
                    if user_input.lower() in ['quit', 'exit', 'خروج', 'bye', 'goodbye']:
                        break

                except KeyboardInterrupt:
                    print("\n\n👋 Chat interrupted. Goodbye!")
                    break
                except EOFError:
                    print("\n\n👋 Chat ended. Goodbye!")
                    break
                except Exception as e:
                    print(f"\n❌ Error: {e}")
                    continue

        except Exception as e:
            print(f"❌ Fatal error in chat interface: {e}")

    def clear_history(self):
        """Clear conversation history"""
        # Reset all per-user sessions (history is stored in conversation_sessions)
        self.conversation_sessions = {}
        print("🗑️ Conversation history cleared.")


def main():
    """Main function to run the healthcare chatbot"""
    try:
        print("🚀 Starting Healthcare Chatbot...")
        chatbot = HealthcareChatbot()
        # run_interactive_chat is a coroutine, so it must be driven by the event loop
        asyncio.run(chatbot.run_interactive_chat())

    except KeyboardInterrupt:
        print("\n\n👋 Shutting down gracefully...")
    except Exception as e:
        print(f"❌ Fatal error: {e}")
        print(f"❌ Traceback: {traceback.format_exc()}")


if __name__ == "__main__":
    main()


from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Dict, Any, Optional


# FastAPI application setup
app = FastAPI(
    title="Healthcare AI Assistant",
    description="An AI-powered healthcare assistant that handles appointment booking and queries",
    version="1.0.0"
)

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Initialize the AI agent
agent = HealthcareChatbot()

class QueryRequest(BaseModel):
    query: str
    user_id: Optional[str] = None

@app.post("/query")
async def process_query(request: QueryRequest):
    """
    Process a user query and return a response
    """
    try:
        response = await agent.chat(request.query, request.user_id)
        return response.dict()
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/health")
async def health_check():
    """
    Health check endpoint
    """
    return {"status": "healthy", "service": "healthcare-ai-assistant"}

@app.get("/")
async def root():
    return {"message": "Hello World"}

@app.on_event("startup")
async def startup_event():
    """Initialize resources on startup"""
    agent._initialize_http_client()

@app.on_event("shutdown")
async def shutdown_event():
    """Cleanup resources on shutdown"""
    await agent._close_http_client()
    thread_pool.shutdown(wait=True)

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000, workers=4)
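
For reference, a minimal client sketch for the /query and /health endpoints defined above, assuming the app is served locally on port 8000 as in the uvicorn.run call (the example question and timeout are illustrative):

import httpx
from typing import Optional

def ask(query: str, user_id: Optional[str] = None) -> dict:
    # POST the question to the /query endpoint and return the parsed ChatResponse payload.
    response = httpx.post(
        "http://localhost:8000/query",
        json={"query": query, "user_id": user_id},
        timeout=120.0,  # LLM-backed calls can be slow; a generous timeout is assumed
    )
    response.raise_for_status()
    return response.json()

if __name__ == "__main__":
    # Quick liveness probe, then an illustrative question.
    print(httpx.get("http://localhost:8000/health", timeout=10.0).json())
    print(ask("What appointments do I have next week?"))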