Commit 663ad87 · Parent: 423d658
use async for backend request and cache endpoints documentation
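The "cache endpoints documentation" half of this commit amounts to serializing the endpoints documentation dict to a JSON string once, in the constructor, and reusing that string on every prompt build instead of re-serializing it per request. A minimal sketch of that pattern, with illustrative names (`DocsCache` and `build_prompt` are not from this repository; the cached attribute mirrors `cached_endpoints_documentation` in the diff below):

import json

class DocsCache:
    """Serialize a static documentation dict once and reuse the string."""

    def __init__(self, docs: dict):
        self.docs = docs
        # Rendered once at construction time rather than on every request.
        self.cached_docs = json.dumps(docs, indent=2)

    def build_prompt(self, user_query: str) -> str:
        # Reuse the pre-rendered JSON instead of calling json.dumps again.
        return f"Available endpoints:\n{self.cached_docs}\n\nUser: {user_query}"

if __name__ == "__main__":
    cache = DocsCache({"/patients": {"method": "GET"}})
    print(cache.build_prompt("list my appointments"))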
main.py
CHANGED
@@ -22,6 +22,8 @@ from langchain_huggingface.embeddings import HuggingFaceEmbeddings
 # Enhanced HuggingFace imports for improved functionality
 from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
 import numpy as np
+import aiohttp
+import asyncio

 # Import endpoints documentation
 from endpoints_documentation import endpoints_documentation
@@ -66,11 +68,12 @@ class RouterResponse(BaseModel):
 class HealthcareChatbot:
     def __init__(self):
         self.endpoints_documentation = endpoints_documentation
+        self.cached_endpoints_documentation = json.dumps(self.endpoints_documentation, indent=2)
         self.ollama_base_url = "http://localhost:11434"
         self.model_name = "gemma3"
-        self.BASE_URL = 'https://
+        self.BASE_URL = 'https://90cb-197-54-60-164.ngrok-free.app'
         self.headers = {'Content-type': 'application/json'}
-        self.user_id = '
+        self.user_id = '8e5720d5-7243-42bd-97aa-10217309be82'
         self.max_retries = 3
         self.retry_delay = 2

@@ -697,7 +700,7 @@ class HealthcareChatbot:
         else:
             return "I apologize, I encountered a processing issue. How can I help you?"

-    def backend_call(self, data: Dict[str, Any]) -> Dict[str, Any]:
+    async def backend_call(self, data: Dict[str, Any]) -> Dict[str, Any]:
         """Make API call to backend with retry logic"""
         endpoint_url = data.get('endpoint')
         endpoint_method = data.get('method')
@@ -710,39 +713,80 @@ class HealthcareChatbot:
         endpoint_params['patient_id'] = self.user_id

         retries = 0
-        response = None
-        while retries < self.max_retries:
-            try:
-                if endpoint_method.upper() == 'GET':
-                    response = requests.get(
-                        self.BASE_URL + endpoint_url,
-                        params=endpoint_params,
-                        headers=self.headers,
-                        timeout=10
-                    )
-                elif endpoint_method.upper() in ['POST', 'PUT', 'DELETE']:
-                    response = requests.request(
-                        endpoint_method.upper(),
-                        self.BASE_URL + endpoint_url,
-                        json=endpoint_params,
-                        headers=self.headers,
-                        timeout=10
-                    )
+        # response = None
+        # while retries < self.max_retries:
+        #     try:
+        #         if endpoint_method.upper() == 'GET':
+        #             response = requests.get(
+        #                 self.BASE_URL + endpoint_url,
+        #                 params=endpoint_params,
+        #                 headers=self.headers,
+        #                 timeout=10
+        #             )
+        #         elif endpoint_method.upper() in ['POST', 'PUT', 'DELETE']:
+        #             response = requests.request(
+        #                 endpoint_method.upper(),
+        #                 self.BASE_URL + endpoint_url,
+        #                 json=endpoint_params,
+        #                 headers=self.headers,
+        #                 timeout=10
+        #             )

-                response.raise_for_status()
-                print('Backend Response:', response.json())
-                return response.json()
+        #         response.raise_for_status()
+        #         print('Backend Response:', response.json())
+        #         return response.json()

-            except requests.exceptions.RequestException as e:
-                retries += 1
-                if retries >= self.max_retries:
-                    return {
-                        "error": "Backend API call failed after multiple retries",
-                        "details": str(e),
-                        "status_code": getattr(e.response, 'status_code', None) if hasattr(e, 'response') else None
-                    }
-                time.sleep(self.retry_delay)
+        #     except requests.exceptions.RequestException as e:
+        #         retries += 1
+        #         if retries >= self.max_retries:
+        #             return {
+        #                 "error": "Backend API call failed after multiple retries",
+        #                 "details": str(e),
+        #                 "status_code": getattr(e.response, 'status_code', None) if hasattr(e, 'response') else None
+        #             }

+        # time.sleep(self.retry_delay)
+        async with aiohttp.ClientSession() as session:
+            while retries < self.max_retries:
+                try:
+                    if endpoint_method.upper() == 'GET':
+                        response = await session.get(
+                            self.BASE_URL + endpoint_url,
+                            params=endpoint_params,
+                            headers=self.headers,
+                            timeout=aiohttp.ClientTimeout(total=10)
+                        )
+                        return await response.json()
+
+                    elif endpoint_method.upper() in ['POST', 'PUT', 'DELETE']:
+                        response = await session.request(
+                            endpoint_method.upper(),
+                            self.BASE_URL + endpoint_url,
+                            json=endpoint_params,
+                            headers=self.headers,
+                            timeout=aiohttp.ClientTimeout(total=10)
+                        )
+                        return await response.json()
+
+                except aiohttp.ClientResponseError as e:
+                    retries += 1
+                    if retries >= self.max_retries:
+                        return {
+                            "error": "Backend API call failed after multiple retries",
+                            "details": str(e),
+                            "status_code": e.status
+                        }
+                    await asyncio.sleep(self.retry_delay)
+
+                except (aiohttp.ClientError, asyncio.TimeoutError) as e:
+                    retries += 1
+                    if retries >= self.max_retries:
+                        return {
+                            "error": "Backend API call failed after multiple retries",
+                            "details": str(e),
+                            "status_code": None
+                        }
+                    await asyncio.sleep(self.retry_delay)

     def handle_api_action(self, user_query, detected_language, sentiment_result, keywords, router_data):
         """Handle API-based actions using router data"""
@@ -767,7 +811,10 @@ class HealthcareChatbot:
         print(f"Final API call data: {router_data}")

         # Make backend API call
-        api_response = self.backend_call(router_data)
+        try:
+            api_response = asyncio.run(self.backend_call(router_data))
+        except:
+            print(traceback.format_exc())

         print("API response received:", api_response)

@@ -843,7 +890,7 @@ class HealthcareChatbot:
             "extracted_keywords": json.dumps(keywords),
             "sentiment_analysis": json.dumps(sentiment_result),
             "conversation_history": self.get_conversation_context(),
-            "endpoints_documentation":
+            "endpoints_documentation": self.cached_endpoints_documentation,
             "current_datetime": datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
             "timezone": "UTC",
             "current_day_name": datetime.now().strftime('%A'),
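The "use async for backend request" half of the commit turns backend_call into a coroutine and drives it from the synchronous handle_api_action via asyncio.run, as the last two code hunks above show. A minimal, self-contained sketch of that bridging-plus-retry pattern, assuming the same aiohttp/asyncio approach (fetch_json, call_from_sync_code, and the example URL are illustrative, not part of the repository):

import asyncio
import traceback
from typing import Any, Dict, Optional

import aiohttp


async def fetch_json(url: str, params: Optional[Dict[str, Any]] = None,
                     max_retries: int = 3, retry_delay: float = 2.0) -> Dict[str, Any]:
    """GET `url` and return its JSON body, retrying on client errors and timeouts."""
    retries = 0
    async with aiohttp.ClientSession() as session:
        while True:
            try:
                response = await session.get(
                    url, params=params, timeout=aiohttp.ClientTimeout(total=10)
                )
                response.raise_for_status()
                # .json() is a coroutine in aiohttp, so it must be called and awaited.
                return await response.json()
            except (aiohttp.ClientError, asyncio.TimeoutError) as exc:
                retries += 1
                if retries >= max_retries:
                    return {"error": "request failed after retries", "details": str(exc)}
                await asyncio.sleep(retry_delay)


def call_from_sync_code(url: str) -> Dict[str, Any]:
    """Bridge: run the coroutine from ordinary synchronous code with a safe default."""
    try:
        return asyncio.run(fetch_json(url))
    except Exception:
        traceback.print_exc()
        # Returning a default keeps callers from reading an unbound result.
        return {"error": "backend call failed"}


if __name__ == "__main__":
    print(call_from_sync_code("https://httpbin.org/json"))

Note that asyncio.run creates and closes a fresh event loop on every call, which is fine for this one-shot bridging use but would need a shared loop if the surrounding application ever became async itself.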