GuglielmoTor committed on
Commit de15097 · verified · 1 Parent(s): d6d277f

Create chatbot_handler.py

Files changed (1)
  1. chatbot_handler.py +144 -0
chatbot_handler.py ADDED
@@ -0,0 +1,144 @@
+ # chatbot_handler.py
+ import asyncio
+ import json
+ import logging
+ import os
+
+ import aiohttp  # Asynchronous HTTP client, so API calls don't block the event loop
+
+ # Ensure GEMINI_API_KEY is set in your environment variables
+ api_key = os.getenv('GEMINI_API_KEY')
+
+ def format_history_for_gemini(gradio_chat_history: list) -> list:
+     """
+     Converts Gradio chat history (a list of dicts with 'role' and 'content' keys)
+     into the Gemini API's 'contents' format.
+     Gemini expects the roles 'user' and 'model', so any non-'user' role is mapped
+     to 'model'; non-string content (e.g. a gr.Plot) is skipped.
+     """
+     gemini_contents = []
+     for msg in gradio_chat_history:
+         role = "user" if msg["role"] == "user" else "model"  # Gemini uses 'model' for the assistant
+         # Ensure content is a string; skip it otherwise (e.g. if a gr.Plot was accidentally in the history)
+         if isinstance(msg.get("content"), str):
+             gemini_contents.append({"role": role, "parts": [{"text": msg["content"]}]})
+         else:
+             logging.warning(f"Skipping non-string content in chat history for Gemini: {msg.get('content')}")
+     return gemini_contents
+
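+ # Example of the conversion performed above:
+ #   [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]
+ # becomes
+ #   [{"role": "user", "parts": [{"text": "Hi"}]},
+ #    {"role": "model", "parts": [{"text": "Hello!"}]}]
+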
+ async def generate_llm_response(user_message: str, plot_id: str, plot_label: str, chat_history_for_plot: list):
+     """
+     Generates a response from the LLM via the Gemini API.
+
+     Args:
+         user_message (str): The latest message from the user.
+         plot_id (str): The ID of the plot being discussed.
+         plot_label (str): The label of the plot being discussed.
+         chat_history_for_plot (list): The current conversation history for this plot.
+             This list already includes the latest user_message.
+
+     Returns:
+         str: The LLM's response text.
+     """
+     logging.info(f"Generating LLM response for plot_id: {plot_id} ('{plot_label}'). User message: '{user_message}'")
+
+     # chat_history_for_plot already contains the full conversation, including the
+     # latest user message; the initial system-like prompt is the first assistant
+     # message in the history.
+     gemini_formatted_history = format_history_for_gemini(chat_history_for_plot)
+
+     if not gemini_formatted_history:
+         logging.error("Cannot generate LLM response: formatted history is empty.")
+         return "I'm sorry, there was an issue processing the conversation history."
+
+     # Construct the payload for the Gemini API
+     payload = {
+         "contents": gemini_formatted_history,
+         "generationConfig": {  # Optional: tune generation parameters
+             "temperature": 0.7,
+             "topK": 1,
+             "topP": 1,
+             "maxOutputTokens": 2048,
+         }
+     }
+
+     # Using the gemini-2.5-flash-preview-05-20 model
+     api_url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-preview-05-20:generateContent?key={api_key}"
+
+     async with aiohttp.ClientSession() as session:
+         try:
+             async with session.post(api_url,
+                                     headers={'Content-Type': 'application/json'},
+                                     json=payload,
+                                     timeout=aiohttp.ClientTimeout(total=45)) as resp:
+                 response_json = await resp.json()
+                 logging.debug(f"LLM API Raw Response for '{plot_label}': {json.dumps(response_json, indent=2)}")
+
+                 if resp.status != 200:
+                     error_detail = response_json.get('error', {}).get('message', 'Unknown API error')
+                     # Check for a content-policy block from Gemini, even on an error status
+                     if response_json.get("promptFeedback") and response_json["promptFeedback"].get("blockReason"):
+                         reason = response_json["promptFeedback"]["blockReason"]
+                         safety_ratings_info = [f"{rating['category']}: {rating['probability']}" for rating in response_json['promptFeedback'].get('safetyRatings', [])]
+                         details = f" Safety Ratings: {', '.join(safety_ratings_info)}" if safety_ratings_info else ""
+                         logging.warning(f"Content blocked by API (Status {resp.status}) for '{plot_label}'. Reason: {reason}.{details}")
+                         return f"I'm sorry, I can't respond to that due to content policy: {reason}."
+                     logging.error(f"LLM API Error (Status {resp.status}) for '{plot_label}': {error_detail}")
+                     return f"Sorry, the AI model returned an error (Status {resp.status}). Please try again later."
+
+                 if response_json.get("candidates") and \
+                    response_json["candidates"][0].get("content") and \
+                    response_json["candidates"][0]["content"].get("parts") and \
+                    response_json["candidates"][0]["content"]["parts"][0].get("text"):
+                     response_text = response_json["candidates"][0]["content"]["parts"][0]["text"]
+                     logging.info(f"LLM generated response for '{plot_label}': {response_text[:100]}...")
+                     return response_text
+                 # Check promptFeedback even on a 200 when candidates are missing (e.g. blocked content)
+                 elif response_json.get("promptFeedback") and response_json["promptFeedback"].get("blockReason"):
+                     reason = response_json["promptFeedback"]["blockReason"]
+                     safety_ratings_info = [f"{rating['category']}: {rating['probability']}" for rating in response_json['promptFeedback'].get('safetyRatings', [])]
+                     details = f" Safety Ratings: {', '.join(safety_ratings_info)}" if safety_ratings_info else ""
+                     logging.warning(f"Content blocked by API (Status 200 but no candidate) for '{plot_label}'. Reason: {reason}.{details}")
+                     return f"I'm sorry, your request was processed but could not be answered due to content policy: {reason}."
+                 else:
+                     logging.error(f"Unexpected LLM API response structure for '{plot_label}': {response_json}")
+                     return "Sorry, I received an unexpected or empty response from the AI model."
+
+         except asyncio.TimeoutError:
+             # aiohttp signals an exceeded ClientTimeout with asyncio.TimeoutError;
+             # aiohttp.ClientTimeout itself is a configuration object, not an exception.
+             logging.error(f"LLM API call timed out for '{plot_label}'.", exc_info=True)
+             return "Sorry, the request to the AI model timed out. Please try again."
+         except aiohttp.ClientError as e:
+             logging.error(f"Error calling LLM API (aiohttp) for '{plot_label}': {e}", exc_info=True)
+             return f"Sorry, I couldn't connect to the AI model at the moment. Network Error: {type(e).__name__}."
+         except json.JSONDecodeError as e:
+             logging.error(f"Error decoding LLM API response for '{plot_label}': {e}", exc_info=True)
+             try:
+                 raw_text_response = await resp.text()
+                 logging.error(f"LLM API Raw Text Response (on JSONDecodeError) for '{plot_label}': {raw_text_response}")
+             except Exception as read_err:
+                 logging.error(f"Could not read raw text response: {read_err}")
+             return "Sorry, I received an unreadable response from the AI model."
+         except Exception as e:
+             logging.error(f"Generic error during LLM call for '{plot_label}': {e}", exc_info=True)
+             return f"An unexpected error occurred while trying to get an AI response: {type(e).__name__}."