Futuresony committed on
Commit
d88b860
·
verified ·
1 Parent(s): 59414a8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -23
app.py CHANGED
@@ -5,9 +5,18 @@ from collections import defaultdict
5
  # Initialize the model client
6
  client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")
7
 
8
- # Store user preferences & history
9
- user_preferences = defaultdict(int) # Tracks keywords & topics
10
- session_histories = defaultdict(list) # Stores conversation history per session
 
 
 
 
 
 
 
 
 
11
 
12
  def extract_keywords(text):
13
  """Extracts simple keywords from user input."""
@@ -18,46 +27,41 @@ def extract_keywords(text):
18
  def respond(message, history, system_message, max_tokens, temperature, top_p):
19
  session_id = id(history) # Unique ID for each session
20
  session_history = session_histories[session_id] # Retrieve session history
21
-
22
  # Extract keywords & update preferences
23
  keywords = extract_keywords(message)
24
  for kw in keywords:
25
  user_preferences[kw] += 1
26
 
27
- # Add past conversation to message history
28
- messages = [{"role": "system", "content": system_message}]
29
- for user_msg, bot_response in session_history:
30
- messages.append({"role": "user", "content": user_msg})
31
- messages.append({"role": "assistant", "content": bot_response})
32
-
33
- # Append current user message
34
- messages.append({"role": "user", "content": message})
35
 
36
- # Generate response (non-streaming fix)
37
- response_data = client.chat_completion(
38
- messages,
39
- max_tokens=max_tokens,
40
  temperature=temperature,
41
  top_p=top_p,
42
  )
43
 
44
- response = response_data["choices"][0]["message"]["content"]
 
45
 
46
  # Save to session history
47
- session_history.append((message, response))
48
 
49
- # Optionally, adapt responses based on learned preferences
50
  most_asked = max(user_preferences, key=user_preferences.get, default=None)
51
  if most_asked and most_asked in message.lower():
52
- response += f"\n\nI see you're interested in {most_asked} a lot! Want to explore more details?"
53
-
54
- return response # Return final response instead of streaming
55
 
56
  # Create Chat Interface
57
  demo = gr.ChatInterface(
58
  respond,
59
  additional_inputs=[
60
- gr.Textbox(value="You are a friendly chatbot that learns user interests.", label="System message"),
61
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
62
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
63
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
 
5
  # Initialize the model client
6
  client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")
7
 
8
+ # Store user preferences & chat history
9
+ user_preferences = defaultdict(int) # Tracks user interests
10
+ session_histories = defaultdict(list) # Stores chat history per session
11
+
12
+ def format_chat_history(history, system_message):
13
+ """Formats history into a single string in Alpaca/LLaMA style."""
14
+ chat_str = f"{system_message}\n\n" # Start with system message
15
+
16
+ for user_msg, bot_response in history:
17
+ chat_str += f"### Instruction:\n{user_msg}\n\n### Response:\n{bot_response}\n\n"
18
+
19
+ return chat_str # Return formatted conversation history
20
 
21
  def extract_keywords(text):
22
  """Extracts simple keywords from user input."""
 
27
  def respond(message, history, system_message, max_tokens, temperature, top_p):
28
  session_id = id(history) # Unique ID for each session
29
  session_history = session_histories[session_id] # Retrieve session history
30
+
31
  # Extract keywords & update preferences
32
  keywords = extract_keywords(message)
33
  for kw in keywords:
34
  user_preferences[kw] += 1
35
 
36
+ # Format full conversation as a single string
37
+ formatted_input = format_chat_history(session_history, system_message) + f"### Instruction:\n{message}\n\n### Response:\n"
 
 
 
 
 
 
38
 
39
+ # Send request (fix: ensure input is a single string)
40
+ response = client.text_generation(
41
+ formatted_input,
42
+ max_new_tokens=max_tokens,
43
  temperature=temperature,
44
  top_p=top_p,
45
  )
46
 
47
+ # Extract only the response
48
+ cleaned_response = response.split("### Response:")[-1].strip()
49
 
50
  # Save to session history
51
+ session_history.append((message, cleaned_response))
52
 
53
+ # Adapt response based on learning
54
  most_asked = max(user_preferences, key=user_preferences.get, default=None)
55
  if most_asked and most_asked in message.lower():
56
+ cleaned_response += f"\n\nNaona unapenda mada ya '{most_asked}' sana! Unataka kujua zaidi?"
57
+
58
+ return cleaned_response # Fixed: Returns only the final response
59
 
60
  # Create Chat Interface
61
  demo = gr.ChatInterface(
62
  respond,
63
  additional_inputs=[
64
+ gr.Textbox(value="Wewe ni msaidizi wa kirafiki anayejifunza upendeleo wa mtumiaji.", label="System message"),
65
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
66
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
67
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),