Update app.py
app.py CHANGED
@@ -1,28 +1,7 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-from textblob import TextBlob
-import json
-import os
-import time
-import logging

-
-logging.basicConfig(level=logging.INFO)
-
-# Get the API token from the environment variable
-api_token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
-
-client = InferenceClient(
-    model="Futuresony/future_ai_12_10_2024.gguf",
-    token=api_token
-)
-
-# Directory to store interactions and feedback
-DATA_DIR = "data"
-INTERACTIONS_FILE = os.path.join(DATA_DIR, "interactions.json")
-
-# Ensure the data directory exists
-os.makedirs(DATA_DIR, exist_ok=True)
+client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")

 def format_alpaca_prompt(user_input, system_prompt, history):
     """Formats input in Alpaca/LLaMA style"""
@@ -37,116 +16,23 @@ def format_alpaca_prompt(user_input, system_prompt, history):
     """
     return prompt

-def analyze_sentiment(message):
-    """Analyze the sentiment of the user's message"""
-    blob = TextBlob(message)
-    sentiment = blob.sentiment.polarity
-    return sentiment
-
-def save_interaction(user_input, chatbot_response, feedback=None):
-    """Save the interaction and feedback to a file"""
-    interaction = {
-        "user_input": user_input,
-        "chatbot_response": chatbot_response,
-        "feedback": feedback,
-        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
-    }
-    if os.path.exists(INTERACTIONS_FILE):
-        with open(INTERACTIONS_FILE, "r") as file:
-            interactions = json.load(file)
-    else:
-        interactions = []
-
-    interactions.append(interaction)
-
-    with open(INTERACTIONS_FILE, "w") as file:
-        json.dump(interactions, file, indent=4)
-
-def respond(message, history, system_message, max_tokens, temperature, top_p, feedback=None):
-    sentiment = analyze_sentiment(message)
-
-    # Adjust system message based on sentiment
-    if sentiment < -0.2:
-        system_message = "You are a sympathetic Chatbot."
-    elif sentiment > 0.2:
-        system_message = "You are an enthusiastic Chatbot."
-    else:
-        system_message = "You are a friendly Chatbot."
-
+def respond(message, history, system_message, max_tokens, temperature, top_p):
     formatted_prompt = format_alpaca_prompt(message, system_message, history)

-
-
-
-
-
-
-                max_new_tokens=max_tokens,
-                temperature=temperature,
-                top_p=top_p,
-            )
-            break  # Exit the loop if the request is successful
-        except Exception as e:
-            logging.error(f"Attempt {attempt + 1} failed: {e}")
-            if attempt < max_retries - 1:
-                time.sleep(2 ** attempt)  # Exponential backoff
-            else:
-                raise e
+    response = client.text_generation(
+        formatted_prompt,
+        max_new_tokens=max_tokens,
+        temperature=temperature,
+        top_p=top_p,
+    )

     # ✅ Extract only the response
     cleaned_response = response.split("### Response:")[-1].strip()
-
+
     history.append((message, cleaned_response))  # ✅ Update history with the new message and response
-
-    save_interaction(message, cleaned_response, feedback)  # ✅ Save the interaction and feedback
-
+
     yield cleaned_response  # ✅ Output only the answer

-def collect_feedback(response, feedback):
-    """Collect user feedback on the chatbot's response"""
-    save_interaction(response, feedback=feedback)
-
-def view_interactions():
-    if os.path.exists(INTERACTIONS_FILE):
-        with open(INTERACTIONS_FILE, "r") as file:
-            interactions = json.load(file)
-        return json.dumps(interactions, indent=4)
-    else:
-        return "No interactions found."
-
-def download_interactions():
-    """Provide the interactions file for download"""
-    if os.path.exists(INTERACTIONS_FILE):
-        return INTERACTIONS_FILE
-    else:
-        return None
-
-# Create a Gradio interface to display interactions
-view_interface = gr.Interface(
-    fn=view_interactions,
-    inputs=[],
-    outputs="text",
-    title="View Interactions"
-)
-
-# Create a Gradio interface for downloading interactions
-download_interface = gr.Interface(
-    fn=download_interactions,
-    inputs=[],
-    outputs=gr.File(label="Download Interactions"),
-    title="Download Interactions"
-)
-
-feedback_interface = gr.Interface(
-    fn=collect_feedback,
-    inputs=[
-        gr.Textbox(label="Response"),
-        gr.Radio(choices=["Good", "Bad"], label="Feedback"),
-    ],
-    outputs="text",
-    title="Feedback Interface"
-)
-
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
@@ -157,9 +43,5 @@ demo = gr.ChatInterface(
     ],
 )

-# Launch all interfaces
 if __name__ == "__main__":
-    demo.launch()
-    feedback_interface.launch()
-    view_interface.launch()
-    download_interface.launch()
+    demo.launch()
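Note on authentication: the removed setup read HUGGINGFACEHUB_API_TOKEN from the environment and passed it to InferenceClient, while the simplified one-liner relies on the model being reachable without credentials. If the endpoint requires a token, it can still be supplied; a minimal sketch, assuming the token is stored as the Space secret HUGGINGFACEHUB_API_TOKEN:

import os
from huggingface_hub import InferenceClient

# Read the token from the environment; None is acceptable for public models
api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")

client = InferenceClient(
    model="Futuresony/future_ai_12_10_2024.gguf",
    token=api_token,
)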
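The body of format_alpaca_prompt is outside the hunks shown above; only its signature and the fact that respond() splits the output on "### Response:" are visible. Purely as a hypothetical illustration of that Alpaca-style layout, and not the file's actual implementation, a formatter along these lines would produce a prompt that the split can parse:

def format_alpaca_prompt_sketch(user_input, system_prompt, history):
    """Hypothetical Alpaca-style formatter; the real body is not shown in this diff."""
    # Fold earlier (user, assistant) turns into the instruction context
    past_turns = "\n".join(f"User: {u}\nAssistant: {a}" for u, a in history)
    return (
        f"{system_prompt}\n\n"
        f"### Instruction:\n{past_turns}\n{user_input}\n\n"
        "### Response:\n"
    )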
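The old respond() wrapped generation in a retry loop with exponential backoff (the loop's opening lines are not visible above, but the break, except, and time.sleep(2 ** attempt) branches are). The simplified version calls client.text_generation() once, so a failing request surfaces immediately. A minimal sketch of reinstating that pattern around the new call, assuming the module-level client from app.py, an illustrative helper name generate_with_retries, and an arbitrary three attempts (the original retry count is not shown):

import logging
import time

logging.basicConfig(level=logging.INFO)

def generate_with_retries(formatted_prompt, max_tokens, temperature, top_p, max_retries=3):
    """Call the endpoint, retrying with exponential backoff before giving up."""
    # `client` is assumed to be the module-level InferenceClient defined in app.py
    for attempt in range(max_retries):
        try:
            return client.text_generation(
                formatted_prompt,
                max_new_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
            )
        except Exception as e:
            logging.error(f"Attempt {attempt + 1} failed: {e}")
            if attempt < max_retries - 1:
                time.sleep(2 ** attempt)  # back off 1s, 2s, 4s, ...
            else:
                raise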