Update app.py
Browse files
app.py
CHANGED
@@ -1,86 +1,77 @@
|
|
1 |
import gradio as gr
|
2 |
import random
|
3 |
-
|
|
|
4 |
|
5 |
-
# Load the model
|
6 |
-
model_name = "
|
7 |
-
tokenizer =
|
8 |
-
model =
|
9 |
|
10 |
-
#
|
11 |
-
user_memory = {
|
12 |
-
"name": "Friend",
|
13 |
-
"age": "unknown",
|
14 |
-
"city": "unknown",
|
15 |
-
"likes": "unknown",
|
16 |
-
"favorite": "unknown"
|
17 |
-
}
|
18 |
|
19 |
-
|
20 |
-
friendly_replies = [
|
21 |
-
"That sounds awesome! π",
|
22 |
-
"You're such a cool person! π",
|
23 |
-
"Wow, tell me more! π",
|
24 |
-
"Haha, I love chatting with you! π",
|
25 |
-
"You always have interesting things to say! π€©"
|
26 |
-
]
|
27 |
-
|
28 |
-
# Questions to keep the chat fun
|
29 |
-
friendly_questions = [
|
30 |
-
"What's something fun you did today? π",
|
31 |
-
"If you could visit any place, where would it be? π",
|
32 |
-
"Whatβs your favorite hobby? π¨β½",
|
33 |
-
"Whatβs the best advice you've ever received? π§ ",
|
34 |
-
"If you had a superpower, what would it be? β‘"
|
35 |
-
]
|
36 |
-
|
37 |
-
# Chat function
|
38 |
-
def chat(user_input, history=[]):
|
39 |
global user_memory
|
40 |
-
|
41 |
-
# Check for personal detail updates
|
42 |
-
for key in user_memory.keys():
|
43 |
-
if key in user_input.lower():
|
44 |
-
user_memory[key] = user_input.split(key)[-1].strip(" ?.!@,#$")
|
45 |
-
return f"Got it! I'll remember your {key} is {user_memory[key]} π"
|
46 |
-
|
47 |
-
# Answer personal detail questions
|
48 |
-
for key, value in user_memory.items():
|
49 |
-
if f"what is your {key}" in user_input.lower():
|
50 |
-
return f"Your {key} is {value}! π"
|
51 |
-
|
52 |
-
# Generate AI response
|
53 |
-
inputs = tokenizer(user_input, return_tensors="pt")
|
54 |
-
reply_ids = model.generate(**inputs, max_length=100)
|
55 |
-
response = tokenizer.decode(reply_ids[0], skip_special_tokens=True)
|
56 |
-
|
57 |
-
# Add a friendly response and sometimes ask a fun question
|
58 |
-
response += " " + random.choice(friendly_replies)
|
59 |
-
if random.random() < 0.3:
|
60 |
-
response += " " + random.choice(friendly_questions)
|
61 |
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
gr.Markdown("""
|
67 |
-
<h1 style='text-align: center; color: #4CAF50;'>π AI Friend Chatbot - Your Virtual Buddy! π</h1>
|
68 |
-
<p style='text-align: center;'>Let's chat and have fun together! π</p>
|
69 |
-
""")
|
70 |
|
71 |
-
|
|
|
|
|
|
|
|
|
72 |
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
81 |
|
82 |
-
|
83 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
84 |
|
85 |
-
# Launch the
|
86 |
-
|
|
|
import gradio as gr
import random
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# NOTE(review): `random` and `torch` are not used anywhere in this version of
# the file (the old friendly_replies code that used random was removed) —
# confirm before deleting the imports.

# Load the faster, lightweight model (DialoGPT-small for speed)
# NOTE(review): this runs at import time and downloads weights on first run,
# so importing this module blocks until the model is fetched/loaded.
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Memory for user details (name, age, city, likes, favorites)
# Module-level mutable state: shared by ALL chat sessions of this process,
# so two simultaneous users overwrite each other's details.
user_memory = {"name": "Friend", "age": "unknown", "city": "unknown", "likes": "unknown", "favorite": "unknown"}
|
def chat(user_input, chat_history=None):
    """Reply to a single user message.

    Personal-detail statements and questions (name, age, city, likes,
    favorite) are handled from/into the module-level ``user_memory`` dict;
    anything else is answered by the DialoGPT model.

    Args:
        user_input: Raw message text typed by the user.
        chat_history: Unused; kept for backward compatibility with the old
            signature. (Was a mutable default ``[]`` — an anti-pattern —
            now ``None``.)

    Returns:
        The bot's reply as a plain string.
    """
    global user_memory
    user_input_lower = user_input.lower()

    def _after(phrase):
        # Text following *phrase*, matched case-insensitively but extracted
        # from the ORIGINAL input so the user's capitalization is preserved.
        # (The old code split the original-case string on a lowercase phrase,
        # so "My name is Sam" matched the check but never split — the whole
        # sentence got stored. Finding the index in the lowered copy fixes it.)
        idx = user_input_lower.index(phrase) + len(phrase)
        return user_input[idx:].strip(" ?.!\n")

    # Capture user details for memory
    if "my name is" in user_input_lower:
        user_memory["name"] = _after("my name is")
        return f"Nice to meet you, {user_memory['name']}! π"

    if "how old am i" in user_input_lower:
        return f"You told me you are {user_memory['age']} years old! π"
    if "i am" in user_input_lower and "years old" in user_input_lower:
        # Keep only the text between "i am" and "years old".
        rest = _after("i am")
        cut = rest.lower().find("years old")
        user_memory["age"] = (rest[:cut] if cut >= 0 else rest).strip()
        return f"Got it! You are {user_memory['age']} years old. π"

    if "where do i live" in user_input_lower:
        return f"You told me you live in {user_memory['city']}! π‘"
    if "i live in" in user_input_lower:
        user_memory["city"] = _after("i live in")
        return f"Awesome! {user_memory['city']} sounds like a great place! π"

    if "what do i like" in user_input_lower:
        return f"You said you like {user_memory['likes']}! π"
    if "i like" in user_input_lower:
        user_memory["likes"] = _after("i like")
        return f"Nice! {user_memory['likes']} sounds amazing! π"

    if "what is my favorite" in user_input_lower:
        return f"Your favorite is {user_memory['favorite']}! π"
    if "my favorite is" in user_input_lower:
        user_memory["favorite"] = _after("my favorite is")
        return f"Cool! {user_memory['favorite']} is a great choice! π"

    # Chatbot response generation (single non-streaming generate call).
    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    chat_history_ids = model.generate(input_ids, max_length=50, pad_token_id=tokenizer.eos_token_id)
    # Decode only the tokens generated AFTER the prompt.
    bot_response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)

    return bot_response
|
# Gradio UI with attractive design
def chat_ui():
    """Assemble the Gradio Blocks interface and return it (not yet launched)."""
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("""
        <h1 style='text-align: center; color: blue;'>π€ AI Friend Chatbot</h1>
        <p style='text-align: center;'>Your friendly AI companion! Let's chat. π</p>
        """)

        transcript = gr.Chatbot(label="Your AI Friend", bubble_full_width=False)
        message_box = gr.Textbox(placeholder="Type a message...", label="You", show_label=False)
        send_button = gr.Button("Send π¬")

        def on_message(text, log):
            # Get the bot reply, append the (user, bot) pair, clear the box.
            log.append((text, chat(text)))
            return log, ""

        # Both the button click and pressing Enter in the textbox send.
        send_button.click(on_message, inputs=[message_box, transcript], outputs=[transcript, message_box])
        message_box.submit(on_message, inputs=[message_box, transcript], outputs=[transcript, message_box])

    return demo
|
# Launch the chatbot — guarded so importing this module for reuse/testing
# does not start the web server as a side effect.
if __name__ == "__main__":
    chat_ui().launch()