mukilan-k committed
Commit 03db4ac · verified · 1 Parent(s): 3b2b860

Update app.py

Files changed (1)
  app.py (+66 -75)
app.py CHANGED
@@ -1,86 +1,77 @@
  import gradio as gr
  import random
- from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

- # Load the model and tokenizer
- model_name = "facebook/blenderbot-400M-distill"
- tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
- model = BlenderbotForConditionalGeneration.from_pretrained(model_name)

- # Persistent user details (Memory)
- user_memory = {
-     "name": "Friend",
-     "age": "unknown",
-     "city": "unknown",
-     "likes": "unknown",
-     "favorite": "unknown"
- }

- # Friendly responses
- friendly_replies = [
-     "That sounds awesome! 😃",
-     "You're such a cool person! 😊",
-     "Wow, tell me more! 😎",
-     "Haha, I love chatting with you! 😆",
-     "You always have interesting things to say! 🤩"
- ]
-
- # Questions to keep the chat fun
- friendly_questions = [
-     "What's something fun you did today? 🎉",
-     "If you could visit any place, where would it be? 🌍",
-     "What's your favorite hobby? 🎨⚽",
-     "What's the best advice you've ever received? 🧠",
-     "If you had a superpower, what would it be? ⚡"
- ]
-
- # Chat function
- def chat(user_input, history=[]):
      global user_memory
-
-     # Check for personal detail updates
-     for key in user_memory.keys():
-         if key in user_input.lower():
-             user_memory[key] = user_input.split(key)[-1].strip(" ?.!@,#$")
-             return f"Got it! I'll remember your {key} is {user_memory[key]} 😊"
-
-     # Answer personal detail questions
-     for key, value in user_memory.items():
-         if f"what is your {key}" in user_input.lower():
-             return f"Your {key} is {value}! 😊"
-
-     # Generate AI response
-     inputs = tokenizer(user_input, return_tensors="pt")
-     reply_ids = model.generate(**inputs, max_length=100)
-     response = tokenizer.decode(reply_ids[0], skip_special_tokens=True)
-
-     # Add a friendly response and sometimes ask a fun question
-     response += " " + random.choice(friendly_replies)
-     if random.random() < 0.3:
-         response += " " + random.choice(friendly_questions)

-     return response
-
- # Gradio UI with enhanced design
- with gr.Blocks(theme=gr.themes.Soft()) as iface:
-     gr.Markdown("""
-     <h1 style='text-align: center; color: #4CAF50;'>🌟 AI Friend Chatbot - Your Virtual Buddy! 🌟</h1>
-     <p style='text-align: center;'>Let's chat and have fun together! 😊</p>
-     """)

-     chatbot = gr.Chatbot(label="Chat with Your AI Friend 🤖", bubble_full_width=False)

-     with gr.Row():
-         user_input = gr.Textbox(placeholder="Say something... ✨", label="You 💬", show_label=False)
-         send_btn = gr.Button("🚀 Send")
-
-     def respond(message, history):
-         response = chat(message)
-         history.append((message, response))
-         return history, ""

-     send_btn.click(respond, inputs=[user_input, chatbot], outputs=[chatbot, user_input])
-     user_input.submit(respond, inputs=[user_input, chatbot], outputs=[chatbot, user_input])

- # Launch the Chatbot
- iface.launch()
  import gradio as gr
  import random
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer

+ # Load the faster, lightweight model (DialoGPT-small for speed)
+ model_name = "microsoft/DialoGPT-small"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)

+ # Memory for user details (name, age, city, likes, favorites)
+ user_memory = {"name": "Friend", "age": "unknown", "city": "unknown", "likes": "unknown", "favorite": "unknown"}

+ def chat(user_input, chat_history=[]):
      global user_memory
+     user_input_lower = user_input.lower()

+     # Capture user details for memory
+     if "my name is" in user_input_lower:
+         user_memory["name"] = user_input.split("my name is")[-1].strip(" ?.!\n")
+         return f"Nice to meet you, {user_memory['name']}! 😊"

+     if "how old am i" in user_input_lower:
+         return f"You told me you are {user_memory['age']} years old! 🎂"
+     if "i am" in user_input_lower and "years old" in user_input_lower:
+         user_memory["age"] = user_input.split("i am")[-1].split("years old")[0].strip()
+         return f"Got it! You are {user_memory['age']} years old. 🎉"

+     if "where do i live" in user_input_lower:
+         return f"You told me you live in {user_memory['city']}! 🏡"
+     if "i live in" in user_input_lower:
+         user_memory["city"] = user_input.split("i live in")[-1].strip(" ?.!\n")
+         return f"Awesome! {user_memory['city']} sounds like a great place! 🌍"
+
+     if "what do i like" in user_input_lower:
+         return f"You said you like {user_memory['likes']}! 😃"
+     if "i like" in user_input_lower:
+         user_memory["likes"] = user_input.split("i like")[-1].strip(" ?.!\n")
+         return f"Nice! {user_memory['likes']} sounds amazing! 😍"
+
+     if "what is my favorite" in user_input_lower:
+         return f"Your favorite is {user_memory['favorite']}! 🎉"
+     if "my favorite is" in user_input_lower:
+         user_memory["favorite"] = user_input.split("my favorite is")[-1].strip(" ?.!\n")
+         return f"Cool! {user_memory['favorite']} is a great choice! 🏆"
+
+     # Chatbot response generation (with streaming for speed)
+     input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
+     chat_history_ids = model.generate(input_ids, max_length=50, pad_token_id=tokenizer.eos_token_id)
+     bot_response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
+
+     return bot_response

+ # Gradio UI with attractive design
+ def chat_ui():
+     with gr.Blocks(theme=gr.themes.Soft()) as ui:
+         gr.Markdown("""
+         <h1 style='text-align: center; color: blue;'>🤖 AI Friend Chatbot</h1>
+         <p style='text-align: center;'>Your friendly AI companion! Let's chat. 😊</p>
+         """)
+
+         chatbot = gr.Chatbot(label="Your AI Friend", bubble_full_width=False)
+         user_input = gr.Textbox(placeholder="Type a message...", label="You", show_label=False)
+         send_btn = gr.Button("Send 💬")
+
+         def respond(message, history):
+             response = chat(message)
+             history.append((message, response))
+             return history, ""
+
+         send_btn.click(respond, inputs=[user_input, chatbot], outputs=[chatbot, user_input])
+         user_input.submit(respond, inputs=[user_input, chatbot], outputs=[chatbot, user_input])
+
+     return ui

+ # Launch the chatbot
+ chat_ui().launch()
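
The updated chat() accepts a chat_history argument but still generates each reply from the latest message alone, so DialoGPT sees no conversation context. A minimal sketch of how multi-turn context is usually carried for DialoGPT, following the token-concatenation pattern from the microsoft/DialoGPT model card; chat_with_context and history_ids are illustrative names, not part of this commit:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

def chat_with_context(user_input, history_ids=None):
    # Encode the new user turn, terminated by the EOS token.
    new_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    # Prepend the accumulated conversation so the model can condition on earlier turns.
    input_ids = new_ids if history_ids is None else torch.cat([history_ids, new_ids], dim=-1)
    # Generate; the returned ids contain the whole conversation plus the new reply.
    history_ids = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
    reply = tokenizer.decode(history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    return reply, history_ids

In the Gradio UI, the respond callback could keep history_ids in a gr.State value so the context persists across turns.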