# i_am_lan / app.py
# Source of the Hugging Face Space "i_am_lan" (author: mukilan-k, commit 03db4ac, ~3.48 kB).
# NOTE(review): the original paste included web-page chrome ("raw / history / blame");
# it has been converted to this comment so the file is valid Python.
import gradio as gr
import random
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the faster, lightweight model (DialoGPT-small for speed)
model_name = "microsoft/DialoGPT-small"
# Tokenizer and model are loaded once at import time and shared by chat() below.
# Note: from_pretrained may download weights on first run (network access required).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Memory for user details (name, age, city, likes, favorite).
# Module-level so it persists across chat() calls for the process lifetime.
# NOTE(review): shared by all users of the app — per-session state would need
# Gradio State; confirm single-user assumption.
user_memory = {"name": "Friend", "age": "unknown", "city": "unknown", "likes": "unknown", "favorite": "unknown"}


def _tail_after(text, lowered, phrase):
    """Return the text following the LAST occurrence of *phrase* in *text*.

    *lowered* must be ``text.lower()``: the phrase is located in the lowered
    copy (case-insensitive match) but the slice is taken from the original so
    the user's casing is preserved. Trailing punctuation/whitespace stripped.
    """
    start = lowered.rfind(phrase) + len(phrase)
    return text[start:].strip(" ?.!\n")


def chat(user_input, chat_history=None):
    """Answer one user message and return the bot's reply string.

    Memory phrases ("my name is ...", "i live in ...", etc.) get canned
    responses and update the module-level ``user_memory`` dict; anything else
    is generated by the DialoGPT model.

    ``chat_history`` is accepted for interface compatibility but unused
    (previously a shared mutable ``[]`` default — now ``None``).
    """
    lowered = user_input.lower()

    # --- capture / recall user details -----------------------------------
    # Bug fix: matching AND extraction both use the lowered string for
    # locating phrases, so "My name is Bob" works, not just "my name is bob".
    if "my name is" in lowered:
        user_memory["name"] = _tail_after(user_input, lowered, "my name is")
        return f"Nice to meet you, {user_memory['name']}! 😊"
    if "how old am i" in lowered:
        return f"You told me you are {user_memory['age']} years old! 🎂"
    if "i am" in lowered and "years old" in lowered:
        # Age is the text between the last "i am" and the next "years old".
        start = lowered.rfind("i am") + len("i am")
        end = lowered.find("years old", start)
        if end == -1:  # "years old" only occurred before "i am"; take the rest
            end = len(user_input)
        user_memory["age"] = user_input[start:end].strip()
        return f"Got it! You are {user_memory['age']} years old. 🎉"
    if "where do i live" in lowered:
        return f"You told me you live in {user_memory['city']}! 🏡"
    if "i live in" in lowered:
        user_memory["city"] = _tail_after(user_input, lowered, "i live in")
        return f"Awesome! {user_memory['city']} sounds like a great place! 🌍"
    if "what do i like" in lowered:
        return f"You said you like {user_memory['likes']}! 😃"
    if "i like" in lowered:
        user_memory["likes"] = _tail_after(user_input, lowered, "i like")
        return f"Nice! {user_memory['likes']} sounds amazing! 😍"
    if "what is my favorite" in lowered:
        return f"Your favorite is {user_memory['favorite']}! 🎉"
    if "my favorite is" in lowered:
        user_memory["favorite"] = _tail_after(user_input, lowered, "my favorite is")
        return f"Cool! {user_memory['favorite']} is a great choice! 🏆"

    # --- fall back to the language model ---------------------------------
    # Greedy decoding, capped at 50 tokens total; reply is the generated
    # continuation after the prompt tokens.
    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    chat_history_ids = model.generate(input_ids, max_length=50, pad_token_id=tokenizer.eos_token_id)
    bot_response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    return bot_response
# Gradio front-end for the chatbot.
def chat_ui():
    """Assemble the Gradio Blocks interface and return it (caller launches it)."""

    def respond(message, history):
        # Delegate to the chat() brain, append the (user, bot) pair,
        # and clear the textbox via the second output.
        reply = chat(message)
        history.append((message, reply))
        return history, ""

    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("""
<h1 style='text-align: center; color: blue;'>🤖 AI Friend Chatbot</h1>
<p style='text-align: center;'>Your friendly AI companion! Let's chat. 😊</p>
""")
        conversation = gr.Chatbot(label="Your AI Friend", bubble_full_width=False)
        message_box = gr.Textbox(placeholder="Type a message...", label="You", show_label=False)
        send_btn = gr.Button("Send 💬")

        # Button click and textbox Enter both trigger the same handler.
        for trigger in (send_btn.click, message_box.submit):
            trigger(respond, inputs=[message_box, conversation], outputs=[conversation, message_box])

    return demo
# Launch the chatbot
# Runs at import time (standard for Hugging Face Spaces, where app.py is the
# entry point); blocks serving the Gradio app.
chat_ui().launch()