|
import gradio as gr |
|
import random |
|
import torch |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
|
|
|
|
# Compact conversational model used for the free-form fallback replies.
model_name = "microsoft/DialoGPT-small"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Single-session user memory; "unknown" marks facts not yet learned.
# NOTE(review): this is module-level state, so it is shared across all
# concurrent users of the app.
_memory_fields = ("age", "city", "likes", "favorite")
user_memory = {"name": "Friend", **{field: "unknown" for field in _memory_fields}}
|
|
|
def chat(user_input, chat_history=None):
    """Return a reply to *user_input*.

    Known "memory" phrases (name / age / city / likes / favorite) are parsed
    and stored in the module-level ``user_memory`` dict; any other message is
    answered free-form by the DialoGPT model.

    Parameters
    ----------
    user_input : str
        Raw message typed by the user.
    chat_history : list | None
        Unused; kept only for backward compatibility with existing callers.
        (Was a mutable default ``[]`` — a classic Python pitfall — now None.)

    Returns
    -------
    str
        The bot's reply.
    """
    global user_memory
    user_input_lower = user_input.lower()

    def _after(marker):
        # Text after the last (case-insensitive) occurrence of *marker*,
        # sliced from the ORIGINAL string so the user's capitalization is
        # preserved.  The old code split the original-case input on a
        # lowercase marker, so e.g. "My name is Bob" was never parsed.
        idx = user_input_lower.rindex(marker) + len(marker)
        return user_input[idx:].strip(" ?.!\n")

    if "my name is" in user_input_lower:
        user_memory["name"] = _after("my name is")
        return f"Nice to meet you, {user_memory['name']}! π"

    if "how old am i" in user_input_lower:
        return f"You told me you are {user_memory['age']} years old! π"
    if "i am" in user_input_lower and "years old" in user_input_lower:
        # Ages are digits, so parsing the lowercased copy loses nothing and
        # makes both markers match case-insensitively.
        user_memory["age"] = user_input_lower.split("i am")[-1].split("years old")[0].strip()
        return f"Got it! You are {user_memory['age']} years old. π"

    if "where do i live" in user_input_lower:
        return f"You told me you live in {user_memory['city']}! π‘"
    if "i live in" in user_input_lower:
        user_memory["city"] = _after("i live in")
        return f"Awesome! {user_memory['city']} sounds like a great place! π"

    # Order matters: "what do i like" contains "i like", so the query form
    # must be checked before the statement form.
    if "what do i like" in user_input_lower:
        return f"You said you like {user_memory['likes']}! π"
    if "i like" in user_input_lower:
        user_memory["likes"] = _after("i like")
        return f"Nice! {user_memory['likes']} sounds amazing! π"

    if "what is my favorite" in user_input_lower:
        return f"Your favorite is {user_memory['favorite']}! π"
    if "my favorite is" in user_input_lower:
        user_memory["favorite"] = _after("my favorite is")
        return f"Cool! {user_memory['favorite']} is a great choice! π"

    # Fallback: free-form reply from the language model.
    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    # NOTE(review): max_length counts the prompt tokens too, so long inputs
    # leave little or no room for the reply — consider max_new_tokens.
    chat_history_ids = model.generate(input_ids, max_length=50, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens (everything past the prompt).
    bot_response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    return bot_response
|
|
|
|
|
def chat_ui():
    """Assemble the Gradio Blocks interface for the chatbot and return it
    (the caller is responsible for launching it)."""

    def _turn(message, history):
        # One conversation turn: get a reply, extend the transcript,
        # and blank out the textbox.
        reply = chat(message)
        history.append((message, reply))
        return history, ""

    with gr.Blocks(theme=gr.themes.Soft()) as ui:
        gr.Markdown("""
        <h1 style='text-align: center; color: blue;'>π€ AI Friend Chatbot</h1>
        <p style='text-align: center;'>Your friendly AI companion! Let's chat. π</p>
        """)

        transcript = gr.Chatbot(label="Your AI Friend", bubble_full_width=False)
        message_box = gr.Textbox(placeholder="Type a message...", label="You", show_label=False)
        send_button = gr.Button("Send π¬")

        # Both clicking the button and pressing Enter submit the message.
        for trigger in (send_button.click, message_box.submit):
            trigger(_turn, inputs=[message_box, transcript], outputs=[transcript, message_box])

    return ui
|
|
|
|
|
if __name__ == "__main__":
    # Launch the web app only when run as a script, so importing this
    # module (e.g. for tests) does not start a server.
    chat_ui().launch()
|
|