# Blenderbot 3B chatbot — Gradio app for a Hugging Face Space.
import gradio as gr
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
# Load the Blenderbot model and tokenizer once at import time so every
# request reuses the same weights.
# NOTE(review): facebook/blenderbot-3B downloads several GB of weights on
# first run and needs substantial RAM — confirm the host can hold it.
MODEL_NAME = "facebook/blenderbot-3B"
tokenizer = BlenderbotTokenizer.from_pretrained(MODEL_NAME)
model = BlenderbotForConditionalGeneration.from_pretrained(MODEL_NAME)
def chatbot_response(user_input, chat_history=None):
    """Generate a Blenderbot reply and append the turn to the history.

    Args:
        user_input: The user's latest message.
        chat_history: List of (user, assistant) tuples from prior turns.
            Defaults to a fresh empty list.

    Returns:
        A (chat_history, chat_history) pair — the same updated list twice,
        matching the Gradio outputs [chatbot, chat_state].
    """
    # Fix: the original used a mutable default (`chat_history=[]`), which is
    # shared across every call that omits the argument.
    if chat_history is None:
        chat_history = []
    # Fix: the original built history_text but never used it in the prompt,
    # so the model had no conversational memory despite the docstring.
    history_text = " ".join(
        f"User: {user} Assistant: {assistant}" for user, assistant in chat_history
    )
    # .strip() keeps the first-turn prompt identical to the original
    # (no leading space when history is empty).
    formatted_input = (
        f"{history_text} You are a helpful assistant. "
        f"The user says: {user_input}"
    ).strip()
    # truncation=True guards against exceeding the model's max input length
    # once the accumulated history grows long.
    inputs = tokenizer(formatted_input, return_tensors="pt", truncation=True)
    reply_ids = model.generate(**inputs, max_length=100)
    response = tokenizer.decode(reply_ids[0], skip_special_tokens=True)
    # Record the completed turn; Gradio's Chatbot renders (user, bot) pairs.
    chat_history.append((user_input, response))
    # Returned twice: once for the Chatbot display, once for the State.
    return chat_history, chat_history
# Assemble the Gradio chat interface and start serving it.
with gr.Blocks() as app:
    gr.Markdown("# 🤖 Blenderbot 3B Chatbot")
    conversation_view = gr.Chatbot()
    message_box = gr.Textbox(label="Your message")
    send_button = gr.Button("Send")
    reset_button = gr.Button("Clear Chat")
    history_state = gr.State([])

    # Send: run the model, then refresh both the display and stored history.
    send_button.click(
        chatbot_response,
        inputs=[message_box, history_state],
        outputs=[conversation_view, history_state],
    )
    # Clear: reset the display and the stored history to empty lists.
    reset_button.click(
        lambda: ([], []),
        inputs=[],
        outputs=[conversation_view, history_state],
    )

# Start the web server.
app.launch()