Update app.py
app.py CHANGED

@@ -7,12 +7,20 @@ tokenizer = BlenderbotTokenizer.from_pretrained(MODEL_NAME)
 model = BlenderbotForConditionalGeneration.from_pretrained(MODEL_NAME)

 def chatbot_response(user_input, chat_history=[]):
-    """Generates a response from Blenderbot"""
-
-
+    """Generates a response from Blenderbot with memory."""
+
+    # Format input with chat history
+    history_text = " ".join([f"User: {msg[0]} Assistant: {msg[1]}" for msg in chat_history])
+
+    formatted_input = f"{history_text} User: {user_input} Assistant:"
+    inputs = tokenizer(formatted_input, return_tensors="pt")
+    reply_ids = model.generate(**inputs, max_length=100)
     response = tokenizer.decode(reply_ids[0], skip_special_tokens=True)
-
-
+
+    # Update chat history correctly
+    chat_history.append((user_input, response))  # ✅ Now a tuple (user message, bot response)
+
+    return chat_history  # ✅ Returns history formatted correctly

 # Set up Gradio interface
 with gr.Blocks() as demo:
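
For illustration, here is what the flattened prompt built inside chatbot_response looks like once one exchange is already in chat_history; the example strings are invented, but the formatting logic is copied from the diff above:

    # Hypothetical one-turn history; the strings are made up for illustration.
    chat_history = [("Hi there!", "Hello! How are you today?")]
    user_input = "Tell me a joke."

    history_text = " ".join([f"User: {msg[0]} Assistant: {msg[1]}" for msg in chat_history])
    formatted_input = f"{history_text} User: {user_input} Assistant:"
    print(formatted_input)
    # User: Hi there! Assistant: Hello! How are you today? User: Tell me a joke. Assistant:

Note that the concatenated history grows with every turn, so long conversations can exceed the model's maximum input length; truncating or windowing chat_history before formatting may become necessary.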
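
The diff ends at the opening of the Blocks context, so the interface body is not shown. Below is a minimal sketch of one way chatbot_response could be wired up; the component names (chatbot, msg) and the submit wiring are assumptions, not the Space's actual code:

    import gradio as gr  # as at the top of app.py

    # Minimal sketch, assuming chatbot_response from the diff above is in scope.
    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()                  # renders the list of (user, bot) tuples
        msg = gr.Textbox(label="Your message")

        # The Chatbot component's current value flows in as chat_history, and the
        # updated history returned by chatbot_response flows back out to it.
        msg.submit(chatbot_response, inputs=[msg, chatbot], outputs=chatbot)

    demo.launch()

Because Gradio passes the Chatbot value in explicitly on every call in this wiring, the mutable default chat_history=[] is never shared between sessions.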