Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -89,19 +89,19 @@
|
|
89 |
|
90 |
import torch
|
91 |
import streamlit as st
|
92 |
-
from transformers import
|
93 |
|
94 |
model_name="facebook/blenderbot-400M-distill"
|
95 |
|
96 |
-
model=
|
97 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
98 |
ch=[]
|
99 |
def chat():
|
100 |
|
101 |
h_s="\n".join(ch)
|
102 |
i=st.input("enter")
|
103 |
-
|
104 |
-
outputs=model.generate(**
|
105 |
response=tokenizer.decode(outputs[0],skip_special_tokens=True).strip()
|
106 |
c_h.appned(i)
|
107 |
c_h.append(response)
|
|
|
89 |
|
90 |
import torch
import streamlit as st
# BlenderBot is an encoder-decoder (seq2seq) model, so the correct auto class
# is AutoModelForSeq2SeqLM ("AutoModelForCausalSeq2SeqLM" does not exist).
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "facebook/blenderbot-400M-distill"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Running chat history: alternating user / bot utterances, joined into a
# single context string for each generation call.
ch = []


def chat():
    """Read one user message from the Streamlit UI, generate a BlenderBot
    reply conditioned on the running history, and append both the message
    and the reply to the history list `ch`.

    Side effects: renders a Streamlit text input widget and mutates the
    module-level list `ch`. Returns None.
    """
    history_string = "\n".join(ch)
    # st.input does not exist in Streamlit; st.text_input is the text widget.
    user_input = st.text_input("enter")
    # `is` is a reserved keyword in Python and cannot be used as a variable
    # name (the original line was a SyntaxError).
    inputs = tokenizer.encode_plus(history_string, user_input, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=60)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
    # The original appended to an undefined `c_h` (and misspelled .append as
    # .appned); the declared history list is `ch`.
    ch.append(user_input)
    ch.append(response)
|