# myspace/app.py — Hugging Face Space: Gradio chatbot backed by a local
# DialoGPT model driven through LangChain.
# (The original file began with hf.co file-viewer chrome — "manoj555's
# picture / Update app.py / aaa37fd verified / raw / history blame / 1.4 kB" —
# which is page residue, not code, and would be a SyntaxError if left bare.)
import gradio as gr
from langchain.llms import HuggingFacePipeline
from langchain import LLMChain, PromptTemplate
from langchain.memory import ConversationBufferMemory
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Load a free conversational model from the Hugging Face Hub.
# DialoGPT-medium is small enough for CPU; alternatives: "tiiuae/falcon-rw-1b" or "gpt2".
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# DialoGPT ships without a pad token; reuse EOS so the text-generation
# pipeline can pad without warnings or errors.
if tokenizer.pad_token_id is None:
    tokenizer.pad_token = tokenizer.eos_token

# Create the generation pipeline.
# Fix: use max_new_tokens (counts only generated tokens) rather than the
# original max_length=1000 (counts prompt + generated). With max_length,
# a growing chat history steadily eats the generation budget and can push
# the total past DialoGPT's 1024-token context window, breaking replies
# mid-conversation.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,
    do_sample=True,
)

# Wrap the transformers pipeline so LangChain can drive it as an LLM.
llm = HuggingFacePipeline(pipeline=pipe)
# Per-turn prompt. {chat_history} is filled by ConversationBufferMemory
# below; {user_message} is the current user input. The template text is
# runtime behavior — its exact wording/whitespace reaches the model.
template = """You are a helpful assistant to answer user queries.
{chat_history}
User: {user_message}
Chatbot:"""
prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"], template=template
)
# Accumulates the full conversation verbatim under "chat_history", matching
# the template variable above. NOTE(review): this memory is module-level, so
# every visitor to the app shares one conversation history — confirm that is
# intended for a public Space.
memory = ConversationBufferMemory(memory_key="chat_history")
llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=True,  # echoes each formatted prompt/response to stdout
    memory=memory,
)
def get_text_response(user_message, history):
    """Gradio chat callback: produce the bot's reply for one user turn.

    The `history` argument that gr.ChatInterface supplies is intentionally
    unused — conversation state is carried by the chain's
    ConversationBufferMemory instead.
    """
    return llm_chain.predict(user_message=user_message)
# Build the chat UI; gr.ChatInterface calls get_text_response with
# (message, history) and renders the returned string as the bot turn.
demo = gr.ChatInterface(
    get_text_response,
    examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"]
)
if __name__ == "__main__":
    # queue() enables request queuing for concurrent users; share=True asks
    # for a public gradio.live tunnel (redundant when hosted on HF Spaces,
    # where a public URL already exists); debug=True keeps errors visible.
    demo.queue().launch(share=True, debug=True)