# app.py — Gradio chatbot Space: DialoGPT-medium served through LangChain.
# NOTE(review): this header previously contained Hugging Face web-page chrome
# ("raw / history blame", commit hash, file size) pasted into the source,
# which is not valid Python; replaced with this comment header.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda
# BUG FIX: ConversationBufferMemory lives in langchain.memory, not
# langchain_community.memory (that module does not exist) — the original
# import raised ModuleNotFoundError before the app could start.
from langchain.memory import ConversationBufferMemory

# Download (or load from cache) the conversational model and its tokenizer.
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Text-generation pipeline. max_length caps prompt + completion tokens and
# do_sample gives varied replies. pad_token_id is set explicitly because
# GPT-2-family tokenizers define no pad token, which otherwise emits a
# warning (and can break padding) at every generation call.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=1000,
    do_sample=True,
    truncation=True,
    pad_token_id=tokenizer.eos_token_id,
)

# LangChain-compatible wrapper around the raw transformers pipeline.
llm = HuggingFacePipeline(pipeline=pipe)
# Prompt layout fed to the model: system line, running transcript, new turn.
template = """You are a helpful assistant to answer user queries.
{chat_history}
User: {user_message}
Chatbot:"""

# Template object that substitutes the transcript and the user's message.
prompt = PromptTemplate(
    template=template,
    input_variables=["chat_history", "user_message"],
)

# Conversation memory. NOTE(review): nothing else in this file reads this
# object — the chat transcript is rebuilt from Gradio's `history` argument
# instead. Confirm whether it is intentional before removing it.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=False)
def generate_response(inputs):
    """Format the prompt from *inputs* and run it through the LLM.

    Args:
        inputs: dict with ``"chat_history"`` and ``"user_message"`` keys,
            matching the PromptTemplate's input variables.

    Returns:
        The model's completion as a string, without the echoed prompt.
    """
    formatted_prompt = prompt.format(**inputs)
    text = llm.invoke(formatted_prompt)
    # BUG FIX: text-generation pipelines return the prompt followed by the
    # completion, so the chatbot previously echoed the whole transcript back
    # to the user. Strip the prompt prefix when present (guarded with
    # startswith, in case the LangChain wrapper already removed it).
    if isinstance(text, str) and text.startswith(formatted_prompt):
        return text[len(formatted_prompt):].strip()
    return text


# Runnable wrapper (modern replacement for the deprecated LLMChain).
chain = RunnableLambda(generate_response)
def get_text_response(user_message, history):
    """Gradio ChatInterface callback: produce the bot reply for one turn.

    Args:
        user_message: the new message typed by the user.
        history: prior turns supplied by Gradio. Older Gradio versions pass
            ``(user, bot)`` pairs; newer ones pass ``{"role", "content"}``
            dicts (messages format). Both forms are handled — the original
            code raised ``KeyError``/``TypeError`` on the dict form.

    Returns:
        The chatbot's reply string from the LangChain runnable.
    """
    lines = []
    for msg in history or []:
        if isinstance(msg, dict):
            # Messages-format history: one dict per utterance.
            speaker = "User" if msg.get("role") == "user" else "Chatbot"
            lines.append(f"{speaker}: {msg.get('content', '')}")
        else:
            # Tuple-format history: one (user, bot) pair per turn.
            lines.append(f"User: {msg[0]}\nChatbot: {msg[1]}")
    chat_history = "\n".join(lines)
    inputs = {"chat_history": chat_history, "user_message": user_message}
    return chain.invoke(inputs)
# Assemble the Gradio chat UI around the handler above.
demo = gr.ChatInterface(
    fn=get_text_response,
    title="AI Chatbot",
    description="A simple chatbot using LangChain + HuggingFace + Gradio",
    theme="default",
    examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"],
)

# Entry point: queue incoming requests, expose a public share link, and
# enable debug logging in the console.
if __name__ == "__main__":
    demo.queue().launch(share=True, debug=True)