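"""A simple chatbot: a Hugging Face DialoGPT model wrapped in a LangChain
LLMChain with conversation memory, served through a Gradio ChatInterface."""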
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_community.llms import HuggingFacePipeline
from langchain.memory import ConversationBufferMemory
from langchain_core.prompts import PromptTemplate
from langchain.chains import LLMChain
# Load model and tokenizer
model_name = "microsoft/DialoGPT-medium" # You can change this to another HF model if needed
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Create text-generation pipeline
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,  # Cap generated tokens so the growing chat history still fits in DialoGPT's 1024-token context
    do_sample=True,
    truncation=True,  # Explicit truncation to avoid HF warnings
    pad_token_id=tokenizer.eos_token_id  # Prevents warning for open-end generation
)
# Wrap with LangChain LLM wrapper
llm = HuggingFacePipeline(pipeline=pipe)
# Prompt Template
template = """You are a helpful assistant that answers user queries.
{chat_history}
User: {user_message}
Chatbot:"""
prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template=template
)
# Conversation memory (stores past messages)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=False)  # Plain-text history for the string prompt template
# LangChain LLM Chain
llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    memory=memory,
    verbose=True
)
# Chat function
def get_text_response(user_message, history):
    # `history` is supplied by Gradio, but LangChain's memory already tracks the conversation
    response = llm_chain.predict(user_message=user_message)
    return response
# Gradio UI
demo = gr.ChatInterface(
    fn=get_text_response,
    examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"],
    title="AI Chatbot",
    description="A simple chatbot using LangChain + HuggingFace + Gradio",
    theme="default",
    type="messages"  # Newer message format; avoids Gradio's tuple-format deprecation warnings
)
# Launch
if __name__ == "__main__":
    demo.queue().launch(share=True)