import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_community.memory import ConversationBufferMemory

# Load model and tokenizer
# DialoGPT-medium is a conversational causal language model; weights are
# downloaded from the Hugging Face Hub on first run.
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Create text-generation pipeline
# do_sample=True enables stochastic decoding; truncation=True trims over-long
# prompts. NOTE(review): max_length=1000 presumably counts prompt + generated
# tokens combined (transformers default semantics) — confirm this budget is
# intended, since long chat histories eat into the generation allowance.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=1000, do_sample=True, truncation=True)

# Wrap with HuggingFacePipeline
# Adapts the transformers pipeline to LangChain's LLM interface so it can be
# called via llm.invoke(...) below.
llm = HuggingFacePipeline(pipeline=pipe)

# Prompt Template
# Rendered once per turn: system instruction, the prior transcript, the new
# user message, and a trailing "Chatbot:" cue for the model to complete.
template = """You are a helpful assistant to answer user queries.
{chat_history}
User: {user_message}
Chatbot:"""

prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template=template
)

# Memory (updated)
# NOTE(review): this memory object is created but never wired into the chain —
# the handler rebuilds chat_history from Gradio's `history` argument instead,
# so `memory` is effectively dead state. Also verify the import path: the
# import above uses `langchain_community.memory`, but ConversationBufferMemory
# is conventionally exposed from `langchain.memory` — confirm against the
# installed LangChain version.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=False)

# Runnable-based chain (preferred over the deprecated LLMChain wrapper).
def generate_response(inputs):
    """Render the prompt template with *inputs* and run it through the LLM.

    *inputs* must supply the template variables ``chat_history`` and
    ``user_message``; returns the raw text produced by ``llm.invoke``.
    """
    rendered = prompt.format(**inputs)
    return llm.invoke(rendered)

chain = RunnableLambda(generate_response)

# Gradio Chat Handler
def get_text_response(user_message, history):
    """Turn Gradio's (user, bot) history pairs into a transcript and query the chain.

    ``history`` may be None/empty on the first turn; each entry is assumed to
    be a (user_message, bot_reply) pair as supplied by gr.ChatInterface.
    """
    transcript = "\n".join(
        f"User: {user}\nChatbot: {bot}" for user, bot in (history or [])
    )
    return chain.invoke({"chat_history": transcript, "user_message": user_message})

# Gradio UI
# ChatInterface calls get_text_response(message, history) on every user turn;
# `examples` seeds clickable starter prompts in the widget.
demo = gr.ChatInterface(
    fn=get_text_response,
    examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"],
    title="AI Chatbot",
    description="A simple chatbot using LangChain + HuggingFace + Gradio",
    theme="default"
)

if __name__ == "__main__":
    # queue() enables request queuing; share=True exposes a temporary public
    # URL and debug=True surfaces server-side tracebacks — both presumably for
    # development only; disable for production deployment.
    demo.queue().launch(share=True, debug=True)