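# Assumed dependencies (versions are not pinned in the original script):
#   pip install gradio transformers torch langchain langchain-community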
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_community.llms import HuggingFacePipeline
from langchain_core.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
# Load model and tokenizer
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# DialoGPT ships without a pad token; reuse EOS so padding/truncation work
tokenizer.pad_token = tokenizer.eos_token
# Create text-generation pipeline
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,  # DialoGPT's GPT-2 context is 1024 tokens; max_length=10000 would overflow it
    do_sample=True,
    truncation=True,
)
# Wrap with HuggingFacePipeline
llm = HuggingFacePipeline(pipeline=pipe)
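# HuggingFacePipeline adapts the local transformers pipeline to LangChain's
# LLM interface, so the chain below runs fully locally with no API key.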
# Prompt Template
template = """You are a helpful assistant to answer user queries.
{chat_history}
User: {user_message}
Chatbot:"""
prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template=template,
)
# Memory
# Return history as a plain string: the prompt is a text template, not a chat-message list
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=False)
# Chain
llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    memory=memory,
    verbose=True,
)
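# On each call the chain fills {chat_history} from memory, renders the prompt,
# generates a reply, then saves the new user/assistant turn back into memory,
# so the rendered prompt grows with every exchange.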
# Chat function
def get_text_response(user_message, history):
    # Gradio passes its own history, but ConversationBufferMemory already
    # tracks the conversation, so the history argument is intentionally unused
    response = llm_chain.predict(user_message=user_message)
    return response
# Gradio UI
demo = gr.ChatInterface(
    fn=get_text_response,
    examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"],
    title="AI Chatbot",
    description="A simple chatbot using LangChain + HuggingFace + Gradio",
    theme="default",
    chatbot=gr.Chatbot(label="Assistant", show_label=True),
)
if __name__ == "__main__":
    demo.queue().launch(share=True)