manoj555 committed · Commit 9ba2a32 · verified · 1 Parent(s): 7dda322

Update app.py

Files changed (1): app.py +30 -19
app.py CHANGED
@@ -1,47 +1,58 @@
 import gradio as gr
-from langchain_community.llms import HuggingFacePipeline
-from langchain import LLMChain, PromptTemplate
-from langchain.memory import ConversationBufferMemory
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
-# Load a free model from Hugging Face
-model_name = "microsoft/DialoGPT-medium" # Or try "tiiuae/falcon-rw-1b" or "gpt2"
+from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
+from langchain_core.prompts import PromptTemplate
+from langchain_core.runnables import RunnableLambda
+from langchain.memory import ConversationBufferMemory  # note: lives in langchain.memory, not langchain_community.memory
+
+# Load model and tokenizer
+model_name = "microsoft/DialoGPT-medium"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-# Create pipeline
-pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=1000, do_sample=True)
+# Create text-generation pipeline
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=1000, do_sample=True, truncation=True)
 
 # Wrap with HuggingFacePipeline
 llm = HuggingFacePipeline(pipeline=pipe)
 
+# Prompt template
 template = """You are a helpful assistant to answer user queries.
 {chat_history}
 User: {user_message}
 Chatbot:"""
 
 prompt = PromptTemplate(
-    input_variables=["chat_history", "user_message"], template=template
+    input_variables=["chat_history", "user_message"],
+    template=template
 )
 
-memory = ConversationBufferMemory(memory_key="chat_history")
+# Memory (kept for parity; the Gradio handler below rebuilds chat_history itself)
+memory = ConversationBufferMemory(memory_key="chat_history", return_messages=False)
 
-llm_chain = LLMChain(
-    llm=llm,
-    prompt=prompt,
-    verbose=True,
-    memory=memory,
-)
+# Runnable chain (replaces the deprecated LLMChain)
+def generate_response(inputs):
+    formatted_prompt = prompt.format(**inputs)
+    return llm.invoke(formatted_prompt)
 
+chain = RunnableLambda(generate_response)
+
+# Gradio chat handler
 def get_text_response(user_message, history):
-    response = llm_chain.predict(user_message=user_message)
+    # history arrives as a list of (user, bot) tuples in Gradio's default chat format
+    chat_history = "\n".join([f"User: {msg[0]}\nChatbot: {msg[1]}" for msg in history]) if history else ""
+    inputs = {"chat_history": chat_history, "user_message": user_message}
+    response = chain.invoke(inputs)
     return response
 
+# Gradio UI
 demo = gr.ChatInterface(
-    get_text_response,
-    examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"]
+    fn=get_text_response,
+    examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"],
+    title="AI Chatbot",
+    description="A simple chatbot using LangChain + HuggingFace + Gradio",
+    theme="default"
+)
 
-
 if __name__ == "__main__":
     demo.queue().launch(share=True, debug=True)
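
For a quick sanity check of the refactored chain outside the Gradio UI, the snippet below invokes it directly. A minimal sketch, assuming the file above is saved as app.py on the import path; the file name smoke_test.py and the sample strings are illustrative, and importing app loads DialoGPT-medium, so the first run downloads the model.

# smoke_test.py - illustrative only, not part of the commit
from app import chain, get_text_response

# Invoke the RunnableLambda directly with a pre-formatted history string.
raw = chain.invoke({
    "chat_history": "User: Hi\nChatbot: Hello! How can I help?",
    "user_message": "Tell me a joke.",
})
print(raw)

# Or go through the Gradio handler with tuple-format history,
# the same shape gr.ChatInterface passes to fn.
history = [("Hi", "Hello! How can I help?")]
print(get_text_response("Tell me a joke.", history))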
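
One behavior worth noting with this setup: a raw text-generation pipeline returns the prompt concatenated with the continuation, so the reply surfaced in the chat may echo the whole template. If that shows up in practice, transformers' text-generation pipeline accepts return_full_text=False to return only the newly generated text. A hedged variant of the pipeline construction (max_new_tokens=200 is an assumed cap, not the commit's setting):

# Variant pipeline construction - a sketch, not what the commit ships.
# return_full_text=False returns only the generated continuation
# instead of prompt + continuation.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=200,   # assumed cap on new tokens (the commit uses max_length=1000)
    do_sample=True,
    truncation=True,
    return_full_text=False,
)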