manoj555 committed
Commit 8a773b5 · verified · 1 Parent(s): 3a7c44f

Update app.py

Files changed (1)
  1. app.py +22 -35
app.py CHANGED
@@ -1,60 +1,47 @@
  import gradio as gr
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
- from langchain_huggingface import HuggingFacePipeline  # Updated import
- from langchain_core.prompts import PromptTemplate
- from langchain_core.runnables import RunnableLambda
+ from langchain.llms import HuggingFacePipeline
+ from langchain import LLMChain, PromptTemplate
  from langchain.memory import ConversationBufferMemory
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

- # Load model and tokenizer
- model_name = "microsoft/DialoGPT-medium"
+ # Load a free model from Hugging Face
+ model_name = "microsoft/DialoGPT-medium"  # Or try "tiiuae/falcon-rw-1b" or "gpt2"
  tokenizer = AutoTokenizer.from_pretrained(model_name)
  model = AutoModelForCausalLM.from_pretrained(model_name)

- # Create HuggingFace pipeline
+ # Create pipeline
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=1000, do_sample=True)

- # Wrap with updated HuggingFacePipeline from langchain_huggingface
+ # Wrap with HuggingFacePipeline
  llm = HuggingFacePipeline(pipeline=pipe)

- # Prompt Template
- template = """You are a helpful assistant that answers user queries.
+ template = """You are a helpful assistant to answer user queries.
  {chat_history}
  User: {user_message}
  Chatbot:"""

  prompt = PromptTemplate(
-     input_variables=["chat_history", "user_message"],
-     template=template
+     input_variables=["chat_history", "user_message"], template=template
  )

- # Updated memory object
- memory = ConversationBufferMemory(memory_key="chat_history", return_messages=False)
-
- # Response generation function
- def generate_response(inputs):
-     formatted_prompt = prompt.format(**inputs)
-     return llm.invoke(formatted_prompt)
+ memory = ConversationBufferMemory(memory_key="chat_history")

- # Wrap as chain
- chain = RunnableLambda(generate_response)
+ llm_chain = LLMChain(
+     llm=llm,
+     prompt=prompt,
+     verbose=True,
+     memory=memory,
+ )

- # Chat handler for Gradio
- def get_text_response(message, history):
-     # Convert history to chat format
-     chat_history = "\n".join([f"User: {turn['content']}\nChatbot: {response['content']}" for turn, response in zip(history[::2], history[1::2])]) if history else ""
-     inputs = {"chat_history": chat_history, "user_message": message}
-     response = chain.invoke(inputs)
-     return {"role": "assistant", "content": response}
+ def get_text_response(user_message, history):
+     response = llm_chain.predict(user_message=user_message)
+     return response

- # Gradio ChatInterface with updated format
  demo = gr.ChatInterface(
-     fn=get_text_response,
-     chatbot=gr.Chatbot(type="messages"),
-     examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"],
-     title="AI Chatbot",
-     description="A simple chatbot using LangChain + HuggingFace + Gradio (2025)",
-     theme="default"
+     get_text_response,
+     examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"]
  )

+
  if __name__ == "__main__":
      demo.queue().launch(share=True, debug=True)
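
For context, the `LLMChain` + `ConversationBufferMemory` pairing this commit restores keeps the running transcript inside the chain itself, which is why the new `get_text_response` can ignore Gradio's `history` argument: `memory_key="chat_history"` must match the `{chat_history}` slot in the prompt, and the memory fills that slot on every call. Below is a minimal sketch of that behaviour, assuming the legacy `langchain` 0.0.x API this file targets; `FakeListLLM` is a stand-in used here only so the example runs without downloading a model.

from langchain import LLMChain, PromptTemplate
from langchain.llms.fake import FakeListLLM  # stand-in LLM, for illustration only
from langchain.memory import ConversationBufferMemory

prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template="{chat_history}\nUser: {user_message}\nChatbot:",
)
memory = ConversationBufferMemory(memory_key="chat_history")
chain = LLMChain(llm=FakeListLLM(responses=["Hi there!", "Doing well."]), prompt=prompt, memory=memory)

print(chain.predict(user_message="Hello"))         # turn 1: chat_history renders empty
print(chain.predict(user_message="How are you?"))  # turn 2: prompt now contains turn 1
print(memory.buffer)                               # the memory accumulated both exchanges

Because the chain carries its own state, this sketch (like the committed handler) is single-session; per-user history would need one memory object per Gradio session.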