manoj555 committed
Commit aaa37fd · verified · 1 Parent(s): b87a77e

Update app.py: switch to the legacy langchain imports and condense the pipeline, prompt, memory, and Gradio setup

Files changed (1)
  1. app.py +15 -34
app.py CHANGED
@@ -1,66 +1,47 @@
 import gradio as gr
+from langchain.llms import HuggingFacePipeline
+from langchain import LLMChain, PromptTemplate
+from langchain.memory import ConversationBufferMemory
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
-from langchain_community.llms import HuggingFacePipeline
-from langchain_community.memory import ConversationBufferMemory
-from langchain_core.prompts import PromptTemplate
-from langchain.chains import LLMChain
 
-# Load model and tokenizer
-model_name = "microsoft/DialoGPT-medium"  # You can change this to another HF model if needed
+# Load a free model from Hugging Face
+model_name = "microsoft/DialoGPT-medium"  # Or try "tiiuae/falcon-rw-1b" or "gpt2"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-# Create text-generation pipeline
-pipe = pipeline(
-    "text-generation",
-    model=model,
-    tokenizer=tokenizer,
-    max_length=1000,
-    do_sample=True,
-    truncation=True,  # Explicit truncation to avoid HF warnings
-    pad_token_id=tokenizer.eos_token_id  # Prevents warning for open-end generation
-)
+# Create pipeline
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=1000, do_sample=True)
 
-# Wrap with LangChain LLM wrapper
+# Wrap with HuggingFacePipeline
 llm = HuggingFacePipeline(pipeline=pipe)
 
-# Prompt Template
 template = """You are a helpful assistant to answer user queries.
 {chat_history}
 User: {user_message}
 Chatbot:"""
 
 prompt = PromptTemplate(
-    input_variables=["chat_history", "user_message"],
-    template=template
+    input_variables=["chat_history", "user_message"], template=template
 )
 
-# Conversation memory (stores past messages)
-memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+memory = ConversationBufferMemory(memory_key="chat_history")
 
-# LangChain LLM Chain
 llm_chain = LLMChain(
     llm=llm,
     prompt=prompt,
+    verbose=True,
     memory=memory,
-    verbose=True
 )
 
-# Chat function
 def get_text_response(user_message, history):
     response = llm_chain.predict(user_message=user_message)
     return response
 
-# Gradio UI
 demo = gr.ChatInterface(
-    fn=get_text_response,
-    examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"],
-    title="AI Chatbot",
-    description="A simple chatbot using LangChain + HuggingFace + Gradio",
-    theme="default",
-    type="chat"  # Uses newer format to avoid Gradio tuple warnings
+    get_text_response,
+    examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"]
 )
 
-# Launch
+
 if __name__ == "__main__":
-    demo.queue().launch(share=True)
+    demo.queue().launch(share=True, debug=True)
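
Two notes on the import churn above. The removed side pulls ConversationBufferMemory from langchain_community.memory, but the community package does not provide a memory module (the class lives in langchain.memory), which is presumably the breakage this commit routes around. The restored from langchain.llms / from langchain import LLMChain, PromptTemplate paths, in turn, are deprecated on recent LangChain releases. If the environment is not pinned, a tolerant import is one option; a minimal sketch, an assumption rather than part of the commit:

# Sketch (not part of the commit): tolerate both old and new LangChain layouts.
try:
    from langchain_community.llms import HuggingFacePipeline
    from langchain_core.prompts import PromptTemplate
except ImportError:  # pre-split releases without langchain_community / langchain_core
    from langchain.llms import HuggingFacePipeline
    from langchain.prompts import PromptTemplate

from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory  # memory never moved to langchain_community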
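
Separately, the condensed pipeline(...) call drops truncation=True and pad_token_id=tokenizer.eos_token_id, so the Transformers warnings the deleted comments mention will return, and max_length=1000 bounds prompt plus completion together (DialoGPT-medium is GPT-2-based with a 1024-token context, so a long chat_history leaves little room for the reply). A sketch of a variant that bounds only the generated tokens; the values here are assumptions, not part of the commit:

# Sketch (not in the commit): cap new tokens instead of total length.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=200,                   # bound the completion, not prompt + completion
    do_sample=True,
    truncation=True,                      # avoid the HF truncation warning
    pad_token_id=tokenizer.eos_token_id,  # avoid the open-end generation warning
)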
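
Finally, the updated chain can be smoke-tested without launching the Gradio UI by calling the chat function directly. A minimal sketch, assuming it runs from the directory containing app.py (importing the module loads the model, so the first call is slow):

# Hypothetical smoke test; run from the directory containing app.py.
from app import get_text_response

# The history argument is unused by the chain (ConversationBufferMemory tracks turns), so pass [].
print(get_text_response("How are you doing?", []))
print(get_text_response("What are your interests?", []))  # second turn exercises the memory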