manoj555 committed
Commit 2111d8c · verified · parent: b94fd26

Update app.py

Files changed (1)
  1. app.py +13 -9
app.py CHANGED
@@ -4,44 +4,48 @@ from langchain import LLMChain, PromptTemplate
 from langchain.memory import ConversationBufferMemory
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
-# Load a free model from Hugging Face
-model_name = "microsoft/DialoGPT-medium"  # Or try "tiiuae/falcon-rw-1b" or "gpt2"
+# Load model and tokenizer
+model_name = "microsoft/DialoGPT-medium"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-# Create pipeline
+# Create HF pipeline
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=1000, do_sample=True)
 
-# Wrap with HuggingFacePipeline
+# Wrap with HuggingFacePipeline for LangChain
 llm = HuggingFacePipeline(pipeline=pipe)
 
+# Prompt Template
 template = """You are a helpful assistant to answer user queries.
 {chat_history}
 User: {user_message}
 Chatbot:"""
 
 prompt = PromptTemplate(
-    input_variables=["chat_history", "user_message"], template=template
+    input_variables=["chat_history", "user_message"],
+    template=template
 )
 
+# Memory
 memory = ConversationBufferMemory(memory_key="chat_history")
 
+# LLM Chain
 llm_chain = LLMChain(
     llm=llm,
     prompt=prompt,
-    verbose=True,
     memory=memory,
 )
 
+# Response function
 def get_text_response(user_message, history):
     response = llm_chain.predict(user_message=user_message)
     return response
 
+# Gradio Chat Interface
 demo = gr.ChatInterface(
     get_text_response,
     examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"]
 )
 
-
-if __name__ == "__main__":
-    demo.queue().launch(share=True, debug=True)
+# Launch the app (no share=True needed for Spaces)
+demo.launch()
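
Note: the hunk starts at line 4, so the file's first three lines are not shown. Judging from the hunk header and the names used in the body (gr, HuggingFacePipeline), they hold the remaining imports. A minimal sketch of what those lines likely contain — a hypothetical reconstruction, assuming the pre-0.1 langchain API the rest of the file uses:

import gradio as gr                              # assumed: gr.ChatInterface is called below
from langchain import LLMChain, PromptTemplate   # confirmed by the hunk header
from langchain.llms import HuggingFacePipeline   # assumed import path for the wrapper used in the file

The launch change is consistent with running on a Hugging Face Space: the Space serves the app itself, so the share=True tunnel and the __main__ guard are unnecessary and a top-level demo.launch() suffices.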