manoj555 committed
Commit 5350e2e · verified · 1 Parent(s): af22cbc

Update app.py

Files changed (1): app.py (+20 −14)
app.py CHANGED
@@ -1,18 +1,19 @@
  import gradio as gr
- from langchain.llms import HuggingFacePipeline
- from langchain import LLMChain, PromptTemplate
- from langchain.memory import ConversationBufferMemory
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ from langchain_community.llms import HuggingFacePipeline
+ from langchain_core.prompts import PromptTemplate
+ from langchain.chains import LLMChain
+ from langchain.memory import ConversationBufferMemory

  # Load model and tokenizer
  model_name = "microsoft/DialoGPT-medium"
  tokenizer = AutoTokenizer.from_pretrained(model_name)
  model = AutoModelForCausalLM.from_pretrained(model_name)

- # Create HF pipeline
- pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=10000, do_sample=True)
+ # Create text-generation pipeline
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=10000, do_sample=True, truncation=True)

- # Wrap with HuggingFacePipeline for LangChain
+ # Wrap with HuggingFacePipeline
  llm = HuggingFacePipeline(pipeline=pipe)

  # Prompt Template
@@ -27,25 +28,30 @@ prompt = PromptTemplate(
  )

  # Memory
- memory = ConversationBufferMemory(memory_key="chat_history")
+ memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

- # LLM Chain
+ # Chain
  llm_chain = LLMChain(
      llm=llm,
      prompt=prompt,
      memory=memory,
+     verbose=True
  )

- # Response function
+ # Chat function
  def get_text_response(user_message, history):
      response = llm_chain.predict(user_message=user_message)
      return response

- # Gradio Chat Interface
+ # Gradio UI
  demo = gr.ChatInterface(
-     get_text_response,
-     examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"]
+     fn=get_text_response,
+     examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"],
+     title="AI Chatbot",
+     description="A simple chatbot using LangChain + HuggingFace + Gradio",
+     theme="default",
+     chatbot=gr.Chatbot(label="Assistant", show_label=True)
  )

- # Launch the app (no share=True needed for Spaces)
- demo.launch(share=True)
+ if __name__ == "__main__":
+     demo.queue().launch(share=True)
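
Review note: a minimal local smoke test of the updated chain, outside Gradio, might look like the sketch below. It is an assumption, not part of the commit: the PromptTemplate text is invented here (the hunk does not show app.py's actual template), max_new_tokens=64 stands in for the committed max_length=10000 (DialoGPT's context window is 1024 tokens, so the larger value can warn or truncate), and memory is left at return_messages=False because a plain string template renders message objects poorly.

# Hypothetical smoke test for the updated chain; not part of the commit.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_community.llms import HuggingFacePipeline
from langchain_core.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory

model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Cap new-token generation instead of using max_length=10000:
# DialoGPT-medium's context window is only 1024 tokens.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer,
                max_new_tokens=64, do_sample=True)
llm = HuggingFacePipeline(pipeline=pipe)

# Placeholder template; app.py's real template is not shown in this diff.
prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template="{chat_history}\nUser: {user_message}\nAssistant:",
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm_chain = LLMChain(llm=llm, prompt=prompt, memory=memory)

print(llm_chain.predict(user_message="How are you doing?"))
# The second call should carry the first exchange in via chat_history.
print(llm_chain.predict(user_message="What are your interests?"))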