twimbit-ai committed
Commit 112eee5 · Parent(s): ebb95ee

Update query_data.py

Files changed (1):
  query_data.py +47 -16
query_data.py CHANGED
@@ -1,11 +1,28 @@
 from langchain.prompts.prompt import PromptTemplate
-from langchain.llms import OpenAI, OpenAIChat
-from langchain.chains import ChatVectorDBChain, ConversationalRetrievalChain
+from langchain.chains import ConversationalRetrievalChain
 from langchain.chat_models import ChatOpenAI
+# from langchain.llms import OpenAI
 
-_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a
-standalone question.
+# from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT,QA_PROMPT
 
+# _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a
+# standalone question.
+#
+#
+# Chat History:
+# {chat_history}
+# Follow Up Input: {question}
+# Standalone question:"""
+# CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
+#
+# template = """You are given an context and a question find an answer from the given
+# context and provide an answer. If the answer is not in the context then
+# simply say "No information found". Don't try to make an answer. And also don't use "According to the information
+# provided in the given links," Question: {question} ========= {context} ========= Answer in Markdown: """
+# QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
+
+_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a
+standalone question.
 
 Chat History:
 {chat_history}
@@ -13,25 +30,39 @@ Follow Up Input: {question}
 Standalone question:"""
 CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
 
-template = """Give an answer to the question based on the context below, and if you don't find the answer in
-the context, then simply say "Sorry no information is present in twimbit's data", But do give an answer that fits
-right, the answer can be outside of this context and at the end of the answer, do mention that this answer is not
-from twimbit's data and also include sources like article links etc. And also don't use "According to the information
-provided in the given links, " while giving an answer.
+prompt_template = """Give an answer to the question based on the context below and try to explain in detail and do
+add source links if exist in context, and if you don't find the answer in the context, then simply say "That's a great
+question, and I want to make sure I provide you with the most accurate and helpful information possible.
+Unfortunately, our current pool of insights does not have an answer to this.", don't try to make up an answer And
+also don't use "According to the information provided in the given links, " while giving an answer.
 
 {context}
 
 Question: {question}
 Helpful Answer:"""
-QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
+QA_PROMPT = PromptTemplate(
+    template=prompt_template, input_variables=["context", "question"]
+)
 
 
 def get_chain(vectorstore):
+    from langchain.chains.llm import LLMChain
+    # from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+    from langchain.chains.question_answering import load_qa_chain
     llm = ChatOpenAI(temperature=0)
-    qa_chain = ConversationalRetrievalChain.from_llm(
-        llm,
-        vectorstore.as_retriever(search_kwargs={"k": 8, "include_metadata": True})
-        # qa_prompt=QA_PROMPT,
-        # condense_question_prompt=CONDENSE_QUESTION_PROMPT,
-    )
+    streaming_llm = ChatOpenAI(streaming=True, temperature=0, max_tokens=300)
+    question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
+    doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=QA_PROMPT)
+
+    # qa_chain = ConversationalRetrievalChain.from_llm(
+    #     llm,
+    #     vectorstore.as_retriever(search_kwargs={"k": 8, "include_metadata": True}),
+    #     condense_question_prompt=CONDENSE_QUESTION_PROMPT,
+    #     qa_prompt=QA_PROMPT
+    # )
+
+    qa_chain = ConversationalRetrievalChain(return_source_documents=False,
+                                            retriever=vectorstore.as_retriever(
+                                                search_kwargs={"k": 8, "include_metadata": True}),
+                                            combine_docs_chain=doc_chain, question_generator=question_generator)
    return qa_chain
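
For reference, a minimal sketch of how the chain built by this commit would be driven. It is not part of the commit: the pickled-vectorstore loading and the "vectorstore.pkl" path are assumptions (any retriever-capable store such as FAISS or Chroma built at ingest time would work). ConversationalRetrievalChain takes a dict with "question" and "chat_history"; the question_generator first condenses the follow-up into a standalone question, then the "stuff" doc_chain answers over the k=8 retrieved chunks.

import pickle

from query_data import get_chain

# Hypothetical ingest artifact: a vectorstore pickled by a separate script.
with open("vectorstore.pkl", "rb") as f:
    vectorstore = pickle.load(f)

qa_chain = get_chain(vectorstore)

chat_history = []  # list of (question, answer) tuples
question = "What does twimbit cover in its research?"

# The chain condenses the question, retrieves 8 chunks, and answers with QA_PROMPT.
result = qa_chain({"question": question, "chat_history": chat_history})
chat_history.append((question, result["answer"]))
print(result["answer"])

One caveat on the streaming setup: with the StreamingStdOutCallbackHandler import left commented out, streaming=True on the answering ChatOpenAI produces no visible token stream on its own; a callback handler would have to be attached to the streaming LLM for live output.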