twimbit-ai committed
Commit 3b59724 · 1 Parent(s): 667be0c

Update query_data.py

Files changed (1)
  1. query_data.py +9 -7
query_data.py CHANGED
@@ -1,6 +1,7 @@
 from langchain.prompts.prompt import PromptTemplate
 from langchain.chains import ConversationalRetrievalChain
 from langchain.chat_models import ChatOpenAI
+
 # from langchain.llms import OpenAI
 
 # from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT,QA_PROMPT
@@ -30,11 +31,12 @@ Follow Up Input: {question}
 Standalone question:"""
 CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
 
-prompt_template = """Give an answer to the question based on the context below and try to explain in detail and do
-add source links if exist in context, and if you don't find the answer in the context, then simply say "That's a great
-question, and I want to make sure I provide you with the most accurate and helpful information possible.
-Unfortunately, our current pool of insights does not have an answer to this.", don't try to make up an answer And
-also don't use "According to the information provided in the given links, " while giving an answer.
+prompt_template = """Give an answer to the question based on the context below and try to explain in detail and use
+bullet points if answer is long and do add source links if exist in context, and if you don't find the answer in the
+context, then simply say "That's a great question, and I want to make sure I provide you with the most accurate and
+helpful information possible. Unfortunately, our current pool of insights does not have an answer to this.",
+don't try to make up an answer And also don't use "According to the information provided in the given links,
+" while giving an answer.
 
 {context}
 
@@ -50,7 +52,7 @@ def get_chain(vectorstore):
     # from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
     from langchain.chains.question_answering import load_qa_chain
     llm = ChatOpenAI(temperature=0)
-    streaming_llm = ChatOpenAI(streaming=True, temperature=0, max_tokens=300)
+    streaming_llm = ChatOpenAI(streaming=False, temperature=0, max_tokens=300, top_p=1)
     question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
     doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=QA_PROMPT)
 
@@ -63,6 +65,6 @@ def get_chain(vectorstore):
 
     qa_chain = ConversationalRetrievalChain(return_source_documents=True,
                                             retriever=vectorstore.as_retriever(
-                                                search_kwargs={"k": 8, "include_metadata": True}),
+                                                search_kwargs={"k": 5, "include_metadata": True}),
                                             combine_docs_chain=doc_chain, question_generator=question_generator)
     return qa_chain
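The hunks reference a QA_PROMPT that is built from prompt_template outside the lines shown; presumably it is wrapped along these lines (a sketch under that assumption, not part of the commit):

QA_PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)

For completeness, a minimal usage sketch of the updated get_chain, assuming the legacy (pre-0.1) LangChain call style this file already uses; the pickle path and the sample question are hypothetical:

import pickle

from query_data import get_chain

# Load a previously built vector store (hypothetical pickle produced by an
# ingestion step; any LangChain vector store exposing as_retriever() works).
with open("vectorstore.pkl", "rb") as f:
    vectorstore = pickle.load(f)

qa_chain = get_chain(vectorstore)

# ConversationalRetrievalChain expects the new question plus prior chat
# history: the condense-question LLM first rewrites follow-ups into
# standalone questions, then the "stuff" chain answers over the k=5 chunks.
result = qa_chain({"question": "What does twimbit do?", "chat_history": []})
print(result["answer"])

# return_source_documents=True also surfaces the retrieved chunks.
for doc in result["source_documents"]:
    print(doc.metadata)

With streaming=False and max_tokens=300, the answer arrives as a single response capped at 300 tokens rather than token-by-token.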