# twimbit / query_data.py
from langchain.prompts.prompt import PromptTemplate
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
# from langchain.llms import OpenAI
# from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT,QA_PROMPT
# _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a
# standalone question.
#
#
# Chat History:
# {chat_history}
# Follow Up Input: {question}
# Standalone question:"""
# CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
#
# template = """You are given an context and a question find an answer from the given
# context and provide an answer. If the answer is not in the context then
# simply say "No information found". Don't try to make an answer. And also don't use "According to the information
# provided in the given links," Question: {question} ========= {context} ========= Answer in Markdown: """
# QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
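
# Prompt that condenses the chat history and a follow-up question into a single
# standalone question before retrieval.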
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a
standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
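
# Prompt used by the answering ("stuff") chain: answer only from the retrieved
# context and fall back to a fixed message when the context has no answer.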
prompt_template = """Give an answer to the question based on the context below and try to explain in detail and do
add source links if exist in context, and if you don't find the answer in the context, then simply say "That's a great
question, and I want to make sure I provide you with the most accurate and helpful information possible.
Unfortunately, our current pool of insights does not have an answer to this.", don't try to make up an answer And
also don't use "According to the information provided in the given links, " while giving an answer.
{context}
Question: {question}
Helpful Answer:"""
QA_PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)


def get_chain(vectorstore):
    """Build a ConversationalRetrievalChain over the given vector store."""
    from langchain.chains.llm import LLMChain
    # from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
    from langchain.chains.question_answering import load_qa_chain

    # Non-streaming model that condenses the chat history into a standalone question.
    llm = ChatOpenAI(temperature=0)
    # Streaming model that writes the final answer from the retrieved documents.
    streaming_llm = ChatOpenAI(streaming=True, temperature=0, max_tokens=300)

    question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
    doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=QA_PROMPT)
    # qa_chain = ConversationalRetrievalChain.from_llm(
    #     llm,
    #     vectorstore.as_retriever(search_kwargs={"k": 8, "include_metadata": True}),
    #     condense_question_prompt=CONDENSE_QUESTION_PROMPT,
    #     qa_prompt=QA_PROMPT
    # )
    qa_chain = ConversationalRetrievalChain(
        retriever=vectorstore.as_retriever(search_kwargs={"k": 8, "include_metadata": True}),
        combine_docs_chain=doc_chain,
        question_generator=question_generator,
        return_source_documents=True,
    )
    return qa_chain
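

# A minimal usage sketch (not part of the original file): it assumes the vector
# store was pickled elsewhere as "vectorstore.pkl" (a placeholder path), and the
# question text below is likewise a placeholder. It shows how the chain is called
# with a question and an (initially empty) chat history.
if __name__ == "__main__":
    import pickle

    # Load a previously built LangChain vector store (assumed pickle format).
    with open("vectorstore.pkl", "rb") as f:
        vectorstore = pickle.load(f)

    chain = get_chain(vectorstore)
    result = chain({"question": "What does twimbit cover?", "chat_history": []})
    print(result["answer"])
    # return_source_documents=True also exposes the retrieved documents.
    for doc in result["source_documents"]:
        print(doc.metadata)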