Update app.py
app.py
CHANGED
@@ -73,82 +73,32 @@ def get_file(source_documents):
 
 
 def chat_query_doc(question, chat_history_doc):
-
-    query_old = f"""Provide an elaborate, precise and pointwise reply to the question: {question}.
-    Also, Please consider the provided chat history: {chat_history_doc}.
-    Ensure that your current response is detailed, accurate, and addresses each aspect of the question thoroughly.
-    If the context of the question doesn't align with your last reply, please provide your response in a fresh manner.
-    If don't get the answer, feel free to reply from your own knowledge."""
-
-
-    # query = f"""You'll be asked with a User Query. If the Query is related to Electrical Domain, Provide a precise and point-wise reply to the query: {question} \
-    # based on provided context only. Ensure that your reply addresses each aspect of the query thoroughly. """
 
-    query = f"""
-
-    in your reply."""
+    query = f"""Please provide a precise, point-wise reply to the query: {question}.\
+    Highlight the important points using properly formatted text, such as bullet points, bold text, or italics where appropriate."""
 
-    retriever = vectordb.as_retriever()
-    #repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
-    repo_id="HuggingFaceH4/zephyr-7b-beta"
-
-    # ChatVectorDBChain_24.10.2024
     #llm = OllamaLLM(model="llama3")
     #llm = Ollama(model="llama3")
-
-    llm = HuggingFaceEndpoint(repo_id="HuggingFaceH4/zephyr-7b-beta", temperature = 0.5, huggingfacehub_api_token=hf_token,)
-
-    qa_template = """You are an AI assistant for answering questions. You are given the following extracted parts of a long document and a question.
-    Provide a conversational answer. If you don't know the answer, just say "Hmm, I'm not sure.".
-    Don't try to make up an answer.
-    Question: {question}
-    =========
-    {context}
-    =========
-    Answer in Markdown:"""
-
-
-    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question", "context"])
-
-    llm_chain = qa_prompt | llm
-
-    qa_chain = ConversationalRetrievalChain.from_llm(
-        llm,
-        retriever = retriever,
-        return_source_documents = True
-    )
-
-    result = qa_chain({"question": question, "chat_history" : chat_history_doc})
-    chat_history_doc.append((question, result["answer"]))
-
+    #repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
     #llm = ChatOpenAI(model = llm_name, temperature = 0.1, api_key = OPENAI_API_KEY)
-    #llm = GoogleGenerativeAI(model = "gemini-pro", google_api_key = GEMINI_API_KEY)
+    #llm = GoogleGenerativeAI(model = "gemini-pro", google_api_key = GEMINI_API_KEY)
     #llm = ChatGoogleGenerativeAI(model = "gemini-1.0-pro", google_api_key = GEMINI_API_KEY, temperature = 0)
 
-
-    # #memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
-    # retriever = vectordb.as_retriever()
-    # qa = ConversationalRetrievalChain.from_llm(llm, retriever = retriever, return_source_documents = True)
-
-
-
-
-    # Replace input() with question variable for Gradio
-    # result = qa({"question": query, "chat_history" : chat_history_doc})
+    llm = HuggingFaceEndpoint(repo_id="HuggingFaceH4/zephyr-7b-beta", temperature=0.5, huggingfacehub_api_token=HF_token)
 
-
-
-
+    retriever = vectordb.as_retriever()
+    memory_doc = ConversationBufferMemory(memory_key="chat_history", return_messages=True, output_key="answer")
+    qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, return_source_documents=True, memory=memory_doc)
+    result = qa({"question": query, "chat_history": chat_history_doc})
+    chat_history_doc.append((question, result["answer"]))
 
     source_docs = result["source_documents"]
     file_names = get_file(source_docs)
-    # file_name = os.path.basename(source_docs[0].metadata['source'])
     file_name = ', '.join([f"{x}" for x in file_names[:3]])
 
-    # print("History : ", history)
-    # print("\n Chat_his : ", chat_history)
-
     return result["answer"] + "\n\nSources : " + file_name
+
+
 
 
 def chat_query_IS(question, chat_history_IS):
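For reference, the rewritten chat_query_doc leans on names defined elsewhere in app.py: vectordb, HF_token, and get_file. A minimal sketch of the module-level setup this hunk appears to assume (the embedding model, the persist directory, and the get_file body below are illustrative guesses, not taken from the commit):

# Sketch of the setup chat_query_doc assumes. The embedding model, the
# persist directory, and the get_file body are illustrative placeholders;
# only the imports and the variable names come from the diff itself.
import os

from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.vectorstores import Chroma

HF_token = os.environ["HF_TOKEN"]  # assumed to be supplied as a Space secret

embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vectordb = Chroma(persist_directory="docs/chroma/", embedding_function=embeddings)

def get_file(source_documents):
    # One plausible implementation: distinct file names behind the retrieved chunks.
    return list({os.path.basename(doc.metadata["source"]) for doc in source_documents})

One detail in the added lines worth flagging: with return_source_documents=True the chain returns two output keys (answer and source_documents), and ConversationBufferMemory raises a "one output key expected" ValueError unless it is told which key to store; hence output_key="answer" on memory_doc.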
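The removed "# Replace input() with question variable for Gradio" comment indicates chat_query_doc is called from a Gradio UI. A hypothetical wiring sketch, assuming a Blocks layout (the component names below are illustrative, not from app.py):

# Hypothetical Gradio wiring for chat_query_doc; names are illustrative.
import gradio as gr

chat_history_doc = []  # (question, answer) tuples, appended to by chat_query_doc

def respond(message, chat_log):
    # chat_query_doc returns the answer text with a "Sources : ..." suffix.
    reply = chat_query_doc(message, chat_history_doc)
    chat_log.append((message, reply))
    return "", chat_log

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Ask a question about the documents")
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])

demo.launch()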