Update app.py
app.py CHANGED
@@ -181,18 +181,19 @@ def self_reasoning(query, context):
     reasoning_prompt = f"""
 You are an AI assistant that analyzes the context provided to answer the user's query comprehensively and clearly.
 Answer in a concise, factual way using the terminology from the context. Avoid extra explanation unless explicitly asked.
-YOU MUST mention the
+YOU MUST mention the document file name (e.g., tools.html, refguide.html) in your answer.
+
 ### Example 1:
-**Question:** What is the purpose of the
+**Question:** What is the purpose of the Monte GUI?
 **Context:**
-[
-**Answer:** The
+[From `tools.html`] The Monte GUI provides interfaces for setting up trajectory parameters and viewing output results.
+**Answer:** The Monte GUI helps users configure trajectory parameters and visualize results. (From `tools.html`)
 
 ### Example 2:
-**Question:** How do you
+**Question:** How do you perform covariance analysis in Monte?
 **Context:**
-[
-**Answer:**
+[From `designEdition.html`] The Monte Design Edition includes support for statistical maneuver and covariance analysis during the design phase.
+**Answer:** Monte supports covariance analysis through the Design Edition. (From `designEdition.html`)
 
 ### Now answer:
 **Question:** {query}
@@ -200,6 +201,7 @@ def self_reasoning(query, context):
 {context}
 
 **Answer:**
+
 """
     try:
         result = llm._call(reasoning_prompt)
@@ -218,7 +220,7 @@ def faiss_search_with_keywords(query):
     refined_query = " ".join([keyword[0] for keyword in keywords])
     retriever = vectorstore_global.as_retriever(search_kwargs={"k": 13})
     docs = retriever.get_relevant_documents(refined_query)
-    context= '\n\n'.join([f"[
+    context = '\n\n'.join([f"[From `{doc.metadata.get('document', 'unknown.html')}`] {doc.page_content}" for doc in docs])
     return self_reasoning(query, context)
 
 def get_reranker():
@@ -238,7 +240,7 @@ def faiss_search_with_reasoning(query):
     scores = reranker.predict(pairs)
     reranked_docs = sorted(zip(scores, docs), key=lambda x: x[0], reverse=True)
     top_docs = [doc for _, doc in reranked_docs[:5]]
-    context = '\n\n'.join([f"[
+    context = '\n\n'.join([f"[From `{doc.metadata.get('document', 'unknown.html')}`] {doc.page_content.strip()}" for doc in top_docs])
     return self_reasoning(query, context)
 
 faiss_keyword_tool = Tool(
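
Both search helpers now build the context string with the same pattern: every retrieved chunk is prefixed with the name of the file it came from, which is what lets the updated prompt demand the file name in the answer. Below is a minimal sketch (not part of the commit) of what that join produces; the Doc class is a stand-in for the retriever's Document objects, and the 'document' metadata key, sample texts, and file names are taken from the diff above, with 'unknown.html' as the fallback when the metadata is missing.

from dataclasses import dataclass, field

@dataclass
class Doc:
    # Stand-in for the retriever's Document objects: page text plus metadata.
    page_content: str
    metadata: dict = field(default_factory=dict)

docs = [
    Doc("The Monte GUI provides interfaces for setting up trajectory parameters.",
        {"document": "tools.html"}),
    Doc("The Design Edition supports statistical maneuver and covariance analysis.",
        {"document": "designEdition.html"}),
    Doc("A chunk whose source metadata was never set."),
]

# The join expression the commit introduces: each chunk is tagged with its source
# file, falling back to 'unknown.html' when the 'document' key is absent.
context = "\n\n".join(
    f"[From `{doc.metadata.get('document', 'unknown.html')}`] {doc.page_content.strip()}"
    for doc in docs
)
print(context)
# [From `tools.html`] The Monte GUI provides interfaces for setting up trajectory parameters.
#
# [From `designEdition.html`] The Design Edition supports statistical maneuver and covariance analysis.
#
# [From `unknown.html`] A chunk whose source metadata was never set.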