gourisankar85 committed
Commit c737bb6 · verified · 1 Parent(s): 3d7eef8

Upload 2 files

retriever/chat_manager.py CHANGED
@@ -1,43 +1,57 @@
+from datetime import datetime
 import logging
 from typing import List
 from globals import app_config
 
-def chat_response(query: str, selected_docs: List[str], history: str) -> str:
+def chat_response(query: str, selected_docs: List[str], history: List[dict]) -> List[dict]:
     """
     Generate a chat response based on the user's query and selected documents.
 
     Args:
         query (str): The user's query.
         selected_docs (List[str]): List of selected document filenames from the dropdown.
-        history (str): The chat history.
-        model_name (str): The name of the LLM model to use for generation.
+        history (List[dict]): The chat history as a list of {'role': str, 'content': str} dictionaries.
 
     Returns:
-        str: Updated chat history with the new response.
+        List[dict]: Updated chat history with the new response in 'messages' format.
     """
+    timestamp = datetime.now().strftime("%H:%M:%S")
+
+    # Handle empty query
     if not query:
-        return history + "\n" + "Response: Please enter a query." if history else "Response: Please enter a query."
+        return history + [{"role": "assistant", "content": "Please enter a query."}]
 
+    # Handle no selected documents
     if not selected_docs:
-        return history + "\n" + "LLM: Please select at least one document." if history else "Response: Please select at least one document."
+        return history + [{"role": "assistant", "content": "Please select at least one document."}]
 
     # Retrieve the top 5 chunks based on the query and selected documents
     top_k_results = app_config.doc_manager.retrieve_top_k(query, selected_docs, k=5)
 
     if not top_k_results:
-        return history + "\n" + f"User: {query}\nLLM: No relevant information found in the selected documents." if history else f"User: {query}\nLLM: No relevant information found in the selected documents."
+        return history + [
+            {"role": "user", "content": f"{query}"},
+            {"role": "assistant", "content": "No relevant information found in the selected documents."}
+        ]
 
     # Send the top K results to the LLM to generate a response
     try:
        llm_response, source_docs = app_config.gen_llm.generate_response(query, top_k_results)
     except Exception as e:
-        return history + "\n" + f"User: {query}\nLLM: Error generating response: {str(e)}" if history else f"User: {query}\nLLM: Error generating response: {str(e)}"
-
-    # Format the response for the chat history
-    response = f"{llm_response}\n"
-    '''for i, doc in enumerate(source_docs, 1):
-        doc_id = doc.metadata.get('doc_id', 'Unknown')
-        filename = next((name for name, d_id in app_config.doc_manager.document_ids.items() if d_id == doc_id), 'Unknown')
-        response += f"{i}. {filename}: {doc.page_content[:100]}...\n"'''
-
-    return history + "\n" + f"User: {query}\nResponse: {response}" if history else f"User: {query}\nResponse: {response}"
+        return history + [
+            {"role": "user", "content": f"{query}"},
+            {"role": "assistant", "content": f"Error generating response: {str(e)}"}
+        ]
+
+    # Format the response (uncomment and adapt if you want to include source docs)
+    response = llm_response
+    # for i, doc in enumerate(source_docs, 1):
+    #     doc_id = doc.metadata.get('doc_id', 'Unknown')
+    #     filename = next((name for name, d_id in app_config.doc_manager.document_ids.items() if d_id == doc_id), 'Unknown')
+    #     response += f"\n{i}. {filename}: {doc.page_content[:100]}..."
+
+    # Return updated history with new user query and LLM response
+    return history + [
+        {"role": "user", "content": f"{query}"},
+        {"role": "assistant", "content": response}
+    ]
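
The rewritten chat_response now returns the full history as a list of {'role': ..., 'content': ...} dictionaries, which matches the 'messages' format consumed by chat UIs such as Gradio's Chatbot with type="messages" (an assumption about the surrounding app); note that timestamp is computed in this revision but not yet used. A minimal usage sketch, assuming app_config has already been initialized with indexed documents; the filename below is hypothetical:

    from retriever.chat_manager import chat_response

    history: list = []

    # First turn: the returned list is the full updated history
    history = chat_response(
        query="What is the refund policy?",
        selected_docs=["policy.pdf"],  # hypothetical filename from the dropdown
        history=history,
    )

    # Later turns just pass the accumulated history back in
    history = chat_response("And for digital goods?", ["policy.pdf"], history)

    for message in history:
        print(f"{message['role']}: {message['content']}")
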
retriever/document_manager.py CHANGED
@@ -104,10 +104,11 @@ class DocumentManager:
         top_k_results = all_results[:k]
 
         # Log the list of retrieved documents
-        logging.info("Retrieved top K documents:")
+        logging.info(f"Result from search :{all_results} ")
+        logging.info(f"Retrieved top {k} documents:")
         for i, result in enumerate(top_k_results, 1):
             doc_id = result['metadata'].get('doc_id', 'Unknown')
             filename = next((name for name, d_id in self.document_ids.items() if d_id == doc_id), 'Unknown')
-            logging.info(f"{i}. Filename: {filename}, Doc ID: {doc_id}, Score: {result['score']:.4f}, Text: {result['text'][:100]}...")
+            logging.info(f"{i}. Filename: {filename}, Doc ID: {doc_id}, Score: {result['score']:.4f}, Text: {result['text'][:200]}...")
 
         return top_k_results
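
For reference, a standalone sketch of the record shape this logging loop assumes: each result carries 'metadata', 'score', and 'text' keys. All values below are invented for illustration.

    import logging

    logging.basicConfig(level=logging.INFO)

    document_ids = {"policy.pdf": "doc-001"}  # hypothetical filename -> doc_id map
    top_k_results = [
        {"metadata": {"doc_id": "doc-001"}, "score": 0.8731,
         "text": "Refunds are issued within 30 days of purchase..."},
    ]

    logging.info(f"Retrieved top {len(top_k_results)} documents:")
    for i, result in enumerate(top_k_results, 1):
        doc_id = result["metadata"].get("doc_id", "Unknown")
        filename = next((name for name, d_id in document_ids.items() if d_id == doc_id), "Unknown")
        logging.info(
            f"{i}. Filename: {filename}, Doc ID: {doc_id}, "
            f"Score: {result['score']:.4f}, Text: {result['text'][:200]}..."
        )

One design note: dumping the entire all_results list at INFO level can be very noisy once chunks get large; DEBUG is a common choice for that kind of full dump.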