Rohit1412 commited on
Commit
22257f4
·
verified ·
1 Parent(s): d642f0a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -52,7 +52,7 @@ def chunk_text(text, chunk_size=500):
52
  return chunks
53
 
54
  def get_document_embeddings(documents):
55
- """Compute embeddings for documents, using cache if available."""
56
  embeddings = []
57
  for doc in documents:
58
  if doc in embedding_cache:
@@ -61,7 +61,8 @@ def get_document_embeddings(documents):
61
  emb = retriever_model.encode(doc, convert_to_tensor=True)
62
  embedding_cache[doc] = emb
63
  embeddings.append(emb)
64
- return embeddings
 
65
 
66
  def rag_pipeline(question, pdf_files):
67
  """Optimized RAG pipeline with caching, chunking, and improved retrieval."""
@@ -130,4 +131,4 @@ with gr.Blocks() as demo:
130
 
131
  submit_button.click(fn=rag_pipeline, inputs=[question_input, pdf_input], outputs=response_output)
132
 
133
- demo.launch(debug = True)
 
52
  return chunks
53
 
54
  def get_document_embeddings(documents):
55
+ """Compute embeddings for documents, using cache if available, and return a stacked tensor."""
56
  embeddings = []
57
  for doc in documents:
58
  if doc in embedding_cache:
 
61
  emb = retriever_model.encode(doc, convert_to_tensor=True)
62
  embedding_cache[doc] = emb
63
  embeddings.append(emb)
64
+ # Stack the list of tensors into a single tensor of shape (n_docs, embedding_dim)
65
+ return torch.stack(embeddings)
66
 
67
  def rag_pipeline(question, pdf_files):
68
  """Optimized RAG pipeline with caching, chunking, and improved retrieval."""
 
131
 
132
  submit_button.click(fn=rag_pipeline, inputs=[question_input, pdf_input], outputs=response_output)
133
 
134
+ demo.launch(debug=True)