# app.py
import streamlit as st
import os
from io import BytesIO
from PyPDF2 import PdfReader
from PyPDF2.errors import PdfReadError
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.llms import HuggingFaceHub
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
import faiss
import uuid
from dotenv import load_dotenv
import requests
import pandas as pd
from pandas.errors import ParserError
from docx import Document
# Load environment variables
load_dotenv()
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN", "").strip()
RAG_ACCESS_KEY = os.getenv("RAG_ACCESS_KEY")
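# Both values come from a local .env file; a minimal example (placeholder values only):
#   HUGGINGFACEHUB_API_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx
#   RAG_ACCESS_KEY=choose-a-shared-secret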
if not HUGGINGFACEHUB_API_TOKEN:
st.warning("Hugging Face API token not found! Please set HUGGINGFACEHUB_API_TOKEN in your .env file.")
# Initialize session state
if "vectorstore" not in st.session_state:
st.session_state.vectorstore = None
if "history" not in st.session_state:
st.session_state.history = []
if "authenticated" not in st.session_state:
st.session_state.authenticated = False
if "uploaded_files" not in st.session_state:
st.session_state.uploaded_files = []
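# vectorstore holds the shared FAISS index across reruns, history the (question, answer) pairs,
# authenticated the sidebar access gate, and uploaded_files the names already embedded.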
# File processing logic
def process_input(input_data):
# Initialize progress bar and status
progress_bar = st.progress(0)
status = st.empty()
# Step 1: Read file in memory
status.text("Reading file...")
progress_bar.progress(0.20)
file_name = input_data.name
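    # Derive the extension from the uploaded file's name, e.g. "Report.PDF" -> "pdf".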
file_extension = file_name.lower().split('.')[-1]
documents = ""
# Step 2: Extract text based on file type
status.text("Extracting text...")
progress_bar.progress(0.40)
try:
if file_extension == 'pdf':
try:
pdf_reader = PdfReader(BytesIO(input_data.read()))
                # Join pages with newlines so text from adjacent pages is not glued together.
                documents = "\n".join([page.extract_text() or "" for page in pdf_reader.pages])
except PdfReadError as e:
raise RuntimeError(f"Failed to read PDF: {str(e)}")
elif file_extension in ['xls', 'xlsx']:
try:
df = pd.read_excel(BytesIO(input_data.read()), engine='openpyxl')
documents = df.to_string(index=False)
            # read_excel usually signals a corrupt or unsupported workbook with ValueError rather than ParserError.
            except (ParserError, ValueError) as e:
raise RuntimeError(f"Failed to parse Excel file: {str(e)}")
elif file_extension in ['doc', 'docx']:
try:
doc = Document(BytesIO(input_data.read()))
documents = "\n".join([para.text for para in doc.paragraphs if para.text])
except Exception as e:
raise RuntimeError(f"Failed to read DOC/DOCX: {str(e)}")
        elif file_extension == 'txt':
            # Read the bytes once; a second read() on the already-consumed buffer would return b"".
            raw_bytes = input_data.read()
            try:
                documents = raw_bytes.decode('utf-8')
            except UnicodeDecodeError:
                documents = raw_bytes.decode('latin-1')
else:
raise ValueError(f"Unsupported file type: {file_extension}")
if not documents.strip():
raise RuntimeError("No text extracted from the file.")
    except (ValueError, RuntimeError):
        # Re-raise as-is so the caller's ValueError/RuntimeError handlers see the original message.
        raise
    except Exception as e:
        raise RuntimeError(f"Failed to process file: {str(e)}")
# Step 3: Split text
status.text("Splitting text into chunks...")
progress_bar.progress(0.60)
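    # ~1,000-character chunks with 100-character overlap keep sentences that span a boundary retrievable.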
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
texts = text_splitter.split_text(documents)
chunk_count = len(texts)
if chunk_count == 0:
raise RuntimeError("No text chunks created for embedding.")
# Step 4: Create embeddings
status.text(f"Embedding {chunk_count} chunks...")
progress_bar.progress(0.80)
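    # all-mpnet-base-v2 produces 768-dimensional sentence embeddings and runs locally on CPU.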
try:
hf_embeddings = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-mpnet-base-v2",
model_kwargs={'device': 'cpu'}
)
except Exception as e:
raise RuntimeError(f"Failed to initialize embeddings: {str(e)}")
# Step 5: Initialize or append to FAISS vector store
status.text("Building or updating vector store...")
progress_bar.progress(1.0)
try:
if st.session_state.vectorstore is None:
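            # Probe the embedding size with a throwaway query so the FAISS index matches the model.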
dimension = len(hf_embeddings.embed_query("test"))
index = faiss.IndexFlatL2(dimension)
vector_store = FAISS(
embedding_function=hf_embeddings,
index=index,
docstore=InMemoryDocstore({}),
index_to_docstore_id={}
)
else:
vector_store = st.session_state.vectorstore
# Add texts to vector store
uuids = [str(uuid.uuid4()) for _ in texts]
vector_store.add_texts(texts, ids=uuids)
except Exception as e:
raise RuntimeError(f"Failed to update vector store: {str(e)}")
# Complete processing
status.text("Processing complete!")
st.session_state.uploaded_files.append(file_name)
st.success(f"Embedded {chunk_count} chunks from {file_name}")
return vector_store
# Question-answering logic
def answer_question(vectorstore, query):
if not HUGGINGFACEHUB_API_TOKEN:
raise RuntimeError("Missing Hugging Face API token. Please set it in your .env file.")
try:
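        # Mixtral-8x7B-Instruct is called through the hosted Inference API; temperature and
        # max_length are forwarded to the endpoint as generation parameters.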
llm = HuggingFaceHub(
repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
model_kwargs={"temperature": 0.7, "max_length": 512},
huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN
)
    # Construction errors (e.g. an unusable token or unsupported repo) typically surface as ValueError rather than HTTPError.
    except (ValueError, requests.exceptions.HTTPError) as e:
raise RuntimeError(f"Failed to initialize LLM: {str(e)}. Check model availability or API token.")
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
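    # k=3: each answer is grounded in the three chunks most similar to the question.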
prompt_template = PromptTemplate(
template="Use the context to answer the question concisely:\n\nContext: {context}\n\nQuestion: {question}\n\nAnswer:",
input_variables=["context", "question"]
)
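    # The "stuff" chain pastes all retrieved chunks into the prompt's {context} slot in a single call.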
qa_chain = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
return_source_documents=False,
chain_type_kwargs={"prompt": prompt_template}
)
try:
result = qa_chain({"query": query})
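        # Keep only the text after the final "Answer:" marker in case the model repeats the prompt.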
return result["result"].split("Answer:")[-1].strip()
    except (ValueError, requests.exceptions.HTTPError) as e:
raise RuntimeError(f"Error querying LLM: {str(e)}. Please try again or check model endpoint.")
# Sidebar with BSNL logo, authentication, and controls
with st.sidebar:
try:
st.image("bsnl_logo.png", width=200)
except Exception:
st.warning("BSNL logo not found.")
st.header("RAG Control Panel")
api_key_input = st.text_input("Enter RAG Access Key", type="password")
# Blue button styles
st.markdown("""
<style>
.auth-button button, .delete-button button {
background-color: #007BFF !important;
color: white !important;
font-weight: bold;
border-radius: 8px;
padding: 10px 20px;
border: none;
transition: all 0.3s ease;
width: 100%;
}
.auth-button button:hover, .delete-button button:hover {
background-color: #0056b3 !important;
transform: scale(1.05);
}
</style>
""", unsafe_allow_html=True)
# Authenticate button
with st.container():
st.markdown('<div class="auth-button">', unsafe_allow_html=True)
if st.button("Authenticate"):
            if RAG_ACCESS_KEY is not None and api_key_input == RAG_ACCESS_KEY:
st.session_state.authenticated = True
st.success("Authentication successful!")
else:
st.error("Invalid API key.")
st.markdown('</div>', unsafe_allow_html=True)
if st.session_state.authenticated:
# Display uploaded files
if st.session_state.uploaded_files:
st.subheader("Uploaded Files")
for file_name in st.session_state.uploaded_files:
st.write(f"- {file_name}")
# File uploader
input_data = st.file_uploader("Upload a file (PDF, XLS/XLSX, DOC/DOCX, TXT)", type=["pdf", "xls", "xlsx", "doc", "docx", "txt"])
if st.button("Process File") and input_data is not None:
if input_data.name in st.session_state.uploaded_files:
st.warning(f"File '{input_data.name}' has already been processed. Please upload a different file or delete the vector store.")
else:
try:
vector_store = process_input(input_data)
st.session_state.vectorstore = vector_store
except PermissionError as e:
st.error(f"File upload failed: Permission error - {str(e)}. Check file system access.")
except OSError as e:
st.error(f"File upload failed: OS error - {str(e)}. Check server configuration.")
except ValueError as e:
st.error(f"File upload failed: {str(e)} (Invalid file format).")
except RuntimeError as e:
st.error(f"File upload failed: {str(e)} (Exception type: {type(e).__name__}).")
except Exception as e:
st.error(f"File upload failed: {str(e)} (Exception type: {type(e).__name__}). Please try again or check server logs.")
# Delete vector store button
if st.session_state.vectorstore is not None:
st.markdown('<div class="delete-button">', unsafe_allow_html=True)
if st.button("Delete Vector Store"):
st.session_state.vectorstore = None
st.session_state.uploaded_files = []
st.success("Vector store deleted successfully.")
st.markdown('</div>', unsafe_allow_html=True)
st.subheader("Chat History")
for i, (q, a) in enumerate(st.session_state.history):
st.write(f"**Q{i+1}:** {q}")
st.write(f"**A{i+1}:** {a}")
st.markdown("---")
# Main app UI
def main():
st.markdown("""
<style>
@import url('https://fonts.googleapis.com/css2?family=Roboto:wght@400;700&display=swap');
.stApp {
background-color: #FFFFFF;
font-family: 'Roboto', sans-serif;
color: #333333;
}
.stTextInput > div > div > input {
background-color: #FFFFFF;
color: #333333;
border-radius: 8px;
border: 1px solid #007BFF;
padding: 10px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.stButton > button {
background-color: #007BFF;
color: white;
border-radius: 8px;
padding: 10px 20px;
border: none;
transition: all 0.3s ease;
box-shadow: 0 2px 4px rgba(0,0,0,0.2);
}
.stButton > button:hover {
background-color: #0056b3;
transform: scale(1.05);
}
.stSidebar {
background-color: #F5F5F5;
padding: 20px;
border-right: 2px solid #007BFF;
}
</style>
""", unsafe_allow_html=True)
st.title("RAG Q&A App with Mistral AI")
st.markdown("Welcome to the BSNL RAG App! Upload a PDF, XLS/XLSX, DOC/DOCX, or TXT file and ask questions. Files are stored in the vector store until explicitly deleted.", unsafe_allow_html=True)
if not st.session_state.authenticated:
st.warning("Please authenticate using the sidebar.")
return
if st.session_state.vectorstore is None:
st.info("Please upload and process a file.")
return
query = st.text_input("Enter your question:")
if st.button("Submit") and query:
with st.spinner("Generating answer..."):
try:
answer = answer_question(st.session_state.vectorstore, query)
st.session_state.history.append((query, answer))
st.write("**Answer:**", answer)
except Exception as e:
st.error(f"Error generating answer: {str(e)}")
if __name__ == "__main__":
main()