Update app.py
app.py
CHANGED
@@ -13,10 +13,6 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.vectorstores import FAISS
 from langchain.docstore.document import Document
-from langchain.chains import LLMChain, RetrievalQA, ConversationalRetrievalChain
-from langchain.memory import ConversationBufferMemory
-from langchain.prompts import PromptTemplate
-from langchain_community.llms import HuggingFaceHub
 
 # Import document parsers
 import PyPDF2
@@ -34,12 +30,8 @@ HF_TOKEN = os.getenv("hf_token")
 if not HF_TOKEN:
     raise ValueError("HuggingFace token not found in environment variables")
 
-# Initialize HuggingFace
-llm = HuggingFaceHub(
-    repo_id="meta-llama/Llama-3.1-8B-Instruct",
-    huggingfacehub_api_token=HF_TOKEN,
-    model_kwargs={"temperature": 0.7, "max_length": 512}
-)
 
 # Initialize embeddings
 embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
@@ -205,60 +197,29 @@ class IngestionAgent:
         self.message_bus.publish(response)
 
 class RetrievalAgent:
-    """Agent responsible for embedding and semantic retrieval
 
     def __init__(self, message_bus: MessageBus):
         self.name = "RetrievalAgent"
         self.message_bus = message_bus
         self.message_bus.subscribe(self.name, self.handle_message)
         self.vector_store = None
-        self.retriever = None
-        self.qa_chain = None
-        self.conversation_chain = None
-        self.memory = ConversationBufferMemory(
-            memory_key="chat_history",
-            return_messages=True,
-            output_key="answer"
-        )
 
     def handle_message(self, message: MCPMessage):
         """Handle incoming MCP messages"""
         if message.type == "INGESTION_COMPLETE":
             self.create_vector_store(message)
         elif message.type == "RETRIEVAL_REQUEST":
-            self.
 
     def create_vector_store(self, message: MCPMessage):
-        """Create vector store
         documents = message.payload.get("documents", [])
 
         if documents:
             try:
                 self.vector_store = FAISS.from_documents(documents, embeddings)
-                self.retriever = self.vector_store.as_retriever(
-                    search_type="similarity",
-                    search_kwargs={"k": 3}
-                )
-
-                # Create QA chain
-                self.qa_chain = RetrievalQA.from_chain_type(
-                    llm=llm,
-                    chain_type="stuff",
-                    retriever=self.retriever,
-                    return_source_documents=True,
-                    verbose=True
-                )
-
-                # Create conversational chain
-                self.conversation_chain = ConversationalRetrievalChain.from_llm(
-                    llm=llm,
-                    retriever=self.retriever,
-                    memory=self.memory,
-                    return_source_documents=True,
-                    verbose=True
-                )
-
-                logger.info(f"Vector store and chains created with {len(documents)} documents")
 
                 # Notify completion
                 response = MCPMessage(
@@ -272,60 +233,102 @@ class RetrievalAgent:
             except Exception as e:
                 logger.error(f"Error creating vector store: {e}")
 
-    def
-        """
         query = message.payload.get("query", "")
 
-        if
 
         try:
-            result = self.qa_chain({"query": query})
-            answer = result["result"]
-            source_docs = result.get("source_documents", [])
-
-            # Format sources
-            sources = []
-            for doc in source_docs:
-                sources.append({
-                    "content": doc.page_content[:200] + "...",
-                    "source": doc.metadata.get("source", "Unknown")
-                })
 
             response = MCPMessage(
                 sender=self.name,
                 receiver="CoordinatorAgent",
-                msg_type="
                 trace_id=message.trace_id,
                 payload={
                     "query": query,
-                    "answer": answer,
-                    "sources": sources
                 }
             )
             self.message_bus.publish(response)
 
         except Exception as e:
-            logger.error(f"Error
-            # Send error
             response = MCPMessage(
                 sender=self.name,
                 receiver="CoordinatorAgent",
-                msg_type="
                 trace_id=message.trace_id,
-                payload={
-                    "query": query,
-                    "answer": f"Error processing query: {str(e)}",
-                    "sources": []
-                }
             )
             self.message_bus.publish(response)
 
@@ -336,15 +339,15 @@ class CoordinatorAgent:
         self.name = "CoordinatorAgent"
         self.message_bus = message_bus
         self.message_bus.subscribe(self.name, self.handle_message)
         self.vector_store_ready = False
-        self.current_response = None
 
     def handle_message(self, message: MCPMessage):
         """Handle incoming MCP messages"""
         if message.type == "VECTORSTORE_READY":
             self.vector_store_ready = True
-        elif message.type == "
-            self.
 
     def process_files(self, files):
         """Process uploaded files"""
@@ -364,49 +367,55 @@ class CoordinatorAgent:
 
         return f"Processing {len(files)} files: {', '.join([os.path.basename(fp) for fp in file_paths])}"
 
-    def handle_query(self, query: str):
-        """Handle user query
         if not self.vector_store_ready:
 
         # Send retrieval request
         message = MCPMessage(
             sender=self.name,
             receiver="RetrievalAgent",
             msg_type="RETRIEVAL_REQUEST",
-            payload={"query": query
         )
         self.message_bus.publish(message)
 
-        # Wait for response
         import time
-        timeout =
         start_time = time.time()
 
-        while not self.
            time.sleep(0.1)
 
-        if self.
        else:
 
 # Initialize agents
 ingestion_agent = IngestionAgent(message_bus)
 retrieval_agent = RetrievalAgent(message_bus)
 coordinator_agent = CoordinatorAgent(message_bus)
 
 def create_interface():
@@ -453,23 +462,26 @@ def create_interface():
         font-size: 0.9rem;
     }
 
-    /* Chat area */
    .chat-container {
        flex: 1;
        display: flex;
        flex-direction: column;
-        max-width:
        margin: 0 auto;
        width: 100%;
        padding: 1rem;
    }
 
-    /* Chatbot styling */
    .gradio-chatbot {
        background: transparent !important;
        border: none !important;
        margin-bottom: 1rem;
    }
 
    /* Input area */
@@ -479,6 +491,8 @@ def create_interface():
        padding: 1rem;
        border: 1px solid rgba(255, 193, 7, 0.2);
        backdrop-filter: blur(10px);
    }
 
    /* File upload */
@@ -491,7 +505,16 @@ def create_interface():
        transition: all 0.3s ease;
    }
 
-    /* Buttons */
    .primary-btn {
        background: linear-gradient(135deg, #ffc107 0%, #ff8f00 100%) !important;
        color: #000000 !important;
@@ -518,6 +541,19 @@ def create_interface():
        color: #ffc107;
        text-align: center;
    }
    """,
    title="Agentic RAG Assistant"
    ) as iface:
@@ -528,48 +564,92 @@ def create_interface():
        gr.HTML("""
        <div class="header">
            <h1>🤖 Agentic RAG Assistant</h1>
-            <p>Upload documents and ask questions - powered by
        </div>
        """)
 
-        # Main chat
        with gr.Row():
 
                chatbot = gr.Chatbot(
-                    show_copy_button=True
                )
 
-                # Input area
-                with gr.
                    )
 
        # State to track document processing
        doc_processed = gr.State(False)
 
        # Event handlers
-        def
            if not files:
                return gr.update(visible=False), False
 
@@ -607,37 +687,46 @@ def create_interface():
 
        def respond(message, history, doc_ready):
            if not doc_ready:
 
            if not message.strip():
                return history, message
 
-            #
-            # Add to chat history
-            history.append([message, response])
 
 
-            #
            inputs=[file_upload],
            outputs=[processing_status, doc_processed]
        )
 
-        # Send message
        send_btn.click(
            respond,
            inputs=[msg_input, chatbot, doc_processed],
-            outputs=[chatbot, msg_input]
        )
 
        msg_input.submit(
            respond,
            inputs=[msg_input, chatbot, doc_processed],
-            outputs=[chatbot, msg_input]
        )
 
        return iface
@@ -13,10 +13,6 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.vectorstores import FAISS
 from langchain.docstore.document import Document
 
 # Import document parsers
 import PyPDF2
@@ -34,12 +30,8 @@ HF_TOKEN = os.getenv("hf_token")
 if not HF_TOKEN:
     raise ValueError("HuggingFace token not found in environment variables")
 
+# Initialize HuggingFace Inference Client
+client = InferenceClient(model="meta-llama/Llama-3.1-8B-Instruct", token=HF_TOKEN)
 
 # Initialize embeddings
 embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
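For reference, the commit later consumes this client with client.chat_completion(..., stream=True) inside LLMResponseAgent, reading tokens from chunk.choices[0].delta.content. A minimal standalone sketch of that huggingface_hub streaming call (the prompt and printing are illustrative, not taken from the Space):

# Minimal sketch: streaming chat completion with huggingface_hub's InferenceClient.
# Assumes the "hf_token" environment variable is set; the prompt is illustrative.
import os
from huggingface_hub import InferenceClient

client = InferenceClient(model="meta-llama/Llama-3.1-8B-Instruct", token=os.getenv("hf_token"))

stream = client.chat_completion(
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
    max_tokens=64,
    temperature=0.7,
    stream=True,
)

for chunk in stream:
    # Each chunk carries an incremental delta; content can be None on the final chunk.
    token = chunk.choices[0].delta.content
    if token:
        print(token, end="", flush=True)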
@@ -205,60 +197,29 @@ class IngestionAgent:
         self.message_bus.publish(response)
 
 class RetrievalAgent:
+    """Agent responsible for embedding and semantic retrieval"""
 
     def __init__(self, message_bus: MessageBus):
         self.name = "RetrievalAgent"
         self.message_bus = message_bus
         self.message_bus.subscribe(self.name, self.handle_message)
         self.vector_store = None
 
     def handle_message(self, message: MCPMessage):
         """Handle incoming MCP messages"""
         if message.type == "INGESTION_COMPLETE":
             self.create_vector_store(message)
         elif message.type == "RETRIEVAL_REQUEST":
+            self.retrieve_context(message)
 
     def create_vector_store(self, message: MCPMessage):
+        """Create vector store from processed documents"""
         documents = message.payload.get("documents", [])
 
         if documents:
             try:
                 self.vector_store = FAISS.from_documents(documents, embeddings)
+                logger.info(f"Vector store created with {len(documents)} documents")
 
                 # Notify completion
                 response = MCPMessage(
@@ -272,60 +233,102 @@ class RetrievalAgent:
             except Exception as e:
                 logger.error(f"Error creating vector store: {e}")
 
+    def retrieve_context(self, message: MCPMessage):
+        """Retrieve relevant context for a query"""
         query = message.payload.get("query", "")
+        k = message.payload.get("k", 3)
 
+        if self.vector_store and query:
+            try:
+                docs = self.vector_store.similarity_search(query, k=k)
+                context = [{"content": doc.page_content, "source": doc.metadata.get("source", "")}
+                           for doc in docs]
+
+                response = MCPMessage(
+                    sender=self.name,
+                    receiver="LLMResponseAgent",
+                    msg_type="CONTEXT_RESPONSE",
+                    trace_id=message.trace_id,
+                    payload={
+                        "query": query,
+                        "retrieved_context": context,
+                        "top_chunks": [doc.page_content for doc in docs]
+                    }
+                )
+                self.message_bus.publish(response)
+            except Exception as e:
+                logger.error(f"Error retrieving context: {e}")
+
+class LLMResponseAgent:
+    """Agent responsible for generating LLM responses"""
+
+    def __init__(self, message_bus: MessageBus):
+        self.name = "LLMResponseAgent"
+        self.message_bus = message_bus
+        self.message_bus.subscribe(self.name, self.handle_message)
+
+    def handle_message(self, message: MCPMessage):
+        """Handle incoming MCP messages"""
+        if message.type == "CONTEXT_RESPONSE":
+            self.generate_response(message)
+
+    def generate_response(self, message: MCPMessage):
+        """Generate response using retrieved context"""
+        query = message.payload.get("query", "")
+        context = message.payload.get("retrieved_context", [])
+
+        # Build context string
+        context_text = "\n\n".join([f"Source: {ctx['source']}\nContent: {ctx['content']}"
+                                    for ctx in context])
 
+        # Create messages for conversational format
+        messages = [
+            {
+                "role": "system",
+                "content": "You are a helpful assistant. Based on the provided context below, answer the user's question accurately and comprehensively. Cite the sources if possible.",
+            },
+            {
+                "role": "user",
+                "content": f"Context:\n\n{context_text}\n\nQuestion: {query}"
+            }
+        ]
+
         try:
+            # Use client.chat_completion for conversational models
+            response_stream = client.chat_completion(
+                messages=messages,
+                max_tokens=512,
+                temperature=0.7,
+                stream=True
+            )
 
+            # Send streaming response
             response = MCPMessage(
                 sender=self.name,
                 receiver="CoordinatorAgent",
+                msg_type="LLM_RESPONSE_STREAM",
                 trace_id=message.trace_id,
                 payload={
                     "query": query,
+                    "response_stream": response_stream,
+                    "context": context
                 }
             )
             self.message_bus.publish(response)
 
         except Exception as e:
+            logger.error(f"Error generating response: {e}")
+            # Send an error stream back
+            error_msg = f"Error from LLM: {e}"
+            def error_generator():
+                yield error_msg
+
             response = MCPMessage(
                 sender=self.name,
                 receiver="CoordinatorAgent",
+                msg_type="LLM_RESPONSE_STREAM",
                 trace_id=message.trace_id,
+                payload={"response_stream": error_generator()}
             )
             self.message_bus.publish(response)
 
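The agents above communicate only through MCPMessage objects on the shared message_bus; the MessageBus and MCPMessage classes are defined earlier in app.py and are not part of this diff. A hypothetical sketch of the contract their usage implies (subscribe by agent name, publish routes on the message's receiver; field names are inferred from the calls shown, not the Space's actual code):

# Hypothetical sketch of the MessageBus/MCPMessage contract assumed by the agents.
# The real classes live elsewhere in app.py and may differ in detail.
import uuid
from dataclasses import dataclass, field
from typing import Any, Callable, Dict

@dataclass
class MCPMessage:
    sender: str
    receiver: str
    msg_type: str
    payload: Dict[str, Any]
    trace_id: str = field(default_factory=lambda: str(uuid.uuid4()))

    @property
    def type(self) -> str:
        # Agents read message.type while the constructor takes msg_type.
        return self.msg_type

class MessageBus:
    def __init__(self):
        self._handlers: Dict[str, Callable[[MCPMessage], None]] = {}

    def subscribe(self, name: str, handler: Callable[[MCPMessage], None]) -> None:
        self._handlers[name] = handler

    def publish(self, message: MCPMessage) -> None:
        # Deliver synchronously to the receiver's registered handler, if any.
        handler = self._handlers.get(message.receiver)
        if handler:
            handler(message)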
@@ -336,15 +339,15 @@ class CoordinatorAgent:
         self.name = "CoordinatorAgent"
         self.message_bus = message_bus
         self.message_bus.subscribe(self.name, self.handle_message)
+        self.current_response_stream = None
         self.vector_store_ready = False
 
     def handle_message(self, message: MCPMessage):
         """Handle incoming MCP messages"""
         if message.type == "VECTORSTORE_READY":
             self.vector_store_ready = True
+        elif message.type == "LLM_RESPONSE_STREAM":
+            self.current_response_stream = message.payload.get("response_stream")
 
     def process_files(self, files):
         """Process uploaded files"""
@@ -364,49 +367,55 @@ class CoordinatorAgent:
 
         return f"Processing {len(files)} files: {', '.join([os.path.basename(fp) for fp in file_paths])}"
 
+    def handle_query(self, query: str, history: List) -> Generator[str, None, None]:
+        """Handle user query and return streaming response"""
         if not self.vector_store_ready:
+            yield "Please upload and process documents first."
+            return
 
         # Send retrieval request
         message = MCPMessage(
             sender=self.name,
             receiver="RetrievalAgent",
             msg_type="RETRIEVAL_REQUEST",
+            payload={"query": query}
         )
         self.message_bus.publish(message)
 
+        # Wait for response and stream
         import time
+        timeout = 20  # seconds
         start_time = time.time()
 
+        while not self.current_response_stream and (time.time() - start_time) < timeout:
            time.sleep(0.1)
 
+        if self.current_response_stream:
+            try:
+                # Stream tokens directly
+                for chunk in self.current_response_stream:
+                    # The token is in chunk.choices[0].delta.content for chat_completion
+                    if hasattr(chunk, 'choices') and chunk.choices:
+                        token = chunk.choices[0].delta.content
+                        if token:
+                            yield token
+                    else:
+                        # Fallback for different response format
+                        if hasattr(chunk, 'token'):
+                            yield chunk.token
+                        elif isinstance(chunk, str):
+                            yield chunk
+            except Exception as e:
+                yield f"Error streaming response: {e}"
+            finally:
+                self.current_response_stream = None  # Reset for next query
        else:
+            yield "Timeout: No response received from LLM agent."
 
 # Initialize agents
 ingestion_agent = IngestionAgent(message_bus)
 retrieval_agent = RetrievalAgent(message_bus)
+llm_response_agent = LLMResponseAgent(message_bus)
 coordinator_agent = CoordinatorAgent(message_bus)
 
 def create_interface():
@@ -453,23 +462,26 @@ def create_interface():
        font-size: 0.9rem;
    }
 
+    /* Chat area - REDUCED HEIGHT */
    .chat-container {
        flex: 1;
        display: flex;
        flex-direction: column;
+        max-width: 1000px;
        margin: 0 auto;
        width: 100%;
        padding: 1rem;
+        height: calc(100vh - 200px) !important; /* Reduced height */
    }
 
+    /* Chatbot styling - SMALLER */
    .gradio-chatbot {
+        height: 300px !important; /* Reduced from 500px */
+        max-height: 300px !important;
        background: transparent !important;
        border: none !important;
        margin-bottom: 1rem;
+        overflow-y: auto !important;
    }
 
    /* Input area */
padding: 1rem;
|
492 |
border: 1px solid rgba(255, 193, 7, 0.2);
|
493 |
backdrop-filter: blur(10px);
|
494 |
+
position: sticky;
|
495 |
+
bottom: 0;
|
496 |
}
|
497 |
|
498 |
/* File upload */
|
|
|
@@ -491,7 +505,16 @@ def create_interface():
        transition: all 0.3s ease;
    }
 
+    /* Buttons - YELLOW SEND BUTTON */
+    .send-btn {
+        background: linear-gradient(135deg, #ffc107 0%, #ff8f00 100%) !important;
+        color: #000000 !important;
+        border: none !important;
+        border-radius: 8px !important;
+        font-weight: 600 !important;
+        min-height: 40px !important;
+    }
+
    .primary-btn {
        background: linear-gradient(135deg, #ffc107 0%, #ff8f00 100%) !important;
        color: #000000 !important;
@@ -518,6 +541,19 @@ def create_interface():
        color: #ffc107;
        text-align: center;
    }
+
+    /* Input row styling */
+    .input-row {
+        display: flex !important;
+        gap: 10px !important;
+        align-items: end !important;
+    }
+
+    /* Message input */
+    .message-input {
+        flex: 1 !important;
+        min-height: 40px !important;
+    }
    """,
    title="Agentic RAG Assistant"
    ) as iface:
@@ -528,48 +564,92 @@ def create_interface():
        gr.HTML("""
        <div class="header">
            <h1>🤖 Agentic RAG Assistant</h1>
+            <p>Upload documents and ask questions - powered by Multi-Agent Architecture</p>
        </div>
        """)
 
+        # Main layout with sidebar and chat
        with gr.Row():
+            # Left sidebar for file upload
+            with gr.Column(scale=1):
+                gr.Markdown("### 📄 Document Upload")
+
+                file_upload = gr.File(
+                    file_count="multiple",
+                    file_types=[".pdf", ".pptx", ".csv", ".docx", ".txt", ".md"],
+                    label="Upload Documents",
+                    elem_classes=["upload-area"]
+                )
+
+                processing_status = gr.HTML(visible=False)
 
+                process_btn = gr.Button(
+                    "Process Documents",
+                    variant="primary",
+                    elem_classes=["primary-btn"]
+                )
+
+                gr.Markdown("### ℹ️ Architecture")
+                gr.Markdown("""
+                **Multi-Agent System:**
+                - 📄 **IngestionAgent**: Document parsing
+                - 🔍 **RetrievalAgent**: Semantic search
+                - 🤖 **LLMAgent**: Response generation
+                - 🎯 **CoordinatorAgent**: Workflow orchestration
+
+                **Features:**
+                - Streaming responses
+                - Multi-format support
+                - Context-aware answers
+                """)
+
+            # Right side - Chat interface
+            with gr.Column(scale=2):
+                gr.Markdown("### 💬 Chat Interface")
+
+                # Chatbot with reduced height
                chatbot = gr.Chatbot(
+                    height=300,  # Reduced height
+                    elem_classes=["gradio-chatbot"],
+                    show_copy_button=True,
+                    type="messages",
+                    placeholder="Upload documents first, then start chatting!"
                )
 
+                # Input area with improved layout
+                with gr.Row(elem_classes=["input-row"]):
+                    msg_input = gr.Textbox(
+                        placeholder="Ask about your documents...",
+                        label="Message",
+                        scale=4,
+                        elem_classes=["message-input"],
+                        show_label=False,
+                        autofocus=True
                    )
+                    send_btn = gr.Button(
+                        "Send",
+                        scale=1,
+                        elem_classes=["send-btn"],
+                        size="sm"
+                    )
+
+                # Examples
+                gr.Examples(
+                    examples=[
+                        "What are the main topics discussed?",
+                        "Summarize the key findings",
+                        "What metrics are mentioned?",
+                        "What are the recommendations?"
+                    ],
+                    inputs=msg_input,
+                    label="💡 Example Questions"
+                )
 
        # State to track document processing
        doc_processed = gr.State(False)
 
        # Event handlers
+        def handle_file_upload_and_process(files):
            if not files:
                return gr.update(visible=False), False
 
@@ -607,37 +687,46 @@ def create_interface():
 
        def respond(message, history, doc_ready):
            if not doc_ready:
+                # Show error message
+                history.append({"role": "user", "content": message})
+                history.append({"role": "assistant", "content": "⚠️ Please upload and process documents first."})
+                return history, ""
 
            if not message.strip():
                return history, message
 
+            # Add user message
+            history.append({"role": "user", "content": message})
+            history.append({"role": "assistant", "content": ""})
 
+            # Stream response
+            try:
+                for token in coordinator_agent.handle_query(message, history):
+                    history[-1]["content"] += token
+                    yield history, ""
+            except Exception as e:
+                history[-1]["content"] = f"❌ Error: {str(e)}"
+                yield history, ""
 
+        # Event bindings
+        process_btn.click(
+            handle_file_upload_and_process,
            inputs=[file_upload],
            outputs=[processing_status, doc_processed]
        )
 
        send_btn.click(
            respond,
            inputs=[msg_input, chatbot, doc_processed],
+            outputs=[chatbot, msg_input],
+            show_progress=True
        )
 
        msg_input.submit(
            respond,
            inputs=[msg_input, chatbot, doc_processed],
+            outputs=[chatbot, msg_input],
+            show_progress=True
        )
 
        return iface
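Since respond is now a generator that yields partial chat histories, the event bindings above rely on Gradio's streaming updates into a gr.Chatbot configured with type="messages". A minimal self-contained sketch of that pattern, with a toy echo stream standing in for coordinator_agent.handle_query:

# Minimal sketch: streaming into gr.Chatbot(type="messages") with a generator handler.
# The word-by-word echo below is a stand-in for the Space's agent pipeline.
import time
import gradio as gr

def respond(message, history):
    history = history + [{"role": "user", "content": message},
                         {"role": "assistant", "content": ""}]
    for word in f"You said: {message}".split():
        history[-1]["content"] += word + " "
        time.sleep(0.05)  # simulate token latency
        yield history, ""  # update the chat, clear the textbox

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(placeholder="Type a message...")
    msg.submit(respond, inputs=[msg, chatbot], outputs=[chatbot, msg])

demo.launch()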