import gradio as gr
import os
import time
import uuid
from datetime import datetime
from typing import List, Dict, Generator
import logging

# Import required libraries
from huggingface_hub import InferenceClient
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.docstore.document import Document

# Import document parsers
import PyPDF2
from pptx import Presentation
import pandas as pd
from docx import Document as DocxDocument

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Get HuggingFace token from environment
HF_TOKEN = os.getenv("hf_token")
if not HF_TOKEN:
    raise ValueError("HuggingFace token not found in environment variables")

# Initialize HuggingFace Inference Client
client = InferenceClient(model="meta-llama/Llama-3.1-8B-Instruct", token=HF_TOKEN)

# Initialize embeddings
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")


class MCPMessage:
    """Model Context Protocol message structure."""

    def __init__(self, sender: str, receiver: str, msg_type: str,
                 trace_id: str = None, payload: Dict = None):
        self.sender = sender
        self.receiver = receiver
        self.type = msg_type
        self.trace_id = trace_id or str(uuid.uuid4())
        self.payload = payload or {}
        self.timestamp = datetime.now().isoformat()

    def to_dict(self):
        return {
            "sender": self.sender,
            "receiver": self.receiver,
            "type": self.type,
            "trace_id": self.trace_id,
            "payload": self.payload,
            "timestamp": self.timestamp,
        }


class MessageBus:
    """In-memory message bus for MCP communication."""

    def __init__(self):
        self.messages = []
        self.subscribers = {}

    def publish(self, message: MCPMessage):
        """Publish a message to the bus and notify subscribers."""
        self.messages.append(message)
        logger.info(f"Message published: {message.sender} -> {message.receiver} [{message.type}]")
        if message.receiver in self.subscribers:
            for callback in self.subscribers[message.receiver]:
                callback(message)

    def subscribe(self, agent_name: str, callback):
        """Subscribe an agent to receive messages addressed to it."""
        if agent_name not in self.subscribers:
            self.subscribers[agent_name] = []
        self.subscribers[agent_name].append(callback)


# Global message bus
message_bus = MessageBus()
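# Example (sketch, not executed by the app): any callable can be subscribed to
# the bus under an agent name; publish() then fans matching messages out to it.
# The agent names below are illustrative only.
#
#   message_bus.subscribe("DebugAgent", lambda m: print(m.to_dict()))
#   message_bus.publish(MCPMessage(sender="Tester", receiver="DebugAgent",
#                                  msg_type="PING", payload={"hello": "world"}))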
class IngestionAgent:
    """Agent responsible for document parsing and preprocessing."""

    def __init__(self, message_bus: MessageBus):
        self.name = "IngestionAgent"
        self.message_bus = message_bus
        self.message_bus.subscribe(self.name, self.handle_message)
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )

    def handle_message(self, message: MCPMessage):
        """Handle incoming MCP messages."""
        if message.type == "INGESTION_REQUEST":
            self.process_documents(message)

    def parse_pdf(self, file_path: str) -> str:
        """Parse a PDF document."""
        try:
            with open(file_path, 'rb') as file:
                pdf_reader = PyPDF2.PdfReader(file)
                text = ""
                for page in pdf_reader.pages:
                    # extract_text() can return None for image-only pages
                    text += page.extract_text() or ""
                return text
        except Exception as e:
            logger.error(f"Error parsing PDF: {e}")
            return ""

    def parse_pptx(self, file_path: str) -> str:
        """Parse a PPTX document."""
        try:
            prs = Presentation(file_path)
            text = ""
            for slide in prs.slides:
                for shape in slide.shapes:
                    if hasattr(shape, "text"):
                        text += shape.text + "\n"
            return text
        except Exception as e:
            logger.error(f"Error parsing PPTX: {e}")
            return ""

    def parse_csv(self, file_path: str) -> str:
        """Parse a CSV document."""
        try:
            df = pd.read_csv(file_path)
            return df.to_string()
        except Exception as e:
            logger.error(f"Error parsing CSV: {e}")
            return ""

    def parse_docx(self, file_path: str) -> str:
        """Parse a DOCX document."""
        try:
            doc = DocxDocument(file_path)
            text = ""
            for paragraph in doc.paragraphs:
                text += paragraph.text + "\n"
            return text
        except Exception as e:
            logger.error(f"Error parsing DOCX: {e}")
            return ""

    def parse_txt(self, file_path: str) -> str:
        """Parse a TXT/Markdown document."""
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                return file.read()
        except Exception as e:
            logger.error(f"Error parsing TXT: {e}")
            return ""

    def process_documents(self, message: MCPMessage):
        """Process uploaded documents and forward chunks to the RetrievalAgent."""
        files = message.payload.get("files", [])
        processed_docs = []

        for file_path in files:
            file_ext = os.path.splitext(file_path)[1].lower()

            # Parse document based on file type
            if file_ext == '.pdf':
                text = self.parse_pdf(file_path)
            elif file_ext == '.pptx':
                text = self.parse_pptx(file_path)
            elif file_ext == '.csv':
                text = self.parse_csv(file_path)
            elif file_ext == '.docx':
                text = self.parse_docx(file_path)
            elif file_ext in ['.txt', '.md']:
                text = self.parse_txt(file_path)
            else:
                logger.warning(f"Unsupported file type: {file_ext}")
                continue

            if text:
                # Split text into chunks
                chunks = self.text_splitter.split_text(text)
                docs = [Document(page_content=chunk, metadata={"source": file_path})
                        for chunk in chunks]
                processed_docs.extend(docs)

        # Send processed documents to the RetrievalAgent
        response = MCPMessage(
            sender=self.name,
            receiver="RetrievalAgent",
            msg_type="INGESTION_COMPLETE",
            trace_id=message.trace_id,
            payload={"documents": processed_docs}
        )
        self.message_bus.publish(response)


class RetrievalAgent:
    """Agent responsible for embedding and semantic retrieval."""

    def __init__(self, message_bus: MessageBus):
        self.name = "RetrievalAgent"
        self.message_bus = message_bus
        self.message_bus.subscribe(self.name, self.handle_message)
        self.vector_store = None

    def handle_message(self, message: MCPMessage):
        """Handle incoming MCP messages."""
        if message.type == "INGESTION_COMPLETE":
            self.create_vector_store(message)
        elif message.type == "RETRIEVAL_REQUEST":
            self.retrieve_context(message)

    def create_vector_store(self, message: MCPMessage):
        """Create a vector store from processed documents."""
        documents = message.payload.get("documents", [])
        if documents:
            try:
                self.vector_store = FAISS.from_documents(documents, embeddings)
                logger.info(f"Vector store created with {len(documents)} documents")

                # Notify completion
                response = MCPMessage(
                    sender=self.name,
                    receiver="CoordinatorAgent",
                    msg_type="VECTORSTORE_READY",
                    trace_id=message.trace_id,
                    payload={"status": "ready"}
                )
                self.message_bus.publish(response)
            except Exception as e:
                logger.error(f"Error creating vector store: {e}")

    def retrieve_context(self, message: MCPMessage):
        """Retrieve relevant context for a query."""
        query = message.payload.get("query", "")
        k = message.payload.get("k", 3)

        if self.vector_store and query:
            try:
                docs = self.vector_store.similarity_search(query, k=k)
                context = [{"content": doc.page_content,
                            "source": doc.metadata.get("source", "")}
                           for doc in docs]

                response = MCPMessage(
                    sender=self.name,
                    receiver="LLMResponseAgent",
                    msg_type="CONTEXT_RESPONSE",
                    trace_id=message.trace_id,
                    payload={
                        "query": query,
                        "retrieved_context": context,
                        "top_chunks": [doc.page_content for doc in docs]
                    }
                )
                self.message_bus.publish(response)
            except Exception as e:
                logger.error(f"Error retrieving context: {e}")
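# Example (sketch): the retrieval step can be exercised on its own once a
# vector store exists; `k` bounds how many chunks come back. The document and
# query here are illustrative.
#
#   store = FAISS.from_documents(
#       [Document(page_content="hello there", metadata={"source": "demo.txt"})],
#       embeddings)
#   hits = store.similarity_search("greeting", k=1)
#   print(hits[0].page_content, hits[0].metadata["source"])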
class LLMResponseAgent:
    """Agent responsible for generating LLM responses."""

    def __init__(self, message_bus: MessageBus):
        self.name = "LLMResponseAgent"
        self.message_bus = message_bus
        self.message_bus.subscribe(self.name, self.handle_message)

    def handle_message(self, message: MCPMessage):
        """Handle incoming MCP messages."""
        if message.type == "CONTEXT_RESPONSE":
            self.generate_response(message)

    def generate_response(self, message: MCPMessage):
        """Generate a response using the retrieved context."""
        query = message.payload.get("query", "")
        context = message.payload.get("retrieved_context", [])

        # Build context string
        context_text = "\n\n".join([f"Source: {ctx['source']}\nContent: {ctx['content']}"
                                    for ctx in context])

        # Create messages in conversational format
        messages = [
            {
                "role": "system",
                "content": ("You are a helpful assistant. Based on the provided context "
                            "below, answer the user's question accurately and "
                            "comprehensively. Cite the sources if possible."),
            },
            {
                "role": "user",
                "content": f"Context:\n\n{context_text}\n\nQuestion: {query}"
            }
        ]

        try:
            # Use client.chat_completion for conversational models
            response_stream = client.chat_completion(
                messages=messages,
                max_tokens=512,
                temperature=0.7,
                stream=True
            )

            # Forward the streaming response to the coordinator
            response = MCPMessage(
                sender=self.name,
                receiver="CoordinatorAgent",
                msg_type="LLM_RESPONSE_STREAM",
                trace_id=message.trace_id,
                payload={
                    "query": query,
                    "response_stream": response_stream,
                    "context": context
                }
            )
            self.message_bus.publish(response)
        except Exception as e:
            logger.error(f"Error generating response: {e}")

            # Send an error stream back
            error_msg = f"Error from LLM: {e}"

            def error_generator():
                yield error_msg

            response = MCPMessage(
                sender=self.name,
                receiver="CoordinatorAgent",
                msg_type="LLM_RESPONSE_STREAM",
                trace_id=message.trace_id,
                payload={"response_stream": error_generator()}
            )
            self.message_bus.publish(response)
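# Example (sketch): with stream=True, chat_completion yields chunk objects whose
# incremental text sits at chunk.choices[0].delta.content — the same field the
# coordinator reads when relaying tokens below. The prompt is illustrative, and
# the final chunk's delta.content may be None, hence the `or ""`.
#
#   for chunk in client.chat_completion(
#           messages=[{"role": "user", "content": "Say hi"}],
#           max_tokens=16, stream=True):
#       print(chunk.choices[0].delta.content or "", end="")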
class CoordinatorAgent:
    """Coordinator agent that orchestrates the entire workflow."""

    def __init__(self, message_bus: MessageBus):
        self.name = "CoordinatorAgent"
        self.message_bus = message_bus
        self.message_bus.subscribe(self.name, self.handle_message)
        self.current_response_stream = None
        self.vector_store_ready = False

    def handle_message(self, message: MCPMessage):
        """Handle incoming MCP messages."""
        if message.type == "VECTORSTORE_READY":
            self.vector_store_ready = True
        elif message.type == "LLM_RESPONSE_STREAM":
            self.current_response_stream = message.payload.get("response_stream")

    def process_files(self, files):
        """Process uploaded files."""
        if not files:
            return "No files uploaded."

        # Gradio may hand back tempfile objects (with .name) or plain paths
        file_paths = [file.name if hasattr(file, "name") else file for file in files]

        # Send ingestion request
        message = MCPMessage(
            sender=self.name,
            receiver="IngestionAgent",
            msg_type="INGESTION_REQUEST",
            payload={"files": file_paths}
        )
        self.message_bus.publish(message)

        return (f"Processing {len(files)} files: "
                f"{', '.join([os.path.basename(fp) for fp in file_paths])}")

    def handle_query(self, query: str, history: List) -> Generator[str, None, None]:
        """Handle a user query and yield a streaming response."""
        if not self.vector_store_ready:
            yield "Please upload and process documents first."
            return

        # Send retrieval request
        message = MCPMessage(
            sender=self.name,
            receiver="RetrievalAgent",
            msg_type="RETRIEVAL_REQUEST",
            payload={"query": query}
        )
        self.message_bus.publish(message)

        # Wait for the response stream to arrive, then relay it
        timeout = 20  # seconds
        start_time = time.time()
        while not self.current_response_stream and (time.time() - start_time) < timeout:
            time.sleep(0.1)

        if self.current_response_stream:
            try:
                # Stream tokens directly
                for chunk in self.current_response_stream:
                    # For chat_completion, the token is in chunk.choices[0].delta.content
                    if hasattr(chunk, 'choices') and chunk.choices:
                        token = chunk.choices[0].delta.content
                        if token:
                            yield token
                    # Fallbacks for other response formats
                    elif hasattr(chunk, 'token'):
                        yield chunk.token
                    elif isinstance(chunk, str):
                        yield chunk
            except Exception as e:
                yield f"Error streaming response: {e}"
            finally:
                self.current_response_stream = None  # Reset for the next query
        else:
            yield "Timeout: No response received from LLM agent."


# Initialize agents
ingestion_agent = IngestionAgent(message_bus)
retrieval_agent = RetrievalAgent(message_bus)
llm_response_agent = LLMResponseAgent(message_bus)
coordinator_agent = CoordinatorAgent(message_bus)
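# Runtime message flow between the four agents, for reference:
#
#   Coordinator --INGESTION_REQUEST-->  Ingestion
#   Ingestion   --INGESTION_COMPLETE--> Retrieval    (builds the FAISS store)
#   Retrieval   --VECTORSTORE_READY-->  Coordinator
#   Coordinator --RETRIEVAL_REQUEST-->  Retrieval
#   Retrieval   --CONTEXT_RESPONSE-->   LLMResponse
#   LLMResponse --LLM_RESPONSE_STREAM-> Coordinator  (tokens streamed to the UI)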
def create_interface():
    """Create an enhanced ChatGPT-style Gradio interface with glowing effects."""
    with gr.Blocks(
        theme=gr.themes.Base(),
        css="""
        /* Import Google Fonts for better typography */
        @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');

        /* Dark theme styling with enhanced visuals */
        .gradio-container {
            background: linear-gradient(135deg, #0a0a0a 0%, #1a1a2e 50%, #16213e 100%) !important;
            color: #ffffff !important;
            height: 100vh !important;
            max-width: none !important;
            padding: 0 !important;
            font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
        }

        /* Main container with animated background */
        .main-container {
            display: flex;
            flex-direction: column;
            height: 100vh;
            background: radial-gradient(circle at 20% 50%, rgba(255, 193, 7, 0.05) 0%, transparent 50%),
                        radial-gradient(circle at 80% 20%, rgba(0, 123, 255, 0.05) 0%, transparent 50%),
                        radial-gradient(circle at 40% 80%, rgba(255, 87, 34, 0.03) 0%, transparent 50%),
                        linear-gradient(135deg, #0a0a0a 0%, #1a1a2e 50%, #16213e 100%);
            animation: backgroundShift 15s ease-in-out infinite alternate;
        }

        @keyframes backgroundShift {
            0% { filter: hue-rotate(0deg); }
            100% { filter: hue-rotate(10deg); }
        }

        /* Enhanced header with glow */
        .header {
            background: rgba(255, 193, 7, 0.08);
            border-bottom: 2px solid transparent;
            border-image: linear-gradient(90deg, rgba(255, 193, 7, 0.5), rgba(0, 123, 255, 0.3)) 1;
            padding: 1.5rem 2rem;
            backdrop-filter: blur(20px);
            box-shadow: 0 4px 20px rgba(255, 193, 7, 0.1), inset 0 1px 0 rgba(255, 255, 255, 0.1);
            position: relative;
            overflow: hidden;
        }

        .header::before {
            content: '';
            position: absolute;
            top: 0;
            left: -100%;
            width: 100%;
            height: 100%;
            background: linear-gradient(90deg, transparent, rgba(255, 193, 7, 0.1), transparent);
            animation: shimmer 3s ease-in-out infinite;
        }

        @keyframes shimmer {
            0% { left: -100%; }
            100% { left: 100%; }
        }

        .header h1 {
            color: #ffc107;
            margin: 0;
            font-size: 2rem;
            font-weight: 700;
            text-shadow: 0 0 10px rgba(255, 193, 7, 0.3), 0 0 20px rgba(255, 193, 7, 0.2);
            letter-spacing: -0.02em;
        }

        .header p {
            color: #e0e0e0;
            margin: 0.5rem 0 0 0;
            font-size: 1rem;
            font-weight: 400;
            opacity: 0.9;
        }

        /* Enhanced chat container */
        .chat-container {
            flex: 1;
            display: flex;
            flex-direction: column;
            max-width: 1200px;
            margin: 0 auto;
            width: 100%;
            padding: 2rem;
            height: calc(100vh - 200px) !important;
            gap: 1.5rem;
        }

        /* Enhanced chatbot with glow effect */
        .gradio-chatbot {
            height: 400px !important;
            max-height: 400px !important;
            background: rgba(45, 45, 45, 0.4) !important;
            border: 2px solid rgba(255, 193, 7, 0.2) !important;
            border-radius: 20px !important;
            margin-bottom: 1rem;
            overflow-y: auto !important;
            box-shadow: 0 0 30px rgba(255, 193, 7, 0.15), 0 8px 32px rgba(0, 0, 0, 0.3),
                        inset 0 1px 0 rgba(255, 255, 255, 0.05) !important;
            backdrop-filter: blur(20px) !important;
            position: relative;
            animation: chatGlow 4s ease-in-out infinite alternate;
        }

        @keyframes chatGlow {
            0% {
                box-shadow: 0 0 30px rgba(255, 193, 7, 0.15), 0 8px 32px rgba(0, 0, 0, 0.3),
                            inset 0 1px 0 rgba(255, 255, 255, 0.05);
            }
            100% {
                box-shadow: 0 0 40px rgba(255, 193, 7, 0.25), 0 12px 40px rgba(0, 0, 0, 0.4),
                            inset 0 1px 0 rgba(255, 255, 255, 0.08);
            }
        }

        /* Enhanced chat messages */
        .message {
            background: rgba(255, 255, 255, 0.05) !important;
            border-radius: 16px !important;
            padding: 1rem 1.5rem !important;
            margin: 0.75rem 0 !important;
            border: 1px solid rgba(255, 255, 255, 0.1) !important;
            backdrop-filter: blur(10px) !important;
            font-size: 0.95rem !important;
            line-height: 1.6 !important;
            box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1) !important;
            transition: all 0.3s ease !important;
        }

        .message:hover {
            transform: translateY(-1px) !important;
            box-shadow: 0 4px 15px rgba(0, 0, 0, 0.15) !important;
        }

        /* User message styling */
        .message.user {
            background: linear-gradient(135deg, rgba(255, 193, 7, 0.1), rgba(255, 193, 7, 0.05)) !important;
            border-color: rgba(255, 193, 7, 0.2) !important;
            margin-left: 15% !important;
            box-shadow: 0 2px 10px rgba(255, 193, 7, 0.1), inset 0 1px 0 rgba(255, 255, 255, 0.05) !important;
        }

        /* Assistant message styling */
        .message.assistant {
            background: linear-gradient(135deg, rgba(0, 123, 255, 0.08), rgba(0, 123, 255, 0.04)) !important;
            border-color: rgba(0, 123, 255, 0.2) !important;
            margin-right: 15% !important;
            box-shadow: 0 2px 10px rgba(0, 123, 255, 0.1), inset 0 1px 0 rgba(255, 255, 255, 0.05) !important;
        }

        /* Enhanced input area with glow */
        .input-area {
            background: rgba(45, 45, 45, 0.6);
            border-radius: 20px;
            padding: 1.5rem;
            border: 2px solid rgba(255, 193, 7, 0.2);
            backdrop-filter: blur(20px);
            position: sticky;
            bottom: 0;
            box-shadow: 0 0 25px rgba(255, 193, 7, 0.1), 0 8px 32px rgba(0, 0, 0, 0.2),
                        inset 0 1px 0 rgba(255, 255, 255, 0.05);
            animation: inputGlow 3s ease-in-out infinite alternate;
        }

        @keyframes inputGlow {
            0% {
                box-shadow: 0 0 25px rgba(255, 193, 7, 0.1), 0 8px 32px rgba(0, 0, 0, 0.2),
                            inset 0 1px 0 rgba(255, 255, 255, 0.05);
            }
            100% {
                box-shadow: 0 0 35px rgba(255, 193, 7, 0.2), 0 12px 40px rgba(0, 0, 0, 0.3),
                            inset 0 1px 0 rgba(255, 255, 255, 0.08);
            }
        }

        /* Enhanced file upload area */
        .upload-area {
            background: linear-gradient(135deg, rgba(255, 193, 7, 0.08), rgba(255, 193, 7, 0.04)) !important;
            border: 2px dashed rgba(255, 193, 7, 0.4) !important;
            border-radius: 16px !important;
            padding: 1.5rem !important;
            margin-bottom: 1.5rem !important;
            transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1) !important;
            backdrop-filter: blur(10px) !important;
            box-shadow: 0 0 20px rgba(255, 193, 7, 0.05), inset 0 1px 0 rgba(255, 255, 255, 0.05) !important;
        }

        .upload-area:hover {
            background: linear-gradient(135deg, rgba(255, 193, 7, 0.12), rgba(255, 193, 7, 0.06)) !important;
            border-color: rgba(255, 193, 7, 0.6) !important;
            box-shadow: 0 0 30px rgba(255, 193, 7, 0.15), inset 0 1px 0 rgba(255, 255, 255, 0.08) !important;
            transform: translateY(-2px) !important;
        }

        /* Sidebar styling */
        .sidebar {
            background: rgba(30, 30, 30, 0.6) !important;
            border-right: 2px solid rgba(255, 193, 7, 0.1) !important;
            backdrop-filter: blur(15px) !important;
            box-shadow: inset -1px 0 0 rgba(255, 255, 255, 0.05), 4px 0 20px rgba(0, 0, 0, 0.1) !important;
        }

        /* Enhanced buttons with glow effects */
        .send-btn {
            background: linear-gradient(135deg, #ffc107 0%, #ff8f00 100%) !important;
            color: #000000 !important;
            border: none !important;
            border-radius: 12px !important;
            font-weight: 600 !important;
            min-height: 48px !important;
            font-size: 0.95rem !important;
            transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important;
            box-shadow: 0 4px 15px rgba(255, 193, 7, 0.3), 0 0 20px rgba(255, 193, 7, 0.2) !important;
            position: relative;
            overflow: hidden;
        }

        .send-btn::before {
            content: '';
            position: absolute;
            top: 0;
            left: -100%;
            width: 100%;
            height: 100%;
            background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.2), transparent);
            transition: left 0.5s;
        }

        .send-btn:hover::before {
            left: 100%;
        }

        .send-btn:hover {
            transform: translateY(-2px) !important;
            box-shadow: 0 8px 25px rgba(255, 193, 7, 0.4), 0 0 30px rgba(255, 193, 7, 0.3) !important;
        }

        .primary-btn {
            background: linear-gradient(135deg, #ffc107 0%, #ff8f00 100%) !important;
            color: #000000 !important;
            border: none !important;
            border-radius: 12px !important;
            font-weight: 600 !important;
            padding: 0.75rem 1.5rem !important;
            font-size: 0.95rem !important;
            transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important;
            box-shadow: 0 4px 15px rgba(255, 193, 7, 0.3), 0 0 20px rgba(255, 193, 7, 0.2) !important;
        }

        .primary-btn:hover {
            transform: translateY(-2px) !important;
            box-shadow: 0 8px 25px rgba(255, 193, 7, 0.4), 0 0 30px rgba(255, 193, 7, 0.3) !important;
        }

        /* Enhanced text inputs with glow */
        .gradio-textbox input, .gradio-textbox textarea {
            background: rgba(45, 45, 45, 0.8) !important;
            color: #ffffff !important;
            border: 2px solid rgba(255, 193, 7, 0.2) !important;
            border-radius: 12px !important;
            font-size: 0.95rem !important;
            padding: 0.75rem 1rem !important;
            transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important;
            backdrop-filter: blur(10px) !important;
            box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.1) !important;
        }

        .gradio-textbox input:focus, .gradio-textbox textarea:focus {
            border-color: rgba(255, 193, 7, 0.5) !important;
            box-shadow: 0 0 20px rgba(255, 193, 7, 0.2), inset 0 2px 4px rgba(0, 0, 0, 0.1) !important;
            outline: none !important;
        }

        /* Enhanced processing indicator */
        .processing-indicator {
            background: linear-gradient(135deg, rgba(255, 193, 7, 0.15), rgba(255, 193, 7, 0.08));
            border: 2px solid rgba(255, 193, 7, 0.3);
            border-radius: 12px;
            padding: 1rem 1.5rem;
            margin: 1rem 0;
            color: #ffc107;
            text-align: center;
            font-weight: 500;
            backdrop-filter: blur(10px);
            box-shadow: 0 0 25px rgba(255, 193, 7, 0.1), inset 0 1px 0 rgba(255, 255, 255, 0.1);
            animation: processingPulse 2s ease-in-out infinite;
        }

        @keyframes processingPulse {
            0%, 100% { opacity: 1; }
            50% { opacity: 0.8; }
        }

        /* Enhanced input row styling */
        .input-row {
            display: flex !important;
            gap: 12px !important;
            align-items: end !important;
        }

        /* Message input */
        .message-input {
            flex: 1 !important;
            min-height: 48px !important;
        }

        /* Markdown content styling */
        .markdown-content {
            color: #e0e0e0 !important;
            line-height: 1.6 !important;
            font-size: 0.95rem !important;
        }

        .markdown-content h1, .markdown-content h2, .markdown-content h3 {
            color: #ffc107 !important;
            margin-top: 1.5rem !important;
            margin-bottom: 0.5rem !important;
        }

        .markdown-content code {
            background: rgba(255, 193, 7, 0.1) !important;
            color: #ffc107 !important;
            padding: 0.2rem 0.4rem !important;
            border-radius: 4px !important;
        }

        .markdown-content pre {
            background: rgba(0, 0, 0, 0.3) !important;
            border: 1px solid rgba(255, 193, 7, 0.2) !important;
            border-radius: 8px !important;
            padding: 1rem !important;
            margin: 1rem 0 !important;
        }

        /* Examples styling */
        .examples {
            background: rgba(45, 45, 45, 0.3) !important;
            border-radius: 12px !important;
            padding: 1rem !important;
            border: 1px solid rgba(255, 193, 7, 0.1) !important;
            backdrop-filter: blur(10px) !important;
        }

        /* Loading animation */
        @keyframes loading {
            0% { transform: rotate(0deg); }
            100% { transform: rotate(360deg); }
        }

        .loading {
            animation: loading 2s linear infinite;
        }

        /* Responsive design */
        @media (max-width: 768px) {
            .chat-container { padding: 1rem; }
            .header h1 { font-size: 1.5rem; }
            .gradio-chatbot { height: 300px !important; }
            .input-row { flex-direction: column !important; gap: 8px !important; }
        }
        """,
        title="Agentic RAG Assistant"
    ) as iface:

        # Header
        with gr.Row():
            with gr.Column():
                gr.HTML("""
                    <div class="header">
                        <h1>✨ Agentic RAG Assistant</h1>
                        <p>🤖 Upload documents and ask questions - powered by Multi-Agent Architecture with streaming responses</p>
                    </div>
                """)
""") # Main layout with sidebar and chat with gr.Row(): # Left sidebar for file upload with gr.Column(scale=1, elem_classes=["sidebar"]): gr.Markdown("### 📁 Document Upload", elem_classes=["markdown-content"]) file_upload = gr.File( file_count="multiple", file_types=[".pdf", ".pptx", ".csv", ".docx", ".txt", ".md"], label="📄 Upload Documents", elem_classes=["upload-area"] ) processing_status = gr.HTML(visible=False) process_btn = gr.Button( "🚀 Process Documents", variant="primary", elem_classes=["primary-btn"] ) gr.Markdown(""" ### ⚡ Architecture **Multi-Agent System:** - 📄 **IngestionAgent**: Document parsing - 🔍 **RetrievalAgent**: Semantic search - 🤖 **LLMAgent**: Response generation - 🎯 **CoordinatorAgent**: Workflow orchestration **✨ Features:** - 🌊 Streaming responses - 📚 Multi-format support - 🧠 Context-aware answers - 🔥 Real-time processing """, elem_classes=["markdown-content"]) # Right side - Chat interface with gr.Column(scale=2): gr.Markdown("### 💬 Chat Interface", elem_classes=["markdown-content"]) # Chatbot with enhanced styling chatbot = gr.Chatbot( height=400, elem_classes=["gradio-chatbot"], show_copy_button=True, type="messages", placeholder="🚀 Upload documents first, then start chatting! Ask me anything about your documents.", avatar_images=("👤", "🤖") ) # Input area with improved layout with gr.Row(elem_classes=["input-row"]): msg_input = gr.Textbox( placeholder="💭 Ask about your documents...", label="Message", scale=4, elem_classes=["message-input"], show_label=False, autofocus=True ) send_btn = gr.Button( "🚀 Send", scale=1, elem_classes=["send-btn"], size="sm" ) # Enhanced Examples gr.Examples( examples=[ "📖 What are the main topics discussed?", "📊 Summarize the key findings", "📈 What metrics are mentioned?", "💡 What are the recommendations?", "🔍 Find specific information about...", "📋 Create a summary of the content" ], inputs=msg_input, label="💡 Example Questions", elem_classes=["examples"] ) # State to track document processing doc_processed = gr.State(False) # Event handlers def handle_file_upload_and_process(files): if not files: return gr.update(visible=False), False # Show processing indicator processing_html = f"""
🔄 Processing {len(files)} documents... Please wait while we analyze your content.
""" # Process files try: result = coordinator_agent.process_files(files) # Wait a moment for processing to complete import time time.sleep(3) success_html = """
✅ Documents processed successfully! You can now ask questions about your content.
""" return gr.update(value=success_html, visible=True), True except Exception as e: error_html = f"""
❌ Error processing documents: {str(e)}
""" return gr.update(value=error_html, visible=True), False def respond(message, history, doc_ready): if not doc_ready: # Show error message history.append({"role": "user", "content": message}) history.append({"role": "assistant", "content": "⚠️ Please upload and process documents first before asking questions."}) return history, "" if not message.strip(): return history, message # Add user message history.append({"role": "user", "content": message}) history.append({"role": "assistant", "content": ""}) # Stream response try: for token in coordinator_agent.handle_query(message, history): history[-1]["content"] += token yield history, "" except Exception as e: history[-1]["content"] = f"❌ Error generating response: {str(e)}" yield history, "" # Event bindings process_btn.click( handle_file_upload_and_process, inputs=[file_upload], outputs=[processing_status, doc_processed] ) send_btn.click( respond, inputs=[msg_input, chatbot, doc_processed], outputs=[chatbot, msg_input], show_progress=True ) msg_input.submit( respond, inputs=[msg_input, chatbot, doc_processed], outputs=[chatbot, msg_input], show_progress=True ) return iface # Launch the application if __name__ == "__main__": demo = create_interface() demo.launch( share=True, server_name="0.0.0.0", server_port=7860 )