ragunath-ravi committed on
Commit 2ea78e5 · verified · 1 Parent(s): f32f907

Update app.py

Files changed (1):
  1. app.py  +21 -68
app.py CHANGED
@@ -27,6 +27,9 @@ from huggingface_hub import InferenceClient
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
+# Get HF token from environment
+HF_TOKEN = os.getenv('HF_TOKEN')
+
 # MCP Message Structure
 @dataclass
 class MCPMessage:
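Note: on Hugging Face Spaces, HF_TOKEN is typically stored as a repository secret and surfaces as an environment variable, so os.getenv returns None rather than raising when the secret is missing. A minimal sketch of a stricter fail-fast variant (the guard is an illustration, not part of this commit):

```python
import os

# Returns None if the HF_TOKEN secret/environment variable is not set.
HF_TOKEN = os.getenv("HF_TOKEN")

# Hypothetical fail-fast guard, not in this commit:
if HF_TOKEN is None:
    raise RuntimeError("HF_TOKEN is not set; add it as a Space secret or export it.")
```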
@@ -244,11 +247,11 @@ class RetrievalAgent(BaseAgent):
 
 # LLM Response Agent
 class LLMResponseAgent(BaseAgent):
-    def __init__(self, hf_token: str = None):
+    def __init__(self):
         super().__init__("LLMResponseAgent")
         self.client = InferenceClient(
             model="meta-llama/Llama-3.1-8B-Instruct",
-            token=hf_token
+            token=HF_TOKEN
         )
 
     def format_prompt(self, query: str, context_chunks: List[Dict]) -> str:
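For reference, InferenceClient comes from huggingface_hub; a self-contained sketch of how a client configured this way is typically called (the prompt and max_tokens value are illustrative, not taken from this app):

```python
import os
from huggingface_hub import InferenceClient

client = InferenceClient(
    model="meta-llama/Llama-3.1-8B-Instruct",
    token=os.getenv("HF_TOKEN"),
)

# chat_completion is the standard huggingface_hub entry point for instruct/chat models.
reply = client.chat_completion(
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=64,
)
print(reply.choices[0].message.content)
```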
@@ -292,11 +295,11 @@ Answer:"""
 
 # Coordinator Agent
 class CoordinatorAgent(BaseAgent):
-    def __init__(self, hf_token: str = None):
+    def __init__(self):
         super().__init__("CoordinatorAgent")
         self.ingestion_agent = IngestionAgent()
         self.retrieval_agent = RetrievalAgent()
-        self.llm_agent = LLMResponseAgent(hf_token)
+        self.llm_agent = LLMResponseAgent()
         self.documents_processed = False
 
     async def process_documents(self, files: List[str]) -> str:
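With the token argument gone, the coordinator can be built eagerly at import time, which is what the commit does in the next hunk (coordinator = CoordinatorAgent()). If import-time construction were undesirable, e.g. for tests, a lazy accessor is a common alternative - a sketch assuming the CoordinatorAgent class from this file, not part of the commit:

```python
_coordinator = None

def get_coordinator() -> "CoordinatorAgent":
    """Build the CoordinatorAgent on first use instead of at import time."""
    global _coordinator
    if _coordinator is None:
        _coordinator = CoordinatorAgent()
    return _coordinator
```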
@@ -367,48 +370,31 @@ class CoordinatorAgent(BaseAgent):
         return f"Error processing query: {str(e)}", []
 
 # Global coordinator instance
-coordinator = None
-
-def initialize_app(hf_token):
-    """Initialize the application with HuggingFace token"""
-    global coordinator
-    coordinator = CoordinatorAgent(hf_token)
-    return "✅ Application initialized successfully!"
+coordinator = CoordinatorAgent()
 
 async def process_files(files):
     """Process uploaded files"""
-    if not coordinator:
-        return "❌ Please set your HuggingFace token first!"
-
     if not files:
         return "❌ Please upload at least one file."
 
     # Save uploaded files to temporary directory
     file_paths = []
     for file in files:
-        temp_path = os.path.join(tempfile.gettempdir(), file.name)
-        with open(temp_path, 'wb') as f:
-            f.write(file.read())
-        file_paths.append(temp_path)
+        # Handle file path - Gradio returns file path as string
+        if hasattr(file, 'name'):
+            file_path = file.name
+        else:
+            file_path = str(file)
+        file_paths.append(file_path)
 
     result = await coordinator.process_documents(file_paths)
 
-    # Cleanup temporary files
-    for path in file_paths:
-        try:
-            os.remove(path)
-        except:
-            pass
-
     return result
 
 async def answer_question(query, history):
     """Answer user question"""
-    if not coordinator:
-        return "❌ Please set your HuggingFace token first!"
-
     if not query.strip():
-        return "❌ Please enter a question."
+        return history, ""
 
     response, context_chunks = await coordinator.answer_query(query)
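Background on the new upload handling: gr.File components hand the callback tempfile wrappers whose .name attribute holds the path on disk, while some Gradio versions pass plain path strings, which is what the hasattr branch above covers. A standalone sketch of that normalization (the function name is illustrative):

```python
from typing import List

def normalize_upload_paths(files) -> List[str]:
    """Map Gradio upload objects (tempfile wrappers or plain strings) to file paths."""
    paths: List[str] = []
    for f in files:
        # Wrappers expose the on-disk temp path via .name; strings are already paths.
        paths.append(f.name if hasattr(f, "name") else str(f))
    return paths
```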
 
@@ -419,7 +405,10 @@ async def answer_question(query, history):
             sources += f"{i}. {chunk['source']} (Chunk {chunk['chunk_id']})\n"
         response += sources
 
-    return response
+    # Add to chat history
+    history.append((query, response))
+
+    return history, ""
 
 # Custom CSS
 custom_css = """
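The new return shape matches the classic tuples format of gr.Chatbot: history is a list of (user_message, bot_message) pairs, and the trailing empty string clears the query textbox when wired as a second output. A small sketch of that contract (the messages are illustrative):

```python
history = []

# Each exchange is appended as a (user, bot) tuple; gr.Chatbot renders pairs in order.
history.append(("What does the report cover?",
                "It covers Q3 revenue.\n\nSources:\n1. report.pdf (Chunk 3)"))

# Returning (history, "") updates the chat and resets the textbox in one step.
```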
@@ -593,36 +582,6 @@ def create_interface():
         """)
 
         with gr.Tabs() as tabs:
-            # Setup Tab
-            with gr.TabItem("⚙️ Setup", elem_classes=["tab-nav"]):
-                gr.HTML("""
-                <div class="setup-card">
-                    <h3>🔑 Configuration</h3>
-                    <p>Enter your HuggingFace token to get started. This token is used to access the Llama-3.1-8B-Instruct model.</p>
-                </div>
-                """)
-
-                with gr.Row():
-                    hf_token_input = gr.Textbox(
-                        label="HuggingFace Token",
-                        placeholder="hf_xxxxxxxxxxxxxxxxxxxxxxxxx",
-                        type="password",
-                        elem_classes=["input-container"]
-                    )
-
-                with gr.Row():
-                    init_button = gr.Button(
-                        "Initialize Application",
-                        variant="primary",
-                        elem_classes=["primary-button"]
-                    )
-
-                init_status = gr.Textbox(
-                    label="Status",
-                    interactive=False,
-                    elem_classes=["input-container"]
-                )
-
             # Upload Tab
             with gr.TabItem("📁 Upload Documents", elem_classes=["tab-nav"]):
                 gr.HTML("""
@@ -741,12 +700,6 @@ def create_interface():
             """)
 
         # Event handlers
-        init_button.click(
-            fn=initialize_app,
-            inputs=[hf_token_input],
-            outputs=[init_status]
-        )
-
         upload_button.click(
             fn=process_files,
             inputs=[file_upload],
@@ -756,13 +709,13 @@ def create_interface():
         ask_button.click(
             fn=answer_question,
             inputs=[query_input, chatbot],
-            outputs=[chatbot]
+            outputs=[chatbot, query_input]
         )
 
         query_input.submit(
             fn=answer_question,
             inputs=[query_input, chatbot],
-            outputs=[chatbot]
+            outputs=[chatbot, query_input]
         )
 
     return demo
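Taken together, the handler wiring after this commit follows the standard gr.Blocks pattern: async callbacks are allowed, and the two outputs update the chat and clear the input together. A minimal self-contained sketch of the same wiring, assuming a Gradio version where Chatbot's tuples format applies (labels are illustrative; answer_question here is a stub standing in for the app's real handler):

```python
import gradio as gr

async def answer_question(query, history):
    # Stub: the real app routes the query through the CoordinatorAgent.
    if not query.strip():
        return history, ""
    history.append((query, f"Echo: {query}"))
    return history, ""

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Chat")
    query_input = gr.Textbox(label="Ask a question")
    ask_button = gr.Button("Ask")

    # Same outputs wiring as the commit: update the chat, clear the textbox.
    ask_button.click(fn=answer_question,
                     inputs=[query_input, chatbot],
                     outputs=[chatbot, query_input])
    query_input.submit(fn=answer_question,
                       inputs=[query_input, chatbot],
                       outputs=[chatbot, query_input])

demo.launch()
```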
 