|
""" |
|
This is the main application file for the hackathon project.

It defines the Gradio interface that is exposed as an MCP server.
|
It exposes the following tools: |
|
- search_knowledge_base_for_context |
|
- research_write_review_topic |
|
""" |
|
|
|
import os |
|
import requests |
|
import gradio as gr |
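
# Configuration: both tools proxy requests to endpoints hosted on Modal Labs.
# These environment variables must be set before launching the app:
#   MODAL_LABS_HACKATHON_RAG_TOOLS_URL      - endpoint backing the RAG tool
#   MODAL_LABS_HACKATHON_RESEARCH_TOOLS_URL - endpoint backing the research tool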
|
|
|
def search_knowledge_base_for_context(query: str) -> str: |
|
""" |
|
    Searches and retrieves relevant context from a knowledge base on the Sukhoi SU-35,

    based on the user's query.



    Example queries:

    - "What are the main features of the fuel system of the SU-35?"

    - "What is the combat potential of the SU-35?"
|
|
|
Args: |
|
query: The search query supplied by the user. |
|
|
|
Returns: |
|
str: Relevant text content that can be used by the LLM to answer the query. |
|
""" |
|
print(f"Searching knowledge base for RAG context via modal labs: {query}") |
|
|
|
data = {"query": query} |
|
modal_url = os.getenv("MODAL_LABS_HACKATHON_RAG_TOOLS_URL") |
|
response = requests.post(modal_url, json=data, timeout=600.0) |
|
|
|
if response.status_code != 200: |
|
print(f"Error in modal RAG response: {response.status_code} - {response.text}") |
|
return "No relevant information found from the knowledge base." |
|
|
|
print(f"modal RAG Response: {response.text}") |
|
return response.text or "No relevant information found" |
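
# A rough sketch of what the Modal-hosted endpoint behind
# MODAL_LABS_HACKATHON_RAG_TOOLS_URL might look like. This is illustrative only:
# the decorator names and the GroundX call are assumptions, not the deployed
# code. The only contract this module relies on is that the endpoint accepts
# {"query": "..."} as JSON and returns the retrieved context as plain text.
#
#     import modal
#
#     app = modal.App("hackathon-rag-tools")
#
#     @app.function(secrets=[modal.Secret.from_name("groundx-secret")])
#     @modal.web_endpoint(method="POST")
#     def rag_search(payload: dict) -> str:
#         query = payload["query"]
#         # Search the GroundX bucket holding the SU-35 manual (via the
#         # eyelevel.ai SDK) and return the concatenated chunks as plain text.
#         ...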
|
|
|
def research_write_review_topic(query: str) -> str: |
|
""" |
|
    Helps write a report on any topic by researching, writing, and reviewing it.

    Returns the reviewed report.
|
|
|
The query is a string that contains the topic to be researched and reviewed. |
|
|
|
Example queries: |
|
- "Write me a report on the history of the internet." |
|
- "Write me a report on origin of the universe." |
|
- "Write me a report on the impact of climate change on polar bears." |
|
- "Write me a report on the benefits of meditation." |
|
- "Write me a report on the future of artificial intelligence." |
|
- "Write me a report on the effects of social media on mental health." |
|
|
|
Args: |
|
        query (str): The topic to research, write about, and review.
|
|
|
Returns: |
|
        str: The reviewed report as a formatted string.
|
""" |
|
print(f"Researching the topic via modal labs: {query}") |
|
|
|
data = {"query": query} |
|
modal_url = os.getenv("MODAL_LABS_HACKATHON_RESEARCH_TOOLS_URL") |
|
response = requests.post(modal_url, json=data, timeout=600.0) |
|
|
|
if response.status_code != 200: |
|
print(f"Error in modal RESEARCH response: {response.status_code} - {response.text}") |
|
return "Research completed, but no content was generated." |
|
|
|
print(f"modal RESEARCH Response: {response.text}") |
|
return response.text or "Research completed, but no content was generated." |
|
|
|
with gr.Blocks() as server_info: |
|
gr.Markdown(""" |
|
# MCP-Powered RAG and Research Topic
|
|
|
I present to you an MCP-powered RAG and Research Topic application.

The tools are hosted and executed on the **Modal Labs** platform.
|
|
|
The RAG Tool uses **GroundX** storage by **eyelevel.ai** to fetch the knowledge base. The knowledge base is a document containing information about the SU-35 aircraft, including its features, capabilities, and specifications. Please refer to [this PDF](https://airgroup2000.com/gallery/albums/userpics/32438/SU-35_TM_eng.pdf) when formulating queries about the Sukhoi.
|
|
|
The Research Tool is implemented as a multi-agent workflow built with **LlamaIndex**.

<br>

The agents use a **Nebius**-provided LLM (meta-llama/Meta-Llama-3.1-8B-Instruct).
|
|
|
## Available Tools |
|
|
|
### search_knowledge_base_for_context |
|
- **Description**: Searches and retrieves relevant context from the SU-35 knowledge base, based on the user's query.
|
- **Example Queries**: |
|
- "What are the main features of fuel system of SU-35?" |
|
- "What is the combat potential of SU-35?" |
|
|
|
### research_write_review_topic |
|
- **Description**: Helps write a report on any topic by researching, writing, and reviewing it.
|
- **Example Queries**: |
|
- "Write me a report on the history of the internet." |
|
- "Write me a report on origin of the universe." |
|
- "Write me a report on the impact of climate change on polar bears." |
|
|
|
## How to Use |
|
- Use the MCP RAG Tool tab above to query the knowledge base. |
|
- Use the Research Tool tab above to write a report on any topic. |
|
|
|
## Demo Video

[Watch the demo on YouTube](https://youtu.be/wvHBqW2ABGg)
|
""") |
|
|
|
mcp_rag_tool = gr.Interface( |
|
fn=search_knowledge_base_for_context, |
|
inputs=["text"], |
|
outputs=[gr.Textbox(label="Knowledge Base", max_lines=15)], |
|
title="MCP RAG Tool", |
|
description="Searches and retrieves relevant context from a knowledge base", |
|
concurrency_limit=1 |
|
) |
|
|
|
research_tool = gr.Interface( |
|
fn=research_write_review_topic, |
|
inputs=["text"], |
|
outputs=[gr.Textbox(label="Reviewed Topic", max_lines=15)], |
|
title="Research Tool", |
|
description="Helps with report writing with research, writing, and review agents on any topic. ", |
|
concurrency_limit=1 |
|
) |
|
|
|
named_interfaces = { |
|
"Project Information": server_info, |
|
"Search Knowledge Base": mcp_rag_tool, |
|
"Research a Topic": research_tool |
|
} |
|
|
|
|
|
tab_names = list(named_interfaces.keys()) |
|
interface_list = list(named_interfaces.values()) |
|
|
|
mcp_server = gr.TabbedInterface( |
|
interface_list, |
|
tab_names=tab_names, |
|
title="π MCP-Powered RAG and Research Topic π" |
|
) |
|
|
|
|
|
if __name__ == "__main__": |
|
mcp_server.queue(default_concurrency_limit=1) |
|
mcp_server.launch( |
|
server_name="0.0.0.0", |
|
server_port=7860, |
|
share=False, |
|
debug=False, |
|
mcp_server=True |
|
) |
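
# When launched with mcp_server=True, Gradio also serves these tools over MCP.
# In recent Gradio releases the MCP endpoint is printed to the console at
# startup and is typically reachable at
#   http://<server_name>:<server_port>/gradio_api/mcp/sse
# (verify against the console output for the installed Gradio version). An
# MCP client configuration pointing at this server might look like:
#
#     {
#       "mcpServers": {
#         "hackathon-rag-research": {
#           "url": "http://localhost:7860/gradio_api/mcp/sse"
#         }
#       }
#     }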