import gradio as gr
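
# Featured papers rendered as HTML cards on the Perception tab.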
perception_papers = [
    {
        "title": "CoSDH: Communication-Efficient Collaborative Perception",
        "venue": "CVPR 2025",
        "description": "Communication-efficient collaborative perception via supply-demand awareness and intermediate-late hybridization.",
        "link": "https://arxiv.org/abs/2503.03430"
    },
    {
        "title": "V2X-R: Cooperative LiDAR-4D Radar Fusion",
        "venue": "CVPR 2025",
        "description": "Cooperative fusion of LiDAR and 4D radar sensors for enhanced 3D object detection.",
        "link": "https://arxiv.org/abs/2411.08402"
    },
    {
        "title": "Where2comm: Efficient Collaborative Perception via Spatial Confidence Maps",
        "venue": "NeurIPS 2022",
        "description": "Cuts communication bandwidth by sharing only spatially sparse, confidence-selected features.",
        "link": "https://openreview.net/forum?id=dLL4KXzKUpS"
    }
]
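
# Rows for the Datasets tab: dataset, year, type, agent setting, size, key features.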
datasets_data = [
    ["DAIR-V2X", "2022", "Real-world", "V2I", "71K frames", "3D boxes, Infrastructure"],
    ["V2V4Real", "2023", "Real-world", "V2V", "20K frames", "Real V2V, Highway"],
    ["OPV2V", "2022", "Simulation", "V2V", "Large-scale", "CARLA, Multi-agent"],
    ["V2X-Sim", "2021", "Simulation", "Multi", "Scalable", "Multi-agent, Collaborative"]
]
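

# Render one paper dict as a styled HTML card.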
def create_paper_card(paper):
    return f"""
    <div style="border: 1px solid #ddd; border-radius: 10px; padding: 20px; margin: 10px 0; background: white;">
        <div style="background: #667eea; color: white; padding: 5px 10px; border-radius: 15px; display: inline-block; font-size: 0.8em; margin-bottom: 10px;">
            {paper['venue']}
        </div>
        <h3 style="color: #333; margin: 10px 0;">{paper['title']}</h3>
        <p style="color: #666; line-height: 1.5; margin-bottom: 15px;">{paper['description']}</p>
        <a href="{paper['link']}" target="_blank" style="background: #667eea; color: white; padding: 8px 15px; border-radius: 5px; text-decoration: none; font-size: 0.9em;">
            📄 Read Paper
        </a>
    </div>
    """
custom_css = """
.gradio-container {
    max-width: 1200px !important;
}
.main-header {
    text-align: center;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    padding: 40px 20px;
    border-radius: 15px;
    margin-bottom: 30px;
}
.stats-grid {
    display: grid;
    grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
    gap: 20px;
    margin: 20px 0;
}
.stat-card {
    background: rgba(255,255,255,0.1);
    padding: 20px;
    border-radius: 10px;
    text-align: center;
}
"""
with gr.Blocks(
    title="🤖 Awesome Multi-Agent Collaborative Perception",
    theme=gr.themes.Soft(),
    css=custom_css
) as demo:

    gr.HTML("""
    <div class="main-header">
        <h1 style="font-size: 2.5rem; margin-bottom: 10px;">🤖 Awesome Multi-Agent Collaborative Perception</h1>
        <p style="font-size: 1.2rem; opacity: 0.9;">Explore cutting-edge resources for Multi-Agent Collaborative Perception, Prediction, and Planning</p>
        <div style="display: flex; justify-content: center; gap: 30px; margin-top: 20px; flex-wrap: wrap;">
            <div style="background: rgba(255,255,255,0.2); padding: 10px 20px; border-radius: 25px;">
                <div style="font-size: 1.5rem; font-weight: bold;">200+</div>
                <div>Papers</div>
            </div>
            <div style="background: rgba(255,255,255,0.2); padding: 10px 20px; border-radius: 25px;">
                <div style="font-size: 1.5rem; font-weight: bold;">25+</div>
                <div>Datasets</div>
            </div>
            <div style="background: rgba(255,255,255,0.2); padding: 10px 20px; border-radius: 25px;">
                <div style="font-size: 1.5rem; font-weight: bold;">50+</div>
                <div>Code Repos</div>
            </div>
        </div>
    </div>
    """)
    with gr.Tabs():

        with gr.Tab("🔍 Perception"):
            gr.Markdown("## Multi-Agent Collaborative Perception Papers")

            papers_html = "".join([create_paper_card(paper) for paper in perception_papers])
            gr.HTML(papers_html)

        with gr.Tab("📊 Datasets"):
            gr.Markdown("## Datasets & Benchmarks")

            gr.Dataframe(
                value=datasets_data,
                headers=["Dataset", "Year", "Type", "Agents", "Size", "Features"],
                datatype=["str", "str", "str", "str", "str", "str"],
                interactive=False
            )

            gr.Markdown("""
            ### Notable Datasets:
            - **DAIR-V2X**: First real-world V2I collaborative perception dataset
            - **V2V4Real**: Real vehicle-to-vehicle communication dataset
            - **OPV2V**: Large-scale simulation benchmark in CARLA
            - **V2X-Sim**: Comprehensive multi-agent simulation platform
            """)

        with gr.Tab("🎯 Tracking"):
            gr.Markdown("## Multi-Object Tracking & State Estimation")

            gr.HTML("""
            <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); gap: 20px;">
                <div style="border: 1px solid #ddd; border-radius: 10px; padding: 20px; background: white;">
                    <h3>MOT-CUP</h3>
                    <p>Multi-Object Tracking with Conformal Uncertainty Propagation</p>
                    <a href="https://arxiv.org/abs/2303.14346" target="_blank" style="color: #667eea;">📄 Paper</a>
                </div>
                <div style="border: 1px solid #ddd; border-radius: 10px; padding: 20px; background: white;">
                    <h3>DMSTrack</h3>
                    <p>Probabilistic 3D Multi-Object Cooperative Tracking (ICRA 2024)</p>
                    <a href="https://arxiv.org/abs/2309.14655" target="_blank" style="color: #667eea;">📄 Paper</a>
                </div>
            </div>
            """)

        with gr.Tab("🔮 Prediction"):
            gr.Markdown("## Trajectory Forecasting & Motion Prediction")

            gr.HTML("""
            <div style="background: #f8f9fa; border-radius: 10px; padding: 20px; margin: 20px 0;">
                <h3>🧠 Key Approaches:</h3>
                <ul style="line-height: 1.8;">
                    <li><strong>Graph Neural Networks</strong>: Modeling agent interactions</li>
                    <li><strong>Transformer Architectures</strong>: Attention-based prediction</li>
                    <li><strong>Multi-Modal Fusion</strong>: Combining different sensor modalities</li>
                    <li><strong>Uncertainty Quantification</strong>: Reliable confidence estimation</li>
                </ul>
            </div>
            """)

        with gr.Tab("🏛️ Conferences"):
            gr.Markdown("## Top Venues & Publication Trends")

            conference_data = [
                ["CVPR 2025", "5+", "End-to-end systems, robustness"],
                ["ICLR 2025", "3+", "Learning representations, scalability"],
                ["AAAI 2025", "4+", "AI applications, defense mechanisms"],
                ["ICRA 2025", "6+", "Robotics applications, real-world deployment"],
                ["NeurIPS 2024", "2+", "Theoretical foundations, novel architectures"]
            ]

            gr.Dataframe(
                value=conference_data,
                headers=["Conference", "Papers", "Focus Areas"],
                datatype=["str", "str", "str"],
                interactive=False
            )

    gr.HTML("""
    <div style="text-align: center; margin-top: 40px; padding: 30px; background: #f8f9fa; border-radius: 10px;">
        <h3>🤝 Contributing</h3>
        <p>We welcome contributions! Please submit papers, datasets, and code repositories via GitHub.</p>
        <div style="margin-top: 20px;">
            <a href="https://github.com/your-username/awesome-multi-agent-collaborative-perception" target="_blank"
               style="background: #667eea; color: white; padding: 10px 20px; border-radius: 5px; text-decoration: none; margin: 5px;">
                📚 GitHub Repository
            </a>
            <a href="https://huggingface.co/spaces/your-username/awesome-multi-agent-collaborative-perception" target="_blank"
               style="background: #ff6b6b; color: white; padding: 10px 20px; border-radius: 5px; text-decoration: none; margin: 5px;">
                🤗 Hugging Face Space
            </a>
        </div>
    </div>
    """)


if __name__ == "__main__":
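    # Launch locally; pass share=True to demo.launch() for a temporary public URL.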
    demo.launch()