# Awesome Multi-Agent Collaborative Perception — Gradio demo app.
import html

import gradio as gr
# Sample data for demonstration
# Schema: each entry is a dict with the string keys "title", "venue",
# "description" and "link" — exactly the keys create_paper_card() reads.
perception_papers = [
{
"title": "CoSDH: Communication-Efficient Collaborative Perception",
"venue": "CVPR 2025",
"description": "Novel approach for efficient collaborative perception using supply-demand awareness.",
"link": "https://arxiv.org/abs/2503.03430"
},
{
"title": "V2X-R: Cooperative LiDAR-4D Radar Fusion",
"venue": "CVPR 2025",
"description": "Cooperative fusion of LiDAR and 4D radar sensors for enhanced 3D object detection.",
"link": "https://arxiv.org/abs/2411.08402"
},
{
"title": "Where2comm: Efficient Collaborative Perception via Spatial Confidence Maps",
"venue": "NeurIPS 2022",
"description": "Groundbreaking work on efficient collaborative perception using spatial confidence maps.",
"link": "https://openreview.net/forum?id=dLL4KXzKUpS"
}
]
# Rows for the Datasets tab's gr.Dataframe; column order must match the
# headers passed there: Dataset, Year, Type, Agents, Size, Features.
datasets_data = [
["DAIR-V2X", "2022", "Real-world", "V2I", "71K frames", "3D boxes, Infrastructure"],
["V2V4Real", "2023", "Real-world", "V2V", "20K frames", "Real V2V, Highway"],
["OPV2V", "2022", "Simulation", "V2V", "Large-scale", "CARLA, Multi-agent"],
["V2X-Sim", "2021", "Simulation", "Multi", "Scalable", "Multi-agent, Collaborative"]
]
def create_paper_card(paper):
    """Render one paper entry as a styled HTML card.

    Parameters
    ----------
    paper : dict
        Mapping with string keys ``"title"``, ``"venue"``,
        ``"description"`` and ``"link"`` (see ``perception_papers``
        for the expected schema).

    Returns
    -------
    str
        An HTML fragment: a card with a venue badge, title,
        description and an external "Read Paper" link.
    """
    # Escape interpolated fields so titles/descriptions containing
    # "<", ">" or "&" cannot break the markup; quote=True also makes
    # the URL safe inside the href="..." attribute.
    venue = html.escape(paper["venue"])
    title = html.escape(paper["title"])
    description = html.escape(paper["description"])
    link = html.escape(paper["link"], quote=True)
    return f"""
<div style="border: 1px solid #ddd; border-radius: 10px; padding: 20px; margin: 10px 0; background: white;">
<div style="background: #667eea; color: white; padding: 5px 10px; border-radius: 15px; display: inline-block; font-size: 0.8em; margin-bottom: 10px;">
{venue}
</div>
<h3 style="color: #333; margin: 10px 0;">{title}</h3>
<p style="color: #666; line-height: 1.5; margin-bottom: 15px;">{description}</p>
<a href="{link}" target="_blank" style="background: #667eea; color: white; padding: 8px 15px; border-radius: 5px; text-decoration: none; font-size: 0.9em;">
๐ Read Paper
</a>
</div>
"""
# Custom CSS
# Injected into the page via gr.Blocks(css=custom_css). The .main-header
# class is referenced by the header HTML below.
# NOTE(review): .stats-grid and .stat-card appear unused — the header's
# stat boxes use inline styles instead; confirm before removing.
custom_css = """
.gradio-container {
max-width: 1200px !important;
}
.main-header {
text-align: center;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
padding: 40px 20px;
border-radius: 15px;
margin-bottom: 30px;
}
.stats-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 20px;
margin: 20px 0;
}
.stat-card {
background: rgba(255,255,255,0.1);
padding: 20px;
border-radius: 10px;
text-align: center;
}
"""
# Build the complete UI inside a gr.Blocks context; the assembled app is
# exposed as `demo` for the launch guard at the bottom of the file.
# The embedded HTML/Markdown payloads are reproduced verbatim — browsers
# ignore their leading-whitespace layout.
with gr.Blocks(
    title="๐ค Awesome Multi-Agent Collaborative Perception",
    theme=gr.themes.Soft(),
    css=custom_css,
) as demo:

    # --- Hero header with headline statistics --------------------------
    gr.HTML("""
<div class="main-header">
<h1 style="font-size: 2.5rem; margin-bottom: 10px;">๐ค Awesome Multi-Agent Collaborative Perception</h1>
<p style="font-size: 1.2rem; opacity: 0.9;">Explore cutting-edge resources for Multi-Agent Collaborative Perception, Prediction, and Planning</p>
<div style="display: flex; justify-content: center; gap: 30px; margin-top: 20px; flex-wrap: wrap;">
<div style="background: rgba(255,255,255,0.2); padding: 10px 20px; border-radius: 25px;">
<div style="font-size: 1.5rem; font-weight: bold;">200+</div>
<div>Papers</div>
</div>
<div style="background: rgba(255,255,255,0.2); padding: 10px 20px; border-radius: 25px;">
<div style="font-size: 1.5rem; font-weight: bold;">25+</div>
<div>Datasets</div>
</div>
<div style="background: rgba(255,255,255,0.2); padding: 10px 20px; border-radius: 25px;">
<div style="font-size: 1.5rem; font-weight: bold;">50+</div>
<div>Code Repos</div>
</div>
</div>
</div>
""")

    # --- Topic tabs -----------------------------------------------------
    with gr.Tabs():
        with gr.Tab("๐ Perception"):
            gr.Markdown("## Multi-Agent Collaborative Perception Papers")
            # One HTML card per curated paper entry.
            papers_html = "".join(
                create_paper_card(entry) for entry in perception_papers
            )
            gr.HTML(papers_html)

        with gr.Tab("๐ Datasets"):
            gr.Markdown("## Datasets & Benchmarks")
            gr.Dataframe(
                headers=["Dataset", "Year", "Type", "Agents", "Size", "Features"],
                value=datasets_data,
                datatype=["str"] * 6,
                interactive=False,
            )
            gr.Markdown("""
### Notable Datasets:
- **DAIR-V2X**: First real-world V2I collaborative perception dataset
- **V2V4Real**: Real vehicle-to-vehicle communication dataset
- **OPV2V**: Large-scale simulation benchmark in CARLA
- **V2X-Sim**: Comprehensive multi-agent simulation platform
""")

        with gr.Tab("๐ Tracking"):
            gr.Markdown("## Multi-Object Tracking & State Estimation")
            gr.HTML("""
<div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); gap: 20px;">
<div style="border: 1px solid #ddd; border-radius: 10px; padding: 20px; background: white;">
<h3>MOT-CUP</h3>
<p>Multi-Object Tracking with Conformal Uncertainty Propagation</p>
<a href="https://arxiv.org/abs/2303.14346" target="_blank" style="color: #667eea;">๐ Paper</a>
</div>
<div style="border: 1px solid #ddd; border-radius: 10px; padding: 20px; background: white;">
<h3>DMSTrack</h3>
<p>Probabilistic 3D Multi-Object Cooperative Tracking (ICRA 2024)</p>
<a href="https://arxiv.org/abs/2309.14655" target="_blank" style="color: #667eea;">๐ Paper</a>
</div>
</div>
""")

        with gr.Tab("๐ฎ Prediction"):
            gr.Markdown("## Trajectory Forecasting & Motion Prediction")
            gr.HTML("""
<div style="background: #f8f9fa; border-radius: 10px; padding: 20px; margin: 20px 0;">
<h3>๐ง Key Approaches:</h3>
<ul style="line-height: 1.8;">
<li><strong>Graph Neural Networks</strong>: Modeling agent interactions</li>
<li><strong>Transformer Architectures</strong>: Attention-based prediction</li>
<li><strong>Multi-Modal Fusion</strong>: Combining different sensor modalities</li>
<li><strong>Uncertainty Quantification</strong>: Reliable confidence estimation</li>
</ul>
</div>
""")

        with gr.Tab("๐๏ธ Conferences"):
            gr.Markdown("## Top Venues & Publication Trends")
            # Rows: venue, approximate paper count, focus areas.
            conference_data = [
                ["CVPR 2025", "5+", "End-to-end systems, robustness"],
                ["ICLR 2025", "3+", "Learning representations, scalability"],
                ["AAAI 2025", "4+", "AI applications, defense mechanisms"],
                ["ICRA 2025", "6+", "Robotics applications, real-world deployment"],
                ["NeurIPS 2024", "2+", "Theoretical foundations, novel architectures"],
            ]
            gr.Dataframe(
                headers=["Conference", "Papers", "Focus Areas"],
                value=conference_data,
                datatype=["str"] * 3,
                interactive=False,
            )

    # --- Footer with contribution links ---------------------------------
    gr.HTML("""
<div style="text-align: center; margin-top: 40px; padding: 30px; background: #f8f9fa; border-radius: 10px;">
<h3>๐ค Contributing</h3>
<p>We welcome contributions! Please submit papers, datasets, and code repositories via GitHub.</p>
<div style="margin-top: 20px;">
<a href="https://github.com/your-username/awesome-multi-agent-collaborative-perception" target="_blank"
style="background: #667eea; color: white; padding: 10px 20px; border-radius: 5px; text-decoration: none; margin: 5px;">
๐ GitHub Repository
</a>
<a href="https://huggingface.co/spaces/your-username/awesome-multi-agent-collaborative-perception" target="_blank"
style="background: #ff6b6b; color: white; padding: 10px 20px; border-radius: 5px; text-decoration: none; margin: 5px;">
๐ค Hugging Face Space
</a>
</div>
</div>
""")
# Script entry point: start the Gradio server only when run directly
# (not when imported). Fixes the stray trailing "|" artifact that made
# this line a syntax error.
if __name__ == "__main__":
    demo.launch()