"""Gradio demo: Awesome Multi-Agent Collaborative Perception resource hub."""

import gradio as gr

# ---------------------------------------------------------------------------
# Sample data for demonstration
# ---------------------------------------------------------------------------

# Each entry supplies the exact keys consumed by create_paper_card().
perception_papers = [
    {
        "title": "CoSDH: Communication-Efficient Collaborative Perception",
        "venue": "CVPR 2025",
        "description": "Novel approach for efficient collaborative perception using supply-demand awareness.",
        "link": "https://arxiv.org/abs/2503.03430",
    },
    {
        "title": "V2X-R: Cooperative LiDAR-4D Radar Fusion",
        "venue": "CVPR 2025",
        "description": "Cooperative fusion of LiDAR and 4D radar sensors for enhanced 3D object detection.",
        "link": "https://arxiv.org/abs/2411.08402",
    },
    {
        "title": "Where2comm: Efficient Collaborative Perception via Spatial Confidence Maps",
        "venue": "NeurIPS 2022",
        "description": "Groundbreaking work on efficient collaborative perception using spatial confidence maps.",
        "link": "https://openreview.net/forum?id=dLL4KXzKUpS",
    },
]

# Rows for the datasets table: [name, year, type, agents, size, features].
datasets_data = [
    ["DAIR-V2X", "2022", "Real-world", "V2I", "71K frames", "3D boxes, Infrastructure"],
    ["V2V4Real", "2023", "Real-world", "V2V", "20K frames", "Real V2V, Highway"],
    ["OPV2V", "2022", "Simulation", "V2V", "Large-scale", "CARLA, Multi-agent"],
    ["V2X-Sim", "2021", "Simulation", "Multi", "Scalable", "Multi-agent, Collaborative"],
]


def create_paper_card(paper):
    """Render a single paper dict as an HTML card.

    Args:
        paper: mapping with "title", "venue", "description" and "link" keys.

    Returns:
        str: HTML snippet for one card. The "link" field is now used as the
        href of the "Read Paper" anchor (it was previously defined in the
        data but never interpolated, leaving dead text instead of a link).
    """
    return f"""
<div class="paper-card">
    <span class="venue-badge">{paper['venue']}</span>
    <h3>{paper['title']}</h3>
    <p>{paper['description']}</p>
    <a href="{paper['link']}" target="_blank" rel="noopener">📄 Read Paper</a>
</div>
"""


# Custom CSS. The .main-header / .stats-grid / .stat-card classes are
# referenced by the header markup below; .paper-card styles the cards
# produced by create_paper_card().
custom_css = """
.gradio-container {
    max-width: 1200px !important;
}
.main-header {
    text-align: center;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    padding: 40px 20px;
    border-radius: 15px;
    margin-bottom: 30px;
}
.stats-grid {
    display: grid;
    grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
    gap: 20px;
    margin: 20px 0;
}
.stat-card {
    background: rgba(255,255,255,0.1);
    padding: 20px;
    border-radius: 10px;
    text-align: center;
}
.paper-card {
    padding: 20px;
    border-radius: 10px;
    border: 1px solid rgba(128,128,128,0.3);
    margin: 15px 0;
}
"""

# ---------------------------------------------------------------------------
# Interface
# ---------------------------------------------------------------------------
with gr.Blocks(
    title="🤖 Awesome Multi-Agent Collaborative Perception",
    theme=gr.themes.Soft(),
    css=custom_css,
) as demo:
    # Header: banner plus headline statistics, styled via custom_css.
    gr.HTML("""
<div class="main-header">
    <h1>🤖 Awesome Multi-Agent Collaborative Perception</h1>
    <p>Explore cutting-edge resources for Multi-Agent Collaborative Perception, Prediction, and Planning</p>
    <div class="stats-grid">
        <div class="stat-card"><h2>200+</h2><p>Papers</p></div>
        <div class="stat-card"><h2>25+</h2><p>Datasets</p></div>
        <div class="stat-card"><h2>50+</h2><p>Code Repos</p></div>
    </div>
</div>
""")

    # Main navigation tabs
    with gr.Tabs():
        with gr.Tab("🔍 Perception"):
            gr.Markdown("## Multi-Agent Collaborative Perception Papers")
            # One card per paper, concatenated into a single HTML component.
            papers_html = "".join(create_paper_card(paper) for paper in perception_papers)
            gr.HTML(papers_html)

        with gr.Tab("📊 Datasets"):
            gr.Markdown("## Datasets & Benchmarks")
            gr.Dataframe(
                value=datasets_data,
                headers=["Dataset", "Year", "Type", "Agents", "Size", "Features"],
                datatype=["str", "str", "str", "str", "str", "str"],
                interactive=False,
            )
            gr.Markdown("""
### Notable Datasets:
- **DAIR-V2X**: First real-world V2I collaborative perception dataset
- **V2V4Real**: Real vehicle-to-vehicle communication dataset
- **OPV2V**: Large-scale simulation benchmark in CARLA
- **V2X-Sim**: Comprehensive multi-agent simulation platform
""")

        with gr.Tab("📍 Tracking"):
            gr.Markdown("## Multi-Object Tracking & State Estimation")
            # NOTE(review): the original anchor hrefs for these two papers
            # were lost in the source; restore the arXiv links.
            gr.HTML("""
<div class="paper-card">
    <h3>MOT-CUP</h3>
    <p>Multi-Object Tracking with Conformal Uncertainty Propagation</p>
    <span>📄 Paper</span>
</div>
<div class="paper-card">
    <h3>DMSTrack</h3>
    <p>Probabilistic 3D Multi-Object Cooperative Tracking (ICRA 2024)</p>
    <span>📄 Paper</span>
</div>
""")

        with gr.Tab("🔮 Prediction"):
            gr.Markdown("## Trajectory Forecasting & Motion Prediction")
            gr.HTML("""
<div class="paper-card">
    <h3>🧠 Key Approaches:</h3>
</div>
""")

        with gr.Tab("🏛️ Conferences"):
            gr.Markdown("## Top Venues & Publication Trends")
            conference_data = [
                ["CVPR 2025", "5+", "End-to-end systems, robustness"],
                ["ICLR 2025", "3+", "Learning representations, scalability"],
                ["AAAI 2025", "4+", "AI applications, defense mechanisms"],
                ["ICRA 2025", "6+", "Robotics applications, real-world deployment"],
                ["NeurIPS 2024", "2+", "Theoretical foundations, novel architectures"],
            ]
            gr.Dataframe(
                value=conference_data,
                headers=["Conference", "Papers", "Focus Areas"],
                datatype=["str", "str", "str"],
                interactive=False,
            )

    # Footer
    # NOTE(review): the GitHub / Hugging Face hrefs were lost in the source;
    # restore the repository and Space URLs before shipping.
    gr.HTML("""
<div style="text-align: center; padding: 20px;">
    <h3>🤝 Contributing</h3>
    <p>We welcome contributions! Please submit papers, datasets, and code repositories via GitHub.</p>
    <p>📚 GitHub Repository &nbsp;·&nbsp; 🤗 Hugging Face Space</p>
</div>
""")

if __name__ == "__main__":
    demo.launch()