import gradio as gr

# Sample data for demonstration
perception_papers = [
    {
        "title": "CoSDH: Communication-Efficient Collaborative Perception",
        "venue": "CVPR 2025",
        "description": "Novel approach for efficient collaborative perception using supply-demand awareness.",
        "link": "https://arxiv.org/abs/2503.03430"
    },
    {
        "title": "V2X-R: Cooperative LiDAR-4D Radar Fusion",
        "venue": "CVPR 2025",
        "description": "Cooperative fusion of LiDAR and 4D radar sensors for enhanced 3D object detection.",
        "link": "https://arxiv.org/abs/2411.08402"
    },
    {
        "title": "Where2comm: Efficient Collaborative Perception via Spatial Confidence Maps",
        "venue": "NeurIPS 2022",
        "description": "Groundbreaking work on efficient collaborative perception using spatial confidence maps.",
        "link": "https://openreview.net/forum?id=dLL4KXzKUpS"
    },
    {
        "title": "STAMP: Scalable Task-Agnostic Collaborative Perception",
        "venue": "ICLR 2025",
        "description": "Framework for scalable collaborative perception that is both task- and model-agnostic.",
        "link": "https://openreview.net/forum?id=8NdNniulYE"
    },
    {
        "title": "CoBEVFlow: Robust Asynchronous Collaborative 3D Detection",
        "venue": "NeurIPS 2023",
        "description": "Handles temporal asynchrony in collaborative perception using bird's-eye-view flow.",
        "link": "https://openreview.net/forum?id=UHIDdtxmVS"
    }
]

datasets_data = [
    ["DAIR-V2X", "2022", "Real-world", "V2I", "71K frames", "3D boxes, Infrastructure"],
    ["V2V4Real", "2023", "Real-world", "V2V", "20K frames", "Real V2V, Highway"],
    ["TUMTraf-V2X", "2024", "Real-world", "V2X", "2K sequences", "Dense labels, Urban"],
    ["OPV2V", "2022", "Simulation", "V2V", "Large-scale", "CARLA, Multi-agent"],
    ["V2X-Sim", "2021", "Simulation", "Multi", "Scalable", "Multi-agent, Collaborative"],
    ["DOLPHINS", "2024", "Simulation", "UAV", "UAV swarms", "AirSim, Multi-UAV"]
]
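
# Illustrative helper (not called by the UI): the paper entries above are plain
# dicts, so simple queries are easy. This hypothetical function filters entries
# by a venue keyword such as "CVPR" or "2025"; its name and signature are
# assumptions for illustration only.
def filter_papers(papers, keyword):
    """Return the entries whose venue contains `keyword` (case-insensitive)."""
    return [p for p in papers if keyword.lower() in p["venue"].lower()]

# Example: filter_papers(perception_papers, "CVPR") -> the two CVPR 2025 entries.
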
def create_paper_card(paper):
    """Render one paper entry (a dict with title, venue, description, link) as an HTML card."""
    return f"""
    <div style="border: 1px solid #ddd; border-radius: 10px; padding: 16px; margin: 12px 0;">
        <strong>{paper['venue']}</strong>
        <h3 style="margin: 8px 0;">{paper['title']}</h3>
        <p style="margin: 4px 0;">{paper['description']}</p>
        <a href="{paper['link']}" target="_blank">📄 Read Paper</a>
    </div>
    """


# Custom CSS
custom_css = """
.gradio-container {
    max-width: 1200px !important;
}
.main-header {
    text-align: center;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    padding: 40px 20px;
    border-radius: 15px;
    margin-bottom: 30px;
}
"""

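
# ---------------------------------------------------------------------------
# Illustrative sketch (not used by the UI): a toy version of the fusion
# strategies listed in the Perception tab below. The BEV array shapes, the
# confidence threshold, and the mean aggregation are assumptions chosen for
# clarity, not a reference implementation of any of the cited papers.
import numpy as np  # local import; only needed for this sketch


def intermediate_fusion(features, confidences, threshold=0.5):
    """Feature-level (intermediate) fusion with confidence-based selection.

    features:    list of (C, H, W) BEV feature maps, one per agent.
    confidences: list of (H, W) per-cell confidence maps in [0, 1].
    Cells whose confidence is below `threshold` are treated as not transmitted
    (selective communication); the remaining cells are averaged across agents.
    """
    acc = np.zeros_like(features[0], dtype=np.float64)
    count = np.zeros(features[0].shape[1:], dtype=np.float64)
    for feat, conf in zip(features, confidences):
        mask = conf > threshold          # which cells this agent shares
        acc += feat * mask               # (H, W) mask broadcasts over channels
        count += mask
    return acc / np.maximum(count, 1.0)  # cells nobody shared stay zero

# By contrast, early fusion would share raw sensor data (e.g., point clouds)
# before feature extraction, and late fusion would merge per-agent detection
# boxes (e.g., via non-maximum suppression) after each agent runs its own model.
#
# Example (three agents, 64-channel 32x32 BEV grids):
# fused = intermediate_fusion(
#     [np.random.rand(64, 32, 32) for _ in range(3)],
#     [np.random.rand(32, 32) for _ in range(3)],
# )
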
# Create the interface
with gr.Blocks(
    title="🤖 Awesome Multi-Agent Collaborative Perception",
    theme=gr.themes.Soft(),
    css=custom_css
) as demo:
    # Header
    gr.HTML("""
    <div class="main-header">
        <h1>🤖 Awesome Multi-Agent Collaborative Perception</h1>
        <p>Explore cutting-edge resources for Multi-Agent Collaborative Perception, Prediction, and Planning</p>
        <div style="display: flex; justify-content: center; gap: 40px; margin-top: 20px;">
            <div><strong>200+</strong><br>Papers</div>
            <div><strong>25+</strong><br>Datasets</div>
            <div><strong>50+</strong><br>Code Repos</div>
            <div><strong>2025</strong><br>Updated</div>
        </div>
    </div>
    """)

    # Main navigation tabs
    with gr.Tabs():
        with gr.Tab("🔍 Perception"):
            gr.Markdown("## Multi-Agent Collaborative Perception Papers")
            gr.Markdown("*Latest research in collaborative sensing, 3D object detection, and V2X communication*")

            # Create paper cards
            papers_html = "".join([create_paper_card(paper) for paper in perception_papers])
            gr.HTML(papers_html)

            # A toy sketch of these strategies (intermediate_fusion) is defined above the UI.
            gr.Markdown("""
            ### 🔄 Key Communication Strategies:
            - **Early Fusion**: Raw sensor data sharing
            - **Late Fusion**: Detection-level information exchange
            - **Intermediate Fusion**: Feature-level collaboration
            - **Selective Communication**: Confidence-based data sharing
            """)

        with gr.Tab("📊 Datasets"):
            gr.Markdown("## Datasets & Benchmarks")
            gr.Markdown("*A comprehensive collection of real-world and simulation datasets*")

            gr.Dataframe(
                value=datasets_data,
                headers=["Dataset", "Year", "Type", "Agents", "Size", "Features"],
                datatype=["str", "str", "str", "str", "str", "str"],
                interactive=False
            )

            gr.Markdown("""
            ### 🌟 Notable Features:
            - **DAIR-V2X**: First real-world V2I collaborative perception dataset with infrastructure sensors
            - **V2V4Real**: Real vehicle-to-vehicle communication dataset collected on highways
            - **TUMTraf-V2X**: Dense annotations for urban collaborative perception scenarios
            - **OPV2V**: Large-scale simulation benchmark built on the CARLA platform
            - **V2X-Sim**: Comprehensive multi-agent simulation with customizable scenarios
            """)

        with gr.Tab("📍 Tracking"):
            gr.Markdown("## Multi-Object Tracking & State Estimation")
            gr.Markdown("*Collaborative tracking across distributed agents with uncertainty quantification*")

            gr.HTML("""

            <div style="border: 1px solid #ddd; border-radius: 10px; padding: 16px; margin: 10px 0;">
                <h4>MOT-CUP</h4>
                <p>Multi-Object Tracking with Conformal Uncertainty Propagation</p>
                <p>📄 Paper</p>
            </div>
            <div style="border: 1px solid #ddd; border-radius: 10px; padding: 16px; margin: 10px 0;">
                <h4>DMSTrack</h4>
                <p>Probabilistic 3D Multi-Object Cooperative Tracking (ICRA 2024)</p>
                <p>📄 Paper</p>
            </div>
            <div style="border: 1px solid #ddd; border-radius: 10px; padding: 16px; margin: 10px 0;">
                <h4>CoDynTrust</h4>
                <p>Dynamic Feature Trust for Robust Asynchronous Collaborative Perception (ICRA 2025)</p>
                <p>📄 Paper</p>
            </div>
            """)

            gr.Markdown("""
            ### 🎯 Key Challenges:
            - **Temporal Asynchrony**: Handling different sensor timestamps and communication delays
            - **Uncertainty Quantification**: Reliable confidence estimation across multiple agents
            - **Data Association**: Multi-agent correspondence and track management
            - **Scalability**: Maintaining performance as the number of agents grows
            """)

        with gr.Tab("🔮 Prediction"):
            gr.Markdown("## Trajectory Forecasting & Motion Prediction")
            gr.Markdown("*Cooperative prediction for autonomous systems and multi-agent coordination*")

            gr.HTML("""

            <div style="border: 1px solid #ddd; border-radius: 10px; padding: 16px; margin: 10px 0;">
                <h4>V2X-Graph</h4>
                <p>Learning Cooperative Trajectory Representations (NeurIPS 2024)</p>
                <p>📄 Paper</p>
            </div>
            <div style="border: 1px solid #ddd; border-radius: 10px; padding: 16px; margin: 10px 0;">
                <h4>Co-MTP</h4>
                <p>Cooperative Multi-Temporal Prediction Framework (ICRA 2025)</p>
                <p>📄 Paper</p>
            </div>
            """)

            gr.HTML("""

            <h4>🧠 Key Approaches:</h4>
            """)

        with gr.Tab("⚙️ Methods"):
            gr.Markdown("## Methods & Techniques")
            gr.Markdown("*Core methodologies for communication, robustness, and learning in collaborative systems*")

            with gr.Row():
                with gr.Column():
                    gr.Markdown("""
                    ### 📡 Communication Strategies
                    - **Bandwidth Optimization**: Compression and selective sharing
                    - **Protocol Design**: V2V, V2I, and V2X communication standards
                    - **Network Topology**: Centralized vs. distributed architectures
                    - **Quality of Service**: Latency and reliability management
                    """)
                with gr.Column():
                    gr.Markdown("""
                    ### 🛡️ Robustness Approaches
                    - **Byzantine Fault Tolerance**: Handling adversarial agents
                    - **Uncertainty Handling**: Robust fusion under noise
                    - **Privacy Preservation**: Secure multi-party computation
                    - **Malicious Agent Detection**: CP-Guard framework (AAAI 2025)
                    """)

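            # Illustrative sketch (defined inline, not wired into the UI): the
            # simplest instance of the "Federated Learning" paradigm listed
            # below is federated averaging. The plain-dict weight format and
            # the function name are assumptions made for illustration.
            def federated_average(client_weights, client_sizes):
                """Average per-client model weights, weighting each client by
                its local dataset size (FedAvg-style aggregation)."""
                total = float(sum(client_sizes))
                return {
                    name: sum(
                        weights[name] * (size / total)
                        for weights, size in zip(client_weights, client_sizes)
                    )
                    for name in client_weights[0]
                }

            # Example: two clients sharing a single scalar weight "w".
            # federated_average([{"w": 1.0}, {"w": 0.0}], [100, 300]) -> {"w": 0.25}
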
            gr.HTML("""
            <div style="border: 1px solid #ddd; border-radius: 10px; padding: 16px; margin: 10px 0;">
                <h4>🧠 Learning Paradigms</h4>
                <ul>
                    <li><strong>Federated Learning</strong>: Distributed model training</li>
                    <li><strong>Transfer Learning</strong>: Cross-domain adaptation</li>
                    <li><strong>Meta-Learning</strong>: Quick adaptation to new scenarios</li>
                    <li><strong>Heterogeneous Learning</strong>: HEAL framework (ICLR 2024)</li>
                </ul>
            </div>
            """)

        with gr.Tab("🏛️ Conferences"):
            gr.Markdown("## Top Venues & Publication Trends")
            gr.Markdown("*Premier conferences and emerging research directions in collaborative perception*")

            conference_data = [
                ["CVPR 2025", "5+", "End-to-end systems, robustness"],
                ["ICLR 2025", "3+", "Learning representations, scalability"],
                ["AAAI 2025", "4+", "AI applications, defense mechanisms"],
                ["ICRA 2025", "6+", "Robotics applications, real-world deployment"],
                ["NeurIPS 2024", "2+", "Theoretical foundations, novel architectures"]
            ]

            gr.Dataframe(
                value=conference_data,
                headers=["Conference", "Papers", "Focus Areas"],
                datatype=["str", "str", "str"],
                interactive=False
            )

            gr.Markdown("""
            ### 📊 Research Trends (2024-2025):
            - **Communication Efficiency**: 40% increase in bandwidth-aware methods
            - **Robustness & Security**: Emerging focus on adversarial robustness (15+ papers)
            - **Real-World Deployment**: Growing emphasis on practical systems and field tests
            - **Heterogeneous Systems**: Multi-modal and multi-agent diversity becoming standard
            - **End-to-End Learning**: Integration of perception, prediction, and planning
            """)

    # Footer
    gr.HTML("""

    <div style="text-align: center; padding: 30px 20px; margin-top: 30px;">
        <h3>🤝 Contributing</h3>
        <p>We welcome contributions! Please submit papers, datasets, and code repositories via GitHub.</p>
        <p>📚 GitHub Repository &nbsp;&nbsp; 🤗 Hugging Face Space</p>
        <p>Made with ❤️ for the Collaborative Perception Community</p>
    </div>
    """)

if __name__ == "__main__":
    demo.launch()