base_model:
  - Qwen/Qwen2.5-VL-3B-Instruct
datasets:
  - remyxai/OpenSpaces
language:
  - en
library_name: transformers
license_name: qwen-research
license_link: https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct/blob/main/LICENSE
pipeline_tag: image-text-to-text
tags:
  - remyx
  - vqasynth
  - spatial-reasoning
  - multimodal
  - vlm
  - vision-language
  - robotics
  - distance-estimation
  - embodied-ai
  - quantitative-spatial-reasoning
new_version: remyxai/SpaceThinker-Qwen2.5VL-3B
model-index:
  - name: SpaceQwen2.5-VL-3B-Instruct
    results:
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: 3DSRBench
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.515
        results_by_subcategory:
          - name: 3D Positional Relation / Orientation
            success_rate: 0.4706
          - name: Object Localization / 3D Localization
            success_rate: 0.5629
          - name: Object Properties / Size
            success_rate: 0.5116
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: BLINK
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.5
        results_by_subcategory:
          - name: 3D Positional Relation / Orientation
            success_rate: 0.6503
          - name: Counting / Object Counting
            success_rate: 0.6083
          - name: Depth and Distance / Relative
            success_rate: 0.5161
          - name: Object Localization / 2D Localization
            success_rate: 0.4426
          - name: Point and Object Tracking / Point Correspondence
            success_rate: 0.2849
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: MMIU
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.3045
        results_by_subcategory:
          - name: Camera and Image Transformation / 2D Transformation
            success_rate: 0.245
          - name: Camera and Image Transformation / 3D Camera Pose
            success_rate: 0.215
          - name: Camera and Image Transformation / Camera Motion
            success_rate: 0.4436
          - name: Depth and Distance / Absolute
            success_rate: 0.265
          - name: Object Localization / 3D Localization
            success_rate: 0.48
          - name: Point and Object Tracking / 3D Tracking
            success_rate: 0.24
          - name: Point and Object Tracking / Point Correspondence
            success_rate: 0.28
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: MMVP
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.5767
        results_by_subcategory:
          - name: Others / Miscellaneous
            success_rate: 0.5767
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: QSpatialBench-Plus
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.3663
        results_by_subcategory:
          - name: Depth and Distance / Absolute
            success_rate: 0.3663
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: QSpatialBench-ScanNet
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.33
        results_by_subcategory:
          - name: Depth and Distance / Absolute
            success_rate: 0.216
          - name: Object Properties / Size
            success_rate: 0.4444
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: RealWorldQA
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.4392
        results_by_subcategory:
          - name: Others / Miscellaneous
            success_rate: 0.4392
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: SpatialSense
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.6554
        results_by_subcategory:
          - name: 3D Positional Relation / Orientation
            success_rate: 0.6554
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: VGBench
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.2615
        results_by_subcategory:
          - name: Camera and Image Transformation / 2D Transformation
            success_rate: 0.2277
          - name: Camera and Image Transformation / 3D Camera Pose
            success_rate: 0.2438
          - name: Depth and Distance / Absolute
            success_rate: 0.2696
          - name: Depth and Distance / Relative
            success_rate: 0.1945
          - name: Object Localization / 3D Localization
            success_rate: 0.3733
          - name: Point and Object Tracking / 3D Tracking
            success_rate: 0.2655
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: VSI-Bench_8
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.2322
        results_by_subcategory:
          - name: 3D Positional Relation / Orientation
            success_rate: 0.3843
          - name: Counting / Object Counting
            success_rate: 0.1715
          - name: Depth and Distance / Absolute
            success_rate: 0.0299
          - name: Depth and Distance / Relative
            success_rate: 0.3521
          - name: Object Properties / Size
            success_rate: 0.2323
          - name: Others / Miscellaneous
            success_rate: 0.2525
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: VSR-ZeroShot
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.7373
        results_by_subcategory:
          - name: 3D Positional Relation / Orientation
            success_rate: 0.7373
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: cvbench
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.5179
        results_by_subcategory:
          - name: Counting / Object Counting
            success_rate: 0.6168
          - name: Depth and Distance / Relative
            success_rate: 0.4925
          - name: Object Localization / 3D Localization
            success_rate: 0.4446
      - task:
          type: visual-question-answering
          name: Spatial Reasoning
        dataset:
          name: spatialbench
          type: benchmark
        metrics:
          - type: success_rate
            name: Overall Success Rate
            value: 0.4879
        results_by_subcategory:
          - name: 3D Positional Relation / Orientation
            success_rate: 0.5294
          - name: Counting / Object Counting
            success_rate: 0.7
          - name: Object Properties / Existence
            success_rate: 0.45
          - name: Object Properties / Reachability
            success_rate: 0.5
          - name: Object Properties / Size
            success_rate: 0.25

SpaceQwen2.5-VL-3B-Instruct

This model is evaluated in the paper OmniSpatial: Towards Comprehensive Spatial Reasoning Benchmark for Vision Language Models. More information can be found on the project page.

  • Model Type: Multimodal, Vision-Language Model
  • Architecture: Qwen2.5-VL-3B-Instruct
  • Model Size: 3.75B parameters (FP16)
  • Finetuned from: Qwen/Qwen2.5-VL-3B-Instruct
  • Finetune Strategy: LoRA (Low-Rank Adaptation)
  • License: Qwen Research License (see license link)

Model Overview

This model uses data-synthesis techniques and publicly available models to reproduce the work described in SpatialVLM and enhance the spatial reasoning of multimodal models. A pipeline of expert models infers spatial relationships between objects in a scene to create a VQA dataset for spatial reasoning.
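
To get a quick feel for the data this pipeline produces, the snippet below streams a few records from the OpenSpaces dataset with the datasets library (a minimal sketch; it prints each record's keys rather than assuming a particular schema):

from datasets import load_dataset

# Stream a handful of OpenSpaces records without downloading the full dataset
ds = load_dataset("remyxai/OpenSpaces", split="train", streaming=True)

for i, sample in enumerate(ds):
    print(sample.keys())  # inspect the available fields (image, question, answer, ...)
    if i == 2:
        break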

Running SpaceQwen2.5-VL-3B-Instruct

Ollama

To launch with ollama, run:

ollama run hf.co/remyxai/SpaceQwen2.5-VL-3B-Instruct:latest
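
In the interactive session that follows, a local image can be referenced by path directly in the prompt (a hedged usage example; replace the path with your own image):

>>> What is the height of the man in the red hat in feet? /path/to/warehouse_sample_2.jpeg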

Transformers

Install qwen dependencies:

pip install qwen-vl-utils[decord]==0.0.8
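
You will also need a transformers release with Qwen2.5-VL support and, for device_map="auto" below, accelerate (a hedged suggestion; pin versions to match your environment):

pip install "transformers>=4.49.0" accelerate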

To run inference on a sample image:

from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "remyxai/SpaceQwen2.5-VL-3B-Instruct", torch_dtype="auto", device_map="auto"
)
processor = AutoProcessor.from_pretrained("remyxai/SpaceQwen2.5-VL-3B-Instruct")

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://raw.githubusercontent.com/remyxai/VQASynth/refs/heads/main/assets/warehouse_sample_2.jpeg",
            },
            {"type": "text", "text": "What is the height of the man in the red hat in feet?"},
        ],
    }
]

# Preparation for inference
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to(model.device)

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
    out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
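
To run the same script on a local image instead of a URL, the image entry can point at a file URI, which qwen-vl-utils also accepts (a minimal variation; the path below is a placeholder):

# Local files can be referenced with a file:// URI (placeholder path)
messages[0]["content"][0]["image"] = "file:///path/to/warehouse_sample_2.jpeg"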

GGUF

Or run SpaceQwen2.5-VL-3B-Instruct using llama.cpp:

./llama-qwen2vl-cli -m /path/to/SpaceQwen2.5-VL-3B-Instruct/SpaceQwen2.5-VL-3B-Instruct-F16.gguf \
                    --mmproj /path/to/SpaceQwen2.5-VL-3B-Instruct/spaceqwen2.5-vl-3b-instruct-vision.gguf \
                    -p "What's the height of the man in the red hat?" \
                    --image /path/to/warehouse_sample_2.jpeg --threads 24 -ngl 99
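
If the GGUF weights are not already on disk, they can be fetched with the Hugging Face CLI (a hedged sketch; the filenames mirror the command above and may change if the repository is reorganized):

huggingface-cli download remyxai/SpaceQwen2.5-VL-3B-Instruct \
    SpaceQwen2.5-VL-3B-Instruct-F16.gguf \
    spaceqwen2.5-vl-3b-instruct-vision.gguf \
    --local-dir ./SpaceQwen2.5-VL-3B-Instruct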

Dataset & Training

SpaceQwen2.5-VL-3B-Instruct uses LoRA to fine-tune Qwen2.5-VL-3B-Instruct on the OpenSpaces dataset.

Dataset Summary:

  • ~10k synthetic spatial reasoning traces

  • Question types: spatial relations, including metric distances (with units), above, left-of, contains, and closest-to

  • Format: image (RGB) + question + answer

  • Dataset: OpenSpaces

  • Code: VQASynth

  • Reference: SpatialVLM

Scripts for LoRA SFT are available in trl; a hedged sketch of the adapter setup follows.
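
For illustration, the sketch below attaches a LoRA adapter to the base model with peft. The rank, alpha, and target modules are hypothetical placeholders rather than the released adapter's exact configuration; the resulting model would then be trained with a trl SFTTrainer on OpenSpaces using a vision-aware data collator.

from transformers import Qwen2_5_VLForConditionalGeneration
from peft import LoraConfig, get_peft_model

# Load the base model that SpaceQwen fine-tunes
base = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-3B-Instruct", torch_dtype="auto", device_map="auto"
)

# Hypothetical LoRA hyperparameters, shown for illustration only
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)

model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # only the adapter weights should be trainable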

Model Evaluation

SpatialScore

SpaceQwen shines in the 3D positional relations categories of the SpatialScore-Hard comparison featured in the table below:

[Figure: SpatialScore-Hard comparison table]

Read more about the comprehensive spatial reasoning benchmark: SpatialScore.

The following chart compares the performance of SpaceQwen and SpaceThinker across the SpatialScore benchmark sources.

[Figure: SpaceQwen vs. SpaceThinker across SpatialScore benchmark sources]

OmniSpatial

OmniSpatial is another comprehensive spatial reasoning benchmark that assesses dynamic reasoning, complex spatial logic, spatial interaction, and perspective-taking capabilities.

[Figure: OmniSpatial benchmark results]

Learn more about OmniSpatial.

SpaCE-10

| Model | Overall | EQ | SQ | SA | OO | OS | EP | FR | SP | Source |
|---|---|---|---|---|---|---|---|---|---|---|
| InternVL2.5-4B | 36.01 | 34.30 | 34.40 | 43.60 | 44.40 | 16.50 | 31.10 | 50.10 | 33.70 | Table |
| SpaceThinker | 32.72 | 32.73 | 24.81 | 47.26 | 50.33 | 33.63 | 9.25 | 37.54 | 26.25 | GPT Eval |
| SpaceOm | 32.32 | 32.47 | 24.81 | 47.63 | 50.00 | 32.52 | 9.12 | 37.04 | 25.00 | GPT Eval |
| SpaceQwen | 31.98 | 31.19 | 25.89 | 41.61 | 51.98 | 35.18 | 10.97 | 36.54 | 22.50 | GPT Eval |
| Qwen2.5-VL-3B-Instruct | 30.00 | 31.70 | 45.50 | 39.00 | 43.00 | 25.30 | 11.50 | 22.80 | 21.20 | Table |

Legend:

  • EQ: Entity Quantification
  • SQ: Scene Quantification
  • SA: Size Assessment
  • OO: Object-Object spatial relations
  • OS: Object-Scene spatial relations
  • EP: Entity Presence
  • FR: Functional Reasoning
  • SP: Spatial Planning

ℹ️ Note: Scores for SpaceQwen, SpaceThinker, SpaceOm are generated via gpt_eval_score on single-choice (*-single) versions of the SpaCE-10 benchmark tasks. Other entries reflect leaderboard accuracy scores from the official SpaCE-10 evaluation table.

Read more about the SpaCE-10 benchmark or see the results here.

SIRI-Bench

SIRI-Bench is a video-based benchmark designed to evaluate complex spatial reasoning capabilities.

[Figure: SIRI-Bench results]

MindCube

MindCube is a benchmark for assessing Spatial Mental Modeling from Limited Views.

[Figure: MindCube results]

⚠️ Limitations & Ethical Considerations

  • Performance may degrade in cluttered scenes or under unusual camera perspectives.
  • This model was fine-tuned using synthetic reasoning over an internet image dataset.
  • Multimodal biases inherent to the base model (Qwen2.5-VL) may persist.
  • Not intended for use in safety-critical or legal decision-making.

Users are encouraged to evaluate outputs critically and consider fine-tuning for domain-specific safety and performance.

Citation

@article{chen2024spatialvlm,
  title = {SpatialVLM: Endowing Vision-Language Models with Spatial Reasoning Capabilities},
  author = {Chen, Boyuan and Xu, Zhuo and Kirmani, Sean and Ichter, Brian and Driess, Danny and Florence, Pete and Sadigh, Dorsa and Guibas, Leonidas and Xia, Fei},
  journal = {arXiv preprint arXiv:2401.12168},
  year = {2024},
  url = {https://arxiv.org/abs/2401.12168},
}

@misc{qwen2.5-VL,
    title = {Qwen2.5-VL},
    url = {https://qwenlm.github.io/blog/qwen2.5-vl/},
    author = {Qwen Team},
    month = {January},
    year = {2025}
}

@article{wu2025spatialscore,
    author    = {Wu, Haoning and Huang, Xiao and Chen, Yaohui and Zhang, Ya and Wang, Yanfeng and Xie, Weidi},
    title     = {SpatialScore: Towards Unified Evaluation for Multimodal Spatial Understanding},
    journal   = {arXiv preprint arXiv:2505.17012},
    year      = {2025},
}

@article{omnispatial25,
  title   = {OmniSpatial: Towards Comprehensive Spatial Reasoning Benchmark for Vision Language Models},
  author  = {Mengdi Jia and Zekun Qi and Shaochen Zhang and Wenyao Zhang and Xinqiang Yu and Jiawei He and He Wang and Li Yi},
  journal = {arXiv preprint arXiv:2506.03135},
  year = {2025}
}

@article{song2025siribench,
  title   = {{SIRI-Bench}: Challenging VLMs’ Spatial Intelligence through Complex Reasoning Tasks},
  author  = {Song, Zijian and Lin, Xiaoxin and Huang, Qiuming and Wang, Guangrun and Lin, Liang},
  journal = {arXiv preprint arXiv:2506.14512},
  year    = {2025},
  url     = {https://arxiv.org/abs/2506.14512}
}

@misc{yin2025spatial,
  title        = {Spatial Mental Modeling from Limited Views},
  author       = {Baiqiao Yin and Qineng Wang and Pingyue Zhang and Jianshu Zhang
                  and Kangrui Wang and Zihan Wang and Jieyu Zhang
                  and Keshigeyan Chandrasegaran and Han Liu and Ranjay Krishna
                  and Saining Xie and Manling Li and Jiajun Wu and Li Fei-Fei},
  year         = {2025},
  archivePrefix= {arXiv},
  eprint       = {2506.21458},
  primaryClass = {cs.AI},
  url          = {https://arxiv.org/abs/2506.21458}
}