rsi committed
Commit 7f616ca · 1 Parent(s): 8f78e1e

update readme

Files changed (2)
  1. PixelsPointsPolygons.py +170 -0
  2. README.md +73 -44
PixelsPointsPolygons.py ADDED
@@ -0,0 +1,170 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+ import json
+ import os
+ import datasets
+
+ _CITATION = """\
+ @InProceedings{arxiv,
+ title = {The P3 dataset: Pixels, Points and Polygons <br> for Multimodal Building Vectorization},
+ author={Raphael Sulzer},
+ year={2025}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The P3 dataset is a large-scale multimodal benchmark for building vectorization, constructed from aerial LiDAR point clouds, high-resolution aerial imagery, and vectorized 2D building outlines, collected across three continents.
+ """
+
+ _HOMEPAGE = "https://github.com/raphaelsulzer/PixelsPointsPolygons"
+
+ _LICENSE = "cc-by-4.0"
+
+
+ # _URLS = {
+ #     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+ #     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+ # }
+
+ class PixelsPointsPolygons(datasets.GeneratorBasedBuilder):
+     """The P3 dataset is a large-scale multimodal benchmark for building vectorization."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options,
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="all", version=VERSION, description="Data from all countries (CH, NY, NZ)"),
+         datasets.BuilderConfig(name="CH", version=VERSION, description="Data from Switzerland (CH) only"),
+         datasets.BuilderConfig(name="NY", version=VERSION, description="Data from New York State, US (NY) only"),
+         datasets.BuilderConfig(name="NZ", version=VERSION, description="Data from New Zealand (NZ) only"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "all"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         # TODO: This method specifies the datasets.DatasetInfo object which contains the information and typings for the dataset
+         # if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
+         #     features = datasets.Features(
+         #         {
+         #             "sentence": datasets.Value("string"),
+         #             "option1": datasets.Value("string"),
+         #             "answer": datasets.Value("string")
+         #             # These are the features of your dataset like images, labels ...
+         #         }
+         #     )
+         # else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
+         #     features = datasets.Features(
+         #         {
+         #             "sentence": datasets.Value("string"),
+         #             "option2": datasets.Value("string"),
+         #             "second_domain_answer": datasets.Value("string")
+         #             # These are the features of your dataset like images, labels ...
+         #         }
+         #     )
+         features = datasets.Features(
+             {
+                 # NOTE: these scalar typings look like placeholders; images and lidar
+                 # will likely need richer feature types once loading is implemented.
+                 "images": datasets.Value("uint8"),
+                 "lidar": datasets.Value("float32"),
+                 "polygon": datasets.Value("float32"),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _get_urls(self):
+         # TODO: return the download URLs for the selected configuration
+         # (see the commented-out _URLS template above)
+         raise NotImplementedError
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         urls = _URLS[self.config.name]  # NOTE: _URLS is still commented out above and must be defined before this runs
+         data_dir = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "train.jsonl"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "dev.jsonl"),
+                     "split": "dev",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "test.jsonl"),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         # NOTE: the logic below is still the HF template; the "first_domain"/"second_domain" names do not match
+         # the configs in BUILDER_CONFIGS, and the yielded fields do not match the features declared in _info.
+         with open(filepath, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data = json.loads(row)
+                 if self.config.name == "first_domain":
+                     # Yields examples as (key, example) tuples
+                     yield key, {
+                         "sentence": data["sentence"],
+                         "option1": data["option1"],
+                         "answer": "" if split == "test" else data["answer"],
+                     }
+                 else:
+                     yield key, {
+                         "sentence": data["sentence"],
+                         "option2": data["option2"],
+                         "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
+                     }
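The script as committed is still mostly the HF template (see the TODOs above), but the config names are final. A minimal sketch of how the finished builder would be consumed, assuming script-based loading with `trust_remote_code`; the flag requirement and the printed structure are assumptions, not confirmed by this commit:

```python
# Minimal sketch: load the P3 builder defined above via the `datasets` library.
# Config names ("all", "CH", "NY", "NZ") come from BUILDER_CONFIGS; everything
# else here is an assumption until the script's TODOs are resolved.
from datasets import load_dataset

ds = load_dataset("rsi/PixelsPointsPolygons", "CH", trust_remote_code=True)

print(ds)                    # expected: DatasetDict with train/validation/test splits
print(ds["train"].features)  # expected: images / lidar / polygon, as declared in _info
```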
README.md CHANGED
@@ -20,44 +20,42 @@ tags:
  - Vectorization
  language:
  - en
- configs:
- - config_name: all
-   data_files:
-   - split: train
-     path: "data/224/annotations/annotations_all_train.json"
-   - split: val
-     path: "data/224/annotations/annotations_all_val.json"
-   - split: test
-     path: "data/224/annotations/annotations_all_test.json"
- - config_name: CH
-   data_files:
-   - split: train
-     path: "data/224/annotations/annotations_CH_train.json"
-   - split: val
-     path: "data/224/annotations/annotations_CH_val.json"
-   - split: test
-     path: "data/224/annotations/annotations_CH_test.json"
- - config_name: NY
-   data_files:
-   - split: train
-     path: "data/224/annotations/annotations_NY_train.json"
-   - split: val
-     path: "data/224/annotations/annotations_NY_val.json"
-   - split: test
-     path: "data/224/annotations/annotations_NY_test.json"
- - config_name: NZ
-   data_files:
-   - split: train
-     path: "data/224/annotations/annotations_NZ_train.json"
-   - split: val
-     path: "data/224/annotations/annotations_NZ_val.json"
-   - split: test
-     path: "data/224/annotations/annotations_NZ_test.json"
+ # configs:
+ # - config_name: all
+ #   data_files:
+ #   - split: train
+ #     path: "data/224/annotations/annotations_all_train.json"
+ #   - split: val
+ #     path: "data/224/annotations/annotations_all_val.json"
+ #   - split: test
+ #     path: "data/224/annotations/annotations_all_test.json"
+ # - config_name: CH
+ #   data_files:
+ #   - split: train
+ #     path: "data/224/annotations/annotations_CH_train.json"
+ #   - split: val
+ #     path: "data/224/annotations/annotations_CH_val.json"
+ #   - split: test
+ #     path: "data/224/annotations/annotations_CH_test.json"
+ # - config_name: NY
+ #   data_files:
+ #   - split: train
+ #     path: "data/224/annotations/annotations_NY_train.json"
+ #   - split: val
+ #     path: "data/224/annotations/annotations_NY_val.json"
+ #   - split: test
+ #     path: "data/224/annotations/annotations_NY_test.json"
+ # - config_name: NZ
+ #   data_files:
+ #   - split: train
+ #     path: "data/224/annotations/annotations_NZ_train.json"
+ #   - split: val
+ #     path: "data/224/annotations/annotations_NZ_val.json"
+ #   - split: test
+ #     path: "data/224/annotations/annotations_NZ_test.json"
  ---


-
-
  <div align="center">
  <h1 align="center">The P<sup>3</sup> dataset: Pixels, Points and Polygons <br> for Multimodal Building Vectorization</h1>
  <h3 align="center">Raphael Sulzer<sup>1,2</sup> &nbsp;&nbsp;&nbsp; Liuyun Duan<sup>1</sup>
@@ -67,6 +65,16 @@ configs:
  <b>Figure 1</b>: A view of our dataset of Zurich, Switzerland
  </div>

+ ## Table of Contents
+
+ - [Abstract](#abstract)
+ - [Highlights](#highlights)
+ - [Dataset](#dataset)
+ - [Pretrained model weights](#pretrained-model-weights)
+ - [Code](#code)
+ - [Citation](#citation)
+ - [Acknowledgements](#acknowledgements)
+
  ## Abstract

  <div align="justify">
@@ -79,7 +87,6 @@ We present the P<sup>3</sup> dataset, a large-scale multimodal benchmark for bui
  - A library for training and evaluating state-of-the-art deep learning methods on the dataset, available at [github.com/raphaelsulzer/PixelsPointsPolygons](https://github.com/raphaelsulzer/PixelsPointsPolygons)
  - Pretrained model weights, available at [huggingface.co/rsi/PixelsPointsPolygons](https://huggingface.co/rsi/PixelsPointsPolygons)

-
  ## Dataset

  ### Overview
@@ -90,15 +97,26 @@ We present the P<sup>3</sup> dataset, a large-scale multimodal benchmark for bui

  ### Download

+ The recommended and fastest way to download the dataset is to run
+
+ ```
+ pip install huggingface_hub
+ python scripts/download_dataset.py --dataset-root $DATA_ROOT
+ ```
+
+ Optionally, you can also download the dataset by running
+
  ```
  git lfs install
  git clone https://huggingface.co/datasets/rsi/PixelsPointsPolygons $DATA_ROOT
  ```

+ Both options will download the full dataset, including aerial images (as .tif), aerial LiDAR point clouds (as .copc.laz) and building polygon annotations (as MS-COCO .json), into `$DATA_ROOT`. The size of the dataset is around 163GB.
+
  ### Structure

  <details>
- <summary>📁 Click to expand folder structure</summary>
+ <summary>📁 Click to expand dataset folder structure</summary>

  ```text
  PixelsPointsPolygons/data/224
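`scripts/download_dataset.py` itself is not part of this commit; a minimal sketch of an equivalent download using `huggingface_hub` directly, assuming the script essentially wraps a snapshot download:

```python
# Hypothetical equivalent of scripts/download_dataset.py: mirror the dataset
# repository into $DATA_ROOT via huggingface_hub.
import os
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="rsi/PixelsPointsPolygons",
    repo_type="dataset",
    local_dir=os.environ["DATA_ROOT"],  # ~163GB on disk, so check free space first
)
```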
@@ -490,12 +508,21 @@ PixelsPointsPolygons/data/224

  ### Download

+ The recommended and fastest way to download the pretrained model weights is to run
+
+ ```
+ python scripts/download_pretrained.py --model-root $MODEL_ROOT
+ ```
+
+ Optionally, you can also download the weights by running
+
  ```
- git lfs install
  git clone https://huggingface.co/rsi/PixelsPointsPolygons $MODEL_ROOT
  ```

+ Both options will download all checkpoints (as .pth) and the results presented in the paper (as MS-COCO .json) into `$MODEL_ROOT`.
+
  ## Code

  ### Download

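As with the dataset, `scripts/download_pretrained.py` is not shown in this commit; a minimal sketch of the equivalent `huggingface_hub` call for the model repository, where the script's actual behavior is an assumption:

```python
# Hypothetical equivalent of scripts/download_pretrained.py: mirror the model
# repository (repo_type defaults to "model") into $MODEL_ROOT.
import os
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="rsi/PixelsPointsPolygons",
    local_dir=os.environ["MODEL_ROOT"],
)
```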
@@ -536,7 +563,7 @@ pip install .

  ### Setup

- The project supports hydra configuration which allows to modify any parameter either from a `.yaml` file of directly from the command line.
+ The project supports hydra configuration, which allows you to modify any parameter either from a `.yaml` file or directly from the command line.

  To set up the project structure, we recommend specifying your `$DATA_ROOT` and `$MODEL_ROOT` in `config/host/default.yaml`.

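The same composition can also be done programmatically; a small sketch using hydra's compose API, where the config name `default` and the `experiment` group are assumptions based on the paths mentioned above:

```python
# Hypothetical: compose the project config in Python the way the CLI overrides do.
from hydra import compose, initialize

with initialize(version_base=None, config_path="config"):
    # equivalent of passing `experiment=ffl_fusion` on the command line
    cfg = compose(config_name="default", overrides=["experiment=ffl_fusion"])
print(cfg)
```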
@@ -570,7 +597,7 @@ python scripts/predict_demo.py

  ### Reproduce paper results

- To reproduce the results from the paper you can run any of the following commands
+ To reproduce the results from the paper, you can run the following commands

  ```
  python scripts/modality_ablation.py
@@ -580,15 +607,17 @@ python scripts/all_countries.py

  ### Custom training, prediction and evaluation

- We recommend to first setup a custom `$EXP_FILE` in `config/experiment` following the structure of one of the existing experiment files, e.g. `ffl_fusion.yaml`. You can then run:
+ We recommend first setting up a custom experiment file `$EXP_FILE` in `config/experiment/`, following the structure of one of the existing files, e.g. `ffl_fusion.yaml`. You can then run

  ```
  # train your model (on multiple GPUs)
  torchrun --nproc_per_node=$NUM_GPU scripts/train.py experiment=$EXP_FILE
+
  # predict the test set with your model (on multiple GPUs)
- torchrun --nproc_per_node=$NUM_GPU scripts/predict.py evaluation=test checkpoint=best_val_iou
+ torchrun --nproc_per_node=$NUM_GPU scripts/predict.py experiment=$EXP_FILE evaluation=test checkpoint=best_val_iou
+
  # evaluate your prediction of the test set
- python scripts/evaluate.py model=<model> evaluation=test checkpoint=best_val_iou
+ python scripts/evaluate.py model=<model> experiment=$EXP_FILE evaluation=test checkpoint=best_val_iou
  ```

  You could also continue training from a provided pretrained model with