Commit 996dd7f
Parent(s): b1e4fd6
Update SATIN.py
SATIN.py CHANGED
@@ -1,189 +1,61 @@
-"""
-
 import datasets
-import os
-import pyarrow.parquet as pq
-from PIL import Image
-from io import BytesIO
-import numpy as np
-import pandas as pd
-
-
-def load_data(data_dir):
-    parquet_file = [file for file in os.listdir(data_dir) if file.endswith('.parquet')][0]
-    print(parquet_file)
-    parquet_path = os.path.join(data_dir, parquet_file)
 
-    parquet_path = data_dir
-    table = pq.read_table(parquet_path)
-
-    for row in table.iterrecords():
-        image_bytes = row['image']
-        image = Image.open(BytesIO(image_bytes))
-        label = row['label']
-        yield image, label
 
 
 
 class SATINConfig(datasets.BuilderConfig):
 
-
-    def __init__(self, name, description, data_url, class_names, **kwargs):
-
-        Args:
-            data_url: `string`, url to download the zip file from.
-            metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
-            **kwargs: keyword arguments forwarded to super.
 
         super(SATINConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
         self.name = name
-        self.data_url = data_url
-        self.description = description
-        self.class_names = class_names
-
-
-class SATIN(datasets.GeneratorBasedBuilder):
-    SATIN Images dataset
-
-    _SAT_4_NAMES = ['barren land', 'grassland', 'other', 'trees']
-    _SAT_6_NAMES = ['barren land', 'building', 'grassland', 'road', 'trees', 'water']
-
-    BUILDER_CONFIGS = [
-        SATINConfig(
-            name="SAT_4",
-            description="SAT_4.",
-            data_url="https://huggingface.co/datasets/jonathan-roberts1/SAT-4/tree/main/data/",#train-00000-of-00001-e2dcb38bc165dfb0.parquet",
-            class_names = _SAT_4_NAMES
-            #metadata_urls={
-            #    "train": "https://link-to-breakfast-foods-train.txt",
-        ),
-        SATINConfig(
-            name="SAT_6",
-            description="SAT_6.",
-            data_url="https://huggingface.co/datasets/jonathan-roberts1/SAT-6/tree/main/data/",#train-00000-of-00001-c47ada2c92f814d2.parquet",
-            class_names = _SAT_6_NAMES
-        )
-    ]
-
-    @property
-    def url_prefix(self):
-        return {
-            "SAT-4": "https://huggingface.co/datasets/jonathan-roberts1/SAT-4/tree/main/data/",#train-00000-of-00001-e2dcb38bc165dfb0.parquet",#train-00000-of-00001-e2dcb38bc165dfb0.parquet",
-            "SAT-6": "https://huggingface.co/datasets/jonathan-roberts1/SAT-6/tree/main/data/",
-        }
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=self.config.description,
-            features=datasets.Features(
-                {
-                    "image": datasets.Image(),
-                    "label": datasets.ClassLabel(names=self.config.class_names),
-                }
-            ),
-            supervised_keys=("image", "label"),
-            #homepage=_HOMEPAGE,
-            #citation=_CITATION,
-            #license=_LICENSE,
-            #task_templates=[ImageClassification(image_column="image", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-        url = self.config.data_url
-        data_dir = dl_manager.download_and_extract(url)#, use_auth_token=True)
-        print(data_dir)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"data_dir": data_dir},
-            ),
-        ]
-
-    def _generate_examples(self, data_dir):
-        #base_url = self.url_prefix[self.config.name]
-        file_url = self.config.data_url
-        use_auth_token = os.environ.get("HUGGINGFACE_TOKEN")
-
-        with NamedTemporaryFile() as file:
-            download(file_url, file.name, use_auth_token=use_auth_token)
-            df = pd.read_parquet(file.name)
-
-            for idx, row in df.iterrows():
-                example = {
-                    "image": row["image"],
-                    "label": row["label"],
-                }
-                yield idx, example
-
-
-#def _generate_examples(self, data_dir):
-#    for idx, (image, label) in enumerate(load_data(data_dir)):
-#        image_array = np.array(image)
-#        yield idx, {"image": image_array, "label": label}
-"""
-
-
-from datasets.utils.download_manager import DownloadManager
-import tempfile
-import datasets
-import os
-import pyarrow.parquet as pq
-from PIL import Image
-from io import BytesIO
-import numpy as np
-import pandas as pd
-
-
-class SATINConfig(datasets.BuilderConfig):
-
-
-    def __init__(self, name, description, data_url, class_names, **kwargs):
-
-        super(SATINConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
-        self.name = name
-        self.data_url = data_url
-        self.description = description
-        self.class_names = class_names
 
 
 
 class SATIN(datasets.GeneratorBasedBuilder):
     """SATIN Images dataset"""
 
-
-
-
-
-
-
-
-
-
-
-        SATINConfig(
-            name="SAT_6",
-            description="SAT_6.",
-            data_url="jonathan-roberts1/SAT-6",#"https://huggingface.co/datasets/jonathan-roberts1/SAT-6/blob/main/data/train-00000-of-00001-c47ada2c92f814d2.parquet?raw=true",
-            class_names=_SAT_6_NAMES
-        )
-    ]
 
     def _info(self):
         return datasets.DatasetInfo(
             description=self.config.description,
-            features=datasets.Features(
-
-                    "image": datasets.Image(),
-                    "label": datasets.ClassLabel(names=self.config.class_names),
-                }
-            ),
-            supervised_keys=("image", "label"),
         )
 
     def _split_generators(self, dl_manager):
-
-        from datasets import load_dataset
-        dataset = load_dataset(self.config.data_url)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -193,9 +65,11 @@ class SATIN(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, data_path):
         # iterate over the Huggingface dataset and yield the idx, image and label
-
         for idx, row in enumerate(huggingface_dataset):
-            yield idx, {
 
 
 
 import datasets
+from datasets import load_dataset
 
 
+_CONSTITUENT_DATASETS = ['SAT-4', 'SAT-6', 'NASC-TG2', 'WHU-RS19', 'RSSCN7', 'RS_C11', 'SIRI-WHU', 'EuroSAT',
+                         'NWPU-RESISC45', 'PatternNet', 'RSD46-WHU', 'GID', 'CLRS', 'Optimal-31',
+                         'Airbus-Wind-Turbines-Patches', 'USTC_SmokeRS', 'Canadian_Cropland_Dataset',
+                         'Ships-In-Satellite-Imagery', 'Satellite-Images-of-Hurricane-Damage',
+                         'Brazilian_Coffee_Scenes', 'Brazilian_Cerrado-Savanna_Scenes', 'Million-AID',
+                         'UC_Merced_LandUse_MultiLabel', 'MLRSNet_MultiLabel', 'AID_MultiLabel',
+                         'MultiScene', 'RSI-CB256']
 
 
 class SATINConfig(datasets.BuilderConfig):
+    """BuilderConfig for SATIN"""
 
+    def __init__(self, name, **kwargs):
 
         super(SATINConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
         self.name = name
+        self.hf_dataset_name = 'jonathan-roberts1' + "/" + name
+        self.description = None
+        self.features = None
 
+        #stream_dataset_info = load_dataset(self.hf_dataset_name, streaming=True, split='train').info
+        #self.description = stream_dataset_info.description
+        #self.features = stream_dataset_info.features
 
 
 class SATIN(datasets.GeneratorBasedBuilder):
     """SATIN Images dataset"""
 
+    BUILDER_CONFIGS = [SATINConfig(name=dataset_name) for dataset_name in _CONSTITUENT_DATASETS]
+
+    """
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.config.hf_dataset = load_dataset(self.config.hf_dataset_name)
+        self.config.description = self.config.hf_dataset['train'].description
+        self.config.features = self.config.hf_dataset['train'].features
+        print(self.config.features)
+    """
 
     def _info(self):
+        if self.config.description is None or self.config.features is None:
+            stream_dataset_info = load_dataset(self.config.hf_dataset_name, streaming=True, split='train').info
+            self.config.description = stream_dataset_info.description
+            self.config.features = stream_dataset_info.features
+        print(f'info {self.config.features}')
         return datasets.DatasetInfo(
             description=self.config.description,
+            features=self.config.features,
+            #supervised_keys=("image", "label"),
         )
 
+
     def _split_generators(self, dl_manager):
+        dataset = load_dataset(self.config.hf_dataset_name)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
 
     def _generate_examples(self, data_path):
         # iterate over the Huggingface dataset and yield the idx, image and label
+        _DEFAULT_SPLIT = 'train'
+        huggingface_dataset = data_path['train']
+        features = huggingface_dataset.features
         for idx, row in enumerate(huggingface_dataset):
+            yield idx, {feature: row[feature] for feature in features}
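
For reference, a minimal usage sketch of the rewritten loader. It assumes the script above is published as a dataset repository named jonathan-roberts1/SATIN and that "EuroSAT" is one of the _CONSTITUENT_DATASETS config names; depending on the datasets library version, loading a script-backed dataset may also require trust_remote_code=True.

from datasets import load_dataset

# Load one constituent dataset through the SATIN loader script.
# "jonathan-roberts1/SATIN" is the assumed repository holding SATIN.py;
# "EuroSAT" is one of the config names built from _CONSTITUENT_DATASETS.
eurosat = load_dataset("jonathan-roberts1/SATIN", "EuroSAT", split="train")

# Features and description are copied from the underlying dataset's info
# inside _info(), so they mirror the constituent dataset exactly.
print(eurosat.features)
print(eurosat[0].keys())

Because _info() streams only the constituent dataset's metadata, the per-config features and description stay in sync with the upstream repositories without downloading them at config-construction time.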