# rare-species/scripts/export_rare_species.py
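"""Export the Rare Species dataset into a legacy on-disk folder structure.

Downloads the Parquet-backed imageomics/rare-species dataset from the
Hugging Face Hub and writes each image's raw bytes back out under the
`dataset/...` hierarchy described by metadata.csv.

Usage:
    python scripts/export_rare_species.py --dataset-path dataset --revision main
"""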
import argparse
import os

import polars as pl
from datasets import load_dataset, Features, Value
from datasets.features import Image


def main():
    parser = argparse.ArgumentParser(
        description="Export the Rare Species dataset into a legacy on-disk folder structure"
    )
    parser.add_argument(
        "--dataset-path", default="dataset",
        help="Directory under which to write the `dataset/...` hierarchy"
    )
    parser.add_argument(
        "--revision", default="main",
        help="Hugging Face dataset revision (branch, tag, or commit SHA)"
    )
    args = parser.parse_args()

    # Read metadata.csv from the remote repository
    csv_url = (
        "https://huggingface.co/datasets/imageomics/rare-species/"
        "resolve/main/metadata.csv?download=true"
    )
    print(f"Loading metadata from {csv_url}")
    df_pl = pl.read_csv(csv_url)
    rel_paths = df_pl["file_name"].to_list()
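    # Per metadata.csv, these are repo-relative paths under the dataset/ hierarchy.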

    # Define schema: file_name as raw bytes (no PIL decode), all other columns as strings
    features = Features({
        "file_name": Image(decode=False),
        **{c: Value("string") for c in df_pl.columns if c != "file_name"},
    })
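    # With decode=False, each example's "file_name" entry is a dict of the form
    # {"bytes": ..., "path": ...} rather than a decoded PIL.Image, so the raw
    # bytes can be written straight to disk without re-encoding.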

    # Load the Parquet-backed dataset
    print(f"Loading dataset imageomics/rare-species @ {args.revision}")
    ds = load_dataset(
        "imageomics/rare-species",
        split="train",
        revision=args.revision,
        features=features,
    )
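    # Passing `features` casts the image column at load time, so no row is ever
    # run through PIL decoding during the export below.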

    # Export each image's raw bytes under <dataset-path>/dataset/...
    print(f"Exporting {len(rel_paths)} images to {args.dataset_path}/")
    for idx, rel in enumerate(rel_paths):
        info = ds[idx]["file_name"]
        img_bytes = info["bytes"]
        dst = os.path.join(args.dataset_path, rel)
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        with open(dst, "wb") as f:
            f.write(img_bytes)

    print(f"Export complete: images written under {args.dataset_path}/dataset")


if __name__ == "__main__":
    main()