|
import os
|
|
import tarfile
|
|
import datasets
|
|
import pandas as pd
|
|
from typing import Dict, List
|
|
import io
|
|
from tqdm import tqdm
|
|
import csv
|
|
import os
|
|
|
|
|
|
_DESCRIPTION = """
|
|
This dataset consists of about 400 hours of audio extracted from various Filimo videos in the Persian language.
|
|
Note: This dataset contains raw, unvalidated transcriptions. Users are advised to:
|
|
1. Perform their own quality assessment
|
|
2. Create their own train/validation/test splits based on their specific needs
|
|
3. Validate a subset of the data if needed for their use case
|
|
"""
|
|
|
|
|
|
_CITATION = """
|
|
Use this repo info/link for citation: https://huggingface.co/datasets/msghol/filimo-farsi
|
|
"""
|
|
|
|
_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
|
|
|
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/msghol/filimo-farsi"
|
|
|
|
|
|
_BASE_URL = "https://huggingface.co/datasets/msghol/filimo-farsi/resolve/main/"
|
|
|
|
_AUDIO_URL = _BASE_URL + "data/unvalidated_{shard_idx:03d}.tar"
|
|
|
|
|
|
class FilimoASRDataset(datasets.GeneratorBasedBuilder):
    """Loader for the Filimo Farsi ASR corpus (~400 h of unvalidated Persian speech).

    Exposes a single "unvalidated" split whose examples carry the raw audio
    bytes, the transcription text, and the source file name. Metadata comes
    from ``unvalidated.csv`` in the repo; audio comes from 33 tar shards.
    """

    # Flush examples to disk in batches of 1000 to bound memory while writing.
    DEFAULT_WRITER_BATCH_SIZE = 1000

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset schema and card metadata."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    # Audio is decoded/resampled to 16 kHz by the Audio feature.
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "file_name": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            license=_LICENSE,
            citation=_CITATION,
            version=self.VERSION,
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Downloads the metadata CSV and prepares the 33 audio tar shards.
        In streaming mode the shards are iterated directly from the remote
        archives; otherwise they are extracted locally first.
        """
        archive_paths = [_AUDIO_URL.format(shard_idx=i) for i in range(1, 34)]

        # Only extract when not streaming; streaming reads members on the fly.
        local_extracted_archive_paths = (
            dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        )

        return [
            datasets.SplitGenerator(
                name="unvalidated",
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths,
                    "archives": [dl_manager.iter_archive(path) for path in archive_paths],
                    # Bug fix: download the metadata here, where dl_manager is in
                    # scope. _generate_examples previously called
                    # dl_manager.download() on an undefined name (NameError).
                    "meta_path": dl_manager.download(_BASE_URL + "unvalidated.csv"),
                },
            ),
        ]

    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
        """Yields (key, example) pairs.

        Args:
            local_extracted_archive_paths: per-shard local extraction dirs
                (empty dict when streaming).
            archives: one ``iter_archive`` iterator per tar shard, aligned by
                index with ``local_extracted_archive_paths``.
            meta_path: local path to the already-downloaded metadata CSV.
        """
        data_fields = list(self._info().features.keys())

        # Index metadata rows by audio file name for O(1) lookup per member.
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            # NOTE(review): despite the .csv extension the file is read as
            # tab-delimited with quoting disabled — presumably it is a TSV;
            # confirm against the repo file before changing.
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader, desc="Reading metadata..."):
                # Normalize file names so they match tar member basenames.
                if not row["file_name"].endswith(".mp3"):
                    row["file_name"] += ".mp3"
                # Accept either a "sentence" or a "text" transcription column.
                row["text"] = row["sentence"] if "sentence" in row else row.get("text", "")
                # Guarantee every declared feature key exists on the row.
                for field in data_fields:
                    if field not in row:
                        row[field] = ""
                metadata[row["file_name"]] = row

        for i, audio_archive in enumerate(archives):
            for path, file_obj in audio_archive:
                _, filename = os.path.split(path)
                # Skip archive members with no metadata row (unlabelled audio).
                if filename not in metadata:
                    continue
                result = dict(metadata[filename])
                if local_extracted_archive_paths:
                    # Non-streaming: point at the extracted file on disk.
                    full_path_to_audio = os.path.join(local_extracted_archive_paths[i], path)
                    result["audio"] = {"path": full_path_to_audio, "bytes": file_obj.read()}
                    result["file_name"] = full_path_to_audio
                else:
                    # Streaming: keep the in-archive path; bytes carry the audio.
                    result["audio"] = {"path": path, "bytes": file_obj.read()}
                    result["file_name"] = filename
                yield path, result