# filimo-persian-asr.py: loading script for the msghol/filimo-farsi dataset on Hugging Face
import csv
import os

import datasets
from tqdm import tqdm
# TODO: Optionally update the description for your specific dataset
_DESCRIPTION = """
This dataset consists of about 400 hours of audio extracted from various Filimo videos in the Persian language.
Note: This dataset contains raw, unvalidated transcriptions. Users are advised to:
1. Perform their own quality assessment
2. Create their own train/validation/test splits based on their specific needs
3. Validate a subset of the data if needed for their use case
"""
# TODO: Update with your repository information or desired citation method
_CITATION = """
Use this repo info/link for citation: https://huggingface.co/datasets/msghol/filimo-farsi
"""
_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/" # Or your chosen license
# TODO: Update HOMEPAGE to your dataset's Hugging Face page
_HOMEPAGE = "https://huggingface.co/datasets/msghol/filimo-farsi"
# TODO: Update BASE_URL to point to your dataset repository
_BASE_URL = "https://huggingface.co/datasets/msghol/filimo-farsi/resolve/main/"
_AUDIO_URL = _BASE_URL + "data/unvalidated_{shard_idx:03d}.tar" # This will now use your updated _BASE_URL
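# e.g. shard_idx=1 resolves to
# https://huggingface.co/datasets/msghol/filimo-farsi/resolve/main/data/unvalidated_001.tar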
# TODO: Consider renaming the class for clarity, e.g., FilimoFarsiASR
class FilimoASRDataset(datasets.GeneratorBasedBuilder): # Or class FilimoFarsiASR(datasets.GeneratorBasedBuilder):
DEFAULT_WRITER_BATCH_SIZE = 1000
VERSION = datasets.Version("1.0.0") # Update if desired
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features({
"audio": datasets.Audio(sampling_rate=16_000), # Adjust sampling rate if your audio is different
"text": datasets.Value("string"),
"file_name": datasets.Value("string"),
}),
supervised_keys=None,
license=_LICENSE,
citation=_CITATION,
version=self.VERSION,
description=_DESCRIPTION,
            homepage=_HOMEPAGE,
)
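    # Note: the Audio feature decodes the raw mp3 bytes lazily on access and resamples
    # them to the declared 16 kHz, so _generate_examples can store {"path": ..., "bytes": ...}.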
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
        # TODO: Adjust the range if you have a different number of tar shards
        # For example, if you have 10 shards (unvalidated_001.tar to unvalidated_010.tar):
        # archive_urls = [_AUDIO_URL.format(shard_idx=i) for i in range(1, 11)]
        archive_urls = [_AUDIO_URL.format(shard_idx=i) for i in range(1, 34)]  # Default is 33 shards
        # Download the shards first (in streaming mode this only resolves the URLs);
        # extract them when not streaming, since iter_archive needs local files in that case.
        archive_paths = dl_manager.download(archive_urls)
        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
return [
datasets.SplitGenerator(
name="unvalidated", # You can rename this split if needed, e.g., datasets.Split.TRAIN
gen_kwargs={
"local_extracted_archive_paths": local_extracted_archive_paths,
"archives": [dl_manager.iter_archive(path) for path in archive_paths],
"meta_path": _BASE_URL + "unvalidated.csv", # This will now use your updated _BASE_URL
},
),
]
def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
"""Yields examples."""
data_fields = list(self._info().features.keys())
metadata = {}
        # meta_path is already a local file: it was downloaded in _split_generators,
        # because dl_manager is not available inside _generate_examples.
        with open(meta_path, encoding="utf-8") as f:
            # Despite the .csv extension, the metadata file is tab-separated.
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in tqdm(reader, desc="Reading metadata..."):
if not row["file_name"].endswith(".mp3"):
row["file_name"] += ".mp3"
if "sentence" in row:
row['text'] = row['sentence']
# del row['sentence'] # Keep sentence column if you want it in final dataset
else: # Ensure 'text' field exists if 'sentence' is not present
row['text'] = row.get('text', '')
# Ensure all defined features have at least a default value
for field in data_fields:
if field not in row:
row[field] = ""
metadata[row["file_name"]] = row
for i, audio_archive in enumerate(archives):
            for path, file_obj in audio_archive:  # iter_archive yields (relative path, file-like object) pairs
_, filename = os.path.split(path)
if filename in metadata:
                    # Keep only the declared features so stray CSV columns (e.g. 'sentence')
                    # do not leak into the yielded examples.
                    result = {field: metadata[filename].get(field, "") for field in data_fields}
# set the audio feature and the path to the extracted file
# If streaming, path is the original path in archive, file_obj is a file-like object
# If not streaming (local_extracted_archive_paths is populated), path needs to be joined
if local_extracted_archive_paths:
full_path_to_audio = os.path.join(local_extracted_archive_paths[i], path)
result["audio"] = {"path": full_path_to_audio, "bytes": file_obj.read()}
result["file_name"] = full_path_to_audio # Or keep original filename: filename
else: # Streaming case
result["audio"] = {"path": path, "bytes": file_obj.read()} # path is relative path in archive
result["file_name"] = filename # Or path if you prefer relative path from archive root
yield path, result # Path can be used as a unique key
# else:
# print(f"Warning: File {filename} from archive not found in metadata.") # Optional: for debugging