import argparse
import logging
from pathlib import Path

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split


def setup_logging():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    return logging.getLogger(__name__)


def split_dataset(input_path: str, output_dir: str, chunk_size: int = 100000):
    """
    Split a large CSV file into train, test, and validation sets.

    Processes the file in chunks to handle large datasets efficiently.

    Args:
        input_path: Path to the input CSV file.
        output_dir: Directory to save the split datasets.
        chunk_size: Number of rows to process at a time.
    """
    logger = setup_logging()
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    # random_state is passed to every train_test_split call below, so this
    # global seed only matters for any other NumPy randomness.
    np.random.seed(42)

    chunk_iterator = pd.read_csv(input_path, chunksize=chunk_size)

    is_first_chunk = True
    total_rows = 0

    logger.info("Starting dataset split...")

    # Context managers guarantee the output files are closed even if a chunk
    # fails to parse or split; newline='' avoids blank rows on Windows.
    with open(output_path / 'train.csv', 'w', newline='') as train_file, \
         open(output_path / 'test.csv', 'w', newline='') as test_file, \
         open(output_path / 'val.csv', 'w', newline='') as val_file:

        for chunk in chunk_iterator:
            # 70% of each chunk goes to train; the remaining 30% is split
            # roughly 2:1, giving ~20% test and ~10% validation overall.
            train_chunk, test_val_chunk = train_test_split(chunk, train_size=0.7, random_state=42)
            test_chunk, val_chunk = train_test_split(test_val_chunk, train_size=0.67, random_state=42)

            # Write the header row only once, with the first chunk.
            train_chunk.to_csv(train_file, index=False, header=is_first_chunk)
            test_chunk.to_csv(test_file, index=False, header=is_first_chunk)
            val_chunk.to_csv(val_file, index=False, header=is_first_chunk)
            is_first_chunk = False

            total_rows += len(chunk)
            logger.info(f"Processed {total_rows} rows...")

    logger.info("Dataset splitting complete!")
    logger.info(f"Total rows processed: {total_rows}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Split a large CSV dataset into train/test/val sets")
    parser.add_argument("--input_path", required=True, help="Path to the input CSV file")
    parser.add_argument("--output_dir", required=True, help="Directory to save the split datasets")
    parser.add_argument("--chunk_size", type=int, default=100000, help="Number of rows to process per chunk")

    args = parser.parse_args()

    split_dataset(args.input_path, args.output_dir, args.chunk_size)
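
# Example invocation (hypothetical paths; assumes this file is saved as
# split_dataset.py):
#
#   python split_dataset.py --input_path data/raw/events.csv \
#       --output_dir data/splits --chunk_size 50000
#
# This writes train.csv (~70%), test.csv (~20%), and val.csv (~10% of the
# input rows) into data/splits.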