import pandas as pd
import numpy as np
from pathlib import Path
import argparse
import logging
from sklearn.model_selection import train_test_split


def setup_logging():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    return logging.getLogger(__name__)


def split_dataset(input_path: str, output_dir: str, chunk_size: int = 100000):
    """
    Split a large CSV file into train, test, and validation sets.

    Processes the file in chunks to handle large datasets efficiently.

    Args:
        input_path: Path to input CSV file
        output_dir: Directory to save split datasets
        chunk_size: Number of rows to process at a time
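
    Example (illustrative paths):
        split_dataset("data/raw/events.csv", "data/splits", chunk_size=50000)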
"""
logger = setup_logging()
output_path = Path(output_dir)
output_path.mkdir(parents=True, exist_ok=True)

    # Open output files; newline='' avoids extra blank lines when pandas
    # writes to an already-open text file handle
    train_file = open(output_path / 'train.csv', 'w', newline='')
    test_file = open(output_path / 'test.csv', 'w', newline='')
    val_file = open(output_path / 'val.csv', 'w', newline='')

    # Set random seed for reproducibility
    np.random.seed(42)

    # Process the CSV in chunks
    chunk_iterator = pd.read_csv(input_path, chunksize=chunk_size)
    is_first_chunk = True
    total_rows = 0

    logger.info("Starting dataset split...")
    for chunk in chunk_iterator:
        # Split chunk into train (70%), test (20%), val (10%):
        # the second split keeps 2/3 of the remaining 30% for test and 1/3 for val
        train_chunk, test_val_chunk = train_test_split(chunk, train_size=0.7, random_state=42)
        test_chunk, val_chunk = train_test_split(test_val_chunk, train_size=2 / 3, random_state=42)

        # Write header for first chunk only
        if is_first_chunk:
            train_chunk.to_csv(train_file, index=False)
            test_chunk.to_csv(test_file, index=False)
            val_chunk.to_csv(val_file, index=False)
            is_first_chunk = False
        else:
            train_chunk.to_csv(train_file, index=False, header=False)
            test_chunk.to_csv(test_file, index=False, header=False)
            val_chunk.to_csv(val_file, index=False, header=False)

        total_rows += len(chunk)
        logger.info(f"Processed {total_rows} rows...")

    # Close files
    train_file.close()
    test_file.close()
    val_file.close()

    logger.info("Dataset splitting complete!")
    logger.info(f"Total rows processed: {total_rows}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Split large CSV dataset into train/test/val sets")
parser.add_argument("--input_path", required=True, help="Path to input CSV file")
parser.add_argument("--output_dir", required=True, help="Directory to save split datasets")
parser.add_argument("--chunk_size", type=int, default=100000, help="Chunk size for processing")
args = parser.parse_args()
split_dataset(args.input_path, args.output_dir, args.chunk_size) |
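
# Example invocation (script and file names are illustrative):
#   python split_dataset.py --input_path data/raw/events.csv --output_dir data/splits --chunk_size 50000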