import torchvision.transforms as transforms
from torchvision import datasets
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
def MNISTDataLoader(data_dir, batch_size, img_size=32):
    # MNIST train/test loaders: resize to img_size and normalize with the
    # standard MNIST mean/std (0.1307, 0.3081).
    train_transform = transforms.Compose([
        transforms.Resize(img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_dataset = datasets.MNIST(root=data_dir, train=True,
                                   download=True, transform=train_transform)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    test_transform = transforms.Compose([
        transforms.Resize(img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_dataset = datasets.MNIST(root=data_dir, train=False,
                                  download=True, transform=test_transform)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader
def CIFAR10DataLoader(data_dir, batch_size, img_size=32):
    # CIFAR-10 train/test loaders. Training uses random crop + horizontal
    # flip augmentation; both splits are normalized to [-1, 1].
    train_transform = transforms.Compose([
        transforms.Resize(img_size),
        transforms.RandomCrop(img_size, padding=4),  # crop to img_size so train/test sizes match
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    train_dataset = datasets.CIFAR10(root=data_dir, train=True,
                                     download=True, transform=train_transform)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    test_transform = transforms.Compose([
        transforms.Resize(img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    test_dataset = datasets.CIFAR10(root=data_dir, train=False,
                                    download=True, transform=test_transform)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader
def CIFAR100DataLoader(data_dir, batch_size, img_size=32):
    # CIFAR-100 train/test loaders; same augmentation and normalization as CIFAR-10.
    train_transform = transforms.Compose([
        transforms.Resize(img_size),
        transforms.RandomCrop(img_size, padding=4),  # crop to img_size so train/test sizes match
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    train_dataset = datasets.CIFAR100(root=data_dir, train=True,
                                      download=True, transform=train_transform)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    test_transform = transforms.Compose([
        transforms.Resize(img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    test_dataset = datasets.CIFAR100(root=data_dir, train=False,
                                     download=True, transform=test_transform)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader
def ImageNetDataLoader(train_data_root, val_data_root, batch_size=128, num_workers=8):
    # Standard ImageNet pipeline (see https://github.com/floydhub/imagenet/blob/master/main.py):
    # random resized crop + flip for training, resize + center crop for validation,
    # normalized with the usual ImageNet channel mean/std.
    img_size = 224
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(img_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    train_dataset = ImageFolder(train_data_root, transform=train_transform)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                              num_workers=num_workers, pin_memory=True)

    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    test_dataset = ImageFolder(val_data_root, transform=test_transform)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                             num_workers=num_workers, pin_memory=True)
    return train_loader, test_loader
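

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: the "./data"
    # directory and batch size of 128 are assumed values chosen for illustration.
    train_loader, test_loader = CIFAR10DataLoader(data_dir="./data", batch_size=128)
    images, labels = next(iter(train_loader))
    # With the defaults above, a batch should be [128, 3, 32, 32] images and [128] labels.
    print(images.shape, labels.shape)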