#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Imperial College London (Pingchuan Ma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
""" TCN for lipreading"""
import os
import time
import random
import argparse  # module for parsing command-line arguments
import numpy as np
from tqdm import tqdm  # progress-bar library
import torch  # PyTorch
import torch.nn as nn  # class-based API: stores state in module attributes
import torch.nn.functional as F  # functional API: usable without instantiating a module
from lipreading.utils import get_save_folder
from lipreading.utils import load_json, save2npz
from lipreading.utils import load_model, CheckpointSaver
from lipreading.utils import get_logger, update_logger_batch
from lipreading.utils import showLR, calculateNorm2, AverageMeter
from lipreading.model import Lipreading
from lipreading.mixup import mixup_data, mixup_criterion
from lipreading.optim_utils import get_optimizer, CosineScheduler
from lipreading.dataloaders import get_data_loaders, get_preprocessing_pipelines
from pathlib import Path
import wandb  # experiment-tracking tool (automatically logs loss/accuracy)
# parse and return the command-line arguments
def load_args(default_config=None):
    # create a parser instance that receives the argument values
    parser = argparse.ArgumentParser(description='Pytorch Lipreading ')
    # list of accepted arguments
    # -- dataset config
    parser.add_argument('--dataset', default='lrw', help='dataset selection')
    parser.add_argument('--num-classes', type=int, default=30, help='Number of classes')
    parser.add_argument('--modality', default='video', choices=['video', 'raw_audio'], help='choose the modality')
    # -- directory
    parser.add_argument('--data-dir', default='./datasets/visual', help='Loaded data directory')
    parser.add_argument('--label-path', type=str, default='./labels/30VietnameseSort.txt', help='Path to txt file with labels')
    parser.add_argument('--annonation-direc', default=None, help='Annotation directory')
    # -- model config
    parser.add_argument('--backbone-type', type=str, default='resnet', choices=['resnet', 'shufflenet'], help='Architecture used for backbone')
    parser.add_argument('--relu-type', type=str, default='relu', choices=['relu', 'prelu'], help='what relu to use')
    parser.add_argument('--width-mult', type=float, default=1.0, help='Width multiplier for mobilenets and shufflenets')
    # -- TCN config
    parser.add_argument('--tcn-kernel-size', type=int, nargs="+", help='Kernel to be used for the TCN module')
    parser.add_argument('--tcn-num-layers', type=int, default=4, help='Number of layers on the TCN module')
    parser.add_argument('--tcn-dropout', type=float, default=0.2, help='Dropout value for the TCN module')
    parser.add_argument('--tcn-dwpw', default=False, action='store_true', help='If True, use the depthwise separable convolution in TCN architecture')
    parser.add_argument('--tcn-width-mult', type=int, default=1, help='TCN width multiplier')
    # -- train
    parser.add_argument('--training-mode', default='tcn', help='tcn')
    parser.add_argument('--batch-size', type=int, default=8, help='Mini-batch size')  # changed from default=32 to default=8 (to avoid OOM)
    parser.add_argument('--optimizer', type=str, default='adamw', choices=['adam', 'sgd', 'adamw'])
    parser.add_argument('--lr', default=3e-4, type=float, help='initial learning rate')
    parser.add_argument('--init-epoch', default=0, type=int, help='epoch to start at')
    parser.add_argument('--epochs', default=100, type=int, help='number of epochs')  # changed from the original default=80
    parser.add_argument('--test', default=False, action='store_true', help='test mode')
    parser.add_argument('--save-dir', type=Path, default=Path('/kaggle/working/result/'))
    # -- mixup
    parser.add_argument('--alpha', default=0.4, type=float, help='interpolation strength (uniform=1., ERM=0.)')
    # -- test
    parser.add_argument('--model-path', type=str, default=None, help='Pretrained model pathname')
    parser.add_argument('--allow-size-mismatch', default=False, action='store_true',
                        help='If True, allows to init from model with mismatching weight tensors. Useful to init from model with diff. number of classes')
    # -- feature extractor
    parser.add_argument('--extract-feats', default=False, action='store_true', help='Feature extractor')
    parser.add_argument('--mouth-patch-path', type=str, default=None, help='Path to the mouth ROIs, assuming the file is saved as numpy.array')
    parser.add_argument('--mouth-embedding-out-path', type=str, default=None, help='Save mouth embeddings to a specified path')
    # -- json pathname
    parser.add_argument('--config-path', type=str, default=None, help='Model configuration with json format')
    # -- other vars
    parser.add_argument('--interval', default=50, type=int, help='display interval')
    parser.add_argument('--workers', default=2, type=int, help='number of data loading workers')  # changed from default=8 to default=2 (to suit 4 GCP cores)
    # paths
    parser.add_argument('--logging-dir', type=str, default='/kaggle/working/train_logs', help='path to the directory in which to save the log file')
    # parsed argument values are stored in args (type: argparse.Namespace)
    args = parser.parse_args()
    return args
args = load_args()  # parse and load args
# fix random seeds for experiment reproducibility
torch.manual_seed(1)  # fix the random seed in PyTorch, the main framework
np.random.seed(1)  # fix the random seed in numpy
random.seed(1)  # fix the random seed in Python's random library
# Note: full reproducibility also requires torch.backends.cudnn.deterministic = True and torch.backends.cudnn.benchmark = False
torch.backends.cudnn.benchmark = True  # enable the built-in cuDNN auto-tuner to find the best algorithm for this hardware (given the tensor shapes and conv ops)
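# A fully deterministic run would instead use the settings from the note above,
# at the cost of speed (a sketch, intentionally not enabled here):
#   torch.backends.cudnn.deterministic = True
#   torch.backends.cudnn.benchmark = False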
# feature extraction
def extract_feats(model):
    """
    :rtype: FloatTensor
    """
    model.eval()  # switch off layers (e.g. dropout, batch-norm updates) that must not be active during evaluation
    preprocessing_func = get_preprocessing_pipelines()['test']  # test-time preprocessing
    mouth_patch_path = args.mouth_patch_path.replace('.', '')  # strip dots so the relative path can be appended to the script directory
    dir_name = os.path.dirname(os.path.abspath(__file__))
    dir_name = dir_name + mouth_patch_path
    data_paths = [os.path.join(pth, f) for pth, dirs, files in os.walk(dir_name) for f in files]
    npz_files = np.load(data_paths[0])['data']
    data = preprocessing_func(npz_files)  # data: TxHxW
    # data = preprocessing_func(np.load(args.mouth_patch_path)['data'])  # data: TxHxW
    return data_paths[0], model(torch.FloatTensor(data)[None, None, :, :, :].cuda(), lengths=[data.shape[0]])
    # return model(torch.FloatTensor(data)[None, None, :, :, :].cuda(), lengths=[data.shape[0]])
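# The walk above picks up .npz files whose 'data' entry holds the mouth-ROI
# frames as a (T, H, W) stack. Such a file could be produced with, e.g.
# (illustrative; 'frames' is a hypothetical (T, H, W) numpy array):
#   np.savez('roi.npz', data=frames)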
# evaluation
def evaluate(model, dset_loader, criterion, is_print=False):
    model.eval()  # switch off layers that must not be active during evaluation
    running_loss = 0.
    running_corrects = 0.
    prediction = ''
    # load the vocabulary once: one label per line in the labels file
    with Path(args.label_path).open() as fp:
        vocab = fp.readlines()
    # model.eval() and torch.no_grad() are normally used together during evaluation/validation
    with torch.no_grad():
        for batch_idx, (input, lengths, labels) in enumerate(tqdm(dset_loader)):
            # forward pass: add a channel dimension to the input tensor and move it to the GPU
            logits = model(input.unsqueeze(1).cuda(), lengths=lengths)
            _, preds = torch.max(F.softmax(logits, dim=1).data, dim=1)  # take the argmax after softmax
            running_corrects += preds.eq(labels.cuda().view_as(preds)).sum().item()  # accumulate correct predictions
            loss = criterion(logits, labels.cuda())  # compute the loss
            running_loss += loss.item() * input.size(0)  # loss.item(): the scalar value held by loss
            # ------------ prediction and confidence ------------
            probs = F.softmax(logits, dim=-1)
            probs = probs[0].detach().cpu().numpy()
            top = np.argmax(probs)
            prediction = vocab[top].strip()
            # confidence = np.round(probs[top], 3)
            # overwritten every batch, so the file keeps the last prediction only
            with open("/home/user/app/result/ho.txt", 'w') as f:
                f.writelines(prediction)
    if is_print:
        print()
        print(f'Prediction: {prediction}')
        # print(f'Confidence: {confidence}')
        print()
    # ------------ save predictions/confidences to a text file ------------
    # txt_save_path = str(args.save_dir) + '/predict.txt'
    # if not os.path.exists(os.path.dirname(txt_save_path)):  # if the directory is missing
    #     os.makedirs(os.path.dirname(txt_save_path))  # create it
    # with open(txt_save_path, 'w') as f:
    #     f.writelines(f'Prediction: {prediction}, Confidence: {confidence}\n')
    print('Test Dataset {} In Total \t CR: {}'.format(len(dset_loader.dataset), running_corrects / len(dset_loader.dataset)))  # print sample count and accuracy
    return running_corrects / len(dset_loader.dataset), running_loss / len(dset_loader.dataset), prediction  # return accuracy, loss, and the last prediction
# model training (restored from the commented-out block: main() below calls train())
def train(wandb, model, dset_loader, criterion, epoch, optimizer, logger):
    data_time = AverageMeter()  # tracks average and current value
    batch_time = AverageMeter()  # tracks average and current value
    lr = showLR(optimizer)  # current learning rate
    # write logger INFO
    logger.info('-' * 10)
    logger.info('Epoch {}/{}'.format(epoch, args.epochs - 1))  # log the epoch
    logger.info('Current learning rate: {}'.format(lr))  # log the learning rate
    model.train()  # train mode
    running_loss = 0.
    running_corrects = 0.
    running_all = 0.
    end = time.time()  # current time
    for batch_idx, (input, lengths, labels) in enumerate(dset_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # -- compute the mixup augmentation
        input, labels_a, labels_b, lam = mixup_data(input, labels, args.alpha)
        labels_a, labels_b = labels_a.cuda(), labels_b.cuda()  # move the label tensors to the GPU
        # PyTorch accumulates gradients across backward() calls,
        # so gradients must always be zeroed before each backpropagation step
        optimizer.zero_grad()
        # forward pass: add a channel dimension to the input tensor and move it to the GPU
        logits = model(input.unsqueeze(1).cuda(), lengths=lengths)
        loss_func = mixup_criterion(labels_a, labels_b, lam)  # apply mixup
        loss = loss_func(criterion, logits)  # compute the loss
        loss.backward()  # compute gradients
        optimizer.step()  # update the parameters using the stored gradients
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # -- compute running performance
        _, predicted = torch.max(F.softmax(logits, dim=1).data, dim=1)  # take the argmax after softmax
        running_loss += loss.item() * input.size(0)  # loss.item(): the scalar value held by loss
        running_corrects += lam * predicted.eq(labels_a.view_as(predicted)).sum().item() + (1 - lam) * predicted.eq(labels_b.view_as(predicted)).sum().item()  # mixup-weighted accuracy
        running_all += input.size(0)
        # ------------------ wandb logging (skipped when wandb.init was not called) ------------------
        if wandb.run is not None:
            wandb.log({'loss': running_loss, 'acc': running_corrects}, step=epoch)
        # -- log intermediate results
        if batch_idx % args.interval == 0 or (batch_idx == len(dset_loader) - 1):
            # write logger INFO
            update_logger_batch(args, logger, dset_loader, batch_idx, running_loss, running_corrects, running_all, batch_time, data_time)
    return model  # return the model
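# For reference, mixup (Zhang et al., 2018) trains on convex combinations of
# sample pairs; lipreading.mixup presumably implements something along these
# lines (a sketch, not the repo's exact code; 'perm' is a random permutation):
#   lam = np.random.beta(alpha, alpha)
#   mixed_x = lam * x + (1 - lam) * x[perm]
#   loss = lam * criterion(logits, y) + (1 - lam) * criterion(logits, y[perm])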
# build the model from a JSON config
def get_model_from_json():
    # check the json config exists; raise an AssertionError with a message otherwise
    assert args.config_path.endswith('.json') and os.path.isfile(args.config_path), \
        "'.json' config path does not exist. Path input: {}".format(args.config_path)
    args_loaded = load_json(args.config_path)  # read the json
    args.backbone_type = args_loaded['backbone_type']  # backbone_type from the json
    args.width_mult = args_loaded['width_mult']  # width_mult from the json
    args.relu_type = args_loaded['relu_type']  # relu_type from the json
    # TCN options
    tcn_options = {'num_layers': args_loaded['tcn_num_layers'],
                   'kernel_size': args_loaded['tcn_kernel_size'],
                   'dropout': args_loaded['tcn_dropout'],
                   'dwpw': args_loaded['tcn_dwpw'],
                   'width_mult': args_loaded['tcn_width_mult'],
                   }
    # create the lipreading model
    model = Lipreading(modality=args.modality,
                       num_classes=args.num_classes,
                       tcn_options=tcn_options,
                       backbone_type=args.backbone_type,
                       relu_type=args.relu_type,
                       width_mult=args.width_mult,
                       extract_feats=args.extract_feats).cuda()
    calculateNorm2(model)  # sanity-check training: the parameter L2 norm should generally grow as training proceeds
    return model  # return the model
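# The config json is expected to provide at least the keys read above; a
# minimal example (values illustrative, mirroring the argparse defaults):
#   {
#     "backbone_type": "resnet",
#     "width_mult": 1.0,
#     "relu_type": "relu",
#     "tcn_num_layers": 4,
#     "tcn_kernel_size": [3, 5, 7],
#     "tcn_dropout": 0.2,
#     "tcn_dwpw": false,
#     "tcn_width_mult": 1
#   }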
# main() function
def main():
    # -- wandb connection (left disabled for inference; train() only logs when wandb.init has been called)
    # wandb.init(project="Lipreading_using_TCN_running")
    # wandb.config = {
    #     "learning_rate": args.lr,
    #     "epochs": args.epochs,
    #     "batch_size": args.batch_size
    # }
    # os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # GPU selection
    # -- logging
    save_path = get_save_folder(args)  # save directory
    print("Model and log being saved in: {}".format(save_path))  # print the save path
    logger = get_logger(args, save_path)  # create and configure the logger
    ckpt_saver = CheckpointSaver(save_path)  # checkpoint saver
    # -- get model
    model = get_model_from_json()
    # -- get dataset iterators
    dset_loaders = get_data_loaders(args)
    # -- get loss function
    criterion = nn.CrossEntropyLoss()
    # -- get optimizer
    optimizer = get_optimizer(args, optim_policies=model.parameters())
    # -- get learning rate scheduler
    scheduler = CosineScheduler(args.lr, args.epochs)  # cosine annealing scheduler
    if args.model_path:
        # check the '.tar' checkpoint exists; raise an AssertionError with a message otherwise
        assert args.model_path.endswith('.tar') and os.path.isfile(args.model_path), \
            "'.tar' model path does not exist. Path input: {}".format(args.model_path)
        # resume from checkpoint
        if args.init_epoch > 0:
            model, optimizer, epoch_idx, ckpt_dict = load_model(args.model_path, model, optimizer)  # load the model
            args.init_epoch = epoch_idx  # set the starting epoch
            ckpt_saver.set_best_from_ckpt(ckpt_dict)  # restore the best-checkpoint state
            logger.info('Model and states have been successfully loaded from {}'.format(args.model_path))
        # init from trained model
        else:
            model = load_model(args.model_path, model, allow_size_mismatch=args.allow_size_mismatch)  # load the weights
            logger.info('Model has been successfully loaded from {}'.format(args.model_path))
        # feature extraction
        if args.mouth_patch_path:
            filename, embeddings = extract_feats(model)
            filename = filename.split('/')[-1]
            save_npz_path = os.path.join(args.mouth_embedding_out_path, filename)
            # NOTE: this ExtractEmbedding code still needs fixing!
            save2npz(save_npz_path, data=embeddings.cpu().detach().numpy())  # save the npz file
            # save2npz(args.mouth_embedding_out_path, data=extract_feats(model).cpu().detach().numpy())
            return
    # if test-time: report performance on the test partition and exit.
    # Otherwise: report performance on validation and continue (sanity check for reload)
    if args.test:
        acc_avg_test, loss_avg_test, prediction = evaluate(model, dset_loaders['test'], criterion, is_print=False)  # evaluate the model
        logger.info('Test-time performance on partition {}: Loss: {:.4f}\tAcc:{:.4f}'.format('test', loss_avg_test, acc_avg_test))
        return prediction
    # -- fix learning rate after loading the checkpoint (latency)
    if args.model_path and args.init_epoch > 0:
        scheduler.adjust_lr(optimizer, args.init_epoch - 1)  # update the learning rate
    epoch = args.init_epoch  # initialize the epoch counter
    while epoch < args.epochs:
        model = train(wandb, model, dset_loaders['train'], criterion, epoch, optimizer, logger)  # train the model
        acc_avg_val, loss_avg_val, _ = evaluate(model, dset_loaders['val'], criterion)  # evaluate the model
        logger.info('{} Epoch:\t{:2}\tLoss val: {:.4f}\tAcc val:{:.4f}, LR: {}'.format('val', epoch, loss_avg_val, acc_avg_val, showLR(optimizer)))
        # -- save checkpoint
        save_dict = {
            'epoch_idx': epoch + 1,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict()
        }
        ckpt_saver.save(save_dict, acc_avg_val)  # save the checkpoint
        scheduler.adjust_lr(optimizer, epoch)  # update the learning rate
        epoch += 1
    # -- evaluate the best-performing epoch on the test partition
    best_fp = os.path.join(ckpt_saver.save_dir, ckpt_saver.best_fn)  # path to the best checkpoint
    _ = load_model(best_fp, model)  # load the model
    acc_avg_test, loss_avg_test, _ = evaluate(model, dset_loaders['test'], criterion)  # evaluate the model
    logger.info('Test time performance of best epoch: {} (loss: {})'.format(acc_avg_test, loss_avg_test))
    torch.cuda.empty_cache()  # release cached GPU memory
# run the code under the if only when this script is executed directly by the
# interpreter, not when the module is imported
# => the first thing invoked when main.py is run
if __name__ == '__main__':
    main()  # call main()