import datetime
import os
import time
import torch
import torch.utils.data
from torch import nn
from torchvision import transforms
from resnet import resnet152
import utils
def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, print_freq):
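    """Run one training epoch over `data_loader`, logging loss, accuracy, learning rate and throughput."""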
    model.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('img/s', utils.SmoothedValue(window_size=10, fmt='{value}'))

    header = 'Epoch: [{}]'.format(epoch)
    for image, antibody, target in metric_logger.log_every(data_loader, print_freq, header):
        start_time = time.time()
        image, antibody, target = image.to(device), antibody.to(device), target.to(device)
        output = model(image, antibody)
        loss = criterion(output, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure top-1 / top-2 accuracy for this batch
        acc1, acc2 = utils.accuracy(output, target, topk=(1, 2))
        batch_size = image.shape[0]
        metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc2'].update(acc2.item(), n=batch_size)
        metric_logger.meters['img/s'].update(batch_size / (time.time() - start_time))
def evaluate(model, criterion, data_loader, device, print_freq=100):
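    """Evaluate the model on `data_loader` and return the global top-1 accuracy."""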
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=" ")
    header = 'Test:'
    with torch.no_grad():
        for image, antibody, target in metric_logger.log_every(data_loader, print_freq, header):
            image = image.to(device, non_blocking=True)
            antibody = antibody.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            output = model(image, antibody)
            loss = criterion(output, target)

            acc1, acc2 = utils.accuracy(output, target, topk=(1, 2))
            # FIXME need to take into account that the datasets
            # could have been padded in distributed setup
            batch_size = image.shape[0]
            metric_logger.update(loss=loss.item())
            metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
            metric_logger.meters['acc2'].update(acc2.item(), n=batch_size)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()

    print(' * Acc@1 {top1.global_avg:.3f} Acc@2 {top2.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top2=metric_logger.acc2))
    return metric_logger.acc1.global_avg
def _get_cache_path(filepath):
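    """Return a deterministic cache path derived from `filepath` (not currently called by load_data)."""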
    import hashlib
    h = hashlib.sha1(filepath.encode()).hexdigest()
    cache_path = os.path.join("~", ".torch", "vision", "datasets", "imagefolder", h[:10] + ".pt")
    cache_path = os.path.expanduser(cache_path)
    return cache_path
def load_data(traindir, valdir, antibody_train, antibody_val, cache_dataset, distributed):
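    """Build the training/validation HTDataset instances and their samplers.

    `cache_dataset` is accepted but currently unused; `distributed` selects
    DistributedSampler instead of the default random/sequential samplers.
    """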
    # Data loading code
    print("Loading data")
    normalize = transforms.Normalize(mean=[0.168, 0.174, 0.182],
                                     std=[0.159, 0.160, 0.162])
    # Dropout used as an augmentation transform, presumably applied by HTDataset
    # to the antibody expression input of the training set only.
    expression_tfs = transforms.Compose([nn.Dropout(0.3)])

    st = time.time()
    dataset = utils.HTDataset(
        traindir, antibody_train,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(degrees=180),
            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.0),
            transforms.ToTensor(),
            normalize,
        ]), expression_tfs)
    dataset_test = utils.HTDataset(
        valdir, antibody_val,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]), None)
    print("Took", time.time() - st)

    print("Creating data loaders")
    if distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    return dataset, dataset_test, train_sampler, test_sampler
def main(args):
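    """Entry point: build the data loaders and the ResNet-152 image + antibody model, then train or evaluate."""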
    if args.output_dir:
        utils.mkdir(args.output_dir)

    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)
    torch.backends.cudnn.benchmark = True

    train_dir = args.train_file
    val_dir = args.val_file
    dataset, dataset_test, train_sampler, test_sampler = load_data(train_dir, val_dir,
                                                                   args.antibodytrn, args.antibodyval,
                                                                   args.cache_dataset, args.distributed)
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=args.batch_size,
        sampler=train_sampler, num_workers=args.workers, pin_memory=True)
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=args.batch_size,
        sampler=test_sampler, num_workers=args.workers, pin_memory=True)

    print("Creating model")
    model = resnet152(num_classes=args.num_classes, antibody_nums=6)  # 6 antibodies

    # Hardcoded path to the image-only pre-trained weights (optional; skipped if absent).
    image_checkpoint = "../hashimoto_thyroiditis/model_79.pth"
    flag = os.path.exists(image_checkpoint)
    if flag:
        # Load the image-only checkpoint and fine-tune only the classifier head
        # plus any layers missing from the checkpoint; freeze everything else.
        checkpoint = torch.load(image_checkpoint, map_location='cpu')
        msg = model.load_state_dict(checkpoint['model'], strict=False)
        print(msg)
        print("Parameters to be updated:")
        parameters_to_be_updated = ['fc.weight', 'fc.bias'] + msg.missing_keys
        print(parameters_to_be_updated)
        for name, param in model.named_parameters():
            if name not in parameters_to_be_updated:
                param.requires_grad = False
    model.to(device)
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    if flag:
        parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
        assert len(parameters) == len(parameters_to_be_updated)
    else:
        parameters = model.parameters()

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(
        parameters, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        evaluate(model, criterion, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args.print_freq)
        lr_scheduler.step()
        evaluate(model, criterion, data_loader_test, device=device)
        if args.output_dir:
            checkpoint = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'epoch': epoch,
                'args': args}
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_dir, 'checkpoint.pth'))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def parse_args():
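    """Build the command-line interface for this training script."""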
    import argparse
    parser = argparse.ArgumentParser(description='PyTorch Classification Training')

    parser.add_argument('--train-file', help='training set image file')
    parser.add_argument('--val-file', help='validation set image file')
    parser.add_argument('--antibodytrn', help='training set antibody file')
    parser.add_argument('--antibodyval', help='validation set antibody file')
    parser.add_argument('--num-classes', default=2, type=int,
                        help='number of classes for the objective task (default: 2)')
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('-b', '--batch-size', default=32, type=int)
    parser.add_argument('--epochs', default=90, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
                        help='number of data loading workers (default: 16)')
    parser.add_argument('--lr', default=0.1, type=float, help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--lr-step-size', default=30, type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
    parser.add_argument('--output-dir', default='.', help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument(
        "--cache-dataset",
        dest="cache_dataset",
        help="Cache the datasets for quicker initialization. It also serializes the transforms",
        action="store_true",
    )
    parser.add_argument(
        "--sync-bn",
        dest="sync_bn",
        help="Use sync batch norm",
        action="store_true",
    )
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    parser.add_argument(
        "--pretrained",
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true",
    )

    # distributed training parameters
    parser.add_argument('--world-size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')

    args = parser.parse_args()
    return args
if __name__ == "__main__":
    args = parse_args()
    main(args)
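
# Example launch (single process; the file names below are placeholders):
#   python train.py --train-file train_images.txt --val-file val_images.txt \
#       --antibodytrn antibody_train.csv --antibodyval antibody_val.csv \
#       --output-dir ./checkpoints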