code stringlengths 17 6.64M |
|---|
def rotate_camera_to_point_at(up_from, lookat_from, up_to, lookat_to):
    """Compute a 3x3 rotation that takes the camera frame (up_from, lookat_from)
    to the frame (up_to, lookat_to).

    Strategy: first rotate the look-at direction into place (r1), then roll
    about the new look-at axis to align the camera's x-axis (r2); the result
    is r2 @ r1.

    Relies on module-level helpers `normalize`, `r_between`, `get_r_matrix`
    and the constant `ANGLE_EPS`.
    """
    # Normalize every input direction to a flat unit 3-vector.
    inputs = [up_from, lookat_from, up_to, lookat_to]
    for i in range(4):
        inputs[i] = normalize(np.array(inputs[i]).reshape((-1,)))
    up_from, lookat_from, up_to, lookat_to = inputs

    # r1: rotation aligning the source look-at direction with the target one.
    r1 = r_between(lookat_from, lookat_to)

    # Where r1 sends the camera x-axis, vs. where it should end up
    # (perpendicular to both the target look-at and up directions).
    new_x = np.dot(r1, np.array([1, 0, 0]).reshape((-1, 1))).reshape((-1))
    to_x = normalize(np.cross(lookat_to, up_to))
    angle = np.arccos(np.dot(new_x, to_x))

    if angle > ANGLE_EPS:
        if angle < (np.pi - ANGLE_EPS):
            # Roll about lookat_to; the sign of the rotation is chosen from
            # the orientation of the correction axis relative to lookat_to.
            ax = normalize(np.cross(new_x, to_x))
            flip = np.dot(lookat_to, ax)
            # BUG FIX: previously the degenerate flip == 0 case left r2
            # unassigned, raising UnboundLocalError at the return below.
            if flip >= 0:
                r2 = get_r_matrix(lookat_to, angle)
            else:
                r2 = get_r_matrix(lookat_to, (-1.0) * angle)
        else:
            # Rotation is ~180 degrees: direction of the roll is immaterial.
            r2 = get_r_matrix(lookat_to, angle)
    else:
        # Already aligned within tolerance: no roll needed.
        r2 = np.eye(3)
    return np.dot(r2, r1)
|
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float=0, mixup_fn: Optional[Mixup]=None, log_writer=None, args=None):
    """Fine-tune `model` for one epoch over `data_loader`.

    Runs under CUDA AMP autocast with `loss_scaler` handling scaling/clipping,
    and supports gradient accumulation over `args.accum_iter` micro-batches.
    Returns a dict mapping each tracked meter name to its global average.
    """
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20  # console log interval (iterations)
    accum_iter = args.accum_iter
    optimizer.zero_grad()
    if (log_writer is not None):
        print('log_dir: {}'.format(log_writer.log_dir))
    for (data_iter_step, (samples, targets)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # Per-iteration LR schedule: stepped only at accumulation boundaries,
        # with the fractional epoch passed to the scheduler.
        if ((data_iter_step % accum_iter) == 0):
            lr_sched.adjust_learning_rate(optimizer, ((data_iter_step / len(data_loader)) + epoch), args)
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if (mixup_fn is not None):
            # Mixup/CutMix converts hard labels into soft targets.
            (samples, targets) = mixup_fn(samples, targets)
        with torch.cuda.amp.autocast():
            outputs = model(samples)
            loss = criterion(outputs, targets)
        loss_value = loss.item()  # unscaled value, kept for logging only
        loss /= accum_iter  # so accumulated gradients average over micro-batches
        # loss_scaler backpropagates (and clips to max_norm) every step, but
        # only steps the optimizer when update_grad is True, i.e. on the last
        # micro-batch of each accumulation group.
        loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=False, update_grad=(((data_iter_step + 1) % accum_iter) == 0))
        if (((data_iter_step + 1) % accum_iter) == 0):
            optimizer.zero_grad()
        torch.cuda.synchronize()
        metric_logger.update(loss=loss_value)
        min_lr = 10.0
        max_lr = 0.0
        # Scan the LR range across param groups (layer-wise decay gives each
        # group its own LR); note only max_lr is actually logged below.
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group['lr'])
            max_lr = max(max_lr, group['lr'])
        metric_logger.update(lr=max_lr)
        loss_value_reduce = misc.all_reduce_mean(loss_value)
        # NOTE(review): finiteness is checked on the cross-process reduced value
        # after the backward pass; upstream MAE checks loss_value immediately
        # after .item() — confirm this ordering is intended.
        if (not math.isfinite(loss_value_reduce)):
            print('Loss is {}, stopping training'.format(loss_value_reduce))
            sys.exit(1)
        # TensorBoard logging, once per effective (accumulated) step.
        if ((log_writer is not None) and (((data_iter_step + 1) % accum_iter) == 0)):
            ' We use epoch_1000x as the x-axis in tensorboard.\n This calibrates different curves when batch size changes.\n '
            epoch_1000x = int((((data_iter_step / len(data_loader)) + epoch) * 1000))
            log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', max_lr, epoch_1000x)
    # Aggregate meters across distributed processes before reporting.
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
@torch.no_grad()
def evaluate(data_loader, model, device):
    """Evaluate classification accuracy of `model` on `data_loader`.

    Computes cross-entropy loss and top-1/top-5 accuracy under AMP autocast,
    synchronizes meters across distributed processes, and returns a dict of
    meter name -> global average. Gradients are disabled by the decorator.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = misc.MetricLogger(delimiter=' ')
    header = 'Test:'
    model.eval()  # disable dropout / stochastic depth for deterministic eval
    for batch in metric_logger.log_every(data_loader, 10, header):
        images = batch[0]
        target = batch[(- 1)]  # label is the last element (tolerates extra fields)
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        # Accuracy meters are weighted by batch size so the final average is
        # over samples, not over (possibly unequal) batches.
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    metric_logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
def train_one_epoch(model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, log_writer=None, args=None):
    """Run one MAE pre-training epoch (self-supervised; dataset labels are ignored).

    The model itself returns the reconstruction loss for a random mask of
    ratio `args.mask_ratio`. AMP and gradient accumulation mirror the
    fine-tuning loop. Returns a dict of meter name -> global average.
    """
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20  # console log interval (iterations)
    accum_iter = args.accum_iter
    optimizer.zero_grad()
    if (log_writer is not None):
        print('log_dir: {}'.format(log_writer.log_dir))
    for (data_iter_step, (samples, _)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # Per-iteration LR schedule, adjusted only at accumulation boundaries.
        if ((data_iter_step % accum_iter) == 0):
            lr_sched.adjust_learning_rate(optimizer, ((data_iter_step / len(data_loader)) + epoch), args)
        samples = samples.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            # Model returns (loss, prediction, mask); only the loss is used here.
            (loss, _, _) = model(samples, mask_ratio=args.mask_ratio)
        loss_value = loss.item()  # unscaled value, kept for logging only
        loss /= accum_iter  # average gradients over the accumulation group
        # Scaler backpropagates every step but only steps the optimizer on the
        # last micro-batch of each accumulation group.
        loss_scaler(loss, optimizer, parameters=model.parameters(), update_grad=(((data_iter_step + 1) % accum_iter) == 0))
        if (((data_iter_step + 1) % accum_iter) == 0):
            optimizer.zero_grad()
        torch.cuda.synchronize()
        metric_logger.update(loss=loss_value)
        lr = optimizer.param_groups[0]['lr']  # single LR: no layer-wise decay here
        metric_logger.update(lr=lr)
        loss_value_reduce = misc.all_reduce_mean(loss_value)
        # NOTE(review): finiteness is checked on the reduced value after the
        # backward pass; upstream MAE checks loss_value immediately — confirm.
        if (not math.isfinite(loss_value_reduce)):
            print('Loss is {}, stopping training'.format(loss_value_reduce))
            sys.exit(1)
        # TensorBoard logging, once per effective (accumulated) step.
        if ((log_writer is not None) and (((data_iter_step + 1) % accum_iter) == 0)):
            ' We use epoch_1000x as the x-axis in tensorboard.\n This calibrates different curves when batch size changes.\n '
            epoch_1000x = int((((data_iter_step / len(data_loader)) + epoch) * 1000))
            log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', lr, epoch_1000x)
    # Aggregate meters across distributed processes before reporting.
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
def get_args_parser():
    """Build the CLI parser for MAE fine-tuning.

    Created with add_help=False so a caller can compose it into a parent
    parser. Returns the configured argparse.ArgumentParser.
    """
    parser = argparse.ArgumentParser('MAE fine-tuning for image classification', add_help=False)
    # Training schedule
    parser.add_argument('--batch_size', default=64, type=int, help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus')
    parser.add_argument('--epochs', default=50, type=int)
    parser.add_argument('--accum_iter', default=1, type=int, help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
    # Model
    parser.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL', help='Name of model to train')
    parser.add_argument('--input_size', default=224, type=int, help='images input size')
    parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)')
    # Optimizer / learning-rate schedule
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)')
    parser.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate (absolute lr)')
    parser.add_argument('--blr', type=float, default=0.001, metavar='LR', help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    parser.add_argument('--layer_decay', type=float, default=0.75, help='layer-wise lr decay from ELECTRA/BEiT')
    parser.add_argument('--min-lr', type=float, default=1e-06, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR')
    parser.add_argument('--use-adan', action='store_true', default=True, help='whether to use Adan')
    parser.add_argument('--max-grad-norm', type=float, default=0.0, help='max grad norm (default: 0.0 for no clip)')
    parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: None, use opt default)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--bias-decay', action='store_true', default=False, help='whether to decay bias term')
    # Augmentation
    parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT', help='Color jitter factor (enabled only when not using Auto/RandAug)')
    # BUG FIX: this call was wrapped in a pointless one-element tuple, and the
    # help string carried a leftover '" + "' string-concatenation artifact.
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". (default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    # Random erasing
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split')
    # Mixup / CutMix
    parser.add_argument('--mixup', type=float, default=0, help='mixup alpha, mixup enabled if > 0.')
    parser.add_argument('--cutmix', type=float, default=0, help='cutmix alpha, cutmix enabled if > 0.')
    parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup_prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup_switch_prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup_mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
    # Fine-tuning / classifier head (global_pool defaults to True; --cls_token flips it)
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')
    parser.add_argument('--global_pool', action='store_true')
    parser.set_defaults(global_pool=True)
    parser.add_argument('--cls_token', action='store_false', dest='global_pool', help='Use class token instead of global pool for classification')
    # Dataset / paths
    parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str, help='dataset path')
    parser.add_argument('--nb_classes', default=1000, type=int, help='number of the classification types')
    parser.add_argument('--output_dir', default=None, help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default='./output_dir/', help='path where to tensorboard log')
    # Runtime
    parser.add_argument('--device', default='cuda:0', help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--dist_eval', action='store_true', default=False, help='Enabling distributed evaluation (recommended during training for faster monitor')
    parser.add_argument('--num_workers', default=8, type=int)
    parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)
    # Distributed
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=0, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
|
def main(args):
    """Run MAE fine-tuning end to end: data, model, optimizer, train/eval loop."""
    # Force a single-process, single-GPU setup regardless of CLI values.
    args.device = 'cuda:0'
    args.world_size = 1
    args.rank = 0
    args.gpu = 0
    misc.init_distributed_ddpjob(args)
    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print('{}'.format(args).replace(', ', ',\n'))
    device = torch.device(args.device)
    # Seed per rank so augmentation streams differ across processes.
    seed = (args.seed + misc.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    dataset_train = build_dataset(is_train=True, args=args)
    dataset_val = build_dataset(is_train=False, args=args)
    if True:  # always take the distributed-sampler path (the else below is dead code)
        num_tasks = misc.get_world_size()
        global_rank = misc.get_rank()
        sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
        print(('Sampler_train = %s' % str(sampler_train)))
        if args.dist_eval:
            if ((len(dataset_val) % num_tasks) != 0):
                print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. This will slightly alter validation results as extra duplicate entries are added to achieve equal num of samples per-process.')
            # shuffle=True spreads the padded duplicate entries across ranks.
            sampler_val = torch.utils.data.DistributedSampler(dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)
        else:
            sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    # TensorBoard writer only on the main process, in a timestamped subdirectory.
    if (misc.is_main_process() and (args.log_dir is not None) and (not args.eval)):
        TIMESTAMP = '{0:%Y-%m-%dT%H-%M-%S/}'.format(datetime.datetime.now())
        args.log_dir = ((args.log_dir + 'mae-') + TIMESTAMP)
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = SummaryWriter(log_dir=args.log_dir)
    else:
        log_writer = None
    data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
    data_loader_val = torch.utils.data.DataLoader(dataset_val, sampler=sampler_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
    # Mixup/CutMix is active if any of the three knobs enables it.
    mixup_fn = None
    mixup_active = ((args.mixup > 0) or (args.cutmix > 0.0) or (args.cutmix_minmax is not None))
    if mixup_active:
        print('Mixup is activated!')
        mixup_fn = Mixup(mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes)
    model = models_vit.__dict__[args.model](num_classes=args.nb_classes, drop_path_rate=args.drop_path, global_pool=args.global_pool)
    if (args.finetune and (not args.eval)):
        print(('Load pre-trained checkpoint from: %s' % args.finetune))
        checkpoint_model = load_state_dict(args.finetune)
        state_dict = model.state_dict()
        # Drop classifier weights whose shape no longer matches the new head.
        for k in ['head.weight', 'head.bias']:
            if ((k in checkpoint_model) and (checkpoint_model[k].shape != state_dict[k].shape)):
                print(f'Removing key {k} from pretrained checkpoint')
                del checkpoint_model[k]
        # Resize pre-trained position embeddings to the current input size.
        interpolate_pos_embed(model, checkpoint_model)
        msg = model.load_state_dict(checkpoint_model, strict=False)
        print(msg)
        # Only the freshly initialized head (and fc_norm when pooling) may be missing.
        if args.global_pool:
            assert (set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'})
        else:
            assert (set(msg.missing_keys) == {'head.weight', 'head.bias'})
        trunc_normal_(model.head.weight, std=1e-05)
    model.to(device)
    model_without_ddp = model
    n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
    print(('Model = %s' % str(model_without_ddp)))
    print(('number of params (M): %.2f' % (n_parameters / 1000000.0)))
    eff_batch_size = ((args.batch_size * args.accum_iter) * misc.get_world_size())
    # Linear LR scaling rule: absolute lr = base lr * effective batch size / 256.
    if (args.lr is None):
        args.lr = ((args.blr * eff_batch_size) / 256)
    print(('base lr: %.2e' % ((args.lr * 256) / eff_batch_size)))
    print(('actual lr: %.2e' % args.lr))
    print(('accumulate grad iterations: %d' % args.accum_iter))
    print(('effective batch size: %d' % eff_batch_size))
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    # NOTE(review): the parser defines --use-adan (dest 'use_adan'); 'use_FAM'
    # is never set anywhere visible, so this attribute access would raise
    # AttributeError — confirm the intended flag name.
    if args.use_FAM:
        param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay, no_weight_decay_list=([] if args.bias_decay else model_without_ddp.no_weight_decay()), layer_decay=args.layer_decay)
        # NOTE(review): 'Name' looks like a placeholder for the custom optimizer
        # class (presumably Adan, given --use-adan) — verify the intended import.
        optimizer = Name(param_groups, weight_decay=args.weight_decay, lr=args.lr, betas=args.opt_betas, eps=args.opt_eps, max_grad_norm=args.max_grad_norm)
    else:
        param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay, no_weight_decay_list=model_without_ddp.no_weight_decay(), layer_decay=args.layer_decay)
        optimizer = torch.optim.AdamW(param_groups, lr=args.lr)
    loss_scaler = NativeScaler()
    # Criterion: mixup needs soft targets; otherwise optionally label-smoothed CE.
    if (mixup_fn is not None):
        criterion = SoftTargetCrossEntropy()
    elif (args.smoothing > 0.0):
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = torch.nn.CrossEntropyLoss()
    print(('criterion = %s' % str(criterion)))
    # Optionally resume model/optimizer/scaler state from args.resume.
    misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
    if args.eval:
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        exit(0)
    print(f'Start training for {args.epochs} epochs')
    start_time = time.time()
    max_accuracy = 0.0
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle the sampler's shard assignment each epoch.
            data_loader_train.sampler.set_epoch(epoch)
        train_stats = train_one_epoch(model, criterion, data_loader_train, optimizer, device, epoch, loss_scaler, args.clip_grad, mixup_fn, log_writer=log_writer, args=args)
        # Checkpoint every 10 epochs.
        if (args.output_dir and (((epoch + 1) % 10) == 0)):
            misc.save_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        max_accuracy = max(max_accuracy, test_stats['acc1'])
        print(f'Max accuracy: {max_accuracy:.2f}%')
        if (log_writer is not None):
            log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch)
            log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch)
            log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch)
        log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, **{f'test_{k}': v for (k, v) in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
        # Append a JSON line of stats per epoch on the main process.
        if (args.log_dir and misc.is_main_process()):
            if (log_writer is not None):
                log_writer.flush()
            with open(os.path.join(args.log_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                f.write((json.dumps(log_stats) + '\n'))
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
|
def get_args_parser():
    """Build the CLI parser for MAE linear probing.

    Created with add_help=False so a caller can compose it into a parent
    parser. Returns the configured argparse.ArgumentParser.
    """
    parser = argparse.ArgumentParser('MAE linear probing for image classification', add_help=False)

    # --- training schedule ---
    parser.add_argument('--batch_size', type=int, default=512,
                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus')
    parser.add_argument('--epochs', type=int, default=90)
    parser.add_argument('--accum_iter', type=int, default=1,
                        help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')

    # --- model ---
    parser.add_argument('--model', type=str, default='vit_large_patch16', metavar='MODEL',
                        help='Name of model to train')

    # --- optimizer / learning-rate schedule ---
    parser.add_argument('--weight_decay', type=float, default=0,
                        help='weight decay (default: 0 for linear probe following MoCo v1)')
    parser.add_argument('--lr', type=float, default=None, metavar='LR',
                        help='learning rate (absolute lr)')
    parser.add_argument('--blr', type=float, default=0.1, metavar='LR',
                        help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    parser.add_argument('--min_lr', type=float, default=0.0, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0')
    parser.add_argument('--warmup_epochs', type=int, default=10, metavar='N',
                        help='epochs to warmup LR')

    # --- checkpoint / head pooling (global_pool defaults to False; --cls_token keeps it off) ---
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')
    parser.add_argument('--global_pool', action='store_true')
    parser.set_defaults(global_pool=False)
    parser.add_argument('--cls_token', action='store_false', dest='global_pool',
                        help='Use class token instead of global pool for classification')

    # --- dataset / paths ---
    parser.add_argument('--data_path', type=str, default='/datasets01/imagenet_full_size/061417/',
                        help='dataset path')
    parser.add_argument('--nb_classes', type=int, default=1000,
                        help='number of the classification types')
    parser.add_argument('--output_dir', default='./output_dir',
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default='./output_dir',
                        help='path where to tensorboard log')

    # --- runtime ---
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='start epoch')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--dist_eval', action='store_true', default=False,
                        help='Enabling distributed evaluation (recommended during training for faster monitor')
    parser.add_argument('--num_workers', type=int, default=10)
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)

    # --- distributed ---
    parser.add_argument('--world_size', type=int, default=1,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', type=int, default=(-1))
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')

    return parser
|
def main(args):
    """Run MAE linear probing end to end: frozen backbone + BN/linear head trained with LARS."""
    misc.init_distributed_mode(args)
    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print('{}'.format(args).replace(', ', ',\n'))
    device = torch.device(args.device)
    # Seed per rank so augmentation streams differ across processes.
    seed = (args.seed + misc.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    # Linear-probe augmentation: crop + flip only (no RandAug/mixup/erasing).
    transform_train = transforms.Compose([RandomResizedCrop(224, interpolation=3), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    transform_val = transforms.Compose([transforms.Resize(256, interpolation=3), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
    dataset_val = datasets.ImageFolder(os.path.join(args.data_path, 'val'), transform=transform_val)
    print(dataset_train)
    print(dataset_val)
    if True:  # always take the distributed-sampler path (the else below is dead code)
        num_tasks = misc.get_world_size()
        global_rank = misc.get_rank()
        sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
        print(('Sampler_train = %s' % str(sampler_train)))
        if args.dist_eval:
            if ((len(dataset_val) % num_tasks) != 0):
                print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. This will slightly alter validation results as extra duplicate entries are added to achieve equal num of samples per-process.')
            # shuffle=True spreads the padded duplicate entries across ranks.
            sampler_val = torch.utils.data.DistributedSampler(dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)
        else:
            sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    # TensorBoard writer only on the global main process.
    if ((global_rank == 0) and (args.log_dir is not None) and (not args.eval)):
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = SummaryWriter(log_dir=args.log_dir)
    else:
        log_writer = None
    data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
    data_loader_val = torch.utils.data.DataLoader(dataset_val, sampler=sampler_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
    model = models_vit.__dict__[args.model](num_classes=args.nb_classes, global_pool=args.global_pool)
    if (args.finetune and (not args.eval)):
        checkpoint = torch.load(args.finetune, map_location='cpu')
        print(('Load pre-trained checkpoint from: %s' % args.finetune))
        checkpoint_model = checkpoint['model']
        state_dict = model.state_dict()
        # Drop classifier weights whose shape does not match the new head.
        for k in ['head.weight', 'head.bias']:
            if ((k in checkpoint_model) and (checkpoint_model[k].shape != state_dict[k].shape)):
                print(f'Removing key {k} from pretrained checkpoint')
                del checkpoint_model[k]
        # Resize pre-trained position embeddings to the current grid.
        interpolate_pos_embed(model, checkpoint_model)
        msg = model.load_state_dict(checkpoint_model, strict=False)
        print(msg)
        # Only the freshly initialized head (and fc_norm when pooling) may be missing.
        if args.global_pool:
            assert (set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'})
        else:
            assert (set(msg.missing_keys) == {'head.weight', 'head.bias'})
        trunc_normal_(model.head.weight, std=0.01)
    # Linear-probe head: feature BatchNorm (no affine params) in front of the
    # linear classifier.
    model.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.head.in_features, affine=False, eps=1e-06), model.head)
    # Freeze the whole backbone, then re-enable gradients for the head only.
    for (_, p) in model.named_parameters():
        p.requires_grad = False
    for (_, p) in model.head.named_parameters():
        p.requires_grad = True
    model.to(device)
    model_without_ddp = model
    # Counts trainable params only, i.e. the head (backbone is frozen above).
    n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
    print(('Model = %s' % str(model_without_ddp)))
    print(('number of params (M): %.2f' % (n_parameters / 1000000.0)))
    eff_batch_size = ((args.batch_size * args.accum_iter) * misc.get_world_size())
    # Linear LR scaling rule: absolute lr = base lr * effective batch size / 256.
    if (args.lr is None):
        args.lr = ((args.blr * eff_batch_size) / 256)
    print(('base lr: %.2e' % ((args.lr * 256) / eff_batch_size)))
    print(('actual lr: %.2e' % args.lr))
    print(('accumulate grad iterations: %d' % args.accum_iter))
    print(('effective batch size: %d' % eff_batch_size))
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    # LARS optimizer over head parameters only.
    optimizer = LARS(model_without_ddp.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    print(optimizer)
    loss_scaler = NativeScaler()
    criterion = torch.nn.CrossEntropyLoss()
    print(('criterion = %s' % str(criterion)))
    # Optionally resume model/optimizer/scaler state from args.resume.
    misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
    if args.eval:
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        exit(0)
    print(f'Start training for {args.epochs} epochs')
    start_time = time.time()
    max_accuracy = 0.0
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle the sampler's shard assignment each epoch.
            data_loader_train.sampler.set_epoch(epoch)
        train_stats = train_one_epoch(model, criterion, data_loader_train, optimizer, device, epoch, loss_scaler, max_norm=None, log_writer=log_writer, args=args)
        # Checkpoint every epoch when an output dir is configured.
        if args.output_dir:
            misc.save_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        max_accuracy = max(max_accuracy, test_stats['acc1'])
        print(f'Max accuracy: {max_accuracy:.2f}%')
        if (log_writer is not None):
            log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch)
            log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch)
            log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch)
        log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, **{f'test_{k}': v for (k, v) in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
        # Append a JSON line of stats per epoch on the main process.
        if (args.output_dir and misc.is_main_process()):
            if (log_writer is not None):
                log_writer.flush()
            with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                f.write((json.dumps(log_stats) + '\n'))
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
|
def get_args_parser():
    """Build the CLI parser for MAE pre-training.

    Created with add_help=False so a caller can compose it into a parent
    parser. Returns the configured argparse.ArgumentParser.
    """
    parser = argparse.ArgumentParser('MAE pre-training', add_help=False)

    # --- training schedule ---
    parser.add_argument('--batch_size', type=int, default=64,
                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus')
    parser.add_argument('--epochs', type=int, default=400)
    parser.add_argument('--accum_iter', type=int, default=1,
                        help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')

    # --- model / masking ---
    parser.add_argument('--model', type=str, default='mae_vit_large_patch16', metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input_size', type=int, default=224,
                        help='images input size')
    parser.add_argument('--mask_ratio', type=float, default=0.75,
                        help='Masking ratio (percentage of removed patches).')
    parser.add_argument('--norm_pix_loss', action='store_true',
                        help='Use (per-patch) normalized pixels as targets for computing loss')
    parser.set_defaults(norm_pix_loss=False)

    # --- optimizer / learning-rate schedule ---
    parser.add_argument('--weight_decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    parser.add_argument('--lr', type=float, default=None, metavar='LR',
                        help='learning rate (absolute lr)')
    parser.add_argument('--blr', type=float, default=0.001, metavar='LR',
                        help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    parser.add_argument('--min_lr', type=float, default=0.0, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0')
    parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N',
                        help='epochs to warmup LR')
    parser.add_argument('--use-adan', action='store_true', default=False,
                        help='whether to use Adan')
    parser.add_argument('--max-grad-norm', type=float, default=0.0,
                        help='max grad norm (default: 0.0 for no clip)')
    parser.add_argument('--opt-eps', type=float, default=None, metavar='EPSILON',
                        help='Optimizer Epsilon (default: None, use opt default)')
    parser.add_argument('--opt-betas', type=float, default=None, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--bias-decay', action='store_true', default=False,
                        help='whether to decay bias term')

    # --- dataset / paths ---
    parser.add_argument('--data_path', type=str, default='/datasets01/imagenet_full_size/061417/',
                        help='dataset path')
    parser.add_argument('--output_dir', default=None,
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default='./pretrain_dir/',
                        help='path where to tensorboard log')

    # --- runtime ---
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--resume', default=None, help='resume from checkpoint')
    parser.add_argument('--no-resume-opt', action='store_true', default=False,
                        help='prevent resume of optimizer state when resuming model')
    parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='start epoch')
    parser.add_argument('--num_workers', type=int, default=10)
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)

    # --- distributed ---
    parser.add_argument('--world_size', type=int, default=1,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', type=int, default=(-1))
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')

    return parser
|
def main(args):
    """MAE pre-training entry point: sets up distributed state, data, model,
    optimizer and runs the epoch loop with checkpointing and TensorBoard logging."""
    misc.init_distributed_mode(args)
    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print('{}'.format(args).replace(', ', ',\n'))
    device = torch.device(args.device)
    # Offset the seed by rank so each process gets a different RNG stream.
    seed = (args.seed + misc.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    # MAE pre-training augmentation: random resized crop + flip + ImageNet normalization.
    transform_train = transforms.Compose([transforms.RandomResizedCrop(args.input_size, scale=(0.2, 1.0), interpolation=3), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
    print(dataset_train)
    if True:  # always uses a DistributedSampler; the RandomSampler branch below is dead code
        num_tasks = misc.get_world_size()
        global_rank = misc.get_rank()
        sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
        print(('Sampler_train = %s' % str(sampler_train)))
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
    # TensorBoard logging only on the main process, under a timestamped subdirectory.
    if (misc.is_main_process() and (args.log_dir is not None)):
        TIMESTAMP = '{0:%Y-%m-%dT%H-%M-%S/}'.format(datetime.datetime.now())
        args.log_dir = ((args.log_dir + 'mae-') + TIMESTAMP)
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = SummaryWriter(log_dir=args.log_dir)
    else:
        log_writer = None
    data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
    model = models_mae.__dict__[args.model](norm_pix_loss=args.norm_pix_loss)
    model.to(device)
    model_without_ddp = model
    print(('Model = %s' % str(model_without_ddp)))
    # Linear lr scaling rule: actual lr = base lr * effective batch size / 256.
    eff_batch_size = ((args.batch_size * args.accum_iter) * misc.get_world_size())
    if (args.lr is None):
        args.lr = ((args.blr * eff_batch_size) / 256)
    print(('base lr: %.2e' % ((args.lr * 256) / eff_batch_size)))
    print(('actual lr: %.2e' % args.lr))
    print(('accumulate grad iterations: %d' % args.accum_iter))
    print(('effective batch size: %d' % eff_batch_size))
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module
    param_groups = optim_factory.add_weight_decay(model_without_ddp, args.weight_decay)
    if args.use_adan:
        if args.bias_decay:
            # decay everything, including bias/norm parameters
            param = model_without_ddp.parameters()
        else:
            param = param_groups
            # decay is already baked into the param groups; zero it so Adan
            # does not apply it a second time
            args.weight_decay = 0.0
        optimizer = Adan(param, weight_decay=args.weight_decay, lr=args.lr, betas=args.opt_betas, eps=args.opt_eps, max_grad_norm=args.max_grad_norm)
    else:
        optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))
    print(optimizer)
    loss_scaler = NativeScaler()
    # Auto-resume from <output_dir>/last.pth.tar when no explicit --resume is given.
    resume_epoch = None
    if (not args.resume):
        resume_path = os.path.join(args.output_dir, 'last.pth.tar')
        print(resume_path, os.path.isfile(resume_path))
        if os.path.isfile(resume_path):
            args.resume = resume_path
    if args.resume:
        resume_epoch = resume_checkpoint(model_without_ddp, args.resume, optimizer=(None if args.no_resume_opt else optimizer), loss_scaler=(None if args.no_resume_opt else loss_scaler), log_info=misc.is_main_process())
    if (resume_epoch is not None):
        args.start_epoch = resume_epoch
    print(f'Start training for {args.epochs} epochs')
    saver = None
    if (misc.is_main_process() and (args.output_dir is not None)):
        # keep at most 2 checkpoints; decreasing=True because the tracked metric is the loss
        saver = CheckpointSaver(model=model, optimizer=optimizer, args=args, amp_scaler=loss_scaler, checkpoint_dir=args.output_dir, recovery_dir=args.output_dir, decreasing=True, max_history=2)
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # reshuffle the distributed sampler each epoch
            data_loader_train.sampler.set_epoch(epoch)
        # NOTE(review): data_loader_train is passed as the 2nd positional argument;
        # confirm the train_one_epoch in scope takes (model, data_loader, ...) and
        # not (model, criterion, data_loader, ...) as elsewhere in this codebase.
        train_stats = train_one_epoch(model, data_loader_train, optimizer, device, epoch, loss_scaler, log_writer=log_writer, args=args)
        if (saver is not None):
            saver.save_checkpoint(epoch, train_stats['loss'])
        log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'epoch': epoch}
        if (args.output_dir and misc.is_main_process()):
            if (log_writer is not None):
                log_writer.flush()
            with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                f.write((json.dumps(log_stats) + '\n'))
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
|
class RandomResizedCrop(transforms.RandomResizedCrop):
    """
    RandomResizedCrop for matching TF/TPU implementation: no for-loop is used.
    This may lead to results different with torchvision's version.
    Following BYOL's TF code:
    https://github.com/deepmind/deepmind-research/blob/master/byol/utils/dataset.py#L206
    """

    @staticmethod
    def get_params(img, scale, ratio):
        """Sample a crop box (top, left, height, width) for a random resized crop.

        Unlike torchvision's rejection-sampling loop, a single area and
        aspect-ratio sample is drawn and the box is clamped to the image.
        """
        # torchvision >= 0.11 renamed the private `_get_image_size` helper to
        # the public `get_image_size`; support both for version compatibility.
        get_size = getattr(F, 'get_image_size', None) or F._get_image_size
        (width, height) = get_size(img)
        area = (height * width)
        # target crop area, uniform in [scale[0], scale[1]] * image area
        target_area = (area * torch.empty(1).uniform_(scale[0], scale[1]).item())
        # aspect ratio, log-uniform in [ratio[0], ratio[1]]
        log_ratio = torch.log(torch.tensor(ratio))
        aspect_ratio = torch.exp(torch.empty(1).uniform_(log_ratio[0], log_ratio[1])).item()
        w = int(round(math.sqrt((target_area * aspect_ratio))))
        h = int(round(math.sqrt((target_area / aspect_ratio))))
        # clamp instead of re-sampling (the TF/TPU-matching behavior)
        w = min(w, width)
        h = min(h, height)
        i = torch.randint(0, ((height - h) + 1), size=(1,)).item()
        j = torch.randint(0, ((width - w) + 1), size=(1,)).item()
        return (i, j, h, w)
|
def build_dataset(is_train, args):
    """Create an ImageFolder dataset for the train or val split under args.data_path."""
    split = 'train' if is_train else 'val'
    dataset = datasets.ImageFolder(
        os.path.join(args.data_path, split),
        transform=build_transform(is_train, args),
    )
    print(dataset)
    return dataset
|
def build_transform(is_train, args):
    """Build the torchvision transform pipeline for training or evaluation."""
    mean = IMAGENET_DEFAULT_MEAN
    std = IMAGENET_DEFAULT_STD
    if is_train:
        # timm's create_transform bundles RandomResizedCrop, flips,
        # AutoAugment and random erasing for training.
        return create_transform(input_size=args.input_size, is_training=True, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation='bicubic', re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, mean=mean, std=std)
    # Evaluation: resize (respecting a crop ratio) -> center crop -> tensor -> normalize.
    crop_pct = (224 / 256) if (args.input_size <= 224) else 1.0
    size = int(args.input_size / crop_pct)
    return transforms.Compose([
        transforms.Resize(size, interpolation=PIL.Image.BICUBIC),
        transforms.CenterCrop(args.input_size),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
|
class LARS(torch.optim.Optimizer):
    """
    LARS optimizer, no rate scaling or weight decay for parameters <= 1D.
    """

    def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001):
        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient)
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and returns
                the loss — the standard ``torch.optim.Optimizer.step`` contract
                (the original signature rejected it).

        Returns:
            The closure's loss, or None when no closure is given.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for g in self.param_groups:
            for p in g['params']:
                dp = p.grad
                if dp is None:
                    continue
                # Weight decay and LARS trust scaling apply only to >1D params
                # (weights); biases and norm parameters take the raw gradient.
                if p.ndim > 1:
                    dp = dp.add(p, alpha=g['weight_decay'])
                    param_norm = torch.norm(p)
                    update_norm = torch.norm(dp)
                    one = torch.ones_like(param_norm)
                    # trust ratio tc * ||p|| / ||dp||, guarded against zero norms
                    q = torch.where(param_norm > 0.0,
                                    torch.where(update_norm > 0, (g['trust_coefficient'] * param_norm) / update_norm, one),
                                    one)
                    dp = dp.mul(q)
                # momentum buffer, created lazily per parameter
                param_state = self.state[p]
                if 'mu' not in param_state:
                    param_state['mu'] = torch.zeros_like(p)
                mu = param_state['mu']
                mu.mul_(g['momentum']).add_(dp)
                p.add_(mu, alpha=-g['lr'])
        return loss
|
def param_groups_lrd(model, weight_decay=0.05, no_weight_decay_list=[], layer_decay=0.75):
    """
    Parameter groups for layer-wise lr decay
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58
    """
    group_names = {}
    groups = {}
    num_layers = len(model.blocks) + 1
    # lr scale for layer i is layer_decay ** (num_layers - i); the top layer gets 1.0
    scales = [layer_decay ** (num_layers - i) for i in range(num_layers + 1)]
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # 1D params (norms, biases) and explicitly listed names get no weight decay
        if param.ndim == 1 or name in no_weight_decay_list:
            decay_tag, decay_val = 'no_decay', 0.0
        else:
            decay_tag, decay_val = 'decay', weight_decay
        layer_id = get_layer_id_for_vit(name, num_layers)
        key = 'layer_%d_%s' % (layer_id, decay_tag)
        if key not in group_names:
            group_names[key] = {'lr_scale': scales[layer_id], 'weight_decay': decay_val, 'params': []}
            groups[key] = {'lr_scale': scales[layer_id], 'weight_decay': decay_val, 'params': []}
        group_names[key]['params'].append(name)
        groups[key]['params'].append(param)
    return list(groups.values())
|
def get_layer_id_for_vit(name, num_layers):
    """
    Assign a parameter with its layer id
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
    """
    # Embeddings belong to layer 0; transformer block k to layer k+1;
    # everything else (final norm, head) to the last layer.
    if name in ('cls_token', 'pos_embed') or name.startswith('patch_embed'):
        return 0
    if name.startswith('blocks'):
        return int(name.split('.')[1]) + 1
    return num_layers
|
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate with half-cycle cosine after warmup"""
    if epoch < args.warmup_epochs:
        # linear warmup from 0 up to args.lr
        lr = args.lr * epoch / args.warmup_epochs
    else:
        # half-cycle cosine decay from args.lr down to args.min_lr
        lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * (1.0 + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)))
    for group in optimizer.param_groups:
        # honor per-group scaling from layer-wise lr decay when present
        group['lr'] = lr * group['lr_scale'] if 'lr_scale' in group else lr
    return lr
|
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)  # last `window_size` raw values
        self.total = 0.0  # running sum over the whole series
        self.count = 0    # number of samples seen
        self.fmt = '{median:.4f} ({global_avg:.4f})' if fmt is None else fmt

    def update(self, value, n=1):
        """Record `value`, counted with weight `n` toward the global average."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        count, total = stats.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        """Median over the current window."""
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        """Mean over the current window."""
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        """Weighted mean over the whole series."""
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value)
|
class MetricLogger(object):
    """Collects named SmoothedValue meters and pretty-prints training progress
    (iteration counter, ETA, per-iteration timing, optional GPU memory)
    while iterating over a data loader."""

    def __init__(self, delimiter='\t'):
        # meters are created lazily on first update() via defaultdict
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Update one meter per keyword argument; tensors are unwrapped to scalars."""
        for (k, v) in kwargs.items():
            if (v is None):
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # expose meters as attributes; only invoked when normal lookup fails
        if (attr in self.meters):
            return self.meters[attr]
        if (attr in self.__dict__):
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for (name, meter) in self.meters.items():
            loss_str.append('{}: {}'.format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        """Reduce every meter's count/total across distributed processes."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing a progress line every
        `print_freq` iterations (and on the final iteration)."""
        i = 0
        if (not header):
            header = ''
        start_time = time.time()
        end = time.time()
        # meters for data-loading time and total per-iteration time
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # width of the iteration counter, padded to the loader length
        space_fmt = ((':' + str(len(str(len(iterable))))) + 'd')
        log_msg = [header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}']
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = (1024.0 * 1024.0)
        for obj in iterable:
            data_time.update((time.time() - end))
            (yield obj)
            # time between yields covers one full training iteration
            iter_time.update((time.time() - end))
            if (((i % print_freq) == 0) or (i == (len(iterable) - 1))):
                eta_seconds = (iter_time.global_avg * (len(iterable) - i))
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)))
                else:
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / len(iterable))))
|
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    original_print = builtins.print

    def timestamped_print(*args, **kwargs):
        # `force=True` lets any rank print; very large jobs always print
        forced = kwargs.pop('force', False)
        forced = forced or (get_world_size() > 8)
        if is_master or forced:
            stamp = datetime.datetime.now().time()
            original_print('[{}] '.format(stamp), end='')
            original_print(*args, **kwargs)

    builtins.print = timestamped_print
|
def is_dist_avail_and_initialized():
    """Return True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
|
def get_world_size():
    """World size of the current distributed job (1 when not distributed)."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
|
def get_rank():
    """Rank of this process in the distributed job (0 when not distributed)."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
|
def is_main_process():
    """True on rank 0, the process responsible for logging and checkpointing."""
    return get_rank() == 0
|
def save_on_master(*args, **kwargs):
    """torch.save, executed only on the main process to avoid duplicate writes."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
|
def init_distributed_ddpjob(args=None):
    """
    Initialize the ddp job: set up the NCCL process group if needed, then
    populate args.rank / args.world_size / args.gpu / args.distributed.
    Falls back to single-process values (world_size=1, rank=0) when
    distributed initialization is unavailable.
    """
    if (not dist.is_available()) or (not dist.is_initialized()):
        try:
            os.environ['MASTER_PORT'] = '40101'
            torch.distributed.init_process_group(backend='nccl')
        except Exception as exc:
            # original did `print(Exception)`, which printed the class object
            # rather than the actual error
            print('distributed training not available')
            print(exc)
    if dist.is_available() and dist.is_initialized():
        world_size = dist.get_world_size()
        rank = dist.get_rank()
    else:
        # init failed: run as a single non-distributed process (the original
        # still called dist.get_world_size() here, which would raise)
        (world_size, rank) = (1, 0)
    assert (rank >= 0)
    # set rank/world_size before deriving gpu from them (the original read
    # args.rank before it was assigned)
    (args.world_size, args.rank) = (world_size, rank)
    args.gpu = args.rank
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    setup_for_distributed((args.rank == 0))
|
def init_distributed_mode(args):
    """Initialize torch.distributed from whichever launcher environment is present.

    Sets args.rank / args.world_size / args.gpu / args.distributed, pins the
    CUDA device, and silences print() on non-master ranks.
    """
    if args.dist_on_itp:
        # OpenMPI-launched (ITP) job: derive rank/size from OMPI env vars
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = ('tcp://%s:%s' % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']))
        # re-export in torchrun-style variables for downstream consumers
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
    elif (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
        # torchrun / torch.distributed.launch environment
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif ('SLURM_PROCID' in os.environ):
        # SLURM: map the global proc id onto the node-local GPUs
        # NOTE(review): args.world_size is not set here; it relies on the value
        # parsed from the command line — confirm that is intended.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = (args.rank % torch.cuda.device_count())
    else:
        print('Not using distributed mode')
        setup_for_distributed(is_master=True)
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(args.rank, args.dist_url, args.gpu), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed((args.rank == 0))
|
class NativeScalerWithGradNormCount():
    """Wraps torch.cuda.amp.GradScaler: scales the loss, backpropagates,
    optionally clips gradients, and returns the gradient norm for logging."""
    state_dict_key = 'amp_scaler'

    def __init__(self):
        self._scaler = torch.cuda.amp.GradScaler()

    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if not update_grad:
            # gradient-accumulation step: backprop only, no optimizer update
            return None
        if clip_grad is not None:
            assert parameters is not None
            # unscale first so clipping sees the true gradients
            self._scaler.unscale_(optimizer)
            norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
        else:
            self._scaler.unscale_(optimizer)
            norm = get_grad_norm_(parameters)
        self._scaler.step(optimizer)
        self._scaler.update()
        return norm

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
|
def get_grad_norm_(parameters, norm_type: float=2.0) -> torch.Tensor:
    """Total norm of the gradients over `parameters`.

    Tensors without a .grad are skipped; returns 0.0 when nothing has a grad.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad.detach() for p in parameters if p.grad is not None]
    if not grads:
        return torch.tensor(0.0)
    norm_type = float(norm_type)
    device = grads[0].device
    if norm_type == inf:
        # infinity norm: largest absolute entry across all gradients
        return max(g.abs().max().to(device) for g in grads)
    return torch.norm(torch.stack([torch.norm(g, norm_type).to(device) for g in grads]), norm_type)
|
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler):
    """Save a training checkpoint; delegates to model.save_checkpoint when no
    loss scaler is used (deepspeed-style models own their checkpointing)."""
    output_dir = Path(args.output_dir)
    tag = str(epoch)
    if loss_scaler is None:
        # the model handles its own (sharded) checkpointing
        model.save_checkpoint(save_dir=args.output_dir, tag=('checkpoint-%s' % tag), client_state={'epoch': epoch})
        return
    state = {
        'model': model_without_ddp.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch,
        'scaler': loss_scaler.state_dict(),
        'args': args,
    }
    save_on_master(state, output_dir / ('checkpoint-%s.pth' % tag))
|
def load_model(args, model_without_ddp, optimizer, loss_scaler):
    """Resume model (and, unless evaluating, optimizer/scaler/epoch) from args.resume."""
    if not args.resume:
        return
    if args.resume.startswith('https'):
        checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location='cpu', check_hash=True)
    else:
        checkpoint = torch.load(args.resume, map_location='cpu')
    model_without_ddp.load_state_dict(checkpoint['model'])
    print('Resume checkpoint %s' % args.resume)
    evaluating = hasattr(args, 'eval') and args.eval
    if ('optimizer' in checkpoint) and ('epoch' in checkpoint) and (not evaluating):
        optimizer.load_state_dict(checkpoint['optimizer'])
        args.start_epoch = checkpoint['epoch'] + 1
        if 'scaler' in checkpoint:
            loss_scaler.load_state_dict(checkpoint['scaler'])
        print('With optim & sched!')
|
def all_reduce_mean(x):
    """Average scalar `x` across all distributed processes (no-op when world size is 1)."""
    world_size = get_world_size()
    if world_size <= 1:
        return x
    reduced = torch.tensor(x).cuda()
    dist.all_reduce(reduced)
    reduced /= world_size
    return reduced.item()
|
def param_groups_weight_decay(model: nn.Module, weight_decay=1e-05, no_weight_decay_list=()):
    """Split parameters into a no-weight-decay group (<=1D params, biases, and
    explicitly listed names) and a weight-decay group (everything else)."""
    skip = set(no_weight_decay_list)
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if param.ndim <= 1 or name.endswith('.bias') or name in skip:
            no_decay.append(param)
        else:
            decay.append(param)
    return [
        {'params': no_decay, 'weight_decay': 0.0},
        {'params': decay, 'weight_decay': weight_decay},
    ]
|
def _group(it, size):
it = iter(it)
return iter((lambda : tuple(islice(it, size))), ())
|
def _layer_map(model, layers_per_group=12, num_groups=None):
    """Map parameter names to layer-group ids: trunk parameters are chunked
    into groups of `layers_per_group` (or `num_groups` equal chunks) and all
    classifier-head parameters land in the final group."""
    def _in_head(name, head_prefix):
        if not head_prefix:
            return True
        if isinstance(head_prefix, (tuple, list)):
            return any(name.startswith(p) for p in head_prefix)
        return name.startswith(head_prefix)

    head_prefix = getattr(model, 'pretrained_cfg', {}).get('classifier', None)
    names_trunk = []
    names_head = []
    for name, _ in model.named_parameters():
        (names_head if _in_head(name, head_prefix) else names_trunk).append(name)
    if num_groups is not None:
        # ceil division: spread trunk params over exactly num_groups chunks
        layers_per_group = -(len(names_trunk) // -num_groups)
    trunk_chunks = list(_group(names_trunk, layers_per_group))
    layer_map = {name: idx for idx, chunk in enumerate(trunk_chunks) for name in chunk}
    layer_map.update({name: len(trunk_chunks) for name in names_head})
    return layer_map
|
def param_groups_layer_decay(model: nn.Module, weight_decay: float=0.05, no_weight_decay_list: Tuple[str]=(), layer_decay: float=0.75, end_layer_decay: Optional[float]=None):
    """
    Parameter groups for layer-wise lr decay & weight decay
    Based on BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58
    """
    skip = set(no_weight_decay_list)
    group_names = {}
    groups = {}
    # map each parameter name to a layer index, using the model's own matcher
    # when available, otherwise the generic trunk/head heuristic
    if hasattr(model, 'group_matcher'):
        layer_map = group_parameters(model, model.group_matcher(coarse=False), reverse=True)
    else:
        layer_map = _layer_map(model)
    num_layers = max(layer_map.values()) + 1
    layer_max = num_layers - 1
    # lr scale for layer i is layer_decay ** (layer_max - i); last layer gets 1.0
    layer_scales = [layer_decay ** (layer_max - i) for i in range(num_layers)]
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # 1D params (norms, biases) and explicitly listed names get no weight decay
        if param.ndim == 1 or name in skip:
            g_decay, this_decay = 'no_decay', 0.0
        else:
            g_decay, this_decay = 'decay', weight_decay
        layer_id = layer_map.get(name, layer_max)
        key = 'layer_%d_%s' % (layer_id, g_decay)
        if key not in groups:
            scale = layer_scales[layer_id]
            group_names[key] = {'lr_scale': scale, 'weight_decay': this_decay, 'param_names': []}
            groups[key] = {'lr_scale': scale, 'weight_decay': this_decay, 'params': []}
        group_names[key]['param_names'].append(name)
        groups[key]['params'].append(param)
    print('parameter groups: \n%s' % json.dumps(group_names, indent=2))
    return list(groups.values())
|
def optimizer_kwargs(cfg):
    """ cfg/argparse to kwargs helper
    Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn.
    """
    kwargs = dict(opt=cfg.opt, lr=cfg.lr, weight_decay=cfg.weight_decay, momentum=cfg.momentum)
    # optional fields: only forward when explicitly set on cfg
    for attr, key in (('opt_eps', 'eps'), ('opt_betas', 'betas'), ('layer_decay', 'layer_decay')):
        val = getattr(cfg, attr, None)
        if val is not None:
            kwargs[key] = val
    extra = getattr(cfg, 'opt_args', None)
    if extra is not None:
        kwargs.update(extra)
    return kwargs
|
def create_optimizer(args, model, filter_bias_and_bn=True):
    """ Legacy optimizer factory for backwards compatibility.
    NOTE: Use create_optimizer_v2 for new code.
    """
    kwargs = optimizer_kwargs(cfg=args)
    return create_optimizer_v2(model, filter_bias_and_bn=filter_bias_and_bn, **kwargs)
|
def create_optimizer_v2(model_or_params, opt: str='sgd', lr: Optional[float]=None, weight_decay: float=0.0, momentum: float=0.9, filter_bias_and_bn: bool=True, layer_decay: Optional[float]=None, param_group_fn: Optional[Callable]=None, **kwargs):
    """ Create an optimizer.

    TODO currently the model is passed in and all parameters are selected for optimization.
    For more general use an interface that allows selection of parameters to optimize and lr groups, one of:
    * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion
    * expose the parameters interface and leave it up to caller

    Args:
        model_or_params (nn.Module): model containing parameters to optimize
        opt: name of optimizer to create
        lr: initial learning rate
        weight_decay: weight decay to apply in optimizer
        momentum: momentum for momentum based optimizers (others may use betas via kwargs)
        filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay
        **kwargs: extra optimizer specific kwargs to pass through

    Returns:
        Optimizer

    Raises:
        ValueError: if `opt` does not name a known optimizer.
    """
    if isinstance(model_or_params, nn.Module):
        # resolve the model's parameters into (possibly grouped) param lists
        no_weight_decay = {}
        if hasattr(model_or_params, 'no_weight_decay'):
            no_weight_decay = model_or_params.no_weight_decay()
        if param_group_fn:
            parameters = param_group_fn(model_or_params)
        elif layer_decay is not None:
            parameters = param_groups_layer_decay(model_or_params, weight_decay=weight_decay, layer_decay=layer_decay, no_weight_decay_list=no_weight_decay)
            weight_decay = 0.0  # decay now lives in the per-group settings
        elif weight_decay and filter_bias_and_bn:
            parameters = param_groups_weight_decay(model_or_params, weight_decay, no_weight_decay)
            weight_decay = 0.0  # decay now lives in the per-group settings
        else:
            parameters = model_or_params.parameters()
    else:
        # caller passed an iterable of parameters / parameter groups directly
        parameters = model_or_params
    # optionally prefixed opt names, e.g. 'lookahead_adamw'
    opt_lower = opt.lower()
    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
    opt_args = dict(weight_decay=weight_decay, **kwargs)
    if lr is not None:
        opt_args.setdefault('lr', lr)
    # dispatch on the optimizer name
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        opt_args.pop('eps', None)  # SGD takes no eps
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'sam':
        opt_args.pop('eps', None)
        optimizer = SAM(parameters, optim.SGD, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'adan':
        optimizer = Adan(parameters, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == 'sgdp':
        optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'adamp':
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == 'nadam':
        try:
            # newer torch versions ship a native NAdam
            optimizer = optim.Nadam(parameters, **opt_args)
        except AttributeError:
            optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == 'adamax':
        optimizer = optim.Adamax(parameters, **opt_args)
    elif opt_lower == 'adabelief':
        optimizer = AdaBelief(parameters, rectify=False, **opt_args)
    elif opt_lower == 'radabelief':
        optimizer = AdaBelief(parameters, rectify=True, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'adagrad':
        opt_args.setdefault('eps', 1e-08)
        optimizer = optim.Adagrad(parameters, **opt_args)
    elif opt_lower == 'adafactor':
        optimizer = Adafactor(parameters, **opt_args)
    elif opt_lower == 'lamb':
        optimizer = Lamb(parameters, **opt_args)
    elif opt_lower == 'lambc':
        optimizer = Lamb(parameters, trust_clip=True, **opt_args)
    elif opt_lower == 'larc':
        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args)
    elif opt_lower == 'lars':
        optimizer = Lars(parameters, momentum=momentum, **opt_args)
    elif opt_lower == 'nlarc':
        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args)
    elif opt_lower == 'nlars':
        optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'madgrad':
        optimizer = MADGRAD(parameters, momentum=momentum, **opt_args)
    elif opt_lower == 'madgradw':
        optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args)
    elif opt_lower == 'novograd' or opt_lower == 'nvnovograd':
        optimizer = NvNovoGrad(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args)
    elif opt_lower == 'adahessian':
        optimizer = Adahessian(parameters, **opt_args)
    elif opt_lower == 'fusedsgd':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'fusedmomentum':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == 'fusednovograd':
        opt_args.setdefault('betas', (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)
    else:
        # the original `assert (False and 'Invalid optimizer')` always raised a
        # message-less AssertionError (and disappears under `python -O`),
        # leaving the intended ValueError unreachable; raise it properly.
        raise ValueError(f'Invalid optimizer: {opt}')
    if len(opt_split) > 1:
        # e.g. 'lookahead_adamw' wraps the created optimizer
        if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)
    return optimizer
|
class SAM(torch.optim.Optimizer):
    """Sharpness-Aware Minimization.

    Wraps a base optimizer class: `first_step` perturbs the weights toward the
    (approximate) worst case within an L2 ball of radius rho, `second_step`
    restores the original weights and applies the base optimizer's update
    using the gradients computed at the perturbed point.
    """

    def __init__(self, params, base_optimizer, rho=0.05, adaptive=False, **kwargs):
        # base_optimizer is a class (e.g. optim.SGD); it is instantiated over
        # this optimizer's param_groups so hyper-parameters stay shared, and
        # self.param_groups is re-aliased to the base optimizer's groups.
        assert (rho >= 0.0), f'Invalid rho, should be non-negative: {rho}'
        defaults = dict(rho=rho, adaptive=adaptive, **kwargs)
        super(SAM, self).__init__(params, defaults)
        self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
        self.param_groups = self.base_optimizer.param_groups

    @torch.no_grad()
    def first_step(self, zero_grad=False):
        """Ascent step: move each param by rho * grad / ||grad|| (per-param scaled when adaptive)."""
        grad_norm = self._grad_norm()
        for group in self.param_groups:
            scale = (group['rho'] / (grad_norm + 1e-12))
            for p in group['params']:
                if (p.grad is None):
                    continue
                # stash the unperturbed weights so second_step can restore them
                self.state[p]['old_p'] = p.data.clone()
                e_w = (((torch.pow(p, 2) if group['adaptive'] else 1.0) * p.grad) * scale.to(p))
                p.add_(e_w)
        if zero_grad:
            self.zero_grad()

    @torch.no_grad()
    def second_step(self, zero_grad=False):
        """Descent step: restore the original weights, then run the base optimizer."""
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                p.data = self.state[p]['old_p']
        self.base_optimizer.step()
        if zero_grad:
            self.zero_grad()

    @torch.no_grad()
    def step(self, closure=None):
        """Full SAM step; `closure` must re-run forward+backward at the perturbed weights."""
        assert (closure is not None), 'Sharpness Aware Minimization requires closure, but it was not provided'
        closure = torch.enable_grad()(closure)
        self.first_step(zero_grad=True)
        closure()
        self.second_step()

    def _grad_norm(self):
        # global gradient norm across all param groups, gathered on one device
        shared_device = self.param_groups[0]['params'][0].device
        norm = torch.norm(torch.stack([((torch.abs(p) if group['adaptive'] else 1.0) * p.grad).norm(p=2).to(shared_device) for group in self.param_groups for p in group['params'] if (p.grad is not None)]), p=2)
        return norm

    def load_state_dict(self, state_dict):
        super().load_state_dict(state_dict)
        # keep the base optimizer's groups aliased to ours after a reload
        self.base_optimizer.param_groups = self.param_groups
|
def _parse_args():
    """Parse CLI args, letting an optional YAML config file set defaults
    that explicit command-line flags can still override."""
    args_config, remaining = config_parser.parse_known_args()
    if args_config.config:
        with open(args_config.config, 'r') as f:
            cfg = yaml.safe_load(f)
        parser.set_defaults(**cfg)
    args = parser.parse_args(remaining)
    # keep a serialized copy of the resolved config for logging/reproducibility
    args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
    return args, args_text
|
def main():
    """Entry point: set up distributed training, build the model, optimizer,
    data loaders and loss functions, then run the train/validate epoch loop,
    checkpointing on the configured eval metric.  All configuration comes from
    the module-level argument parser via ``_parse_args``."""
    setup_default_logging()
    (args, args_text) = _parse_args()
    # Optional experiment tracking via Weights & Biases.
    if args.log_wandb:
        if has_wandb:
            wandb.init(project=args.experiment, config=args)
        else:
            _logger.warning("You've requested to log metrics to wandb but package not found. Metrics not being logged to wandb, try `pip install wandb`")
    args.prefetcher = (not args.no_prefetcher)
    # Distributed setup: WORLD_SIZE > 1 (set by the launcher) enables DDP,
    # one process per GPU.
    args.distributed = False
    if ('WORLD_SIZE' in os.environ):
        args.distributed = (int(os.environ['WORLD_SIZE']) > 1)
    args.device = 'cuda:0'
    args.world_size = 1
    args.rank = 0
    if args.distributed:
        args.device = ('cuda:%d' % args.local_rank)
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        args.world_size = torch.distributed.get_world_size()
        args.rank = torch.distributed.get_rank()
        _logger.info(('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.' % (args.rank, args.world_size)))
    else:
        _logger.info('Training with a single process on 1 GPUs.')
    assert (args.rank >= 0)
    # Resolve AMP flavor: prefer native torch.cuda.amp, fall back to APEX.
    use_amp = None
    if args.amp:
        if has_native_amp:
            args.native_amp = True
        elif has_apex:
            args.apex_amp = True
    if (args.apex_amp and has_apex):
        use_amp = 'apex'
    elif (args.native_amp and has_native_amp):
        use_amp = 'native'
    elif (args.apex_amp or args.native_amp):
        _logger.warning('Neither APEX or native Torch AMP is available, using float32. Install NVIDA apex or upgrade to PyTorch 1.6')
    # Seed per-rank so data augmentation differs across workers.
    random_seed(args.seed, args.rank)
    model = create_model(args.model, pretrained=args.pretrained, num_classes=args.num_classes, drop_rate=args.drop, drop_connect_rate=args.drop_connect, drop_path_rate=args.drop_path, drop_block_rate=args.drop_block, global_pool=args.gp, bn_momentum=args.bn_momentum, bn_eps=args.bn_eps, scriptable=args.torchscript, checkpoint_path=args.initial_checkpoint)
    if (args.num_classes is None):
        assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
        args.num_classes = model.num_classes
    if (args.local_rank == 0):
        _logger.info(f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}')
    data_config = resolve_data_config(vars(args), model=model, verbose=(args.local_rank == 0))
    # AugMix-style augmentation splits; split-BN keeps separate BN stats per split.
    num_aug_splits = 0
    if (args.aug_splits > 0):
        assert (args.aug_splits > 1), 'A split of 1 makes no sense'
        num_aug_splits = args.aug_splits
    if args.split_bn:
        assert ((num_aug_splits > 1) or args.resplit)
        model = convert_splitbn_model(model, max(num_aug_splits, 2))
    model.cuda()
    if args.channels_last:
        model = model.to(memory_format=torch.channels_last)
    # Synchronized BatchNorm across DDP workers (APEX or native variant).
    if (args.distributed and args.sync_bn):
        assert (not args.split_bn)
        if (has_apex and (use_amp == 'apex')):
            model = convert_syncbn_model(model)
        else:
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        if (args.local_rank == 0):
            _logger.info('Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
    if args.torchscript:
        assert (not (use_amp == 'apex')), 'Cannot use APEX AMP with torchscripted model'
        assert (not args.sync_bn), 'Cannot use SyncBatchNorm with torchscripted model'
        model = torch.jit.script(model)
    # Optimizer; the Adan optimizer takes extra kwargs through args.opt_args.
    opt_lower = args.opt.lower()
    if (opt_lower == 'adan'):
        args.opt_args = {'max_grad_norm': args.max_grad_norm, 'no_prox': args.no_prox}
    optimizer = create_optimizer(args, model, filter_bias_and_bn=(not args.bias_decay))
    print(optimizer)
    # AMP wiring: autocast context + loss scaler matching the chosen backend.
    amp_autocast = suppress
    loss_scaler = None
    if (use_amp == 'apex'):
        (model, optimizer) = amp.initialize(model, optimizer, opt_level='O1')
        loss_scaler = ApexScaler()
        if (args.local_rank == 0):
            _logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
    elif (use_amp == 'native'):
        amp_autocast = torch.cuda.amp.autocast
        loss_scaler = NativeScaler()
        if (args.local_rank == 0):
            _logger.info('Using native Torch AMP. Training in mixed precision.')
    elif (args.local_rank == 0):
        _logger.info('AMP not enabled. Training in float32.')
    # Auto-resume: if the experiment dir already holds a last.pth.tar and no
    # explicit --resume was given, resume from it.
    resume_epoch = None
    if args.experiment:
        output_dir = get_outdir((args.output if args.output else './output/train'), args.experiment)
        resume_path = os.path.join(output_dir, 'last.pth.tar')
        print(resume_path, os.path.exists(resume_path))
        if (os.path.exists(resume_path) and (not args.resume)):
            args.resume = resume_path
    if args.resume:
        resume_epoch = resume_checkpoint(model, args.resume, optimizer=(None if args.no_resume_opt else optimizer), loss_scaler=(None if args.no_resume_opt else loss_scaler), log_info=(args.local_rank == 0))
    # Exponential moving average of model weights, created after resume so the
    # EMA starts from the resumed weights (EMA state loaded separately below).
    model_ema = None
    if args.model_ema:
        model_ema = ModelEmaV2(model, decay=args.model_ema_decay, device=('cpu' if args.model_ema_force_cpu else None))
        if args.resume:
            load_checkpoint(model_ema.module, args.resume, use_ema=True)
    # DDP wrapping happens after EMA/resume so those see the bare module.
    if args.distributed:
        if (has_apex and (use_amp == 'apex')):
            if (args.local_rank == 0):
                _logger.info('Using NVIDIA APEX DistributedDataParallel.')
            model = ApexDDP(model, delay_allreduce=True)
        else:
            if (args.local_rank == 0):
                _logger.info('Using native Torch DistributedDataParallel.')
            model = NativeDDP(model, device_ids=[args.local_rank])
    (lr_scheduler, num_epochs) = create_scheduler(args, optimizer)
    start_epoch = 0
    if (args.start_epoch is not None):
        # Explicit start epoch wins over the epoch recovered from a checkpoint.
        start_epoch = args.start_epoch
    elif (resume_epoch is not None):
        start_epoch = resume_epoch
    if ((lr_scheduler is not None) and (start_epoch > 0)):
        # Fast-forward the schedule to the resume point.
        lr_scheduler.step(start_epoch)
    if (args.local_rank == 0):
        _logger.info('Scheduled epochs: {}'.format(num_epochs))
    dataset_train = create_dataset(args.dataset, root=args.data_dir, split=args.train_split, is_training=True, batch_size=args.batch_size, repeats=args.epoch_repeats)
    dataset_eval = create_dataset(args.dataset, root=args.data_dir, split=args.val_split, is_training=False, batch_size=args.batch_size)
    # Mixup/CutMix: applied in the collate fn when prefetching, else per-batch.
    collate_fn = None
    mixup_fn = None
    mixup_active = ((args.mixup > 0) or (args.cutmix > 0.0) or (args.cutmix_minmax is not None))
    if mixup_active:
        mixup_args = dict(mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.num_classes)
        if args.prefetcher:
            assert (not num_aug_splits)
            collate_fn = FastCollateMixup(**mixup_args)
        else:
            mixup_fn = Mixup(**mixup_args)
    if (num_aug_splits > 1):
        dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
    train_interpolation = args.train_interpolation
    if (args.no_aug or (not train_interpolation)):
        train_interpolation = data_config['interpolation']
    loader_train = create_loader(dataset_train, input_size=data_config['input_size'], batch_size=args.batch_size, is_training=True, use_prefetcher=args.prefetcher, no_aug=args.no_aug, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, re_split=args.resplit, scale=args.scale, ratio=args.ratio, hflip=args.hflip, vflip=args.vflip, color_jitter=args.color_jitter, auto_augment=args.aa, num_aug_repeats=args.aug_repeats, num_aug_splits=num_aug_splits, interpolation=train_interpolation, mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, distributed=args.distributed, collate_fn=collate_fn, pin_memory=args.pin_mem, use_multi_epochs_loader=args.use_multi_epochs_loader)
    loader_eval = create_loader(dataset_eval, input_size=data_config['input_size'], batch_size=(args.validation_batch_size or args.batch_size), is_training=False, use_prefetcher=args.prefetcher, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, distributed=args.distributed, crop_pct=data_config['crop_pct'], pin_memory=args.pin_mem)
    # Training loss: JSD (for aug splits) > soft-target/BCE (mixup) >
    # smoothed CE > plain CE.  Validation always uses plain CE.
    if args.jsd_loss:
        assert (num_aug_splits > 1)
        train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing)
    elif mixup_active:
        if args.bce_loss:
            train_loss_fn = nn.BCEWithLogitsLoss()
        else:
            train_loss_fn = SoftTargetCrossEntropy()
    elif args.smoothing:
        if args.bce_loss:
            train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing)
        else:
            train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        train_loss_fn = nn.CrossEntropyLoss()
    train_loss_fn = train_loss_fn.cuda()
    validate_loss_fn = nn.CrossEntropyLoss().cuda()
    eval_metric = args.eval_metric
    best_metric = None
    best_epoch = None
    # Only rank 0 owns the output dir, checkpoint saver, and args.yaml dump.
    saver = None
    output_dir = None
    if (args.rank == 0):
        if args.experiment:
            exp_name = args.experiment
        else:
            exp_name = '-'.join([datetime.now().strftime('%Y%m%d-%H%M%S'), safe_model_name(args.model), str(data_config['input_size'][(- 1)])])
        output_dir = get_outdir((args.output if args.output else './output/train'), exp_name)
        decreasing = (True if (eval_metric == 'loss') else False)
        saver = CheckpointSaver(model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler, checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist)
        with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
            f.write(args_text)
    # Main epoch loop; Ctrl-C exits cleanly and still reports the best metric.
    try:
        for epoch in range(start_epoch, num_epochs):
            if (args.distributed and hasattr(loader_train.sampler, 'set_epoch')):
                loader_train.sampler.set_epoch(epoch)
            train_metrics = train_one_epoch(epoch, model, loader_train, optimizer, train_loss_fn, args, lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir, amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)
            if (args.distributed and (args.dist_bn in ('broadcast', 'reduce'))):
                if (args.local_rank == 0):
                    _logger.info('Distributing BatchNorm running means and vars')
                distribute_bn(model, args.world_size, (args.dist_bn == 'reduce'))
            eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)
            # When EMA is active (and on GPU), the EMA model's metrics drive
            # checkpoint selection instead of the raw model's.
            if ((model_ema is not None) and (not args.model_ema_force_cpu)):
                if (args.distributed and (args.dist_bn in ('broadcast', 'reduce'))):
                    distribute_bn(model_ema, args.world_size, (args.dist_bn == 'reduce'))
                ema_eval_metrics = validate(model_ema.module, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')
                eval_metrics = ema_eval_metrics
            if (lr_scheduler is not None):
                lr_scheduler.step((epoch + 1), eval_metrics[eval_metric])
            if (output_dir is not None):
                update_summary(epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'), write_header=(best_metric is None), log_wandb=(args.log_wandb and has_wandb))
            if (saver is not None):
                save_metric = eval_metrics[eval_metric]
                (best_metric, best_epoch) = saver.save_checkpoint(epoch, metric=save_metric)
    except KeyboardInterrupt:
        pass
    if (best_metric is not None):
        _logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
|
def train_one_epoch(epoch, model, loader, optimizer, loss_fn, args, lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, loss_scaler=None, model_ema=None, mixup_fn=None):
    """Train `model` for one epoch over `loader` and return an OrderedDict with
    the average training loss.  Handles AMP (via `amp_autocast`/`loss_scaler`),
    gradient clipping, EMA updates, periodic logging/recovery checkpoints, and
    per-step LR scheduling.

    NOTE(review): this definition shadows the earlier `train_one_epoch` in this
    file (the MAE-style one in the file head) — only this one is callable.
    """
    # Disable mixup once the configured cut-off epoch is reached.
    if (args.mixup_off_epoch and (epoch >= args.mixup_off_epoch)):
        if (args.prefetcher and loader.mixup_enabled):
            loader.mixup_enabled = False
        elif (mixup_fn is not None):
            mixup_fn.mixup_enabled = False
    # Some optimizers (e.g. AdaHessian) need create_graph=True on backward.
    second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    losses_m = AverageMeter()
    model.train()
    end = time.time()
    last_idx = (len(loader) - 1)
    num_updates = (epoch * len(loader))
    for (batch_idx, (input, target)) in enumerate(loader):
        last_batch = (batch_idx == last_idx)
        data_time_m.update((time.time() - end))
        # The prefetcher already moved data to GPU and applied mixup collation.
        if (not args.prefetcher):
            (input, target) = (input.cuda(), target.cuda())
            if (mixup_fn is not None):
                (input, target) = mixup_fn(input, target)
        if args.channels_last:
            input = input.contiguous(memory_format=torch.channels_last)
        with amp_autocast():
            output = model(input)
            loss = loss_fn(output, target)
        if (not args.distributed):
            losses_m.update(loss.item(), input.size(0))
        optimizer.zero_grad()
        if (loss_scaler is not None):
            # AMP path: scaler handles backward, unscale, clip, and step.
            loss_scaler(loss, optimizer, clip_grad=args.clip_grad, clip_mode=args.clip_mode, parameters=model_parameters(model, exclude_head=('agc' in args.clip_mode)), create_graph=second_order)
        else:
            loss.backward(create_graph=second_order)
            if (args.clip_grad is not None):
                dispatch_clip_grad(model_parameters(model, exclude_head=('agc' in args.clip_mode)), value=args.clip_grad, mode=args.clip_mode)
            optimizer.step()
        if (model_ema is not None):
            model_ema.update(model)
        torch.cuda.synchronize()
        num_updates += 1
        batch_time_m.update((time.time() - end))
        if (last_batch or ((batch_idx % args.log_interval) == 0)):
            # Log the mean LR across param groups (groups may differ, e.g.
            # with layer-wise decay).
            lrl = [param_group['lr'] for param_group in optimizer.param_groups]
            lr = (sum(lrl) / len(lrl))
            if args.distributed:
                # In DDP, only reduce the loss at log intervals to limit
                # communication overhead.
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                losses_m.update(reduced_loss.item(), input.size(0))
            if (args.local_rank == 0):
                _logger.info('Train: {} [{:>4d}/{} ({:>3.0f}%)] Loss: {loss.val:#.4g} ({loss.avg:#.3g}) Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) LR: {lr:.3e} Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(epoch, batch_idx, len(loader), ((100.0 * batch_idx) / last_idx), loss=losses_m, batch_time=batch_time_m, rate=((input.size(0) * args.world_size) / batch_time_m.val), rate_avg=((input.size(0) * args.world_size) / batch_time_m.avg), lr=lr, data_time=data_time_m))
                if (args.save_images and output_dir):
                    torchvision.utils.save_image(input, os.path.join(output_dir, ('train-batch-%d.jpg' % batch_idx)), padding=0, normalize=True)
        # Periodic crash-recovery checkpoints, independent of eval metric.
        if ((saver is not None) and args.recovery_interval and (last_batch or (((batch_idx + 1) % args.recovery_interval) == 0))):
            saver.save_recovery(epoch, batch_idx=batch_idx)
        if (lr_scheduler is not None):
            lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
        end = time.time()
    if hasattr(optimizer, 'sync_lookahead'):
        optimizer.sync_lookahead()
    return OrderedDict([('loss', losses_m.avg)])
|
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
    """Evaluate `model` over `loader` and return an OrderedDict with average
    loss, top-1 and top-5 accuracy.  Supports test-time augmentation folding
    (args.tta) and distributed metric reduction."""
    batch_time_m = AverageMeter()
    losses_m = AverageMeter()
    top1_m = AverageMeter()
    top5_m = AverageMeter()
    model.eval()
    end = time.time()
    last_idx = (len(loader) - 1)
    with torch.no_grad():
        for (batch_idx, (input, target)) in enumerate(loader):
            last_batch = (batch_idx == last_idx)
            if (not args.prefetcher):
                input = input.cuda()
                target = target.cuda()
            if args.channels_last:
                input = input.contiguous(memory_format=torch.channels_last)
            with amp_autocast():
                output = model(input)
            # Some models return auxiliary outputs; score only the primary one.
            if isinstance(output, (tuple, list)):
                output = output[0]
            # TTA: the batch holds `tta` augmented copies per sample; average
            # their logits and keep one target per group.
            reduce_factor = args.tta
            if (reduce_factor > 1):
                output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
                target = target[0:target.size(0):reduce_factor]
            loss = loss_fn(output, target)
            (acc1, acc5) = accuracy(output, target, topk=(1, 5))
            if args.distributed:
                # Average metrics across workers so every rank logs globals.
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                acc1 = reduce_tensor(acc1, args.world_size)
                acc5 = reduce_tensor(acc5, args.world_size)
            else:
                reduced_loss = loss.data
            torch.cuda.synchronize()
            losses_m.update(reduced_loss.item(), input.size(0))
            top1_m.update(acc1.item(), output.size(0))
            top5_m.update(acc5.item(), output.size(0))
            batch_time_m.update((time.time() - end))
            end = time.time()
            if ((args.local_rank == 0) and (last_batch or ((batch_idx % args.log_interval) == 0))):
                log_name = ('Test' + log_suffix)
                _logger.info('{0}: [{1:>4d}/{2}] Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(log_name, batch_idx, last_idx, batch_time=batch_time_m, loss=losses_m, top1=top1_m, top5=top5_m))
    metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
    return metrics
|
@dataclass
class FairseqAdanConfig(FairseqDataclass):
    """Configuration dataclass for the fairseq Adan optimizer wrapper.

    Fixes two copy-paste typos in the user-facing help strings:
    'wether' -> 'whether' and the `adan_eps` help said 'Adam' for 'Adan'.
    """
    # Three momentum coefficients (beta1, beta2, beta3) used by Adan.
    adan_betas: Any = field(default=(0.98, 0.92, 0.99), metadata={'help': 'betas for Adan optimizer'})
    adan_eps: float = field(default=1e-08, metadata={'help': 'epsilon for Adan optimizer'})
    weight_decay: float = field(default=0.0, metadata={'help': 'weight decay'})
    no_prox: bool = field(default=False, metadata={'help': 'whether to perform prox operator'})
    fp16_adan_stats: bool = field(default=False, metadata={'help': 'use FP16 stats (with automatic scaling)'})
    # Interpolated from the global fairseq config at runtime.
    tpu: bool = II('common.tpu')
    lr: List[float] = II('optimization.lr')
|
@register_optimizer('adan', dataclass=FairseqAdanConfig)
class FairseqAdan(FairseqOptimizer):
    """Adan optimizer for fairseq.

    Wraps the plain :class:`Adan` implementation below.  No fused CUDA kernel
    is wired in here (``fused_adan_cls`` is hard-coded to None), so the
    pure-PyTorch path is used on both GPU and TPU.
    """
    def __init__(self, cfg: FairseqAdanConfig, params):
        super().__init__(cfg)
        # NOTE(review): fused_adan_cls is always None, so use_fused_adan is
        # always False and the `elif use_fused_adan` branch is dead code.  The
        # NotImplementedError messages also still say 'adam'/'FusedAdanV1' —
        # looks like a copy from the fairseq Adam wrapper; confirm intent
        # before changing user-visible messages.
        fused_adan_cls = None
        use_fused_adan = ((fused_adan_cls is not None) and torch.cuda.is_available())
        if getattr(cfg, 'tpu', False):
            if self.cfg.fp16_adan_stats:
                raise NotImplementedError('--fp16-adam-stats is only supported on GPU')
            self._optimizer = Adan(params, **self.optimizer_config)
        elif use_fused_adan:
            raise NotImplementedError('--fp16-adam-stats is only supported on GPU')
        else:
            if self.cfg.fp16_adan_stats:
                raise NotImplementedError('--fp16-adam-stats is only supported with FusedAdanV1')
            self._optimizer = Adan(params, **self.optimizer_config)
    @property
    def optimizer_config(self):
        """Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {'lr': (self.cfg.lr[0] if isinstance(self.cfg.lr, Collection) else self.cfg.lr), 'betas': (eval(self.cfg.adan_betas) if isinstance(self.cfg.adan_betas, str) else OmegaConf.to_container(self.cfg.adan_betas)), 'eps': self.cfg.adan_eps, 'weight_decay': self.cfg.weight_decay}
    def average_params(self):
        """Reduce Params is only used during BMUF distributed training."""
        state_dict = self.optimizer.state_dict()
        total_gpus = float(dist.get_world_size())
        # Average each moment buffer across workers: divide locally first, then
        # all-reduce (SUM) so every rank ends up with the mean.
        for (_, value) in state_dict['state'].items():
            value['exp_avg'] /= total_gpus
            value['exp_avg_sq'] /= total_gpus
            value['exp_avg_diff'] /= total_gpus
            dist.all_reduce(value['exp_avg'], op=dist.ReduceOp.SUM)
            dist.all_reduce(value['exp_avg_sq'], op=dist.ReduceOp.SUM)
            dist.all_reduce(value['exp_avg_diff'], op=dist.ReduceOp.SUM)
|
class Adan(torch.optim.Optimizer):
    """Implements the Adan (Adaptive Nesterov Momentum) algorithm.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float, float], optional): coefficients used for
            computing running averages of the gradient, the gradient
            difference, and the squared update (default: (0.98, 0.92, 0.99))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        no_prox (bool, optional): if True, apply decoupled weight decay
            multiplicatively before the update instead of the proximal
            shrink after it (default: False)
    """
    def __init__(self, params, lr=0.001, betas=(0.98, 0.92, 0.99), eps=1e-08, weight_decay=0.0, no_prox=False):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, no_prox=no_prox)
        super(Adan, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(Adan, self).__setstate__(state)
        for group in self.param_groups:
            # Checkpoints written before `no_prox` existed may lack the key.
            group.setdefault('no_prox', False)
    @property
    def supports_memory_efficient_fp16(self):
        # Advertises compatibility with fairseq's memory-efficient FP16 wrapper.
        return True
    @property
    def supports_flat_params(self):
        return True
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            (beta1, beta2, beta3) = group['betas']
            # Per-group step counter drives the bias corrections below.
            if ('step' in group):
                group['step'] += 1
            else:
                group['step'] = 1
            bias_correction1 = (1.0 - (beta1 ** group['step']))
            bias_correction2 = (1.0 - (beta2 ** group['step']))
            bias_correction3 = (1.0 - (beta3 ** group['step']))
            for p in group['params']:
                if (p.grad is None):
                    continue
                # Keep master arithmetic in fp32 for half-precision params.
                p_data_fp32 = p.data
                if (p.data.dtype in {torch.float16, torch.bfloat16}):
                    p_data_fp32 = p_data_fp32.float()
                state = self.state[p]
                if (len(state) == 0):
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_diff'] = torch.zeros_like(p_data_fp32)
                else:
                    # Moments may have been restored in a different dtype/device.
                    state['exp_avg'] = state['exp_avg'].to(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].to(p_data_fp32)
                    state['exp_avg_diff'] = state['exp_avg_diff'].to(p_data_fp32)
                grad = p.grad.data
                if (grad.dtype in {torch.float16, torch.bfloat16}):
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError('Adan does not support sparse gradients, please consider SparseAdam instead')
                # On the first step there is no previous gradient yet, so the
                # difference term starts at zero.
                if (('pre_grad' not in state) or (group['step'] == 1)):
                    state['pre_grad'] = grad
                copy_grad = grad.clone()
                (exp_avg, exp_avg_sq, exp_avg_diff) = (state['exp_avg'], state['exp_avg_sq'], state['exp_avg_diff'])
                diff = (grad - state['pre_grad'])
                update = (grad + (beta2 * diff))
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                exp_avg_diff.mul_(beta2).add_(diff, alpha=(1 - beta2))
                exp_avg_sq.mul_(beta3).addcmul_(update, update, value=(1 - beta3))
                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction3)).add_(group['eps'])
                update = ((exp_avg / bias_correction1) + ((beta2 * exp_avg_diff) / bias_correction2)).div_(denom)
                if group['no_prox']:
                    # Decoupled (AdamW-style) weight decay before the step.
                    p_data_fp32.mul_((1 - (group['lr'] * group['weight_decay'])))
                    p_data_fp32.add_(update, alpha=(- group['lr']))
                else:
                    # Proximal form: take the step, then shrink toward zero.
                    p_data_fp32.add_(update, alpha=(- group['lr']))
                    p_data_fp32.div_((1 + (group['lr'] * group['weight_decay'])))
                state['pre_grad'] = copy_grad
                if (p.data.dtype in {torch.float16, torch.bfloat16}):
                    # Write the fp32 result back into the low-precision param.
                    p.data.copy_(p_data_fp32)
        return loss
|
def get_acc(model_path, data_path, bin_path, task='rte'):
    """Evaluate a fine-tuned RoBERTa checkpoint on a GLUE-style dev set.

    Loads `checkpoint_best.pt` from `model_path`, reads tab-separated dev
    file(s) under `data_path`, and returns a list with one score per dev file
    (two for MNLI: mismatched then matched).  Metric depends on the task:
    Matthews correlation for CoLA, Pearson r for STS-B, accuracy otherwise.

    NOTE(review): the column indices per task below encode the standard GLUE
    TSV layouts — verify against the actual dev files before reuse; a task
    string matching none of the branches would leave `tokens`/`target`
    undefined and raise.
    """
    acc_list = []
    (gold, pred) = ([], [])
    roberta = RobertaModel.from_pretrained(model_path, checkpoint_file='checkpoint_best.pt', data_name_or_path=bin_path)
    # Maps a predicted label index back to its string form (offset past the
    # dictionary's special symbols).
    label_fn = (lambda label: roberta.task.label_dictionary.string([(label + roberta.task.label_dictionary.nspecial)]))
    (ncorrect, nsamples) = (0, 0)
    roberta.cuda()
    roberta.eval()
    # MNLI has two dev splits; every other task has one.
    if ('mnli' not in task):
        dev_files = ['dev.tsv']
    else:
        dev_files = ['dev_mismatched.tsv', 'dev_matched.tsv']
    for dev_file in dev_files:
        with open(os.path.join(data_path, dev_file)) as fin:
            fin.readline()  # skip the TSV header row
            for (index, line) in enumerate(fin):
                tokens = line.strip().split('\t')
                # Per-task column layout: sentence pair (or single sentence)
                # plus the gold target column.
                if (('rte' in task) or ('qnli' in task)):
                    (sent1, sent2, target) = (tokens[1], tokens[2], tokens[3])
                    tokens = roberta.encode(sent1, sent2)
                elif ('qqp' in task):
                    (sent1, sent2, target) = (tokens[3], tokens[4], tokens[5])
                    tokens = roberta.encode(sent1, sent2)
                elif ('mnli' in task):
                    (sent1, sent2, target) = (tokens[8], tokens[9], tokens[11])
                    tokens = roberta.encode(sent1, sent2)
                elif ('mrpc' in task):
                    (sent1, sent2, target) = (tokens[3], tokens[4], tokens[0])
                    tokens = roberta.encode(sent1, sent2)
                elif ('sts_b' in task):
                    (sent1, sent2, target) = (tokens[7], tokens[8], float(tokens[9]))
                    tokens = roberta.encode(sent1, sent2)
                elif ('sst_2' in task):
                    (sent, target) = (tokens[0], tokens[1])
                    tokens = roberta.encode(sent)
                elif ('cola' in task):
                    (sent, target) = (tokens[3], tokens[1])
                    tokens = roberta.encode(sent)
                if ('sts_b' not in task):
                    # Classification tasks: argmax over the classification head.
                    prediction = roberta.predict('sentence_classification_head', tokens).argmax().item()
                    prediction_label = label_fn(prediction)
                    ncorrect += int((prediction_label == target))
                    nsamples += 1
                    if ('cola' in task):
                        target = int(target)
                        prediction_label = int(prediction_label)
                        pred.append(prediction_label)
                        gold.append(target)
                else:
                    # STS-B regression: head output rescaled to the 0-5 range.
                    features = roberta.extract_features(tokens)
                    predictions = (5.0 * roberta.model.classification_heads['sentence_classification_head'](features))
                    gold.append(target)
                    pred.append(predictions.item())
        if ('cola' in task):
            out = matthews_corrcoef(gold, pred)
        elif ('sts_b' in task):
            out = pearsonr(gold, pred)[0]
        else:
            out = (float(ncorrect) / float(nsamples))
        acc_list.append(out)
    return acc_list
|
def evaluate(eval_iter):
    """Evaluate the module-level `model` on `eval_iter`.

    Returns the length-weighted average loss over all evaluated segments and
    logs the total wall-clock time plus per-segment latency.

    NOTE(review): a second `evaluate` defined later in this file shadows this
    one at import time.
    """
    model.eval()
    token_count = 0
    loss_sum = 0.0
    tic = time.time()
    with torch.no_grad():
        mems = tuple()
        for idx, (data, target, seq_len) in enumerate(eval_iter):
            out = model(data, target, *mems)
            mems = out[1:]
            segment_loss = out[0].mean()
            loss_sum += seq_len * segment_loss.item()
            token_count += seq_len
        elapsed = time.time() - tic
    logging('Time : {:.2f}s, {:.2f}ms/segment'.format(elapsed, (1000 * elapsed) / (idx + 1)))
    return loss_sum / token_count
|
def format_log(loss, split):
    """Render `loss` as a one-line log fragment for the given `split` name.

    Character-level datasets (enwik8/text8) report bits-per-character; every
    other dataset reports perplexity.
    """
    char_level = args.dataset in ['enwik8', 'text8']
    if char_level:
        return '| {0} loss {1:5.2f} | {0} bpc {2:9.5f} '.format(split, loss, loss / math.log(2))
    return '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format(split, loss, math.exp(loss))
|
def init_weight(weight):
    """Initialize `weight` in place according to the global args.init scheme:
    uniform in [-init_range, init_range] or normal(0, init_std)."""
    scheme = args.init
    if scheme == 'uniform':
        nn.init.uniform_(weight, -args.init_range, args.init_range)
    elif scheme == 'normal':
        nn.init.normal_(weight, 0.0, args.init_std)
|
def init_bias(bias):
    """Zero-initialize `bias` in place."""
    nn.init.zeros_(bias)
|
def weights_init(m):
    """Module-apply hook: initialize `m`'s parameters by class name, using the
    globally configured init scheme (see init_weight/init_bias).

    The elif chain is order-sensitive: 'AdaptiveEmbedding' must be tested
    before 'Embedding', since both substrings match an adaptive embedding.
    """
    classname = m.__class__.__name__
    if (classname.find('Linear') != (- 1)):
        if (hasattr(m, 'weight') and (m.weight is not None)):
            init_weight(m.weight)
        if (hasattr(m, 'bias') and (m.bias is not None)):
            init_bias(m.bias)
    elif (classname.find('AdaptiveEmbedding') != (- 1)):
        # Adaptive embeddings keep per-cluster projection matrices.
        if hasattr(m, 'emb_projs'):
            for i in range(len(m.emb_projs)):
                if (m.emb_projs[i] is not None):
                    nn.init.normal_(m.emb_projs[i], 0.0, args.proj_init_std)
    elif (classname.find('Embedding') != (- 1)):
        if hasattr(m, 'weight'):
            init_weight(m.weight)
    elif (classname.find('ProjectedAdaptiveLogSoftmax') != (- 1)):
        if (hasattr(m, 'cluster_weight') and (m.cluster_weight is not None)):
            init_weight(m.cluster_weight)
        if (hasattr(m, 'cluster_bias') and (m.cluster_bias is not None)):
            init_bias(m.cluster_bias)
        if hasattr(m, 'out_projs'):
            for i in range(len(m.out_projs)):
                if (m.out_projs[i] is not None):
                    nn.init.normal_(m.out_projs[i], 0.0, args.proj_init_std)
    elif (classname.find('LayerNorm') != (- 1)):
        # LayerNorm gain is initialized around 1, not 0.
        if hasattr(m, 'weight'):
            nn.init.normal_(m.weight, 1.0, args.init_std)
        if (hasattr(m, 'bias') and (m.bias is not None)):
            init_bias(m.bias)
    elif (classname.find('TransformerLM') != (- 1)):
        # Relative-position embedding and bias terms of Transformer-XL.
        if hasattr(m, 'r_emb'):
            init_weight(m.r_emb)
        if hasattr(m, 'r_w_bias'):
            init_weight(m.r_w_bias)
        if hasattr(m, 'r_r_bias'):
            init_weight(m.r_r_bias)
        if hasattr(m, 'r_bias'):
            init_bias(m.r_bias)
|
def update_dropout(m):
    """Module-apply hook: reset the drop probability of any *Dropout* module
    to the current global args.dropout."""
    if 'Dropout' in m.__class__.__name__ and hasattr(m, 'p'):
        m.p = args.dropout
|
def update_dropatt(m):
    """Module-apply hook: push the current global args.dropatt value into any
    module carrying an attention-dropout submodule named `dropatt`."""
    if hasattr(m, 'dropatt'):
        m.dropatt.p = args.dropatt
|
def evaluate(eval_iter):
    """Evaluate the module-level `model` on `eval_iter` and return the
    length-weighted average loss.

    Temporarily re-configures the model's target/ext/memory lengths so the
    total attention span (tgt+ext+mem) is preserved under the eval target
    length, then restores the training lengths and train() mode on exit.
    NOTE(review): this shadows the earlier `evaluate` defined in this file.
    """
    model.eval()
    # Keep the total span constant: shift the eval_tgt_len delta into ext_len
    # when no memory is used, otherwise into mem_len.
    if (args.mem_len == 0):
        model.reset_length(args.eval_tgt_len, ((args.ext_len + args.tgt_len) - args.eval_tgt_len), args.mem_len)
    else:
        model.reset_length(args.eval_tgt_len, args.ext_len, ((args.mem_len + args.tgt_len) - args.eval_tgt_len))
    (total_len, total_loss) = (0, 0.0)
    with torch.no_grad():
        mems = tuple()
        for (i, (data, target, seq_len)) in enumerate(eval_iter):
            # Optional cap on the number of eval batches.
            if ((args.max_eval_steps > 0) and (i >= args.max_eval_steps)):
                break
            ret = model(data, target, *mems)
            (loss, mems) = (ret[0], ret[1:])
            loss = loss.mean()
            total_loss += (seq_len * loss.float().item())
            total_len += seq_len
    # Restore training-time lengths and mode.
    model.reset_length(args.tgt_len, args.ext_len, args.mem_len)
    model.train()
    return (total_loss / total_len)
|
def train():
    """Run one training epoch of the Transformer-XL language model.

    Uses module-level globals for the model, optimizer(s), scheduler(s), data
    iterators and counters.  Supports gradient accumulation over batch chunks,
    fp16 via the optimizer's backward/clip helpers, warmup + several LR
    schedules, periodic logging, and periodic evaluation with best-checkpoint
    saving.  Stops early once args.max_step is reached.
    """
    global train_step, train_loss, best_val_loss, eval_start_time, log_start_time
    model.train()
    # One memory tuple per chunk when accumulating over batch chunks.
    if (args.batch_chunk > 1):
        mems = [tuple() for _ in range(args.batch_chunk)]
    else:
        mems = tuple()
    train_iter = (tr_iter.get_varlen_iter() if args.varlen else tr_iter)
    for (batch, (data, target, seq_len)) in enumerate(train_iter):
        model.zero_grad()
        if (args.batch_chunk > 1):
            # Split the batch along dim 1 and accumulate gradients chunk by
            # chunk (loss scaled by the chunk count to keep gradients equal).
            data_chunks = torch.chunk(data, args.batch_chunk, 1)
            target_chunks = torch.chunk(target, args.batch_chunk, 1)
            for i in range(args.batch_chunk):
                data_i = data_chunks[i].contiguous()
                target_i = target_chunks[i].contiguous()
                ret = para_model(data_i, target_i, *mems[i])
                (loss, mems[i]) = (ret[0], ret[1:])
                loss = (loss.float().mean().type_as(loss) / args.batch_chunk)
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                train_loss += loss.float().item()
        else:
            ret = para_model(data, target, *mems)
            (loss, mems) = (ret[0], ret[1:])
            loss = loss.float().mean().type_as(loss)
            if args.fp16:
                optimizer.backward(loss)
            else:
                loss.backward()
            train_loss += loss.float().item()
        # Clip on master grads under fp16, on model grads otherwise.
        if args.fp16:
            optimizer.clip_master_grads(args.clip)
        else:
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        if (args.sample_softmax > 0):
            # Sampled softmax keeps its embedding in a separate sparse optimizer.
            optimizer_sparse.step()
        train_step += 1
        # Learning-rate schedule: linear warmup, then the configured scheduler.
        if (args.scheduler in ['cosine', 'constant', 'dev_perf']):
            if (train_step < args.warmup_step):
                curr_lr = ((args.lr * train_step) / args.warmup_step)
                optimizer.param_groups[0]['lr'] = curr_lr
                if (args.sample_softmax > 0):
                    optimizer_sparse.param_groups[0]['lr'] = (curr_lr * 2)
            elif (args.scheduler == 'cosine'):
                scheduler.step(train_step)
                if (args.sample_softmax > 0):
                    scheduler_sparse.step(train_step)
        elif (args.scheduler == 'inv_sqrt'):
            scheduler.step(train_step)
        # Periodic training-loss logging (bpc for char-level datasets).
        if ((train_step % args.log_interval) == 0):
            cur_loss = (train_loss / args.log_interval)
            elapsed = (time.time() - log_start_time)
            log_str = '| epoch {:3d} step {:>8d} | {:>6d} batches | lr {:.3g} | ms/batch {:5.2f} | loss {:5.2f}'.format(epoch, train_step, (batch + 1), optimizer.param_groups[0]['lr'], ((elapsed * 1000) / args.log_interval), cur_loss)
            if (args.dataset in ['enwik8', 'text8']):
                log_str += ' | bpc {:9.5f}'.format((cur_loss / math.log(2)))
            else:
                log_str += ' | ppl {:9.3f}'.format(math.exp(cur_loss))
            logging(log_str)
            train_loss = 0
            log_start_time = time.time()
        # Periodic validation; save model + optimizer on new best val loss.
        if ((train_step % args.eval_interval) == 0):
            val_loss = evaluate(va_iter)
            logging(('-' * 100))
            log_str = '| Eval {:3d} at step {:>8d} | time: {:5.2f}s | valid loss {:5.2f}'.format((train_step // args.eval_interval), train_step, (time.time() - eval_start_time), val_loss)
            if (args.dataset in ['enwik8', 'text8']):
                log_str += ' | bpc {:9.5f}'.format((val_loss / math.log(2)))
            else:
                log_str += ' | valid ppl {:9.3f}'.format(math.exp(val_loss))
            logging(log_str)
            logging(('-' * 100))
            if ((not best_val_loss) or (val_loss < best_val_loss)):
                if (not args.debug):
                    with open(os.path.join(args.work_dir, 'model.pt'), 'wb') as f:
                        torch.save(model, f)
                    with open(os.path.join(args.work_dir, 'optimizer.pt'), 'wb') as f:
                        torch.save(optimizer.state_dict(), f)
                best_val_loss = val_loss
            # dev_perf scheduler steps on the validation metric.
            if (args.scheduler == 'dev_perf'):
                scheduler.step(val_loss)
                if (args.sample_softmax > 0):
                    scheduler_sparse.step(val_loss)
            eval_start_time = time.time()
        if (train_step == args.max_step):
            break
|
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
    """
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.

    Fix: the inner handler used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit; it now catches Exception only, so debugging
    interrupts still propagate.  The unused loop variable in the replication
    branch is also cleaned up.
    """
    def scatter_map(obj):
        # Tensors are split into per-GPU chunks of the requested sizes.
        if isinstance(obj, torch.Tensor):
            try:
                return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
            except Exception:
                # Dump the offending shapes before bailing out, matching the
                # original best-effort diagnostic behavior.
                print('obj', obj.size())
                print('dim', dim)
                print('chunk_sizes', chunk_sizes)
                quit()
        # Containers are scattered element-wise, preserving their structure.
        if (isinstance(obj, tuple) and (len(obj) > 0)):
            return list(zip(*map(scatter_map, obj)))
        if (isinstance(obj, list) and (len(obj) > 0)):
            return list(map(list, zip(*map(scatter_map, obj))))
        if (isinstance(obj, dict) and (len(obj) > 0)):
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Non-tensor leaves are replicated by reference, once per target GPU.
        return [obj for _ in target_gpus]
    try:
        return scatter_map(inputs)
    finally:
        # Break the closure's self-reference so captured tensors can be freed.
        scatter_map = None
|
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
    """Scatter positional and keyword arguments across GPUs, padding the
    shorter of the two lists so every replica receives an (args, kwargs)
    pair."""
    scattered_inputs = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
    while len(scattered_inputs) < len(scattered_kwargs):
        scattered_inputs.append(())
    while len(scattered_kwargs) < len(scattered_inputs):
        scattered_kwargs.append({})
    return tuple(scattered_inputs), tuple(scattered_kwargs)
|
class BalancedDataParallel(DataParallel):
    """DataParallel variant that assigns a custom (usually smaller) batch
    share `gpu0_bsz` to GPU 0 — useful because GPU 0 also holds gathered
    outputs and optimizer state.  gpu0_bsz == 0 excludes GPU 0 entirely."""
    def __init__(self, gpu0_bsz, *args, **kwargs):
        # Per-replica batch size for the first device; remaining samples are
        # divided evenly across the other devices.
        self.gpu0_bsz = gpu0_bsz
        super().__init__(*args, **kwargs)
    def forward(self, *inputs, **kwargs):
        if (not self.device_ids):
            return self.module(*inputs, **kwargs)
        # With gpu0_bsz == 0, GPU 0 receives no data, so run replicas on the
        # remaining devices only.
        if (self.gpu0_bsz == 0):
            device_ids = self.device_ids[1:]
        else:
            device_ids = self.device_ids
        (inputs, kwargs) = self.scatter(inputs, kwargs, device_ids)
        if (len(self.device_ids) == 1):
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids)
        if (self.gpu0_bsz == 0):
            replicas = replicas[1:]
        outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
        return self.gather(outputs, self.output_device)
    def parallel_apply(self, replicas, device_ids, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, device_ids)
    def scatter(self, inputs, kwargs, device_ids):
        bsz = inputs[0].size(self.dim)
        num_dev = len(self.device_ids)
        gpu0_bsz = self.gpu0_bsz
        # Even share for each non-first device after reserving gpu0's portion.
        bsz_unit = ((bsz - gpu0_bsz) // (num_dev - 1))
        if (gpu0_bsz < bsz_unit):
            chunk_sizes = ([gpu0_bsz] + ([bsz_unit] * (num_dev - 1)))
            # Spread the integer-division remainder over the non-first devices.
            delta = (bsz - sum(chunk_sizes))
            for i in range(delta):
                chunk_sizes[(i + 1)] += 1
            if (gpu0_bsz == 0):
                chunk_sizes = chunk_sizes[1:]
        else:
            # gpu0's share is no smaller than the others: plain even scatter.
            return super().scatter(inputs, kwargs, device_ids)
        return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
|
def logging(s, log_path, print_=True, log_=True):
    """Emit message ``s`` to stdout and/or append it (plus newline) to the
    file at ``log_path``."""
    if print_:
        print(s)
    if log_:
        with open(log_path, 'a+') as handle:
            handle.write(s + '\n')
|
def get_logger(log_path, **kwargs):
    """Return a ``logging`` callable with ``log_path`` (and any extra
    keyword options) pre-bound."""
    bound = dict(kwargs, log_path=log_path)
    return functools.partial(logging, **bound)
|
def create_exp_dir(dir_path, scripts_to_save=None, debug=False):
    """Create an experiment directory (optionally archiving source scripts)
    and return a logger bound to ``<dir_path>/log.txt``.

    In debug mode nothing is created and a print-only logger is returned.
    """
    if debug:
        print('Debug Mode : no experiment dir created')
        return functools.partial(logging, log_path=None, log_=False)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    print('Experiment dir : {}'.format(dir_path))
    if scripts_to_save is not None:
        # Snapshot the launching scripts for reproducibility.
        script_dir = os.path.join(dir_path, 'scripts')
        if not os.path.exists(script_dir):
            os.makedirs(script_dir)
        for src in scripts_to_save:
            shutil.copyfile(src, os.path.join(script_dir, os.path.basename(src)))
    return get_logger(log_path=os.path.join(dir_path, 'log.txt'))
|
def save_checkpoint(model, optimizer, path, epoch):
    """Serialize the full model object and the optimizer state dict for
    ``epoch`` into ``path`` as ``model_<epoch>.pt`` / ``optimizer_<epoch>.pt``."""
    model_file = os.path.join(path, 'model_{}.pt'.format(epoch))
    optim_file = os.path.join(path, 'optimizer_{}.pt'.format(epoch))
    torch.save(model, model_file)
    torch.save(optimizer.state_dict(), optim_file)
|
class Vocab(object):
    """Token vocabulary built from frequency counts or loaded from a file.

    Symbols are mapped to contiguous integer ids (``sym2idx``) and back
    (``idx2sym``). Call ``count_file``/``count_sents`` to accumulate token
    frequencies in ``counter``, then ``build_vocab`` to freeze the mapping.

    Args:
        special: special symbols (e.g. '<eos>') inserted first, in order.
        min_freq: drop tokens whose count is below this (0 keeps all).
        max_size: maximum number of non-special symbols (None = unbounded).
        lower_case: lowercase lines before splitting.
        delimiter: split token; None = any whitespace, '' = per character.
        vocab_file: if given, ``build_vocab`` loads symbols from this file
            (one per line, first whitespace-separated field).
    """

    def __init__(self, special=None, min_freq=0, max_size=None, lower_case=True, delimiter=None, vocab_file=None):
        self.counter = Counter()
        # Fix: the previous mutable default (special=[]) was a single list
        # shared by every Vocab instance; use a per-instance list instead.
        self.special = special if special is not None else []
        self.min_freq = min_freq
        self.max_size = max_size
        self.lower_case = lower_case
        self.delimiter = delimiter
        self.vocab_file = vocab_file

    def tokenize(self, line, add_eos=False, add_double_eos=False):
        """Split one line into symbols, optionally wrapping with EOS markers."""
        line = line.strip()
        if self.lower_case:
            line = line.lower()
        if self.delimiter == '':
            symbols = line  # character-level: iterate the string itself
        else:
            symbols = line.split(self.delimiter)
        if add_double_eos:
            return ['<S>'] + symbols + ['<S>']
        elif add_eos:
            return symbols + ['<eos>']
        else:
            return symbols

    def count_file(self, path, verbose=False, add_eos=False):
        """Tokenize a text file, updating ``counter``; return the sentences."""
        if verbose:
            print('counting file {} ...'.format(path))
        assert os.path.exists(path)
        sents = []
        with open(path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print('    line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=add_eos)
                self.counter.update(symbols)
                sents.append(symbols)
        return sents

    def count_sents(self, sents, verbose=False):
        """Update ``counter`` from pre-tokenized sentences.

        sents: a list of sentences, each a list of tokenized symbols.
        """
        if verbose:
            print('counting {} sents ...'.format(len(sents)))
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                print('    line {}'.format(idx))
            self.counter.update(symbols)

    def _build_from_file(self, vocab_file):
        # One symbol per line; only the first whitespace-separated field counts.
        self.idx2sym = []
        self.sym2idx = OrderedDict()
        with open(vocab_file, 'r', encoding='utf-8') as f:
            for line in f:
                symb = line.strip().split()[0]
                self.add_symbol(symb)
        # File-based vocabs must define '<UNK>' as the unknown-token backoff.
        self.unk_idx = self.sym2idx['<UNK>']

    def build_vocab(self):
        """Freeze the symbol<->id mapping from ``vocab_file`` or ``counter``."""
        if self.vocab_file:
            print('building vocab from {}'.format(self.vocab_file))
            self._build_from_file(self.vocab_file)
            print('final vocab size {}'.format(len(self)))
        else:
            print('building vocab with min_freq={}, max_size={}'.format(self.min_freq, self.max_size))
            self.idx2sym = []
            self.sym2idx = OrderedDict()
            for sym in self.special:
                self.add_special(sym)
            # most_common is count-sorted, so stop at the first symbol that
            # falls below min_freq.
            for sym, cnt in self.counter.most_common(self.max_size):
                if cnt < self.min_freq:
                    break
                self.add_symbol(sym)
            print('final vocab size {} from {} unique tokens'.format(len(self), len(self.counter)))

    def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False):
        """Encode a text file into id tensors (one per line, or a single
        concatenated tensor when ``ordered`` is True)."""
        if verbose:
            print('encoding file {} ...'.format(path))
        assert os.path.exists(path)
        encoded = []
        with open(path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print('    line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos)
                encoded.append(self.convert_to_tensor(symbols))
        if ordered:
            encoded = torch.cat(encoded)
        return encoded

    def encode_sents(self, sents, ordered=False, verbose=False):
        """Encode pre-tokenized sentences into id tensors."""
        if verbose:
            print('encoding {} sents ...'.format(len(sents)))
        encoded = []
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                print('    line {}'.format(idx))
            encoded.append(self.convert_to_tensor(symbols))
        if ordered:
            encoded = torch.cat(encoded)
        return encoded

    def add_special(self, sym):
        """Add a special symbol and expose its id as ``self.<name>_idx``."""
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1
            # e.g. '<eos>' -> self.eos_idx
            setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])

    def add_symbol(self, sym):
        """Add a regular symbol if not already present."""
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1

    def get_sym(self, idx):
        """Return the symbol for id ``idx`` (asserts the id is in range)."""
        assert 0 <= idx < len(self), 'Index {} out of range'.format(idx)
        return self.idx2sym[idx]

    def get_idx(self, sym):
        """Return the id of ``sym``, backing off to ``unk_idx`` when unknown."""
        if sym in self.sym2idx:
            return self.sym2idx[sym]
        else:
            assert '<eos>' not in sym
            assert hasattr(self, 'unk_idx'), 'unknown symbol but no <UNK> backoff'
            return self.sym2idx.get(sym, self.unk_idx)

    def get_symbols(self, indices):
        return [self.get_sym(idx) for idx in indices]

    def get_indices(self, symbols):
        return [self.get_idx(sym) for sym in symbols]

    def convert_to_tensor(self, symbols):
        """Encode a list of symbols as a LongTensor of ids."""
        return torch.LongTensor(self.get_indices(symbols))

    def convert_to_sent(self, indices, exclude=None):
        """Decode ids into a space-joined sentence, optionally skipping any
        id present in ``exclude``."""
        if exclude is None:
            return ' '.join([self.get_sym(idx) for idx in indices])
        else:
            return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])

    def __len__(self):
        return len(self.idx2sym)
|
class MultiTensorApply(object):
    """Thin wrapper that invokes a fused multi-tensor op with a fixed chunk size.

    NOTE(review): in the apex original, ``__init__`` tried to import the
    fused CUDA extension inside the try block and recorded failure in
    ``available``/``import_err``; the import was removed here, so the
    ``except ImportError`` branch was dead code and has been dropped.
    ``available`` still flips to True on first construction, as before.
    """
    available = False   # becomes True once any instance is constructed
    warned = False

    def __init__(self, chunk_size):
        MultiTensorApply.available = True
        # Number of elements each kernel launch processes per tensor chunk.
        self.chunk_size = chunk_size

    def __call__(self, op, noop_flag_buffer, tensor_lists, *args):
        """Call ``op`` with the configured chunk size prepended."""
        return op(self.chunk_size, noop_flag_buffer, tensor_lists, *args)
|
class Adan(Optimizer):
    """
    Implements a pytorch variant of Adan.
    Adan was proposed in
    Adan: Adaptive Nesterov Momentum Algorithm for
    Faster Optimizing Deep Models[J].arXiv preprint arXiv:2208.06677, 2022.
    https://arxiv.org/abs/2208.06677
    Arguments:
        params (iterable): iterable of parameters to optimize or
            dicts defining parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float, float], optional): coefficients used for
            first- and second-order moments. (default: (0.98, 0.92, 0.99))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): decoupled weight decay
            (L2 penalty) (default: 0)
        max_grad_norm (float, optional): value used to clip
            global grad norm (default: 0.0 no clip)
        no_prox (bool): how to perform the decoupled weight decay
            (default: False)
        foreach (bool): if True would use torch._foreach implementation.
            It's faster but uses slightly more memory. (default: True)
        fused (bool, optional): whether fused implementation is used.
            (default: False)
    """
    def __init__(self, params, lr=0.001, betas=(0.98, 0.92, 0.99), eps=1e-08, weight_decay=0.0, max_grad_norm=0.0, no_prox=False, foreach: bool=True, fused: bool=False):
        # Fail fast on out-of-range hyper-parameters.
        if (not (0.0 <= max_grad_norm)):
            raise ValueError('Invalid Max grad norm: {}'.format(max_grad_norm))
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if (not (0.0 <= betas[2] < 1.0)):
            raise ValueError('Invalid beta parameter at index 2: {}'.format(betas[2]))
        if fused:
            # Raises a descriptive ImportError if the CUDA extension is missing.
            _check_fused_available()
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm, no_prox=no_prox, foreach=foreach, fused=fused)
        super().__init__(params, defaults)
    def __setstate__(self, state):
        super(Adan, self).__setstate__(state)
        # Older checkpoints may predate the 'no_prox' option.
        for group in self.param_groups:
            group.setdefault('no_prox', False)
    @torch.no_grad()
    def restart_opt(self):
        """Reset the step counter and all moment buffers to zero."""
        for group in self.param_groups:
            group['step'] = 0
            for p in group['params']:
                if p.requires_grad:
                    state = self.state[p]
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    state['exp_avg_diff'] = torch.zeros_like(p)
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): re-evaluates the model and
                returns the loss.
        """
        loss = None
        if (closure is not None):
            with torch.enable_grad():
                loss = closure()
        # Optional global gradient clipping: a single scalar factor computed
        # from the norm over ALL parameters, folded into the update below.
        if (self.defaults['max_grad_norm'] > 0):
            device = self.param_groups[0]['params'][0].device
            global_grad_norm = torch.zeros(1, device=device)
            max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device)
            for group in self.param_groups:
                for p in group['params']:
                    if (p.grad is not None):
                        grad = p.grad
                        global_grad_norm.add_(grad.pow(2).sum())
            global_grad_norm = torch.sqrt(global_grad_norm)
            # NOTE(review): 'group' here is whatever the loop above left
            # bound (the last param group) — this assumes all groups share
            # the same eps; confirm if groups may differ.
            clip_global_grad_norm = torch.clamp((max_grad_norm / (global_grad_norm + group['eps'])), max=1.0).item()
        else:
            clip_global_grad_norm = 1.0
        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            exp_avg_diffs = []
            neg_pre_grads = []
            (beta1, beta2, beta3) = group['betas']
            # Per-group step counter used for bias correction.
            if ('step' in group):
                group['step'] += 1
            else:
                group['step'] = 1
            bias_correction1 = (1.0 - (beta1 ** group['step']))
            bias_correction2 = (1.0 - (beta2 ** group['step']))
            bias_correction3 = (1.0 - (beta3 ** group['step']))
            for p in group['params']:
                if (p.grad is None):
                    continue
                params_with_grad.append(p)
                grads.append(p.grad)
                state = self.state[p]
                # Lazily initialize the moment buffers on first update.
                if (len(state) == 0):
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    state['exp_avg_diff'] = torch.zeros_like(p)
                # neg_pre_grad caches -g_{t-1} (already clip-scaled) for the
                # gradient-difference term; seed it with -g_1 on step 1.
                if (('neg_pre_grad' not in state) or (group['step'] == 1)):
                    state['neg_pre_grad'] = p.grad.clone().mul_((- clip_global_grad_norm))
                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                exp_avg_diffs.append(state['exp_avg_diff'])
                neg_pre_grads.append(state['neg_pre_grad'])
            if (not params_with_grad):
                continue
            kwargs = dict(params=params_with_grad, grads=grads, exp_avgs=exp_avgs, exp_avg_sqs=exp_avg_sqs, exp_avg_diffs=exp_avg_diffs, neg_pre_grads=neg_pre_grads, beta1=beta1, beta2=beta2, beta3=beta3, bias_correction1=bias_correction1, bias_correction2=bias_correction2, bias_correction3_sqrt=math.sqrt(bias_correction3), lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'], no_prox=group['no_prox'], clip_global_grad_norm=clip_global_grad_norm)
            # Dispatch to one of four implementations:
            # {foreach, single-tensor} x {fused CUDA kernel, plain torch}.
            if group['foreach']:
                if group['fused']:
                    if torch.cuda.is_available():
                        _fused_adan_multi_tensor(**kwargs)
                    else:
                        raise ValueError('Fused Adan does not support CPU')
                else:
                    _multi_tensor_adan(**kwargs)
            elif group['fused']:
                if torch.cuda.is_available():
                    _fused_adan_single_tensor(**kwargs)
                else:
                    raise ValueError('Fused Adan does not support CPU')
            else:
                _single_tensor_adan(**kwargs)
        return loss
|
def _single_tensor_adan(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], exp_avg_diffs: List[Tensor], neg_pre_grads: List[Tensor], *, beta1: float, beta2: float, beta3: float, bias_correction1: float, bias_correction2: float, bias_correction3_sqrt: float, lr: float, weight_decay: float, eps: float, no_prox: bool, clip_global_grad_norm: Tensor):
for (i, param) in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
exp_avg_diff = exp_avg_diffs[i]
neg_grad_or_diff = neg_pre_grads[i]
grad.mul_(clip_global_grad_norm)
neg_grad_or_diff.add_(grad)
exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
exp_avg_diff.mul_(beta2).add_(neg_grad_or_diff, alpha=(1 - beta2))
neg_grad_or_diff.mul_(beta2).add_(grad)
exp_avg_sq.mul_(beta3).addcmul_(neg_grad_or_diff, neg_grad_or_diff, value=(1 - beta3))
denom = (exp_avg_sq.sqrt() / bias_correction3_sqrt).add_(eps)
step_size_diff = ((lr * beta2) / bias_correction2)
step_size = (lr / bias_correction1)
if no_prox:
param.mul_((1 - (lr * weight_decay)))
param.addcdiv_(exp_avg, denom, value=(- step_size))
param.addcdiv_(exp_avg_diff, denom, value=(- step_size_diff))
else:
param.addcdiv_(exp_avg, denom, value=(- step_size))
param.addcdiv_(exp_avg_diff, denom, value=(- step_size_diff))
param.div_((1 + (lr * weight_decay)))
neg_grad_or_diff.zero_().add_(grad, alpha=(- 1.0))
|
def _multi_tensor_adan(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], exp_avg_diffs: List[Tensor], neg_pre_grads: List[Tensor], *, beta1: float, beta2: float, beta3: float, bias_correction1: float, bias_correction2: float, bias_correction3_sqrt: float, lr: float, weight_decay: float, eps: float, no_prox: bool, clip_global_grad_norm: Tensor):
    """Horizontally-fused (torch._foreach) Adan update.

    Mirrors _single_tensor_adan but applies each operation across all
    tensors at once; every state tensor is updated in place. On entry
    ``neg_pre_grads`` holds -g_{t-1}; on exit it holds -g_t.
    """
    if (len(params) == 0):
        return
    # Fold in the pre-computed global grad-norm clipping factor.
    torch._foreach_mul_(grads, clip_global_grad_norm)
    # neg_pre_grads holds -g_{t-1}; adding g_t yields the gradient difference.
    torch._foreach_add_(neg_pre_grads, grads)
    # First moment: m = beta1*m + (1-beta1)*g.
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=(1 - beta1))
    # Gradient-difference moment: d = beta2*d + (1-beta2)*(g_t - g_{t-1}).
    torch._foreach_mul_(exp_avg_diffs, beta2)
    torch._foreach_add_(exp_avg_diffs, neg_pre_grads, alpha=(1 - beta2))
    # Build g_t + beta2*(g_t - g_{t-1}) for the second-moment update.
    torch._foreach_mul_(neg_pre_grads, beta2)
    torch._foreach_add_(neg_pre_grads, grads)
    # Second moment of the Nesterov-corrected gradient.
    torch._foreach_mul_(exp_avg_sqs, beta3)
    torch._foreach_addcmul_(exp_avg_sqs, neg_pre_grads, neg_pre_grads, value=(1 - beta3))
    # Bias-corrected denominator: sqrt(v)/sqrt(bc3) + eps.
    denom = torch._foreach_sqrt(exp_avg_sqs)
    torch._foreach_div_(denom, bias_correction3_sqrt)
    torch._foreach_add_(denom, eps)
    step_size_diff = ((lr * beta2) / bias_correction2)
    step_size = (lr / bias_correction1)
    if no_prox:
        # Decoupled weight decay applied before the parameter update.
        torch._foreach_mul_(params, (1 - (lr * weight_decay)))
        torch._foreach_addcdiv_(params, exp_avgs, denom, value=(- step_size))
        torch._foreach_addcdiv_(params, exp_avg_diffs, denom, value=(- step_size_diff))
    else:
        # Proximal form: update first, then shrink.
        torch._foreach_addcdiv_(params, exp_avgs, denom, value=(- step_size))
        torch._foreach_addcdiv_(params, exp_avg_diffs, denom, value=(- step_size_diff))
        torch._foreach_div_(params, (1 + (lr * weight_decay)))
    # Stash -g_t for the next step's gradient difference.
    torch._foreach_zero_(neg_pre_grads)
    torch._foreach_add_(neg_pre_grads, grads, alpha=(- 1.0))
|
def _fused_adan_multi_tensor(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], exp_avg_diffs: List[Tensor], neg_pre_grads: List[Tensor], *, beta1: float, beta2: float, beta3: float, bias_correction1: float, bias_correction2: float, bias_correction3_sqrt: float, lr: float, weight_decay: float, eps: float, no_prox: bool, clip_global_grad_norm: Tensor):
    """Multi-tensor Adan update via the `fused_adan` CUDA extension.

    Requires CUDA and the compiled extension (availability is validated in
    Adan.__init__ via _check_fused_available).
    """
    import fused_adan
    # Chunk size (in elements) used by the multi-tensor kernel launcher.
    multi_tensor_applier = MultiTensorApply((2048 * 32))
    # No-op/overflow flag buffer required by the kernel interface.
    _dummy_overflow_buf = torch.cuda.IntTensor([0])
    multi_tensor_applier(fused_adan.adan_multi_tensor, _dummy_overflow_buf, [params, grads, exp_avgs, exp_avg_sqs, exp_avg_diffs, neg_pre_grads], beta1, beta2, beta3, bias_correction1, bias_correction2, bias_correction3_sqrt, lr, weight_decay, eps, no_prox, clip_global_grad_norm)
    # Refresh -g_t for the next step's gradient-difference term.
    torch._foreach_zero_(neg_pre_grads)
    torch._foreach_add_(neg_pre_grads, grads, alpha=(- 1.0))
|
def _fused_adan_single_tensor(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], exp_avg_diffs: List[Tensor], neg_pre_grads: List[Tensor], *, beta1: float, beta2: float, beta3: float, bias_correction1: float, bias_correction2: float, bias_correction3_sqrt: float, lr: float, weight_decay: float, eps: float, no_prox: bool, clip_global_grad_norm: Tensor):
    """Per-tensor Adan update via the `fused_adan` CUDA extension.

    Requires CUDA and the compiled extension (availability is validated in
    Adan.__init__ via _check_fused_available).
    """
    for (i, param) in enumerate(params):
        # The kernel computes in fp32 (p_data_fp32) and writes back to out_p,
        # which aliases the parameter's original storage/dtype.
        p_data_fp32 = param.data.float()
        out_p = param.data
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        exp_avg_diff = exp_avg_diffs[i]
        neg_grad = neg_pre_grads[i]
        # Launch on the parameter's own device (multi-GPU safe).
        with torch.cuda.device(param.device):
            import fused_adan
            fused_adan.adan_single_tensor(p_data_fp32, out_p, grad, exp_avg, exp_avg_sq, exp_avg_diff, neg_grad, beta1, beta2, beta3, bias_correction1, bias_correction2, bias_correction3_sqrt, lr, weight_decay, eps, no_prox, clip_global_grad_norm)
        # Stash -g_t for the next step's gradient-difference term.
        neg_grad.zero_().add_(grad, alpha=(- 1.0))
|
def _check_fused_available():
try:
import fused_adan
except ImportError as exc:
if torch.cuda.is_available():
raise ImportError((str(exc) + '\nThis could be caused by not having compiled the CUDA extension during package installation. Please try to re-install the package with the environment flag `FORCE_CUDA=1` set.'))
else:
raise ImportError((str(exc) + '\nFused Adan does not support CPU.'))
|
def create_diffusion(timestep_respacing, noise_schedule='linear', use_kl=False, sigma_small=False, predict_xstart=False, learn_sigma=True, rescale_learned_sigmas=False, diffusion_steps=1000):
    """Build a SpacedDiffusion configured from high-level flags.

    Loss type precedence: KL > rescaled MSE > plain MSE. An empty/None
    ``timestep_respacing`` means "use all diffusion steps".
    """
    betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps)

    if use_kl:
        loss_type = gd.LossType.RESCALED_KL
    elif rescale_learned_sigmas:
        loss_type = gd.LossType.RESCALED_MSE
    else:
        loss_type = gd.LossType.MSE

    if timestep_respacing is None or timestep_respacing == '':
        timestep_respacing = [diffusion_steps]

    mean_type = gd.ModelMeanType.START_X if predict_xstart else gd.ModelMeanType.EPSILON
    if learn_sigma:
        var_type = gd.ModelVarType.LEARNED_RANGE
    elif sigma_small:
        var_type = gd.ModelVarType.FIXED_SMALL
    else:
        var_type = gd.ModelVarType.FIXED_LARGE

    return SpacedDiffusion(use_timesteps=space_timesteps(diffusion_steps, timestep_respacing), betas=betas, model_mean_type=mean_type, model_var_type=var_type, loss_type=loss_type)
|
def diffusion_defaults():
    """Defaults for image and classifier training."""
    return {
        'learn_sigma': True,
        'diffusion_steps': 1000,
        'noise_schedule': 'linear',
        'timestep_respacing': '',
        'use_kl': False,
        'predict_xstart': False,
        'sigma_small': False,
        'rescale_learned_sigmas': False,
    }
|
def model_and_diffusion_defaults():
    """Defaults for image training (model flags merged with diffusion flags)."""
    base = {
        'image_size': 256,
        'mask_ratio': None,
        'decode_layer': None,
        'class_cond': True,
        'use_fp16': False,
    }
    return {**base, **diffusion_defaults()}
|
def normal_kl(mean1, logvar1, mean2, logvar2):
    """Compute KL( N(mean1, exp(logvar1)) || N(mean2, exp(logvar2)) ).

    Shapes broadcast, so batches can be compared against scalars; at least
    one argument must be a Tensor (used to place scalar log-variances).
    """
    tensor = next((obj for obj in (mean1, logvar1, mean2, logvar2) if isinstance(obj, th.Tensor)), None)
    assert tensor is not None, 'at least one argument must be a Tensor'
    # Promote scalar log-variances to tensors on the reference device/dtype.
    logvar1, logvar2 = [
        x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    ]
    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + th.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    )
|
def approx_standard_normal_cdf(x):
    """Fast tanh-based approximation of the standard-normal CDF."""
    inner = np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))
    return 0.5 * (1.0 + th.tanh(inner))
|
def continuous_gaussian_log_likelihood(x, *, means, log_scales):
    """Log-likelihood of a continuous Gaussian, evaluated as the standard
    normal log-density of the standardized residual.

    :param x: the targets.
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    standardized = (x - means) * th.exp(-log_scales)
    std_normal = th.distributions.Normal(th.zeros_like(x), th.ones_like(x))
    return std_normal.log_prob(standardized)
|
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
    """Log-likelihood of a Gaussian discretized to 8-bit image bins.

    :param x: the target images, assumed to be uint8 values rescaled to
        the range [-1, 1].
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    assert x.shape == means.shape == log_scales.shape
    inv_stdv = th.exp(-log_scales)
    centered = x - means
    # CDF evaluated at the upper and lower edges of the (2/255)-wide bin.
    cdf_plus = approx_standard_normal_cdf(inv_stdv * (centered + 1.0 / 255.0))
    cdf_min = approx_standard_normal_cdf(inv_stdv * (centered - 1.0 / 255.0))
    log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
    log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
    log_delta = th.log((cdf_plus - cdf_min).clamp(min=1e-12))
    # Edge pixels integrate the full tail rather than a finite bin.
    log_probs = th.where(
        x < -0.999,
        log_cdf_plus,
        th.where(x > 0.999, log_one_minus_cdf_min, log_delta),
    )
    assert log_probs.shape == x.shape
    return log_probs
|
def synchronize():
    """Barrier across all ranks; no-op when not running distributed or when
    there is only a single process."""
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
|
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
|
def get_world_size():
    """World size of the default process group, or 1 when non-distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_world_size()
    return 1
|
def setup_for_distributed(is_master):
    """Monkey-patch builtins.print so only the master process prints.

    Non-master ranks stay silent unless called with force=True or when the
    job is large (world size > 8). Printed lines are prefixed with a
    timestamp.
    """
    original_print = builtins.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False) or (get_world_size() > 8)
        if is_master or force:
            timestamp = datetime.datetime.now().time()
            original_print('[{}] '.format(timestamp), end='')
            original_print(*args, **kwargs)

    builtins.print = print
|
def setup_dist_multinode(args):
    """Set up a multi-node distributed process group (NCCL, env:// init).

    Expects LOCAL_RANK in the environment (torchrun-style launch); pins each
    process to its local GPU, silences printing on non-master ranks, then
    synchronizes. Exits the process if a group is already initialized.
    """
    # NOTE(review): this branch also fires when dist is UNAVAILABLE, in which
    # case init_process_group below will fail — presumably only the
    # "not yet initialized" case is intended; confirm.
    if ((not dist.is_available()) or (not dist.is_initialized())):
        th.distributed.init_process_group(backend='nccl', init_method='env://')
        world_size = dist.get_world_size()
        local_rank = int(os.getenv('LOCAL_RANK'))
        print('rank', local_rank)
        device = local_rank
        th.cuda.set_device(device)
        # Only local rank 0 keeps printing after this.
        setup_for_distributed((device == 0))
        synchronize()
    else:
        print('ddp failed!')
        exit()
|
def setup_dist():
    """Set up a distributed process group (NCCL, env:// init).

    No-op when a process group is already initialized. Requires LOCAL_RANK
    in the environment to pin this process to its GPU before init.
    """
    if dist.is_initialized():
        return
    th.cuda.set_device(int(os.environ['LOCAL_RANK']))
    th.distributed.init_process_group(backend='nccl', init_method='env://')
    synchronize()
|
def dev():
    """Return the device to use for torch.distributed: the current CUDA
    device when available, otherwise CPU."""
    if th.cuda.is_available():
        # Plain 'cuda' resolves to the current device selected via
        # th.cuda.set_device (the f-string here had no placeholders).
        return th.device('cuda')
    return th.device('cpu')
|
def load_state_dict(path, **kwargs):
    """Load a PyTorch file without redundant fetches across MPI ranks.

    Rank 0 reads the file and broadcasts its bytes to every other rank in
    1 GiB chunks (mpi4py's bcast pickles the payload, hence the chunking);
    all ranks then deserialize the same buffer with torch.load.
    """
    chunk_size = (2 ** 30)  # 1 GiB per broadcast
    if (MPI.COMM_WORLD.Get_rank() == 0):
        with bf.BlobFile(path, 'rb') as f:
            data = f.read()
        num_chunks = (len(data) // chunk_size)
        if (len(data) % chunk_size):
            num_chunks += 1
        # Broadcast order must mirror the receiving side exactly:
        # first the chunk count, then each chunk in sequence.
        MPI.COMM_WORLD.bcast(num_chunks)
        for i in range(0, len(data), chunk_size):
            MPI.COMM_WORLD.bcast(data[i:(i + chunk_size)])
    else:
        num_chunks = MPI.COMM_WORLD.bcast(None)
        data = bytes()
        for _ in range(num_chunks):
            data += MPI.COMM_WORLD.bcast(None)
    return th.load(io.BytesIO(data), **kwargs)
|
def sync_params(params):
    """Broadcast every tensor in ``params`` from rank 0 to all other ranks."""
    with th.no_grad():
        for p in params:
            dist.broadcast(p, 0)
|
def _find_free_port():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
finally:
s.close()
|
def convert_module_to_f16(l):
    """In-place cast the weight (and bias, if any) of conv layers to float16;
    other module types are left untouched."""
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.half()
    if l.bias is not None:
        l.bias.data = l.bias.data.half()
|
def convert_module_to_f32(l):
    """In-place cast the weight (and bias, if any) of conv layers back to
    float32, undoing convert_module_to_f16()."""
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.float()
    if l.bias is not None:
        l.bias.data = l.bias.data.float()
|
def make_master_params(param_groups_and_shapes):
    """Copy model parameters into a list of flat, full-precision master
    parameters (one trainable tensor per group)."""
    master_params = []
    for group, shape in param_groups_and_shapes:
        detached = [p.detach().float() for _, p in group]
        flat = _flatten_dense_tensors(detached).view(shape)
        master = nn.Parameter(flat)
        master.requires_grad = True
        master_params.append(master)
    return master_params
|
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
    """Copy model-parameter gradients into the flat master gradient buffers
    created by make_master_params()."""
    for master, (group, shape) in zip(master_params, param_groups_and_shapes):
        grads = [param_grad_or_zeros(p) for _, p in group]
        master.grad = _flatten_dense_tensors(grads).view(shape)
|
def master_params_to_model_params(param_groups_and_shapes, master_params):
    """Write the fp32 master values back into the model parameters."""
    for master, (group, _) in zip(master_params, param_groups_and_shapes):
        unflat = unflatten_master_params(group, master.view(-1))
        for (_, p), chunk in zip(group, unflat):
            p.detach().copy_(chunk)
|
def unflatten_master_params(param_group, master_param):
    """Split a flat master tensor back into views shaped like each model
    parameter in the group."""
    reference = [p for _, p in param_group]
    return _unflatten_dense_tensors(master_param, reference)
|
def get_param_groups_and_shapes(named_model_params):
    """Partition named parameters into two (group, flat_shape) pairs:
    scalars/vectors flattened to 1-D, and matrices flattened to (1, -1)."""
    named_model_params = list(named_model_params)
    vectors = [(name, p) for name, p in named_model_params if p.ndim <= 1]
    matrices = [(name, p) for name, p in named_model_params if p.ndim > 1]
    return [(vectors, -1), (matrices, (1, -1))]
|
def master_params_to_state_dict(model, param_groups_and_shapes, master_params, use_fp16):
    """Build a state dict whose parameter values come from the master params.

    In fp16 mode the flat master tensors are unflattened back into
    per-parameter shapes; otherwise master_params is a plain per-parameter
    list aligned with model.named_parameters().
    """
    state_dict = model.state_dict()
    if use_fp16:
        for master, (group, _) in zip(master_params, param_groups_and_shapes):
            unflat = unflatten_master_params(group, master.view(-1))
            for (name, _), value in zip(group, unflat):
                assert name in state_dict
                state_dict[name] = value
    else:
        for i, (name, _value) in enumerate(model.named_parameters()):
            assert name in state_dict
            state_dict[name] = master_params[i]
    return state_dict
|
def state_dict_to_master_params(model, state_dict, use_fp16):
    """Inverse of master_params_to_state_dict: rebuild master params from a
    state dict, flattening into fp32 master tensors when use_fp16 is set."""
    if use_fp16:
        named = [(name, state_dict[name]) for name, _ in model.named_parameters()]
        return make_master_params(get_param_groups_and_shapes(named))
    return [state_dict[name] for name, _ in model.named_parameters()]
|
def zero_master_grads(master_params):
    """Drop gradients entirely by setting .grad to None on every master param."""
    for p in master_params:
        p.grad = None
|
def zero_grad(model_params):
    """Detach and zero existing gradients in place; params without a grad
    are skipped."""
    for p in model_params:
        if p.grad is None:
            continue
        p.grad.detach_()
        p.grad.zero_()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.