code stringlengths 17 6.64M |
|---|
def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, algo, logger):
    """Run one epoch of bi-level NAS search.

    For every batch, alternate between (1) a shared-weight update on the
    "base" split and (2) an architecture update on the "arch" split.  The
    forward/sampling mode is selected per batch by `algo`
    (setn / gdas / darts* / random / enas).

    Returns a 6-tuple of epoch averages:
    (base loss, base top-1, base top-5, arch loss, arch top-1, arch top-5).
    """
    (data_time, batch_time) = (AverageMeter(), AverageMeter())
    (base_losses, base_top1, base_top5) = (AverageMeter(), AverageMeter(), AverageMeter())
    (arch_losses, arch_top1, arch_top5) = (AverageMeter(), AverageMeter(), AverageMeter())
    end = time.time()
    network.train()
    for (step, (base_inputs, base_targets, arch_inputs, arch_targets)) in enumerate(xloader):
        # Per-step LR schedule: second argument is fractional progress within the epoch.
        scheduler.update(None, ((1.0 * step) / len(xloader)))
        base_inputs = base_inputs.cuda(non_blocking=True)
        arch_inputs = arch_inputs.cuda(non_blocking=True)
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        data_time.update((time.time() - end))
        # --- phase 1: choose the forward mode, then update shared weights ---
        if (algo == 'setn'):
            sampled_arch = network.dync_genotype(True)
            network.set_cal_mode('dynamic', sampled_arch)
        elif (algo == 'gdas'):
            network.set_cal_mode('gdas', None)
        elif algo.startswith('darts'):
            network.set_cal_mode('joint', None)
        elif (algo == 'random'):
            network.set_cal_mode('urs', None)
        elif (algo == 'enas'):
            # The controller only samples an architecture here; it is trained
            # separately (see train_controller).
            with torch.no_grad():
                network.controller.eval()
                (_, _, sampled_arch) = network.controller()
            network.set_cal_mode('dynamic', sampled_arch)
        else:
            raise ValueError('Invalid algo name : {:}'.format(algo))
        network.zero_grad()
        (_, logits) = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        w_optimizer.step()
        (base_prec1, base_prec5) = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
        base_losses.update(base_loss.item(), base_inputs.size(0))
        base_top1.update(base_prec1.item(), base_inputs.size(0))
        base_top5.update(base_prec5.item(), base_inputs.size(0))
        # --- phase 2: switch mode and update architecture parameters ---
        if (algo == 'setn'):
            # NOTE(review): single-argument call here, while every other
            # set_cal_mode call passes an explicit second argument — presumably
            # the second parameter has a default; confirm against the model API.
            network.set_cal_mode('joint')
        elif (algo == 'gdas'):
            network.set_cal_mode('gdas', None)
        elif algo.startswith('darts'):
            network.set_cal_mode('joint', None)
        elif (algo == 'random'):
            network.set_cal_mode('urs', None)
        # For 'enas' the 'dynamic' mode from phase 1 is intentionally kept;
        # any other algorithm name is invalid.
        elif (algo != 'enas'):
            raise ValueError('Invalid algo name : {:}'.format(algo))
        network.zero_grad()
        if (algo == 'darts-v2'):
            # Second-order DARTS: the unrolled helper computes the arch gradient.
            (arch_loss, logits) = backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets)
            a_optimizer.step()
        elif ((algo == 'random') or (algo == 'enas')):
            # No gradient-based architecture update; just measure validation loss.
            with torch.no_grad():
                (_, logits) = network(arch_inputs)
                arch_loss = criterion(logits, arch_targets)
        else:
            (_, logits) = network(arch_inputs)
            arch_loss = criterion(logits, arch_targets)
            arch_loss.backward()
            a_optimizer.step()
        (arch_prec1, arch_prec5) = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
        arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        if (((step % print_freq) == 0) or ((step + 1) == len(xloader))):
            Sstr = (('*SEARCH* ' + time_string()) + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader)))
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
            Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)
            Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)
            logger.log(((((((Sstr + ' ') + Tstr) + ' ') + Wstr) + ' ') + Astr))
    return (base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg)
|
def train_controller(xloader, network, criterion, optimizer, prev_baseline, epoch_str, print_freq, logger):
    """Train the ENAS RL controller for one round.

    The shared network stays in eval mode while the controller samples
    architectures; each sample is rewarded by its validation top-1 accuracy
    (plus an entropy bonus) and the controller is updated with REINFORCE
    against an exponential-moving-average baseline.

    Args:
        xloader: validation loader used to score sampled architectures.
        network: super-network exposing `.controller` and `set_cal_mode`.
        criterion: unused here; kept for call-signature compatibility.
        optimizer: optimizer over the controller's parameters.
        prev_baseline: baseline from the previous round, or None for the first.
        epoch_str, print_freq, logger: logging controls.

    Returns:
        (avg loss, avg validation accuracy, avg baseline, avg reward).
    """
    (data_time, batch_time) = (AverageMeter(), AverageMeter())
    (GradnormMeter, LossMeter, ValAccMeter, EntropyMeter, BaselineMeter, RewardMeter, xend) = (AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), time.time())
    # ENAS hyper-parameters: gradients are aggregated over 20 samples per
    # optimizer step, for 50 steps; baseline decay and entropy weight follow
    # the original ENAS setup.
    controller_num_aggregate = 20
    controller_train_steps = 50
    controller_bl_dec = 0.99
    controller_entropy_weight = 0.0001
    network.eval()
    network.controller.train()
    network.controller.zero_grad()
    loader_iter = iter(xloader)
    for step in range(controller_train_steps * controller_num_aggregate):
        try:
            (inputs, targets) = next(loader_iter)
        except StopIteration:
            # BUG FIX: was a bare `except:` that would also swallow unrelated
            # errors (e.g. KeyboardInterrupt); only loader exhaustion should
            # restart the iterator.
            loader_iter = iter(xloader)
            (inputs, targets) = next(loader_iter)
        inputs = inputs.cuda(non_blocking=True)
        targets = targets.cuda(non_blocking=True)
        data_time.update(time.time() - xend)
        (log_prob, entropy, sampled_arch) = network.controller()
        with torch.no_grad():
            # Score the sampled architecture with one validation batch;
            # val_top1 is converted from percent to [0, 1].
            network.set_cal_mode('dynamic', sampled_arch)
            (_, logits) = network(inputs)
            (val_top1, val_top5) = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
            val_top1 = val_top1.view(-1) / 100
        reward = val_top1 + (controller_entropy_weight * entropy)
        if prev_baseline is None:
            baseline = val_top1
        else:
            # Exponential moving average of the reward.
            baseline = prev_baseline - ((1 - controller_bl_dec) * (prev_baseline - reward))
        # REINFORCE loss: -log_prob * advantage.
        loss = (-1) * log_prob * (reward - baseline)
        RewardMeter.update(reward.item())
        BaselineMeter.update(baseline.item())
        ValAccMeter.update(val_top1.item() * 100)
        LossMeter.update(loss.item())
        EntropyMeter.update(entropy.item())
        # Average the gradient over the aggregation window.
        loss = loss / controller_num_aggregate
        loss.backward(retain_graph=True)
        batch_time.update(time.time() - xend)
        xend = time.time()
        if ((step + 1) % controller_num_aggregate) == 0:
            # One optimizer step per aggregated group of samples.
            grad_norm = torch.nn.utils.clip_grad_norm_(network.controller.parameters(), 5.0)
            GradnormMeter.update(grad_norm)
            optimizer.step()
            network.controller.zero_grad()
        if (step % print_freq) == 0:
            Sstr = ('*Train-Controller* ' + time_string()) + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, controller_train_steps * controller_num_aggregate)
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
            Wstr = '[Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Reward {reward.val:.2f} ({reward.avg:.2f})] Baseline {basel.val:.2f} ({basel.avg:.2f})'.format(loss=LossMeter, top1=ValAccMeter, reward=RewardMeter, basel=BaselineMeter)
            Estr = 'Entropy={:.4f} ({:.4f})'.format(EntropyMeter.val, EntropyMeter.avg)
            logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Estr)
    return (LossMeter.avg, ValAccMeter.avg, BaselineMeter.avg, RewardMeter.avg)
|
def get_best_arch(xloader, network, n_samples, algo):
    """Select the best candidate architecture under the current shared weights.

    Candidates come from the algorithm-specific source: top-K sampling for
    random/setn, the differentiable genotype for darts*/gdas, or controller
    samples for enas.  Each candidate is scored with a single validation
    batch (a cheap proxy) and the one with the highest top-1 wins.

    Returns:
        (best architecture, its validation top-1 accuracy).
    """
    with torch.no_grad():
        network.eval()
        if algo == 'random':
            (archs, valid_accs) = (network.return_topK(n_samples, True), [])
        elif algo == 'setn':
            (archs, valid_accs) = (network.return_topK(n_samples, False), [])
        elif algo.startswith('darts') or (algo == 'gdas'):
            arch = network.genotype
            (archs, valid_accs) = ([arch], [])
        elif algo == 'enas':
            (archs, valid_accs) = ([], [])
            for _ in range(n_samples):
                (_, _, sampled_arch) = network.controller()
                archs.append(sampled_arch)
        else:
            raise ValueError('Invalid algorithm name : {:}'.format(algo))
        loader_iter = iter(xloader)
        for (i, sampled_arch) in enumerate(archs):
            network.set_cal_mode('dynamic', sampled_arch)
            try:
                (inputs, targets) = next(loader_iter)
            except StopIteration:
                # BUG FIX: was a bare `except:` that hid any error raised by
                # the loader; only exhaustion should restart the iterator.
                loader_iter = iter(xloader)
                (inputs, targets) = next(loader_iter)
            (_, logits) = network(inputs.cuda(non_blocking=True))
            (val_top1, val_top5) = obtain_accuracy(logits.cpu().data, targets.data, topk=(1, 5))
            valid_accs.append(val_top1.item())
        best_idx = np.argmax(valid_accs)
        (best_arch, best_valid_acc) = (archs[best_idx], valid_accs[best_idx])
        return (best_arch, best_valid_acc)
|
def valid_func(xloader, network, criterion, algo, logger):
    """Evaluate the network on the validation loader.

    `algo` and `logger` are accepted but not used; they keep the signature
    uniform with the other per-epoch routines.

    Returns:
        (average loss, average top-1 accuracy, average top-5 accuracy).
    """
    data_time, batch_time = AverageMeter(), AverageMeter()
    loss_meter, top1_meter, top5_meter = AverageMeter(), AverageMeter(), AverageMeter()
    tick = time.time()
    with torch.no_grad():
        network.eval()
        for step, (arch_inputs, arch_targets) in enumerate(xloader):
            arch_targets = arch_targets.cuda(non_blocking=True)
            data_time.update(time.time() - tick)
            # Forward pass only; gradients are disabled for the whole loop.
            _, logits = network(arch_inputs.cuda(non_blocking=True))
            loss = criterion(logits, arch_targets)
            prec1, prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
            batch = arch_inputs.size(0)
            loss_meter.update(loss.item(), batch)
            top1_meter.update(prec1.item(), batch)
            top5_meter.update(prec5.item(), batch)
            batch_time.update(time.time() - tick)
            tick = time.time()
    return (loss_meter.avg, top1_meter.avg, top5_meter.avg)
|
def main(xargs):
    """Entry point: run weight-sharing NAS (`xargs.algo`) on a NATS-Bench
    topology search space, checkpointing and logging every epoch, and report
    the final searched genotype (optionally cross-checked against the API).
    """
    assert torch.cuda.is_available(), 'CUDA is not available.'
    # Deterministic cuDNN so runs with the same seed are reproducible.
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    # NOTE(review): uses module-level `args` instead of the `xargs` parameter
    # here (and in the checkpoint payload below) — presumably the same object;
    # confirm at the call site.
    logger = prepare_logger(args)
    (train_data, valid_data, xshape, class_num) = get_datasets(xargs.dataset, xargs.data_path, (- 1))
    # `overwite_epochs` [sic] optionally overrides the epoch count from the config.
    if (xargs.overwite_epochs is None):
        extra_info = {'class_num': class_num, 'xshape': xshape}
    else:
        extra_info = {'class_num': class_num, 'xshape': xshape, 'epochs': xargs.overwite_epochs}
    config = load_config(xargs.config_path, extra_info, logger)
    (search_loader, train_loader, valid_loader) = get_nas_search_loaders(train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/', (config.batch_size, config.test_batch_size), xargs.workers)
    logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))
    logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
    # Build the super-network for the chosen search space and algorithm.
    search_space = get_search_spaces(xargs.search_space, 'nats-bench')
    model_config = dict2config(dict(name='generic', C=xargs.channel, N=xargs.num_cells, max_nodes=xargs.max_nodes, num_classes=class_num, space=search_space, affine=bool(xargs.affine), track_running_stats=bool(xargs.track_running_stats)), None)
    logger.log('search space : {:}'.format(search_space))
    logger.log('model config : {:}'.format(model_config))
    search_model = get_cell_based_tiny_net(model_config)
    search_model.set_algo(xargs.algo)
    # Separate optimizers: SGD-style for weights, Adam for architecture params.
    (w_optimizer, w_scheduler, criterion) = get_optim_scheduler(search_model.weights, config)
    a_optimizer = torch.optim.Adam(search_model.alphas, lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay, eps=xargs.arch_eps)
    logger.log('w-optimizer : {:}'.format(w_optimizer))
    logger.log('a-optimizer : {:}'.format(a_optimizer))
    logger.log('w-scheduler : {:}'.format(w_scheduler))
    logger.log('criterion   : {:}'.format(criterion))
    params = count_parameters_in_MB(search_model)
    logger.log('The parameters of the search model = {:.2f} MB'.format(params))
    logger.log('search-space : {:}'.format(search_space))
    # Optional NATS-Bench API for querying ground-truth accuracy of genotypes.
    if bool(xargs.use_api):
        api = create('./output/NATS-Bench-topology/NATS-tss-v1_0-daa55-simple', 'topology', fast_mode=True, verbose=False)
    else:
        api = None
    logger.log('{:} create API = {:} done'.format(time_string(), api))
    (last_info, model_base_path, model_best_path) = (logger.path('info'), logger.path('model'), logger.path('best'))
    (network, criterion) = (search_model.cuda(), criterion.cuda())
    # NOTE(review): this line duplicates the identical assignment two lines
    # above; harmless, but one of the two could be removed.
    (last_info, model_base_path, model_best_path) = (logger.path('info'), logger.path('model'), logger.path('best'))
    # Resume from the last-info checkpoint when one exists.
    if last_info.exists():
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info['epoch']
        checkpoint = torch.load(last_info['last_checkpoint'])
        genotypes = checkpoint['genotypes']
        baseline = checkpoint['baseline']
        valid_accuracies = checkpoint['valid_accuracies']
        search_model.load_state_dict(checkpoint['search_model'])
        w_scheduler.load_state_dict(checkpoint['w_scheduler'])
        w_optimizer.load_state_dict(checkpoint['w_optimizer'])
        a_optimizer.load_state_dict(checkpoint['a_optimizer'])
        logger.log("=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch))
    else:
        logger.log('=> do not find the last-info file : {:}'.format(last_info))
        # Seed the genotype history with one random architecture at key -1.
        (start_epoch, valid_accuracies, genotypes) = (0, {'best': (- 1)}, {(- 1): network.return_topK(1, True)[0]})
        baseline = None
    (start_time, search_time, epoch_time, total_epoch) = (time.time(), AverageMeter(), AverageMeter(), (config.epochs + config.warmup))
    # --------------------------- main search loop ---------------------------
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = 'Time Left: {:}'.format(convert_secs2time((epoch_time.val * (total_epoch - epoch)), True))
        epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
        logger.log('\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min(w_scheduler.get_lr())))
        # Linearly ramp the drop-path rate over training.
        network.set_drop_path((float((epoch + 1)) / total_epoch), xargs.drop_path_rate)
        if (xargs.algo == 'gdas'):
            # Anneal the Gumbel temperature from tau_max down to tau_min.
            network.set_tau((xargs.tau_max - (((xargs.tau_max - xargs.tau_min) * epoch) / (total_epoch - 1))))
            logger.log('[RESET tau as : {:} and drop_path as {:}]'.format(network.tau, network.drop_path))
        (search_w_loss, search_w_top1, search_w_top5, search_a_loss, search_a_top1, search_a_top5) = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str, xargs.print_freq, xargs.algo, logger)
        search_time.update((time.time() - start_time))
        logger.log('[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))
        logger.log('[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, search_a_loss, search_a_top1, search_a_top5))
        if (xargs.algo == 'enas'):
            # ENAS trains its controller with the arch optimizer; the baseline
            # is carried across epochs.
            (ctl_loss, ctl_acc, baseline, ctl_reward) = train_controller(valid_loader, network, criterion, a_optimizer, baseline, epoch_str, xargs.print_freq, logger)
            logger.log('[{:}] controller : loss={:}, acc={:}, baseline={:}, reward={:}'.format(epoch_str, ctl_loss, ctl_acc, baseline, ctl_reward))
        # Pick the best candidate architecture and set the matching eval mode.
        (genotype, temp_accuracy) = get_best_arch(valid_loader, network, xargs.eval_candidate_num, xargs.algo)
        if ((xargs.algo == 'setn') or (xargs.algo == 'enas')):
            network.set_cal_mode('dynamic', genotype)
        elif (xargs.algo == 'gdas'):
            network.set_cal_mode('gdas', None)
        elif xargs.algo.startswith('darts'):
            network.set_cal_mode('joint', None)
        elif (xargs.algo == 'random'):
            network.set_cal_mode('urs', None)
        else:
            raise ValueError('Invalid algorithm name : {:}'.format(xargs.algo))
        logger.log('[{:}] - [get_best_arch] : {:} -> {:}'.format(epoch_str, genotype, temp_accuracy))
        (valid_a_loss, valid_a_top1, valid_a_top5) = valid_func(valid_loader, network, criterion, xargs.algo, logger)
        logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5, genotype))
        valid_accuracies[epoch] = valid_a_top1
        genotypes[epoch] = genotype
        logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))
        # Save the full training state, then the small "last-info" pointer file.
        save_path = save_checkpoint({'epoch': (epoch + 1), 'args': deepcopy(xargs), 'baseline': baseline, 'search_model': search_model.state_dict(), 'w_optimizer': w_optimizer.state_dict(), 'a_optimizer': a_optimizer.state_dict(), 'w_scheduler': w_scheduler.state_dict(), 'genotypes': genotypes, 'valid_accuracies': valid_accuracies}, model_base_path, logger)
        last_info = save_checkpoint({'epoch': (epoch + 1), 'args': deepcopy(args), 'last_checkpoint': save_path}, logger.path('info'), logger)
        with torch.no_grad():
            logger.log('{:}'.format(search_model.show_alphas()))
        if (api is not None):
            # Cross-check the searched genotype against benchmark ground truth.
            logger.log('{:}'.format(api.query_by_arch(genotypes[epoch], '200')))
        epoch_time.update((time.time() - start_time))
        start_time = time.time()
    # ------------------------- final architecture ---------------------------
    start_time = time.time()
    (genotype, temp_accuracy) = get_best_arch(valid_loader, network, xargs.eval_candidate_num, xargs.algo)
    if ((xargs.algo == 'setn') or (xargs.algo == 'enas')):
        network.set_cal_mode('dynamic', genotype)
    elif (xargs.algo == 'gdas'):
        network.set_cal_mode('gdas', None)
    elif xargs.algo.startswith('darts'):
        network.set_cal_mode('joint', None)
    elif (xargs.algo == 'random'):
        network.set_cal_mode('urs', None)
    else:
        raise ValueError('Invalid algorithm name : {:}'.format(xargs.algo))
    search_time.update((time.time() - start_time))
    (valid_a_loss, valid_a_top1, valid_a_top5) = valid_func(valid_loader, network, criterion, xargs.algo, logger)
    logger.log('Last : the gentotype is : {:}, with the validation accuracy of {:.3f}%.'.format(genotype, valid_a_top1))
    logger.log(('\n' + ('-' * 100)))
    logger.log('[{:}] run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(xargs.algo, total_epoch, search_time.sum, genotype))
    if (api is not None):
        logger.log('{:}'.format(api.query_by_arch(genotype, '200')))
    logger.close()
|
def version():
    """Return the current version string.

    Earlier entries in the list record the version history; the last entry
    is the live version.  (The original shadowed the list with a second
    assignment, leaving the first as dead code.)
    """
    versions = ['0.9.9', '1.0.0']
    return versions[-1]
|
def arg_str2bool(v):
    """Interpret a command-line value as a boolean.

    Booleans pass through unchanged; strings are matched case-insensitively
    against common yes/no spellings.  Anything else raises
    argparse.ArgumentTypeError.
    """
    if isinstance(v, bool):
        return v
    token = v.lower()
    if token in ('yes', 'true', 't', 'y', '1'):
        return True
    if token in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
|
def obtain_attention_args():
    """Parse and validate command-line options for attention-model training."""
    parser = argparse.ArgumentParser(description='Train a classification model on typical image classification datasets.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, value-type, help-message) triples, registered in one pass.
    specs = (
        ('--resume', str, 'Resume path.'),
        ('--init_model', str, 'The initialization model path.'),
        ('--model_config', str, 'The path to the model configuration'),
        ('--optim_config', str, 'The path to the optimizer configuration'),
        ('--procedure', str, 'The procedure basic prefix.'),
        ('--att_channel', int, '.'),
        ('--att_spatial', str, '.'),
        ('--att_active', str, '.'),
    )
    for flag, vtype, msg in specs:
        parser.add_argument(flag, type=vtype, help=msg)
    add_shared_args(parser)
    parser.add_argument('--batch_size', type=int, default=2, help='Batch size for training.')
    args = parser.parse_args()
    # A missing or non-positive seed is replaced by a fresh random one.
    if args.rand_seed is None or args.rand_seed < 0:
        args.rand_seed = random.randint(1, 100000)
    assert args.save_dir is not None, 'save-path argument can not be None'
    return args
|
def obtain_basic_args():
    """Parse and validate command-line options for basic classification training."""
    parser = argparse.ArgumentParser(description='Train a classification model on typical image classification datasets.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, value-type, help-message) triples without defaults.
    specs = (
        ('--resume', str, 'Resume path.'),
        ('--init_model', str, 'The initialization model path.'),
        ('--model_config', str, 'The path to the model configuration'),
        ('--optim_config', str, 'The path to the optimizer configuration'),
        ('--procedure', str, 'The procedure basic prefix.'),
    )
    for flag, vtype, msg in specs:
        parser.add_argument(flag, type=vtype, help=msg)
    # Options with explicit defaults are registered individually.
    parser.add_argument('--model_source', type=str, default='normal', help='The source of model defination.')
    parser.add_argument('--extra_model_path', type=str, default=None, help='The extra model ckp file (help to indicate the searched architecture).')
    add_shared_args(parser)
    parser.add_argument('--batch_size', type=int, default=2, help='Batch size for training.')
    args = parser.parse_args()
    # A missing or non-positive seed is replaced by a fresh random one.
    if args.rand_seed is None or args.rand_seed < 0:
        args.rand_seed = random.randint(1, 100000)
    assert args.save_dir is not None, 'save-path argument can not be None'
    return args
|
def obtain_cls_init_args():
    """Parse and validate command-line options for initialization-based training."""
    parser = argparse.ArgumentParser(description='Train a classification model on typical image classification datasets.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, value-type, help-message) triples, registered in one pass.
    specs = (
        ('--resume', str, 'Resume path.'),
        ('--init_model', str, 'The initialization model path.'),
        ('--model_config', str, 'The path to the model configuration'),
        ('--optim_config', str, 'The path to the optimizer configuration'),
        ('--procedure', str, 'The procedure basic prefix.'),
        ('--init_checkpoint', str, 'The checkpoint path to the initial model.'),
    )
    for flag, vtype, msg in specs:
        parser.add_argument(flag, type=vtype, help=msg)
    add_shared_args(parser)
    parser.add_argument('--batch_size', type=int, default=2, help='Batch size for training.')
    args = parser.parse_args()
    # A missing or non-positive seed is replaced by a fresh random one.
    if args.rand_seed is None or args.rand_seed < 0:
        args.rand_seed = random.randint(1, 100000)
    assert args.save_dir is not None, 'save-path argument can not be None'
    return args
|
def obtain_cls_kd_args():
    """Parse and validate command-line options for knowledge-distillation training."""
    parser = argparse.ArgumentParser(description='Train a classification model on typical image classification datasets.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, value-type, help-message) triples, registered in one pass.
    specs = (
        ('--resume', str, 'Resume path.'),
        ('--init_model', str, 'The initialization model path.'),
        ('--model_config', str, 'The path to the model configuration'),
        ('--optim_config', str, 'The path to the optimizer configuration'),
        ('--procedure', str, 'The procedure basic prefix.'),
        ('--KD_checkpoint', str, 'The teacher checkpoint in knowledge distillation.'),
        ('--KD_alpha', float, 'The alpha parameter in knowledge distillation.'),
        ('--KD_temperature', float, 'The temperature parameter in knowledge distillation.'),
    )
    for flag, vtype, msg in specs:
        parser.add_argument(flag, type=vtype, help=msg)
    add_shared_args(parser)
    parser.add_argument('--batch_size', type=int, default=2, help='Batch size for training.')
    args = parser.parse_args()
    # A missing or non-positive seed is replaced by a fresh random one.
    if args.rand_seed is None or args.rand_seed < 0:
        args.rand_seed = random.randint(1, 100000)
    assert args.save_dir is not None, 'save-path argument can not be None'
    return args
|
def convert_param(original_lists):
    """Convert a `[type-name, value-or-values]` pair loaded from JSON.

    The first element names the target type (must be in the module-level
    `support_types`); the second is either a scalar or a list of raw values.
    Returns the converted scalar, or a list when the input value was a list.
    """
    assert isinstance(original_lists, list), 'The type is not right : {:}'.format(original_lists)
    ctype, value = original_lists[0], original_lists[1]
    assert ctype in support_types, 'Ctype={:}, support={:}'.format(ctype, support_types)
    is_list = isinstance(value, list)
    items = value if is_list else [value]

    def _convert(raw):
        # Map one raw JSON value to the requested Python type.
        if ctype == 'int':
            return int(raw)
        if ctype == 'str':
            return str(raw)
        if ctype == 'bool':
            return bool(int(raw))
        if ctype == 'float':
            return float(raw)
        if ctype == 'none':
            if raw.lower() != 'none':
                raise ValueError('For the none type, the value must be none instead of {:}'.format(raw))
            return None
        raise TypeError('Does not know this type : {:}'.format(ctype))

    outs = [_convert(item) for item in items]
    return outs if is_list else outs[0]
|
def load_config(path, extra, logger):
    """Load a JSON configuration file into an immutable namedtuple.

    Each JSON entry is a `[type, value]` pair decoded via `convert_param`;
    `extra` (optional dict) is merged on top and may override file entries.
    The path and the final configuration are echoed through `logger` when it
    supports `.log`.
    """
    path = str(path)
    if hasattr(logger, 'log'):
        logger.log(path)
    assert os.path.exists(path), 'Can not find {:}'.format(path)
    with open(path, 'r') as stream:
        raw = json.load(stream)
    content = {name: convert_param(spec) for (name, spec) in raw.items()}
    assert (extra is None) or isinstance(extra, dict), 'invalid type of extra : {:}'.format(extra)
    if isinstance(extra, dict):
        # Extra entries win over file entries on key collision.
        content = {**content, **extra}
    Arguments = namedtuple('Configure', ' '.join(content.keys()))
    config = Arguments(**content)
    if hasattr(logger, 'log'):
        logger.log('{:}'.format(config))
    return config
|
def configure2str(config, xpath=None):
    """Serialize a configuration into the `[type, value]` JSON-like text format.

    Accepts a dict or a namedtuple (converted via `_asdict`).  When `xpath`
    is given, the rendered text is also written to that file (replacing any
    existing file).  Returns the rendered string.
    """
    if not isinstance(config, dict):
        config = config._asdict()

    def quoted(x):
        # Wrap a value in double quotes.
        return '"{:}"'.format(x)

    def type_name(x):
        # Infer the serialized type tag; for lists, the first element decides.
        if isinstance(x, list):
            x = x[0]
        if isinstance(x, str):
            return 'str'
        elif isinstance(x, bool):
            # bool must be tested before int (bool is an int subclass).
            return 'bool'
        elif isinstance(x, int):
            return 'int'
        elif isinstance(x, float):
            return 'float'
        elif x is None:
            return 'none'
        else:
            raise ValueError('invalid : {:}'.format(x))

    def encode(x, xtype):
        # Render a scalar or list of scalars as quoted token(s).
        is_list = isinstance(x, list)
        items = x if is_list else [x]
        pieces = []
        for item in items:
            if xtype == 'bool':
                pieces.append(quoted(int(item)))
            elif xtype == 'none':
                pieces.append(quoted('None'))
            else:
                pieces.append(quoted(item))
        if is_list:
            return '[{:}]'.format(', '.join(pieces))
        return pieces[0]

    rendered = []
    for key, value in config.items():
        xtype = type_name(value)
        rendered.append(' {:20s} : [{:8s}, {:}]'.format(quoted(key), quoted(xtype), encode(value, xtype)))
    Fstring = ('{\n' + ',\n'.join(rendered)) + '\n}'
    if xpath is not None:
        parent = Path(xpath).resolve().parent
        parent.mkdir(parents=True, exist_ok=True)
        if osp.isfile(xpath):
            os.remove(xpath)
        with open(xpath, 'w') as text_file:
            text_file.write('{:}'.format(Fstring))
    return Fstring
|
def dict2config(xdict, logger):
    """Turn a plain dict into an immutable namedtuple configuration.

    The result is echoed through `logger` when it exposes `.log`; passing
    `None` (or any object without `.log`) silences the echo.
    """
    assert isinstance(xdict, dict), 'invalid type : {:}'.format(type(xdict))
    Arguments = namedtuple('Configure', ' '.join(xdict.keys()))
    config = Arguments(**xdict)
    if hasattr(logger, 'log'):
        logger.log('{:}'.format(config))
    return config
|
def obtain_pruning_args():
    """Parse and validate command-line options for channel-pruning training."""
    parser = argparse.ArgumentParser(description='Train a classification model on typical image classification datasets.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, value-type, help-message) triples, registered in one pass.
    specs = (
        ('--resume', str, 'Resume path.'),
        ('--init_model', str, 'The initialization model path.'),
        ('--model_config', str, 'The path to the model configuration'),
        ('--optim_config', str, 'The path to the optimizer configuration'),
        ('--procedure', str, 'The procedure basic prefix.'),
        ('--keep_ratio', float, 'The left channel ratio compared to the original network.'),
        ('--model_version', str, 'The network version.'),
        ('--KD_alpha', float, 'The alpha parameter in knowledge distillation.'),
        ('--KD_temperature', float, 'The temperature parameter in knowledge distillation.'),
        ('--Regular_W_feat', float, 'The .'),
        ('--Regular_W_conv', float, 'The .'),
    )
    for flag, vtype, msg in specs:
        parser.add_argument(flag, type=vtype, help=msg)
    add_shared_args(parser)
    parser.add_argument('--batch_size', type=int, default=2, help='Batch size for training.')
    args = parser.parse_args()
    # A missing or non-positive seed is replaced by a fresh random one.
    if args.rand_seed is None or args.rand_seed < 0:
        args.rand_seed = random.randint(1, 100000)
    assert args.save_dir is not None, 'save-path argument can not be None'
    assert args.keep_ratio > 0 and args.keep_ratio <= 1, 'invalid keep ratio : {:}'.format(args.keep_ratio)
    return args
|
def obtain_RandomSearch_args():
    """Parse and validate command-line options for random architecture search."""
    parser = argparse.ArgumentParser(description='Train a classification model on typical image classification datasets.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, value-type, help-message) triples, registered in one pass.
    specs = (
        ('--resume', str, 'Resume path.'),
        ('--init_model', str, 'The initialization model path.'),
        ('--expect_flop', float, 'The expected flop keep ratio.'),
        ('--arch_nums', int, 'The maximum number of running random arch generating..'),
        ('--model_config', str, 'The path to the model configuration'),
        ('--optim_config', str, 'The path to the optimizer configuration'),
        ('--procedure', str, 'The procedure basic prefix.'),
    )
    for flag, vtype, msg in specs:
        parser.add_argument(flag, type=vtype, help=msg)
    # Constrained option registered individually (has `choices`).
    parser.add_argument('--random_mode', type=str, choices=['random', 'fix'], help='The path to the optimizer configuration')
    add_shared_args(parser)
    parser.add_argument('--batch_size', type=int, default=2, help='Batch size for training.')
    args = parser.parse_args()
    # A missing or non-positive seed is replaced by a fresh random one.
    if args.rand_seed is None or args.rand_seed < 0:
        args.rand_seed = random.randint(1, 100000)
    assert args.save_dir is not None, 'save-path argument can not be None'
    return args
|
def obtain_search_args():
    """Parse and validate command-line options for differentiable shape search.

    After parsing, validates that Gumbel tau bounds are supplied and that the
    FLOP tolerance is a positive number.
    """
    parser = argparse.ArgumentParser(description='Train a classification model on typical image classification datasets.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--resume', type=str, help='Resume path.')
    parser.add_argument('--model_config', type=str, help='The path to the model configuration')
    parser.add_argument('--optim_config', type=str, help='The path to the optimizer configuration')
    parser.add_argument('--split_path', type=str, help='The split file path.')
    parser.add_argument('--gumbel_tau_max', type=float, help='The maximum tau for Gumbel.')
    parser.add_argument('--gumbel_tau_min', type=float, help='The minimum tau for Gumbel.')
    parser.add_argument('--procedure', type=str, help='The procedure basic prefix.')
    parser.add_argument('--FLOP_ratio', type=float, help='The expected FLOP ratio.')
    parser.add_argument('--FLOP_weight', type=float, help='The loss weight for FLOP.')
    parser.add_argument('--FLOP_tolerant', type=float, help='The tolerant range for FLOP.')
    parser.add_argument('--ablation_num_select', type=int, help='The number of randomly selected channels.')
    add_shared_args(parser)
    parser.add_argument('--batch_size', type=int, default=2, help='Batch size for training.')
    args = parser.parse_args()
    # A missing or non-positive seed is replaced by a fresh random one.
    if (args.rand_seed is None) or (args.rand_seed < 0):
        args.rand_seed = random.randint(1, 100000)
    assert (args.save_dir is not None), 'save-path argument can not be None'
    assert (args.gumbel_tau_max is not None) and (args.gumbel_tau_min is not None)
    # BUG FIX: the assertion message referenced the bare name `FLOP_tolerant`,
    # which raised NameError instead of the intended AssertionError whenever
    # the check failed; it must read `args.FLOP_tolerant`.
    assert (args.FLOP_tolerant is not None) and (args.FLOP_tolerant > 0), 'invalid FLOP_tolerant : {:}'.format(args.FLOP_tolerant)
    return args
|
def obtain_search_single_args():
    """Parse and validate command-line options for single-shape search.

    After parsing, validates that Gumbel tau bounds are supplied and that the
    FLOP tolerance is a positive number.
    """
    parser = argparse.ArgumentParser(description='Train a classification model on typical image classification datasets.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--resume', type=str, help='Resume path.')
    parser.add_argument('--model_config', type=str, help='The path to the model configuration')
    parser.add_argument('--optim_config', type=str, help='The path to the optimizer configuration')
    parser.add_argument('--split_path', type=str, help='The split file path.')
    parser.add_argument('--search_shape', type=str, help='The shape to be searched.')
    parser.add_argument('--gumbel_tau_max', type=float, help='The maximum tau for Gumbel.')
    parser.add_argument('--gumbel_tau_min', type=float, help='The minimum tau for Gumbel.')
    parser.add_argument('--procedure', type=str, help='The procedure basic prefix.')
    parser.add_argument('--FLOP_ratio', type=float, help='The expected FLOP ratio.')
    parser.add_argument('--FLOP_weight', type=float, help='The loss weight for FLOP.')
    parser.add_argument('--FLOP_tolerant', type=float, help='The tolerant range for FLOP.')
    add_shared_args(parser)
    parser.add_argument('--batch_size', type=int, default=2, help='Batch size for training.')
    args = parser.parse_args()
    # A missing or non-positive seed is replaced by a fresh random one.
    if (args.rand_seed is None) or (args.rand_seed < 0):
        args.rand_seed = random.randint(1, 100000)
    assert (args.save_dir is not None), 'save-path argument can not be None'
    assert (args.gumbel_tau_max is not None) and (args.gumbel_tau_min is not None)
    # BUG FIX: the assertion message referenced the bare name `FLOP_tolerant`,
    # which raised NameError instead of the intended AssertionError whenever
    # the check failed; it must read `args.FLOP_tolerant`.
    assert (args.FLOP_tolerant is not None) and (args.FLOP_tolerant > 0), 'invalid FLOP_tolerant : {:}'.format(args.FLOP_tolerant)
    return args
|
def add_shared_args(parser):
    """Register the command-line options shared by all training entry points."""
    # (flag, value-type, default, help-message); default None matches
    # argparse's implicit default for options registered without one.
    shared_options = (
        ('--dataset', str, None, 'The dataset name.'),
        ('--data_path', str, None, 'The dataset name.'),
        ('--cutout_length', int, None, 'The cutout length, negative means not use.'),
        ('--print_freq', int, 100, 'print frequency (default: 200)'),
        ('--print_freq_eval', int, 100, 'print frequency (default: 200)'),
        ('--eval_frequency', int, 1, 'evaluation frequency (default: 200)'),
        ('--save_dir', str, None, 'Folder to save checkpoints and log.'),
        ('--workers', int, 8, 'number of data loading workers (default: 8)'),
        ('--rand_seed', int, -1, 'manual seed'),
    )
    for flag, vtype, default, msg in shared_options:
        parser.add_argument(flag, type=vtype, default=default, help=msg)
|
class PrintLogger(object):
    """A minimal logger that writes everything to stdout only."""

    def __init__(self):
        # No files or writers are opened; this logger only prints.
        self.name = 'PrintLogger'

    def log(self, string):
        """Print the message to stdout."""
        print(string)

    def close(self):
        """Print a closing banner; there is nothing to release."""
        print((('-' * 30) + ' close printer ') + ('-' * 30))
|
class Logger(object):
    """File-backed experiment logger with optional TensorFlow summaries.

    Every message is appended to ``<log_dir>/seed-<seed>-T-<timestamp>.log``.
    When *use_tf* is set, scalar/image/histogram summaries are also written to
    a TensorBoard directory. ``Path``, ``time``, ``sys``, ``warnings``, ``np``
    and ``tf`` come from file-level imports not visible in this block.
    """

    def __init__(self, log_dir, seed, create_model_dir=True, use_tf=False):
        """Create log (and optionally checkpoint) dirs and open the log file.

        Args:
            log_dir: root directory for all artifacts (created if missing).
            seed: experiment seed; embedded in every file name.
            create_model_dir: also create ``<log_dir>/checkpoint``.
            use_tf: enable TensorBoard summary writing.
        """
        self.seed = int(seed)
        self.log_dir = Path(log_dir)
        self.model_dir = (Path(log_dir) / 'checkpoint')
        self.log_dir.mkdir(parents=True, exist_ok=True)
        if create_model_dir:
            self.model_dir.mkdir(parents=True, exist_ok=True)
        self.use_tf = bool(use_tf)
        self.tensorboard_dir = (self.log_dir / 'tensorboard-{:}'.format(time.strftime('%d-%h', time.gmtime(time.time()))))
        self.logger_path = (self.log_dir / 'seed-{:}-T-{:}.log'.format(self.seed, time.strftime('%d-%h-at-%H-%M-%S', time.gmtime(time.time()))))
        self.logger_file = open(self.logger_path, 'w')
        if self.use_tf:
            # mode 509 == 0o775 (rwxrwxr-x).
            self.tensorboard_dir.mkdir(mode=509, parents=True, exist_ok=True)
            # NOTE(review): tf.summary.FileWriter is the TF 1.x API — confirm
            # the pinned TensorFlow version still provides it.
            self.writer = tf.summary.FileWriter(str(self.tensorboard_dir))
        else:
            self.writer = None

    def __repr__(self):
        return '{name}(dir={log_dir}, use-tf={use_tf}, writer={writer})'.format(name=self.__class__.__name__, **self.__dict__)

    def path(self, mode):
        """Map a symbolic *mode* to a concrete file path.

        Raises TypeError for any mode outside ('model', 'best', 'info', 'log', None).
        """
        valids = ('model', 'best', 'info', 'log', None)
        if (mode is None):
            return self.log_dir
        elif (mode == 'model'):
            return (self.model_dir / 'seed-{:}-basic.pth'.format(self.seed))
        elif (mode == 'best'):
            return (self.model_dir / 'seed-{:}-best.pth'.format(self.seed))
        elif (mode == 'info'):
            return (self.log_dir / 'seed-{:}-last-info.pth'.format(self.seed))
        elif (mode == 'log'):
            return self.log_dir
        else:
            raise TypeError('Unknow mode = {:}, valid modes = {:}'.format(mode, valids))

    def extract_log(self):
        # Returns the open file handle itself, not the log's contents.
        return self.logger_file

    def close(self):
        """Close the log file and, when present, the TF summary writer."""
        self.logger_file.close()
        if (self.writer is not None):
            self.writer.close()

    def log(self, string, save=True, stdout=False):
        """Echo *string* (raw stdout or print) and append it to the log file.

        Args:
            save: also write the message (plus newline) to the log file.
            stdout: write without a trailing newline via sys.stdout instead of print.
        """
        if stdout:
            sys.stdout.write(string)
            sys.stdout.flush()
        else:
            print(string)
        if save:
            self.logger_file.write('{:}\n'.format(string))
            self.logger_file.flush()

    def scalar_summary(self, tags, values, step):
        """Log scalar value(s); accepts one tag/value or two parallel lists."""
        if (not self.use_tf):
            warnings.warn('Do set use-tensorflow installed but call scalar_summary')
        else:
            assert (isinstance(tags, list) == isinstance(values, list)), 'Type : {:} vs {:}'.format(type(tags), type(values))
            if (not isinstance(tags, list)):
                # Normalize the scalar case to the list case.
                (tags, values) = ([tags], [values])
            for (tag, value) in zip(tags, values):
                summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
                self.writer.add_summary(summary, step)
            self.writer.flush()

    def image_summary(self, tag, images, step):
        """Log a list of images to TensorBoard (warns and returns without TF)."""
        import scipy
        if (not self.use_tf):
            warnings.warn('Do set use-tensorflow installed but call scalar_summary')
            return
        img_summaries = []
        for (i, img) in enumerate(images):
            try:
                # StringIO / BytesIO are presumably imported at file top — verify.
                s = StringIO()
            except:
                s = BytesIO()
            # NOTE(review): scipy.misc.toimage was removed in SciPy 1.2 —
            # confirm the pinned SciPy version still provides it.
            scipy.misc.toimage(img).save(s, format='png')
            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=img.shape[0], width=img.shape[1])
            img_summaries.append(tf.Summary.Value(tag='{}/{}'.format(tag, i), image=img_sum))
        summary = tf.Summary(value=img_summaries)
        self.writer.add_summary(summary, step)
        self.writer.flush()

    def histo_summary(self, tag, values, step, bins=1000):
        """Log a histogram of *values*; raises ValueError when TF is disabled."""
        if (not self.use_tf):
            raise ValueError('Do not have tensorflow')
        import tensorflow as tf
        (counts, bin_edges) = np.histogram(values, bins=bins)
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum((values ** 2)))
        # TF drops the left-most edge: bucket_limit[i] is each bin's right edge.
        bin_edges = bin_edges[1:]
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush()
|
def pickle_save(obj, path):
    """Serialize *obj* to *path* with pickle, creating parent dirs as needed."""
    target = Path(path)
    target.parent.mkdir(parents=True, exist_ok=True)
    with target.open('wb') as stream:
        pickle.dump(obj, stream)
|
def pickle_load(path):
    """Load and return the pickled object stored at *path*.

    Raises:
        ValueError: when the file does not exist (kept for caller compatibility).
    """
    location = Path(path)
    if not location.exists():
        raise ValueError('{:} does not exists'.format(path))
    with location.open('rb') as stream:
        return pickle.load(stream)
|
def time_for_file():
    """Return a filename-friendly UTC timestamp like '05-Jan-at-12-30-59'."""
    return time.strftime('%d-%h-at-%H-%M-%S', time.gmtime(time.time()))
|
def time_string():
    """Return the current UTC time as '[YYYY-MM-DD HH:MM:SS]'."""
    stamp = time.strftime('%Y-%m-%d %X', time.gmtime(time.time()))
    return '[{:}]'.format(stamp)
|
def time_string_short():
    """Return the current UTC date compactly as 'YYYYMMDD'."""
    return time.strftime('%Y%m%d', time.gmtime(time.time()))
|
def time_print(string, is_print=True):
    """Print *string* prefixed with the current timestamp when *is_print*."""
    if not is_print:
        return
    print('{} : {}'.format(time_string(), string))
|
def convert_secs2time(epoch_time, return_str=False):
    """Split a duration in seconds into hours, minutes, seconds.

    Args:
        epoch_time: non-negative duration in seconds (int or float).
        return_str: when True return '[HH:MM:SS]'; otherwise an (h, m, s) tuple.
    """
    # divmod replaces the original repeated-subtraction arithmetic; truncating
    # to int first matches the old int(epoch_time / 3600) behavior.
    total_minutes, need_secs = divmod(int(epoch_time), 60)
    need_hour, need_mins = divmod(total_minutes, 60)
    if return_str:
        # Renamed the result variable: the original shadowed the builtin `str`.
        return '[{:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
    return (need_hour, need_mins, need_secs)
|
def print_log(print_string, log):
    """Emit *print_string* through *log* when it is a logger, else to stdout.

    Three sink kinds are accepted: objects exposing .log(), bare file-like
    objects (printed to stdout AND written + flushed), or None (stdout only).
    """
    message = '{:}'.format(print_string)
    if hasattr(log, 'log'):
        log.log(message)
        return
    print(message)
    if log is not None:
        log.write('{:}\n'.format(print_string))
        log.flush()
|
class Bottleneck(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-1x1 conv, BN-ReLU-3x3 conv, concat."""

    def __init__(self, nChannels, growthRate):
        super(Bottleneck, self).__init__()
        # The 1x1 conv expands to 4x the growth rate before the 3x3 conv.
        inner = 4 * growthRate
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.Conv2d(nChannels, inner, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(inner)
        self.conv2 = nn.Conv2d(inner, growthRate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        hidden = self.conv1(F.relu(self.bn1(x)))
        hidden = self.conv2(F.relu(self.bn2(hidden)))
        # Dense connectivity: append the new features to the incoming ones.
        return torch.cat((x, hidden), 1)
|
class SingleLayer(nn.Module):
    """Basic DenseNet layer: BN-ReLU-3x3 conv, then concat with the input."""

    def __init__(self, nChannels, growthRate):
        super(SingleLayer, self).__init__()
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.Conv2d(nChannels, growthRate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        new_features = self.conv1(F.relu(self.bn1(x)))
        return torch.cat((x, new_features), 1)
|
class Transition(nn.Module):
    """DenseNet transition: BN-ReLU-1x1 conv to compress channels, then 2x avg-pool."""

    def __init__(self, nChannels, nOutChannels):
        super(Transition, self).__init__()
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.Conv2d(nChannels, nOutChannels, kernel_size=1, bias=False)

    def forward(self, x):
        compressed = self.conv1(F.relu(self.bn1(x)))
        # Halve the spatial resolution between dense blocks.
        return F.avg_pool2d(compressed, 2)
|
class DenseNet(nn.Module):
    """CIFAR DenseNet: three dense blocks joined by compressing transitions.

    Args:
        growthRate: channels each dense layer adds (k in the DenseNet paper).
        depth: total depth; layers per block is (depth-4)/3, or /6 with bottleneck
            (a Bottleneck holds two convs).
        reduction: channel-compression factor applied by each Transition.
        nClasses: classifier output size.
        bottleneck: use Bottleneck layers instead of SingleLayer.
    """

    def __init__(self, growthRate, depth, reduction, nClasses, bottleneck):
        super(DenseNet, self).__init__()
        if bottleneck:
            nDenseBlocks = int(((depth - 4) / 6))
        else:
            nDenseBlocks = int(((depth - 4) / 3))
        self.message = 'CifarDenseNet : block : {:}, depth : {:}, reduction : {:}, growth-rate = {:}, class = {:}'.format(('bottleneck' if bottleneck else 'basic'), depth, reduction, growthRate, nClasses)
        nChannels = (2 * growthRate)
        self.conv1 = nn.Conv2d(3, nChannels, kernel_size=3, padding=1, bias=False)
        self.dense1 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        # Each dense block grows channels by nDenseBlocks * growthRate.
        nChannels += (nDenseBlocks * growthRate)
        nOutChannels = int(math.floor((nChannels * reduction)))
        self.trans1 = Transition(nChannels, nOutChannels)
        nChannels = nOutChannels
        self.dense2 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += (nDenseBlocks * growthRate)
        nOutChannels = int(math.floor((nChannels * reduction)))
        self.trans2 = Transition(nChannels, nOutChannels)
        nChannels = nOutChannels
        self.dense3 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += (nDenseBlocks * growthRate)
        # Final BN-ReLU and an 8x8 average pool (feature maps are 8x8 for CIFAR
        # inputs after the two 2x transitions — confirm input size is 32x32).
        self.act = nn.Sequential(nn.BatchNorm2d(nChannels), nn.ReLU(inplace=True), nn.AvgPool2d(8))
        self.fc = nn.Linear(nChannels, nClasses)
        # initialize_resnet is defined elsewhere in this file — see its
        # definition for the exact weight-init scheme.
        self.apply(initialize_resnet)

    def get_message(self):
        """Return the human-readable architecture description string."""
        return self.message

    def _make_dense(self, nChannels, growthRate, nDenseBlocks, bottleneck):
        # Stack dense layers; input channels grow by growthRate per layer.
        layers = []
        for i in range(int(nDenseBlocks)):
            if bottleneck:
                layers.append(Bottleneck(nChannels, growthRate))
            else:
                layers.append(SingleLayer(nChannels, growthRate))
            nChannels += growthRate
        return nn.Sequential(*layers)

    def forward(self, inputs):
        """Return (pooled feature vector, classification logits)."""
        out = self.conv1(inputs)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.dense3(out)
        features = self.act(out)
        features = features.view(features.size(0), (- 1))
        out = self.fc(features)
        return (features, out)
|
class Downsample(nn.Module):
    """Shortcut downsample: 2x2 avg-pool then a 1x1 conv that doubles channels."""

    def __init__(self, nIn, nOut, stride):
        super(Downsample, self).__init__()
        # Only the stride-2, channel-doubling configuration is supported.
        assert ((stride == 2) and (nOut == (2 * nIn))), 'stride:{} IO:{},{}'.format(stride, nIn, nOut)
        self.in_dim = nIn
        self.out_dim = nOut
        self.avg = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
        self.conv = nn.Conv2d(nIn, nOut, kernel_size=1, stride=1, padding=0, bias=False)

    def forward(self, x):
        pooled = self.avg(x)
        return self.conv(pooled)
|
class ConvBNReLU(nn.Module):
    """Conv2d + BatchNorm with an optional in-place ReLU.

    NOTE: a second, differently-parameterized ConvBNReLU is defined later in
    this file and shadows this name at module scope.
    """

    def __init__(self, nIn, nOut, kernel, stride, padding, bias, relu):
        super(ConvBNReLU, self).__init__()
        self.conv = nn.Conv2d(nIn, nOut, kernel_size=kernel, stride=stride, padding=padding, bias=bias)
        self.bn = nn.BatchNorm2d(nOut)
        self.relu = nn.ReLU(inplace=True) if relu else None
        self.out_dim = nOut
        self.num_conv = 1

    def forward(self, x):
        normed = self.bn(self.conv(x))
        return self.relu(normed) if self.relu else normed
|
class ResNetBasicblock(nn.Module):
    """CIFAR ResNet basic block: two 3x3 Conv-BN units plus a residual shortcut."""
    # Basic blocks do not expand channels.
    expansion = 1

    def __init__(self, inplanes, planes, stride):
        """stride must be 1 or 2; stride 2 downsamples via avg-pool Downsample,
        while a pure channel change uses a 1x1 ConvBN projection."""
        super(ResNetBasicblock, self).__init__()
        assert ((stride == 1) or (stride == 2)), 'invalid stride {:}'.format(stride)
        self.conv_a = ConvBNReLU(inplanes, planes, 3, stride, 1, False, True)
        # Second conv has no ReLU: activation is applied after the addition.
        self.conv_b = ConvBNReLU(planes, planes, 3, 1, 1, False, False)
        if (stride == 2):
            self.downsample = Downsample(inplanes, planes, stride)
        elif (inplanes != planes):
            self.downsample = ConvBNReLU(inplanes, planes, 1, 1, 0, False, False)
        else:
            self.downsample = None
        self.out_dim = planes
        self.num_conv = 2

    def forward(self, inputs):
        basicblock = self.conv_a(inputs)
        basicblock = self.conv_b(basicblock)
        if (self.downsample is not None):
            residual = self.downsample(inputs)
        else:
            residual = inputs
        # additive_func is defined elsewhere in this file; presumably it adds
        # residual + basicblock (possibly handling shape mismatch) — confirm.
        out = additive_func(residual, basicblock)
        return F.relu(out, inplace=True)
|
class ResNetBottleneck(nn.Module):
    """CIFAR ResNet bottleneck: 1x1 reduce, 3x3, 1x1 expand (4x), plus shortcut."""
    # Output channels are planes * 4.
    expansion = 4

    def __init__(self, inplanes, planes, stride):
        """stride must be 1 or 2; the stride is applied inside the 3x3 conv."""
        super(ResNetBottleneck, self).__init__()
        assert ((stride == 1) or (stride == 2)), 'invalid stride {:}'.format(stride)
        self.conv_1x1 = ConvBNReLU(inplanes, planes, 1, 1, 0, False, True)
        self.conv_3x3 = ConvBNReLU(planes, planes, 3, stride, 1, False, True)
        # Despite the name, conv_1x4 is a 1x1 conv expanding to planes * 4;
        # no ReLU here — activation follows the residual addition.
        self.conv_1x4 = ConvBNReLU(planes, (planes * self.expansion), 1, 1, 0, False, False)
        if (stride == 2):
            self.downsample = Downsample(inplanes, (planes * self.expansion), stride)
        elif (inplanes != (planes * self.expansion)):
            self.downsample = ConvBNReLU(inplanes, (planes * self.expansion), 1, 1, 0, False, False)
        else:
            self.downsample = None
        self.out_dim = (planes * self.expansion)
        self.num_conv = 3

    def forward(self, inputs):
        bottleneck = self.conv_1x1(inputs)
        bottleneck = self.conv_3x3(bottleneck)
        bottleneck = self.conv_1x4(bottleneck)
        if (self.downsample is not None):
            residual = self.downsample(inputs)
        else:
            residual = inputs
        # additive_func is defined elsewhere in this file — confirm its exact
        # semantics at its definition.
        out = additive_func(residual, bottleneck)
        return F.relu(out, inplace=True)
|
class CifarResNet(nn.Module):
    """CIFAR ResNet assembled from ResNetBasicblock / ResNetBottleneck stages.

    Args:
        block_name: 'ResNetBasicblock' (depth = 6n+2) or 'ResNetBottleneck'
            (depth = 9n+2).
        depth: total conv depth; verified against the constructed layers below.
        num_classes: classifier output size.
        zero_init_residual: zero the last BN weight of every block, so each
            block initially behaves like identity.
    """

    def __init__(self, block_name, depth, num_classes, zero_init_residual):
        super(CifarResNet, self).__init__()
        if (block_name == 'ResNetBasicblock'):
            block = ResNetBasicblock
            assert (((depth - 2) % 6) == 0), 'depth should be one of 20, 32, 44, 56, 110'
            layer_blocks = ((depth - 2) // 6)
        elif (block_name == 'ResNetBottleneck'):
            block = ResNetBottleneck
            assert (((depth - 2) % 9) == 0), 'depth should be one of 164'
            layer_blocks = ((depth - 2) // 9)
        else:
            raise ValueError('invalid block : {:}'.format(block_name))
        self.message = 'CifarResNet : Block : {:}, Depth : {:}, Layers for each block : {:}'.format(block_name, depth, layer_blocks)
        self.num_classes = num_classes
        self.channels = [16]
        # Stem conv, followed by 3 stages of layer_blocks blocks each.
        self.layers = nn.ModuleList([ConvBNReLU(3, 16, 3, 1, 1, False, True)])
        for stage in range(3):
            for iL in range(layer_blocks):
                iC = self.channels[(- 1)]
                # Stage widths: 16, 32, 64; first block of stages 1-2 strides.
                planes = (16 * (2 ** stage))
                stride = (2 if ((stage > 0) and (iL == 0)) else 1)
                module = block(iC, planes, stride)
                self.channels.append(module.out_dim)
                self.layers.append(module)
                self.message += '\nstage={:}, ilayer={:02d}/{:02d}, block={:03d}, iC={:3d}, oC={:3d}, stride={:}'.format(stage, iL, layer_blocks, (len(self.layers) - 1), iC, module.out_dim, stride)
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = nn.Linear(module.out_dim, num_classes)
        # Sanity-check: counted convs (+1 for the stem) must equal `depth`.
        assert ((sum((x.num_conv for x in self.layers)) + 1) == depth), 'invalid depth check {:} vs {:}'.format((sum((x.num_conv for x in self.layers)) + 1), depth)
        # initialize_resnet is defined elsewhere in this file.
        self.apply(initialize_resnet)
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, ResNetBasicblock):
                    nn.init.constant_(m.conv_b.bn.weight, 0)
                elif isinstance(m, ResNetBottleneck):
                    nn.init.constant_(m.conv_1x4.bn.weight, 0)

    def get_message(self):
        """Return the per-layer architecture description string."""
        return self.message

    def forward(self, inputs):
        """Return (pooled feature vector, classification logits)."""
        x = inputs
        for (i, layer) in enumerate(self.layers):
            x = layer(x)
        features = self.avgpool(x)
        features = features.view(features.size(0), (- 1))
        logits = self.classifier(features)
        return (features, logits)
|
class WideBasicblock(nn.Module):
    """Pre-activation wide-ResNet block: BN-ReLU-conv twice, optional dropout."""

    def __init__(self, inplanes, planes, stride, dropout=False):
        super(WideBasicblock, self).__init__()
        self.bn_a = nn.BatchNorm2d(inplanes)
        self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn_b = nn.BatchNorm2d(planes)
        self.dropout = nn.Dropout2d(p=0.5, inplace=True) if dropout else None
        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        if inplanes != planes:
            # 1x1 projection so the shortcut matches the main path's shape.
            self.downsample = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, padding=0, bias=False)
        else:
            self.downsample = None

    def forward(self, x):
        out = self.conv_a(F.relu(self.bn_a(x)))
        out = F.relu(self.bn_b(out))
        if self.dropout is not None:
            out = self.dropout(out)
        out = self.conv_b(out)
        shortcut = x if self.downsample is None else self.downsample(x)
        return shortcut + out
|
class CifarWideResNet(nn.Module):
    """Wide ResNet (WRN-depth-widen_factor) for CIFAR with pre-activation blocks.

    depth must satisfy (depth - 4) % 6 == 0; each of the three stages holds
    (depth - 4) / 6 WideBasicblock units.
    """

    def __init__(self, depth, widen_factor, num_classes, dropout):
        super(CifarWideResNet, self).__init__()
        assert (((depth - 4) % 6) == 0), 'depth should be one of 20, 32, 44, 56, 110'
        layer_blocks = ((depth - 4) // 6)
        print('CifarPreResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))
        self.num_classes = num_classes
        self.dropout = dropout
        self.conv_3x3 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.message = 'Wide ResNet : depth={:}, widen_factor={:}, class={:}'.format(depth, widen_factor, num_classes)
        self.inplanes = 16
        # Three stages widened by widen_factor; stages 2-3 halve resolution.
        self.stage_1 = self._make_layer(WideBasicblock, (16 * widen_factor), layer_blocks, 1)
        self.stage_2 = self._make_layer(WideBasicblock, (32 * widen_factor), layer_blocks, 2)
        self.stage_3 = self._make_layer(WideBasicblock, (64 * widen_factor), layer_blocks, 2)
        # Pre-activation nets need a final BN-ReLU before pooling.
        self.lastact = nn.Sequential(nn.BatchNorm2d((64 * widen_factor)), nn.ReLU(inplace=True))
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = nn.Linear((64 * widen_factor), num_classes)
        # initialize_resnet is defined elsewhere in this file.
        self.apply(initialize_resnet)

    def get_message(self):
        """Return the architecture description string."""
        return self.message

    def _make_layer(self, block, planes, blocks, stride):
        # First block may stride/widen; the rest keep shape.
        layers = []
        layers.append(block(self.inplanes, planes, stride, self.dropout))
        self.inplanes = planes
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, 1, self.dropout))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (pooled feature vector, classification logits)."""
        x = self.conv_3x3(x)
        x = self.stage_1(x)
        x = self.stage_2(x)
        x = self.stage_3(x)
        x = self.lastact(x)
        x = self.avgpool(x)
        features = x.view(x.size(0), (- 1))
        outs = self.classifier(features)
        return (features, outs)
|
class ConvBNReLU(nn.Module):
    """MobileNetV2-style Conv-BN-ReLU6 unit with 'same' padding.

    NOTE: this redefines (and shadows) the ConvBNReLU declared earlier in
    this file.
    """

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        super(ConvBNReLU, self).__init__()
        same_pad = (kernel_size - 1) // 2
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size, stride, same_pad, groups=groups, bias=False)
        self.bn = nn.BatchNorm2d(out_planes)
        self.relu = nn.ReLU6(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
|
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual: 1x1 expand, depthwise 3x3, 1x1 project."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert (stride in [1, 2])
        hidden_dim = int(round(inp * expand_ratio))
        # Residual shortcut only when the block preserves shape.
        self.use_res_connect = (self.stride == 1) and (inp == oup)
        ops = []
        if expand_ratio != 1:
            ops.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        ops.extend([
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),  # depthwise
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),  # linear projection
            nn.BatchNorm2d(oup),
        ])
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        body = self.conv(x)
        return (x + body) if self.use_res_connect else body
|
class MobileNetV2(nn.Module):
    """MobileNetV2 classifier built from InvertedResidual blocks."""

    def __init__(self, num_classes, width_mult, input_channel, last_channel, block_name, dropout):
        super(MobileNetV2, self).__init__()
        if (block_name == 'InvertedResidual'):
            block = InvertedResidual
        else:
            raise ValueError('invalid block name : {:}'.format(block_name))
        # Rows are (expansion t, output channels c, repeats n, first stride s).
        inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        input_channel = int((input_channel * width_mult))
        self.last_channel = int((last_channel * max(1.0, width_mult)))
        features = [ConvBNReLU(3, input_channel, stride=2)]
        for (t, c, n, s) in inverted_residual_setting:
            output_channel = int((c * width_mult))
            for i in range(n):
                # Only the first block of each group strides.
                stride = (s if (i == 0) else 1)
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        self.features = nn.Sequential(*features)
        self.classifier = nn.Sequential(nn.Dropout(dropout), nn.Linear(self.last_channel, num_classes))
        # NOTE(review): `input_channel` was overwritten by the loop above, so
        # "in-C" here reports the final stage width, not the constructor
        # argument — confirm whether that is intended.
        self.message = 'MobileNetV2 : width_mult={:}, in-C={:}, last-C={:}, block={:}, dropout={:}'.format(width_mult, input_channel, last_channel, block_name, dropout)
        # initialize_resnet is defined elsewhere in this file.
        self.apply(initialize_resnet)

    def get_message(self):
        """Return the architecture description string."""
        return self.message

    def forward(self, inputs):
        """Return (final feature maps, class logits); global-average over H, W."""
        features = self.features(inputs)
        vectors = features.mean([2, 3])
        predicts = self.classifier(vectors)
        return (features, predicts)
|
def conv3x3(in_planes, out_planes, stride=1, groups=1):
    """3x3 convolution with padding=1 and no bias."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=1, groups=groups, bias=False,
    )
|
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution with no bias (used for channel projection)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
|
class BasicBlock(nn.Module):
    """Post-activation ResNet basic block: two 3x3 convs plus a shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64):
        super(BasicBlock, self).__init__()
        # Grouped / widened variants are only meaningful for Bottleneck blocks.
        if (groups != 1) or (base_width != 64):
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + shortcut
        return self.relu(out)
|
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce, 3x3, 1x1 expand (4x), shortcut.

    NOTE: this redefines (and shadows) the DenseNet Bottleneck declared
    earlier in this file.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64):
        super(Bottleneck, self).__init__()
        # ResNeXt-style width scaling: wider 3x3 stage for base_width > 64.
        width = int(planes * (base_width / 64.0)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = conv3x3(width, width, stride, groups)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + shortcut
        return self.relu(out)
|
class ResNet(nn.Module):
    """ImageNet ResNet with an optional 3-conv deep stem.

    Args:
        block_name: 'BasicBlock' or 'Bottleneck'.
        layers: blocks per stage, e.g. [3, 4, 6, 3] for ResNet-50.
        deep_stem: replace the 7x7 stem with three stacked 3x3 convs.
        num_classes: classifier output size.
        zero_init_residual: zero each block's last BN weight at init.
        groups / width_per_group: ResNeXt-style grouping parameters.
    """

    def __init__(self, block_name, layers, deep_stem, num_classes, zero_init_residual, groups, width_per_group):
        super(ResNet, self).__init__()
        if (block_name == 'BasicBlock'):
            block = BasicBlock
        elif (block_name == 'Bottleneck'):
            block = Bottleneck
        else:
            raise ValueError('invalid block-name : {:}'.format(block_name))
        if (not deep_stem):
            self.conv = nn.Sequential(nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
        else:
            self.conv = nn.Sequential(nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d(32), nn.ReLU(inplace=True), nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(32), nn.ReLU(inplace=True), nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
        self.inplanes = 64
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1, groups=groups, base_width=width_per_group)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, groups=groups, base_width=width_per_group)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, groups=groups, base_width=width_per_group)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, groups=groups, base_width=width_per_group)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        self.message = 'block = {:}, layers = {:}, deep_stem = {:}, num_classes = {:}'.format(block, layers, deep_stem, num_classes)
        # initialize_resnet is defined elsewhere in this file.
        self.apply(initialize_resnet)
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride, groups, base_width):
        # A projection shortcut is needed when shape changes; for stride 2 an
        # avg-pool precedes the 1x1 conv instead of striding the conv itself.
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            if (stride == 2):
                downsample = nn.Sequential(nn.AvgPool2d(kernel_size=2, stride=2, padding=0), conv1x1(self.inplanes, (planes * block.expansion), 1), nn.BatchNorm2d((planes * block.expansion)))
            elif (stride == 1):
                downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), nn.BatchNorm2d((planes * block.expansion)))
            else:
                raise ValueError('invalid stride [{:}] for downsample'.format(stride))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, groups, base_width))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, 1, None, groups, base_width))
        return nn.Sequential(*layers)

    def get_message(self):
        """Return the architecture description string."""
        return self.message

    def forward(self, x):
        """Return (pooled feature vector, classification logits)."""
        x = self.conv(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        features = self.avgpool(x)
        features = features.view(features.size(0), (- 1))
        logits = self.fc(features)
        return (features, logits)
|
def get_cell_based_tiny_net(config):
    """Instantiate a cell-based tiny network (search or inference) from *config*.

    Args:
        config: a config object (or plain dict, converted via dict2config) with
            at least `name`; `super_type` defaults to 'basic'.

    Returns:
        The constructed torch network.

    Raises:
        ValueError: unknown network name or missing genotype information.
    """
    if isinstance(config, dict):
        config = dict2config(config, None)
    super_type = getattr(config, 'super_type', 'basic')
    group_names = ['DARTS-V1', 'DARTS-V2', 'GDAS', 'SETN', 'ENAS', 'RANDOM', 'generic']
    if ((super_type == 'basic') and (config.name in group_names)):
        from .cell_searchs import nas201_super_nets as nas_super_nets
        try:
            return nas_super_nets[config.name](config.C, config.N, config.max_nodes, config.num_classes, config.space, config.affine, config.track_running_stats)
        except TypeError:
            # Bug fix: this was a bare `except:` that retried on ANY failure,
            # hiding real errors raised inside the constructor. Only a
            # signature mismatch (older super-nets that lack the
            # affine/track_running_stats arguments) should trigger the
            # short-form fallback.
            return nas_super_nets[config.name](config.C, config.N, config.max_nodes, config.num_classes, config.space)
    elif (super_type == 'search-shape'):
        from .shape_searchs import GenericNAS301Model
        genotype = CellStructure.str2structure(config.genotype)
        return GenericNAS301Model(config.candidate_Cs, config.max_num_Cs, genotype, config.num_classes, config.affine, config.track_running_stats)
    elif (super_type == 'nasnet-super'):
        from .cell_searchs import nasnet_super_nets as nas_super_nets
        return nas_super_nets[config.name](config.C, config.N, config.steps, config.multiplier, config.stem_multiplier, config.num_classes, config.space, config.affine, config.track_running_stats)
    elif (config.name == 'infer.tiny'):
        from .cell_infers import TinyNetwork
        # Prefer an explicit genotype; fall back to parsing arch_str.
        if hasattr(config, 'genotype'):
            genotype = config.genotype
        elif hasattr(config, 'arch_str'):
            genotype = CellStructure.str2structure(config.arch_str)
        else:
            raise ValueError('Can not find genotype from this config : {:}'.format(config))
        return TinyNetwork(config.C, config.N, genotype, config.num_classes)
    elif (config.name == 'infer.shape.tiny'):
        from .shape_infers import DynamicShapeTinyNet
        # channels may arrive as a colon-separated string, e.g. '8:16:32'.
        if isinstance(config.channels, str):
            channels = tuple([int(x) for x in config.channels.split(':')])
        else:
            channels = config.channels
        genotype = CellStructure.str2structure(config.genotype)
        return DynamicShapeTinyNet(channels, genotype, config.num_classes)
    elif (config.name == 'infer.nasnet-cifar'):
        from .cell_infers import NASNetonCIFAR
        raise NotImplementedError
    else:
        raise ValueError('invalid network name : {:}'.format(config.name))
|
def get_search_spaces(xtype, name) -> List[Text]:
    """Return the search space for *xtype* ('cell'/'tss' or 'sss').

    'cell'/'tss' gives the named operation list from SearchSpaceNames;
    'sss' gives a dict of candidate channel counts. Raises ValueError on
    unknown type or name.
    """
    if xtype in ('cell', 'tss'):
        from .cell_operations import SearchSpaceNames
        assert (name in SearchSpaceNames), 'invalid name [{:}] in {:}'.format(name, SearchSpaceNames.keys())
        return SearchSpaceNames[name]
    if xtype == 'sss':
        if name in ['nats-bench', 'nats-bench-size']:
            return {'candidates': [8, 16, 24, 32, 40, 48, 56, 64], 'numbers': 5}
        raise ValueError('Invalid name : {:}'.format(name))
    raise ValueError('invalid search-space type is {:}'.format(xtype))
|
def get_cifar_models(config, extra_path=None):
    """Build a CIFAR model described by *config*.

    super_type 'basic' selects plain resnet/densenet/wideresnet; 'infer-*'
    selects shape-inference variants. *extra_path* may point to a checkpoint
    from which the latest NASNet genotype is recovered.

    Raises ValueError for unknown arch / infer-mode / super-type.
    """
    super_type = getattr(config, 'super_type', 'basic')
    if (super_type == 'basic'):
        from .CifarResNet import CifarResNet
        from .CifarDenseNet import DenseNet
        from .CifarWideResNet import CifarWideResNet
        if (config.arch == 'resnet'):
            return CifarResNet(config.module, config.depth, config.class_num, config.zero_init_residual)
        elif (config.arch == 'densenet'):
            return DenseNet(config.growthRate, config.depth, config.reduction, config.class_num, config.bottleneck)
        elif (config.arch == 'wideresnet'):
            return CifarWideResNet(config.depth, config.wide_factor, config.class_num, config.dropout)
        else:
            raise ValueError('invalid module type : {:}'.format(config.arch))
    elif super_type.startswith('infer'):
        from .shape_infers import InferWidthCifarResNet
        from .shape_infers import InferDepthCifarResNet
        from .shape_infers import InferCifarResNet
        from .cell_infers import NASNetonCIFAR
        assert (len(super_type.split('-')) == 2), 'invalid super_type : {:}'.format(super_type)
        infer_mode = super_type.split('-')[1]
        if (infer_mode == 'width'):
            return InferWidthCifarResNet(config.module, config.depth, config.xchannels, config.class_num, config.zero_init_residual)
        elif (infer_mode == 'depth'):
            return InferDepthCifarResNet(config.module, config.depth, config.xblocks, config.class_num, config.zero_init_residual)
        elif (infer_mode == 'shape'):
            return InferCifarResNet(config.module, config.depth, config.xblocks, config.xchannels, config.class_num, config.zero_init_residual)
        elif (infer_mode == 'nasnet.cifar'):
            genotype = config.genotype
            if (extra_path is not None):
                # A checkpoint overrides the config genotype with the one
                # recorded at its latest epoch.
                if (not osp.isfile(extra_path)):
                    raise ValueError('invalid extra_path : {:}'.format(extra_path))
                xdata = torch.load(extra_path)
                current_epoch = xdata['epoch']
                genotype = xdata['genotypes'][(current_epoch - 1)]
            # Older configs use ichannel/layers instead of C/N.
            C = (config.C if hasattr(config, 'C') else config.ichannel)
            N = (config.N if hasattr(config, 'N') else config.layers)
            return NASNetonCIFAR(C, N, config.stem_multi, config.class_num, genotype, config.auxiliary)
        else:
            raise ValueError('invalid infer-mode : {:}'.format(infer_mode))
    else:
        raise ValueError('invalid super-type : {:}'.format(super_type))
|
def get_imagenet_models(config):
    """Build an ImageNet model described by *config*.

    super_type 'basic' selects resnet / mobilenet_v2; 'infer-shape' selects
    the shape-inference variants. Raises ValueError on unknown options.
    """
    super_type = getattr(config, 'super_type', 'basic')
    if (super_type == 'basic'):
        from .ImageNet_ResNet import ResNet
        from .ImageNet_MobileNetV2 import MobileNetV2
        if (config.arch == 'resnet'):
            return ResNet(config.block_name, config.layers, config.deep_stem, config.class_num, config.zero_init_residual, config.groups, config.width_per_group)
        elif (config.arch == 'mobilenet_v2'):
            return MobileNetV2(config.class_num, config.width_multi, config.input_channel, config.last_channel, 'InvertedResidual', config.dropout)
        else:
            raise ValueError('invalid arch : {:}'.format(config.arch))
    elif super_type.startswith('infer'):
        assert (len(super_type.split('-')) == 2), 'invalid super_type : {:}'.format(super_type)
        infer_mode = super_type.split('-')[1]
        if (infer_mode == 'shape'):
            from .shape_infers import InferImagenetResNet
            from .shape_infers import InferMobileNetV2
            if (config.arch == 'resnet'):
                return InferImagenetResNet(config.block_name, config.layers, config.xblocks, config.xchannels, config.deep_stem, config.class_num, config.zero_init_residual)
            elif (config.arch == 'MobileNetV2'):
                return InferMobileNetV2(config.class_num, config.xchannels, config.xblocks, config.dropout)
            else:
                raise ValueError('invalid arch-mode : {:}'.format(config.arch))
        else:
            raise ValueError('invalid infer-mode : {:}'.format(infer_mode))
    else:
        raise ValueError('invalid super-type : {:}'.format(super_type))
|
def obtain_model(config, extra_path=None):
    """Dispatch model construction on config.dataset ('cifar' or 'imagenet').

    Raises ValueError for any other dataset value.
    """
    dataset = config.dataset
    if dataset == 'cifar':
        return get_cifar_models(config, extra_path)
    if dataset == 'imagenet':
        return get_imagenet_models(config)
    raise ValueError('invalid dataset in the model config : {:}'.format(config))
|
def obtain_search_model(config):
    """Build a shape-search model described by *config*.

    CIFAR supports resnet (width/depth/shape search) and simres (width);
    ImageNet supports resnet shape search only. Raises ValueError on any
    unsupported combination.
    """
    if config.dataset == 'cifar':
        if config.arch == 'resnet':
            from .shape_searchs import SearchWidthCifarResNet
            from .shape_searchs import SearchDepthCifarResNet
            from .shape_searchs import SearchShapeCifarResNet
            mode = config.search_mode
            if mode == 'width':
                return SearchWidthCifarResNet(config.module, config.depth, config.class_num)
            if mode == 'depth':
                return SearchDepthCifarResNet(config.module, config.depth, config.class_num)
            if mode == 'shape':
                return SearchShapeCifarResNet(config.module, config.depth, config.class_num)
            raise ValueError('invalid search mode : {:}'.format(config.search_mode))
        elif config.arch == 'simres':
            from .shape_searchs import SearchWidthSimResNet
            if config.search_mode == 'width':
                return SearchWidthSimResNet(config.depth, config.class_num)
            raise ValueError('invalid search mode : {:}'.format(config.search_mode))
        else:
            raise ValueError('invalid arch : {:} for dataset [{:}]'.format(config.arch, config.dataset))
    elif config.dataset == 'imagenet':
        from .shape_searchs import SearchShapeImagenetResNet
        assert (config.search_mode == 'shape'), 'invalid search-mode : {:}'.format(config.search_mode)
        if config.arch == 'resnet':
            return SearchShapeImagenetResNet(config.block_name, config.layers, config.deep_stem, config.class_num)
        raise ValueError('invalid model config : {:}'.format(config))
    else:
        raise ValueError('invalid dataset in the model config : {:}'.format(config))
|
def load_net_from_checkpoint(checkpoint):
    """Rebuild a model from a checkpoint file and load its weights.

    The checkpoint must contain 'model-config' (dict) and 'base-model'
    (state dict) entries.
    """
    assert osp.isfile(checkpoint), 'checkpoint {:} does not exist'.format(checkpoint)
    snapshot = torch.load(checkpoint)
    model_config = dict2config(snapshot['model-config'], None)
    model = obtain_model(model_config)
    model.load_state_dict(snapshot['base-model'])
    return model
|
class InferCell(nn.Module):
    """Inference-time NAS cell built from a fixed genotype.

    Each node aggregates the outputs of operations (from the file-level OPS
    registry) applied to earlier nodes, as described by *genotype*.
    """

    def __init__(self, genotype, C_in, C_out, stride, affine=True, track_running_stats=True):
        super(InferCell, self).__init__()
        self.layers = nn.ModuleList()
        self.node_IN = []  # per node: indices of its input nodes
        self.node_IX = []  # per node: indices into self.layers
        self.genotype = deepcopy(genotype)
        # NOTE(review): this loop visits genotype[0 .. len-2] while self.nodes
        # is set to len(genotype) below — confirm the genotype container's
        # indexing convention makes these consistent.
        for i in range(1, len(genotype)):
            node_info = genotype[(i - 1)]
            cur_index = []
            cur_innod = []
            for (op_name, op_in) in node_info:
                if (op_in == 0):
                    # Edges from the cell input may change channels and stride.
                    layer = OPS[op_name](C_in, C_out, stride, affine, track_running_stats)
                else:
                    layer = OPS[op_name](C_out, C_out, 1, affine, track_running_stats)
                cur_index.append(len(self.layers))
                cur_innod.append(op_in)
                self.layers.append(layer)
            self.node_IX.append(cur_index)
            self.node_IN.append(cur_innod)
        self.nodes = len(genotype)
        self.in_dim = C_in
        self.out_dim = C_out

    def extra_repr(self):
        """Summarize node wiring as 'node<-(Iinput-Llayer,...)' entries."""
        string = 'info :: nodes={nodes}, inC={in_dim}, outC={out_dim}'.format(**self.__dict__)
        laystr = []
        for (i, (node_layers, node_innods)) in enumerate(zip(self.node_IX, self.node_IN)):
            y = ['I{:}-L{:}'.format(_ii, _il) for (_il, _ii) in zip(node_layers, node_innods)]
            x = '{:}<-({:})'.format((i + 1), ','.join(y))
            laystr.append(x)
        return ((string + ', [{:}]'.format(' | '.join(laystr))) + ', {:}'.format(self.genotype.tostr()))

    def forward(self, inputs):
        """Evaluate nodes in topological order; the last node is the cell output."""
        nodes = [inputs]
        for (i, (node_layers, node_innods)) in enumerate(zip(self.node_IX, self.node_IN)):
            node_feature = sum((self.layers[_il](nodes[_ii]) for (_il, _ii) in zip(node_layers, node_innods)))
            nodes.append(node_feature)
        return nodes[(- 1)]
|
class NASNetInferCell(nn.Module):
    """A fixed NASNet/DARTS-style cell instantiated from a genotype dict.

    The genotype provides 'normal'/'reduce' node lists plus the concat set;
    the cell keeps one op module per edge in an ``nn.ModuleDict``.
    """

    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev, affine, track_running_stats):
        super(NASNetInferCell, self).__init__()
        self.reduction = reduction
        # if the previous cell reduced, s0 must be downsampled to match s1
        if reduction_prev:
            self.preprocess0 = OPS['skip_connect'](C_prev_prev, C, 2, affine, track_running_stats)
        else:
            self.preprocess0 = OPS['nor_conv_1x1'](C_prev_prev, C, 1, affine, track_running_stats)
        self.preprocess1 = OPS['nor_conv_1x1'](C_prev, C, 1, affine, track_running_stats)
        if not reduction:
            nodes, concats = genotype['normal'], genotype['normal_concat']
        else:
            nodes, concats = genotype['reduce'], genotype['reduce_concat']
        self._multiplier = len(concats)
        self._concats = concats
        self._steps = len(nodes)
        self._nodes = nodes
        self.edges = nn.ModuleDict()
        for i, node in enumerate(nodes):
            for in_node in node:
                name, j = in_node[0], in_node[1]
                # in a reduction cell, edges from the two input nodes use stride 2
                stride = 2 if (reduction and j < 2) else 1
                self.edges['{:}<-{:}'.format(i + 2, j)] = OPS[name](C, C, stride, affine, track_running_stats)

    def forward(self, s0, s1, unused_drop_prob):
        states = [self.preprocess0(s0), self.preprocess1(s1)]
        for i, node in enumerate(self._nodes):
            summed = sum(self.edges['{:}<-{:}'.format(i + 2, in_node[1])](states[in_node[1]]) for in_node in node)
            states.append(summed)
        return torch.cat([states[x] for x in self._concats], dim=1)
|
class AuxiliaryHeadCIFAR(nn.Module):
    """Auxiliary classifier attached to an intermediate feature map.

    Assumes an 8x8 spatial input (CIFAR): the 5x5/stride-3 pooling yields
    2x2 and the final 2x2 convolution brings it to 1x1 before the linear
    classifier.
    """

    def __init__(self, C, num_classes):
        super(AuxiliaryHeadCIFAR, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
|
class NASNetonCIFAR(nn.Module):
    """A full CIFAR network assembled from a searched NASNet-style genotype.

    Layout: N normal cells, a reduction cell, N-1 normal, a reduction,
    N-1 normal; an optional auxiliary head hangs off the second reduction.
    """

    def __init__(self, C, N, stem_multiplier, num_classes, genotype, auxiliary, affine=True, track_running_stats=True):
        super(NASNetonCIFAR, self).__init__()
        self._C = C
        self._layerN = N
        self.stem = nn.Sequential(
            nn.Conv2d(3, C * stem_multiplier, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(C * stem_multiplier))
        layer_channels = [C] * N + [C * 2] + [C * 2] * (N - 1) + [C * 4] + [C * 4] * (N - 1)
        layer_reductions = [False] * N + [True] + [False] * (N - 1) + [True] + [False] * (N - 1)
        C_prev_prev, C_prev, C_curr, reduction_prev = C * stem_multiplier, C * stem_multiplier, C, False
        self.auxiliary_index = None
        self.auxiliary_head = None
        self.cells = nn.ModuleList()
        for index, (C_curr, reduction) in enumerate(zip(layer_channels, layer_reductions)):
            cell = InferCell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, affine, track_running_stats)
            self.cells.append(cell)
            C_prev_prev, C_prev, reduction_prev = C_prev, cell._multiplier * C_curr, reduction
            # attach the auxiliary head right after the second reduction cell
            if reduction and C_curr == C * 4 and auxiliary:
                self.auxiliary_head = AuxiliaryHeadCIFAR(C_prev, num_classes)
                self.auxiliary_index = index
        self._Layer = len(self.cells)
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        self.drop_path_prob = -1  # disabled until update_drop_path is called

    def update_drop_path(self, drop_path_prob):
        self.drop_path_prob = drop_path_prob

    def auxiliary_param(self):
        return [] if self.auxiliary_head is None else list(self.auxiliary_head.parameters())

    def get_message(self):
        msg = self.extra_repr()
        for idx, cell in enumerate(self.cells):
            msg += '\n {:02d}/{:02d} :: {:}'.format(idx, len(self.cells), cell.extra_repr())
        return msg

    def extra_repr(self):
        return '{name}(C={_C}, N={_layerN}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, inputs):
        logits_aux = None
        stem_feature = self.stem(inputs)
        cell_results = [stem_feature, stem_feature]
        for i, cell in enumerate(self.cells):
            cell_results.append(cell(cell_results[-2], cell_results[-1], self.drop_path_prob))
            # auxiliary logits are only produced during training
            if (self.auxiliary_index is not None) and (i == self.auxiliary_index) and self.training:
                logits_aux = self.auxiliary_head(cell_results[-1])
        out = self.global_pooling(self.lastact(cell_results[-1]))
        out = out.view(out.size(0), -1)
        logits = self.classifier(out)
        if logits_aux is None:
            return (out, logits)
        return (out, [logits, logits_aux])
|
class TinyNetwork(nn.Module):
    """NAS-Bench-201-style network whose cells are fixed by ``genotype``."""

    def __init__(self, C, N, genotype, num_classes):
        super(TinyNetwork, self).__init__()
        self._C = C
        self._layerN = N
        # NOTE(review): 18 classes appears to signal a single-channel dataset
        # in this codebase — confirm against the dataset configuration.
        self.channel = 1 if num_classes == 18 else 3
        self.stem = nn.Sequential(
            nn.Conv2d(self.channel, C, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(C))
        layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N
        layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N
        C_prev = C
        self.cells = nn.ModuleList()
        for C_curr, reduction in zip(layer_channels, layer_reductions):
            if reduction:
                cell = ResNetBasicblock(C_prev, C_curr, 2, True)
            else:
                cell = InferCell(genotype, C_prev, C_curr, 1)
            self.cells.append(cell)
            C_prev = cell.out_dim
        self._Layer = len(self.cells)
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

    def get_message(self):
        msg = self.extra_repr()
        for idx, cell in enumerate(self.cells):
            msg += '\n {:02d}/{:02d} :: {:}'.format(idx, len(self.cells), cell.extra_repr())
        return msg

    def extra_repr(self):
        return '{name}(C={_C}, N={_layerN}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, inputs):
        feature = self.stem(inputs)
        for cell in self.cells:
            feature = cell(feature)
        out = self.global_pooling(self.lastact(feature))
        out = out.view(out.size(0), -1)
        return (out, self.classifier(out))
|
def main():
    """Smoke-test the ENAS controller by sampling one architecture.

    Bug fix: the previous body called ``Controller(6, 4)``, but this file's
    ``Controller`` takes ``(edge2index, op_names, max_nodes, ...)`` — passing
    an int raised ``TypeError`` on ``len(edge2index)``.  Build a valid edge
    index for a 4-node cell instead and return the controller's sample.
    """
    max_nodes = 4
    # standard NAS-Bench-201 operation set — any non-empty op list works here
    op_names = ['none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3']
    # keys follow Controller.convert_structure's '{node}<-{input}' format
    edge_keys = ['{:}<-{:}'.format(i, j) for i in range(1, max_nodes) for j in range(i)]
    edge2index = {key: index for index, key in enumerate(sorted(edge_keys))}
    controller = Controller(edge2index, op_names, max_nodes)
    predictions = controller()
    return predictions
|
class Controller(nn.Module):
    """ENAS-style LSTM controller that samples one operation per cell edge.

    Each LSTM step predicts a categorical distribution over operations; the
    sampled op is embedded and fed back as the next input.  ``forward``
    returns (sum of log-probs, sum of entropies, sampled Structure).
    """

    def __init__(self, edge2index, op_names, max_nodes, lstm_size=32, lstm_num_layers=2, tanh_constant=2.5, temperature=5.0):
        super(Controller, self).__init__()
        self.max_nodes = max_nodes
        self.num_edge = len(edge2index)
        self.edge2index = edge2index
        self.num_ops = len(op_names)
        self.op_names = op_names
        self.lstm_size = lstm_size
        self.lstm_N = lstm_num_layers
        self.tanh_constant = tanh_constant
        self.temperature = temperature
        # learnable initial input token for the LSTM
        self.register_parameter('input_vars', nn.Parameter(torch.Tensor(1, 1, lstm_size)))
        self.w_lstm = nn.LSTM(input_size=self.lstm_size, hidden_size=self.lstm_size, num_layers=self.lstm_N)
        self.w_embd = nn.Embedding(self.num_ops, self.lstm_size)
        self.w_pred = nn.Linear(self.lstm_size, self.num_ops)
        for tensor in (self.input_vars, self.w_lstm.weight_hh_l0, self.w_lstm.weight_ih_l0, self.w_embd.weight, self.w_pred.weight):
            nn.init.uniform_(tensor, -0.1, 0.1)

    def convert_structure(self, _arch):
        """Map the flat list of sampled op indices back into a Structure."""
        genotypes = []
        for node_id in range(1, self.max_nodes):
            node = []
            for prev in range(node_id):
                op_idx = _arch[self.edge2index['{:}<-{:}'.format(node_id, prev)]]
                node.append((self.op_names[op_idx], prev))
            genotypes.append(tuple(node))
        return Structure(genotypes)

    def forward(self):
        inputs, h0 = self.input_vars, None
        log_probs, entropys, sampled_arch = [], [], []
        for _ in range(self.num_edge):
            outputs, h0 = self.w_lstm(inputs, h0)
            # temperature + tanh squashing keeps the logits bounded
            logits = self.tanh_constant * torch.tanh(self.w_pred(outputs) / self.temperature)
            dist = Categorical(logits=logits)
            op_index = dist.sample()
            sampled_arch.append(op_index.item())
            log_probs.append(dist.log_prob(op_index).view(-1))
            entropys.append(dist.entropy().view(-1))
            inputs = self.w_embd(op_index)
        return (torch.sum(torch.cat(log_probs)), torch.sum(torch.cat(entropys)), self.convert_structure(sampled_arch))
|
class GenericNAS201Model(nn.Module):
    """Weight-sharing super-network over the NAS-Bench-201 search space.

    One set of shared weights covers every candidate architecture; how the
    forward pass combines candidate ops is chosen via ``set_cal_mode``
    ('gdas', 'joint', 'urs', 'select', 'dynamic') and ``set_algo``
    ('gdas', 'darts*', 'random', 'enas', 'setn').
    """
    def __init__(self, C, N, max_nodes, num_classes, search_space, affine, track_running_stats):
        super(GenericNAS201Model, self).__init__()
        self._C = C
        self._layerN = N
        self._max_nodes = max_nodes
        # NOTE(review): stem takes 1 input channel — looks tailored to
        # grayscale data; confirm against the dataset loader.
        self._stem = nn.Sequential(nn.Conv2d(1, C, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(C))
        # layout: N search cells, reduction, N search, reduction, N search
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * N)) + [(C * 4)]) + ([(C * 4)] * N))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * N)) + [True]) + ([False] * N))
        (C_prev, num_edge, edge2index) = (C, None, None)
        self._cells = nn.ModuleList()
        for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            if reduction:
                cell = ResNetBasicblock(C_prev, C_curr, 2)
            else:
                cell = SearchCell(C_prev, C_curr, 1, max_nodes, search_space, affine, track_running_stats)
                # every search cell must expose the same edge topology
                if (num_edge is None):
                    (num_edge, edge2index) = (cell.num_edges, cell.edge2index)
                else:
                    assert ((num_edge == cell.num_edges) and (edge2index == cell.edge2index)), 'invalid {:} vs. {:}.'.format(num_edge, cell.num_edges)
            self._cells.append(cell)
            C_prev = cell.out_dim
        self._op_names = deepcopy(search_space)
        self._Layer = len(self._cells)
        self.edge2index = edge2index
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev, affine=affine, track_running_stats=track_running_stats), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        self._num_edge = num_edge
        # one row of architecture logits per edge (re-created in set_algo)
        self.arch_parameters = nn.Parameter((0.001 * torch.randn(num_edge, len(search_space))))
        self._mode = None
        self.dynamic_cell = None
        self._tau = None
        self._algo = None
        self._drop_path = None
        self.verbose = False
    def set_algo(self, algo: Text):
        """Bind the search algorithm; ENAS gets a controller, others fresh alphas."""
        assert (self._algo is None), 'This functioin can only be called once.'
        self._algo = algo
        if (algo == 'enas'):
            self.controller = Controller(self.edge2index, self._op_names, self._max_nodes)
        else:
            self.arch_parameters = nn.Parameter((0.001 * torch.randn(self._num_edge, len(self._op_names))))
            if (algo == 'gdas'):
                self._tau = 10
    def set_cal_mode(self, mode, dynamic_cell=None):
        """Select how forward() mixes candidate ops; 'dynamic' keeps a cell copy."""
        assert (mode in ['gdas', 'enas', 'urs', 'joint', 'select', 'dynamic'])
        self._mode = mode
        if (mode == 'dynamic'):
            self.dynamic_cell = deepcopy(dynamic_cell)
        else:
            self.dynamic_cell = None
    def set_drop_path(self, progress, drop_path_rate):
        """Set drop-path rate, optionally scaled by training progress in [0, 1]."""
        if (drop_path_rate is None):
            self._drop_path = None
        elif (progress is None):
            self._drop_path = drop_path_rate
        else:
            self._drop_path = (progress * drop_path_rate)
    @property
    def mode(self):
        return self._mode
    @property
    def drop_path(self):
        return self._drop_path
    @property
    def weights(self):
        # all network (non-architecture) parameters
        xlist = list(self._stem.parameters())
        xlist += list(self._cells.parameters())
        xlist += list(self.lastact.parameters())
        xlist += list(self.global_pooling.parameters())
        xlist += list(self.classifier.parameters())
        return xlist
    def set_tau(self, tau):
        # Gumbel-softmax temperature used by GDAS
        self._tau = tau
    @property
    def tau(self):
        return self._tau
    @property
    def alphas(self):
        # architecture parameters: controller weights for ENAS, alphas otherwise
        if (self._algo == 'enas'):
            return list(self.controller.parameters())
        else:
            return [self.arch_parameters]
    @property
    def message(self):
        string = self.extra_repr()
        for (i, cell) in enumerate(self._cells):
            string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self._cells), cell.extra_repr())
        return string
    def show_alphas(self):
        """Human-readable dump of the current architecture parameters."""
        with torch.no_grad():
            if (self._algo == 'enas'):
                return 'w_pred :\n{:}'.format(self.controller.w_pred.weight)
            else:
                return 'arch-parameters :\n{:}'.format(nn.functional.softmax(self.arch_parameters, dim=(- 1)).cpu())
    def extra_repr(self):
        return '{name}(C={_C}, Max-Nodes={_max_nodes}, N={_layerN}, L={_Layer}, alg={_algo})'.format(name=self.__class__.__name__, **self.__dict__)
    @property
    def genotype(self):
        """Discrete architecture obtained by per-edge argmax over alphas."""
        genotypes = []
        for i in range(1, self._max_nodes):
            xlist = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                with torch.no_grad():
                    weights = self.arch_parameters[self.edge2index[node_str]]
                    op_name = self._op_names[weights.argmax().item()]
                xlist.append((op_name, j))
            genotypes.append(tuple(xlist))
        return Structure(genotypes)
    def dync_genotype(self, use_random=False):
        """Sample an architecture: uniformly if use_random, else from softmax(alphas)."""
        genotypes = []
        with torch.no_grad():
            alphas_cpu = nn.functional.softmax(self.arch_parameters, dim=(- 1))
        for i in range(1, self._max_nodes):
            xlist = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                if use_random:
                    op_name = random.choice(self._op_names)
                else:
                    weights = alphas_cpu[self.edge2index[node_str]]
                    op_index = torch.multinomial(weights, 1).item()
                    op_name = self._op_names[op_index]
                xlist.append((op_name, j))
            genotypes.append(tuple(xlist))
        return Structure(genotypes)
    def get_log_prob(self, arch):
        """Log-probability of `arch` under the current per-edge softmax."""
        with torch.no_grad():
            logits = nn.functional.log_softmax(self.arch_parameters, dim=(- 1))
        select_logits = []
        for (i, node_info) in enumerate(arch.nodes):
            for (op, xin) in node_info:
                node_str = '{:}<-{:}'.format((i + 1), xin)
                op_index = self._op_names.index(op)
                select_logits.append(logits[(self.edge2index[node_str], op_index)])
        return sum(select_logits).item()
    def return_topK(self, K, use_random=False):
        """Return K architectures: random sample or the K most likely ones."""
        archs = Structure.gen_all(self._op_names, self._max_nodes, False)
        pairs = [(self.get_log_prob(arch), arch) for arch in archs]
        if ((K < 0) or (K >= len(archs))):
            K = len(archs)
        if use_random:
            return random.sample(archs, K)
        else:
            sorted_pairs = sorted(pairs, key=(lambda x: (- x[0])))
            return_pairs = [sorted_pairs[_][1] for _ in range(K)]
            return return_pairs
    def normalize_archp(self):
        """Normalize alphas for the forward pass.

        GDAS: Gumbel-softmax with straight-through hard weights (resampling
        until no inf/nan appears).  Other modes: plain softmax.
        """
        if (self.mode == 'gdas'):
            while True:
                gumbels = (- torch.empty_like(self.arch_parameters).exponential_().log())
                logits = ((self.arch_parameters.log_softmax(dim=1) + gumbels) / self.tau)
                probs = nn.functional.softmax(logits, dim=1)
                index = probs.max((- 1), keepdim=True)[1]
                one_h = torch.zeros_like(logits).scatter_((- 1), index, 1.0)
                # straight-through: hard one-hot forward, soft gradient backward
                hardwts = ((one_h - probs.detach()) + probs)
                if (torch.isinf(gumbels).any() or torch.isinf(probs).any() or torch.isnan(probs).any()):
                    continue
                else:
                    break
            with torch.no_grad():
                hardwts_cpu = hardwts.detach().cpu()
            return (hardwts, hardwts_cpu, index, 'GUMBEL')
        else:
            alphas = nn.functional.softmax(self.arch_parameters, dim=(- 1))
            index = alphas.max((- 1), keepdim=True)[1]
            with torch.no_grad():
                alphas_cpu = alphas.detach().cpu()
            return (alphas, alphas_cpu, index, 'SOFTMAX')
    def forward(self, inputs):
        """Run the super-network; dispatch per-cell on the current mode."""
        (alphas, alphas_cpu, index, verbose_str) = self.normalize_archp()
        feature = self._stem(inputs)
        for (i, cell) in enumerate(self._cells):
            if isinstance(cell, SearchCell):
                if (self.mode == 'urs'):
                    feature = cell.forward_urs(feature)
                    if self.verbose:
                        verbose_str += '-forward_urs'
                elif (self.mode == 'select'):
                    feature = cell.forward_select(feature, alphas_cpu)
                    if self.verbose:
                        verbose_str += '-forward_select'
                elif (self.mode == 'joint'):
                    feature = cell.forward_joint(feature, alphas)
                    if self.verbose:
                        verbose_str += '-forward_joint'
                elif (self.mode == 'dynamic'):
                    feature = cell.forward_dynamic(feature, self.dynamic_cell)
                    if self.verbose:
                        verbose_str += '-forward_dynamic'
                elif (self.mode == 'gdas'):
                    feature = cell.forward_gdas(feature, alphas, index)
                    if self.verbose:
                        verbose_str += '-forward_gdas'
                else:
                    raise ValueError('invalid mode={:}'.format(self.mode))
            else:
                feature = cell(feature)
            if (self.drop_path is not None):
                feature = drop_path(feature, self.drop_path)
        # occasionally print the mode trace when verbose is enabled
        if (self.verbose and (random.random() < 0.001)):
            print(verbose_str)
        out = self.lastact(feature)
        out = self.global_pooling(out)
        out = out.view(out.size(0), (- 1))
        logits = self.classifier(out)
        return (out, logits)
|
def get_combination(space, num):
    """Enumerate every way to assign one item from `space` to each of `num` slots.

    Each combination is a list of ``(item, slot_index)`` pairs; the result is
    the full Cartesian product, ordered with the last slot varying fastest.
    Returns an empty list when ``num`` is 0 or ``space`` is empty.
    """
    combs = []
    for index in range(num):
        if index == 0:
            combs = [[(op, 0)] for op in space]
        else:
            combs = [prefix + [(op, index)] for prefix in combs for op in space]
    return combs
|
class Structure():
    """A NAS-Bench-201 cell architecture.

    ``nodes[i]`` is a tuple of ``(op_name, input_node)`` pairs feeding node
    ``i+1``; node 0 is the cell input.  The canonical string form is
    '|op~in|...|+|...' as produced by ``tostr`` and parsed by
    ``str2structure``.
    """
    def __init__(self, genotype):
        assert (isinstance(genotype, list) or isinstance(genotype, tuple)), 'invalid class of genotype : {:}'.format(type(genotype))
        self.node_num = (len(genotype) + 1)
        self.nodes = []
        self.node_N = []  # number of incoming edges per node
        for (idx, node_info) in enumerate(genotype):
            assert (isinstance(node_info, list) or isinstance(node_info, tuple)), 'invalid class of node_info : {:}'.format(type(node_info))
            assert (len(node_info) >= 1), 'invalid length : {:}'.format(len(node_info))
            for node_in in node_info:
                assert (isinstance(node_in, list) or isinstance(node_in, tuple)), 'invalid class of in-node : {:}'.format(type(node_in))
                # an edge may only come from an earlier node
                assert ((len(node_in) == 2) and (node_in[1] <= idx)), 'invalid in-node : {:}'.format(node_in)
            self.node_N.append(len(node_info))
            self.nodes.append(tuple(deepcopy(node_info)))
    def tolist(self, remove_str):
        """Return (genotype-list, ok) with edges named `remove_str` dropped.

        ok is False when removing leaves some node with no inputs.
        """
        genotypes = []
        for node_info in self.nodes:
            node_info = list(node_info)
            node_info = sorted(node_info, key=(lambda x: (x[1], x[0])))
            node_info = tuple(filter((lambda x: (x[0] != remove_str)), node_info))
            if (len(node_info) == 0):
                return (None, False)
            genotypes.append(node_info)
        return (genotypes, True)
    def node(self, index):
        """Return the edge tuple of node `index` (1-based; node 0 is the input)."""
        assert ((index > 0) and (index <= len(self))), 'invalid index={:} < {:}'.format(index, len(self))
        return self.nodes[index]
    def tostr(self):
        """Serialize into the canonical '|op~in|...+|...' string form."""
        strings = []
        for node_info in self.nodes:
            string = '|'.join([(x[0] + '~{:}'.format(x[1])) for x in node_info])
            string = '|{:}|'.format(string)
            strings.append(string)
        return '+'.join(strings)
    def check_valid(self):
        """True iff the output node is reachable through non-'none' edges."""
        nodes = {0: True}
        for (i, node_info) in enumerate(self.nodes):
            sums = []
            for (op, xin) in node_info:
                if ((op == 'none') or (nodes[xin] is False)):
                    x = False
                else:
                    x = True
                sums.append(x)
            nodes[(i + 1)] = (sum(sums) > 0)
        return nodes[len(self.nodes)]
    def to_unique_str(self, consider_zero=False):
        """Build an architecture-equivalence key.

        consider_zero=None: literal expansion; True: collapse 'none' edges to
        '#' and 'skip_connect' to the source's key; False: only collapse
        'skip_connect'.  Sorting each node's terms makes the key order-free.
        """
        nodes = {0: '0'}
        for (i_node, node_info) in enumerate(self.nodes):
            cur_node = []
            for (op, xin) in node_info:
                if (consider_zero is None):
                    x = ((('(' + nodes[xin]) + ')') + '@{:}'.format(op))
                elif consider_zero:
                    if ((op == 'none') or (nodes[xin] == '#')):
                        x = '#'
                    elif (op == 'skip_connect'):
                        x = nodes[xin]
                    else:
                        x = ((('(' + nodes[xin]) + ')') + '@{:}'.format(op))
                elif (op == 'skip_connect'):
                    x = nodes[xin]
                else:
                    x = ((('(' + nodes[xin]) + ')') + '@{:}'.format(op))
                cur_node.append(x)
            nodes[(i_node + 1)] = '+'.join(sorted(cur_node))
        return nodes[len(self.nodes)]
    def check_valid_op(self, op_names):
        """True iff every edge's operation appears in `op_names`."""
        for node_info in self.nodes:
            for inode_edge in node_info:
                if (inode_edge[0] not in op_names):
                    return False
        return True
    def __repr__(self):
        return '{name}({node_num} nodes with {node_info})'.format(name=self.__class__.__name__, node_info=self.tostr(), **self.__dict__)
    def __len__(self):
        # number of nodes including the input node
        return (len(self.nodes) + 1)
    def __getitem__(self, index):
        return self.nodes[index]
    @staticmethod
    def str2structure(xstr):
        """Parse the canonical string form back into a Structure."""
        if isinstance(xstr, Structure):
            return xstr
        assert isinstance(xstr, str), 'must take string (not {:}) as input'.format(type(xstr))
        nodestrs = xstr.split('+')
        genotypes = []
        for (i, node_str) in enumerate(nodestrs):
            inputs = list(filter((lambda x: (x != '')), node_str.split('|')))
            for xinput in inputs:
                assert (len(xinput.split('~')) == 2), 'invalid input length : {:}'.format(xinput)
            inputs = (xi.split('~') for xi in inputs)
            input_infos = tuple(((op, int(IDX)) for (op, IDX) in inputs))
            genotypes.append(input_infos)
        return Structure(genotypes)
    @staticmethod
    def str2fullstructure(xstr, default_name='none'):
        """Like str2structure, but pad missing edges with `default_name` ops."""
        assert isinstance(xstr, str), 'must take string (not {:}) as input'.format(type(xstr))
        nodestrs = xstr.split('+')
        genotypes = []
        for (i, node_str) in enumerate(nodestrs):
            inputs = list(filter((lambda x: (x != '')), node_str.split('|')))
            for xinput in inputs:
                assert (len(xinput.split('~')) == 2), 'invalid input length : {:}'.format(xinput)
            inputs = (xi.split('~') for xi in inputs)
            input_infos = list(((op, int(IDX)) for (op, IDX) in inputs))
            all_in_nodes = list((x[1] for x in input_infos))
            # every earlier node must feed this node; add default ops if absent
            for j in range(i):
                if (j not in all_in_nodes):
                    input_infos.append((default_name, j))
            node_info = sorted(input_infos, key=(lambda x: (x[1], x[0])))
            genotypes.append(tuple(node_info))
        return Structure(genotypes)
    @staticmethod
    def gen_all(search_space, num, return_ori):
        """Enumerate every architecture with `num` nodes over `search_space`.

        Returns raw genotype lists when return_ori is True, else Structures.
        """
        assert (isinstance(search_space, list) or isinstance(search_space, tuple)), 'invalid class of search-space : {:}'.format(type(search_space))
        assert (num >= 2), 'There should be at least two nodes in a neural cell instead of {:}'.format(num)
        all_archs = get_combination(search_space, 1)
        for (i, arch) in enumerate(all_archs):
            all_archs[i] = [tuple(arch)]
        for inode in range(2, num):
            cur_nodes = get_combination(search_space, inode)
            new_all_archs = []
            for previous_arch in all_archs:
                for cur_node in cur_nodes:
                    new_all_archs.append((previous_arch + [tuple(cur_node)]))
            all_archs = new_all_archs
        if return_ori:
            return all_archs
        else:
            return [Structure(x) for x in all_archs]
|
class NAS201SearchCell(nn.Module):
    """NAS-Bench-201 search cell: every edge holds ALL candidate operations.

    The various ``forward_*`` methods implement the mixing strategies used
    by the different search algorithms (DARTS joint, GDAS hard sampling,
    uniform random sampling, discrete selection, fixed dynamic genotype).
    """
    def __init__(self, C_in, C_out, stride, max_nodes, op_names, affine=False, track_running_stats=True):
        super(NAS201SearchCell, self).__init__()
        self.op_names = deepcopy(op_names)
        self.edges = nn.ModuleDict()
        self.max_nodes = max_nodes
        self.in_dim = C_in
        self.out_dim = C_out
        for i in range(1, max_nodes):
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                # only edges from node-0 apply the (possibly strided) C_in->C_out transform
                if (j == 0):
                    xlists = [OPS[op_name](C_in, C_out, stride, affine, track_running_stats) for op_name in op_names]
                else:
                    xlists = [OPS[op_name](C_in, C_out, 1, affine, track_running_stats) for op_name in op_names]
                self.edges[node_str] = nn.ModuleList(xlists)
        # deterministic edge ordering shared with the architecture parameters
        self.edge_keys = sorted(list(self.edges.keys()))
        self.edge2index = {key: i for (i, key) in enumerate(self.edge_keys)}
        self.num_edges = len(self.edges)
    def extra_repr(self):
        string = 'info :: {max_nodes} nodes, inC={in_dim}, outC={out_dim}'.format(**self.__dict__)
        return string
    def forward(self, inputs, weightss):
        """Soft-mixture forward: every op on every edge, weighted by `weightss`."""
        nodes = [inputs]
        for i in range(1, self.max_nodes):
            inter_nodes = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                weights = weightss[self.edge2index[node_str]]
                inter_nodes.append(sum(((layer(nodes[j]) * w) for (layer, w) in zip(self.edges[node_str], weights))))
            nodes.append(sum(inter_nodes))
        return nodes[(- 1)]
    def forward_gdas(self, inputs, hardwts, index):
        """GDAS forward: only the argmax op runs; the straight-through weights
        keep every op's weight in the graph so gradients flow to all logits."""
        nodes = [inputs]
        for i in range(1, self.max_nodes):
            inter_nodes = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                weights = hardwts[self.edge2index[node_str]]
                argmaxs = index[self.edge2index[node_str]].item()
                weigsum = sum((((weights[_ie] * edge(nodes[j])) if (_ie == argmaxs) else weights[_ie]) for (_ie, edge) in enumerate(self.edges[node_str])))
                inter_nodes.append(weigsum)
            nodes.append(sum(inter_nodes))
        return nodes[(- 1)]
    def forward_joint(self, inputs, weightss):
        """DARTS-style forward: weighted sum of all ops on each edge."""
        nodes = [inputs]
        for i in range(1, self.max_nodes):
            inter_nodes = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                weights = weightss[self.edge2index[node_str]]
                aggregation = sum(((layer(nodes[j]) * w) for (layer, w) in zip(self.edges[node_str], weights)))
                inter_nodes.append(aggregation)
            nodes.append(sum(inter_nodes))
        return nodes[(- 1)]
    def forward_urs(self, inputs):
        """Uniform random sampling forward: pick one random op per edge,
        re-sampling the whole node until at least one op is not 'zero'."""
        nodes = [inputs]
        for i in range(1, self.max_nodes):
            while True:
                (sops, has_non_zero) = ([], False)
                for j in range(i):
                    node_str = '{:}<-{:}'.format(i, j)
                    candidates = self.edges[node_str]
                    select_op = random.choice(candidates)
                    sops.append(select_op)
                    if ((not hasattr(select_op, 'is_zero')) or (select_op.is_zero is False)):
                        has_non_zero = True
                if has_non_zero:
                    break
            inter_nodes = []
            for (j, select_op) in enumerate(sops):
                inter_nodes.append(select_op(nodes[j]))
            nodes.append(sum(inter_nodes))
        return nodes[(- 1)]
    def forward_select(self, inputs, weightss):
        """Discrete forward: run only the argmax op on each edge."""
        nodes = [inputs]
        for i in range(1, self.max_nodes):
            inter_nodes = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                weights = weightss[self.edge2index[node_str]]
                inter_nodes.append(self.edges[node_str][weights.argmax().item()](nodes[j]))
            nodes.append(sum(inter_nodes))
        return nodes[(- 1)]
    def forward_dynamic(self, inputs, structure):
        """Forward a fixed architecture described by a Structure object."""
        nodes = [inputs]
        for i in range(1, self.max_nodes):
            cur_op_node = structure.nodes[(i - 1)]
            inter_nodes = []
            for (op_name, j) in cur_op_node:
                node_str = '{:}<-{:}'.format(i, j)
                op_index = self.op_names.index(op_name)
                inter_nodes.append(self.edges[node_str][op_index](nodes[j]))
            nodes.append(sum(inter_nodes))
        return nodes[(- 1)]
|
class MixedOp(nn.Module):
    """Bundle of all candidate operations living on a single DARTS edge."""

    def __init__(self, space, C, stride, affine, track_running_stats):
        super(MixedOp, self).__init__()
        self._ops = nn.ModuleList(
            OPS[primitive](C, C, stride, affine, track_running_stats) for primitive in space)

    def forward_gdas(self, x, weights, index):
        # only the selected op runs; multiplying by its (straight-through)
        # weight keeps the path differentiable w.r.t. the architecture logits
        return self._ops[index](x) * weights[index]

    def forward_darts(self, x, weights):
        # soft mixture over every candidate op
        return sum(w * op(x) for w, op in zip(weights, self._ops))
|
class NASNetSearchCell(nn.Module):
    """DARTS-style search cell: each edge holds a MixedOp over the whole space.

    The last ``multiplier`` intermediate states are concatenated as output.
    """

    def __init__(self, space, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev, affine, track_running_stats):
        super(NASNetSearchCell, self).__init__()
        self.reduction = reduction
        self.op_names = deepcopy(space)
        # if the previous cell reduced, s0 must be downsampled to match s1
        if reduction_prev:
            self.preprocess0 = OPS['skip_connect'](C_prev_prev, C, 2, affine, track_running_stats)
        else:
            self.preprocess0 = OPS['nor_conv_1x1'](C_prev_prev, C, 1, affine, track_running_stats)
        self.preprocess1 = OPS['nor_conv_1x1'](C_prev, C, 1, affine, track_running_stats)
        self._steps = steps
        self._multiplier = multiplier
        self._ops = nn.ModuleList()
        self.edges = nn.ModuleDict()
        for step in range(steps):
            for src in range(2 + step):
                # edges from the two input nodes use stride 2 in reduction cells
                edge_stride = 2 if (reduction and src < 2) else 1
                self.edges['{:}<-{:}'.format(step, src)] = MixedOp(space, C, edge_stride, affine, track_running_stats)
        self.edge_keys = sorted(list(self.edges.keys()))
        self.edge2index = {key: i for i, key in enumerate(self.edge_keys)}
        self.num_edges = len(self.edges)

    @property
    def multiplier(self):
        return self._multiplier

    def forward_gdas(self, s0, s1, weightss, indexs):
        states = [self.preprocess0(s0), self.preprocess1(s1)]
        for step in range(self._steps):
            outs = []
            for src, h in enumerate(states):
                node_str = '{:}<-{:}'.format(step, src)
                eidx = self.edge2index[node_str]
                outs.append(self.edges[node_str].forward_gdas(h, weightss[eidx], indexs[eidx].item()))
            states.append(sum(outs))
        return torch.cat(states[-self._multiplier:], dim=1)

    def forward_darts(self, s0, s1, weightss):
        states = [self.preprocess0(s0), self.preprocess1(s1)]
        for step in range(self._steps):
            outs = []
            for src, h in enumerate(states):
                node_str = '{:}<-{:}'.format(step, src)
                outs.append(self.edges[node_str].forward_darts(h, weightss[self.edge2index[node_str]]))
            states.append(sum(outs))
        return torch.cat(states[-self._multiplier:], dim=1)
|
class TinyNetworkDarts(nn.Module):
    """DARTS super-network over the NAS-Bench-201 topology (shared search cell)."""

    def __init__(self, C, N, max_nodes, num_classes, search_space, affine, track_running_stats):
        super(TinyNetworkDarts, self).__init__()
        self._C = C
        self._layerN = N
        self.max_nodes = max_nodes
        # NOTE(review): stem expects a single input channel — confirm dataset.
        self.stem = nn.Sequential(nn.Conv2d(1, C, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(C))
        layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N
        layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N
        C_prev, num_edge, edge2index = C, None, None
        self.cells = nn.ModuleList()
        for C_curr, reduction in zip(layer_channels, layer_reductions):
            if reduction:
                cell = ResNetBasicblock(C_prev, C_curr, 2)
            else:
                cell = SearchCell(C_prev, C_curr, 1, max_nodes, search_space, affine, track_running_stats)
                # all search cells must share the same edge topology
                if num_edge is None:
                    num_edge, edge2index = cell.num_edges, cell.edge2index
                else:
                    assert (num_edge == cell.num_edges) and (edge2index == cell.edge2index), 'invalid {:} vs. {:}.'.format(num_edge, cell.num_edges)
            self.cells.append(cell)
            C_prev = cell.out_dim
        self.op_names = deepcopy(search_space)
        self._Layer = len(self.cells)
        self.edge2index = edge2index
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        self.arch_parameters = nn.Parameter(0.001 * torch.randn(num_edge, len(search_space)))

    def get_weights(self):
        """All network (non-architecture) parameters."""
        params = list(self.stem.parameters()) + list(self.cells.parameters())
        params += list(self.lastact.parameters()) + list(self.global_pooling.parameters())
        params += list(self.classifier.parameters())
        return params

    def get_alphas(self):
        """The architecture parameters, as a single-element list."""
        return [self.arch_parameters]

    def show_alphas(self):
        """Readable softmax of the architecture parameters."""
        with torch.no_grad():
            return 'arch-parameters :\n{:}'.format(nn.functional.softmax(self.arch_parameters, dim=-1).cpu())

    def get_message(self):
        msg = self.extra_repr()
        for idx, cell in enumerate(self.cells):
            msg += '\n {:02d}/{:02d} :: {:}'.format(idx, len(self.cells), cell.extra_repr())
        return msg

    def extra_repr(self):
        return '{name}(C={_C}, Max-Nodes={max_nodes}, N={_layerN}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def genotype(self):
        """Derive the discrete architecture by per-edge argmax over alphas."""
        genotypes = []
        for i in range(1, self.max_nodes):
            node = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                with torch.no_grad():
                    weights = self.arch_parameters[self.edge2index[node_str]]
                    op_name = self.op_names[weights.argmax().item()]
                node.append((op_name, j))
            genotypes.append(tuple(node))
        return Structure(genotypes)

    def forward(self, inputs):
        alphas = nn.functional.softmax(self.arch_parameters, dim=-1)
        feature = self.stem(inputs)
        for cell in self.cells:
            if isinstance(cell, SearchCell):
                feature = cell(feature, alphas)
            else:
                feature = cell(feature)
        out = self.global_pooling(self.lastact(feature))
        out = out.view(out.size(0), -1)
        return (out, self.classifier(out))
|
class NASNetworkDARTS(nn.Module):
    def __init__(self, C: int, N: int, steps: int, multiplier: int, stem_multiplier: int, num_classes: int, search_space: List[Text], affine: bool, track_running_stats: bool):
        """Build the DARTS super-network for the NASNet topology.

        Layout: N normal cells, a reduction cell, N-1 normal, a reduction,
        N-1 normal; separate architecture parameters are kept for normal and
        reduction cells.
        """
        super(NASNetworkDARTS, self).__init__()
        self._C = C
        self._layerN = N
        self._steps = steps
        self._multiplier = multiplier
        self.stem = nn.Sequential(nn.Conv2d(3, (C * stem_multiplier), kernel_size=3, padding=1, bias=False), nn.BatchNorm2d((C * stem_multiplier)))
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * (N - 1))) + [(C * 4)]) + ([(C * 4)] * (N - 1)))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * (N - 1))) + [True]) + ([False] * (N - 1)))
        (num_edge, edge2index) = (None, None)
        (C_prev_prev, C_prev, C_curr, reduction_prev) = ((C * stem_multiplier), (C * stem_multiplier), C, False)
        self.cells = nn.ModuleList()
        for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            cell = SearchCell(search_space, steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, affine, track_running_stats)
            # every cell must expose the same edge topology
            if (num_edge is None):
                (num_edge, edge2index) = (cell.num_edges, cell.edge2index)
            else:
                assert ((num_edge == cell.num_edges) and (edge2index == cell.edge2index)), 'invalid {:} vs. {:}.'.format(num_edge, cell.num_edges)
            self.cells.append(cell)
            (C_prev_prev, C_prev, reduction_prev) = (C_prev, (multiplier * C_curr), reduction)
        self.op_names = deepcopy(search_space)
        self._Layer = len(self.cells)
        self.edge2index = edge2index
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        # one set of architecture logits per cell type (normal / reduction)
        self.arch_normal_parameters = nn.Parameter((0.001 * torch.randn(num_edge, len(search_space))))
        self.arch_reduce_parameters = nn.Parameter((0.001 * torch.randn(num_edge, len(search_space))))
def get_weights(self) -> List[torch.nn.Parameter]:
xlist = (list(self.stem.parameters()) + list(self.cells.parameters()))
xlist += (list(self.lastact.parameters()) + list(self.global_pooling.parameters()))
xlist += list(self.classifier.parameters())
return xlist
def get_alphas(self) -> List[torch.nn.Parameter]:
return [self.arch_normal_parameters, self.arch_reduce_parameters]
def show_alphas(self) -> Text:
with torch.no_grad():
A = 'arch-normal-parameters :\n{:}'.format(nn.functional.softmax(self.arch_normal_parameters, dim=(- 1)).cpu())
B = 'arch-reduce-parameters :\n{:}'.format(nn.functional.softmax(self.arch_reduce_parameters, dim=(- 1)).cpu())
return '{:}\n{:}'.format(A, B)
def get_message(self) -> Text:
string = self.extra_repr()
for (i, cell) in enumerate(self.cells):
string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
return string
def extra_repr(self) -> Text:
return '{name}(C={_C}, N={_layerN}, steps={_steps}, multiplier={_multiplier}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)
def genotype(self) -> Dict[(Text, List)]:
def _parse(weights):
gene = []
for i in range(self._steps):
edges = []
for j in range((2 + i)):
node_str = '{:}<-{:}'.format(i, j)
ws = weights[self.edge2index[node_str]]
for (k, op_name) in enumerate(self.op_names):
if (op_name == 'none'):
continue
edges.append((op_name, j, ws[k]))
edges = sorted(edges, key=(lambda x: (- x[(- 1)])))
selected_edges = edges[:2]
gene.append(tuple(selected_edges))
return gene
with torch.no_grad():
gene_normal = _parse(torch.softmax(self.arch_normal_parameters, dim=(- 1)).cpu().numpy())
gene_reduce = _parse(torch.softmax(self.arch_reduce_parameters, dim=(- 1)).cpu().numpy())
return {'normal': gene_normal, 'normal_concat': list(range(((2 + self._steps) - self._multiplier), (self._steps + 2))), 'reduce': gene_reduce, 'reduce_concat': list(range(((2 + self._steps) - self._multiplier), (self._steps + 2)))}
def forward(self, inputs):
normal_w = nn.functional.softmax(self.arch_normal_parameters, dim=1)
reduce_w = nn.functional.softmax(self.arch_reduce_parameters, dim=1)
s0 = s1 = self.stem(inputs)
for (i, cell) in enumerate(self.cells):
if cell.reduction:
ww = reduce_w
else:
ww = normal_w
(s0, s1) = (s1, cell.forward_darts(s0, s1, ww))
out = self.lastact(s1)
out = self.global_pooling(out)
out = out.view(out.size(0), (- 1))
logits = self.classifier(out)
return (out, logits)
|
class TinyNetworkENAS(nn.Module):
    """ENAS search network over the tiny (NAS-Bench-201-style) cell space.

    The architecture is sampled externally by a ``Controller``; ``update_arch``
    caches the choice and ``forward`` evaluates every search cell with it.
    """

    def __init__(self, C, N, max_nodes, num_classes, search_space, affine, track_running_stats):
        super(TinyNetworkENAS, self).__init__()
        self._C = C
        self._layerN = N
        self.max_nodes = max_nodes
        self.stem = nn.Sequential(nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(C))
        # Stage plan: N search cells, fixed reduction, N cells, reduction, N cells.
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * N)) + [(C * 4)]) + ([(C * 4)] * N))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * N)) + [True]) + ([False] * N))
        (C_prev, num_edge, edge2index) = (C, None, None)
        self.cells = nn.ModuleList()
        for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            if reduction:
                # Reduction positions use a fixed residual block, not a search cell.
                cell = ResNetBasicblock(C_prev, C_curr, 2)
            else:
                cell = SearchCell(C_prev, C_curr, 1, max_nodes, search_space, affine, track_running_stats)
                if (num_edge is None):
                    (num_edge, edge2index) = (cell.num_edges, cell.edge2index)
                else:
                    assert ((num_edge == cell.num_edges) and (edge2index == cell.edge2index)), 'invalid {:} vs. {:}.'.format(num_edge, cell.num_edges)
            self.cells.append(cell)
            C_prev = cell.out_dim
        self.op_names = deepcopy(search_space)
        self._Layer = len(self.cells)
        self.edge2index = edge2index
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        # Architecture currently in effect; set via update_arch().
        self.sampled_arch = None

    def update_arch(self, _arch):
        """Cache the architecture to evaluate.

        Accepts None (clear), a ready ``Structure``, or a flat list/tuple of
        per-edge op indices (as emitted by the Controller) which is decoded
        into a ``Structure``. Returns the cached architecture.
        """
        if (_arch is None):
            self.sampled_arch = None
        elif isinstance(_arch, Structure):
            self.sampled_arch = _arch
        elif isinstance(_arch, (list, tuple)):
            genotypes = []
            for i in range(1, self.max_nodes):
                xlist = []
                for j in range(i):
                    node_str = '{:}<-{:}'.format(i, j)
                    op_index = _arch[self.edge2index[node_str]]
                    op_name = self.op_names[op_index]
                    xlist.append((op_name, j))
                genotypes.append(tuple(xlist))
            self.sampled_arch = Structure(genotypes)
        else:
            raise ValueError('invalid type of input architecture : {:}'.format(_arch))
        return self.sampled_arch

    def create_controller(self):
        """Return a fresh RNN controller sized to this network's edges/ops."""
        return Controller(len(self.edge2index), len(self.op_names))

    def get_message(self):
        """Return a multi-line summary of the network and every cell."""
        string = self.extra_repr()
        for (i, cell) in enumerate(self.cells):
            string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
        return string

    def extra_repr(self):
        return '{name}(C={_C}, Max-Nodes={max_nodes}, N={_layerN}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, inputs):
        """Run the stack with the cached sampled architecture; returns (features, logits)."""
        feature = self.stem(inputs)
        for (i, cell) in enumerate(self.cells):
            if isinstance(cell, SearchCell):
                feature = cell.forward_dynamic(feature, self.sampled_arch)
            else:
                feature = cell(feature)
        out = self.lastact(feature)
        out = self.global_pooling(out)
        out = out.view(out.size(0), (- 1))
        logits = self.classifier(out)
        return (out, logits)
|
class Controller(nn.Module):
    """ENAS RNN controller: an LSTM that samples one operation per edge.

    At each step the embedding of the previously sampled op is fed back into
    the LSTM; the resulting logits are temperature-scaled and tanh-bounded
    before sampling from a categorical distribution.
    """

    def __init__(self, num_edge, num_ops, lstm_size=32, lstm_num_layers=2, tanh_constant=2.5, temperature=5.0):
        super(Controller, self).__init__()
        self.num_edge = num_edge
        self.num_ops = num_ops
        self.lstm_size = lstm_size
        self.lstm_N = lstm_num_layers
        self.tanh_constant = tanh_constant
        self.temperature = temperature
        # Learnable start token for the first LSTM step (shape 1 x 1 x hidden).
        self.register_parameter('input_vars', nn.Parameter(torch.Tensor(1, 1, lstm_size)))
        self.w_lstm = nn.LSTM(input_size=self.lstm_size, hidden_size=self.lstm_size, num_layers=self.lstm_N)
        self.w_embd = nn.Embedding(self.num_ops, self.lstm_size)
        self.w_pred = nn.Linear(self.lstm_size, self.num_ops)
        # Uniform init in [-0.1, 0.1]; the order of these calls is preserved so
        # seeded runs reproduce the original parameter values exactly.
        for tensor in (self.input_vars, self.w_lstm.weight_hh_l0, self.w_lstm.weight_ih_l0, self.w_embd.weight, self.w_pred.weight):
            nn.init.uniform_(tensor, (- 0.1), 0.1)

    def forward(self):
        """Sample an architecture.

        Returns (sum of log-probs, sum of entropies, list of op indices),
        with one sampled op index per edge.
        """
        step_input, hidden = self.input_vars, None
        log_probs, entropys, sampled_arch = [], [], []
        for _ in range(self.num_edge):
            output, hidden = self.w_lstm(step_input, hidden)
            scores = self.tanh_constant * torch.tanh(self.w_pred(output) / self.temperature)
            dist = Categorical(logits=scores)
            choice = dist.sample()
            sampled_arch.append(choice.item())
            log_probs.append(dist.log_prob(choice).view(-1))
            entropys.append(dist.entropy().view(-1))
            step_input = self.w_embd(choice)
        return (torch.sum(torch.cat(log_probs)), torch.sum(torch.cat(entropys)), sampled_arch)
|
class TinyNetworkGDAS(nn.Module):
    """GDAS search network over the tiny (NAS-Bench-201-style) cell space.

    ``forward`` draws a Gumbel-softmax sample of one op per edge (controlled
    by temperature ``tau``) and evaluates cells with straight-through
    hard weights via ``forward_gdas``.
    """

    def __init__(self, C, N, max_nodes, num_classes, search_space, affine, track_running_stats):
        super(TinyNetworkGDAS, self).__init__()
        self._C = C
        self._layerN = N
        self.max_nodes = max_nodes
        self.stem = nn.Sequential(nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(C))
        # Stage plan: N search cells, fixed reduction, N cells, reduction, N cells.
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * N)) + [(C * 4)]) + ([(C * 4)] * N))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * N)) + [True]) + ([False] * N))
        (C_prev, num_edge, edge2index) = (C, None, None)
        self.cells = nn.ModuleList()
        for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            if reduction:
                cell = ResNetBasicblock(C_prev, C_curr, 2)
            else:
                cell = SearchCell(C_prev, C_curr, 1, max_nodes, search_space, affine, track_running_stats)
                if (num_edge is None):
                    (num_edge, edge2index) = (cell.num_edges, cell.edge2index)
                else:
                    assert ((num_edge == cell.num_edges) and (edge2index == cell.edge2index)), 'invalid {:} vs. {:}.'.format(num_edge, cell.num_edges)
            self.cells.append(cell)
            C_prev = cell.out_dim
        self.op_names = deepcopy(search_space)
        self._Layer = len(self.cells)
        self.edge2index = edge2index
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        # Shared (num_edge, num_ops) alpha tensor; tau is the gumbel temperature.
        self.arch_parameters = nn.Parameter((0.001 * torch.randn(num_edge, len(search_space))))
        self.tau = 10

    def get_weights(self):
        """Return all network (non-architecture) parameters."""
        xlist = (list(self.stem.parameters()) + list(self.cells.parameters()))
        xlist += (list(self.lastact.parameters()) + list(self.global_pooling.parameters()))
        xlist += list(self.classifier.parameters())
        return xlist

    def set_tau(self, tau):
        """Set the Gumbel-softmax temperature."""
        self.tau = tau

    def get_tau(self):
        """Return the current Gumbel-softmax temperature."""
        return self.tau

    def get_alphas(self):
        """Return the architecture-parameter tensor (as a one-element list)."""
        return [self.arch_parameters]

    def show_alphas(self):
        """Pretty-print the softmax of the alpha tensor."""
        with torch.no_grad():
            return 'arch-parameters :\n{:}'.format(nn.functional.softmax(self.arch_parameters, dim=(- 1)).cpu())

    def get_message(self):
        """Return a multi-line summary of the network and every cell."""
        string = self.extra_repr()
        for (i, cell) in enumerate(self.cells):
            string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
        return string

    def extra_repr(self):
        return '{name}(C={_C}, Max-Nodes={max_nodes}, N={_layerN}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def genotype(self):
        """Discretize: take the argmax op on every edge and return a Structure."""
        genotypes = []
        for i in range(1, self.max_nodes):
            xlist = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                with torch.no_grad():
                    weights = self.arch_parameters[self.edge2index[node_str]]
                    op_name = self.op_names[weights.argmax().item()]
                xlist.append((op_name, j))
            genotypes.append(tuple(xlist))
        return Structure(genotypes)

    def forward(self, inputs):
        """Gumbel-softmax forward; returns (pooled features, logits)."""
        while True:
            # Sample standard Gumbel noise via -log(Exponential(1)).
            gumbels = (- torch.empty_like(self.arch_parameters).exponential_().log())
            logits = ((self.arch_parameters.log_softmax(dim=1) + gumbels) / self.tau)
            probs = nn.functional.softmax(logits, dim=1)
            index = probs.max((- 1), keepdim=True)[1]
            one_h = torch.zeros_like(logits).scatter_((- 1), index, 1.0)
            # Straight-through estimator: hard one-hot forward, soft gradient.
            hardwts = ((one_h - probs.detach()) + probs)
            # Reject numerically-degenerate draws (inf gumbels / nan probs).
            if (torch.isinf(gumbels).any() or torch.isinf(probs).any() or torch.isnan(probs).any()):
                continue
            else:
                break
        feature = self.stem(inputs)
        for (i, cell) in enumerate(self.cells):
            if isinstance(cell, SearchCell):
                feature = cell.forward_gdas(feature, hardwts, index)
            else:
                feature = cell(feature)
        out = self.lastact(feature)
        out = self.global_pooling(out)
        out = out.view(out.size(0), (- 1))
        logits = self.classifier(out)
        return (out, logits)
|
class NASNetworkGDAS_FRC(nn.Module):
    """GDAS over a NASNet-style space with Fixed Reduction Cells.

    Only the normal cells are searched (a single ``arch_parameters`` tensor);
    reduction positions use the fixed ``RAW_OP_CLASSES['gdas_reduction']``
    cell, which is why ``genotype`` emits only a 'normal' part.
    """

    def __init__(self, C, N, steps, multiplier, stem_multiplier, num_classes, search_space, affine, track_running_stats):
        super(NASNetworkGDAS_FRC, self).__init__()
        self._C = C
        self._layerN = N
        self._steps = steps
        self._multiplier = multiplier
        self.stem = nn.Sequential(nn.Conv2d(3, (C * stem_multiplier), kernel_size=3, padding=1, bias=False), nn.BatchNorm2d((C * stem_multiplier)))
        # Stage plan: N normal, reduction, N-1 normal, reduction, N-1 normal.
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * (N - 1))) + [(C * 4)]) + ([(C * 4)] * (N - 1)))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * (N - 1))) + [True]) + ([False] * (N - 1)))
        (num_edge, edge2index) = (None, None)
        (C_prev_prev, C_prev, C_curr, reduction_prev) = ((C * stem_multiplier), (C * stem_multiplier), C, False)
        self.cells = nn.ModuleList()
        for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            if reduction:
                # Fixed, non-searched reduction cell.
                cell = RAW_OP_CLASSES['gdas_reduction'](C_prev_prev, C_prev, C_curr, reduction_prev, affine, track_running_stats)
            else:
                cell = SearchCell(search_space, steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, affine, track_running_stats)
            if (num_edge is None):
                (num_edge, edge2index) = (cell.num_edges, cell.edge2index)
            else:
                # Reduction cells are exempt: they carry no searchable edges.
                assert (reduction or ((num_edge == cell.num_edges) and (edge2index == cell.edge2index))), 'invalid {:} vs. {:}.'.format(num_edge, cell.num_edges)
            self.cells.append(cell)
            (C_prev_prev, C_prev, reduction_prev) = (C_prev, (cell.multiplier * C_curr), reduction)
        self.op_names = deepcopy(search_space)
        self._Layer = len(self.cells)
        self.edge2index = edge2index
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        # Alphas for normal cells only; tau is the gumbel temperature.
        self.arch_parameters = nn.Parameter((0.001 * torch.randn(num_edge, len(search_space))))
        self.tau = 10

    def get_weights(self):
        """Return all network (non-architecture) parameters."""
        xlist = (list(self.stem.parameters()) + list(self.cells.parameters()))
        xlist += (list(self.lastact.parameters()) + list(self.global_pooling.parameters()))
        xlist += list(self.classifier.parameters())
        return xlist

    def set_tau(self, tau):
        """Set the Gumbel-softmax temperature."""
        self.tau = tau

    def get_tau(self):
        """Return the current Gumbel-softmax temperature."""
        return self.tau

    def get_alphas(self):
        """Return the architecture-parameter tensor (as a one-element list)."""
        return [self.arch_parameters]

    def show_alphas(self):
        """Pretty-print the softmax of the alpha tensor."""
        with torch.no_grad():
            A = 'arch-normal-parameters :\n{:}'.format(nn.functional.softmax(self.arch_parameters, dim=(- 1)).cpu())
            return '{:}'.format(A)

    def get_message(self):
        """Return a multi-line summary of the network and every cell."""
        string = self.extra_repr()
        for (i, cell) in enumerate(self.cells):
            string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
        return string

    def extra_repr(self):
        return '{name}(C={_C}, N={_layerN}, steps={_steps}, multiplier={_multiplier}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def genotype(self):
        """Derive the discrete normal cell: top-2 incoming edges per node
        ('none' excluded). Reduction cells are fixed, so none is reported."""
        def _parse(weights):
            gene = []
            for i in range(self._steps):
                edges = []
                for j in range((2 + i)):
                    node_str = '{:}<-{:}'.format(i, j)
                    ws = weights[self.edge2index[node_str]]
                    for (k, op_name) in enumerate(self.op_names):
                        if (op_name == 'none'):
                            continue
                        edges.append((op_name, j, ws[k]))
                edges = sorted(edges, key=(lambda x: (- x[(- 1)])))
                selected_edges = edges[:2]
                gene.append(tuple(selected_edges))
            return gene
        with torch.no_grad():
            gene_normal = _parse(torch.softmax(self.arch_parameters, dim=(- 1)).cpu().numpy())
            return {'normal': gene_normal, 'normal_concat': list(range(((2 + self._steps) - self._multiplier), (self._steps + 2)))}

    def forward(self, inputs):
        """Gumbel-softmax forward; returns (pooled features, logits)."""
        def get_gumbel_prob(xins):
            # Rejection-sample straight-through hard weights (see TinyNetworkGDAS).
            while True:
                gumbels = (- torch.empty_like(xins).exponential_().log())
                logits = ((xins.log_softmax(dim=1) + gumbels) / self.tau)
                probs = nn.functional.softmax(logits, dim=1)
                index = probs.max((- 1), keepdim=True)[1]
                one_h = torch.zeros_like(logits).scatter_((- 1), index, 1.0)
                hardwts = ((one_h - probs.detach()) + probs)
                if (torch.isinf(gumbels).any() or torch.isinf(probs).any() or torch.isnan(probs).any()):
                    continue
                else:
                    break
            return (hardwts, index)
        (hardwts, index) = get_gumbel_prob(self.arch_parameters)
        s0 = s1 = self.stem(inputs)
        for (i, cell) in enumerate(self.cells):
            if cell.reduction:
                (s0, s1) = (s1, cell(s0, s1))
            else:
                (s0, s1) = (s1, cell.forward_gdas(s0, s1, hardwts, index))
        out = self.lastact(s1)
        out = self.global_pooling(out)
        out = out.view(out.size(0), (- 1))
        logits = self.classifier(out)
        return (out, logits)
|
class NASNetworkGDAS(nn.Module):
    """GDAS search network over a full NASNet-style space.

    Searches both normal and reduction cells (separate alpha tensors);
    ``forward`` draws one Gumbel-softmax sample per cell type and runs every
    cell with straight-through hard weights via ``forward_gdas``.
    """

    def __init__(self, C, N, steps, multiplier, stem_multiplier, num_classes, search_space, affine, track_running_stats):
        super(NASNetworkGDAS, self).__init__()
        self._C = C
        self._layerN = N
        self._steps = steps
        self._multiplier = multiplier
        self.stem = nn.Sequential(nn.Conv2d(3, (C * stem_multiplier), kernel_size=3, padding=1, bias=False), nn.BatchNorm2d((C * stem_multiplier)))
        # Stage plan: N normal, reduction, N-1 normal, reduction, N-1 normal.
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * (N - 1))) + [(C * 4)]) + ([(C * 4)] * (N - 1)))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * (N - 1))) + [True]) + ([False] * (N - 1)))
        (num_edge, edge2index) = (None, None)
        (C_prev_prev, C_prev, C_curr, reduction_prev) = ((C * stem_multiplier), (C * stem_multiplier), C, False)
        self.cells = nn.ModuleList()
        for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            cell = SearchCell(search_space, steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, affine, track_running_stats)
            if (num_edge is None):
                (num_edge, edge2index) = (cell.num_edges, cell.edge2index)
            else:
                assert ((num_edge == cell.num_edges) and (edge2index == cell.edge2index)), 'invalid {:} vs. {:}.'.format(num_edge, cell.num_edges)
            self.cells.append(cell)
            (C_prev_prev, C_prev, reduction_prev) = (C_prev, (multiplier * C_curr), reduction)
        self.op_names = deepcopy(search_space)
        self._Layer = len(self.cells)
        self.edge2index = edge2index
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        # Separate alpha tensors for normal and reduction cells; tau = temperature.
        self.arch_normal_parameters = nn.Parameter((0.001 * torch.randn(num_edge, len(search_space))))
        self.arch_reduce_parameters = nn.Parameter((0.001 * torch.randn(num_edge, len(search_space))))
        self.tau = 10

    def get_weights(self):
        """Return all network (non-architecture) parameters."""
        xlist = (list(self.stem.parameters()) + list(self.cells.parameters()))
        xlist += (list(self.lastact.parameters()) + list(self.global_pooling.parameters()))
        xlist += list(self.classifier.parameters())
        return xlist

    def set_tau(self, tau):
        """Set the Gumbel-softmax temperature."""
        self.tau = tau

    def get_tau(self):
        """Return the current Gumbel-softmax temperature."""
        return self.tau

    def get_alphas(self):
        """Return the two architecture-parameter tensors (normal, reduce)."""
        return [self.arch_normal_parameters, self.arch_reduce_parameters]

    def show_alphas(self):
        """Pretty-print the softmax of both alpha tensors."""
        with torch.no_grad():
            A = 'arch-normal-parameters :\n{:}'.format(nn.functional.softmax(self.arch_normal_parameters, dim=(- 1)).cpu())
            B = 'arch-reduce-parameters :\n{:}'.format(nn.functional.softmax(self.arch_reduce_parameters, dim=(- 1)).cpu())
            return '{:}\n{:}'.format(A, B)

    def get_message(self):
        """Return a multi-line summary of the network and every cell."""
        string = self.extra_repr()
        for (i, cell) in enumerate(self.cells):
            string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
        return string

    def extra_repr(self):
        return '{name}(C={_C}, N={_layerN}, steps={_steps}, multiplier={_multiplier}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def genotype(self):
        """Derive the discrete architecture: top-2 incoming edges per node
        for each cell type ('none' excluded)."""
        def _parse(weights):
            gene = []
            for i in range(self._steps):
                edges = []
                for j in range((2 + i)):
                    node_str = '{:}<-{:}'.format(i, j)
                    ws = weights[self.edge2index[node_str]]
                    for (k, op_name) in enumerate(self.op_names):
                        if (op_name == 'none'):
                            continue
                        edges.append((op_name, j, ws[k]))
                edges = sorted(edges, key=(lambda x: (- x[(- 1)])))
                selected_edges = edges[:2]
                gene.append(tuple(selected_edges))
            return gene
        with torch.no_grad():
            gene_normal = _parse(torch.softmax(self.arch_normal_parameters, dim=(- 1)).cpu().numpy())
            gene_reduce = _parse(torch.softmax(self.arch_reduce_parameters, dim=(- 1)).cpu().numpy())
            return {'normal': gene_normal, 'normal_concat': list(range(((2 + self._steps) - self._multiplier), (self._steps + 2))), 'reduce': gene_reduce, 'reduce_concat': list(range(((2 + self._steps) - self._multiplier), (self._steps + 2)))}

    def forward(self, inputs):
        """Gumbel-softmax forward; returns (pooled features, logits)."""
        def get_gumbel_prob(xins):
            # Rejection-sample straight-through hard weights (see TinyNetworkGDAS).
            while True:
                gumbels = (- torch.empty_like(xins).exponential_().log())
                logits = ((xins.log_softmax(dim=1) + gumbels) / self.tau)
                probs = nn.functional.softmax(logits, dim=1)
                index = probs.max((- 1), keepdim=True)[1]
                one_h = torch.zeros_like(logits).scatter_((- 1), index, 1.0)
                hardwts = ((one_h - probs.detach()) + probs)
                if (torch.isinf(gumbels).any() or torch.isinf(probs).any() or torch.isnan(probs).any()):
                    continue
                else:
                    break
            return (hardwts, index)
        (normal_hardwts, normal_index) = get_gumbel_prob(self.arch_normal_parameters)
        (reduce_hardwts, reduce_index) = get_gumbel_prob(self.arch_reduce_parameters)
        s0 = s1 = self.stem(inputs)
        for (i, cell) in enumerate(self.cells):
            if cell.reduction:
                (hardwts, index) = (reduce_hardwts, reduce_index)
            else:
                (hardwts, index) = (normal_hardwts, normal_index)
            (s0, s1) = (s1, cell.forward_gdas(s0, s1, hardwts, index))
        out = self.lastact(s1)
        out = self.global_pooling(out)
        out = out.view(out.size(0), (- 1))
        logits = self.classifier(out)
        return (out, logits)
|
class TinyNetworkRANDOM(nn.Module):
    """Random-search network over the tiny (NAS-Bench-201-style) cell space.

    ``random_genotype`` samples a uniformly random architecture (optionally
    caching it); ``forward`` evaluates the cached architecture.
    """

    def __init__(self, C, N, max_nodes, num_classes, search_space, affine, track_running_stats):
        super(TinyNetworkRANDOM, self).__init__()
        self._C = C
        self._layerN = N
        self.max_nodes = max_nodes
        self.stem = nn.Sequential(nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(C))
        # Stage plan: N search cells, fixed reduction, N cells, reduction, N cells.
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * N)) + [(C * 4)]) + ([(C * 4)] * N))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * N)) + [True]) + ([False] * N))
        (C_prev, num_edge, edge2index) = (C, None, None)
        self.cells = nn.ModuleList()
        for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            if reduction:
                cell = ResNetBasicblock(C_prev, C_curr, 2)
            else:
                cell = SearchCell(C_prev, C_curr, 1, max_nodes, search_space, affine, track_running_stats)
                if (num_edge is None):
                    (num_edge, edge2index) = (cell.num_edges, cell.edge2index)
                else:
                    assert ((num_edge == cell.num_edges) and (edge2index == cell.edge2index)), 'invalid {:} vs. {:}.'.format(num_edge, cell.num_edges)
            self.cells.append(cell)
            C_prev = cell.out_dim
        self.op_names = deepcopy(search_space)
        self._Layer = len(self.cells)
        self.edge2index = edge2index
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        # Most recently cached random architecture (set by random_genotype).
        self.arch_cache = None

    def get_message(self):
        """Return a multi-line summary of the network and every cell."""
        string = self.extra_repr()
        for (i, cell) in enumerate(self.cells):
            string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
        return string

    def extra_repr(self):
        return '{name}(C={_C}, Max-Nodes={max_nodes}, N={_layerN}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def random_genotype(self, set_cache):
        """Sample a uniformly random architecture; cache it when set_cache is True."""
        genotypes = []
        for i in range(1, self.max_nodes):
            xlist = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                op_name = random.choice(self.op_names)
                xlist.append((op_name, j))
            genotypes.append(tuple(xlist))
        arch = Structure(genotypes)
        if set_cache:
            self.arch_cache = arch
        return arch

    def forward(self, inputs):
        """Run the stack with the cached architecture; returns (features, logits)."""
        feature = self.stem(inputs)
        for (i, cell) in enumerate(self.cells):
            if isinstance(cell, SearchCell):
                feature = cell.forward_dynamic(feature, self.arch_cache)
            else:
                feature = cell(feature)
        out = self.lastact(feature)
        out = self.global_pooling(out)
        out = out.view(out.size(0), (- 1))
        logits = self.classifier(out)
        return (out, logits)
|
class TinyNetworkSETN(nn.Module):
    """SETN search network over the tiny (NAS-Bench-201-style) cell space.

    Supports several evaluation modes via ``set_cal_mode``:
    'urs' (uniform random sampling per cell), 'joint' (softmax mixture),
    'select' (per-cell argmax-style selection), and 'dynamic' (a fixed,
    externally supplied architecture).
    """

    def __init__(self, C, N, max_nodes, num_classes, search_space, affine, track_running_stats):
        super(TinyNetworkSETN, self).__init__()
        self._C = C
        self._layerN = N
        self.max_nodes = max_nodes
        self.stem = nn.Sequential(nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(C))
        # Stage plan: N search cells, fixed reduction, N cells, reduction, N cells.
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * N)) + [(C * 4)]) + ([(C * 4)] * N))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * N)) + [True]) + ([False] * N))
        (C_prev, num_edge, edge2index) = (C, None, None)
        self.cells = nn.ModuleList()
        for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            if reduction:
                cell = ResNetBasicblock(C_prev, C_curr, 2)
            else:
                cell = SearchCell(C_prev, C_curr, 1, max_nodes, search_space, affine, track_running_stats)
                if (num_edge is None):
                    (num_edge, edge2index) = (cell.num_edges, cell.edge2index)
                else:
                    assert ((num_edge == cell.num_edges) and (edge2index == cell.edge2index)), 'invalid {:} vs. {:}.'.format(num_edge, cell.num_edges)
            self.cells.append(cell)
            C_prev = cell.out_dim
        self.op_names = deepcopy(search_space)
        self._Layer = len(self.cells)
        self.edge2index = edge2index
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        self.arch_parameters = nn.Parameter((0.001 * torch.randn(num_edge, len(search_space))))
        # Current evaluation mode and the architecture used in 'dynamic' mode.
        self.mode = 'urs'
        self.dynamic_cell = None

    def set_cal_mode(self, mode, dynamic_cell=None):
        """Switch evaluation mode; 'dynamic' also stores a copy of the arch."""
        assert (mode in ['urs', 'joint', 'select', 'dynamic'])
        self.mode = mode
        if (mode == 'dynamic'):
            self.dynamic_cell = deepcopy(dynamic_cell)
        else:
            self.dynamic_cell = None

    def get_cal_mode(self):
        """Return the current evaluation mode."""
        return self.mode

    def get_weights(self):
        """Return all network (non-architecture) parameters."""
        xlist = (list(self.stem.parameters()) + list(self.cells.parameters()))
        xlist += (list(self.lastact.parameters()) + list(self.global_pooling.parameters()))
        xlist += list(self.classifier.parameters())
        return xlist

    def get_alphas(self):
        """Return the architecture-parameter tensor (as a one-element list)."""
        return [self.arch_parameters]

    def get_message(self):
        """Return a multi-line summary of the network and every cell."""
        string = self.extra_repr()
        for (i, cell) in enumerate(self.cells):
            string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
        return string

    def extra_repr(self):
        return '{name}(C={_C}, Max-Nodes={max_nodes}, N={_layerN}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def genotype(self):
        """Discretize: take the argmax op on every edge and return a Structure."""
        genotypes = []
        for i in range(1, self.max_nodes):
            xlist = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                with torch.no_grad():
                    weights = self.arch_parameters[self.edge2index[node_str]]
                    op_name = self.op_names[weights.argmax().item()]
                xlist.append((op_name, j))
            genotypes.append(tuple(xlist))
        return Structure(genotypes)

    def dync_genotype(self, use_random=False):
        """Sample one architecture: uniformly random ops if use_random, else
        per-edge multinomial draws from softmax(alpha)."""
        genotypes = []
        with torch.no_grad():
            alphas_cpu = nn.functional.softmax(self.arch_parameters, dim=(- 1))
        for i in range(1, self.max_nodes):
            xlist = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                if use_random:
                    op_name = random.choice(self.op_names)
                else:
                    weights = alphas_cpu[self.edge2index[node_str]]
                    op_index = torch.multinomial(weights, 1).item()
                    op_name = self.op_names[op_index]
                xlist.append((op_name, j))
            genotypes.append(tuple(xlist))
        return Structure(genotypes)

    def get_log_prob(self, arch):
        """Return the summed log-probability of ``arch`` under softmax(alpha)."""
        with torch.no_grad():
            logits = nn.functional.log_softmax(self.arch_parameters, dim=(- 1))
        select_logits = []
        for (i, node_info) in enumerate(arch.nodes):
            for (op, xin) in node_info:
                node_str = '{:}<-{:}'.format((i + 1), xin)
                op_index = self.op_names.index(op)
                select_logits.append(logits[(self.edge2index[node_str], op_index)])
        return sum(select_logits).item()

    def return_topK(self, K):
        """Enumerate all architectures and return the K most likely ones.

        K < 0 or K >= total means "return all". Note: enumerates the whole
        space, so this is only feasible for small search spaces.
        """
        archs = Structure.gen_all(self.op_names, self.max_nodes, False)
        pairs = [(self.get_log_prob(arch), arch) for arch in archs]
        if ((K < 0) or (K >= len(archs))):
            K = len(archs)
        sorted_pairs = sorted(pairs, key=(lambda x: (- x[0])))
        return_pairs = [sorted_pairs[_][1] for _ in range(K)]
        return return_pairs

    def forward(self, inputs):
        """Mode-dependent forward (see class docstring); returns (features, logits)."""
        alphas = nn.functional.softmax(self.arch_parameters, dim=(- 1))
        with torch.no_grad():
            alphas_cpu = alphas.detach().cpu()
        feature = self.stem(inputs)
        for (i, cell) in enumerate(self.cells):
            if isinstance(cell, SearchCell):
                if (self.mode == 'urs'):
                    feature = cell.forward_urs(feature)
                elif (self.mode == 'select'):
                    feature = cell.forward_select(feature, alphas_cpu)
                elif (self.mode == 'joint'):
                    feature = cell.forward_joint(feature, alphas)
                elif (self.mode == 'dynamic'):
                    feature = cell.forward_dynamic(feature, self.dynamic_cell)
                else:
                    raise ValueError('invalid mode={:}'.format(self.mode))
            else:
                feature = cell(feature)
        out = self.lastact(feature)
        out = self.global_pooling(out)
        out = out.view(out.size(0), (- 1))
        logits = self.classifier(out)
        return (out, logits)
|
class NASNetworkSETN(nn.Module):
    """SETN search network over a full NASNet-style space.

    NOTE(review): this class looks unfinished -- ``dync_genotype`` references
    attributes this class never defines, and ``forward`` raises
    NotImplementedError. See the per-method notes below.
    """

    def __init__(self, C: int, N: int, steps: int, multiplier: int, stem_multiplier: int, num_classes: int, search_space: List[Text], affine: bool, track_running_stats: bool):
        super(NASNetworkSETN, self).__init__()
        self._C = C
        self._layerN = N
        self._steps = steps
        self._multiplier = multiplier
        self.stem = nn.Sequential(nn.Conv2d(3, (C * stem_multiplier), kernel_size=3, padding=1, bias=False), nn.BatchNorm2d((C * stem_multiplier)))
        # Stage plan: N normal, reduction, N-1 normal, reduction, N-1 normal.
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * (N - 1))) + [(C * 4)]) + ([(C * 4)] * (N - 1)))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * (N - 1))) + [True]) + ([False] * (N - 1)))
        (num_edge, edge2index) = (None, None)
        (C_prev_prev, C_prev, C_curr, reduction_prev) = ((C * stem_multiplier), (C * stem_multiplier), C, False)
        self.cells = nn.ModuleList()
        for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            cell = SearchCell(search_space, steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, affine, track_running_stats)
            if (num_edge is None):
                (num_edge, edge2index) = (cell.num_edges, cell.edge2index)
            else:
                assert ((num_edge == cell.num_edges) and (edge2index == cell.edge2index)), 'invalid {:} vs. {:}.'.format(num_edge, cell.num_edges)
            self.cells.append(cell)
            (C_prev_prev, C_prev, reduction_prev) = (C_prev, (multiplier * C_curr), reduction)
        self.op_names = deepcopy(search_space)
        self._Layer = len(self.cells)
        self.edge2index = edge2index
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        self.arch_normal_parameters = nn.Parameter((0.001 * torch.randn(num_edge, len(search_space))))
        self.arch_reduce_parameters = nn.Parameter((0.001 * torch.randn(num_edge, len(search_space))))
        # Evaluation mode (see TinyNetworkSETN) and the 'dynamic'-mode arch.
        self.mode = 'urs'
        self.dynamic_cell = None

    def set_cal_mode(self, mode, dynamic_cell=None):
        """Switch evaluation mode; 'dynamic' also stores a copy of the arch."""
        assert (mode in ['urs', 'joint', 'select', 'dynamic'])
        self.mode = mode
        if (mode == 'dynamic'):
            self.dynamic_cell = deepcopy(dynamic_cell)
        else:
            self.dynamic_cell = None

    def get_weights(self):
        """Return all network (non-architecture) parameters."""
        xlist = (list(self.stem.parameters()) + list(self.cells.parameters()))
        xlist += (list(self.lastact.parameters()) + list(self.global_pooling.parameters()))
        xlist += list(self.classifier.parameters())
        return xlist

    def get_alphas(self):
        """Return the two architecture-parameter tensors (normal, reduce)."""
        return [self.arch_normal_parameters, self.arch_reduce_parameters]

    def show_alphas(self):
        """Pretty-print the softmax of both alpha tensors."""
        with torch.no_grad():
            A = 'arch-normal-parameters :\n{:}'.format(nn.functional.softmax(self.arch_normal_parameters, dim=(- 1)).cpu())
            B = 'arch-reduce-parameters :\n{:}'.format(nn.functional.softmax(self.arch_reduce_parameters, dim=(- 1)).cpu())
            return '{:}\n{:}'.format(A, B)

    def get_message(self):
        """Return a multi-line summary of the network and every cell."""
        string = self.extra_repr()
        for (i, cell) in enumerate(self.cells):
            string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
        return string

    def extra_repr(self):
        return '{name}(C={_C}, N={_layerN}, steps={_steps}, multiplier={_multiplier}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def dync_genotype(self, use_random=False):
        # NOTE(review): appears copied from TinyNetworkSETN -- this class
        # defines neither self.arch_parameters nor self.max_nodes (see
        # __init__, which creates arch_normal_parameters/arch_reduce_parameters
        # and _steps instead), so calling this raises AttributeError; it also
        # builds a tiny-space Structure rather than a NASNet-style genotype.
        # HEAD's search loop calls dync_genotype for the 'setn' algo, so this
        # needs a real implementation before SETN can run on this space.
        genotypes = []
        with torch.no_grad():
            alphas_cpu = nn.functional.softmax(self.arch_parameters, dim=(- 1))
        for i in range(1, self.max_nodes):
            xlist = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                if use_random:
                    op_name = random.choice(self.op_names)
                else:
                    weights = alphas_cpu[self.edge2index[node_str]]
                    op_index = torch.multinomial(weights, 1).item()
                    op_name = self.op_names[op_index]
                xlist.append((op_name, j))
            genotypes.append(tuple(xlist))
        return Structure(genotypes)

    def genotype(self):
        """Derive the discrete architecture: top-2 incoming edges per node
        for each cell type ('none' excluded)."""
        def _parse(weights):
            gene = []
            for i in range(self._steps):
                edges = []
                for j in range((2 + i)):
                    node_str = '{:}<-{:}'.format(i, j)
                    ws = weights[self.edge2index[node_str]]
                    for (k, op_name) in enumerate(self.op_names):
                        if (op_name == 'none'):
                            continue
                        edges.append((op_name, j, ws[k]))
                edges = sorted(edges, key=(lambda x: (- x[(- 1)])))
                selected_edges = edges[:2]
                gene.append(tuple(selected_edges))
            return gene
        with torch.no_grad():
            gene_normal = _parse(torch.softmax(self.arch_normal_parameters, dim=(- 1)).cpu().numpy())
            gene_reduce = _parse(torch.softmax(self.arch_reduce_parameters, dim=(- 1)).cpu().numpy())
            return {'normal': gene_normal, 'normal_concat': list(range(((2 + self._steps) - self._multiplier), (self._steps + 2))), 'reduce': gene_reduce, 'reduce_concat': list(range(((2 + self._steps) - self._multiplier), (self._steps + 2)))}

    def forward(self, inputs):
        # NOTE(review): unfinished -- the loop raises NotImplementedError on
        # its first iteration, and the code after the raise references
        # normal_index/reduce_index, which are never defined in this method.
        normal_hardwts = nn.functional.softmax(self.arch_normal_parameters, dim=(- 1))
        reduce_hardwts = nn.functional.softmax(self.arch_reduce_parameters, dim=(- 1))
        s0 = s1 = self.stem(inputs)
        for (i, cell) in enumerate(self.cells):
            raise NotImplementedError
            if cell.reduction:
                (hardwts, index) = (reduce_hardwts, reduce_index)
            else:
                (hardwts, index) = (normal_hardwts, normal_index)
            (s0, s1) = (s1, cell.forward_gdas(s0, s1, hardwts, index))
        out = self.lastact(s1)
        out = self.global_pooling(out)
        out = out.view(out.size(0), (- 1))
        logits = self.classifier(out)
        return (out, logits)
|
def initialize_resnet(m):
    """Apply the standard ResNet initialization to module ``m``.

    Conv2d: Kaiming-normal weights (fan-out, ReLU gain), zero bias.
    BatchNorm2d: weight=1, bias=0. Linear: N(0, 0.01) weights, zero bias.
    Other module types are left untouched. Intended for ``model.apply(...)``.
    """
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if (m.bias is not None):
            nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.BatchNorm2d):
        # BatchNorm2d(affine=False) has weight=None and bias=None; the original
        # code guarded only bias and crashed on non-affine BN (the file passes
        # an `affine` flag around, so such layers are plausible).
        if (m.weight is not None):
            nn.init.constant_(m.weight, 1)
        if (m.bias is not None):
            nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, 0, 0.01)
        # Guard for Linear(..., bias=False).
        if (m.bias is not None):
            nn.init.constant_(m.bias, 0)
|
class ConvBNReLU(nn.Module):
    """Configurable block: optional AvgPool -> Conv2d -> optional BN -> optional ReLU.

    Each optional stage is stored as a module or None; ``forward`` applies
    only the stages that exist.
    """

    def __init__(self, nIn, nOut, kernel, stride, padding, bias, has_avg, has_bn, has_relu):
        super(ConvBNReLU, self).__init__()
        self.avg = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) if has_avg else None
        self.conv = nn.Conv2d(nIn, nOut, kernel_size=kernel, stride=stride, padding=padding, dilation=1, groups=1, bias=bias)
        self.bn = nn.BatchNorm2d(nOut) if has_bn else None
        self.relu = nn.ReLU(inplace=True) if has_relu else None

    def forward(self, inputs):
        """Apply the configured stages in order and return the result."""
        x = self.avg(inputs) if self.avg else inputs
        x = self.conv(x)
        if self.bn:
            x = self.bn(x)
        if self.relu:
            x = self.relu(x)
        return x
|
class ResNetBasicblock(nn.Module):
    """Two 3x3-conv residual block whose per-conv widths are given explicitly by `iCs`."""
    num_conv = 2
    expansion = 1

    def __init__(self, iCs, stride):
        super(ResNetBasicblock, self).__init__()
        assert stride in (1, 2), 'invalid stride {:}'.format(stride)
        assert isinstance(iCs, (tuple, list)), 'invalid type of iCs : {:}'.format(iCs)
        assert len(iCs) == 3, 'invalid lengths of iCs : {:}'.format(iCs)
        self.conv_a = ConvBNReLU(iCs[0], iCs[1], 3, stride, 1, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_b = ConvBNReLU(iCs[1], iCs[2], 3, 1, 1, False, has_avg=False, has_bn=True, has_relu=False)
        residual_in = iCs[0]
        if stride == 2:
            # Shortcut downsamples spatially: avg-pool then 1x1 conv (no BN here).
            self.downsample = ConvBNReLU(iCs[0], iCs[2], 1, 1, 0, False, has_avg=True, has_bn=False, has_relu=False)
            residual_in = iCs[2]
        elif iCs[0] != iCs[2]:
            # Channel-matching projection shortcut.
            self.downsample = ConvBNReLU(iCs[0], iCs[2], 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        else:
            self.downsample = None
        self.out_dim = iCs[2]

    def forward(self, inputs):
        """residual + conv branch, then ReLU."""
        branch = self.conv_b(self.conv_a(inputs))
        shortcut = inputs if self.downsample is None else self.downsample(inputs)
        return F.relu(shortcut + branch, inplace=True)
|
class ResNetBottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block with explicit per-conv widths `iCs`."""
    expansion = 4
    num_conv = 3

    def __init__(self, iCs, stride):
        super(ResNetBottleneck, self).__init__()
        assert stride in (1, 2), 'invalid stride {:}'.format(stride)
        assert isinstance(iCs, (tuple, list)), 'invalid type of iCs : {:}'.format(iCs)
        assert len(iCs) == 4, 'invalid lengths of iCs : {:}'.format(iCs)
        self.conv_1x1 = ConvBNReLU(iCs[0], iCs[1], 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_3x3 = ConvBNReLU(iCs[1], iCs[2], 3, stride, 1, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_1x4 = ConvBNReLU(iCs[2], iCs[3], 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        residual_in = iCs[0]
        if stride == 2:
            self.downsample = ConvBNReLU(iCs[0], iCs[3], 1, 1, 0, False, has_avg=True, has_bn=False, has_relu=False)
            residual_in = iCs[3]
        elif iCs[0] != iCs[3]:
            # NOTE(review): this projection has no BN, unlike sibling variants
            # elsewhere in this file — confirm that difference is intended.
            self.downsample = ConvBNReLU(iCs[0], iCs[3], 1, 1, 0, False, has_avg=False, has_bn=False, has_relu=False)
            residual_in = iCs[3]
        else:
            self.downsample = None
        self.out_dim = iCs[3]

    def forward(self, inputs):
        """residual + bottleneck branch, then ReLU."""
        branch = self.conv_1x4(self.conv_3x3(self.conv_1x1(inputs)))
        shortcut = inputs if self.downsample is None else self.downsample(inputs)
        return F.relu(shortcut + branch, inplace=True)
|
class InferCifarResNet(nn.Module):
    """CIFAR ResNet whose per-layer widths (`xchannels`) and per-stage depths
    (`xblocks`) are fixed by a searched configuration."""
    def __init__(self, block_name, depth, xblocks, xchannels, num_classes, zero_init_residual):
        super(InferCifarResNet, self).__init__()
        if (block_name == 'ResNetBasicblock'):
            block = ResNetBasicblock
            assert (((depth - 2) % 6) == 0), 'depth should be one of 20, 32, 44, 56, 110'
            layer_blocks = ((depth - 2) // 6)
        elif (block_name == 'ResNetBottleneck'):
            block = ResNetBottleneck
            assert (((depth - 2) % 9) == 0), 'depth should be one of 164'
            layer_blocks = ((depth - 2) // 9)
        else:
            raise ValueError('invalid block : {:}'.format(block_name))
        assert (len(xblocks) == 3), 'invalid xblocks : {:}'.format(xblocks)
        self.message = 'InferWidthCifarResNet : Depth : {:} , Layers for each block : {:}'.format(depth, layer_blocks)
        self.num_classes = num_classes
        self.xchannels = xchannels
        self.layers = nn.ModuleList([ConvBNReLU(xchannels[0], xchannels[1], 3, 1, 1, False, has_avg=False, has_bn=True, has_relu=True)])
        last_channel_idx = 1
        for stage in range(3):
            for iL in range(layer_blocks):
                num_conv = block.num_conv
                # Width spec for this block: input width plus one entry per conv.
                iCs = self.xchannels[last_channel_idx:((last_channel_idx + num_conv) + 1)]
                stride = (2 if ((stage > 0) and (iL == 0)) else 1)
                module = block(iCs, stride)
                last_channel_idx += num_conv
                # Record the realized output width so the next block reads it.
                self.xchannels[last_channel_idx] = module.out_dim
                self.layers.append(module)
                self.message += '\nstage={:}, ilayer={:02d}/{:02d}, block={:03d}, iCs={:}, oC={:3d}, stride={:}'.format(stage, iL, layer_blocks, (len(self.layers) - 1), iCs, module.out_dim, stride)
                if ((iL + 1) == xblocks[stage]):
                    # Searched depth for this stage reached: skip remaining blocks
                    # while keeping xchannels aligned with the full-depth layout.
                    out_channel = module.out_dim
                    for iiL in range((iL + 1), layer_blocks):
                        last_channel_idx += num_conv
                        self.xchannels[last_channel_idx] = module.out_dim
                    break
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = nn.Linear(self.xchannels[(- 1)], num_classes)
        self.apply(initialize_resnet)
        if zero_init_residual:
            # Zero the last BN gamma in each residual branch so blocks start near identity.
            for m in self.modules():
                if isinstance(m, ResNetBasicblock):
                    nn.init.constant_(m.conv_b.bn.weight, 0)
                elif isinstance(m, ResNetBottleneck):
                    nn.init.constant_(m.conv_1x4.bn.weight, 0)
    def get_message(self):
        """Return the human-readable construction log."""
        return self.message
    def forward(self, inputs):
        """Return (flattened features, logits)."""
        x = inputs
        for (i, layer) in enumerate(self.layers):
            x = layer(x)
        features = self.avgpool(x)
        features = features.view(features.size(0), (- 1))
        logits = self.classifier(features)
        return (features, logits)
|
class ConvBNReLU(nn.Module):
    """Conv2d preceded by an optional 2x2 AvgPool and followed by optional BN / ReLU."""

    def __init__(self, nIn, nOut, kernel, stride, padding, bias, has_avg, has_bn, has_relu):
        super(ConvBNReLU, self).__init__()
        # Optional stages are kept as submodules or None.
        self.avg = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) if has_avg else None
        self.conv = nn.Conv2d(nIn, nOut, kernel_size=kernel, stride=stride, padding=padding, dilation=1, groups=1, bias=bias)
        self.bn = nn.BatchNorm2d(nOut) if has_bn else None
        self.relu = nn.ReLU(inplace=True) if has_relu else None

    def forward(self, inputs):
        """Run the configured subset of avg / conv / bn / relu."""
        x = inputs if self.avg is None else self.avg(inputs)
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.relu is not None:
            x = self.relu(x)
        return x
|
class ResNetBasicblock(nn.Module):
    """Standard two-conv residual block mapping `inplanes` -> `planes`."""
    num_conv = 2
    expansion = 1

    def __init__(self, inplanes, planes, stride):
        super(ResNetBasicblock, self).__init__()
        assert stride in (1, 2), 'invalid stride {:}'.format(stride)
        self.conv_a = ConvBNReLU(inplanes, planes, 3, stride, 1, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_b = ConvBNReLU(planes, planes, 3, 1, 1, False, has_avg=False, has_bn=True, has_relu=False)
        if stride == 2:
            # Spatial downsample on the shortcut: avg-pool then 1x1 conv (no BN).
            self.downsample = ConvBNReLU(inplanes, planes, 1, 1, 0, False, has_avg=True, has_bn=False, has_relu=False)
        elif inplanes != planes:
            # Channel-matching projection shortcut.
            self.downsample = ConvBNReLU(inplanes, planes, 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        else:
            self.downsample = None
        self.out_dim = planes

    def forward(self, inputs):
        """identity/projection + conv branch, then ReLU."""
        branch = self.conv_b(self.conv_a(inputs))
        identity = inputs if self.downsample is None else self.downsample(inputs)
        return F.relu(identity + branch, inplace=True)
|
class ResNetBottleneck(nn.Module):
    """Bottleneck residual block: 1x1 reduce, 3x3, 1x1 expand (x4)."""
    expansion = 4
    num_conv = 3

    def __init__(self, inplanes, planes, stride):
        super(ResNetBottleneck, self).__init__()
        assert stride in (1, 2), 'invalid stride {:}'.format(stride)
        out_planes = planes * self.expansion
        self.conv_1x1 = ConvBNReLU(inplanes, planes, 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_3x3 = ConvBNReLU(planes, planes, 3, stride, 1, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_1x4 = ConvBNReLU(planes, out_planes, 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        if stride == 2:
            self.downsample = ConvBNReLU(inplanes, out_planes, 1, 1, 0, False, has_avg=True, has_bn=False, has_relu=False)
        elif inplanes != out_planes:
            # NOTE(review): no BN on this projection here, unlike some sibling
            # variants in this file — confirm the difference is intended.
            self.downsample = ConvBNReLU(inplanes, out_planes, 1, 1, 0, False, has_avg=False, has_bn=False, has_relu=False)
        else:
            self.downsample = None
        self.out_dim = out_planes

    def forward(self, inputs):
        """identity/projection + bottleneck branch, then ReLU."""
        branch = self.conv_1x4(self.conv_3x3(self.conv_1x1(inputs)))
        identity = inputs if self.downsample is None else self.downsample(inputs)
        return F.relu(identity + branch, inplace=True)
|
class InferDepthCifarResNet(nn.Module):
    """CIFAR ResNet with searched per-stage depths (`xblocks`); widths follow
    the standard 16/32/64 schedule."""

    def __init__(self, block_name, depth, xblocks, num_classes, zero_init_residual):
        super(InferDepthCifarResNet, self).__init__()
        if block_name == 'ResNetBasicblock':
            block = ResNetBasicblock
            assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
            layer_blocks = (depth - 2) // 6
        elif block_name == 'ResNetBottleneck':
            block = ResNetBottleneck
            assert (depth - 2) % 9 == 0, 'depth should be one of 164'
            layer_blocks = (depth - 2) // 9
        else:
            raise ValueError('invalid block : {:}'.format(block_name))
        assert len(xblocks) == 3, 'invalid xblocks : {:}'.format(xblocks)
        self.message = 'InferWidthCifarResNet : Depth : {:} , Layers for each block : {:}'.format(depth, layer_blocks)
        self.num_classes = num_classes
        self.layers = nn.ModuleList([ConvBNReLU(3, 16, 3, 1, 1, False, has_avg=False, has_bn=True, has_relu=True)])
        self.channels = [16]
        for stage in range(3):
            for blk_idx in range(layer_blocks):
                in_width = self.channels[-1]
                planes = 16 * (2 ** stage)
                stride = 2 if (stage > 0 and blk_idx == 0) else 1
                module = block(in_width, planes, stride)
                self.channels.append(module.out_dim)
                self.layers.append(module)
                self.message += '\nstage={:}, ilayer={:02d}/{:02d}, block={:03d}, iC={:}, oC={:3d}, stride={:}'.format(stage, blk_idx, layer_blocks, (len(self.layers) - 1), planes, module.out_dim, stride)
                if (blk_idx + 1) == xblocks[stage]:
                    # Searched depth for this stage reached.
                    break
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = nn.Linear(self.channels[-1], num_classes)
        self.apply(initialize_resnet)
        if zero_init_residual:
            # Zero the last BN gamma so each residual branch starts near identity.
            for m in self.modules():
                if isinstance(m, ResNetBasicblock):
                    nn.init.constant_(m.conv_b.bn.weight, 0)
                elif isinstance(m, ResNetBottleneck):
                    nn.init.constant_(m.conv_1x4.bn.weight, 0)

    def get_message(self):
        """Return the human-readable construction log."""
        return self.message

    def forward(self, inputs):
        """Return (flattened features, logits)."""
        feat = inputs
        for layer in self.layers:
            feat = layer(feat)
        pooled = self.avgpool(feat)
        pooled = pooled.view(pooled.size(0), -1)
        logits = self.classifier(pooled)
        return (pooled, logits)
|
class ConvBNReLU(nn.Module):
    """Compose (optional) AvgPool2d, Conv2d, (optional) BatchNorm2d, (optional) ReLU."""

    def __init__(self, nIn, nOut, kernel, stride, padding, bias, has_avg, has_bn, has_relu):
        super(ConvBNReLU, self).__init__()
        # Disabled stages are represented by None so forward can skip them.
        self.avg = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) if has_avg else None
        self.conv = nn.Conv2d(nIn, nOut, kernel_size=kernel, stride=stride, padding=padding, dilation=1, groups=1, bias=bias)
        self.bn = nn.BatchNorm2d(nOut) if has_bn else None
        self.relu = nn.ReLU(inplace=True) if has_relu else None

    def forward(self, inputs):
        """Apply the configured stages in order."""
        out = self.avg(inputs) if self.avg is not None else inputs
        out = self.conv(out)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
|
class ResNetBasicblock(nn.Module):
    """Two 3x3-conv residual block with widths given explicitly by `iCs`."""
    num_conv = 2
    expansion = 1

    def __init__(self, iCs, stride):
        super(ResNetBasicblock, self).__init__()
        assert stride in (1, 2), 'invalid stride {:}'.format(stride)
        assert isinstance(iCs, (tuple, list)), 'invalid type of iCs : {:}'.format(iCs)
        assert len(iCs) == 3, 'invalid lengths of iCs : {:}'.format(iCs)
        self.conv_a = ConvBNReLU(iCs[0], iCs[1], 3, stride, 1, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_b = ConvBNReLU(iCs[1], iCs[2], 3, 1, 1, False, has_avg=False, has_bn=True, has_relu=False)
        residual_in = iCs[0]
        if stride == 2:
            # Shortcut downsample: avg-pool then 1x1 conv, no BN.
            self.downsample = ConvBNReLU(iCs[0], iCs[2], 1, 1, 0, False, has_avg=True, has_bn=False, has_relu=False)
            residual_in = iCs[2]
        elif iCs[0] != iCs[2]:
            # Channel-matching projection shortcut with BN.
            self.downsample = ConvBNReLU(iCs[0], iCs[2], 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        else:
            self.downsample = None
        self.out_dim = iCs[2]

    def forward(self, inputs):
        """shortcut + conv branch, then ReLU."""
        branch = self.conv_b(self.conv_a(inputs))
        shortcut = inputs if self.downsample is None else self.downsample(inputs)
        return F.relu(shortcut + branch, inplace=True)
|
class ResNetBottleneck(nn.Module):
    """Bottleneck residual block (1x1 / 3x3 / 1x1) with explicit widths `iCs`."""
    expansion = 4
    num_conv = 3

    def __init__(self, iCs, stride):
        super(ResNetBottleneck, self).__init__()
        assert stride in (1, 2), 'invalid stride {:}'.format(stride)
        assert isinstance(iCs, (tuple, list)), 'invalid type of iCs : {:}'.format(iCs)
        assert len(iCs) == 4, 'invalid lengths of iCs : {:}'.format(iCs)
        self.conv_1x1 = ConvBNReLU(iCs[0], iCs[1], 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_3x3 = ConvBNReLU(iCs[1], iCs[2], 3, stride, 1, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_1x4 = ConvBNReLU(iCs[2], iCs[3], 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        residual_in = iCs[0]
        if stride == 2:
            self.downsample = ConvBNReLU(iCs[0], iCs[3], 1, 1, 0, False, has_avg=True, has_bn=False, has_relu=False)
            residual_in = iCs[3]
        elif iCs[0] != iCs[3]:
            # NOTE(review): this projection omits BN, unlike some sibling
            # copies in this file — confirm the omission is intended.
            self.downsample = ConvBNReLU(iCs[0], iCs[3], 1, 1, 0, False, has_avg=False, has_bn=False, has_relu=False)
            residual_in = iCs[3]
        else:
            self.downsample = None
        self.out_dim = iCs[3]

    def forward(self, inputs):
        """shortcut + bottleneck branch, then ReLU."""
        branch = self.conv_1x4(self.conv_3x3(self.conv_1x1(inputs)))
        shortcut = inputs if self.downsample is None else self.downsample(inputs)
        return F.relu(shortcut + branch, inplace=True)
|
class InferWidthCifarResNet(nn.Module):
    """CIFAR ResNet with searched per-layer widths (`xchannels`); depth follows
    the full standard schedule (no block skipping)."""
    def __init__(self, block_name, depth, xchannels, num_classes, zero_init_residual):
        super(InferWidthCifarResNet, self).__init__()
        if (block_name == 'ResNetBasicblock'):
            block = ResNetBasicblock
            assert (((depth - 2) % 6) == 0), 'depth should be one of 20, 32, 44, 56, 110'
            layer_blocks = ((depth - 2) // 6)
        elif (block_name == 'ResNetBottleneck'):
            block = ResNetBottleneck
            assert (((depth - 2) % 9) == 0), 'depth should be one of 164'
            layer_blocks = ((depth - 2) // 9)
        else:
            raise ValueError('invalid block : {:}'.format(block_name))
        self.message = 'InferWidthCifarResNet : Depth : {:} , Layers for each block : {:}'.format(depth, layer_blocks)
        self.num_classes = num_classes
        self.xchannels = xchannels
        self.layers = nn.ModuleList([ConvBNReLU(xchannels[0], xchannels[1], 3, 1, 1, False, has_avg=False, has_bn=True, has_relu=True)])
        last_channel_idx = 1
        for stage in range(3):
            for iL in range(layer_blocks):
                num_conv = block.num_conv
                # Width spec for this block: input width plus one entry per conv.
                iCs = self.xchannels[last_channel_idx:((last_channel_idx + num_conv) + 1)]
                stride = (2 if ((stage > 0) and (iL == 0)) else 1)
                module = block(iCs, stride)
                last_channel_idx += num_conv
                # Record the realized output width so the next block reads it.
                self.xchannels[last_channel_idx] = module.out_dim
                self.layers.append(module)
                self.message += '\nstage={:}, ilayer={:02d}/{:02d}, block={:03d}, iCs={:}, oC={:3d}, stride={:}'.format(stage, iL, layer_blocks, (len(self.layers) - 1), iCs, module.out_dim, stride)
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = nn.Linear(self.xchannels[(- 1)], num_classes)
        self.apply(initialize_resnet)
        if zero_init_residual:
            # Zero the last BN gamma so each residual branch starts near identity.
            for m in self.modules():
                if isinstance(m, ResNetBasicblock):
                    nn.init.constant_(m.conv_b.bn.weight, 0)
                elif isinstance(m, ResNetBottleneck):
                    nn.init.constant_(m.conv_1x4.bn.weight, 0)
    def get_message(self):
        """Return the human-readable construction log."""
        return self.message
    def forward(self, inputs):
        """Return (flattened features, logits)."""
        x = inputs
        for (i, layer) in enumerate(self.layers):
            x = layer(x)
        features = self.avgpool(x)
        features = features.view(features.size(0), (- 1))
        logits = self.classifier(features)
        return (features, logits)
|
class ConvBNReLU(nn.Module):
    """Optional AvgPool2d, Conv2d, optional BatchNorm2d, optional ReLU (counts as one conv)."""
    num_conv = 1

    def __init__(self, nIn, nOut, kernel, stride, padding, bias, has_avg, has_bn, has_relu):
        super(ConvBNReLU, self).__init__()
        # Optional stages default to None and are skipped in forward.
        self.avg = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) if has_avg else None
        self.conv = nn.Conv2d(nIn, nOut, kernel_size=kernel, stride=stride, padding=padding, dilation=1, groups=1, bias=bias)
        self.bn = nn.BatchNorm2d(nOut) if has_bn else None
        self.relu = nn.ReLU(inplace=True) if has_relu else None

    def forward(self, inputs):
        """Apply the configured stages in order."""
        tensor = inputs if self.avg is None else self.avg(inputs)
        tensor = self.conv(tensor)
        if self.bn is not None:
            tensor = self.bn(tensor)
        if self.relu is not None:
            tensor = self.relu(tensor)
        return tensor
|
class ResNetBasicblock(nn.Module):
    """Two 3x3-conv residual block with widths given explicitly by `iCs`;
    this variant keeps BN on the stride-2 shortcut."""
    num_conv = 2
    expansion = 1

    def __init__(self, iCs, stride):
        super(ResNetBasicblock, self).__init__()
        assert stride in (1, 2), 'invalid stride {:}'.format(stride)
        assert isinstance(iCs, (tuple, list)), 'invalid type of iCs : {:}'.format(iCs)
        assert len(iCs) == 3, 'invalid lengths of iCs : {:}'.format(iCs)
        self.conv_a = ConvBNReLU(iCs[0], iCs[1], 3, stride, 1, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_b = ConvBNReLU(iCs[1], iCs[2], 3, 1, 1, False, has_avg=False, has_bn=True, has_relu=False)
        residual_in = iCs[0]
        if stride == 2:
            # Avg-pool + 1x1 conv + BN on the shortcut (BN differs from sibling copies).
            self.downsample = ConvBNReLU(iCs[0], iCs[2], 1, 1, 0, False, has_avg=True, has_bn=True, has_relu=False)
            residual_in = iCs[2]
        elif iCs[0] != iCs[2]:
            self.downsample = ConvBNReLU(iCs[0], iCs[2], 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        else:
            self.downsample = None
        self.out_dim = iCs[2]

    def forward(self, inputs):
        """shortcut + conv branch, then ReLU."""
        branch = self.conv_b(self.conv_a(inputs))
        shortcut = inputs if self.downsample is None else self.downsample(inputs)
        return F.relu(shortcut + branch, inplace=True)
|
class ResNetBottleneck(nn.Module):
    """Bottleneck residual block (1x1 / 3x3 / 1x1) with explicit widths `iCs`;
    this variant keeps BN on both shortcut projections."""
    expansion = 4
    num_conv = 3

    def __init__(self, iCs, stride):
        super(ResNetBottleneck, self).__init__()
        assert stride in (1, 2), 'invalid stride {:}'.format(stride)
        assert isinstance(iCs, (tuple, list)), 'invalid type of iCs : {:}'.format(iCs)
        assert len(iCs) == 4, 'invalid lengths of iCs : {:}'.format(iCs)
        self.conv_1x1 = ConvBNReLU(iCs[0], iCs[1], 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_3x3 = ConvBNReLU(iCs[1], iCs[2], 3, stride, 1, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_1x4 = ConvBNReLU(iCs[2], iCs[3], 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        residual_in = iCs[0]
        if stride == 2:
            self.downsample = ConvBNReLU(iCs[0], iCs[3], 1, 1, 0, False, has_avg=True, has_bn=True, has_relu=False)
            residual_in = iCs[3]
        elif iCs[0] != iCs[3]:
            self.downsample = ConvBNReLU(iCs[0], iCs[3], 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
            residual_in = iCs[3]
        else:
            self.downsample = None
        self.out_dim = iCs[3]

    def forward(self, inputs):
        """shortcut + bottleneck branch, then ReLU."""
        branch = self.conv_1x4(self.conv_3x3(self.conv_1x1(inputs)))
        shortcut = inputs if self.downsample is None else self.downsample(inputs)
        return F.relu(shortcut + branch, inplace=True)
|
class InferImagenetResNet(nn.Module):
    """ImageNet ResNet with searched depths (`xblocks`) and widths (`xchannels`),
    with an optional two-conv deep stem."""
    def __init__(self, block_name, layers, xblocks, xchannels, deep_stem, num_classes, zero_init_residual):
        super(InferImagenetResNet, self).__init__()
        if (block_name == 'BasicBlock'):
            block = ResNetBasicblock
        elif (block_name == 'Bottleneck'):
            block = ResNetBottleneck
        else:
            raise ValueError('invalid block : {:}'.format(block_name))
        assert (len(xblocks) == len(layers)), 'invalid layers : {:} vs xblocks : {:}'.format(layers, xblocks)
        self.message = 'InferImagenetResNet : Depth : {:} -> {:}, Layers for each block : {:}'.format((sum(layers) * block.num_conv), (sum(xblocks) * block.num_conv), xblocks)
        self.num_classes = num_classes
        self.xchannels = xchannels
        if (not deep_stem):
            # Classic ImageNet stem: one 7x7 stride-2 conv.
            self.layers = nn.ModuleList([ConvBNReLU(xchannels[0], xchannels[1], 7, 2, 3, False, has_avg=False, has_bn=True, has_relu=True)])
            last_channel_idx = 1
        else:
            # Deep stem: two 3x3 convs, the first with stride 2.
            self.layers = nn.ModuleList([ConvBNReLU(xchannels[0], xchannels[1], 3, 2, 1, False, has_avg=False, has_bn=True, has_relu=True), ConvBNReLU(xchannels[1], xchannels[2], 3, 1, 1, False, has_avg=False, has_bn=True, has_relu=True)])
            last_channel_idx = 2
        self.layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        for (stage, layer_blocks) in enumerate(layers):
            for iL in range(layer_blocks):
                num_conv = block.num_conv
                # Width spec for this block: input width plus one entry per conv.
                iCs = self.xchannels[last_channel_idx:((last_channel_idx + num_conv) + 1)]
                stride = (2 if ((stage > 0) and (iL == 0)) else 1)
                module = block(iCs, stride)
                last_channel_idx += num_conv
                # Record the realized output width so the next block reads it.
                self.xchannels[last_channel_idx] = module.out_dim
                self.layers.append(module)
                self.message += '\nstage={:}, ilayer={:02d}/{:02d}, block={:03d}, iCs={:}, oC={:3d}, stride={:}'.format(stage, iL, layer_blocks, (len(self.layers) - 1), iCs, module.out_dim, stride)
                if ((iL + 1) == xblocks[stage]):
                    # Searched depth for this stage reached: skip remaining blocks
                    # while keeping xchannels aligned with the full-depth layout.
                    out_channel = module.out_dim
                    for iiL in range((iL + 1), layer_blocks):
                        last_channel_idx += num_conv
                        self.xchannels[last_channel_idx] = module.out_dim
                    break
        assert ((last_channel_idx + 1) == len(self.xchannels)), '{:} vs {:}'.format(last_channel_idx, len(self.xchannels))
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Linear(self.xchannels[(- 1)], num_classes)
        self.apply(initialize_resnet)
        if zero_init_residual:
            # Zero the last BN gamma so each residual branch starts near identity.
            for m in self.modules():
                if isinstance(m, ResNetBasicblock):
                    nn.init.constant_(m.conv_b.bn.weight, 0)
                elif isinstance(m, ResNetBottleneck):
                    nn.init.constant_(m.conv_1x4.bn.weight, 0)
    def get_message(self):
        """Return the human-readable construction log."""
        return self.message
    def forward(self, inputs):
        """Return (flattened features, logits)."""
        x = inputs
        for (i, layer) in enumerate(self.layers):
            x = layer(x)
        features = self.avgpool(x)
        features = features.view(features.size(0), (- 1))
        logits = self.classifier(features)
        return (features, logits)
|
class ConvBNReLU(nn.Module):
    """Conv2d -> optional BatchNorm2d -> optional ReLU6, with 'same' padding for odd kernels."""

    def __init__(self, in_planes, out_planes, kernel_size, stride, groups, has_bn=True, has_relu=True):
        super(ConvBNReLU, self).__init__()
        pad = (kernel_size - 1) // 2
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad, groups=groups, bias=False)
        self.bn = nn.BatchNorm2d(out_planes) if has_bn else None
        self.relu = nn.ReLU6(inplace=True) if has_relu else None

    def forward(self, x):
        """Apply conv, then whichever of bn/relu were configured."""
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
|
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block with an explicit channel spec.

    `channels` has 2 entries (no expansion) or 3 entries (1x1 expansion first),
    followed by a depthwise 3x3 and a linear 1x1 projection.
    """

    def __init__(self, channels, stride, expand_ratio, additive):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2], 'invalid stride : {:}'.format(stride)
        assert len(channels) in [2, 3], 'invalid channels : {:}'.format(channels)
        # Optional 1x1 expansion, depthwise 3x3 (groups == channels), linear 1x1.
        layers = [] if len(channels) == 2 else [ConvBNReLU(channels[0], channels[1], 1, 1, 1)]
        layers.append(ConvBNReLU(channels[-2], channels[-2], 3, stride, channels[-2]))
        layers.append(ConvBNReLU(channels[-2], channels[-1], 1, 1, 1, True, False))
        self.conv = nn.Sequential(*layers)
        self.additive = additive
        if self.additive and channels[0] != channels[-1]:
            # Projection so the residual addition is shape-compatible.
            self.shortcut = ConvBNReLU(channels[0], channels[-1], 1, 1, 1, True, False)
        else:
            # NOTE(review): when additive and widths already match, no identity
            # addition happens either — confirm this asymmetry is intended.
            self.shortcut = None
        self.out_dim = channels[-1]

    def forward(self, x):
        out = self.conv(x)
        if self.shortcut is None:
            return out
        return out + self.shortcut(x)
|
class InferMobileNetV2(nn.Module):
    """MobileNetV2 whose channel specs (`xchannels`) and per-stage depths
    (`xblocks`) come from a searched configuration."""
    def __init__(self, num_classes, xchannels, xblocks, dropout):
        super(InferMobileNetV2, self).__init__()
        block = InvertedResidual
        # Each entry: (expand ratio t, base channels c, repeats n, first stride s).
        inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        assert (len(inverted_residual_setting) == len(xblocks)), 'invalid number of layers : {:} vs {:}'.format(len(inverted_residual_setting), len(xblocks))
        for (block_num, ir_setting) in zip(xblocks, inverted_residual_setting):
            assert (block_num <= ir_setting[2]), '{:} vs {:}'.format(block_num, ir_setting)
        xchannels = parse_channel_info(xchannels)
        self.xchannels = xchannels
        self.message = 'InferMobileNetV2 : xblocks={:}'.format(xblocks)
        features = [ConvBNReLU(xchannels[0][0], xchannels[0][1], 3, 2, 1)]
        last_channel_idx = 1
        for (stage, (t, c, n, s)) in enumerate(inverted_residual_setting):
            for i in range(n):
                stride = (s if (i == 0) else 1)
                # Only non-first blocks of a stage use the additive shortcut.
                additv = (True if (i > 0) else False)
                module = block(self.xchannels[last_channel_idx], stride, t, additv)
                features.append(module)
                self.message += '\nstage={:}, ilayer={:02d}/{:02d}, block={:03d}, Cs={:}, stride={:}, expand={:}, original-C={:}'.format(stage, i, n, len(features), self.xchannels[last_channel_idx], stride, t, c)
                last_channel_idx += 1
                if ((i + 1) == xblocks[stage]):
                    # Searched depth reached: skip remaining blocks while keeping
                    # the channel specs of skipped slots aligned.
                    out_channel = module.out_dim
                    for iiL in range((i + 1), n):
                        last_channel_idx += 1
                        self.xchannels[last_channel_idx][0] = module.out_dim
                    break
        features.append(ConvBNReLU(self.xchannels[last_channel_idx][0], self.xchannels[last_channel_idx][1], 1, 1, 1))
        assert ((last_channel_idx + 2) == len(self.xchannels)), '{:} vs {:}'.format(last_channel_idx, len(self.xchannels))
        self.features = nn.Sequential(*features)
        self.classifier = nn.Sequential(nn.Dropout(dropout), nn.Linear(self.xchannels[last_channel_idx][1], num_classes))
        self.apply(initialize_resnet)
    def get_message(self):
        """Return the human-readable construction log."""
        return self.message
    def forward(self, inputs):
        """Return (conv feature map, logits); global average pooling via mean over H, W."""
        features = self.features(inputs)
        vectors = features.mean([2, 3])
        predicts = self.classifier(vectors)
        return (features, predicts)
|
class DynamicShapeTinyNet(nn.Module):
    """NAS-Bench-201-style tiny network whose per-layer widths come from `channels`."""

    def __init__(self, channels: List[int], genotype: Any, num_classes: int):
        super(DynamicShapeTinyNet, self).__init__()
        self._channels = channels
        if len(channels) % 3 != 2:
            raise ValueError('invalid number of layers : {:}'.format(len(channels)))
        self._num_stage = N = len(channels) // 3
        self.stem = nn.Sequential(nn.Conv2d(3, channels[0], kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(channels[0]))
        # Layout: N inferred cells, a reduction, N cells, a reduction, N cells.
        layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N
        prev_width = channels[0]
        self.cells = nn.ModuleList()
        for cur_width, is_reduction in zip(channels, layer_reductions):
            if is_reduction:
                cell = ResNetBasicblock(prev_width, cur_width, 2, True)
            else:
                cell = InferCell(genotype, prev_width, cur_width, 1)
            self.cells.append(cell)
            prev_width = cell.out_dim
        self._num_layer = len(self.cells)
        self.lastact = nn.Sequential(nn.BatchNorm2d(prev_width), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(prev_width, num_classes)

    def get_message(self) -> Text:
        """Return a multi-line description of every cell."""
        string = self.extra_repr()
        for idx, cell in enumerate(self.cells):
            string += '\n {:02d}/{:02d} :: {:}'.format(idx, len(self.cells), cell.extra_repr())
        return string

    def extra_repr(self):
        return '{name}(C={_channels}, N={_num_stage}, L={_num_layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, inputs):
        """Return (pooled feature vector, logits)."""
        feature = self.stem(inputs)
        for cell in self.cells:
            feature = cell(feature)
        out = self.lastact(feature)
        out = self.global_pooling(out)
        out = out.view(out.size(0), -1)
        logits = self.classifier(out)
        return (out, logits)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.