code stringlengths 17 6.64M |
|---|
def _key_is_deprecated(full_key):
    """Return True (and log a warning) if ``full_key`` is a deprecated config key."""
    if full_key in _DEPRECATED_KEYS:
        # BUG FIX: Logger.warn() is a deprecated alias; warning() is the
        # documented method.
        logger.warning('Deprecated config key (ignoring): {}'.format(full_key))
        return True
    return False
|
def _key_is_renamed(full_key):
    """Tell whether ``full_key`` appears in the rename table."""
    return full_key in _RENAMED_KEYS
|
def _raise_key_rename_error(full_key):
    """Raise a KeyError telling the user what ``full_key`` was renamed to."""
    entry = _RENAMED_KEYS[full_key]
    if isinstance(entry, tuple):
        # Entry may carry an extra explanatory note as (new_key, note).
        new_key, msg = entry[0], ' Note: ' + entry[1]
    else:
        new_key, msg = entry, ''
    raise KeyError(
        'Key {} was renamed to {}; please update your config.{}'.format(
            full_key, new_key, msg))
|
def _decode_cfg_value(v):
'Decodes a raw config value (e.g., from a yaml config files or command\n line argument) into a Python object.\n '
if isinstance(v, dict):
return AttrDict(v)
try:
v = literal_eval(v)
except ValueError:
pass
except SyntaxError:
pass
return v
|
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
'Checks that `value_a`, which is intended to replace `value_b` is of the\n right type. The type is correct if it matches exactly or is one of a few\n cases in which the type can be easily coerced.\n '
type_b = type(value_b)
type_a = type(value_a)
if (type_a is type_b):
return value_a
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif (isinstance(value_a, tuple) and isinstance(value_b, list)):
value_a = list(value_a)
elif (isinstance(value_a, list) and isinstance(value_b, tuple)):
value_a = tuple(value_a)
else:
raise ValueError('Type mismatch ({} vs. {}) with values ({} vs. {}) for config key: {}'.format(type_b, type_a, value_b, value_a, full_key))
return value_a
|
def save_object(obj, file_name):
    """Save a Python object by pickling it (path is made absolute first)."""
    target = os.path.abspath(file_name)
    with open(target, 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
|
def cache_url(url_or_file, cache_dir):
    """Download the file specified by the URL to the cache_dir and return the
    path to the cached file. If the argument is not a URL, simply return it as
    is.
    """
    if re.match('^(?:http)s?://', url_or_file, re.IGNORECASE) is None:
        # Plain local path: nothing to cache.
        return url_or_file
    url = url_or_file
    assert url.startswith(_DETECTRON_S3_BASE_URL), \
        'Detectron only automatically caches URLs in the Detectron S3 bucket: {}'.format(_DETECTRON_S3_BASE_URL)
    cache_file_path = url.replace(_DETECTRON_S3_BASE_URL, cache_dir)
    if os.path.exists(cache_file_path):
        # Already downloaded: just verify integrity and reuse it.
        assert_cache_file_is_ok(url, cache_file_path)
        return cache_file_path
    cache_file_dir = os.path.dirname(cache_file_path)
    if not os.path.exists(cache_file_dir):
        os.makedirs(cache_file_dir)
    logger.info('Downloading remote file {} to {}'.format(url, cache_file_path))
    download_url(url, cache_file_path)
    assert_cache_file_is_ok(url, cache_file_path)
    return cache_file_path
|
def assert_cache_file_is_ok(url, file_path):
    """Check that the cached file's md5 matches the reference md5 for ``url``."""
    actual = _get_file_md5sum(file_path)
    expected = _get_reference_md5sum(url)
    assert actual == expected, 'Target URL {} appears to be downloaded to the local cache file {}, but the md5 hash of the local file does not match the reference (actual: {} vs. expected: {}). You may wish to delete the cached file and try again to trigger automatic download.'.format(url, file_path, actual, expected)
|
def _progress_bar(count, total):
'Report download progress.\n Credit:\n https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113\n '
bar_len = 60
filled_len = int(round(((bar_len * count) / float(total))))
percents = round(((100.0 * count) / float(total)), 1)
bar = (('=' * filled_len) + ('-' * (bar_len - filled_len)))
sys.stdout.write(' [{}] {}% of {:.1f}MB file \r'.format(bar, percents, ((total / 1024) / 1024)))
sys.stdout.flush()
if (count >= total):
sys.stdout.write('\n')
|
def download_url(url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar):
    """Download url and write it to dst_file_path; return bytes downloaded.
    Credit:
    https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
    """
    response = urllib.request.urlopen(url)
    # BUG FIX: response.info().getheader() is the Python 2 httplib API.
    # Python 3's http.client.HTTPResponse exposes headers via getheader().
    total_size = int(response.getheader('Content-Length').strip())
    bytes_so_far = 0
    with open(dst_file_path, 'wb') as f:
        while True:
            chunk = response.read(chunk_size)
            bytes_so_far += len(chunk)
            if not chunk:
                break
            if progress_hook:
                progress_hook(bytes_so_far, total_size)
            f.write(chunk)
    return bytes_so_far
|
def _get_file_md5sum(file_name):
'Compute the md5 hash of a file.'
hash_obj = hashlib.md5()
with open(file_name, 'r') as f:
hash_obj.update(f.read())
return hash_obj.hexdigest()
|
def _get_reference_md5sum(url):
    """By convention the md5 hash for url is stored in url + '.md5sum'."""
    url_md5sum = url + '.md5sum'
    # BUG FIX: urlopen().read() returns bytes in Python 3; decode so the
    # result compares equal to the str hexdigest from _get_file_md5sum().
    md5sum = urllib.request.urlopen(url_md5sum).read().decode('utf-8').strip()
    return md5sum
|
class CosineRestartAnnealingLR(object):
    """Per-iteration cosine-annealing LR scheduler with optional warmup and
    optional warm restarts.

    Mimics the torch.optim.lr_scheduler interface (step/get_lr/state_dict/
    load_state_dict) but advances once per optimization step, not per epoch.
    """
    def __init__(self, optimizer, T_max, lr_period, lr_step, eta_min=0, last_step=(- 1), use_warmup=False, warmup_mode='linear', warmup_steps=0, warmup_startlr=0, warmup_targetlr=0, use_restart=False):
        # Warmup settings: 'constant' holds warmup_startlr; 'linear' ramps
        # warmup_startlr -> warmup_targetlr over the first warmup_steps steps.
        self.use_warmup = use_warmup
        self.warmup_mode = warmup_mode
        self.warmup_steps = warmup_steps
        self.warmup_startlr = warmup_startlr
        self.warmup_targetlr = warmup_targetlr
        self.use_restart = use_restart
        self.T_max = T_max
        self.eta_min = eta_min
        if (self.use_restart == False):
            # No restarts: one cosine cycle covering all post-warmup steps.
            self.lr_period = [(self.T_max - self.warmup_steps)]
            self.lr_step = [self.warmup_steps]
        else:
            # lr_step[i] is the step at which the cycle of length
            # lr_period[i] begins.
            self.lr_period = lr_period
            self.lr_step = lr_step
        self.last_step = last_step
        self.cycle_length = self.lr_period[0]
        # Step index at which the current cosine cycle started.
        self.cur = 0
        if (not isinstance(optimizer, Optimizer)):
            raise TypeError('{} is not an Optimizer'.format(type(optimizer).__name__))
        self.optimizer = optimizer
        if (last_step == (- 1)):
            # Fresh run: remember each param group's starting lr.
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            # Resuming: 'initial_lr' must have been saved with the optimizer.
            for (i, group) in enumerate(optimizer.param_groups):
                if ('initial_lr' not in group):
                    raise KeyError("param 'initial_lr' is not specified in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = list(map((lambda group: group['initial_lr']), optimizer.param_groups))
    def step(self, step=None):
        """Advance to ``step`` (or by one) and write new lrs to the optimizer."""
        if (step is not None):
            self.last_step = step
        else:
            self.last_step += 1
        for (param_group, lr) in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr
    def get_lr(self):
        """Return the lr for each param group at ``self.last_step``.

        NOTE: also mutates cycle bookkeeping (cycle_length/cur) when the
        current step is a restart point.
        """
        lrs = []
        for base_lr in self.base_lrs:
            if (self.use_warmup and (self.last_step < self.warmup_steps)):
                if (self.warmup_mode == 'constant'):
                    lrs.append(self.warmup_startlr)
                elif (self.warmup_mode == 'linear'):
                    # Linear interpolation from startlr to targetlr.
                    cur_lr = (self.warmup_startlr + ((float((self.warmup_targetlr - self.warmup_startlr)) / self.warmup_steps) * self.last_step))
                    lrs.append(cur_lr)
                else:
                    raise NotImplementedError
            else:
                if (self.last_step in self.lr_step):
                    # Restart point: begin a new cosine cycle here.
                    self.cycle_length = self.lr_period[self.lr_step.index(self.last_step)]
                    self.cur = self.last_step
                peri_iter = (self.last_step - self.cur)
                if (peri_iter <= self.cycle_length):
                    # Cosine decay from base_lr down to eta_min over the cycle.
                    unit_cycle = ((1 + math.cos(((peri_iter * math.pi) / self.cycle_length))) / 2)
                    adjusted_cycle = ((unit_cycle * (base_lr - self.eta_min)) + self.eta_min)
                    lrs.append(adjusted_cycle)
                else:
                    # Past the final cycle: hold at the minimum lr.
                    lrs.append(self.eta_min)
        return lrs
    def display_lr_curve(self, total_steps):
        """Plot the lr over ``total_steps`` steps (mutates scheduler state)."""
        lrs = []
        for _ in range(total_steps):
            self.step()
            lrs.append(self.get_lr()[0])
        import matplotlib.pyplot as plt
        plt.plot(lrs)
        plt.show()
    def state_dict(self):
        """Scheduler state for checkpointing (the optimizer is excluded)."""
        return {key: value for (key, value) in self.__dict__.items() if (key != 'optimizer')}
    def load_state_dict(self, state_dict):
        """Restore state previously produced by state_dict()."""
        self.__dict__.update(state_dict)
|
def get_lr_scheduler(config, optimizer, num_examples=None, batch_size=None):
    """Build the LR scheduler described by ``config.optim`` for ``optimizer``.

    Epoch lengths are derived from num_examples/batch_size; warmup and
    restart schedules from the config are converted from epochs to steps.
    Only the 'cosine' schedule is implemented.
    """
    if num_examples is None:
        num_examples = config.data.num_examples
    epoch_steps = num_examples // batch_size + 1
    if config.optim.use_multi_stage:
        max_steps = epoch_steps * config.optim.multi_stage.stage_epochs
    else:
        max_steps = epoch_steps * config.train_params.epochs
    # Convert restart schedule from epochs to optimizer steps.
    period_steps = [epoch_steps * x for x in config.optim.cosine.restart.lr_period]
    step_steps = [epoch_steps * x for x in config.optim.cosine.restart.lr_step]
    init_lr = config.optim.init_lr
    use_warmup = config.optim.use_warm_up
    if use_warmup:
        warmup_steps = config.optim.warm_up.epoch * epoch_steps
        warmup_startlr = config.optim.warm_up.init_lr
        warmup_targetlr = config.optim.warm_up.target_lr
    else:
        # Without warmup the warmup window is empty and lrs start at init_lr.
        warmup_steps = 0
        warmup_startlr = init_lr
        warmup_targetlr = init_lr
    if config.optim.lr_schedule == 'cosine':
        return CosineRestartAnnealingLR(
            optimizer, float(max_steps), period_steps, step_steps,
            eta_min=config.optim.min_lr, use_warmup=use_warmup,
            warmup_steps=warmup_steps, warmup_startlr=warmup_startlr,
            warmup_targetlr=warmup_targetlr,
            use_restart=config.optim.cosine.use_restart)
    # 'poly' and anything else are not implemented.
    raise NotImplementedError
|
def comp_multadds(model, input_size=(3, 224, 224)):
    """Return the model's multiply-adds (in millions) for a single input of
    shape ``input_size``, measured on GPU via the flops-counting hooks."""
    model = add_flops_counting_methods(model.cuda())
    model.start_flops_count()
    dummy = torch.randn((1,) + tuple(input_size)).cuda()
    with torch.no_grad():
        model(dummy)
    return model.compute_average_flops_cost() / 1000000.0
|
def comp_multadds_fw(model, input_data, use_gpu=True):
    """Run one forward pass on ``input_data`` and return
    (mult_adds_in_millions, output)."""
    model = add_flops_counting_methods(model)
    if use_gpu:
        model = model.cuda()
    model.start_flops_count()
    with torch.no_grad():
        output_data = model(input_data)
    flops_m = model.compute_average_flops_cost() / 1000000.0
    return (flops_m, output_data)
|
def add_flops_counting_methods(net_main_module):
    """Attach flops-counting methods to ``net_main_module`` and return it.

    Binds start_flops_count / stop_flops_count / reset_flops_count /
    compute_average_flops_cost onto the module, resets all counters, and
    clears any per-layer flops masks. Typical use:

        fcn = add_flops_counting_methods(fcn).cuda().train()
        fcn.start_flops_count()
        _ = fcn(batch)
        gflops = fcn.compute_average_flops_cost() / 1e9 / 2  # per image

    Counting works by registering forward hooks: the main module tracks the
    sum of batch sizes seen, and every Conv2d/Linear tracks its flops, so
    the reported value is a per-image average (which also makes it correct
    for dynamic networks that execute different layers per batch). A
    multiply-add is counted as two flops; dividing by 2 recovers the
    mult-adds convention used by most benchmarks.
    """
    for name, fn in (('start_flops_count', start_flops_count),
                     ('stop_flops_count', stop_flops_count),
                     ('reset_flops_count', reset_flops_count),
                     ('compute_average_flops_cost', compute_average_flops_cost)):
        # Bind the free functions as methods of this particular module.
        setattr(net_main_module, name, fn.__get__(net_main_module))
    net_main_module.reset_flops_count()
    net_main_module.apply(add_flops_mask_variable_or_reset)
    return net_main_module
|
def compute_average_flops_cost(self):
    """Return mean flops per image accumulated since the last reset.

    Available on a net after add_flops_counting_methods() has been called.
    """
    total = 0
    for m in self.modules():
        if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
            total += m.__flops__
    return total / self.__batch_counter__
|
def start_flops_count(self):
    """Activate per-image flops accounting: installs the batch-counter hook
    on the main module and flops hooks on every conv/linear layer.

    Available after add_flops_counting_methods(); call before the forward.
    """
    add_batch_counter_hook_function(self)
    self.apply(add_flops_counter_hook_function)
|
def stop_flops_count(self):
    """Pause flops accounting by removing all counting hooks.

    Available after add_flops_counting_methods().
    """
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
|
def reset_flops_count(self):
    """Zero the batch counter and every layer's flops counter.

    Available after add_flops_counting_methods().
    """
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
|
def add_flops_mask(module, mask):
    """Attach ``mask`` as the flops mask on every Conv2d/Linear in ``module``."""
    def _set_mask(m):
        if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
            m.__mask__ = mask
    module.apply(_set_mask)
|
def remove_flops_mask(module):
    """Clear the flops mask (reset to None) on every conv/linear layer."""
    module.apply(add_flops_mask_variable_or_reset)
|
def conv_flops_counter_hook(conv_module, input, output):
    """Forward hook: accumulate this conv call's flops into __flops__.

    Counts kernel multiply-adds per active output position; an optional
    spatial __mask__ limits which positions count as active.
    """
    batch_size = input[0].shape[0]
    output_height, output_width = output.shape[2:]
    kernel_height, kernel_width = conv_module.kernel_size
    # Cost of applying the kernel at one output position.
    conv_per_position_flops = (kernel_height * kernel_width *
                               conv_module.in_channels *
                               conv_module.out_channels / conv_module.groups)
    active_elements_count = batch_size * output_height * output_width
    if conv_module.__mask__ is not None:
        # The mask selects which spatial positions are actually computed.
        flops_mask = conv_module.__mask__.expand(batch_size, 1, output_height, output_width)
        active_elements_count = flops_mask.sum()
    overall_flops = conv_per_position_flops * active_elements_count
    if conv_module.bias is not None:
        overall_flops += conv_module.out_channels * active_elements_count
    conv_module.__flops__ += overall_flops
|
def linear_flops_counter_hook(linear_module, input, output):
    """Forward hook: accumulate this linear call's flops into __flops__."""
    batch_size = input[0].shape[0]
    linear_module.__flops__ += (linear_module.in_features *
                                linear_module.out_features * batch_size)
|
def batch_counter_hook(module, input, output):
    """Forward hook: add this call's batch size to the module's counter."""
    module.__batch_counter__ += input[0].shape[0]
|
def add_batch_counter_variables_or_reset(module):
    """Initialize (or reset) the module's batch counter to zero."""
    module.__batch_counter__ = 0
|
def add_batch_counter_hook_function(module):
    """Register the batch-counting forward hook, at most once per module."""
    if hasattr(module, '__batch_counter_handle__'):
        return  # hook already installed
    module.__batch_counter_handle__ = module.register_forward_hook(batch_counter_hook)
|
def remove_batch_counter_hook_function(module):
    """Unregister the batch-counting hook if one was installed."""
    if not hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__.remove()
    del module.__batch_counter_handle__
|
def add_flops_counter_variable_or_reset(module):
    """Zero the __flops__ counter on conv/linear layers (no-op otherwise)."""
    if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
        module.__flops__ = 0
|
def add_flops_counter_hook_function(module):
    """Register the matching flops-counting hook on conv/linear layers,
    at most once per layer; other module types are ignored."""
    if hasattr(module, '__flops_handle__'):
        return  # already registered
    if isinstance(module, torch.nn.Conv2d):
        hook = conv_flops_counter_hook
    elif isinstance(module, torch.nn.Linear):
        hook = linear_flops_counter_hook
    else:
        return
    module.__flops_handle__ = module.register_forward_hook(hook)
|
def remove_flops_counter_hook_function(module):
    """Remove the flops-counting hook from conv/linear layers, if present."""
    if not isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
        return
    if hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
|
def add_flops_mask_variable_or_reset(module):
    """Clear any flops mask on conv/linear layers (reset __mask__ to None)."""
    if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
        module.__mask__ = None
|
class AverageMeter(object):
    """Track the latest value, running sum, count and mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Note: ``cur`` is only created by update(), mirroring the original
        # behavior (reading it before any update raises AttributeError).
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        self.cur = val
        self.sum += val * n
        self.cnt += n
        self.avg = self.sum / self.cnt
|
def accuracy(output, target, topk=(1, 5)):
    """Return the top-k accuracies (in percent, as tensors) for each k."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    # Compare each of the top-maxk predictions against the targets.
    correct = pred.eq(target.reshape(1, -1).expand_as(pred))
    results = []
    for k in topk:
        hits_k = correct[:k].reshape(-1).float().sum(0)
        results.append(hits_k.mul_(100.0 / batch_size))
    return results
|
def count_parameters_in_MB(model):
    """Return the parameter count in millions, excluding parameters whose
    name contains 'aux' (auxiliary-head convention in NAS code)."""
    # BUG FIX: np.sum over a generator is deprecated and unreliable (it can
    # wrap the generator in a 0-d object array); the builtin sum is the
    # documented replacement and gives identical results.
    return sum(np.prod(v.size()) for name, v in model.named_parameters()
               if 'aux' not in name) / 1000000.0
|
def save_checkpoint(state, is_best, save):
    """Write ``state`` to checkpoint.pth.tar inside ``save``; when
    ``is_best``, also copy it to model_best.pth.tar."""
    ckpt_path = os.path.join(save, 'checkpoint.pth.tar')
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, os.path.join(save, 'model_best.pth.tar'))
|
def save(model, model_path):
    """Serialize the model's state_dict to ``model_path``."""
    torch.save(model.state_dict(), model_path)
|
def load_net_config(path):
    """Parse a net-config file and return (net_config, net_type).

    Lines are read (and stripped) until one containing 'net_type' appears;
    everything before it is concatenated without separators into net_config,
    and the net_type value is taken from after ': ' on the marker line.
    """
    net_config = ''
    with open(path, 'r') as f:
        while True:
            line = f.readline().strip()
            if 'net_type' in line:
                net_type = line.split(': ')[-1]
                break
            net_config += line
    return (net_config, net_type)
|
def load_model(model, model_path):
    """Load a state_dict into ``model`` from a local path or an http(s) URL.

    For URLs, the file is fetched with wget into the current working
    directory (removing any stale copy first) and loaded from there.
    """
    logging.info(('Start loading the model from ' + model_path))
    if ('http' in model_path):
        # NOTE(review): model_path is interpolated into shell commands via
        # os.system -- only call this with trusted paths/URLs (injection
        # risk); consider subprocess.run([...]) instead.
        model_addr = model_path
        # Download target is the URL's basename in the current directory.
        model_path = model_path.split('/')[(- 1)]
        if os.path.isfile(model_path):
            os.system(('rm ' + model_path))
        os.system(('wget -q ' + model_addr))
    model.load_state_dict(torch.load(model_path))
    logging.info('Loading the model finished!')
|
def create_exp_dir(path):
    """Create ``path`` (including missing parents) if needed and print it."""
    # BUG FIX: the old exists-check + os.mkdir was racy and failed when the
    # parent directory was missing; makedirs(exist_ok=True) handles both.
    os.makedirs(path, exist_ok=True)
    print('Experiment dir : {}'.format(path))
|
def cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.0):
    """Cross-entropy loss with label smoothing.

    This function is taken from
    https://github.com/MIT-HAN-LAB/ProxylessNAS/blob/master/proxyless_nas/utils.py

    Args:
        pred: (N, C) logits (moved through a CUDA LogSoftmax, so GPU inputs
            are expected here).
        target: (N,) integer class indices.
        label_smoothing: probability mass spread uniformly over all classes.
    """
    # BUG FIX: LogSoftmax requires an explicit dim -- the implicit-dim form
    # is deprecated and ambiguous; the class dimension is 1 for (N, C) preds.
    logsoftmax = nn.LogSoftmax(dim=1).cuda()
    n_classes = pred.size(1)
    target = torch.unsqueeze(target, 1)
    # One-hot targets, then smooth toward the uniform distribution.
    soft_target = torch.zeros_like(pred)
    soft_target.scatter_(1, target, 1)
    soft_target = soft_target * (1 - label_smoothing) + label_smoothing / n_classes
    return torch.mean(torch.sum(-soft_target * logsoftmax(pred), 1))
|
def parse_net_config(net_config):
    """Parse a '|'-separated net config string into Python objects.

    NOTE(review): each segment is passed through eval(); only feed trusted
    config strings to this function.
    """
    return [eval(segment) for segment in net_config.split('|')]
|
def set_seed(seed):
    """Seed the numpy and torch (CPU + CUDA) RNGs for reproducibility."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    # No-op queued lazily when CUDA is unavailable.
    torch.cuda.manual_seed(seed)
|
def set_logging(save_path, log_name='log.txt'):
    """Route INFO+ logging to stdout and to ``save_path/log_name``."""
    fmt = '%(asctime)s %(message)s'
    datefmt = '%m/%d %H:%M:%S'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=fmt, datefmt=datefmt)
    file_handler = logging.FileHandler(os.path.join(save_path, log_name))
    file_handler.setFormatter(logging.Formatter(fmt, datefmt))
    logging.getLogger().addHandler(file_handler)
|
def create_save_dir(save_path, job_name):
    """Create the experiment output directory; return (output_path, job_name).

    With a non-empty job_name, a timestamped job directory is created, the
    current source tree is copied into it (for reproducibility), and output
    goes to its 'output' subdirectory. Otherwise output goes directly under
    save_path/output.
    """
    if job_name != '':
        job_name = time.strftime('%Y%m%d-%H%M%S-') + job_name
        save_path = os.path.join(save_path, job_name)
        create_exp_dir(save_path)
        # Snapshot the code alongside the results.
        os.system('cp -r ./* ' + save_path)
    save_path = os.path.join(save_path, 'output')
    create_exp_dir(save_path)
    return (save_path, job_name)
|
def latency_measure(module, input_size, batch_size, meas_times, mode='gpu'):
    """Measure and return mean forward latency in milliseconds.

    The first 100 iterations are warm-up and excluded from the mean, so
    meas_times must be > 100 to produce a meaningful result.

    Args:
        module: network to time (switched to eval mode).
        input_size: per-sample input shape, e.g. (3, 224, 224).
        batch_size: batch dimension prepended to input_size.
        meas_times: total number of forward passes to run.
        mode: 'gpu' or 'cpu'.
    """
    assert mode in ['gpu', 'cpu']
    latency = []
    module.eval()
    input_data = torch.randn((batch_size,) + tuple(input_size))
    use_gpu = mode == 'gpu'
    if use_gpu:
        input_data = input_data.cuda()
        module.cuda()
    for i in range(meas_times):
        with torch.no_grad():
            start = time.time()
            _ = module(input_data)
            # BUG FIX: only synchronize in GPU mode; the old unconditional
            # torch.cuda.synchronize() crashed 'cpu' runs on CUDA-less hosts.
            if use_gpu:
                torch.cuda.synchronize()
            if i >= 100:  # skip warm-up iterations
                latency.append(time.time() - start)
    print(np.mean(latency) * 1000.0, 'ms')
    return np.mean(latency) * 1000.0
|
def latency_measure_fw(module, input_data, meas_times):
    """Measure mean forward latency; return (latency_ms, last_output).

    The first 100 iterations are warm-up and excluded from the mean, so
    meas_times must be > 100 to produce a meaningful result. ``input_data``
    should already live on the device the module runs on.
    """
    latency = []
    module.eval()
    for i in range(meas_times):
        with torch.no_grad():
            start = time.time()
            output_data = module(input_data)
            # BUG FIX: guard the sync so CPU-only environments don't crash;
            # on GPU it still ensures the kernel finished before timing.
            if torch.cuda.is_available():
                torch.cuda.synchronize()
            if i >= 100:  # skip warm-up iterations
                latency.append(time.time() - start)
    print(np.mean(latency) * 1000.0, 'ms')
    return (np.mean(latency) * 1000.0, output_data)
|
def record_topk(k, rec_list, data, comp_attr, check_attr):
    """Insert ``data`` into ``rec_list`` -- kept sorted descending by
    ``data[comp_attr]`` -- if it ranks within the top k, trimming the list
    back to k entries afterwards.

    Returns True when the record was inserted. ``check_attr`` is unused and
    kept only for interface compatibility.
    """
    def _insertion_point(sorted_list):
        # Binary search (descending order) for the first index whose value
        # is <= data's value.
        lo, hi = 0, len(sorted_list)
        while lo < hi:
            mid = (lo + hi) // 2
            if data[comp_attr] < sorted_list[mid][comp_attr]:
                lo = mid + 1
            else:
                hi = mid
        return lo
    idx = _insertion_point(rec_list)
    inserted = idx < k
    if inserted:
        rec_list.insert(idx, data)
        while len(rec_list) > k:
            rec_list.pop()
    return inserted
|
class BilevelDataset(Dataset):
    """Wrap a dataset, splitting it into four train shards and one validation
    shard of equal size; every item pairs one sample from each shard.
    """
    def __init__(self, dataset):
        """
        Args:
            dataset: PyTorch Dataset object (only len() and indexing used).
        """
        self.dataset = dataset
        inds = np.arange(len(dataset))
        n_train = int(0.2 * len(inds))
        # Five equal, contiguous shards over the index range.
        shards = [inds[i * n_train:(i + 1) * n_train] for i in range(5)]
        (self.train_inds1, self.train_inds2, self.train_inds3,
         self.train_inds4, self.val_inds) = shards
        assert len(self.train_inds1) == len(self.val_inds)

    def shuffle_val_inds(self):
        # Re-pair validation samples with train samples at random.
        np.random.shuffle(self.val_inds)

    def __len__(self):
        return len(self.train_inds1)

    def __getitem__(self, idx):
        shards = (self.train_inds1, self.train_inds2, self.train_inds3,
                  self.train_inds4, self.val_inds)
        pairs = [self.dataset[shard[idx]] for shard in shards]
        (x1, y1), (x2, y2), (x3, y3), (x4, y4), (xv, yv) = pairs
        return (x1, y1, x2, y2, x3, y3, x4, y4, xv, yv)
|
def download_from_s3(s3_bucket, task, download_dir):
    """Fetch the dataset files for ``task`` from S3 into ``download_dir``.

    Files already present locally are not re-downloaded. cifar10/cifar100
    need nothing from S3, so the function returns immediately for them. The
    'audio' archive is additionally unzipped into download_dir/data.

    Raises:
        NotImplementedError: for unknown tasks.
    """
    # BUG FIX: 'smnist' used a bare `if` followed by a separate if/elif/else
    # chain, so task == 'smnist' always fell through to the final else and
    # raised NotImplementedError. A single chain fixes the dispatch.
    if task == 'smnist':
        data_files = ['s2_mnist.gz']
        s3_folder = 'spherical'
    elif task == 'scifar100':
        data_files = ['s2_cifar100.gz']
        s3_folder = 'spherical'
    elif task == 'sEMG':
        data_files = ['trainval_Myo.pt', 'test_Myo.pt']
        s3_folder = 'Myo'
    elif task == 'ninapro':
        data_files = ['ninapro_train.npy', 'ninapro_val.npy', 'ninapro_test.npy', 'label_train.npy', 'label_val.npy', 'label_test.npy']
        s3_folder = 'ninapro'
    elif task in ('cifar10', 'cifar100'):
        # Handled elsewhere (torchvision downloads these itself).
        return
    elif task == 'audio':
        data_files = ['audio.zip']
        s3_folder = 'audio'
    else:
        raise NotImplementedError
    # Create the client only once we know something must be downloaded.
    s3 = boto3.client('s3')
    for data_file in data_files:
        filepath = os.path.join(download_dir, data_file)
        if s3_folder is not None:
            s3_path = os.path.join(s3_folder, data_file)
        else:
            s3_path = data_file
        if not os.path.exists(filepath):
            s3.download_file(s3_bucket, s3_path, filepath)
    if task == 'audio' and not os.path.exists(os.path.join(download_dir, 'data')):
        os.mkdir(os.path.join(download_dir, 'data'))
        import zipfile
        with zipfile.ZipFile(os.path.join(download_dir, 'audio.zip'), 'r') as zip_ref:
            zip_ref.extractall(os.path.join(download_dir, 'data'))
    return
|
class ImageNet12(object):
    """Builds train/validation DataLoaders for ImageNet-style data, backed by
    either plain image folders or LMDB datasets, with several augmentation
    recipes ('rand_scale', 'random_sized', and a weaker 'week_train').
    """
    def __init__(self, trainFolder, testFolder, num_workers=8, pin_memory=True, size_images=224, scaled_size=256, type_of_data_augmentation='rand_scale', data_config=None):
        self.data_config = data_config
        self.trainFolder = trainFolder
        self.testFolder = testFolder
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        # Whether the lmdb dataset is split into multiple sub-databases.
        self.patch_dataset = self.data_config.patch_dataset
        if (not isinstance(size_images, int)):
            raise ValueError('size_images must be an int. It will be scaled to a square image')
        self.size_images = size_images
        self.scaled_size = scaled_size
        type_of_data_augmentation = type_of_data_augmentation.lower()
        if (type_of_data_augmentation not in ('rand_scale', 'random_sized')):
            raise ValueError('type_of_data_augmentation must be either rand-scale or random-sized')
        self.type_of_data_augmentation = type_of_data_augmentation
    def _getTransformList(self, aug_type):
        """Build the torchvision transform pipeline for ``aug_type``."""
        assert (aug_type in ['rand_scale', 'random_sized', 'week_train', 'validation'])
        list_of_transforms = []
        if (aug_type == 'validation'):
            # Deterministic resize + center crop for evaluation.
            list_of_transforms.append(transforms.Resize(self.scaled_size))
            list_of_transforms.append(transforms.CenterCrop(self.size_images))
        elif (aug_type == 'week_train'):
            # Weaker augmentation: fixed resize, then random crop + flip.
            list_of_transforms.append(transforms.Resize(256))
            list_of_transforms.append(transforms.RandomCrop(self.size_images))
            list_of_transforms.append(transforms.RandomHorizontalFlip())
        else:
            if (aug_type == 'rand_scale'):
                # Scale-jitter the shorter side into [256, 480] before crop.
                list_of_transforms.append(transforms_extension.RandomScale(256, 480))
                list_of_transforms.append(transforms.RandomCrop(self.size_images))
                list_of_transforms.append(transforms.RandomHorizontalFlip())
            elif (aug_type == 'random_sized'):
                list_of_transforms.append(transforms.RandomResizedCrop(self.size_images, scale=(self.data_config.random_sized.min_scale, 1.0)))
                list_of_transforms.append(transforms.RandomHorizontalFlip())
            if self.data_config.color:
                # Optional color jitter for the strong-augmentation recipes.
                list_of_transforms.append(transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4))
        return transforms.Compose(list_of_transforms)
    def _getTrainSet(self):
        """Build the training dataset (image folder or lmdb, per config)."""
        train_transform = self._getTransformList(self.type_of_data_augmentation)
        if (self.data_config.train_data_type == 'img'):
            train_set = torchvision.datasets.ImageFolder(self.trainFolder, train_transform)
        elif (self.data_config.train_data_type == 'lmdb'):
            train_set = lmdb_dataset.ImageFolder(self.trainFolder, os.path.join(self.trainFolder, '..', 'train_datalist'), train_transform, patch_dataset=self.patch_dataset)
        self.train_num_examples = train_set.__len__()
        return train_set
    def _getWeekTrainSet(self):
        """Build the training dataset with the weaker 'week_train' recipe."""
        train_transform = self._getTransformList('week_train')
        if (self.data_config.train_data_type == 'img'):
            train_set = torchvision.datasets.ImageFolder(self.trainFolder, train_transform)
        elif (self.data_config.train_data_type == 'lmdb'):
            train_set = lmdb_dataset.ImageFolder(self.trainFolder, os.path.join(self.trainFolder, '..', 'train_datalist'), train_transform, patch_dataset=self.patch_dataset)
        self.train_num_examples = train_set.__len__()
        return train_set
    def _getTestSet(self):
        """Build the validation dataset with deterministic transforms."""
        test_transform = self._getTransformList('validation')
        if (self.data_config.val_data_type == 'img'):
            test_set = torchvision.datasets.ImageFolder(self.testFolder, test_transform)
        elif (self.data_config.val_data_type == 'lmdb'):
            test_set = lmdb_dataset.ImageFolder(self.testFolder, os.path.join(self.testFolder, '..', 'val_datalist'), test_transform)
        self.test_num_examples = test_set.__len__()
        return test_set
    def getTrainLoader(self, batch_size, shuffle=True):
        """Return a DataLoader over the (strongly augmented) train set."""
        train_set = self._getTrainSet()
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=None, collate_fn=fast_collate)
        return train_loader
    def getWeekTrainLoader(self, batch_size, shuffle=True):
        """Return a DataLoader over the weakly augmented train set."""
        train_set = self._getWeekTrainSet()
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=self.pin_memory, collate_fn=fast_collate)
        return train_loader
    def getTestLoader(self, batch_size, shuffle=False):
        """Return a DataLoader over the validation set."""
        test_set = self._getTestSet()
        test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=None, collate_fn=fast_collate)
        return test_loader
    def getTrainTestLoader(self, batch_size, train_shuffle=True, val_shuffle=False):
        """Return (train_loader, test_loader)."""
        train_loader = self.getTrainLoader(batch_size, train_shuffle)
        test_loader = self.getTestLoader(batch_size, val_shuffle)
        return (train_loader, test_loader)
    def getSetTrainTestLoader(self, batch_size, train_shuffle=True, val_shuffle=False):
        """Return ((train_loader, week_train_loader), test_loader)."""
        train_loader = self.getTrainLoader(batch_size, train_shuffle)
        week_train_loader = self.getWeekTrainLoader(batch_size, train_shuffle)
        test_loader = self.getTestLoader(batch_size, val_shuffle)
        return ((train_loader, week_train_loader), test_loader)
|
class Datum(object):
    """An (image, label) record msgpack-serialized for LMDB storage.

    The payload layout is: raw image bytes followed by a 2-byte uint16
    label. NOTE(review): a second Datum class defined later in this file
    shadows this one at import time.
    """
    def __init__(self, shape=None, image=None, label=None):
        self.shape = shape
        self.image = image
        self.label = label
    def SerializeToString(self, img=None):
        """Pack image bytes + uint16 label; ``img`` is unused (kept for API
        compatibility)."""
        image_data = self.image.astype(np.uint8).tobytes()
        label_data = np.uint16(self.label).tobytes()
        return msgpack.packb((image_data + label_data), use_bin_type=True)
    def ParseFromString(self, raw_data, orig_img):
        """Unpack bytes into self.image/self.label; ``orig_img`` is unused.

        The last two bytes are the uint16 label; everything before them is
        an encoded image buffer decoded with OpenCV (BGR order).
        """
        raw_data = msgpack.unpackb(raw_data, raw=False)
        raw_img_data = raw_data[:(- 2)]
        image_data = np.frombuffer(raw_img_data, dtype=np.uint8)
        self.image = cv2.imdecode(image_data, cv2.IMREAD_COLOR)
        raw_label_data = raw_data[(- 2):]
        self.label = np.frombuffer(raw_label_data, dtype=np.uint16)
|
def create_dataset(output_path, image_folder, image_list, image_size):
    """Build an LMDB dataset from the images listed in ``image_list``.

    Each record is keyed by its relative image name and stores a
    JPEG-encoded (optionally resized) image plus its class label, packed
    with Datum.SerializeToString(). Commits every 1000 records.

    Args:
        output_path: directory for the new LMDB environment.
        image_folder: root folder containing one subfolder per class.
        image_list: text file of relative image paths ('class/name.jpg').
        image_size: target length for the shorter image side (falsy skips
            resizing).
    """
    image_name_list = [i.strip() for i in open(image_list)]
    n_samples = len(image_name_list)
    env = lmdb.open(output_path, map_size=1099511627776, meminit=False, map_async=True)
    txn = env.begin(write=True)
    # BUG FIX: os.listdir order is filesystem-dependent, which made label
    # indices nondeterministic; sorting matches torchvision's ImageFolder.
    classes = sorted(d for d in os.listdir(image_folder)
                     if os.path.isdir(os.path.join(image_folder, d)))
    for (idx, image_name) in enumerate(tqdm(image_name_list)):
        image_path = os.path.join(image_folder, image_name)
        # The class name is the first path component of the relative name.
        label_name = image_name.split('/')[0]
        label = classes.index(label_name)
        if not os.path.isfile(image_path):
            raise RuntimeError('%s does not exist' % image_path)
        img = cv2.imread(image_path)
        img_orig = img
        if image_size:
            # Scale so the shorter side equals image_size, keeping aspect.
            resize_ratio = float(image_size) / min(img.shape[0:2])
            new_size = (int(img.shape[1] * resize_ratio), int(img.shape[0] * resize_ratio))
            img = cv2.resize(src=img, dsize=new_size)
        img = cv2.imencode('.JPEG', img)[1]
        image = np.asarray(img)
        datum = Datum(image.shape, image, label)
        txn.put(image_name.encode('ascii'), datum.SerializeToString())
        if ((idx + 1) % 1000) == 0:
            # Periodic commits keep the write transaction small.
            txn.commit()
            txn = env.begin(write=True)
    txn.commit()
    env.sync()
    env.close()
    print(f'Created dataset with {n_samples:d} samples')
|
class Datum(object):
    """An (image, label) record msgpack-serialized for LMDB storage.

    NOTE(review): this duplicates (and, being later, shadows) the earlier
    Datum class; the only difference is the extra unused parameters the
    earlier version accepted. Payload layout: raw image bytes followed by a
    2-byte uint16 label.
    """
    def __init__(self, shape=None, image=None, label=None):
        self.shape = shape
        self.image = image
        self.label = label
    def SerializeToString(self):
        """Pack image bytes + uint16 label into a msgpack binary blob."""
        image_data = self.image.astype(np.uint8).tobytes()
        label_data = np.uint16(self.label).tobytes()
        return msgpack.packb((image_data + label_data), use_bin_type=True)
    def ParseFromString(self, raw_data):
        """Unpack raw bytes into self.image / self.label.

        The last two bytes are the uint16 label; everything before them is
        an encoded image buffer decoded with OpenCV (BGR order).
        """
        raw_data = msgpack.unpackb(raw_data, raw=False)
        raw_img_data = raw_data[:(- 2)]
        image_data = np.frombuffer(raw_img_data, dtype=np.uint8)
        self.image = cv2.imdecode(image_data, cv2.IMREAD_COLOR)
        raw_label_data = raw_data[(- 2):]
        self.label = np.frombuffer(raw_label_data, dtype=np.uint16)
|
class DatasetFolder(data.Dataset):
    """LMDB-backed dataset of (image, class_index) samples.

    Args:
        root (string): Root directory of the LMDB environment(s).
        list_path (string): Text file listing one sample key per line.
        transform (callable, optional): A function/transform that takes in
            a sample and returns a transformed version.
            E.g, ``transforms.RandomCrop`` for images.
        target_transform (callable, optional): A function/transform that takes
            in the target and transforms it.
        patch_dataset (bool): If True, ``root`` contains several sub-LMDBs
            and samples are spread over them in 10 contiguous chunks.

    Attributes:
        samples (list): List of sample keys read from ``list_path``.
    """
    def __init__(self, root, list_path, transform=None, target_transform=None, patch_dataset=False):
        self.root = root
        self.patch_dataset = patch_dataset
        if patch_dataset:
            # One read transaction per sub-database under root.
            self.txn = []
            for path in os.listdir(root):
                lmdb_path = os.path.join(root, path)
                if os.path.isdir(lmdb_path):
                    env = lmdb.open(lmdb_path, readonly=True, lock=False, readahead=False, meminit=False)
                    self.txn.append(env.begin(write=False))
        else:
            self.env = lmdb.open(root, readonly=True, lock=False, readahead=False, meminit=False)
            self.txn = self.env.begin(write=False)
        self.list_path = list_path
        # BUG FIX: close the datalist file instead of leaking the handle.
        with open(list_path) as f:
            self.samples = [image_name.strip() for image_name in f]
        if len(self.samples) == 0:
            raise RuntimeError((('Found 0 files in subfolders of: ' + root) + '\n'))
        self.transform = transform
        self.target_transform = target_transform
    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (sample, target) where target is class_index of the target class.
        """
        img_name = self.samples[index]
        if self.patch_dataset:
            # Keys are distributed over 10 sub-databases in list order.
            txn_index = index // (len(self.samples) // 10)
            if txn_index == 10:
                txn_index = 9  # remainder rows belong to the last database
            txn = self.txn[txn_index]
        else:
            txn = self.txn
        datum = Datum()
        data_bin = txn.get(img_name.encode('ascii'))
        if data_bin is None:
            raise RuntimeError(f'Key {img_name} not found')
        datum.ParseFromString(data_bin)
        # LMDB stores BGR (OpenCV); convert to RGB for PIL/torchvision.
        sample = Image.fromarray(cv2.cvtColor(datum.image, cv2.COLOR_BGR2RGB))
        # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is the
        # documented replacement and behaves identically here.
        target = int(datum.label)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (sample, target)
    def __len__(self):
        return len(self.samples)
    def __repr__(self):
        fmt_str = (('Dataset ' + self.__class__.__name__) + '\n')
        fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += ' Root Location: {}\n'.format(self.root)
        tmp = ' Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        tmp = ' Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        return fmt_str
|
class ImageFolder(DatasetFolder):
    """DatasetFolder specialisation that additionally exposes the sample
    list under the torchvision-compatible ``imgs`` alias."""

    def __init__(self, root, list_path, transform=None, target_transform=None, patch_dataset=False):
        super().__init__(root, list_path, transform=transform,
                         target_transform=target_transform,
                         patch_dataset=patch_dataset)
        # Alias kept for API compatibility with torchvision's ImageFolder.
        self.imgs = self.samples
|
def get_list(data_path, output_path):
    """Write one ``<split>_datalist`` file per split directory under data_path.

    Each line is ``<class>/<image>`` for every image found under the split.
    Files are opened in append mode ('a+'), so re-running this function adds
    duplicate entries to existing datalists.

    Args:
        data_path: directory containing one sub-directory per split.
        output_path: directory where the datalist files are written.
    """
    for split in os.listdir(data_path):
        split_path = os.path.join(data_path, split)
        if not os.path.isdir(split_path):
            continue
        # Context manager guarantees the handle is closed even if listing fails.
        with open(os.path.join(output_path, split + '_datalist'), 'a+') as f:
            for sub in os.listdir(split_path):
                sub_path = os.path.join(split_path, sub)
                if not os.path.isdir(sub_path):
                    continue
                for image in os.listdir(sub_path):
                    image_name = (sub + '/') + image
                    f.writelines(image_name + '\n')
|
def get_list(data_path, output_path):
    """Split the ``train`` directory into train/val datalists.

    Only the first 100 classes (in ``os.listdir`` order, which is not
    guaranteed to be deterministic across filesystems) are used; within each
    class the first 80% of images go to ``train_datalist`` and the remainder
    to ``val_datalist``. Both files are truncated ('w') on each run.

    NOTE: this definition shadows the earlier ``get_list`` in this module.
    """
    for split in os.listdir(data_path):
        if split != 'train':
            continue
        split_path = os.path.join(data_path, split)
        if not os.path.isdir(split_path):
            continue
        # Context managers close both handles even if listing/IO fails.
        with open(os.path.join(output_path, split + '_datalist'), 'w') as f_train, \
                open(os.path.join(output_path, 'val' + '_datalist'), 'w') as f_val:
            class_list = os.listdir(split_path)
            for sub in class_list[:100]:
                sub_path = os.path.join(split_path, sub)
                if not os.path.isdir(sub_path):
                    continue
                img_list = os.listdir(sub_path)
                train_len = int(0.8 * len(img_list))
                for image in img_list[:train_len]:
                    f_train.writelines(os.path.join(sub, image) + '\n')
                for image in img_list[train_len:]:
                    f_val.writelines(os.path.join(sub, image) + '\n')
|
class Lighting(object):
    """AlexNet-style PCA-based lighting-noise augmentation."""

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, img):
        # Disabled augmentation: return the input untouched.
        if self.alphastd == 0:
            return img
        # One Gaussian coefficient per principal component.
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        perturb = self.eigvec.type_as(img).clone()
        perturb = perturb.mul(alpha.view(1, 3).expand(3, 3))
        perturb = perturb.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = perturb.sum(1).squeeze()
        return img.add(rgb.view(3, 1, 1).expand_as(img))
|
class RandomScale(object):
    """ResNet-style augmentation: resize so the shorter side is a random
    integer drawn uniformly from [minSize, maxSize]."""

    def __init__(self, minSize, maxSize):
        self.minSize = minSize
        self.maxSize = maxSize

    def __call__(self, img):
        target_size = int(round(random.uniform(self.minSize, self.maxSize)))
        return F.resize(img, target_size)
|
def generate_arch(task, net_type, threshold_arch):
    """Randomly sample DenseNAS architectures until one is found whose
    parameter count is within +1MB of the model built from ``threshold_arch``.

    Args:
        task: dataset/task name used to select the search config file.
        net_type: network family; selects search-space and derive modules.
        threshold_arch: architecture string defining the parameter budget.

    Returns:
        The accepted derived architecture string.

    NOTE(review): if no sampled architecture ever satisfies the parameter
    budget, the while-loop below never terminates -- consider a retry cap.
    """
    update_cfg_from_cfg(search_cfg, cfg)
    # Select the per-task random-search config and the raw input shape.
    if (task in ['cifar10', 'cifar100']):
        merge_cfg_from_file('configs/cifar_random_search_cfg_resnet.yaml', cfg)
        input_shape = (3, 32, 32)
    elif (task in ['scifar100', 'smnist']):
        merge_cfg_from_file('configs/spherical_random_cfg_resnet.yaml', cfg)
        input_shape = ((3, 60, 60) if (task == 'scifar100') else (1, 60, 60))
    elif (task == 'ninapro'):
        merge_cfg_from_file('configs/ninapro_search_cfg_resnet.yaml', cfg)
        input_shape = (1, 16, 52)
    elif (task == 'audio'):
        merge_cfg_from_file('configs/audio_random_cfg_resnet.yaml', cfg)
        input_shape = (1, 96, 101)
    else:
        raise NotImplementedError
    config = copy.deepcopy(cfg)
    pprint.pformat(config)  # NOTE(review): result is discarded; likely meant to be printed/logged
    # Resolve search-space, arch-generator and derived-network classes by name.
    SearchSpace = importlib.import_module(('models.search_space_' + net_type)).Network
    ArchGenerater = importlib.import_module(('run_apis.derive_arch_' + net_type), __package__).ArchGenerate
    derivedNetwork = getattr(model_derived, ('%s_Net' % net_type.upper()))
    der_Net = (lambda net_config: derivedNetwork(net_config, task=task, config=config))
    # The threshold ("target") architecture defines the acceptance budget.
    target_model = der_Net(threshold_arch)
    target_flops = comp_multadds(target_model, input_size=input_shape)
    print(('Target Model Mult-Adds = %.2fMB' % target_flops))
    target_params = utils.count_parameters_in_MB(target_model)
    lower_than_target = False
    while (not lower_than_target):
        # Each iteration builds a fresh (randomly initialised) super-network
        # and derives one candidate architecture from its arch parameters.
        config = copy.deepcopy(cfg)
        super_model = SearchSpace(config.optim.init_dim, task, config)
        arch_gener = ArchGenerater(super_model, config)
        (betas, head_alphas, stack_alphas) = super_model.display_arch_params()
        derived_arch = arch_gener.derive_archs(betas, head_alphas, stack_alphas)
        derived_arch_str = '|\n'.join(map(str, derived_arch))
        derived_model = der_Net(derived_arch_str)
        derived_flops = comp_multadds(derived_model, input_size=input_shape)
        derived_params = utils.count_parameters_in_MB(derived_model)
        if (derived_params <= (target_params + 1)):
            print('found arch!')
            lower_than_target = True
    print(('Derived Model Mult-Adds = %.2fMB' % derived_flops))
    print(('Derived Model Num Params = %.2fMB' % derived_params))
    print(derived_arch_str)
    return derived_arch_str
|
class AttrDict(dict):
    """dict subclass whose items are also reachable as attributes
    (``d.key`` is ``d['key']``)."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Point the attribute store at the mapping itself so attribute
        # access and item access share the same underlying data.
        self.__dict__ = self
|
class DenseNASSearchTrial(PyTorchTrial):
    """Determined trial performing DenseNAS bi-level search: SGD on network
    weights plus Adam on the architecture (alpha/beta) parameters, for the
    classification tasks (cifar/spherical/ninapro)."""
    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        """Build the super-network, both optimizers and the LR scheduler from
        the task-specific search config, then download data from S3."""
        self.context = trial_context
        self.hparams = AttrDict(trial_context.get_hparams())
        self.last_epoch = 0
        update_cfg_from_cfg(search_cfg, cfg)
        # Select the per-task search config file and raw input shape.
        if (self.hparams.task in ['cifar10', 'cifar100']):
            merge_cfg_from_file('configs/cifar_small_search_cfg_resnet.yaml', cfg)
            input_shape = (3, 32, 32)
        elif (self.hparams.task in ['scifar100', 'smnist']):
            merge_cfg_from_file('configs/spherical_search_cfg_resnet.yaml', cfg)
            input_shape = ((3, 60, 60) if (self.hparams.task == 'scifar100') else (1, 60, 60))
        elif (self.hparams.task == 'ninapro'):
            merge_cfg_from_file('configs/ninapro_search_cfg_resnet.yaml', cfg)
            input_shape = (1, 16, 52)
        elif (self.hparams.task == 'sEMG'):
            print('Not implemented yet!')
            input_shape = (1, 8, 52)
        else:
            raise NotImplementedError
        config = cfg
        self.input_shape = input_shape
        pprint.pformat(config)  # NOTE(review): result discarded; likely meant to be logged
        cudnn.benchmark = True
        cudnn.enabled = True
        self.criterion = nn.CrossEntropyLoss()
        self.criterion = self.criterion.cuda()
        # Resolve search-space / arch-generator / derived-network classes by name.
        SearchSpace = importlib.import_module(('models.search_space_' + self.hparams.net_type)).Network
        ArchGenerater = importlib.import_module(('run_apis.derive_arch_' + self.hparams.net_type), __package__).ArchGenerate
        derivedNetwork = getattr(model_derived, ('%s_Net' % self.hparams.net_type.upper()))
        super_model = SearchSpace(config.optim.init_dim, self.hparams.task, config)
        self.arch_gener = ArchGenerater(super_model, config)
        self.der_Net = (lambda net_config: derivedNetwork(net_config, task=self.hparams.task, config=config))
        super_model = super_model.cuda()
        if (config.optim.sub_obj.type == 'flops'):
            # Pre-compute per-branch flops used as the search sub-objective.
            (flops_list, total_flops) = super_model.get_cost_list(input_shape, cost_type='flops')
            super_model.sub_obj_list = flops_list
            print('Super Network flops (M) list: \n')
            print(str(flops_list))
            print(('Total flops: ' + str(total_flops)))
            '\n        elif config.optim.sub_obj.type==\'latency\':\n            with open(os.path.join(\'latency_list\', config.optim.sub_obj.latency_list_path), \'r\') as f:\n                latency_list = eval(f.readline())\n            super_model.module.sub_obj_list = latency_list\n            print("Super Network latency (ms) list: \n")\n            print(str(latency_list))\n        '
        else:
            raise NotImplementedError
        pprint.pformat('Num params = %.2fMB', utils.count_parameters_in_MB(super_model))  # NOTE(review): pformat does no %-interpolation and the result is unused
        self.model = self.context.wrap_model(super_model)
        total_params = (sum((p.numel() for p in self.model.parameters() if p.requires_grad)) / 1000000.0)
        print('Parameter size in MB: ', total_params)
        self.Dropped_Network = (lambda model: Dropped_Network(model, softmax_temp=config.search_params.softmax_temp))
        # Partition parameters: everything that is not an arch parameter is a weight.
        arch_params_id = list(map(id, self.model.arch_parameters))
        weight_params = filter((lambda p: (id(p) not in arch_params_id)), self.model.parameters())
        self.weight_sample_num = config.search_params.weight_sample_num
        self.weight_optimizer = self.context.wrap_optimizer(torch.optim.SGD(weight_params, config.optim.weight.init_lr, momentum=config.optim.weight.momentum, weight_decay=config.optim.weight.weight_decay))
        self.arch_optimizer = self.context.wrap_optimizer(torch.optim.Adam([{'params': self.model.arch_alpha_params, 'lr': config.optim.arch.alpha_lr}, {'params': self.model.arch_beta_params, 'lr': config.optim.arch.beta_lr}], betas=(0.5, 0.999), weight_decay=config.optim.arch.weight_decay))
        scheduler = get_lr_scheduler(config, self.weight_optimizer, self.hparams.num_examples, self.context.get_per_slot_batch_size())
        scheduler.last_step = 0
        # Scheduler is stepped manually inside train_batch.
        self.scheduler = self.context.wrap_lr_scheduler(scheduler, step_mode=LRScheduler.StepMode.MANUAL_STEP)
        self.config = config
        self.download_directory = self.download_data_from_s3()
    def download_data_from_s3(self):
        'Download data from s3 to store in temp directory'
        s3_bucket = self.context.get_data_config()['bucket']
        download_directory = f'/tmp/data-rank{self.context.distributed.get_rank()}'
        s3 = boto3.client('s3')
        os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        (self.train_data, self.val_data, self.test_data) = load_data(self.hparams.task, download_directory, True, self.hparams.permute)
        return download_directory
    def build_training_data_loader(self) -> DataLoader:
        """Wrap the training set so each batch yields 4 train sub-batches
        plus one validation batch for the bi-level architecture update."""
        self.train_data = BilevelDataset(self.train_data)
        print('Length of bilevel dataset: ', len(self.train_data))
        return DataLoader(self.train_data, batch_size=self.context.get_per_slot_batch_size(), shuffle=True, num_workers=2)
    def build_validation_data_loader(self) -> DataLoader:
        valset = self.val_data
        return DataLoader(valset, batch_size=self.context.get_per_slot_batch_size(), shuffle=False, num_workers=2)
    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[(str, torch.Tensor)]:
        """One bi-level step: an arch update on the held-out split (only after
        ``arch_update_epoch``), then a weight update on the train split."""
        if (epoch_idx != self.last_epoch):
            self.train_data.shuffle_val_inds()
            self.last_epoch = epoch_idx
        search_stage = (1 if (epoch_idx > self.config.search_params.arch_update_epoch) else 0)
        (x_train1, y_train1, x_train2, y_train2, x_train3, y_train3, x_train4, y_train4, x_val, y_val) = batch
        x_train = torch.cat((x_train1, x_train2, x_train3, x_train4), 0)
        y_train = torch.cat((y_train1, y_train2, y_train3, y_train4), 0)
        n = (x_train1.size(0) * 4)
        arch_loss = 0
        if search_stage:
            self.set_param_grad_state('Arch')
            (arch_logits, arch_loss, arch_subobj) = self.arch_step(x_val, y_val, self.model, search_stage)
        self.scheduler.step()
        self.set_param_grad_state('Weights')
        (logits, loss, subobj) = self.weight_step(x_train, y_train, self.model, search_stage)
        (prec1, prec5) = utils.accuracy(logits, y_train, topk=(1, 5))
        return {'loss': loss, 'arch_loss': arch_loss, 'train_accuracy': prec1.item()}
    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Evaluate the super-network and log the currently derived
        architecture's cost (flops / params)."""
        obj = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()
        sub_obj = utils.AverageMeter()
        self.set_param_grad_state('')
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                (logits, loss, subobj) = self.valid_step(input, target, self.model)
                (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
                obj.update(loss, n)
                top1.update(prec1.item(), n)
                top5.update(prec5.item(), n)
                sub_obj.update(subobj, n)
        # Derive and report the current best architecture (side effect: prints).
        (betas, head_alphas, stack_alphas) = self.model.display_arch_params()
        derived_arch = self.arch_gener.derive_archs(betas, head_alphas, stack_alphas)
        derived_arch_str = '|\n'.join(map(str, derived_arch))
        derived_model = self.der_Net(derived_arch_str)
        derived_flops = comp_multadds(derived_model, input_size=self.input_shape)
        derived_params = utils.count_parameters_in_MB(derived_model)
        print(('Derived Model Mult-Adds = %.2fMB' % derived_flops))
        print(('Derived Model Num Params = %.2fMB' % derived_params))
        print(derived_arch_str)
        return {'validation_loss': obj.avg, 'validation_subloss': sub_obj.avg, 'validation_accuracy': top1.avg, 'validation_top5': top5.avg}
    def weight_step(self, input_train, target_train, model, search_stage):
        """SGD step on network weights with freshly sampled branches."""
        (_, _) = model.sample_branch('head', self.weight_sample_num, search_stage=search_stage)
        (_, _) = model.sample_branch('stack', self.weight_sample_num, search_stage=search_stage)
        self.weight_optimizer.zero_grad()
        dropped_model = self.Dropped_Network(model)
        (logits, sub_obj) = dropped_model(input_train)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_train)
        loss.backward()
        self.weight_optimizer.step()
        return (logits.detach(), loss.item(), sub_obj.item())
    def set_param_grad_state(self, stage):
        """Enable gradients only for the parameter group being optimised:
        'Arch', 'Weights', or anything else to freeze both."""
        def set_grad_state(params, state):
            for group in params:
                for param in group['params']:
                    param.requires_grad_(state)
        if (stage == 'Arch'):
            state_list = [True, False]
        elif (stage == 'Weights'):
            state_list = [False, True]
        else:
            state_list = [False, False]
        set_grad_state(self.arch_optimizer.param_groups, state_list[0])
        set_grad_state(self.weight_optimizer.param_groups, state_list[1])
    def arch_step(self, input_valid, target_valid, model, search_stage):
        """Adam step on arch parameters, optionally penalised by the
        (log-scaled) sub-objective, then rescale the un-sampled branches."""
        (head_sampled_w_old, alpha_head_index) = model.sample_branch('head', 2, search_stage=search_stage)
        (stack_sampled_w_old, alpha_stack_index) = model.sample_branch('stack', 2, search_stage=search_stage)
        self.arch_optimizer.zero_grad()
        dropped_model = self.Dropped_Network(model)
        (logits, sub_obj) = dropped_model(input_valid)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_valid)
        if self.config.optim.if_sub_obj:
            # log_base changes the base of the logarithmic cost penalty.
            loss_sub_obj = (torch.log(sub_obj) / torch.log(torch.tensor(self.config.optim.sub_obj.log_base)))
            sub_loss_factor = self.config.optim.sub_obj.sub_loss_factor
            loss += (loss_sub_obj * sub_loss_factor)
        loss.backward()
        self.arch_optimizer.step()
        self.rescale_arch_params(head_sampled_w_old, stack_sampled_w_old, alpha_head_index, alpha_stack_index, model)
        return (logits.detach(), loss.item(), sub_obj.item())
    def rescale_arch_params(self, alpha_head_weights_drop, alpha_stack_weights_drop, alpha_head_index, alpha_stack_index, model):
        """Shift the updated (sampled) arch weights so that their softmax
        mass relative to the un-sampled ones matches the pre-update state."""
        def comp_rescale_value(old_weights, new_weights, index):
            old_exp_sum = old_weights.exp().sum()
            new_drop_arch_params = torch.gather(new_weights, dim=(- 1), index=index)
            new_exp_sum = new_drop_arch_params.exp().sum()
            # log of the ratio keeps exp-sums equal after the additive shift
            rescale_value = torch.log((old_exp_sum / new_exp_sum)).item()
            rescale_mat = torch.zeros_like(new_weights).scatter_(0, index, rescale_value)
            return (rescale_value, rescale_mat)
        def rescale_params(old_weights, new_weights, indices):
            for (i, (old_weights_block, indices_block)) in enumerate(zip(old_weights, indices)):
                for (j, (old_weights_branch, indices_branch)) in enumerate(zip(old_weights_block, indices_block)):
                    (rescale_value, rescale_mat) = comp_rescale_value(old_weights_branch, new_weights[i][j], indices_branch)
                    new_weights[i][j].data.add_(rescale_mat)
        rescale_params(alpha_head_weights_drop, model.alpha_head_weights, alpha_head_index)
        rescale_params(alpha_stack_weights_drop, model.alpha_stack_weights, alpha_stack_index)
    def valid_step(self, input_valid, target_valid, model):
        """Forward pass with a single sampled branch (no gradient state changes)."""
        (_, _) = model.sample_branch('head', 1, training=False)
        (_, _) = model.sample_branch('stack', 1, training=False)
        dropped_model = self.Dropped_Network(model)
        (logits, sub_obj) = dropped_model(input_valid)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_valid)
        return (logits, loss.item(), sub_obj.item())
|
class AttrDict(dict):
    """dict subclass whose items are also reachable as attributes
    (``d.key`` is ``d['key']``)."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Share one store between attributes and mapping entries.
        self.__dict__ = self
|
class DenseNASSearchTrial(PyTorchTrial):
    """Audio-task variant of the DenseNAS search trial: multi-label setup
    with BCE-with-logits loss and mAP as the validation metric.

    NOTE(review): this redefines the DenseNASSearchTrial declared earlier in
    this module; only this definition is visible after import."""
    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        """Build the super-network, both optimizers and the LR scheduler from
        the audio search config, then download data from S3."""
        self.context = trial_context
        self.hparams = AttrDict(trial_context.get_hparams())
        self.last_epoch = 0
        update_cfg_from_cfg(search_cfg, cfg)
        if (self.hparams.task == 'audio'):
            merge_cfg_from_file('configs/audio_search_cfg_resnet.yaml', cfg)
            input_shape = (1, 96, 101)
        else:
            raise NotImplementedError
        config = cfg
        self.input_shape = input_shape
        pprint.pformat(config)  # NOTE(review): result discarded; likely meant to be logged
        cudnn.benchmark = True
        cudnn.enabled = True
        # Multi-label audio tagging -> BCE over per-class logits.
        self.criterion = nn.BCEWithLogitsLoss().cuda()
        SearchSpace = importlib.import_module(('models.search_space_' + self.hparams.net_type)).Network
        ArchGenerater = importlib.import_module(('run_apis.derive_arch_' + self.hparams.net_type), __package__).ArchGenerate
        derivedNetwork = getattr(model_derived, ('%s_Net' % self.hparams.net_type.upper()))
        super_model = SearchSpace(config.optim.init_dim, self.hparams.task, config)
        self.arch_gener = ArchGenerater(super_model, config)
        self.der_Net = (lambda net_config: derivedNetwork(net_config, task=self.hparams.task, config=config))
        super_model = super_model.cuda()
        if (config.optim.sub_obj.type == 'flops'):
            # Pre-compute per-branch flops used as the search sub-objective.
            (flops_list, total_flops) = super_model.get_cost_list(input_shape, cost_type='flops')
            super_model.sub_obj_list = flops_list
            print('Super Network flops (M) list: \n')
            print(str(flops_list))
            print(('Total flops: ' + str(total_flops)))
            '\n        elif config.optim.sub_obj.type==\'latency\':\n            with open(os.path.join(\'latency_list\', config.optim.sub_obj.latency_list_path), \'r\') as f:\n                latency_list = eval(f.readline())\n            super_model.module.sub_obj_list = latency_list\n            print("Super Network latency (ms) list: \n")\n            print(str(latency_list))\n        '
        else:
            raise NotImplementedError
        pprint.pformat('Num params = %.2fMB', utils.count_parameters_in_MB(super_model))  # NOTE(review): pformat does no %-interpolation and the result is unused
        self.model = self.context.wrap_model(super_model)
        total_params = (sum((p.numel() for p in self.model.parameters() if p.requires_grad)) / 1000000.0)
        print('Parameter size in MB: ', total_params)
        self.Dropped_Network = (lambda model: Dropped_Network(model, softmax_temp=config.search_params.softmax_temp))
        # Partition parameters: everything that is not an arch parameter is a weight.
        arch_params_id = list(map(id, self.model.arch_parameters))
        weight_params = filter((lambda p: (id(p) not in arch_params_id)), self.model.parameters())
        self.weight_sample_num = config.search_params.weight_sample_num
        self.weight_optimizer = self.context.wrap_optimizer(torch.optim.SGD(weight_params, config.optim.weight.init_lr, momentum=config.optim.weight.momentum, weight_decay=config.optim.weight.weight_decay))
        self.arch_optimizer = self.context.wrap_optimizer(torch.optim.Adam([{'params': self.model.arch_alpha_params, 'lr': config.optim.arch.alpha_lr}, {'params': self.model.arch_beta_params, 'lr': config.optim.arch.beta_lr}], betas=(0.5, 0.999), weight_decay=config.optim.arch.weight_decay))
        scheduler = get_lr_scheduler(config, self.weight_optimizer, self.hparams.num_examples, self.context.get_per_slot_batch_size())
        scheduler.last_step = 0
        # Scheduler is stepped manually inside train_batch.
        self.scheduler = self.context.wrap_lr_scheduler(scheduler, step_mode=LRScheduler.StepMode.MANUAL_STEP)
        self.config = config
        self.download_directory = self.download_data_from_s3()
    def download_data_from_s3(self):
        'Download data from s3 to store in temp directory'
        s3_bucket = self.context.get_data_config()['bucket']
        s3 = boto3.client('s3')
        # NOTE(review): downloads into the working directory (no per-rank dir
        # as in the classification variant) -- confirm this is intended.
        download_directory = '.'
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        (self.train_data, self.val_data, self.test_data) = load_data(self.hparams.task, download_directory, True, self.hparams.permute)
        return download_directory
    def build_training_data_loader(self) -> DataLoader:
        """Wrap the training set so each batch yields 4 train sub-batches
        plus one validation batch for the bi-level architecture update."""
        self.train_data = BilevelDataset(self.train_data)
        print('Length of bilevel dataset: ', len(self.train_data))
        return DataLoader(self.train_data, batch_size=self.context.get_per_slot_batch_size(), shuffle=True, sampler=None, collate_fn=_collate_fn, pin_memory=False, drop_last=True, num_workers=4)
    def build_validation_data_loader(self) -> DataLoader:
        # batch_size=1: each item expands into several crops of one clip.
        valset = self.val_data
        return DataLoader(valset, sampler=None, num_workers=4, collate_fn=_collate_fn_eval, shuffle=False, batch_size=1, pin_memory=False)
    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[(str, torch.Tensor)]:
        """One bi-level step: an arch update on the held-out split (only after
        ``arch_update_epoch``), then a weight update on the train split."""
        if (epoch_idx != self.last_epoch):
            self.train_data.shuffle_val_inds()
            self.last_epoch = epoch_idx
        search_stage = (1 if (epoch_idx > self.config.search_params.arch_update_epoch) else 0)
        (x_train1, y_train1, x_train2, y_train2, x_train3, y_train3, x_train4, y_train4, x_val, y_val) = batch
        x_train = torch.cat((x_train1, x_train2, x_train3, x_train4), 0)
        y_train = torch.cat((y_train1, y_train2, y_train3, y_train4), 0)
        n = (x_train1.size(0) * 4)
        arch_loss = 0
        if search_stage:
            self.set_param_grad_state('Arch')
            (arch_logits, arch_loss, arch_subobj) = self.arch_step(x_val, y_val, self.model, search_stage)
        self.scheduler.step()
        self.set_param_grad_state('Weights')
        (logits, loss, subobj) = self.weight_step(x_train, y_train, self.model, search_stage)
        return {'loss': loss, 'arch_loss': arch_loss}
    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Compute macro-averaged mAP over the validation set and log the
        currently derived architecture's cost (flops / params)."""
        obj = utils.AverageMeter()
        sub_obj = utils.AverageMeter()
        val_predictions = []
        val_gts = []
        self.set_param_grad_state('')
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                (logits, loss, subobj) = self.valid_step(input, target, self.model)
                # Average the per-crop logits of one clip into a single prediction.
                logits = logits.mean(0).unsqueeze(0)
                obj.update(loss, n)
                sub_obj.update(subobj, n)
                logits_sigmoid = torch.sigmoid(logits)
                val_predictions.append(logits_sigmoid.detach().cpu().numpy()[0])
                val_gts.append(target.detach().cpu().numpy()[0])
        val_preds = np.asarray(val_predictions).astype('float32')
        val_gts = np.asarray(val_gts).astype('int32')
        map_value = average_precision_score(val_gts, val_preds, average='macro')
        # Derive and report the current best architecture (side effect: prints).
        (betas, head_alphas, stack_alphas) = self.model.display_arch_params()
        derived_arch = self.arch_gener.derive_archs(betas, head_alphas, stack_alphas)
        derived_arch_str = '|\n'.join(map(str, derived_arch))
        derived_model = self.der_Net(derived_arch_str)
        derived_flops = comp_multadds(derived_model, input_size=self.input_shape)
        derived_params = utils.count_parameters_in_MB(derived_model)
        print(('Derived Model Mult-Adds = %.2fMB' % derived_flops))
        print(('Derived Model Num Params = %.2fMB' % derived_params))
        print(derived_arch_str)
        return {'validation_loss': obj.avg, 'validation_subloss': sub_obj.avg, 'val_mAP': map_value}
    def weight_step(self, input_train, target_train, model, search_stage):
        """SGD step on network weights with freshly sampled branches."""
        (_, _) = model.sample_branch('head', self.weight_sample_num, search_stage=search_stage)
        (_, _) = model.sample_branch('stack', self.weight_sample_num, search_stage=search_stage)
        self.weight_optimizer.zero_grad()
        dropped_model = self.Dropped_Network(model)
        (logits, sub_obj) = dropped_model(input_train)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_train)
        loss.backward()
        self.weight_optimizer.step()
        return (logits.detach(), loss.item(), sub_obj.item())
    def set_param_grad_state(self, stage):
        """Enable gradients only for the parameter group being optimised:
        'Arch', 'Weights', or anything else to freeze both."""
        def set_grad_state(params, state):
            for group in params:
                for param in group['params']:
                    param.requires_grad_(state)
        if (stage == 'Arch'):
            state_list = [True, False]
        elif (stage == 'Weights'):
            state_list = [False, True]
        else:
            state_list = [False, False]
        set_grad_state(self.arch_optimizer.param_groups, state_list[0])
        set_grad_state(self.weight_optimizer.param_groups, state_list[1])
    def arch_step(self, input_valid, target_valid, model, search_stage):
        """Adam step on arch parameters, optionally penalised by the
        (log-scaled) sub-objective, then rescale the un-sampled branches."""
        (head_sampled_w_old, alpha_head_index) = model.sample_branch('head', 2, search_stage=search_stage)
        (stack_sampled_w_old, alpha_stack_index) = model.sample_branch('stack', 2, search_stage=search_stage)
        self.arch_optimizer.zero_grad()
        dropped_model = self.Dropped_Network(model)
        (logits, sub_obj) = dropped_model(input_valid)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_valid)
        if self.config.optim.if_sub_obj:
            # log_base changes the base of the logarithmic cost penalty.
            loss_sub_obj = (torch.log(sub_obj) / torch.log(torch.tensor(self.config.optim.sub_obj.log_base)))
            sub_loss_factor = self.config.optim.sub_obj.sub_loss_factor
            loss += (loss_sub_obj * sub_loss_factor)
        loss.backward()
        self.arch_optimizer.step()
        self.rescale_arch_params(head_sampled_w_old, stack_sampled_w_old, alpha_head_index, alpha_stack_index, model)
        return (logits.detach(), loss.item(), sub_obj.item())
    def rescale_arch_params(self, alpha_head_weights_drop, alpha_stack_weights_drop, alpha_head_index, alpha_stack_index, model):
        """Shift the updated (sampled) arch weights so that their softmax
        mass relative to the un-sampled ones matches the pre-update state."""
        def comp_rescale_value(old_weights, new_weights, index):
            old_exp_sum = old_weights.exp().sum()
            new_drop_arch_params = torch.gather(new_weights, dim=(- 1), index=index)
            new_exp_sum = new_drop_arch_params.exp().sum()
            # log of the ratio keeps exp-sums equal after the additive shift
            rescale_value = torch.log((old_exp_sum / new_exp_sum)).item()
            rescale_mat = torch.zeros_like(new_weights).scatter_(0, index, rescale_value)
            return (rescale_value, rescale_mat)
        def rescale_params(old_weights, new_weights, indices):
            for (i, (old_weights_block, indices_block)) in enumerate(zip(old_weights, indices)):
                for (j, (old_weights_branch, indices_branch)) in enumerate(zip(old_weights_block, indices_block)):
                    (rescale_value, rescale_mat) = comp_rescale_value(old_weights_branch, new_weights[i][j], indices_branch)
                    new_weights[i][j].data.add_(rescale_mat)
        rescale_params(alpha_head_weights_drop, model.alpha_head_weights, alpha_head_index)
        rescale_params(alpha_stack_weights_drop, model.alpha_stack_weights, alpha_stack_index)
    def valid_step(self, input_valid, target_valid, model):
        """Forward pass with a single sampled branch; per-crop logits are
        averaged into one clip-level prediction before the loss."""
        (_, _) = model.sample_branch('head', 1, training=False)
        (_, _) = model.sample_branch('stack', 1, training=False)
        dropped_model = self.Dropped_Network(model)
        (logits, sub_obj) = dropped_model(input_valid)
        logits = logits.mean(0).unsqueeze(0)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_valid)
        return (logits, loss.item(), sub_obj.item())
|
class AttrDict(dict):
    """dict subclass whose items are also reachable as attributes
    (``d.key`` is ``d['key']``)."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Share one store between attributes and mapping entries.
        self.__dict__ = self
|
class DenseNASTrainTrial(PyTorchTrial):
    """Trains a fixed (derived) DenseNAS network with SGD for the
    classification tasks; ``net_config == 'random'`` samples an architecture
    via ``generate_arch`` first.

    NOTE(review): relies on a module-level ``config`` object that is not
    (re)initialised here -- confirm it carries the intended train config."""
    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        """Build the derived network, optimizer and LR scheduler, then
        download data from S3."""
        self.context = trial_context
        self.hparams = AttrDict(trial_context.get_hparams())
        self.last_epoch = 0
        pprint.pformat(config)  # NOTE(review): result discarded; likely meant to be logged
        cudnn.benchmark = True
        cudnn.enabled = True
        self.criterion = nn.CrossEntropyLoss()
        self.criterion = self.criterion.cuda()
        (config.net_config, config.net_type) = (self.hparams.net_config, self.hparams.net_type)
        derivedNetwork = getattr(model_derived, ('%s_Net' % self.hparams.net_type.upper()))
        if (self.hparams.net_config == 'random'):
            # Randomly sample an architecture within the target budget.
            self.rand_arch = generate_arch(self.hparams.task, self.hparams.net_type, self.hparams.target_arch)
            model = derivedNetwork(self.rand_arch, task=self.hparams.task, config=config)
        else:
            model = derivedNetwork(config.net_config, task=self.hparams.task, config=config)
        pprint.pformat('Num params = %.2fMB', utils.count_parameters_in_MB(model))  # NOTE(review): pformat does no %-interpolation and the result is unused
        self.model = self.context.wrap_model(model)
        optimizer = torch.optim.SGD(model.parameters(), config.optim.init_lr, momentum=config.optim.momentum, weight_decay=config.optim.weight_decay)
        self.optimizer = self.context.wrap_optimizer(optimizer)
        scheduler = get_lr_scheduler(config, self.optimizer, self.hparams.num_examples, self.context.get_per_slot_batch_size())
        scheduler.last_step = 0
        # Scheduler is stepped manually inside train_batch.
        self.scheduler = self.context.wrap_lr_scheduler(scheduler, step_mode=LRScheduler.StepMode.MANUAL_STEP)
        self.config = config
        self.download_directory = self.download_data_from_s3()
    def download_data_from_s3(self):
        'Download data from s3 to store in temp directory'
        s3_bucket = self.context.get_data_config()['bucket']
        download_directory = f'/tmp/data-rank{self.context.distributed.get_rank()}'
        s3 = boto3.client('s3')
        os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        # Random-search runs validate on the val split; final training runs
        # validate on the test split.
        if (self.hparams.net_config == 'random'):
            (self.train_data, self.val_data, _) = load_data(self.hparams.task, download_directory, True, self.hparams.permute)
        else:
            (self.train_data, _, self.val_data) = load_data(self.hparams.task, download_directory, False, self.hparams.permute)
        return download_directory
    def build_training_data_loader(self) -> DataLoader:
        trainset = self.train_data
        return DataLoader(trainset, batch_size=self.context.get_per_slot_batch_size(), shuffle=True, num_workers=2)
    def build_validation_data_loader(self) -> DataLoader:
        valset = self.val_data
        return DataLoader(valset, batch_size=self.context.get_per_slot_batch_size(), shuffle=False, num_workers=2)
    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[(str, torch.Tensor)]:
        """One SGD step; LR scheduler is advanced manually each batch."""
        (x_train, y_train) = batch
        self.scheduler.step()
        logits = self.model(x_train)
        loss = self.criterion(logits, y_train)
        self.context.backward(loss)
        self.context.step_optimizer(self.optimizer)
        (prec1, prec5) = utils.accuracy(logits, y_train, topk=(1, 5))
        return {'loss': loss, 'train_accuracy': prec1.item()}
    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Average loss / top-1 / top-5 accuracy over the validation loader."""
        obj = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.criterion(logits, target)
                (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
                obj.update(loss, n)
                top1.update(prec1.item(), n)
                top5.update(prec5.item(), n)
        return {'validation_loss': obj.avg, 'validation_accuracy': top1.avg, 'validation_top5': top5.avg}
|
class AttrDict(dict):
    """dict subclass whose items are also reachable as attributes
    (``d.key`` is ``d['key']``)."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Share one store between attributes and mapping entries.
        self.__dict__ = self
|
class DenseNASTrainTrial(PyTorchTrial):
def __init__(self, trial_context: PyTorchTrialContext) -> None:
self.context = trial_context
self.hparams = AttrDict(trial_context.get_hparams())
self.last_epoch = 0
pprint.pformat(config)
cudnn.benchmark = True
cudnn.enabled = True
self.criterion = nn.BCEWithLogitsLoss().cuda()
(config.net_config, config.net_type) = (self.hparams.net_config, self.hparams.net_type)
derivedNetwork = getattr(model_derived, ('%s_Net' % self.hparams.net_type.upper()))
if (self.hparams.net_config == 'random'):
self.rand_arch = generate_arch(self.hparams.task, self.hparams.net_type, self.hparams.target_arch)
model = derivedNetwork(self.rand_arch, task=self.hparams.task, config=config)
else:
model = derivedNetwork(config.net_config, task=self.hparams.task, config=config)
pprint.pformat('Num params = %.2fMB', utils.count_parameters_in_MB(model))
self.model = self.context.wrap_model(model)
total_params = (sum((p.numel() for p in self.model.parameters() if p.requires_grad)) / 1000000.0)
print('Parameter size in MB: ', total_params)
optimizer = torch.optim.SGD(model.parameters(), config.optim.init_lr, momentum=config.optim.momentum, weight_decay=config.optim.weight_decay)
self.optimizer = self.context.wrap_optimizer(optimizer)
scheduler = get_lr_scheduler(config, self.optimizer, self.hparams.num_examples, self.context.get_per_slot_batch_size())
scheduler.last_step = 0
self.scheduler = self.context.wrap_lr_scheduler(scheduler, step_mode=LRScheduler.StepMode.MANUAL_STEP)
self.config = config
self.download_directory = self.download_data_from_s3()
def download_data_from_s3(self):
'Download data from s3 to store in temp directory'
s3_bucket = self.context.get_data_config()['bucket']
s3 = boto3.client('s3')
download_directory = '.'
download_from_s3(s3_bucket, self.hparams.task, download_directory)
if (self.hparams.net_config == 'random'):
(self.train_data, self.val_data, _) = load_data(self.hparams.task, download_directory, True, self.hparams.permute)
else:
(self.train_data, _, self.val_data) = load_data(self.hparams.task, download_directory, False, self.hparams.permute)
return download_directory
def build_training_data_loader(self) -> DataLoader:
trainset = self.train_data
return DataLoader(trainset, num_workers=4, batch_size=self.context.get_per_slot_batch_size(), shuffle=True, sampler=None, collate_fn=_collate_fn_part, pin_memory=False, drop_last=True)
def build_validation_data_loader(self) -> DataLoader:
    """Wrap the cached validation split in an unshuffled, single-example DataLoader."""
    loader_kwargs = dict(
        sampler=None,
        num_workers=4,
        collate_fn=_collate_fn_eval,
        shuffle=False,
        batch_size=1,
        pin_memory=False,
    )
    return DataLoader(self.val_data, **loader_kwargs)
def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[(str, torch.Tensor)]:
    """Run one optimization step on a training batch and report the loss.

    The LR scheduler is stepped manually once per batch, before the forward
    pass (the scheduler was wrapped with ``LRScheduler.StepMode.MANUAL_STEP``).
    Backward and optimizer stepping go through the trial context.
    """
    (x_train, y_train) = batch
    self.scheduler.step()
    logits = self.model(x_train)
    loss = self.criterion(logits, y_train)
    self.context.backward(loss)
    self.context.step_optimizer(self.optimizer)
    return {'loss': loss}
def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
    """Evaluate on the full validation set and compute AP/AUC-style metrics.

    The validation loader is built with batch_size=1; for each example the
    model's logits are averaged over the first dimension before the loss and
    sigmoid probabilities are computed (clip-level averaging — presumably for
    multi-crop/multi-clip audio evaluation; confirm against the data pipeline).

    Returns:
        dict with average loss, macro mAP (sklearn and per-class mean),
        mean AUC, and d-prime derived from the mean AUC.
    """
    obj = utils.AverageMeter()  # running average of the loss, weighted by clip count
    val_predictions = []
    val_gts = []
    with torch.no_grad():
        for batch in data_loader:
            batch = self.context.to_device(batch)
            (input, target) = batch
            n = input.size(0)
            logits = self.model(input)
            # Average logits across the first (clip) dimension -> one prediction.
            logits = logits.mean(0).unsqueeze(0)
            loss = self.criterion(logits, target)
            obj.update(loss, n)
            logits_sigmoid = torch.sigmoid(logits)
            val_predictions.append(logits_sigmoid.detach().cpu().numpy()[0])
            val_gts.append(target.detach().cpu().numpy()[0])
    val_preds = np.asarray(val_predictions).astype('float32')
    val_gts = np.asarray(val_gts).astype('int32')
    # Macro-averaged AP straight from sklearn, plus per-class stats from the
    # project's calculate_stats helper (AP and AUC per class).
    map_value = average_precision_score(val_gts, val_preds, average='macro')
    stats = calculate_stats(val_preds, val_gts)
    mAP = np.mean([stat['AP'] for stat in stats])
    mAUC = np.mean([stat['auc'] for stat in stats])
    results = {'test_loss': obj.avg, 'test_mAUC': mAUC, 'test_mAP': mAP, 'mAP_value': map_value, 'dPrime': d_prime(mAUC)}
    return results
|
class MixedOp(nn.Module):
    """Softmax-weighted mixture over a subset of candidate operations.

    ``dropped_mixed_ops`` holds the already-sampled candidate modules;
    ``branch_indices`` maps each of them back to its row in ``alphas`` and
    ``mixed_sub_obj`` (the per-op cost estimates).
    """

    def __init__(self, dropped_mixed_ops, softmax_temp=1.0):
        super(MixedOp, self).__init__()
        self.softmax_temp = softmax_temp
        self._ops = nn.ModuleList(dropped_mixed_ops)

    def forward(self, x, alphas, branch_indices, mixed_sub_obj):
        """Return (weighted op output, weighted sub-objective cost)."""
        selected_alphas = [alphas[idx] for idx in branch_indices]
        op_weights = F.softmax(torch.stack(selected_alphas) / self.softmax_temp, dim=-1)
        mixed_out = sum(w * op(x) for w, op in zip(op_weights, self._ops))
        mixed_cost = sum(w * mixed_sub_obj[idx] for w, idx in zip(op_weights, branch_indices))
        return (mixed_out, mixed_cost)
|
class HeadLayer(nn.Module):
    """Head of a block: one MixedOp branch per candidate input, merged by betas."""

    def __init__(self, dropped_mixed_ops, softmax_temp=1.0):
        super(HeadLayer, self).__init__()
        self.head_branches = nn.ModuleList(
            MixedOp(ops, softmax_temp) for ops in dropped_mixed_ops
        )

    def forward(self, inputs, betas, alphas, head_index, head_sub_obj):
        """Return (beta-weighted sum of branch outputs, list of branch costs)."""
        head_data = []
        count_sub_obj = []
        branch_iter = zip(inputs, self.head_branches, alphas, head_index, head_sub_obj)
        for branch_in, branch, alpha, indices, branch_cost in branch_iter:
            out, cost = branch(branch_in, alpha, indices, branch_cost)
            head_data.append(out)
            count_sub_obj.append(cost)
        merged = sum(beta * out for beta, out in zip(betas, head_data))
        return (merged, count_sub_obj)
|
class StackLayers(nn.Module):
    """Sequential stack of MixedOps after a head layer; ``None`` when empty."""

    def __init__(self, num_block_layers, dropped_mixed_ops, softmax_temp=1.0):
        super(StackLayers, self).__init__()
        if num_block_layers == 0:
            self.stack_layers = None
        else:
            self.stack_layers = nn.ModuleList(
                MixedOp(dropped_mixed_ops[layer_id], softmax_temp)
                for layer_id in range(num_block_layers)
            )

    def forward(self, x, alphas, stack_index, stack_sub_obj):
        """Return (stack output, accumulated cost); passthrough with 0 cost when empty."""
        if self.stack_layers is None:
            return (x, 0)
        total_cost = 0
        layer_iter = zip(self.stack_layers, alphas, stack_index, stack_sub_obj)
        for layer, alpha, indices, layer_cost in layer_iter:
            x, cost = layer(x, alpha, indices, layer_cost)
            total_cost = total_cost + cost
        return (x, total_cost)
|
class Block(nn.Module):
    """Dropped-network block: multi-input head layer followed by stacked layers."""

    def __init__(self, num_block_layers, dropped_mixed_ops, softmax_temp=1.0):
        super(Block, self).__init__()
        self.head_layer = HeadLayer(dropped_mixed_ops[0], softmax_temp)
        self.stack_layers = StackLayers(num_block_layers, dropped_mixed_ops[1], softmax_temp)

    def forward(self, inputs, betas, head_alphas, stack_alphas, head_index, stack_index, block_sub_obj):
        """Return (block output, [head costs, stack cost])."""
        head_out, head_sub_obj = self.head_layer(inputs, betas, head_alphas, head_index, block_sub_obj[0])
        stack_out, stack_sub_obj = self.stack_layers(head_out, stack_alphas, stack_index, block_sub_obj[1])
        return (stack_out, [head_sub_obj, stack_sub_obj])
|
class Dropped_Network(nn.Module):
    """A sub-network sampled ("dropped") from a super network.

    Modules are shared (not copied) with ``super_model``, so training this
    network updates the super network's weights. Only the candidate ops
    selected by ``alpha_head_index`` / ``alpha_stack_index`` are kept in each
    MixedOp. Requires CUDA (cost tensors are created with ``.cuda()``).
    """

    def __init__(self, super_model, alpha_head_index=None, alpha_stack_index=None, softmax_temp=1.0):
        super(Dropped_Network, self).__init__()
        self.softmax_temp = softmax_temp
        # Shared sub-modules from the super network.
        self.input_block = super_model.input_block
        if hasattr(super_model, 'head_block'):
            self.head_block = super_model.head_block
        self.conv1_1_block = super_model.conv1_1_block
        self.global_pooling = super_model.global_pooling
        self.classifier = super_model.classifier
        # Architecture parameters, also shared with the super network.
        self.alpha_head_weights = super_model.alpha_head_weights
        self.alpha_stack_weights = super_model.alpha_stack_weights
        self.beta_weights = super_model.beta_weights
        # Sampled candidate-op indices; fall back to the super model's current sample.
        self.alpha_head_index = (alpha_head_index if (alpha_head_index is not None) else super_model.alpha_head_index)
        self.alpha_stack_index = (alpha_stack_index if (alpha_stack_index is not None) else super_model.alpha_stack_index)
        self.config = super_model.config
        self.input_configs = super_model.input_configs
        self.output_configs = super_model.output_configs
        self.sub_obj_list = super_model.sub_obj_list
        self.blocks = nn.ModuleList()
        for (i, block) in enumerate(super_model.blocks):
            input_config = self.input_configs[i]
            dropped_mixed_ops = []
            # Keep only the sampled candidate ops of each head branch.
            head_mixed_ops = []
            for (j, head_index) in enumerate(self.alpha_head_index[i]):
                head_mixed_ops.append([block.head_layer.head_branches[j]._ops[k] for k in head_index])
            dropped_mixed_ops.append(head_mixed_ops)
            # Keep only the sampled candidate ops of each stack layer.
            stack_mixed_ops = []
            for (j, stack_index) in enumerate(self.alpha_stack_index[i]):
                stack_mixed_ops.append([block.stack_layers.stack_layers[j]._ops[k] for k in stack_index])
            dropped_mixed_ops.append(stack_mixed_ops)
            self.blocks.append(Block(input_config['num_stack_layers'], dropped_mixed_ops))

    def forward(self, x):
        """
        To approximate the total sub_obj (latency/flops), we firstly create the
        obj list for blocks as follows:
            [[[head_flops_1, head_flops_2, ...], stack_flops], ...]
        Then we compute the whole obj approximation from the end to the
        beginning. For block b,
            flops'_b = sum(beta_{bi} * (head_flops_{bi} + stack_flops_{i}) for i in out_idx[b])
        The total flops equals flops'_0.
        """
        sub_obj_list = []
        block_datas = []
        branch_weights = []
        for betas in self.beta_weights:
            branch_weights.append(F.softmax((betas / self.softmax_temp), dim=(- 1)))
        block_data = self.input_block(x)
        if hasattr(self, 'head_block'):
            block_data = self.head_block(block_data)
        block_datas.append(block_data)
        # Slot 0: cost of the input stem (sub_obj_list[0] of the super network).
        sub_obj_list.append([[], torch.tensor(self.sub_obj_list[0]).cuda()])
        for i in range((len(self.blocks) + 1)):
            config = self.input_configs[i]
            inputs = [block_datas[i] for i in config['in_block_idx']]
            betas = [branch_weights[block_id][beta_id] for (block_id, beta_id) in zip(config['in_block_idx'], config['beta_idx'])]
            if (i == len(self.blocks)):
                # Last iteration handles the final 1x1 conv block.
                (block_data, block_sub_obj) = self.conv1_1_block(inputs, betas, self.sub_obj_list[2])
            else:
                (block_data, block_sub_obj) = self.blocks[i](inputs, betas, self.alpha_head_weights[i], self.alpha_stack_weights[i], self.alpha_head_index[i], self.alpha_stack_index[i], self.sub_obj_list[1][i])
            block_datas.append(block_data)
            sub_obj_list.append(block_sub_obj)
        out = self.global_pooling(block_datas[(- 1)])
        logits = self.classifier(out.view(out.size(0), (- 1)))
        # Accumulate the expected cost backwards through the DAG, per the
        # recurrence in the docstring above.
        for (i, out_config) in enumerate(self.output_configs[::(- 1)]):
            block_id = ((len(self.output_configs) - i) - 1)
            sum_obj = []
            for (j, out_id) in enumerate(out_config['out_id']):
                head_id = self.input_configs[(out_id - 1)]['in_block_idx'].index(block_id)
                head_obj = sub_obj_list[out_id][0][head_id]
                stack_obj = sub_obj_list[out_id][1]
                sub_obj_j = (branch_weights[block_id][j] * (head_obj + stack_obj))
                sum_obj.append(sub_obj_j)
            sub_obj_list[((- i) - 2)][1] += sum(sum_obj)
        # sub_obj_list[-1] of the super network is the classifier cost (it is
        # appended last by get_cost_list), added on top of the accumulated cost.
        net_sub_obj = (torch.tensor(self.sub_obj_list[(- 1)]).cuda() + sub_obj_list[0][1])
        return (logits, net_sub_obj.expand(1))

    @property
    def arch_parameters(self):
        # All architecture parameters: betas + head alphas + stack alphas.
        arch_params = nn.ParameterList()
        arch_params.extend(self.beta_weights)
        arch_params.extend(self.alpha_head_weights)
        arch_params.extend(self.alpha_stack_weights)
        return arch_params

    @property
    def arch_alpha_params(self):
        # Alpha (op-choice) parameters only, excluding the branch betas.
        alpha_params = nn.ParameterList()
        alpha_params.extend(self.alpha_head_weights)
        alpha_params.extend(self.alpha_stack_weights)
        return alpha_params
|
class Block(nn.Module):
    """Derived-network block: a head op followed by a sequential stack of ops."""

    def __init__(self, in_ch, block_ch, head_op, stack_ops, stride):
        super(Block, self).__init__()
        self.head_layer = OPS[head_op](in_ch, block_ch, stride, affine=True, track_running_stats=True)
        stack_modules = [
            OPS[op_name](block_ch, block_ch, 1, affine=True, track_running_stats=True)
            for op_name in stack_ops
        ]
        self.stack_layers = nn.Sequential(*stack_modules)

    def forward(self, x):
        return self.stack_layers(self.head_layer(x))
|
class Conv1_1_Block(nn.Module):
    """1x1 conv + BatchNorm + ReLU6 projection block (derived network)."""

    def __init__(self, in_ch, block_ch):
        super(Conv1_1_Block, self).__init__()
        self.conv1_1 = nn.Sequential(
            nn.Conv2d(in_channels=in_ch, out_channels=block_ch, kernel_size=1,
                      stride=1, padding=0, bias=False),
            nn.BatchNorm2d(block_ch),
            nn.ReLU6(inplace=True),
        )

    def forward(self, x):
        return self.conv1_1(x)
|
class MBV2_Net(nn.Module):
    """MobileNetV2-style fixed network built from a parsed net_config."""

    def __init__(self, net_config, task='cifar10', config=None):
        """
        net_config=[[in_ch, out_ch], head_op, [stack_ops], num_stack_layers, stride]
        """
        super(MBV2_Net, self).__init__()
        self.config = config
        self.net_config = parse_net_config(net_config)
        self.in_chs = self.net_config[0][0][0]
        self.dataset = task
        # (num_classes, input_channels) per supported task.
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3), 'smnist': (10, 1), 'cifar100': (100, 3), 'scifar100': (100, 3)}
        (n_classes, in_channels) = dataset_hypers[self.dataset]
        self._num_classes = n_classes
        self.input_block = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=self.in_chs, kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d(self.in_chs), nn.ReLU6(inplace=True))
        self.blocks = nn.ModuleList()
        # NOTE(review): the loop variable shadows the `config` parameter
        # (harmless here since the parameter was already stored on self).
        for config in self.net_config:
            # 'conv1_1' entries are handled by the dedicated block below.
            if (config[1] == 'conv1_1'):
                continue
            self.blocks.append(Block(config[0][0], config[0][1], config[1], config[2], config[(- 1)]))
        if (self.net_config[(- 1)][1] == 'conv1_1'):
            block_last_dim = self.net_config[(- 1)][0][0]
            last_dim = self.net_config[(- 1)][0][1]
        else:
            block_last_dim = self.net_config[(- 1)][0][1]
            # NOTE(review): `last_dim` is never assigned on this branch, so the
            # next line raises NameError for configs that do not end with a
            # 'conv1_1' entry — presumably all derived configs do (ArchGenerate
            # always appends one); confirm before relying on this path.
        self.conv1_1_block = Conv1_1_Block(block_last_dim, last_dim)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(last_dim, self._num_classes)
        self.init_model()
        self.set_bn_param(0.1, 0.001)

    def forward(self, x):
        block_data = self.input_block(x)
        for (i, block) in enumerate(self.blocks):
            block_data = block(block_data)
        block_data = self.conv1_1_block(block_data)
        out = self.global_pooling(block_data)
        logits = self.classifier(out.view(out.size(0), (- 1)))
        return logits

    def init_model(self, model_init='he_fout', init_div_groups=True):
        """He-initialize convolutions (fan-out or fan-in), reset norm layers and biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if (model_init == 'he_fout'):
                    n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                    if init_div_groups:
                        n /= m.groups
                    m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                elif (model_init == 'he_fin'):
                    n = ((m.kernel_size[0] * m.kernel_size[1]) * m.in_channels)
                    if init_div_groups:
                        n /= m.groups
                    m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                else:
                    raise NotImplementedError
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                if (m.bias is not None):
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def set_bn_param(self, bn_momentum, bn_eps):
        """Set momentum and eps on every BatchNorm2d in the network."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.momentum = bn_momentum
                m.eps = bn_eps
        return
|
class RES_Net(nn.Module):
    """ResNet-style fixed network built from a parsed net_config."""

    def __init__(self, net_config, task='cifar10', config=None):
        """
        net_config=[[in_ch, out_ch], head_op, [stack_ops], num_stack_layers, stride]
        """
        super(RES_Net, self).__init__()
        self.config = config
        self.net_config = parse_net_config(net_config)
        self.in_chs = self.net_config[0][0][0]
        self.dataset = task
        # (num_classes, input_channels) per supported task.
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3), 'smnist': (10, 1), 'cifar100': (100, 3), 'scifar100': (100, 3), 'audio': (200, 1)}
        (n_classes, in_channels) = dataset_hypers[self.dataset]
        self._num_classes = n_classes
        self.input_block = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=self.in_chs,
                      kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(self.in_chs),
            nn.ReLU6(inplace=True),
        )
        self.blocks = nn.ModuleList(
            Block(block_cfg[0][0], block_cfg[0][1], block_cfg[1], block_cfg[2], block_cfg[(- 1)])
            for block_cfg in self.net_config
        )
        self.global_pooling = nn.AdaptiveAvgPool2d((1, 1))
        last_cfg = self.net_config[(- 1)]
        if last_cfg[1] == 'bottle_neck':
            # Bottleneck blocks expand their output channels by a factor of 4.
            last_dim = last_cfg[0][(- 1)] * 4
        else:
            last_dim = last_cfg[0][1]
        self.classifier = nn.Linear(last_dim, self._num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)) and m.affine:
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        feat = self.input_block(x)
        for block in self.blocks:
            feat = block(feat)
        pooled = torch.flatten(self.global_pooling(feat), 1)
        return self.classifier(pooled)
|
class MixedOp(nn.Module):
    """Search-phase container holding every candidate op for one edge."""

    def __init__(self, C_in, C_out, stride, primitives):
        super(MixedOp, self).__init__()
        self._ops = nn.ModuleList(
            OPS[name](C_in, C_out, stride, affine=False, track_running_stats=True)
            for name in primitives
        )
|
class HeadLayer(nn.Module):
    """Search-phase head: one MixedOp branch per candidate input of a block."""

    def __init__(self, in_chs, ch, strides, config):
        super(HeadLayer, self).__init__()
        self.head_branches = nn.ModuleList(
            MixedOp(in_ch, ch, stride, config.search_params.PRIMITIVES_head)
            for in_ch, stride in zip(in_chs, strides)
        )
|
class StackLayers(nn.Module):
    """Search-phase stack of MixedOps; ``None`` when the block has no stack layers."""

    def __init__(self, ch, num_block_layers, config, primitives):
        super(StackLayers, self).__init__()
        if num_block_layers == 0:
            self.stack_layers = None
        else:
            self.stack_layers = nn.ModuleList(
                MixedOp(ch, ch, 1, primitives) for _ in range(num_block_layers)
            )
|
class Block(nn.Module):
    """Search-phase super-net block: multi-input head layer plus stacked MixedOps."""

    def __init__(self, in_chs, block_ch, strides, num_block_layers, config):
        super(Block, self).__init__()
        assert len(in_chs) == len(strides)
        self.head_layer = HeadLayer(in_chs, block_ch, strides, config)
        self.stack_layers = StackLayers(block_ch, num_block_layers, config,
                                        config.search_params.PRIMITIVES_stack)
|
class Conv1_1_Branch(nn.Module):
    """1x1 conv + non-affine BatchNorm + ReLU6 projection for one input branch."""

    def __init__(self, in_ch, block_ch):
        super(Conv1_1_Branch, self).__init__()
        self.conv1_1 = nn.Sequential(
            nn.Conv2d(in_channels=in_ch, out_channels=block_ch, kernel_size=1,
                      stride=1, padding=0, bias=False),
            nn.BatchNorm2d(block_ch, affine=False, track_running_stats=True),
            nn.ReLU6(inplace=True),
        )

    def forward(self, x):
        return self.conv1_1(x)
|
class Conv1_1_Block(nn.Module):
    """Final search-phase block: one 1x1-conv branch per candidate input,
    merged with a softmax over the beta weights."""

    def __init__(self, in_chs, block_ch):
        super(Conv1_1_Block, self).__init__()
        self.conv1_1_branches = nn.ModuleList(
            Conv1_1_Branch(in_ch, block_ch) for in_ch in in_chs
        )

    def forward(self, inputs, betas, block_sub_obj):
        """Return (beta-weighted sum of branch outputs, [block cost, 0])."""
        branch_weights = F.softmax(torch.stack(betas), dim=-1)
        merged = sum(
            weight * branch(data)
            for data, branch, weight in zip(inputs, self.conv1_1_branches, branch_weights)
        )
        return (merged, [block_sub_obj, 0])
|
class Network(nn.Module):
    """Base DenseNAS-style search space.

    Builds the block/channel topology from ``config.search_params.net_scale``
    (output/input configs), owns the architecture parameters (betas for branch
    choice, alphas for op choice), samples candidate branches, and can measure
    per-op cost (flops/latency) for the sub-objective.
    Subclasses construct the actual modules (stem, blocks, conv1_1, classifier).
    """

    def __init__(self, init_ch, dataset, config):
        super(Network, self).__init__()
        self.config = config
        self._C_input = init_ch
        self._head_dim = self.config.optim.head_dim
        self._dataset = dataset
        # NOTE(review): number of classes is hard-coded to 10 here; subclasses
        # that support other tasks build their own classifier — confirm.
        self._num_classes = 10
        self.initialize()

    def initialize(self):
        """Build topology configs and architecture parameters, then take an initial sample."""
        self._init_block_config()
        self._create_output_list()
        self._create_input_list()
        self._init_betas()
        self._init_alphas()
        self._init_sample_branch()

    def init_model(self, model_init='he_fout', init_div_groups=True):
        """He-initialize convolutions (fan-out or fan-in), reset affine norms and biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if (model_init == 'he_fout'):
                    n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                    if init_div_groups:
                        n /= m.groups
                    m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                elif (model_init == 'he_fin'):
                    n = ((m.kernel_size[0] * m.kernel_size[1]) * m.in_channels)
                    if init_div_groups:
                        n /= m.groups
                    m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                else:
                    raise NotImplementedError
            elif isinstance(m, nn.BatchNorm2d):
                if (m.affine == True):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                if (m.bias is not None):
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                if (m.affine == True):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()

    def set_bn_param(self, bn_momentum, bn_eps):
        """Set momentum and eps on every BatchNorm2d in the network."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.momentum = bn_momentum
                m.eps = bn_eps
        return

    def _init_betas(self):
        """
        beta weights for the output ch choices in the head layer of the block
        """
        self.beta_weights = nn.ParameterList()
        for block in self.output_configs:
            num_betas = len(block['out_chs'])
            # Small random init so the softmax starts near-uniform.
            self.beta_weights.append(nn.Parameter((0.001 * torch.randn(num_betas))))

    def _init_alphas(self):
        """
        alpha weights for the op type in the block
        """
        self.alpha_head_weights = nn.ParameterList()
        self.alpha_stack_weights = nn.ParameterList()
        # The last input config is the conv1_1 block, which has no alphas.
        for block in self.input_configs[:(- 1)]:
            num_head_alpha = len(block['in_block_idx'])
            self.alpha_head_weights.append(nn.Parameter((0.001 * torch.randn(num_head_alpha, len(self.config.search_params.PRIMITIVES_head)))))
            num_layers = block['num_stack_layers']
            self.alpha_stack_weights.append(nn.Parameter((0.001 * torch.randn(num_layers, len(self.config.search_params.PRIMITIVES_stack)))))

    @property
    def arch_parameters(self):
        # All architecture parameters: betas + head alphas + stack alphas.
        arch_params = nn.ParameterList()
        arch_params.extend(self.beta_weights)
        arch_params.extend(self.alpha_head_weights)
        arch_params.extend(self.alpha_stack_weights)
        return arch_params

    @property
    def arch_beta_params(self):
        return self.beta_weights

    @property
    def arch_alpha_params(self):
        # Alpha (op-choice) parameters only, excluding the branch betas.
        alpha_params = nn.ParameterList()
        alpha_params.extend(self.alpha_head_weights)
        alpha_params.extend(self.alpha_stack_weights)
        return alpha_params

    def display_arch_params(self, display=True):
        """Softmax all arch parameters, optionally log them, and return them as lists."""
        branch_weights = []
        head_op_weights = []
        stack_op_weights = []
        for betas in self.beta_weights:
            branch_weights.append(F.softmax(betas, dim=(- 1)))
        for head_alpha in self.alpha_head_weights:
            head_op_weights.append(F.softmax(head_alpha, dim=(- 1)))
        for stack_alpha in self.alpha_stack_weights:
            stack_op_weights.append(F.softmax(stack_alpha, dim=(- 1)))
        if display:
            logging.info(('branch_weights \n' + '\n'.join(map(str, branch_weights))))
            if (len(self.config.search_params.PRIMITIVES_head) > 1):
                logging.info(('head_op_weights \n' + '\n'.join(map(str, head_op_weights))))
            logging.info(('stack_op_weights \n' + '\n'.join(map(str, stack_op_weights))))
        return ([x.tolist() for x in branch_weights], [x.tolist() for x in head_op_weights], [x.tolist() for x in stack_op_weights])

    def _init_sample_branch(self):
        # Initial sample with the 'all' policy (training=False keeps every candidate).
        (_, _) = self.sample_branch('head', 1, training=False)
        (_, _) = self.sample_branch('stack', 1, training=False)

    def sample_branch(self, params_type, sample_num, training=True, search_stage=1, if_sort=True):
        """Sample candidate-op indices for the head or stack alphas.

        Side effect: stores the sampled indices on ``self.alpha_head_index`` /
        ``self.alpha_stack_index``. Returns (gathered alpha values, indices).
        Policy: configured policy in search stage 1, 'uniform' otherwise, and
        'all' when not training or when sample_num covers all candidates.
        """
        def sample(param, weight, sample_num, sample_policy='prob', if_sort=True):
            if (sample_num >= weight.shape[(- 1)]):
                sample_policy = 'all'
            assert (param.shape == weight.shape)
            assert (sample_policy in ['prob', 'uniform', 'all'])
            if (param.shape[0] == 0):
                return ([], [])
            if (sample_policy == 'prob'):
                sampled_index = torch.multinomial(weight, num_samples=sample_num, replacement=False)
            elif (sample_policy == 'uniform'):
                weight = torch.ones_like(weight)
                sampled_index = torch.multinomial(weight, num_samples=sample_num, replacement=False)
            else:
                # 'all': keep every candidate index for every row.
                sampled_index = torch.arange(start=0, end=weight.shape[(- 1)], step=1, device=weight.device).repeat(param.shape[0], 1)
            if if_sort:
                (sampled_index, _) = torch.sort(sampled_index, descending=False)
            sampled_param_old = torch.gather(param, dim=(- 1), index=sampled_index)
            return (sampled_param_old, sampled_index)
        if (params_type == 'head'):
            params = self.alpha_head_weights
        elif (params_type == 'stack'):
            params = self.alpha_stack_weights
        else:
            raise TypeError
        weights = []
        sampled_params_old = []
        sampled_indices = []
        if training:
            sample_policy = (self.config.search_params.sample_policy if (search_stage == 1) else 'uniform')
        else:
            sample_policy = 'all'
        for param in params:
            weights.append(F.softmax(param, dim=(- 1)))
        for (param, weight) in zip(params, weights):
            (sampled_param_old, sampled_index) = sample(param, weight, sample_num, sample_policy, if_sort)
            sampled_params_old.append(sampled_param_old)
            sampled_indices.append(sampled_index)
        if (params_type == 'head'):
            self.alpha_head_index = sampled_indices
        elif (params_type == 'stack'):
            self.alpha_stack_index = sampled_indices
        return (sampled_params_old, sampled_indices)

    def _init_block_config(self):
        """Read block scale lists from config and append the conv1_1 tail entry.

        NOTE(review): this appends in place to the config's lists, so calling
        it twice would grow them — confirm it is only run once per config.
        """
        self.block_chs = self.config.search_params.net_scale.chs
        self.block_fm_sizes = self.config.search_params.net_scale.fm_sizes
        self.num_blocks = (len(self.block_chs) - 1)
        self.num_block_layers = self.config.search_params.net_scale.num_layers
        if hasattr(self.config.search_params.net_scale, 'stage'):
            self.block_stage = self.config.search_params.net_scale.stage
        self.block_chs.append(self.config.optim.last_dim)
        self.block_fm_sizes.append(self.block_fm_sizes[(- 1)])
        self.num_block_layers.append(0)

    def _create_output_list(self):
        """
        Generate the output config of each block, which contains:
            'ch': the channel number of the block
            'out_chs': the possible output channel numbers
            'strides': the corresponding stride
        """
        self.output_configs = []
        for i in range((len(self.block_chs) - 1)):
            # NOTE(review): `stage` is only bound when block_stage exists; the
            # adjoin_connect_nums lookup below would raise otherwise — confirm
            # configs without 'stage' are not used with this class.
            if hasattr(self, 'block_stage'):
                stage = self.block_stage[i]
            output_config = {'ch': self.block_chs[i], 'fm_size': self.block_fm_sizes[i], 'out_chs': [], 'out_fms': [], 'strides': [], 'out_id': [], 'num_stack_layers': self.num_block_layers[i]}
            for j in range(self.config.search_params.adjoin_connect_nums[stage]):
                out_index = ((i + j) + 1)
                if (out_index >= len(self.block_chs)):
                    break
                if hasattr(self, 'block_stage'):
                    block_stage = getattr(self, 'block_stage')
                    # Only connect blocks at most one stage apart.
                    if ((block_stage[out_index] - block_stage[i]) > 1):
                        break
                fm_size_ratio = (self.block_fm_sizes[i] / self.block_fm_sizes[out_index])
                if (fm_size_ratio == 2):
                    output_config['strides'].append(2)
                elif (fm_size_ratio == 1):
                    output_config['strides'].append(1)
                else:
                    break
                output_config['out_chs'].append(self.block_chs[out_index])
                output_config['out_fms'].append(self.block_fm_sizes[out_index])
                output_config['out_id'].append(out_index)
            self.output_configs.append(output_config)
        logging.info(('Network output configs: \n' + '\n'.join(map(str, self.output_configs))))

    def _create_input_list(self):
        """
        Generate the input config of each block for constructing the whole network.
        Each config dict contains:
            'ch': the channel number of the block
            'in_chs': all the possible input channel numbers
            'strides': the corresponding stride
            'in_block_idx': the index of the input block
            'beta_idx': the corresponding beta weight index.
        """
        self.input_configs = []
        for i in range(1, len(self.block_chs)):
            input_config = {'ch': self.block_chs[i], 'fm_size': self.block_fm_sizes[i], 'in_chs': [], 'in_fms': [], 'strides': [], 'in_block_idx': [], 'beta_idx': [], 'num_stack_layers': self.num_block_layers[i]}
            # Invert output_configs: every block listing i as an output becomes an input.
            for j in range(i):
                in_index = ((i - j) - 1)
                if (in_index < 0):
                    break
                output_config = self.output_configs[in_index]
                if (i in output_config['out_id']):
                    beta_idx = output_config['out_id'].index(i)
                    input_config['in_block_idx'].append(in_index)
                    input_config['in_chs'].append(output_config['ch'])
                    input_config['in_fms'].append(output_config['fm_size'])
                    input_config['beta_idx'].append(beta_idx)
                    input_config['strides'].append(output_config['strides'][beta_idx])
                else:
                    continue
            self.input_configs.append(input_config)
        logging.info(('Network input configs: \n' + '\n'.join(map(str, self.input_configs))))

    def get_cost_list(self, data_shape, cost_type='flops', use_gpu=True, meas_times=1000):
        """Measure per-op cost (flops or latency) for every candidate op.

        Returns (cost_list, total_cost) where cost_list mirrors the network
        structure: [stem_cost, per-block [head branch op costs, stack op costs],
        conv1_1 branch costs, classifier cost].
        """
        cost_list = []
        block_datas = []
        total_cost = 0
        if (cost_type == 'flops'):
            cost_func = (lambda module, data: comp_multadds_fw(module, data, use_gpu))
        elif (cost_type == 'latency'):
            cost_func = (lambda module, data: latency_measure_fw(module, data, meas_times))
        else:
            raise NotImplementedError
        if (len(data_shape) == 3):
            input_data = torch.randn(((1,) + tuple(data_shape)))
        else:
            input_data = torch.randn(tuple(data_shape))
        if use_gpu:
            input_data = input_data.cuda()
        (cost, block_data) = cost_func(self.input_block, input_data)
        cost_list.append(cost)
        block_datas.append(block_data)
        total_cost += cost
        if hasattr(self, 'head_block'):
            # Fold the head block's cost into the stem entry.
            (cost, block_data) = cost_func(self.head_block, block_data)
            cost_list[0] += cost
            block_datas[0] = block_data
        block_flops = []
        for (block_id, block) in enumerate(self.blocks):
            input_config = self.input_configs[block_id]
            inputs = [block_datas[i] for i in input_config['in_block_idx']]
            head_branch_flops = []
            for (branch_id, head_branch) in enumerate(block.head_layer.head_branches):
                op_flops = []
                for op in head_branch._ops:
                    (cost, block_data) = cost_func(op, inputs[branch_id])
                    op_flops.append(cost)
                    total_cost += cost
                head_branch_flops.append(op_flops)
            stack_layer_flops = []
            if (block.stack_layers.stack_layers is not None):
                for stack_layer in block.stack_layers.stack_layers:
                    op_flops = []
                    for op in stack_layer._ops:
                        (cost, block_data) = cost_func(op, block_data)
                        # Optionally regularize Skip ops to a tenth of the
                        # first op's cost instead of their (near-zero) cost.
                        if (isinstance(op, operations.Skip) and self.config.optim.sub_obj.skip_reg):
                            cost = (op_flops[0] / 10.0)
                        op_flops.append(cost)
                        total_cost += cost
                    stack_layer_flops.append(op_flops)
            block_flops.append([head_branch_flops, stack_layer_flops])
            block_datas.append(block_data)
        cost_list.append(block_flops)
        conv1_1_flops = []
        input_config = self.input_configs[(- 1)]
        inputs = [block_datas[i] for i in input_config['in_block_idx']]
        for (branch_id, branch) in enumerate(self.conv1_1_block.conv1_1_branches):
            (cost, block_data) = cost_func(branch, inputs[branch_id])
            conv1_1_flops.append(cost)
            total_cost += cost
        block_datas.append(block_data)
        cost_list.append(conv1_1_flops)
        out = block_datas[(- 1)]
        out = self.global_pooling(out)
        (cost, out) = cost_func(self.classifier, out.view(out.size(0), (- 1)))
        cost_list.append(cost)
        total_cost += cost
        return (cost_list, total_cost)
|
class Network(BaseSearchSpace):
    """MBV2-style super network: fixed stem + mbconv head block + searched blocks."""

    def __init__(self, init_ch, dataset, config):
        super(Network, self).__init__(init_ch, dataset, config)
        self.input_block = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=self._C_input, kernel_size=3,
                      stride=2, padding=1, bias=False),
            nn.BatchNorm2d(self._C_input, affine=False, track_running_stats=True),
            nn.ReLU6(inplace=True),
        )
        self.head_block = OPS['mbconv_k3_t1'](self._C_input, self._head_dim, 1,
                                              affine=False, track_running_stats=True)
        self.blocks = nn.ModuleList()
        for cfg in self.input_configs[:self.num_blocks]:
            self.blocks.append(
                Block(cfg['in_chs'], cfg['ch'], cfg['strides'], cfg['num_stack_layers'], self.config)
            )
        self.conv1_1_block = Conv1_1_Block(self.input_configs[(- 1)]['in_chs'], self.config.optim.last_dim)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(self.config.optim.last_dim, self._num_classes)
        self.init_model(model_init=config.optim.init_mode)
        self.set_bn_param(self.config.optim.bn_momentum, self.config.optim.bn_eps)
|
class Network(BaseSearchSpace):
    """ResNet-style super network with a task-dependent stem and classifier."""

    def __init__(self, init_ch, dataset, config, groups=1, base_width=64, dilation=1, norm_layer=None):
        super(Network, self).__init__(init_ch, dataset, config)
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
        # (num_classes, input_channels) per supported task.
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3), 'smnist': (10, 1), 'cifar100': (100, 3), 'scifar100': (100, 3), 'audio': (200, 1)}
        n_classes, in_channels = dataset_hypers[dataset]
        self.input_block = nn.Sequential(
            nn.Conv2d(in_channels, self._C_input, kernel_size=3, stride=2, padding=1, bias=False),
            norm_layer(self._C_input),
            nn.ReLU(inplace=True),
        )
        self.blocks = nn.ModuleList()
        for cfg in self.input_configs[:self.num_blocks]:
            self.blocks.append(
                Block(cfg['in_chs'], cfg['ch'], cfg['strides'], cfg['num_stack_layers'], self.config)
            )
        # Bottleneck primitives expand channels by a factor of 4.
        if 'bottle_neck' in self.config.search_params.PRIMITIVES_stack:
            conv1_1_input_dim = [ch * 4 for ch in self.input_configs[(- 1)]['in_chs']]
            last_dim = self.config.optim.last_dim * 4
        else:
            conv1_1_input_dim = self.input_configs[(- 1)]['in_chs']
            last_dim = self.config.optim.last_dim
        self.conv1_1_block = Conv1_1_Block(conv1_1_input_dim, last_dim)
        self.global_pooling = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Linear(last_dim, n_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)) and m.affine:
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
|
class BaseArchGenerate(object):
    """Base helper that derives a discrete architecture from supernet weights."""

    def __init__(self, super_network, config):
        self.config = config
        self.num_blocks = len(super_network.block_chs)
        self.super_chs = super_network.block_chs
        self.input_configs = super_network.input_configs

    def update_arch_params(self, betas, head_alphas, stack_alphas):
        """Store the (softmaxed) architecture weights to derive from."""
        self.betas = betas
        self.head_alphas = head_alphas
        self.stack_alphas = stack_alphas

    def derive_chs(self):
        """
        using viterbi algorithm to choose the best path of the super net
        """
        # best[k] = [predecessor block id, probability of the best path to k]
        best = [[0, 1]]
        for input_config in self.input_configs:
            winner = [None, 0]
            for in_block_id, beta_id in zip(input_config['in_block_idx'], input_config['beta_idx']):
                path_prob = best[in_block_id][1] * self.betas[in_block_id][beta_id]
                if path_prob > winner[1]:
                    winner = [in_block_id, path_prob]
            best.append(winner)
        # Backtrack from the last block to block 0.
        idx = len(best) - 1
        path = [idx]
        while True:
            idx = best[idx][0]
            path.append(idx)
            if idx == 0:
                break
        derived_chs = [self.super_chs[block_id] for block_id in path]
        path = path[::(- 1)]
        derived_chs = derived_chs[::(- 1)]
        return (path, derived_chs)

    def derive_ops(self, alpha, alpha_type):
        """Pick the primitive with the largest weight for a head or stack slot."""
        assert alpha_type in ['head', 'stack']
        if alpha_type == 'head':
            primitives = self.config.search_params.PRIMITIVES_head
        else:
            primitives = self.config.search_params.PRIMITIVES_stack
        return primitives[alpha.index(max(alpha))]

    def derive_archs(self, betas, head_alphas, stack_alphas, if_display=True):
        raise NotImplementedError
|
class ArchGenerate(BaseArchGenerate):
    """Derive an MBV2-style net_config (fixed mbconv stem and conv1_1 tail)."""

    def __init__(self, super_network, config):
        super(ArchGenerate, self).__init__(super_network, config)

    def derive_archs(self, betas, head_alphas, stack_alphas, if_display=True):
        """Turn continuous architecture weights into a concrete net_config.

        Each entry: [[in_ch, out_ch], head_op, [stack_ops], num_stack_layers, stride].
        """
        self.update_arch_params(betas, head_alphas, stack_alphas)
        # Fixed stem entry: mbconv_k3_t1 from init_dim to head_dim, stride 1.
        derived_archs = [[[self.config.optim.init_dim, self.config.optim.head_dim], 'mbconv_k3_t1', [], 0, 1]]
        (ch_path, derived_chs) = self.derive_chs()
        layer_count = 0
        for (i, (ch_idx, ch)) in enumerate(zip(ch_path, derived_chs)):
            # Skip the stem node and the final (conv1_1) node.
            if ((ch_idx == 0) or (i == (len(derived_chs) - 1))):
                continue
            block_idx = (ch_idx - 1)
            input_config = self.input_configs[block_idx]
            head_id = input_config['in_block_idx'].index(ch_path[(i - 1)])
            head_alpha = self.head_alphas[block_idx][head_id]
            head_op = self.derive_ops(head_alpha, 'head')
            stride = input_config['strides'][input_config['in_block_idx'].index(ch_path[(i - 1)])]
            stack_ops = []
            for stack_alpha in self.stack_alphas[block_idx]:
                stack_op = self.derive_ops(stack_alpha, 'stack')
                # skip_connect layers are dropped from the derived stack.
                if (stack_op != 'skip_connect'):
                    stack_ops.append(stack_op)
                    layer_count += 1
            derived_archs.append([[derived_chs[(i - 1)], ch], head_op, stack_ops, len(stack_ops), stride])
        # Fixed tail entry projecting to last_dim.
        derived_archs.append([[derived_chs[(- 2)], self.config.optim.last_dim], 'conv1_1'])
        layer_count += len(derived_archs)
        if if_display:
            logging.info(('Derived arch: \n' + '|\n'.join(map(str, derived_archs))))
            logging.info('Total {} layers.'.format(layer_count))
        return derived_archs
|
class ArchGenerate(BaseArchGenerate):
    """Variant of the architecture deriver that emits only the searched
    blocks (no stem entry and no trailing 1x1-conv entry).

    NOTE(review): this class redefines the identically-named ArchGenerate
    above; when both live in the same module, only this later definition is
    visible to importers.
    """

    def __init__(self, super_network, config):
        super(ArchGenerate, self).__init__(super_network, config)

    def derive_archs(self, betas, head_alphas, stack_alphas, if_display=True):
        """Build the list of derived blocks; optionally log the result."""
        self.update_arch_params(betas, head_alphas, stack_alphas)
        derived_archs = []
        (ch_path, derived_chs) = self.derive_chs()
        layer_count = 0
        for (i, (ch_idx, ch)) in enumerate(zip(ch_path, derived_chs)):
            # Skip the input node (ch_idx == 0) and the terminal node.
            if ((ch_idx == 0) or (i == (len(derived_chs) - 1))):
                continue
            block_idx = (ch_idx - 1)
            input_config = self.input_configs[block_idx]
            # Incoming branch matching the previous node on the channel path.
            head_id = input_config['in_block_idx'].index(ch_path[(i - 1)])
            head_alpha = self.head_alphas[block_idx][head_id]
            head_op = self.derive_ops(head_alpha, 'head')
            # NOTE(review): same .index() lookup as head_id; kept as-is.
            stride = input_config['strides'][input_config['in_block_idx'].index(ch_path[(i - 1)])]
            stack_ops = []
            for stack_alpha in self.stack_alphas[block_idx]:
                stack_op = self.derive_ops(stack_alpha, 'stack')
                # 'skip_connect' removes the stack layer entirely.
                if (stack_op != 'skip_connect'):
                    stack_ops.append(stack_op)
                    layer_count += 1
            derived_archs.append([[derived_chs[(i - 1)], ch], head_op, stack_ops, len(stack_ops), stride])
        # Total layers = kept stack ops + one head layer per derived block.
        layer_count += len(derived_archs)
        if if_display:
            logging.info(('Derived arch: \n' + '|\n'.join(map(str, derived_archs))))
            logging.info('Total {} layers.'.format(layer_count))
        return derived_archs
|
class Optimizer(object):
    """Joint optimizer for a differentiable NAS search.

    Holds two optimizers over the same model: SGD for the network weights
    and Adam for the architecture parameters (alphas and betas), plus the
    branch-sampling and log-prob rescaling bookkeeping around each step.
    """

    def __init__(self, model, criterion, config):
        self.config = config
        # How many branches to sample per step when training the weights.
        self.weight_sample_num = self.config.search_params.weight_sample_num
        self.criterion = criterion
        # Factory wrapping the supernet into a sub-network restricted to the
        # currently sampled branches (Dropped_Network defined elsewhere).
        self.Dropped_Network = (lambda model: Dropped_Network(model, softmax_temp=config.search_params.softmax_temp))
        # Partition parameters: anything not an arch parameter is a weight.
        arch_params_id = list(map(id, model.module.arch_parameters))
        weight_params = filter((lambda p: (id(p) not in arch_params_id)), model.parameters())
        self.weight_optimizer = torch.optim.SGD(weight_params, config.optim.weight.init_lr, momentum=config.optim.weight.momentum, weight_decay=config.optim.weight.weight_decay)
        # Separate learning rates for alpha (op) and beta (path) parameters.
        self.arch_optimizer = torch.optim.Adam([{'params': model.module.arch_alpha_params, 'lr': config.optim.arch.alpha_lr}, {'params': model.module.arch_beta_params, 'lr': config.optim.arch.beta_lr}], betas=(0.5, 0.999), weight_decay=config.optim.arch.weight_decay)

    def arch_step(self, input_valid, target_valid, model, search_stage):
        """One architecture-parameter update on a validation batch.

        Samples 2 branches per head/stack group, optimizes the (optionally
        sub-objective-augmented) loss, then rescales the un-sampled logits
        so the sampling does not bias the softmax distribution.
        Returns (detached logits, loss value, mean sub-objective value).
        """
        (head_sampled_w_old, alpha_head_index) = model.module.sample_branch('head', 2, search_stage=search_stage)
        (stack_sampled_w_old, alpha_stack_index) = model.module.sample_branch('stack', 2, search_stage=search_stage)
        self.arch_optimizer.zero_grad()
        dropped_model = nn.DataParallel(self.Dropped_Network(model))
        (logits, sub_obj) = dropped_model(input_valid)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_valid)
        if self.config.optim.if_sub_obj:
            # Sub-objective (e.g. latency/FLOPs) folded in on a log scale
            # with a configurable base and weighting factor.
            loss_sub_obj = (torch.log(sub_obj) / torch.log(torch.tensor(self.config.optim.sub_obj.log_base)))
            sub_loss_factor = self.config.optim.sub_obj.sub_loss_factor
            loss += (loss_sub_obj * sub_loss_factor)
        loss.backward()
        self.arch_optimizer.step()
        # Undo the distribution shift introduced by updating only the
        # sampled branches (see rescale_arch_params).
        self.rescale_arch_params(head_sampled_w_old, stack_sampled_w_old, alpha_head_index, alpha_stack_index, model)
        return (logits.detach(), loss.item(), sub_obj.item())

    def weight_step(self, *args, **kwargs):
        """Public entry point; delegates to weight_step_."""
        return self.weight_step_(*args, **kwargs)

    def weight_step_(self, input_train, target_train, model, search_stage):
        """One network-weight SGD update on a training batch.

        Returns (detached logits, loss value, mean sub-objective value).
        """
        (_, _) = model.module.sample_branch('head', self.weight_sample_num, search_stage=search_stage)
        (_, _) = model.module.sample_branch('stack', self.weight_sample_num, search_stage=search_stage)
        self.weight_optimizer.zero_grad()
        dropped_model = nn.DataParallel(self.Dropped_Network(model))
        (logits, sub_obj) = dropped_model(input_train)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_train)
        loss.backward()
        self.weight_optimizer.step()
        return (logits.detach(), loss.item(), sub_obj.item())

    def valid_step(self, input_valid, target_valid, model):
        """Forward-only evaluation with a single (non-training) sampled branch.

        Returns (logits, loss value, mean sub-objective value); no gradients
        are applied.
        """
        (_, _) = model.module.sample_branch('head', 1, training=False)
        (_, _) = model.module.sample_branch('stack', 1, training=False)
        dropped_model = nn.DataParallel(self.Dropped_Network(model))
        (logits, sub_obj) = dropped_model(input_valid)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_valid)
        return (logits, loss.item(), sub_obj.item())

    def rescale_arch_params(self, alpha_head_weights_drop, alpha_stack_weights_drop, alpha_head_index, alpha_stack_index, model):
        """Rescale updated arch logits so their softmax mass matches the
        pre-update mass of the sampled subset.

        After the Adam step only the sampled branches' logits changed; adding
        log(old_exp_sum / new_exp_sum) to exactly those entries keeps the
        overall softmax over all branches consistent.
        """

        def comp_rescale_value(old_weights, new_weights, index):
            # Offset that restores the sampled subset's total exp() mass.
            old_exp_sum = old_weights.exp().sum()
            new_drop_arch_params = torch.gather(new_weights, dim=(- 1), index=index)
            new_exp_sum = new_drop_arch_params.exp().sum()
            rescale_value = torch.log((old_exp_sum / new_exp_sum))
            # Scatter the offset onto the sampled positions only.
            rescale_mat = torch.zeros_like(new_weights).scatter_(0, index, rescale_value)
            return (rescale_value, rescale_mat)

        def rescale_params(old_weights, new_weights, indices):
            # Walk the nested [block][branch] structure of the arch weights.
            for (i, (old_weights_block, indices_block)) in enumerate(zip(old_weights, indices)):
                for (j, (old_weights_branch, indices_branch)) in enumerate(zip(old_weights_block, indices_block)):
                    (rescale_value, rescale_mat) = comp_rescale_value(old_weights_branch, new_weights[i][j], indices_branch)
                    # In-place update on .data: deliberately bypasses autograd.
                    new_weights[i][j].data.add_(rescale_mat)
        rescale_params(alpha_head_weights_drop, model.module.alpha_head_weights, alpha_head_index)
        rescale_params(alpha_stack_weights_drop, model.module.alpha_stack_weights, alpha_stack_index)

    def set_param_grad_state(self, stage):
        """Enable gradients only for the parameter group matching `stage`.

        stage == 'Arch'    -> only arch params trainable
        stage == 'Weights' -> only network weights trainable
        anything else      -> everything frozen
        """

        def set_grad_state(params, state):
            for group in params:
                for param in group['params']:
                    param.requires_grad_(state)
        if (stage == 'Arch'):
            state_list = [True, False]
        elif (stage == 'Weights'):
            state_list = [False, True]
        else:
            state_list = [False, False]
        set_grad_state(self.arch_optimizer.param_groups, state_list[0])
        set_grad_state(self.weight_optimizer.param_groups, state_list[1])
|
class Trainer(object):
    """Plain (non-search) training/evaluation loop driver.

    Pulls batches through a data_prefetcher and tracks loss / top-1 / top-5
    accuracy along with batch and data-loading times.
    """

    def __init__(self, train_data, val_data, optimizer=None, criterion=None, scheduler=None, config=None, report_freq=None):
        self.train_data = train_data
        self.val_data = val_data
        self.optimizer = optimizer
        self.criterion = criterion
        self.scheduler = scheduler
        self.config = config
        self.report_freq = report_freq

    def train(self, model, epoch):
        """Run one training epoch.

        Returns (top1_avg, top5_avg, loss_avg, batch_time_avg, data_time_avg).
        """
        objs = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()
        data_time = utils.AverageMeter()
        batch_time = utils.AverageMeter()
        model.train()
        start = time.time()
        prefetcher = data_prefetcher(self.train_data)
        (input, target) = prefetcher.next()
        step = 0
        # prefetcher.next() returns None at end of epoch.
        while (input is not None):
            data_t = (time.time() - start)
            # NOTE(review): scheduler stepped per batch, before optimizer.step()
            # — the pre-PyTorch-1.1 ordering; confirm intended with the
            # installed torch version before changing.
            self.scheduler.step()
            n = input.size(0)
            if (step == 0):
                logging.info('epoch %d lr %e', epoch, self.optimizer.param_groups[0]['lr'])
            self.optimizer.zero_grad()
            logits = model(input)
            if self.config.optim.label_smooth:
                # Criterion is expected to accept a smoothing factor here.
                loss = self.criterion(logits, target, self.config.optim.smooth_alpha)
            else:
                loss = self.criterion(logits, target)
            loss.backward()
            if self.config.optim.use_grad_clip:
                nn.utils.clip_grad_norm_(model.parameters(), self.config.optim.grad_clip)
            self.optimizer.step()
            (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
            batch_t = (time.time() - start)
            start = time.time()
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            data_time.update(data_t)
            batch_time.update(batch_t)
            if ((step != 0) and ((step % self.report_freq) == 0)):
                logging.info('Train epoch %03d step %03d | loss %.4f top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f', epoch, step, objs.avg, top1.avg, top5.avg, batch_time.avg, data_time.avg)
            (input, target) = prefetcher.next()
            step += 1
        logging.info('EPOCH%d Train_acc top1 %.2f top5 %.2f batch_time %.3f data_time %.3f', epoch, top1.avg, top5.avg, batch_time.avg, data_time.avg)
        return (top1.avg, top5.avg, objs.avg, batch_time.avg, data_time.avg)

    def infer(self, model, epoch=0):
        """Run one validation pass (model in eval mode).

        Returns (top1_avg, top5_avg, batch_time_avg, data_time_avg).
        NOTE(review): no torch.no_grad() here — gradients are simply not
        applied; confirm whether the memory cost is acceptable.
        """
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()
        data_time = utils.AverageMeter()
        batch_time = utils.AverageMeter()
        model.eval()
        start = time.time()
        prefetcher = data_prefetcher(self.val_data)
        (input, target) = prefetcher.next()
        step = 0
        while (input is not None):
            step += 1
            data_t = (time.time() - start)
            n = input.size(0)
            logits = model(input)
            (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
            batch_t = (time.time() - start)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            data_time.update(data_t)
            batch_time.update(batch_t)
            if ((step % self.report_freq) == 0):
                logging.info('Val epoch %03d step %03d | top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f', epoch, step, top1.avg, top5.avg, batch_time.avg, data_time.avg)
            start = time.time()
            (input, target) = prefetcher.next()
        logging.info('EPOCH%d Valid_acc top1 %.2f top5 %.2f batch_time %.3f data_time %.3f', epoch, top1.avg, top5.avg, batch_time.avg, data_time.avg)
        return (top1.avg, top5.avg, batch_time.avg, data_time.avg)
|
class SearchTrainer(object):
    """Training/evaluation loop driver for the architecture search phase.

    Alternates between weight updates (on train_data) and architecture
    updates (on val_data), delegating the actual optimization steps to a
    search Optimizer instance.
    """

    def __init__(self, train_data, val_data, search_optim, criterion, scheduler, config, args):
        self.train_data = train_data
        self.val_data = val_data
        self.search_optim = search_optim
        self.criterion = criterion
        self.scheduler = scheduler
        # Name of the secondary objective (e.g. latency) used in log lines.
        self.sub_obj_type = config.optim.sub_obj.type
        self.args = args

    def train(self, model, epoch, optim_obj='Weights', search_stage=0):
        """Run one epoch optimizing either the weights or the architecture.

        optim_obj selects both the data source (train vs. val) and which
        optimizer step is taken.
        Returns (top1, top5, loss, sub_obj, batch_time) averages.
        """
        assert (optim_obj in ['Weights', 'Arch'])
        objs = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()
        sub_obj_avg = utils.AverageMeter()
        data_time = utils.AverageMeter()
        batch_time = utils.AverageMeter()
        model.train()
        start = time.time()
        # Weights are trained on the train split; arch params on the val split.
        if (optim_obj == 'Weights'):
            prefetcher = data_prefetcher(self.train_data)
        elif (optim_obj == 'Arch'):
            prefetcher = data_prefetcher(self.val_data)
        (input, target) = prefetcher.next()
        step = 0
        while (input is not None):
            (input, target) = (input.cuda(), target.cuda())
            data_t = (time.time() - start)
            n = input.size(0)
            if (optim_obj == 'Weights'):
                # NOTE(review): scheduler stepped per batch, before
                # optimizer.step() — pre-PyTorch-1.1 ordering; confirm.
                self.scheduler.step()
                if (step == 0):
                    logging.info('epoch %d weight_lr %e', epoch, self.search_optim.weight_optimizer.param_groups[0]['lr'])
                (logits, loss, sub_obj) = self.search_optim.weight_step(input, target, model, search_stage)
            elif (optim_obj == 'Arch'):
                if (step == 0):
                    logging.info('epoch %d arch_lr %e', epoch, self.search_optim.arch_optimizer.param_groups[0]['lr'])
                (logits, loss, sub_obj) = self.search_optim.arch_step(input, target, model, search_stage)
            (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
            # Free GPU memory held by the batch before prefetching the next one.
            del logits, input, target
            batch_t = (time.time() - start)
            objs.update(loss, n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            sub_obj_avg.update(sub_obj)
            data_time.update(data_t)
            batch_time.update(batch_t)
            if ((step != 0) and ((step % self.args.report_freq) == 0)):
                logging.info('Train%s epoch %03d step %03d | loss %.4f %s %.2f top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f', optim_obj, epoch, step, objs.avg, self.sub_obj_type, sub_obj_avg.avg, top1.avg, top5.avg, batch_time.avg, data_time.avg)
            start = time.time()
            step += 1
            (input, target) = prefetcher.next()
        return (top1.avg, top5.avg, objs.avg, sub_obj_avg.avg, batch_time.avg)

    def infer(self, model, epoch):
        """Run one validation pass with single-branch sampling.

        Returns (top1, top5, loss, sub_obj, batch_time) averages.
        NOTE(review): calls model.train(), not model.eval() — possibly
        intentional (BN behavior with sampled branches during search);
        confirm before changing.
        """
        objs = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()
        sub_obj_avg = utils.AverageMeter()
        data_time = utils.AverageMeter()
        batch_time = utils.AverageMeter()
        model.train()
        start = time.time()
        prefetcher = data_prefetcher(self.val_data)
        (input, target) = prefetcher.next()
        step = 0
        while (input is not None):
            step += 1
            data_t = (time.time() - start)
            n = input.size(0)
            (logits, loss, sub_obj) = self.search_optim.valid_step(input, target, model)
            (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
            batch_t = (time.time() - start)
            objs.update(loss, n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            sub_obj_avg.update(sub_obj)
            data_time.update(data_t)
            batch_time.update(batch_t)
            if ((step % self.args.report_freq) == 0):
                logging.info('Val epoch %03d step %03d | loss %.4f %s %.2f top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f', epoch, step, objs.avg, self.sub_obj_type, sub_obj_avg.avg, top1.avg, top5.avg, batch_time.avg, data_time.avg)
            start = time.time()
            (input, target) = prefetcher.next()
        return (top1.avg, top5.avg, objs.avg, sub_obj_avg.avg, batch_time.avg)
|
class AttrDict(dict):
    """A dict whose items can also be read and written as attributes.

    Instances can be frozen with immutable(True), after which any attribute
    assignment raises AttributeError. The flag is stored in the instance
    __dict__ (not as a dict item) so it never collides with config keys.
    """

    IMMUTABLE = '__immutable__'

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Written directly into __dict__ to bypass __setattr__.
        self.__dict__[AttrDict.IMMUTABLE] = False

    def __getattr__(self, name):
        """Resolve real instance attributes first, then fall back to items."""
        instance_vars = self.__dict__
        if name in instance_vars:
            return instance_vars[name]
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        """Route writes to __dict__ or the dict itself; refuse when frozen."""
        if self.__dict__[AttrDict.IMMUTABLE]:
            raise AttributeError('Attempted to set "{}" to "{}", but AttrDict is immutable'.format(name, value))
        if name in self.__dict__:
            self.__dict__[name] = value
        else:
            self[name] = value

    def immutable(self, is_immutable):
        'Set immutability to is_immutable and recursively apply the setting\n        to all nested AttrDicts.\n        '
        self.__dict__[AttrDict.IMMUTABLE] = is_immutable
        # Nested AttrDicts may live either as attributes or as items.
        children = list(self.__dict__.values()) + list(self.values())
        for child in children:
            if isinstance(child, AttrDict):
                child.immutable(is_immutable)

    def is_immutable(self):
        """Return the current immutability flag."""
        return self.__dict__[AttrDict.IMMUTABLE]
|
def load_cfg(cfg_to_load):
    """Parse a yaml config from a string or an open stream.

    Wrapper around yaml loading kept for backward compatibility: accepts
    either a yaml string or a file-like object (IOBase).

    Fixes:
    - yaml.load without an explicit Loader is deprecated and raises
      TypeError on PyYAML >= 6.0; safe_load is used instead, which also
      avoids constructing arbitrary Python objects from untrusted files.
    - ''.join(f.readlines()) replaced by the equivalent f.read().
    """
    if isinstance(cfg_to_load, IOBase):
        # Normalize streams to a string so both inputs share one code path.
        cfg_to_load = cfg_to_load.read()
    return yaml.safe_load(cfg_to_load)
|
def load_cfg_to_dict(cfg_filename):
    """Read and parse the yaml config file at `cfg_filename`."""
    with open(cfg_filename, 'r') as cfg_file:
        return load_cfg(cfg_file)
|
def merge_cfg_from_file(cfg_filename, global_config):
    'Load a yaml config file and merge it into the global config.'
    with open(cfg_filename, 'r') as cfg_file:
        file_cfg = AttrDict(load_cfg(cfg_file))
    _merge_a_into_b(file_cfg, global_config)
|
def merge_cfg_from_cfg(cfg_other, global_config):
    """Merge `cfg_other` into the global config.

    Thin wrapper that delegates entirely to _merge_a_into_b.
    """
    _merge_a_into_b(cfg_other, global_config)
|
def update_cfg_from_file(cfg_filename, global_config):
    """Load a yaml config file and update the global config from it."""
    with open(cfg_filename, 'r') as cfg_file:
        file_cfg = AttrDict(load_cfg(cfg_file))
    update_cfg_from_cfg(file_cfg, global_config)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.