class EG(Optimizer):
    def __init__(self, params, lr=required, normalize_fn=(lambda x: x)):
        if lr is not required and lr < 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        self.normalize_fn = normalize_fn
        defaults = dict(lr=lr)
        super(EG, self).__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                p.mul_(torch.exp(-group['lr'] * d_p))
                p.data = self.normalize_fn(p.data)
        return loss
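A minimal usage sketch for the EG (exponentiated-gradient) optimizer above. The parameter, objective, and normalize_fn here are hypothetical illustrations; the only assumption is that the optimized tensor lives on a probability simplex and is renormalized after each multiplicative update.

import torch
from torch.optim.optimizer import Optimizer, required  # imports the EG class above relies on

# Toy example: one row of architecture weights constrained to sum to 1.
alpha = torch.nn.Parameter(torch.full((5,), 0.2))
opt = EG([alpha], lr=0.1, normalize_fn=lambda x: x / x.sum(dim=-1, keepdim=True))

loss = (alpha * torch.arange(5.0)).sum()  # toy objective
loss.backward()
opt.step()                                # multiplicative update, then renormalize
print(alpha.sum())                        # still ~1.0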
def load_image(path):
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert('RGB')
def list_blobs(storage_client, bucket_name, prefix=None):
    """Lists all the blobs in the bucket."""
    blobs = storage_client.list_blobs(bucket_name, prefix=prefix)
    return blobs
def create_file_dirs(target_path):
    destination_dir = target_path[0:target_path.rfind('/')]
    if not os.path.exists(destination_dir):
        try:
            os.makedirs(destination_dir)
        except OSError:
            # Another process may have created the directory concurrently.
            assert os.path.exists(destination_dir)
class ImageNetDataset(Dataset):
    def __init__(self, split, bucket_name, streaming=True, data_download_dir=None, transform=None):
        """
        Args:
            split: train or validation split to return the right dataset
            directory: root directory for imagenet where "train" and "validation" folders reside
        """
        assert split in ['train', 'validation'], 'split {} not in (train, validation)'.format(split)
        self._split = split
        self._bucket_name = bucket_name
        self._target_dir = data_download_dir
        self._source_dir = os.path.join('imagenet/imagenet', self._split)
        self._transform = transform
        self._streaming = streaming
        if self._bucket_name is not None:
            self._storage_client = storage.Client()
            self._bucket = self._storage_client.bucket(bucket_name)
            self._imgs_paths = []
            self._labels = []
            self._subdir_to_class = {}
            class_count = 0
            blobs = list_blobs(self._storage_client, self._bucket_name, prefix=self._source_dir)
            for b in blobs:
                path = b.name
                self._imgs_paths.append(path)
                sub_dir = path.split('/')[-2]
                if sub_dir not in self._subdir_to_class:
                    self._subdir_to_class[sub_dir] = class_count
                    class_count += 1
                self._labels.append(self._subdir_to_class[sub_dir])
            print('There are {} records in dataset.'.format(len(self._imgs_paths)))

    def __len__(self):
        if self._bucket_name is None:
            return 1024 * 100
        return len(self._imgs_paths)

    def __getitem__(self, idx):
        if self._bucket_name is None:
            array = np.random.rand(256, 256, 3)
            img = Image.fromarray(array, mode='RGB')
            return (transformfn.to_tensor(img), np.random.choice(1000))
        img_path = self._imgs_paths[idx]
        blob = self._bucket.blob(img_path)
        if self._streaming:
            img_str = download_gcs_blob_with_backoff(blob)
        else:
            target_path = os.path.join(self._target_dir, img_path)
            if not os.path.exists(target_path):
                create_file_dirs(target_path)
                print('downloading...')
                img_str = download_gcs_blob_with_backoff(blob)
                with open(target_path, 'wb') as f:
                    f.write(img_str)
            else:
                with open(target_path, 'rb') as f:
                    img_str = f.read()
        img_bytes = BytesIO(img_str)
        img = Image.open(img_bytes)
        img = img.convert('RGB')
        if self._transform is not None:
            img = self._transform(img)
        return (img, self._labels[idx])
def download_from_s3(s3_bucket, task, download_dir):
    s3 = boto3.client('s3')
    if task == 'smnist':
        data_files = ['s2_mnist.gz']
        s3_folder = 'spherical'
    elif task == 'scifar100':
        data_files = ['s2_cifar100.gz']
        s3_folder = 'spherical'
    elif task == 'sEMG':
        data_files = ['trainval_Myo.pt', 'test_Myo.pt']
        s3_folder = 'Myo'
    elif task == 'ninapro':
        data_files = ['ninapro_train.npy', 'ninapro_val.npy', 'ninapro_test.npy',
                      'label_train.npy', 'label_val.npy', 'label_test.npy']
        s3_folder = 'ninapro'
    elif task == 'cifar10' or task == 'cifar100':
        return
    elif task == 'audio':
        data_files = ['audio.zip']
        s3_folder = 'audio'
    else:
        raise NotImplementedError

    for data_file in data_files:
        filepath = os.path.join(download_dir, data_file)
        if s3_folder is not None:
            s3_path = os.path.join(s3_folder, data_file)
        else:
            s3_path = data_file
        if not os.path.exists(filepath):
            s3.download_file(s3_bucket, s3_path, filepath)

    if task == 'audio' and not os.path.exists(os.path.join(download_dir, 'data')):
        os.mkdir(os.path.join(download_dir, 'data'))
        import zipfile
        with zipfile.ZipFile(os.path.join(download_dir, 'audio.zip'), 'r') as zip_ref:
            zip_ref.extractall(os.path.join(download_dir, 'data'))
    return
def WarmupWrapper(scheduler_type):
    class Wrapped(scheduler_type):
        def __init__(self, warmup_epochs, *args):
            self.warmup_epochs = warmup_epochs
            super(Wrapped, self).__init__(*args)

        def get_lr(self):
            if self.last_epoch < self.warmup_epochs:
                return [((self.last_epoch + 1) / self.warmup_epochs) * b_lr for b_lr in self.base_lrs]
            return super(Wrapped, self).get_lr()

    return Wrapped
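A minimal sketch of how the WarmupWrapper factory above could be used. The model, optimizer, and numbers are hypothetical; the only assumption is that the wrapped scheduler's positional arguments are forwarded after warmup_epochs.

import torch
from torch.optim.lr_scheduler import CosineAnnealingLR

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# Linear warmup for 5 epochs, then the wrapped cosine schedule takes over.
WarmupCosine = WarmupWrapper(CosineAnnealingLR)
scheduler = WarmupCosine(5, optimizer, 100, 0.0)  # (warmup_epochs, optimizer, T_max, eta_min)

for epoch in range(10):
    optimizer.step()
    scheduler.step()
    print(epoch, scheduler.get_last_lr())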
class LinearLRScheduler(_LRScheduler):
    def __init__(self, optimizer, max_epochs, warmup_epochs, last_epoch=-1):
        self.optimizer = optimizer
        self.warmup_epochs = warmup_epochs
        self.max_epochs = max_epochs
        self.last_epoch = last_epoch
        super(LinearLRScheduler, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        if (self.max_epochs - self.last_epoch) > self.warmup_epochs:
            lr_mult = (self.max_epochs - self.warmup_epochs - self.last_epoch) / (self.max_epochs - self.warmup_epochs)
        else:
            lr_mult = (self.max_epochs - self.last_epoch) / ((self.last_epoch - self.warmup_epochs) * 5)
        return [base_lr * lr_mult for base_lr in self.base_lrs]
class EfficientNetScheduler(_LRScheduler):
    def __init__(self, optimizer, gamma, decay_every, last_epoch=-1):
        self.optimizer = optimizer
        self.last_epoch = last_epoch
        self.gamma = gamma
        self.decay_every = decay_every
        super(EfficientNetScheduler, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        lr_mult = self.gamma ** int((self.last_epoch + 1) / self.decay_every)
        return [base_lr * lr_mult for base_lr in self.base_lrs]
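A small usage sketch for EfficientNetScheduler. The optimizer and the gamma/decay_every values are hypothetical illustrations of the step-decay-every-few-epochs pattern, not values taken from this repository's configs.

import torch

model = torch.nn.Linear(8, 8)
optimizer = torch.optim.RMSprop(model.parameters(), lr=0.016)

# Multiply the learning rate by gamma every `decay_every` epochs (fractional values allowed).
scheduler = EfficientNetScheduler(optimizer, gamma=0.97, decay_every=2.4)

for epoch in range(10):
    optimizer.step()
    scheduler.step()
    print(epoch, scheduler.get_last_lr())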
class Cell(nn.Module):
    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev,
                 activation_function=nn.ReLU, drop_prob=0):
        super(Cell, self).__init__()
        print(C_prev_prev, C_prev, C)
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C)
        else:
            self.preprocess0 = ActivationConvBN(activation_function, C_prev_prev, C, 1, 1, 0)
        self.preprocess1 = ActivationConvBN(activation_function, C_prev, C, 1, 1, 0)
        if reduction:
            op_names, indices = zip(*genotype.reduce)
            concat = genotype.reduce_concat
        else:
            op_names, indices = zip(*genotype.normal)
            concat = genotype.normal_concat
        self._compile(C, op_names, indices, concat, reduction, activation_function)

    def _compile(self, C, op_names, indices, concat, reduction, activation_function):
        assert len(op_names) == len(indices)
        self._steps = len(op_names) // 2
        self._concat = concat
        self.multiplier = len(concat)
        self._ops = nn.ModuleList()
        for name, index in zip(op_names, indices):
            stride = 2 if reduction and index < 2 else 1
            op = OPS[name](C, stride, True, activation_function)
            self._ops += [op]
        self._indices = indices

    def forward(self, s0, s1, drop_path_prob):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        for i in range(self._steps):
            h1 = states[self._indices[2 * i]]
            h2 = states[self._indices[2 * i + 1]]
            op1 = self._ops[2 * i]
            op2 = self._ops[2 * i + 1]
            h1 = op1(h1)
            h2 = op2(h2)
            if self.training and drop_path_prob > 0.0:
                if not isinstance(op1, Identity):
                    h1 = drop_path(h1, drop_path_prob)
                if not isinstance(op2, Identity):
                    h2 = drop_path(h2, drop_path_prob)
            s = h1 + h2
            states += [s]
        return torch.cat([states[i] for i in self._concat], dim=1)
class Network(nn.Module):
    def __init__(self, C, num_classes, layers, genotype, in_channels, drop_path_prob):
        super(Network, self).__init__()
        self._layers = layers
        self.drop_path_prob = 0.0
        stem_multiplier = 3
        C_curr = stem_multiplier * C
        self.stem = nn.Sequential(
            nn.Conv2d(in_channels, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm2d(C_curr),
        )
        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            if i in [layers // 3, (2 * layers) // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

    def get_save_states(self):
        return {'state_dict': self.state_dict()}

    def load_states(self, save_states):
        self.load_state_dict(save_states['state_dict'])

    def forward(self, input, **kwargs):
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
        out = self.global_pooling(s1)
        logits = self.classifier(out.reshape(out.size(0), -1))
        return logits
class AuxNetworkCIFAR(nn.Module):
    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(AuxNetworkCIFAR, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        self.drop_path_prob = 0.3
        stem_multiplier = 3
        C_curr = stem_multiplier * C
        self.stem = nn.Sequential(
            nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm2d(C_curr),
        )
        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            if i in [layers // 3, (2 * layers) // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if i == (2 * layers) // 3:
                C_to_auxiliary = C_prev
        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

    def forward(self, input):
        logits_aux = None
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if i == (2 * self._layers) // 3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.contiguous().view(out.size(0), -1))
        return (logits, logits_aux)
class AuxiliaryHeadCIFAR(nn.Module):
    def __init__(self, C, num_classes):
        """Assuming input size 8x8."""
        super(AuxiliaryHeadCIFAR, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x.contiguous().view(x.size(0), -1))
        return x
class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self
class GAEAEvalTrial(PyTorchTrial):
    def __init__(self, context: PyTorchTrialContext) -> None:
        self.context = context
        self.hparams = AttrDict(context.get_hparams())
        self.data_config = context.get_data_config()
        self.criterion = nn.BCEWithLogitsLoss().cuda()
        download_directory = self.download_data_from_s3()
        self.last_epoch_idx = -1
        self.model = self.context.wrap_model(self.build_model_from_config())
        print('param size = %f MB' % utils.count_parameters_in_MB(self.model))
        self.optimizer = self.context.wrap_optimizer(
            torch.optim.SGD(
                self.model.parameters(),
                lr=self.context.get_hparam('learning_rate'),
                momentum=self.context.get_hparam('momentum'),
                weight_decay=self.context.get_hparam('weight_decay'),
            )
        )
        self.lr_scheduler = self.context.wrap_lr_scheduler(
            lr_scheduler=CosineAnnealingLR(self.optimizer, 150.0, 0),
            step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
        )

    def build_model_from_config(self):
        genotype = genotypes[self.context.get_hparam('task')]
        print(self.context.get_hparam('task'))
        print(genotype)
        dataset_hypers = {'audio': (200, 1)}
        n_classes, in_channels = dataset_hypers[self.context.get_hparam('task')]
        model = Network(
            self.context.get_hparam('init_channels'), n_classes,
            self.context.get_hparam('layers'), genotype,
            in_channels=in_channels,
            drop_path_prob=self.context.get_hparam('drop_path_prob'),
        )
        return model

    def get_genotype_from_hps(self):
        cell_config = {'normal': [], 'reduce': []}
        for cell in ['normal', 'reduce']:
            for node in range(4):
                for edge in [1, 2]:
                    edge_ind = self.hparams['{}_node{}_edge{}'.format(cell, node + 1, edge)]
                    edge_op = self.hparams['{}_node{}_edge{}_op'.format(cell, node + 1, edge)]
                    cell_config[cell].append((edge_op, edge_ind))
        print(cell_config)
        return Genotype(
            normal=cell_config['normal'], normal_concat=range(2, 6),
            reduce=cell_config['reduce'], reduce_concat=range(2, 6),
        )

    def download_data_from_s3(self):
        """Download data from s3 to store in temp directory."""
        s3_bucket = self.context.get_data_config()['bucket']
        s3 = boto3.client('s3')
        download_directory = '.'
        download_from_s3(s3_bucket, self.context.get_hparam('task'), download_directory)
        self.train_data, _, self.val_data = load_data(
            self.context.get_hparam('task'), download_directory, False,
            permute=self.context.get_hparam('permute'),
        )
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        train_data = self.train_data
        train_queue = DataLoader(
            train_data, num_workers=4, batch_size=self.context.get_per_slot_batch_size(),
            shuffle=True, sampler=None, collate_fn=_collate_fn, pin_memory=False, drop_last=True,
        )
        return train_queue

    def build_validation_data_loader(self) -> DataLoader:
        valid_data = self.val_data
        valid_queue = DataLoader(
            valid_data, sampler=None, num_workers=4, collate_fn=_collate_fn_eval,
            shuffle=False, batch_size=1, pin_memory=False,
        )
        return valid_queue

    def train_batch(self, batch: Any, epoch_idx: int, batch_idx: int) -> Dict[str, torch.Tensor]:
        if batch_idx == 0 or self.last_epoch_idx < epoch_idx:
            current_lr = self.lr_scheduler.get_last_lr()[0]
            print('Epoch: {} lr {}'.format(epoch_idx, current_lr))
            self.model.drop_path_prob = self.context.get_hparam('drop_path_prob') * epoch_idx / 150.0
            print('current drop prob is {}'.format(self.model.drop_path_prob))
            self.last_epoch_idx = epoch_idx
        input, _, target = batch
        print(input.shape)
        logits = self.model(input)
        loss = self.criterion(logits, target)
        self.context.backward(loss)
        self.context.step_optimizer(
            self.optimizer,
            clip_grads=lambda params: torch.nn.utils.clip_grad_norm_(
                params, self.context.get_hparam('clip_gradients_l2_norm')
            ),
        )
        return {'loss': loss}

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        loss_avg = utils.AverageMeter()
        val_predictions = []
        val_gts = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                logits = logits.mean(0).unsqueeze(0)
                loss = self.criterion(logits, target)
                loss_avg.update(loss, n)
                logits_sigmoid = torch.sigmoid(logits)
                val_predictions.append(logits_sigmoid.detach().cpu().numpy()[0])
                val_gts.append(target.detach().cpu().numpy()[0])
        val_preds = np.asarray(val_predictions).astype('float32')
        val_gts = np.asarray(val_gts).astype('int32')
        stats = calculate_stats(val_preds, val_gts)
        mAP = np.mean([stat['AP'] for stat in stats])
        mAUC = np.mean([stat['auc'] for stat in stats])
        results = {'test_loss': loss_avg.avg, 'test_mAUC': mAUC, 'test_mAP': mAP}
        return results
class GAEAEvalTrial(PyTorchTrial):
    def __init__(self, context: PyTorchTrialContext) -> None:
        self.context = context
        self.data_config = context.get_data_config()
        self.criterion = nn.CrossEntropyLoss()
        self.download_directory = self.download_data_from_s3()
        self.last_epoch_idx = -1
        self.model = self.context.wrap_model(self.build_model_from_config())
        print('param size = %fMB' % utils.count_parameters_in_MB(self.model))
        self.optimizer = self.context.wrap_optimizer(
            torch.optim.SGD(
                self.model.parameters(),
                lr=self.context.get_hparam('learning_rate'),
                momentum=self.context.get_hparam('momentum'),
                weight_decay=self.context.get_hparam('weight_decay'),
            )
        )
        self.lr_scheduler = self.context.wrap_lr_scheduler(
            lr_scheduler=CosineAnnealingLR(self.optimizer, 150.0, 0),
            step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
        )

    def build_model_from_config(self):
        # Alternative genotypes, kept commented out:
        #   genotype = Genotype(
        #       normal=[("skip_connect", 1), ("skip_connect", 0), ("sep_conv_3x3", 2), ("sep_conv_3x3", 1),
        #               ("sep_conv_5x5", 2), ("sep_conv_3x3", 0), ("sep_conv_5x5", 3), ("sep_conv_5x5", 2)],
        #       normal_concat=range(2, 6),
        #       reduce=[("max_pool_3x3", 1), ("sep_conv_3x3", 0), ("sep_conv_5x5", 1), ("dil_conv_5x5", 2),
        #               ("sep_conv_3x3", 1), ("sep_conv_3x3", 3), ("sep_conv_5x5", 1), ("max_pool_3x3", 2)],
        #       reduce_concat=range(2, 6))
        #
        #   best model for spherical MNIST:
        #   genotype = Genotype(
        #       normal=[('max_pool_3x3', 0), ('dil_conv_3x3', 1), ('dil_conv_5x5', 0), ('sep_conv_3x3', 2),
        #               ('sep_conv_5x5', 1), ('max_pool_3x3', 3), ('sep_conv_3x3', 3), ('sep_conv_3x3', 1)],
        #       normal_concat=range(2, 6),
        #       reduce=[('skip_connect', 1), ('skip_connect', 0), ('sep_conv_5x5', 1), ('skip_connect', 0),
        #               ('max_pool_3x3', 0), ('sep_conv_3x3', 1), ('max_pool_3x3', 4), ('max_pool_3x3', 3)],
        #       reduce_concat=range(2, 6))
        #
        #   best genotype for ninapro:
        #   genotype = Genotype(
        #       normal=[('sep_conv_5x5', 0), ('sep_conv_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_5x5', 1),
        #               ('dil_conv_3x3', 0), ('dil_conv_3x3', 2), ('sep_conv_3x3', 2), ('sep_conv_3x3', 0)],
        #       normal_concat=range(2, 6),
        #       reduce=[('skip_connect', 0), ('avg_pool_3x3', 1), ('dil_conv_3x3', 2), ('max_pool_3x3', 0),
        #               ('sep_conv_3x3', 2), ('sep_conv_5x5', 0), ('dil_conv_5x5', 3), ('sep_conv_5x5', 2)],
        #       reduce_concat=range(2, 6))
        if self.context.get_hparam('permute'):
            genotype = genotypes['cifar100_permuted']
        else:
            genotype = genotypes[self.context.get_hparam('task')]
        print(self.context.get_hparam('task'))
        print(genotype)
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3),
                          'smnist': (10, 1), 'cifar100': (100, 3), 'scifar100': (100, 3)}
        n_classes, in_channels = dataset_hypers[self.context.get_hparam('task')]
        # Plain Network construction (no auxiliary head), kept commented out:
        #   model = Network(self.context.get_hparam("init_channels"), n_classes,
        #                   self.context.get_hparam("layers"), genotype,
        #                   in_channels=in_channels,
        #                   drop_path_prob=self.context.get_hparam("drop_path_prob"))
        model = AuxNetworkCIFAR(
            self.context.get_hparam('init_channels'), n_classes,
            self.context.get_hparam('layers'), True, genotype,
        )
        return model

    def download_data_from_s3(self):
        """Download data from s3 to store in temp directory."""
        s3_bucket = self.context.get_data_config()['bucket']
        download_directory = f'/tmp/data-rank{self.context.distributed.get_rank()}'
        s3 = boto3.client('s3')
        os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.context.get_hparam('task'), download_directory)
        self.train_data, _, self.val_data = load_data(
            self.context.get_hparam('task'), download_directory, False,
            permute=self.context.get_hparam('permute'),
        )
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        train_data = self.train_data
        train_queue = DataLoader(
            train_data, batch_size=self.context.get_per_slot_batch_size(), shuffle=True,
            pin_memory=True, num_workers=self.data_config['num_workers_train'],
        )
        return train_queue

    def build_validation_data_loader(self) -> DataLoader:
        valid_data = self.val_data
        valid_queue = DataLoader(
            valid_data, batch_size=self.context.get_per_slot_batch_size(), shuffle=False,
            pin_memory=True, num_workers=self.data_config['num_workers_val'],
        )
        return valid_queue

    def train_batch(self, batch: Any, epoch_idx: int, batch_idx: int) -> Dict[str, torch.Tensor]:
        if batch_idx == 0 or self.last_epoch_idx < epoch_idx:
            current_lr = self.lr_scheduler.get_last_lr()[0]
            self.last_epoch_idx = epoch_idx
            self.model.drop_path_prob = self.context.get_hparam('drop_path_prob') * epoch_idx / 150.0
        input, target = batch
        logits, logits_aux = self.model(input)
        loss = self.criterion(logits, target)
        loss_aux = self.criterion(logits_aux, target)
        loss += loss_aux * 0.4
        top1, top5 = accuracy(logits, target, topk=(1, 5))
        self.context.backward(loss)
        self.context.step_optimizer(
            self.optimizer,
            clip_grads=lambda params: torch.nn.utils.clip_grad_norm_(
                params, self.context.get_hparam('clip_gradients_l2_norm')
            ),
        )
        return {'loss': loss, 'top1_accuracy': top1, 'top5_accuracy': top5}

    # Per-batch evaluation variant, kept commented out:
    #   def evaluate_batch(self, batch: Any) -> Dict[str, Any]:
    #       input, target = batch
    #       logits = self.model(input)
    #       loss = self.criterion(logits, target)
    #       top1, top5 = accuracy(logits, target, topk=(1, 5))
    #       return {"loss": loss, "top1_accuracy": top1, "top5_accuracy": top5}

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        acc_top1 = 0
        acc_top5 = 0
        loss_avg = 0
        num_batches = 0
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                num_batches += 1
                logits, _ = self.model(input)
                loss = self.criterion(logits, target)
                top1, top5 = accuracy(logits, target, topk=(1, 5))
                acc_top1 += top1
                acc_top5 += top5
                loss_avg += loss
        results = {
            'loss': loss_avg.item() / num_batches,
            'top1_accuracy': acc_top1.item() / num_batches,
            'top5_accuracy': acc_top5.item() / num_batches,
        }
        return results
class GAEAEvalTrial(PyTorchTrial):
    def __init__(self, context: PyTorchTrialContext) -> None:
        self.context = context
        self.hparams = AttrDict(context.get_hparams())
        self.data_config = context.get_data_config()
        self.criterion = nn.CrossEntropyLoss()
        self.download_directory = self.download_data_from_s3()
        self.last_epoch_idx = -1
        self.model = self.context.wrap_model(self.build_model_from_config())
        print('param size = %f MB' % utils.count_parameters_in_MB(self.model))
        self.optimizer = self.context.wrap_optimizer(
            torch.optim.SGD(
                self.model.parameters(),
                lr=self.context.get_hparam('learning_rate'),
                momentum=self.context.get_hparam('momentum'),
                weight_decay=self.context.get_hparam('weight_decay'),
            )
        )
        self.lr_scheduler = self.context.wrap_lr_scheduler(
            lr_scheduler=CosineAnnealingLR(self.optimizer, 150.0, 0),
            step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
        )

    def build_model_from_config(self):
        if self.context.get_hparam('permute'):
            genotype = genotypes['cifar100_permuted']
        else:
            genotype = genotypes[self.context.get_hparam('task')]
        print(self.context.get_hparam('task'))
        print(genotype)
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3),
                          'smnist': (10, 1), 'cifar100': (100, 3), 'scifar100': (100, 3)}
        n_classes, in_channels = dataset_hypers[self.context.get_hparam('task')]
        model = Network(
            self.context.get_hparam('init_channels'), n_classes,
            self.context.get_hparam('layers'), genotype,
            in_channels=in_channels,
            drop_path_prob=self.context.get_hparam('drop_path_prob'),
        )
        return model

    def get_genotype_from_hps(self):
        cell_config = {'normal': [], 'reduce': []}
        for cell in ['normal', 'reduce']:
            for node in range(4):
                for edge in [1, 2]:
                    edge_ind = self.hparams['{}_node{}_edge{}'.format(cell, node + 1, edge)]
                    edge_op = self.hparams['{}_node{}_edge{}_op'.format(cell, node + 1, edge)]
                    cell_config[cell].append((edge_op, edge_ind))
        print(cell_config)
        return Genotype(
            normal=cell_config['normal'], normal_concat=range(2, 6),
            reduce=cell_config['reduce'], reduce_concat=range(2, 6),
        )

    def download_data_from_s3(self):
        """Download data from s3 to store in temp directory."""
        s3_bucket = self.context.get_data_config()['bucket']
        download_directory = f'/tmp/data-rank{self.context.distributed.get_rank()}'
        s3 = boto3.client('s3')
        os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.context.get_hparam('task'), download_directory)
        self.train_data, _, self.val_data = load_data(
            self.context.get_hparam('task'), download_directory, False,
            permute=self.context.get_hparam('permute'),
        )
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        train_data = self.train_data
        train_queue = DataLoader(
            train_data, batch_size=self.context.get_per_slot_batch_size(), shuffle=True,
            pin_memory=True, num_workers=self.data_config['num_workers_train'],
        )
        return train_queue

    def build_validation_data_loader(self) -> DataLoader:
        valid_data = self.val_data
        valid_queue = DataLoader(
            valid_data, batch_size=self.context.get_per_slot_batch_size(), shuffle=False,
            pin_memory=True, num_workers=self.data_config['num_workers_val'],
        )
        return valid_queue

    def train_batch(self, batch: Any, epoch_idx: int, batch_idx: int) -> Dict[str, torch.Tensor]:
        if batch_idx == 0 or self.last_epoch_idx < epoch_idx:
            current_lr = self.lr_scheduler.get_last_lr()[0]
            self.model.drop_path_prob = self.context.get_hparam('drop_path_prob') * epoch_idx / 150.0
            self.last_epoch_idx = epoch_idx
        input, target = batch
        logits = self.model(input)
        loss = self.criterion(logits, target)
        top1, top5 = accuracy(logits, target, topk=(1, 5))
        self.context.backward(loss)
        self.context.step_optimizer(
            self.optimizer,
            clip_grads=lambda params: torch.nn.utils.clip_grad_norm_(
                params, self.context.get_hparam('clip_gradients_l2_norm')
            ),
        )
        return {'loss': loss, 'top1_accuracy': top1, 'top5_accuracy': top5}

    # Per-batch evaluation variant, kept commented out:
    #   def evaluate_batch(self, batch: Any) -> Dict[str, Any]:
    #       input, target = batch
    #       logits = self.model(input)
    #       loss = self.criterion(logits, target)
    #       top1, top5 = accuracy(logits, target, topk=(1, 5))
    #       return {"loss": loss, "top1_accuracy": top1, "top5_accuracy": top5}

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        acc_top1 = utils.AverageMeter()
        acc_top5 = utils.AverageMeter()
        loss_avg = utils.AverageMeter()
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.criterion(logits, target)
                top1, top5 = utils.accuracy(logits, target, topk=(1, 5))
                acc_top1.update(top1.item(), n)
                acc_top5.update(top5.item(), n)
                loss_avg.update(loss, n)
        results = {'loss': loss_avg.avg, 'top1_accuracy': acc_top1.avg, 'top5_accuracy': acc_top5.avg}
        return results
def imagenet_policies():
    """AutoAugment policies found on ImageNet.

    This policy also transfers to five FGVC datasets with image size similar to
    ImageNet including Oxford 102 Flowers, Caltech-101, Oxford-IIIT Pets,
    FGVC Aircraft and Stanford Cars.
    """
    policies = [
        [('Posterize', 0.4, 8), ('Rotate', 0.6, 9)],
        [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
        [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
        [('Posterize', 0.6, 7), ('Posterize', 0.6, 6)],
        [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
        [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
        [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
        [('Posterize', 0.8, 5), ('Equalize', 1.0, 2)],
        [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
        [('Equalize', 0.6, 8), ('Posterize', 0.4, 6)],
        [('Rotate', 0.8, 8), ('Color', 0.4, 0)],
        [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
        [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
        [('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
        [('Color', 0.6, 4), ('Contrast', 1.0, 8)],
        [('Rotate', 0.8, 8), ('Color', 1.0, 2)],
        [('Color', 0.8, 8), ('Solarize', 0.8, 7)],
        [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
        [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
        [('Color', 0.4, 0), ('Equalize', 0.6, 3)],
    ]
    return policies
def get_trans_list():
    trans_list = ['Invert', 'Sharpness', 'AutoContrast', 'Posterize', 'ShearX',
                  'TranslateX', 'TranslateY', 'ShearY', 'Cutout', 'Rotate',
                  'Equalize', 'Contrast', 'Color', 'Solarize', 'Brightness']
    return trans_list
def randaug_policies():
    trans_list = get_trans_list()
    op_list = []
    for trans in trans_list:
        for magnitude in range(1, 10):
            op_list += [(trans, 0.5, magnitude)]
    policies = []
    for op_1 in op_list:
        for op_2 in op_list:
            policies += [[op_1, op_2]]
    return policies
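For a sense of scale, randaug_policies enumerates every ordered pair of (transform, probability 0.5, magnitude) ops over the 15 transforms and 9 magnitude levels above, i.e. 135 single ops and 135 x 135 candidate two-op policies; a quick sanity check:

policies = randaug_policies()
print(len(policies))  # 15 transforms * 9 magnitudes = 135 ops -> 135 * 135 = 18225 policies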
class BilevelDataset(Dataset):
    def __init__(self, dataset):
        """
        We will split the data into a train split and a validation split
        and return one image from each split as a single observation.

        Args:
            dataset: PyTorch Dataset object
        """
        inds = np.arange(len(dataset))
        self.dataset = dataset
        n_train = int(0.5 * len(inds))
        self.train_inds = inds[0:n_train]
        self.val_inds = inds[n_train:2 * n_train]
        assert len(self.train_inds) == len(self.val_inds)

    def shuffle_val_inds(self):
        np.random.shuffle(self.val_inds)

    def __len__(self):
        return len(self.train_inds)

    def __getitem__(self, idx):
        train_ind = self.train_inds[idx]
        val_ind = self.val_inds[idx]
        x_train, y_train = self.dataset[train_ind]
        x_val, y_val = self.dataset[val_ind]
        return (x_train, y_train, x_val, y_val)
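A minimal sketch of how BilevelDataset pairs a weight-training example with a held-out example per item. The toy tensors below are hypothetical; any map-style dataset returning (x, y) pairs would do.

import torch
from torch.utils.data import TensorDataset

base = TensorDataset(torch.randn(100, 3), torch.randint(0, 10, (100,)))
bilevel = BilevelDataset(base)           # first 50 items feed the weights, next 50 feed the architecture
x_train, y_train, x_val, y_val = bilevel[0]
print(len(bilevel), x_train.shape, x_val.shape)  # 50, torch.Size([3]), torch.Size([3])
bilevel.shuffle_val_inds()               # re-pair train/val examples between epochs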
class BilevelAudioDataset(Dataset):
    def __init__(self, dataset):
        """
        We will split the data into a train split and a validation split
        and return one image from each split as a single observation.

        Args:
            dataset: PyTorch Dataset object
        """
        inds = np.arange(len(dataset))
        self.dataset = dataset
        n_train = int(0.5 * len(inds))
        self.train_inds = inds[0:n_train]
        self.val_inds = inds[n_train:2 * n_train]
        assert len(self.train_inds) == len(self.val_inds)

    def shuffle_val_inds(self):
        np.random.shuffle(self.val_inds)

    def __len__(self):
        return len(self.train_inds)

    def __getitem__(self, idx):
        train_ind = self.train_inds[idx]
        val_ind = self.val_inds[idx]
        x_train, y_train = self.dataset[train_ind]
        x_val, y_val = self.dataset[val_ind]
        return (x_train, y_train, x_val, y_val)
class GenotypeCallback(PyTorchCallback):
    def __init__(self, context):
        self.model = context.models[0]

    def on_validation_end(self, metrics):
        print(self.model.genotype())
class GAEASearchTrial(PyTorchTrial):
    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        self.context = trial_context
        self.hparams = utils.AttrDict(trial_context.get_hparams())
        self.last_epoch = 0
        self.download_directory = self.download_data_from_s3()
        dataset_hypers = {'audio': (200, 1)}
        n_classes, in_channels = dataset_hypers[self.hparams.task]
        self.criterion = nn.BCEWithLogitsLoss().cuda()
        self.model = self.context.wrap_model(
            Network(
                self.hparams.init_channels, n_classes, self.hparams.layers,
                self.criterion, self.hparams.nodes, k=self.hparams.shuffle_factor,
                in_channels=in_channels,
            )
        )
        total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad) / 1e6
        print('Parameter size in MB: ', total_params)
        self.ws_opt = self.context.wrap_optimizer(
            torch.optim.SGD(
                self.model.ws_parameters(), self.hparams.learning_rate,
                momentum=self.hparams.momentum, weight_decay=self.hparams.weight_decay,
            )
        )
        self.arch_opt = self.context.wrap_optimizer(
            EG(
                self.model.arch_parameters(), self.hparams.arch_learning_rate,
                lambda p: p / p.sum(dim=-1, keepdim=True),
            )
        )
        self.lr_scheduler = self.context.wrap_lr_scheduler(
            lr_scheduler=CosineAnnealingLR(self.ws_opt, self.hparams.scheduler_epochs, self.hparams.min_learning_rate),
            step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
        )

    def download_data_from_s3(self):
        """Download data from s3 to store in temp directory."""
        s3_bucket = self.context.get_data_config()['bucket']
        s3 = boto3.client('s3')
        download_directory = '.'
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        self.train_data, self.val_data, self.test_data = load_data(
            self.hparams.task, download_directory, True, self.hparams.permute,
        )
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        trainset = self.train_data
        bilevel = BilevelAudioDataset(trainset)
        self.train_data = bilevel
        print('Length of bilevel dataset: ', len(bilevel))
        return DataLoader(
            bilevel, batch_size=self.context.get_per_slot_batch_size(), shuffle=True,
            sampler=None, collate_fn=_collate_fn, pin_memory=False, drop_last=True, num_workers=4,
        )

    def build_validation_data_loader(self) -> DataLoader:
        valset = self.val_data
        return DataLoader(
            valset, sampler=None, num_workers=4, collate_fn=_collate_fn_eval,
            shuffle=False, batch_size=1, pin_memory=False,
        )

    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[str, torch.Tensor]:
        if epoch_idx != self.last_epoch:
            self.train_data.shuffle_val_inds()
            self.last_epoch = epoch_idx
        x_train, y_train, x_val, y_val = batch

        # Weight update on the training split.
        for a in self.model.arch_parameters():
            a.requires_grad = False
        for w in self.model.ws_parameters():
            w.requires_grad = True
        loss = self.model._loss(x_train, y_train)
        self.context.backward(loss)
        self.context.step_optimizer(
            optimizer=self.ws_opt,
            clip_grads=lambda params: torch.nn.utils.clip_grad_norm_(
                params, self.context.get_hparam('clip_gradients_l2_norm')
            ),
        )

        # Architecture update on the validation split, after a 10-epoch warmup.
        arch_loss = 0.0
        if epoch_idx > 10:
            for a in self.model.arch_parameters():
                a.requires_grad = True
            for w in self.model.ws_parameters():
                w.requires_grad = False
            arch_loss = self.model._loss(x_val, y_val)
            self.context.backward(arch_loss)
            self.context.step_optimizer(self.arch_opt)
        return {'loss': loss, 'arch_loss': arch_loss}

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        loss_avg = utils.AverageMeter()
        val_predictions = []
        val_gts = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                logits = logits.mean(0).unsqueeze(0)
                loss = self.criterion(logits, target)
                loss_avg.update(loss, n)
                logits_sigmoid = torch.sigmoid(logits)
                val_predictions.append(logits_sigmoid.detach().cpu().numpy()[0])
                val_gts.append(target.detach().cpu().numpy()[0])
        val_preds = np.asarray(val_predictions).astype('float32')
        val_gts = np.asarray(val_gts).astype('int32')
        map_value = average_precision_score(val_gts, val_preds, average='macro')
        results = {'loss': loss_avg.avg, 'val_mAP': map_value}
        if self.last_epoch % 10 == 0:
            test_predictions = []
            test_gts = []
            for ix in range(self.test_data.len):
                with torch.no_grad():
                    batch = self.test_data[ix]
                    x, y = batch
                    x = x.cuda()
                    y_pred = self.model(x)
                    y_pred = y_pred.mean(0).unsqueeze(0)
                    sigmoid_preds = torch.sigmoid(y_pred)
                    test_predictions.append(sigmoid_preds.detach().cpu().numpy()[0])
                    test_gts.append(y.detach().cpu().numpy()[0])
            test_predictions = np.asarray(test_predictions).astype('float32')
            test_gts = np.asarray(test_gts).astype('int32')
            stats = calculate_stats(test_predictions, test_gts)
            mAP = np.mean([stat['AP'] for stat in stats])
            mAUC = np.mean([stat['auc'] for stat in stats])
            results2 = {'test_mAUC': mAUC, 'test_mAP': mAP}
            results.update(results2)
        return results

    def build_callbacks(self):
        return {'genotype': GenotypeCallback(self.context)}
class GAEASearchTrial(PyTorchTrial):
    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        self.context = trial_context
        self.hparams = utils.AttrDict(trial_context.get_hparams())
        self.last_epoch = 0
        self.download_directory = self.download_data_from_s3()
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3),
                          'smnist': (10, 1), 'cifar100': (100, 3), 'scifar100': (100, 3)}
        n_classes, in_channels = dataset_hypers[self.hparams.task]
        criterion = nn.CrossEntropyLoss()
        self.model = self.context.wrap_model(
            Network(
                self.hparams.init_channels, n_classes, self.hparams.layers,
                criterion, self.hparams.nodes, k=self.hparams.shuffle_factor,
                in_channels=in_channels,
            )
        )
        total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad) / 1e6
        print('Parameter size in MB: ', total_params)
        self.ws_opt = self.context.wrap_optimizer(
            torch.optim.SGD(
                self.model.ws_parameters(), self.hparams.learning_rate,
                momentum=self.hparams.momentum, weight_decay=self.hparams.weight_decay,
            )
        )
        self.arch_opt = self.context.wrap_optimizer(
            EG(
                self.model.arch_parameters(), self.hparams.arch_learning_rate,
                lambda p: p / p.sum(dim=-1, keepdim=True),
            )
        )
        self.lr_scheduler = self.context.wrap_lr_scheduler(
            lr_scheduler=CosineAnnealingLR(self.ws_opt, self.hparams.scheduler_epochs, self.hparams.min_learning_rate),
            step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
        )

    def download_data_from_s3(self):
        """Download data from s3 to store in temp directory."""
        s3_bucket = self.context.get_data_config()['bucket']
        download_directory = f'/tmp/data-rank{self.context.distributed.get_rank()}'
        s3 = boto3.client('s3')
        os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        self.train_data, self.val_data, self.test_data = load_data(
            self.hparams.task, download_directory, True, self.hparams.permute,
        )
        self.build_test_data_loader(download_directory)
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        trainset = self.train_data
        bilevel = BilevelDataset(trainset)
        self.train_data = bilevel
        print('Length of bilevel dataset: ', len(bilevel))
        return DataLoader(bilevel, batch_size=self.context.get_per_slot_batch_size(), shuffle=True, num_workers=2)

    def build_validation_data_loader(self) -> DataLoader:
        valset = self.val_data
        return DataLoader(valset, batch_size=self.context.get_per_slot_batch_size(), shuffle=False, num_workers=2)

    def build_test_data_loader(self, download_directory):
        testset = self.test_data
        self.test_loader = torch.utils.data.DataLoader(
            testset, batch_size=self.context.get_per_slot_batch_size(), shuffle=False, num_workers=2,
        )
        return

    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[str, torch.Tensor]:
        if epoch_idx != self.last_epoch:
            self.train_data.shuffle_val_inds()
            self.last_epoch = epoch_idx
        x_train, y_train, x_val, y_val = batch

        # Weight update on the training split.
        for a in self.model.arch_parameters():
            a.requires_grad = False
        for w in self.model.ws_parameters():
            w.requires_grad = True
        loss = self.model._loss(x_train, y_train)
        self.context.backward(loss)
        self.context.step_optimizer(
            optimizer=self.ws_opt,
            clip_grads=lambda params: torch.nn.utils.clip_grad_norm_(
                params, self.context.get_hparam('clip_gradients_l2_norm')
            ),
        )

        # Architecture update on the validation split, after a 10-epoch warmup.
        arch_loss = 0.0
        if epoch_idx > 10:
            for a in self.model.arch_parameters():
                a.requires_grad = True
            for w in self.model.ws_parameters():
                w.requires_grad = False
            arch_loss = self.model._loss(x_val, y_val)
            self.context.backward(arch_loss)
            self.context.step_optimizer(self.arch_opt)
        return {'loss': loss, 'arch_loss': arch_loss}

    # Per-batch evaluation variant, kept commented out:
    #   def evaluate_batch(self, batch: TorchData) -> Dict[str, Any]:
    #       input, target = batch
    #       logits = self.model(input)
    #       loss = self.model._loss(input, target)
    #       top1, top5 = utils.accuracy(logits, target, topk=(1, 5))
    #       test_input, test_target = next(iter(self.test_loader))
    #       test_input, test_target = test_input.cuda(), test_target.cuda()
    #       test_logits = self.model(test_input)
    #       test_loss = self.model._loss(test_input, test_target)
    #       test_top1, test_top5 = utils.accuracy(test_logits, test_target, topk=(1, 5))
    #       return {"loss": loss, "top1_accuracy": top1, "top5_accuracy": top5, "test_loss": test_loss,
    #               "top1_accuracy_test": test_top1, "top5_accuracy_test": test_top5}

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        acc_top1 = utils.AverageMeter()
        acc_top5 = utils.AverageMeter()
        loss_avg = utils.AverageMeter()
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.model._loss(input, target)
                top1, top5 = utils.accuracy(logits, target, topk=(1, 5))
                acc_top1.update(top1.item(), n)
                acc_top5.update(top5.item(), n)
                loss_avg.update(loss, n)
        results = {'loss': loss_avg.avg, 'top1_accuracy': acc_top1.avg, 'top5_accuracy': acc_top5.avg}

        acc_top1 = utils.AverageMeter()
        acc_top5 = utils.AverageMeter()
        loss_avg = utils.AverageMeter()
        with torch.no_grad():
            for batch in self.test_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.model._loss(input, target)
                top1, top5 = utils.accuracy(logits, target, topk=(1, 5))
                acc_top1.update(top1.item(), n)
                acc_top5.update(top5.item(), n)
                loss_avg.update(loss, n)
        results2 = {'test_loss': loss_avg.avg, 'test_top1_accuracy': acc_top1.avg, 'test_top5_accuracy': acc_top5.avg}
        results.update(results2)
        return results

    def build_callbacks(self):
        return {'genotype': GenotypeCallback(self.context)}
def download_from_s3(s3_bucket, task, download_dir):
    s3 = boto3.client('s3')
    if task == 'ECG':
        data_files = ['challenge2017.pkl']
        s3_folder = 'ECG'
    elif task == 'satellite':
        data_files = ['satellite_train.npy', 'satellite_test.npy']
        s3_folder = 'satellite'
    elif task == 'deepsea':
        data_files = ['deepsea_filtered.npz']
        s3_folder = 'deepsea'
    else:
        raise NotImplementedError
    for data_file in data_files:
        filepath = os.path.join(download_dir, data_file)
        if s3_folder is not None:
            s3_path = os.path.join(s3_folder, data_file)
        else:
            s3_path = data_file
        if not os.path.exists(filepath):
            s3.download_file(s3_bucket, s3_path, filepath)
    return
class ECGDataset(Dataset):
    def __init__(self, data, label, pid=None):
        self.data = data
        self.label = label
        self.pid = pid

    def __getitem__(self, index):
        return (torch.tensor(self.data[index], dtype=torch.float),
                torch.tensor(self.label[index], dtype=torch.long))

    def __len__(self):
        return len(self.data)
def load_data(task, path, train=False):
    if task == 'ECG':
        return load_ECG_data(path, train)
    elif task == 'satellite':
        return load_satellite_data(path, train)
    elif task == 'deepsea':
        return load_deepsea_data(path, train)
    else:
        raise NotImplementedError
def load_ECG_data(path, train):
    return read_data_physionet_4_with_val(path) if train else read_data_physionet_4(path)
def load_satellite_data(path, train):
    train_file = os.path.join(path, 'satellite_train.npy')
    test_file = os.path.join(path, 'satellite_test.npy')
    all_train_data = np.load(train_file, allow_pickle=True)[()]['data']
    all_train_labels = np.load(train_file, allow_pickle=True)[()]['label']
    test_data = np.load(test_file, allow_pickle=True)[()]['data']
    test_labels = np.load(test_file, allow_pickle=True)[()]['label']

    all_train_labels = all_train_labels - 1
    test_labels = test_labels - 1

    all_train_data = (all_train_data - all_train_data.mean(axis=1, keepdims=True)) / all_train_data.std(axis=1, keepdims=True)
    test_data = (test_data - test_data.mean(axis=1, keepdims=True)) / test_data.std(axis=1, keepdims=True)
    all_train_data = np.expand_dims(all_train_data, 1)
    test_data = np.expand_dims(test_data, 1)

    all_train_tensors = torch.from_numpy(all_train_data).type(torch.FloatTensor)
    all_train_labeltensor = torch.from_numpy(all_train_labels).type(torch.LongTensor)
    test_tensors = torch.from_numpy(test_data).type(torch.FloatTensor)
    test_labeltensor = torch.from_numpy(test_labels).type(torch.LongTensor)
    testset = data_utils.TensorDataset(test_tensors, test_labeltensor)

    if train:
        len_val = len(test_data)
        train_tensors, train_labeltensor = all_train_tensors[:-len_val], all_train_labeltensor[:-len_val]
        val_tensors, val_labeltensor = all_train_tensors[-len_val:], all_train_labeltensor[-len_val:]
        trainset = data_utils.TensorDataset(train_tensors, train_labeltensor)
        valset = data_utils.TensorDataset(val_tensors, val_labeltensor)
        return (trainset, valset, testset)

    trainset = data_utils.TensorDataset(all_train_tensors, all_train_labeltensor)
    return (trainset, None, testset)
def read_data_physionet_4(path, window_size=1000, stride=500):
    with open(os.path.join(path, 'challenge2017.pkl'), 'rb') as fin:
        res = pickle.load(fin)

    # z-score each recording.
    all_data = res['data']
    for i in range(len(all_data)):
        tmp_data = all_data[i]
        tmp_std = np.std(tmp_data)
        tmp_mean = np.mean(tmp_data)
        all_data[i] = (tmp_data - tmp_mean) / tmp_std

    # Encode labels: N=0, A=1, O=2, ~=3.
    all_label = []
    for i in res['label']:
        if i == 'N':
            all_label.append(0)
        elif i == 'A':
            all_label.append(1)
        elif i == 'O':
            all_label.append(2)
        elif i == '~':
            all_label.append(3)
    all_label = np.array(all_label)

    X_train, X_test, Y_train, Y_test = train_test_split(all_data, all_label, test_size=0.1, random_state=0)
    print('before: ')
    print(Counter(Y_train), Counter(Y_test))
    X_train, Y_train = slide_and_cut(X_train, Y_train, window_size=window_size, stride=stride)
    X_test, Y_test, pid_test = slide_and_cut(X_test, Y_test, window_size=window_size, stride=stride, output_pid=True)
    print('after: ')
    print(Counter(Y_train), Counter(Y_test))

    shuffle_pid = np.random.permutation(Y_train.shape[0])
    X_train = X_train[shuffle_pid]
    Y_train = Y_train[shuffle_pid]
    X_train = np.expand_dims(X_train, 1)
    X_test = np.expand_dims(X_test, 1)
    trainset = ECGDataset(X_train, Y_train)
    testset = ECGDataset(X_test, Y_test, pid_test)
    return (trainset, None, testset)
def read_data_physionet_4_with_val(path, window_size=1000, stride=500):
    with open(os.path.join(path, 'challenge2017.pkl'), 'rb') as fin:
        res = pickle.load(fin)

    # z-score each recording.
    all_data = res['data']
    for i in range(len(all_data)):
        tmp_data = all_data[i]
        tmp_std = np.std(tmp_data)
        tmp_mean = np.mean(tmp_data)
        all_data[i] = (tmp_data - tmp_mean) / tmp_std

    # Encode labels: N=0, A=1, O=2, ~=3.
    all_label = []
    for i in res['label']:
        if i == 'N':
            all_label.append(0)
        elif i == 'A':
            all_label.append(1)
        elif i == 'O':
            all_label.append(2)
        elif i == '~':
            all_label.append(3)
    all_label = np.array(all_label)

    # 80/10/10 train/val/test split.
    X_train, X_test, Y_train, Y_test = train_test_split(all_data, all_label, test_size=0.2, random_state=0)
    X_val, X_test, Y_val, Y_test = train_test_split(X_test, Y_test, test_size=0.5, random_state=0)
    print('before: ')
    print(Counter(Y_train), Counter(Y_val), Counter(Y_test))
    X_train, Y_train = slide_and_cut(X_train, Y_train, window_size=window_size, stride=stride)
    X_val, Y_val, pid_val = slide_and_cut(X_val, Y_val, window_size=window_size, stride=stride, output_pid=True)
    X_test, Y_test, pid_test = slide_and_cut(X_test, Y_test, window_size=window_size, stride=stride, output_pid=True)
    print('after: ')
    print(Counter(Y_train), Counter(Y_val), Counter(Y_test))

    shuffle_pid = np.random.permutation(Y_train.shape[0])
    X_train = X_train[shuffle_pid]
    Y_train = Y_train[shuffle_pid]
    X_train = np.expand_dims(X_train, 1)
    X_val = np.expand_dims(X_val, 1)
    X_test = np.expand_dims(X_test, 1)
    trainset = ECGDataset(X_train, Y_train)
    valset = ECGDataset(X_val, Y_val, pid_val)
    testset = ECGDataset(X_test, Y_test, pid_test)
    return (trainset, valset, testset)
def slide_and_cut(X, Y, window_size, stride, output_pid=False, datatype=4):
    out_X = []
    out_Y = []
    out_pid = []
    n_sample = X.shape[0]
    mode = 0
    for i in range(n_sample):
        tmp_ts = X[i]
        tmp_Y = Y[i]
        # Use a smaller stride for the rarer classes so they yield more windows.
        if tmp_Y == 0:
            i_stride = stride
        elif tmp_Y == 1:
            if datatype == 4:
                i_stride = stride // 6
            elif datatype == 2:
                i_stride = stride // 10
            elif datatype == 2.1:
                i_stride = stride // 7
        elif tmp_Y == 2:
            i_stride = stride // 2
        elif tmp_Y == 3:
            i_stride = stride // 20
        for j in range(0, len(tmp_ts) - window_size, i_stride):
            out_X.append(tmp_ts[j:j + window_size])
            out_Y.append(tmp_Y)
            out_pid.append(i)
    if output_pid:
        return (np.array(out_X), np.array(out_Y), np.array(out_pid))
    else:
        return (np.array(out_X), np.array(out_Y))
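A toy sketch of slide_and_cut on synthetic 1-D signals. The shapes and labels are hypothetical; it only illustrates the sliding-window cut and the per-class stride.

import numpy as np

X = np.random.randn(3, 3000)   # three recordings, 3000 samples each
Y = np.array([0, 1, 2])        # one label per recording
wx, wy, pid = slide_and_cut(X, Y, window_size=1000, stride=500, output_pid=True)
print(wx.shape, wy.shape)      # classes 1 and 2 produce more windows than class 0
print(np.bincount(pid))        # windows contributed by each original recording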
def load_deepsea_data(path, train):
    data = np.load(os.path.join(path, 'deepsea_filtered.npz'))

    train_data = torch.from_numpy(data['x_train']).type(torch.FloatTensor)
    train_labels = torch.from_numpy(data['y_train']).type(torch.LongTensor)
    train_data = train_data.permute(0, 2, 1)
    trainset = data_utils.TensorDataset(train_data, train_labels)

    val_data = torch.from_numpy(data['x_val']).type(torch.FloatTensor)
    val_labels = torch.from_numpy(data['y_val']).type(torch.LongTensor)
    val_data = val_data.permute(0, 2, 1)
    valset = data_utils.TensorDataset(val_data, val_labels)

    test_data = torch.from_numpy(data['x_test']).type(torch.FloatTensor)
    test_labels = torch.from_numpy(data['y_test']).type(torch.LongTensor)
    test_data = test_data.permute(0, 2, 1)
    testset = data_utils.TensorDataset(test_data, test_labels)

    return (trainset, valset, testset)
class Network(nn.Module):
    def __init__(self, C, num_classes, layers, genotype, in_channels, drop_path_prob):
        super(Network, self).__init__()
        self._layers = layers
        self.drop_path_prob = 0.0
        stem_multiplier = 3
        C_curr = stem_multiplier * C
        # 1-D stem/pooling variant used for the sequence tasks (ECG, satellite, deepsea).
        self.stem = nn.Sequential(
            nn.Conv1d(in_channels, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm1d(C_curr),
        )
        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            if i in [layers // 3, (2 * layers) // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
        self.global_pooling = nn.AdaptiveAvgPool1d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

    def get_save_states(self):
        return {'state_dict': self.state_dict()}

    def load_states(self, save_states):
        self.load_state_dict(save_states['state_dict'])

    def forward(self, input, **kwargs):
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
        out = self.global_pooling(s1)
        logits = self.classifier(out.reshape(out.size(0), -1))
        return logits
class GAEAEvalTrial(PyTorchTrial):
    def __init__(self, context: PyTorchTrialContext) -> None:
        self.context = context
        self.hparams = AttrDict(context.get_hparams())
        self.data_config = context.get_data_config()
        if self.context.get_hparam('task') == 'deepsea':
            # deepsea is multi-label, so use a sigmoid/BCE head instead of accuracy.
            self.criterion = nn.BCEWithLogitsLoss().cuda()
            self.accuracy = False
        else:
            self.criterion = nn.CrossEntropyLoss().cuda()
            self.accuracy = True
        self.download_directory = self.download_data_from_s3()
        self.last_epoch_idx = -1
        self.model = self.context.wrap_model(self.build_model_from_config())
        print('param size = %f MB' % count_parameters_in_MB(self.model))
        self.optimizer = self.context.wrap_optimizer(
            torch.optim.SGD(
                self.model.parameters(),
                lr=self.context.get_hparam('learning_rate'),
                momentum=self.context.get_hparam('momentum'),
                weight_decay=self.context.get_hparam('weight_decay'),
            )
        )
        self.lr_scheduler = self.context.wrap_lr_scheduler(
            lr_scheduler=CosineAnnealingLR(self.optimizer, 150.0, 0),
            step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
        )

    def build_model_from_config(self):
        genotype = genotypes[self.context.get_hparam('task')]
        print(self.context.get_hparam('task'))
        print(genotype)
        dataset_hypers = {'ECG': (4, 1), 'satellite': (24, 1), 'deepsea': (36, 4)}
        n_classes, in_channels = dataset_hypers[self.context.get_hparam('task')]
        model = Network(
            self.context.get_hparam('init_channels'), n_classes,
            self.context.get_hparam('layers'), genotype,
            in_channels=in_channels,
            drop_path_prob=self.context.get_hparam('drop_path_prob'),
        )
        return model

    def get_genotype_from_hps(self):
        cell_config = {'normal': [], 'reduce': []}
        for cell in ['normal', 'reduce']:
            for node in range(4):
                for edge in [1, 2]:
                    edge_ind = self.hparams['{}_node{}_edge{}'.format(cell, node + 1, edge)]
                    edge_op = self.hparams['{}_node{}_edge{}_op'.format(cell, node + 1, edge)]
                    cell_config[cell].append((edge_op, edge_ind))
        print(cell_config)
        return Genotype(
            normal=cell_config['normal'], normal_concat=range(2, 6),
            reduce=cell_config['reduce'], reduce_concat=range(2, 6),
        )

    def download_data_from_s3(self):
        """Download data from s3 to store in temp directory."""
        s3_bucket = self.context.get_data_config()['bucket']
        download_directory = f'/tmp/data-rank{self.context.distributed.get_rank()}'
        s3 = boto3.client('s3')
        os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.context.get_hparam('task'), download_directory)
        self.train_data, _, self.val_data = load_data(self.context.get_hparam('task'), download_directory, False)
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        train_data = self.train_data
        del self.train_data
        train_queue = DataLoader(
            train_data, batch_size=self.context.get_per_slot_batch_size(), shuffle=True,
            pin_memory=True, num_workers=self.data_config['num_workers_train'],
        )
        return train_queue

    def build_validation_data_loader(self) -> DataLoader:
        valid_data = self.val_data
        valid_queue = DataLoader(
            valid_data, batch_size=self.context.get_per_slot_batch_size(), shuffle=False,
            pin_memory=True, num_workers=self.data_config['num_workers_val'],
        )
        return valid_queue

    def train_batch(self, batch: Any, epoch_idx: int, batch_idx: int) -> Dict[str, torch.Tensor]:
        torch.cuda.empty_cache()
        if batch_idx == 0 or self.last_epoch_idx < epoch_idx:
            current_lr = self.lr_scheduler.get_last_lr()[0]
            self.model.drop_path_prob = self.context.get_hparam('drop_path_prob') * epoch_idx / 150.0
            self.last_epoch_idx = epoch_idx
        input, target = batch
        self.model.train()
        del batch
        if self.context.get_hparam('task') == 'deepsea':
            target = target.float()
        logits = self.model(input)
        loss = self.criterion(logits, target)
        torch.cuda.empty_cache()
        self.context.backward(loss)
        self.context.step_optimizer(
            self.optimizer,
            clip_grads=lambda params: torch.nn.utils.clip_grad_norm_(
                params, self.context.get_hparam('clip_gradients_l2_norm')
            ),
        )
        torch.cuda.empty_cache()
        return {'loss': loss}

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        if self.hparams.task == 'ECG':
            return self.evaluate_full_dataset_ECG(data_loader)
        elif self.hparams.task == 'satellite':
            return self.evaluate_full_dataset_satellite(data_loader)
        elif self.hparams.task == 'deepsea':
            return self.evaluate_full_dataset_deepsea(data_loader)
        return None

    def evaluate_full_dataset_ECG(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        loss_avg = AverageMeter()
        all_pred_prob = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.criterion(logits, target)
                loss_avg.update(loss, n)
                all_pred_prob.append(logits.cpu().data.numpy())
        all_pred_prob = np.concatenate(all_pred_prob)
        all_pred = np.argmax(all_pred_prob, axis=1)
        # Aggregate window-level predictions back to one majority-vote prediction per recording (pid).
        final_pred = []
        final_gt = []
        pid_test = self.val_data.pid
        for i_pid in np.unique(pid_test):
            tmp_pred = all_pred[pid_test == i_pid]
            tmp_gt = self.val_data.label[pid_test == i_pid]
            final_pred.append(Counter(tmp_pred).most_common(1)[0][0])
            final_gt.append(Counter(tmp_gt).most_common(1)[0][0])
        tmp_report = classification_report(final_gt, final_pred, output_dict=True)
        print(confusion_matrix(final_gt, final_pred))
        f1_score = (tmp_report['0']['f1-score'] + tmp_report['1']['f1-score']
                    + tmp_report['2']['f1-score'] + tmp_report['3']['f1-score']) / 4
        results = {'loss': loss_avg.avg, 'score': f1_score}
        return results

    def evaluate_full_dataset_satellite(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        acc_top1 = AverageMeter()
        acc_top5 = AverageMeter()
        loss_avg = AverageMeter()
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.criterion(logits, target)
                top1, top5 = accuracy(logits, target, topk=(1, 5))
                acc_top1.update(top1.item(), n)
                acc_top5.update(top5.item(), n)
                loss_avg.update(loss, n)
        results = {'loss': loss_avg.avg, 'top1_accuracy': acc_top1.avg, 'top5_accuracy': acc_top5.avg}
        return results

    def evaluate_full_dataset_deepsea(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        loss_avg = AverageMeter()
        test_predictions = []
        test_gts = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.criterion(logits, target.float())
                loss_avg.update(loss, n)
                logits_sigmoid = torch.sigmoid(logits)
                test_predictions.append(logits_sigmoid.detach().cpu().numpy())
                test_gts.append(target.detach().cpu().numpy())
        test_predictions = np.concatenate(test_predictions).astype(np.float32)
        test_gts = np.concatenate(test_gts).astype(np.int32)
        stats = calculate_stats(test_predictions, test_gts)
        mAP = np.mean([stat['AP'] for stat in stats])
        mAUC = np.mean([stat['auc'] for stat in stats])
        results = {'test_mAUC': mAUC, 'test_mAP': mAP}
        return results
class BilevelDataset(Dataset): def __init__(self, dataset): '\n We will split the data into a train split and a validation split\n and return one image from each split as a single observation.\n\n Args:\n dataset: PyTorch Dataset object\n ' inds = np.arange(len(dataset)) self.dataset = dataset n_train = int((0.5 * len(inds))) self.train_inds = inds[0:n_train] self.val_inds = inds[n_train:(2 * n_train)] assert (len(self.train_inds) == len(self.val_inds)) def shuffle_val_inds(self): np.random.shuffle(self.val_inds) def __len__(self): return len(self.train_inds) def __getitem__(self, idx): train_ind = self.train_inds[idx] val_ind = self.val_inds[idx] (x_train, y_train) = self.dataset[train_ind] (x_val, y_val) = self.dataset[val_ind] return (x_train, y_train, x_val, y_val)
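# Minimal usage sketch for the BilevelDataset wrapper above (assumes torch is installed
# and the class above is in scope). It shows that each item bundles one training example
# and one validation example, so a single DataLoader batch can drive both the
# shared-weight update and the architecture update. The toy TensorDataset is illustrative only.
import torch
from torch.utils.data import DataLoader, TensorDataset

if __name__ == '__main__':
    base = TensorDataset(torch.randn(100, 3), torch.randint(0, 4, (100,)))
    bilevel = BilevelDataset(base)          # 50 train indices / 50 val indices
    loader = DataLoader(bilevel, batch_size=8, shuffle=True)
    x_train, y_train, x_val, y_val = next(iter(loader))
    print(x_train.shape, y_train.shape, x_val.shape, y_val.shape)
    bilevel.shuffle_val_inds()              # re-pair train/val examples, e.g. once per epoch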
def download_from_s3(s3_bucket, task, download_dir): s3 = boto3.client('s3') if (task == 'ECG'): data_files = ['challenge2017.pkl'] s3_folder = 'ECG' elif (task == 'satellite'): data_files = ['satellite_train.npy', 'satellite_test.npy'] s3_folder = 'satellite' elif (task == 'deepsea'): data_files = ['deepsea_filtered.npz'] s3_folder = 'deepsea' else: raise NotImplementedError for data_file in data_files: filepath = os.path.join(download_dir, data_file) if (s3_folder is not None): s3_path = os.path.join(s3_folder, data_file) else: s3_path = data_file if (not os.path.exists(filepath)): s3.download_file(s3_bucket, s3_path, filepath) return
class ECGDataset(Dataset): def __init__(self, data, label, pid=None): self.data = data self.label = label self.pid = pid def __getitem__(self, index): return (torch.tensor(self.data[index], dtype=torch.float), torch.tensor(self.label[index], dtype=torch.long)) def __len__(self): return len(self.data)
def load_data(task, path, train=True): if (task == 'ECG'): return load_ECG_data(path, train) elif (task == 'satellite'): return load_satellite_data(path, train) elif (task == 'deepsea'): return load_deepsea_data(path, train) else: raise NotImplementedError
def load_ECG_data(path, train): return (read_data_physionet_4_with_val(path) if train else read_data_physionet_4(path))
def load_satellite_data(path, train): train_file = os.path.join(path, 'satellite_train.npy') test_file = os.path.join(path, 'satellite_test.npy') (all_train_data, all_train_labels) = (np.load(train_file, allow_pickle=True)[()]['data'], np.load(train_file, allow_pickle=True)[()]['label']) (test_data, test_labels) = (np.load(test_file, allow_pickle=True)[()]['data'], np.load(test_file, allow_pickle=True)[()]['label']) all_train_labels = (all_train_labels - 1) test_labels = (test_labels - 1) all_train_data = ((all_train_data - all_train_data.mean(axis=1, keepdims=True)) / all_train_data.std(axis=1, keepdims=True)) test_data = ((test_data - test_data.mean(axis=1, keepdims=True)) / test_data.std(axis=1, keepdims=True)) all_train_data = np.expand_dims(all_train_data, 1) test_data = np.expand_dims(test_data, 1) (all_train_tensors, all_train_labeltensor) = (torch.from_numpy(all_train_data).type(torch.FloatTensor), torch.from_numpy(all_train_labels).type(torch.LongTensor)) (test_tensors, test_labeltensor) = (torch.from_numpy(test_data).type(torch.FloatTensor), torch.from_numpy(test_labels).type(torch.LongTensor)) testset = data_utils.TensorDataset(test_tensors, test_labeltensor) if train: len_val = len(test_data) (train_tensors, train_labeltensor) = (all_train_tensors[:(- len_val)], all_train_labeltensor[:(- len_val)]) (val_tensors, val_labeltensor) = (all_train_tensors[(- len_val):], all_train_labeltensor[(- len_val):]) trainset = data_utils.TensorDataset(train_tensors, train_labeltensor) valset = data_utils.TensorDataset(val_tensors, val_labeltensor) return (trainset, valset, testset) trainset = data_utils.TensorDataset(all_train_tensors, all_train_labeltensor) return (trainset, None, testset)
def read_data_physionet_4(path, window_size=1000, stride=500): with open(os.path.join(path, 'challenge2017.pkl'), 'rb') as fin: res = pickle.load(fin) all_data = res['data'] for i in range(len(all_data)): tmp_data = all_data[i] tmp_std = np.std(tmp_data) tmp_mean = np.mean(tmp_data) all_data[i] = ((tmp_data - tmp_mean) / tmp_std) all_label = [] for i in res['label']: if (i == 'N'): all_label.append(0) elif (i == 'A'): all_label.append(1) elif (i == 'O'): all_label.append(2) elif (i == '~'): all_label.append(3) all_label = np.array(all_label) (X_train, X_test, Y_train, Y_test) = train_test_split(all_data, all_label, test_size=0.1, random_state=0) print('before: ') print(Counter(Y_train), Counter(Y_test)) (X_train, Y_train) = slide_and_cut(X_train, Y_train, window_size=window_size, stride=stride) (X_test, Y_test, pid_test) = slide_and_cut(X_test, Y_test, window_size=window_size, stride=stride, output_pid=True) print('after: ') print(Counter(Y_train), Counter(Y_test)) shuffle_pid = np.random.permutation(Y_train.shape[0]) X_train = X_train[shuffle_pid] Y_train = Y_train[shuffle_pid] X_train = np.expand_dims(X_train, 1) X_test = np.expand_dims(X_test, 1) trainset = ECGDataset(X_train, Y_train) testset = ECGDataset(X_test, Y_test, pid_test) return (trainset, None, testset)
def read_data_physionet_4_with_val(path, window_size=1000, stride=500): with open(os.path.join(path, 'challenge2017.pkl'), 'rb') as fin: res = pickle.load(fin) all_data = res['data'] for i in range(len(all_data)): tmp_data = all_data[i] tmp_std = np.std(tmp_data) tmp_mean = np.mean(tmp_data) all_data[i] = ((tmp_data - tmp_mean) / tmp_std) all_label = [] for i in res['label']: if (i == 'N'): all_label.append(0) elif (i == 'A'): all_label.append(1) elif (i == 'O'): all_label.append(2) elif (i == '~'): all_label.append(3) all_label = np.array(all_label) (X_train, X_test, Y_train, Y_test) = train_test_split(all_data, all_label, test_size=0.2, random_state=0) (X_val, X_test, Y_val, Y_test) = train_test_split(X_test, Y_test, test_size=0.5, random_state=0) print('before: ') print(Counter(Y_train), Counter(Y_val), Counter(Y_test)) (X_train, Y_train) = slide_and_cut(X_train, Y_train, window_size=window_size, stride=stride) (X_val, Y_val, pid_val) = slide_and_cut(X_val, Y_val, window_size=window_size, stride=stride, output_pid=True) (X_test, Y_test, pid_test) = slide_and_cut(X_test, Y_test, window_size=window_size, stride=stride, output_pid=True) print('after: ') print(Counter(Y_train), Counter(Y_val), Counter(Y_test)) shuffle_pid = np.random.permutation(Y_train.shape[0]) X_train = X_train[shuffle_pid] Y_train = Y_train[shuffle_pid] X_train = np.expand_dims(X_train, 1) X_val = np.expand_dims(X_val, 1) X_test = np.expand_dims(X_test, 1) trainset = ECGDataset(X_train, Y_train) valset = ECGDataset(X_val, Y_val, pid_val) testset = ECGDataset(X_test, Y_test, pid_test) return (trainset, valset, testset)
def slide_and_cut(X, Y, window_size, stride, output_pid=False, datatype=4): out_X = [] out_Y = [] out_pid = [] n_sample = X.shape[0] mode = 0 for i in range(n_sample): tmp_ts = X[i] tmp_Y = Y[i] if (tmp_Y == 0): i_stride = stride elif (tmp_Y == 1): if (datatype == 4): i_stride = (stride // 6) elif (datatype == 2): i_stride = (stride // 10) elif (datatype == 2.1): i_stride = (stride // 7) elif (tmp_Y == 2): i_stride = (stride // 2) elif (tmp_Y == 3): i_stride = (stride // 20) for j in range(0, (len(tmp_ts) - window_size), i_stride): out_X.append(tmp_ts[j:(j + window_size)]) out_Y.append(tmp_Y) out_pid.append(i) if output_pid: return (np.array(out_X), np.array(out_Y), np.array(out_pid)) else: return (np.array(out_X), np.array(out_Y))
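# Hypothetical worked example for slide_and_cut above (assumes numpy and the function
# above are in scope). With window_size=1000 and stride=500, a class-0 recording is cut
# with stride 500, while a class-3 ('~') recording uses stride 500 // 20 = 25, so rarer
# classes yield many more windows. The random signals below are illustrative only; the
# real inputs come from challenge2017.pkl.
import numpy as np

if __name__ == '__main__':
    X = np.stack([np.random.randn(3000), np.random.randn(3000)])
    Y = np.array([0, 3])
    out_X, out_Y, out_pid = slide_and_cut(X, Y, window_size=1000, stride=500, output_pid=True)
    print(out_X.shape)                       # 4 windows from recording 0, 80 from recording 1
    print(np.bincount(out_Y), np.bincount(out_pid))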
def load_deepsea_data(path, train): data = np.load(os.path.join(path, 'deepsea_filtered.npz')) (train_data, train_labels) = (torch.from_numpy(data['x_train']).type(torch.FloatTensor), torch.from_numpy(data['y_train']).type(torch.LongTensor)) train_data = train_data.permute(0, 2, 1) trainset = data_utils.TensorDataset(train_data, train_labels) (val_data, val_labels) = (torch.from_numpy(data['x_val']).type(torch.FloatTensor), torch.from_numpy(data['y_val']).type(torch.LongTensor)) val_data = val_data.permute(0, 2, 1) valset = data_utils.TensorDataset(val_data, val_labels) (test_data, test_labels) = (torch.from_numpy(data['x_test']).type(torch.FloatTensor), torch.from_numpy(data['y_test']).type(torch.LongTensor)) test_data = test_data.permute(0, 2, 1) testset = data_utils.TensorDataset(test_data, test_labels) return (trainset, valset, testset)
class GenotypeCallback(PyTorchCallback): def __init__(self, context): self.model = context.models[0] def on_validation_end(self, metrics): print(self.model.genotype())
class GAEASearchTrial(PyTorchTrial): def __init__(self, trial_context: PyTorchTrialContext) -> None: self.context = trial_context self.hparams = AttrDict(trial_context.get_hparams()) self.last_epoch = 0 self.download_directory = self.download_data_from_s3() dataset_hypers = {'ECG': (4, 1), 'satellite': (24, 1), 'deepsea': (36, 4)} (n_classes, in_channels) = dataset_hypers[self.hparams.task] if (self.hparams.task == 'deepsea'): criterion = nn.BCEWithLogitsLoss().cuda() self.accuracy = False else: criterion = nn.CrossEntropyLoss().cuda() self.accuracy = True self.model = self.context.wrap_model(Network(self.hparams.init_channels, n_classes, self.hparams.layers, criterion, self.hparams.nodes, k=self.hparams.shuffle_factor, in_channels=in_channels)) total_params = (sum((p.numel() for p in self.model.parameters() if p.requires_grad)) / 1000000.0) print('Parameter size in MB: ', total_params) self.ws_opt = self.context.wrap_optimizer(torch.optim.SGD(self.model.ws_parameters(), self.hparams.learning_rate, momentum=self.hparams.momentum, weight_decay=self.hparams.weight_decay)) self.arch_opt = self.context.wrap_optimizer(EG(self.model.arch_parameters(), self.hparams.arch_learning_rate, (lambda p: (p / p.sum(dim=(- 1), keepdim=True))))) self.lr_scheduler = self.context.wrap_lr_scheduler(lr_scheduler=CosineAnnealingLR(self.ws_opt, self.hparams.scheduler_epochs, self.hparams.min_learning_rate), step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH) def download_data_from_s3(self): 'Download data from s3 to store in temp directory' s3_bucket = self.context.get_data_config()['bucket'] download_directory = f'/tmp/data-rank{self.context.distributed.get_rank()}' s3 = boto3.client('s3') os.makedirs(download_directory, exist_ok=True) download_from_s3(s3_bucket, self.hparams.task, download_directory) (self.train_data, self.val_data, _) = load_data(self.hparams.task, download_directory, True) return download_directory def build_training_data_loader(self) -> DataLoader: "\n For bi-level NAS, we'll need each instance from the dataloader to have one image\n for training shared-weights and another for updating architecture parameters.\n " bilevel = BilevelDataset(self.train_data) self.train_data = bilevel print('Length of bilevel dataset: ', len(bilevel)) return DataLoader(bilevel, batch_size=self.context.get_per_slot_batch_size(), shuffle=True, num_workers=2) def build_validation_data_loader(self) -> DataLoader: valset = self.val_data return DataLoader(valset, batch_size=self.context.get_per_slot_batch_size(), shuffle=False, num_workers=2) def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[(str, torch.Tensor)]: if (epoch_idx != self.last_epoch): self.train_data.shuffle_val_inds() self.last_epoch = epoch_idx (x_train, y_train, x_val, y_val) = batch if (self.hparams.task == 'deepsea'): y_train = y_train.float() y_val = y_val.float() for a in self.model.arch_parameters(): a.requires_grad = False for w in self.model.ws_parameters(): w.requires_grad = True loss = self.model._loss(x_train, y_train) self.context.backward(loss) self.context.step_optimizer(optimizer=self.ws_opt, clip_grads=(lambda params: torch.nn.utils.clip_grad_norm_(params, self.context.get_hparam('clip_gradients_l2_norm')))) arch_loss = 0.0 if (epoch_idx > 10): for a in self.model.arch_parameters(): a.requires_grad = True for w in self.model.ws_parameters(): w.requires_grad = False arch_loss = self.model._loss(x_val, y_val) self.context.backward(arch_loss) self.context.step_optimizer(self.arch_opt) return {'loss': loss, 
'arch_loss': arch_loss} def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]: if (self.hparams.task == 'ECG'): return self.evaluate_full_dataset_ECG(data_loader) elif (self.hparams.task == 'satellite'): return self.evaluate_full_dataset_satellite(data_loader) elif (self.hparams.task == 'deepsea'): return self.evaluate_full_dataset_deepsea(data_loader) return None def evaluate_full_dataset_ECG(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]: loss_avg = AverageMeter() all_pred_prob = [] with torch.no_grad(): for batch in data_loader: batch = self.context.to_device(batch) (input, target) = batch n = input.size(0) logits = self.model(input) loss = self.model._loss(input, target) loss_avg.update(loss, n) all_pred_prob.append(logits.cpu().data.numpy()) all_pred_prob = np.concatenate(all_pred_prob) all_pred = np.argmax(all_pred_prob, axis=1) final_pred = [] final_gt = [] pid_test = self.val_data.pid for i_pid in np.unique(pid_test): tmp_pred = all_pred[(pid_test == i_pid)] tmp_gt = self.val_data.label[(pid_test == i_pid)] final_pred.append(Counter(tmp_pred).most_common(1)[0][0]) final_gt.append(Counter(tmp_gt).most_common(1)[0][0]) tmp_report = classification_report(final_gt, final_pred, output_dict=True) print(confusion_matrix(final_gt, final_pred)) f1_score = ((((tmp_report['0']['f1-score'] + tmp_report['1']['f1-score']) + tmp_report['2']['f1-score']) + tmp_report['3']['f1-score']) / 4) results = {'loss': loss_avg.avg, 'score': f1_score} return results def evaluate_full_dataset_satellite(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]: acc_top1 = AverageMeter() acc_top5 = AverageMeter() loss_avg = AverageMeter() with torch.no_grad(): for batch in data_loader: batch = self.context.to_device(batch) (input, target) = batch n = input.size(0) logits = self.model(input) loss = self.model._loss(input, target) (top1, top5) = accuracy(logits, target, topk=(1, 5)) acc_top1.update(top1.item(), n) acc_top5.update(top5.item(), n) loss_avg.update(loss, n) results = {'loss': loss_avg.avg, 'top1_accuracy': acc_top1.avg, 'top5_accuracy': acc_top5.avg} return results def evaluate_full_dataset_deepsea(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]: loss_avg = AverageMeter() test_predictions = [] test_gts = [] with torch.no_grad(): for batch in data_loader: batch = self.context.to_device(batch) (input, target) = batch n = input.size(0) logits = self.model(input) loss = self.model._loss(input, target.float()) loss_avg.update(loss, n) logits_sigmoid = torch.sigmoid(logits) test_predictions.append(logits_sigmoid.detach().cpu().numpy()) test_gts.append(target.detach().cpu().numpy()) test_predictions = np.concatenate(test_predictions).astype(np.float32) test_gts = np.concatenate(test_gts).astype(np.int32) stats = calculate_stats(test_predictions, test_gts) mAP = np.mean([stat['AP'] for stat in stats]) mAUC = np.mean([stat['auc'] for stat in stats]) results = {'test_mAUC': mAUC, 'test_mAP': mAP} return results def build_callbacks(self): return {'genotype': GenotypeCallback(self.context)}
class EG(Optimizer): def __init__(self, params, lr=required, normalize_fn=(lambda x: x)): if ((lr is not required) and (lr < 0.0)): raise ValueError('Invalid learning rate: {}'.format(lr)) self.normalize_fn = normalize_fn defaults = dict(lr=lr) super(EG, self).__init__(params, defaults) @torch.no_grad() def step(self, closure=None): loss = None if (closure is not None): with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if (p.grad is None): continue d_p = p.grad p.mul_(torch.exp(((- group['lr']) * d_p))) p.data = self.normalize_fn(p.data) return loss
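# Minimal sketch of the exponentiated-gradient (EG) optimizer above, using the same
# row-wise simplex normalization that GAEASearchTrial passes in. Each step multiplies the
# parameters elementwise by exp(-lr * grad) and renormalizes, so every row stays
# non-negative and sums to one. The toy loss is illustrative only.
import torch

if __name__ == '__main__':
    alphas = torch.nn.Parameter(torch.full((2, 4), 0.25))
    opt = EG([alphas], lr=0.1, normalize_fn=lambda p: p / p.sum(dim=-1, keepdim=True))
    loss = (alphas * torch.randn(2, 4)).sum()
    loss.backward()
    opt.step()
    print(alphas.sum(dim=-1))                # each row still sums to ~1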
class BilevelDataset(Dataset): def __init__(self, dataset): '\n We will split the data into a train split and a validation split\n and return one image from each split as a single observation.\n Args:\n dataset: PyTorch Dataset object\n ' inds = np.arange(len(dataset)) self.dataset = dataset n_train = int((0.2 * len(inds))) self.train_inds1 = inds[0:n_train] self.train_inds2 = inds[n_train:(2 * n_train)] self.train_inds3 = inds[(2 * n_train):(3 * n_train)] self.train_inds4 = inds[(3 * n_train):(4 * n_train)] self.val_inds = inds[(4 * n_train):(5 * n_train)] assert (len(self.train_inds1) == len(self.val_inds)) def shuffle_val_inds(self): np.random.shuffle(self.val_inds) def __len__(self): return len(self.train_inds1) def __getitem__(self, idx): train_ind1 = self.train_inds1[idx] train_ind2 = self.train_inds2[idx] train_ind3 = self.train_inds3[idx] train_ind4 = self.train_inds4[idx] val_ind = self.val_inds[idx] (x_train1, y_train1) = self.dataset[train_ind1] (x_train2, y_train2) = self.dataset[train_ind2] (x_train3, y_train3) = self.dataset[train_ind3] (x_train4, y_train4) = self.dataset[train_ind4] (x_val, y_val) = self.dataset[val_ind] return (x_train1, y_train1, x_train2, y_train2, x_train3, y_train3, x_train4, y_train4, x_val, y_val)
class BilevelCosmicDataset(Dataset): def __init__(self, dataset): '\n We will split the data into a train split and a validation split\n and return one image from each split as a single observation.\n Args:\n dataset: PyTorch Dataset object\n ' inds = np.arange(len(dataset)) self.dataset = dataset n_train = int((0.2 * len(inds))) self.train_inds1 = inds[0:n_train] self.train_inds2 = inds[n_train:(2 * n_train)] self.train_inds3 = inds[(2 * n_train):(3 * n_train)] self.train_inds4 = inds[(3 * n_train):(4 * n_train)] self.val_inds = inds[(4 * n_train):(5 * n_train)] assert (len(self.train_inds1) == len(self.val_inds)) def shuffle_val_inds(self): np.random.shuffle(self.val_inds) def __len__(self): return len(self.train_inds1) def __getitem__(self, idx): train_ind1 = self.train_inds1[idx] train_ind2 = self.train_inds2[idx] train_ind3 = self.train_inds3[idx] train_ind4 = self.train_inds4[idx] val_ind = self.val_inds[idx] (img1, mask1, ignore1) = self.dataset[train_ind1] (img2, mask2, ignore2) = self.dataset[train_ind2] (img3, mask3, ignore3) = self.dataset[train_ind3] (img4, mask4, ignore4) = self.dataset[train_ind4] (img_val, mask_val, ignore_val) = self.dataset[val_ind] return (img1, mask1, ignore1, img2, mask2, ignore2, img3, mask3, ignore3, img4, mask4, ignore4, img_val, mask_val, ignore_val)
class ImageNet12(object): def __init__(self, trainFolder, testFolder, num_workers=8, pin_memory=True, size_images=224, scaled_size=256, type_of_data_augmentation='rand_scale', data_config=None): self.data_config = data_config self.trainFolder = trainFolder self.testFolder = testFolder self.num_workers = num_workers self.pin_memory = pin_memory self.patch_dataset = self.data_config.patch_dataset if (not isinstance(size_images, int)): raise ValueError('size_images must be an int. It will be scaled to a square image') self.size_images = size_images self.scaled_size = scaled_size type_of_data_augmentation = type_of_data_augmentation.lower() if (type_of_data_augmentation not in ('rand_scale', 'random_sized')): raise ValueError('type_of_data_augmentation must be either rand-scale or random-sized') self.type_of_data_augmentation = type_of_data_augmentation def _getTransformList(self, aug_type): assert (aug_type in ['rand_scale', 'random_sized', 'week_train', 'validation']) list_of_transforms = [] if (aug_type == 'validation'): list_of_transforms.append(transforms.Resize(self.scaled_size)) list_of_transforms.append(transforms.CenterCrop(self.size_images)) elif (aug_type == 'week_train'): list_of_transforms.append(transforms.Resize(256)) list_of_transforms.append(transforms.RandomCrop(self.size_images)) list_of_transforms.append(transforms.RandomHorizontalFlip()) else: if (aug_type == 'rand_scale'): list_of_transforms.append(transforms_extension.RandomScale(256, 480)) list_of_transforms.append(transforms.RandomCrop(self.size_images)) list_of_transforms.append(transforms.RandomHorizontalFlip()) elif (aug_type == 'random_sized'): list_of_transforms.append(transforms.RandomResizedCrop(self.size_images, scale=(self.data_config.random_sized.min_scale, 1.0))) list_of_transforms.append(transforms.RandomHorizontalFlip()) if self.data_config.color: list_of_transforms.append(transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4)) return transforms.Compose(list_of_transforms) def _getTrainSet(self): train_transform = self._getTransformList(self.type_of_data_augmentation) if (self.data_config.train_data_type == 'img'): train_set = torchvision.datasets.ImageFolder(self.trainFolder, train_transform) elif (self.data_config.train_data_type == 'lmdb'): train_set = lmdb_dataset.ImageFolder(self.trainFolder, os.path.join(self.trainFolder, '../..', 'train_datalist'), train_transform, patch_dataset=self.patch_dataset) self.train_num_examples = train_set.__len__() return train_set def _getWeekTrainSet(self): train_transform = self._getTransformList('week_train') if (self.data_config.train_data_type == 'img'): train_set = torchvision.datasets.ImageFolder(self.trainFolder, train_transform) elif (self.data_config.train_data_type == 'lmdb'): train_set = lmdb_dataset.ImageFolder(self.trainFolder, os.path.join(self.trainFolder, '../..', 'train_datalist'), train_transform, patch_dataset=self.patch_dataset) self.train_num_examples = train_set.__len__() return train_set def _getTestSet(self): test_transform = self._getTransformList('validation') if (self.data_config.val_data_type == 'img'): test_set = torchvision.datasets.ImageFolder(self.testFolder, test_transform) elif (self.data_config.val_data_type == 'lmdb'): test_set = lmdb_dataset.ImageFolder(self.testFolder, os.path.join(self.testFolder, '../..', 'val_datalist'), test_transform) self.test_num_examples = test_set.__len__() return test_set def getTrainLoader(self, batch_size, shuffle=True): train_set = self._getTrainSet() train_loader = 
torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=None, collate_fn=fast_collate) return train_loader def getWeekTrainLoader(self, batch_size, shuffle=True): train_set = self._getWeekTrainSet() train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=self.pin_memory, collate_fn=fast_collate) return train_loader def getTestLoader(self, batch_size, shuffle=False): test_set = self._getTestSet() test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=None, collate_fn=fast_collate) return test_loader def getTrainTestLoader(self, batch_size, train_shuffle=True, val_shuffle=False): train_loader = self.getTrainLoader(batch_size, train_shuffle) test_loader = self.getTestLoader(batch_size, val_shuffle) return (train_loader, test_loader) def getSetTrainTestLoader(self, batch_size, train_shuffle=True, val_shuffle=False): train_loader = self.getTrainLoader(batch_size, train_shuffle) week_train_loader = self.getWeekTrainLoader(batch_size, train_shuffle) test_loader = self.getTestLoader(batch_size, val_shuffle) return ((train_loader, week_train_loader), test_loader)
class Datum(object): def __init__(self, shape=None, image=None, label=None): self.shape = shape self.image = image self.label = label def SerializeToString(self, img=None): image_data = self.image.astype(np.uint8).tobytes() label_data = np.uint16(self.label).tobytes() return msgpack.packb((image_data + label_data), use_bin_type=True) def ParseFromString(self, raw_data, orig_img): raw_data = msgpack.unpackb(raw_data, raw=False) raw_img_data = raw_data[:(- 2)] image_data = np.frombuffer(raw_img_data, dtype=np.uint8) self.image = cv2.imdecode(image_data, cv2.IMREAD_COLOR) raw_label_data = raw_data[(- 2):] self.label = np.frombuffer(raw_label_data, dtype=np.uint16)
def create_dataset(output_path, image_folder, image_list, image_size): image_name_list = [i.strip() for i in open(image_list)] n_samples = len(image_name_list) env = lmdb.open(output_path, map_size=1099511627776, meminit=False, map_async=True) txn = env.begin(write=True) classes = [d for d in os.listdir(image_folder) if os.path.isdir(os.path.join(image_folder, d))] for (idx, image_name) in enumerate(tqdm(image_name_list)): image_path = os.path.join(image_folder, image_name) label_name = image_name.split('/')[0] label = classes.index(label_name) if (not os.path.isfile(image_path)): raise RuntimeError(('%s does not exist' % image_path)) img = cv2.imread(image_path) img_orig = img if image_size: resize_ratio = (float(image_size) / min(img.shape[0:2])) new_size = (int((img.shape[1] * resize_ratio)), int((img.shape[0] * resize_ratio))) img = cv2.resize(src=img, dsize=new_size) img = cv2.imencode('.JPEG', img)[1] image = np.asarray(img) datum = Datum(image.shape, image, label) txn.put(image_name.encode('ascii'), datum.SerializeToString()) if (((idx + 1) % 1000) == 0): txn.commit() txn = env.begin(write=True) txn.commit() env.sync() env.close() print(f'Created dataset with {n_samples:d} samples')
class Datum(object): def __init__(self, shape=None, image=None, label=None): self.shape = shape self.image = image self.label = label def SerializeToString(self): image_data = self.image.astype(np.uint8).tobytes() label_data = np.uint16(self.label).tobytes() return msgpack.packb((image_data + label_data), use_bin_type=True) def ParseFromString(self, raw_data): raw_data = msgpack.unpackb(raw_data, raw=False) raw_img_data = raw_data[:(- 2)] image_data = np.frombuffer(raw_img_data, dtype=np.uint8) self.image = cv2.imdecode(image_data, cv2.IMREAD_COLOR) raw_label_data = raw_data[(- 2):] self.label = np.frombuffer(raw_label_data, dtype=np.uint16)
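# Hypothetical round-trip sketch for the Datum record above (assumes numpy, cv2 and
# msgpack are installed and the Datum class above is in scope). The image field is
# expected to hold an already-encoded buffer, as produced by cv2.imencode in
# create_dataset; ParseFromString then recovers the decoded image and the uint16 label
# from the packed bytes.
import cv2
import numpy as np

if __name__ == '__main__':
    img = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8)
    ok, encoded = cv2.imencode('.JPEG', img)
    datum = Datum(encoded.shape, np.asarray(encoded), 7)
    raw = datum.SerializeToString()
    decoded = Datum()
    decoded.ParseFromString(raw)
    print(decoded.image.shape, int(decoded.label))   # (32, 32, 3) 7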
class DatasetFolder(data.Dataset): '\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in\n a sample and returns a transformed version.\n E.g, ``transforms.RandomCrop`` for images.\n target_transform (callable, optional): A function/transform that takes\n in the target and transforms it.\n\n Attributes:\n samples (list): List of (sample path, class_index) tuples\n ' def __init__(self, root, list_path, transform=None, target_transform=None, patch_dataset=False): self.root = root self.patch_dataset = patch_dataset if patch_dataset: self.txn = [] for path in os.listdir(root): lmdb_path = os.path.join(root, path) if os.path.isdir(lmdb_path): env = lmdb.open(lmdb_path, readonly=True, lock=False, readahead=False, meminit=False) txn = env.begin(write=False) self.txn.append(txn) else: self.env = lmdb.open(root, readonly=True, lock=False, readahead=False, meminit=False) self.txn = self.env.begin(write=False) self.list_path = list_path self.samples = [image_name.strip() for image_name in open(list_path)] if (len(self.samples) == 0): raise RuntimeError((('Found 0 files in subfolders of: ' + root) + '\n')) self.transform = transform self.target_transform = target_transform def __getitem__(self, index): '\n Args:\n index (int): Index\n\n Returns:\n tuple: (sample, target) where target is class_index of the target class.\n ' img_name = self.samples[index] if self.patch_dataset: txn_index = (index // (len(self.samples) // 10)) if (txn_index == 10): txn_index = 9 txn = self.txn[txn_index] else: txn = self.txn datum = Datum() data_bin = txn.get(img_name.encode('ascii')) if (data_bin is None): raise RuntimeError(f'Key {img_name} not found') datum.ParseFromString(data_bin) sample = Image.fromarray(cv2.cvtColor(datum.image, cv2.COLOR_BGR2RGB)) target = int(datum.label) if (self.transform is not None): sample = self.transform(sample) if (self.target_transform is not None): target = self.target_transform(target) return (sample, target) def __len__(self): return len(self.samples) def __repr__(self): fmt_str = (('Dataset ' + self.__class__.__name__) + '\n') fmt_str += ' Number of datapoints: {}\n'.format(self.__len__()) fmt_str += ' Root Location: {}\n'.format(self.root) tmp = ' Transforms (if any): ' fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp))))) tmp = ' Target Transforms (if any): ' fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp))))) return fmt_str
class ImageFolder(DatasetFolder): def __init__(self, root, list_path, transform=None, target_transform=None, patch_dataset=False): super(ImageFolder, self).__init__(root, list_path, transform=transform, target_transform=target_transform, patch_dataset=patch_dataset) self.imgs = self.samples
def get_list(data_path, output_path): for split in os.listdir(data_path): split_path = os.path.join(data_path, split) if (not os.path.isdir(split_path)): continue f = open(os.path.join(output_path, (split + '_datalist')), 'a+') for sub in os.listdir(split_path): sub_path = os.path.join(split_path, sub) if (not os.path.isdir(sub_path)): continue for image in os.listdir(sub_path): image_name = ((sub + '/') + image) f.writelines((image_name + '\n')) f.close()
def get_list(data_path, output_path): for split in os.listdir(data_path): if (split == 'train'): split_path = os.path.join(data_path, split) if (not os.path.isdir(split_path)): continue f_train = open(os.path.join(output_path, (split + '_datalist')), 'w') f_val = open(os.path.join(output_path, ('val' + '_datalist')), 'w') class_list = os.listdir(split_path) for sub in class_list[:100]: sub_path = os.path.join(split_path, sub) if (not os.path.isdir(sub_path)): continue img_list = os.listdir(sub_path) train_len = int((0.8 * len(img_list))) for image in img_list[:train_len]: image_name = os.path.join(sub, image) f_train.writelines((image_name + '\n')) for image in img_list[train_len:]: image_name = os.path.join(sub, image) f_val.writelines((image_name + '\n')) f_train.close() f_val.close()
class Lighting(object): 'Lighting noise(AlexNet - style PCA - based noise)' def __init__(self, alphastd, eigval, eigvec): self.alphastd = alphastd self.eigval = eigval self.eigvec = eigvec def __call__(self, img): if (self.alphastd == 0): return img alpha = img.new().resize_(3).normal_(0, self.alphastd) rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze() return img.add(rgb.view(3, 1, 1).expand_as(img))
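# Usage sketch for the Lighting transform above (assumes torch and the class above are in
# scope). The eigenvalues/eigenvectors below are the widely copied fb.resnet.torch ImageNet
# PCA constants, included only for illustration rather than taken from this repo's config.
# The transform expects a CHW float tensor, e.g. the output of transforms.ToTensor().
import torch

if __name__ == '__main__':
    imagenet_eigval = torch.tensor([0.2175, 0.0188, 0.0045])
    imagenet_eigvec = torch.tensor([[-0.5675, 0.7192, 0.4009],
                                    [-0.5808, -0.0045, -0.8140],
                                    [-0.5836, -0.6948, 0.4203]])
    lighting = Lighting(0.1, imagenet_eigval, imagenet_eigvec)
    img = torch.rand(3, 224, 224)
    print(lighting(img).shape)               # torch.Size([3, 224, 224])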
class RandomScale(object): 'ResNet style data augmentation' def __init__(self, minSize, maxSize): self.minSize = minSize self.maxSize = maxSize def __call__(self, img): targetSz = int(round(random.uniform(self.minSize, self.maxSize))) return F.resize(img, targetSz)
def generate_arch(task, net_type): update_cfg_from_cfg(search_cfg, cfg) if (task == 'pde'): merge_cfg_from_file('configs/pde_search_cfg_resnet.yaml', cfg) input_shape = (3, 85, 85) elif (task == 'protein'): merge_cfg_from_file('configs/protein_search_cfg_resnet.yaml', cfg) input_shape = (57, 128, 128) elif (task == 'cosmic'): merge_cfg_from_file('configs/cosmic_search_cfg_resnet.yaml', cfg) input_shape = (1, 256, 256) else: raise NotImplementedError config = copy.deepcopy(cfg) pprint.pformat(config) SearchSpace = importlib.import_module(('models.search_space_' + net_type)).Network ArchGenerater = importlib.import_module(('run_apis.derive_arch_' + net_type), __package__).ArchGenerate derivedNetwork = getattr(model_derived, ('%s_Net' % net_type.upper())) der_Net = (lambda net_config: derivedNetwork(net_config, task=task, config=config)) target_params = 99999999 lower_than_target = False while (not lower_than_target): config = copy.deepcopy(cfg) super_model = SearchSpace(config.optim.init_dim, task, config) arch_gener = ArchGenerater(super_model, config) (betas, head_alphas, stack_alphas) = super_model.display_arch_params() derived_arch = arch_gener.derive_archs(betas, head_alphas, stack_alphas) derived_arch_str = '|\n'.join(map(str, derived_arch)) derived_model = der_Net(derived_arch_str) derived_flops = comp_multadds(derived_model, input_size=input_shape) derived_params = utils.count_parameters_in_MB(derived_model) if (derived_params <= (target_params + 1)): print('found arch!') lower_than_target = True print(('Derived Model Mult-Adds = %.2fMB' % derived_flops)) print(('Derived Model Num Params = %.2fMB' % derived_params)) print(derived_arch_str) return derived_arch_str
class MixedOp(nn.Module): def __init__(self, dropped_mixed_ops, softmax_temp=1.0): super(MixedOp, self).__init__() self.softmax_temp = softmax_temp self._ops = nn.ModuleList() for op in dropped_mixed_ops: self._ops.append(op) def forward(self, x, alphas, branch_indices, mixed_sub_obj): op_weights = torch.stack([alphas[branch_index] for branch_index in branch_indices]) op_weights = F.softmax((op_weights / self.softmax_temp), dim=(- 1)) return (sum(((op_weight * op(x)) for (op_weight, op) in zip(op_weights, self._ops))), sum(((op_weight * mixed_sub_obj[branch_index]) for (op_weight, branch_index) in zip(op_weights, branch_indices))))
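# Minimal sketch of how the dropped MixedOp above combines a sampled subset of candidate
# ops (assumes torch and the MixedOp class above, with its module's import of
# torch.nn.functional as F, are in scope). alphas holds logits for the full candidate set,
# branch_indices names the sampled subset, and the output is a softmax-weighted sum of
# both the op outputs and their pre-computed costs. The Identity/Conv ops are stand-ins
# for the real search-space primitives.
import torch
import torch.nn as nn

if __name__ == '__main__':
    ops = [nn.Identity(), nn.Conv2d(8, 8, 3, padding=1)]
    mixed = MixedOp(ops)
    alphas = torch.zeros(5)                       # logits for 5 candidate ops
    branch_indices = torch.tensor([0, 3])         # the two candidates actually kept
    costs = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])
    x = torch.randn(2, 8, 16, 16)
    out, cost = mixed(x, alphas, branch_indices, costs)
    print(out.shape, float(cost))                 # weighted mix of outputs / costs (0.5*1 + 0.5*4)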
class HeadLayer(nn.Module): def __init__(self, dropped_mixed_ops, softmax_temp=1.0): super(HeadLayer, self).__init__() self.head_branches = nn.ModuleList() for mixed_ops in dropped_mixed_ops: self.head_branches.append(MixedOp(mixed_ops, softmax_temp)) def forward(self, inputs, betas, alphas, head_index, head_sub_obj): head_data = [] count_sub_obj = [] for (input_data, head_branch, alpha, head_idx, branch_sub_obj) in zip(inputs, self.head_branches, alphas, head_index, head_sub_obj): (data, sub_obj) = head_branch(input_data, alpha, head_idx, branch_sub_obj) head_data.append(data) count_sub_obj.append(sub_obj) return (sum(((branch_weight * data) for (branch_weight, data) in zip(betas, head_data))), count_sub_obj)
class StackLayers(nn.Module): def __init__(self, num_block_layers, dropped_mixed_ops, softmax_temp=1.0): super(StackLayers, self).__init__() if (num_block_layers != 0): self.stack_layers = nn.ModuleList() for i in range(num_block_layers): self.stack_layers.append(MixedOp(dropped_mixed_ops[i], softmax_temp)) else: self.stack_layers = None def forward(self, x, alphas, stack_index, stack_sub_obj): if (self.stack_layers is not None): count_sub_obj = 0 for (stack_layer, alpha, stack_idx, layer_sub_obj) in zip(self.stack_layers, alphas, stack_index, stack_sub_obj): (x, sub_obj) = stack_layer(x, alpha, stack_idx, layer_sub_obj) count_sub_obj += sub_obj return (x, count_sub_obj) else: return (x, 0)
class Block(nn.Module): def __init__(self, num_block_layers, dropped_mixed_ops, softmax_temp=1.0): super(Block, self).__init__() self.head_layer = HeadLayer(dropped_mixed_ops[0], softmax_temp) self.stack_layers = StackLayers(num_block_layers, dropped_mixed_ops[1], softmax_temp) def forward(self, inputs, betas, head_alphas, stack_alphas, head_index, stack_index, block_sub_obj): (x, head_sub_obj) = self.head_layer(inputs, betas, head_alphas, head_index, block_sub_obj[0]) (x, stack_sub_obj) = self.stack_layers(x, stack_alphas, stack_index, block_sub_obj[1]) return (x, [head_sub_obj, stack_sub_obj])
class Dropped_Network(nn.Module): def __init__(self, super_model, alpha_head_index=None, alpha_stack_index=None, softmax_temp=1.0): super(Dropped_Network, self).__init__() self.softmax_temp = softmax_temp self.input_block = super_model.input_block if hasattr(super_model, 'head_block'): self.head_block = super_model.head_block self.conv1_1_block = super_model.conv1_1_block self.classifier = super_model.classifier self.alpha_head_weights = super_model.alpha_head_weights self.alpha_stack_weights = super_model.alpha_stack_weights self.beta_weights = super_model.beta_weights self.alpha_head_index = (alpha_head_index if (alpha_head_index is not None) else super_model.alpha_head_index) self.alpha_stack_index = (alpha_stack_index if (alpha_stack_index is not None) else super_model.alpha_stack_index) self.config = super_model.config self.input_configs = super_model.input_configs self.output_configs = super_model.output_configs self.sub_obj_list = super_model.sub_obj_list self.blocks = nn.ModuleList() for (i, block) in enumerate(super_model.blocks): input_config = self.input_configs[i] dropped_mixed_ops = [] head_mixed_ops = [] for (j, head_index) in enumerate(self.alpha_head_index[i]): head_mixed_ops.append([block.head_layer.head_branches[j]._ops[k] for k in head_index]) dropped_mixed_ops.append(head_mixed_ops) stack_mixed_ops = [] for (j, stack_index) in enumerate(self.alpha_stack_index[i]): stack_mixed_ops.append([block.stack_layers.stack_layers[j]._ops[k] for k in stack_index]) dropped_mixed_ops.append(stack_mixed_ops) self.blocks.append(Block(input_config['num_stack_layers'], dropped_mixed_ops)) def forward(self, x): "\n To approximate the the total sub_obj(latency/flops), we firstly create the obj list for blocks\n as follows:\n [[[head_flops_1, head_flops_2, ...], stack_flops], ...]\n Then we compute the whole obj approximation from the end to the beginning. 
For block b, \n flops'_b = sum(beta_{bi} * (head_flops_{bi} + stack_flops_{i}) for i in out_idx[b])\n The total flops equals flops'_0\n " sub_obj_list = [] block_datas = [] branch_weights = [] for betas in self.beta_weights: branch_weights.append(F.softmax((betas / self.softmax_temp), dim=(- 1))) if (x.size(3) == 3): x = x.permute(0, 3, 1, 2).contiguous() block_data = self.input_block(x) if hasattr(self, 'head_block'): block_data = self.head_block(block_data) block_datas.append(block_data) sub_obj_list.append([[], torch.tensor(self.sub_obj_list[0]).cuda()]) for i in range((len(self.blocks) + 1)): config = self.input_configs[i] inputs = [block_datas[i] for i in config['in_block_idx']] betas = [branch_weights[block_id][beta_id] for (block_id, beta_id) in zip(config['in_block_idx'], config['beta_idx'])] if (i == len(self.blocks)): (block_data, block_sub_obj) = self.conv1_1_block(inputs, betas, self.sub_obj_list[2]) else: (block_data, block_sub_obj) = self.blocks[i](inputs, betas, self.alpha_head_weights[i], self.alpha_stack_weights[i], self.alpha_head_index[i], self.alpha_stack_index[i], self.sub_obj_list[1][i]) block_datas.append(block_data) sub_obj_list.append(block_sub_obj) out = block_datas[(- 1)] logits = self.classifier(out.permute(0, 2, 3, 1).contiguous()) logits = logits.squeeze() for (i, out_config) in enumerate(self.output_configs[::(- 1)]): block_id = ((len(self.output_configs) - i) - 1) sum_obj = [] for (j, out_id) in enumerate(out_config['out_id']): head_id = self.input_configs[(out_id - 1)]['in_block_idx'].index(block_id) head_obj = sub_obj_list[out_id][0][head_id] stack_obj = sub_obj_list[out_id][1] sub_obj_j = (branch_weights[block_id][j] * (head_obj + stack_obj)) sum_obj.append(sub_obj_j) sub_obj_list[((- i) - 2)][1] += sum(sum_obj) net_sub_obj = (torch.tensor(self.sub_obj_list[(- 1)]).cuda() + sub_obj_list[0][1]) return (logits, net_sub_obj.expand(1)) @property def arch_parameters(self): arch_params = nn.ParameterList() arch_params.extend(self.beta_weights) arch_params.extend(self.alpha_head_weights) arch_params.extend(self.alpha_stack_weights) return arch_params @property def arch_alpha_params(self): alpha_params = nn.ParameterList() alpha_params.extend(self.alpha_head_weights) alpha_params.extend(self.alpha_stack_weights) return alpha_params
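# Illustrative, made-up numbers for the cost recursion sketched in the Dropped_Network
# forward() docstring above: each block's expected cost is its own stack cost plus the
# beta-weighted sum, over its outgoing edges, of that edge's head cost and the downstream
# block's already-accumulated cost, evaluated from the last block back to the first. This
# is a plain-Python sketch of the formula, not a call into the network itself.
if __name__ == '__main__':
    # toy graph: block 0 can route to block 1 or block 2; block 1 routes to block 2
    head_flops = {(0, 1): 10.0, (0, 2): 12.0, (1, 2): 8.0}    # per-edge head-layer cost
    stack_flops = {0: 0.0, 1: 20.0, 2: 30.0}                  # per-block stack-layer cost
    beta = {(0, 1): 0.6, (0, 2): 0.4, (1, 2): 1.0}            # softmaxed branch weights
    acc = {2: stack_flops[2]}                                  # start from the last block
    acc[1] = stack_flops[1] + beta[(1, 2)] * (head_flops[(1, 2)] + acc[2])
    acc[0] = stack_flops[0] + (beta[(0, 1)] * (head_flops[(0, 1)] + acc[1])
                               + beta[(0, 2)] * (head_flops[(0, 2)] + acc[2]))
    print(acc[0])    # expected cost of the sampled paths, before adding stem/classifier costs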
class MixedOp(nn.Module): def __init__(self, C_in, C_out, stride, primitives): super(MixedOp, self).__init__() self._ops = nn.ModuleList() for primitive in primitives: op = OPS[primitive](C_in, C_out, stride, affine=False, track_running_stats=True) self._ops.append(op)
class HeadLayer(nn.Module): def __init__(self, in_chs, ch, strides, config): super(HeadLayer, self).__init__() self.head_branches = nn.ModuleList() for (in_ch, stride) in zip(in_chs, strides): self.head_branches.append(MixedOp(in_ch, ch, stride, config.search_params.PRIMITIVES_head))
class StackLayers(nn.Module): def __init__(self, ch, num_block_layers, config, primitives): super(StackLayers, self).__init__() if (num_block_layers != 0): self.stack_layers = nn.ModuleList() for i in range(num_block_layers): self.stack_layers.append(MixedOp(ch, ch, 1, primitives)) else: self.stack_layers = None
class Block(nn.Module): def __init__(self, in_chs, block_ch, strides, num_block_layers, config): super(Block, self).__init__() assert (len(in_chs) == len(strides)) self.head_layer = HeadLayer(in_chs, block_ch, strides, config) self.stack_layers = StackLayers(block_ch, num_block_layers, config, config.search_params.PRIMITIVES_stack)
class Conv1_1_Branch(nn.Module): def __init__(self, in_ch, block_ch): super(Conv1_1_Branch, self).__init__() self.conv1_1 = nn.Sequential(nn.Conv2d(in_channels=in_ch, out_channels=block_ch, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(block_ch, affine=False, track_running_stats=True), nn.ReLU6(inplace=True)) def forward(self, x): return self.conv1_1(x)
class Conv1_1_Block(nn.Module): def __init__(self, in_chs, block_ch): super(Conv1_1_Block, self).__init__() self.conv1_1_branches = nn.ModuleList() for in_ch in in_chs: self.conv1_1_branches.append(Conv1_1_Branch(in_ch, block_ch)) def forward(self, inputs, betas, block_sub_obj): branch_weights = F.softmax(torch.stack(betas), dim=(- 1)) return (sum(((branch_weight * branch(input_data)) for (input_data, branch, branch_weight) in zip(inputs, self.conv1_1_branches, branch_weights))), [block_sub_obj, 0])
class Network(nn.Module): def __init__(self, init_ch, dataset, config): super(Network, self).__init__() self.config = config self._C_input = init_ch self._head_dim = self.config.optim.head_dim self._dataset = dataset self.initialize() def initialize(self): self._init_block_config() self._create_output_list() self._create_input_list() self._init_betas() self._init_alphas() self._init_sample_branch() def init_model(self, model_init='he_fout', init_div_groups=True): for m in self.modules(): if isinstance(m, nn.Conv2d): if (model_init == 'he_fout'): n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels) if init_div_groups: n /= m.groups m.weight.data.normal_(0, math.sqrt((2.0 / n))) elif (model_init == 'he_fin'): n = ((m.kernel_size[0] * m.kernel_size[1]) * m.in_channels) if init_div_groups: n /= m.groups m.weight.data.normal_(0, math.sqrt((2.0 / n))) else: raise NotImplementedError elif isinstance(m, nn.BatchNorm2d): if (m.affine == True): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): if (m.bias is not None): m.bias.data.zero_() elif isinstance(m, nn.BatchNorm1d): if (m.affine == True): m.weight.data.fill_(1) m.bias.data.zero_() def set_bn_param(self, bn_momentum, bn_eps): for m in self.modules(): if isinstance(m, nn.BatchNorm2d): m.momentum = bn_momentum m.eps = bn_eps return def _init_betas(self): '\n beta weights for the output ch choices in the head layer of the block\n ' self.beta_weights = nn.ParameterList() for block in self.output_configs: num_betas = len(block['out_chs']) self.beta_weights.append(nn.Parameter((0.001 * torch.randn(num_betas)))) def _init_alphas(self): '\n alpha weights for the op type in the block\n ' self.alpha_head_weights = nn.ParameterList() self.alpha_stack_weights = nn.ParameterList() for block in self.input_configs[:(- 1)]: num_head_alpha = len(block['in_block_idx']) self.alpha_head_weights.append(nn.Parameter((0.001 * torch.randn(num_head_alpha, len(self.config.search_params.PRIMITIVES_head))))) num_layers = block['num_stack_layers'] self.alpha_stack_weights.append(nn.Parameter((0.001 * torch.randn(num_layers, len(self.config.search_params.PRIMITIVES_stack))))) @property def arch_parameters(self): arch_params = nn.ParameterList() arch_params.extend(self.beta_weights) arch_params.extend(self.alpha_head_weights) arch_params.extend(self.alpha_stack_weights) return arch_params @property def arch_beta_params(self): return self.beta_weights @property def arch_alpha_params(self): alpha_params = nn.ParameterList() alpha_params.extend(self.alpha_head_weights) alpha_params.extend(self.alpha_stack_weights) return alpha_params def display_arch_params(self, display=True): branch_weights = [] head_op_weights = [] stack_op_weights = [] for betas in self.beta_weights: branch_weights.append(F.softmax(betas, dim=(- 1))) for head_alpha in self.alpha_head_weights: head_op_weights.append(F.softmax(head_alpha, dim=(- 1))) for stack_alpha in self.alpha_stack_weights: stack_op_weights.append(F.softmax(stack_alpha, dim=(- 1))) if display: logging.info(('branch_weights \n' + '\n'.join(map(str, branch_weights)))) if (len(self.config.search_params.PRIMITIVES_head) > 1): logging.info(('head_op_weights \n' + '\n'.join(map(str, head_op_weights)))) logging.info(('stack_op_weights \n' + '\n'.join(map(str, stack_op_weights)))) return ([x.tolist() for x in branch_weights], [x.tolist() for x in head_op_weights], [x.tolist() for x in stack_op_weights]) def _init_sample_branch(self): (_, _) = self.sample_branch('head', 1, training=False) (_, _) = 
self.sample_branch('stack', 1, training=False) def sample_branch(self, params_type, sample_num, training=True, search_stage=1, if_sort=True): '\n the sampling computing is based on torch\n input: params_type\n output: sampled params\n ' def sample(param, weight, sample_num, sample_policy='prob', if_sort=True): if (sample_num >= weight.shape[(- 1)]): sample_policy = 'all' assert (param.shape == weight.shape) assert (sample_policy in ['prob', 'uniform', 'all']) if (param.shape[0] == 0): return ([], []) if (sample_policy == 'prob'): sampled_index = torch.multinomial(weight, num_samples=sample_num, replacement=False) elif (sample_policy == 'uniform'): weight = torch.ones_like(weight) sampled_index = torch.multinomial(weight, num_samples=sample_num, replacement=False) else: sampled_index = torch.arange(start=0, end=weight.shape[(- 1)], step=1, device=weight.device).repeat(param.shape[0], 1) if if_sort: (sampled_index, _) = torch.sort(sampled_index, descending=False) sampled_param_old = torch.gather(param, dim=(- 1), index=sampled_index) return (sampled_param_old, sampled_index) if (params_type == 'head'): params = self.alpha_head_weights elif (params_type == 'stack'): params = self.alpha_stack_weights else: raise TypeError weights = [] sampled_params_old = [] sampled_indices = [] if training: sample_policy = (self.config.search_params.sample_policy if (search_stage == 1) else 'uniform') else: sample_policy = 'all' for param in params: weights.append(F.softmax(param, dim=(- 1))) for (param, weight) in zip(params, weights): (sampled_param_old, sampled_index) = sample(param, weight, sample_num, sample_policy, if_sort) sampled_params_old.append(sampled_param_old) sampled_indices.append(sampled_index) if (params_type == 'head'): self.alpha_head_index = sampled_indices elif (params_type == 'stack'): self.alpha_stack_index = sampled_indices return (sampled_params_old, sampled_indices) def _init_block_config(self): self.block_chs = self.config.search_params.net_scale.chs self.block_fm_sizes = self.config.search_params.net_scale.fm_sizes self.num_blocks = (len(self.block_chs) - 1) self.num_block_layers = self.config.search_params.net_scale.num_layers if hasattr(self.config.search_params.net_scale, 'stage'): self.block_stage = self.config.search_params.net_scale.stage self.block_chs.append(self.config.optim.last_dim) self.block_fm_sizes.append(self.block_fm_sizes[(- 1)]) self.num_block_layers.append(0) def _create_output_list(self): "\n Generate the output config of each block, which contains: \n 'ch': the channel number of the block \n 'out_chs': the possible output channel numbers \n 'strides': the corresponding stride\n " self.output_configs = [] for i in range((len(self.block_chs) - 1)): if hasattr(self, 'block_stage'): stage = self.block_stage[i] output_config = {'ch': self.block_chs[i], 'fm_size': self.block_fm_sizes[i], 'out_chs': [], 'out_fms': [], 'strides': [], 'out_id': [], 'num_stack_layers': self.num_block_layers[i]} for j in range(self.config.search_params.adjoin_connect_nums[stage]): out_index = ((i + j) + 1) if (out_index >= len(self.block_chs)): break if hasattr(self, 'block_stage'): block_stage = getattr(self, 'block_stage') if ((block_stage[out_index] - block_stage[i]) > 1): break fm_size_ratio = (self.block_fm_sizes[i] / self.block_fm_sizes[out_index]) if (fm_size_ratio == 2): output_config['strides'].append(2) elif (fm_size_ratio == 1): output_config['strides'].append(1) else: break output_config['out_chs'].append(self.block_chs[out_index]) 
output_config['out_fms'].append(self.block_fm_sizes[out_index]) output_config['out_id'].append(out_index) self.output_configs.append(output_config) logging.info(('Network output configs: \n' + '\n'.join(map(str, self.output_configs)))) def _create_input_list(self): "\n Generate the input config of each block for constructing the whole network.\n Each config dict contains:\n 'ch': the channel number of the block\n 'in_chs': all the possible input channel numbers\n 'strides': the corresponding stride\n 'in_block_idx': the index of the input block \n 'beta_idx': the corresponding beta weight index.\n " self.input_configs = [] for i in range(1, len(self.block_chs)): input_config = {'ch': self.block_chs[i], 'fm_size': self.block_fm_sizes[i], 'in_chs': [], 'in_fms': [], 'strides': [], 'in_block_idx': [], 'beta_idx': [], 'num_stack_layers': self.num_block_layers[i]} for j in range(i): in_index = ((i - j) - 1) if (in_index < 0): break output_config = self.output_configs[in_index] if (i in output_config['out_id']): beta_idx = output_config['out_id'].index(i) input_config['in_block_idx'].append(in_index) input_config['in_chs'].append(output_config['ch']) input_config['in_fms'].append(output_config['fm_size']) input_config['beta_idx'].append(beta_idx) input_config['strides'].append(output_config['strides'][beta_idx]) else: continue self.input_configs.append(input_config) logging.info(('Network input configs: \n' + '\n'.join(map(str, self.input_configs)))) def get_cost_list(self, data_shape, cost_type='flops', use_gpu=True, meas_times=1000): cost_list = [] block_datas = [] total_cost = 0 if (cost_type == 'flops'): cost_func = (lambda module, data: comp_multadds_fw(module, data, use_gpu)) elif (cost_type == 'latency'): cost_func = (lambda module, data: latency_measure_fw(module, data, meas_times)) else: raise NotImplementedError if (len(data_shape) == 3): input_data = torch.randn(((1,) + tuple(data_shape))) else: input_data = torch.randn(tuple(data_shape)) if use_gpu: input_data = input_data.cuda() (cost, block_data) = cost_func(self.input_block, input_data) cost_list.append(cost) block_datas.append(block_data) total_cost += cost if hasattr(self, 'head_block'): (cost, block_data) = cost_func(self.head_block, block_data) cost_list[0] += cost block_datas[0] = block_data block_flops = [] for (block_id, block) in enumerate(self.blocks): input_config = self.input_configs[block_id] inputs = [block_datas[i] for i in input_config['in_block_idx']] head_branch_flops = [] for (branch_id, head_branch) in enumerate(block.head_layer.head_branches): op_flops = [] for op in head_branch._ops: (cost, block_data) = cost_func(op, inputs[branch_id]) op_flops.append(cost) total_cost += cost head_branch_flops.append(op_flops) stack_layer_flops = [] if (block.stack_layers.stack_layers is not None): for stack_layer in block.stack_layers.stack_layers: op_flops = [] for op in stack_layer._ops: (cost, block_data) = cost_func(op, block_data) if (isinstance(op, operations.Skip) and self.config.optim.sub_obj.skip_reg): cost = (op_flops[0] / 10.0) op_flops.append(cost) total_cost += cost stack_layer_flops.append(op_flops) block_flops.append([head_branch_flops, stack_layer_flops]) block_datas.append(block_data) cost_list.append(block_flops) conv1_1_flops = [] input_config = self.input_configs[(- 1)] inputs = [block_datas[i] for i in input_config['in_block_idx']] for (branch_id, branch) in enumerate(self.conv1_1_block.conv1_1_branches): (cost, block_data) = cost_func(branch, inputs[branch_id]) conv1_1_flops.append(cost) total_cost += 
cost block_datas.append(block_data) cost_list.append(conv1_1_flops) out = block_datas[(- 1)] (cost, out) = cost_func(self.classifier, out.permute(0, 2, 3, 1).contiguous()) cost_list.append(cost) total_cost += cost return (cost_list, total_cost)
class Network(BaseSearchSpace): def __init__(self, init_ch, dataset, config): super(Network, self).__init__(init_ch, dataset, config) self.input_block = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=self._C_input, kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d(self._C_input, affine=False, track_running_stats=True), nn.ReLU6(inplace=True)) self.head_block = OPS['mbconv_k3_t1'](self._C_input, self._head_dim, 1, affine=False, track_running_stats=True) self.blocks = nn.ModuleList() for i in range(self.num_blocks): input_config = self.input_configs[i] self.blocks.append(Block(input_config['in_chs'], input_config['ch'], input_config['strides'], input_config['num_stack_layers'], self.config)) self.conv1_1_block = Conv1_1_Block(self.input_configs[(- 1)]['in_chs'], self.config.optim.last_dim) self.global_pooling = nn.AdaptiveAvgPool2d(1) self.classifier = nn.Linear(self.config.optim.last_dim, self._num_classes) self.init_model(model_init=config.optim.init_mode) self.set_bn_param(self.config.optim.bn_momentum, self.config.optim.bn_eps)
class Network(BaseSearchSpace): def __init__(self, init_ch, dataset, config, groups=1, base_width=64, dilation=1, norm_layer=None): super(Network, self).__init__(init_ch, dataset, config) if (norm_layer is None): norm_layer = nn.BatchNorm2d if ((groups != 1) or (base_width != 64)): raise ValueError('BasicBlock only supports groups=1 and base_width=64') if (dilation > 1): raise NotImplementedError('Dilation > 1 not supported in BasicBlock') dataset_hypers = {'pde': (1, 3), 'protein': (1, 57), 'cosmic': (1, 1)} (n_classes, in_channels) = dataset_hypers[dataset] self.input_block = nn.Sequential(nn.Conv2d(in_channels, self._C_input, kernel_size=3, stride=1, padding=1, bias=False), norm_layer(self._C_input), nn.ReLU(inplace=True)) self.blocks = nn.ModuleList() for i in range(self.num_blocks): input_config = self.input_configs[i] self.blocks.append(Block(input_config['in_chs'], input_config['ch'], input_config['strides'], input_config['num_stack_layers'], self.config)) if ('bottle_neck' in self.config.search_params.PRIMITIVES_stack): conv1_1_input_dim = [(ch * 4) for ch in self.input_configs[(- 1)]['in_chs']] last_dim = (self.config.optim.last_dim * 4) else: conv1_1_input_dim = self.input_configs[(- 1)]['in_chs'] last_dim = self.config.optim.last_dim self.conv1_1_block = Conv1_1_Block(conv1_1_input_dim, last_dim) print(n_classes) self.classifier = nn.Linear(last_dim, 1) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): if (m.affine == True): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0)
class BaseArchGenerate(object): def __init__(self, super_network, config): self.config = config self.num_blocks = len(super_network.block_chs) self.super_chs = super_network.block_chs self.input_configs = super_network.input_configs def update_arch_params(self, betas, head_alphas, stack_alphas): self.betas = betas self.head_alphas = head_alphas self.stack_alphas = stack_alphas def derive_chs(self): '\n using viterbi algorithm to choose the best path of the super net\n ' path_p_max = [] path_p_max.append([0, 1]) for input_config in self.input_configs: block_path_prob_max = [None, 0] for (in_block_id, beta_id) in zip(input_config['in_block_idx'], input_config['beta_idx']): path_prob = (path_p_max[in_block_id][1] * self.betas[in_block_id][beta_id]) if (path_prob > block_path_prob_max[1]): block_path_prob_max = [in_block_id, path_prob] path_p_max.append(block_path_prob_max) ch_idx = (len(path_p_max) - 1) ch_path = [] ch_path.append(ch_idx) while 1: ch_idx = path_p_max[ch_idx][0] ch_path.append(ch_idx) if (ch_idx == 0): break derived_chs = [self.super_chs[ch_id] for ch_id in ch_path] ch_path = ch_path[::(- 1)] derived_chs = derived_chs[::(- 1)] return (ch_path, derived_chs) def derive_ops(self, alpha, alpha_type): assert (alpha_type in ['head', 'stack']) if (alpha_type == 'head'): op_type = self.config.search_params.PRIMITIVES_head[alpha.index(max(alpha))] elif (alpha_type == 'stack'): op_type = self.config.search_params.PRIMITIVES_stack[alpha.index(max(alpha))] return op_type def derive_archs(self, betas, head_alphas, stack_alphas, if_display=True): raise NotImplementedError
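# Hedged illustration with toy data (not taken from the repo): derive_chs above is a
# Viterbi-style dynamic program -- every node keeps only its most probable incoming path,
# and the best channel path is recovered by backtracking from the last node. The same
# logic on a hand-made 4-node graph:
def toy_derive_path(input_configs, betas):
    # path_p_max[i] = [best predecessor of node i, probability of the best path to i]
    path_p_max = [[0, 1.0]]
    for cfg in input_configs:
        best = [None, 0.0]
        for in_id, beta_id in zip(cfg['in_block_idx'], cfg['beta_idx']):
            p = path_p_max[in_id][1] * betas[in_id][beta_id]
            if p > best[1]:
                best = [in_id, p]
        path_p_max.append(best)
    # backtrack from the last node to node 0
    idx, path = len(path_p_max) - 1, []
    while idx != 0:
        path.append(idx)
        idx = path_p_max[idx][0]
    path.append(0)
    return path[::-1]

# Nodes 0..3; nodes 1 and 2 connect from 0, node 3 connects from either 1 or 2.
toy_configs = [
    {'in_block_idx': [0], 'beta_idx': [0]},
    {'in_block_idx': [0], 'beta_idx': [1]},
    {'in_block_idx': [1, 2], 'beta_idx': [0, 0]},
]
toy_betas = {0: [0.7, 0.3], 1: [0.9], 2: [0.4]}
print(toy_derive_path(toy_configs, toy_betas))   # -> [0, 1, 3]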
class ArchGenerate(BaseArchGenerate): def __init__(self, super_network, config): super(ArchGenerate, self).__init__(super_network, config) def derive_archs(self, betas, head_alphas, stack_alphas, if_display=True): self.update_arch_params(betas, head_alphas, stack_alphas) derived_archs = [[[self.config.optim.init_dim, self.config.optim.head_dim], 'mbconv_k3_t1', [], 0, 1]] (ch_path, derived_chs) = self.derive_chs() layer_count = 0 for (i, (ch_idx, ch)) in enumerate(zip(ch_path, derived_chs)): if ((ch_idx == 0) or (i == (len(derived_chs) - 1))): continue block_idx = (ch_idx - 1) input_config = self.input_configs[block_idx] head_id = input_config['in_block_idx'].index(ch_path[(i - 1)]) head_alpha = self.head_alphas[block_idx][head_id] head_op = self.derive_ops(head_alpha, 'head') stride = input_config['strides'][input_config['in_block_idx'].index(ch_path[(i - 1)])] stack_ops = [] for stack_alpha in self.stack_alphas[block_idx]: stack_op = self.derive_ops(stack_alpha, 'stack') if (stack_op != 'skip_connect'): stack_ops.append(stack_op) layer_count += 1 derived_archs.append([[derived_chs[(i - 1)], ch], head_op, stack_ops, len(stack_ops), stride]) derived_archs.append([[derived_chs[(- 2)], self.config.optim.last_dim], 'conv1_1']) layer_count += len(derived_archs) if if_display: logging.info(('Derived arch: \n' + '|\n'.join(map(str, derived_archs)))) logging.info('Total {} layers.'.format(layer_count)) return derived_archs
class ArchGenerate(BaseArchGenerate): def __init__(self, super_network, config): super(ArchGenerate, self).__init__(super_network, config) def derive_archs(self, betas, head_alphas, stack_alphas, if_display=True): self.update_arch_params(betas, head_alphas, stack_alphas) derived_archs = [] (ch_path, derived_chs) = self.derive_chs() layer_count = 0 for (i, (ch_idx, ch)) in enumerate(zip(ch_path, derived_chs)): if ((ch_idx == 0) or (i == (len(derived_chs) - 1))): continue block_idx = (ch_idx - 1) input_config = self.input_configs[block_idx] head_id = input_config['in_block_idx'].index(ch_path[(i - 1)]) head_alpha = self.head_alphas[block_idx][head_id] head_op = self.derive_ops(head_alpha, 'head') stride = input_config['strides'][input_config['in_block_idx'].index(ch_path[(i - 1)])] stack_ops = [] for stack_alpha in self.stack_alphas[block_idx]: stack_op = self.derive_ops(stack_alpha, 'stack') if (stack_op != 'skip_connect'): stack_ops.append(stack_op) layer_count += 1 derived_archs.append([[derived_chs[(i - 1)], ch], head_op, stack_ops, len(stack_ops), stride]) layer_count += len(derived_archs) if if_display: logging.info(('Derived arch: \n' + '|\n'.join(map(str, derived_archs)))) logging.info('Total {} layers.'.format(layer_count)) return derived_archs
class Optimizer(object):

    def __init__(self, model, criterion, config):
        self.config = config
        self.weight_sample_num = self.config.search_params.weight_sample_num
        self.criterion = criterion
        self.Dropped_Network = lambda model: Dropped_Network(
            model, softmax_temp=config.search_params.softmax_temp)

        arch_params_id = list(map(id, model.module.arch_parameters))
        weight_params = filter(lambda p: id(p) not in arch_params_id, model.parameters())

        self.weight_optimizer = torch.optim.SGD(
            weight_params,
            config.optim.weight.init_lr,
            momentum=config.optim.weight.momentum,
            weight_decay=config.optim.weight.weight_decay)
        self.arch_optimizer = torch.optim.Adam(
            [{'params': model.module.arch_alpha_params, 'lr': config.optim.arch.alpha_lr},
             {'params': model.module.arch_beta_params, 'lr': config.optim.arch.beta_lr}],
            betas=(0.5, 0.999),
            weight_decay=config.optim.arch.weight_decay)

    def arch_step(self, input_valid, target_valid, model, search_stage):
        head_sampled_w_old, alpha_head_index = model.module.sample_branch(
            'head', 2, search_stage=search_stage)
        stack_sampled_w_old, alpha_stack_index = model.module.sample_branch(
            'stack', 2, search_stage=search_stage)
        self.arch_optimizer.zero_grad()

        dropped_model = nn.DataParallel(self.Dropped_Network(model))
        logits, sub_obj = dropped_model(input_valid)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_valid)
        if self.config.optim.if_sub_obj:
            loss_sub_obj = torch.log(sub_obj) / torch.log(
                torch.tensor(self.config.optim.sub_obj.log_base))
            sub_loss_factor = self.config.optim.sub_obj.sub_loss_factor
            loss += loss_sub_obj * sub_loss_factor
        loss.backward()
        self.arch_optimizer.step()

        self.rescale_arch_params(head_sampled_w_old, stack_sampled_w_old,
                                 alpha_head_index, alpha_stack_index, model)
        return logits.detach(), loss.item(), sub_obj.item()

    def weight_step(self, *args, **kwargs):
        return self.weight_step_(*args, **kwargs)

    def weight_step_(self, input_train, target_train, model, search_stage):
        _, _ = model.module.sample_branch('head', self.weight_sample_num, search_stage=search_stage)
        _, _ = model.module.sample_branch('stack', self.weight_sample_num, search_stage=search_stage)

        self.weight_optimizer.zero_grad()
        dropped_model = nn.DataParallel(self.Dropped_Network(model))
        logits, sub_obj = dropped_model(input_train)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_train)
        loss.backward()
        self.weight_optimizer.step()
        return logits.detach(), loss.item(), sub_obj.item()

    def valid_step(self, input_valid, target_valid, model):
        _, _ = model.module.sample_branch('head', 1, training=False)
        _, _ = model.module.sample_branch('stack', 1, training=False)

        dropped_model = nn.DataParallel(self.Dropped_Network(model))
        logits, sub_obj = dropped_model(input_valid)
        sub_obj = torch.mean(sub_obj)
        loss = self.criterion(logits, target_valid)
        return logits, loss.item(), sub_obj.item()

    def rescale_arch_params(self, alpha_head_weights_drop, alpha_stack_weights_drop,
                            alpha_head_index, alpha_stack_index, model):

        def comp_rescale_value(old_weights, new_weights, index):
            old_exp_sum = old_weights.exp().sum()
            new_drop_arch_params = torch.gather(new_weights, dim=-1, index=index)
            new_exp_sum = new_drop_arch_params.exp().sum()
            rescale_value = torch.log(old_exp_sum / new_exp_sum)
            rescale_mat = torch.zeros_like(new_weights).scatter_(0, index, rescale_value)
            return rescale_value, rescale_mat

        def rescale_params(old_weights, new_weights, indices):
            for i, (old_weights_block, indices_block) in enumerate(zip(old_weights, indices)):
                for j, (old_weights_branch, indices_branch) in enumerate(
                        zip(old_weights_block, indices_block)):
                    rescale_value, rescale_mat = comp_rescale_value(
                        old_weights_branch, new_weights[i][j], indices_branch)
                    new_weights[i][j].data.add_(rescale_mat)

        rescale_params(alpha_head_weights_drop, model.module.alpha_head_weights, alpha_head_index)
        rescale_params(alpha_stack_weights_drop, model.module.alpha_stack_weights, alpha_stack_index)

    def set_param_grad_state(self, stage):
        def set_grad_state(params, state):
            for group in params:
                for param in group['params']:
                    param.requires_grad_(state)
        if stage == 'Arch':
            state_list = [True, False]
        elif stage == 'Weights':
            state_list = [False, True]
        else:
            state_list = [False, False]
        set_grad_state(self.arch_optimizer.param_groups, state_list[0])
        set_grad_state(self.weight_optimizer.param_groups, state_list[1])
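# Hedged numeric check (illustration only, not part of the repo): rescale_arch_params keeps
# the exp-sum of the sampled architecture logits equal to its value before the sampled
# update, by adding log(old_exp_sum / new_exp_sum) to exactly the sampled entries. A small
# 4-op example of the same arithmetic:
import torch

alphas = torch.tensor([0.2, 1.0, -0.5, 0.7])     # full alpha vector for one branch
index = torch.tensor([1, 3])                      # the two ops that were sampled
old_sampled = alphas[index].clone()               # weights recorded before the arch step

alphas[index] += torch.tensor([0.3, -0.1])        # pretend the optimizer updated them

rescale = torch.log(old_sampled.exp().sum() / alphas[index].exp().sum())
alphas[index] += rescale                          # what rescale_arch_params applies

print(torch.isclose(alphas[index].exp().sum(), old_sampled.exp().sum()))   # tensor(True)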
class Trainer(object): def __init__(self, train_data, val_data, optimizer=None, criterion=None, scheduler=None, config=None, report_freq=None): self.train_data = train_data self.val_data = val_data self.optimizer = optimizer self.criterion = criterion self.scheduler = scheduler self.config = config self.report_freq = report_freq def train(self, model, epoch): objs = utils.AverageMeter() top1 = utils.AverageMeter() top5 = utils.AverageMeter() data_time = utils.AverageMeter() batch_time = utils.AverageMeter() model.train() start = time.time() prefetcher = data_prefetcher(self.train_data) (input, target) = prefetcher.next() step = 0 while (input is not None): data_t = (time.time() - start) self.scheduler.step() n = input.size(0) if (step == 0): logging.info('epoch %d lr %e', epoch, self.optimizer.param_groups[0]['lr']) self.optimizer.zero_grad() logits = model(input) if self.config.optim.label_smooth: loss = self.criterion(logits, target, self.config.optim.smooth_alpha) else: loss = self.criterion(logits, target) loss.backward() if self.config.optim.use_grad_clip: nn.utils.clip_grad_norm_(model.parameters(), self.config.optim.grad_clip) self.optimizer.step() (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5)) batch_t = (time.time() - start) start = time.time() objs.update(loss.item(), n) top1.update(prec1.item(), n) top5.update(prec5.item(), n) data_time.update(data_t) batch_time.update(batch_t) if ((step != 0) and ((step % self.report_freq) == 0)): logging.info('Train epoch %03d step %03d | loss %.4f top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f', epoch, step, objs.avg, top1.avg, top5.avg, batch_time.avg, data_time.avg) (input, target) = prefetcher.next() step += 1 logging.info('EPOCH%d Train_acc top1 %.2f top5 %.2f batch_time %.3f data_time %.3f', epoch, top1.avg, top5.avg, batch_time.avg, data_time.avg) return (top1.avg, top5.avg, objs.avg, batch_time.avg, data_time.avg) def infer(self, model, epoch=0): top1 = utils.AverageMeter() top5 = utils.AverageMeter() data_time = utils.AverageMeter() batch_time = utils.AverageMeter() model.eval() start = time.time() prefetcher = data_prefetcher(self.val_data) (input, target) = prefetcher.next() step = 0 while (input is not None): step += 1 data_t = (time.time() - start) n = input.size(0) logits = model(input) (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5)) batch_t = (time.time() - start) top1.update(prec1.item(), n) top5.update(prec5.item(), n) data_time.update(data_t) batch_time.update(batch_t) if ((step % self.report_freq) == 0): logging.info('Val epoch %03d step %03d | top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f', epoch, step, top1.avg, top5.avg, batch_time.avg, data_time.avg) start = time.time() (input, target) = prefetcher.next() logging.info('EPOCH%d Valid_acc top1 %.2f top5 %.2f batch_time %.3f data_time %.3f', epoch, top1.avg, top5.avg, batch_time.avg, data_time.avg) return (top1.avg, top5.avg, batch_time.avg, data_time.avg)
class SearchTrainer(object): def __init__(self, train_data, val_data, search_optim, criterion, scheduler, config, args): self.train_data = train_data self.val_data = val_data self.search_optim = search_optim self.criterion = criterion self.scheduler = scheduler self.sub_obj_type = config.optim.sub_obj.type self.args = args def train(self, model, epoch, optim_obj='Weights', search_stage=0): assert (optim_obj in ['Weights', 'Arch']) objs = utils.AverageMeter() top1 = utils.AverageMeter() top5 = utils.AverageMeter() sub_obj_avg = utils.AverageMeter() data_time = utils.AverageMeter() batch_time = utils.AverageMeter() model.train() start = time.time() if (optim_obj == 'Weights'): prefetcher = data_prefetcher(self.train_data) elif (optim_obj == 'Arch'): prefetcher = data_prefetcher(self.val_data) (input, target) = prefetcher.next() step = 0 while (input is not None): (input, target) = (input.cuda(), target.cuda()) data_t = (time.time() - start) n = input.size(0) if (optim_obj == 'Weights'): self.scheduler.step() if (step == 0): logging.info('epoch %d weight_lr %e', epoch, self.search_optim.weight_optimizer.param_groups[0]['lr']) (logits, loss, sub_obj) = self.search_optim.weight_step(input, target, model, search_stage) elif (optim_obj == 'Arch'): if (step == 0): logging.info('epoch %d arch_lr %e', epoch, self.search_optim.arch_optimizer.param_groups[0]['lr']) (logits, loss, sub_obj) = self.search_optim.arch_step(input, target, model, search_stage) (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5)) del logits, input, target batch_t = (time.time() - start) objs.update(loss, n) top1.update(prec1.item(), n) top5.update(prec5.item(), n) sub_obj_avg.update(sub_obj) data_time.update(data_t) batch_time.update(batch_t) if ((step != 0) and ((step % self.args.report_freq) == 0)): logging.info('Train%s epoch %03d step %03d | loss %.4f %s %.2f top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f', optim_obj, epoch, step, objs.avg, self.sub_obj_type, sub_obj_avg.avg, top1.avg, top5.avg, batch_time.avg, data_time.avg) start = time.time() step += 1 (input, target) = prefetcher.next() return (top1.avg, top5.avg, objs.avg, sub_obj_avg.avg, batch_time.avg) def infer(self, model, epoch): objs = utils.AverageMeter() top1 = utils.AverageMeter() top5 = utils.AverageMeter() sub_obj_avg = utils.AverageMeter() data_time = utils.AverageMeter() batch_time = utils.AverageMeter() model.train() start = time.time() prefetcher = data_prefetcher(self.val_data) (input, target) = prefetcher.next() step = 0 while (input is not None): step += 1 data_t = (time.time() - start) n = input.size(0) (logits, loss, sub_obj) = self.search_optim.valid_step(input, target, model) (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5)) batch_t = (time.time() - start) objs.update(loss, n) top1.update(prec1.item(), n) top5.update(prec5.item(), n) sub_obj_avg.update(sub_obj) data_time.update(data_t) batch_time.update(batch_t) if ((step % self.args.report_freq) == 0): logging.info('Val epoch %03d step %03d | loss %.4f %s %.2f top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f', epoch, step, objs.avg, self.sub_obj_type, sub_obj_avg.avg, top1.avg, top5.avg, batch_time.avg, data_time.avg) start = time.time() (input, target) = prefetcher.next() return (top1.avg, top5.avg, objs.avg, sub_obj_avg.avg, batch_time.avg)
class AttrDict(dict): IMMUTABLE = '__immutable__' def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__[AttrDict.IMMUTABLE] = False def __getattr__(self, name): if (name in self.__dict__): return self.__dict__[name] elif (name in self): return self[name] else: raise AttributeError(name) def __setattr__(self, name, value): if (not self.__dict__[AttrDict.IMMUTABLE]): if (name in self.__dict__): self.__dict__[name] = value else: self[name] = value else: raise AttributeError('Attempted to set "{}" to "{}", but AttrDict is immutable'.format(name, value)) def immutable(self, is_immutable): 'Set immutability to is_immutable and recursively apply the setting\n to all nested AttrDicts.\n ' self.__dict__[AttrDict.IMMUTABLE] = is_immutable for v in self.__dict__.values(): if isinstance(v, AttrDict): v.immutable(is_immutable) for v in self.values(): if isinstance(v, AttrDict): v.immutable(is_immutable) def is_immutable(self): return self.__dict__[AttrDict.IMMUTABLE]
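# Minimal usage sketch for the AttrDict class above: keys are readable and writable as
# attributes, and immutable() locks the whole nested tree. (The nested-config layout here
# is a made-up example.)
cfg = AttrDict({'optim': AttrDict({'init_lr': 0.1})})
cfg.seed = 42                      # equivalent to cfg['seed'] = 42
print(cfg.optim.init_lr)           # 0.1
cfg.immutable(True)
try:
    cfg.optim.init_lr = 0.2        # raises AttributeError while immutable
except AttributeError as e:
    print(e)
cfg.immutable(False)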
def load_cfg(cfg_to_load):
    """Wrapper around yaml.load used for maintaining backward compatibility"""
    if isinstance(cfg_to_load, IOBase):
        cfg_to_load = ''.join(cfg_to_load.readlines())
    # PyYAML >= 5.1 requires an explicit Loader; FullLoader preserves the old default behaviour.
    return yaml.load(cfg_to_load, Loader=yaml.FullLoader)
def load_cfg_to_dict(cfg_filename): with open(cfg_filename, 'r') as f: yaml_cfg = load_cfg(f) return yaml_cfg
def merge_cfg_from_file(cfg_filename, global_config): 'Load a yaml config file and merge it into the global config.' with open(cfg_filename, 'r') as f: yaml_cfg = AttrDict(load_cfg(f)) _merge_a_into_b(yaml_cfg, global_config)
def merge_cfg_from_cfg(cfg_other, global_config): 'Merge `cfg_other` into the global config.' _merge_a_into_b(cfg_other, global_config)
def update_cfg_from_file(cfg_filename, global_config): with open(cfg_filename, 'r') as f: yaml_cfg = AttrDict(load_cfg(f)) update_cfg_from_cfg(yaml_cfg, global_config)
def update_cfg_from_cfg(cfg_other, global_config, stack=None):
    assert isinstance(cfg_other, AttrDict), \
        '`cfg_other` (cur type {}) must be an instance of {}'.format(type(cfg_other), AttrDict)
    assert isinstance(global_config, AttrDict), \
        '`global_config` (cur type {}) must be an instance of {}'.format(type(global_config), AttrDict)
    for k, v_ in cfg_other.items():
        full_key = '.'.join(stack) + '.' + k if stack is not None else k
        v = copy.deepcopy(v_)
        v = _decode_cfg_value(v)
        if k not in global_config:
            # new key: insert it, then recurse so nested AttrDicts are handled the same way
            global_config[k] = v
            if isinstance(v, AttrDict):
                try:
                    stack_push = [k] if stack is None else stack + [k]
                    update_cfg_from_cfg(v, global_config[k], stack=stack_push)
                except BaseException:
                    raise
        else:
            v = _check_and_coerce_cfg_value_type(v, global_config[k], k, full_key)
            if isinstance(v, AttrDict):
                try:
                    stack_push = [k] if stack is None else stack + [k]
                    update_cfg_from_cfg(v, global_config[k], stack=stack_push)
                except BaseException:
                    raise
            else:
                global_config[k] = v
def merge_cfg_from_list(cfg_list, global_config): "Merge config keys, values in a list (e.g., from command line) into the\n global config. For example, `cfg_list = ['TEST.NMS', 0.5]`.\n " assert ((len(cfg_list) % 2) == 0) for (full_key, v) in zip(cfg_list[0::2], cfg_list[1::2]): if _key_is_deprecated(full_key): continue if _key_is_renamed(full_key): _raise_key_rename_error(full_key) key_list = full_key.split('.') d = global_config for subkey in key_list[:(- 1)]: assert (subkey in d), 'Non-existent key: {}'.format(full_key) d = d[subkey] subkey = key_list[(- 1)] assert (subkey in d), 'Non-existent key: {}'.format(full_key) value = _decode_cfg_value(v) value = _check_and_coerce_cfg_value_type(value, d[subkey], subkey, full_key) d[subkey] = value
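# Hedged usage sketch for merge_cfg_from_list: overriding nested config entries from a
# command-line style list. The config layout below is invented for illustration, and the
# helpers (_decode_cfg_value, _check_and_coerce_cfg_value_type, _key_is_deprecated,
# _key_is_renamed) are assumed to be the ones defined elsewhere in this codebase.
global_config = AttrDict({'optim': AttrDict({'weight': AttrDict({'init_lr': 0.1})}),
                          'seed': 1})
overrides = ['optim.weight.init_lr', '0.05', 'seed', '3']
merge_cfg_from_list(overrides, global_config)
# expected: 0.05 3, assuming _decode_cfg_value literal-evals the string values
print(global_config.optim.weight.init_lr, global_config.seed)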
def _merge_a_into_b(a, b, stack=None): 'Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n ' assert isinstance(a, AttrDict), '`a` (cur type {}) must be an instance of {}'.format(type(a), AttrDict) assert isinstance(b, AttrDict), '`b` (cur type {}) must be an instance of {}'.format(type(b), AttrDict) for (k, v_) in a.items(): full_key = ((('.'.join(stack) + '.') + k) if (stack is not None) else k) if (k not in b): raise KeyError('Non-existent config key: {}'.format(full_key)) v = copy.deepcopy(v_) v = _decode_cfg_value(v) v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key) if isinstance(v, AttrDict): try: stack_push = ([k] if (stack is None) else (stack + [k])) _merge_a_into_b(v, b[k], stack=stack_push) except BaseException: raise else: b[k] = v