code stringlengths 17 6.64M |
|---|
def create_lr_scheduler(lr_scheduler_config, optimizer):
    """Build a learning-rate scheduler from its config dict.

    Args:
        lr_scheduler_config: dict with keys 'type' (currently only
            'multistep_lr'), 'gamma' (decay factor) and, for multistep,
            'milestones' (list of epoch indices).
        optimizer: the torch optimizer the scheduler will drive.

    Returns:
        A configured ``torch.optim.lr_scheduler`` instance.

    Raises:
        ValueError: if the scheduler type is not supported.
    """
    gamma = lr_scheduler_config['gamma']
    if lr_scheduler_config['type'] == 'multistep_lr':
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=lr_scheduler_config['milestones'], gamma=gamma, verbose=True)
    else:
        # Fixed: the message previously said "Optimizer type", which misreported
        # the failing component (this is the LR-scheduler factory).
        raise ValueError('LR scheduler type {} not supported'.format(lr_scheduler_config['type']))
    return lr_scheduler
|
def create_optimizer(optimizer_config, model):
    """Instantiate the torch optimizer described by ``optimizer_config``.

    Supported 'type' values: 'RMSProp', 'Adam' (also reads 'betas') and 'SGD'.
    All types read 'lr' and 'weight_decay'; the momentum-based ones read
    'momentum'. Raises ValueError for any other type.
    """
    opt_type = optimizer_config['type']
    lr = optimizer_config['lr']
    weight_decay = optimizer_config['weight_decay']
    momentum = optimizer_config['momentum']
    params = model.parameters()
    if opt_type == 'RMSProp':
        return torch.optim.RMSprop(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
    if opt_type == 'Adam':
        return torch.optim.Adam(params, lr=lr, betas=optimizer_config['betas'], weight_decay=weight_decay)
    if opt_type == 'SGD':
        return torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
    raise ValueError('Optimizer type {} not supported'.format(opt_type))
|
class Benchmark():
    """Drives a full experiment (training or evaluation-only) from a YAML config.

    Reads and validates the config, seeds all RNG sources, resolves the
    compute device(s), prepares the output directory, and exposes ``run()``
    (training plus optional final evaluation) and ``run_eval()``
    (checkpoint evaluation only).
    """

    def __init__(self, config_file):
        # Keep the original path so the config can be archived verbatim below.
        self.config_file_path = config_file
        self.config_file = read_yaml(config_file)
        validate_config(self.config_file, 'benchmark', defaults=True)
        # Seed torch, python's random and numpy from the single experiment seed.
        torch.manual_seed(self.config_file['experiment']['seed'])
        random.seed(self.config_file['experiment']['seed'])
        np.random.seed(self.config_file['experiment']['seed'])
        self.deterministic = self.config_file['experiment']['deterministic']
        if self.deterministic:
            # Reproducibility over speed: no cudnn autotuning, deterministic kernels.
            cudnn.benchmark = False
            torch.use_deterministic_algorithms(True)
        else:
            cudnn.benchmark = True
        self.gpus = self.config_file['infrastructure']['gpus']
        self.model_config = self.config_file['model']
        # A config with no 'training' section is an evaluation-only run.
        if ('training' not in self.config_file):
            self.benchmark_mode = 'evaluation'
        else:
            self.benchmark_mode = 'training'
            self.hyperparameters = self.config_file['training']['hyperparameters']
            self.metrics_config = self.config_file['training']['metrics']
            self.optimizer_config = self.config_file['training']['optimizer']
            self.lr_scheduler_config = self.config_file['training']['lr_scheduler']
        # Available training modes = lowercase submodules of the `models` package.
        self.mode_names = sorted((name for name in models.__dict__ if (name.islower() and (not name.startswith('__')) and isinstance(models.__dict__[name], ModuleType))))
        self.mode = self.model_config['mode']['type']
        self.layer_config = {}
        self.layer_config = {'type': self.mode}
        if ('options' in self.model_config['mode']):
            # Per-layer options (e.g. init scheme) are forwarded to every layer.
            self.mode_options = self.model_config['mode']['options']
            self.layer_config['options'] = self.mode_options
        if (self.mode not in self.mode_names):
            raise ValueError('Mode not {} supported'.format(self.mode))
        # Available architectures = lowercase callables exported by the mode module.
        options = models.__dict__[self.mode].__dict__
        self.model_names = sorted((name for name in options if (name.islower() and (not name.startswith('__')) and callable(options[name]))))
        self.loss_function_config = self.config_file['model']['loss_function']
        self.data_config = self.config_file['data']
        self.num_workers = self.config_file['data']['num_workers']
        self.multi_gpu = False
        # Device resolution: gpus <= -1 means CPU, an int selects one GPU,
        # a list selects a primary GPU and enables DataParallel later.
        if (isinstance(self.gpus, int) and (self.gpus <= (- 1))):
            self.device = 'cpu'
        else:
            if (not torch.cuda.is_available()):
                raise ValueError('You selected {} GPUs but there are no GPUs available'.format(self.gpus))
            self.device = 'cuda'
            if isinstance(self.gpus, int):
                self.device += (':' + str(self.gpus))
            elif isinstance(self.gpus, list):
                self.device += (':' + str(self.gpus[0]))
                self.multi_gpu = True
        # Create <output_dir>/<experiment name> and archive the config used.
        self.output_dir = os.path.join(self.config_file['experiment']['output_dir'], self.config_file['experiment']['name'])
        mkdir(self.output_dir)
        shutil.copy2(self.config_file_path, os.path.join(self.output_dir, 'config.yaml'))

    def run(self):
        """Train the model, then optionally evaluate the best checkpoint."""
        self.epochs = self.hyperparameters['epochs']
        self.batch_size = self.hyperparameters['batch_size']
        self.target_size = self.data_config['target_size']
        self.dataset_creator = DatasetSelector(self.data_config['dataset']).get_dataset()
        if (self.data_config['dataset_path'] is not None):
            self.dataset = self.dataset_creator(self.target_size, dataset_path=self.data_config['dataset_path'])
        else:
            # Fall back to the dataset class's default download/storage path.
            self.dataset = self.dataset_creator(self.target_size)
        self.train_dataloader = self.dataset.create_train_dataloader(self.batch_size, deterministic=self.deterministic, num_workers=self.num_workers)
        self.val_dataloader = self.dataset.create_val_dataloader(self.batch_size, deterministic=self.deterministic, num_workers=self.num_workers)
        self.num_classes = self.dataset.num_classes
        # Either build a named architecture (optionally pretrained) or load a
        # complete model object from a checkpoint file.
        if ((self.model_config['architecture'] is not None) and (self.model_config['architecture'] in self.model_names)):
            arch = self.model_config['architecture']
            if self.model_config['pretrained']:
                print("=> Using pre-trained model '{}'".format(self.model_config['architecture']))
            else:
                print("=> Creating model from scratch '{}'".format(self.model_config['architecture']))
            self.model = models.__dict__[self.mode].__dict__[arch](pretrained=self.model_config['pretrained'], num_classes=self.num_classes, layer_config=self.layer_config)
        elif (self.model_config['checkpoint'] is not None):
            print('Loading model checkpoint from ', self.model_config['checkpoint'])
            self.model = torch.load(self.model_config['checkpoint'], map_location=self.device)
        self.model.to(self.device)
        if isinstance(self.gpus, list):
            # Replicate the model across the listed GPUs.
            self.model = nn.DataParallel(self.model, self.gpus)
        self.loss_function = select_loss_function(self.loss_function_config)
        self.optimizer = create_optimizer(self.optimizer_config, self.model)
        self.lr_scheduler = create_lr_scheduler(self.lr_scheduler_config, self.optimizer)
        print('\nBenchmarking model on {}'.format(str(self.dataset)))
        print(self.metrics_config)
        trainer = Trainer(model=self.model, mode=self.mode, loss_function=self.loss_function, optimizer=self.optimizer, lr_scheduler=self.lr_scheduler, train_dataloader=self.train_dataloader, val_dataloader=self.val_dataloader, device=self.device, epochs=self.epochs, output_dir=self.output_dir, metrics_config=self.metrics_config, multi_gpu=self.multi_gpu)
        trainer.run()
        if self.config_file['evaluation']:
            # Reload the best-accuracy checkpoint written by the trainer and
            # score it on the held-out test split.
            self.model = torch.load(os.path.join(self.output_dir, 'model_best_acc.pth'))
            self.test_dataloader = self.dataset.create_test_dataloader(self.batch_size, deterministic=self.deterministic, num_workers=self.num_workers)
            self.evaluator = Evaluator(self.model, self.mode, self.loss_function, self.test_dataloader, self.device, self.output_dir)
            self.evaluate(self.evaluator)

    def run_eval(self):
        """Evaluate a checkpointed model on the test split (no training).

        Raises:
            ValueError: if the config does not name a model checkpoint.
        """
        if (self.model_config['checkpoint'] is not None):
            print('Loading model checkpoint from ', self.model_config['checkpoint'])
            self.model = torch.load(self.model_config['checkpoint'], map_location=self.device)
        else:
            raise ValueError('A model checkpoint must be specified')
        self.target_size = self.data_config['target_size']
        # Evaluation-only runs take batch_size from the data section (there is
        # no 'training' section to read hyperparameters from).
        self.batch_size = self.data_config['batch_size']
        self.dataset_creator = DatasetSelector(self.data_config['dataset']).get_dataset()
        if (self.data_config['dataset_path'] is not None):
            self.dataset = self.dataset_creator(self.target_size, dataset_path=self.data_config['dataset_path'])
        else:
            self.dataset = self.dataset_creator(self.target_size)
        self.test_dataloader = self.dataset.create_test_dataloader(self.batch_size, deterministic=self.deterministic, num_workers=self.num_workers)
        self.loss_function = select_loss_function(self.loss_function_config)
        self.evaluator = Evaluator(self.model, None, self.loss_function, self.test_dataloader, self.device, self.output_dir)
        self.evaluate(self.evaluator)

    def evaluate(self, evaluator):
        """Run ``evaluator`` once and persist the results as CSV and JSON."""
        (self.test_acc, self.test_loss) = evaluator.run()
        self.results_df = pd.DataFrame({'model_name': [self.config_file['experiment']['name']], 'dataset': [self.data_config['dataset']], 'accuracy': [float(self.test_acc)], 'error': [(100.0 - float(self.test_acc))], 'loss': [self.test_loss]})
        print('Test Results')
        print(self.results_df)
        csv_results = os.path.join(self.output_dir, 'results.csv')
        json_results = os.path.join(self.output_dir, 'results.json')
        print('Test Results saved in: \n{}\n{}'.format(csv_results, json_results))
        self.results_df.to_csv(csv_results)
        self.results_df.to_json(json_results, indent=2, orient='records')
|
def __main__():
    """CLI entry point: parse --config_file and run training or evaluation.

    Any exception is wrapped in a ValueError carrying a readable summary;
    the original exception is chained so the real traceback is preserved.
    """
    parser = argparse.ArgumentParser(description='BioTorch')
    parser.add_argument('--config_file', help='Path to the configuration file')
    try:
        args = parser.parse_args()
        benchmark = Benchmark(args.config_file)
        if (benchmark.benchmark_mode == 'training'):
            benchmark.run()
        else:
            benchmark.run_eval()
    except Exception as e:
        # `.message` only exists on legacy exception classes; fall back to str().
        message = 'an unexpected error occurred: {}: {}'.format(type(e).__name__, ((e.message if hasattr(e, 'message') else '') or str(e)))
        # Fixed: chain the original exception so its traceback is not lost.
        raise ValueError(message) from e
|
class CIFAR100(Dataset):
    """CIFAR-100 image-classification dataset (100 classes).

    Data is downloaded on first use; the official test split is used for both
    the validation and the test set.
    """

    def __init__(self, target_size, dataset_path='./datasets/cifar100', train_transforms=None, test_transforms=None):
        self.mean = (0.5071, 0.4867, 0.4408)
        self.std = (0.2675, 0.2565, 0.2761)
        self.num_classes = 100
        super(CIFAR100, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Preparing {} and storing data in {}'.format(str(self), dataset_path))
        # Compose the pipelines once; val and test share the eval pipeline.
        train_pipeline = transforms.Compose(self.train_transforms)
        eval_pipeline = transforms.Compose(self.test_transforms)
        self.train_dataset = datasets.CIFAR100(self.dataset_path, train=True, download=True, transform=train_pipeline)
        self.val_dataset = datasets.CIFAR100(self.dataset_path, train=False, download=True, transform=eval_pipeline)
        self.test_dataset = datasets.CIFAR100(self.dataset_path, train=False, download=True, transform=eval_pipeline)

    def __str__(self):
        return 'CIFAR-100 Dataset'
|
class CIFAR10(Dataset):
    """CIFAR-10 image-classification dataset (10 classes).

    Data is downloaded on first use; the official test split is used for both
    the validation and the test set.
    """

    def __init__(self, target_size, dataset_path='./datasets/cifar10', train_transforms=None, test_transforms=None):
        self.mean = (0.4914, 0.4821, 0.4465)
        self.std = (0.247, 0.2435, 0.2616)
        self.num_classes = 10
        super(CIFAR10, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Preparing {} and storing data in {}'.format(str(self), dataset_path))
        # Compose the pipelines once; val and test share the eval pipeline.
        train_pipeline = transforms.Compose(self.train_transforms)
        eval_pipeline = transforms.Compose(self.test_transforms)
        self.train_dataset = datasets.CIFAR10(self.dataset_path, train=True, download=True, transform=train_pipeline)
        self.val_dataset = datasets.CIFAR10(self.dataset_path, train=False, download=True, transform=eval_pipeline)
        self.test_dataset = datasets.CIFAR10(self.dataset_path, train=False, download=True, transform=eval_pipeline)

    def __str__(self):
        return 'CIFAR-10 Dataset'
|
class CIFAR10Benchmark(Dataset):
    """CIFAR-10 with a fixed, reproducible 45k/5k train/validation split.

    5000 validation images are carved out of the official training split
    using a fixed seed, so every run holds out the same images; the official
    test split is kept as the test set.
    """

    def __str__(self):
        return 'CIFAR-10 Benchmark Dataset'

    def __init__(self, target_size, dataset_path='./datasets/cifar10', train_transforms=None, test_transforms=None):
        self.mean = (0.4914, 0.4821, 0.4465)
        self.std = (0.247, 0.2435, 0.2616)
        self.num_classes = 10
        super(CIFAR10Benchmark, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Preparing {} and storing data in {}'.format(str(self), dataset_path))
        # Fixed: use a private RNG instead of `random.seed(0)`, which clobbered
        # the experiment-wide global seed set by Benchmark. A seeded
        # random.Random yields the exact same sample as the module-level
        # functions after random.seed(0), so the split is unchanged.
        rng = random.Random(0)
        # NOTE(review): this overwrites any caller-supplied train_transforms
        # with the standard CIFAR-10 augmentation pipeline — confirm intended.
        self.train_transforms = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(), transforms.Normalize(self.mean, self.std)]
        self.train_dataset = datasets.CIFAR10(self.dataset_path, train=True, download=True, transform=transforms.Compose(self.train_transforms))
        # Pick 5000 validation indices, remove them from train, keep them in val.
        val_indices = rng.sample(range(0, len(self.train_dataset.data)), 5000)
        self.train_dataset.data = np.delete(self.train_dataset.data, val_indices, axis=0)
        self.train_dataset.targets = np.delete(self.train_dataset.targets, val_indices, axis=0)
        self.val_dataset = datasets.CIFAR10(self.dataset_path, train=True, download=True, transform=transforms.Compose(self.test_transforms))
        self.val_dataset.data = self.val_dataset.data[val_indices]
        self.val_dataset.targets = list(np.array(self.val_dataset.targets)[val_indices])
        self.test_dataset = datasets.CIFAR10(self.dataset_path, train=False, download=True, transform=transforms.Compose(self.test_transforms))
|
class Dataset(object):
    """Base class bundling train/val/test datasets with dataloader factories.

    Subclasses are expected to set ``self.train_dataset``, ``self.val_dataset``
    and ``self.test_dataset`` after calling this constructor.
    """

    def __init__(self, target_size, dataset_path, mean=None, std=None, train_transforms=None, test_transforms=None):
        self.dataset_path = dataset_path
        self.target_size = target_size
        self.mean = mean
        self.std = std
        self.train_transforms = train_transforms
        self.test_transforms = test_transforms
        # Default pipeline: resize to a square target and convert to tensor;
        # normalization is appended only when both statistics are provided.
        default_transforms = [transforms.Resize((self.target_size, self.target_size)), transforms.ToTensor()]
        if (self.mean is not None) and (self.std is not None):
            default_transforms.append(transforms.Normalize(self.mean, self.std))
        if self.train_transforms is None:
            self.train_transforms = default_transforms
        if self.test_transforms is None:
            self.test_transforms = default_transforms

    @staticmethod
    def seed_worker(worker_id):
        """Reseed numpy and random inside each DataLoader worker process."""
        worker_seed = torch.initial_seed() % (2 ** 32)
        # NOTE(review): uses the bare `numpy` name while sibling code uses
        # `np` — assumes `import numpy` is present at module level; confirm.
        numpy.random.seed(worker_seed)
        random.seed(worker_seed)

    def _create_dataloader(self, mode, batch_size, deterministic=False, shuffle=True, drop_last=True, num_workers=0):
        """Build a DataLoader for one split.

        Args:
            mode: 'train', 'val' or 'test'. Val/test loaders never shuffle
                and never drop the last partial batch.
            deterministic: when True and workers are used, seed each worker
                and pin the shuffle generator for reproducibility.

        Raises:
            ValueError: for an unknown ``mode`` (previously this silently
                returned None).
        """
        gen = None
        worker_init_fn = None
        if deterministic and (num_workers > 0):
            worker_init_fn = self.seed_worker
            gen = torch.Generator()
            gen.manual_seed(0)
        if mode == 'train':
            return DataLoader(self.train_dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last, num_workers=num_workers, worker_init_fn=worker_init_fn, generator=gen)
        elif mode == 'val':
            return DataLoader(self.val_dataset, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=num_workers, worker_init_fn=worker_init_fn, generator=gen)
        elif mode == 'test':
            return DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=num_workers, worker_init_fn=worker_init_fn, generator=gen)
        raise ValueError("Unknown dataloader mode '{}': expected 'train', 'val' or 'test'".format(mode))

    def create_train_dataloader(self, batch_size, deterministic=False, num_workers=0):
        """Shuffled training loader that drops the last partial batch."""
        return self._create_dataloader('train', batch_size, deterministic=deterministic, num_workers=num_workers)

    def create_val_dataloader(self, batch_size, deterministic=False, num_workers=0):
        """Unshuffled validation loader."""
        return self._create_dataloader('val', batch_size, deterministic=deterministic, num_workers=num_workers)

    def create_test_dataloader(self, batch_size, deterministic=False, num_workers=0):
        """Unshuffled test loader."""
        return self._create_dataloader('test', batch_size, deterministic=deterministic, num_workers=num_workers)
|
class FashionMNIST(Dataset):
    """Fashion-MNIST grayscale classification dataset (10 classes).

    Data is downloaded on first use; the official test split is used for both
    the validation and the test set.
    """

    def __init__(self, target_size, dataset_path='./datasets/fashion-mnist', train_transforms=None, test_transforms=None):
        self.mean = (0.2859,)
        self.std = (0.353,)
        self.num_classes = 10
        super(FashionMNIST, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Preparing {} and storing data in {}'.format(str(self), dataset_path))
        # Compose the pipelines once; val and test share the eval pipeline.
        train_pipeline = transforms.Compose(self.train_transforms)
        eval_pipeline = transforms.Compose(self.test_transforms)
        self.train_dataset = datasets.FashionMNIST(self.dataset_path, train=True, download=True, transform=train_pipeline)
        self.val_dataset = datasets.FashionMNIST(self.dataset_path, train=False, download=True, transform=eval_pipeline)
        self.test_dataset = datasets.FashionMNIST(self.dataset_path, train=False, download=True, transform=eval_pipeline)

    def __str__(self):
        return 'Fashion MNIST Dataset'
|
class ImageNet(Dataset):
    """ImageNet folder dataset with the standard train/eval pipelines.

    Expects ``dataset_path`` to contain 'train' and 'val' subfolders laid out
    for ``ImageFolder``; the 'val' split doubles as the test set.
    """

    def __str__(self):
        return 'Imagenet Dataset'

    def __init__(self, target_size, dataset_path='./datasets/imagenet', train_transforms=None, test_transforms=None):
        self.mean = (0.485, 0.456, 0.406)
        self.std = (0.229, 0.224, 0.225)
        self.num_classes = 1000
        # Fixed: reuse the channel statistics defined above instead of
        # duplicating the same literals a second time.
        normalize = transforms.Normalize(mean=self.mean, std=self.std)
        if train_transforms is None:
            train_transforms = [transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
        if test_transforms is None:
            test_transforms = [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]
        super(ImageNet, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Reading {} data from {}'.format(str(self), dataset_path))
        self.train_dataset = datasets.ImageFolder(os.path.join(self.dataset_path, 'train'), transform=transforms.Compose(self.train_transforms))
        # The validation folder serves as both val and test split.
        self.val_dataset = datasets.ImageFolder(os.path.join(self.dataset_path, 'val'), transform=transforms.Compose(self.test_transforms))
        self.test_dataset = datasets.ImageFolder(os.path.join(self.dataset_path, 'val'), transform=transforms.Compose(self.test_transforms))
|
class MNIST(Dataset):
    """MNIST handwritten-digit dataset (10 classes).

    Data is downloaded on first use; the official test split is used for both
    the validation and the test set.
    """

    def __init__(self, target_size, dataset_path='./datasets/mnist', train_transforms=None, test_transforms=None):
        self.mean = (0.1307,)
        self.std = (0.3081,)
        self.num_classes = 10
        super(MNIST, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Preparing {} and storing data in {}'.format(str(self), dataset_path))
        # Compose the pipelines once; val and test share the eval pipeline.
        train_pipeline = transforms.Compose(self.train_transforms)
        eval_pipeline = transforms.Compose(self.test_transforms)
        self.train_dataset = datasets.MNIST(self.dataset_path, train=True, download=True, transform=train_pipeline)
        self.val_dataset = datasets.MNIST(self.dataset_path, train=False, download=True, transform=eval_pipeline)
        self.test_dataset = datasets.MNIST(self.dataset_path, train=False, download=True, transform=eval_pipeline)

    def __str__(self):
        return 'MNIST Dataset'
|
class DatasetSelector():
    """Maps a dataset name from the config to its Dataset subclass."""

    def __init__(self, dataset_name):
        # Validate eagerly so a typo fails at config time, not at build time.
        if dataset_name not in DATASETS_AVAILABLE:
            raise ValueError('Dataset name specified: {} not in the list of available datasets {}'.format(dataset_name, DATASETS_AVAILABLE))
        self.dataset_name = dataset_name

    def get_dataset(self):
        """Return the Dataset class registered under ``self.dataset_name``."""
        registry = {
            'cifar10': CIFAR10,
            'cifar10_benchmark': CIFAR10Benchmark,
            'cifar100': CIFAR100,
            'mnist': MNIST,
            'fashion_mnist': FashionMNIST,
            'imagenet': ImageNet,
        }
        # .get mirrors the original if/elif chain: unknown names yield None.
        return registry.get(self.dataset_name)
|
class Evaluator():
    """Runs a trained model over a dataloader and reports accuracy and loss."""

    def __init__(self, model, mode, loss_function, dataloader, device, output_dir, multi_gpu=False):
        self.model = model
        self.mode = mode
        self.loss_function = loss_function
        self.dataloader = dataloader
        self.device = device
        self.multi_gpu = multi_gpu
        self.output_dir = output_dir
        # Logs live in a 'logs' subfolder of the experiment output directory.
        self.logs_dir = os.path.join(output_dir, 'logs')

    def run(self):
        """Evaluate once; returns (accuracy as a numpy value, average loss)."""
        acc, loss = test(model=self.model, loss_function=self.loss_function, test_dataloader=self.dataloader, device=self.device)
        # Accuracy arrives as a torch tensor; hand back a CPU numpy value.
        return (acc.cpu().numpy(), loss)
|
class Conv2d(nn.Conv2d):
    """nn.Conv2d variant configured by a ``layer_config`` dict.

    Defaults to plain backpropagation; the 'init' option selects the weight
    initialization scheme applied by ``init_parameters``.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        self.layer_config = {'type': 'backpropagation'} if layer_config is None else layer_config
        # Fill in default options in place, so layers sharing one config agree.
        self.layer_config.setdefault('options', {'constrain_weights': False, 'gradient_clip': False, 'init': 'xavier'})
        self.options = self.layer_config['options']
        self.type = self.layer_config['type']
        self.init = self.options['init']
        self.init_parameters()

    def init_parameters(self) -> None:
        """Re-initialize weights: xavier-uniform + zero bias, or kaiming-uniform."""
        fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if self.init == 'xavier':
            nn.init.xavier_uniform_(self.weight)
            if self.bias is not None:
                nn.init.constant_(self.bias, 0)
            return
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            nn.init.uniform_(self.bias, -bound, bound)
|
class Linear(nn.Linear):
    """nn.Linear variant configured by a ``layer_config`` dict.

    Defaults to plain backpropagation; the 'init' option selects the weight
    initialization scheme applied by ``init_parameters``.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        super().__init__(in_features, out_features, bias)
        self.layer_config = {'type': 'backpropagation'} if layer_config is None else layer_config
        # Fill in default options in place, so layers sharing one config agree.
        self.layer_config.setdefault('options', {'constrain_weights': False, 'gradient_clip': False, 'init': 'xavier'})
        self.options = self.layer_config['options']
        self.type = self.layer_config['type']
        self.init = self.options['init']
        self.init_parameters()

    def init_parameters(self) -> None:
        """Re-initialize weights: xavier-uniform + zero bias, or kaiming-uniform."""
        fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if self.init == 'xavier':
            nn.init.xavier_uniform_(self.weight)
            if self.bias is not None:
                nn.init.constant_(self.bias, 0)
            return
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            nn.init.uniform_(self.bias, -bound, bound)
|
class Conv2d(fa_constructor.Conv2d):
    """Batchwise Random Magnitude Sign-concordant Feedback (brSF) convolution.

    Implements the method from "How Important Is Weight Symmetry in
    Backpropagation?" (https://arxiv.org/pdf/1510.05067.pdf) with the
    modification of taking the absolute value of the backward matrix:
    weight_backward = |M| ∘ sign(weight), where M is redrawn after each
    update of W (i.e., each mini-batch).
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        cfg = {} if layer_config is None else layer_config
        # Force the brSF feedback mode before delegating to the FA constructor.
        cfg['type'] = 'brsf'
        super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, cfg)
|
class Linear(fa_constructor.Linear):
    """Batchwise Random Magnitude Sign-concordant Feedback (brSF) linear layer.

    Implements the method from "How Important Is Weight Symmetry in
    Backpropagation?" (https://arxiv.org/pdf/1510.05067.pdf) with the
    modification of taking the absolute value of the backward matrix:
    weight_backward = |M| ∘ sign(weight), where M is redrawn after each
    update of W (i.e., each mini-batch).
    """

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        cfg = {} if layer_config is None else layer_config
        # Force the brSF feedback mode before delegating to the FA constructor.
        cfg['type'] = 'brsf'
        super().__init__(in_features, out_features, bias, cfg)
|
class Conv2d(nn.Conv2d):
    """Conv2d trained with Direct Feedback Alignment (DFA).

    A backward hook replaces the gradient flowing into this layer with the
    global loss gradient projected through the fixed random matrix
    ``weight_backward`` instead of the transposed forward weights.
    """

    def __init__(self, in_channels: int, out_channels: int, output_dim: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        # output_dim: dimensionality of the network output; it sizes the fixed
        # random feedback tensor that maps the loss gradient back to this layer.
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        self.layer_config = layer_config
        # NOTE(review): unlike the sibling constructors, layer_config=None is
        # not handled here — passing None raises TypeError on the check below.
        if ('options' not in self.layer_config):
            self.layer_config['options'] = {'constrain_weights': False, 'init': 'xavier', 'gradient_clip': False}
        self.options = self.layer_config['options']
        self.init = self.options['init']
        # Set externally (by the training loop) to dL/d(output) before
        # backward() is invoked; consumed in dfa_backward_hook.
        self.loss_gradient = None
        # Fixed random feedback weights, excluded from optimization.
        self.weight_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_channels, self.kernel_size[0], self.kernel_size[1])), requires_grad=False)
        self.bias_backward = None
        if (self.bias is not None):
            # NOTE(review): bias_backward is (output_dim, in_channels), not a
            # bias-like shape — confirm this is intended.
            self.bias_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_channels)), requires_grad=False)
        self.init_parameters()
        if (('constrain_weights' in self.options) and self.options['constrain_weights']):
            # Remember the initial weight norm so forward() can renormalize.
            self.norm_initial_weights = torch.linalg.norm(self.weight)
        # register_backward_hook is deprecated in recent PyTorch in favor of
        # register_full_backward_hook; kept as-is to preserve behavior.
        self.register_backward_hook(self.dfa_backward_hook)
        self.weight_ratio = 0

    def init_parameters(self) -> None:
        """Initialize forward and feedback parameters with the same scheme."""
        (fan_in, fan_out) = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if (self.init == 'xavier'):
            nn.init.xavier_uniform_(self.weight)
            nn.init.xavier_uniform_(self.weight_backward)
            # Kept for parity with the sign-symmetry layers' feedback scaling.
            self.scaling_factor = math.sqrt((2.0 / float((fan_in + fan_out))))
            if (self.bias is not None):
                nn.init.constant_(self.bias, 0)
                nn.init.constant_(self.bias_backward, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.weight_backward, a=math.sqrt(5))
            self.scaling_factor = (1 / math.sqrt((3 * fan_in)))
            if (self.bias is not None):
                bound = ((1 / math.sqrt(fan_in)) if (fan_in > 0) else 0)
                nn.init.uniform_(self.bias, (- bound), bound)
                nn.init.uniform_(self.bias_backward, (- bound), bound)

    def compute_weight_ratio(self):
        """Return ||weight_backward|| / ||weight|| (feedback/forward norm ratio)."""
        with torch.no_grad():
            self.weight_diff = (torch.linalg.norm(self.weight_backward) / torch.linalg.norm(self.weight))
        return self.weight_diff

    def forward(self, x):
        """Convolution forward via the custom Conv2dGrad autograd function."""
        with torch.no_grad():
            if (('constrain_weights' in self.options) and self.options['constrain_weights']):
                # Rescale weights back to their initial norm (no grad tracking).
                self.weight = torch.nn.Parameter(((self.weight * self.norm_initial_weights) / torch.linalg.norm(self.weight)))
        return Conv2dGrad.apply(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)

    @staticmethod
    def dfa_backward_hook(module, grad_input, grad_output):
        """Replace grad_input[0] with the DFA gradient from loss_gradient."""
        # No input gradient to replace (e.g. first layer): pass through.
        if (grad_input[0] is None):
            return grad_input
        else:
            # Broadcast the per-sample loss gradient over the spatial dims of
            # this layer's output, then project it back through the fixed
            # random feedback weights to the input's shape.
            out_grad = module.loss_gradient.unsqueeze(2).repeat(1, 1, grad_output[0].size()[2])
            out_grad = out_grad.unsqueeze(3).repeat(1, 1, 1, grad_output[0].size()[3])
            grad_dfa = torch.nn.grad.conv2d_input(input_size=grad_input[0].shape, weight=module.weight_backward, grad_output=out_grad, stride=module.stride, padding=module.padding, dilation=module.dilation, groups=module.groups)
            # Leave the remaining (weight/bias) gradients untouched.
            if (len(grad_input) == 2):
                return (grad_dfa, grad_input[1])
            else:
                return (grad_dfa, grad_input[1], grad_input[2])
|
class Linear(nn.Linear):
    """Linear layer trained with Direct Feedback Alignment (DFA).

    A backward hook replaces the gradient flowing into this layer with the
    global loss gradient projected through the fixed random matrix
    ``weight_backward``.
    """

    def __init__(self, in_features: int, out_features: int, output_dim: int, bias: bool=True, layer_config: dict=None) -> None:
        # output_dim: size of the network output; rows of the feedback matrix.
        super(Linear, self).__init__(in_features, out_features, bias)
        self.layer_config = layer_config
        # NOTE(review): layer_config=None is not handled here (unlike the FA
        # constructors); passing None raises TypeError on the check below.
        if ('options' not in self.layer_config):
            self.layer_config['options'] = {'constrain_weights': False, 'gradient_clip': False, 'init': 'xavier'}
        self.options = self.layer_config['options']
        self.init = self.options['init']
        # Set by the training loop to dL/d(output) before backward() runs.
        self.loss_gradient = None
        # Fixed random feedback matrix, excluded from optimization.
        self.weight_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_features)), requires_grad=False)
        self.bias_backward = None
        if (self.bias is not None):
            # NOTE(review): bias_backward has the same shape as weight_backward
            # rather than a bias-like shape — confirm this is intended.
            self.bias_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_features)), requires_grad=False)
        self.init_parameters()
        if (('constrain_weights' in self.options) and self.options['constrain_weights']):
            with torch.no_grad():
                # Remember the initial norm so forward() can rescale weights.
                self.norm_initial_weights = torch.linalg.norm(self.weight)
        # register_backward_hook is deprecated in recent PyTorch in favor of
        # register_full_backward_hook; kept as-is to preserve behavior.
        self.register_backward_hook(self.dfa_backward_hook)
        self.weight_ratio = 0

    def init_parameters(self) -> None:
        """Initialize forward and feedback parameters with the same scheme."""
        (fan_in, fan_out) = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if (self.init == 'xavier'):
            nn.init.xavier_uniform_(self.weight)
            nn.init.xavier_uniform_(self.weight_backward)
            # Kept for parity with the sign-symmetry layers' feedback scaling.
            self.scaling_factor = math.sqrt((2.0 / float((fan_in + fan_out))))
            if (self.bias is not None):
                nn.init.constant_(self.bias, 0)
                nn.init.constant_(self.bias_backward, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.weight_backward, a=math.sqrt(5))
            self.scaling_factor = (1 / math.sqrt((3 * fan_in)))
            if (self.bias is not None):
                bound = ((1 / math.sqrt(fan_in)) if (fan_in > 0) else 0)
                nn.init.uniform_(self.bias, (- bound), bound)
                nn.init.uniform_(self.bias_backward, (- bound), bound)

    def forward(self, x):
        """Linear forward via the custom LinearGrad autograd function."""
        with torch.no_grad():
            if (('constrain_weights' in self.options) and self.options['constrain_weights']):
                # Rescale weights back to their initial norm (no grad tracking).
                self.weight = torch.nn.Parameter(((self.weight * self.norm_initial_weights) / torch.linalg.norm(self.weight)))
        return LinearGrad.apply(x, self.weight, self.bias)

    def compute_weight_ratio(self):
        """Return ||weight_backward|| / ||weight|| (feedback/forward norm ratio)."""
        with torch.no_grad():
            self.weight_diff = (torch.linalg.norm(self.weight_backward) / torch.linalg.norm(self.weight))
        return self.weight_diff

    @staticmethod
    def dfa_backward_hook(module, grad_input, grad_output):
        """Replace grad_input[0] with the DFA gradient loss_gradient @ B."""
        # No input gradient to replace (e.g. first layer): pass through.
        if (grad_input[0] is None):
            return grad_input
        else:
            grad_dfa = module.loss_gradient.mm(module.weight_backward)
            # Leave the remaining (weight/bias) gradients untouched.
            if (len(grad_input) == 2):
                return (grad_dfa, grad_input[1])
            else:
                return (grad_dfa, grad_input[1], grad_input[2])
|
class Conv2d(fa_constructor.Conv2d):
    """Feedback Alignment convolution: fixed random feedback weights."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        cfg = {} if layer_config is None else layer_config
        # Force the plain FA feedback mode before delegating to the base class.
        cfg['type'] = 'fa'
        super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, cfg)
|
class Linear(fa_constructor.Linear):
    """Feedback Alignment linear layer: fixed random feedback weights."""

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        cfg = {} if layer_config is None else layer_config
        # Force the plain FA feedback mode before delegating to the base class.
        cfg['type'] = 'fa'
        super().__init__(in_features, out_features, bias, cfg)
|
class Conv2d(nn.Conv2d):
    """Feedback-alignment family convolution ('fa', 'usf', 'brsf', 'frsf').

    Keeps a separate non-trainable ``weight_backward`` tensor used by the
    custom backward pass (``Conv2dGrad``) instead of the transposed forward
    weights. The sign-symmetry variants refresh it on every forward call:

    - 'fa':   fixed random feedback, never refreshed
    - 'usf':  scaling_factor * sign(W), recomputed each forward
    - 'brsf': |M| * sign(W) with M redrawn each forward
    - 'frsf': |M0| * sign(W) with M0 drawn once at construction
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        self.layer_config = layer_config
        if (self.layer_config is None):
            self.layer_config = {'type': 'fa'}
        if ('options' not in self.layer_config):
            # Mutates a caller-provided dict on purpose, so all layers built
            # from the same config share these defaults.
            self.layer_config['options'] = {'constrain_weights': False, 'gradient_clip': False, 'init': 'xavier'}
        self.options = self.layer_config['options']
        self.type = self.layer_config['type']
        self.init = self.options['init']
        # Non-trainable feedback weights, same shape as the forward weights.
        self.weight_backward = nn.Parameter(torch.Tensor(self.weight.size()), requires_grad=False)
        if (self.bias is not None):
            self.bias_backward = nn.Parameter(torch.Tensor(self.bias.size()), requires_grad=False)
        else:
            self.register_parameter('bias', None)
            self.bias_backward = None
        self.init_parameters()
        if (self.type == 'frsf'):
            # frsf keeps a fixed random magnitude: store |M0| once.
            self.weight_backward = nn.Parameter(torch.abs(self.weight_backward), requires_grad=False)
        if (('constrain_weights' in self.options) and self.options['constrain_weights']):
            # Remember the initial norm so forward() can rescale the weights.
            self.norm_initial_weights = torch.linalg.norm(self.weight)
        if ((self.type == 'usf') or (self.type == 'brsf')):
            # Start sign-concordant variants from scaling_factor * sign(W);
            # scaling_factor was set by init_parameters() above.
            with torch.no_grad():
                self.weight_backward = torch.nn.Parameter((self.scaling_factor * torch.sign(self.weight)), requires_grad=False)
        self.alignment = 0
        self.weight_ratio = 0
        if (('gradient_clip' in self.options) and self.options['gradient_clip']):
            # register_backward_hook is deprecated in recent PyTorch in favor
            # of register_full_backward_hook; kept as-is to preserve behavior.
            self.register_backward_hook(self.gradient_clip)

    def init_parameters(self) -> None:
        """Initialize forward and feedback tensors; also sets scaling_factor."""
        (fan_in, fan_out) = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if (self.init == 'xavier'):
            nn.init.xavier_uniform_(self.weight)
            nn.init.xavier_uniform_(self.weight_backward)
            # Scale used for the sign-concordant feedback variants.
            self.scaling_factor = math.sqrt((2.0 / float((fan_in + fan_out))))
            if (self.bias is not None):
                nn.init.constant_(self.bias, 0)
                nn.init.constant_(self.bias_backward, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.weight_backward, a=math.sqrt(5))
            self.scaling_factor = (1 / math.sqrt((3 * fan_in)))
            if (self.bias is not None):
                bound = ((1 / math.sqrt(fan_in)) if (fan_in > 0) else 0)
                nn.init.uniform_(self.bias, (- bound), bound)
                nn.init.uniform_(self.bias_backward, (- bound), bound)

    def forward(self, x: Tensor) -> Tensor:
        """Forward pass; refreshes weight_backward for the sign-symmetry modes."""
        weight_backward = None
        with torch.no_grad():
            if (('constrain_weights' in self.options) and self.options['constrain_weights']):
                # Rescale weights back to their initial norm (no grad tracking).
                self.weight = torch.nn.Parameter(((self.weight * self.norm_initial_weights) / torch.linalg.norm(self.weight)))
            if (self.type == 'usf'):
                # usf: scaling_factor * sign(W), recomputed every forward.
                weight_backward = torch.nn.Parameter(torch.sign(self.weight), requires_grad=False)
                weight_backward = torch.nn.Parameter((self.scaling_factor * weight_backward), requires_grad=False)
                self.weight_backward = weight_backward
            elif (self.type == 'brsf'):
                # brsf: redraw a fresh random magnitude M every forward pass.
                wb = torch.Tensor(self.weight.size()).to(self.weight.device)
                if (self.init == 'xavier'):
                    torch.nn.init.xavier_uniform_(wb)
                else:
                    init.kaiming_uniform_(wb, a=math.sqrt(5))
                weight_backward = torch.nn.Parameter((torch.abs(wb) * torch.sign(self.weight)), requires_grad=False)
                self.weight_backward = weight_backward
            elif (self.type == 'frsf'):
                # frsf: fixed magnitude |M0| times the current sign(W),
                # computed locally so the stored magnitude is not overwritten.
                weight_backward = torch.nn.Parameter((self.weight_backward * torch.sign(self.weight)), requires_grad=False)
        if (weight_backward is None):
            # 'fa': keep the fixed random feedback drawn at construction.
            weight_backward = self.weight_backward
        return Conv2dGrad.apply(x, self.weight, weight_backward, self.bias, self.bias_backward, self.stride, self.padding, self.dilation, self.groups)

    def compute_alignment(self):
        """Return the angle between feedback and forward weights (external helper)."""
        self.alignment = compute_matrix_angle(self.weight_backward, self.weight)
        return self.alignment

    def compute_weight_ratio(self):
        """Return ||weight_backward|| / ||weight|| (feedback/forward norm ratio)."""
        with torch.no_grad():
            self.weight_diff = (torch.linalg.norm(self.weight_backward) / torch.linalg.norm(self.weight))
        return self.weight_diff

    @staticmethod
    def gradient_clip(module, grad_input, grad_output):
        """Backward hook: clamp every incoming gradient to [-1, 1]."""
        grad_input = list(grad_input)
        for i in range(len(grad_input)):
            if (grad_input[i] is not None):
                grad_input[i] = torch.clamp(grad_input[i], (- 1), 1)
        return tuple(grad_input)
|
class Linear(nn.Linear):
    """Linear layer with decoupled feedback weights for biologically
    plausible credit assignment.

    The backward pass (via ``LinearGrad``) uses ``weight_backward`` /
    ``bias_backward`` instead of the forward weights. The variant is chosen
    by ``layer_config['type']``: 'fa' (fixed random feedback), 'usf'
    (sign-concordant), 'brsf' (batchwise random magnitude, sign-concordant)
    or 'frsf' (fixed random magnitude, sign-concordant).
    """

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        """Build the layer; defaults to 'fa' with xavier init, no weight
        constraint and no gradient clipping when config entries are absent."""
        super(Linear, self).__init__(in_features, out_features, bias)
        self.layer_config = layer_config
        if (self.layer_config is None):
            self.layer_config = {'type': 'fa'}
        if ('options' not in self.layer_config):
            self.layer_config['options'] = {'constrain_weights': False, 'gradient_clip': False, 'init': 'xavier'}
        self.options = self.layer_config['options']
        self.type = self.layer_config['type']
        self.init = self.options['init']
        # Fixed (non-trainable) feedback weights used by the backward pass.
        self.weight_backward = nn.Parameter(torch.Tensor(self.weight.size()), requires_grad=False)
        if (self.bias is not None):
            self.bias_backward = nn.Parameter(torch.Tensor(self.bias.size()), requires_grad=False)
        else:
            self.register_parameter('bias', None)
            self.bias_backward = None
        self.init_parameters()
        if (self.type == 'frsf'):
            # frSF keeps only the magnitude |M|; the sign is re-applied from
            # the forward weights on every forward pass.
            self.weight_backward = nn.Parameter(torch.abs(self.weight_backward), requires_grad=False)
        if (('constrain_weights' in self.options) and self.options['constrain_weights']):
            # Remember the initial norm so forward() can renormalize to it.
            self.norm_initial_weights = torch.linalg.norm(self.weight)
        if ((self.type == 'usf') or (self.type == 'brsf')):
            with torch.no_grad():
                self.weight_backward = torch.nn.Parameter((self.scaling_factor * torch.sign(self.weight)), requires_grad=False)
        self.alignment = 0
        self.weight_ratio = 0
        if (('gradient_clip' in self.options) and self.options['gradient_clip']):
            # NOTE(review): register_backward_hook is deprecated in recent
            # PyTorch in favor of register_full_backward_hook — confirm the
            # targeted torch version.
            self.register_backward_hook(self.gradient_clip)

    def init_parameters(self) -> None:
        """Initialize forward/backward weights and biases per ``self.init``
        ('xavier' or Kaiming otherwise) and set ``self.scaling_factor``."""
        (fan_in, fan_out) = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if (self.init == 'xavier'):
            nn.init.xavier_uniform_(self.weight)
            nn.init.xavier_uniform_(self.weight_backward)
            self.scaling_factor = math.sqrt((2.0 / float((fan_in + fan_out))))
            if (self.bias is not None):
                nn.init.constant_(self.bias, 0)
                nn.init.constant_(self.bias_backward, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.weight_backward, a=math.sqrt(5))
            self.scaling_factor = (1 / math.sqrt((3 * fan_in)))
            if (self.bias is not None):
                bound = ((1 / math.sqrt(fan_in)) if (fan_in > 0) else 0)
                nn.init.uniform_(self.bias, (- bound), bound)
                nn.init.uniform_(self.bias_backward, (- bound), bound)

    def forward(self, x: Tensor) -> Tensor:
        """Apply the layer via ``LinearGrad``, first refreshing the feedback
        weights according to the configured variant."""
        weight_backward = None
        with torch.no_grad():
            if (('constrain_weights' in self.options) and self.options['constrain_weights']):
                # Rescale forward weights back to their initial norm.
                # NOTE(review): creating a new Parameter each forward replaces
                # the tensor registered with the optimizer — confirm intended.
                self.weight = torch.nn.Parameter(((self.weight * self.norm_initial_weights) / torch.linalg.norm(self.weight)))
            if (self.type == 'usf'):
                # Uniform sign-concordant feedback: B = scaling_factor * sign(W).
                weight_backward = torch.nn.Parameter(torch.sign(self.weight), requires_grad=False)
                weight_backward = torch.nn.Parameter((self.scaling_factor * weight_backward), requires_grad=False)
                self.weight_backward = weight_backward
            elif (self.type == 'brsf'):
                # Batchwise random magnitude: fresh random |M| every forward.
                wb = torch.Tensor(self.weight.size()).to(self.weight.device)
                if (self.init == 'xavier'):
                    torch.nn.init.xavier_uniform_(wb)
                else:
                    init.kaiming_uniform_(wb, a=math.sqrt(5))
                weight_backward = torch.nn.Parameter((torch.abs(wb) * torch.sign(self.weight)), requires_grad=False)
                self.weight_backward = weight_backward
            elif (self.type == 'frsf'):
                # Fixed magnitude |M| with the sign of the current weights.
                weight_backward = torch.nn.Parameter((torch.abs(self.weight_backward) * torch.sign(self.weight)), requires_grad=False)
        if (weight_backward is None):
            # 'fa' mode: use the fixed random feedback matrix unchanged.
            weight_backward = self.weight_backward
        return LinearGrad.apply(x, self.weight, weight_backward, self.bias, self.bias_backward)

    def compute_alignment(self):
        """Return the angle (degrees) between feedback and forward weights."""
        self.alignment = compute_matrix_angle(self.weight_backward, self.weight)
        return self.alignment

    def compute_weight_ratio(self):
        """Return ||weight_backward|| / ||weight|| (Frobenius norms)."""
        with torch.no_grad():
            self.weight_diff = (torch.linalg.norm(self.weight_backward) / torch.linalg.norm(self.weight))
        return self.weight_diff

    @staticmethod
    def gradient_clip(module, grad_input, grad_output):
        """Backward hook clamping each non-None gradient to [-1, 1]."""
        grad_input = list(grad_input)
        for i in range(len(grad_input)):
            if (grad_input[i] is not None):
                grad_input[i] = torch.clamp(grad_input[i], (- 1), 1)
        return tuple(grad_input)
|
class Conv2d(fa_constructor.Conv2d):
    """Fixed Random Magnitude Sign-concordant Feedback (frSF) convolution.

    From "How Important Is Weight Symmetry in Backpropagation?"
    (https://arxiv.org/pdf/1510.05067.pdf), with the absolute value taken of
    the backward matrix: ``weight_backward = |M| * sign(weight)``, where M is
    drawn once and kept fixed for the whole experiment.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        if layer_config is None:
            layer_config = {}
        # Force the frSF variant regardless of any caller-supplied type.
        layer_config['type'] = 'frsf'
        super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, layer_config)
|
class Linear(fa_constructor.Linear):
    """Fixed Random Magnitude Sign-concordant Feedback (frSF) linear layer.

    From "How Important Is Weight Symmetry in Backpropagation?", with the
    absolute value taken of the backward matrix:
    ``weight_backward = |M| * sign(weight)``, where M is drawn once and kept
    fixed for the whole experiment.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        if layer_config is None:
            layer_config = {}
        # Force the frSF variant regardless of any caller-supplied type.
        layer_config['type'] = 'frsf'
        super().__init__(in_features, out_features, bias, layer_config)
|
def compute_matrix_angle(A, B):
    """Return the angle in degrees between tensors ``A`` and ``B``.

    Both tensors are flattened and L2-normalized; the angle is the arccos of
    their dot product, clipped to [-1, 1] for numerical safety.
    """
    with torch.no_grad():
        a = torch.reshape(A, (-1,))
        b = torch.reshape(B, (-1,))
        a = a / torch.norm(a)
        b = b / torch.norm(b)
        cosine = torch.clip(torch.dot(a, b), -1.0, 1.0)
        angle = torch.arccos(cosine) * (180.0 / math.pi)
    return angle
|
class Conv2d(fa_constructor.Conv2d):
    """Uniform Sign-concordant Feedback (uSF) convolution.

    From "How Important Is Weight Symmetry in Backpropagation?"
    (https://arxiv.org/pdf/1510.05067.pdf): the backward weights are
    ``sign(weight)``.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        if layer_config is None:
            layer_config = {}
        # Force the uSF variant regardless of any caller-supplied type.
        layer_config['type'] = 'usf'
        super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, layer_config)
|
class Linear(fa_constructor.Linear):
    """Uniform Sign-concordant Feedback (uSF) linear layer.

    Method from "How Important Is Weight Symmetry in Backpropagation?"
    (https://arxiv.org/pdf/1510.05067.pdf):
    ``weight_backward = sign(weight)``.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        if layer_config is None:
            layer_config = {}
        # Force the uSF variant regardless of any caller-supplied type.
        layer_config['type'] = 'usf'
        super().__init__(in_features, out_features, bias, layer_config)
|
def convert_layer(layer, mode, copy_weights, layer_config=None, output_dim=None):
    """Convert a standard ``nn.Conv2d`` or ``nn.Linear`` into its
    biologically plausible counterpart.

    Args:
        layer: the source ``nn.Conv2d`` / ``nn.Linear`` layer.
        mode: target method — one of 'fa', 'usf', 'brsf', 'frsf', 'dfa',
            'backpropagation'.
        copy_weights: if True, reuse the source layer's weight/bias
            Parameters in the converted layer.
        layer_config: optional layer configuration dict; its 'type' entry is
            overwritten with ``mode``.
        output_dim: network output dimension, required by the 'dfa' layers.

    Returns:
        The converted layer, or None if ``layer`` is neither a Conv2d nor a
        Linear (or ``mode`` is unknown).
    """
    layer_bias, bias_weight = False, None
    # Bug fix: 'weight' was previously unbound when copy_weights was True but
    # the layer had no 'weight' parameter, raising UnboundLocalError below.
    weight = None
    if ('weight' in layer.__dict__['_parameters']) and copy_weights:
        weight = layer.weight
    if ('bias' in layer.__dict__['_parameters']) and (layer.bias is not None):
        bias_weight = layer.bias
        layer_bias = True
    new_layer = None
    if layer_config is None:
        layer_config = {}
    layer_config['type'] = mode
    if isinstance(layer, nn.Conv2d):
        if mode in ['fa', 'usf', 'brsf', 'frsf']:
            new_layer = fa_constructor.Conv2d(layer.in_channels, layer.out_channels, layer.kernel_size, layer.stride, layer.padding, layer.dilation, layer.groups, layer_bias, layer.padding_mode, layer_config)
        elif mode == 'dfa':
            new_layer = dfa_layers.Conv2d(layer.in_channels, layer.out_channels, output_dim, layer.kernel_size, layer.stride, layer.padding, layer.dilation, layer.groups, layer_bias, layer.padding_mode, layer_config)
        elif mode == 'backpropagation':
            new_layer = bp_layers.Conv2d(layer.in_channels, layer.out_channels, layer.kernel_size, layer.stride, layer.padding, layer.dilation, layer.groups, layer_bias, layer.padding_mode, layer_config)
    elif isinstance(layer, nn.Linear):
        if mode in ['fa', 'usf', 'brsf', 'frsf']:
            new_layer = fa_constructor.Linear(layer.in_features, layer.out_features, layer_bias, layer_config)
        elif mode == 'dfa':
            new_layer = dfa_layers.Linear(layer.in_features, layer.out_features, output_dim, layer_bias, layer_config)
        elif mode == 'backpropagation':
            new_layer = bp_layers.Linear(layer.in_features, layer.out_features, layer_bias, layer_config)
    if (new_layer is not None) and copy_weights and (weight is not None):
        new_layer.weight = weight
        new_layer.bias = bias_weight
    return new_layer
|
def alexnet(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> AlexNet:
    """AlexNet ("One weird trick...", https://arxiv.org/abs/1404.5997)
    converted to this module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    Minimum input size is 63x63.
    """
    print('Converting AlexNet to {} mode'.format(MODE_STRING))
    builder = models.alexnet
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet121(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-121 (https://arxiv.org/pdf/1608.06993.pdf) converted to this
    module's biologically plausible mode. Minimum input size is 29x29.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting Densenet-121 to {} mode'.format(MODE_STRING))
    builder = models.densenet121
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet161(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-161 (https://arxiv.org/pdf/1608.06993.pdf) converted to this
    module's biologically plausible mode. Minimum input size is 29x29.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting Densenet-161 to {} mode'.format(MODE_STRING))
    builder = models.densenet161
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet169(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-169 (https://arxiv.org/pdf/1608.06993.pdf) converted to this
    module's biologically plausible mode. Minimum input size is 29x29.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting Densenet-169 to {} mode'.format(MODE_STRING))
    builder = models.densenet169
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet201(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-201 (https://arxiv.org/pdf/1608.06993.pdf) converted to this
    module's biologically plausible mode. Minimum input size is 29x29.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting Densenet-201 to {} mode'.format(MODE_STRING))
    builder = models.densenet201
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def le_net_mnist(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config=None):
    """LeNet CNN for MNIST converted to this module's biologically plausible
    mode.

    Args: pretrained weights, download progress bar, output dimension of the
    last linear layer, optional layer configuration dict.
    """
    print('Converting LeNet CNN MNIST to {} mode'.format(MODE_STRING))
    builder = le_net.le_net_mnist
    return create_le_net_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def le_net_cifar(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config=None):
    """LeNet CNN for CIFAR converted to this module's biologically plausible
    mode.

    Args: pretrained weights, download progress bar, output dimension of the
    last linear layer, optional layer configuration dict.
    """
    print('Converting LeNet CNN CIFAR to {} mode'.format(MODE_STRING))
    builder = le_net.le_net_cifar
    return create_le_net_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet0_5(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 0.5 (https://arxiv.org/pdf/1807.11626.pdf),
    converted to this module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting MNASNet 0.5 to {} mode'.format(MODE_STRING))
    builder = models.mnasnet0_5
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet0_75(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 0.75 (https://arxiv.org/pdf/1807.11626.pdf),
    converted to this module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting MNASNet 0.75 to {} mode'.format(MODE_STRING))
    builder = models.mnasnet0_75
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet1_0(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 1.0 (https://arxiv.org/pdf/1807.11626.pdf),
    converted to this module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting MNASNet 1.0 to {} mode'.format(MODE_STRING))
    builder = models.mnasnet1_0
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet1_3(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 1.3 (https://arxiv.org/pdf/1807.11626.pdf),
    converted to this module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting MNASNet 1.3 to {} mode'.format(MODE_STRING))
    builder = models.mnasnet1_3
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet18(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-18 (https://arxiv.org/pdf/1512.03385.pdf) converted to this
    module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting ResNet-18 to {} mode'.format(MODE_STRING))
    builder = models.resnet18
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet20(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-20 (https://arxiv.org/pdf/1512.03385.pdf) converted to this
    module's biologically plausible mode.

    Args: pretrained weights, download progress bar, output dimension of the
    last linear layer, optional layer configuration dict.
    """
    print('Converting ResNet-20 to {} mode'.format(MODE_STRING))
    builder = small_resnet.resnet20
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet32(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-32 (https://arxiv.org/pdf/1512.03385.pdf) converted to this
    module's biologically plausible mode.

    Args: pretrained weights, download progress bar, output dimension of the
    last linear layer, optional layer configuration dict.
    """
    print('Converting ResNet-32 to {} mode'.format(MODE_STRING))
    builder = small_resnet.resnet32
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet34(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-34 (https://arxiv.org/pdf/1512.03385.pdf) converted to this
    module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting ResNet-34 to {} mode'.format(MODE_STRING))
    builder = models.resnet34
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet44(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-44 (https://arxiv.org/pdf/1512.03385.pdf) converted to this
    module's biologically plausible mode.

    Args: pretrained weights, download progress bar, output dimension of the
    last linear layer, optional layer configuration dict.
    """
    print('Converting ResNet-44 to {} mode'.format(MODE_STRING))
    builder = small_resnet.resnet44
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet50(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-50 (https://arxiv.org/pdf/1512.03385.pdf) converted to this
    module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting ResNet-50 to {} mode'.format(MODE_STRING))
    builder = models.resnet50
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet56(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-56 (https://arxiv.org/pdf/1512.03385.pdf) converted to this
    module's biologically plausible mode.

    Args: pretrained weights, download progress bar, output dimension of the
    last linear layer, optional layer configuration dict.
    """
    print('Converting ResNet-56 to {} mode'.format(MODE_STRING))
    builder = small_resnet.resnet56
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet101(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-101 (https://arxiv.org/pdf/1512.03385.pdf) converted to this
    module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting ResNet-101 to {} mode'.format(MODE_STRING))
    builder = models.resnet101
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet110(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-110 (https://arxiv.org/pdf/1512.03385.pdf) converted to this
    module's biologically plausible mode.

    Args: pretrained weights, download progress bar, output dimension of the
    last linear layer, optional layer configuration dict.
    """
    print('Converting ResNet-110 to {} mode'.format(MODE_STRING))
    builder = small_resnet.resnet110
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet152(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-152 (https://arxiv.org/pdf/1512.03385.pdf) converted to this
    module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting ResNet-152 to {} mode'.format(MODE_STRING))
    builder = models.resnet152
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet1202(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-1202 (https://arxiv.org/pdf/1512.03385.pdf) converted to this
    module's biologically plausible mode.

    Args: pretrained weights, download progress bar, output dimension of the
    last linear layer, optional layer configuration dict.
    """
    print('Converting ResNet-1202 to {} mode'.format(MODE_STRING))
    builder = small_resnet.resnet1202
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnext50_32x4d(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNeXt-50 32x4d (https://arxiv.org/pdf/1611.05431.pdf) converted to
    this module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting ResNext-50 32x4d to {} mode'.format(MODE_STRING))
    builder = models.resnext50_32x4d
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def resnext101_32x8d(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNeXt-101 32x8d (https://arxiv.org/pdf/1611.05431.pdf) converted to
    this module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting ResNext-101 32x8d to {} mode'.format(MODE_STRING))
    builder = models.resnext101_32x8d
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def wide_resnet50_2(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """Wide ResNet-50-2 (https://arxiv.org/pdf/1605.07146.pdf) converted to
    this module's biologically plausible mode.

    Same as ResNet-50 but with twice as many bottleneck channels per block.
    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting Wide ResNet-50-2 to {} mode'.format(MODE_STRING))
    builder = models.wide_resnet50_2
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def wide_resnet101_2(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """Wide ResNet-101-2 (https://arxiv.org/pdf/1605.07146.pdf) converted to
    this module's biologically plausible mode.

    Same as ResNet-101 but with twice as many bottleneck channels per block.
    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting Wide ResNet-101-2 to {} mode'.format(MODE_STRING))
    builder = models.wide_resnet101_2
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def alexnet(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> AlexNet:
    """AlexNet ("One weird trick...", https://arxiv.org/abs/1404.5997)
    converted to this module's biologically plausible mode.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    Minimum input size is 63x63.
    """
    print('Converting AlexNet to {} mode'.format(MODE_STRING))
    builder = models.alexnet
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet121(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-121 (https://arxiv.org/pdf/1608.06993.pdf) converted to this
    module's biologically plausible mode. Minimum input size is 29x29.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting Densenet-121 to {} mode'.format(MODE_STRING))
    builder = models.densenet121
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet161(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-161 (https://arxiv.org/pdf/1608.06993.pdf) converted to this
    module's biologically plausible mode. Minimum input size is 29x29.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting Densenet-161 to {} mode'.format(MODE_STRING))
    builder = models.densenet161
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet169(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-169 (https://arxiv.org/pdf/1608.06993.pdf) converted to this
    module's biologically plausible mode. Minimum input size is 29x29.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting Densenet-169 to {} mode'.format(MODE_STRING))
    builder = models.densenet169
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet201(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-201 (https://arxiv.org/pdf/1608.06993.pdf) converted to this
    module's biologically plausible mode. Minimum input size is 29x29.

    Args: pretrained ImageNet weights, download progress bar, output
    dimension of the last linear layer, optional layer configuration dict.
    """
    print('Converting Densenet-201 to {} mode'.format(MODE_STRING))
    builder = models.densenet201
    return create_torchvision_biomodel(builder, MODE, layer_config, pretrained, progress, num_classes)
|
def le_net_mnist(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config=None):
    """LeNet-style CNN for MNIST, converted to the biologically plausible mode.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
    """
    print(f'Converting LeNet CNN MNIST to {MODE_STRING} mode')
    return create_le_net_biomodel(le_net.le_net_mnist, MODE, layer_config, pretrained, progress, num_classes)
|
def le_net_cifar(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config=None):
    """LeNet-style CNN for CIFAR, converted to the biologically plausible mode.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
    """
    print(f'Converting LeNet CNN CIFAR to {MODE_STRING} mode')
    return create_le_net_biomodel(le_net.le_net_cifar, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet0_5(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet with depth multiplier of 0.5 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting MNASNet 0.5 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet0_5, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet0_75(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet with depth multiplier of 0.75 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting MNASNet 0.75 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet0_75, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet1_0(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet with depth multiplier of 1.0 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting MNASNet 1.0 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet1_0, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet1_3(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet with depth multiplier of 1.3 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting MNASNet 1.3 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet1_3, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet18(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-18 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet18, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet20(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-20 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-20 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet20, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet32(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-32 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-32 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet32, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet34(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-34 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet34, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet44(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-44 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-44 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet44, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet50(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-50 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet50, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet56(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-56 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-56 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet56, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet101(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-101 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet101, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet110(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-110 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-110 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet110, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet152(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-152 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet152, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet1202(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-1202 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-1202 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet1202, MODE, layer_config, pretrained, progress, num_classes)
|
def resnext50_32x4d(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNext-50 32x4d to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnext50_32x4d, MODE, layer_config, pretrained, progress, num_classes)
|
def resnext101_32x8d(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNext-101 32x8d to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnext101_32x8d, MODE, layer_config, pretrained, progress, num_classes)
|
def wide_resnet50_2(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting Wide ResNet-50-2 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.wide_resnet50_2, MODE, layer_config, pretrained, progress, num_classes)
|
def wide_resnet101_2(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting Wide ResNet-101-2 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.wide_resnet101_2, MODE, layer_config, pretrained, progress, num_classes)
|
def alexnet(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> AlexNet:
    """AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    The required minimum input size of the model is 63x63.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting AlexNet to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.alexnet, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet121(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    The required minimum input size of the model is 29x29.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting Densenet-121 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet121, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet161(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    The required minimum input size of the model is 29x29.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting Densenet-161 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet161, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet169(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    The required minimum input size of the model is 29x29.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting Densenet-169 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet169, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet201(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    The required minimum input size of the model is 29x29.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting Densenet-201 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet201, MODE, layer_config, pretrained, progress, num_classes)
|
def le_net_mnist(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config=None):
    """LeNet-style CNN for MNIST, converted to the biologically plausible mode.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
    """
    print(f'Converting LeNet CNN MNIST to {MODE_STRING} mode')
    return create_le_net_biomodel(le_net.le_net_mnist, MODE, layer_config, pretrained, progress, num_classes)
|
def le_net_cifar(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config=None):
    """LeNet-style CNN for CIFAR, converted to the biologically plausible mode.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
    """
    print(f'Converting LeNet CNN CIFAR to {MODE_STRING} mode')
    return create_le_net_biomodel(le_net.le_net_cifar, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet0_5(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet with depth multiplier of 0.5 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting MNASNet 0.5 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet0_5, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet0_75(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet with depth multiplier of 0.75 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting MNASNet 0.75 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet0_75, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet1_0(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet with depth multiplier of 1.0 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting MNASNet 1.0 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet1_0, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet1_3(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet with depth multiplier of 1.3 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting MNASNet 1.3 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet1_3, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet18(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-18 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet18, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet20(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-20 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-20 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet20, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet32(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-32 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-32 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet32, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet34(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-34 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet34, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet44(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-44 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-44 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet44, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet50(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-50 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet50, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet56(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-56 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-56 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet56, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet101(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print(f'Converting ResNet-101 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet101, MODE, layer_config, pretrained, progress, num_classes)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.