code stringlengths 17 6.64M |
|---|
def _raise_key_rename_error(full_key):
    """Raise a KeyError telling the user that ``full_key`` was renamed.

    The replacement is looked up in the module-level ``_RENAMED_KEYS`` map;
    an entry is either the new key itself or a ``(new_key, note)`` tuple.
    """
    entry = _RENAMED_KEYS[full_key]
    if isinstance(entry, tuple):
        new_key = entry[0]
        msg = (' Note: ' + entry[1])
    else:
        new_key = entry
        msg = ''
    raise KeyError(
        'Key {} was renamed to {}; please update your config.{}'.format(
            full_key, new_key, msg))
|
def _decode_cfg_value(v):
'Decodes a raw config value (e.g., from a yaml config files or command\n line argument) into a Python object.\n '
if isinstance(v, dict):
return AttrDict(v)
try:
v = literal_eval(v)
except ValueError:
pass
except SyntaxError:
pass
return v
|
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
'Checks that `value_a`, which is intended to replace `value_b` is of the\n right type. The type is correct if it matches exactly or is one of a few\n cases in which the type can be easily coerced.\n '
type_b = type(value_b)
type_a = type(value_a)
if (type_a is type_b):
return value_a
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif (isinstance(value_a, tuple) and isinstance(value_b, list)):
value_a = list(value_a)
elif (isinstance(value_a, list) and isinstance(value_b, tuple)):
value_a = tuple(value_a)
else:
raise ValueError('Type mismatch ({} vs. {}) with values ({} vs. {}) for config key: {}'.format(type_b, type_a, value_b, value_a, full_key))
return value_a
|
def save_object(obj, file_name):
    """Pickle ``obj`` to ``file_name`` using the highest pickle protocol."""
    target = os.path.abspath(file_name)
    with open(target, 'wb') as out:
        pickle.dump(obj, out, pickle.HIGHEST_PROTOCOL)
|
def cache_url(url_or_file, cache_dir):
    """Download the file specified by the URL to the cache_dir and return the
    path to the cached file. If the argument is not a URL, simply return it as
    is.
    """
    if re.match('^(?:http)s?://', url_or_file, re.IGNORECASE) is None:
        # Not a URL: treat as a local path and hand it back untouched.
        return url_or_file
    url = url_or_file
    assert url.startswith(_DETECTRON_S3_BASE_URL), \
        'Detectron only automatically caches URLs in the Detectron S3 bucket: {}'.format(_DETECTRON_S3_BASE_URL)
    cache_file_path = url.replace(_DETECTRON_S3_BASE_URL, cache_dir)
    if os.path.exists(cache_file_path):
        # Already cached: verify integrity and reuse it.
        assert_cache_file_is_ok(url, cache_file_path)
        return cache_file_path
    cache_file_dir = os.path.dirname(cache_file_path)
    if not os.path.exists(cache_file_dir):
        os.makedirs(cache_file_dir)
    logger.info('Downloading remote file {} to {}'.format(url, cache_file_path))
    download_url(url, cache_file_path)
    assert_cache_file_is_ok(url, cache_file_path)
    return cache_file_path
|
def assert_cache_file_is_ok(url, file_path):
    """Assert that the cached file's md5 matches the reference md5 for url."""
    actual = _get_file_md5sum(file_path)
    expected = _get_reference_md5sum(url)
    assert actual == expected, (
        'Target URL {} appears to be downloaded to the local cache file {}, but the md5 hash of the local file does not match the reference (actual: {} vs. expected: {}). You may wish to delete the cached file and try again to trigger automatic download.'.format(
            url, file_path, actual, expected))
|
def _progress_bar(count, total):
'Report download progress.\n Credit:\n https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113\n '
bar_len = 60
filled_len = int(round(((bar_len * count) / float(total))))
percents = round(((100.0 * count) / float(total)), 1)
bar = (('=' * filled_len) + ('-' * (bar_len - filled_len)))
sys.stdout.write(' [{}] {}% of {:.1f}MB file \r'.format(bar, percents, ((total / 1024) / 1024)))
sys.stdout.flush()
if (count >= total):
sys.stdout.write('\n')
|
def download_url(url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar):
    """Download url and write it to dst_file_path; return the bytes written.

    Credit:
    https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
    """
    response = urllib.request.urlopen(url)
    # Bug fix: on Python 3 the headers object returned by info() is an
    # email.message.Message and has no getheader(); use .get() instead.
    # Servers may omit Content-Length, so fall back to 0.
    total_size = int((response.headers.get('Content-Length') or '0').strip() or 0)
    bytes_so_far = 0
    with open(dst_file_path, 'wb') as f:
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            bytes_so_far += len(chunk)
            f.write(chunk)
            # Only report progress when the total is known (avoids div-by-zero
            # inside the hook).
            if progress_hook and total_size > 0:
                progress_hook(bytes_so_far, total_size)
    return bytes_so_far
|
def _get_file_md5sum(file_name):
'Compute the md5 hash of a file.'
hash_obj = hashlib.md5()
with open(file_name, 'r') as f:
hash_obj.update(f.read())
return hash_obj.hexdigest()
|
def _get_reference_md5sum(url):
    """By convention the md5 hash for url is stored in url + '.md5sum'."""
    url_md5sum = url + '.md5sum'
    # Bug fix: urlopen().read() returns bytes on Python 3; decode so the
    # result compares equal to hashlib's str hexdigest.
    md5sum = urllib.request.urlopen(url_md5sum).read().strip().decode('utf-8')
    return md5sum
|
class CosineRestartAnnealingLR(object):
    """Per-iteration cosine-annealing LR scheduler with optional warmup and
    optional warm restarts.

    Args:
        optimizer: wrapped torch Optimizer (validated below).
        T_max: total number of schedule steps.
        lr_period: cycle lengths (in steps) of each restart cycle; used only
            when ``use_restart`` is True.
        lr_step: step indices at which each cycle in ``lr_period`` begins;
            used only when ``use_restart`` is True.
        eta_min: floor learning rate reached at the end of a cosine cycle.
        last_step: last executed step; -1 means starting from scratch.
        use_warmup: enable a warmup phase for the first ``warmup_steps`` steps.
        warmup_mode: 'constant' or 'linear'.
        warmup_steps: number of warmup steps.
        warmup_startlr: LR at the start of warmup.
        warmup_targetlr: LR reached at the end of a linear warmup.
        use_restart: enable warm restarts.
    """
    def __init__(self, optimizer, T_max, lr_period, lr_step, eta_min=0, last_step=(- 1), use_warmup=False, warmup_mode='linear', warmup_steps=0, warmup_startlr=0, warmup_targetlr=0, use_restart=False):
        self.use_warmup = use_warmup
        self.warmup_mode = warmup_mode
        self.warmup_steps = warmup_steps
        self.warmup_startlr = warmup_startlr
        self.warmup_targetlr = warmup_targetlr
        self.use_restart = use_restart
        self.T_max = T_max
        self.eta_min = eta_min
        if (self.use_restart == False):
            # No restarts: a single cosine cycle covering everything after warmup.
            self.lr_period = [(self.T_max - self.warmup_steps)]
            self.lr_step = [self.warmup_steps]
        else:
            self.lr_period = lr_period
            self.lr_step = lr_step
        self.last_step = last_step
        # Length of the currently active cosine cycle and the step it began at.
        self.cycle_length = self.lr_period[0]
        self.cur = 0
        if (not isinstance(optimizer, Optimizer)):
            raise TypeError('{} is not an Optimizer'.format(type(optimizer).__name__))
        self.optimizer = optimizer
        if (last_step == (- 1)):
            # Fresh run: remember each group's starting LR so get_lr() can
            # anneal from it.
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            # Resuming: the optimizer's param groups must already carry
            # 'initial_lr' (normally restored from a checkpoint).
            for (i, group) in enumerate(optimizer.param_groups):
                if ('initial_lr' not in group):
                    raise KeyError("param 'initial_lr' is not specified in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = list(map((lambda group: group['initial_lr']), optimizer.param_groups))

    def step(self, step=None):
        """Advance the schedule (to ``step`` if given, else by one) and write
        the new learning rate into every param group."""
        if (step is not None):
            self.last_step = step
        else:
            self.last_step += 1
        for (param_group, lr) in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr

    def get_lr(self):
        """Return the current learning rate for each param group."""
        lrs = []
        for base_lr in self.base_lrs:
            if (self.use_warmup and (self.last_step < self.warmup_steps)):
                if (self.warmup_mode == 'constant'):
                    lrs.append(self.warmup_startlr)
                elif (self.warmup_mode == 'linear'):
                    # Linear ramp from warmup_startlr to warmup_targetlr.
                    cur_lr = (self.warmup_startlr + ((float((self.warmup_targetlr - self.warmup_startlr)) / self.warmup_steps) * self.last_step))
                    lrs.append(cur_lr)
                else:
                    raise NotImplementedError
            else:
                if (self.last_step in self.lr_step):
                    # A new (restart) cycle begins at this step.
                    self.cycle_length = self.lr_period[self.lr_step.index(self.last_step)]
                    self.cur = self.last_step
                # Position within the current cycle.
                peri_iter = (self.last_step - self.cur)
                if (peri_iter <= self.cycle_length):
                    # Cosine anneal from base_lr down to eta_min over the cycle.
                    unit_cycle = ((1 + math.cos(((peri_iter * math.pi) / self.cycle_length))) / 2)
                    adjusted_cycle = ((unit_cycle * (base_lr - self.eta_min)) + self.eta_min)
                    lrs.append(adjusted_cycle)
                else:
                    # Past the end of the final cycle: hold at the minimum LR.
                    lrs.append(self.eta_min)
        return lrs

    def display_lr_curve(self, total_steps):
        """Step the schedule ``total_steps`` times and plot the LR curve.

        NOTE: mutates scheduler state; intended for debugging only.
        """
        lrs = []
        for _ in range(total_steps):
            self.step()
            lrs.append(self.get_lr()[0])
        import matplotlib.pyplot as plt
        plt.plot(lrs)
        plt.show()

    def state_dict(self):
        """Return scheduler state (everything except the optimizer itself)."""
        return {key: value for (key, value) in self.__dict__.items() if (key != 'optimizer')}

    def load_state_dict(self, state_dict):
        """Restore state produced by :meth:`state_dict`."""
        self.__dict__.update(state_dict)
|
def get_lr_scheduler(config, optimizer, num_examples=None, batch_size=None):
    """Build the LR scheduler described by ``config.optim`` for ``optimizer``."""
    if num_examples is None:
        num_examples = config.data.num_examples
    # Iterations per epoch (the +1 accounts for the partial final batch).
    epoch_steps = (num_examples // batch_size) + 1
    if config.optim.use_multi_stage:
        max_steps = epoch_steps * config.optim.multi_stage.stage_epochs
    else:
        max_steps = epoch_steps * config.train_params.epochs
    # Convert the epoch-denominated restart schedule to step counts.
    period_steps = [epoch_steps * e for e in config.optim.cosine.restart.lr_period]
    step_steps = [epoch_steps * e for e in config.optim.cosine.restart.lr_step]
    init_lr = config.optim.init_lr
    use_warmup = config.optim.use_warm_up
    if use_warmup:
        warmup_steps = config.optim.warm_up.epoch * epoch_steps
        warmup_startlr = config.optim.warm_up.init_lr
        warmup_targetlr = config.optim.warm_up.target_lr
    else:
        # Degenerate warmup: zero steps at the initial LR.
        warmup_steps = 0
        warmup_startlr = init_lr
        warmup_targetlr = init_lr
    if config.optim.lr_schedule == 'cosine':
        return CosineRestartAnnealingLR(
            optimizer, float(max_steps), period_steps, step_steps,
            eta_min=config.optim.min_lr, use_warmup=use_warmup,
            warmup_steps=warmup_steps, warmup_startlr=warmup_startlr,
            warmup_targetlr=warmup_targetlr,
            use_restart=config.optim.cosine.use_restart)
    elif config.optim.lr_schedule == 'poly':
        raise NotImplementedError
    else:
        raise NotImplementedError
|
def comp_multadds(model, input_size=(3, 224, 224)):
    """Return the multiply-adds (in millions) of one forward pass of ``model``
    on a random batch-of-one input of ``input_size``; runs on GPU."""
    batch_shape = (1,) + tuple(input_size)
    model = add_flops_counting_methods(model.cuda())
    model.start_flops_count()
    with torch.no_grad():
        model(torch.randn(batch_shape).cuda())
    return model.compute_average_flops_cost() / 1000000.0
|
def comp_multadds_fw(model, input_data, use_gpu=True):
    """Run one forward pass on ``input_data`` and return a tuple of
    (multiply-adds in millions, model output)."""
    model = add_flops_counting_methods(model)
    if use_gpu:
        model = model.cuda()
    model.start_flops_count()
    with torch.no_grad():
        output_data = model(input_data)
    return (model.compute_average_flops_cost() / 1000000.0, output_data)
|
def add_flops_counting_methods(net_main_module):
    """Attach flops-counting methods to ``net_main_module`` and return it.

    After this call the module exposes ``start_flops_count``,
    ``stop_flops_count``, ``reset_flops_count`` and
    ``compute_average_flops_cost``. Typical use::

        net = add_flops_counting_methods(net)
        net.start_flops_count()
        _ = net(batch)
        flops = net.compute_average_flops_cost()

    Counting works through forward hooks that accumulate per-layer flops and
    the total batch count; the reported value is flops / batches, which also
    handles dynamic networks whose executed layers vary per input. Each
    multiply-add is counted as one flop here.
    """
    for fn in (start_flops_count, stop_flops_count,
               reset_flops_count, compute_average_flops_cost):
        setattr(net_main_module, fn.__name__, fn.__get__(net_main_module))
    net_main_module.reset_flops_count()
    # Conv/Linear modules also get a cleared flops mask attribute.
    net_main_module.apply(add_flops_mask_variable_or_reset)
    return net_main_module
|
def compute_average_flops_cost(self):
    """Return the mean flops per image accumulated since the last reset.

    Available after add_flops_counting_methods() has been called on the net.
    """
    total_flops = 0
    for submodule in self.modules():
        # Only Conv2d and Linear layers carry a flops accumulator.
        if isinstance(submodule, (torch.nn.Conv2d, torch.nn.Linear)):
            total_flops += submodule.__flops__
    return total_flops / self.__batch_counter__
|
def start_flops_count(self):
    """Begin accumulating flops statistics; call before running the network.

    Available after add_flops_counting_methods() has been called on the net.
    Registers the batch-counting hook on the root module and flops hooks on
    every Conv2d/Linear submodule.
    """
    add_batch_counter_hook_function(self)
    self.apply(add_flops_counter_hook_function)
|
def stop_flops_count(self):
    """Pause flops accumulation by removing all counting hooks.

    Available after add_flops_counting_methods() has been called on the net.
    """
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
|
def reset_flops_count(self):
    """Zero the batch counter and every per-layer flops accumulator.

    Available after add_flops_counting_methods() has been called on the net.
    """
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
|
def add_flops_mask(module, mask):
    """Install ``mask`` as the flops mask on every Conv2d/Linear in ``module``."""
    def _set_mask(m):
        if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
            m.__mask__ = mask
    module.apply(_set_mask)
|
def remove_flops_mask(module):
    """Clear (reset to None) the flops mask on every Conv2d/Linear in ``module``."""
    module.apply(add_flops_mask_variable_or_reset)
|
def conv_flops_counter_hook(conv_module, input, output):
    """Forward hook: add the flops of this Conv2d call to ``__flops__``.

    Counts one multiply-add per weight application. When a spatial
    ``__mask__`` is installed, only the masked output positions count.
    """
    batch_size = input[0].shape[0]
    out_h, out_w = output.shape[2:]
    k_h, k_w = conv_module.kernel_size
    in_ch = conv_module.in_channels
    out_ch = conv_module.out_channels
    # Multiply-adds needed to produce a single output position.
    per_position = (k_h * k_w * in_ch * out_ch) / conv_module.groups
    active = batch_size * out_h * out_w
    if conv_module.__mask__ is not None:
        # Restrict the count to positions selected by the mask.
        active = conv_module.__mask__.expand(batch_size, 1, out_h, out_w).sum()
    flops = per_position * active
    if conv_module.bias is not None:
        flops += out_ch * active
    conv_module.__flops__ += flops
|
def linear_flops_counter_hook(linear_module, input, output):
    """Forward hook: add this Linear call's flops (in*out per sample)."""
    batch_size = input[0].shape[0]
    linear_module.__flops__ += (
        linear_module.in_features * linear_module.out_features * batch_size)
|
def batch_counter_hook(module, input, output):
    """Forward hook: accumulate the batch size into ``__batch_counter__``."""
    module.__batch_counter__ += input[0].shape[0]
|
def add_batch_counter_variables_or_reset(module):
    """Create (or reset to zero) the module's batch counter."""
    module.__batch_counter__ = 0
|
def add_batch_counter_hook_function(module):
    """Register the batch-counting forward hook on ``module`` (idempotent)."""
    if not hasattr(module, '__batch_counter_handle__'):
        module.__batch_counter_handle__ = module.register_forward_hook(batch_counter_hook)
|
def remove_batch_counter_hook_function(module):
    """Remove the batch-counting hook from ``module`` if it is registered."""
    try:
        handle = module.__batch_counter_handle__
    except AttributeError:
        # No hook installed: nothing to do.
        return
    handle.remove()
    del module.__batch_counter_handle__
|
def add_flops_counter_variable_or_reset(module):
    """Create (or reset) the per-module flops accumulator on Conv2d/Linear."""
    if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
        module.__flops__ = 0
|
def add_flops_counter_hook_function(module):
    """Register the matching flops-counting hook on Conv2d/Linear (idempotent).

    Other module types are left untouched.
    """
    if hasattr(module, '__flops_handle__'):
        # Hook already installed.
        return
    if isinstance(module, torch.nn.Conv2d):
        module.__flops_handle__ = module.register_forward_hook(conv_flops_counter_hook)
    elif isinstance(module, torch.nn.Linear):
        module.__flops_handle__ = module.register_forward_hook(linear_flops_counter_hook)
|
def remove_flops_counter_hook_function(module):
    """Remove the flops-counting hook from a Conv2d/Linear, if registered."""
    if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
        if hasattr(module, '__flops_handle__'):
            module.__flops_handle__.remove()
            del module.__flops_handle__
|
def add_flops_mask_variable_or_reset(module):
    """Initialize (or clear) the flops mask on Conv2d/Linear modules."""
    if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
        module.__mask__ = None
|
class BasicBlock(nn.Module):
    """DenseNet basic layer: BN-ReLU-Conv3x3 whose output is concatenated
    onto the input along the channel dimension (dense connectivity)."""

    def __init__(self, in_planes, out_planes, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.droprate = dropRate

    def forward(self, x):
        new_features = self.conv1(self.relu(self.bn1(x)))
        if self.droprate > 0:
            new_features = F.dropout(new_features, p=self.droprate,
                                     training=self.training)
        # Dense connectivity: stack the new feature maps onto the input.
        return torch.cat([x, new_features], 1)
|
class BottleneckBlock(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-Conv1x1 (to 4*growth channels)
    followed by BN-ReLU-Conv3x3; the result is concatenated onto the input."""

    def __init__(self, in_planes, out_planes, dropRate=0.0):
        super(BottleneckBlock, self).__init__()
        inter_planes = out_planes * 4  # bottleneck width
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(inter_planes)
        self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.droprate = dropRate

    def forward(self, x):
        features = self.conv1(self.relu(self.bn1(x)))
        if self.droprate > 0:
            features = F.dropout(features, p=self.droprate, inplace=False,
                                 training=self.training)
        features = self.conv2(self.relu(self.bn2(features)))
        if self.droprate > 0:
            features = F.dropout(features, p=self.droprate, inplace=False,
                                 training=self.training)
        # Dense connectivity: stack the new feature maps onto the input.
        return torch.cat([x, features], 1)
|
class TransitionBlock(nn.Module):
    """DenseNet transition: BN-ReLU-Conv1x1 channel reduction followed by
    2x2 average pooling that halves the spatial resolution."""

    def __init__(self, in_planes, out_planes, dropRate=0.0):
        super(TransitionBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.droprate = dropRate

    def forward(self, x):
        reduced = self.conv1(self.relu(self.bn1(x)))
        if self.droprate > 0:
            reduced = F.dropout(reduced, p=self.droprate, inplace=False,
                                training=self.training)
        return F.avg_pool2d(reduced, 2)
|
class DenseBlock(nn.Module):
    """A stack of ``nb_layers`` dense layers; layer i receives
    ``in_planes + i * growth_rate`` input channels because each layer
    concatenates its output onto its input."""

    def __init__(self, nb_layers, in_planes, growth_rate, block, dropRate=0.0):
        super(DenseBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, growth_rate, nb_layers, dropRate)

    def _make_layer(self, block, in_planes, growth_rate, nb_layers, dropRate):
        # Channel count grows by growth_rate per layer due to concatenation.
        return nn.Sequential(*[
            block(in_planes + i * growth_rate, growth_rate, dropRate)
            for i in range(nb_layers)
        ])

    def forward(self, x):
        return self.layer(x)
|
class DenseNet3(nn.Module):
    """DenseNet for CIFAR-style 32x32 RGB inputs.

    Three dense blocks separated by transition layers; ``depth`` determines
    the number of layers per block and ``reduction`` the transition
    compression factor. Bottleneck layers count double toward ``depth``.
    """

    def __init__(self, depth, num_classes, growth_rate=12, reduction=0.5,
                 bottleneck=True, dropRate=0.0):
        super(DenseNet3, self).__init__()
        in_planes = 2 * growth_rate
        # Layers per dense block: (depth - 4) / 3, halved for bottlenecks.
        n = (depth - 4) / 3
        if bottleneck == True:
            n = n / 2
            block = BottleneckBlock
        else:
            block = BasicBlock
        n = int(n)
        self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes + n * growth_rate)
        self.trans1 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
        in_planes = int(math.floor(in_planes * reduction))
        self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes + n * growth_rate)
        self.trans2 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
        in_planes = int(math.floor(in_planes * reduction))
        self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes + n * growth_rate)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(in_planes, num_classes)
        self.in_planes = in_planes
        self._init_weights()

    def _init_weights(self):
        # He-style init for convs; BN scale=1/shift=0; zero linear bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.block1(out))
        out = self.trans2(self.block2(out))
        out = self.relu(self.bn1(self.block3(out)))
        # Global 8x8 average pool (matches a 32x32 input after two halvings).
        out = F.avg_pool2d(out, 8)
        return self.fc(out.view(-1, self.in_planes))
|
class NeuralNet(nn.Module):
    """Three-layer fully connected network with ReLU activations."""

    def __init__(self, input_size, hidden_size_1, hidden_size_2, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size_1)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size_1, hidden_size_2)
        self.fc3 = nn.Linear(hidden_size_2, num_classes)

    def forward(self, x):
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)
|
class UNet2Sigmoid(nn.Module):
    """Shallow U-Net (a single down/up pair) with a sigmoid output head."""

    def __init__(self, n_channels, n_classes, hidden=32):
        super(type(self), self).__init__()
        self.inc = inconv(n_channels, hidden)
        self.down1 = down(hidden, hidden * 2)
        self.up8 = up(hidden * 2, hidden)
        self.outc = outconv(hidden, n_classes)

    def forward(self, x):
        skip = self.inc(x)
        bottom = self.down1(skip)
        merged = self.up8(bottom, skip)  # upsample + skip connection
        return sigmoid(self.outc(merged))
|
def _collect_galaxy_npy_paths(split_base):
    """Return paths of per-visit .npy files under ``split_base`` for the
    f435w filter whose '<prop_id>_<vis_num>' field is labelled 'GAL' in the
    module-level ``field_type`` map.

    Expected layout: split_base/<filter>/<prop_id>/<vis_num>/<file>.npy
    (the 'sky.npy' frame is excluded).
    """
    paths = []
    for _filter in os.listdir(split_base):
        filter_dir = os.path.join(split_base, _filter)
        if not (os.path.isdir(filter_dir) and _filter == 'f435w'):
            continue
        for prop_id in os.listdir(filter_dir):
            prop_id_dir = os.path.join(filter_dir, prop_id)
            if not os.path.isdir(prop_id_dir):
                continue
            for vis_num in os.listdir(prop_id_dir):
                vis_num_dir = os.path.join(prop_id_dir, vis_num)
                if not os.path.isdir(vis_num_dir):
                    continue
                for f in os.listdir(vis_num_dir):
                    # Substring test kept from the original behavior.
                    if ('.npy' in f) and (f != 'sky.npy'):
                        if field_type[f'{prop_id}_{vis_num}'] == 'GAL':
                            paths.append(os.path.join(vis_num_dir, f))
    return paths


def get_dirs(base_dir, data_base):
    """Collect galaxy-field .npy paths for the train and test splits under
    ``data_base`` and save them to ``base_dir`` as test_dirs.npy and
    train_dirs.npy. Returns None.

    The two splits previously used duplicated walk code; both now share
    ``_collect_galaxy_npy_paths``.
    """
    print('------------------------------------------------------------')
    print('Fetching directories for the test set')
    print('------------------------------------------------------------')
    test_dirs = _collect_galaxy_npy_paths(os.path.join(data_base, 'npy_test'))
    print('------------------------------------------------------------')
    print('Fetching directories for the training set')
    print('------------------------------------------------------------')
    train_dirs = _collect_galaxy_npy_paths(os.path.join(data_base, 'npy_train'))
    np.save(os.path.join(base_dir, 'test_dirs.npy'), test_dirs)
    np.save(os.path.join(base_dir, 'train_dirs.npy'), train_dirs)
    return None
|
def adam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor], state_steps: List[int], *, amsgrad: bool, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float):
    """Functional API that performs the Adam update in place on ``params``.

    See :class:`~torch.optim.Adam` for details of the algorithm.
    """
    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step = state_steps[i]
        # Bias corrections for the first and second moment estimates.
        bias_correction1 = 1 - beta1 ** step
        bias_correction2 = 1 - beta2 ** step
        if weight_decay != 0:
            # L2 penalty folded into the gradient.
            grad = grad.add(param, alpha=weight_decay)
        # Exponential moving averages of the gradient and its square.
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
        if amsgrad:
            # AMSGrad: use the running maximum of the second moment.
            torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
            denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
        else:
            denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
        param.addcdiv_(exp_avg, denom, value=-(lr / bias_correction1))
|
class Adam(Optimizer):
    r"""Implements Adam algorithm.

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.
    The implementation of the L2 penalty follows changes proposed in
    `Decoupled Weight Decay Regularization`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        # Validate hyper-parameters before handing them to the base class.
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if (not (0.0 <= weight_decay)):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Adam, self).__setstate__(state)
        # Checkpoints written before the amsgrad option existed lack the key;
        # default it to off.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            # Gather per-parameter tensors and state into flat lists for the
            # functional adam() kernel invoked below.
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            max_exp_avg_sqs = []
            state_steps = []
            (beta1, beta2) = group['betas']
            for p in group['params']:
                if (p.grad is not None):
                    params_with_grad.append(p)
                    if p.grad.is_sparse:
                        raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                    grads.append(p.grad)
                    state = self.state[p]
                    if (len(state) == 0):
                        # Lazy state initialization on the first update of p.
                        state['step'] = 0
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])
                    if group['amsgrad']:
                        max_exp_avg_sqs.append(state['max_exp_avg_sq'])
                    state['step'] += 1
                    state_steps.append(state['step'])
            adam(params_with_grad, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, amsgrad=group['amsgrad'], beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'])
        return loss
|
class GaussianRF(object):
    """Sampler of mean-zero Gaussian random fields on the d-dimensional torus
    with covariance operator sigma^2 * (-Laplacian + tau^2 I)^(-alpha).

    Precomputes the square-root eigenvalues of the covariance on a
    ``size``-point grid per dimension; ``sample(N)`` draws fields by scaling
    complex white noise in Fourier space and inverse-transforming.

    Args:
        dim: spatial dimension (1, 2 or 3).
        size: grid points per dimension.
        alpha, tau, sigma: covariance parameters; when sigma is None it is
            set to tau**(0.5*(2*alpha - dim)).
        boundary: kept for API compatibility; only the periodic spectrum is
            implemented.
        device: torch device for the spectrum and the samples.
    """

    def __init__(self, dim, size, alpha=2, tau=3, sigma=None, boundary='periodic', device=None):
        self.dim = dim
        self.device = device
        if sigma is None:
            sigma = tau ** (0.5 * ((2 * alpha) - self.dim))
        k_max = size // 2
        if dim == 1:
            # Wavenumbers in FFT order: 0..k_max-1, -k_max..-1.
            k = torch.cat((torch.arange(start=0, end=k_max, step=1, device=device),
                           torch.arange(start=(- k_max), end=0, step=1, device=device)), 0)
            self.sqrt_eig = (((size * math.sqrt(2.0)) * sigma) *
                             ((((4 * (math.pi ** 2)) * (k ** 2)) + (tau ** 2)) ** ((- alpha) / 2.0)))
            self.sqrt_eig[0] = 0.0  # zero the mean mode
        elif dim == 2:
            wavenumers = torch.cat((torch.arange(start=0, end=k_max, step=1, device=device),
                                    torch.arange(start=(- k_max), end=0, step=1, device=device)), 0).repeat(size, 1)
            k_x = wavenumers.transpose(0, 1)
            k_y = wavenumers
            self.sqrt_eig = ((((size ** 2) * math.sqrt(2.0)) * sigma) *
                             ((((4 * (math.pi ** 2)) * ((k_x ** 2) + (k_y ** 2))) + (tau ** 2)) ** ((- alpha) / 2.0)))
            self.sqrt_eig[0, 0] = 0.0
        elif dim == 3:
            wavenumers = torch.cat((torch.arange(start=0, end=k_max, step=1, device=device),
                                    torch.arange(start=(- k_max), end=0, step=1, device=device)), 0).repeat(size, size, 1)
            k_x = wavenumers.transpose(1, 2)
            k_y = wavenumers
            k_z = wavenumers.transpose(0, 2)
            self.sqrt_eig = ((((size ** 3) * math.sqrt(2.0)) * sigma) *
                             ((((4 * (math.pi ** 2)) * (((k_x ** 2) + (k_y ** 2)) + (k_z ** 2))) + (tau ** 2)) ** ((- alpha) / 2.0)))
            self.sqrt_eig[0, 0, 0] = 0.0
        self.size = tuple(size for _ in range(self.dim))

    def sample(self, N):
        """Draw ``N`` independent fields; returns a real tensor (N, *grid)."""
        # Independent N(0,1) real/imag parts scaled by the sqrt eigenvalues.
        coeff = torch.randn(N, *self.size, 2, device=self.device)
        coeff[..., 0] = self.sqrt_eig * coeff[..., 0]
        coeff[..., 1] = self.sqrt_eig * coeff[..., 1]
        # Bug fix: torch.ifft was removed in torch >= 1.8. torch.fft.ifftn
        # over the spatial dims with the default 'backward' norm reproduces
        # the old torch.ifft(..., normalized=False) behavior; taking .real
        # matches the old u[..., 0] selection of the real channel.
        spectrum = torch.view_as_complex(coeff)
        u = torch.fft.ifftn(spectrum, dim=list(range(1, self.dim + 1)))
        return u.real
|
def parse_function(example_proto):
    """Parse one serialized tf.Example into an (x, y) pair.

    x: (1000, 4) float32 sequence encoding; y: (36,) int32 label vector.
    """
    feature_spec = {
        'x': tf.io.FixedLenFeature([1000, 4], tf.int64),
        'y': tf.io.FixedLenFeature([36], tf.int64),
    }
    parsed = tf.io.parse_single_example(example_proto, feature_spec)
    x = tf.cast(tf.reshape(parsed['x'], [1000, 4]), tf.float32)
    y = tf.cast(tf.reshape(parsed['y'], [36]), tf.int32)
    return (x, y)
|
def get_train_data(batch_size):
    """Build the shuffled, batched, infinitely repeating training pipeline
    from the local TFRecord shard."""
    dataset = tf.data.TFRecordDataset(['./data/traindata-00.tfrecord'],
                                      buffer_size=100000, num_parallel_reads=4)
    return (dataset
            .shuffle(buffer_size=10000)
            .map(map_func=parse_function,
                 num_parallel_calls=tf.data.experimental.AUTOTUNE)
            .batch(batch_size, drop_remainder=False)
            .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
            .repeat())
|
def get_valid_data():
    """Load the validation split from the filtered DeepSEA npz archive."""
    data = np.load('../deepsea_filtered.npz')
    return (data['x_val'], data['y_val'])
|
def get_test_data():
    """Load the test split; x is cast to float for model input."""
    data = np.load('../deepsea_filtered.npz')
    return (data['x_test'].astype(float), data['y_test'])
|
class DeepSEA(keras.Model):
    """DeepSEA-style 1D CNN for chromatin-feature prediction.

    Three Conv1D/pool/dropout stages followed by two dense layers; the final
    sigmoid layer emits 36 independent per-target probabilities (this variant
    is built for a 36-target filtered dataset, not the original 919 targets).
    """

    def __init__(self):
        super(DeepSEA, self).__init__()
        # All conv/dense layers share the original DeepSEA regularization:
        # L2 weight decay, max-norm constraint, and (dense_1 only) an L1
        # activity penalty promoting sparse activations.
        self.conv_1 = keras.layers.Conv1D(filters=320, kernel_size=8, strides=1, use_bias=False, padding='SAME', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(5e-07), kernel_constraint=tf.keras.constraints.MaxNorm(0.9))
        self.pool_1 = keras.layers.MaxPool1D(pool_size=4, strides=4, padding='SAME')
        self.dropout_1 = keras.layers.Dropout(0.2)
        self.conv_2 = keras.layers.Conv1D(filters=480, kernel_size=8, strides=1, use_bias=False, padding='SAME', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(5e-07), kernel_constraint=tf.keras.constraints.MaxNorm(0.9))
        self.pool_2 = keras.layers.MaxPool1D(pool_size=4, strides=4, padding='SAME')
        self.dropout_2 = keras.layers.Dropout(0.2)
        self.conv_3 = keras.layers.Conv1D(filters=960, kernel_size=8, strides=1, use_bias=False, padding='SAME', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(5e-07), kernel_constraint=tf.keras.constraints.MaxNorm(0.9))
        self.dropout_3 = keras.layers.Dropout(0.5)
        self.flatten = keras.layers.Flatten()
        self.dense_1 = keras.layers.Dense(units=925, use_bias=False, activation='relu', activity_regularizer=tf.keras.regularizers.l1(1e-08), kernel_regularizer=tf.keras.regularizers.l2(5e-07), kernel_constraint=tf.keras.constraints.MaxNorm(0.9))
        self.dense_2 = keras.layers.Dense(units=36, use_bias=False, activation='sigmoid', kernel_regularizer=tf.keras.regularizers.l2(5e-07), kernel_constraint=tf.keras.constraints.MaxNorm(0.9))

    def call(self, inputs, training=None, mask=None, **kwargs):
        """Forward propagation of DeepSEA model.

        :param inputs: shape = (batch_size, length, c)
        :param training: training or not (controls dropout).
        :param mask: unused; present for Keras API compatibility.
        :param kwargs: None
        :return: shape = (batch_size, 36)
        """
        temp = self.conv_1(inputs)
        temp = self.pool_1(temp)
        temp = self.dropout_1(temp, training=training)
        temp = self.conv_2(temp)
        temp = self.pool_2(temp)
        temp = self.dropout_2(temp, training=training)
        temp = self.conv_3(temp)
        temp = self.dropout_3(temp, training=training)
        temp = self.flatten(temp)
        temp = self.dense_1(temp)
        output = self.dense_2(temp)
        return output
|
def serialize_example(x, y):
    """Serialize one (x, y) pair of integer arrays into a tf.train.Example string."""
    feature_map = {
        'x': tf.train.Feature(int64_list=tf.train.Int64List(value=x.flatten())),
        'y': tf.train.Feature(int64_list=tf.train.Int64List(value=y.flatten())),
    }
    proto = tf.train.Example(features=tf.train.Features(feature=feature_map))
    return proto.SerializeToString()
|
def traindata_to_tfrecord():
    """Write the training split of deepsea_filtered.npz into TFRecord shard(s)."""
    samples_per_shard = 71753
    with np.load('../deepsea_filtered.npz') as archive:
        x = archive['x_train']
        y = archive['y_train']
    for shard in range(1):
        shard_path = './data/traindata-%.2d.tfrecord' % shard
        with tf.io.TFRecordWriter(shard_path) as writer:
            lo = shard * samples_per_shard
            hi = (shard + 1) * samples_per_shard
            for i in tqdm(range(lo, hi), desc='Processing Train Data {}'.format(shard), ascii=True):
                writer.write(serialize_example(x[i], y[i]))
|
def testdata_to_tfrecord():
    """Write the test split of deepsea_filtered.npz into ./data/testdata.tfrecord.

    Fix: the npz archive is now opened with a context manager so its file
    handle is closed when done (the original `np.load(...)` handle was
    never closed), matching the pattern used by traindata_to_tfrecord.
    """
    filename = '../deepsea_filtered.npz'
    with np.load(filename) as data:
        # Indexing materializes the arrays, so they remain valid after close.
        x = data['x_test']
        y = data['y_test']
    with tf.io.TFRecordWriter('./data/testdata.tfrecord') as writer:
        for i in tqdm(range(len(y)), desc='Processing Test Data', ascii=True):
            example_proto = serialize_example(x[i], y[i])
            writer.write(example_proto)
|
class ECGDataset(Dataset):
    """Dataset wrapping ECG windows, labels, and optional per-record ids."""

    def __init__(self, data, label, pid=None):
        self.data = data
        self.label = label
        self.pid = pid

    def __getitem__(self, index):
        sample = torch.tensor(self.data[index], dtype=torch.float)
        target = torch.tensor(self.label[index], dtype=torch.long)
        return (sample, target)

    def __len__(self):
        return len(self.data)
|
def read_data_physionet_4(path, window_size=1000, stride=500):
    """Load the PhysioNet/CinC 2017 pickle, normalize, window, and wrap in datasets.

    Returns (trainset, None, testset); the None slot keeps the usual
    (train, val, test) triple shape without a validation split.
    """
    with open(os.path.join(path, 'challenge2017.pkl'), 'rb') as fin:
        res = pickle.load(fin)

    # Per-record z-score normalization.
    all_data = res['data']
    for idx in range(len(all_data)):
        rec = all_data[idx]
        all_data[idx] = (rec - np.mean(rec)) / np.std(rec)

    # Map the four rhythm classes to integers; unmatched labels are skipped,
    # exactly as the original if/elif chain did.
    label_map = {'N': 0, 'A': 1, 'O': 2, '~': 3}
    all_label = np.array([label_map[lab] for lab in res['label'] if lab in label_map])

    (X_train, X_test, Y_train, Y_test) = train_test_split(
        all_data, all_label, test_size=0.1, random_state=0)
    print('before: ')
    print(Counter(Y_train), Counter(Y_test))
    (X_train, Y_train) = slide_and_cut(X_train, Y_train, window_size=window_size, stride=stride)
    (X_test, Y_test, pid_test) = slide_and_cut(
        X_test, Y_test, window_size=window_size, stride=stride, output_pid=True)
    print('after: ')
    print(Counter(Y_train), Counter(Y_test))

    # Shuffle the training windows.
    shuffle_idx = np.random.permutation(Y_train.shape[0])
    X_train = X_train[shuffle_idx]
    Y_train = Y_train[shuffle_idx]

    trainset = ECGDataset(X_train, Y_train)
    testset = ECGDataset(X_test, Y_test, pid_test)
    return (trainset, None, testset)
|
def slide_and_cut(X, Y, window_size, stride, output_pid=False, datatype=4):
    """Cut each record into sliding windows, oversampling rarer classes.

    Rarer classes get a smaller per-record stride (denser, overlapping
    windows), which rebalances the windowed dataset.

    :param X: array of shape (n_sample, n_length)
    :param Y: integer labels in {0, 1, 2, 3}, one per record
    :param window_size: samples per output window
    :param stride: base stride for the majority class (label 0)
    :param output_pid: when True also return the source-record index per window
    :param datatype: dataset variant (4, 2, or 2.1); selects label-1 stride
    :return: (out_X, out_Y) or (out_X, out_Y, out_pid) as numpy arrays
    :raises ValueError: for an unknown label or datatype (the original left
        ``i_stride`` unbound and crashed with an opaque NameError).

    Fixes: removed the unused ``mode`` variable; unknown label/datatype
    combinations now raise an explicit ValueError.
    """
    out_X = []
    out_Y = []
    out_pid = []
    n_sample = X.shape[0]
    for i in range(n_sample):
        tmp_ts = X[i]
        tmp_Y = Y[i]
        # Class-dependent stride: rarer classes get denser windows.
        if tmp_Y == 0:
            i_stride = stride
        elif tmp_Y == 1:
            if datatype == 4:
                i_stride = stride // 6
            elif datatype == 2:
                i_stride = stride // 10
            elif datatype == 2.1:
                i_stride = stride // 7
            else:
                raise ValueError('unsupported datatype: {}'.format(datatype))
        elif tmp_Y == 2:
            i_stride = stride // 2
        elif tmp_Y == 3:
            i_stride = stride // 20
        else:
            raise ValueError('unsupported label: {}'.format(tmp_Y))
        for j in range(0, len(tmp_ts) - window_size, i_stride):
            out_X.append(tmp_ts[j:j + window_size])
            out_Y.append(tmp_Y)
            out_pid.append(i)
    if output_pid:
        return (np.array(out_X), np.array(out_Y), np.array(out_pid))
    return (np.array(out_X), np.array(out_Y))
|
class NeuralNet(nn.Module):
    """Three-layer fully connected classifier with ReLU activations."""

    def __init__(self, input_size, hidden_size_1, hidden_size_2, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size_1)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size_1, hidden_size_2)
        self.fc3 = nn.Linear(hidden_size_2, num_classes)

    def forward(self, x):
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)
|
class MyDataset(Dataset):
    """Minimal dataset: float features, long labels."""

    def __init__(self, data, label):
        self.data = data
        self.label = label

    def __getitem__(self, index):
        features = torch.tensor(self.data[index], dtype=torch.float)
        target = torch.tensor(self.label[index], dtype=torch.long)
        return (features, target)

    def __len__(self):
        return len(self.data)
|
class ACNN(nn.Module):
    """Segment-level CNN with additive attention pooling over segments.

    Input:
        X: (n_samples, n_channel, n_length); n_length must be an exact
        multiple of n_len_seg.
    Output:
        out: (n_samples, n_classes) logits

    Parameters:
        n_classes: number of classes
    """

    def __init__(self, in_channels, out_channels, att_channels, n_len_seg, n_classes, device, verbose=False):
        super(ACNN, self).__init__()
        self.n_len_seg = n_len_seg
        self.n_classes = n_classes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.att_channels = att_channels
        self.device = device
        self.verbose = verbose
        self.cnn = nn.Conv1d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=16, stride=4)
        # Additive attention parameters over segment embeddings.
        self.W_att_channel = nn.Parameter(torch.randn(self.out_channels, self.att_channels))
        self.v_att_channel = nn.Parameter(torch.randn(self.att_channels, 1))
        self.dense = nn.Linear(out_channels, n_classes)

    def _trace(self, t):
        # Optionally print intermediate shapes for debugging.
        if self.verbose:
            print(t.shape)

    def forward(self, x):
        self.n_channel, self.n_length = x.shape[-2], x.shape[-1]
        assert self.n_length % self.n_len_seg == 0, 'Input n_length should divided by n_len_seg'
        self.n_seg = self.n_length // self.n_len_seg

        out = x
        self._trace(out)
        # (B, C, L) -> (B * n_seg, C, n_len_seg): one conv pass per segment.
        out = out.permute(0, 2, 1)
        self._trace(out)
        out = out.view(-1, self.n_len_seg, self.n_channel)
        self._trace(out)
        out = out.permute(0, 2, 1)
        self._trace(out)
        out = self.cnn(out)
        self._trace(out)
        # Mean over time, then regroup segments per sample.
        out = out.mean(-1)
        self._trace(out)
        out = out.view(-1, self.n_seg, self.out_channels)
        self._trace(out)
        # Additive attention over segments; the softmax is written out
        # explicitly (exp / sum-exp), exactly as in the original.
        scores = torch.matmul(torch.tanh(torch.matmul(out, self.W_att_channel)), self.v_att_channel)
        weights = torch.div(torch.exp(scores), torch.sum(torch.exp(scores), 1, keepdim=True))
        out = torch.sum(torch.mul(weights, out), 1)
        self._trace(out)
        out = self.dense(out)
        self._trace(out)
        return out
|
class MyDataset(Dataset):
    """Wraps (data, label) arrays as a torch Dataset of (float, long) pairs."""

    def __init__(self, data, label):
        self.data = data
        self.label = label

    def __getitem__(self, index):
        return (
            torch.tensor(self.data[index], dtype=torch.float),
            torch.tensor(self.label[index], dtype=torch.long),
        )

    def __len__(self):
        return len(self.data)
|
class CNN(nn.Module):
    """Segment-level Conv1d encoder followed by a Transformer over segments.

    Input:
        X: (n_samples, n_channel, n_length); n_length must be an exact
        multiple of n_len_seg.
    Output:
        out: (n_samples, n_classes) logits

    Parameters:
        n_classes: number of classes
    """

    def __init__(self, in_channels, out_channels, n_len_seg, n_classes, device, verbose=False):
        super(CNN, self).__init__()
        self.n_len_seg = n_len_seg
        self.n_classes = n_classes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.device = device
        self.verbose = verbose
        self.cnn = nn.Conv1d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=16, stride=2)
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=self.out_channels, nhead=8, dim_feedforward=128, dropout=0.5)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=1)
        self.dense = nn.Linear(out_channels, n_classes)

    def _trace(self, t):
        # Optionally print intermediate shapes for debugging.
        if self.verbose:
            print(t.shape)

    def forward(self, x):
        self.n_channel, self.n_length = x.shape[-2], x.shape[-1]
        assert self.n_length % self.n_len_seg == 0, 'Input n_length should divided by n_len_seg'
        self.n_seg = self.n_length // self.n_len_seg

        out = x
        self._trace(out)
        # (B, C, L) -> (B * n_seg, C, n_len_seg): one conv pass per segment.
        out = out.permute(0, 2, 1)
        self._trace(out)
        out = out.view(-1, self.n_len_seg, self.n_channel)
        self._trace(out)
        out = out.permute(0, 2, 1)
        self._trace(out)
        out = self.cnn(out)
        self._trace(out)
        # Mean over time, then regroup segments per sample.
        out = out.mean(-1)
        self._trace(out)
        out = out.view(-1, self.n_seg, self.out_channels)
        self._trace(out)
        out = self.transformer_encoder(out)
        self._trace(out)
        # Mean over segments, then classify.
        out = out.mean(-2)
        self._trace(out)
        out = self.dense(out)
        self._trace(out)
        return out
|
class MyDataset(Dataset):
    """Paired-array dataset yielding (float tensor, long tensor) samples."""

    def __init__(self, data, label):
        self.data = data
        self.label = label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        xs = torch.tensor(self.data[index], dtype=torch.float)
        ys = torch.tensor(self.label[index], dtype=torch.long)
        return (xs, ys)
|
class CRNN(nn.Module):
    """Segment-level Conv1d encoder followed by an LSTM over segments.

    Input:
        X: (n_samples, n_channel, n_length); n_length must be an exact
        multiple of n_len_seg.
    Output:
        out: (n_samples, n_classes) logits

    Parameters:
        n_classes: number of classes
    """

    def __init__(self, in_channels, out_channels, n_len_seg, n_classes, device, verbose=False):
        super(CRNN, self).__init__()
        self.n_len_seg = n_len_seg
        self.n_classes = n_classes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.device = device
        self.verbose = verbose
        self.cnn = nn.Conv1d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=16, stride=2)
        self.rnn = nn.LSTM(input_size=self.out_channels, hidden_size=self.out_channels, num_layers=1, batch_first=True, bidirectional=False)
        self.dense = nn.Linear(out_channels, n_classes)

    def _trace(self, t):
        # Optionally print intermediate shapes for debugging.
        if self.verbose:
            print(t.shape)

    def forward(self, x):
        self.n_channel, self.n_length = x.shape[-2], x.shape[-1]
        assert self.n_length % self.n_len_seg == 0, 'Input n_length should divided by n_len_seg'
        self.n_seg = self.n_length // self.n_len_seg

        out = x
        self._trace(out)
        # (B, C, L) -> (B * n_seg, C, n_len_seg): one conv pass per segment.
        out = out.permute(0, 2, 1)
        self._trace(out)
        out = out.view(-1, self.n_len_seg, self.n_channel)
        self._trace(out)
        out = out.permute(0, 2, 1)
        self._trace(out)
        out = self.cnn(out)
        self._trace(out)
        # Mean over time, then regroup segments per sample.
        out = out.mean(-1)
        self._trace(out)
        out = out.view(-1, self.n_seg, self.out_channels)
        self._trace(out)
        # Classify from the LSTM's final hidden state.
        _, (hidden, _) = self.rnn(out)
        out = torch.squeeze(hidden, dim=0)
        self._trace(out)
        out = self.dense(out)
        self._trace(out)
        return out
|
class MyDataset(Dataset):
    """Array-backed dataset producing (float, long) tensor pairs."""

    def __init__(self, data, label):
        self.data = data
        self.label = label

    def __getitem__(self, index):
        feat = torch.tensor(self.data[index], dtype=torch.float)
        lab = torch.tensor(self.label[index], dtype=torch.long)
        return (feat, lab)

    def __len__(self):
        return len(self.data)
|
class MyConv1dPadSame(nn.Module):
    """Conv1d with TensorFlow-style SAME padding.

    input:  (n_sample, in_channels, n_length)
    output: (n_sample, out_channels, (n_length + stride - 1) // stride)
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1):
        super(MyConv1dPadSame, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.groups = groups
        self.conv = torch.nn.Conv1d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=self.kernel_size, stride=self.stride, groups=self.groups)

    def forward(self, x):
        length = x.shape[-1]
        # Target output length of SAME padding, then total pad to reach it.
        target_len = (length + self.stride - 1) // self.stride
        total_pad = max(0, (target_len - 1) * self.stride + self.kernel_size - length)
        left = total_pad // 2
        padded = F.pad(x, (left, total_pad - left), 'constant', 0)
        return self.conv(padded)
|
class MyMaxPool1dPadSame(nn.Module):
    """MaxPool1d with SAME padding.

    The pooling stride equals kernel_size (torch's MaxPool1d default),
    so the output length is ceil(n_length / kernel_size).

    input: (n_sample, n_channel, n_length)
    """

    def __init__(self, kernel_size):
        super(MyMaxPool1dPadSame, self).__init__()
        self.kernel_size = kernel_size
        self.max_pool = torch.nn.MaxPool1d(kernel_size=self.kernel_size)

    def forward(self, x):
        total_pad = max(0, self.kernel_size - 1)
        left = total_pad // 2
        padded = F.pad(x, (left, total_pad - left), 'constant', 0)
        return self.max_pool(padded)
|
class Swish(nn.Module):
    """Swish activation: x * sigmoid(x).

    Fix: uses torch.sigmoid instead of F.sigmoid, which is deprecated and
    removed in recent PyTorch releases; the computation is identical.
    """

    def forward(self, x):
        return x * torch.sigmoid(x)
|
class BasicBlock(nn.Module):
    """
    Basic Block:
    conv1 -> convk -> conv1, with squeeze-and-excitation and a shortcut.

    params:
        in_channels: number of input channels
        out_channels: number of output channels
        ratio: ratio of middle channels to out_channels
        kernel_size: kernel window length
        stride: kernel step size
        groups: number of groups in convk
        downsample: whether downsample length
        is_first_block: the network's first block skips the pre-activation
        use_bn: whether use batch_norm
        use_do: whether use dropout

    input: (n_sample, in_channels, n_length)
    output: (n_sample, out_channels, (n_length+stride-1)//stride)

    Fix: the SE gate now uses torch.sigmoid instead of F.sigmoid, which is
    deprecated and removed in recent PyTorch; numerically identical.
    """

    def __init__(self, in_channels, out_channels, ratio, kernel_size, stride, groups, downsample, is_first_block=False, use_bn=True, use_do=True):
        super(BasicBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.ratio = ratio
        self.kernel_size = kernel_size
        self.groups = groups
        self.downsample = downsample
        self.stride = stride if self.downsample else 1
        self.is_first_block = is_first_block
        self.use_bn = use_bn
        self.use_do = use_do
        self.middle_channels = int(self.out_channels * self.ratio)
        # Bottleneck: 1x1 conv in -> grouped k conv -> 1x1 conv out,
        # each preceded by BN / Swish / Dropout (pre-activation style).
        self.bn1 = nn.BatchNorm1d(in_channels)
        self.activation1 = Swish()
        self.do1 = nn.Dropout(p=0.5)
        self.conv1 = MyConv1dPadSame(in_channels=self.in_channels, out_channels=self.middle_channels, kernel_size=1, stride=1, groups=1)
        self.bn2 = nn.BatchNorm1d(self.middle_channels)
        self.activation2 = Swish()
        self.do2 = nn.Dropout(p=0.5)
        self.conv2 = MyConv1dPadSame(in_channels=self.middle_channels, out_channels=self.middle_channels, kernel_size=self.kernel_size, stride=self.stride, groups=self.groups)
        self.bn3 = nn.BatchNorm1d(self.middle_channels)
        self.activation3 = Swish()
        self.do3 = nn.Dropout(p=0.5)
        self.conv3 = MyConv1dPadSame(in_channels=self.middle_channels, out_channels=self.out_channels, kernel_size=1, stride=1, groups=1)
        # Squeeze-and-excitation with reduction ratio r.
        r = 2
        self.se_fc1 = nn.Linear(self.out_channels, self.out_channels // r)
        self.se_fc2 = nn.Linear(self.out_channels // r, self.out_channels)
        self.se_activation = Swish()
        if self.downsample:
            self.max_pool = MyMaxPool1dPadSame(kernel_size=self.stride)

    def forward(self, x):
        identity = x
        out = x
        # The very first block of the network skips the pre-activation.
        if not self.is_first_block:
            if self.use_bn:
                out = self.bn1(out)
            out = self.activation1(out)
            if self.use_do:
                out = self.do1(out)
        out = self.conv1(out)
        if self.use_bn:
            out = self.bn2(out)
        out = self.activation2(out)
        if self.use_do:
            out = self.do2(out)
        out = self.conv2(out)
        if self.use_bn:
            out = self.bn3(out)
        out = self.activation3(out)
        if self.use_do:
            out = self.do3(out)
        out = self.conv3(out)
        # Squeeze-and-excitation: global average pool -> FC -> Swish -> FC -> gate.
        se = out.mean(-1)
        se = self.se_fc1(se)
        se = self.se_activation(se)
        se = self.se_fc2(se)
        se = torch.sigmoid(se)  # was F.sigmoid (deprecated)
        out = torch.einsum('abc,ab->abc', out, se)
        # Shortcut: pool when downsampling, zero-pad channels when widening.
        if self.downsample:
            identity = self.max_pool(identity)
        if self.out_channels != self.in_channels:
            identity = identity.transpose(-1, -2)
            ch1 = (self.out_channels - self.in_channels) // 2
            ch2 = (self.out_channels - self.in_channels) - ch1
            identity = F.pad(identity, (ch1, ch2), 'constant', 0)
            identity = identity.transpose(-1, -2)
        out += identity
        return out
|
class BasicStage(nn.Module):
    """
    Basic Stage:
    block_1 -> block_2 -> ... -> block_M
    Only the first block of a stage downsamples and changes channel count.
    """

    def __init__(self, in_channels, out_channels, ratio, kernel_size, stride, groups, i_stage, m_blocks, use_bn=True, use_do=True, verbose=False):
        super(BasicStage, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.ratio = ratio
        self.kernel_size = kernel_size
        self.groups = groups
        self.i_stage = i_stage
        self.m_blocks = m_blocks
        self.use_bn = use_bn
        self.use_do = use_do
        self.verbose = verbose
        self.block_list = nn.ModuleList()
        for i_block in range(self.m_blocks):
            first_in_stage = (i_block == 0)
            # Only the network's very first block skips its pre-activation.
            self.is_first_block = (self.i_stage == 0) and first_in_stage
            self.downsample = first_in_stage
            self.stride = stride if first_in_stage else 1
            self.tmp_in_channels = self.in_channels if first_in_stage else self.out_channels
            self.block_list.append(BasicBlock(
                in_channels=self.tmp_in_channels, out_channels=self.out_channels,
                ratio=self.ratio, kernel_size=self.kernel_size, stride=self.stride,
                groups=self.groups, downsample=self.downsample,
                is_first_block=self.is_first_block, use_bn=self.use_bn,
                use_do=self.use_do))

    def forward(self, x):
        out = x
        for i_block in range(self.m_blocks):
            net = self.block_list[i_block]
            out = net(out)
            if self.verbose:
                print('stage: {}, block: {}, in_channels: {}, out_channels: {}, outshape: {}'.format(self.i_stage, i_block, net.in_channels, net.out_channels, list(out.shape)))
                print('stage: {}, block: {}, conv1: {}->{} k={} s={} C={}'.format(self.i_stage, i_block, net.conv1.in_channels, net.conv1.out_channels, net.conv1.kernel_size, net.conv1.stride, net.conv1.groups))
                print('stage: {}, block: {}, convk: {}->{} k={} s={} C={}'.format(self.i_stage, i_block, net.conv2.in_channels, net.conv2.out_channels, net.conv2.kernel_size, net.conv2.stride, net.conv2.groups))
                print('stage: {}, block: {}, conv1: {}->{} k={} s={} C={}'.format(self.i_stage, i_block, net.conv3.in_channels, net.conv3.out_channels, net.conv3.kernel_size, net.conv3.stride, net.conv3.groups))
        return out
|
class Net1D(nn.Module):
    """1-D CNN built from SE-ResNeXt-style stages.

    Input:
        X: (n_samples, n_channel, n_length)
    Output:
        out: (n_samples, n_classes) logits

    params:
        in_channels, base_filters, ratio,
        filter_list: list, filters for each stage
        m_blocks_list: list, number of blocks of each stage
        kernel_size, stride, groups_width, n_classes, use_bn, use_do
    """

    def __init__(self, in_channels, base_filters, ratio, filter_list, m_blocks_list, kernel_size, stride, groups_width, n_classes, use_bn=True, use_do=True, verbose=False):
        super(Net1D, self).__init__()
        self.in_channels = in_channels
        self.base_filters = base_filters
        self.ratio = ratio
        self.filter_list = filter_list
        self.m_blocks_list = m_blocks_list
        self.kernel_size = kernel_size
        self.stride = stride
        self.groups_width = groups_width
        self.n_stages = len(filter_list)
        self.n_classes = n_classes
        self.use_bn = use_bn
        self.use_do = use_do
        self.verbose = verbose
        # Stem: strided conv + BN + Swish.
        self.first_conv = MyConv1dPadSame(in_channels=in_channels, out_channels=self.base_filters, kernel_size=self.kernel_size, stride=2)
        self.first_bn = nn.BatchNorm1d(base_filters)
        self.first_activation = Swish()
        # Stages: the group count is each stage's width / groups_width.
        self.stage_list = nn.ModuleList()
        width = self.base_filters
        for i_stage, (stage_width, depth) in enumerate(zip(self.filter_list, self.m_blocks_list)):
            self.stage_list.append(BasicStage(
                in_channels=width, out_channels=stage_width, ratio=self.ratio,
                kernel_size=self.kernel_size, stride=self.stride,
                groups=stage_width // self.groups_width, i_stage=i_stage,
                m_blocks=depth, use_bn=self.use_bn, use_do=self.use_do,
                verbose=self.verbose))
            width = stage_width
        self.dense = nn.Linear(width, n_classes)

    def forward(self, x):
        out = self.first_conv(x)
        if self.use_bn:
            out = self.first_bn(out)
        out = self.first_activation(out)
        for stage in self.stage_list:
            out = stage(out)
        # Global average pooling over time, then classify.
        out = out.mean(-1)
        return self.dense(out)
|
class MyDataset(Dataset):
    """Simple (data, label) dataset; samples come back as (float, long) tensors."""

    def __init__(self, data, label):
        self.data = data
        self.label = label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return (torch.tensor(self.data[index], dtype=torch.float),
                torch.tensor(self.label[index], dtype=torch.long))
|
class MyConv1dPadSame(nn.Module):
    """
    extend nn.Conv1d to support SAME padding
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1):
        super(MyConv1dPadSame, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.groups = groups
        self.conv = torch.nn.Conv1d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=self.kernel_size, stride=self.stride, groups=self.groups)

    def forward(self, x):
        seq_len = x.shape[-1]
        # SAME padding: pad so the output length is ceil(seq_len / stride).
        out_len = (seq_len + self.stride - 1) // self.stride
        pad_total = max(0, (out_len - 1) * self.stride + self.kernel_size - seq_len)
        pad_left = pad_total // 2
        x = F.pad(x, (pad_left, pad_total - pad_left), 'constant', 0)
        return self.conv(x)
|
class MyMaxPool1dPadSame(nn.Module):
    """
    extend nn.MaxPool1d to support SAME padding

    NOTE(review): self.stride = 1 is only used in the padding arithmetic;
    the underlying MaxPool1d runs with torch's default stride
    (= kernel_size) - confirm whether intended.
    """

    def __init__(self, kernel_size):
        super(MyMaxPool1dPadSame, self).__init__()
        self.kernel_size = kernel_size
        self.stride = 1
        self.max_pool = torch.nn.MaxPool1d(kernel_size=self.kernel_size)

    def forward(self, x):
        seq_len = x.shape[-1]
        # With stride 1 in the formula this pads a total of kernel_size - 1.
        out_len = (seq_len + self.stride - 1) // self.stride
        pad_total = max(0, (out_len - 1) * self.stride + self.kernel_size - seq_len)
        pad_left = pad_total // 2
        x = F.pad(x, (pad_left, pad_total - pad_left), 'constant', 0)
        return self.max_pool(x)
|
class BasicBlock(nn.Module):
    """
    ResNet Basic Block: (BN -> ReLU -> Dropout -> Conv) x 2 plus a shortcut.

    The network's first block skips the leading BN/ReLU/Dropout. When
    downsampling, the shortcut is max-pooled; when the channel count grows,
    the shortcut is zero-padded along the channel axis.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, groups, downsample, use_bn, use_do, is_first_block=False):
        super(BasicBlock, self).__init__()
        self.in_channels = in_channels
        self.kernel_size = kernel_size
        self.out_channels = out_channels
        self.groups = groups
        self.downsample = downsample
        # Non-downsampling blocks always use stride 1.
        self.stride = stride if downsample else 1
        self.is_first_block = is_first_block
        self.use_bn = use_bn
        self.use_do = use_do
        self.bn1 = nn.BatchNorm1d(in_channels)
        self.relu1 = nn.ReLU()
        self.do1 = nn.Dropout(p=0.5)
        self.conv1 = MyConv1dPadSame(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=self.stride, groups=self.groups)
        self.bn2 = nn.BatchNorm1d(out_channels)
        self.relu2 = nn.ReLU()
        self.do2 = nn.Dropout(p=0.5)
        self.conv2 = MyConv1dPadSame(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1, groups=self.groups)
        self.max_pool = MyMaxPool1dPadSame(kernel_size=self.stride)

    def forward(self, x):
        identity = x
        out = x
        # The very first block of the network skips the pre-activation.
        if not self.is_first_block:
            if self.use_bn:
                out = self.bn1(out)
            out = self.relu1(out)
            if self.use_do:
                out = self.do1(out)
        out = self.conv1(out)
        if self.use_bn:
            out = self.bn2(out)
        out = self.relu2(out)
        if self.use_do:
            out = self.do2(out)
        out = self.conv2(out)
        # Shortcut: pool when downsampling, zero-pad channels when widening.
        if self.downsample:
            identity = self.max_pool(identity)
        if self.out_channels != self.in_channels:
            identity = identity.transpose(-1, -2)
            pad_lo = (self.out_channels - self.in_channels) // 2
            pad_hi = (self.out_channels - self.in_channels) - pad_lo
            identity = F.pad(identity, (pad_lo, pad_hi), 'constant', 0)
            identity = identity.transpose(-1, -2)
        out += identity
        return out
|
class ResNet1D(nn.Module):
    """1-D ResNet for sequence classification.

    Input:
        X: (n_samples, n_channel, n_length)
        Y: (n_samples)
    Output:
        out: (n_samples, n_classes) logits (no softmax is applied)
    Parameters:
        in_channels: dim of input, the same as n_channel
        base_filters: number of filters in the first several Conv layers;
            doubles every `increasefilter_gap` blocks
        kernel_size: width of kernel
        stride: stride of kernel moving
        groups: set larger than 1 for a ResNeXt-style grouped conv
        n_block: number of blocks
        n_classes: number of classes
        downsample_gap: blocks between length downsamplings
        increasefilter_gap: blocks between channel doublings
    """
    def __init__(self, in_channels, base_filters, kernel_size, stride, groups, n_block, n_classes, downsample_gap=2, increasefilter_gap=4, use_bn=True, use_do=True, verbose=False):
        super(ResNet1D, self).__init__()
        self.verbose = verbose
        self.n_block = n_block
        self.kernel_size = kernel_size
        self.stride = stride
        self.groups = groups
        self.use_bn = use_bn
        self.use_do = use_do
        self.downsample_gap = downsample_gap
        self.increasefilter_gap = increasefilter_gap
        # Stem: plain SAME-padded conv at stride 1.
        self.first_block_conv = MyConv1dPadSame(in_channels=in_channels, out_channels=base_filters, kernel_size=self.kernel_size, stride=1)
        self.first_block_bn = nn.BatchNorm1d(base_filters)
        self.first_block_relu = nn.ReLU()
        out_channels = base_filters
        self.basicblock_list = nn.ModuleList()
        for i_block in range(self.n_block):
            # Block 0 keeps base_filters and skips its pre-activation.
            if (i_block == 0):
                is_first_block = True
            else:
                is_first_block = False
            # Downsample the length when i_block % downsample_gap == 1.
            if ((i_block % self.downsample_gap) == 1):
                downsample = True
            else:
                downsample = False
            if is_first_block:
                in_channels = base_filters
                out_channels = in_channels
            else:
                # Channel count doubles every increasefilter_gap blocks.
                in_channels = int((base_filters * (2 ** ((i_block - 1) // self.increasefilter_gap))))
                if (((i_block % self.increasefilter_gap) == 0) and (i_block != 0)):
                    out_channels = (in_channels * 2)
                else:
                    out_channels = in_channels
            tmp_block = BasicBlock(in_channels=in_channels, out_channels=out_channels, kernel_size=self.kernel_size, stride=self.stride, groups=self.groups, downsample=downsample, use_bn=self.use_bn, use_do=self.use_do, is_first_block=is_first_block)
            self.basicblock_list.append(tmp_block)
        # Head: BN -> ReLU -> global average pool -> linear classifier.
        self.final_bn = nn.BatchNorm1d(out_channels)
        self.final_relu = nn.ReLU(inplace=True)
        self.dense = nn.Linear(out_channels, n_classes)
    def forward(self, x):
        out = x
        if self.verbose:
            print('input shape', out.shape)
        out = self.first_block_conv(out)
        if self.verbose:
            print('after first conv', out.shape)
        if self.use_bn:
            out = self.first_block_bn(out)
        out = self.first_block_relu(out)
        for i_block in range(self.n_block):
            net = self.basicblock_list[i_block]
            if self.verbose:
                print('i_block: {0}, in_channels: {1}, out_channels: {2}, downsample: {3}'.format(i_block, net.in_channels, net.out_channels, net.downsample))
            out = net(out)
            if self.verbose:
                print(out.shape)
        if self.use_bn:
            out = self.final_bn(out)
        out = self.final_relu(out)
        # Global average pooling over the time axis.
        out = out.mean((- 1))
        if self.verbose:
            print('final pooling', out.shape)
        out = self.dense(out)
        if self.verbose:
            print('dense', out.shape)
        # NOTE(review): no softmax is actually applied despite the debug
        # label below - callers receive raw logits (as CrossEntropyLoss expects).
        if self.verbose:
            print('softmax', out.shape)
        return out
|
def run_exp(base_filters, filter_list, m_blocks_list):
    """Train a Net1D on prepared ECG windows, validating and testing each epoch.

    NOTE(review): relies on module-level globals not visible in this chunk:
    X_train/Y_train, X_test/Y_test, pid_val/Y_val, pid_test, batch_size,
    writer (presumably a TensorBoard SummaryWriter), and is_debug - confirm
    against the full file.
    NOTE(review): "validation" and "test" both wrap X_test/Y_test here.
    """
    dataset = MyDataset(X_train, Y_train)
    dataset_val = MyDataset(X_test, Y_test)
    dataset_test = MyDataset(X_test, Y_test)
    dataloader = DataLoader(dataset, batch_size=batch_size)
    dataloader_val = DataLoader(dataset_val, batch_size=batch_size, drop_last=False)
    dataloader_test = DataLoader(dataset_test, batch_size=batch_size, drop_last=False)
    device_str = 'cuda'
    device = torch.device((device_str if torch.cuda.is_available() else 'cpu'))
    model = Net1D(in_channels=1, base_filters=base_filters, ratio=1.0, filter_list=filter_list, m_blocks_list=m_blocks_list, kernel_size=16, stride=2, groups_width=16, verbose=False, n_classes=4)
    model.to(device)
    summary(model, (X_train.shape[1], X_train.shape[2]), device=device_str)
    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0.001)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10)
    loss_func = torch.nn.CrossEntropyLoss()
    n_epoch = 50
    step = 0
    for _ in tqdm(range(n_epoch), desc='epoch', leave=False):
        # ---- training ----
        model.train()
        prog_iter = tqdm(dataloader, desc='Training', leave=False)
        for (batch_idx, batch) in enumerate(prog_iter):
            (input_x, input_y) = tuple((t.to(device) for t in batch))
            pred = model(input_x)
            loss = loss_func(pred, input_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            step += 1
            writer.add_scalar('Loss/train', loss.item(), step)
            if is_debug:
                break
        # NOTE(review): ReduceLROnPlateau.step() expects a monitored metric;
        # the epoch index `_` is passed instead, so the schedule tracks the
        # epoch counter rather than validation loss - confirm intent.
        scheduler.step(_)
        # ---- validation: majority vote of window predictions per record ----
        model.eval()
        prog_iter_val = tqdm(dataloader_val, desc='Validation', leave=False)
        all_pred_prob = []
        with torch.no_grad():
            for (batch_idx, batch) in enumerate(prog_iter_val):
                (input_x, input_y) = tuple((t.to(device) for t in batch))
                pred = model(input_x)
                all_pred_prob.append(pred.cpu().data.numpy())
        all_pred_prob = np.concatenate(all_pred_prob)
        all_pred = np.argmax(all_pred_prob, axis=1)
        final_pred = []
        final_gt = []
        for i_pid in np.unique(pid_val):
            tmp_pred = all_pred[(pid_val == i_pid)]
            tmp_gt = Y_val[(pid_val == i_pid)]
            final_pred.append(Counter(tmp_pred).most_common(1)[0][0])
            final_gt.append(Counter(tmp_gt).most_common(1)[0][0])
        tmp_report = classification_report(final_gt, final_pred, output_dict=True)
        print(confusion_matrix(final_gt, final_pred))
        # Macro-averaged F1 over the four classes.
        f1_score = ((((tmp_report['0']['f1-score'] + tmp_report['1']['f1-score']) + tmp_report['2']['f1-score']) + tmp_report['3']['f1-score']) / 4)
        writer.add_scalar('F1/f1_score', f1_score, _)
        writer.add_scalar('F1/label_0', tmp_report['0']['f1-score'], _)
        writer.add_scalar('F1/label_1', tmp_report['1']['f1-score'], _)
        writer.add_scalar('F1/label_2', tmp_report['2']['f1-score'], _)
        writer.add_scalar('F1/label_3', tmp_report['3']['f1-score'], _)
        # ---- test: same majority-vote aggregation over pid_test ----
        model.eval()
        prog_iter_test = tqdm(dataloader_test, desc='Testing', leave=False)
        all_pred_prob = []
        with torch.no_grad():
            for (batch_idx, batch) in enumerate(prog_iter_test):
                (input_x, input_y) = tuple((t.to(device) for t in batch))
                pred = model(input_x)
                all_pred_prob.append(pred.cpu().data.numpy())
        all_pred_prob = np.concatenate(all_pred_prob)
        all_pred = np.argmax(all_pred_prob, axis=1)
        final_pred = []
        final_gt = []
        for i_pid in np.unique(pid_test):
            tmp_pred = all_pred[(pid_test == i_pid)]
            tmp_gt = Y_test[(pid_test == i_pid)]
            final_pred.append(Counter(tmp_pred).most_common(1)[0][0])
            final_gt.append(Counter(tmp_gt).most_common(1)[0][0])
        tmp_report = classification_report(final_gt, final_pred, output_dict=True)
        print(confusion_matrix(final_gt, final_pred))
        f1_score = ((((tmp_report['0']['f1-score'] + tmp_report['1']['f1-score']) + tmp_report['2']['f1-score']) + tmp_report['3']['f1-score']) / 4)
        # NOTE(review): test metrics reuse the same writer tags as the
        # validation block above, so the curves overwrite each other in
        # TensorBoard - confirm whether intended.
        writer.add_scalar('F1/f1_score', f1_score, _)
        writer.add_scalar('F1/label_0', tmp_report['0']['f1-score'], _)
        writer.add_scalar('F1/label_1', tmp_report['1']['f1-score'], _)
        writer.add_scalar('F1/label_2', tmp_report['2']['f1-score'], _)
        writer.add_scalar('F1/label_3', tmp_report['3']['f1-score'], _)
|
def train(model, device, train_loader, optimizer):
    """Run one epoch of training and return the per-batch losses.

    Fix: the original accumulated `all_loss` but never returned it; the
    list is now returned so callers can monitor training. Callers that
    ignore the return value are unaffected.

    NOTE(review): the model is not switched to train() mode here - assumed
    to be the caller's responsibility; confirm.
    """
    loss_func = torch.nn.CrossEntropyLoss()
    all_loss = []
    prog_iter = tqdm(train_loader, desc='Training', leave=False)
    for batch_idx, batch in enumerate(prog_iter):
        input_x, input_y = tuple(t.to(device) for t in batch)
        pred = model(input_x)
        loss = loss_func(pred, input_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        all_loss.append(loss.item())
    return all_loss
|
def test(model, device, test_loader, label_test):
    """Evaluate the model on test_loader and print a classification report.

    Fixes:
    - classification_report is now called as (y_true, y_pred); the original
      passed predictions first, which transposes precision/recall and the
      per-class rows in the report (sklearn's signature is
      classification_report(y_true, y_pred)).
    - Inference runs under torch.no_grad() so no autograd graph is built.
    """
    prog_iter_test = tqdm(test_loader, desc='Testing', leave=False)
    all_pred_prob = []
    with torch.no_grad():
        for batch_idx, batch in enumerate(prog_iter_test):
            input_x, input_y = tuple(t.to(device) for t in batch)
            pred = model(input_x)
            all_pred_prob.append(pred.cpu().data.numpy())
    all_pred_prob = np.concatenate(all_pred_prob)
    all_pred = np.argmax(all_pred_prob, axis=1)
    print(classification_report(label_test, all_pred))
|
class Network(object):
    """Bundles a ResNet1D with synthetic data loaders and exposes the
    train/test/get_weights/set_weights/save/load interface used by
    distributed-training frameworks (e.g. Ray).
    """
    def __init__(self, n_length, base_filters, kernel_size, n_block, n_channel):
        """
        key parameters to control the model:
            n_length: dimension of input (resolution) [16, 64, 256, 1024, 4096]
            base_filters: number of convolutional filters (width) [8, 16, 32, 64, 128]
            kernel_size: size of convolutional filters [2, 4, 8, 16]
            n_block: depth of model (depth) [2, 4, 8, 16]
        """
        use_cuda = torch.cuda.is_available()
        n_samples = 1000
        n_length = n_length
        n_classes = 2
        batch_size = 64
        # NOTE(review): read_data_generated is defined elsewhere in this
        # file; presumably returns (data, label) arrays - confirm.
        (data, label) = read_data_generated(n_samples=n_samples, n_length=n_length, n_channel=n_channel, n_classes=n_classes)
        print(data.shape, Counter(label))
        dataset = MyDataset(data, label)
        dataloader = DataLoader(dataset, batch_size=batch_size)
        # A second synthetic draw serves as the held-out test set.
        (data_test, label_test) = read_data_generated(n_samples=n_samples, n_length=n_length, n_channel=n_channel, n_classes=n_classes)
        self.label_test = label_test
        print(data_test.shape, Counter(label_test))
        dataset_test = MyDataset(data_test, label_test)
        dataloader_test = DataLoader(dataset_test, batch_size=batch_size, drop_last=False)
        self.device = device = torch.device(('cuda' if use_cuda else 'cpu'))
        (self.train_loader, self.test_loader) = (dataloader, dataloader_test)
        # Downsample/width-increase gaps scale with depth so deeper models
        # still shrink length and widen channels at comparable rates.
        self.model = ResNet1D(in_channels=n_channel, base_filters=base_filters, kernel_size=kernel_size, stride=2, n_block=n_block, groups=base_filters, n_classes=n_classes, downsample_gap=max((n_block // 8), 1), increasefilter_gap=max((n_block // 4), 1), verbose=False).to(device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
    def train(self):
        # One epoch of training followed by evaluation on the test loader.
        train(self.model, self.device, self.train_loader, self.optimizer)
        return test(self.model, self.device, self.test_loader, self.label_test)
    def test(self):
        # Evaluation only.
        return test(self.model, self.device, self.test_loader, self.label_test)
    def get_weights(self):
        # Weight exchange hooks for parameter-server-style training.
        return self.model.state_dict()
    def set_weights(self, weights):
        self.model.load_state_dict(weights)
    def save(self):
        torch.save(self.model.state_dict(), 'synthetic_ray.pt')
    def load(self):
        self.model.load_state_dict(torch.load('synthetic_ray.pt'))
|
def replicate_if_needed(x, min_clip_duration):
    """Loop `x` with copies of itself until it spans at least
    `min_clip_duration` samples, then truncate to exactly that length.
    Inputs that are already long enough are returned unchanged."""
    if len(x) >= min_clip_duration:
        return x
    # One extra repetition guarantees coverage; the slice trims the excess.
    n_repeats = min_clip_duration // x.shape[0] + 1
    return np.tile(x, n_repeats)[:min_clip_duration]
|
def process_idx(idx):
    # Split audio file `idx` (from the module-level `files` list) into
    # 1-second clips and write them as 16-bit PCM WAVs into the
    # module-level `tgt_dir`, named '<stem>_<clip#>.wav'.
    f = files[idx]
    fname = f.split('/')[(- 1)].split('.')[0]
    (x, sr) = sf.read(f)
    # Target clip length: one second of samples at the file's native rate.
    min_clip_duration = int((sr * 1))
    parts = []
    if (len(x) < min_clip_duration):
        # Too short: loop the audio up to exactly one second.
        x = replicate_if_needed(x, min_clip_duration)
        parts.append(x)
    else:
        # Slide a 1 s window with a 0.5 s hop (50% overlap); the trailing,
        # shorter window is padded by replication.
        overlap = int((sr * 0.5))
        for ix in range(0, len(x), overlap):
            clip_ix = x[ix:(ix + min_clip_duration)]
            clip_ix = replicate_if_needed(clip_ix, min_clip_duration)
            parts.append(clip_ix)
    for jx in range(len(parts)):
        pth = os.path.join(tgt_dir, '{}_{:04d}.wav'.format(fname, jx))
        sf.write(pth, parts[jx], sr, 'PCM_16')
    if ((idx % 500) == 0):
        # `lf` is the module-level total file count; lightweight progress log.
        print('Done: {:07d}/{:07d}'.format(idx, lf))
|
def process_idx(idx):
    """Resample file `idx` from the module-level `files` list to mono at
    SAMPLE_RATE Hz with ffmpeg, writing the result under `tgt_dir` with
    the same basename.

    Uses an argument list with shell=False: the original interpolated the
    filename into a single-quoted shell string, which breaks (and is shell-
    injectable) for filenames containing quote characters.
    """
    f = files[idx]
    fname = f.split('/')[-1]
    tgt_path = os.path.join(tgt_dir, fname)
    cmd = ['ffmpeg', '-loglevel', '0', '-nostats', '-i', f,
           '-ac', '1', '-ar', str(SAMPLE_RATE), tgt_path]
    sp.call(cmd)
    if (idx % 500) == 0:
        # `lf` is the module-level total file count; lightweight progress log.
        print('Done: {:05d}/{}'.format(idx, lf))
|
class BidirectionalLSTM(nn.Module):
    """A bidirectional LSTM followed by a per-timestep linear projection.

    Input/output layout is (seq_len, batch, features); the projection maps
    the concatenated forward+backward hidden states (2 * nHidden) to nOut.
    """

    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        rnn_out, _ = self.rnn(input)
        seq_len, batch, feat = rnn_out.size()
        # Fold time and batch together so the Linear runs once over all steps.
        projected = self.embedding(rnn_out.view(seq_len * batch, feat))
        return projected.view(seq_len, batch, -1)
|
class ConvReLUBN(nn.Module):
    """Conv2d -> ReLU -> BatchNorm2d.

    Note the ordering: batch norm is applied AFTER the activation, as in
    the original implementation.
    """

    def __init__(self, in_channels, out_channels, kernel_size=5, stride=1, padding_size=2):
        super(ConvReLUBN, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding_size)
        self.relu = nn.ReLU(inplace=True)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        return self.bn(self.relu(self.conv(x)))
|
class CRNN(nn.Module):
    """CRNN model, as described in FSD50k paper Sec 5.B.2.

    Three Conv-ReLU-BN + max-pool blocks collapse the frequency axis to 1,
    then a bidirectional LSTM runs over the remaining time axis; the output
    at the last time step is the class logits.
    """

    def __init__(self, imgH=96, num_classes=200, nh=64):
        super(CRNN, self).__init__()
        assert imgH % 16 == 0, 'imgH has to be a multiple of 16'
        self.kernel_sizes = [5, 5, 5]
        self.padding_sizes = [1, 1, 1]
        self.strides = [1, 1, 1]
        self.num_filters = [128, 128, 128]
        self.mp_sizes = [(5, 2), (4, 2), (2, 2)]
        self.cnn = nn.Sequential()
        for block_idx in range(len(self.kernel_sizes)):
            self._add_block(block_idx)
        self.rnn = BidirectionalLSTM(128, nh, num_classes)

    def _add_block(self, ix):
        # The first block consumes the single (spectrogram) input channel.
        in_channels = self.num_filters[ix - 1] if ix > 0 else 1
        self.cnn.add_module(
            'cnn_block{}'.format(ix),
            ConvReLUBN(in_channels, self.num_filters[ix],
                       self.kernel_sizes[ix], self.strides[ix],
                       self.padding_sizes[ix]))
        self.cnn.add_module('pooling{}'.format(ix), nn.MaxPool2d(self.mp_sizes[ix]))

    def forward(self, input):
        feat = self.cnn(input)
        batch, channels, height, width = feat.size()
        assert height == 1, 'the height of conv must be 1'
        # (B, C, 1, W) -> (W, B, C): width becomes the sequence axis.
        seq = feat.squeeze(2).permute(2, 0, 1)
        return self.rnn(seq)[-1]
|
class _DenseLayer(nn.Module):
    """One DenseNet layer: BN-ReLU-1x1 bottleneck, then BN-ReLU-3x3 conv.

    Takes the list of all previous feature maps, concatenates them on the
    channel axis, and emits ``growth_rate`` new channels. With
    ``memory_efficient=True`` the bottleneck is recomputed during backward
    via checkpointing to trade compute for memory.
    """

    def __init__(self, num_input_features: int, growth_rate: int, bn_size: int, drop_rate: float, memory_efficient: bool=False) -> None:
        super(_DenseLayer, self).__init__()
        # Submodules are registered via add_module, with bare class-level
        # annotations so TorchScript can type the attributes.
        self.norm1: nn.BatchNorm2d
        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
        self.relu1: nn.ReLU
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.conv1: nn.Conv2d
        self.add_module('conv1', nn.Conv2d(num_input_features, (bn_size * growth_rate), kernel_size=1, stride=1, bias=False))
        self.norm2: nn.BatchNorm2d
        self.add_module('norm2', nn.BatchNorm2d((bn_size * growth_rate)))
        self.relu2: nn.ReLU
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.conv2: nn.Conv2d
        self.add_module('conv2', nn.Conv2d((bn_size * growth_rate), growth_rate, kernel_size=3, stride=1, padding=1, bias=False))
        self.drop_rate = float(drop_rate)
        self.memory_efficient = memory_efficient

    def bn_function(self, inputs: List[Tensor]) -> Tensor:
        # Concatenate all previous feature maps and apply the 1x1 bottleneck.
        concated_features = torch.cat(inputs, 1)
        bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features)))
        return bottleneck_output

    def any_requires_grad(self, input: List[Tensor]) -> bool:
        # Checkpointing is only worthwhile (and valid) if a gradient flows.
        for tensor in input:
            if tensor.requires_grad:
                return True
        return False

    @torch.jit.unused
    def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:
        # Wrap bn_function in a closure so torch.utils.checkpoint can
        # re-run it during backward instead of storing activations.
        def closure(*inputs):
            return self.bn_function(inputs)
        return cp.checkpoint(closure, *input)

    # The two _overload_method stubs declare, for TorchScript, that forward
    # accepts either a list of tensors or a single tensor; the final def is
    # the actual implementation.
    @torch.jit._overload_method
    def forward(self, input: List[Tensor]) -> Tensor:
        pass

    @torch.jit._overload_method
    def forward(self, input: Tensor) -> Tensor:
        pass

    def forward(self, input: Tensor) -> Tensor:
        if isinstance(input, Tensor):
            prev_features = [input]
        else:
            prev_features = input
        if (self.memory_efficient and self.any_requires_grad(prev_features)):
            if torch.jit.is_scripting():
                raise Exception('Memory Efficient not supported in JIT')
            bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
        else:
            bottleneck_output = self.bn_function(prev_features)
        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if (self.drop_rate > 0):
            new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
        return new_features
|
class _DenseBlock(nn.ModuleDict):
    """A stack of _DenseLayers where every layer sees the concatenation of
    the block input and all earlier layers' outputs."""

    _version = 2

    def __init__(self, num_layers: int, num_input_features: int, bn_size: int, growth_rate: int, drop_rate: float, memory_efficient: bool=False) -> None:
        super(_DenseBlock, self).__init__()
        for layer_idx in range(num_layers):
            # Layer i receives the original input plus i * growth_rate channels.
            self.add_module(
                'denselayer%d' % (layer_idx + 1),
                _DenseLayer(num_input_features + layer_idx * growth_rate,
                            growth_rate=growth_rate,
                            bn_size=bn_size,
                            drop_rate=drop_rate,
                            memory_efficient=memory_efficient))

    def forward(self, init_features: Tensor) -> Tensor:
        features = [init_features]
        for layer in self.values():
            features.append(layer(features))
        return torch.cat(features, 1)
|
class _Transition(nn.Sequential):
def __init__(self, num_input_features: int, num_output_features: int) -> None:
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
|
class DenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
            (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
            but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
    """

    def __init__(self, growth_rate: int=32, block_config: Tuple[(int, int, int, int)]=(6, 12, 24, 16), num_init_features: int=64, bn_size: int=4, drop_rate: float=0, num_classes: int=1000, memory_efficient: bool=False) -> None:
        super(DenseNet, self).__init__()
        # Stem. NOTE: conv0 takes 1 input channel (spectrograms), unlike the
        # 3-channel RGB stem of torchvision's DenseNet.
        self.features = nn.Sequential(OrderedDict([('conv0', nn.Conv2d(1, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), ('norm0', nn.BatchNorm2d(num_init_features)), ('relu0', nn.ReLU(inplace=True)), ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))]))
        # Dense blocks, each followed by a channel-halving transition
        # (except after the last block).
        num_features = num_init_features
        for (i, num_layers) in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, memory_efficient=memory_efficient)
            self.features.add_module(('denseblock%d' % (i + 1)), block)
            num_features = (num_features + (num_layers * growth_rate))
            if (i != (len(block_config) - 1)):
                trans = _Transition(num_input_features=num_features, num_output_features=(num_features // 2))
                self.features.add_module(('transition%d' % (i + 1)), trans)
                num_features = (num_features // 2)
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        self.classifier = nn.Linear(num_features, num_classes)
        # Init: Kaiming for convs, unit weight / zero bias for BN; only the
        # classifier bias is zeroed (its weight keeps the Linear default).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x: Tensor) -> Tensor:
        # The final BN lives inside self.features; ReLU + global average
        # pooling precede the linear classifier.
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = torch.flatten(out, 1)
        out = self.classifier(out)
        return out
|
def _load_state_dict(model: nn.Module, model_url: str, progress: bool) -> None:
    """Download a pretrained checkpoint and load it into `model`, renaming
    legacy keys on the fly.

    Old checkpoints stored keys like ``denselayer1.norm.1.weight`` (the
    submodules lived in a nested Sequential); current modules expect
    ``denselayer1.norm1.weight``, so the '.' before the index is dropped.
    """
    legacy = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    state_dict = load_state_dict_from_url(model_url, progress=progress)
    for old_key in list(state_dict):
        match = legacy.match(old_key)
        if match is None:
            continue
        state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
    model.load_state_dict(state_dict)
|
def _densenet(arch: str, growth_rate: int, block_config: Tuple[(int, int, int, int)], num_init_features: int, pretrained: bool, progress: bool, **kwargs: Any) -> DenseNet:
    # Shared constructor for the densenet variants; `arch` selects the
    # pretrained checkpoint URL in the module-level `model_urls` mapping.
    model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
    if pretrained:
        _load_state_dict(model, model_urls[arch], progress)
    return model
|
def densenet121(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> DenseNet:
    r"""Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
            but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
    """
    return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress, **kwargs)
|
def densenet161(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> DenseNet:
    r"""Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
            but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
    """
    return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress, **kwargs)
|
def densenet169(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> DenseNet:
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
            but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
    """
    return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress, **kwargs)
|
def densenet201(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> DenseNet:
    r"""Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
            but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
    """
    return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, **kwargs)
|
def model_helper(opt):
    """Instantiate a model from the config dict `opt`.

    Recognized keys:
        arch: 'vgglike', '*crnn*', '*densenet*', 'resnet' or 'cifar_resnet'.
        num_classes: output size of the final layer.
        model_depth: depth selector for densenet / resnet / cifar_resnet.
        pool: pooling mode forwarded to some resnet variants.
        pretrained: optional path to a checkpoint to warm-start from.
        pretrained_fc: class count of the checkpoint's final layer (int > 2).

    When a valid checkpoint is supplied, the network is first built with
    `pretrained_fc` outputs so the checkpoint loads cleanly, then the final
    layer is replaced with a fresh `opt['num_classes']`-way layer.
    """
    pretrained = opt.get('pretrained', '')
    pretrained_fc = opt.get('pretrained_fc', None)
    # BUGFIX: check the type before comparing. The original evaluated
    # `pretrained_fc > 2` first, which raises TypeError on Python 3 when
    # `pretrained` is a real file but `pretrained_fc` is None (its default).
    if (os.path.isfile(pretrained) and isinstance(pretrained_fc, int) and (pretrained_fc > 2)):
        pretrained_flag = True
        num_classes = pretrained_fc
        ckpt = torch.load(pretrained)
        print('pretrained model {} with {} classes found.'.format(pretrained, pretrained_fc))
    else:
        pretrained_flag = False
        num_classes = opt['num_classes']
    if (opt['arch'] == 'vgglike'):
        model = vgglike.VGGLike(num_classes)
    elif ('crnn' in opt['arch']):
        model = crnn.CRNN(num_classes=num_classes)
    elif ('densenet' in opt['arch']):
        depth = opt['model_depth']
        if (depth == 121):
            model = densenet.densenet121(num_classes=num_classes)
        elif (depth == 161):
            model = densenet.densenet161(num_classes=num_classes)
        elif (depth == 169):
            model = densenet.densenet169(num_classes=num_classes)
        elif (depth == 201):
            model = densenet.densenet201(num_classes=num_classes)
        else:
            raise ValueError('Invalid value {} of depth for densenet arch'.format(depth))
    elif ('resnet' == opt['arch']):
        assert (opt['model_depth'] in [10, 18, 34, 50, 101, 152, 200])
        if (opt['model_depth'] == 18):
            # NOTE(review): 309 looks like the class count of a pretraining
            # dataset; the head is replaced right below — confirm intent.
            model = resnet.resnet18(num_classes=309, pool=opt['pool'])
            fc_in = model.fc.in_features
            model.fc = nn.Linear(fc_in, num_classes)
        elif (opt['model_depth'] == 34):
            model = resnet.resnet34(num_classes=num_classes, pool=opt['pool'])
        elif (opt['model_depth'] == 50):
            model = resnet.resnet50(num_classes=num_classes, pool=opt['pool'])
        elif (opt['model_depth'] == 101):
            model = resnet.resnet101(num_classes=num_classes)
        elif (opt['model_depth'] == 152):
            model = resnet.resnet152(num_classes=num_classes)
        else:
            # BUGFIX: depths 10 and 200 passed the assert but had no branch,
            # crashing later with UnboundLocalError; fail loudly instead.
            raise ValueError('Unsupported resnet depth {}'.format(opt['model_depth']))
    elif ('cifar_resnet' == opt['arch']):
        depth = opt['model_depth']
        if (depth == 20):
            model = vanilla_cifar_resnet.resnet20(num_classes=num_classes)
        elif (depth == 32):
            model = vanilla_cifar_resnet.resnet32(num_classes=num_classes)
        elif (depth == 34):
            model = vanilla_cifar_resnet.resnet34_custom(num_classes=num_classes)
        elif (depth == 44):
            model = vanilla_cifar_resnet.resnet44(num_classes=num_classes)
        elif (depth == 56):
            model = vanilla_cifar_resnet.resnet56(num_classes=num_classes)
        elif (depth == 110):
            model = vanilla_cifar_resnet.resnet110(num_classes=num_classes)
        else:
            raise ValueError('Invalid value {} of depth for cifar_resnet arch'.format(depth))
    else:
        raise ValueError("Unsupported value {} for opt['arch']".format(opt['arch']))
    if pretrained_flag:
        # Load the checkpoint into the matching-size head, then swap in a
        # fresh final layer sized for the current task.
        if ('resnet' == opt['arch']):
            fc_in = model.fc.in_features
            print('pretrained loading: ', model.load_state_dict(ckpt))
            model.fc = nn.Linear(fc_in, opt['num_classes'])
        elif ('densenet' == opt['arch']):
            fc_in = model.classifier.in_features
            print('pretrained loading: ', model.load_state_dict(ckpt))
            model.classifier = nn.Linear(fc_in, opt['num_classes'])
        elif ('cifar_resnet' == opt['arch']):
            fc_in = model.linear.in_features
            print('pretrained loading: ', model.load_state_dict(ckpt))
            model.linear = nn.Linear(fc_in, opt['num_classes'])
    print(model)
    return model
|
class FSD50k_Lightning(pl.LightningModule):
    """LightningModule wrapping an FSD50k classifier.

    Supports two modes read from hparams.cfg['model']['type']:
      - 'multiclass': cross-entropy (optionally class-weighted via hparams.cw).
      - 'multilabel': BCE-with-logits (optionally pos_weighted) or focal loss;
        validation tracks macro mAP from per-clip sigmoid scores.
    """

    def __init__(self, hparams):
        super(FSD50k_Lightning, self).__init__()
        self.hparams = hparams
        self.net = model_helper(self.hparams.cfg['model'])
        if (self.hparams.cfg['model']['type'] == 'multiclass'):
            if (self.hparams.cw is not None):
                print('Class weights found. Training weighted cross-entropy model')
                cw = torch.load(self.hparams.cw)
            else:
                # BUGFIX: this branch has no class weights, but the original
                # message claimed a weighted model was being trained.
                print('Training unweighted cross-entropy model')
                cw = None
            self.criterion = nn.CrossEntropyLoss(weight=cw)
            self.mode = 'multiclass'
            self.collate_fn = _collate_fn_multiclass
        elif (self.hparams.cfg['model']['type'] == 'multilabel'):
            use_focal = self.hparams.cfg['opt'].get('focal_loss', False)
            print('Training multilabel model')
            self.mode = 'multilabel'
            if (not use_focal):
                if (self.hparams.cw is not None):
                    cw = torch.load(self.hparams.cw)
                    self.criterion = nn.BCEWithLogitsLoss(pos_weight=cw)
                else:
                    # hparams.cw is None here; the original passed it
                    # positionally as `weight`, a no-op for None — made explicit.
                    self.criterion = nn.BCEWithLogitsLoss()
            else:
                print('Training with SigmoidFocalLoss')
                self.criterion = SigmoidFocalLoss()
            self.collate_fn = _collate_fn
        self.train_set = None
        self.val_set = None
        # Accumulated across validation steps; reset in validation_epoch_end.
        self.val_predictions = []
        self.val_gts = []

    def prepare_data(self) -> None:
        # Datasets are built lazily here (Lightning hook) from the config.
        self.train_set = SpectrogramDataset(self.hparams.cfg['data']['train'], self.hparams.cfg['data']['labels'], self.hparams.cfg['audio_config'], mode=self.mode, augment=True, mixer=self.hparams.tr_mixer, transform=self.hparams.tr_tfs)
        self.val_set = FSD50kEvalDataset(self.hparams.cfg['data']['val'], self.hparams.cfg['data']['labels'], self.hparams.cfg['audio_config'], transform=self.hparams.val_tfs)

    def forward(self, x):
        return self.net(x)

    def training_step(self, batch, batch_step):
        self.net.zero_grad()
        (x, _, y) = batch
        y_pred = self(x)
        loss = self.criterion(y_pred, y)
        self.log('train_loss', loss, prog_bar=True, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_step):
        # Each val batch holds the crops of a single clip (batch_size=1 in
        # the loader); clip-level prediction is the mean over crops.
        (x, y) = batch
        y_pred = self(x)
        y_pred = y_pred.mean(0).unsqueeze(0)
        loss = self.criterion(y_pred, y)
        self.log('val_loss', loss, prog_bar=True, on_epoch=True)
        y_pred_sigmoid = torch.sigmoid(y_pred)
        self.val_predictions.append(y_pred_sigmoid.detach().cpu().numpy()[0])
        self.val_gts.append(y.detach().cpu().numpy()[0])
        return loss

    def validation_epoch_end(self, outputs) -> None:
        # Macro-averaged mAP over the accumulated clip-level predictions.
        val_preds = np.asarray(self.val_predictions).astype('float32')
        val_gts = np.asarray(self.val_gts).astype('int32')
        map_value = average_precision_score(val_gts, val_preds, average='macro')
        self.log('val_mAP', torch.tensor(map_value), prog_bar=True)
        self.val_predictions = []
        self.val_gts = []

    def configure_optimizers(self):
        wd = float(self.hparams.cfg['opt'].get('weight_decay', 0))
        lr = float(self.hparams.cfg['opt'].get('lr', 0.001))
        optimizer = torch.optim.Adam(self.parameters(), lr=lr, weight_decay=wd)
        lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', factor=0.1, patience=5, verbose=True)
        # NOTE(review): only 'val_mAP' is ever logged above; the multiclass
        # path would monitor 'val_acc', which no hook emits — confirm.
        to_monitor = ('val_mAP' if (self.mode == 'multilabel') else 'val_acc')
        return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler, 'monitor': to_monitor}

    def train_dataloader(self):
        return DataLoader(self.train_set, num_workers=self.hparams.num_workers, shuffle=True, sampler=None, collate_fn=self.collate_fn, batch_size=self.hparams.cfg['opt']['batch_size'], pin_memory=False, drop_last=True)

    def val_dataloader(self):
        # batch_size=1: one clip (all its crops) per validation step.
        return DataLoader(self.val_set, sampler=None, num_workers=self.hparams.num_workers, collate_fn=_collate_fn_eval, shuffle=False, batch_size=1, pin_memory=False)
|
class NetVLAD(nn.Module):
    'NetVLAD layer implementation'

    def __init__(self, num_clusters=16, dim=512, alpha=100.0, normalize_input=True):
        '''
        Args:
            num_clusters : int
                The number of clusters
            dim : int
                Dimension of descriptors
            alpha : float
                Parameter of initialization. Larger value is harder assignment.
            normalize_input : bool
                If true, descriptor-wise L2 normalization is applied to input.
        '''
        super(NetVLAD, self).__init__()
        self.num_clusters = num_clusters
        self.dim = dim
        self.alpha = alpha
        self.normalize_input = normalize_input
        # 1x1 conv produces per-location cluster scores; its parameters are
        # tied to the centroids in _init_params.
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=True)
        self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
        self._init_params()

    def _init_params(self):
        # Initialize the assignment conv from the centroids so that the
        # softmax approximates hard assignment scaled by alpha.
        self.conv.weight = nn.Parameter(((2.0 * self.alpha) * self.centroids).unsqueeze((- 1)).unsqueeze((- 1)))
        self.conv.bias = nn.Parameter(((- self.alpha) * self.centroids.norm(dim=1)))

    def forward(self, x):
        # x: assumed (N, C, H, W) with C == self.dim — TODO confirm caller.
        (N, C) = x.shape[:2]
        if self.normalize_input:
            # Descriptor-wise L2 normalization across channels.
            x = F.normalize(x, p=2, dim=1)
        # Soft assignment of each spatial location to every cluster.
        soft_assign = self.conv(x).view(N, self.num_clusters, (- 1))
        soft_assign = F.softmax(soft_assign, dim=1)
        x_flatten = x.view(N, C, (- 1))
        # Residual of every descriptor from every centroid:
        # shape (N, num_clusters, C, H*W) after the broadcast/permute dance.
        residual = (x_flatten.expand(self.num_clusters, (- 1), (- 1), (- 1)).permute(1, 0, 2, 3) - self.centroids.expand(x_flatten.size((- 1)), (- 1), (- 1)).permute(1, 2, 0).unsqueeze(0))
        # Weight residuals by soft assignment and sum over spatial positions.
        residual *= soft_assign.unsqueeze(2)
        vlad = residual.sum(dim=(- 1))
        # Intra-normalization per cluster, flatten, then global L2 norm.
        vlad = F.normalize(vlad, p=2, dim=2)
        vlad = vlad.view(x.size(0), (- 1))
        vlad = F.normalize(vlad, p=2, dim=1)
        return vlad
|
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding (bias-free; padding tracks dilation so
    spatial size is preserved at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
|
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (bias-free channel projection)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
|
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style): 3x3 conv -> BN -> ReLU
    -> 3x3 conv -> BN, plus an (optionally downsampled) identity shortcut."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.relu2 = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x):
        # Shortcut path: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu2(out)
|
class ResNet(nn.Module):
    """ResNet backbone for single-channel (spectrogram) input, with a
    configurable head: global average pooling ('avgpool') or NetVLAD ('vlad')."""

    def __init__(self, block, layers, num_classes=1000, pool='avgpool', zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet, self).__init__()
        self.pool = pool
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # Each element says whether to trade stride for dilation in
            # layers 2-4.
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # NOTE: 1 input channel (mono spectrogram), not the usual 3 (RGB).
        self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        if (self.pool == 'avgpool'):
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear((512 * block.expansion), num_classes)
        elif (self.pool == 'vlad'):
            # NetVLAD defaults: 16 clusters x 512 dims = 8192 features.
            self.avgpool = NetVLAD()
            self.fc = nn.Linear((8192 * block.expansion), num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                # NOTE(review): torchvision initializes norm weights to the
                # constant 1; this variant samples N(1, 0.02) — confirm intent.
                nn.init.normal_(m.weight, mean=1, std=0.02)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last BN of each residual branch so every block starts
            # as an identity mapping.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        # Build one stage: the first block may stride/project, the rest keep
        # the running self.inplanes channel count.
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Projection shortcut when the spatial size or channels change.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Stem.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # Residual stages.
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Pooling head (avgpool or NetVLAD) + classifier.
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
|
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50+ style).

    The 3x3 conv operates on `width` channels (scaled by base_width and
    groups); the final 1x1 conv expands to planes * expansion.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.0)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.