code stringlengths 101 5.91M |
|---|
class ModelAccumulator(object):
    """Weighted accumulator of client model parameters for federated averaging.

    Keeps a `server_state_dict` (the aggregated/global model), a running
    weighted sum `_accum_state_dict`, and — when `local_bn` is set — one
    private BN state dict per client so BatchNorm statistics stay local.
    """
    def __init__(self, running_model: nn.Module, n_accum, num_model, local_bn=False, raise_err_on_early_accum=True):
        # n_accum: how many models must be added before the server update.
        self.n_accum = n_accum
        self._cnt = 0
        self.local_bn = local_bn
        self._weight_sum = 0
        self.raise_err_on_early_accum = raise_err_on_early_accum
        with torch.no_grad():
            self.server_state_dict = {k: copy.deepcopy(v) for (k, v) in running_model.state_dict().items()}
            # Zero-filled buffers matching the model's tensors; weighted sums land here.
            self._accum_state_dict = {k: torch.zeros_like(v) for (k, v) in running_model.state_dict().items()}
            if local_bn:
                # One private copy of all BN-related entries per client.
                # NOTE(review): keys are matched by the substring 'bn' — assumes
                # BN modules are named accordingly; verify against the model.
                self.local_state_dict = [{k: copy.deepcopy(v) for (k, v) in running_model.state_dict().items() if ('bn' in k)} for _ in range(num_model)]
            else:
                self.local_state_dict = []
    def state_dict(self):
        """Return a serializable snapshot (server state + per-client BN states)."""
        return {'server': self.server_state_dict, 'clients': self.local_state_dict}
    def load_state_dict(self, state_dict: dict):
        """Restore a snapshot produced by `state_dict()`; validates local-BN consistency."""
        self.server_state_dict = state_dict['server']
        local_state_dict = state_dict['clients']
        if self.local_bn:
            assert (len(local_state_dict) > 0), 'Not found local state dict when local_bn is set.'
            assert (len(local_state_dict) == len(self.local_state_dict)), f'Load {len(local_state_dict)} local states while expected {len(self.local_state_dict)}'
        else:
            assert (len(local_state_dict) == 0), 'Found local bn state when local_bn is not set.'
        self.local_state_dict = local_state_dict
    def add(self, model_idx, model, weight):
        """Accumulate `model`'s parameters with the given weight.

        BN entries go to the client's private dict when `local_bn` is set;
        `num_batches_tracked` (an integer counter) is copied, not summed.
        """
        if (self._cnt >= self.n_accum):
            raise RuntimeError(f'Try to accumulate {self._cnt}, while only {self.n_accum} models are allowed. Did you forget to reset after accumulated?')
        with torch.no_grad():
            for key in self._accum_state_dict:
                if self.local_bn:
                    if ('bn' in key):
                        # Keep BN stats/params private to this client.
                        self.local_state_dict[model_idx][key].data.copy_(model.state_dict()[key])
                    else:
                        temp = (weight * model.state_dict()[key])
                        self._accum_state_dict[key].data.add_(temp)
                elif ('num_batches_tracked' in key):
                    # Integer counter: take the latest value rather than a weighted sum.
                    self._accum_state_dict[key].data.copy_(model.state_dict()[key])
                else:
                    temp = (weight * model.state_dict()[key])
                    self._accum_state_dict[key].data.add_(temp)
        self._cnt += 1
        self._weight_sum += weight
    def accumulated_count(self):
        """Number of models added since the last reset."""
        return self._cnt
    def accum_state_dict(self):
        """Return the raw (un-normalized) accumulated state dict."""
        self.check_full_accum()
        return self._accum_state_dict
    def load_model(self, running_model: nn.Module, model_idx: int, ignore_local_bn=False):
        """Load the server model into `running_model`, then overlay client BN state."""
        running_model.load_state_dict(self.server_state_dict)
        if (self.local_bn and (not ignore_local_bn)):
            with torch.no_grad():
                for (k, v) in self.local_state_dict[model_idx].items():
                    running_model.state_dict()[k].data.copy_(v)
    def update_server_and_reset(self, beta=0.0):
        """Fold the weighted average into the server state and reset counters.

        beta > 0 gives an exponential moving average:
        server = beta * server + (1 - beta) * weighted_mean.
        """
        self.check_full_accum()
        weight_norm = (1.0 / self._weight_sum)
        with torch.no_grad():
            for k in self.server_state_dict:
                if ((beta > 0) and ('num_batches_tracked' not in k)):
                    self.server_state_dict[k].data.mul_(beta).add_((((1 - beta) * self._accum_state_dict[k].data) * weight_norm))
                else:
                    self.server_state_dict[k].data.copy_((self._accum_state_dict[k].data * weight_norm))
        self._cnt = 0
        self._weight_sum = 0
        for k in self._accum_state_dict:
            self._accum_state_dict[k].data.zero_()
    def check_full_accum(self):
        """Optionally assert that exactly n_accum models were accumulated."""
        if self.raise_err_on_early_accum:
            assert (self._cnt == self.n_accum), 'Retrieve before all models are accumulated.'
    def copy_dual_noise_bn(self, noised_src_idx, dst_idx, diff_coef=0.0):
        # Delegates to the module-level helper of the same name.
        assert self.local_bn
        copy_dual_noise_bn(self.local_state_dict[noised_src_idx], self.local_state_dict[dst_idx], diff_coef=diff_coef)
    def copy_multi_dual_noise_bn(self, noised_src_idxs, dst_idx, diff_coef=0.0, src_weight_mode='transBN'):
        # Delegates to the module-level helper of the same name.
        assert self.local_bn
        copy_multi_dual_noise_bn([self.local_state_dict[i] for i in noised_src_idxs], self.local_state_dict[dst_idx], diff_coef=diff_coef, src_weight_mode=src_weight_mode)
    def duplicate_dual_clean_bn(self, idx):
        duplicate_dual_clean_bn(self.local_state_dict[idx])
    def aggregate_local_bn(self):
        """Average every client's BN state (except counters) into the server state."""
        with torch.no_grad():
            is_init = True
            n_client = len(self.local_state_dict)
            for model_idx in range(n_client):
                for (k, v) in self.local_state_dict[model_idx].items():
                    if ('num_batches_tracked' in k):
                        continue
                    if is_init:
                        # Zero the server slot once before summing client shares.
                        self.server_state_dict[k].data.zero_()
                    self.server_state_dict[k].data.add_((v / float(n_client)))
                is_init = False
def test_raabbvi_avgrmsprop_optimize():
    """RAABBVI with AveragedRMSProp should recover the optimum across scalings."""
    scale_grid = [np.ones(2), np.ones(4), np.geomspace(0.1, 1, 4)]
    for scales in scale_grid:
        true_value = np.arange(scales.size)
        objective = DummyObjective(true_value, noise=0.2, scales=scales)
        optimizer = RAABBVI(AveragedRMSProp(0.01, diagnostics=True),
                            rho=0.5,
                            mcse_threshold=0.002,
                            inefficiency_threshold=1.0,
                            accuracy_threshold=0.002)
        _test_optimizer(optimizer, objective, true_value, 20000)
class Clothing(torch.utils.data.Dataset):
    """Clothing1M-style dataset with noisy train labels and clean test labels.

    Args:
        root: dataset root path; joined to relative paths by plain string
            concatenation, so it must end with a path separator.
        transform: callable applied to each PIL image.
        mode: 'train' (all noisy), 'minitrain' (fixed random 10% of noisy),
            or 'test' (clean-labeled test set).
    """

    def __init__(self, root, transform, mode):
        self.root = root
        self.data = []
        self.targets = []
        self.transform = transform
        self.mode = mode
        # key/value files map relative image paths to integer class labels.
        # (The three copy-pasted parsing loops of the original are folded into
        # two private helpers; behavior is unchanged.)
        self.noisy_labels = self._read_label_map('noisy_label_kv.txt')
        self.clean_labels = self._read_label_map('clean_label_kv.txt')
        if (self.mode == 'train'):
            self._collect_samples('noisy_train_key_list.txt', self.noisy_labels)
        elif (self.mode == 'minitrain'):
            self._collect_samples('noisy_train_key_list.txt', self.noisy_labels, subsample=True)
        elif (self.mode == 'test'):
            self._collect_samples('clean_test_key_list.txt', self.clean_labels)

    def _read_label_map(self, filename):
        """Parse a '<relative_path> <label>' file into {abs_path: int_label}."""
        labels = {}
        with open((self.root + filename), 'r') as f:
            for line in f.read().splitlines():
                entry = line.split()
                labels[(self.root + entry[0])] = int(entry[1])
        return labels

    def _collect_samples(self, key_list_file, label_map, subsample=False):
        """Fill self.data / self.targets from a key-list file.

        When `subsample` is set, keep a deterministic (seed 13) random 10%
        of the entries, matching the original 'minitrain' behavior.
        """
        with open((self.root + key_list_file), 'r') as f:
            lines = f.read().splitlines()
        if subsample:
            n = len(lines)
            np.random.seed(13)
            subset_idx = np.random.choice(n, int((n / 10)), replace=False)
            lines = [lines[i] for i in subset_idx]
        for l in lines:
            img_path = (self.root + l)
            self.data.append(img_path)
            self.targets.append(label_map[img_path])

    def __getitem__(self, index):
        """Return (transformed_image, target) for the given index."""
        img_path = self.data[index]
        target = self.targets[index]
        image = Image.open(img_path).convert('RGB')
        img = self.transform(image)
        return (img, target)

    def __len__(self):
        return len(self.data)
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_cosine_restart_lr_update_hook(multi_optimizers):
    """CosineRestartLrUpdaterHook: invalid configs raise; a valid run logs the
    expected restarted learning rates.

    (Restored the `@pytest.mark` prefix that was missing from the parametrize
    decorator — a bare `.parametrize(...)` line is a syntax error.)
    """
    # min_lr and min_lr_ratio are mutually exclusive.
    with pytest.raises(AssertionError):
        CosineRestartLrUpdaterHook(by_epoch=False, periods=[2, 10], restart_weights=[0.5, 0.5], min_lr=0.1, min_lr_ratio=0)
    # periods and restart_weights must have the same length.
    with pytest.raises(AssertionError):
        CosineRestartLrUpdaterHook(by_epoch=False, periods=[2, 10], restart_weights=[0.5], min_lr_ratio=0)
    # Periods that do not cover the whole run fail at run time.
    with pytest.raises(ValueError):
        sys.modules['pavi'] = MagicMock()
        loader = DataLoader(torch.ones((10, 2)))
        runner = _build_demo_runner()
        hook = CosineRestartLrUpdaterHook(by_epoch=False, periods=[5, 2], restart_weights=[0.5, 0.5], min_lr=0.0001)
        runner.register_hook(hook)
        runner.register_hook(IterTimerHook())
        hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
        runner.register_hook(hook)
        runner.run([loader], [('train', 1)])
        shutil.rmtree(runner.work_dir)
    # Valid configuration: run and verify the logged lr/momentum values.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook = CosineRestartLrUpdaterHook(by_epoch=False, periods=[5, 5], restart_weights=[0.5, 0.5], min_lr_ratio=0)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.01, 'learning_rate/model2': 0.005, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.01, 'learning_rate/model2': 0.005, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 6), call('train', {'learning_rate/model1': 0., 'learning_rate/model2': 0., 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.01, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.01, 'momentum': 0.95}, 6), call('train', {'learning_rate': 0., 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
class CosineAnnealingLR(_LRScheduler):
    """Closed-form cosine-annealing learning-rate schedule.

    The lr decays from each base lr down to `eta_min` following half a cosine
    period over `T_max` epochs.
    """

    def __init__(self, optimizer, T_max, eta_min=0, last_epoch=(- 1)):
        self.T_max = T_max
        self.eta_min = eta_min
        super(CosineAnnealingLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the lr for every param group at the current epoch."""
        # Hoist the epoch-dependent cosine term out of the per-group loop.
        cos_term = 1 + math.cos((self.last_epoch / self.T_max) * math.pi)
        return [self.eta_min + (base_lr - self.eta_min) * cos_term / 2
                for base_lr in self.base_lrs]
def check_all_objects_are_documented():
    """Fail if any public top-level transformers object is missing from the docs."""
    documented_objs = find_all_documented_objects()
    modules = transformers._modules
    # Public API surface: everything exported except submodules and privates.
    public_names = [name for name in dir(transformers)
                    if name not in modules and not name.startswith('_')]
    undocumented_objs = [name for name in public_names
                         if name not in documented_objs and not ignore_undocumented(name)]
    if undocumented_objs:
        raise Exception('The following objects are in the public init so should be documented:\n - '
                        + '\n - '.join(undocumented_objs))
    check_docstrings_are_in_md()
    check_model_type_doc_match()
def load_decoder(weights, model):
    """Copy Flax/T5X-style decoder weights (nested dicts of arrays) into a
    PyTorch decoder `model` in place, and return it.

    Dense kernels are transposed (`.T`) because Flax stores them as
    (in, out) while torch Linear expects (out, in).
    """
    # Conditioning (time-embedding) MLP.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T))
    # Fixed (non-trainable) positional embedding table.
    model.position_encoding.weight = nn.Parameter(torch.FloatTensor(weights['Embed_0']['embedding']), requires_grad=False)
    model.continuous_inputs_projection.weight = nn.Parameter(torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T))
    for (lyr_num, lyr) in enumerate(model.decoders):
        ly_weight = weights[f'layers_{lyr_num}']
        # --- sub-layer 0: self-attention (+ FiLM conditioning) ---
        lyr.layer[0].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale']))
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T))
        attention_weights = ly_weight['self_attention']
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))
        # --- sub-layer 1: cross-attention ---
        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale']))
        # --- sub-layer 2: gated MLP (+ FiLM conditioning) ---
        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale']))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T))
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T))
    # Final norm and spectrogram output head.
    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale']))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T))
    return model
def init_distributed_mode(args):
    """Initialize torch.distributed (NCCL) from SLURM or torchrun-style env vars.

    Mutates `args` in place: sets is_slurm_job, rank, world_size and
    gpu_to_work_on, then creates the process group and pins the CUDA device.
    """
    args.is_slurm_job = ('SLURM_JOB_ID' in os.environ)
    if args.is_slurm_job:
        args.rank = int(os.environ['SLURM_PROCID'])
        # SLURM_TASKS_PER_NODE looks like '8', '16' or '8(x2)'.  The original
        # code took only the first *character* ([0]), which silently broke for
        # >= 10 tasks per node; parse the leading integer instead.
        tasks_per_node = int(os.environ['SLURM_TASKS_PER_NODE'].split('(')[0])
        args.world_size = (int(os.environ['SLURM_NNODES']) * tasks_per_node)
    else:
        # Launched by torch.distributed / torchrun, which export these directly.
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
    dist.init_process_group(backend='nccl', init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    # Map this rank onto one of the node's visible GPUs.
    args.gpu_to_work_on = (args.rank % torch.cuda.device_count())
    torch.cuda.set_device(args.gpu_to_work_on)
    return
def main(config: ROSTrainerConfig) -> None:
    """Entry point: finalize the config, build the trainer, and run training.

    Runs single-process (local_rank=0, world_size=1).  KeyboardInterrupt is
    caught and logged rather than propagated; profiler output is always
    flushed on exit.
    """
    config.set_timestamp()
    if config.data:
        # --data is a shorthand the user may pass; forward it to the dataparser.
        CONSOLE.log('Using --data alias for --data.pipeline.datamanager.dataparser.data')
        config.pipeline.datamanager.dataparser.data = config.data
    config.print_to_terminal()
    config.save_config()
    try:
        _set_random_seed(config.machine.seed)
        trainer = config.setup(local_rank=0, world_size=1)
        trainer.setup()
        trainer.train()
    except KeyboardInterrupt:
        # Show where the interrupt landed, then fall through to cleanup.
        CONSOLE.print(traceback.format_exc())
    finally:
        # Always flush profiler stats, even on crash or Ctrl-C.
        profiler.flush_profiler(config.logging)
class Agent(object):
    """Training/evaluation agent wrapping a Short-ResNeSt TF2 model.

    Handles optimizer setup, summary writing, FLOPs estimation, checkpoint
    save/restore and optional TFLite export.
    """

    def __init__(self, height, width, channel, num_class, ksize, radix=4, kpaths=4, learning_rate=0.001, ckpt_dir='./Checkpoint'):
        print('\nInitializing Short-ResNeSt...')
        (self.height, self.width, self.channel, self.num_class, self.ksize, self.radix, self.kpaths) = (height, width, channel, num_class, ksize, radix, kpaths)
        self.learning_rate = learning_rate
        self.ckpt_dir = ckpt_dir
        self.__model = Neuralnet(height, width, channel, num_class, ksize, radix, kpaths)
        # Dry-run forward pass to build the variables before collecting them.
        self.__model.forward(x=tf.zeros((1, height, width, channel), dtype=tf.float32), verbose=True)
        self.variables = {}
        self.__init_propagation(path=self.ckpt_dir)

    def __init_propagation(self, path):
        """Collect trainable variables, create the optimizer, and report FLOPs."""
        self.summary_writer = tf.summary.create_file_writer(path)
        self.variables['trainable'] = []
        ftxt = open('list_parameters.txt', 'w')
        for key in list(self.__model.layer.parameters.keys()):
            trainable = self.__model.layer.parameters[key].trainable
            text = (('T: ' + str(key)) + str(self.__model.layer.parameters[key].shape))
            if trainable:
                self.variables['trainable'].append(self.__model.layer.parameters[key])
            ftxt.write(('%s\n' % text))
        ftxt.close()
        self.optimizer = tf.optimizers.Adam(learning_rate=self.learning_rate)
        self.save_params()
        conc_func = self.__model.__call__.get_concrete_function(tf.TensorSpec(shape=(1, self.height, self.width, self.channel), dtype=tf.float32))
        self.__get_flops(conc_func)

    # Restored: this decorator had been truncated to '.experimental.do_not_convert',
    # which is a syntax error.  It keeps AutoGraph from transforming `step`.
    @tf.autograph.experimental.do_not_convert
    def step(self, x, y, iteration=0, train=False):
        """One forward (and optionally backward) pass.

        Returns (loss, accuracy, softmax_scores); writes summaries when training.
        """
        with tf.GradientTape() as tape:
            logits = self.__model.forward(x, verbose=False)
            smce = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
            loss = tf.math.reduce_mean(smce)
        score = self.__model.layer.softmax(logits)
        pred = tf.argmax(score, 1)
        correct_pred = tf.equal(pred, tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        if train:
            gradients = tape.gradient(loss, self.variables['trainable'])
            self.optimizer.apply_gradients(zip(gradients, self.variables['trainable']))
            with self.summary_writer.as_default():
                tf.summary.scalar('ResNeSt/loss', loss, step=iteration)
                tf.summary.scalar('ResNeSt/accuracy', accuracy, step=iteration)
        return (loss, accuracy, score)

    def __get_flops(self, conc_func):
        """Profile the frozen graph and dump total FLOPs at several scales."""
        (frozen_func, graph_def) = convert_variables_to_constants_v2_as_graph(conc_func)
        with tf.Graph().as_default() as graph:
            tf.compat.v1.graph_util.import_graph_def(graph_def, name='')
            run_meta = tf.compat.v1.RunMetadata()
            opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
            flops = tf.compat.v1.profiler.profile(graph=graph, run_meta=run_meta, cmd='op', options=opts)
            flop_tot = flops.total_float_ops
            ftxt = open('flops.txt', 'w')
            for (idx, name) in enumerate(['', 'K', 'M', 'G', 'T']):
                text = ('%.3f [%sFLOPS]' % ((flop_tot / (10 ** (3 * idx))), name))
                print(text)
                ftxt.write(('%s\n' % text))
            ftxt.close()

    def save_params(self, model='base', tflite=False):
        """Save a checkpoint under ckpt_dir/<model>, or export a TFLite model."""
        if tflite:
            conc_func = self.__model.__call__.get_concrete_function(tf.TensorSpec(shape=(1, self.height, self.width, self.channel), dtype=tf.float32))
            converter = tf.lite.TFLiteConverter.from_concrete_functions([conc_func])
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True
            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
            tflite_model = converter.convert()
            with open('model.tflite', 'wb') as f:
                f.write(tflite_model)
        else:
            vars_to_save = self.__model.layer.parameters.copy()
            vars_to_save['optimizer'] = self.optimizer
            ckpt = tf.train.Checkpoint(**vars_to_save)
            ckptman = tf.train.CheckpointManager(ckpt, directory=os.path.join(self.ckpt_dir, model), max_to_keep=1)
            ckptman.save()

    def load_params(self):
        """Restore the latest checkpoint from ckpt_dir (partial restore allowed)."""
        vars_to_load = {}
        for (idx, name) in enumerate(self.__model.layer.name_bank):
            # NOTE(review): `parameters` is indexed by position here but by key
            # name in save_params — confirm both lookups address the same store.
            vars_to_load[self.__model.layer.name_bank[idx]] = self.__model.layer.parameters[idx]
        vars_to_load['optimizer'] = self.optimizer
        ckpt = tf.train.Checkpoint(**vars_to_load)
        latest_ckpt = tf.train.latest_checkpoint(self.ckpt_dir)
        status = ckpt.restore(latest_ckpt)
        status.expect_partial()
class Logger(object):
def __init__(self, path, header, mode='w'):
self.log_file = open(path, mode=mode)
self.logger = csv.writer(self.log_file, delimiter='\t')
if (mode is not 'a'):
self.logger.writerow(header)
self.header = header
def __del(self):
self.log_file.close()
def log(self, values):
write_values = []
for col in self.header:
assert (col in values)
write_values.append(values[col])
self.logger.writerow(write_values)
self.log_file.flush() |
def construct_H_with_KNN(X, K_neigs=[10], is_probH=False, m_prob=1):
    """Build a hypergraph incidence matrix H from KNN neighborhoods of X.

    Args:
        X: (n_samples, n_features) array; higher-rank input is flattened to 2-D.
        K_neigs: int or list of ints — one hyperedge group per k.
            (Mutable default is kept for interface compatibility; it is never
            mutated, only rebound.)
        is_probH: build probabilistic (soft) incidence weights.
        m_prob: probability scale parameter.

    Returns:
        Concatenated incidence matrix H over all requested k values.
    """
    if X.ndim != 2:
        # Collapse any leading dims into the sample axis.
        X = X.reshape(-1, X.shape[-1])
    # Idiom fix: isinstance() instead of `type(...) == int`.
    if isinstance(K_neigs, int):
        K_neigs = [K_neigs]
    dis_mat = cos_dis(X)
    H = None
    for k_neig in K_neigs:
        H_tmp = construct_H_with_KNN_from_distance(dis_mat, k_neig, is_probH, m_prob)
        H = hyperedge_concat(H, H_tmp)
    return H
def clip_gelu(model, maxval):
    """Clamp the input-quantizer amax of every FFN output projection to maxval.

    Targets modules named `*.output.dense` while excluding the attention
    output projections (`*attention.output.dense`); logs each change.
    """
    for (name, mod) in model.named_modules():
        is_ffn_output = name.endswith('.output.dense') and not name.endswith('attention.output.dense')
        if not is_ffn_output:
            continue
        amax_init = mod._input_quantizer._amax.data.detach().item()
        # In-place clamp of the quantizer's calibration maximum.
        mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
        amax = mod._input_quantizer._amax.data.detach().item()
        logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}')
class STORAL1(DataProcessor):
    """STORAL story->moral generation data processor."""

    def __init__(self):
        super().__init__()

    def get_examples(self, data_dir, split):
        """Read `<split>.jsonl` and return a list of InputExamples.

        Fixed: the original appended to an undefined `examples` name
        (NameError at runtime) and never returned the list.
        """
        examples = []
        path = os.path.join(data_dir, f'{split}.jsonl')
        with open(path, encoding='utf8') as f:
            for line in f:
                example_json = json.loads(line)
                # story goes into the prompt meta; moral is the generation target.
                example = InputExample(meta={'story': example_json['story']}, tgt_text=example_json['moral'])
                examples.append(example)
        return examples

    def get_templates(self):
        return [':{story} :,:']
def get_theme(name):
    """Return a fresh instance of the theme registered under *name*.

    Raises ValueError when no theme with that name is registered.
    """
    if name in __themes__:
        return __themes__[name]()
    raise ValueError(f"Theme '{name}' not found.")
class UPerNet(nn.Module):
    """UPerNet segmentation head: PPM on the deepest feature map + FPN fusion.

    Args:
        num_class: number of output classes.
        fc_dim: channels of the deepest backbone feature (input to the PPM).
        use_softmax: inference mode — upsample to segSize and apply softmax;
            otherwise return log_softmax at feature resolution (training).
        pool_scales: adaptive-pool output sizes for the pyramid pooling module.
        fpn_inplanes: channel counts of the backbone feature maps (shallow→deep).
        fpn_dim: common channel width used throughout the FPN.
    """
    def __init__(self, num_class=150, fc_dim=4096, use_softmax=False, pool_scales=(1, 2, 3, 6), fpn_inplanes=(256, 512, 1024, 2048), fpn_dim=256):
        super(UPerNet, self).__init__()
        self.use_softmax = use_softmax
        # Pyramid Pooling Module: one (pool, 1x1-conv) branch per scale.
        self.ppm_pooling = []
        self.ppm_conv = []
        for scale in pool_scales:
            self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
            self.ppm_conv.append(nn.Sequential(nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), BatchNorm2d(512), nn.ReLU(inplace=True)))
        self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
        self.ppm_conv = nn.ModuleList(self.ppm_conv)
        # Fuses conv5 with all pooled branches down to fpn_dim channels.
        self.ppm_last_conv = conv3x3_bn_relu((fc_dim + (len(pool_scales) * 512)), fpn_dim, 1)
        # Lateral 1x1 convs for all but the deepest level (which goes through the PPM).
        self.fpn_in = []
        for fpn_inplane in fpn_inplanes[:(- 1)]:
            self.fpn_in.append(nn.Sequential(nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False), BatchNorm2d(fpn_dim), nn.ReLU(inplace=True)))
        self.fpn_in = nn.ModuleList(self.fpn_in)
        # Per-level 3x3 smoothing convs after top-down addition.
        self.fpn_out = []
        for i in range((len(fpn_inplanes) - 1)):
            self.fpn_out.append(nn.Sequential(conv3x3_bn_relu(fpn_dim, fpn_dim, 1)))
        self.fpn_out = nn.ModuleList(self.fpn_out)
        # Final fusion of all FPN levels + classifier.
        self.conv_last = nn.Sequential(conv3x3_bn_relu((len(fpn_inplanes) * fpn_dim), fpn_dim, 1), nn.Conv2d(fpn_dim, num_class, kernel_size=1))
    def forward(self, conv_out, segSize=None):
        """conv_out: list of backbone feature maps, shallow→deep.

        Returns per-pixel class scores: softmax at segSize when use_softmax,
        else log_softmax at the finest FPN resolution.
        """
        conv5 = conv_out[(- 1)]
        input_size = conv5.size()
        # PPM: pool at each scale, conv, upsample back to conv5's spatial size.
        ppm_out = [conv5]
        for (pool_scale, pool_conv) in zip(self.ppm_pooling, self.ppm_conv):
            ppm_out.append(pool_conv(nn.functional.interpolate(pool_scale(conv5), (input_size[2], input_size[3]), mode='bilinear', align_corners=False)))
        ppm_out = torch.cat(ppm_out, 1)
        f = self.ppm_last_conv(ppm_out)
        # Top-down FPN pathway: upsample, add lateral, smooth.
        fpn_feature_list = [f]
        for i in reversed(range((len(conv_out) - 1))):
            conv_x = conv_out[i]
            conv_x = self.fpn_in[i](conv_x)
            f = nn.functional.interpolate(f, size=conv_x.size()[2:], mode='bilinear', align_corners=False)
            f = (conv_x + f)
            fpn_feature_list.append(self.fpn_out[i](f))
        fpn_feature_list.reverse()
        # Upsample every level to the finest resolution and fuse.
        output_size = fpn_feature_list[0].size()[2:]
        fusion_list = [fpn_feature_list[0]]
        for i in range(1, len(fpn_feature_list)):
            fusion_list.append(nn.functional.interpolate(fpn_feature_list[i], output_size, mode='bilinear', align_corners=False))
        fusion_out = torch.cat(fusion_list, 1)
        x = self.conv_last(fusion_out)
        if self.use_softmax:
            # Inference: upsample to the requested output size and normalize.
            x = nn.functional.interpolate(x, size=segSize, mode='bilinear', align_corners=False)
            x = nn.functional.softmax(x, dim=1)
            return x
        # Training: log-probabilities for NLL loss.
        x = nn.functional.log_softmax(x, dim=1)
        return x
@pytest.mark.parametrize('old_size', [141, 32, 17, 6, 3])
@pytest.mark.parametrize('new_size', [141, 32, 17, 6, 3])
def test_resize_head_1d(old_size, new_size, depth=1000):
    """Resizing a message head and resizing it back must round-trip exactly.

    (Restored the `@pytest.mark` prefix that was lost from both parametrize
    decorators — bare `.parametrize(...)` lines are syntax errors.)
    """
    old_shape = (old_size,)
    np.random.seed(0)
    p = 8
    # Push `depth` layers of p-bit random data onto the message stack.
    bits = np.random.randint((1 << p), size=((depth,) + old_shape), dtype=np.uint64)
    message = cs.base_message(old_shape)
    (other_bits_push, _) = cs.repeat(cs.Uniform(p), depth)
    (message,) = other_bits_push(message, bits)
    resized = cs.codecs._resize_head_1d(message, new_size)
    reconstructed = cs.codecs._resize_head_1d(resized, old_size)
    assert_message_equal(message, reconstructed)
def parse_nnet2_to_nnet3(line_buffer):
    """Parse a Kaldi nnet2 model text stream into an Nnet3Model.

    Reads the transition model, then components until '</Components>',
    then the priors.  Logs an error if fewer components were read than the
    header declared, but still returns the (partial) model.
    """
    model = Nnet3Model()
    model.transition_model = parse_transition_model(line_buffer)
    (line, model.num_components) = parse_nnet2_header(line_buffer)
    while True:
        if line.startswith('</Components>'):
            break
        (component, pairs) = parse_component(line, line_buffer)
        model.add_component(component, pairs)
        line = next(line_buffer)
    model.priors = parse_priors(line, line_buffer)
    if (model.components_read != model.num_components):
        # Fixed 'succesfully' typo; use lazy %-style args per logging convention.
        logger.error('Did not read all components successfully: %s/%s',
                     model.components_read, model.num_components)
    return model
class ConstructEnvsSampler(TaskSampler):
    """Task sampler backed by a fixed list of environment constructors."""

    def __init__(self, env_constructors):
        # Callables that each build a fresh environment instance.
        self._env_constructors = env_constructors

    def n_tasks(self):
        """Number of distinct tasks available."""
        return len(self._env_constructors)

    def sample(self, n_tasks, with_replacement=False):
        """Return `n_tasks` NewEnvUpdate objects for sampled constructors."""
        chosen = _sample_indices(n_tasks, len(self._env_constructors), with_replacement)
        return [NewEnvUpdate(self._env_constructors[idx]) for idx in chosen]
class InfiniteBatchSampler(Sampler):
    """Infinite, rank-aware batch sampler for iteration-based training.

    Yields lists of `batch_size` dataset indices forever, striding the index
    stream across distributed ranks.  With `shuffle`, indices come from
    repeated seeded permutations, so all ranks see consistent streams.
    """
    def __init__(self, dataset, batch_size=1, world_size=None, rank=None, seed=0, shuffle=True):
        # Fall back to the detected distributed context when not given explicitly.
        (_rank, _world_size) = get_dist_info()
        if (world_size is None):
            world_size = _world_size
        if (rank is None):
            rank = _rank
        self.rank = rank
        self.world_size = world_size
        self.dataset = dataset
        self.batch_size = batch_size
        self.seed = (seed if (seed is not None) else 0)
        self.shuffle = shuffle
        self.size = len(dataset)
        # Generator of this rank's infinite index stream.
        self.indices = self._indices_of_rank()
    def _infinite_indices(self):
        """Yield dataset indices forever: fresh permutations (or 0..size-1) per pass."""
        g = torch.Generator()
        g.manual_seed(self.seed)
        while True:
            if self.shuffle:
                (yield from torch.randperm(self.size, generator=g).tolist())
            else:
                (yield from torch.arange(self.size).tolist())
    def _indices_of_rank(self):
        """Yield this rank's slice: every world_size-th index starting at rank."""
        (yield from itertools.islice(self._infinite_indices(), self.rank, None, self.world_size))
    def __iter__(self):
        # Group the infinite index stream into fixed-size batches; never stops.
        batch_buffer = []
        for idx in self.indices:
            batch_buffer.append(idx)
            if (len(batch_buffer) == self.batch_size):
                (yield batch_buffer)
                batch_buffer = []
    def __len__(self):
        # NOTE(review): returns the dataset size, not a batch count — the
        # sampler is infinite, so callers should not rely on this as "length".
        return self.size
    def set_epoch(self, epoch):
        # Epoch-based reshuffling is intentionally unsupported (infinite stream).
        raise NotImplementedError
class ResponseGenerator():
    """Helpers for building HTTP Response objects from exceptions.

    All methods are stateless; they are now declared @staticmethod — the
    originals took no `self`, so calling them on an instance raised TypeError.
    """

    @staticmethod
    def add_refresh(response: Response, refresh_time: int) -> Response:
        """Attach a refresh header telling the client to re-request after `refresh_time`."""
        # NOTE(review): header values are normally strings; this stores the raw
        # int exactly as the original did — confirm Response coerces it.
        response.headers['refresh'] = refresh_time
        return response

    @staticmethod
    def from_exception(exception: Exception) -> Response:
        """Build an error Response whose body is the exception text."""
        return Response(response=str(exception), status=ResponseGenerator.get_status_code_for_exception(exception))

    @staticmethod
    def get_status_code_for_exception(exception: Exception) -> int:
        """Map domain exceptions to HTTP status codes; unknown types get 500."""
        if isinstance(exception, ClientErrorException):
            return 400
        if isinstance(exception, AccessDeniedException):
            return 403
        if isinstance(exception, NotFoundException):
            return 404
        if isinstance(exception, InternalException):
            return 500
        return 500
def _SpikeTorchConv(*args, input_):
states = []
if ((len(args) == 1) and (type(args) is not tuple)):
args = (args,)
for arg in args:
arg = arg.to('cpu')
arg = torch.Tensor(arg)
arg = torch.zeros_like(input_, requires_grad=True)
states.append(arg)
if (len(states) == 1):
return states[0]
return states |
class MXNetRunner(object):
    """Distributed MXNet training runtime for one Ray worker process.

    Depending on the DMLC_ROLE in `env`, the process either becomes a
    *worker* (builds model/loss/metrics and trains) or a *server/scheduler*
    (only spawns a subprocess importing mxnet with the DMLC env set, which
    blocks serving the kvstore).
    """
    def setup_distributed(self, env, config, model_creator, loss_creator=None, validation_metrics_creator=None, eval_metrics_creator=None):
        """Configure this process from DMLC env vars and user factory callables.

        `env` holds DMLC_* variables; `config` is the user dict (optimizer,
        seed, log_interval, ...); the creators are callables taking `config`.
        """
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger()
        self.config = config
        self.model_creator = model_creator
        self.loss_creator = loss_creator
        self.validation_metrics_creator = validation_metrics_creator
        self.eval_metrics_creator = eval_metrics_creator
        self.is_worker = False
        env['DMLC_NODE_HOST'] = self.get_node_ip()
        if (env['DMLC_ROLE'] == 'worker'):
            self.is_worker = True
        if self.is_worker:
            os.environ.update(env)
            self.kv = mx.kv.create('dist_sync')
            if ('seed' in self.config):
                mx.random.seed(self.config['seed'])
            self.model = self.model_creator(self.config)
            self.loss = (self.loss_creator(self.config) if self.loss_creator else None)
            self.eval_metrics = (self.eval_metrics_creator(self.config) if self.eval_metrics_creator else None)
            from mxnet.metric import CompositeEvalMetric
            # Wrap metric lists so they expose a single-metric interface.
            if isinstance(self.eval_metrics, list):
                self.eval_metrics = CompositeEvalMetric(self.eval_metrics)
            self.val_metrics = (self.validation_metrics_creator(self.config) if self.validation_metrics_creator else None)
            if isinstance(self.val_metrics, list):
                self.val_metrics = CompositeEvalMetric(self.val_metrics)
            # Gluon models need an explicit loss + Trainer; Module models use fit().
            if (not isinstance(self.model, mx.module.BaseModule)):
                invalidInputError(self.loss, 'Loss not defined for gluon model, please specify loss_creator')
                self.trainer = gluon.Trainer(self.model.collect_params(), self.config['optimizer'], optimizer_params=self.config['optimizer_params'], kvstore=self.kv)
            else:
                self.trainer = None
        else:
            # Server/scheduler role: importing mxnet with DMLC env set starts
            # the kvstore process; run it in a subprocess so this one survives.
            modified_env = os.environ.copy()
            modified_env.update(env)
            subprocess.Popen(['python', '-c', 'import mxnet'], shell=False, env=modified_env)
    def train(self, train_data, epochs=1, batch_size=32, validation_data=None, train_resize_batch_num=None):
        """Run training on workers; returns a one-element list of stats dicts.

        `train_data` / `validation_data` are callables (config, kv) -> iterator.
        Non-worker processes return empty stats.
        """
        stats = dict()
        if self.is_worker:
            config = copy.copy(self.config)
            if ('batch_size' not in config):
                config['batch_size'] = batch_size
            if (train_resize_batch_num is not None):
                config['train_resize_batch_num'] = train_resize_batch_num
            train_data_iter = train_data(config, self.kv)
            val_data_iter = (validation_data(config, self.kv) if validation_data else None)
            start_time = time.time()
            if self.trainer:
                # ---- Gluon training loop ----
                def cpu_context(target_data):
                    # Recursively move (possibly nested lists of) NDArrays to CPU.
                    if isinstance(target_data, list):
                        return [cpu_context(d) for d in target_data]
                    else:
                        return target_data.as_in_context(mx.cpu())
                for epoch in range(epochs):
                    if isinstance(train_data_iter, mx.io.DataIter):
                        train_data_iter.reset()
                    if self.eval_metrics:
                        self.eval_metrics.reset()
                    batch_start_time = time.time()
                    epoch_start_time = time.time()
                    for (i, batch) in enumerate(train_data_iter):
                        data = cpu_context(batch.data)
                        label = cpu_context(batch.label)
                        if (not isinstance(data, list)):
                            data = [data]
                        if (not isinstance(label, list)):
                            label = [label]
                        from mxnet import autograd as ag
                        with ag.record():
                            output = self.model(*data)
                            if (not isinstance(output, list)):
                                output = [output]
                            Ls = self.loss(*output, *label)
                            ag.backward(Ls)
                        self.trainer.step(batch_size)
                        if self.eval_metrics:
                            self.eval_metrics.update(label, output)
                        # Periodic progress logging every log_interval batches.
                        if (not ((i + 1) % self.config['log_interval'])):
                            iteration_log = ('Epoch[%d] Batch[%d] Speed: %f samples/sec %s=%f' % (epoch, i, (batch_size / (time.time() - batch_start_time)), 'loss', Ls.asnumpy().mean()))
                            if self.eval_metrics:
                                (names, accs) = self.eval_metrics.get()
                                (names, accs) = (to_list(names), to_list(accs))
                                for (name, acc) in zip(names, accs):
                                    iteration_log += (' %s=%f' % (name, acc))
                            self.logger.info(iteration_log)
                            batch_start_time = time.time()
                    self.logger.info(('[Epoch %d] time cost: %f' % (epoch, (time.time() - epoch_start_time))))
                    if self.eval_metrics:
                        epoch_train_log = ('[Epoch %d] training: ' % epoch)
                        (names, accs) = self.eval_metrics.get()
                        (names, accs) = (to_list(names), to_list(accs))
                        for (name, acc) in zip(names, accs):
                            epoch_train_log += ('%s=%f ' % (name, acc))
                        self.logger.info(epoch_train_log)
                    # Optional validation pass at the end of each epoch.
                    if val_data_iter:
                        if isinstance(val_data_iter, mx.io.DataIter):
                            val_data_iter.reset()
                        self.val_metrics.reset()
                        for batch in val_data_iter:
                            data = cpu_context(batch.data)
                            label = cpu_context(batch.label)
                            if (not isinstance(data, list)):
                                data = [data]
                            if (not isinstance(label, list)):
                                label = [label]
                            output = self.model(*data)
                            if (not isinstance(output, list)):
                                output = [output]
                            self.val_metrics.update(label, output)
                        epoch_val_log = ('[Epoch %d] validation: ' % epoch)
                        (names, accs) = self.val_metrics.get()
                        (names, accs) = (to_list(names), to_list(accs))
                        for (name, acc) in zip(names, accs):
                            epoch_val_log += ('%s=%f ' % (name, acc))
                        self.logger.info(epoch_val_log)
                # Export the final training metrics as the returned stats.
                if self.eval_metrics:
                    (names, accs) = self.eval_metrics.get()
                    (names, accs) = (to_list(names), to_list(accs))
                    for (name, acc) in zip(names, accs):
                        stats[name] = acc
            else:
                # ---- Module API: delegate the whole loop to model.fit ----
                if ('init' not in self.config):
                    from mxnet.initializer import Uniform
                    self.config['init'] = Uniform(0.01)
                if (self.eval_metrics is None):
                    self.eval_metrics = 'acc'
                self.model.fit(train_data=train_data_iter, num_epoch=epochs, initializer=self.config['init'], kvstore=self.kv, optimizer=self.config['optimizer'], optimizer_params=self.config['optimizer_params'], eval_data=val_data_iter, eval_metric=self.eval_metrics, validation_metric=self.val_metrics, batch_end_callback=mx.callback.Speedometer(batch_size, self.config['log_interval']), epoch_end_callback=(None if ('model' not in self.config) else mx.callback.do_checkpoint(self.config['model'])))
            epoch_time = (time.time() - start_time)
            stats['epoch_time'] = epoch_time
        return [stats]
    def shutdown(self):
        """Drop references to training state so the worker can be reclaimed."""
        del self.logger
        if self.is_worker:
            del self.kv
            del self.model
            del self.trainer
            del self.loss
            del self.eval_metrics
            del self.val_metrics
    def get_node_ip(self):
        """Return (and cache) this node's IP address."""
        if ('node_ip' not in self.__dict__):
            self.node_ip = ray._private.services.get_node_ip_address()
        return self.node_ip
    def find_free_port(self):
        """Return (and cache) a free port on this node."""
        if ('port' not in self.__dict__):
            from bigdl.orca.learn.mxnet.utils import find_free_port
            self.port = find_free_port()
        return self.port
def flow_embedding_module(xyz1, xyz2, feat1, feat2, radius, nsample, mlp, is_training, bn_decay, scope, bn=True, pooling='max', knn=True, corr_func='elementwise_product'):
    """FlowNet3D flow-embedding layer (TF1 graph mode).

    For every point in cloud 1, groups `nsample` neighbors from cloud 2
    (KNN or ball query with KNN fallback), correlates the two feature sets
    with `corr_func`, concatenates the spatial offsets, then runs a shared
    MLP and pools over the neighborhood.

    Returns (xyz1, feat1_new) — query coordinates and the pooled embedding.
    NOTE(review): assumes xyz* are (batch, npoint, 3) and feat* are
    (batch, npoint, C) — confirm against the grouping ops' contracts.
    """
    if knn:
        (_, idx) = knn_point(nsample, xyz2, xyz1)
    else:
        (idx, cnt) = query_ball_point(radius, nsample, xyz2, xyz1)
        (_, idx_knn) = knn_point(nsample, xyz2, xyz1)
        cnt = tf.tile(tf.expand_dims(cnt, (- 1)), [1, 1, nsample])
        # Where the ball query found too few neighbors, fall back to KNN indices.
        idx = tf.where((cnt > (nsample - 1)), idx, idx_knn)
    xyz2_grouped = group_point(xyz2, idx)
    xyz1_expanded = tf.expand_dims(xyz1, 2)
    # Per-neighbor spatial offsets from the query point.
    xyz_diff = (xyz2_grouped - xyz1_expanded)
    feat2_grouped = group_point(feat2, idx)
    feat1_expanded = tf.expand_dims(feat1, 2)
    # Feature correlation between each query feature and its grouped neighbors.
    if (corr_func == 'elementwise_product'):
        feat_diff = (feat2_grouped * feat1_expanded)
    elif (corr_func == 'concat'):
        feat_diff = tf.concat(axis=(- 1), values=[feat2_grouped, tf.tile(feat1_expanded, [1, 1, nsample, 1])])
    elif (corr_func == 'dot_product'):
        feat_diff = tf.reduce_sum((feat2_grouped * feat1_expanded), axis=[(- 1)], keep_dims=True)
    elif (corr_func == 'cosine_dist'):
        feat2_grouped = tf.nn.l2_normalize(feat2_grouped, (- 1))
        feat1_expanded = tf.nn.l2_normalize(feat1_expanded, (- 1))
        feat_diff = tf.reduce_sum((feat2_grouped * feat1_expanded), axis=[(- 1)], keep_dims=True)
    elif (corr_func == 'flownet_like'):
        # FlowNet-style: return flattened (offset, correlation) directly,
        # skipping the MLP and pooling below.
        batch_size = xyz1.get_shape()[0].value
        npoint = xyz1.get_shape()[1].value
        feat_diff = tf.reduce_sum((feat2_grouped * feat1_expanded), axis=[(- 1)], keep_dims=True)
        total_diff = tf.concat(axis=(- 1), values=[xyz_diff, feat_diff])
        feat1_new = tf.reshape(total_diff, [batch_size, npoint, (- 1)])
        return (xyz1, feat1_new)
    # Stack correlation features with spatial offsets for the shared MLP.
    feat1_new = tf.concat([feat_diff, xyz_diff], axis=3)
    with tf.variable_scope(scope) as sc:
        # NOTE(review): bn is hard-coded True here, ignoring the `bn` argument.
        for (i, num_out_channel) in enumerate(mlp):
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope=('conv_diff_%d' % i), bn_decay=bn_decay)
    # Pool over the neighborhood dimension.
    if (pooling == 'max'):
        feat1_new = tf.reduce_max(feat1_new, axis=[2], keep_dims=False, name='maxpool_diff')
    elif (pooling == 'avg'):
        feat1_new = tf.reduce_mean(feat1_new, axis=[2], keep_dims=False, name='avgpool_diff')
    return (xyz1, feat1_new)
def HPF1(data, cutoff, q, order=5):
    """Apply a Butterworth high-pass filter to *data*.

    Args:
        data: input signal (1-D array-like).
        cutoff: normalized cutoff frequency passed to scipy's butter.
        q: accepted for interface compatibility but not used here.
        order: filter order (default 5).

    Returns:
        The filtered signal from scipy.signal.lfilter.
    """
    numerator, denominator = sig.butter(order, cutoff, btype='high', analog=False)
    return sig.lfilter(numerator, denominator, data)
def create_student_by_copying_alternating_layers(teacher: Union[(str, PreTrainedModel)], save_path: Union[(str, Path)]='student', e: Union[(int, None)]=None, d: Union[(int, None)]=None, copy_first_teacher_layers=False, e_layers_to_copy=None, d_layers_to_copy=None, **extra_config_kwargs) -> Tuple[(PreTrainedModel, List[int], List[int])]:
    """Build a smaller student seq2seq model by copying teacher layers.

    Args:
        teacher: a model instance, or a checkpoint/hub name to load.
        save_path: directory where the student (and, when `teacher` is a
            string, its tokenizer) is saved.
        e: number of student encoder layers; None keeps the teacher's count.
        d: number of student decoder layers; None keeps the teacher's count.
        copy_first_teacher_layers: copy layers 0..k-1 instead of an evenly
            spaced subset.
        e_layers_to_copy / d_layers_to_copy: explicit teacher layer indices
            to copy; computed via pick_layers_to_copy when None.
        **extra_config_kwargs: extra overrides applied to the student config.

    Returns:
        (student, e_layers_to_copy, d_layers_to_copy).
    """
    _msg = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
    assert ((e is not None) or (d is not None)), _msg
    if isinstance(teacher, str):
        # Save the tokenizer alongside the student so save_path is usable
        # as a standalone checkpoint.
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f'teacher must be a model or string got type {type(teacher)}'
    init_kwargs = teacher.config.to_diff_dict()
    try:
        # BART/Marian-style configs expose encoder_layers/decoder_layers.
        (teacher_e, teacher_d) = (teacher.config.encoder_layers, teacher.config.decoder_layers)
        if (e is None):
            e = teacher_e
        if (d is None):
            d = teacher_d
        init_kwargs.update({'encoder_layers': e, 'decoder_layers': d})
    except AttributeError:
        # T5/ProphetNet-style configs name the counts differently.
        if hasattr(teacher.config, 'num_encoder_layers'):
            (teacher_e, teacher_d) = (teacher.config.num_encoder_layers, teacher.config.num_decoder_layers)
        else:
            (teacher_e, teacher_d) = (teacher.config.num_layers, teacher.config.num_decoder_layers)
        if (e is None):
            e = teacher_e
        if (d is None):
            d = teacher_d
        if hasattr(teacher.config, 'num_encoder_layers'):
            init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d})
        else:
            init_kwargs.update({'num_layers': e, 'num_decoder_layers': d})
    init_kwargs.update(extra_config_kwargs)
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # strict=False: the student has fewer layers so extra teacher keys are
    # expected; missing keys, however, would indicate a real mismatch.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert (info.missing_keys == []), info.missing_keys
    if copy_first_teacher_layers:
        (e_layers_to_copy, d_layers_to_copy) = (list(range(e)), list(range(d)))
        logger.info(f'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}')
        student.save_pretrained(save_path)
        return (student, e_layers_to_copy, d_layers_to_copy)
    if (e_layers_to_copy is None):
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if (d_layers_to_copy is None):
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)
    try:
        # Layer containers live at different attribute paths per architecture.
        if hasattr(teacher, 'prophetnet'):
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:
        # T5-style models keep their layers under `.block`.
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(f'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}')
    # Record provenance so downstream tooling knows how this student was made.
    student.config.init_metadata = {'teacher_type': teacher.config.model_type, 'copied_encoder_layers': e_layers_to_copy, 'copied_decoder_layers': d_layers_to_copy}
    student.save_pretrained(save_path)
    return (student, e_layers_to_copy, d_layers_to_copy)
# NOTE(review): the bare `_model` below looks like a mangled registration
# decorator (presumably `@register_model`) — confirm against upstream.
_model
def dm_nfnet_f1(pretrained=False, **kwargs):
    """Build the DM NFNet-F1 variant via the shared normalizer-free factory."""
    return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs)
def vgg19(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> VGG:
    """Build a torchvision VGG-19 and wrap it in the local VGG adapter."""
    backbone = torchvision.models.vgg19(pretrained, progress, **kwargs)
    return VGG(backbone)
def create_iNat18(task='train'):
    """Materialize one iNat18 split as per-class image folders.

    Reads the split's annotation JSON from src_root and copies every image
    into dst_root/<task>/<category_id>/<filename>.
    """
    src_root = '/path/to/iNat18'
    dst_root = '/diskC/xzz/iNat18'
    # Lay out one destination directory per category (8142 classes) the
    # first time this split is processed.
    if not os.path.exists(os.path.join(dst_root, task, '0')):
        for category in range(8142):
            os.makedirs(os.path.join(dst_root, task, str(category)), exist_ok=True)
    with open(os.path.join(src_root, f'iNat18_{task}.json')) as f:
        meta = json.load(f)
    for anno in tqdm(meta['annotations']):
        # NOTE: the 'downloaed' typo is preserved on purpose — it must match
        # the fpath strings stored in the annotation file.
        rel_path = anno['fpath'].replace('./downloaed/iNat18/', '')
        file_name = anno['fpath'].split('/')[-1]
        category = str(anno['category_id'])
        shutil.copy(os.path.join(src_root, rel_path), os.path.join(dst_root, task, category, file_name))
# NOTE(review): the bare `_config` below looks like a mangled decorator
# (presumably a sacred-style `@ex.config`) — confirm against upstream.
_config
def cfg_habitat():
    """Habitat PointNav PPO experiment configuration.

    Populates `uuid` and the nested `cfg` dict (learner / env / saving /
    training sections). NOTE(review): there is no return statement —
    presumably the config framework captures these locals; confirm before
    calling this as a plain function.
    """
    uuid = 'habitat_core'
    cfg = {}
    # PPO learner hyperparameters, replay settings, and loss plumbing.
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 1000, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 8, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network_reinit': False, 'perception_network': 'AtariNet', 'perception_network_kwargs': {'extra_kwargs': {'normalize_taskonomy': True}}, 'test': False, 'use_replay': True, 'replay_buffer_size': 3000, 'on_policy_epoch': 8, 'off_policy_epoch': 8, 'slam_class': None, 'slam_kwargs': {}, 'loss_kwargs': {'intrinsic_loss_coefs': [], 'intrinsic_loss_types': []}, 'deterministic': False, 'rollout_value_batch_multiplier': 2, 'cache_kwargs': {}, 'optimizer_class': 'optim.Adam', 'optimizer_kwargs': {}}
    # Environment, sensor, and observation-transform settings.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Habitat_PointNav', 'env_specific_kwargs': {'swap_building_k_episodes': 10, 'gpu_devices': [0], 'scenario_kwargs': {'use_depth': False, 'max_geodesic_dist': 99999}, 'map_kwargs': {'map_building_size': 22, 'map_max_pool': False, 'use_cuda': False, 'history_size': None}, 'target_dim': 16, 'val_scenes': None, 'train_scenes': None}, 'sensors': {'features': None, 'taskonomy': None, 'rgb_filled': None, 'map': None, 'target': None, 'depth': None, 'global_pos': None, 'pointgoal': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_pre_aggregation_fn': None, 'transform_fn_pre_aggregation_kwargs': {}, 'transform_fn_post_aggregation': None, 'transform_fn_post_aggregation_fn': None, 'transform_fn_post_aggregation_kwargs': {}, 'num_processes': 8, 'num_val_processes': 1, 'additional_repeat_count': 0}
    # Checkpointing, logging, and visdom visualization settings.
    cfg['saving'] = {'checkpoint': None, 'checkpoint_num': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 10, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_server': 'localhost', 'visdom_port': '8097', 'obliterate_logs': False}
    # NOTE(review): 'num_frames': .0 evaluates to 0.0 — this looks like a
    # truncated frame budget (e.g. 5e7); confirm the intended value.
    cfg['training'] = {'cuda': True, 'gpu_devices': None, 'seed': 42, 'num_frames': .0, 'resumable': False}
def make_master_params(param_groups_and_shapes):
    """Create flat fp32 "master" parameters for mixed-precision training.

    Args:
        param_groups_and_shapes: iterable of (group, shape) pairs, where
            `group` is a sequence of (name, parameter) tuples and `shape`
            is the view applied to the flattened fp32 copy.

    Returns:
        A list with one trainable nn.Parameter per input pair.
    """
    masters = []
    for group, shape in param_groups_and_shapes:
        # Detach + float(): an fp32 copy that does not share the model's graph.
        fp32_tensors = [p.detach().float() for _, p in group]
        flat = _flatten_dense_tensors(fp32_tensors).view(shape)
        master = nn.Parameter(flat)
        master.requires_grad = True
        masters.append(master)
    return masters
def init_matrix(data):
    """Initialize a DP table in place: first row and first column become
    +inf, except the origin cell which is set to 0. Returns the same list.
    """
    inf = float('inf')
    for row in data:
        row[0] = inf
    top = data[0]
    for col in range(len(top)):
        top[col] = inf
    top[0] = 0
    return data
def test_add_edges_sum(g1, g2):
    """Exercise add_edges(..., merge_op='sum') on the g1/g2 graph fixtures.

    NOTE(review): g1 and g2 are pytest fixtures defined elsewhere; from the
    assertions below g1 starts with 2 edges and g2 with 3 — confirm against
    the conftest.
    """
    assert (g1.num_e == 2)
    g1.add_edges((3, 2), e_weight=0.5, merge_op='sum')
    assert (g1.num_e == 3)
    # Edges are stored in canonical (low, high) order: (3, 2) is kept as (2, 3).
    assert ((2, 3) in g1.e[0])
    assert ((3, 2) not in g1.e[0])
    # The adjacency lookup accepts either vertex order.
    assert (g1.A[(3, 2)] == 0.5)
    assert (g2.num_e == 3)
    g2.add_edges(((1, 2), (1, 3)), e_weight=[0.1, 0.2], merge_op='sum')
    assert (g2.num_e == 5)
    assert ((1, 2) in g2.e[0])
    assert (g2.A[(1, 2)] == 0.1)
    # e_both_side exposes each undirected edge in both orientations.
    assert ((2, 1) in g2.e_both_side[0])
    assert (g2.A[(2, 1)] == 0.1)
    g2.add_edges(((3, 2), (3, 1)), e_weight=[1.1, 2.1], merge_op='sum')
    assert (g2.num_e == 6)
    assert ((2, 3) in g2.e[0])
    assert (g2.A[(3, 2)] == 1.1)
    assert ((2, 3) in g2.e_both_side[0])
    assert (g2.A[(2, 3)] == 1.1)
    # (3, 1) canonicalizes to (1, 3), which already exists with weight 0.2,
    # so merge_op='sum' yields 0.2 + 2.1 = 2.3.
    assert (g2.A[(1, 3)] == 2.3)
def merge(b, graph):
    """Apply the coalesce rules plus the delete rule to *graph* until
    confluent, returning the rewritten graph (cleanup rules disabled)."""
    rules = list(merge_coalesce_rules) + [merge_delete_rule]
    return apply_confluent_gts(b, graph, rules, apply_cleanup_rules=False)
# NOTE(review): the bare `_torch` / `_staging_test` lines look like mangled
# decorators (presumably `@require_torch` / `@is_staging_test`) — confirm
# against the original transformers test file.
_torch
_staging_test
class DynamicPipelineTester(unittest.TestCase):
    """Staging-hub test: register a custom pipeline, push it to the hub,
    and verify it can only be loaded back with trust_remote_code=True."""

    # Minimal vocabulary for the toy BertTokenizer used in the test.
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'I', 'love', 'hate', 'you']

    @classmethod
    def setUpClass(cls):
        # BUG FIX: unittest invokes setUpClass on the class itself, so this
        # must be a classmethod; the undecorated def failed with a missing
        # `cls` argument.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # BUG FIX: classmethod for the same reason as setUpClass.
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-pipeline')
        except HTTPError:
            pass

    def test_push_to_hub_dynamic_pipeline(self):
        from transformers import BertConfig, BertForSequenceClassification, BertTokenizer
        PIPELINE_REGISTRY.register_pipeline('pair-classification', pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification)
        config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = BertForSequenceClassification(config).eval()
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f'{USER}/test-dynamic-pipeline', token=self._token)
            repo = Repository(tmp_dir, clone_from=f'{USER}/test-dynamic-pipeline', token=self._token)
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([(x + '\n') for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
            classifier = pipeline('pair-classification', model=model, tokenizer=tokenizer)
            # Drop the in-process registration so saving must serialize the
            # custom pipeline code into the repo.
            del PIPELINE_REGISTRY.supported_tasks['pair-classification']
            classifier.save_pretrained(tmp_dir)
            self.assertDictEqual(classifier.model.config.custom_pipelines, {'pair-classification': {'impl': 'custom_pipeline.PairClassificationPipeline', 'pt': ('AutoModelForSequenceClassification',), 'tf': ()}})
            repo.push_to_hub()
        # Loading remote code without opting in must fail.
        with self.assertRaises(ValueError):
            _ = pipeline(model=f'{USER}/test-dynamic-pipeline')
        new_classifier = pipeline(model=f'{USER}/test-dynamic-pipeline', trust_remote_code=True)
        self.assertEqual(new_classifier.__class__.__name__, 'PairClassificationPipeline')
        results = classifier('I hate you', second_text='I love you')
        new_results = new_classifier('I hate you', second_text='I love you')
        self.assertDictEqual(nested_simplify(results), nested_simplify(new_results))
        # With trust_remote_code=False the stock task class is used instead.
        old_classifier = pipeline('text-classification', model=f'{USER}/test-dynamic-pipeline', trust_remote_code=False)
        self.assertEqual(old_classifier.__class__.__name__, 'TextClassificationPipeline')
        self.assertEqual(old_classifier.task, 'text-classification')
        new_results = old_classifier('I hate you', text_pair='I love you')
        self.assertListEqual(nested_simplify([{'label': results['label'], 'score': results['score']}]), nested_simplify(new_results))
class TestSequenceGenerator(unittest.TestCase):
    """Tests for fairseq-style SequenceGenerator beam search.

    Each test asserts the exact token sequence and per-position
    probabilities of the top-2 hypotheses for a fixed toy model and
    dictionary built by test_utils.sequence_generator_setup().
    """

    def setUp(self):
        # w1/w2 are the two non-EOS vocabulary items of the toy setup.
        (self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model) = test_utils.sequence_generator_setup()
        self.sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}}

    def test_with_normalization(self):
        # Default scoring: scores normalized by hypothesis length.
        generator = SequenceGenerator(self.tgt_dict, beam_size=2)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])

    def test_without_normalization(self):
        # Unnormalized scores flip the ranking for the second sentence.
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, normalize_scores=False)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)

    def test_with_lenpen_favoring_short_hypos(self):
        # Length penalty < 1 rewards shorter hypotheses.
        lenpen = 0.6
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)

    def test_with_lenpen_favoring_long_hypos(self):
        # Length penalty > 1 rewards longer hypotheses.
        lenpen = 5.0
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[0][1], [w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)

    def test_maxlen(self):
        # Generation is truncated at max_len_b=2 tokens (plus EOS).
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, max_len_b=2)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
        self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])

    def test_no_stop_early(self):
        # stop_early=False keeps searching and can surface longer hypotheses.
        generator = SequenceGenerator(self.tgt_dict, stop_early=False, beam_size=2)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
        self.assertHypoTokens(hypos[1][0], [w2, w2, w2, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.3, 0.9, 0.99, 0.4, 1.0])
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0])

    def assertHypoTokens(self, hypo, tokens):
        # Compare the hypothesis token tensor against the expected sequence.
        self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))

    def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
        """Check positional log-probs and the (optionally length-normalized)
        aggregate score of a hypothesis."""
        pos_scores = torch.FloatTensor(pos_probs).log()
        self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
        score = pos_scores.sum()
        if normalized:
            # Length normalization with the generator's length penalty.
            score /= (pos_scores.numel() ** lenpen)
        self.assertLess(abs((score - hypo['score'])), 1e-06)

    def assertAlmostEqual(self, t1, t2):
        # Overrides unittest's scalar version with an elementwise tensor check.
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess((t1 - t2).abs().max(), 0.0001)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertEqual(t1.ne(t2).long().sum(), 0)
def get_option_setter(distiller_name):
    """Look up the named distiller class and return its
    modify_commandline_options hook (unbound, for the option parser)."""
    klass = find_distiller_using_name(distiller_name)
    return klass.modify_commandline_options
# NOTE(review): the bare `_optimizer('adamax')` looks like a mangled
# `@register_optimizer('adamax')` decorator — confirm against upstream fairseq.
_optimizer('adamax')
class FairseqAdamax(FairseqOptimizer):
    """Fairseq wrapper around the Adamax optimizer."""

    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = Adamax(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the argument parser.

        BUG FIX: declared @staticmethod — fairseq calls this on the class
        without an instance.
        """
        parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer')
        parser.add_argument('--adamax-eps', type=float, default=1e-08, metavar='D', help='epsilon for Adam optimizer')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
        parser.add_argument('--no-bias-correction', default=False, action='store_true', help='disable bias correction')

    @property
    def optimizer_config(self):
        """Keyword arguments for the underlying Adamax constructor.

        BUG FIX: declared @property so `**self.optimizer_config` in
        __init__ expands a dict rather than a bound method.
        """
        # NOTE(review): eval() of the betas string mirrors upstream fairseq
        # but is unsafe on untrusted argv.
        return {'lr': self.args.lr[0], 'betas': eval(self.args.adamax_betas), 'eps': self.args.adamax_eps, 'weight_decay': self.args.weight_decay, 'bias_correction': (not self.args.no_bias_correction)}
def backup(src_folder, backup_files, backup_folder):
    """Move *backup_files* out of *src_folder* into *backup_folder*.

    Args:
        src_folder: directory containing the files to back up.
        backup_files: iterable of file names (relative to src_folder);
            falsy/empty means nothing to do.
        backup_folder: destination directory, created if missing; may be
            relative (resolved inside src_folder because of the chdir).

    Side effects: changes the process-wide working directory to
    src_folder, as the original implementation did.
    """
    if not backup_files:
        print('No backup required for', src_folder)
        return
    os.chdir(src_folder)
    # exist_ok=True replaces the racy exists()-then-makedirs check.
    os.makedirs(backup_folder, exist_ok=True)
    for file_name in backup_files:
        # os.path.join instead of manual '/' concatenation (portable).
        os.rename(file_name, os.path.join(backup_folder, file_name))
def collate_int_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]]=None):
    """Collate a batch of Python ints into a 1-D tensor.

    `collate_fn_map` is part of the default_collate dispatch signature and
    is intentionally unused here.
    """
    return torch.tensor(batch)
class PegBoxEnv(BaseEnv, utils.EzPickle):
    """MuJoCo peg-in-box manipulation environment.

    The agent must move the grasped object ('object0') to a goal defined
    relative to a box hole ('box_hole'). All site/joint names refer to the
    project XML assets — NOTE(review): shape/semantics claims below are
    limited to what this file shows.
    """

    def __init__(self, xml_path, cameras, n_substeps=20, observation_type='image', reward_type='dense', image_size=84, use_xyz=False, render=False):
        # Multiplier widening the goal sampling range (used in _sample_goal).
        self.sample_large = 1
        BaseEnv.__init__(self, get_full_asset_path(xml_path), n_substeps=n_substeps, observation_type=observation_type, reward_type=reward_type, image_size=image_size, reset_free=False, cameras=cameras, render=render, use_xyz=use_xyz, has_object=True)
        # The state vector is longer when z-coordinates are included.
        self.state_dim = ((26,) if self.use_xyz else (20,))
        self.distance_threshold = 0.08
        utils.EzPickle.__init__(self)

    def compute_reward(self, achieved_goal, goal, info):
        """Dense shaped reward: penalize control effort and xy distance to
        the goal; add a bonus term once within 5 cm in the xy-plane."""
        object_pos = self.sim.data.get_site_xpos('object0').copy()
        goal_pos = goal.copy()
        d_obj_goal_xy = self.goal_distance(object_pos, goal_pos, use_xyz=False)
        d_obj_goal_xyz = self.goal_distance(object_pos, goal_pos, use_xyz=True)
        # NOTE(review): obj_z is computed but never used below — dead value?
        obj_z = (object_pos[2] - self.center_of_table.copy()[2])
        reward = ((- 1) * np.square(self._pos_ctrl_magnitude))
        reward += ((- 4) * d_obj_goal_xy)
        if (d_obj_goal_xy <= 0.05):
            reward += (10 - (20 * d_obj_goal_xyz))
        return reward

    def _get_state_obs(self):
        """Assemble the proprioceptive/state observation vector."""
        cot_pos = self.center_of_table.copy()
        # Convert per-step velocities using the effective simulation dt.
        dt = (self.sim.nsubsteps * self.sim.model.opt.timestep)
        eef_pos = self.sim.data.get_site_xpos('grasp')
        eef_velp = (self.sim.data.get_site_xvelp('grasp') * dt)
        goal_pos = self.goal
        gripper_angle = self.sim.data.get_joint_qpos('right_outer_knuckle_joint')
        obj_pos = self.sim.data.get_site_xpos('object0')
        # Last 4 qpos entries of a free joint are its quaternion.
        obj_rot = self.sim.data.get_joint_qpos('object0:joint')[(- 4):]
        obj_velp = (self.sim.data.get_site_xvelp('object0') * dt)
        obj_velr = (self.sim.data.get_site_xvelr('object0') * dt)
        if (not self.use_xyz):
            # Planar variant: keep only the xy components.
            eef_pos = eef_pos[:2]
            eef_velp = eef_velp[:2]
            goal_pos = goal_pos[:2]
            obj_pos = obj_pos[:2]
            obj_velp = obj_velp[:2]
            obj_velr = obj_velr[:2]
        values = np.array([self.goal_distance(eef_pos, goal_pos, self.use_xyz), self.goal_distance(obj_pos, goal_pos, self.use_xyz), self.goal_distance(eef_pos, obj_pos, self.use_xyz), gripper_angle])
        return np.concatenate([eef_pos, eef_velp, goal_pos, obj_pos, obj_rot, obj_velp, obj_velr, values], axis=0)

    def _reset_sim(self):
        # No env-specific reset logic beyond the base class.
        return BaseEnv._reset_sim(self)

    def _get_achieved_goal(self):
        """Current object position is the achieved goal."""
        return np.squeeze(self.sim.data.get_site_xpos('object0').copy())

    def _sample_object_pos(self):
        """Place the object just below the gripper target (pre-grasped)."""
        object_qpos = self.sim.data.get_joint_qpos('object0:joint')
        object_quat = object_qpos[(- 4):]
        object_qpos[0:3] = self.gripper_target[0:3]
        # 8 cm below the gripper target.
        object_qpos[2] += (- 0.08)
        object_qpos[(- 4):] = object_quat.copy()
        self.sim.data.set_joint_qpos('object0:joint', object_qpos)

    def _sample_goal(self, new=True):
        """Sample (or reuse) the box-hole location and derive the goal,
        offset slightly from the hole center."""
        object_qpos = self.sim.data.get_joint_qpos('box_hole:joint')
        object_quat = object_qpos[(- 4):]
        if new:
            goal = np.array([1.605, 0.18, 0.58])
            # Randomize within a range widened by sample_large.
            goal[0] += self.np_random.uniform(((- 0.05) - (0.05 * self.sample_large)), (0.05 + (0.05 * self.sample_large)), size=1)
            goal[1] += self.np_random.uniform(((- 0.1) - (0.1 * self.sample_large)), (0.1 + (0.1 * self.sample_large)), size=1)
        else:
            goal = object_qpos[:3].copy()
        object_qpos[:3] = goal[:3].copy()
        object_qpos[(- 4):] = object_quat
        self.sim.data.set_joint_qpos('box_hole:joint', object_qpos)
        # Fixed offsets from the hole center to the actual goal point.
        goal[1] += 0.075
        goal[2] -= 0.035
        self.lift_height = 0.15
        return BaseEnv._sample_goal(self, goal)

    def _sample_initial_pos(self):
        """Sample a randomized initial gripper position (z randomized only
        in the xyz variant) and hand it to the base class."""
        gripper_target = np.array([1.2561169, 0.3, 0.])
        gripper_target[0] += self.np_random.uniform((- 0.05), 0.1, size=1)
        gripper_target[1] += self.np_random.uniform((- 0.1), 0.1, size=1)
        gripper_target[2] += 0.1
        if self.use_xyz:
            gripper_target[2] += self.np_random.uniform((- 0.05), 0.05, size=1)
        self.gripper_target = gripper_target
        BaseEnv._sample_initial_pos(self, gripper_target)
class OSBlockINin(nn.Module):
    """Omni-scale residual block whose output passes through instance norm.

    A 1x1 bottleneck feeds T parallel light-conv streams (scales 1..T);
    their channel-gated outputs are summed, projected back up by a linear
    1x1 conv, instance-normalized, and added to the (possibly projected)
    identity before the final ReLU.
    """

    def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
        super(OSBlockINin, self).__init__()
        assert T >= 1
        assert out_channels >= reduction and out_channels % reduction == 0
        mid_channels = out_channels // reduction
        self.conv1 = Conv1x1(in_channels, mid_channels)
        # One light-conv stream per scale t = 1..T.
        self.conv2 = nn.ModuleList([LightConvStream(mid_channels, mid_channels, t) for t in range(1, T + 1)])
        self.gate = ChannelGate(mid_channels)
        # bn=False: instance norm is applied afterwards in forward().
        self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn=False)
        self.downsample = Conv1x1Linear(in_channels, out_channels) if in_channels != out_channels else None
        self.IN = nn.InstanceNorm2d(out_channels, affine=True)

    def forward(self, x):
        identity = x
        bottleneck = self.conv1(x)
        # Gated aggregation over all scales (starts from 0, as before).
        aggregated = sum(self.gate(stream(bottleneck)) for stream in self.conv2)
        out = self.IN(self.conv3(aggregated))
        if self.downsample is not None:
            identity = self.downsample(identity)
        return F.relu(out + identity)
def load_json(file):
    """Parse the JSON5 document at path *file* and return the decoded object."""
    with open(file) as handle:
        return json5.load(handle)
class UpDownCore(att_model.UpDownCore):
    """Up-Down captioning core with prunable (masked) LSTM cells.

    Replaces the parent's LSTM cells with MaskedLSTMCell variants driven by
    the config's pruning settings.
    """

    def __init__(self, config, use_maxout=False):
        # Deliberately calls nn.Module.__init__ directly, skipping
        # att_model.UpDownCore.__init__ — presumably to avoid building the
        # parent's unmasked cells; confirm against att_model.
        nn.Module.__init__(self)
        self.config = config
        self.drop_prob_lm = config.drop_prob_lm
        # Pruning configuration shared by both masked LSTM cells.
        mask_params = {'mask_type': self.config.prune_type, 'mask_init_value': self.config.prune_supermask_init}
        # Attention LSTM consumes word embedding + mean visual feature + lang-LSTM state
        # (input_encoding_size + 2 * rnn_size inputs) — inferred from sizes; confirm.
        self.att_lstm = MaskedLSTMCell((config.input_encoding_size + (config.rnn_size * 2)), config.rnn_size, **mask_params)
        self.lang_lstm = MaskedLSTMCell((config.rnn_size * 2), config.rnn_size, **mask_params)
        self.attention = Attention(config)
def test_compat_loader_args():
    """compat_loader_args must migrate legacy per-GPU data settings into
    explicit *_dataloader sections, and reject configs that mix both."""
    # Empty legacy config: the three dataloader sections are still created.
    cfg = ConfigDict(dict(data=dict(val=dict(), test=dict(), train=dict())))
    cfg = compat_loader_args(cfg)
    assert ('val_dataloader' in cfg.data)
    assert ('train_dataloader' in cfg.data)
    assert ('test_dataloader' in cfg.data)
    # Global settings propagate to each loader; per-split values override.
    cfg = ConfigDict(dict(data=dict(samples_per_gpu=1, persistent_workers=True, workers_per_gpu=1, val=dict(samples_per_gpu=3), test=dict(samples_per_gpu=2), train=dict())))
    cfg = compat_loader_args(cfg)
    assert (cfg.data.train_dataloader.workers_per_gpu == 1)
    assert (cfg.data.train_dataloader.samples_per_gpu == 1)
    assert cfg.data.train_dataloader.persistent_workers
    assert (cfg.data.val_dataloader.workers_per_gpu == 1)
    assert (cfg.data.val_dataloader.samples_per_gpu == 3)
    assert (cfg.data.test_dataloader.workers_per_gpu == 1)
    assert (cfg.data.test_dataloader.samples_per_gpu == 2)
    # Multiple test datasets: expects samples_per_gpu 3 from [2, 3] —
    # presumably the maximum (or last) value wins; confirm in compat_loader_args.
    cfg = ConfigDict(dict(data=dict(samples_per_gpu=1, persistent_workers=True, workers_per_gpu=1, val=dict(samples_per_gpu=3), test=[dict(samples_per_gpu=2), dict(samples_per_gpu=3)], train=dict())))
    cfg = compat_loader_args(cfg)
    assert (cfg.data.test_dataloader.samples_per_gpu == 3)
    # Mixing legacy keys with an explicit train_dataloader must raise.
    cfg = ConfigDict(dict(data=dict(samples_per_gpu=1, persistent_workers=True, workers_per_gpu=1, val=dict(samples_per_gpu=3), test=dict(samples_per_gpu=2), train=dict(), train_dataloader=dict(samples_per_gpu=2))))
    with pytest.raises(AssertionError):
        compat_loader_args(cfg)
    # Same for an explicit val_dataloader.
    cfg = ConfigDict(dict(data=dict(samples_per_gpu=1, persistent_workers=True, workers_per_gpu=1, val=dict(samples_per_gpu=3), test=dict(samples_per_gpu=2), train=dict(), val_dataloader=dict(samples_per_gpu=2))))
    with pytest.raises(AssertionError):
        compat_loader_args(cfg)
    # Same for an explicit test_dataloader.
    cfg = ConfigDict(dict(data=dict(samples_per_gpu=1, persistent_workers=True, workers_per_gpu=1, val=dict(samples_per_gpu=3), test=dict(samples_per_gpu=2), test_dataloader=dict(samples_per_gpu=2))))
    with pytest.raises(AssertionError):
        compat_loader_args(cfg)
class ResNet_MPNCOV(nn.Module):
    """ResNet backbone ending in MPN-COV (matrix power normalized
    covariance) pooling instead of global average pooling.

    After layer4, channels are reduced to 256, a covariance matrix is
    pooled (CovpoolLayer), matrix-square-root normalized (SqrtmLayer), and
    its upper triangle vectorized (TriuvecLayer) before the classifier.
    NOTE(review): fc input 32896 = 256*257/2 is consistent with the upper
    triangle of a 256x256 covariance — confirm against the pooling layers.
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet_MPNCOV, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        # stride=1 (standard ResNet uses 2): keeps a larger feature map for
        # covariance pooling — presumably intentional for MPN-COV; confirm.
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilate=replace_stride_with_dilation[2])
        # 1x1 channel reduction to 256 before covariance pooling.
        self.Layer_Reduce = nn.Conv2d((512 * block.expansion), 256, kernel_size=1, stride=1, padding=0, bias=False)
        self.Layer_Reduce_BN = nn.BatchNorm2d(256)
        self.Layer_Reduce_ReLU = nn.ReLU(inplace=True)
        self.fc = nn.Linear(32896, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero-init the last BN in each residual branch so blocks start
            # as identity mappings.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack `blocks` residual blocks; the first may downsample."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation (torchvision-style).
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.Layer_Reduce(x)
        x = self.Layer_Reduce_BN(x)
        x = self.Layer_Reduce_ReLU(x)
        # Second-order (covariance) pooling pipeline.
        x = CovpoolLayer(x)
        x = SqrtmLayer(x, 5)
        x = TriuvecLayer(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
class ConjugateConstraintOptimizer(Serializable):
def __init__(self, cg_iters=10, verbose_cg=False, resample_inputs=False, reg_coeff=1e-05, subsample_factor=1.0, backtrack_ratio=0.8, max_backtracks=15, accept_violation=False, hvp_approach=None, num_slices=1, linesearch_infeasible_recovery=True):
Serializable.quick_init(self, locals())
self._cg_iters = cg_iters
self._verbose_cg = verbose_cg
self._resample_inputs = resample_inputs
self._reg_coeff = reg_coeff
self._subsample_factor = subsample_factor
self._backtrack_ratio = backtrack_ratio
self._max_backtracks = max_backtracks
self._num_slices = num_slices
self._linesearch_infeasible_recovery = linesearch_infeasible_recovery
self._opt_fun = None
self._target = None
self._max_constraint_val = None
self._constraint_name = None
self._accept_violation = accept_violation
if (hvp_approach is None):
hvp_approach = PerlmutterHvp(num_slices)
self._hvp_approach = hvp_approach
def update_opt(self, loss, target, quad_leq_constraint, lin_leq_constraint, inputs, extra_inputs=None, constraint_name_1='quad_constraint', constraint_name_2='lin_constraint', using_surrogate=False, true_linear_leq_constraint=None, precompute=False, attempt_feasible_recovery=False, attempt_infeasible_recovery=False, revert_to_last_safe_point=False, *args, **kwargs):
self.precompute = precompute
self.attempt_feasible_recovery = attempt_feasible_recovery
self.attempt_infeasible_recovery = attempt_infeasible_recovery
self.revert_to_last_safe_point = revert_to_last_safe_point
inputs = tuple(inputs)
if (extra_inputs is None):
extra_inputs = tuple()
else:
extra_inputs = tuple(extra_inputs)
(constraint_term_1, constraint_value_1) = quad_leq_constraint
(constraint_term_2, constraint_value_2) = lin_leq_constraint
params = target.get_params(trainable=True)
grads = theano.grad(loss, wrt=params, disconnected_inputs='warn')
flat_grad = ext.flatten_tensor_variables(grads)
lin_constraint_grads = theano.grad(constraint_term_2, wrt=params, disconnected_inputs='warn')
flat_lin_constraint_grad = ext.flatten_tensor_variables(lin_constraint_grads)
if (using_surrogate and (not precompute)):
constraint_term_2 = true_linear_leq_constraint
self._hvp_approach.update_opt(f=constraint_term_1, target=target, inputs=(inputs + extra_inputs), reg_coeff=self._reg_coeff)
self._target = target
self._max_quad_constraint_val = constraint_value_1
self._max_lin_constraint_val = constraint_value_2
self._constraint_name_1 = constraint_name_1
self._constraint_name_2 = constraint_name_2
self._opt_fun = ext.lazydict(f_loss=(lambda : ext.compile_function(inputs=(inputs + extra_inputs), outputs=loss, log_name='f_loss')), f_grad=(lambda : ext.compile_function(inputs=(inputs + extra_inputs), outputs=flat_grad, log_name='f_grad')), f_quad_constraint=(lambda : ext.compile_function(inputs=(inputs + extra_inputs), outputs=constraint_term_1, log_name='quad_constraint')), f_lin_constraint=(lambda : ext.compile_function(inputs=(inputs + extra_inputs), outputs=constraint_term_2, log_name='lin_constraint')), f_lin_constraint_grad=(lambda : ext.compile_function(inputs=(inputs + extra_inputs), outputs=flat_lin_constraint_grad, log_name='lin_constraint_grad')), f_loss_constraint=(lambda : ext.compile_function(inputs=(inputs + extra_inputs), outputs=[loss, constraint_term_1, constraint_term_2], log_name='f_loss_constraint')))
self.last_safe_point = None
self._last_lin_pred_S = 0
self._last_surr_pred_S = 0
def loss(self, inputs, extra_inputs=None):
inputs = tuple(inputs)
if (extra_inputs is None):
extra_inputs = tuple()
return sliced_fun(self._opt_fun['f_loss'], self._num_slices)(inputs, extra_inputs)
def constraint_val(self, inputs, extra_inputs=None):
inputs = tuple(inputs)
if (extra_inputs is None):
extra_inputs = tuple()
return sliced_fun(self._opt_fun['f_constraint'], self._num_slices)(inputs, extra_inputs)
def optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None, precomputed_eval=None, precomputed_threshold=None, diff_threshold=False, inputs2=None, extra_inputs2=None):
    """Take one CPO-style step: maximize the surrogate loss subject to a
    quadratic (KL trust-region) constraint and a linear (safety) constraint.

    Solves the dual analytically for Lagrange multipliers (lam, nu), builds the
    descent direction from two conjugate-gradient solves, then backtracking
    line-searches against loss / quad / lin acceptance tests. Recovery modes
    handle (in)feasible safety situations.

    Bug fixes vs. original:
    - the lin-constraint value was fetched with the nonexistent dict key
      'lin_constraint' (correct key: 'f_lin_constraint'), raising KeyError;
    - `if self.last_safe_point:` on a multi-element ndarray raises ValueError;
      replaced with an explicit `is not None` check.
    """
    inputs = tuple(inputs)
    if extra_inputs is None:
        extra_inputs = tuple()
    if inputs2 is None:
        inputs2 = inputs
    if extra_inputs2 is None:
        extra_inputs2 = tuple()

    def subsampled_inputs(inputs, subsample_grouped_inputs):
        # Optionally subsample each input group to cut the cost of HVPs.
        if self._subsample_factor < 1:
            if subsample_grouped_inputs is None:
                subsample_grouped_inputs = [inputs]
            subsample_inputs = tuple()
            for inputs_grouped in subsample_grouped_inputs:
                n_samples = len(inputs_grouped[0])
                inds = np.random.choice(n_samples, int(n_samples * self._subsample_factor), replace=False)
                subsample_inputs += tuple([x[inds] for x in inputs_grouped])
        else:
            subsample_inputs = inputs
        return subsample_inputs

    subsample_inputs = subsampled_inputs(inputs, subsample_grouped_inputs)
    if self._resample_inputs:
        subsample_inputs2 = subsampled_inputs(inputs, subsample_grouped_inputs)
    logger.log('computing loss before')
    loss_before = sliced_fun(self._opt_fun['f_loss'], self._num_slices)(inputs, extra_inputs)
    logger.log('performing update')
    logger.log('computing descent direction')
    flat_g = sliced_fun(self._opt_fun['f_grad'], self._num_slices)(inputs, extra_inputs)
    flat_b = sliced_fun(self._opt_fun['f_lin_constraint_grad'], self._num_slices)(inputs2, extra_inputs2)
    # v = H^-1 g via CG; q = g^T H^-1 g (curvature-weighted gradient norm).
    Hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)
    v = krylov.cg(Hx, flat_g, cg_iters=self._cg_iters, verbose=self._verbose_cg)
    approx_g = Hx(v)
    q = v.dot(approx_g)
    delta = 2 * self._max_quad_constraint_val
    eps = 1e-08
    residual = np.sqrt((approx_g - flat_g).dot(approx_g - flat_g))
    rescale = q / v.dot(v)
    logger.record_tabular('OptimDiagnostic_Residual', residual)
    logger.record_tabular('OptimDiagnostic_Rescale', rescale)
    if self.precompute:
        S = precomputed_eval
        assert np.ndim(S) == 0
    else:
        # BUG FIX: key is 'f_lin_constraint'; 'lin_constraint' raised KeyError.
        S = sliced_fun(self._opt_fun['f_lin_constraint'], self._num_slices)(inputs, extra_inputs)
    c = S - self._max_lin_constraint_val
    if c > 0:
        logger.log('warning! safety constraint is already violated')
    else:
        # Remember the last parameters that satisfied the safety constraint.
        self.last_safe_point = np.copy(self._target.get_param_values(trainable=True))
    stop_flag = False
    if flat_b.dot(flat_b) <= eps:
        # Safety gradient is ~0: plain trust-region (TRPO-like) step.
        lam = np.sqrt(q / delta)
        nu = 0
        w = 0
        (r, s, A, B) = (0, 0, 0, 0)
        optim_case = 4
    else:
        if self._resample_inputs:
            Hx = self._hvp_approach.build_eval(subsample_inputs2 + extra_inputs)
        # w = H^-1 b (scaled), and the dual quantities r, s, A, B.
        norm_b = np.sqrt(flat_b.dot(flat_b))
        unit_b = flat_b / norm_b
        w = norm_b * krylov.cg(Hx, unit_b, cg_iters=self._cg_iters, verbose=self._verbose_cg)
        r = w.dot(approx_g)
        s = w.dot(Hx(w))
        A = q - ((r ** 2) / s)
        B = delta - ((c ** 2) / s)
        if (c < 0) and (B < 0):
            # Trust region entirely inside the safe half-space.
            optim_case = 3
        elif (c < 0) and (B > 0):
            optim_case = 2
        elif (c > 0) and (B > 0):
            optim_case = 1
            if self.attempt_feasible_recovery:
                logger.log('alert! conjugate constraint optimizer is attempting feasible recovery')
            else:
                logger.log('alert! problem is feasible but needs recovery, and we were instructed not to attempt recovery')
                stop_flag = True
        else:
            optim_case = 0
            if self.attempt_infeasible_recovery:
                logger.log('alert! conjugate constraint optimizer is attempting infeasible recovery')
            else:
                logger.log('alert! problem is infeasible, and we were instructed not to attempt recovery')
                stop_flag = True
        lam = np.sqrt(q / delta)
        nu = 0
    if (optim_case == 2) or (optim_case == 1):
        # Piecewise dual in lam: pick the better of the two branch optima.
        lam_mid = r / c
        L_mid = (-0.5) * ((q / lam_mid) + (lam_mid * delta))
        lam_a = np.sqrt(A / (B + eps))
        L_a = (-np.sqrt(A * B)) - ((r * c) / (s + eps))
        lam_b = np.sqrt(q / delta)
        L_b = -np.sqrt(q * delta)
        if lam_mid > 0:
            if c < 0:
                # Clip each branch optimum to its feasible interval.
                if lam_a > lam_mid:
                    lam_a = lam_mid
                    L_a = L_mid
                if lam_b < lam_mid:
                    lam_b = lam_mid
                    L_b = L_mid
            else:
                if lam_a < lam_mid:
                    lam_a = lam_mid
                    L_a = L_mid
                if lam_b > lam_mid:
                    lam_b = lam_mid
                    L_b = L_mid
            if L_a >= L_b:
                lam = lam_a
            else:
                lam = lam_b
        elif c < 0:
            lam = lam_b
        else:
            lam = lam_a
        nu = max(0, ((lam * c) - r)) / (s + eps)
    logger.record_tabular('OptimCase', optim_case)
    logger.record_tabular('LagrangeLamda', lam)
    logger.record_tabular('LagrangeNu', nu)
    logger.record_tabular('OptimDiagnostic_q', q)
    logger.record_tabular('OptimDiagnostic_r', r)
    logger.record_tabular('OptimDiagnostic_s', s)
    logger.record_tabular('OptimDiagnostic_c', c)
    logger.record_tabular('OptimDiagnostic_A', A)
    logger.record_tabular('OptimDiagnostic_B', B)
    logger.record_tabular('OptimDiagnostic_S', S)
    if nu == 0:
        logger.log('safety constraint is not active!')
    # Worst-case next safety value reachable within the trust region.
    nextS = S + np.sqrt(delta * s)
    logger.record_tabular('OptimDiagnostic_WorstNextS', nextS)

    def record_zeros():
        # Keep the tabular schema consistent on early-exit paths.
        logger.record_tabular('BacktrackIters', 0)
        logger.record_tabular('LossRejects', 0)
        logger.record_tabular('QuadRejects', 0)
        logger.record_tabular('LinRejects', 0)

    if optim_case > 0:
        flat_descent_step = (1.0 / (lam + eps)) * (v + (nu * w))
    else:
        # Infeasible: pure safety-gradient step scaled to the trust region.
        flat_descent_step = np.sqrt(delta / (s + eps)) * w
    logger.log('descent direction computed')
    prev_param = np.copy(self._target.get_param_values(trainable=True))
    prev_lin_constraint_val = sliced_fun(self._opt_fun['f_lin_constraint'], self._num_slices)(inputs, extra_inputs)
    logger.record_tabular('PrevLinConstVal', prev_lin_constraint_val)
    lin_reject_threshold = self._max_lin_constraint_val
    if precomputed_threshold is not None:
        lin_reject_threshold = precomputed_threshold
    if diff_threshold:
        # Threshold is interpreted as a delta on top of the previous value.
        lin_reject_threshold += prev_lin_constraint_val
    logger.record_tabular('LinRejectThreshold', lin_reject_threshold)

    def check_nan():
        # Roll back to prev_param if the un-line-searched step produced NaNs.
        (loss, quad_constraint_val, lin_constraint_val) = sliced_fun(self._opt_fun['f_loss_constraint'], self._num_slices)(inputs, extra_inputs)
        if np.isnan(loss) or np.isnan(quad_constraint_val) or np.isnan(lin_constraint_val):
            logger.log('Something is NaN. Rejecting the step!')
            if np.isnan(loss):
                logger.log('Violated because loss is NaN')
            if np.isnan(quad_constraint_val):
                logger.log(('Violated because quad_constraint %s is NaN' % self._constraint_name_1))
            if np.isnan(lin_constraint_val):
                logger.log(('Violated because lin_constraint %s is NaN' % self._constraint_name_2))
            self._target.set_param_values(prev_param, trainable=True)

    def line_search(check_loss=True, check_quad=True, check_lin=True):
        # Backtracking search; each check_* flag enables one acceptance test.
        loss_rejects = 0
        quad_rejects = 0
        lin_rejects = 0
        n_iter = 0
        for (n_iter, ratio) in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):
            cur_step = ratio * flat_descent_step
            cur_param = prev_param - cur_step
            self._target.set_param_values(cur_param, trainable=True)
            (loss, quad_constraint_val, lin_constraint_val) = sliced_fun(self._opt_fun['f_loss_constraint'], self._num_slices)(inputs, extra_inputs)
            loss_flag = loss < loss_before
            quad_flag = quad_constraint_val <= self._max_quad_constraint_val
            lin_flag = lin_constraint_val <= lin_reject_threshold
            if check_loss and (not loss_flag):
                logger.log(('At backtrack itr %i, loss failed to improve.' % n_iter))
                loss_rejects += 1
            if check_quad and (not quad_flag):
                logger.log(('At backtrack itr %i, quad constraint violated.' % n_iter))
                logger.log(('Quad constraint violation was %.3f %%.' % ((100 * (quad_constraint_val / self._max_quad_constraint_val)) - 100)))
                quad_rejects += 1
            if check_lin and (not lin_flag):
                logger.log(('At backtrack itr %i, expression for lin constraint failed to improve.' % n_iter))
                logger.log(('Lin constraint violation was %.3f %%.' % ((100 * (lin_constraint_val / lin_reject_threshold)) - 100)))
                lin_rejects += 1
            if (loss_flag or (not check_loss)) and (quad_flag or (not check_quad)) and (lin_flag or (not check_lin)):
                logger.log(('Accepted step at backtrack itr %i.' % n_iter))
                break
        logger.record_tabular('BacktrackIters', n_iter)
        logger.record_tabular('LossRejects', loss_rejects)
        logger.record_tabular('QuadRejects', quad_rejects)
        logger.record_tabular('LinRejects', lin_rejects)
        return (loss, quad_constraint_val, lin_constraint_val, n_iter)

    def wrap_up():
        # Diagnostics comparing linear vs surrogate prediction of next S.
        if optim_case < 4:
            lin_constraint_val = sliced_fun(self._opt_fun['f_lin_constraint'], self._num_slices)(inputs, extra_inputs)
            lin_constraint_delta = lin_constraint_val - prev_lin_constraint_val
            logger.record_tabular('LinConstraintDelta', lin_constraint_delta)
            cur_param = self._target.get_param_values()
            next_linear_S = S + flat_b.dot(cur_param - prev_param)
            next_surrogate_S = S + lin_constraint_delta
            lin_surrogate_acc = (100.0 * (next_linear_S - next_surrogate_S)) / next_surrogate_S
            logger.record_tabular('PredictedLinearS', next_linear_S)
            logger.record_tabular('PredictedSurrogateS', next_surrogate_S)
            logger.record_tabular('LinearSurrogateErr', lin_surrogate_acc)
            lin_pred_err = self._last_lin_pred_S - S
            surr_pred_err = self._last_surr_pred_S - S
            logger.record_tabular('PredictionErrorLinearS', lin_pred_err)
            logger.record_tabular('PredictionErrorSurrogateS', surr_pred_err)
            self._last_lin_pred_S = next_linear_S
            self._last_surr_pred_S = next_surrogate_S
        else:
            logger.record_tabular('LinConstraintDelta', 0)
            logger.record_tabular('PredictedLinearS', 0)
            logger.record_tabular('PredictedSurrogateS', 0)
            logger.record_tabular('LinearSurrogateErr', 0)
            lin_pred_err = self._last_lin_pred_S - 0
            surr_pred_err = self._last_surr_pred_S - 0
            logger.record_tabular('PredictionErrorLinearS', lin_pred_err)
            logger.record_tabular('PredictionErrorSurrogateS', surr_pred_err)
            self._last_lin_pred_S = 0
            self._last_surr_pred_S = 0

    if stop_flag == True:
        record_zeros()
        wrap_up()
        return
    if (optim_case == 1) and (not self.revert_to_last_safe_point):
        if self._linesearch_infeasible_recovery:
            logger.log('feasible recovery mode: constrained natural gradient step. performing linesearch on constraints.')
            line_search(False, True, True)
        else:
            self._target.set_param_values(prev_param - flat_descent_step, trainable=True)
            logger.log('feasible recovery mode: constrained natural gradient step. no linesearch performed.')
        check_nan()
        record_zeros()
        wrap_up()
        return
    elif (optim_case == 0) and (not self.revert_to_last_safe_point):
        if self._linesearch_infeasible_recovery:
            logger.log('infeasible recovery mode: natural safety step. performing linesearch on constraints.')
            line_search(False, True, True)
        else:
            self._target.set_param_values(prev_param - flat_descent_step, trainable=True)
            logger.log('infeasible recovery mode: natural safety gradient step. no linesearch performed.')
        check_nan()
        record_zeros()
        wrap_up()
        return
    elif ((optim_case == 0) or (optim_case == 1)) and self.revert_to_last_safe_point:
        # BUG FIX: truthiness of a multi-element ndarray raises ValueError.
        if self.last_safe_point is not None:
            self._target.set_param_values(self.last_safe_point, trainable=True)
            logger.log('infeasible recovery mode: reverted to last safe point!')
        else:
            logger.log('alert! infeasible recovery mode failed: no last safe point to revert to.')
        record_zeros()
        wrap_up()
        return
    (loss, quad_constraint_val, lin_constraint_val, n_iter) = line_search()
    if (np.isnan(loss) or np.isnan(quad_constraint_val) or np.isnan(lin_constraint_val) or (loss >= loss_before) or (quad_constraint_val >= self._max_quad_constraint_val) or (lin_constraint_val > lin_reject_threshold)) and (not self._accept_violation):
        logger.log('Line search condition violated. Rejecting the step!')
        if np.isnan(loss):
            logger.log('Violated because loss is NaN')
        if np.isnan(quad_constraint_val):
            logger.log(('Violated because quad_constraint %s is NaN' % self._constraint_name_1))
        if np.isnan(lin_constraint_val):
            logger.log(('Violated because lin_constraint %s is NaN' % self._constraint_name_2))
        if loss >= loss_before:
            logger.log('Violated because loss not improving')
        if quad_constraint_val >= self._max_quad_constraint_val:
            logger.log(('Violated because constraint %s is violated' % self._constraint_name_1))
        if lin_constraint_val > lin_reject_threshold:
            logger.log(('Violated because constraint %s exceeded threshold' % self._constraint_name_2))
        self._target.set_param_values(prev_param, trainable=True)
    logger.log(('backtrack iters: %d' % n_iter))
    logger.log('computing loss after')
    logger.log('optimization finished')
    wrap_up()
def adjust_lr(optimizer, init_lr, epoch, decay_rate=0.1, decay_epoch=5):
    """Apply step decay: lr = init_lr * decay_rate ** (epoch // decay_epoch).

    Bug fix: the original did `param_group['lr'] *= decay`, which compounds
    the decay every time the function is called (and never uses `init_lr`).
    Setting the lr from `init_lr` makes the schedule idempotent per epoch.
    """
    decay = decay_rate ** (epoch // decay_epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = init_lr * decay
def evaluate(sess_config, input_hooks, model, data_init_op, steps, checkpoint_dir):
    """Run `steps` evaluation iterations from the latest checkpoint in
    `checkpoint_dir`, writing a summary event on the final step.

    Args:
        sess_config: tf.ConfigProto for the session.
        input_hooks: SessionRunHooks that feed the input pipeline.
        model: model object exposing `is_training`, `acc_op` and `auc_op`
            (streaming metric update ops).
        data_init_op: initializer op for the eval dataset iterator.
        steps: number of eval batches to run.
        checkpoint_dir: directory to restore from; summaries go to
            `<checkpoint_dir>/eval`.
    """
    model.is_training = False  # switch to inference behaviour before running
    hooks = []
    hooks.extend(input_hooks)
    # Local variables (metric accumulators) and the data iterator must be
    # (re)initialized before streaming metrics are accumulated.
    scaffold = tf.compat.v1.train.Scaffold(local_init_op=tf.group(tf.compat.v1.local_variables_initializer(), data_init_op))
    session_creator = tf.compat.v1.train.ChiefSessionCreator(scaffold=scaffold, checkpoint_dir=checkpoint_dir, config=sess_config)
    writer = tf.compat.v1.summary.FileWriter(os.path.join(checkpoint_dir, 'eval'))
    merged = tf.compat.v1.summary.merge_all()
    print(merged)
    with tf.compat.v1.train.MonitoredSession(session_creator=session_creator, hooks=hooks) as sess:
        for step in range(1, (steps + 1)):
            if (step != steps):
                # Intermediate steps only update the streaming metrics.
                sess.run([model.acc_op, model.auc_op])
                if ((step % 1000) == 0):
                    print('Evaluation complete:[{}/{}]'.format(step, steps))
            else:
                # Final step: also fetch merged summaries and log results.
                (eval_acc, eval_auc, events) = sess.run([model.acc_op, model.auc_op, merged])
                writer.add_summary(events, step)
                print('Evaluation complete:[{}/{}]'.format(step, steps))
                print('ACC = {}\nAUC = {}'.format(eval_acc, eval_auc))
def test_piecewise_schedule():
    """Exercise PiecewiseSchedule: interpolation between endpoints, flat
    segments, a negative segment, and the outside_value fallback."""
    ps = PiecewiseSchedule(
        [(-5, 100), (5, 200), (10, 50), (100, 50), (200, -50)],
        outside_value=500,
    )
    cases = [
        (-10, 500),           # left of the first endpoint -> outside_value
        (0, 150),             # midpoint of (-5,100)-(5,200)
        (5, 200),             # exact endpoint
        (9, 80),              # interpolation on the (5,200)-(10,50) segment
        (50, 50),             # flat segment
        (80, 50),
        (150, 0),             # descending segment toward (200,-50)
        (175, -25),
        (201, 500),           # right of the last endpoint -> outside_value
        (500, 500),
        (200 - 1e-10, -50),   # just inside the last endpoint
    ]
    for t, expected in cases:
        assert np.isclose(ps.value(t), expected)
def compute_similarity_transform(source_points, target_points):
    """Optimally align `source_points` to `target_points` with a similarity
    transform (rotation R, scale s, translation t) in the least-squares sense
    (orthogonal Procrustes / Umeyama).

    Args:
        source_points: (N, 3) array of points to transform.
        target_points: (N, 3) array of reference points.
    Returns:
        (N, 3) array: `s * R @ source + t`, the aligned source points.
    """
    assert source_points.shape[0] == target_points.shape[0]
    assert source_points.shape[1] == 3 and target_points.shape[1] == 3
    # Work with 3xN column-point matrices.
    src = source_points.T
    tgt = target_points.T
    src_mean = src.mean(axis=1, keepdims=True)
    tgt_mean = tgt.mean(axis=1, keepdims=True)
    src_centered = src - src_mean
    tgt_centered = tgt - tgt_mean
    src_var = np.sum(src_centered ** 2)
    # Cross-covariance and its SVD give the optimal rotation.
    cov = src_centered.dot(tgt_centered.T)
    U, _, Vh = np.linalg.svd(cov)
    V = Vh.T
    # Z flips the last axis if needed so that det(R) = +1 (no reflection).
    Z = np.eye(U.shape[0])
    Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
    R = V.dot(Z.dot(U.T))
    scale = np.trace(R.dot(cov)) / src_var
    t = tgt_mean - scale * R.dot(src_mean)
    return (scale * R.dot(src) + t).T
class BitPreTrainedModel(PreTrainedModel):
    """Abstract base wiring BiT models into the HF pretrained-model machinery:
    weight initialization, config class, and gradient-checkpointing toggle."""

    config_class = BitConfig
    base_model_prefix = 'bit'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He (fan-out) init for convolutions; identity affine for norm layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            return
        if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the backbone BitModel supports checkpointing.
        if isinstance(module, BitModel):
            module.gradient_checkpointing = value
def process_checkpoint(in_file, out_file):
    """Strip optimizer state from a checkpoint and publish it as
    `<out_file stem>-<sha256 first 8 hex chars>.pth`.

    Bug fixes vs. original:
    - `out_file.rstrip('.pth')` strips any trailing '.', 'p', 't', 'h'
      characters (e.g. 'depth.pth' -> 'de'), not the suffix; replaced with an
      explicit suffix check.
    - The sha256 is now computed with hashlib instead of shelling out to
      `sha256sum`, and the rename uses os.replace instead of a
      fire-and-forget `mv` subprocess (which could still be running when the
      caller looked for the file).
    """
    import hashlib
    import os
    checkpoint = torch.load(in_file, map_location='cpu')
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    torch.save(checkpoint, out_file)
    # Same hex digest sha256sum would print; keep the first 8 chars.
    with open(out_file, 'rb') as f:
        sha = hashlib.sha256(f.read()).hexdigest()
    stem = out_file[:-len('.pth')] if out_file.endswith('.pth') else out_file
    final_file = stem + '-{}.pth'.format(sha[:8])
    os.replace(out_file, final_file)
def create_model_from_pretrained(model_name: str, pretrained: str, precision: str='fp32', device: Union[(str, torch.device)]='cpu', jit: bool=False, force_quick_gelu: bool=False, force_custom_text: bool=False, return_transform: bool=True, image_mean: Optional[Tuple[(float, ...)]]=None, image_std: Optional[Tuple[(float, ...)]]=None, cache_dir: Optional[str]=None):
    """Build a CLIP model from a pretrained tag or checkpoint path, optionally
    paired with its matching eval-time image preprocessing transform.

    Raises RuntimeError when `pretrained` is neither a known pretrained config
    for `model_name` nor an existing checkpoint file.
    """
    valid_cfg = is_pretrained_cfg(model_name, pretrained) or os.path.exists(pretrained)
    if not valid_cfg:
        raise RuntimeError(f'{pretrained} is not a valid pretrained cfg or checkpoint for {model_name}. Use open_clip.list_pretrained() to find one.')
    model = create_model(model_name, pretrained, precision=precision, device=device, jit=jit, force_quick_gelu=force_quick_gelu, force_custom_text=force_custom_text, cache_dir=cache_dir)
    if not return_transform:
        return model
    # Fall back to the model's own normalization stats when none are given.
    mean = image_mean or getattr(model.visual, 'image_mean', None)
    std = image_std or getattr(model.visual, 'image_std', None)
    preprocess = image_transform(model.visual.image_size, is_train=False, mean=mean, std=std)
    return (model, preprocess)
def validate_flags_or_throw(bert_config):
    """Validate interdependent FLAGS before SQuAD-style training/prediction,
    raising ValueError on the first inconsistency found."""
    tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint)
    if not (FLAGS.do_train or FLAGS.do_predict):
        raise ValueError('At least one of `do_train` or `do_predict` must be True.')
    if FLAGS.do_train and not FLAGS.train_file:
        raise ValueError('If `do_train` is True, then `train_file` must be specified.')
    if FLAGS.do_predict and not FLAGS.predict_file:
        raise ValueError('If `do_predict` is True, then `predict_file` must be specified.')
    if FLAGS.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(('Cannot use sequence length %d because the BERT model was only trained up to sequence length %d' % (FLAGS.max_seq_length, bert_config.max_position_embeddings)))
    # Reserve room for [CLS], [SEP], [SEP] around the query.
    if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
        raise ValueError(('The max_seq_length (%d) must be greater than max_query_length (%d) + 3' % (FLAGS.max_seq_length, FLAGS.max_query_length)))
class Generic_UNet(SegmentationNetwork):
    """Configurable 2D/3D U-Net (nnU-Net style) with optional deep supervision,
    convolutional pooling/upsampling, and per-stage kernel/pool sizes.

    Bug fixes vs. original:
    - `use_this_for_batch_size_computation_2D/_3D` were empty assignments
      (a SyntaxError); restored to the nnU-Net reference values — verify
      against the upstream repo if batch-size estimation is used.
    - `compute_approx_vram_consumption` takes no `self` but was missing
      `@staticmethod`, so calling it on an instance mis-bound the first
      argument; the decorator keeps class-level calls working unchanged.
    """
    DEFAULT_BATCH_SIZE_3D = 2
    DEFAULT_PATCH_SIZE_3D = (64, 192, 160)
    SPACING_FACTOR_BETWEEN_STAGES = 2
    BASE_NUM_FEATURES_3D = 30
    MAX_NUMPOOL_3D = 999
    MAX_NUM_FILTERS_3D = 320
    DEFAULT_PATCH_SIZE_2D = (256, 256)
    BASE_NUM_FEATURES_2D = 30
    DEFAULT_BATCH_SIZE_2D = 50
    MAX_NUMPOOL_2D = 999
    MAX_FILTERS_2D = 480
    # Reference VRAM budgets from the nnU-Net implementation (TODO confirm
    # against upstream if these constants are actually consumed).
    use_this_for_batch_size_computation_2D = 19739648
    use_this_for_batch_size_computation_3D = 520000000

    def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2, feat_map_mul_on_downscale=2, conv_op=nn.Conv2d, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False, final_nonlin=softmax_helper, weightInitializer=InitWeights_He(0.01), pool_op_kernel_sizes=None, conv_kernel_sizes=None, upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False, max_num_features=None):
        super(Generic_UNet, self).__init__()
        self.convolutional_upsampling = convolutional_upsampling
        self.convolutional_pooling = convolutional_pooling
        self.upscale_logits = upscale_logits
        if (nonlin_kwargs is None):
            nonlin_kwargs = {'negative_slope': 0.01, 'inplace': True}
        if (dropout_op_kwargs is None):
            dropout_op_kwargs = {'p': 0.5, 'inplace': True}
        if (norm_op_kwargs is None):
            norm_op_kwargs = {'eps': 1e-05, 'affine': True, 'momentum': 0.1}
        self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}
        self.nonlin = nonlin
        self.nonlin_kwargs = nonlin_kwargs
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.weightInitializer = weightInitializer
        self.conv_op = conv_op
        self.norm_op = norm_op
        self.dropout_op = dropout_op
        self.num_classes = num_classes
        self.final_nonlin = final_nonlin
        self.do_ds = deep_supervision
        # Pick pooling/upsampling/transposed-conv ops matching dimensionality.
        if (conv_op == nn.Conv2d):
            upsample_mode = 'bilinear'
            pool_op = nn.MaxPool2d
            transpconv = nn.ConvTranspose2d
            if (pool_op_kernel_sizes is None):
                pool_op_kernel_sizes = ([(2, 2)] * num_pool)
            if (conv_kernel_sizes is None):
                conv_kernel_sizes = ([(3, 3)] * (num_pool + 1))
        elif (conv_op == nn.Conv3d):
            upsample_mode = 'trilinear'
            pool_op = nn.MaxPool3d
            transpconv = nn.ConvTranspose3d
            if (pool_op_kernel_sizes is None):
                pool_op_kernel_sizes = ([(2, 2, 2)] * num_pool)
            if (conv_kernel_sizes is None):
                conv_kernel_sizes = ([(3, 3, 3)] * (num_pool + 1))
        else:
            raise ValueError(('unknown convolution dimensionality, conv op: %s' % str(conv_op)))
        self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0, dtype=np.int64)
        self.pool_op_kernel_sizes = pool_op_kernel_sizes
        self.conv_kernel_sizes = conv_kernel_sizes
        # 'same' padding: pad 1 for kernel 3, pad 0 for kernel 1.
        self.conv_pad_sizes = []
        for krnl in self.conv_kernel_sizes:
            self.conv_pad_sizes.append([(1 if (i == 3) else 0) for i in krnl])
        if (max_num_features is None):
            if (self.conv_op == nn.Conv3d):
                self.max_num_features = self.MAX_NUM_FILTERS_3D
            else:
                self.max_num_features = self.MAX_FILTERS_2D
        else:
            self.max_num_features = max_num_features
        self.conv_blocks_context = []
        self.conv_blocks_localization = []
        self.td = []
        self.tu = []
        self.seg_outputs = []
        output_features = base_num_features
        input_features = input_channels
        # --- encoder ---
        for d in range(num_pool):
            # With convolutional pooling, downsampling happens via the first
            # conv's stride instead of a separate pooling layer.
            if ((d != 0) and self.convolutional_pooling):
                first_stride = pool_op_kernel_sizes[(d - 1)]
            else:
                first_stride = None
            self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]
            self.conv_kwargs['padding'] = self.conv_pad_sizes[d]
            self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, first_stride))
            if (not self.convolutional_pooling):
                self.td.append(pool_op(pool_op_kernel_sizes[d]))
            input_features = output_features
            output_features = int(np.round((output_features * feat_map_mul_on_downscale)))
            output_features = min(output_features, self.max_num_features)
        # --- bottleneck ---
        if self.convolutional_pooling:
            first_stride = pool_op_kernel_sizes[(- 1)]
        else:
            first_stride = None
        if self.convolutional_upsampling:
            final_num_features = output_features
        else:
            final_num_features = self.conv_blocks_context[(- 1)].output_channels
        self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]
        self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]
        self.conv_blocks_context.append(nn.Sequential(StackedConvLayers(input_features, output_features, (num_conv_per_stage - 1), self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, first_stride), StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs)))
        # Optionally disable dropout in the decoder (restored below).
        if (not dropout_in_localization):
            old_dropout_p = self.dropout_op_kwargs['p']
            self.dropout_op_kwargs['p'] = 0.0
        # --- decoder ---
        for u in range(num_pool):
            nfeatures_from_down = final_num_features
            nfeatures_from_skip = self.conv_blocks_context[(- (2 + u))].output_channels
            n_features_after_tu_and_concat = (nfeatures_from_skip * 2)
            if ((u != (num_pool - 1)) and (not self.convolutional_upsampling)):
                final_num_features = self.conv_blocks_context[(- (3 + u))].output_channels
            else:
                final_num_features = nfeatures_from_skip
            if (not self.convolutional_upsampling):
                self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[(- (u + 1))], mode=upsample_mode))
            else:
                self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[(- (u + 1))], pool_op_kernel_sizes[(- (u + 1))], bias=False))
            self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[(- (u + 1))]
            self.conv_kwargs['padding'] = self.conv_pad_sizes[(- (u + 1))]
            self.conv_blocks_localization.append(nn.Sequential(StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, (num_conv_per_stage - 1), self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs), StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs)))
        # One 1x1 segmentation head per decoder stage (for deep supervision).
        for ds in range(len(self.conv_blocks_localization)):
            self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][(- 1)].output_channels, num_classes, 1, 1, 0, 1, 1, False))
        self.upscale_logits_ops = []
        cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::(- 1)]
        for usl in range((num_pool - 1)):
            if self.upscale_logits:
                self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[(usl + 1)]]), mode=upsample_mode))
            else:
                self.upscale_logits_ops.append((lambda x: x))
        if (not dropout_in_localization):
            self.dropout_op_kwargs['p'] = old_dropout_p
        # Register everything so parameters are tracked by nn.Module.
        self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
        self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
        self.td = nn.ModuleList(self.td)
        self.tu = nn.ModuleList(self.tu)
        self.seg_outputs = nn.ModuleList(self.seg_outputs)
        if self.upscale_logits:
            self.upscale_logits_ops = nn.ModuleList(self.upscale_logits_ops)
        if (self.weightInitializer is not None):
            self.apply(self.weightInitializer)

    def forward(self, x):
        """Encoder-decoder pass; returns a tuple of per-scale outputs when
        deep supervision is enabled, else the full-resolution output only."""
        skips = []
        seg_outputs = []
        for d in range((len(self.conv_blocks_context) - 1)):
            x = self.conv_blocks_context[d](x)
            skips.append(x)
            if (not self.convolutional_pooling):
                x = self.td[d](x)
        x = self.conv_blocks_context[(- 1)](x)
        for u in range(len(self.tu)):
            x = self.tu[u](x)
            x = torch.cat((x, skips[(- (u + 1))]), dim=1)
            x = self.conv_blocks_localization[u](x)
            seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))
        if self.do_ds:
            # Highest resolution first, then progressively coarser outputs.
            return tuple(([seg_outputs[(- 1)]] + [i(j) for (i, j) in zip(list(self.upscale_logits_ops)[::(- 1)], seg_outputs[:(- 1)][::(- 1)])]))
        else:
            return seg_outputs[(- 1)]

    @staticmethod
    def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features, num_modalities, num_classes, pool_op_kernel_sizes):
        """Rough feature-map element count used as a VRAM proxy when choosing
        patch/batch sizes. Counts input, output and per-stage feature maps."""
        if (not isinstance(num_pool_per_axis, np.ndarray)):
            num_pool_per_axis = np.array(num_pool_per_axis)
        npool = len(pool_op_kernel_sizes)
        map_size = np.array(patch_size)
        tmp = np.int64(((((5 * np.prod(map_size, dtype=np.int64)) * base_num_features) + (num_modalities * np.prod(map_size, dtype=np.int64))) + (num_classes * np.prod(map_size, dtype=np.int64))))
        num_feat = base_num_features
        for p in range(npool):
            for pi in range(len(num_pool_per_axis)):
                map_size[pi] /= pool_op_kernel_sizes[p][pi]
            num_feat = min((num_feat * 2), max_num_features)
            # Deeper stages appear in both encoder and decoder (5 blocks),
            # except the bottleneck (2 blocks).
            num_blocks = (5 if (p < (npool - 1)) else 2)
            tmp += ((num_blocks * np.prod(map_size, dtype=np.int64)) * num_feat)
        return tmp
class ActionsAdapter():
    """Bridges IGLU recorded building sessions and the gridworld environment:
    parsing sessions from disk, (de)serialising them to a compressed buffer,
    and rendering them to video."""

    def __init__(self):
        # Renderer/parser are populated elsewhere; only the dataset is eager.
        self.renderer = None
        self.parser = None
        self.dataset = IGLUDataset()

    def action_space(self):
        """Return the vectorized gridworld action space (via a throwaway env)."""
        env = gym.make('IGLUGridworldVector-v0')
        action_space = env.action_space
        del env
        return action_space

    def has_buffer(self):
        """True when the dataset's 'buffer' subdirectory exists and is non-empty."""
        return self._dir_non_emtpy('buffer')

    def _dir_non_emtpy(self, subdir):
        # NOTE(review): name keeps the original 'emtpy' typo; renaming would
        # break any external callers.
        path = pathlib.Path(self.dataset.get_data_path())
        path = (path / subdir)
        return (path.exists() and (len(list(path.glob('*'))) != 0))

    def parse_sessions(self, path, verbose=False):
        """Parse every session directory matching '*-c*/' under `path`.

        Returns a list of parsed game-session objects.
        """
        path = pathlib.Path(path)
        sessions = []
        for sess_dir in tqdm(path.glob('*-c*/'), disable=(not verbose)):
            game_session = self.parse_session(sess_dir.parent, sess_dir.name)
            sessions.append(game_session)
        return sessions

    def save_session(self, session, save_path=None):
        """Pickle+bz2 a session to `<save_path or dataset buffer>/<name>_session.pkl`.

        Returns the session unchanged for chaining.
        """
        if (save_path is None):
            path = pathlib.Path(self.dataset.get_data_path()).parent
            sessions_path = (path / 'buffer')
            sessions_path.mkdir(exist_ok=True)
        else:
            sessions_path = pathlib.Path(save_path)
            sessions_path.mkdir(exist_ok=True)
        with open((sessions_path / f'{session.name}_session.pkl'), 'wb') as f:
            f.write(bz2.compress(pickle.dumps(session)))
        return session

    def load_session(self, session_name, load_path=None):
        """Inverse of save_session: bz2-decompress and unpickle a session.

        When `load_path` is given it must be the full file path; otherwise
        `session_name` is resolved inside the dataset buffer directory.
        NOTE(review): pickle.loads on the buffer files — only load trusted data.
        """
        if (load_path is None):
            path = pathlib.Path(self.dataset.get_data_path()).parent
            session_path = ((path / 'buffer') / session_name)
        else:
            session_path = pathlib.Path(load_path)
        with open(session_path, 'rb') as f:
            compressed_session = f.read()
        return pickle.loads(bz2.decompress(compressed_session))

    def render_session_video(self, session, visualize=False, postprocess=True, render_size=(64, 64), outpath=None, single_turn=False):
        """Render a session's event sequences to video via gridworld's Visualizer.

        In multi-turn mode, dialog turns are paired (steps of 2); events are
        presumably keyed by even dialog index — verify against the parser.
        """
        path = pathlib.Path((outpath or self.dataset.get_data_path()))
        if visualize:
            raise ValueError('this mode does not work yet')
            # Unreachable: kept from the original, intended 1000x1000 preview.
            session_path = (path / 'session_videos')
            (height, width) = (1000, 1000)
        else:
            session_path = path
        logger.info(f'rendering session {session.name}')
        from gridworld.visualizer import Visualizer
        visualizer = Visualizer((render_size, render_size))
        if single_turn:
            visualizer.render_video(session_path, event_sequence=session.events[0], close=True)
            if postprocess:
                visualizer.postproc_video(session_path)
        else:
            for i in range(2, (len(session.dialogs) + 1), 2):
                if (i not in session.events):
                    break
                visualizer.render_video((session_path / f'{session.name}_{((i // 2) - 1)}'), event_sequence=session.events[i], close=True)
                logger.info(f'session {session.name}: rendering step {((i // 2) + 1)}/{(len(session.dialogs) // 2)}')
                if postprocess:
                    visualizer.postproc_video((session_path / f'{session.name}_{((i // 2) - 1)}'))
def _reshape_raw_ferminet_orbitals(orbitals: ArrayList, ndeterminants: int) -> ArrayList:
    """Split the fused determinant axis off each orbital matrix.

    Each array's last axis is reshaped from (ndeterminants * k) into
    (ndeterminants, k), and the determinant axis is moved to the front.
    """
    reshaped = []
    for orb in orbitals:
        *leading, fused = orb.shape
        split = jnp.reshape(orb, (*leading, ndeterminants, fused // ndeterminants))
        reshaped.append(jnp.moveaxis(split, -2, 0))
    return reshaped
def choose_label(input_file, output_file):
    """Reduce each labelled line to '<token> <last column>'.

    Lines shorter than 3 characters (e.g. blank sentence separators) are
    copied through unchanged. Input is fully read before the output is
    opened, so input_file == output_file stays safe.
    """
    with open(input_file, 'r') as src:
        lines = src.readlines()
    with open(output_file, 'w') as dst:
        for line in lines:
            if len(line) < 3:
                dst.write(line)
                continue
            fields = line.strip('\n').split(' ')
            dst.write('{} {}\n'.format(fields[0], fields[-1]))
def Process_1000(args):
    """Balance the TATA / no-TATA promoter scan splits and write the selected
    no-TATA test row ids to `<file_path>/notata_test_id`.

    The no-TATA splits are shuffled (seeded by args.seed) and truncated to the
    corresponding TATA split sizes; for each kept no-TATA test row, columns
    0-5 plus 7 ('keys') and 9 ('id') are written out.

    Bug fixes vs. original: the fourth size log printed 'tata test' for the
    no-TATA count; the four input files were never closed (now context
    managed); the output is opened with newline='' as csv.writer requires.
    """
    random.seed(args.seed)

    def read_rows(path):
        # Drop the header row; quotechar=None disables quote processing.
        with open(path, 'r', encoding='utf-8-sig') as f:
            return list(csv.reader(f, delimiter=',', quotechar=None))[1:]

    tata_train_lines = read_rows(args.file_path + 'TATA_scan_train.csv')
    notata_train_lines = read_rows(args.file_path + 'noTATA_scan_train.csv')
    tata_test_lines = read_rows(args.file_path + '/TATA_scan_test.csv')
    notata_test_lines = read_rows(args.file_path + '/noTATA_scan_test.csv')
    print('Original:')
    print(('tata train: %d' % len(tata_train_lines)))
    print(('notata train: %d' % len(notata_train_lines)))
    print(('tata test: %d' % len(tata_test_lines)))
    print(('notata test: %d' % len(notata_test_lines)))
    random.shuffle(tata_train_lines)
    random.shuffle(notata_train_lines)
    random.shuffle(tata_test_lines)
    random.shuffle(notata_test_lines)
    # Subsample the (larger) no-TATA splits to match the TATA split sizes.
    notata_train_lines = notata_train_lines[:len(tata_train_lines)]
    notata_test_lines = notata_test_lines[:len(tata_test_lines)]
    with open(os.path.join(args.file_path, 'notata_test_id'), 'w', newline='') as f:
        tsv_w = csv.writer(f, delimiter=',')
        tsv_w.writerow(['index', 'chrom', 'start', 'end', 'name', 'strand', 'keys', 'id'])
        for line in notata_test_lines:
            tsv_w.writerow([line[0], line[1], line[2], line[3], line[4], line[5], line[7], line[9]])
class UniformWindowWithoutOverlapSpotClipSampler(SpotClipSampler):
    """Yields `windows_per_video` fixed-length windows per video as
    (video_idx, start_frame, end_frame) triples, end inclusive; sampling is
    deterministic in (seed, epoch), optionally shuffled across videos."""

    def __init__(self, data_source: Spot, windows_per_video: int=50, window_num_frames: int=32, sample_edges: bool=False, prevent_resample_edges: bool=True, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        self.windows_per_video = windows_per_video
        self.window_num_frames = window_num_frames
        self.sample_edges = sample_edges
        self.prevent_resample_edges = prevent_resample_edges
        self._shuffle = shuffle

    def __iter__(self) -> List[Any]:
        gen = torch.Generator()
        gen.manual_seed(self.seed + self.epoch)
        windows = []
        for video_idx in range(len(self.data_source)):
            meta = self.data_source.get_video_metadata(video_idx)
            starts = random_start_subsequences(clip_duration=self.window_num_frames, video_num_frames=meta['num_frames'], num_subsequences=self.windows_per_video, sample_edges=self.sample_edges, prevent_resample_edges=self.prevent_resample_edges, generator=gen)
            for start in starts:
                # End index is inclusive, hence the -1.
                windows.append((video_idx, start, start + self.window_num_frames - 1))
        if self._shuffle:
            perm = torch.randperm(len(windows), generator=gen)
            windows = [windows[i] for i in perm]
        return iter(windows)

    def __len__(self) -> int:
        return len(self.data_source) * self.windows_per_video

    def __repr__(self) -> str:
        return f'{__class__.__name__}(len={self.__len__()}, windows_per_video={self.windows_per_video}, window_num_frames={self.window_num_frames}, sample_edges={self.sample_edges}, prevent_resample_edges={self.prevent_resample_edges} shuffle={self._shuffle}, seed={self.seed})'
def make_solved_cube(cube_size: int) -> Cube:
    """Return a solved cube: one uniformly-colored (cube_size, cube_size) int8 array per Face, stacked in Face order."""
    faces = [jnp.full((cube_size, cube_size), face.value, dtype=jnp.int8) for face in Face]
    return jnp.stack(faces)
def mzip(x, y):
    """Zip x with y, first routing both through r_cast when x is bfloat16."""
    if x.dtype == tf.bfloat16:
        x, y = r_cast(x), r_cast(y)
    return zip(x, y)
class TestSummarizationDistillerMultiGPU(TestCasePlus):
    """Multi-GPU smoke test for the seq2seq distillation CLI (run via subprocess)."""

    @classmethod
    def setUpClass(cls):
        # Fixed: unittest calls setUpClass on the class, so without @classmethod
        # this raised TypeError (missing `cls`). No shared fixtures are needed.
        return cls

    # Fixed: the decorator line was garbled to a bare `_torch_multi_gpu`
    # expression (NameError at class creation); restored the gate that skips
    # this test unless >=2 GPUs are available.
    @require_torch_multi_gpu
    def test_multi_gpu(self):
        updates = {'no_teacher': True, 'freeze_encoder': True, 'gpus': 2, 'overwrite_output_dir': True, 'sortish_sampler': True}
        self._test_distiller_cli_fork(updates, check_contents=False)

    def _test_distiller_cli_fork(self, updates, check_contents=True):
        """Launch distillation.py with `updates` merged over cheap defaults and
        (optionally) verify checkpoints/metrics written to the output dir."""
        default_updates = {'label_smoothing': 0.0, 'early_stopping_patience': (- 1), 'train_batch_size': 1, 'eval_batch_size': 2, 'max_epochs': 2, 'alpha_mlm': 0.2, 'alpha_ce': 0.8, 'do_predict': True, 'model_name_or_path': 'sshleifer/tinier_bart', 'teacher': CHEAP_ARGS['model_name_or_path'], 'val_check_interval': 0.5}
        default_updates.update(updates)
        args_d: dict = CHEAP_ARGS.copy()
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        output_dir = self.get_auto_remove_tmp_dir()
        args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates)

        def convert(k, v):
            # Translate one arg-dict entry into a CLI token ('' means "omit").
            if (k in ['tgt_suffix', 'server_ip', 'server_port', 'out', 'n_tpu_cores']):
                return ''
            if ((v is False) or (v is None)):
                return ''
            if (v is True):
                return f'--{k}'
            return f'--{k}={v}'
        cli_args = [x for x in (convert(k, v) for (k, v) in args_d.items()) if len(x)]
        cmd = ([sys.executable, f'{self.test_file_dir}/distillation.py'] + cli_args)
        execute_subprocess_async(cmd, env=self.get_env())
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        ckpt_files = [p for p in contents if p.endswith('ckpt')]
        assert (len(ckpt_files) > 0)
        self.assertIn('test_generations.txt', contents)
        self.assertIn('test_results.txt', contents)
        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        val_metric = 'rouge2'
        metrics = load_json(metrics_save_path)
        print(metrics)
        last_step_stats = metrics['val'][(- 1)]
        self.assertGreaterEqual(last_step_stats['val_avg_gen_time'], 0.01)
        self.assertIsInstance(last_step_stats[f'val_avg_{val_metric}'], float)
        self.assertEqual(len(metrics['test']), 1)
        # One eval every val_check_interval epochs, halved because the subprocess
        # run logs val twice per interval, plus the final sanity eval.
        desired_n_evals = int((((args_d['max_epochs'] * (1 / args_d['val_check_interval'])) / 2) + 1))
        self.assertEqual(len(metrics['val']), desired_n_evals)
def glue_eval_data_collator(dataset: Dataset, batch_size: int):
    """Yield sharded numpy batches for GLUE evaluation.

    Only full batches are produced; a trailing partial batch is dropped so
    every device receives an equally-sized shard.
    """
    num_full_batches = len(dataset) // batch_size
    for batch_idx in range(num_full_batches):
        start = batch_idx * batch_size
        examples = dataset[start:start + batch_size]
        arrays = {key: np.array(value) for key, value in examples.items()}
        yield shard(arrays)
def _create_hrnet(variant, pretrained, **model_kwargs):
    """Build an HRNet `variant`, optionally as a feature-extraction backbone.

    When `features_only` is passed (and truthy) the features variant is built,
    classifier-only kwargs are filtered out, and strict pretrained loading is
    relaxed since the head weights are absent.
    """
    wants_features = model_kwargs.pop('features_only', False)
    if wants_features:
        cls_to_build = HighResolutionNetFeatures
        filtered_kwargs = ('num_classes', 'global_pool')
    else:
        cls_to_build = HighResolutionNet
        filtered_kwargs = None
    model = build_model_with_cfg(cls_to_build, variant, pretrained, default_cfg=default_cfgs[variant], model_cfg=cfg_cls[variant], pretrained_strict=(not wants_features), kwargs_filter=filtered_kwargs, **model_kwargs)
    if wants_features:
        # Strip classifier fields from the default cfg for the backbone variant.
        model.default_cfg = default_cfg_for_features(model.default_cfg)
    return model
class Basis_GauSH(Basis):
    """Gaussian radial x spherical-harmonic basis ('GauSH').

    RBFS holds (center, width) rows for 12 radial shells, tiled once per
    element channel (10 copies).
    """

    def __init__(self, Name_=None):
        Basis.__init__(self, Name_)
        self.type = 'GauSH'
        shell_params = np.array([[0.1, 0.156787], [0.3, 0.3], [0.5, 0.5], [0.7, 0.7], [1.3, 1.3], [2.2, 2.4], [4.4, 2.4], [6.6, 2.4], [8.8, 2.4], [11.0, 2.4], [13.2, 2.4], [15.4, 2.4]])
        self.RBFS = np.tile(shell_params, (10, 1, 1))

    def Orthogonalize(self):
        """Compute per-channel S^(-1/2) whitening matrices for the radial overlap."""
        from TensorMol.LinearOperations import MatrixPower
        S_Rad = MolEmb.Overlap_RBFS(PARAMS, self.RBFS)
        n_rad = PARAMS['SH_NRAD']
        self.SRBF = np.zeros((self.RBFS.shape[0], n_rad, n_rad))
        for channel in range(S_Rad.shape[0]):
            # Symmetric orthogonalization: S^(-1/2).
            self.SRBF[channel] = MatrixPower(S_Rad[channel], -0.5)
def convert_example_to_features(example, max_seq_length, tokenizer):
    """Convert a sentence-pair example into BERT pretraining features.

    Builds the sequence [CLS] A [SEP] B [SEP], applies random word masking to
    both segments, zero-pads to `max_seq_length`, and returns an InputFeatures
    carrying ids, mask, segment ids, LM labels (-1 = no loss) and is_next.
    """
    tokens_a = example.tokens_a
    tokens_b = example.tokens_b
    # In-place truncation so both segments plus [CLS]/[SEP]/[SEP] fit.
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    tokens_a, t1_label = random_word(tokens_a, tokenizer)
    tokens_b, t2_label = random_word(tokens_b, tokenizer)
    # -1 marks the special-token slots excluded from the masked-LM loss.
    lm_label_ids = [-1] + t1_label + [-1] + t2_label + [-1]
    assert len(tokens_b) > 0
    tokens = ['[CLS]'] + list(tokens_a) + ['[SEP]'] + list(tokens_b) + ['[SEP]']
    # Segment 0 covers [CLS] + A + first [SEP]; segment 1 covers B + final [SEP].
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)
    # Zero-pad everything up to the fixed sequence length.
    pad = max_seq_length - len(input_ids)
    input_ids.extend([0] * pad)
    input_mask.extend([0] * pad)
    segment_ids.extend([0] * pad)
    lm_label_ids.extend([-1] * pad)
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    assert len(lm_label_ids) == max_seq_length
    if example.guid < 5:
        # Log the first few examples for eyeballing.
        logger.info('*** Example ***')
        logger.info(('guid: %s' % example.guid))
        logger.info(('tokens: %s' % ' '.join([str(x) for x in tokens])))
        logger.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
        logger.info(('input_mask: %s' % ' '.join([str(x) for x in input_mask])))
        logger.info(('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])))
        logger.info(('LM label: %s ' % lm_label_ids))
        logger.info(('Is next sentence label: %s ' % example.is_next))
    features = InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, lm_label_ids=lm_label_ids, is_next=example.is_next)
    return features
def psi1(mean, var, a, b, ms):
    # Psi1 statistic for variational Fourier features on the interval [a, b]:
    # expectation of the cos/sin feature map under a Gaussian with the given
    # mean and var. Frequencies are omega_m = 2*pi*m / (b - a) for m in `ms`.
    omegas = (((2.0 * np.pi) * ms) / (b - a))
    Kuf_cos = tf.transpose(tf.cos((omegas * (mean - a))))
    # Drop the zero frequency for the sine block (sin(0) contributes nothing).
    omegas = omegas[(omegas != 0)]
    Kuf_sin = tf.transpose(tf.sin((omegas * (mean - a))))
    # NOTE(review): `a` is rebound here from the interval edge to the Gaussian
    # damping term exp(-omega^2 * var / 2); the edge value is not used below.
    a = tf.transpose(tf.exp((((- tf.square(omegas)) * var) / 2)))
    # NOTE(review): if `ms` contains 0, Kuf_cos keeps one more row than the
    # filtered damping term, so this product would shape-mismatch — presumably
    # callers either exclude 0 from `ms` or nothing gets filtered; confirm.
    Psi1_cos = (Kuf_cos * a)
    Psi1_sin = (Kuf_sin * a)
    return tf.concat([Psi1_cos, Psi1_sin], axis=0)
def standard_laurent_embed(nvar, topdim, pols, verbose_level=0):
    """Embed a standard double-precision Laurent system for cascade homotopies.

    Stores `pols` (in `nvar` variables) into PHCpack's container, embeds it at
    top dimension `topdim`, and returns the embedded Laurent system.
    """
    from phcpy.interface import load_standard_laurent_system, store_standard_laurent_system
    from phcpy.phcpy2c3 import py2c_embed_standard_Laurent_system
    store_standard_laurent_system(pols, nbvar=nvar)
    py2c_embed_standard_Laurent_system(topdim, verbose_level)
    return load_standard_laurent_system()
class ShapeGenerator():
    """Builds minimal URDF documents for primitive shapes (sphere, box, cylinder)."""

    def generate_shape(self, name, geometry, color=None):
        """Return a single-link URDF string named `name` whose visual and
        collision elements both use `geometry` (an XML fragment); `color` is an
        RGBA list, defaulting to opaque grey."""
        if color is None:
            color = [0.5, 0.5, 0.5, 1]
        rgba = ' '.join(str(channel) for channel in color)
        return f'''
        <robot name="{name}">
          <link name="base_link">
            <visual>
              <!-- visual origin is defined w.r.t. link local coordinate system -->
              <origin xyz="0 0 0" rpy="0 0 0" />
              <geometry>
                {geometry}
              </geometry>
              <material name="white">
                <color rgba="{rgba}"/>
              </material>
            </visual>
            <collision>
              <!-- collision origin is defined w.r.t. link local coordinate system -->
              <origin xyz="0 0 0" rpy="0 0 0" />
              <geometry>
                {geometry}
              </geometry>
            </collision>
          </link>
        </robot>
        '''

    def generate_sphere(self, radius, color=None, **args):
        """URDF for a sphere of the given radius."""
        return self.generate_shape('sphere', f'<sphere radius="{radius}"/>', color)

    def generate_box(self, scale, color=None, **args):
        """URDF for a box with extents scale[0] x scale[1] x scale[2]."""
        return self.generate_shape('box', f'<box size="{scale[0]} {scale[1]} {scale[2]}"/>', color)

    def generate_cylinder(self, radius, height, color=None, **args):
        """URDF for a cylinder with the given radius and height."""
        return self.generate_shape('cylinder', f'<cylinder radius="{radius}" length="{height}"/>', color)
def sepreresnet164bn_cifar10(num_classes=10, **kwargs):
    """SE-PreResNet-164(BN) configured for CIFAR-10 (bottleneck blocks)."""
    return get_sepreresnet_cifar(blocks=164, bottleneck=True, model_name='sepreresnet164bn_cifar10', num_classes=num_classes, **kwargs)
class BratsSampler(Sampler):
    """Batch sampler that yields `n_patients` distinct patient indices per
    batch, each repeated `n_samples` times (batch size = n_patients * n_samples).
    A trailing partial batch is yielded for leftover patients.
    """

    def __init__(self, dataset, n_patients, n_samples):
        self.batch_size = n_patients * n_samples
        self.n_samples = n_samples
        self.n_patients = n_patients
        self.dataset_indices = list(range(len(dataset)))

    def __iter__(self):
        batch = []
        # Fresh patient order every epoch.
        random.shuffle(self.dataset_indices)
        for patient_id in self.dataset_indices:
            batch.extend(patient_id for _ in range(self.n_samples))
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        if batch:
            yield batch

    def __len__(self):
        # Number of batches: each full batch consumes `n_patients` patients,
        # plus one partial batch for the remainder.
        # Fixed: previously divided the patient count by `batch_size`
        # (= n_patients * n_samples), under-counting by a factor of n_samples.
        return (len(self.dataset_indices) + self.n_patients - 1) // self.n_patients
class UniformReplay():
    # Mixin adding uniform random sampling over a circular replay buffer.
    # Relies on attributes defined by the concrete buffer class: self.t (write
    # cursor), self.T (time capacity), self.B (env/batch dimension),
    # self.off_backward / self.off_forward (invalid margins around the cursor),
    # self._buffer_full, and self.extract_batch.
    def sample_batch(self, batch_B):
        # Draw `batch_B` uniform (time, env) index pairs and extract that batch.
        (T_idxs, B_idxs) = self.sample_idxs(batch_B)
        return self.extract_batch(T_idxs, B_idxs)
    def sample_idxs(self, batch_B):
        (t, b, f) = (self.t, self.off_backward, self.off_forward)
        # When the buffer has wrapped, any slot is valid except the margins;
        # before wrapping, only [f, t - b) has complete samples.
        high = (((self.T - b) - f) if self._buffer_full else (t - b))
        low = (0 if self._buffer_full else f)
        T_idxs = np.random.randint(low=low, high=high, size=(batch_B,))
        # Shift draws landing in the invalid window just behind the write
        # cursor past it (wrap-around). NOTE(review): assumes the margins are
        # sized so shifted indices stay within [0, T) — confirm in buffer setup.
        T_idxs[(T_idxs >= (t - b))] += (min(t, b) + f)
        B_idxs = np.random.randint(low=0, high=self.B, size=(batch_B,))
        return (T_idxs, B_idxs)
class EltwiseParameter(message.Message):
    # Generated-protobuf-style wrapper for Caffe's EltwiseParameter message.
    # NOTE(review): `__metaclass__` assignment is the Python-2 protobuf idiom;
    # on Python 3 this attribute is inert and the metaclass never applies —
    # confirm whether this module is still consumed under py2 protobuf.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    # Message descriptor built elsewhere in this generated module.
    DESCRIPTOR = _ELTWISEPARAMETER
class _AssertNoLogsContext(unittest.case._AssertLogsContext):
def __exit__(self, exc_type, exc_value, tb):
self.logger.handlers = self.old_handlers
self.logger.propagate = self.old_propagate
self.logger.setLevel(self.old_level)
if (exc_type is not None):
return False
if self.watcher.records:
msg = 'logs of level {} or higher triggered on {}:\n'.format(logging.getLevelName(self.level), self.logger.name)
for record in self.watcher.records:
msg += ('logger %s %s:%i: %s\n' % (record.name, record.pathname, record.lineno, record.getMessage()))
self._raiseFailure(msg) |
class TFBertLMHeadModel():
    """Import-time placeholder: raises an informative error when TensorFlow
    is not installed, instead of failing at attribute lookup."""

    def __init__(self, *args, **kwargs):
        # Any instantiation attempt reports the missing 'tf' backend.
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        # Mirrors the real model's loader entry point; also gated on TensorFlow.
        requires_tf(self)
def assert_scipy_wav_style(value):
    """Assert `value` looks like scipy.io.wavfile output: Tuple[int, numpy.ndarray].

    On failure the message shows the offending type, expanding sequence
    element types for easier debugging.
    """
    assert is_scipy_wav_style(value), (
        'Must be Tuple[int, numpy.ndarray], but got {}'.format(
            '{}[{}]'.format(type(value), ', '.join(str(type(v)) for v in value))
            if isinstance(value, Sequence)
            else type(value)
        )
    )
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one optimization epoch while tracking mutual-information estimates.

    Returns (MI_XTs, MI_TYs): per-iteration MI(X;T) / MI(T;Y) estimates,
    populated only on epoch 0 (with the last per-iteration entry removed);
    a final full-epoch MI is computed after the loop.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('', ':6.2f')
    top5 = AverageMeter('', ':6.2f')
    (MI_XTs, MI_TYs) = ([], [])
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
    # NOTE(review): eval() during training is unusual (freezes BN stats /
    # dropout); presumably intentional for stable MI measurement — confirm.
    model.eval()
    end = time.time()
    for (i, (images, target)) in enumerate(train_loader):
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        output = model(images)
        loss = criterion(output, target)
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - end)
        end = time.time()
        if (i % args.print_freq) == 0:
            progress.display(i)
        if epoch == 0:
            # Track MI after every update during the first epoch only.
            (label_matrix, t_total) = cal_mi_epoch(model, train_loader)
            (MI_XT, MI_TY) = MI_cal_v2(label_matrix, t_total, 1000)
            MI_XTs.append(MI_XT)
            MI_TYs.append(MI_TY)
    # Drop the last per-iteration estimate before the final full-epoch pass.
    # Fixed: `MI_TSs.pop()` was a NameError (typo for MI_TYs), and the
    # unconditional pops raised IndexError on epochs > 0 where both lists
    # are empty.
    if MI_XTs:
        MI_XTs.pop()
    if MI_TYs:
        MI_TYs.pop()
    (label_matrix, t_total) = cal_mi_epoch(model, train_loader)
    # NOTE(review): this final estimate is computed but never appended to the
    # returned lists — possibly a missing append; left as-is to preserve behavior.
    (MI_XT, MI_TY) = MI_cal_v2(label_matrix, t_total, 1000)
    return (MI_XTs, MI_TYs)
class SuperResIDWE4K5(SuperResIDWEXKX):
    """Inverted-depthwise residual super-block preset: kernel size 5, 4x expansion."""

    def __init__(self, in_channels=None, out_channels=None, stride=None, bottleneck_channels=None, sub_layers=None, no_create=False, **kwargs):
        # Fix kernel_size/expension here; everything else passes through.
        # ('expension' spelling matches the parent class's keyword.)
        super().__init__(in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck_channels=bottleneck_channels, sub_layers=sub_layers, kernel_size=5, expension=4.0, no_create=no_create, **kwargs)
def metric_fn(pred, label, metric='IC'):
    """Compute an evaluation metric over the non-NaN label entries.

    Args:
        pred: prediction tensor, same shape as `label`.
        label: target tensor; NaN entries are masked out of both inputs.
        metric: 'IC' (information coefficient) or 'R2'.

    Raises:
        ValueError: for an unsupported metric name (previously the function
        silently fell through and returned None).
    """
    mask = ~torch.isnan(label)
    if metric == 'IC':
        return calc_ic(pred[mask], label[mask])
    if metric == 'R2':
        return calc_r2(pred[mask], label[mask])
    raise ValueError(f'unknown metric: {metric!r}')
class VisionTouchDataset(Dataset):
    """Paired vision ("src") / touch ("des") image dataset.

    Each record line lists reference and current src/des image paths plus a
    temporal window of neighboring src frames. Items are resized, cropped,
    optionally color-jittered (train only), converted to grayscale stacks when
    `w_timewindow` is set, and returned as tensors at full and low resolution.
    """

    def __init__(self, phase, data_lst_file, w_timewindow, trans_des=None, trans_lowres=None, trans_to_tensor=None, scale_size=None, crop_size=None, brightness=None, contrast=None, saturation=None, hue=None, loader=default_loader):
        self.phase = phase
        # One whitespace-separated record per line.
        self.recs = open(data_lst_file, 'r').readlines()
        self.w_timewindow = w_timewindow
        self.trans_des = trans_des
        self.trans_lowres = trans_lowres
        self.trans_to_tensor = trans_to_tensor
        self.loader = loader
        self.scale_size = scale_size
        self.crop_size = crop_size
        # Color-jitter ranges (train phase only).
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue

    def __len__(self):
        return len(self.recs)

    def variance_of_laplacian(self, image):
        # NOTE(review): despite the name this returns the variance **squared**
        # of the Laplacian response — confirm the extra square is intended.
        return cv2.Laplacian(image, cv2.CV_64F).var() ** 2

    def cvt_rgb2gray(self, image):
        """Convert an RGB image (PIL or array) to a grayscale numpy array."""
        return cv2.cvtColor(np.array(image).copy(), cv2.COLOR_RGB2GRAY)

    def calc_weight(self, ref_des, des):
        """Sharpness-difference weight between a reference and current touch image."""
        # Fixed: np.float was removed in NumPy >= 1.20; np.float64 is the same dtype.
        gray_ref = np.array(self.cvt_rgb2gray(ref_des)).astype(np.float64)
        gray = np.array(self.cvt_rgb2gray(des)).astype(np.float64)
        return self.variance_of_laplacian(gray - gray_ref)

    def get_crop_params(self, phase, img, crop_size):
        """Return (top, left, height, width) for a square crop.

        Train: random vertical offset, horizontal offset jittered +/-8 px
        around center. Otherwise: plain center crop.
        """
        (w, h) = img.size
        (th, tw) = (crop_size, crop_size)
        if phase == 'train':
            if (w == tw) and (h == th):
                return (0, 0, h, w)
            i = random.randint(0, h - th)
            # Fixed: random.randint no longer accepts float bounds (and the old
            # float expression crashed whenever w - tw was odd); use int center.
            center_j = (w - tw) // 2
            j = random.randint(center_j - 8, center_j + 8)
        else:
            i = int(round((h - th) / 2.0))
            j = int(round((w - tw) / 2.0))
        return (i, j, th, tw)

    def resize_and_crop(self, phase, srcs, scale_size, crop_size):
        """Resize all images, then apply one shared crop (params from srcs[0])."""
        len_srcs = len(srcs)
        for i in range(len_srcs):
            srcs[i] = resize(srcs[i], scale_size)
        crop_params = self.get_crop_params(phase, srcs[0], crop_size)
        for i in range(len_srcs):
            srcs[i] = crop(srcs[i], crop_params[0], crop_params[1], crop_params[2], crop_params[3])
        return srcs

    def colorjitter(self, srcs, brightness, contrast, saturation, hue):
        """Apply one shared random color jitter to every image in `srcs`."""
        len_srcs = len(srcs)
        brightness_factor = np.random.uniform(max(0, 1 - brightness), 1 + brightness)
        contrast_factor = np.random.uniform(max(0, 1 - contrast), 1 + contrast)
        saturation_factor = np.random.uniform(max(0, 1 - saturation), 1 + saturation)
        hue_factor = np.random.uniform(-hue, hue)
        for i in range(len_srcs):
            srcs[i] = adjust_brightness(srcs[i], brightness_factor)
            srcs[i] = adjust_contrast(srcs[i], contrast_factor)
            srcs[i] = adjust_saturation(srcs[i], saturation_factor)
            srcs[i] = adjust_hue(srcs[i], hue_factor)
        return srcs

    def __getitem__(self, idx):
        (ref_src, ref_des, src, des, src_pre_0, src_pre_1, src_nxt_0, src_nxt_1) = self.recs[idx].strip().split(' ')
        ref_src = self.loader(ref_src)
        ref_des = self.loader(ref_des)
        src = self.loader(src)
        # Keep an RGB copy of the current frame before grayscale conversion.
        src_rgb = src.copy()
        des = self.loader(des)
        if self.w_timewindow:
            src_pre_0 = self.loader(src_pre_0)
            src_pre_1 = self.loader(src_pre_1)
            src_nxt_0 = self.loader(src_nxt_0)
            src_nxt_1 = self.loader(src_nxt_1)
            # Order matters downstream: current frame then interleaved neighbors.
            srcs = [ref_src, src, src_pre_0, src_nxt_1, src_pre_1, src_nxt_0]
        else:
            srcs = [ref_src, src]
        srcs = self.resize_and_crop(self.phase, srcs, self.scale_size, self.crop_size)
        if self.phase == 'train':
            srcs = self.colorjitter(srcs, self.brightness, self.contrast, self.saturation, self.hue)
        srcs_lowres = []
        for i in range(len(srcs)):
            srcs_lowres += [self.trans_lowres(srcs[i])]
        if self.w_timewindow:
            # All but the reference frame become single-channel grayscale.
            for i in range(1, len(srcs)):
                srcs[i] = self.cvt_rgb2gray(srcs[i])
                srcs_lowres[i] = self.cvt_rgb2gray(srcs_lowres[i])
            ref_src = srcs[0]
            ref_src_lowres = srcs_lowres[0]
            # Stack the temporal window along the channel (last) axis.
            src = np.stack((srcs[1], srcs[2], srcs[3], srcs[4], srcs[5]), axis=-1)
            src_lowres = np.stack((srcs_lowres[1], srcs_lowres[2], srcs_lowres[3], srcs_lowres[4], srcs_lowres[5]), axis=-1)
        else:
            ref_src = srcs[0]
            ref_src_lowres = srcs_lowres[0]
            src = srcs[1]
            src_lowres = srcs_lowres[1]
        ref_des = self.trans_des(ref_des)
        ref_des_lowres = self.trans_lowres(ref_des)
        des = self.trans_des(des)
        des_lowres = self.trans_lowres(des)
        ref_src = self.trans_to_tensor(ref_src)
        ref_src_lowres = self.trans_to_tensor(ref_src_lowres)
        src = self.trans_to_tensor(src)
        src_lowres = self.trans_to_tensor(src_lowres)
        src_rgb = self.trans_to_tensor(src_rgb)
        ref_des = self.trans_to_tensor(ref_des)
        ref_des_lowres = self.trans_to_tensor(ref_des_lowres)
        des = self.trans_to_tensor(des)
        des_lowres = self.trans_to_tensor(des_lowres)
        return (ref_src_lowres, ref_des_lowres, src_lowres, des_lowres, ref_src, ref_des, src, src_rgb)
class FlavaConfig(PretrainedConfig):
    """Composite configuration for FLAVA: wraps the image, text, multimodal and
    image-codebook sub-configs plus the joint-model hyper-parameters
    (projection size, loss weights, contrastive options)."""
    model_type = 'flava'
    is_composition = True

    @staticmethod
    def _merge_config_dict(config, config_dict, config_cls, name, normalize_id2label=False):
        """Fold `<name>_config_dict` kwargs into the `<name>_config` dict.

        Warns whenever a key present in both carries different values; the
        *_config_dict value always wins. Returns the merged dict.
        """
        if config is None:
            config = {}
        merged = config_cls(**config_dict).to_dict()
        if normalize_id2label and ('id2label' in merged):
            # JSON serialization stringifies int keys; normalize for comparison.
            merged['id2label'] = {str(key): value for (key, value) in merged['id2label'].items()}
        for (key, value) in merged.items():
            if ((key in config) and (value != config[key]) and (key not in ['transformers_version'])):
                if (key in config_dict):
                    message = f'`{key}` is found in both `{name}_config_dict` and `{name}_config` but with different values. The value `{name}_config_dict["{key}"]` will be used instead.'
                else:
                    message = f'`{name}_config_dict` is provided which will be used to initialize `{config_cls.__name__}`. The value `{name}_config["{key}"]` will be overriden.'
                logger.warning(message)
        config.update(merged)
        return config

    def __init__(self, image_config: Dict[str, Any] = None, text_config: Dict[str, Any] = None, multimodal_config: Dict[str, Any] = None, image_codebook_config: Dict[str, Any] = None, hidden_size: int = 768, layer_norm_eps: float = 1e-12, projection_dim: int = 768, init_codebook: bool = True, logit_scale_init_value: float = 2.6592, initializer_range: float = 0.02, ce_ignore_index: int = -100, mim_weight: float = 1.0, mlm_weight: float = 1.0, global_contrastive_weight: float = 1.0, itm_weight: float = 1.0, mmm_image_weight: float = 1.0, mmm_text_weight: float = 1.0, global_backprop_contrastive: bool = True, skip_unmasked_multimodal_encoder: bool = True, return_loss: bool = True, **kwargs):
        """Build the composite config; legacy `*_config_dict` kwargs are merged
        into the corresponding `*_config` dicts before sub-config creation."""
        text_config_dict = kwargs.pop('text_config_dict', None)
        image_config_dict = kwargs.pop('image_config_dict', None)
        multimodal_config_dict = kwargs.pop('multimodal_config_dict', None)
        image_codebook_config_dict = kwargs.pop('image_codebook_config_dict', None)
        super().__init__(**kwargs)
        # The four merge blocks below were previously near-identical inline
        # copies; factored into _merge_config_dict.
        if text_config_dict is not None:
            text_config = self._merge_config_dict(text_config, text_config_dict, FlavaTextConfig, 'text')
        if image_config_dict is not None:
            image_config = self._merge_config_dict(image_config, image_config_dict, FlavaImageConfig, 'image', normalize_id2label=True)
        if multimodal_config_dict is not None:
            multimodal_config = self._merge_config_dict(multimodal_config, multimodal_config_dict, FlavaMultimodalConfig, 'multimodal')
        if image_codebook_config_dict is not None:
            image_codebook_config = self._merge_config_dict(image_codebook_config, image_codebook_config_dict, FlavaImageCodebookConfig, 'image_codebook')
        if image_config is None:
            image_config = {}
            logger.info('`image_config` is `None`. initializing the `FlavaImageConfig` with default values.')
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `FlavaTextConfig` with default values.')
        if multimodal_config is None:
            multimodal_config = {}
            logger.info('`multimodal_config` is `None`. initializing the `FlavaMultimodalConfig` with default values.')
        if image_codebook_config is None:
            image_codebook_config = {}
            logger.info('`image_codebook_config` is `None`. initializing the `FlavaImageCodebookConfig` with default values.')
        self.image_config = FlavaImageConfig(**image_config)
        self.text_config = FlavaTextConfig(**text_config)
        self.multimodal_config = FlavaMultimodalConfig(**multimodal_config)
        self.image_codebook_config = FlavaImageCodebookConfig(**image_codebook_config)
        self.projection_dim = projection_dim
        self.init_codebook = init_codebook
        self.hidden_size = hidden_size
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.ce_ignore_index = ce_ignore_index
        self.mim_weight = mim_weight
        self.mlm_weight = mlm_weight
        self.global_contrastive_weight = global_contrastive_weight
        self.itm_weight = itm_weight
        self.mmm_image_weight = mmm_image_weight
        self.mmm_text_weight = mmm_text_weight
        self.global_backprop_contrastive = global_backprop_contrastive
        self.skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder
        self.return_loss = return_loss

    @classmethod
    def from_configs(cls, image_config: FlavaImageConfig, text_config: FlavaTextConfig, multimodal_config: FlavaMultimodalConfig, image_codebook_config: FlavaImageCodebookConfig, **kwargs):
        """Instantiate a FlavaConfig from already-built sub-config objects.

        Fixed: this alternate constructor was missing @classmethod, so `cls`
        bound to the first positional argument when called on the class.
        """
        return cls(image_config=image_config.to_dict(), text_config=text_config.to_dict(), multimodal_config=multimodal_config.to_dict(), image_codebook_config=image_codebook_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding each sub-config recursively."""
        output = copy.deepcopy(self.__dict__)
        output['image_config'] = self.image_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['multimodal_config'] = self.multimodal_config.to_dict()
        output['image_codebook_config'] = self.image_codebook_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class FlaxBertForPreTraining(metaclass=DummyObject):
    """Import-time stand-in used when the Flax backend is unavailable;
    any use raises an informative backend error."""
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Reports that the 'flax' backend must be installed.
        requires_backends(self, ['flax'])
def resnest():
    """Smoke-build the ResNeSt-50 (2s2x40d) recognizer from its YAML config on CPU and print it."""
    cfg_file = 'tests/configs/resnet-resnext/senet-skent-resnest/resnest50_2s2x40d.yaml'
    cfg.merge_from_file(cfg_file)
    model = build_recognizer(cfg, torch.device('cpu'))
    print(model)
def mynorm(arr):
    """Min-max normalize `arr` into [0, 1]; the epsilon keeps a constant array finite (all zeros)."""
    lo = np.amin(arr)
    span = (np.amax(arr) - lo) + 1e-18
    return np.absolute((arr - lo) / span)
class Dataloader(object):
    """Iterates a TFRecord file in minibatches via a TF1-style one-shot iterator."""

    def __init__(self, data_location, batch_size):
        self.batch_size = batch_size
        self.data_file = data_location
        # One full pass over the record file just to count samples.
        self.total_samples = sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(data_location))
        self.n = math.ceil(float(self.total_samples) / batch_size)
        print(f'batch size is {self.batch_size},{self.n} iteration')

    def __iter__(self):
        graph = tf.Graph()
        with graph.as_default():
            self.dataset = input_fn(self.data_file, 1, False, self.batch_size)
            self.dataset_iterator = tf.compat.v1.data.make_one_shot_iterator(self.dataset)
            next_element = self.dataset_iterator.get_next()
        with tf.compat.v1.Session(graph=graph) as sess:
            for _ in range(self.n):
                batch = sess.run(next_element)
                # First two elements are the inputs, third is the label.
                yield (batch[0:2], batch[2])

    def __len__(self):
        return self.n
class CollisionCondition(AbstractCondition):
    """Condition that holds while the actor's robot is NOT touching a given body."""

    def __init__(self, not_allowed):
        """not_allowed: pybullet body unique id (int) that must not be contacted."""
        super(CollisionCondition, self).__init__()
        # Fixed: the original also tested isinstance(..., long); `long` does
        # not exist in Python 3 and raised NameError on any non-int input.
        if not isinstance(not_allowed, int):
            raise TypeError('collision condition requires int handle')
        self.not_allowed = not_allowed

    def _check(self, world, state, actor, prev_state=None):
        """Return True when there are no contact points between the robot and the forbidden body."""
        handle = actor.robot.handle
        pts = pb.getContactPoints(bodyA=handle, bodyB=self.not_allowed)
        return len(pts) == 0
class SubDataset(object):
    """One tracking sub-dataset: loads per-video/per-track frame annotations
    from a JSON file, filters degenerate boxes, and serves (template, search)
    frame pairs for Siamese-tracker training."""
    def __init__(self, name, root, anno, frame_range, num_use, start_idx):
        # NOTE(review): placeholder path — must be replaced with the real
        # project root before use.
        cur_path = 'your_project_path/pysot'
        self.name = name
        self.root = os.path.join(cur_path, root)
        self.anno = os.path.join(cur_path, anno)
        # Max frame distance between template and search frames.
        self.frame_range = frame_range
        # How many samples to draw from this sub-dataset (-1 = all videos).
        self.num_use = num_use
        # Global index offset of this sub-dataset in the concatenated dataset.
        self.start_idx = start_idx
        logger.info(('loading ' + name))
        with open(self.anno, 'r') as f:
            meta_data = json.load(f)
            meta_data = self._filter_zero(meta_data)
        # Attach a sorted integer frame list per track; drop empty tracks/videos.
        for video in list(meta_data.keys()):
            for track in meta_data[video]:
                frames = meta_data[video][track]
                frames = list(map(int, filter((lambda x: x.isdigit()), frames.keys())))
                frames.sort()
                meta_data[video][track]['frames'] = frames
                if (len(frames) <= 0):
                    logger.warning('{}/{} has no frames'.format(video, track))
                    del meta_data[video][track]
        for video in list(meta_data.keys()):
            if (len(meta_data[video]) <= 0):
                logger.warning('{} has no tracks'.format(video))
                del meta_data[video]
        self.labels = meta_data
        self.num = len(self.labels)
        self.num_use = (self.num if (self.num_use == (- 1)) else self.num_use)
        self.videos = list(meta_data.keys())
        logger.info('{} loaded'.format(self.name))
        # Crop filename pattern: {frame}.{track}.{x|z}.jpg
        self.path_format = '{}.{}.{}.jpg'
        self.pick = self.shuffle()
    def _filter_zero(self, meta_data):
        # Drop boxes with non-positive width/height; dict-valued bbox entries
        # (extra metadata) are kept without the size check.
        meta_data_new = {}
        for (video, tracks) in meta_data.items():
            new_tracks = {}
            for (trk, frames) in tracks.items():
                new_frames = {}
                for (frm, bbox) in frames.items():
                    if (not isinstance(bbox, dict)):
                        if (len(bbox) == 4):
                            # (x1, y1, x2, y2) corner format.
                            (x1, y1, x2, y2) = bbox
                            (w, h) = ((x2 - x1), (y2 - y1))
                        else:
                            # Already (w, h).
                            (w, h) = bbox
                        if ((w <= 0) or (h <= 0)):
                            continue
                    new_frames[frm] = bbox
                if (len(new_frames) > 0):
                    new_tracks[trk] = new_frames
            if (len(new_tracks) > 0):
                meta_data_new[video] = new_tracks
        return meta_data_new
    def log(self):
        # One-line summary of this sub-dataset's sampling configuration.
        logger.info('{} start-index {} select [{}/{}] path_format {}'.format(self.name, self.start_idx, self.num_use, self.num, self.path_format))
    def shuffle(self):
        # Build a shuffled index list of length num_use by repeating (and
        # reshuffling) the video index range until enough picks accumulate;
        # allows num_use > num (oversampling).
        lists = list(range(self.start_idx, (self.start_idx + self.num)))
        pick = []
        while (len(pick) < self.num_use):
            np.random.shuffle(lists)
            pick += lists
        return pick[:self.num_use]
    def get_image_anno(self, video, track, frame):
        # Resolve the cropped search-region image path and its annotation.
        frame = '{:06d}'.format(frame)
        image_path = os.path.join(self.root, video, self.path_format.format(frame, track, 'x'))
        image_anno = self.labels[video][track][frame]
        return (image_path, image_anno)
    def get_positive_pair(self, index):
        # Sample a (template, search) pair from the same track, with the
        # search frame drawn within +/-frame_range of the template frame.
        video_name = self.videos[index]
        video = self.labels[video_name]
        track = np.random.choice(list(video.keys()))
        track_info = video[track]
        frames = track_info['frames']
        template_frame = np.random.randint(0, len(frames))
        left = max((template_frame - self.frame_range), 0)
        right = (min((template_frame + self.frame_range), (len(frames) - 1)) + 1)
        search_range = frames[left:right]
        template_frame = frames[template_frame]
        search_frame = np.random.choice(search_range)
        return (self.get_image_anno(video_name, track, template_frame), self.get_image_anno(video_name, track, search_frame))
    def get_random_target(self, index=(- 1)):
        # Sample an arbitrary frame from an arbitrary track (negative sampling).
        if (index == (- 1)):
            index = np.random.randint(0, self.num)
        video_name = self.videos[index]
        video = self.labels[video_name]
        track = np.random.choice(list(video.keys()))
        track_info = video[track]
        frames = track_info['frames']
        frame = np.random.choice(frames)
        return self.get_image_anno(video_name, track, frame)
    def __len__(self):
        return self.num
class ASTPreTrainedModel(metaclass=DummyObject):
    """Import-time stand-in used when PyTorch is unavailable;
    any use raises an informative backend error."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Reports that the 'torch' backend must be installed.
        requires_backends(self, ['torch'])
class DeployModel(Model):
    """Inference-only model wrapper: constructed in eval mode, with the
    training-loop hooks (`step`, `schedulerStep`) explicitly disabled."""

    def __init__(self, arch: Union[(NetType, CType)]):
        super().__init__(arch)
        # Deployment models never train.
        self.eval()

    def step(self):
        cls_name = self.__class__.__name__
        raise RuntimeError(f'{cls_name} does not support `step` method.')

    def schedulerStep(self, *args, **kwargs):
        cls_name = self.__class__.__name__
        raise RuntimeError(f'{cls_name} does not support `schedulerStep` method.')
class DependencySubmititLauncher(BaseSubmititLauncher):
    """Hydra sweep launcher for SLURM (via submitit) that, after submitting the
    sweep jobs, invokes a follow-up script (`+next_script=...`) once per job
    so downstream jobs can declare dependencies on them."""
    _EXECUTOR = 'slurm'
    def launch(self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int) -> Sequence[JobReturn]:
        import submitit
        assert (self.config is not None)
        num_jobs = len(job_overrides)
        assert (num_jobs > 0)
        next_script = None
        # Locate the required `next_script=` override in the first job's
        # overrides, then strip it from every job's override list.
        # NOTE(review): `jo.remove(next_script)` raises ValueError if a later
        # job's overrides do not contain the exact same token — presumably all
        # jobs carry identical next_script overrides; confirm.
        for jo in job_overrides:
            if (next_script is None):
                for item in jo:
                    if ('next_script=' in item):
                        next_script = item
                        break
                assert (next_script is not None), 'job overrides must contain +next_script=path/to/next/script'
            jo.remove(next_script)
        # Keep only the path portion after '='.
        idx = next_script.find('=')
        next_script = next_script[(idx + 1):]
        params = self.params
        init_params = {'folder': self.params['submitit_folder']}
        # Executor-construction kwargs must be prefixed with the executor name.
        specific_init_keys = {'max_num_timeout'}
        init_params.update(**{f'{self._EXECUTOR}_{x}': y for (x, y) in params.items() if (x in specific_init_keys)})
        init_keys = (specific_init_keys | {'submitit_folder'})
        executor = submitit.AutoExecutor(cluster=self._EXECUTOR, **init_params)
        # Parameters known to the base config stay unprefixed; everything else
        # gets the executor prefix.
        baseparams = set(OmegaConf.structured(DependencySubmititConf).keys())
        params = {(x if (x in baseparams) else f'{self._EXECUTOR}_{x}'): y for (x, y) in params.items() if (x not in init_keys)}
        executor.update_parameters(**params)
        log.info(f"Submitit '{self._EXECUTOR}' sweep output dir : {self.config.hydra.sweep.dir}")
        sweep_dir = Path(str(self.config.hydra.sweep.dir))
        sweep_dir.mkdir(parents=True, exist_ok=True)
        if ('mode' in self.config.hydra.sweep):
            # Sweep-dir permissions are given in octal.
            mode = int(str(self.config.hydra.sweep.mode), 8)
            os.chmod(sweep_dir, mode=mode)
        job_params: List[Any] = []
        # NOTE(review): `idx` is reused here, overwriting the '=' position
        # computed above — harmless, but worth renaming in a future cleanup.
        for (idx, overrides) in enumerate(job_overrides):
            idx = (initial_job_idx + idx)
            lst = ' '.join(filter_overrides(overrides))
            log.info(f' #{idx} : {lst}')
            job_params.append((list(overrides), 'hydra.sweep.dir', idx, f'job_id_for_{idx}', Singleton.get_state()))
        # Submit all jobs as one SLURM array.
        jobs = executor.map_array(self, *zip(*job_params))
        for (j, jp) in zip(jobs, job_params):
            job_id = str(j.job_id)
            # Array jobs have ids of the form '<array>_<task>'.
            task_id = ('0' if ('_' not in job_id) else job_id.split('_')[1])
            sweep_config = self.config_loader.load_sweep_config(self.config, jp[0])
            dir = sweep_config.hydra.sweep.dir
            # Sanitize interpolation artifacts out of the directory string.
            dir = dir.replace('[', '').replace(']', '').replace('{', '').replace('}', '').replace(',', '_').replace("'", '').replace('"', '')
            # Hand each submitted job's ids to the follow-up script.
            subprocess.call([next_script, job_id, task_id, dir], shell=False)
        # Blocks until every job finishes and collects the results.
        return [j.results()[0] for j in jobs]
def get_ckpt_path(name, root=None, check=False):
    """Resolve the local checkpoint path for `name`, downloading it if missing
    (or if `check` is set and the MD5 does not match the expected hash)."""
    # Legacy alias from the LSUN naming scheme.
    if 'church_outdoor' in name:
        name = name.replace('church_outdoor', 'church')
    assert name in URL_MAP
    cachedir = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
    if root is None:
        root = os.path.join(cachedir, 'diffusion_models_converted')
    path = os.path.join(root, CKPT_MAP[name])
    needs_download = not os.path.exists(path)
    if (not needs_download) and check:
        # Only hash the file when explicitly asked to verify it.
        needs_download = md5_hash(path) != MD5_MAP[name]
    if needs_download:
        print('Downloading {} model from {} to {}'.format(name, URL_MAP[name], path))
        download(URL_MAP[name], path)
        md5 = md5_hash(path)
        assert md5 == MD5_MAP[name], md5
    return path
def eval_step(H, data_input, target, ema_params, rng):
    """Evaluate the VAE on one batch using EMA weights, averaging the result across the 'batch' pmap axis."""
    per_device = VAE(H).apply({'params': ema_params}, data_input, target, rng)
    return lax.pmean(per_device, 'batch')
def test_actionAngleTorus_isochroneApprox_actions():
    """Round-trip check: torus-mapped (J, angle) -> phase-space point ->
    isochrone-approx actions should recover the input actions within tol."""
    from galpy.actionAngle import actionAngleIsochroneApprox, actionAngleTorus
    from galpy.potential import MWPotential2014
    aAIA = actionAngleIsochroneApprox(pot=MWPotential2014, b=0.8)
    tol = -2.5  # log10 relative tolerance
    aAT = actionAngleTorus(pot=MWPotential2014, tol=tol)
    (jr, jphi, jz) = (0.075, 1.1, 0.05)
    angler = numpy.array([0.0])
    anglephi = numpy.array([numpy.pi])
    anglez = numpy.array([(numpy.pi / 2.0)])
    # Map actions+angles to (R, vR, vT, z, vz, phi), then recover the actions.
    RvR = aAT(jr, jphi, jz, angler, anglephi, anglez).T
    ji = aAIA(*RvR)
    djr = numpy.fabs((ji[0] - jr) / jr)
    dlz = numpy.fabs((ji[1] - jphi) / jphi)
    djz = numpy.fabs((ji[2] - jz) / jz)
    # Fixed: the Lz and Jz failure messages previously both said "for Jr",
    # and the third message had a garbled class name
    # ("actionAngleMWPotential2014").
    assert (djr < (10.0 ** tol)), ('actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jr at %f%%' % (djr * 100.0))
    assert (dlz < (10.0 ** tol)), ('actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Lz at %f%%' % (dlz * 100.0))
    assert (djz < (10.0 ** tol)), ('actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jz at %f%%' % (djz * 100.0))
    return None
class Evaluator():
    """Distributed episode evaluator.

    Rolls out complete episodes of `eval_env` under the agent's policy,
    sharding the evaluation batch across all global devices with `jax.pmap`
    and averaging the per-episode metrics over the batch and across devices.
    """

    def __init__(self, eval_env: Environment, agent: Agent, total_batch_size: int, stochastic: bool):
        """Initializes the evaluator.

        Args:
            eval_env: environment used for the evaluation rollouts.
            agent: agent whose policy is evaluated (A2CAgent or RandomAgent).
            total_batch_size: total number of episodes per evaluation run,
                summed over all global devices.
            stochastic: forwarded to `agent.make_policy`.

        Raises:
            ValueError: if `total_batch_size` is not divisible by the global
                device count.
        """
        self.eval_env = eval_env
        self.agent = agent
        self.num_local_devices = jax.local_device_count()
        self.num_global_devices = jax.device_count()
        self.num_workers = (self.num_global_devices // self.num_local_devices)
        if ((total_batch_size % self.num_global_devices) != 0):
            raise ValueError(f'Expected eval total_batch_size to be a multiple of num_devices, got {total_batch_size} and {self.num_global_devices}.')
        self.total_batch_size = total_batch_size
        self.batch_size_per_device = (total_batch_size // self.num_global_devices)
        self.generate_evaluations = jax.pmap(functools.partial(self._generate_evaluations, eval_batch_size=self.batch_size_per_device), axis_name='devices')
        self.stochastic = stochastic

    def _eval_one_episode(self, policy_params: Optional[hk.Params], key: chex.PRNGKey) -> Dict:
        """Plays one full episode and returns its metrics.

        Returns a dict with 'episode_return' and 'episode_length', plus any
        extras carried on the final timestep.
        """
        policy = self.agent.make_policy(policy_params=policy_params, stochastic=self.stochastic)
        if isinstance(self.agent, A2CAgent):
            # The A2C policy returns (action, extra); keep only the action.
            def acting_policy(observation: Any, key: chex.PRNGKey) -> chex.Array:
                (action, _) = policy(observation, key)
                return action
        else:
            acting_policy = policy

        def cond_fun(carry: Tuple[ActingState, float]) -> jnp.bool_:
            # Keep stepping until the timestep is terminal.
            (acting_state, _) = carry
            return (~ acting_state.timestep.last())

        def body_fun(carry: Tuple[ActingState, float]) -> Tuple[ActingState, float]:
            (acting_state, return_) = carry
            (key, action_key) = jax.random.split(acting_state.key)
            # The policy expects a leading batch axis: add it with [None], then
            # squeeze the resulting action back down for the single env step.
            observation = jax.tree_util.tree_map((lambda x: x[None]), acting_state.timestep.observation)
            action = acting_policy(observation, action_key)
            (state, timestep) = self.eval_env.step(acting_state.state, jnp.squeeze(action, axis=0))
            return_ += timestep.reward
            acting_state = ActingState(state=state, timestep=timestep, key=key, episode_count=jnp.array(0, jnp.int32), env_step_count=(acting_state.env_step_count + 1))
            return (acting_state, return_)

        (reset_key, init_key) = jax.random.split(key)
        (state, timestep) = self.eval_env.reset(reset_key)
        acting_state = ActingState(state=state, timestep=timestep, key=init_key, episode_count=jnp.array(0, jnp.int32), env_step_count=jnp.array(0, jnp.int32))
        return_ = jnp.array(0, float)
        (final_acting_state, return_) = jax.lax.while_loop(cond_fun, body_fun, (acting_state, return_))
        eval_metrics = {'episode_return': return_, 'episode_length': final_acting_state.env_step_count}
        extras = final_acting_state.timestep.extras
        if extras:
            eval_metrics.update(extras)
        return eval_metrics

    def _generate_evaluations(self, params_state: ParamsState, key: chex.PRNGKey, eval_batch_size: int) -> Dict:
        """Runs `eval_batch_size` episodes on this device and reduces metrics.

        Raises:
            ValueError: if the agent type is not supported.
        """
        if isinstance(self.agent, A2CAgent):
            policy_params = params_state.params.actor
        elif isinstance(self.agent, RandomAgent):
            policy_params = None
        else:
            # Was a bare `raise ValueError`; give the failure a useful message.
            raise ValueError(f'Unsupported agent type for evaluation: {type(self.agent).__name__}.')
        keys = jax.random.split(key, eval_batch_size)
        eval_metrics = jax.vmap(self._eval_one_episode, in_axes=(None, 0))(policy_params, keys)
        # Mean over the batch on-device, then mean across the 'devices' axis.
        eval_metrics: Dict = jax.lax.pmean(jax.tree_util.tree_map(jnp.mean, eval_metrics), axis_name='devices')
        return eval_metrics

    def run_evaluation(self, params_state: Optional[ParamsState], eval_key: chex.PRNGKey) -> Dict:
        """Runs one distributed evaluation and returns the averaged metrics."""
        eval_keys = jax.random.split(eval_key, self.num_global_devices).reshape(self.num_workers, self.num_local_devices, (- 1))
        # Each worker process feeds only its own local devices' keys into pmap.
        eval_keys_per_worker = eval_keys[jax.process_index()]
        eval_metrics: Dict = self.generate_evaluations(params_state, eval_keys_per_worker)
        return eval_metrics
def augment_and_repeat_episode_data(episode_data, problem_size, nb_runs, aug_s):
    """Prepare node coordinates for evaluation, optionally repeated and augmented.

    Args:
        episode_data: tuple whose first element is the node coordinate tensor
            of shape (batch, problem_size, 2).
        problem_size: number of nodes per instance (unused here; kept for the
            caller's signature).
        nb_runs: number of independent runs; when > 1 the (single-instance)
            batch is repeated that many times.
        aug_s: augmentation factor; only 1 (off) or 8 (x8 symmetry) allowed.
    """
    node_xy = episode_data[0]
    if nb_runs > 1:
        # Repetition is only meaningful for a single-instance batch.
        assert node_xy.shape[0] == 1
        node_xy = node_xy.repeat(nb_runs, 1, 1)
    if aug_s > 1:
        assert aug_s == 8
        node_xy = augment_xy_data_by_8_fold(node_xy)
    return node_xy
class MolGraph(object):
    """Graph encoding of a `MolTree` in the directed-message-passing format.

    Builds, from the tree's nodes and inter-node bonds:
      - n_atoms / n_bonds: node count and directed-bond count (2 per bond),
      - f_atoms: one feature vector per node,
      - f_bonds: one feature vector per directed bond,
      - a2b: incoming directed-bond indices per node,
      - b2a: source node of each directed bond,
      - b2revb: index of each directed bond's reverse bond.
    """

    def __init__(self, moltree: MolTree, args: Namespace):
        self.moltree = moltree
        self.n_atoms = 0
        self.n_bonds = 0
        self.f_atoms = []
        self.f_bonds = []
        self.a2b = []
        self.b2a = []
        self.b2revb = []
        self.n_atoms = self.moltree.size()
        # GetAtoms() is loop-invariant; hoisted out of the node loop
        # (the original re-fetched the atom list on every iteration).
        atoms = moltree.mol.GetAtoms()
        for atom in moltree.get_nodes():
            self.f_atoms.append(atom.node_features(atoms))
        # Truncate to exactly n_atoms feature vectors.
        self.f_atoms = [self.f_atoms[i] for i in range(self.n_atoms)]
        for _ in range(self.n_atoms):
            self.a2b.append([])
        # Enumerate each unordered node pair once; add both directed bonds.
        for a1 in range(self.n_atoms):
            for a2 in range((a1 + 1), self.n_atoms):
                bond = moltree.get_bond_between_node_pair(a1, a2)
                if (bond is None):
                    continue
                f_bond = bond_features(bond)
                if args.atom_messages:
                    # Atom-message variant: both directions share the bare
                    # bond features.
                    self.f_bonds.append(f_bond)
                    self.f_bonds.append(f_bond)
                else:
                    # Bond-message variant: prefix each direction with its
                    # source node's features.
                    self.f_bonds.append((self.f_atoms[a1] + f_bond))
                    self.f_bonds.append((self.f_atoms[a2] + f_bond))
                b1 = self.n_bonds  # directed bond a1 -> a2
                b2 = (b1 + 1)      # directed bond a2 -> a1
                self.a2b[a2].append(b1)
                self.b2a.append(a1)
                self.a2b[a1].append(b2)
                self.b2a.append(a2)
                self.b2revb.append(b2)
                self.b2revb.append(b1)
                self.n_bonds += 2
class Generator(nn.Module):
    """DCGAN-style generator: maps a (N, z_size, 1, 1) latent tensor to a
    (N, 3, 64, 64) image in [-1, 1] via five transposed convolutions."""

    def __init__(self, ct1_channels=512, ct2_channels=256, ct3_channels=128, ct4_channels=64, d_channels_in_2=False, z_size=100):
        """Builds the network.

        Args:
            ct1_channels: channels after the first transposed conv.
            ct2_channels, ct3_channels, ct4_channels: channels of the later
                stages (ignored when `d_channels_in_2` is set).
            d_channels_in_2: if True, derive each stage by halving the
                previous stage's channel count instead of using the explicit
                arguments.
            z_size: dimensionality of the latent input.
        """
        super().__init__()
        self.ct1_channels = ct1_channels
        # Spatial size of the first feature map (4x4).
        self.pheight = 4
        self.pwidth = 4
        if d_channels_in_2:
            self.ct2_channels = self.ct1_channels // 2
            self.ct3_channels = self.ct2_channels // 2
            self.ct4_channels = self.ct3_channels // 2
        else:
            self.ct2_channels = ct2_channels
            self.ct3_channels = ct3_channels
            self.ct4_channels = ct4_channels
        # 1x1 -> 4x4, then 4 -> 8 -> 16 -> 32 -> 64 via stride-2 upsampling.
        self.convt_0 = nn.ConvTranspose2d(z_size, self.ct1_channels, 4, stride=1, padding=0, bias=False)
        self.bnorm0 = nn.BatchNorm2d(self.ct1_channels)
        self.convt_1 = nn.ConvTranspose2d(self.ct1_channels, self.ct2_channels, 4, stride=2, padding=1, bias=False)
        self.bnorm1 = nn.BatchNorm2d(self.ct2_channels)
        self.convt_2 = nn.ConvTranspose2d(self.ct2_channels, self.ct3_channels, 4, stride=2, padding=1, bias=False)
        self.bnorm2 = nn.BatchNorm2d(self.ct3_channels)
        self.convt_3 = nn.ConvTranspose2d(self.ct3_channels, self.ct4_channels, 4, stride=2, padding=1, bias=False)
        self.bnorm3 = nn.BatchNorm2d(self.ct4_channels)
        self.convt_4 = nn.ConvTranspose2d(self.ct4_channels, 3, 4, stride=2, padding=1, bias=False)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

    def forward(self, z):
        """Generates images from latent codes `z` of shape (N, z_size, 1, 1)."""
        x = z
        stages = ((self.convt_0, self.bnorm0), (self.convt_1, self.bnorm1), (self.convt_2, self.bnorm2), (self.convt_3, self.bnorm3))
        for convt, bnorm in stages:
            x = self.relu(bnorm(convt(x)))
        return self.tanh(self.convt_4(x))
class AvalonScoring():
    """Computes per-skill scores for Avalon games from side beliefs and votes."""

    def __init__(self, config: 'AvalonBasicConfig') -> None:
        # Forward-ref annotation so the class imports without AvalonBasicConfig
        # in scope.
        self.config = config

    def deduction_acc(self, true_player_sides, believed_player_sides) -> float:
        """Mean fraction of players whose side is correctly deduced.

        Beliefs are thresholded at 0.5 (>= 0.5 -> 1, otherwise 0) and compared
        element-wise against the true sides; accuracy is averaged per game and
        then over games.
        """
        true_player_sides = np.array(true_player_sides)
        believed = np.array(believed_player_sides)
        believed = np.where(believed >= 0.5, 1, believed)
        believed = np.where(believed < 0.5, 0, believed)
        # Divide by the actual number of players per game (was hard-coded to 5,
        # which only worked for 5-player games; identical result for width 5).
        num_players = true_player_sides.shape[1]
        return np.mean(np.sum(believed == true_player_sides, axis=1) / num_players)

    def score_deduction(self, true_player_sides, believed_player_sides):
        """Mean binary cross-entropy between true sides and belief probabilities.

        Unknown beliefs (-1) count as maximally uncertain (0.5); probabilities
        are clipped to [0.0001, 0.9999] so the logs stay finite.
        """
        true_player_sides = np.array(true_player_sides)
        believed = np.array(believed_player_sides, dtype=float)
        believed = np.where(believed == -1, 0.5, believed)
        believed = np.where(believed >= 1, 0.9999, believed)
        believed = np.where(believed <= 0, 0.0001, believed)
        cross_entropy = -np.sum((true_player_sides * np.log(believed)) + ((1 - true_player_sides) * np.log(1 - believed)), axis=1)
        return np.mean(cross_entropy)

    def score_deception(self, other_player_sides, other_player_beliefs):
        """Average belief held by the masked players, per game then over games.

        `other_player_sides` acts as a 0/1 mask selecting which players'
        beliefs are counted (presumably the good players — verify at caller).
        """
        good_judgement = np.sum(other_player_sides * other_player_beliefs, axis=1) / np.sum(other_player_sides, axis=1)
        return np.mean(good_judgement)

    def score_influence_per_game(self, true_vote, vote_outcome):
        """Fraction of rounds where the player's vote matched the outcome."""
        return np.mean(true_vote == vote_outcome)

    def score_leadership_per_game(self, vote_outcome):
        """Mean vote outcome (fraction of approved votes) for one game."""
        return np.mean(vote_outcome)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.