code |
|---|
class WaveformSignal(TimeSeries, WaveformMixin):
def __init__(self, *args, **kwargs):
raise NotImplementedError() |
class BiLinear(Layer):
def __init__(self, name='bi_linear'):
super(BiLinear, self).__init__(name)
self.projecting_layer = None
def __call__(self, t0, t1):
hidden_units = t0.shape.as_list()[(- 1)]
if (self.projecting_layer is None):
self.projecting_layer = tf.keras.layers.Dense(hidden_units, activation=None, use_bias=False)
t0 = self.projecting_layer(t0)
return tf.matmul(t0, t1, transpose_b=True) |
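For reference, a minimal, self-contained sketch (plain TensorFlow, not the Layer base class used above) of the bilinear score this layer computes; the tensor names and shapes are illustrative assumptions.
import tensorflow as tf

t0 = tf.random.normal([2, 8])   # e.g. 2 query vectors of width 8 (made-up shapes)
t1 = tf.random.normal([5, 8])   # e.g. 5 candidate vectors of the same width
proj = tf.keras.layers.Dense(t0.shape[-1], activation=None, use_bias=False)
scores = tf.matmul(proj(t0), t1, transpose_b=True)  # pairwise scores, shape [2, 5]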
def GetTriadEdges_PUNGraph(Graph, SampleEdges=(- 1)):
return _snap.GetTriadEdges_PUNGraph(Graph, SampleEdges) |
def sample_other_than(black_list: Set[int], x: np.ndarray) -> int:
res = np.random.randint(0, len(x))
while (res in black_list):
res = np.random.randint(0, len(x))
return res |
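A quick illustration of the rejection-sampling helper above, assuming the function is importable; the toy array is made up for the example.
import numpy as np

x = np.arange(5)                       # candidate indices 0..4
idx = sample_other_than({0, 1, 2}, x)  # redraws until the index is outside the blacklist
assert idx in (3, 4)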
def initialize_weights(*models):
for model in models:
for m in model.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.fill_(0.0001)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, 0.0001)
m.bias.data.zero_() |
@CRFFeatureFactory.register('entity_match')
class CustomEntityMatchFactory(CRFFeatureFactory):
def __init__(self, factory_config, **shared):
super(CustomEntityMatchFactory, self).__init__(factory_config, **shared)
self.use_stemming = self.args['use_stemming']
self.tagging_scheme = TaggingScheme(self.args['tagging_scheme_code'])
self._entities = None
self.entities = self.args.get('entities')
ent_filter = self.args.get('entity_filter')
if ent_filter:
try:
_check_custom_entity_filter(ent_filter)
except _InvalidCustomEntityFilter as e:
logger.warning("Invalid filter '%s', invalid arguments have been ignored: %s", ent_filter, e)
self.entity_filter = (ent_filter or dict())
@property
def entities(self):
return self._entities
@entities.setter
def entities(self, value):
if (value is not None):
self._entities = value
self.args['entities'] = value
def fit(self, dataset, intent):
entities_names = extract_intent_entities(dataset, (lambda e: (not is_builtin_entity(e))))[intent]
extensible = self.entity_filter.get(AUTOMATICALLY_EXTENSIBLE)
if (extensible is not None):
entities_names = [e for e in entities_names if (dataset[ENTITIES][e][AUTOMATICALLY_EXTENSIBLE] == extensible)]
self.entities = list(entities_names)
return self
def _transform(self, tokens):
if self.use_stemming:
light_tokens = (stem_token(t, self.resources) for t in tokens)
else:
light_tokens = (normalize_token(t) for t in tokens)
current_index = 0
transformed_tokens = []
for light_token in light_tokens:
transformed_token = Token(value=light_token, start=current_index, end=(current_index + len(light_token)))
transformed_tokens.append(transformed_token)
current_index = (transformed_token.end + 1)
return transformed_tokens
def build_features(self):
features = []
for entity_name in self.entities:
entity_match = self._build_entity_match_fn(entity_name)
for offset in self.offsets:
feature = Feature(('entity_match_%s' % entity_name), entity_match, offset, self.drop_out)
features.append(feature)
return features
def _build_entity_match_fn(self, entity):
def entity_match(tokens, token_index):
transformed_tokens = self._transform(tokens)
text = initial_string_from_tokens(transformed_tokens)
token_start = transformed_tokens[token_index].start
token_end = transformed_tokens[token_index].end
custom_entities = self.custom_entity_parser.parse(text, scope=[entity], use_cache=True)
custom_entities = [ent for ent in custom_entities if entity_filter(ent, token_start, token_end)]
if custom_entities:
ent = custom_entities[0]
indexes = []
for (index, token) in enumerate(transformed_tokens):
if entity_filter(ent, token.start, token.end):
indexes.append(index)
return get_scheme_prefix(token_index, indexes, self.tagging_scheme)
return None
return entity_match
def get_required_resources(self):
if self.use_stemming:
return {STEMS: True, CUSTOM_ENTITY_PARSER_USAGE: CustomEntityParserUsage.WITH_STEMS}
return {STEMS: False, CUSTOM_ENTITY_PARSER_USAGE: CustomEntityParserUsage.WITHOUT_STEMS} |
@wrap_experiment
def trpo_pendulum(ctxt=None, seed=1):
set_seed(seed)
env = GarageEnv(env_name='InvertedDoublePendulum-v2')
runner = LocalRunner(ctxt)
policy = GaussianMLPPolicy(env.spec, hidden_sizes=[32, 32], hidden_nonlinearity=torch.tanh, output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec, hidden_sizes=(32, 32), hidden_nonlinearity=torch.tanh, output_nonlinearity=None)
algo = TRPO(env_spec=env.spec, policy=policy, value_function=value_function, max_path_length=100, discount=0.99, center_adv=False)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=1024) |
def rename_keys(original_param_names):
block_names = [v.split('_')[0].split('block')[1] for v in original_param_names if v.startswith('block')]
block_names = list(set(block_names))
block_names = sorted(block_names)
num_blocks = len(block_names)
block_name_mapping = {b: str(i) for (b, i) in zip(block_names, range(num_blocks))}
rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight'))
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight'))
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias'))
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean'))
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var'))
for b in block_names:
hf_b = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight'))
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight'))
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias'))
rename_keys.append((f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean'))
rename_keys.append((f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var'))
rename_keys.append((f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight'))
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight'))
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias'))
rename_keys.append((f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean'))
rename_keys.append((f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var'))
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight'))
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias'))
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight'))
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias'))
rename_keys.append((f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight'))
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight'))
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias'))
rename_keys.append((f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean'))
rename_keys.append((f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var'))
key_mapping = {}
for item in rename_keys:
if (item[0] in original_param_names):
key_mapping[item[0]] = ('vision_model.' + item[1])
rename_keys = []
old = 'tf_bert_model/bert'
new = 'text_model'
for i in range(12):
rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/query/kernel:0', f'{new}.encoder.layer.{i}.attention.self.query.weight'))
rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/query/bias:0', f'{new}.encoder.layer.{i}.attention.self.query.bias'))
rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/key/kernel:0', f'{new}.encoder.layer.{i}.attention.self.key.weight'))
rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/key/bias:0', f'{new}.encoder.layer.{i}.attention.self.key.bias'))
rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/value/kernel:0', f'{new}.encoder.layer.{i}.attention.self.value.weight'))
rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/value/bias:0', f'{new}.encoder.layer.{i}.attention.self.value.bias'))
rename_keys.append((f'{old}/encoder/layer_._{i}/attention/output/dense/kernel:0', f'{new}.encoder.layer.{i}.attention.output.dense.weight'))
rename_keys.append((f'{old}/encoder/layer_._{i}/attention/output/dense/bias:0', f'{new}.encoder.layer.{i}.attention.output.dense.bias'))
rename_keys.append((f'{old}/encoder/layer_._{i}/attention/output/LayerNorm/gamma:0', f'{new}.encoder.layer.{i}.attention.output.LayerNorm.weight'))
rename_keys.append((f'{old}/encoder/layer_._{i}/attention/output/LayerNorm/beta:0', f'{new}.encoder.layer.{i}.attention.output.LayerNorm.bias'))
rename_keys.append((f'{old}/encoder/layer_._{i}/intermediate/dense/kernel:0', f'{new}.encoder.layer.{i}.intermediate.dense.weight'))
rename_keys.append((f'{old}/encoder/layer_._{i}/intermediate/dense/bias:0', f'{new}.encoder.layer.{i}.intermediate.dense.bias'))
rename_keys.append((f'{old}/encoder/layer_._{i}/output/dense/kernel:0', f'{new}.encoder.layer.{i}.output.dense.weight'))
rename_keys.append((f'{old}/encoder/layer_._{i}/output/dense/bias:0', f'{new}.encoder.layer.{i}.output.dense.bias'))
rename_keys.append((f'{old}/encoder/layer_._{i}/output/LayerNorm/gamma:0', f'{new}.encoder.layer.{i}.output.LayerNorm.weight'))
rename_keys.append((f'{old}/encoder/layer_._{i}/output/LayerNorm/beta:0', f'{new}.encoder.layer.{i}.output.LayerNorm.bias'))
rename_keys.append((f'{old}/embeddings/word_embeddings/weight:0', f'{new}.embeddings.word_embeddings.weight'))
rename_keys.append((f'{old}/embeddings/position_embeddings/embeddings:0', f'{new}.embeddings.position_embeddings.weight'))
rename_keys.append((f'{old}/embeddings/token_type_embeddings/embeddings:0', f'{new}.embeddings.token_type_embeddings.weight'))
rename_keys.append((f'{old}/embeddings/LayerNorm/gamma:0', f'{new}.embeddings.LayerNorm.weight'))
rename_keys.append((f'{old}/embeddings/LayerNorm/beta:0', f'{new}.embeddings.LayerNorm.bias'))
rename_keys.append((f'{old}/pooler/dense/kernel:0', f'{new}.pooler.dense.weight'))
rename_keys.append((f'{old}/pooler/dense/bias:0', f'{new}.pooler.dense.bias'))
rename_keys.append(('dense/kernel:0', 'text_projection.weight'))
rename_keys.append(('dense/bias:0', 'text_projection.bias'))
rename_keys.append(('temperature:0', 'temperature'))
for item in rename_keys:
if (item[0] in original_param_names):
key_mapping[item[0]] = item[1]
return key_mapping |
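A hedged sketch of how the returned mapping might be applied when porting TF weights into a PyTorch state dict; the toy `tf_params` dict and the note about kernel transposition are assumptions for illustration, not part of the function above.
import numpy as np
import torch

# Toy stand-in for the real TF checkpoint: variable name -> numpy array.
tf_params = {'stem_conv/kernel:0': np.zeros((3, 3, 3, 32), dtype=np.float32)}
key_mapping = rename_keys(list(tf_params.keys()))
state_dict = {}
for tf_name, hf_name in key_mapping.items():
    # Conv/dense kernels usually also need transposing (e.g. HWIO -> OIHW)
    # before loading into PyTorch; that step is omitted in this sketch.
    state_dict[hf_name] = torch.from_numpy(np.asarray(tf_params[tf_name]))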
@timer_with_task('Doing query with k-Means')
def kmeans_query(clf, features, deep_feats, color_feats, labels, retrieval_top_n=5):
label = clf.predict(features[0].reshape(1, features[0].shape[0]))
ind = np.where((clf.labels_ == label))
d_feats = deep_feats[ind]
c_feats = color_feats[ind]
n_labels = list(np.array(labels)[ind])
results = get_deep_color_top_n(features, d_feats, c_feats, n_labels, retrieval_top_n)
return results |
def get_config():
modulenames = (set(sys.modules) & set(globals()))
allmodules = [sys.modules[name] for name in modulenames]
return {'name': 'python', 'version': platform.python_version(), 'modules': str(allmodules)} |
def run_eval_bleu(cmd):
output = check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode('utf-8').strip()
print(output)
bleu = (- 1.0)
for line in output.strip().split('\n'):
m = BLEU_REGEX.search(line)
if (m is not None):
bleu = m.groups()[0]
bleu = float(bleu)
break
return bleu |
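`BLEU_REGEX` is defined elsewhere in the original module; an assumed (not original) pattern that would work with the parsing loop above, for sacrebleu-style output lines, is:
import re

# Example only: matches lines like "BLEU = 27.53 58.9/33.3/21.4/14.2 ..."
BLEU_REGEX = re.compile(r'BLEU[^=]*=\s*([0-9.]+)')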
class Resnet_Imb_CB_beta0999_ep100_cifar100_2():
def __init__(self):
self.set_config()
def set_config(self):
self.filename_head = (self.__class__.__name__ + '_')
self.checkpoint_path = None
def get_model(self):
model = resnet.ResNet18(num_classes=100)
return model
def get_dataset(self, return_target=True):
DOWNLOAD = False
tr_transformer = alb.Compose([albtr.Flip(p=0.5), albtr.ShiftScaleRotate(shift_limit=0.15, scale_limit=0.15, rotate_limit=15, p=0.5), albtr.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201)), albToTensor()])
ts_transformer = alb.Compose([albtr.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201)), albToTensor()])
usage_rate = (((1,) * 50) + ((0.05,) * 50))
seed = 2020
(tr_ds, tr_tg) = cifar.get_dataset_cifar100(True, DOWNLOAD, torch_data_utils.ImgDataset, tr_transformer, usage_rate, seed, return_target)
(ts_ds, ts_tg) = cifar.get_dataset_cifar100(False, DOWNLOAD, torch_data_utils.ImgDataset, ts_transformer, None, None, return_target)
if return_target:
return (tr_ds, ts_ds, tr_tg, ts_tg)
else:
return (tr_ds, ts_ds)
def train_model(self, use_checkpoint=False, fine_turning=False):
(tr_ds, ts_ds, tr_tg, ts_tg) = self.get_dataset(return_target=True)
if use_checkpoint:
CP = get_checkpoint(self.checkpoint_path)
else:
CP = None
model = self.get_model()
if (CP is not None):
model.load_state_dict(CP['state_dict'])
TR_BATCH_SIZE = 128
TS_BATCH_SIZE = 512
tr_loader = torch_data_utils.get_dataloader(tr_ds, TR_BATCH_SIZE)
ts_loader = torch_data_utils.get_dataloader(ts_ds, TS_BATCH_SIZE, shuffle=False)
LR = 0.1
opt = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=0.0005)
if (CP is not None):
if (not fine_turning):
opt.load_state_dict(CP['optimizer'])
tr_criterion = cb_loss.ClassBalanced_CELoss(tr_tg, 100, beta=0.999)
vl_criterion = cb_loss.ClassBalanced_CELoss(ts_tg, 100, beta=0.999)
grad_accum_steps = 1
start_epoch = (0 if ((CP is None) or fine_turning) else CP['epoch'])
EPOCHS = 100
warmup_epoch = 0
step_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[51, 86, 101], gamma=0.1)
model = training.train_model(model, tr_loader, ts_loader, opt, tr_criterion, vl_criterion, grad_accum_steps, start_epoch, EPOCHS, warmup_epoch, step_scheduler, self.filename_head, use_yoto=False)
return |
@pytest.mark.parametrize('data', ['bin_dense_train_data', 'bin_sparse_train_data'])
@pytest.mark.parametrize('loss', ['l1', 'l2'])
def test_fit_linear_binary(data, loss, request):
(X, y) = request.getfixturevalue(data)
clf = LinearSVC(loss=loss, random_state=0, max_iter=10)
clf.fit(X, y)
assert (list(clf.classes_) == [0, 1])
assert (clf.score(X, y) == 1.0)
y_pred = clf.decision_function(X).ravel() |
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
super(Bottleneck, self).__init__()
width = int((math.floor((planes * (base_width / 64))) * cardinality))
first_planes = (width // reduce_first)
outplanes = (planes * self.expansion)
first_dilation = (first_dilation or dilation)
use_aa = ((aa_layer is not None) and ((stride == 2) or (first_dilation != dilation)))
self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False)
self.bn1 = norm_layer(first_planes)
self.act1 = act_layer(inplace=True)
self.conv2 = nn.Conv2d(first_planes, width, kernel_size=3, stride=(1 if use_aa else stride), padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False)
self.bn2 = norm_layer(width)
self.drop_block = (drop_block() if (drop_block is not None) else nn.Identity())
self.act2 = act_layer(inplace=True)
self.aa = create_aa(aa_layer, channels=width, stride=stride, enable=use_aa)
self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False)
self.bn3 = norm_layer(outplanes)
self.se = create_attn(attn_layer, outplanes)
self.act3 = act_layer(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.drop_path = drop_path
def zero_init_last(self):
nn.init.zeros_(self.bn3.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.drop_block(x)
x = self.act2(x)
x = self.aa(x)
x = self.conv3(x)
x = self.bn3(x)
if (self.se is not None):
x = self.se(x)
if (self.drop_path is not None):
x = self.drop_path(x)
if (self.downsample is not None):
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act3(x)
return x |
class MILSTMWithAttentionCell(AttentionCell):
def __init__(self, encoder_output_dim, encoder_outputs, decoder_input_dim, decoder_state_dim, name, attention_type, weighted_encoder_outputs, forget_bias, lstm_memory_optimization, attention_memory_optimization, forward_only=False):
decoder_cell = MILSTMCell(input_size=decoder_input_dim, hidden_size=decoder_state_dim, forget_bias=forget_bias, memory_optimization=lstm_memory_optimization, name='{}/decoder'.format(name), forward_only=False, drop_states=False)
super(MILSTMWithAttentionCell, self).__init__(encoder_output_dim=encoder_output_dim, encoder_outputs=encoder_outputs, decoder_cell=decoder_cell, decoder_state_dim=decoder_state_dim, name=name, attention_type=attention_type, weighted_encoder_outputs=weighted_encoder_outputs, attention_memory_optimization=attention_memory_optimization, forward_only=forward_only) |
def train(sess, model, train_url, test_url, batch_size, vocab_size, alternate_epochs=1, lexicon=[], result_file='test.txt', warm_up_period=100):
(train_set, train_count) = utils.data_set(train_url)
(test_set, test_count) = utils.data_set(test_url)
train_size = len(train_set)
validation_size = int((train_size * 0.1))
dev_set = train_set[:validation_size]
dev_count = train_count[:validation_size]
train_set = train_set[validation_size:]
train_count = train_count[validation_size:]
optimize_jointly = True
dev_batches = utils.create_batches(len(dev_set), batch_size, shuffle=False)
test_batches = utils.create_batches(len(test_set), batch_size, shuffle=False)
warm_up = 0
min_alpha = 0.001
best_print_ana_ppx = float('inf')  # lower perplexity is better; start from +inf so the first dev result is kept
early_stopping_iters = 30
no_improvement_iters = 0
stopped = False
epoch = (- 1)
while (not stopped):
epoch += 1
train_batches = utils.create_batches(len(train_set), batch_size, shuffle=True)
if (warm_up < 1.0):
warm_up += (1.0 / warm_up_period)
else:
warm_up = 1.0
if optimize_jointly:
optim = model.optim_all
print_mode = 'updating encoder and decoder'
elif (switch == 0):
optim = model.optim_dec
print_mode = 'updating decoder'
else:
optim = model.optim_enc
print_mode = 'updating encoder'
for i in range(alternate_epochs):
loss_sum = 0.0
ana_loss_sum = 0.0
ppx_sum = 0.0
kld_sum_train = 0.0
ana_kld_sum_train = 0.0
word_count = 0
doc_count = 0
recon_sum = 0.0
for idx_batch in train_batches:
(data_batch, count_batch, mask) = utils.fetch_data(train_set, train_count, idx_batch, vocab_size)
input_feed = {model.x.name: data_batch, model.mask.name: mask, model.keep_prob.name: 0.75, model.warm_up.name: warm_up, model.min_alpha.name: min_alpha}
(_, (loss, recon, kld_train, ana_loss, ana_kld_train)) = sess.run((optim, [model.true_objective, model.recons_loss, model.kld, model.analytical_objective, model.analytical_kld]), input_feed)
loss_sum += np.sum(loss)
ana_loss_sum += np.sum(ana_loss)
kld_sum_train += (np.sum(kld_train) / np.sum(mask))
ana_kld_sum_train += (np.sum(ana_kld_train) / np.sum(mask))
word_count += np.sum(count_batch)
count_batch = np.add(count_batch, 1e-12)
ppx_sum += np.sum(np.divide(loss, count_batch))
doc_count += np.sum(mask)
recon_sum += np.sum(recon)
print_loss = (recon_sum / len(train_batches))
dec_vars = utils.variable_parser(tf.trainable_variables(), 'decoder')
phi = dec_vars[0]
phi = sess.run(phi)
utils.print_top_words(phi, lexicon, result_file=None)
print_ppx = np.exp((loss_sum / word_count))
print_ana_ppx = np.exp((ana_loss_sum / word_count))
print_ppx_perdoc = np.exp((ppx_sum / doc_count))
print_kld_train = (kld_sum_train / len(train_batches))
print_ana_kld_train = (ana_kld_sum_train / len(train_batches))
print('| Epoch train: {:d} |'.format((epoch + 1)), print_mode, '{:d}'.format(i), '| Corpus ppx: {:.5f}'.format(print_ppx), '| Per doc ppx: {:.5f}'.format(print_ppx_perdoc), '| KLD: {:.5}'.format(print_kld_train), '| Loss: {:.5}'.format(print_loss), '| ppx anal.: {:.5f}'.format(print_ana_ppx), '|KLD anal.: {:.5f}'.format(print_ana_kld_train))
loss_sum = 0.0
kld_sum_dev = 0.0
ppx_sum = 0.0
word_count = 0
doc_count = 0
recon_sum = 0.0
print_ana_ppx = 0.0
ana_loss_sum = 0.0
for idx_batch in dev_batches:
(data_batch, count_batch, mask) = utils.fetch_data(dev_set, dev_count, idx_batch, vocab_size)
input_feed = {model.x.name: data_batch, model.mask.name: mask, model.keep_prob.name: 1.0, model.warm_up.name: 1.0, model.min_alpha.name: min_alpha}
(loss, recon, kld_dev, ana_kld, ana_loss) = sess.run([model.objective, model.recons_loss, model.kld, model.analytical_kld, model.analytical_objective], input_feed)
loss_sum += np.sum(loss)
ana_loss_sum += np.sum(ana_loss)
kld_sum_dev += (np.sum(kld_dev) / np.sum(mask))
word_count += np.sum(count_batch)
count_batch = np.add(count_batch, 1e-12)
ppx_sum += np.sum(np.divide(loss, count_batch))
doc_count += np.sum(mask)
recon_sum += np.sum(recon)
print_ana_ppx = np.exp((ana_loss_sum / word_count))
print_ppx = np.exp((loss_sum / word_count))
print_ppx_perdoc = np.exp((ppx_sum / doc_count))
print_kld_dev = (kld_sum_dev / len(dev_batches))
print_loss = (recon_sum / len(dev_batches))
if (print_ppx < best_print_ana_ppx):
no_improvement_iters = 0
best_print_ana_ppx = print_ppx
tf.train.Saver().save(sess, 'models/improved_model_implicit_gradients')
else:
no_improvement_iters += 1
print('no_improvement_iters', no_improvement_iters, 'best ppx', best_print_ana_ppx)
if (no_improvement_iters >= early_stopping_iters):
stopped = True
print('stop training after', epoch, 'iterations,no_improvement_iters', no_improvement_iters)
print('load stored model')
tf.train.Saver().restore(sess, 'models/improved_model_implicit_gradients')
print('| Epoch dev: {:d} |'.format((epoch + 1)), '| Perplexity: {:.9f}'.format(print_ppx), '| Per doc ppx: {:.5f}'.format(print_ppx_perdoc), '| KLD: {:.5}'.format(print_kld_dev), '| Loss: {:.5}'.format(print_loss))
if FLAGS.test:
loss_sum = 0.0
kld_sum_test = 0.0
ppx_sum = 0.0
word_count = 0
doc_count = 0
recon_sum = 0.0
ana_loss_sum = 0.0
ana_kld_sum_test = 0.0
for idx_batch in test_batches:
(data_batch, count_batch, mask) = utils.fetch_data(test_set, test_count, idx_batch, vocab_size)
input_feed = {model.x.name: data_batch, model.mask.name: mask, model.keep_prob.name: 1.0, model.warm_up.name: 1.0, model.min_alpha.name: min_alpha}
(loss, recon, kld_test, ana_loss, ana_kld_test) = sess.run([model.objective, model.recons_loss, model.kld, model.analytical_objective, model.analytical_kld], input_feed)
loss_sum += np.sum(loss)
kld_sum_test += (np.sum(kld_test) / np.sum(mask))
ana_loss_sum += np.sum(ana_loss)
ana_kld_sum_test += (np.sum(ana_kld_test) / np.sum(mask))
word_count += np.sum(count_batch)
count_batch = np.add(count_batch, 1e-12)
ppx_sum += np.sum(np.divide(loss, count_batch))
doc_count += np.sum(mask)
recon_sum += np.sum(recon)
print_loss = (recon_sum / len(test_batches))
print_ppx = np.exp((loss_sum / word_count))
print_ppx_perdoc = np.exp((ppx_sum / doc_count))
print_kld_test = (kld_sum_test / len(test_batches))
print_ana_ppx = np.exp((ana_loss_sum / word_count))
print_ana_kld_test = (ana_kld_sum_test / len(test_batches))
print('| Epoch test: {:d} |'.format((epoch + 1)), '| Perplexity: {:.9f}'.format(print_ppx), '| Per doc ppx: {:.5f}'.format(print_ppx_perdoc), '| KLD: {:.5}'.format(print_kld_test), '| Loss: {:.5}'.format(print_loss), '| ppx anal.: {:.5f}'.format(print_ana_ppx), '|KLD anal.: {:.5f}'.format(print_ana_kld_test))
if stopped:
print('calculate topic coherence (might take a few minutes)')
coherence = utils.topic_coherence(test_set, phi, lexicon)
print('topic coherence', str(coherence)) |
@pytest.mark.parametrize('nntxt_idx', CASE_INDEX)
@pytest.mark.parametrize('parameter_format', ['.protobuf', '.h5'])
@pytest.mark.parametrize('dataset_sample_num', [32])
def test_load_and_infer_equivalence(nntxt_idx, parameter_format, dataset_sample_num):
with generate_case_from_nntxt_str(NNTXT_EQUIVALENCE_CASES[nntxt_idx], parameter_format, dataset_sample_num) as nnp_file:
ref_info = ref_load(nnp_file)
ref_result = partial(common_forward, forward_func=_ref_forward)(ref_info)
info = load.load(nnp_file)
result = partial(common_forward, forward_func=_forward)(info)
assert_tensor_equal(result, ref_result) |
class JTrivialSemigroups(CategoryWithAxiom):
def extra_super_categories(self):
return [Semigroups().LTrivial(), Semigroups().RTrivial()] |
def update_alpha_parameters(model, layers, p, pi, print_info=True):
standarlization = (lambda x: ((x - torch.mean(x)) / torch.std(x)))
alpha_grad_attn = torch.stack([torch.cat([getattr(model.module.visual_encoder.blocks, str(i)).attn.alpha.grad for i in range(layers)]), torch.stack([getattr(model.module.text_decoder.bert.encoder.layer, str(i)).attention.self.alpha.grad for i in range(layers)]), torch.stack([getattr(model.module.text_decoder.bert.encoder.layer, str(i)).crossattention.self.alpha.grad for i in range(layers)])])
alpha_grad_mlp = torch.stack([torch.stack([getattr(model.module.visual_encoder.blocks, str(i)).mlp.alpha.grad for i in range(layers)]), torch.stack([getattr(model.module.text_decoder.bert.encoder.layer, str(i)).intermediate.alpha.grad for i in range(layers)])])
(alpha_grad_attn, alpha_grad_mlp) = (standarlization(alpha_grad_attn), standarlization(alpha_grad_mlp))
alpha_grad = torch.cat([alpha_grad_attn.view((- 1)), alpha_grad_mlp.view((- 1))])
(sorted_alpha_grad, indices) = torch.sort(alpha_grad, descending=True)
compression_weight = torch.ones_like(indices)
compression_weight[(indices < alpha_grad_attn.numel())] = 36
threshold = sorted_alpha_grad[torch.argmin(torch.abs((torch.cumsum(compression_weight, 0) - (torch.sum(compression_weight) * pi))))]
def update(module, grad):
mask = ((grad <= threshold) | (grad <= torch.min(grad)))
module.data.copy_((mask + ((~ mask) * (1 - (pi / p)))))
for i in range(layers):
update(getattr(model.module.visual_encoder.blocks, str(i)).attn.alpha, alpha_grad_attn[(0, i)].unsqueeze(0))
update(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).attention.self.alpha, alpha_grad_attn[(1, i)])
update(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).crossattention.self.alpha, alpha_grad_attn[(2, i)])
update(getattr(model.module.visual_encoder.blocks, str(i)).mlp.alpha, alpha_grad_mlp[(0, i)])
update(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).intermediate.alpha, alpha_grad_mlp[(1, i)])
if print_info:
(attn, mlp) = ([], [])
for i in range(layers):
attn.append(getattr(model.module.visual_encoder.blocks, str(i)).attn.alpha.flatten())
attn.append(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).attention.self.alpha.flatten())
attn.append(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).crossattention.self.alpha.flatten())
mlp.append(getattr(model.module.visual_encoder.blocks, str(i)).mlp.alpha.flatten())
mlp.append(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).intermediate.alpha.flatten())
print('Current compression ratio of attn: ', (1 - torch.mean(torch.cat(attn))))
print('Current compression ratio of mlp: ', (1 - torch.mean(torch.cat(mlp))))
print('Current compression ratio: ', pi) |
def hflip(in_dict, cfg):
if (np.random.random() < 0.5):
in_dict['img'] = F.hflip(in_dict['img'])
in_dict['mask'] = F.hflip(in_dict['mask']) |
def train(model, optimizer, loader):
model.train()
loss_sum = 0
acc_sum = 0
for (idx, (data, target)) in enumerate(loader):
(data, target) = (data.cuda(), target.cuda())
(data, target) = (Variable(data), Variable(target))
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss_sum += loss.item()
loss.backward()
optimizer.step()
predict = output.data.max(1)[1]
acc = predict.eq(target.data).cpu().sum().item()
acc_sum += acc
return ((loss_sum / len(loader)), (acc_sum / len(loader))) |
def save_checkpoint(checkpoint_manager):
if checkpoint_manager.is_checkpointing:
checkpoint_manager.saving = True
new_tmp_dest = get_temp_file('dump', 'checkpoints')
_LOG.info(('Checkpoint is being updated: %s' % new_tmp_dest))
old_tmp_file = open(checkpoint_manager.checkpoint_path).readlines()
old_tmp_file = (None if (len(old_tmp_file) == 0) else old_tmp_file[(- 1)].split(',')[0])
checkpoint_manager.update_time()
currenlimit = sys.getrecursionlimit()
sys.setrecursionlimit(100000)
picklefile = gzip.GzipFile(new_tmp_dest, 'wb')
pickle.dump(checkpoint_manager.checkpoint_state, picklefile, 2)
picklefile.close()
sys.setrecursionlimit(currenlimit)
f = open(checkpoint_manager.checkpoint_path, 'a')
f.write(('%s, %s\n' % (new_tmp_dest, datetime.datetime.now())))
f.close()
if (old_tmp_file is not None):
os.remove(old_tmp_file)
_LOG.info(('Checkpoint Saved to: %s and linked in %s.' % (new_tmp_dest, checkpoint_manager.checkpoint_path)))
checkpoint_manager.saving = False
checkpoint_manager.timer = threading.Timer(options().checkpoint_interval, save_checkpoint, args=[checkpoint_manager])
checkpoint_manager.timer.setDaemon(True)
checkpoint_manager.timer.start() |
def test_kmeans_inductive_gncd(merge_test_loader, args, K=None):
if (K is None):
K = (args.num_labeled_classes + args.num_unlabeled_classes)
all_feats = []
targets = np.array([])
mask_cls = np.array([])
print('Collating features...')
for (batch_idx, (feats, label, _)) in enumerate(tqdm(merge_test_loader)):
feats = feats.to(device)
feats = torch.nn.functional.normalize(feats, dim=(- 1))
all_feats.append(feats.cpu().numpy())
targets = np.append(targets, label.cpu().numpy())
mask_cls = np.append(mask_cls, np.array([(True if (x.item() in range(len(args.train_classes))) else False) for x in label]))
mask_cls = mask_cls.astype(bool)
all_feats = np.concatenate(all_feats)
print('Fitting Semi-Supervised K-Means...')
kmeans = KMeans(n_clusters=K, max_iter=args.max_kmeans_iter, init='k-means++', n_init=args.k_means_init, random_state=None)
kmeans.fit(all_feats)
all_preds = kmeans.labels_
print(f'best inertia={kmeans.inertia_}')
(all_acc, old_acc, new_acc) = log_accs_from_preds(y_true=targets, y_pred=all_preds, mask=mask_cls, eval_funcs=args.eval_funcs, save_name='SS-K-Means Train ACC Unlabelled', print_output=True)
return (all_acc, old_acc, new_acc, 0, kmeans) |
def _expand_braces(text, seen=None):
if (seen is None):
seen = set()
spans = [m.span() for m in re.finditer('\\{[^\\{\\}]*\\}', text)][::(- 1)]
alts = [text[(start + 1):(stop - 1)].split(',') for (start, stop) in spans]
if (len(spans) == 0):
if (text not in seen):
(yield text)
seen.add(text)
else:
for combo in itertools.product(*alts):
replaced = list(text)
for ((start, stop), replacement) in zip(spans, combo):
replaced[start:stop] = replacement
(yield from _expand_braces(''.join(replaced), seen)) |
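Example of the brace-expansion generator above on a small pattern (assuming the function and its `re`/`itertools` imports are in scope):
expanded = sorted(_expand_braces('layer_{1,2}.{weight,bias}'))
assert expanded == ['layer_1.bias', 'layer_1.weight', 'layer_2.bias', 'layer_2.weight']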
@dataclass
class GlueDataTrainingArguments:
task_name: str = field(metadata={'help': ('The name of the task to train on: ' + ', '.join(glue_processors.keys()))})
data_dir: str = field(metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def __post_init__(self):
self.task_name = self.task_name.lower() |
def _is_in_ipython():
try:
__IPYTHON__
return True
except NameError:
pass
return False |
def _export(*args, **kwargs):
from torch.onnx import utils
return utils._export(*args, **kwargs) |
def validate_references(model: models.Model) -> None:
def process_field(parent: models.Model, child: Union[(str, optplan.ProblemGraphNodeSchema)], field_type: optplan.ReferenceType) -> None:
if (not child):
return
if ((not isinstance(child, (str, field_type.reference_type))) and (not isinstance(child, optplan.WildcardSchema))):
raise ValueError('Expected type {} for node {}, got {}.'.format(field_type.reference_type, child, type(child)))
_iter_optplan_fields(model, set(), process_field, pass_field_info=True) |
def get_constant(x):
if (x == inf):
return 'math.inf'
if (x == (- inf)):
return '-math.inf'
return x |
def save_summaries(file_writer, global_step=None):
global _merge_op
tfutil.assert_tf_initialized()
if (_merge_op is None):
layout = finalize_autosummaries()
if (layout is not None):
file_writer.add_summary(layout)
with tf.device(None), tf.control_dependencies(None):
_merge_op = tf.compat.v1.summary.merge_all() |
class Bottle2neck(_Bottleneck):
expansion = 4
def __init__(self, inplanes, planes, scales=4, base_width=26, base_channels=64, stage_type='normal', **kwargs):
super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
assert (scales > 1), 'Res2Net degenerates to ResNet when scales = 1.'
width = int(math.floor((self.planes * (base_width / base_channels))))
(self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, (width * scales), postfix=1)
(self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3)
self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, (width * scales), kernel_size=1, stride=self.conv1_stride, bias=False)
self.add_module(self.norm1_name, norm1)
if ((stage_type == 'stage') and (self.conv2_stride != 1)):
self.pool = nn.AvgPool2d(kernel_size=3, stride=self.conv2_stride, padding=1)
convs = []
bns = []
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if ((not self.with_dcn) or fallback_on_stride):
for i in range((scales - 1)):
convs.append(build_conv_layer(self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False))
bns.append(build_norm_layer(self.norm_cfg, width, postfix=(i + 1))[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
else:
assert (self.conv_cfg is None), 'conv_cfg must be None for DCN'
for i in range((scales - 1)):
convs.append(build_conv_layer(self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False))
bns.append(build_norm_layer(self.norm_cfg, width, postfix=(i + 1))[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.conv3 = build_conv_layer(self.conv_cfg, (width * scales), (self.planes * self.expansion), kernel_size=1, bias=False)
self.add_module(self.norm3_name, norm3)
self.stage_type = stage_type
self.scales = scales
self.width = width
delattr(self, 'conv2')
delattr(self, self.norm2_name)
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
spx = torch.split(out, self.width, 1)
sp = self.convs[0](spx[0].contiguous())
sp = self.relu(self.bns[0](sp))
out = sp
for i in range(1, (self.scales - 1)):
if (self.stage_type == 'stage'):
sp = spx[i]
else:
sp = (sp + spx[i])
sp = self.convs[i](sp.contiguous())
sp = self.relu(self.bns[i](sp))
out = torch.cat((out, sp), 1)
if ((self.stage_type == 'normal') or (self.conv2_stride == 1)):
out = torch.cat((out, spx[(self.scales - 1)]), 1)
elif (self.stage_type == 'stage'):
out = torch.cat((out, self.pool(spx[(self.scales - 1)])), 1)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
return out
if (self.with_cp and x.requires_grad):
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out |
class OpenEMS(Element):
def __init__(self, FDTD, CSX):
Element.__init__(self, 'openEMS')
self.append(FDTD)
self.append(CSX)
def __repr__(self):
st = ElementTree.tostring(self)
return st
def save(self, filename='openEMS.xml'):
self.filename = filename
output_file = open(filename, 'w')
output_file.write('<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>')
output_file.write(ElementTree.tostring(self))
output_file.close()
os.system((('xmllint --format ' + filename) + '> tmpf'))
os.system(('mv tmpf ' + filename))
def geomplot(self):
os.system(('~/Apps/openEMS/bin/AppCSXCAD ' + self.filename))
def run(self):
os.system(('~/Apps/openEMS/bin/openEMS.sh ' + self.filename)) |
class LayoutLMv2PreTrainedModel(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
class atlas_3_10_threads_info(atlas_3_10_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
_lib_atlas = _lib_names
_lib_lapack = _lib_names |
def unit_derivations_expr(v):
v = str(v)
Z = unit_derivations[v]
if isinstance(Z, str):
d = {x: str_to_unit(x) for x in vars_in_str(Z)}
from sage.misc.sage_eval import sage_eval
Z = sage_eval(Z, d)
unit_derivations[v] = Z
return Z |
class kappa3_gen(rv_continuous):
def _shape_info(self):
return [_ShapeInfo('a', False, (0, np.inf), (False, False))]
def _pdf(self, x, a):
return (a * ((a + (x ** a)) ** (((- 1.0) / a) - 1)))
def _cdf(self, x, a):
return (x * ((a + (x ** a)) ** ((- 1.0) / a)))
def _sf(self, x, a):
(x, a) = np.broadcast_arrays(x, a)
sf = super()._sf(x, a)
cutoff = 0.01
i = (sf < cutoff)
sf2 = (- sc.expm1(sc.xlog1py(((- 1.0) / a[i]), (a[i] * (x[i] ** (- a[i]))))))
i2 = (sf2 > cutoff)
sf2[i2] = sf[i][i2]
sf[i] = sf2
return sf
def _ppf(self, q, a):
return ((a / ((q ** (- a)) - 1.0)) ** (1.0 / a))
def _isf(self, q, a):
lg = sc.xlog1py((- a), (- q))
denom = sc.expm1(lg)
return ((a / denom) ** (1.0 / a))
def _stats(self, a):
outputs = [(None if np.any((i < a)) else np.nan) for i in range(1, 5)]
return outputs[:]
def _mom1_sc(self, m, *args):
if np.any((m >= args[0])):
return np.nan
return integrate.quad(self._mom_integ1, 0, 1, args=((m,) + args))[0] |
@registry.register_builder('msvd_qa_instruct')
class MSVDQAInstructBuilder(VideoQABuilder):
train_dataset_cls = VideoQAInstructDataset
eval_dataset_cls = VideoQAInstructDataset
DATASET_CONFIG_DICT = {'default': 'configs/datasets/msvd/defaults_qa_instruct.yaml'} |
def test_deepcopy():
modela = ModelA({'int_field': 1, 'list_int_field': [2, 3], 'model_field': {'value': 4}, 'list_model_field': [{'value': 5}, {'value': 6}]})
modela_copy = copy.deepcopy(modela)
assert (modela_copy.int_field == 1)
assert (modela_copy.list_int_field == [2, 3])
assert (modela_copy.model_field.value == 4)
assert (modela_copy.list_model_field[0].value == 5)
assert (modela_copy.list_model_field[1].value == 6)
assert (modela_copy.list_int_field is not modela.list_int_field)
assert (modela_copy.model_field is not modela.model_field)
assert (modela_copy.list_model_field is not modela.list_model_field)
assert (modela_copy.list_model_field[0] is not modela.list_model_field[0])
assert (modela_copy.list_model_field[1] is not modela.list_model_field[1])
modela.model_field.value = 7
assert (modela_copy.model_field.value == 4) |
def ref_bit_shift(x, shift, direction):
if (direction == 'LEFT'):
return (x << shift)
elif (direction == 'RIGHT'):
return (x >> shift)
else:
raise ValueError('Invalid direction: {}'.format(direction)) |
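Two quick sanity checks of the reference shift above:
assert ref_bit_shift(3, 2, 'LEFT') == 12    # 0b11 << 2 == 0b1100
assert ref_bit_shift(12, 2, 'RIGHT') == 3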
def add_weight_args_preprocessing(args, kwargs):
if (len(args) > 1):
if isinstance(args[1], (tuple, list)):
kwargs['shape'] = args[1]
args = ((args[0],) + args[2:])
if (len(args) > 1):
if isinstance(args[1], six.string_types):
kwargs['name'] = args[1]
args = ((args[0],) + args[2:])
return (args, kwargs, []) |
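Illustration of how a positional shape and name are moved into kwargs by the preprocessing hook above; `layer_obj` is just a placeholder object for the example.
layer_obj = object()  # placeholder for the layer instance normally passed first
args, kwargs, extras = add_weight_args_preprocessing((layer_obj, (3, 4), 'kernel'), {})
assert kwargs == {'shape': (3, 4), 'name': 'kernel'}
assert args == (layer_obj,) and extras == []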
def timit_posteriorgram_url(ckpt, refresh=False, *args, **kwargs):
return timit_posteriorgram_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs) |
@app.route('/ngsi10/updateContext', methods=['POST'])  # `app` assumed to be the Flask application
def getUpdateNotification():
data = request.get_json()
if ('contextElements' in data):
dataContext = data['contextElements']
dataAttribute = dataContext[0]
if ('attributes' in dataAttribute):
attribute = dataAttribute['attributes']
if (attribute[0]['name'] == 'off'):
my_status = 'off'
elif (attribute[0]['name'] == 'on'):
my_status = 'on'
else:
print('Command not found!!')
return ''
print('Lamp : {}'.format(my_status))
return '' |
@dataclass
class KernelizedDoublyRobust(BaseContinuousOffPolicyEstimator):
kernel: str
bandwidth: float
estimator_name: str = 'kernelized_dr'
def __post_init__(self) -> None:
if (self.kernel not in ['gaussian', 'epanechnikov', 'triangular', 'cosine']):
raise ValueError(f"kernel must be one of 'gaussian', 'epanechnikov', 'triangular', or 'cosine' but {self.kernel} is given")
check_scalar(self.bandwidth, name='bandwidth', target_type=(int, float), min_val=0)
def _estimate_round_rewards(self, reward: np.ndarray, action_by_behavior_policy: np.ndarray, pscore: np.ndarray, action_by_evaluation_policy: np.ndarray, estimated_rewards_by_reg_model: np.ndarray, **kwargs) -> np.ndarray:
kernel_func = kernel_functions[self.kernel]
u = (action_by_evaluation_policy - action_by_behavior_policy)
u /= self.bandwidth
estimated_rewards = ((kernel_func(u) * (reward - estimated_rewards_by_reg_model)) / pscore)
estimated_rewards /= self.bandwidth
estimated_rewards += estimated_rewards_by_reg_model
return estimated_rewards
def estimate_policy_value(self, reward: np.ndarray, action_by_behavior_policy: np.ndarray, pscore: np.ndarray, action_by_evaluation_policy: np.ndarray, estimated_rewards_by_reg_model: np.ndarray, **kwargs) -> np.ndarray:
check_array(array=estimated_rewards_by_reg_model, name='estimated_rewards_by_reg_model', expected_dim=1)
check_array(array=reward, name='reward', expected_dim=1)
check_array(array=action_by_behavior_policy, name='action_by_behavior_policy', expected_dim=1)
check_array(array=pscore, name='pscore', expected_dim=1)
check_continuous_ope_inputs(reward=reward, action_by_behavior_policy=action_by_behavior_policy, pscore=pscore, action_by_evaluation_policy=action_by_evaluation_policy, estimated_rewards_by_reg_model=estimated_rewards_by_reg_model)
return self._estimate_round_rewards(reward=reward, action_by_behavior_policy=action_by_behavior_policy, pscore=pscore, action_by_evaluation_policy=action_by_evaluation_policy, estimated_rewards_by_reg_model=estimated_rewards_by_reg_model).mean()
def estimate_interval(self, reward: np.ndarray, action_by_behavior_policy: np.ndarray, pscore: np.ndarray, action_by_evaluation_policy: np.ndarray, estimated_rewards_by_reg_model: np.ndarray, alpha: float=0.05, n_bootstrap_samples: int=10000, random_state: Optional[int]=None, **kwargs) -> Dict[(str, float)]:
check_array(array=estimated_rewards_by_reg_model, name='estimated_rewards_by_reg_model', expected_dim=1)
check_array(array=reward, name='reward', expected_dim=1)
check_array(array=action_by_behavior_policy, name='action_by_behavior_policy', expected_dim=1)
check_array(array=pscore, name='pscore', expected_dim=1)
check_continuous_ope_inputs(reward=reward, action_by_behavior_policy=action_by_behavior_policy, pscore=pscore, action_by_evaluation_policy=action_by_evaluation_policy, estimated_rewards_by_reg_model=estimated_rewards_by_reg_model)
estimated_round_rewards = self._estimate_round_rewards(reward=reward, action_by_behavior_policy=action_by_behavior_policy, pscore=pscore, action_by_evaluation_policy=action_by_evaluation_policy, estimated_rewards_by_reg_model=estimated_rewards_by_reg_model)
return estimate_confidence_interval_by_bootstrap(samples=estimated_round_rewards, alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state) |
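A hedged usage sketch of the estimator above with synthetic inputs; the array contents are made up and the surrounding helpers (`check_array`, `kernel_functions`, ...) are assumed to be available from the same module.
import numpy as np

n = 100
kdr = KernelizedDoublyRobust(kernel='gaussian', bandwidth=0.5)
value = kdr.estimate_policy_value(
    reward=np.random.normal(size=n),
    action_by_behavior_policy=np.random.uniform(0.1, 0.9, size=n),
    pscore=np.full(n, 1.0),
    action_by_evaluation_policy=np.random.uniform(0.1, 0.9, size=n),
    estimated_rewards_by_reg_model=np.zeros(n),
)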
class DaskLFApplier(BaseLFApplier):
def apply(self, df: dd.DataFrame, scheduler: Scheduler='processes', fault_tolerant: bool=False) -> np.ndarray:
f_caller = _FunctionCaller(fault_tolerant)
apply_fn = partial(apply_lfs_to_data_point, lfs=self._lfs, f_caller=f_caller)
map_fn = df.map_partitions((lambda p_df: p_df.apply(apply_fn, axis=1)))
labels = map_fn.compute(scheduler=scheduler)
labels_with_index = rows_to_triplets(labels)
return self._numpy_from_row_data(labels_with_index) |
def add_arguments_oss(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser.add_argument('--run-only', help='only run certain test(s), for example: atest test_nn.py.', nargs='*', default=None)
return parser |
class BalancedBatchSampler(BatchSampler):
def __init__(self, labels, all_speech, n_classes, n_samples):
self.labels = np.array(labels)
self.labels_set = list(set(self.labels))
self.label_to_indices = {label: np.where((self.labels == label))[0] for label in self.labels_set}
for l in self.labels_set:
np.random.shuffle(self.label_to_indices[l])
self.used_label_indices_count = {label: 0 for label in self.labels_set}
self.count = 0
self.n_classes = n_classes
self.n_samples = n_samples
self.n_dataset = all_speech
self.batch_size = (self.n_samples * self.n_classes)
def __iter__(self):
self.count = 0
while ((self.count + self.batch_size) < self.n_dataset):
classes = np.random.choice(self.labels_set, self.n_classes, replace=False)
indices = []
for class_ in classes:
indices.extend(self.label_to_indices[class_][self.used_label_indices_count[class_]:(self.used_label_indices_count[class_] + self.n_samples)])
self.used_label_indices_count[class_] += self.n_samples
if ((self.used_label_indices_count[class_] + self.n_samples) > len(self.label_to_indices[class_])):
np.random.shuffle(self.label_to_indices[class_])
self.used_label_indices_count[class_] = 0
(yield indices)
self.count += (self.n_classes * self.n_samples)
def __len__(self):
return (self.n_dataset // self.batch_size) |
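A hedged usage sketch: because the sampler yields lists of indices, it plugs into a DataLoader through `batch_sampler`; the labels and features below are synthetic.
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

labels = np.repeat(np.arange(10), 20)                    # 10 classes, 20 samples each
dataset = TensorDataset(torch.randn(len(labels), 8), torch.as_tensor(labels))
sampler = BalancedBatchSampler(labels, all_speech=len(labels), n_classes=4, n_samples=5)
loader = DataLoader(dataset, batch_sampler=sampler)      # every batch: 4 classes x 5 samples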
def compute_high_actor_loss(agent, batch, network_params):
cur_goals = batch['high_goals']
(v1, v2) = agent.network(batch['observations'], cur_goals, method='value')
(nv1, nv2) = agent.network(batch['high_targets'], cur_goals, method='value')
v = ((v1 + v2) / 2)
nv = ((nv1 + nv2) / 2)
adv = (nv - v)
exp_a = jnp.exp((adv * agent.config['high_temperature']))
exp_a = jnp.minimum(exp_a, 100.0)
dist = agent.network(batch['observations'], batch['high_goals'], state_rep_grad=True, goal_rep_grad=True, method='high_actor', params=network_params)
if agent.config['use_rep']:
target = agent.network(targets=batch['high_targets'], bases=batch['observations'], method='value_goal_encoder')
else:
target = (batch['high_targets'] - batch['observations'])
log_probs = dist.log_prob(target)
actor_loss = (- (exp_a * log_probs).mean())
return (actor_loss, {'high_actor_loss': actor_loss, 'high_adv': adv.mean(), 'high_bc_log_probs': log_probs.mean(), 'high_adv_median': jnp.median(adv), 'high_mse': jnp.mean(((dist.mode() - target) ** 2)), 'high_scale': dist.scale_diag.mean()}) |
class MultiAgentWrapper(gym.Wrapper, MultiAgentEnv):
def __init__(self, game, cfg: Config):
self.env = disable_passive_env_checker(game)
gym.Wrapper.__init__(self, self.env)
MultiAgentEnv.__init__(self.env)
self.n_agents = cfg.multiagent.n_agents
self.observation_space = gym.spaces.Dict({})
self.action_space = gym.spaces.Dict({})
for i in range(self.n_agents):
self.observation_space.spaces[f'agent_{i}'] = self.env.observation_space
self.action_space.spaces[f'agent_{i}'] = self.env.action_space
self.unwrapped.observation_space = self.observation_space
self.unwrapped.action_space = self.action_space
def reset(self, *, seed=None, options=None):
(obs, info) = super().reset()
return (obs, info)
def seed(self, s):
return self.unwrapped.seed(s)
def step(self, action):
(obs, rew, done, truncated, info) = ({}, {}, {}, {}, {})
for (k, v) in action.items():
self.unwrapped._rep.set_active_agent(k)
(obs_k, rew[k], done[k], truncated[k], info[k]) = super().step(action={k: v})
obs.update(obs_k)
truncated['__all__'] = np.all(list(truncated.values()))
done['__all__'] = np.all(list(done.values()))
return (obs, rew, done, truncated, info) |
def tf_scope():
with tf_compat.v1.Graph().as_default(), tf_compat.v1.Session().as_default() as session:
(yield session) |
def register_Ns3PssFlowPerf_t_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::pssFlowPerf_t const &', 'arg0')])
cls.add_instance_attribute('flowStart', 'ns3::Time', is_const=False)
cls.add_instance_attribute('lastAveragedThroughput', 'double', is_const=False)
cls.add_instance_attribute('lastTtiBytesTransmitted', 'unsigned int', is_const=False)
cls.add_instance_attribute('secondLastAveragedThroughput', 'double', is_const=False)
cls.add_instance_attribute('targetThroughput', 'double', is_const=False)
cls.add_instance_attribute('totalBytesTransmitted', 'long unsigned int', is_const=False)
return |
def main():
modelname = GetModelAndOptNames()
FLAGS = getFlags(modelname)
args.print_flag(FLAGS)
cross_validate(modelname, FLAGS) |
def DenseNet169(nclass):
return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32, num_classes=nclass) |
def setup_logger(name, save_dir, prefix, distributed_rank):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
if (distributed_rank > 0):
return logger
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if cfg.LOG_IN_FILE:
fh = logging.FileHandler(os.path.join(save_dir, ('log.%s.%s.txt' % (time.strftime('%m-%d_%H-%M-%S'), prefix))))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger |
def test_load_optimizer_old_format():
config = Config(dict(optimizer={'class': 'adamw', 'weight_decay': 0.001}))
model = torch.nn.Linear(7, 5)
updater = Updater(config=config, network=model, device=torch.device('cpu'))
updater.create_optimizer()
with tempfile.TemporaryDirectory(prefix='returnn_test_load_optimizer_old_format') as tmp_dir:
torch.save(updater.optimizer.state_dict(), (tmp_dir + '/model.opt.old_format.pt'))
updater.load_optimizer((tmp_dir + '/model.opt.old_format.pt'))
updater.save_optimizer((tmp_dir + '/model.opt.new_format.pt'))
updater.load_optimizer((tmp_dir + '/model.opt.new_format.pt')) |
def cvector_to_numpy(vector: Union[(pyrenderer.real3, pyrenderer.real4)]):
if isinstance(vector, pyrenderer.real3):
return np.array([vector.x, vector.y, vector.z], dtype=renderer_dtype_np)
elif isinstance(vector, pyrenderer.real4):
return np.array([vector.x, vector.y, vector.z, vector.w], dtype=renderer_dtype_np)
else:
raise ValueError('unsupported type, real3 or real4 expected but got', type(vector)) |
def enveloping_profile_elements(alist, char=2):
if (char == 2):
profiles = [profile_elt(x) for x in alist if (x != 0)]
if (not profiles):
return (0,)
if (len(profiles) == 1):
return profiles[0]
return find_min_profile((max(*a) for a in zip_longest(*profiles, fillvalue=0)))
profiles = [profile_elt(x, char) for x in alist if (x != 0)]
if (len(profiles) == 1):
return profiles[0]
profiles_P = [x[0] for x in profiles]
profiles_Q = [x[1] for x in profiles]
if ((not profiles_P) and (not profiles_Q)):
return ((0,), (0,))
else:
maxP = [max(*a) for a in zip_longest(*profiles_P, fillvalue=0)]
maxQ = [max(*a) for a in zip_longest(*profiles_Q, fillvalue=0)]
return find_min_profile([maxP, maxQ], char=char) |
def convertAttributeProto(onnx_arg):
if onnx_arg.HasField('f'):
return onnx_arg.f
elif onnx_arg.HasField('i'):
return onnx_arg.i
elif onnx_arg.HasField('s'):
return onnx_arg.s
elif onnx_arg.HasField('t'):
return onnx_arg.t
elif onnx_arg.HasField('g'):
return Caffe2Backend._graph_to_net(onnx_arg.g, Caffe2Backend._known_opset_version)
elif len(onnx_arg.floats):
return list(onnx_arg.floats)
elif len(onnx_arg.ints):
return list(onnx_arg.ints)
elif len(onnx_arg.strings):
return list(onnx_arg.strings)
elif len(onnx_arg.graphs):
retval = []
for g in onnx_arg.graphs:
retval.append(Caffe2Backend._graph_to_net(g, Caffe2Backend._known_opset_version))
return retval
else:
raise ValueError('Unsupported ONNX attribute: {}'.format(onnx_arg)) |
@pytest.mark.torch
def test_sasrec_forward_with_float_timematrix(tensor_schema, simple_masks):
model = SasRecModel(tensor_schema.subset(['item_id', 'timestamp']), hidden_size=64, max_len=5, ti_modification=True)
(item_sequences, padding_mask, _, timestamp_sequences) = simple_masks
timestamp_sequences = timestamp_sequences.float()
inputs = {'item_id': item_sequences, 'timestamp': timestamp_sequences}
assert (model(inputs, padding_mask).size() == (4, 5, 4)) |
def linear_classifier(layer, output_size, hidden_keep_prob=1.0):
layer_shape = nn.get_sizes(layer)
input_size = layer_shape.pop()
weights = tf.get_variable('Weights', shape=[input_size, output_size], initializer=tf.zeros_initializer)
biases = tf.get_variable('Biases', shape=[output_size], initializer=tf.zeros_initializer)
if (hidden_keep_prob < 1.0):
if (len(layer_shape) > 1):
noise_shape = tf.stack((layer_shape[:(- 1)] + [1, input_size]))
else:
noise_shape = None
layer = nn.dropout(layer, hidden_keep_prob, noise_shape=noise_shape)
layer_reshaped = nn.reshape(layer, [(- 1), input_size])
layer = (tf.matmul(layer_reshaped, weights) + biases)
layer = nn.reshape(layer, (layer_shape + [output_size]))
return layer |
(_float_ftylists, '(n)->(n)')
def diff_reverse(a_in, a_out):
a_out[0] = a_in[0]
for i in range(1, a_in.shape[0]):
a_out[i] = (a_out[(i - 1)] - a_in[i]) |
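A worked example of the running reverse difference above (out[i] = out[i-1] - in[i]); the input values are arbitrary.
import numpy as np

a_in = np.array([1.0, 2.0, 3.0])
a_out = np.empty_like(a_in)
diff_reverse(a_in, a_out)   # a_out becomes [1.0, -1.0, -4.0]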
@register_model
def swsl_resnext101_32x8d(pretrained=True, **kwargs):
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
return _create_resnet('swsl_resnext101_32x8d', pretrained, **model_args) |
class ZippedDataset(torch.utils.data.Dataset):
def __init__(self, *components):
assert (len(components) >= 1)
lengths = [len(c) for c in components]
assert all(((lengths[0] == other) for other in lengths[1:])), "Lengths don't match: {}".format(lengths)
self.components = components
def __getitem__(self, idx):
return tuple((c[idx] for c in self.components))
def __len__(self):
return len(self.components[0]) |
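A small usage example of the zipping dataset above with toy components:
xs = [10, 11, 12]
ys = ['a', 'b', 'c']
ds = ZippedDataset(xs, ys)
assert len(ds) == 3 and ds[1] == (11, 'b')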
def LF_single_char_rgx(s, dict_lf):
m = re.search('\\b(r/r/w|m/r/g|n/v/d|n/v|c/c/e|f/c/s|mg/r)\\b', s.text, re.I)
label = (2 if (not m) else 1)
char_dict = {'c', 'd', 'e', 'f', 'g', 'm', 'n', 'r', 's', 'v', 'w'}
L = {}
for (i, tok) in enumerate(s.words):
if (tok.lower() in char_dict):
L[i] = label
V = dict_lf(s)
for (i, y) in V.items():
L[i] = V[i]
return L |
class BNFNetTest(BasePytorchTest):
def __init__(self, unit_test):
super().__init__(unit_test)
def create_inputs_shape(self):
return [[self.val_batch_size, 3, 32, 32], [self.val_batch_size, 3, 32, 32]]
def create_feature_network(self, input_shape):
return BNFNet() |
def train(args, ckpt_dir, loader, generator, discriminator, g_optim, d_optim, g_ema, device, writer):
get_inception_metrics = prepare_inception_metrics(args.inception, False)
sample_fn = functools.partial(sample_gema, g_ema=g_ema, device=device, truncation=1.0, mean_latent=None, batch_size=args.batch)
loader = sample_data(loader)
pbar = range(args.iter)
mean_path_length = 0
d_loss_val = 0
r1_img_loss = torch.tensor(0.0, device=device)
r1_seg_loss = torch.tensor(0.0, device=device)
g_loss_val = 0
path_loss = torch.tensor(0.0, device=device)
path_lengths = torch.tensor(0.0, device=device)
mean_path_length_avg = 0
loss_dict = {}
if args.distributed:
g_module = generator.module
d_module = discriminator.module
else:
g_module = generator
d_module = discriminator
accum = (0.5 ** (32 / (10 * 1000)))
sample_z = torch.randn(args.n_sample, args.latent, device=device)
print('Start Training Iterations...')
for idx in pbar:
tic = time.time()
i = (idx + args.start_iter)
if (i > args.iter):
print('Done!')
break
real_data = next(loader)
(real_img, real_mask) = (real_data['image'], real_data['mask'])
(real_img, real_mask) = (real_img.to(device), real_mask.to(device))
requires_grad(generator, False)
requires_grad(discriminator, True)
noise = mixing_noise(args.batch, args.latent, args.mixing, device)
(fake_img, fake_seg) = generator(noise)
fake_pred = discriminator(fake_img, fake_seg)
real_pred = discriminator(real_img, real_mask)
d_loss = d_logistic_loss(real_pred, fake_pred)
loss_dict['d'] = d_loss
loss_dict['real_score'] = real_pred.mean()
loss_dict['fake_score'] = fake_pred.mean()
discriminator.zero_grad()
d_loss.backward()
d_optim.step()
d_regularize = ((i % args.d_reg_every) == 0)
if d_regularize:
real_img.requires_grad = True
real_mask.requires_grad = True
real_pred = discriminator(real_img, real_mask)
(r1_img_loss, r1_seg_loss) = d_r1_loss(real_pred, real_img, real_mask)
discriminator.zero_grad()
(((((args.r1_img / 2) * r1_img_loss) + ((args.r1_seg / 2) * r1_seg_loss)) * args.d_reg_every) + (0 * real_pred[0])).backward()
d_optim.step()
loss_dict['r1_img'] = r1_img_loss
loss_dict['r1_seg'] = r1_seg_loss
requires_grad(generator, True)
requires_grad(discriminator, False)
noise = mixing_noise(args.batch, args.latent, args.mixing, device)
(fake_img, fake_seg, fake_seg_coarse, _, _) = generator(noise, return_all=True)
fake_pred = discriminator(fake_img, fake_seg)
g_loss = g_nonsaturating_loss(fake_pred)
fake_seg_downsample = F.adaptive_avg_pool2d(fake_seg, fake_seg_coarse.shape[2:4])
mask_loss = torch.square((fake_seg_coarse - fake_seg_downsample)).mean()
loss_dict['g'] = g_loss
loss_dict['mask'] = mask_loss
generator.zero_grad()
(g_loss + (args.lambda_mask * mask_loss)).backward()
g_optim.step()
g_regularize = ((args.path_regularize > 0) and ((i % args.g_reg_every) == 0))
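        # Lazy path-length regularization (StyleGAN2-style): run every g_reg_every
        # steps on a shrunken batch and rescale the penalty accordingly.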
if g_regularize:
path_batch_size = max(1, (args.batch // args.path_batch_shrink))
with torch.no_grad():
noise = mixing_noise(path_batch_size, args.latent, args.mixing, device)
noise = [g_module.style(n) for n in noise]
latents = g_module.mix_styles(noise).clone()
latents.requires_grad = True
(fake_img, fake_seg) = generator([latents], input_is_latent=True)
(path_loss, mean_path_length, path_lengths) = g_path_regularize(fake_img, latents, mean_path_length)
generator.zero_grad()
weighted_path_loss = ((args.path_regularize * args.g_reg_every) * path_loss)
if args.path_batch_shrink:
weighted_path_loss += ((0 * fake_img[(0, 0, 0, 0)]) + (0 * fake_seg[(0, 0, 0, 0)]))
weighted_path_loss.backward()
g_optim.step()
mean_path_length_avg = (reduce_sum(mean_path_length).item() / get_world_size())
loss_dict['path'] = path_loss
loss_dict['path_length'] = path_lengths.mean()
accumulate(g_ema, g_module, accum)
loss_reduced = reduce_loss_dict(loss_dict)
d_loss_val = loss_reduced['d'].mean().item()
g_loss_val = loss_reduced['g'].mean().item()
r1_img_val = loss_reduced['r1_img'].mean().item()
r1_seg_val = loss_reduced['r1_seg'].mean().item()
path_loss_val = loss_reduced['path'].mean().item()
real_score_val = loss_reduced['real_score'].mean().item()
fake_score_val = loss_reduced['fake_score'].mean().item()
path_length_val = loss_reduced['path_length'].mean().item()
mask_loss_val = loss_reduced['mask'].mean().item()
batch_time = (time.time() - tic)
if (get_rank() == 0):
if ((i % 100) == 0):
print(f'[{i:06d}] d: {d_loss_val:.4f}; g: {g_loss_val:.4f}; real: {real_score_val:.4f}; fake: {fake_score_val:.4f}; r1_img: {r1_img_val:.4f}; r1_seg: {r1_seg_val:.4f}; path: {path_loss_val:.4f}; mean path: {mean_path_length_avg:.4f}; mask: {mask_loss_val:.4f}; time: {batch_time:.2f}')
if (writer is not None):
writer.add_scalar('scores/real_score', real_score_val, global_step=i)
writer.add_scalar('scores/fake_score', fake_score_val, global_step=i)
writer.add_scalar('r1/img', r1_img_val, global_step=i)
writer.add_scalar('r1/seg', r1_seg_val, global_step=i)
writer.add_scalar('path/path_loss', path_loss_val, global_step=i)
writer.add_scalar('path/path_length', path_length_val, global_step=i)
writer.add_scalar('loss/d', d_loss_val, global_step=i)
writer.add_scalar('loss/g', g_loss_val, global_step=i)
writer.add_scalar('loss/mask', mask_loss_val, global_step=i)
if ((i % args.viz_every) == 0):
with torch.no_grad():
g_ema.eval()
(sample_img, sample_seg, sample_seg_coarse, depths, _) = g_ema([sample_z], return_all=True)
sample_img = sample_img.detach().cpu()
sample_mask = color_segmap(sample_seg.detach().cpu(), color_map)
sample_mask_coarse = color_segmap(sample_seg_coarse.detach().cpu(), color_map)
depths = [d.detach().cpu() for d in depths]
os.makedirs(os.path.join(ckpt_dir, 'sample'), exist_ok=True)
os.makedirs(os.path.join(ckpt_dir, 'depth'), exist_ok=True)
save_sample_image('sample', 'img', sample_img, i, writer, normalize=True, value_range=((- 1), 1))
save_sample_image('sample', 'mask', sample_mask, i, writer, normalize=True, value_range=(0, 255))
save_sample_image('sample', 'mask_coarse', sample_mask_coarse, i, writer, normalize=True, value_range=(0, 255))
for j in range(len(depths)):
save_sample_image('depth', f'depth_{j:02d}', depths[j], i, writer, normalize=True)
if (((i % args.save_every) == 0) and (i > args.start_iter)):
print('Start calculating FID')
(IS_mean, IS_std, FID) = get_inception_metrics(sample_fn, num_inception_images=10000, use_torch=False)
print('[val] iteration {0:06d}: FID: {1:.4f}, IS_mean: {2:.4f}, IS_std: {3:.4f}'.format(i, FID, IS_mean, IS_std))
if (writer is not None):
writer.add_scalar('metrics/FID', FID, global_step=i)
writer.add_scalar('metrics/IS_mean', IS_mean, global_step=i)
writer.add_scalar('metrics/IS_std', IS_std, global_step=i)
os.makedirs(os.path.join(ckpt_dir, 'ckpt'), exist_ok=True)
torch.save({'g': g_module.state_dict(), 'd': d_module.state_dict(), 'g_ema': g_ema.state_dict(), 'g_optim': g_optim.state_dict(), 'd_optim': d_optim.state_dict(), 'args': args}, os.path.join(ckpt_dir, f'ckpt/{str(i).zfill(6)}.pt')) |
class MBart50TokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = MBart50Tokenizer
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__(self, vocab_file, src_lang=None, tgt_lang=None, tokenizer_file=None, eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
super().__init__(vocab_file, src_lang=src_lang, tgt_lang=tgt_lang, tokenizer_file=tokenizer_file, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
self.vocab_file = vocab_file
self.add_special_tokens({'additional_special_tokens': FAIRSEQ_LANGUAGE_CODES})
self.lang_code_to_id = {lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES}
self._src_lang = (src_lang if (src_lang is not None) else 'en_XX')
self.tgt_lang = tgt_lang
self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
if (token_ids_1 is not None):
raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
prefix_ones = ([1] * len(self.prefix_tokens))
suffix_ones = ([1] * len(self.suffix_tokens))
if (token_ids_1 is None):
return ((prefix_ones + ([0] * len(token_ids_0))) + suffix_ones)
return (((prefix_ones + ([0] * len(token_ids_0))) + ([0] * len(token_ids_1))) + suffix_ones)
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if (token_ids_1 is None):
return ((self.prefix_tokens + token_ids_0) + self.suffix_tokens)
return (((self.prefix_tokens + token_ids_0) + token_ids_1) + self.suffix_tokens)
def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str='en_XX', tgt_texts: Optional[List[str]]=None, tgt_lang: str='ro_RO', **kwargs) -> BatchEncoding:
self.src_lang = src_lang
self.tgt_lang = tgt_lang
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    @contextmanager
    def as_target_tokenizer(self):
self.set_tgt_lang_special_tokens(self.tgt_lang)
(yield)
self.set_src_lang_special_tokens(self.src_lang)
def set_src_lang_special_tokens(self, src_lang: str) -> None:
self.cur_lang_code_id = self.convert_tokens_to_ids(src_lang)
self.prefix_tokens = [self.cur_lang_code_id]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
self._tokenizer.post_processor = processors.TemplateProcessing(single=((prefix_tokens_str + ['$A']) + suffix_tokens_str), pair=((prefix_tokens_str + ['$A', '$B']) + suffix_tokens_str), special_tokens=list(zip((prefix_tokens_str + suffix_tokens_str), (self.prefix_tokens + self.suffix_tokens))))
def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
self.cur_lang_code_id = self.convert_tokens_to_ids(tgt_lang)
self.prefix_tokens = [self.cur_lang_code_id]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
self._tokenizer.post_processor = processors.TemplateProcessing(single=((prefix_tokens_str + ['$A']) + suffix_tokens_str), pair=((prefix_tokens_str + ['$A', '$B']) + suffix_tokens_str), special_tokens=list(zip((prefix_tokens_str + suffix_tokens_str), (self.prefix_tokens + self.suffix_tokens))))
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
if (not os.path.isdir(save_directory)):
logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
return
out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,) |
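# Minimal usage sketch (the checkpoint name and language codes below are
# illustrative assumptions):
# tokenizer = MBart50TokenizerFast.from_pretrained('facebook/mbart-large-50',
#                                                  src_lang='en_XX', tgt_lang='ro_RO')
# model_inputs = tokenizer('Hello world', return_tensors='pt')
# with tokenizer.as_target_tokenizer():
#     labels = tokenizer('Salut lume', return_tensors='pt')['input_ids']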
class TrajectoryEncoder(object):
    def __init__(self, seq_length=8, max_num_obj=20):
        # seq_length and max_num_obj are required by build_model; the defaults here
        # are assumptions, since the original values are not given in this snippet.
        self.seq_length = seq_length
        self.max_num_obj = max_num_obj
        self.encoding_dim = 8
        self.input_size = 3
        self.grad_clip = 10.0
        self.learning_rate = 0.005
        self.build_model()
def build_model(self):
self.inputs = tf.placeholder(tf.float32, (self.seq_length, self.max_num_obj, self.input_size), name='inputs')
self.nonexistent_ped = tf.constant(0.0, name='zero_ped')
self.targets = tf.placeholder(tf.float32, (self.seq_length, self.max_num_obj, self.input_size), name='targets')
self.lr = tf.Variable(self.learning_rate, trainable=False, name='learning_rate')
self.frame_target_data = [tf.squeeze(target_, [0]) for target_ in tf.split(self.targets, self.seq_length, 0)]
self.cost = tf.constant(0.0, name='cost')
self.counter = tf.constant(0.0, name='counter')
self.increment = tf.constant(1.0, name='increment')
self.frame_data = [tf.squeeze(input_, [0]) for input_ in tf.split(self.inputs, self.seq_length, 0)]
        for (seq, frame) in enumerate(self.frame_data):
            current_frame = frame
            for ped in range(self.max_num_obj):
                pedID = current_frame[(ped, 0)]
                spat_input = tf.slice(current_frame, [ped, 1], [1, 2])
                encoded = tf.layers.dense(spat_input, 5, activation=tf.nn.relu)
                [x_data, y_data] = tf.split(tf.slice(self.frame_target_data[seq], [ped, 1], [1, 2]), 2, 1)
                target_pedID = self.frame_target_data[seq][(ped, 0)]
                [o_mux, o_muy, o_sx, o_sy, o_corr] = get_coef(encoded)
                lossfunc = get_lossfunc(o_mux, o_muy, o_sx, o_sy, o_corr, x_data, y_data)
                # Accumulate the loss and the frame count only when both the input
                # and the target pedestrian exist in this frame.
                skip = tf.logical_or(tf.equal(pedID, self.nonexistent_ped), tf.equal(target_pedID, self.nonexistent_ped))
                self.cost = tf.where(skip, self.cost, tf.add(self.cost, lossfunc))
                self.counter = tf.where(skip, self.counter, tf.add(self.counter, self.increment))
        self.cost = tf.div(self.cost, self.counter)
        tvars = tf.trainable_variables()
        gradients = tf.gradients(self.cost, tvars)
        (grads, _) = tf.clip_by_global_norm(gradients, self.grad_clip)
        optimizer = tf.train.RMSPropOptimizer(self.lr)
        self.train_op = optimizer.apply_gradients(zip(grads, tvars)) |
def test_copy_from():
shape = [2, 3, 4]
src = nn.NdArray(shape)
dst = nn.NdArray(shape)
src.data = 0
src.cast(dtype=np.uint8)
dst.copy_from(src, use_current_context=False)
assert (dst.dtype == np.uint8)
from nnabla.ext_utils import get_extension_context
with nn.context_scope(get_extension_context('cpu', dtype='float')):
dst.copy_from(src, use_current_context=True)
assert (dst.dtype == np.uint8)
src.zero()
with nn.context_scope(get_extension_context('cpu', dtype='float')):
dst.copy_from(src, use_current_context=True)
assert (dst.dtype == np.float32) |
class ChessLMDataModule(LightningDataModule):
def __init__(self, data_dir=None, vocab_dir=None, batch_size=8, num_workers=1, train_size=1000000.0, n_positions=800, other_eval=True, rap_prob=0.0, rap_no_grad=False, oracle=False, model_type='transformer', **kwargs):
super().__init__()
self.other_eval = other_eval
self.model_type = model_type
self.vocab_dir = vocab_dir
self.data_dir = data_dir
self.train_size = train_size
self.rap_prob = rap_prob
self.oracle = oracle
if self.oracle:
self.rap_prob = 1.0
self.max_len = n_positions
self.batch_size = batch_size
self.num_workers = num_workers
self.tokenizer = ChessTokenizer(path.join(self.vocab_dir, 'vocab.txt'))
self.train_data_collator = DataCollatorForLanguageModeling(tokenizer=self.tokenizer, rap_no_grad=rap_no_grad, model_type=self.model_type)
self.inference_data_collator = DataCollatorForLanguageModeling(tokenizer=self.tokenizer, rap_no_grad=(False if self.oracle else True), model_type=self.model_type)
self.train_file = path.join(self.data_dir, 'train.txt')
self.dev_file = path.join(self.data_dir, 'dev.txt')
self.test_file = path.join(self.data_dir, 'test.txt')
self.num_of_canonical_tokens = self.get_num_of_canonical_tokens()
if self.other_eval:
(self.other_eval_files, self.other_eval_fen) = ({}, {})
other_eval_dir = path.join(path.dirname(self.data_dir), f'other_eval/uci')
for task_category in TASK_CATEGORIES:
if ((not self.rap_prob) and (task_category == 'start')):
continue
self.other_eval_files[task_category] = {}
for len_category in LENGTH_CATEGORIES:
self.other_eval_files[task_category][len_category] = path.join(other_eval_dir, f'{task_category}_{len_category}.jsonl')
self.other_eval_sets = OrderedDict()
self.load_other_eval_sets()
print('Other eval sets loaded!')
def get_num_of_canonical_tokens(self):
split_to_num_tokens = {}
for split in ['val', 'test']:
if ((split == 'val') or (split == 'dev')):
data_file = self.dev_file
elif (split == 'test'):
data_file = self.test_file
else:
raise ValueError
num_moves = []
with open(data_file) as f:
for line in f:
num_moves.append((len(line.strip().split()) + 1))
split_to_num_tokens[split] = num_moves
return split_to_num_tokens
def load_other_eval_sets(self):
self.other_eval_sets = OrderedDict()
for task_category in self.other_eval_files:
self.other_eval_sets[task_category] = {}
for len_category in LENGTH_CATEGORIES:
eval_file = self.other_eval_files[task_category][len_category]
eval_set = []
with open(eval_file) as f:
for (idx, line) in enumerate(f):
instance = json.loads(line.strip())
coded_instance = OrderedDict()
for (key, val) in instance.items():
if ('prefix' in key):
if (((key == 'oracle_prefix') and self.oracle) or ((key == 'prefix') and (not self.oracle))):
prefix = val
(prefix_enc, move_end_positions) = self.tokenizer.encode(prefix, add_special_tokens=False, get_move_end_positions=True)
prefix_enc = ([self.tokenizer.bos_token_id] + prefix_enc)
coded_instance['prefix'] = prefix
coded_instance['prefix_enc'] = prefix_enc
elif isinstance(val, str):
coded_val = self.tokenizer.encode_token(val)
coded_instance[key] = val
coded_instance[(key + '_enc')] = coded_val
elif isinstance(val, list):
coded_val = [self.tokenizer.encode_token(token) for token in val]
coded_instance[key] = val
coded_instance[(key + '_enc')] = coded_val
else:
raise ValueError
eval_set.append(coded_instance)
self.other_eval_sets[task_category][len_category] = eval_set
def train_dataloader(self):
train_dataset = LineByLineTextDataset(tokenizer=self.tokenizer, file_path=self.train_file, block_size=self.max_len, max_instances=self.train_size, rap_prob=(1.0 if self.oracle else self.rap_prob))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True, collate_fn=self.train_data_collator, drop_last=False, pin_memory=True)
return train_loader
def val_dataloader(self):
dev_dataset = LineByLineTextDataset(tokenizer=self.tokenizer, file_path=self.dev_file, block_size=self.max_len, rap_prob=(1.0 if self.oracle else 0.0))
dev_loader = torch.utils.data.DataLoader(dev_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, collate_fn=self.inference_data_collator, drop_last=False, pin_memory=True)
return dev_loader
def test_dataloader(self):
test_dataset = LineByLineTextDataset(tokenizer=self.tokenizer, file_path=self.test_file, block_size=self.max_len, rap_prob=(1.0 if self.oracle else 0.0))
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, collate_fn=self.inference_data_collator, drop_last=False)
return test_loader |
class FSRNet(nn.Module):
def __init__(self):
super(FSRNet, self).__init__()
self.coarse_SR = CoarseSR()
self.fine_SR = FineSR()
self.prior_estimation = PriorEstimation()
def forward(self, img):
img_coarse = self.coarse_SR(img)
(landmark, face_parsing) = self.prior_estimation(img_coarse)
img_fine = self.fine_SR(img_coarse, torch.cat((landmark, face_parsing), dim=1))
return (img_coarse, landmark, face_parsing, img_fine) |
def encode_dataset(dataset, vocab, test=False):
questions = []
sparqls = []
choices = []
answers = []
for question in tqdm(dataset):
q = [vocab['word_token_to_idx'].get(w, vocab['word_token_to_idx']['<UNK>']) for w in word_tokenize(question['question'].lower())]
questions.append(q)
_ = [vocab['answer_token_to_idx'][w] for w in question['choices']]
choices.append(_)
if test:
continue
_ = [vocab['sparql_token_to_idx'].get(w, vocab['sparql_token_to_idx']['<UNK>']) for w in tokenize_sparql(question['sparql'])]
_ = (([vocab['sparql_token_to_idx']['<START>']] + _) + [vocab['sparql_token_to_idx']['<END>']])
sparqls.append(_)
if ('answer' in question):
answers.append(vocab['answer_token_to_idx'].get(question['answer']))
max_len = max((len(q) for q in questions))
for q in questions:
while (len(q) < max_len):
q.append(vocab['word_token_to_idx']['<PAD>'])
if (not test):
max_len = max((len(s) for s in sparqls))
for s in sparqls:
while (len(s) < max_len):
s.append(vocab['sparql_token_to_idx']['<PAD>'])
questions = np.asarray(questions, dtype=np.int32)
sparqls = np.asarray(sparqls, dtype=np.int32)
choices = np.asarray(choices, dtype=np.int32)
answers = np.asarray(answers, dtype=np.int32)
return (questions, sparqls, choices, answers) |
class I7PoolFunction(Function):
    @staticmethod
    def forward(ctx, input, guide):
(output, maxout) = _C.I7_pool_forward(input, guide)
ctx.save_for_backward(input, output, guide, maxout)
return output
    @staticmethod
    def backward(ctx, grad_output):
(input, output, guide, maxout) = ctx.saved_variables
(grad_input, grad_guide) = _C.I7_pool_backward(input, guide, output, maxout, grad_output)
return (grad_input, grad_guide) |
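# Minimal usage sketch: custom autograd Functions are invoked via `.apply`, usually
# behind a thin wrapper so callers never touch the Function class directly.
def i7_pool(input, guide):
    return I7PoolFunction.apply(input, guide)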
def rename_rirs(decompress_path):
try:
os.rename(os.path.join(decompress_path, 'simulated_rirs_16k'), os.path.join(decompress_path, 'SLR26'))
except Exception:
pass
try:
os.rename(os.path.join(decompress_path, 'RIRS_NOISES'), os.path.join(decompress_path, 'SLR28'))
except Exception:
pass |
def get_device(tensors):
if isinstance(tensors, (list, tuple)):
return get_device(tensors[0])
elif isinstance(tensors, dict):
for (key, value) in tensors.items():
return get_device(value)
else:
return tensors.device |
def configure_satellite_container():
base_path = 'satellite/config/'
conf_file = (base_path + 'sat.conf')
change_line(conf_file, 17, (('emu_ipv4 = ' + str(os.getenv('EMU_NETWORK_HEAD'))) + '.0.2/24')) |
class MockRegexPattern(object):
def __init__(self, target_type):
self.type = target_type
def match(self, text):
try:
self.type(text)
except ValueError:
return False
return True |
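# Minimal usage sketch: the mock "matches" any text that the target type can parse.
_int_pattern = MockRegexPattern(int)
assert _int_pattern.match('42')
assert not _int_pattern.match('forty-two')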
@pytest.mark.experimental
@pytest.mark.parametrize('pad_columns', ['user_id'])
@pytest.mark.usefixtures('dataframe')
def test_not_array_column(pad_columns, dataframe):
with pytest.raises(ValueError):
padder = Padder(pad_columns=pad_columns)
padder.transform(dataframe) |
def process_literal(value: str):
pattern_date = '(?:(?:jan.|feb.|mar.|apr.|may|jun.|jul.|aug.|sep.|oct.|nov.|dec.) the \\d+(?:st|nd|rd|th), \\d{4}|\\d{4}-\\d{2}-\\d{2}|\\d{2}/\\d{2}/\\d{4})'
pattern_datetime = '\\d{4}-\\d{2}-\\d{2}t[\\d:z-]+'
pattern_float = '(?:[-]*\\d+[.]*\\d*e[+-]\\d+|(?<= )[-]*\\d+[.]\\d*|^[-]*\\d+[.]\\d*)'
pattern_yearmonth = '\\d{4}-\\d{2}'
pattern_year = '(?:(?<= )\\d{4}|^\\d{4})'
pattern_int = '(?:(?<= )[-]*\\d+|^[-]*\\d+)'
    # NOTE: the XSD datatype IRIs below are assumptions; adjust them to the
    # datatypes used by the target knowledge base.
    if (len(re.findall(pattern_datetime, value)) == 1):
        value = value.replace('t', 'T').replace('z', 'Z')
        return f'{value}^^<http://www.w3.org/2001/XMLSchema#dateTime>'
    elif (len(re.findall(pattern_date, value)) == 1):
        if ('-' in value):
            return f'{value}^^<http://www.w3.org/2001/XMLSchema#date>'
        elif ('/' in value):
            fields = value.split('/')
            value = f'{fields[2]}-{fields[0]}-{fields[1]}'
            return f'{value}^^<http://www.w3.org/2001/XMLSchema#date>'
    elif (len(re.findall(pattern_yearmonth, value)) == 1):
        return f'{value}^^<http://www.w3.org/2001/XMLSchema#gYearMonth>'
    elif (len(re.findall(pattern_float, value)) == 1):
        return f'{value}^^<http://www.w3.org/2001/XMLSchema#double>'
    elif ((len(re.findall(pattern_year, value)) == 1) and (int(value) <= 2015)):
        return f'{value}^^<http://www.w3.org/2001/XMLSchema#gYear>'
    elif (len(re.findall(pattern_int, value)) == 1):
        return f'{value}^^<http://www.w3.org/2001/XMLSchema#integer>'
    else:
        return 'null' |
def _check_params(length, size):
_check_size(size)
if ((length % size) != 0):
raise error('not a whole number of frames') |
class NeuralMatrixFactorizationModel(keras.Model):
def __init__(self, num_users, num_items, embed_mf_size, embed_mlp_size, mlp_hidden_size, dropout, is_mf_train, is_mlp_train, learning_rate=0.01, random_seed=42, name='NeuralMatrixFactorizationModel', **kwargs):
super().__init__(name=name, **kwargs)
tf.random.set_seed(random_seed)
self.num_users = num_users
self.num_items = num_items
self.embed_mf_size = embed_mf_size
self.embed_mlp_size = embed_mlp_size
self.mlp_hidden_size = mlp_hidden_size
self.dropout = dropout
self.is_mf_train = is_mf_train
self.is_mlp_train = is_mlp_train
self.initializer = tf.initializers.GlorotUniform()
self.user_mf_embedding = keras.layers.Embedding(input_dim=self.num_users, output_dim=self.embed_mf_size, embeddings_initializer=self.initializer, name='U_MF', dtype=tf.float32)
self.item_mf_embedding = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size, embeddings_initializer=self.initializer, name='I_MF', dtype=tf.float32)
self.user_mlp_embedding = keras.layers.Embedding(input_dim=self.num_users, output_dim=self.embed_mlp_size, embeddings_initializer=self.initializer, name='U_MLP', dtype=tf.float32)
self.item_mlp_embedding = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mlp_size, embeddings_initializer=self.initializer, name='I_MLP', dtype=tf.float32)
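        # Call each embedding once with index 0 so the layers build their weights
        # immediately rather than lazily on the first training batch.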
self.user_mf_embedding(0)
self.user_mlp_embedding(0)
self.item_mf_embedding(0)
self.item_mlp_embedding(0)
self.mlp_layers = keras.Sequential()
for units in mlp_hidden_size:
self.mlp_layers.add(keras.layers.Dropout(dropout))
self.mlp_layers.add(keras.layers.Dense(units, activation='relu'))
if (self.is_mf_train and self.is_mlp_train):
self.predict_layer = keras.layers.Dense(1, input_dim=(self.embed_mf_size + self.mlp_hidden_size[(- 1)]))
elif self.is_mf_train:
self.predict_layer = keras.layers.Dense(1, input_dim=self.embed_mf_size)
elif self.is_mlp_train:
self.predict_layer = keras.layers.Dense(1, input_dim=self.mlp_hidden_size[(- 1)])
self.sigmoid = keras.activations.sigmoid
self.loss = keras.losses.BinaryCrossentropy()
self.optimizer = tf.optimizers.Adam(learning_rate)
def call(self, inputs, training=None, mask=None):
(user, item) = inputs
user_mf_e = self.user_mf_embedding(user)
item_mf_e = self.item_mf_embedding(item)
user_mlp_e = self.user_mlp_embedding(user)
item_mlp_e = self.item_mlp_embedding(item)
if self.is_mf_train:
mf_output = (user_mf_e * item_mf_e)
if self.is_mlp_train:
mlp_output = self.mlp_layers(tf.concat([user_mlp_e, item_mlp_e], (- 1)))
if (self.is_mf_train and self.is_mlp_train):
output = self.sigmoid(self.predict_layer(tf.concat([mf_output, mlp_output], (- 1))))
elif self.is_mf_train:
output = self.sigmoid(self.predict_layer(mf_output))
elif self.is_mlp_train:
output = self.sigmoid(self.predict_layer(mlp_output))
else:
raise RuntimeError('mf_train and mlp_train can not be False at the same time')
return output
def train_step(self, batch):
(user, pos, label) = batch
with tf.GradientTape() as tape:
output = self(inputs=(user, pos), training=True)
loss = self.loss(label, output)
grads = tape.gradient(loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return loss
def predict(self, inputs, training=False, **kwargs):
output = self.call(inputs=inputs, training=training)
return output
def get_recs(self, inputs, training=False, **kwargs):
(user, item) = inputs
user_mf_e = self.user_mf_embedding(user)
item_mf_e = self.item_mf_embedding(item)
user_mlp_e = self.user_mlp_embedding(user)
item_mlp_e = self.item_mlp_embedding(item)
if self.is_mf_train:
mf_output = (user_mf_e * item_mf_e)
if self.is_mlp_train:
mlp_output = self.mlp_layers(tf.concat([user_mlp_e, item_mlp_e], (- 1)))
if (self.is_mf_train and self.is_mlp_train):
output = self.sigmoid(self.predict_layer(tf.concat([mf_output, mlp_output], (- 1))))
elif self.is_mf_train:
output = self.sigmoid(self.predict_layer(mf_output))
elif self.is_mlp_train:
output = self.sigmoid(self.predict_layer(mlp_output))
else:
raise RuntimeError('mf_train and mlp_train can not be False at the same time')
return tf.squeeze(output)
def get_top_k(self, preds, train_mask, k=100):
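        # Scores at positions where train_mask is False are replaced with -inf so
        # they can never appear in the top-k.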
return tf.nn.top_k(tf.where(train_mask, preds, (- np.inf)), k=k, sorted=True) |
def load_cluster_config(path):
if path:
path = os.path.join(dirname(__file__), os.path.expandvars(path))
dcc = io.load_configfile(path)
else:
dcc = {}
if ('__default__' not in dcc):
dcc['__default__'] = {}
return dcc |
class StepLR(_LRScheduler):
def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=(- 1), verbose=False):
self.step_size = step_size
self.gamma = gamma
super(StepLR, self).__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if (not self._get_lr_called_within_step):
warnings.warn('To get the last learning rate computed by the scheduler, please use `get_last_lr()`.', UserWarning)
if ((self.last_epoch == 0) or ((self.last_epoch % self.step_size) != 0)):
return [group['lr'] for group in self.optimizer.param_groups]
return [(group['lr'] * self.gamma) for group in self.optimizer.param_groups]
def _get_closed_form_lr(self):
return [(base_lr * (self.gamma ** (self.last_epoch // self.step_size))) for base_lr in self.base_lrs] |
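# Minimal usage sketch (assumes torch; the optimizer and hyper-parameters are
# illustrative): the learning rate is multiplied by gamma every step_size epochs.
if __name__ == '__main__':
    import torch
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=0.1)
    scheduler = StepLR(optimizer, step_size=2, gamma=0.5)
    for _ in range(6):
        optimizer.step()
        scheduler.step()
    print(optimizer.param_groups[0]['lr'])  # 0.1 * 0.5 ** 3 = 0.0125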
def BatchIncremental(nominal_attributes=None):
warnings.warn("'BatchIncremental' has been renamed to 'BatchIncrementalClassifier' in v0.5.0.\nThe old name will be removed in v0.7.0", category=FutureWarning)
return BatchIncrementalClassifier(nominal_attributes=nominal_attributes) |
class NOISE_TRANSFORMATIONS(Enum):
GAUSSIAN = 'gaussian'
LOCALVAR = 'localvar'
POISSON = 'poisson'
SALT = 'salt'
PEPPER = 'pepper'
SALTNPEPPER = 's&p'
SPECKLE = 'speckle' |
def require_scatter(test_case):
if (not is_scatter_available()):
return unittest.skip('test requires PyTorch Scatter')(test_case)
else:
return test_case |
def compute_validation_loss(loss_fn: Callable, dataset: Iterable, max_batches: Optional[int]=None, name: Optional[str]=None):
def compute_loss(info: StepInfo):
loss = eval_loss_loop(loss_fn, info.model, dataset, max_batches=max_batches, name=name)
if (wandb.run is not None):
prefix = 'eval'
if name:
prefix += ('/' + name)
wandb.log({f'{prefix}/loss': loss}, step=info.step)
if name:
logger.info(f'{name} validation loss: {loss:.3f}')
else:
logger.info(f'validation loss: {loss:.3f}')
return loss
return compute_loss |
def deconv2d_bn_act(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), adj=(0, 0), no_bias=True, target_shape=None, act_type='relu', momentum=0.9, eps=(1e-05 + 1e-12), fix_gamma=True, name='deconv2d', use_global_stats=False, **kwargs):
global _params
deconv = deconv2d(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, adj=adj, target_shape=target_shape, no_bias=no_bias, name=name, **kwargs)
gamma = _params.get(('%s_bn_gamma' % name), **kwargs)
beta = _params.get(('%s_bn_beta' % name), **kwargs)
moving_mean = _params.get(('%s_bn_moving_mean' % name), **kwargs)
moving_var = _params.get(('%s_bn_moving_var' % name), **kwargs)
if fix_gamma:
bn = mx.sym.BatchNorm(data=deconv, beta=beta, gamma=gamma, moving_mean=moving_mean, moving_var=moving_var, fix_gamma=True, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_bn' % name))
else:
bn = mx.sym.BatchNorm(data=deconv, beta=beta, gamma=gamma, moving_mean=moving_mean, moving_var=moving_var, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_bn' % name))
act = activation(bn, act_type=act_type, name=name)
return act |
class SEResNeXtBottleneck(Bottleneck):
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4):
super(SEResNeXtBottleneck, self).__init__()
width = (math.floor((planes * (base_width / 64))) * groups)
self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, (planes * 4), kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d((planes * 4))
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule((planes * 4), reduction=reduction)
self.downsample = downsample
self.stride = stride |
def BIBD_196_6_1():
from sage.sets.recursively_enumerated_set import RecursivelyEnumeratedSet
from .incidence_structures import IncidenceStructure
a = 'a'
bibd = [((0, 0), (2, 0), (12, 0), (45, 0), (3, 1), (11, a)), ((0, 0), (3, 0), (8, 0), (5, 1), (17, 1), (39, a)), ((0, 0), (9, 0), (36, 0), (24, 1), (44, 1), (37, a)), ((0, 0), (15, 0), (34, 1), (41, 1), (47, 2), (18, a)), ((0, 0), (7, 0), (31, 0), (13, 1), (35, 2), (41, a)), ((0, 0), (14, 0), (32, 1), (10, 2), (22, a), (44, a)), ((0, 0), (23, 0), (21, 1), (39, 1), (19, a), (25, a)), ((0, 0), (33, 1), (0, a), (5, a), (29, a), (47, a)), ((0, 0), (1, 0), (0, 1), (30, 1), (0, 2), (18, 2)), ((8, 0), (19, 0), (44, 1), (31, 1), (46, 2), (48, 2))]
gens = (lambda B: [frozenset(((((x * 30) % 49), (((y + 1) % 3) if (y != a) else a)) for (x, y) in B)), frozenset(((((x + 1) % 49), y) for (x, y) in B))])
bibd = RecursivelyEnumeratedSet([frozenset(e) for e in bibd], successors=gens)
return IncidenceStructure(bibd)._blocks |
class TestChipIO(object):
def test_chip2020_task1(self):
io = ChipIO(tokenize_callback='char', sep='|||', encoding='utf-8')
(train_data, train_errors, train_mismatches) = io.read('data/chip2020/task1/train_data.txt', return_errors=True)
(dev_data, dev_errors, dev_mismatches) = io.read('data/chip2020/task1/val_data.txt', return_errors=True)
assert (len(train_data) == 15000)
assert (sum((len(ex['chunks']) for ex in train_data)) == 61796)
assert (len(train_errors) == 0)
assert (len(train_mismatches) == 0)
assert (len(dev_data) == 5000)
assert (sum((len(ex['chunks']) for ex in dev_data)) == 20300)
assert (len(dev_errors) == 0)
assert (len(dev_mismatches) == 0)
assert (max((detect_overlapping_level(ex['chunks']) for data in [train_data, dev_data] for ex in data)) == ARBITRARY)
assert all(((filter_clashed_by_priority(ex['chunks'], allow_level=ARBITRARY) == ex['chunks']) for data in [train_data, dev_data] for ex in data)) |
def get_random_nodelist(G, A, num_tests):
nodelist = ([None] * num_tests)
for k in range(num_tests):
i = random.randint(0, (G.numNodes() - 1))
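        # Resample until the drawn node has an observed (non-missing) attribute value.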
while (A[i] == NA_VALUE):
i = random.randint(0, (G.numNodes() - 1))
nodelist[k] = i
return nodelist |
class MyMaxPool1dPadSame(nn.Module):
def __init__(self, kernel_size):
super(MyMaxPool1dPadSame, self).__init__()
self.kernel_size = kernel_size
self.stride = 1
self.max_pool = torch.nn.MaxPool1d(kernel_size=self.kernel_size)
def forward(self, x):
net = x
in_dim = net.shape[(- 1)]
out_dim = (((in_dim + self.stride) - 1) // self.stride)
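        # TensorFlow-style 'SAME' padding: compute the total padding p needed for
        # out_dim windows of kernel_size at this stride, split between left and right.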
p = max(0, ((((out_dim - 1) * self.stride) + self.kernel_size) - in_dim))
pad_left = (p // 2)
pad_right = (p - pad_left)
net = F.pad(net, (pad_left, pad_right), 'constant', 0)
net = self.max_pool(net)
return net |
@require_tf
class TFBertModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = ((TFBertModel, TFBertForMaskedLM, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification) if is_tf_available() else ())
class TFBertModelTester(object):
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = BertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)
return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels)
def create_and_check_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = TFBertModel(config=config)
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
(sequence_output, pooled_output) = model(inputs)
inputs = [input_ids, input_mask]
(sequence_output, pooled_output) = model(inputs)
(sequence_output, pooled_output) = model(input_ids)
result = {'sequence_output': sequence_output.numpy(), 'pooled_output': pooled_output.numpy()}
self.parent.assertListEqual(list(result['sequence_output'].shape), [self.batch_size, self.seq_length, self.hidden_size])
self.parent.assertListEqual(list(result['pooled_output'].shape), [self.batch_size, self.hidden_size])
def create_and_check_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = TFBertForMaskedLM(config=config)
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
(prediction_scores,) = model(inputs)
result = {'prediction_scores': prediction_scores.numpy()}
self.parent.assertListEqual(list(result['prediction_scores'].shape), [self.batch_size, self.seq_length, self.vocab_size])
def create_and_check_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = TFBertForNextSentencePrediction(config=config)
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
(seq_relationship_score,) = model(inputs)
result = {'seq_relationship_score': seq_relationship_score.numpy()}
self.parent.assertListEqual(list(result['seq_relationship_score'].shape), [self.batch_size, 2])
def create_and_check_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = TFBertForPreTraining(config=config)
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
(prediction_scores, seq_relationship_score) = model(inputs)
result = {'prediction_scores': prediction_scores.numpy(), 'seq_relationship_score': seq_relationship_score.numpy()}
self.parent.assertListEqual(list(result['prediction_scores'].shape), [self.batch_size, self.seq_length, self.vocab_size])
self.parent.assertListEqual(list(result['seq_relationship_score'].shape), [self.batch_size, 2])
def create_and_check_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
config.num_labels = self.num_labels
model = TFBertForSequenceClassification(config=config)
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
(logits,) = model(inputs)
result = {'logits': logits.numpy()}
self.parent.assertListEqual(list(result['logits'].shape), [self.batch_size, self.num_labels])
def create_and_check_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
config.num_choices = self.num_choices
model = TFBertForMultipleChoice(config=config)
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
inputs = {'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids}
(logits,) = model(inputs)
result = {'logits': logits.numpy()}
self.parent.assertListEqual(list(result['logits'].shape), [self.batch_size, self.num_choices])
def create_and_check_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
config.num_labels = self.num_labels
model = TFBertForTokenClassification(config=config)
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
(logits,) = model(inputs)
result = {'logits': logits.numpy()}
self.parent.assertListEqual(list(result['logits'].shape), [self.batch_size, self.seq_length, self.num_labels])
def create_and_check_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = TFBertForQuestionAnswering(config=config)
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
(start_logits, end_logits) = model(inputs)
result = {'start_logits': start_logits.numpy(), 'end_logits': end_logits.numpy()}
self.parent.assertListEqual(list(result['start_logits'].shape), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result['end_logits'].shape), [self.batch_size, self.seq_length])
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return (config, inputs_dict)
def setUp(self):
self.model_tester = TFBertModelTest.TFBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_bert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_multiple_choice(*config_and_inputs)
def test_for_next_sequence_prediction(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_next_sequence_prediction(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_pretraining(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_token_classification(*config_and_inputs)
def test_model_from_pretrained(self):
for model_name in ['bert-base-uncased']:
model = TFBertModel.from_pretrained(model_name, cache_dir=CACHE_DIR)
self.assertIsNotNone(model) |
def get_dset_features(dset, model=None, num_workers=12, num=None, shuffle=False, seed=0, batch_size=128, device=torch.device('cuda'), mode='clean', custom_fn_resize=None, description='', verbose=True, custom_image_transform=None):
dataset = ResizeDataset(dset, mode=mode)
if (custom_image_transform is not None):
dataset.custom_image_tranform = custom_image_transform
if (custom_fn_resize is not None):
dataset.fn_resize = custom_fn_resize
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=num_workers)
l_feats = []
if verbose:
pbar = tqdm(dataloader, desc=description)
else:
pbar = dataloader
for batch in pbar:
l_feats.append(get_batch_features(batch, model, device))
np_feats = np.concatenate(l_feats)
return np_feats |
@HEADS.register_module()
class DynamicMaskHead(FCNMaskHead):
def __init__(self, num_convs=4, roi_feat_size=14, in_channels=256, conv_kernel_size=3, conv_out_channels=256, num_classes=80, class_agnostic=False, upsample_cfg=dict(type='deconv', scale_factor=2), conv_cfg=None, norm_cfg=None, dynamic_conv_cfg=dict(type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=14, with_proj=False, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), loss_mask=dict(type='DiceLoss', loss_weight=8.0), **kwargs):
super(DynamicMaskHead, self).__init__(num_convs=num_convs, roi_feat_size=roi_feat_size, in_channels=in_channels, conv_kernel_size=conv_kernel_size, conv_out_channels=conv_out_channels, num_classes=num_classes, class_agnostic=class_agnostic, upsample_cfg=upsample_cfg, conv_cfg=conv_cfg, norm_cfg=norm_cfg, loss_mask=loss_mask, **kwargs)
assert (class_agnostic is False), 'DynamicMaskHead only support class_agnostic=False'
self.fp16_enabled = False
self.instance_interactive_conv = build_transformer(dynamic_conv_cfg)
def init_weights(self):
for p in self.parameters():
if (p.dim() > 1):
nn.init.xavier_uniform_(p)
nn.init.constant_(self.conv_logits.bias, 0.0)
    @auto_fp16()
def forward(self, roi_feat, proposal_feat):
proposal_feat = proposal_feat.reshape((- 1), self.in_channels)
proposal_feat_iic = self.instance_interactive_conv(proposal_feat, roi_feat)
x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size())
for conv in self.convs:
x = conv(x)
if (self.upsample is not None):
x = self.upsample(x)
if (self.upsample_method == 'deconv'):
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
    @force_fp32(apply_to=('mask_pred',))
def loss(self, mask_pred, mask_targets, labels):
num_pos = labels.new_ones(labels.size()).float().sum()
avg_factor = torch.clamp(reduce_mean(num_pos), min=1.0).item()
loss = dict()
if (mask_pred.size(0) == 0):
loss_mask = mask_pred.sum()
else:
loss_mask = self.loss_mask(mask_pred[(torch.arange(num_pos).long(), labels, ...)].sigmoid(), mask_targets, avg_factor=avg_factor)
loss['loss_mask'] = loss_mask
return loss
def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [res.pos_assigned_gt_inds for res in sampling_results]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, gt_masks, rcnn_train_cfg)
return mask_targets |
def export_music(score, beat_data, chord_data, filename, repeat_chord=REPEAT_CHORD, outputs_path=OUTPUTS_PATH, water_mark=WATER_MARK):
harmony_list = []
offset = 0.0
filename = os.path.basename(filename)
filename = '.'.join(filename.split('.')[:(- 1)])
for (idx, song_chord) in enumerate(chord_data):
song_chord = [chord_types[int(cho_idx)] for cho_idx in song_chord]
song_beat = beat_data[idx]
pre_chord = None
for (t_idx, cho) in enumerate(song_chord):
cho = cho.replace('N.C.', 'R')
cho = cho.replace('bpedal', '-pedal')
if ((cho != 'R') and ((pre_chord != cho) or (repeat_chord and (t_idx != 0) and (song_beat[t_idx] == 4) and (song_beat[(t_idx - 1)] != 4)))):
chord_symbol = harmony.ChordSymbol(cho)
chord_symbol.offset = offset
harmony_list.append(chord_symbol)
offset += 0.25
pre_chord = cho
h_idx = 0
new_score = []
offset_list = []
for m in score:
if isinstance(m, stream.Measure):
new_m = deepcopy(m)
m_list = []
offset_list.append(m.offset)
for n in new_m:
if (not isinstance(n, harmony.ChordSymbol)):
if ((h_idx < len(harmony_list)) and ((n.offset + m.offset) >= harmony_list[h_idx].offset)):
harmony_list[h_idx].offset -= m.offset
m_list.append(harmony_list[h_idx])
h_idx += 1
m_list.append(n)
new_m.elements = m_list
new_score.append(new_m)
score = stream.Score(new_score)
for (m_idx, m) in enumerate(score):
m.offset = offset_list[m_idx]
if water_mark:
score = watermark(score, filename)
score.write('mxl', fp=(((outputs_path + '/') + filename) + '.mxl')) |
def main(args):
data_conf = {'num_channels': (NUM_CLASSES + 1), 'image_size': args.image_size, 'xbound': args.xbound, 'ybound': args.ybound, 'zbound': args.zbound, 'dbound': args.dbound, 'thickness': args.thickness, 'angle_class': args.angle_class, 'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'], 'Ncams': 6, 'final_dim': (128, 352)}
if ('temporal' in args.model):
parser_name = 'temporalsegmentationdata'
else:
parser_name = 'segmentationdata'
([train_loader, val_loader], [train_sampler, val_sampler]) = semantic_dataset(args.version, args.dataroot, data_conf, args.bsz, args.nworkers, args.distributed, parser_name)
model = get_model(args.model, data_conf, args.instance_seg, args.embedding_dim, args.direction_pred, args.angle_class)
model.load_state_dict(torch.load(args.modelf, map_location='cuda:0'), strict=False)
model.cuda()
(iou, process_iou, fps) = eval_iou(model, val_loader, logdir=args.logdir)
miou = iou[1:].mean()
process_miou = process_iou[1:].mean()
print(iou)
print(miou)
print(process_iou)
print(process_miou)
print(fps) |
@pytest.mark.unit
@pytest.mark.convert
def test_get_marker_file_name():
test_file_name = './test/test_file.cat'
expected_marker_file_name = 'test_file.cat.js'
actual_marker_file_name = convert.get_marker_file_name(test_file_name)
assert (expected_marker_file_name == actual_marker_file_name) |
def melspectrogram(y):
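    # Pre-emphasis -> STFT -> mel filterbank -> dB scale relative to
    # hparams.ref_level_db -> normalization.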
D = _stft(preemphasis(y))
S = (_amp_to_db(_linear_to_mel(np.abs(D))) - hparams.ref_level_db)
return _normalize(S) |
def test_matching():
width = 128
n_circles = 5
y = np.zeros((width, width), np.uint16)
for (i, r) in enumerate(np.linspace(0, width, (n_circles + 2))[1:(- 1)]):
(rr, cc) = disk(((width // 2), r), radius=(width // (3 * n_circles)), shape=y.shape)
y[(rr, cc)] = (i + 1)
for shift in (0, 5, 10):
y2 = np.roll(y, shift, axis=0)
iou = (np.sum((y2 == y)[(y > 0)]) / np.sum(((y + y2) > 0)))
res_all = matching(y, y2, thresh=(0.5 * iou), report_matches=True)
res_none = matching(y, y2, thresh=(2.0 * iou), report_matches=True)
assert ((res_all.tp, res_all.fp, res_all.fn) == (n_circles, 0, 0))
assert ((res_none.tp, res_none.fp, res_none.fn) == (0, n_circles, n_circles))
assert (len(res_all.matched_pairs) == n_circles)
assert (len(res_none.matched_pairs) == n_circles)
return (y, y2) |