code stringlengths 101 5.91M |
|---|
def convert_to_num_gpus(module, num_gpus_to_sim):
    """Recursively replace BatchNorm layers with multi-GPU-simulating variants.

    Every ``_BatchNorm`` descendant is swapped for the class registered under the
    same class name in ``MODULE_INSTANCES_TO_REPLACE``, copying the affine
    parameters and duplicating the running statistics once per simulated GPU
    (attributes ``running_mean_i`` / ``running_var_i``). Non-BN modules are kept
    as-is, but their children are converted recursively.

    Args:
        module: root torch module to convert (consumed; do not reuse afterwards).
        num_gpus_to_sim: number of per-"GPU" copies of the running statistics.

    Returns:
        The converted module tree.
    """
    module_output = module
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        new_cls = MODULE_INSTANCES_TO_REPLACE[module.__class__.__name__]
        # Replacement class mirrors the BN constructor, plus num_gpus_to_sim.
        module_output = new_cls(module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, num_gpus_to_sim=num_gpus_to_sim)
        if module.affine:
            # Detached clones so the new module owns its own affine parameters.
            module_output.weight.data = module.weight.data.clone().detach()
            module_output.bias.data = module.bias.data.clone().detach()
            module_output.weight.requires_grad = module.weight.requires_grad
            module_output.bias.requires_grad = module.bias.requires_grad
        module_output.num_batches_tracked = module.num_batches_tracked
        # One private copy of the running stats per simulated GPU.
        for i in range(num_gpus_to_sim):
            setattr(module_output, f'running_mean_{i}', module.running_mean.clone().detach())
            setattr(module_output, f'running_var_{i}', module.running_var.clone().detach())
    for (name, child) in module.named_children():
        module_output.add_module(name, convert_to_num_gpus(child, num_gpus_to_sim))
    # Drop the local reference to the original module; actual freeing happens
    # once callers release their references too.
    del module
    return module_output
class PerlmutterHvp(object):
    """Hessian-vector products via Pearlmutter's trick (TF1-style graph code).

    Compiles a function computing H·x for the Hessian H of a scalar objective f
    w.r.t. the target's trainable parameters; a Tikhonov damping term
    ``reg_coeff * x`` is added at evaluation time.
    """

    def __init__(self, num_slices=1):
        self.target = None      # object exposing get_params() / flat_to_params()
        self.reg_coeff = None   # damping coefficient added as reg_coeff * x
        self.opt_fun = None     # lazydict holding the compiled f_Hx_plain function
        self._num_slices = num_slices  # slice count passed to sliced_fun at eval time

    def update_opt(self, f, target, inputs, reg_coeff):
        """Build and lazily compile the plain Hx function for objective ``f``."""
        self.target = target
        self.reg_coeff = reg_coeff
        params = target.get_params(trainable=True)
        constraint_grads = tf.gradients(f, xs=params)
        # Parameters unused by f get None gradients; substitute zeros so the
        # second differentiation below stays well-defined.
        for (idx, (grad, param)) in enumerate(zip(constraint_grads, params)):
            if (grad is None):
                constraint_grads[idx] = tf.zeros_like(param)
        xs = tuple([tensor_utils.new_tensor_like(p.name.split(':')[0], p) for p in params])

        def Hx_plain():
            # Pearlmutter trick: H·x = d/dparams sum_i <grad_i, x_i>.
            # NOTE(review): tf.pack is the pre-TF-1.0 name of tf.stack — this
            # targets an old TensorFlow release; confirm before upgrading.
            Hx_plain_splits = tf.gradients(tf.reduce_sum(tf.pack([tf.reduce_sum((g * x)) for (g, x) in zip(constraint_grads, xs)])), params)
            for (idx, (Hx, param)) in enumerate(zip(Hx_plain_splits, params)):
                if (Hx is None):
                    Hx_plain_splits[idx] = tf.zeros_like(param)
            return tensor_utils.flatten_tensor_variables(Hx_plain_splits)
        self.opt_fun = ext.lazydict(f_Hx_plain=(lambda : tensor_utils.compile_function(inputs=(inputs + xs), outputs=Hx_plain(), log_name='f_Hx_plain')))

    def build_eval(self, inputs):
        """Return eval(x) computing (H + reg_coeff·I)·x for a flat vector x."""
        def eval(x):
            # Un-flatten x into per-parameter tensors before feeding the graph.
            xs = tuple(self.target.flat_to_params(x, trainable=True))
            ret = (sliced_fun(self.opt_fun['f_Hx_plain'], self._num_slices)(inputs, xs) + (self.reg_coeff * x))
            return ret
        return eval
def process_treebank(treebank, model_type, paths, args):
    """Copy the CoNLL-U treebank into the POS data directory via the shared prepare step."""
    pos_data_dir = paths['POS_DATA_DIR']
    prepare_tokenizer_treebank.copy_conllu_treebank(treebank, model_type, paths, pos_data_dir)
def main():
    """Train the BFAN/AOQ image-text matching model.

    Parses CLI options, builds loaders and the model, optionally resumes from a
    checkpoint, then runs the train/validate loop, checkpointing every epoch and
    tracking the best validation rsum.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', default='./data/', help='path to datasets')
    parser.add_argument('--data_name', default='precomp', help='{coco,f30k}_precomp')
    parser.add_argument('--vocab_path', default='./vocab/', help='Path to saved vocabulary json files.')
    parser.add_argument('--offcandipath', default='./offlinecandidates/', help='Path to saved vocabulary pickle files.')
    parser.add_argument('--marginonline', default=0.2, type=float, help='Rank loss margin.')
    parser.add_argument('--marginoffline', default=0.0, type=float, help='Rank loss margin.')
    parser.add_argument('--alpha', default=0.3, type=float, help='Adaptive Penalization Parameter.')
    parser.add_argument('--beta', default=1.5, type=float, help='Adaptive Penalization Parameter.')
    parser.add_argument('--offcandisize', default=300, type=float, help='Top candidate list size to sample offline hard negative text')
    parser.add_argument('--num_epochs', default=15, type=int, help='Number of training epochs.')
    parser.add_argument('--batch_size', default=128, type=int, help='Size of a training mini-batch.')
    parser.add_argument('--word_dim', default=300, type=int, help='Dimensionality of the word embedding.')
    parser.add_argument('--embed_size', default=1024, type=int, help='Dimensionality of the joint embedding.')
    parser.add_argument('--grad_clip', default=2.0, type=float, help='Gradient clipping threshold.')
    parser.add_argument('--num_layers', default=1, type=int, help='Number of GRU layers.')
    parser.add_argument('--learning_rate', default=0.0002, type=float, help='Initial learning rate.')
    parser.add_argument('--lr_update', default=10, type=int, help='Number of epochs to update the learning rate.')
    parser.add_argument('--workers', default=10, type=int, help='Number of data loader workers.')
    parser.add_argument('--log_step', default=1000, type=int, help='Number of steps to print and record the log.')
    parser.add_argument('--val_step', default=1000, type=int, help='Number of steps to run validation.')
    parser.add_argument('--logger_name', default='./runs/runX/log', help='Path to save Tensorboard log.')
    parser.add_argument('--model_name', default='./runs/runX/checkpoint', help='Path to save the model.')
    parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
    parser.add_argument('--max_violation', action='store_true', help='Use max instead of sum in the rank loss.')
    parser.add_argument('--img_dim', default=2048, type=int, help='Dimensionality of the image embedding.')
    parser.add_argument('--no_imgnorm', action='store_true', help='Do not normalize the image embeddings.')
    parser.add_argument('--no_txtnorm', action='store_true', help='Do not normalize the text embeddings.')
    parser.add_argument('--precomp_enc_type', default='basic', help='basic|weight_norm')
    parser.add_argument('--lambda_softmax', default=20.0, type=float, help='Attention softmax temperature.')
    parser.add_argument('--focal_type', default='equal', help='equal|prob')
    opt = parser.parse_args()
    print(opt)
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    tb_logger.configure(opt.logger_name, flush_secs=5)
    # Vocabulary size determines the word-embedding table size.
    vocab = deserialize_vocab(os.path.join(opt.vocab_path, ('%s_vocab.json' % opt.data_name)))
    opt.vocab_size = len(vocab)
    (train_loader, val_loader) = dataAOQ.get_loaders(opt.data_name, vocab, opt.offcandipath, opt.offcandisize, opt.batch_size, opt.workers, opt)
    model = BFAN(opt)
    # Bug fix: initialize BEFORE the resume block. Previously this was assigned
    # after resuming, clobbering the checkpoint's best_rsum back to 0 and making
    # every post-resume epoch look like a new best.
    best_rsum = 0
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            start_epoch = checkpoint['epoch']
            best_rsum = checkpoint['best_rsum']
            model.load_state_dict(checkpoint['model'])
            # Eiters keeps train/val logging step counts aligned across restarts.
            model.Eiters = checkpoint['Eiters']
            print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})".format(opt.resume, start_epoch, best_rsum))
            validate(opt, val_loader, model)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))
    for epoch in range(opt.num_epochs):
        print(opt.logger_name)
        print(opt.model_name)
        adjust_learning_rate(opt, model.optimizer, epoch)
        train(opt, train_loader, model, epoch, val_loader)
        rsum = validate(opt, val_loader, model)
        is_best = (rsum > best_rsum)
        best_rsum = max(rsum, best_rsum)
        if (not os.path.exists(opt.model_name)):
            os.mkdir(opt.model_name)
        save_checkpoint({'epoch': (epoch + 1), 'model': model.state_dict(), 'best_rsum': best_rsum, 'opt': opt, 'Eiters': model.Eiters}, is_best, filename='checkpoint_{}.pth.tar'.format(epoch), prefix=(opt.model_name + '/'))
class GNN(torch.nn.Module):
    """Stack of graph-convolution layers producing pooled graph embeddings.

    The conv operator is chosen by name (GCN / GAT / TransformerConv); hidden
    layers apply activation and dropout, the final layer emits node embeddings
    that are pooled per graph.
    """

    def __init__(self, input_dim, hid_dim=None, out_dim=None, gcn_layer_num=2, pool=None, gnn_type='GAT'):
        super().__init__()
        conv_by_type = {'GCN': GCNConv, 'GAT': GATConv, 'TransformerConv': TransformerConv}
        if gnn_type not in conv_by_type:
            raise KeyError('gnn_type can be only GAT, GCN and TransformerConv')
        GraphConv = conv_by_type[gnn_type]
        self.gnn_type = gnn_type
        if hid_dim is None:
            # Golden-ratio shrink of the input width by default.
            hid_dim = int((0.618 * input_dim))
        if out_dim is None:
            out_dim = hid_dim
        if gcn_layer_num < 2:
            raise ValueError('GNN layer_num should >=2 but you set {}'.format(gcn_layer_num))
        convs = [GraphConv(input_dim, hid_dim)]
        convs.extend(GraphConv(hid_dim, hid_dim) for _ in range(gcn_layer_num - 2))
        convs.append(GraphConv(hid_dim, out_dim))
        self.conv_layers = torch.nn.ModuleList(convs)
        self.pool = global_mean_pool if pool is None else pool

    def forward(self, x, edge_index, batch):
        """Encode nodes, then pool node embeddings into one vector per graph."""
        *hidden, last = self.conv_layers
        for conv in hidden:
            x = F.dropout(act(conv(x, edge_index)), training=self.training)
        node_emb = last(x, edge_index)
        return self.pool(node_emb, batch.long())
def test_method_statement_args(test_case_mock, variable_reference_mock, method_mock):
    """Setting ``args`` on a MethodStatement must round-trip unchanged."""
    expected = {name: MagicMock(vr.VariableReference) for name in ('a', 'b')}
    statement = stmt.MethodStatement(test_case_mock, method_mock, variable_reference_mock)
    statement.args = expected
    assert statement.args == expected
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for BlenderbotSmall (lowercasing, ``</w>`` word-end marker).

    NOTE(review): ``vocab_size`` reads like it should carry a ``@property``
    decorator (as in the upstream HuggingFace implementation); decorators appear
    to have been lost in extraction — confirm against the original source.
    """
    # Class-level resource maps consumed by the PreTrainedTokenizer machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, bos_token='__start__', eos_token='__end__', unk_token='__unk__', pad_token='__null__', **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)  # token -> id
        self.decoder = {v: k for (k, v) in self.encoder.items()}  # id -> token
        with open(merges_file, encoding='utf-8') as merges_handle:
            # Slice off the "#version" header line and the trailing empty split.
            merges = merges_handle.read().split('\n')[1:(- 1)]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))  # merge pair -> priority
        self.cache = {}  # memoizes bpe() results per raw token

    def vocab_size(self) -> int:
        """Size of the base vocabulary (added tokens excluded)."""
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        """Full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply the BPE merges to one raw token, returning space-joined subwords."""
        if (token in self.cache):
            return self.cache[token]
        # Split punctuation / apostrophes off and collapse repeated whitespace.
        token = re.sub('([.,!?()])', ' \\1', token)
        token = re.sub("(')", ' \\1 ', token)
        token = re.sub('\\s{2,}', ' ', token)
        if ('\n' in token):
            token = token.replace('\n', ' __newln__')
        tokens = token.split(' ')
        words = []
        for token in tokens:
            if (not len(token)):
                continue
            token = token.lower()
            word = tuple(token)
            # Mark the final character with the word-end suffix.
            word = tuple((list(word[:(- 1)]) + [(word[(- 1)] + '</w>')]))
            pairs = get_pairs(word)
            if (not pairs):
                words.append(token)
                continue
            while True:
                # Greedily merge the highest-priority (lowest-rank) adjacent pair.
                bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
                if (bigram not in self.bpe_ranks):
                    break
                (first, second) = bigram
                new_word = []
                i = 0
                while (i < len(word)):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                        new_word.append((first + second))
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if (len(word) == 1):
                    break
                else:
                    pairs = get_pairs(word)
            word = ' '.join(word)
            # Strip the trailing '</w>' marker (4 characters).
            word = word[:(- 4)]
            self.cache[token] = word
            words.append(word)
        return ' '.join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split text on whitespace (keeping trailing newlines) and BPE each piece."""
        split_tokens = []
        words = re.findall('\\S+\\n?', text)
        for token in words:
            split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Lowercased vocabulary lookup; unknown tokens map to the unk id."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Reverse vocabulary lookup; unknown ids map to the unk token string."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # NOTE(review): replacing ' ' right after joining with ' ' removes all
        # separators; the upstream implementation replaces the BPE continuation
        # marker '@@ ' here — this string looks extraction-mangled, confirm.
        out_string = ' '.join(tokens).replace(' ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocab JSON and merges file into ``save_directory``.

        Returns (vocab_path, merges_path), or None if the directory is invalid.
        """
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # Emit merges in rank order; warn if ranks are not consecutive.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)
class Counter(object):
    """Tallies named event counts and accumulates tic/toc wall-clock timings."""

    def __init__(self):
        self._countList = {}  # property name -> number of count() calls
        self._timeList = {}   # property name -> list of elapsed seconds per tic/toc pair

    def count(self, prop):
        """Increment the counter for *prop*, creating it on first use."""
        assert isinstance(prop, str), 'The property must be a string.'
        self._countList[prop] = self._countList.get(prop, 0) + 1

    def countTic(self, prop):
        """Open a timing interval for *prop* (stores the negated start time)."""
        assert isinstance(prop, str), 'The property must be a string.'
        self._timeList.setdefault(prop, []).append((- time.time()))

    def countToc(self, prop):
        """Close the most recent interval opened by countTic for *prop*."""
        assert isinstance(prop, str), 'The property must be a string.'
        assert (prop in self._timeList), 'The property must already be in the dictionary.'
        self._timeList[prop][(- 1)] += time.time()

    def summary(self):
        """Print all counters, then per-property mean/sum timing statistics."""
        print('Counters:')
        for prop in sorted(self._countList):
            print(' {0:<40}: {1:8d}'.format(prop, self._countList[prop]))
        print((('\nTimes:' + (' ' * 40)) + 'mean sum'))
        for prop in sorted(self._timeList):
            samples = np.array(self._timeList[prop])
            print(' {0:<40}: {1:4.2e}, {2:4.2e}, {3:4d}x'.format(prop, samples.mean(), samples.sum(), len(samples)))
def article_tokenizer_for_prompt_sequences(monkeypatch, packing_boundary: BoundaryType, ext_type: FileExtension, keep_prompt_only_sequences: bool) -> ArticleTokenizer:
    """Build an ArticleTokenizer whose encode and EOS ids are patched for deterministic tests."""
    monkeypatch.setattr(TOKENIZER, 'encode', mock_tokenize)
    tokenizer = ArticleTokenizer(TOKENIZER, MAX_SEQ_LEN, ext_type, packing_boundary=packing_boundary, keep_prompt_only_sequences=keep_prompt_only_sequences)
    # Pin the EOS id on both the tokenizer and its packer.
    for target in (tokenizer, tokenizer.packer):
        monkeypatch.setattr(target, 'eos_token_id', EOS_TOKEN_ID)
    return tokenizer
def validate_fi_veronumero(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate Finnish tax numbers in a scalar, Series or DataFrame (pandas or dask)."""
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        # A named column restricts validation to that column; otherwise every cell is checked.
        if (column != ''):
            return df[column].apply(veronumero.is_valid)
        return df.applymap(veronumero.is_valid)
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(veronumero.is_valid)
    return veronumero.is_valid(df)
# NOTE(review): the bare call below looks like a mangled registration decorator
# (e.g. @PIPELINES.register_module()) — confirm against the original source.
_module()
class DefaultFormatBundleMmdet():
    """Final mmdet-style formatting step: wrap results in DataContainers.

    Converts the image to a contiguous CHW tensor, wraps GT fields in
    DataContainer (DC) objects with the configured padding values, and fills in
    default meta keys expected downstream.
    """

    def __init__(self, img_to_float=True, pad_val=dict(img=0, masks=0, seg=255, pan=0)):
        self.img_to_float = img_to_float  # promote uint8 images to float32
        self.pad_val = pad_val            # per-field padding values used by DC

    def __call__(self, results):
        if ('img' in results):
            img = results['img']
            if ((self.img_to_float is True) and (img.dtype == np.uint8)):
                # Normalization downstream expects float, not uint8.
                img = img.astype(np.float32)
            results = self._add_default_meta_keys(results)
            if (len(img.shape) < 3):
                # Grayscale: add a trailing channel axis before transposing.
                img = np.expand_dims(img, (- 1))
            # HWC -> CHW, contiguous for tensor conversion.
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), padding_value=self.pad_val['img'], stack=True)
        for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels', 'max_inst_per_class']:
            if (key not in results):
                continue
            results[key] = DC(to_tensor(results[key]))
        if ('gt_masks' in results):
            # Masks stay on CPU (variable-size structures).
            results['gt_masks'] = DC(results['gt_masks'], padding_value=self.pad_val['masks'], cpu_only=True)
        if ('gt_semantic_seg' in results):
            results['gt_semantic_seg'] = DC(to_tensor(results['gt_semantic_seg'][(None, ...)]), padding_value=self.pad_val['seg'], stack=True)
        if ('gt_panoptic_only_thing_classes' in results):
            results['gt_panoptic_only_thing_classes'] = DC(to_tensor(results['gt_panoptic_only_thing_classes'][(None, ...)]), padding_value=self.pad_val['pan'], stack=True)
        return results

    def _add_default_meta_keys(self, results):
        """Fill pad_shape / scale_factor / img_norm_cfg with identity defaults when absent."""
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        num_channels = (1 if (len(img.shape) < 3) else img.shape[2])
        # Identity normalization (zero mean, unit std) when none was applied.
        results.setdefault('img_norm_cfg', dict(mean=np.zeros(num_channels, dtype=np.float32), std=np.ones(num_channels, dtype=np.float32), to_rgb=False))
        return results

    def __repr__(self):
        return (self.__class__.__name__ + f'(img_to_float={self.img_to_float})')
def test_mapcollapse_tree():
    """MapCollapse should apply exactly once on the simplified SDFG and keep it valid."""
    sdfg: dace.SDFG = tocollapse.to_sdfg()
    sdfg.simplify()
    sdfg.validate()
    applied = sdfg.apply_transformations(MapCollapse)
    assert applied == 1
    sdfg.validate()
def benchmark(test_acc, target_acc, test_perf, target_perf):
    """Compare achieved accuracy/performance against targets.

    Each check passes when the achieved value meets the target, or when either
    side is None (nothing to compare). Returns True only if all checks pass.
    """
    def check(achieved, target, name):
        if target is None or achieved is None:
            return True
        logging.info(f'{name} achieved: {achieved:.2f} target: {target:.2f}')
        ok = achieved >= target
        logging.info(f'{name} test passed' if ok else f'{name} test failed')
        return ok

    # Evaluate both checks unconditionally so both results get logged.
    return check(test_acc, target_acc, 'Accuracy') & check(test_perf, target_perf, 'Performance')
def create_logger(args):
    """Create a rank-aware logger for MaskTCNN training.

    The main process logs at ``args.log_level``; worker processes use
    ``args.subprocess_log_level`` so they can be quieted. Every record is
    prefixed with the distributed rank via a LoggerAdapter supplying
    ``%(proc_id)d``.
    """
    logger = logging.getLogger('MaskTCNNTrainLogger')
    level = args.log_level if dist_utils.is_main_process() else args.subprocess_log_level
    logger.setLevel(level)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('[%(proc_id)d] %(asctime)s - %(levelname)s - %(message)s', '%H:%M:%S'))
    logger.addHandler(handler)
    # Stop records from bubbling up to the root logger (avoids duplicate output).
    logger.propagate = False
    # Fix: the original also set .propagate on the LoggerAdapter afterwards,
    # which logging never consults — removed as redundant and misleading.
    return logging.LoggerAdapter(logger, {'proc_id': dist_utils.get_rank()})
# NOTE(review): the bare expression below appears to be a mangled decorator,
# presumably @pytest.fixture(scope='session') — confirm against the original source.
(scope='session')
def join_items():
    """Fixture data: two record lists sharing the name 'common' for join tests."""
    def record(name, key, deep_name):
        return {'name': name, 'key': key, 'deep': [{'name': deep_name}]}
    left = [record('left', 'value', 1), record('common', 'left', 1)]
    right = [record('right', 'value', 2), record('common', 'right', 2)]
    return (left, right)
def human_format(num):
    """Render a number with an SI-style suffix, e.g. 1234 -> '1.23K'.

    The value is first rounded to three significant digits, then divided down
    by 1000 until it fits, picking the matching K/M/B/T suffix.
    """
    suffixes = ['', 'K', 'M', 'B', 'T']
    value = float('{:.3g}'.format(num))
    tier = 0
    while abs(value) >= 1000:
        value /= 1000.0
        tier += 1
    # Drop trailing zeros and a dangling decimal point.
    digits = '{:f}'.format(value).rstrip('0').rstrip('.')
    return '{}{}'.format(digits, suffixes[tier])
class PPO(flexs.Explorer):
    """Explorer that proposes sequences using a TF-Agents PPO policy."""

    def __init__(self, model: flexs.Model, rounds: int, sequences_batch_size: int, model_queries_per_batch: int, starting_sequence: str, alphabet: str, log_file: Optional[str]=None):
        super().__init__(model, 'PPO_Agent', rounds, sequences_batch_size, model_queries_per_batch, starting_sequence, log_file)
        self.alphabet = alphabet
        # Environment wraps the fitness model; episodes are capped at the query budget.
        env = PPOEnv(alphabet=self.alphabet, starting_seq=starting_sequence, model=self.model, max_num_steps=self.model_queries_per_batch)
        self.tf_env = tf_py_environment.TFPyEnvironment(env)
        # Both networks consume only the 'sequence' field of the observation dict.
        encoder_layer = tf.keras.layers.Lambda((lambda obs: obs['sequence']))
        actor_net = actor_distribution_network.ActorDistributionNetwork(self.tf_env.observation_spec(), self.tf_env.action_spec(), preprocessing_combiner=encoder_layer, fc_layer_params=[128])
        value_net = value_network.ValueNetwork(self.tf_env.observation_spec(), preprocessing_combiner=encoder_layer, fc_layer_params=[128])
        self.agent = ppo_agent.PPOAgent(time_step_spec=self.tf_env.time_step_spec(), action_spec=self.tf_env.action_spec(), optimizer=tf.keras.optimizers.Adam(learning_rate=1e-05), actor_net=actor_net, value_net=value_net, num_epochs=10, summarize_grads_and_vars=False)
        self.agent.initialize()

    def add_last_seq_in_trajectory(self, experience, new_seqs):
        """Driver observer: at episode boundaries, record the final sequence and its
        fitness in ``new_seqs``, then restart the env from a high-fitness sequence.

        The next episode starts from a random sequence within 90% of the best
        fitness seen so far (exploitation bias on resets).
        """
        if experience.is_boundary():
            seq = one_hot_to_string(experience.observation['sequence'].numpy()[0], self.alphabet)
            new_seqs[seq] = experience.observation['fitness'].numpy().squeeze()
            top_fitness = max(new_seqs.values())
            top_sequences = [seq for (seq, fitness) in new_seqs.items() if (fitness >= (0.9 * top_fitness))]
            if (len(top_sequences) > 0):
                self.tf_env.pyenv.envs[0].seq = np.random.choice(top_sequences)
            else:
                self.tf_env.pyenv.envs[0].seq = np.random.choice([seq for (seq, _) in new_seqs.items()])

    def propose_sequences(self, measured_sequences_data: pd.DataFrame) -> Tuple[(np.ndarray, np.ndarray)]:
        """Run PPO episodes until the model-query budget is spent; return the
        top ``sequences_batch_size`` novel sequences with their predicted fitness."""
        num_parallel_environments = 1
        replay_buffer_capacity = 10001
        replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(self.agent.collect_data_spec, batch_size=num_parallel_environments, max_length=replay_buffer_capacity)
        sequences = {}
        collect_driver = dynamic_episode_driver.DynamicEpisodeDriver(self.tf_env, self.agent.collect_policy, observers=[replay_buffer.add_batch, partial(self.add_last_seq_in_trajectory, new_seqs=sequences), tf_metrics.NumberOfEpisodes(), tf_metrics.EnvironmentSteps()], num_episodes=1)
        previous_model_cost = self.model.cost
        # One episode per iteration: collect, train on the episode, clear the buffer.
        while ((self.model.cost - previous_model_cost) < self.model_queries_per_batch):
            collect_driver.run()
            trajectories = replay_buffer.gather_all()
            self.agent.train(experience=trajectories)
            replay_buffer.clear()
        # Keep only sequences not already measured, ranked by predicted fitness.
        sequences = {seq: fitness for (seq, fitness) in sequences.items() if (seq not in set(measured_sequences_data['sequence']))}
        new_seqs = np.array(list(sequences.keys()))
        preds = np.array(list(sequences.values()))
        # Descending order, truncated to the batch size.
        sorted_order = np.argsort(preds)[:(- self.sequences_batch_size):(- 1)]
        return (new_seqs[sorted_order], preds[sorted_order])
class DataAndLabelsLastPartitionTrainer(LastPartitionTrainer):
    """LastPartitionTrainer variant whose step hooks take both data and labels.

    Both hooks are no-ops here; they appear to be placeholders for overriding —
    confirm against the surrounding trainer hierarchy.
    """

    def backprop_last_partition(self, x, y, *args, **kw):
        # Intentionally a no-op: no backward pass for the last partition.
        pass

    def last_partition_step_and_statistics(self, x, y, *args, **kw):
        # Intentionally a no-op: no optimizer step or statistics gathered.
        pass
def test_comma_separated_exclude_checks(cli, mocker, swagger_20):
    """--exclude-checks must drop exactly the named checks from the full check tuple."""
    excluded = 'not_a_server_error,status_code_conformance'
    mocker.patch('schemathesis.cli.load_schema', return_value=swagger_20)
    execute = mocker.patch('schemathesis.runner.from_schema', autospec=True)
    cli.run(SCHEMA_URI, '--checks=all', f'--exclude-checks={excluded}')
    excluded_names = set(excluded.split(','))
    expected = tuple(check for check in tuple(ALL_CHECKS) if check.__name__ not in excluded_names)
    assert execute.call_args[1]['checks'] == expected
def slot_edit_f1_full(hypothesis, groundtruth, **kwargs):
    """Slot edit F1 over all slots: fixes loop_over_all_slot=True, passing other kwargs through."""
    return slot_edit_f1(hypothesis, groundtruth, loop_over_all_slot=True, **kwargs)
# NOTE(review): the dangling expression below looks like a mangled decorator,
# presumably @pytest.mark.timeout(10) — confirm against the original source
# (as written, '.timeout(10)' is not valid Python).
.timeout(10)
def test_update_envs_env_update():
    """env_update must vary tasks across workers; a wrong-sized update must raise."""
    max_path_length = 16
    env = GarageEnv(PointEnv())
    # Scripted policy: replays a fixed random action sequence, so reward/goal
    # variance below can only come from the per-worker task updates.
    policy = FixedPolicy(env.spec, scripted_actions=[env.action_space.sample() for _ in range(max_path_length)])
    tasks = SetTaskSampler(PointEnv)
    n_workers = 8
    workers = WorkerFactory(seed=100, max_path_length=max_path_length, n_workers=n_workers)
    sampler = MultiprocessingSampler.from_worker_factory(workers, policy, env)
    rollouts = sampler.obtain_samples(0, 161, np.asarray(policy.get_param_values()), env_update=tasks.sample(n_workers))
    mean_rewards = []
    goals = []
    for rollout in rollouts.split():
        mean_rewards.append(rollout.rewards.mean())
        goals.append(rollout.env_infos['task'][0]['goal'])
    # Different tasks -> different rewards and goals across rollouts.
    assert (np.var(mean_rewards) > 0)
    assert (np.var(goals) > 0)
    with pytest.raises(ValueError):
        # One more update than workers must be rejected.
        sampler.obtain_samples(0, 10, np.asarray(policy.get_param_values()), env_update=tasks.sample((n_workers + 1)))
    sampler.shutdown_worker()
    env.close()
def summarize_address_range(first, last):
    """Yield the minimal sequence of networks covering first..last inclusive.

    Args:
        first: first IP address in the range (IPv4Address or IPv6Address).
        last: last IP address in the range, same version as ``first``.

    Yields:
        IPv4Network/IPv6Network objects that exactly tile the range.

    Raises:
        TypeError: if the arguments are not addresses or differ in version.
        ValueError: if ``first`` > ``last`` or the version is unknown.
    """
    if (not (isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress))):
        raise TypeError('first and last must be IP addresses, not networks')
    if (first.version != last.version):
        raise TypeError(('%s and %s are not of the same version' % (first, last)))
    if (first > last):
        raise ValueError('last IP address must be greater than first')
    if (first.version == 4):
        ip = IPv4Network
    elif (first.version == 6):
        ip = IPv6Network
    else:
        raise ValueError('unknown IP version')
    ip_bits = first._max_prefixlen
    first_int = first._ip
    last_int = last._ip
    while (first_int <= last_int):
        # The largest block starting at first_int is limited by (a) its alignment
        # (trailing zero bits) and (b) the number of addresses left in the range.
        nbits = min(_count_righthand_zero_bits(first_int, ip_bits), (_compat_bit_length(((last_int - first_int) + 1)) - 1))
        net = ip((first_int, (ip_bits - nbits)))
        (yield net)
        first_int += (1 << nbits)
        # Guard against integer wraparound past the all-ones address.
        if ((first_int - 1) == ip._ALL_ONES):
            break
class LoggerMonitor(object):
    """Aggregates several Logger instances (one per title) and plots them together."""

    def __init__(self, paths):
        # paths: mapping of plot title -> log file path; each log is resumed, not truncated.
        self.loggers = [Logger(path, title=title, resume=True) for (title, path) in paths.items()]

    def plot(self, names=None):
        """Overlay the named series of every monitored logger on one subplot."""
        plt.figure()
        plt.subplot(121)
        legend_text = []
        for log in self.loggers:
            legend_text.extend(plot_overlap(log, names))
        plt.legend(legend_text, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
        plt.grid(True)
class MultiMLP(Module):
    """Per-channel MLP encoders whose outputs are combined, then fed to a head MLP.

    Each of ``num_channels`` input channels gets its own base MLP; the channel
    embeddings are merged via cat/sum/max/mean or a learned attention, then
    normalized and passed through a shared head.
    """
    CombType = Literal[('cat', 'sum', 'max', 'mean', 'att')]
    supported_combinations = get_args(CombType)

    def __init__(self, *, num_channels: int, output_dim: int, hidden_dim: int=16, base_layers: int=2, head_layers: int=1, combination: CombType='cat', activation_fn: Callable[([Tensor], Tensor)]=torch.relu_, dropout: float=0.0, batch_norm: bool=False, plain_last: bool=True):
        super().__init__()
        self.combination = combination
        self.activation_fn = activation_fn
        self.dropout_fn = Dropout(dropout, inplace=True)
        # One independent base MLP per input channel.
        self.base_mlps: list[MLP] = ModuleList([MLP(hidden_dim=hidden_dim, output_dim=hidden_dim, num_layers=base_layers, dropout=dropout, activation_fn=activation_fn, batch_norm=batch_norm, plain_last=True) for _ in range(num_channels)])
        if (combination == 'att'):
            # Attention scores: one head per channel, computed from the channel embedding.
            self.hidden_dim = hidden_dim
            self.num_heads = num_channels
            self.Q = Linear(in_features=hidden_dim, out_features=self.num_heads, bias=False)
        # False (not None) when disabled, so `if self.bn` works in forward().
        self.bn = (LazyBatchNorm1d() if batch_norm else False)
        self.head_mlp = MLP(output_dim=output_dim, hidden_dim=hidden_dim, num_layers=head_layers, dropout=dropout, activation_fn=activation_fn, batch_norm=batch_norm, plain_last=plain_last)

    def forward(self, x_stack: Tensor) -> Tensor:
        # Move the channel axis first so each channel slice feeds its own MLP.
        # NOTE(review): assumes x_stack is (batch, features, channels) — confirm.
        x_stack = x_stack.permute(2, 0, 1)
        h_list = [mlp(x) for (x, mlp) in zip(x_stack, self.base_mlps)]
        h = self.combine(h_list)
        h = F.normalize(h, p=2, dim=(- 1))
        h = (self.bn(h) if self.bn else h)
        h = self.dropout_fn(h)
        h = self.activation_fn(h)
        h = self.head_mlp(h)
        return h

    def combine(self, h_list: Iterable[Tensor]) -> Tensor:
        """Merge per-channel embeddings according to ``self.combination``."""
        if (self.combination == 'cat'):
            return torch.cat(h_list, dim=(- 1))
        elif (self.combination == 'sum'):
            return torch.stack(h_list, dim=0).sum(dim=0)
        elif (self.combination == 'mean'):
            return torch.stack(h_list, dim=0).mean(dim=0)
        elif (self.combination == 'max'):
            return torch.stack(h_list, dim=0).max(dim=0).values
        elif (self.combination == 'att'):
            # H: (batch, channels, hidden); W: per-channel attention weights.
            H = torch.stack(h_list, dim=1)
            # NOTE(review): softmax over dim=0 normalizes across the batch axis of
            # H; if per-sample weights over channels were intended this would be
            # dim=1 — confirm against the original design.
            W = F.leaky_relu(self.Q(H), 0.2).softmax(dim=0)
            out = H.transpose(1, 2).matmul(W).view((- 1), (self.hidden_dim * self.num_heads))
            return out
        else:
            raise ValueError(f'Unknown combination type {self.combination}')

    def reset_parameters(self):
        """Reset all sub-module parameters (base MLPs, attention, batch norm, head)."""
        for mlp in self.base_mlps:
            mlp.reset_parameters()
        if (self.combination == 'att'):
            self.Q.reset_parameters()
        if self.bn:
            self.bn.reset_parameters()
        self.head_mlp.reset_parameters()
def test_goal_1(env_0: Warehouse):
    """Stepping forward onto the goal delivers the requested shelf for reward 1."""
    # Before the step, the first requested shelf is the first shelf.
    assert env_0.request_queue[0] == env_0.shelfs[0]
    _, rewards, _, _ = env_0.step([Action.FORWARD])
    agent = env_0.agents[0]
    assert agent.x == 4
    assert agent.y == 28
    # Delivery replaces the fulfilled request in the queue.
    assert env_0.request_queue[0] != env_0.shelfs[0]
    assert rewards[0] == pytest.approx(1.0)
def test_incomplete_requirements_config(config_sop, F, bcs, J, y, p, geometry):
    """Enabling save_mesh without a gmsh_file must raise a ConfigError naming both keys."""
    expected_message = 'Key save_mesh in section Output requires key gmsh_file in section Mesh to be present.'
    with pytest.raises(ConfigError) as e_info:
        config_sop.set('Output', 'save_mesh', 'True')
        cashocs.ShapeOptimizationProblem(F, bcs, J, y, p, geometry.boundaries, config=config_sop)
    assert expected_message in str(e_info.value)
# NOTE(review): the bare call below looks like a mangled registration decorator
# (e.g. @MODELS.register_module()) — confirm against the original source.
_module()
class SegHead(nn.Module):
    """Per-point segmentation head: optional global-feature concat, then 1D conv MLP.

    When ``global_feat`` is given (comma-separated, e.g. 'max,avg'), the chosen
    pooled features are broadcast back over points and concatenated with the
    per-point features, widening the input channels accordingly.
    """

    def __init__(self, num_classes, in_channels, mlps=None, norm_args={'norm': 'bn1d'}, act_args={'act': 'relu'}, dropout=0.5, global_feat=None, **kwargs):
        super().__init__()
        if kwargs:
            logging.warning(f'kwargs: {kwargs} are not used in {__class__.__name__}')
        if (global_feat is not None):
            self.global_feat = global_feat.split(',')
            # +1 for the original per-point features alongside each global feature.
            multiplier = (len(self.global_feat) + 1)
        else:
            self.global_feat = None
            multiplier = 1
        in_channels *= multiplier
        if (mlps is None):
            # Default: two hidden layers at the input width, then the classifier.
            mlps = ([in_channels, in_channels] + [num_classes])
        else:
            if (not isinstance(mlps, List)):
                mlps = [mlps]
            mlps = (([in_channels] + mlps) + [num_classes])
        heads = []
        for i in range((len(mlps) - 2)):
            heads.append(create_convblock1d(mlps[i], mlps[(i + 1)], norm_args=norm_args, act_args=act_args))
            if dropout:
                heads.append(nn.Dropout(dropout))
        # Final classifier layer: no activation (raw logits).
        heads.append(create_convblock1d(mlps[(- 2)], mlps[(- 1)], act_args=None))
        self.head = nn.Sequential(*heads)

    def forward(self, end_points):
        """Return per-point class logits; expects channel-first point features."""
        if (self.global_feat is not None):
            global_feats = []
            for feat_type in self.global_feat:
                if ('max' in feat_type):
                    global_feats.append(torch.max(end_points, dim=(- 1), keepdim=True)[0])
                elif (feat_type in ['avg', 'mean']):
                    global_feats.append(torch.mean(end_points, dim=(- 1), keepdim=True))
            # Broadcast pooled features over all points before concatenation.
            global_feats = torch.cat(global_feats, dim=1).expand((- 1), (- 1), end_points.shape[(- 1)])
            end_points = torch.cat((end_points, global_feats), dim=1)
        logits = self.head(end_points)
        return logits
def test_gen_mesh_from_voxels(output_dir):
    """Generate a mesh from a voxel mask and verify its coordinate checksum."""
    from sfepy.mesh.mesh_generators import gen_mesh_from_voxels
    voxels = nm.array([[[0, 0, 0, 0, 1], [0, 0, 0, 1, 1], [0, 0, 0, 1, 1], [0, 0, 0, 0, 1]], [[0, 0, 0, 1, 1], [0, 1, 1, 1, 1], [0, 1, 1, 1, 1], [0, 0, 0, 1, 1]], [[1, 0, 0, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 0, 0, 1, 1]]])
    voxel_mesh = gen_mesh_from_voxels(voxels, [0.5, 0.3, 1.0])
    voxel_mesh.write(op.join(output_dir, 'gen_voxels.mesh'))
    tst.report('voxel based mesh generated')
    # Checksum: per-axis sum of coordinates shifted to the origin.
    shifted = voxel_mesh.coors - nm.min(voxel_mesh.coors, axis=0)
    csum = nm.sum(shifted, axis=0)
    assert nm.linalg.norm(csum - nm.array([90, 48.3, 265])) < tolerance
class SoftIntroVAEBootstrap(nn.Module):
    """Soft-IntroVAE with a bootstrapped (target) decoder alongside the online one."""

    def __init__(self, config):
        super(SoftIntroVAEBootstrap, self).__init__()
        self.zdim = config['z_size']  # latent dimensionality
        self.encoder = Encoder(config)
        self.decoder = Decoder(config)
        # Separate decoder used as a slowly-updated "target" network.
        self.target_decoder = Decoder(config)

    def forward(self, x, deterministic=False, use_target_decoder=True):
        """Encode x and reconstruct it; returns (reconstruction, mu, logvar).

        deterministic=True uses the posterior mean as the latent; otherwise the
        latent is sampled via the reparameterization trick.
        """
        (mu, logvar) = self.encoder(x)
        if deterministic:
            z = mu
        else:
            z = reparameterize(mu, logvar)
        if use_target_decoder:
            y = self.target_decoder(z)
        else:
            y = self.decoder(z)
        return (y, mu, logvar)

    def sample(self, z, use_target_decoder=False):
        """Decode latent codes z with either the online or the target decoder."""
        if use_target_decoder:
            y = self.decode_target(z)
        else:
            y = self.decode(z)
        return y

    def sample_with_noise(self, num_samples=1, device=torch.device('cpu')):
        """Draw ``num_samples`` latents from N(0, I) and decode with the online decoder.

        Bug fix: the latent size attribute is ``zdim`` (set in __init__); the
        original referenced the non-existent ``self.z_dim`` and raised
        AttributeError on every call.
        """
        z = torch.randn(num_samples, self.zdim).to(device)
        return self.decode(z)

    def encode(self, x):
        """Return (mu, logvar) of the approximate posterior for x."""
        (mu, logvar) = self.encoder(x)
        return (mu, logvar)

    def decode(self, z):
        """Decode z with the online decoder."""
        y = self.decoder(z)
        return y

    def decode_target(self, z):
        """Decode z with the target (bootstrapped) decoder."""
        y = self.target_decoder(z)
        return y
def parse_shape(shape, dim):
    """Normalize a field shape specification to a tuple.

    'scalar' -> (1,), 'vector' -> (dim,); an integer n -> (n,); any other value
    (e.g. an already-tuple shape) is returned unchanged.

    Raises:
        ValueError: for an unrecognized shape string.
    """
    if isinstance(shape, basestr):
        try:
            shape = {'scalar': (1,), 'vector': (dim,)}[shape]
        except KeyError:
            # Bug fix: the original passed the format string and value as two
            # separate ValueError args, so '%s' was never interpolated.
            raise ValueError('unsupported field shape! (%s)' % shape)
    elif isinstance(shape, six.integer_types):
        shape = (int(shape),)
    return shape
class Splitter():
    """Split a user-item interaction DataFrame into train/validation/test sets.

    The strategy (fixed/best timestamp, temporal hold-out, leave-n-out,
    random subsampling, k-fold CV) is selected through the attributes of
    ``splitting_ns``; results are returned as a list of (train, test)
    tuples and optionally persisted as TSV files.

    NOTE(review): most split methods assume the data has 'userId' and
    'timestamp' columns — confirm against the upstream loader.
    """
    def __init__(self, data: pd.DataFrame, splitting_ns: SimpleNamespace, random_seed=42):
        # random_seed feeds np.random.seed in process_splitting so shuffles
        # are reproducible across runs.
        self.random_seed = random_seed
        self.data = data
        self.splitting_ns = splitting_ns
        self.save_on_disk = False
        self.save_folder = None
    def process_splitting(self):
        """Run the configured splitting pipeline and return [(train, test), ...].

        When a validation strategy is also configured, the train element of
        each tuple is itself a list of (train, val) tuples (see
        rearrange_data). Raises Exception when mandatory options are absent.
        """
        np.random.seed(self.random_seed)
        data = self.data
        splitting_ns = self.splitting_ns
        if hasattr(splitting_ns, 'save_on_disk'):
            if hasattr(splitting_ns, 'save_folder'):
                self.save_on_disk = True
                self.save_folder = splitting_ns.save_folder
                # Start from a clean output folder on every run.
                if os.path.exists(self.save_folder):
                    shutil.rmtree(self.save_folder, ignore_errors=True)
                os.makedirs(self.save_folder)
            else:
                raise Exception('Train or Test paths are missing')
        if hasattr(splitting_ns, 'test_splitting'):
            tuple_list = self.handle_hierarchy(data, splitting_ns.test_splitting)
            if hasattr(splitting_ns, 'validation_splitting'):
                # Second-level split: re-split each train part into train/val.
                exploded_train_list = []
                for (single_train, single_test) in tuple_list:
                    train_val_test_tuples_list = self.handle_hierarchy(single_train, splitting_ns.validation_splitting)
                    exploded_train_list.append(train_val_test_tuples_list)
                tuple_list = self.rearrange_data(tuple_list, exploded_train_list)
                print('\nRealized a Train/Validation Test splitting strategy\n')
            else:
                print('\nRealized a Train/Test splitting strategy\n')
        else:
            raise Exception('Test splitting strategy is not defined')
        if self.save_on_disk:
            self.store_splitting(tuple_list)
        return tuple_list
    def store_splitting(self, tuple_list):
        """Persist each fold as test.tsv (+ per-fold train.tsv / val.tsv) under save_folder."""
        for (i, (train_val, test)) in enumerate(tuple_list):
            actual_test_folder = create_folder_by_index(self.save_folder, str(i))
            test.to_csv(os.path.abspath(os.sep.join([actual_test_folder, 'test.tsv'])), sep='\t', index=False, header=False)
            # A list means the train side was further split into (train, val) folds.
            if isinstance(train_val, list):
                for (j, (train, val)) in enumerate(train_val):
                    actual_val_folder = create_folder_by_index(actual_test_folder, str(j))
                    val.to_csv(os.path.abspath(os.sep.join([actual_val_folder, 'val.tsv'])), sep='\t', index=False, header=False)
                    train.to_csv(os.path.abspath(os.sep.join([actual_val_folder, 'train.tsv'])), sep='\t', index=False, header=False)
            else:
                train_val.to_csv(os.path.abspath(os.sep.join([actual_test_folder, 'train.tsv'])), sep='\t', index=False, header=False)
    def handle_hierarchy(self, data: pd.DataFrame, valtest_splitting_ns: SimpleNamespace) -> t.List[t.Tuple[(pd.DataFrame, pd.DataFrame)]]:
        """Dispatch to the split method named by ``valtest_splitting_ns.strategy``.

        Supported strategies: fixed_timestamp, temporal_hold_out,
        random_subsampling, random_cross_validation. Raises Exception on
        missing/invalid options.
        """
        if hasattr(valtest_splitting_ns, 'strategy'):
            if (valtest_splitting_ns.strategy == 'fixed_timestamp'):
                if hasattr(valtest_splitting_ns, 'timestamp'):
                    # Numeric timestamp splits at that instant; 'best' searches
                    # for the timestamp maximizing per-user coverage.
                    if valtest_splitting_ns.timestamp.isdigit():
                        tuple_list = self.splitting_passed_timestamp(data, int(valtest_splitting_ns.timestamp))
                    elif (valtest_splitting_ns.timestamp == 'best'):
                        print('Here')
                        kwargs = {}
                        if hasattr(valtest_splitting_ns, 'min_below'):
                            kwargs['min_below'] = int(valtest_splitting_ns.min_below)
                        if hasattr(valtest_splitting_ns, 'min_over'):
                            kwargs['min_over'] = int(valtest_splitting_ns.min_over)
                        tuple_list = self.splitting_best_timestamp(data, **kwargs)
                    else:
                        raise Exception('Timestamp option value is not valid')
                else:
                    raise Exception(f'Option timestamp missing for {valtest_splitting_ns.strategy} strategy')
            elif (valtest_splitting_ns.strategy == 'temporal_hold_out'):
                if hasattr(valtest_splitting_ns, 'test_ratio'):
                    tuple_list = self.splitting_temporal_holdout(data, float(valtest_splitting_ns.test_ratio))
                elif hasattr(valtest_splitting_ns, 'leave_n_out'):
                    tuple_list = self.splitting_temporal_leavenout(data, int(valtest_splitting_ns.leave_n_out))
                else:
                    raise Exception(f'Option missing for {valtest_splitting_ns.strategy} strategy')
            elif (valtest_splitting_ns.strategy == 'random_subsampling'):
                if hasattr(valtest_splitting_ns, 'folds'):
                    if str(valtest_splitting_ns.folds).isdigit():
                        pass
                    else:
                        raise Exception('Folds option value is not valid')
                else:
                    # Default to a single repetition when folds is unspecified.
                    valtest_splitting_ns.folds = 1
                    print('Folds option value is missing. It has been set to 1')
                if hasattr(valtest_splitting_ns, 'test_ratio'):
                    tuple_list = self.splitting_randomsubsampling_kfolds(data, int(valtest_splitting_ns.folds), float(valtest_splitting_ns.test_ratio))
                elif hasattr(valtest_splitting_ns, 'leave_n_out'):
                    tuple_list = self.splitting_randomsubsampling_kfolds_leavenout(data, int(valtest_splitting_ns.folds), int(valtest_splitting_ns.leave_n_out))
                else:
                    raise Exception(f'Option missing for {valtest_splitting_ns.strategy} strategy')
            elif (valtest_splitting_ns.strategy == 'random_cross_validation'):
                if hasattr(valtest_splitting_ns, 'folds'):
                    if str(valtest_splitting_ns.folds).isdigit():
                        tuple_list = self.splitting_kfolds(data, int(valtest_splitting_ns.folds))
                    else:
                        raise Exception('Folds option value is not valid')
                else:
                    raise Exception(f'Option missing for {valtest_splitting_ns.strategy} strategy')
            else:
                raise Exception(f'Unrecognized Test Strategy: {valtest_splitting_ns.strategy}')
        else:
            raise Exception('Strategy option not found')
        return tuple_list
    def rearrange_data(self, train_test: t.List[t.Tuple[(pd.DataFrame, pd.DataFrame)]], train_val: t.List[t.List[t.Tuple[(pd.DataFrame, pd.DataFrame)]]]):
        """Pair each exploded (train, val) list with its original test frame."""
        return [(train_val[p], v[1]) for (p, v) in enumerate(train_test)]
    def generic_split_function(self, data: pd.DataFrame, **kwargs) -> t.List[t.Tuple[(pd.DataFrame, pd.DataFrame)]]:
        # Placeholder hook for custom split strategies; intentionally a no-op.
        pass
    def fold_list_generator(self, length, folds=5):
        """Return a round-robin fold-id list of *length* (0,1,...,folds-1,0,1,...)."""
        def infinite_looper(folds=5):
            while True:
                for f in range(folds):
                    (yield f)
        looper = infinite_looper(folds)
        return [next(looper) for _ in range(length)]
    def splitting_kfolds(self, data: pd.DataFrame, folds=5):
        """Per-user round-robin k-fold cross validation.

        NOTE(review): unlike the other split methods this one does NOT copy
        the frame first, so it writes a 'fold' column into the caller's data.
        """
        tuple_list = []
        user_groups = data.groupby(['userId'])
        for (name, group) in user_groups:
            data.loc[(group.index, 'fold')] = self.fold_list_generator(len(group), folds)
        data['fold'] = pd.to_numeric(data['fold'], downcast='integer')
        for i in range(folds):
            test = data[(data['fold'] == i)].drop(columns=['fold']).reset_index(drop=True)
            train = data[(data['fold'] != i)].drop(columns=['fold']).reset_index(drop=True)
            tuple_list.append((train, test))
        return tuple_list
    def splitting_temporal_holdout(self, d: pd.DataFrame, ratio=0.2):
        """Per-user temporal hold-out: newest *ratio* of each user's events go to test."""
        tuple_list = []
        data = d.copy()
        user_size = data.groupby(['userId'], as_index=True).size()
        user_threshold = user_size.apply((lambda x: math.floor((x * (1 - ratio)))))
        # rank_first orders each user's interactions chronologically.
        # NOTE(review): axis=1 on a groupby rank looks suspicious — confirm
        # against the pandas version pinned by this project.
        data['rank_first'] = data.groupby(['userId'])['timestamp'].rank(method='first', ascending=True, axis=1)
        data['test_flag'] = data.apply((lambda x: (x['rank_first'] > user_threshold.loc[x['userId']])), axis=1)
        test = data[(data['test_flag'] == True)].drop(columns=['rank_first', 'test_flag']).reset_index(drop=True)
        train = data[(data['test_flag'] == False)].drop(columns=['rank_first', 'test_flag']).reset_index(drop=True)
        tuple_list.append((train, test))
        return tuple_list
    def splitting_temporal_leavenout(self, d: pd.DataFrame, n=1):
        """Per-user leave-n-out: each user's n most recent events go to test."""
        tuple_list = []
        data = d.copy()
        data['rank_first'] = data.groupby(['userId'])['timestamp'].rank(method='first', ascending=False, axis=1)
        data['test_flag'] = data.apply((lambda x: (x['rank_first'] <= n)), axis=1)
        test = data[(data['test_flag'] == True)].drop(columns=['rank_first', 'test_flag']).reset_index(drop=True)
        train = data[(data['test_flag'] == False)].drop(columns=['rank_first', 'test_flag']).reset_index(drop=True)
        tuple_list.append((train, test))
        return tuple_list
    def splitting_passed_timestamp(self, d: pd.DataFrame, timestamp=1):
        """Global split: events at/after *timestamp* go to test, the rest to train."""
        tuple_list = []
        data = d.copy()
        data['test_flag'] = data.apply((lambda x: (x['timestamp'] >= timestamp)), axis=1)
        test = data[(data['test_flag'] == True)].drop(columns=['test_flag']).reset_index(drop=True)
        train = data[(data['test_flag'] == False)].drop(columns=['test_flag']).reset_index(drop=True)
        tuple_list.append((train, test))
        return tuple_list
    def subsampling_list_generator(self, length, ratio=0.2):
        """Return a shuffled 0/1 flag list with ~ratio ones (1 marks test rows)."""
        train = int(math.floor((length * (1 - ratio))))
        test = (length - train)
        list_ = (([0] * train) + ([1] * test))
        np.random.shuffle(list_)
        return list_
    def splitting_randomsubsampling_kfolds(self, d: pd.DataFrame, folds=5, ratio=0.2):
        """Repeat *folds* independent per-user random hold-out splits of size *ratio*."""
        tuple_list = []
        data = d.copy()
        user_groups = data.groupby(['userId'])
        for i in range(folds):
            for (name, group) in user_groups:
                data.loc[(group.index, 'test_flag')] = self.subsampling_list_generator(len(group), ratio)
            data['test_flag'] = pd.to_numeric(data['test_flag'], downcast='integer')
            test = data[(data['test_flag'] == 1)].drop(columns=['test_flag']).reset_index(drop=True)
            train = data[(data['test_flag'] == 0)].drop(columns=['test_flag']).reset_index(drop=True)
            tuple_list.append((train, test))
        return tuple_list
    def subsampling_leavenout_list_generator(self, length, n=1):
        """Return a shuffled 0/1 flag list with exactly n ones (1 marks test rows)."""
        test = n
        train = (length - test)
        list_ = (([0] * train) + ([1] * test))
        np.random.shuffle(list_)
        return list_
    def splitting_randomsubsampling_kfolds_leavenout(self, d: pd.DataFrame, folds=5, n=1):
        """Repeat *folds* independent per-user random leave-n-out splits."""
        tuple_list = []
        data = d.copy()
        user_groups = data.groupby(['userId'])
        for i in range(folds):
            for (name, group) in user_groups:
                data.loc[(group.index, 'test_flag')] = self.subsampling_leavenout_list_generator(len(group), n)
            data['test_flag'] = pd.to_numeric(data['test_flag'], downcast='integer')
            test = data[(data['test_flag'] == 1)].drop(columns=['test_flag']).reset_index(drop=True)
            train = data[(data['test_flag'] == 0)].drop(columns=['test_flag']).reset_index(drop=True)
            tuple_list.append((train, test))
        return tuple_list
    def splitting_best_timestamp(self, d: pd.DataFrame, min_below=1, min_over=1):
        """Pick the timestamp that lets the most users keep >= min_below events
        before it and >= min_over after it, then split there.

        O(#timestamps * #users) brute force; the countdown print is a progress
        indicator.
        """
        data = d.copy()
        unique_timestamps = data['timestamp'].unique()
        user_groups = data.groupby(['userId'])
        ts_dict = {}
        nuniques = len(unique_timestamps)
        i = 0
        for ts in unique_timestamps:
            print((nuniques - i))
            i += 1
            ts_dict[ts] = 0
            for (name, group) in user_groups:
                below = group[(group['timestamp'] < ts)]['timestamp'].count()
                over = (len(group) - below)
                if ((below >= min_below) and (over >= min_over)):
                    ts_dict[ts] += 1
        max_val = max(ts_dict.values())
        # Break ties by choosing the latest qualifying timestamp.
        best_tie = [ts for (ts, v) in ts_dict.items() if (v == max_val)]
        max_ts = max(best_tie)
        print(f'Best Timestamp: {max_ts}')
        return self.splitting_passed_timestamp(d, max_ts)
class EncodeText(DataPipe):
    """Pipeline stage that tokenizes a text field of each dataset item.

    Reads the raw text under ``text_name``, encodes it with the tokenizer tool
    registered as ``tokenizer_name``, and exposes the result under
    ``output_text_name``.
    """
    text_name: str = 'transcription'
    output_text_name: str = 'tokenized_text'
    tokenizer_name: str = 'tokenizer'

    def encode_text(self, tokenizer: Tokenizer, text: str) -> torch.LongTensor:
        """Encode *text* into a LongTensor of token ids."""
        token_ids = tokenizer.encode(text)
        return torch.LongTensor(token_ids)

    def forward(self, dataset: AugmentedDynamicItemDataset):
        """Attach the tokenization dynamic item and publish the vocab size."""
        try:
            tok = dataset.get_tool(self.tokenizer_name)
        except KeyError:
            raise KeyError(f'Tokenizer (name = {self.tokenizer_name}) not found!')
        dataset.add_dynamic_item(
            self.encode_text,
            takes=[self.tokenizer_name, self.text_name],
            provides=self.output_text_name,
        )
        dataset.add_tool('output_size', tok.vocab_size)
        return dataset
def load_params_from_file(model, filename, to_cpu=False):
    """Load matching parameters from a checkpoint into *model* (in place).

    Only tensors whose key exists in the model's state dict AND whose shape
    matches are copied; every other entry keeps the model's current value and
    is reported on stdout.

    Args:
        model: torch.nn.Module to update.
        filename: checkpoint path; must contain a 'model_state' dict and may
            contain a 'version' string.
        to_cpu: if True, map tensors to CPU on load (GPU-trained checkpoints).

    Raises:
        FileNotFoundError: if *filename* does not exist.
    """
    if (not os.path.isfile(filename)):
        # Bug fix: include the offending path instead of a bare exception.
        raise FileNotFoundError(filename)
    print(('==> Loading parameters from checkpoint %s to %s' % (filename, ('CPU' if to_cpu else 'GPU'))))
    loc_type = (torch.device('cpu') if to_cpu else None)
    checkpoint = torch.load(filename, map_location=loc_type)
    model_state_disk = checkpoint['model_state']
    if ('version' in checkpoint):
        print(('==> Checkpoint trained from version: %s' % checkpoint['version']))
    # Hoisted: the original rebuilt model.state_dict() twice per loop iteration.
    model_state = model.state_dict()
    update_model_state = {}
    for (key, val) in model_state_disk.items():
        if ((key in model_state) and (model_state[key].shape == val.shape)):
            update_model_state[key] = val
    model_state.update(update_model_state)
    model.load_state_dict(model_state)
    for key in model_state:
        if (key not in update_model_state):
            print(('Not updated weight %s: %s' % (key, str(model_state[key].shape))))
    print(('==> Done (loaded %d/%d)' % (len(update_model_state), len(model_state))))
def test():
    """Smoke test: embed two sentences with a cased BERT encoder."""
    bert = BERTEncoder('bert-base-cased')
    samples = ['test sentence #1', 'test sentence #2']
    bert.embed_sentences(samples)
class ExternSprintDatasetSource():
    """Parent-side endpoint of the pipe pair feeding an external Sprint dataset.

    Messages are (type, args) tuples pickled onto the child-to-parent pipe.
    """

    def __init__(self, c2p_fd, p2c_fd, input_dim, output_dim, num_segments):
        """Wrap the raw fds and announce dataset dimensions via an 'init' message."""
        self.pipe_c2p = os.fdopen(c2p_fd, 'wb')
        self.pipe_p2c = os.fdopen(p2c_fd, 'rb')
        self._send('init', (input_dim, output_dim, num_segments))

    def _send(self, data_type, args=None):
        """Pickle one (data_type, args) message onto the write pipe."""
        assert data_type is not None
        message = (data_type, args)
        util.write_pickled_object(self.pipe_c2p, message)

    def add_new_data(self, segment_name, features, targets):
        """Push one segment's features/targets to the consumer."""
        self._send('data', (segment_name, features, targets))

    def close(self):
        """Signal 'exit' and close both pipe ends."""
        self._send('exit')
        for pipe in (self.pipe_c2p, self.pipe_p2c):
            pipe.close()
class SoftmaxAverage(OptimizationFunction):
    """Softmax-weighted average of objective values with an analytic gradient.

    eval computes sum_i(x_i * e^{x_i}) / sum_j(e^{x_j}) with the usual
    max-shift for numerical stability; grad applies the quotient rule.
    """

    def __init__(self, objectives: List[OptimizationFunction]):
        super().__init__(objectives)
        self.objectives = objectives

    def eval(self, input_vals: List[np.ndarray]) -> np.ndarray:
        # Shift by the max before exponentiating to avoid overflow; the shift
        # cancels in the ratio.
        shifted = np.exp(input_vals - np.max(input_vals))
        return np.dot(input_vals, shifted) / np.sum(shifted)

    def grad(self, input_vals: List[np.ndarray], grad_val: np.ndarray) -> List[np.ndarray]:
        w = np.exp(input_vals - np.max(input_vals))
        num = np.dot(input_vals, w)
        denom = np.sum(w)
        # Quotient rule: d/dx_i [num/denom] with
        # d num/d x_i = w_i + x_i*w_i and d denom/d x_i = w_i.
        per_objective = [
            (((w[i] + (input_vals[i] * w[i])) * denom) - (num * w[i])) / (denom ** 2)
            for i in range(len(self.objectives))
        ]
        return grad_val * np.array(per_objective)

    def __str__(self):
        return 'SoftmaxAvg({0})'.format(', '.join(map(str, self.objectives)))
class NaiveModel(nn.Module):
    """Persistence ("naive") forecaster: returns the last `forecasting_step`
    entries of the input sequence as the prediction.

    Args:
        input_sequence_length: length of the expected input window (kept for
            API symmetry with learned models; not used in forward).
        forecasting_step: number of trailing steps to echo back.
    """

    def __init__(self, input_sequence_length=1, forecasting_step=1):
        super(NaiveModel, self).__init__()
        self.input_sequence_length = input_sequence_length
        self.forecasting_step = forecasting_step

    def forward(self, x):
        # Bug fix: the original signature was `forward(x)` (missing `self`) and
        # referenced the bare name `forecasting_step`, so any `model(x)` call
        # raised TypeError/NameError.
        return x[(- self.forecasting_step):]
def _demo_head_inputs(input_shape=(1, 480, 56, 56)):
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
features = rng.rand(*input_shape)
return features |
def merge_vocab(pair, v_in):
    """Return a new vocab with the symbol *pair* merged in every word (BPE step).

    Each word key is a space-separated symbol string; standalone occurrences of
    "A B" become "AB" while counts are carried over unchanged.
    """
    merged_symbol = ''.join(pair)
    # Match the bigram only when bounded by whitespace/string edges.
    pattern = re.compile('(?<!\\S)' + re.escape(' '.join(pair)) + '(?!\\S)')
    return {pattern.sub(merged_symbol, word): count for (word, count) in v_in.items()}
def get_parser(desc, default_task='translation'):
    """Build the common fairseq-style argument parser.

    First pre-parses only --user-dir (so user extensions can register tasks
    and architectures before the real parser enumerates them), then builds
    the full parser with logging, fp16, distributed and registry options.

    Args:
        desc: description string (currently unused — kept for API parity;
            presumably callers pass it for documentation purposes).
        default_task: default value for --task.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    # Pre-parser: import user modules before registries are consulted below.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument('--user-dir', default=None)
    (usr_args, _) = usr_parser.parse_known_args()
    utils.import_user_module(usr_args)
    parser = argparse.ArgumentParser(allow_abbrev=False)
    # Logging / reproducibility options.
    parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N', help='log progress every N batches (when progress bar is disabled)')
    parser.add_argument('--log-format', default=None, help='log format to use', choices=['json', 'none', 'simple', 'tqdm'])
    parser.add_argument('--tensorboard-logdir', metavar='DIR', default='', help='path to save logs for tensorboard, should match --logdir of running tensorboard (default: no tensorboard logging)')
    parser.add_argument('--seed', default=1, type=int, metavar='N', help='pseudo random number generator seed')
    parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA')
    # Mixed-precision options.
    parser.add_argument('--fp16', action='store_true', help='use FP16')
    parser.add_argument('--memory-efficient-fp16', action='store_true', help='use a memory-efficient version of FP16 training; implies --fp16')
    parser.add_argument('--fp16-no-flatten-grads', action='store_true', help="don't flatten FP16 grads tensor")
    parser.add_argument('--fp16-init-scale', default=(2 ** 7), type=int, help='default FP16 loss scale')
    parser.add_argument('--fp16-scale-window', type=int, help='number of updates before increasing loss scale')
    parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float, help='pct of updates that can overflow before decreasing the loss scale')
    parser.add_argument('--min-loss-scale', default=0.0001, type=float, metavar='D', help='minimum FP16 loss scale, after which training is stopped')
    parser.add_argument('--threshold-loss-scale', type=float, help='threshold FP16 loss scale from below')
    # --user-dir is added again so it appears in --help of the real parser.
    parser.add_argument('--user-dir', default=None, help='path to a python module containing custom extensions (tasks and/or architectures)')
    parser.add_argument('--empty-cache-freq', default=0, type=int, help='how often to clear the PyTorch CUDA cache (0 to disable)')
    parser.add_argument('--all-gather-list-size', default=16384, type=int, help='number of bytes reserved for gathering stats from workers')
    parser.add_argument('--model-parallel-size', type=int, metavar='N', default=1, help='total number of GPUs to parallelize model over')
    parser.add_argument('--checkpoint-suffix', default='', help='Suffix to add to the checkpoint file name')
    # One flag per registered component registry (optimizer, lr-scheduler, ...).
    from fairseq.registry import REGISTRIES
    for (registry_name, REGISTRY) in REGISTRIES.items():
        parser.add_argument(('--' + registry_name.replace('_', '-')), default=REGISTRY['default'], choices=REGISTRY['registry'].keys())
    from fairseq.tasks import TASK_REGISTRY
    parser.add_argument('--task', metavar='TASK', default=default_task, choices=TASK_REGISTRY.keys(), help='task')
    return parser
class AI21TextGenerationAPI(TextGenerationAPI):
    """Text-generation backend for the AI21 completion API (batch size 1)."""
    config_name = 'ai21'

    def __init__(self, engine, api_key):
        super().__init__(engine, api_key=api_key, request_batch_size=1)
        ai21.api_key = api_key

    def generate_text(self, prompts, max_tokens, temperature, top_p, stop_sequences, retries=3, **kwargs):
        """Request a completion for prompts[0], retrying with backoff on error.

        Args:
            prompts: list of prompt strings; only the first is sent
                (request_batch_size is fixed to 1).
            max_tokens / temperature / top_p / stop_sequences: sampling options
                forwarded to the AI21 API.
            retries: number of extra attempts after the first failure.

        Returns:
            A single-element list with the prompt, an OpenAI-style response
            dict, and a creation timestamp.

        Raises:
            RuntimeError: if every attempt fails.
        """
        response = None
        retry_cnt = 0
        backoff_time = 30
        while (retry_cnt <= retries):
            try:
                response = ai21.Completion.execute(model=self.engine, prompt=prompts[0], numResults=1, maxTokens=max_tokens, temperature=temperature, topKReturn=0, topP=top_p, stopSequences=stop_sequences)
                break
            except Exception as e:
                print(f'AI21Error: {e}.')
                print(f'Retrying in {backoff_time} seconds...')
                time.sleep(backoff_time)
                backoff_time *= 1.5
                retry_cnt += 1
        if response is None:
            # Bug fix: the original fell through and crashed on
            # `response['prompt']` with an opaque TypeError when every retry
            # failed; fail loudly with the real cause instead.
            raise RuntimeError(f'AI21 request failed after {retries + 1} attempts')
        # NOTE(review): this reads response['prompt']['text'] — the echoed
        # prompt — rather than a 'completions' field; confirm this is the
        # intended payload for downstream consumers.
        predicts = {'choices': [{'text': response['prompt']['text'], 'finish_reason': 'eos'}]}
        data = {'prompt': prompts, 'response': predicts, 'created_at': str(datetime.now())}
        return [data]
def upload_objects(bucket_name: str='iq-airport-use-case', root_path: str='/scratch/SATE00_MFSR00/DATA_SOURCES/', root_pth_bucket: str='DATA_SOURCES/', upload_num_threads: int=10) -> None:
    """Mirror the local tree under *root_path* into *root_pth_bucket* on S3.

    Walks the tree, logs a sample source/destination per non-empty folder, and
    uploads each folder's files concurrently with a thread pool of
    *upload_num_threads*. Errors are printed, not raised (best-effort upload).
    """
    s3_resource = boto3.resource('s3', region_name='eu-west-1')
    try:
        my_bucket = s3_resource.Bucket(bucket_name)
        for (folder, _subdirs, filenames) in os.walk(root_path):
            # Folder path relative to the sync root, without a leading separator.
            directory_name = folder.replace(root_path, '')
            if directory_name.startswith(os.sep):
                directory_name = directory_name[1:]
            if filenames:
                src = os.path.join(folder, filenames[0])
                dst = os.path.join(root_pth_bucket, directory_name, filenames[0])
                print('Uploading data to S3... i.e.')
                print(f'SRC > {src}')
                print(f'DST > {dst}')
            # Note: runs for every folder (a no-op when filenames is empty).
            Parallel(n_jobs=upload_num_threads, prefer='threads', verbose=10)(
                delayed(my_bucket.upload_file)(
                    os.path.join(folder, base_fn),
                    os.path.join(root_pth_bucket, directory_name, base_fn))
                for base_fn in filenames)
    except Exception as err:
        print('Error:', err)
def charemb(w):
    """Character n-gram embedding of word *w*.

    Averages the kazuma vectors of all 2/3/4-grams (with boundary markers)
    found in the vocabulary; falls back to a small uniform random vector when
    no n-gram matches.
    """
    padded = (['#BEGIN#'] + list(w)) + ['#END#']
    found = {}
    for n in (2, 3, 4):
        for gram in ngrams(padded, n):
            key = '{}gram-{}'.format(n, ''.join(gram))
            if key in kazuma['stoi']:
                found[key] = kazuma['vectors'][kazuma['stoi'][key]]
    if not found:
        # No known n-gram: random 100-dim fallback in [-0.1, 0.1].
        return torch.FloatTensor(100).uniform_(-0.1, 0.1)
    return sum(found.values()) / len(found)
def load_audio_pydub(path, shape=None, normalize=False):
    """Load audio from *path*; resize to *shape* when a truthy shape is given.

    NOTE(review): `normalize` is accepted but never used — presumably kept for
    signature parity with other loaders; confirm before relying on it.
    """
    audio = auread(path)
    return auresize(audio, shape) if shape else audio
def trainModel(model, trainData, validData, dataset, optim):
    """Run the full training loop: per-epoch training, validation, checkpointing.

    Relies on module-level names: `opt` (CLI options), `onmt`,
    `memoryEfficientLoss`, `eval`, `NLLLoss`, `BCELoss`.
    Saves one checkpoint per epoch named from validation accuracy/perplexity.
    """
    print(model)
    sys.stdout.flush()
    model.train()
    # Two criteria: token-level NLL over the target vocab plus a BCE term
    # (combined inside memoryEfficientLoss).
    crit1 = NLLLoss(dataset['dicts']['tgt'].size())
    crit2 = BCELoss()
    start_time = time.time()
    def trainEpoch(epoch):
        """Train one epoch; returns (avg loss per word, accuracy)."""
        # Shuffle batch order only after the curriculum phase.
        if (opt.extra_shuffle and (epoch > opt.curriculum)):
            trainData.shuffle()
        batchOrder = torch.randperm(len(trainData))
        (total_loss, total_words, total_num_correct) = (0, 0, 0)
        (report_loss, report_closs, report_tgt_words, report_src_words, report_num_correct) = (0, 0, 0, 0, 0)
        start = time.time()
        for i in range(len(trainData)):
            batchIdx = (batchOrder[i] if (epoch > opt.curriculum) else i)
            # Last element of the batch tuple is dropped (presumably indices
            # or alignment info — confirm against the dataset class).
            batch = trainData[batchIdx][:(- 1)]
            model.zero_grad()
            (encStates, context) = model.encoder(batch[0])
            outputs = model(batch, encStates, context)
            # Shift targets by one: predict token t+1 from t.
            targets = batch[1][1:]
            (loss, closs, gradOutput, num_correct) = memoryEfficientLoss(outputs, targets, model, crit1, crit2)
            # Gradient w.r.t. outputs is produced by the loss helper.
            outputs.backward(gradOutput)
            optim.step()
            # Count only non-padding target tokens.
            num_words = targets.data.ne(onmt.Constants.PAD).sum()
            report_loss += loss
            report_closs += closs
            report_num_correct += num_correct
            report_tgt_words += num_words
            report_src_words += sum(batch[0][1])
            total_loss += loss
            total_num_correct += num_correct
            total_words += num_words
            # Periodic progress report, then reset the rolling counters.
            if ((i % opt.log_interval) == ((- 1) % opt.log_interval)):
                print(('Epoch %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; closs: %6.4f; %3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed' % (epoch, (i + 1), len(trainData), ((report_num_correct / report_tgt_words) * 100), math.exp((report_loss / opt.log_interval)), (report_closs / opt.log_interval), (report_src_words / (time.time() - start)), (report_tgt_words / (time.time() - start)), (time.time() - start_time))))
                sys.stdout.flush()
                report_loss = report_tgt_words = report_src_words = report_num_correct = report_closs = 0
                start = time.time()
        return ((total_loss / total_words), (total_num_correct / total_words))
    for epoch in range(opt.start_epoch, (opt.epochs + 1)):
        print('')
        (train_loss, train_acc) = trainEpoch(epoch)
        # Clamp loss at 100 so exp() cannot overflow.
        train_ppl = math.exp(min(train_loss, 100))
        print(('Train perplexity: %g' % train_ppl))
        print(('Train accuracy: %g' % (train_acc * 100)))
        (valid_loss, valid_acc) = eval(model, crit1, crit2, validData)
        valid_ppl = math.exp(min(valid_loss, 100))
        print(('Validation perplexity: %g' % valid_ppl))
        print(('Validation accuracy: %g' % (valid_acc * 100)))
        sys.stdout.flush()
        optim.updateLearningRate(valid_loss, epoch)
        # Unwrap DataParallel when training on multiple GPUs.
        model_state_dict = (model.module.state_dict() if (len(opt.gpus) > 1) else model.state_dict())
        model_state_dict = {k: v for (k, v) in model_state_dict.items() if ('generator' not in k)}
        generator_state_dict = (model.generator.module.state_dict() if (len(opt.gpus) > 1) else model.generator.state_dict())
        # NOTE(review): model_state_dict is filtered above but only
        # model.decoder's state is stored under 'decoder' — confirm encoder
        # weights are intentionally excluded from checkpoints.
        checkpoint = {'decoder': model.decoder.state_dict(), 'generator': generator_state_dict, 'dicts': dataset['dicts'], 'opt': opt, 'epoch': epoch, 'optim': optim}
        torch.save(checkpoint, ('%s_acc_%.2f_ppl_%.2f_e%d.pt' % (opt.save_model, (100 * valid_acc), valid_ppl, epoch)))
(3, 4, FOptsDir.DOWNLINK, fOptsDownlink)  # NOTE(review): stray tuple expression with no effect at this position — presumably an entry of a registration table whose surrounding literal is outside this view; confirm against the full file.
class LinkADRReq(FOpt):
    """LoRaWAN LinkADRReq MAC command: data rate, TX power, channel mask.

    Fields are backed by the raw byte buffer ``self._raw`` provided by the
    FOpt base class; accessors decode/encode the packed layout
    (byte 0: dataRate/txPower nibbles, bytes 1-2: channel mask,
    byte 3: chMaskCntl/nbTrans).

    Bug fixes relative to the original:
    - The getter/setter pairs were duplicate plain ``def``s, so each later
      definition silently shadowed the earlier one; restored as
      @property/@setter pairs (presumably stripped decorators — confirm
      against version history).
    - The nbTrans setter read ``self.raw[3]``; every other accessor uses
      ``self._raw``.
    """
    _MASK_DATARATE = 240    # 0b11110000: upper nibble of byte 0
    _MASK_TXPOWER = 15      # 0b00001111: lower nibble of byte 0
    _MASK_NBTRANS = 15
    _MASK_CHMASKCNTL = 112  # 0b01110000 of byte 3

    def __init__(self, dataRate=None, txPower=None, chMask=set(), chMaskCntl=None, nbTrans=1, **kwargs):
        # NOTE: the mutable default `chMask=set()` is only ever read (the
        # setter sums over an intersection), never mutated, so it is safe;
        # kept for interface compatibility.
        super().__init__(**kwargs)
        if (dataRate is not None):
            self.dataRate = dataRate
        if (txPower is not None):
            self.txPower = txPower
        self.chMask = chMask
        if (chMaskCntl is not None):
            self.chMaskCntl = chMaskCntl
        self.nbTrans = nbTrans

    @property
    def chMask(self):
        """Set of enabled channel IDs decoded from the little-endian 16-bit mask."""
        rawChMask = (self._raw[1] + (self._raw[2] << 8))
        return set((x for x in range(16) if (((1 << x) & rawChMask) > 0)))

    @chMask.setter
    def chMask(self, chMask):
        if (not isinstance(chMask, set)):
            raise ValueError('chMask must be a set with channel IDs')
        # Channels outside 0..15 are silently ignored by the intersection.
        rawChMask = sum(((1 << x) for x in chMask.intersection(range(16))))
        self._raw[1:3] = [(rawChMask & 255), ((rawChMask & 65280) >> 8)]

    @property
    def nbTrans(self):
        return getWithMask(self._raw[3], self._MASK_NBTRANS)

    @nbTrans.setter
    def nbTrans(self, nbTrans):
        if ((type(nbTrans) != int) or (nbTrans < 0) or (nbTrans > 15)):
            raise ValueError('nbTrans must be between 0 and 15')
        # Bug fix: was `self.raw[3]` (AttributeError at runtime).
        self._raw[3] = setWithMask(self._raw[3], nbTrans, self._MASK_NBTRANS)

    @property
    def dataRate(self):
        return self._region.binToDataRate(getWithMask(self._raw[0], self._MASK_DATARATE))

    @dataRate.setter
    def dataRate(self, dataRate):
        self._raw[0] = setWithMask(self._raw[0], self._region.dataRateToBin(dataRate), self._MASK_DATARATE)

    @property
    def txPower(self):
        return self._region.binToTxPower(getWithMask(self._raw[0], self._MASK_TXPOWER))

    @txPower.setter
    def txPower(self, txPower):
        self._raw[0] = setWithMask(self._raw[0], self._region.txPowerToBin(txPower), self._MASK_TXPOWER)

    @property
    def chMaskCntl(self):
        raise NotImplementedError()

    @chMaskCntl.setter
    def chMaskCntl(self, chMaskCntl):
        raise NotImplementedError()
class ProjectionHead(nn.Module):
    """Two-layer MLP projection head: Linear -> ReLU -> Dropout -> Linear.

    Both linear layers are bias-free; dropout is only active in train mode.
    """

    def __init__(self, in_dim, hidden_dim, out_dim, dropout):
        super().__init__()
        self.l1 = nn.Linear(in_dim, hidden_dim, bias=False)
        self.l2 = nn.Linear(hidden_dim, out_dim, bias=False)
        self.dropout = dropout

    def forward(self, x):
        hidden = F.relu(self.l1(x))
        hidden = F.dropout(hidden, self.dropout, training=self.training)
        return self.l2(hidden)
class CompoundType(GeneratedsSuper):
    """Generated binding class (generateDS-style) for a Doxygen XML
    'compound' element: a kind/refid pair, a name, and a list of members.

    NOTE(review): generated code — prefer regenerating over hand-editing.
    """
    subclass = None
    superclass = None
    def __init__(self, kind=None, refid=None, name=None, member=None):
        self.kind = kind
        self.refid = refid
        self.name = name
        # Default to a fresh list to avoid sharing a mutable default.
        if (member is None):
            self.member = []
        else:
            self.member = member
    def factory(*args_, **kwargs_):
        """Instantiate the registered subclass when one is set, else this class."""
        if CompoundType.subclass:
            return CompoundType.subclass(*args_, **kwargs_)
        else:
            return CompoundType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors (kept for API compatibility with generateDS output).
    def get_name(self):
        return self.name
    def set_name(self, name):
        self.name = name
    def get_member(self):
        return self.member
    def set_member(self, member):
        self.member = member
    def add_member(self, value):
        self.member.append(value)
    def insert_member(self, index, value):
        self.member[index] = value
    def get_kind(self):
        return self.kind
    def set_kind(self, kind):
        self.kind = kind
    def get_refid(self):
        return self.refid
    def set_refid(self, refid):
        self.refid = refid
    def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''):
        """Write this element (attributes + children) as XML to *outfile*."""
        showIndent(outfile, level)
        outfile.write(('<%s%s %s' % (namespace_, name_, namespacedef_)))
        self.exportAttributes(outfile, level, namespace_, name_='CompoundType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, (level + 1), namespace_, name_)
            showIndent(outfile, level)
            outfile.write(('</%s%s>\n' % (namespace_, name_)))
        else:
            # Self-closing tag when there are no children.
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'):
        outfile.write((' kind=%s' % (quote_attrib(self.kind),)))
        outfile.write((' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'),)))
    def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'):
        if (self.name is not None):
            showIndent(outfile, level)
            outfile.write(('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)))
        for member_ in self.member:
            member_.export(outfile, level, namespace_, name_='member')
    def hasContent_(self):
        # NOTE(review): self.member is always a list, so `member is not None`
        # is always True — every instance reports content; confirm intended.
        if ((self.name is not None) or (self.member is not None)):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='CompoundType'):
        """Write this element as Python-literal construction code."""
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if (self.kind is not None):
            showIndent(outfile, level)
            outfile.write(('kind = "%s",\n' % (self.kind,)))
        if (self.refid is not None):
            showIndent(outfile, level)
            outfile.write(('refid = %s,\n' % (self.refid,)))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write(('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)))
        showIndent(outfile, level)
        outfile.write('member=[\n')
        level += 1
        for member in self.member:
            showIndent(outfile, level)
            outfile.write('model_.member(\n')
            member.exportLiteral(outfile, level, name_='member')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        """Populate this instance from a minidom element node."""
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            # Strip any namespace prefix from the tag name.
            nodeName_ = child_.nodeName.split(':')[(- 1)]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('kind'):
            self.kind = attrs.get('kind').value
        if attrs.get('refid'):
            self.refid = attrs.get('refid').value
    def buildChildren(self, child_, nodeName_):
        if ((child_.nodeType == Node.ELEMENT_NODE) and (nodeName_ == 'name')):
            # Concatenate all text nodes under <name>.
            name_ = ''
            for text__content_ in child_.childNodes:
                name_ += text__content_.nodeValue
            self.name = name_
        elif ((child_.nodeType == Node.ELEMENT_NODE) and (nodeName_ == 'member')):
            obj_ = MemberType.factory()
            obj_.build(child_)
            self.member.append(obj_)
def register_Ns3EpcS1apSapErabSetupItem_methods(root_module, cls):
    """Register constructors and public attributes for ns3::EpcS1apSap::ErabSetupItem."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::EpcS1apSap::ErabSetupItem const &', 'arg0')])
    # Mutable public data members, in declaration order.
    for attr_name, attr_type in (('enbTeid', 'uint32_t'),
                                 ('enbTransportLayerAddress', 'ns3::Ipv4Address'),
                                 ('erabId', 'uint16_t')):
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def test_extract_boundary():
    """extract_boundary must reject malformed inputs and decode a valid one."""
    # Missing key, then a boundary_result that is not a list of polygons.
    for malformed in ({}, {'boundary_result': [0, 1]}):
        with pytest.raises(AssertionError):
            mask_utils.extract_boundary(malformed)
    valid = {'boundary_result': [[0, 0, 1, 0, 1, 1, 0, 1, 1]]}
    decoded = mask_utils.extract_boundary(valid)
    assert (decoded[2] == [1])
def _get_walk_files_to_hash(dir: str, filter: Optional[str]=None):
files_to_hash = []
for (foldername, _, filenames) in os.walk(dir):
if ((filter is not None) and (filter in foldername.split('/'))):
continue
relative_foldername = os.path.relpath(foldername, dir)
if (relative_foldername == '.'):
relative_foldername = ''
relative_foldername = relative_foldername.replace(os.path.sep, '_')
if (relative_foldername != ''):
relative_foldername += '_'
hash_file_names = [(os.path.join(foldername, filename), (relative_foldername + filename)) for filename in filenames]
files_to_hash.extend(hash_file_names)
return files_to_hash |
class FastViterbiOp(NativeOpGenBase):
in_info = ({'name': 'am_scores', 'ndim': 3, 'shape': (None, None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'am_seq_len', 'ndim': 1, 'shape': ((0, 0),), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'edges', 'ndim': 2, 'shape': (4, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'weights', 'ndim': 1, 'shape': ((3, 1),), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'start_end_states', 'ndim': 2, 'shape': (2, (0, 0)), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'n_states', 'ndim': 0, 'shape': (), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected', 'host_memory': True})
out_info = ({'name': 'output', 'ndim': 2, 'shape': ((0, 0), (0, 1)), 'dtype': 'int32', 'need_contiguous': True}, {'name': 'scores', 'ndim': 1, 'shape': ((0, 1),), 'need_contiguous': True})
c_extra_support_code = {'01_IdxAndVal': '\n struct __attribute__((__packed__)) IdxAndVal {\n int idx;\n float val;\n };\n ', '04_select_max': '\n DEV_FUNC\n void select_max(IdxAndVal* a, IdxAndVal b) {\n // fast path\n if(b.val < a->val)\n return;\n // Maybe we could use double compare-and-swap ( // But not sure how.\n // So instead, we use double-wide compare-and-swap.\n union U {\n IdxAndVal s;\n unsigned long long int v64;\n };\n while(true) {\n U prev;\n prev.s = *a;\n if(b.val < prev.s.val)\n return;\n if(b.val == prev.s.val && b.idx >= prev.s.idx)\n return;\n U updated;\n updated.s = b;\n\n U old;\n old.v64 = elem_atomic_cas((unsigned long long int*) a, prev.v64, updated.v64);\n if(old.v64 == prev.v64)\n return;\n // Not the same, so repeat.\n }\n }\n ', '05_init_buffer': '\n DEF_KERNEL\n void init_buffer\n (\n int n_time,\n int n_states, // for the whole batch\n IdxAndVal* buffer // (time+1,n_states), states for the whole batch\n )\n {\n int idx = threadIdx.x + blockDim.x * blockIdx.x;\n while(idx < (n_time + 1) * n_states) {\n buffer[idx].val = -INF_F;\n buffer[idx].idx = -1;\n idx += gridDim.x * blockDim.x;\n }\n }\n ', '06_init_first_frame': '\n DEF_KERNEL\n void init_first_frame\n (\n int n_batch,\n int n_states, // for the whole batch\n IdxAndVal* frame, // (n_states,), states for the whole batch\n const int32_t* d_start_states // (n_batch,)\n )\n {\n int idx = threadIdx.x + blockDim.x * blockIdx.x;\n while(idx < n_batch) {\n int state_idx = d_start_states[idx];\n frame[state_idx].val = 0;\n idx += gridDim.x * blockDim.x;\n }\n }\n ', '08_next_frame': '\n DEF_KERNEL\n void next_frame\n (\n int n_time,\n int n_states,\n int n_edges,\n int n_classes,\n int t,\n const float* d_am_scores,\n const int32_t* d_am_seq_len,\n const IdxAndVal* prev_frame,\n IdxAndVal* frame,\n const int32_t* d_edge_from,\n const int32_t* d_edge_to,\n const int32_t* d_edge_emission_idx,\n const int32_t* d_edge_seq_idx,\n const float* d_edge_weights,\n const int32_t* d_end_states 
// (n_batch,)\n )\n {\n int idx = threadIdx.x + blockDim.x * blockIdx.x;\n while(idx < n_edges) {\n int from_idx = d_edge_from[idx];\n //assert_cmp(0, <=, from_idx); assert_cmp(from_idx, <, n_states);\n\n int seq_idx = d_edge_seq_idx[idx];\n if(t < d_am_seq_len[seq_idx]) {\n float prev_val = prev_frame[from_idx].val;\n int emission_idx = d_edge_emission_idx[idx];\n //assert_cmp(0, <=, emission_idx); assert_cmp(emission_idx, <, n_classes);\n int to_idx = d_edge_to[idx];\n //assert_cmp(0, <=, to_idx); assert_cmp(to_idx, <, n_states);\n IdxAndVal candidate;\n candidate.val = prev_val + d_edge_weights[idx] + d_am_scores[seq_idx * n_classes + emission_idx];\n candidate.idx = idx;\n select_max(&frame[to_idx], candidate);\n }\n\n idx += gridDim.x * blockDim.x;\n }\n }\n ', '11_select_scores': '\n DEF_KERNEL\n void select_scores\n (\n int n_batch,\n int n_states,\n int buffer_stride,\n const IdxAndVal* buffer,\n const int32_t* d_am_seq_len, // (n_batch,)\n const int32_t* d_end_states, // (n_batch,)\n float* d_score // (n_batch,)\n )\n {\n int idx = threadIdx.x + blockDim.x * blockIdx.x;\n while(idx < n_batch) {\n const IdxAndVal* last_frame = buffer + d_am_seq_len[idx] * buffer_stride;\n int end_state_idx = d_end_states[idx];\n d_score[idx] = last_frame[end_state_idx].val;\n\n idx += gridDim.x * blockDim.x;\n }\n }\n ', '13_select_best_path': '\n DEF_KERNEL\n void select_best_path\n (\n int n_batch,\n int n_states,\n int n_edges,\n int t,\n int32* cur_state, // (n_batch,)\n const IdxAndVal* frame,\n const int32_t* d_am_seq_len,\n const int32_t* d_edge_from,\n const int32_t* d_edge_to,\n const int32_t* d_edge_emission_idx,\n int32_t* output\n )\n {\n int idx = threadIdx.x + blockDim.x * blockIdx.x;\n while(idx < n_batch) {\n if(t < d_am_seq_len[idx]) {\n int state_idx = cur_state[idx];\n //assert_cmp(0, <=, state_idx); assert_cmp(state_idx, <, n_states);\n int edge_idx = frame[state_idx].idx;\n if(edge_idx >= 0) {\n //assert_cmp(0, <=, edge_idx); assert_cmp(edge_idx, <, 
n_edges);\n //assert_cmp(state_idx, ==, d_edge_to[edge_idx]);\n cur_state[idx] = d_edge_from[edge_idx];\n output[idx] = d_edge_emission_idx[edge_idx];\n }\n else // no path found\n output[idx] = 0;\n }\n else {\n output[idx] = 0;\n }\n idx += gridDim.x * blockDim.x;\n }\n }\n '}
c_fw_code = '\n using namespace std;\n // am_scores, am_seq_len, edges, weights, start_end_states, n_states = input_names\n // output, scores = output_names\n assert(n_inputs == 6);\n assert(n_outputs == 2);\n Ndarray* am_scores = inputs[0];\n Ndarray* am_seq_len = inputs[1];\n Ndarray* edges = inputs[2];\n Ndarray* weights = inputs[3];\n Ndarray* start_end_states = inputs[4];\n Ndarray* n_states_ref = inputs[5];\n Ndarray* output = *outputs[0];\n Ndarray* score = *outputs[1];\n\n assert_cmp(Ndarray_NDIM(am_scores), ==, 3);\n assert_cmp(Ndarray_NDIM(am_seq_len), ==, 1);\n assert_cmp(Ndarray_NDIM(edges), ==, 2);\n assert_cmp(Ndarray_NDIM(weights), ==, 1);\n assert_cmp(Ndarray_NDIM(start_end_states), ==, 2);\n assert_cmp(Ndarray_NDIM(n_states_ref), ==, 0);\n assert_cmp(Ndarray_NDIM(output), ==, 2);\n assert_cmp(Ndarray_NDIM(score), ==, 1);\n int n_time = Ndarray_DIMS(am_scores)[0];\n int n_batch = Ndarray_DIMS(am_scores)[1];\n int n_classes = Ndarray_DIMS(am_scores)[2];\n assert_cmp(Ndarray_DIMS(am_scores)[0], ==, n_time);\n assert_cmp(Ndarray_DIMS(am_scores)[1], ==, n_batch);\n assert_cmp(Ndarray_DIMS(am_scores)[2], ==, n_classes);\n assert_cmp(Ndarray_DIMS(am_seq_len)[0], ==, n_batch);\n int n_edges = Ndarray_DIMS(edges)[1];\n assert_cmp(Ndarray_DIMS(edges)[0], ==, 4);\n assert_cmp(Ndarray_DIMS(edges)[1], ==, n_edges);\n assert_cmp(Ndarray_DIMS(weights)[0], ==, n_edges);\n assert_cmp(Ndarray_DIMS(start_end_states)[0], ==, 2);\n assert_cmp(Ndarray_DIMS(start_end_states)[1], ==, n_batch);\n int n_states = Ndarray_DEV_DATA_int32_scalar(n_states_ref);\n assert_cmp(Ndarray_DIMS(output)[0], ==, n_time);\n assert_cmp(Ndarray_DIMS(output)[1], ==, n_batch);\n assert_cmp(Ndarray_DIMS(score)[0], ==, n_batch);\n\n int32_t* d_edge_from = Ndarray_DEV_DATA_int32(edges) + 0 * Ndarray_STRIDE(edges, 0);\n int32_t* d_edge_to = Ndarray_DEV_DATA_int32(edges) + 1 * Ndarray_STRIDE(edges, 0);\n int32_t* d_edge_emission_idx = Ndarray_DEV_DATA_int32(edges) + 2 * Ndarray_STRIDE(edges, 0);\n 
int32_t* d_edge_seq_idx = Ndarray_DEV_DATA_int32(edges) + 3 * Ndarray_STRIDE(edges, 0);\n float* d_edge_weights = Ndarray_DEV_DATA(weights);\n float* d_am_scores = Ndarray_DEV_DATA(am_scores);\n int am_scores_stride = Ndarray_STRIDE(am_scores, 0);\n int32_t* d_am_seq_len = Ndarray_DEV_DATA_int32(am_seq_len);\n int32_t* d_start_states = Ndarray_DEV_DATA_int32(start_end_states) + 0 * Ndarray_STRIDE(start_end_states, 0);\n int32_t* d_end_states = Ndarray_DEV_DATA_int32(start_end_states) + 1 * Ndarray_STRIDE(start_end_states, 0);\n int32_t* d_output = Ndarray_DEV_DATA_int32(output);\n int output_stride = Ndarray_STRIDE(output, 0);\n float* d_score = Ndarray_DEV_DATA(score);\n\n IdxAndVal* d_buffer = (IdxAndVal*) device_malloc((n_time + 1) * n_states * sizeof(IdxAndVal));\n int buffer_stride = n_states;\n start_dev_kernel(init_buffer, (n_time, n_states, d_buffer));\n start_dev_kernel(init_first_frame, (n_batch, n_states, d_buffer, d_start_states));\n HANDLE_LAST_ERROR();\n\n for(int t = 0; t < n_time; ++t) {\n start_dev_kernel(next_frame, (\n n_time,\n n_states,\n n_edges,\n n_classes,\n t,\n d_am_scores + t * am_scores_stride,\n d_am_seq_len,\n d_buffer + t * buffer_stride,\n d_buffer + (t + 1) * buffer_stride,\n d_edge_from,\n d_edge_to,\n d_edge_emission_idx,\n d_edge_seq_idx,\n d_edge_weights,\n d_end_states\n ));\n }\n HANDLE_LAST_ERROR();\n\n start_dev_kernel(select_scores, (\n n_batch,\n n_states,\n buffer_stride,\n d_buffer,\n d_am_seq_len,\n d_end_states,\n d_score // out\n ));\n\n int32_t* d_cur_state = (int32_t*) device_malloc(n_batch * sizeof(int32_t));\n Ndarray_memcpy(d_cur_state, d_end_states, n_batch * sizeof(int32_t));\n\n for(int t = n_time - 1; t >= 0; --t) {\n start_dev_kernel(select_best_path, (\n n_batch,\n n_states,\n n_edges,\n t,\n d_cur_state,\n d_buffer + (t + 1) * buffer_stride,\n d_am_seq_len,\n d_edge_from,\n d_edge_to,\n d_edge_emission_idx,\n d_output + t * output_stride // out\n ));\n }\n HANDLE_LAST_ERROR();\n\n 
device_free(d_cur_state);\n device_free(d_buffer);\n '
c_bw_code = None |
def get_solution_dicts(output_file_contents, input_ring, get_failures=True):
    """Parse numerical-solver text output into per-solution variable dicts.

    Scans *output_file_contents* backwards for the last line starting with
    'THE SOLUTIONS', reads the variable count just below it, then builds one
    ``{ring_element: CC(real, imag)}`` dict per reported solution.

    Parameters: *output_file_contents* — raw solver output text;
    *input_ring* — callable mapping a variable-name string to a ring element;
    *get_failures* — when True, also include solutions not marked 'success'.
    """
    output_list = output_file_contents.splitlines()
    solution_dicts = []
    # walk backwards to find the final 'THE SOLUTIONS' banner line
    for solution_line in range((len(output_list) - 1), (- 1), (- 1)):
        if (output_list[solution_line].find('THE SOLUTIONS') == 0):
            break
    # variable count sits 1 or 2 lines below the banner depending on the
    # output format; fall back when the first candidate has no second field
    try:
        var_number = int(output_list[(solution_line + 2)].split(' ')[1])
    except IndexError:
        var_number = int(output_list[(solution_line + 1)].split(' ')[1])
    for i in range((solution_line + 1), len(output_list)):
        if (output_list[i].count('the solution for t') == 1):
            # assumes the 'success' marker sits three lines above the
            # solution header -- TODO confirm against solver output format
            if ((output_list[(i - 3)].count('success') > 0) or get_failures):
                temp_dict = {}
                # the var_number lines after the header hold 'name: re im'
                for j in range(1, (var_number + 1)):
                    rawsplit = output_list[(i + j)].split(': ')[1].split(' ')
                    # drop empty fields produced by runs of spaces
                    for extras in range(rawsplit.count('')):
                        rawsplit.remove('')
                    temp_var = output_list[(i + j)].split(': ')[0].replace(' ', '')
                    # CC(real, imag) from the two remaining numeric fields
                    temp_dict[input_ring(temp_var)] = CC(rawsplit[0], rawsplit[1])
                solution_dicts.append(temp_dict)
    return solution_dicts
def losses(real_images):
    """Build GAN loss graph nodes (legacy TF1 + prettytensor code).

    Samples latent noise, constructs generator/discriminator templates,
    scores real and generated images with a weight-sharing discriminator,
    and returns ``(generator_loss, discriminator_loss, z_prediction_loss)``.
    All three losses are also added to the 'losses' collection.
    """
    # latent noise fed to the generator, one vector per batch element
    z = tf.truncated_normal([FLAGS.batch_size, FLAGS.z_size], stddev=1)
    d_template = discriminator_template()
    g_template = generator_template()
    # generator also predicts z back from its output
    # (presumably an InfoGAN-style reconstruction head -- verify)
    (gen_images, z_prediction) = pt.construct_all(g_template, input=z)
    tf.image_summary('generated_images', gen_images, max_images=FLAGS.batch_size, name='generated_images_summary')
    # same template constructed twice => shared discriminator weights
    real_logits = d_template.construct(input=real_images)
    fake_logits = d_template.construct(input=gen_images)
    # NOTE(review): positional (logits, labels) matches the pre-1.0 TF API
    real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(real_logits, tf.ones_like(real_logits)))
    fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(fake_logits, tf.zeros_like(fake_logits)))
    discriminator_loss = tf.add(real_loss, fake_loss, name='discriminator_loss')
    # generator tries to make the discriminator output 'real' on fakes
    generator_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(fake_logits, tf.ones_like(fake_logits)), name='generator_loss')
    z_prediction_loss = tf.reduce_mean(tf.square((z - z_prediction)), name='z_prediction_loss')
    tf.add_to_collection('losses', generator_loss)
    tf.add_to_collection('losses', discriminator_loss)
    tf.add_to_collection('losses', z_prediction_loss)
    return (generator_loss, discriminator_loss, z_prediction_loss)
# NOTE(review): stray bare identifier -- looks like a decorator that lost
# its '@' (or an extraction artifact); as written it is a no-op expression
# that would raise NameError unless `_testing` is defined elsewhere.
_testing
def test_random_simplicial_complex(level=1, trials=1, verbose=False):
    """Cross-check CHomP homology against Sage's own algorithm.

    Generates *trials* random simplicial complexes of the given *level*
    and raises ValueError (after printing diagnostics) on any mismatch.
    """
    deprecation(33777, 'the CHomP interface is deprecated; hence so is this function')
    for i in range(trials):
        X = random_simplicial_complex(level=level)
        # default algorithm (CHomP) vs the pure-Sage implementation
        chomp = X.homology(verbose=verbose)
        no_chomp = X.homology(algorithm='no_chomp', verbose=verbose)
        if (chomp != no_chomp):
            print(('Homology according to CHomP: %s' % chomp))
            print(('Homology according to Sage: %s' % no_chomp))
            print(('Simplicial complex: %s' % X))
            print(('Its chain complex: %s' % X.chain_complex()))
            raise ValueError
def get_module_constant(module, symbol, default=(- 1), paths=None):
    """Find *symbol* in *module* without fully importing it when possible.

    Returns None when the module cannot be found; otherwise extracts the
    constant from the module's code object, importing the module only when
    no source/bytecode is available. Legacy ``imp``-based implementation.
    """
    try:
        (f, path, (suffix, mode, kind)) = find_module(module, paths)
    except ImportError:
        # module doesn't exist at all
        return None
    try:
        if (kind == PY_COMPILED):
            # skip the .pyc header before unmarshalling
            # NOTE(review): 8 bytes matches the py2-era layout (magic +
            # mtime); newer .pyc headers are longer -- confirm target version
            f.read(8)
            code = marshal.load(f)
        elif (kind == PY_FROZEN):
            code = imp.get_frozen_object(module)
        elif (kind == PY_SOURCE):
            code = compile(f.read(), path, 'exec')
        else:
            # no code object available (e.g. extension module): import it
            # and read the attribute directly
            if (module not in sys.modules):
                imp.load_module(module, f, path, (suffix, mode, kind))
            return getattr(sys.modules[module], symbol, None)
    finally:
        # find_module may return f=None for packages/builtins
        if f:
            f.close()
    return extract_constant(code, symbol, default)
class Base3DFusionModel(BaseModule, metaclass=ABCMeta):
    """Base class for 3D fusion models: loss aggregation + train/val steps."""

    def __init__(self, init_cfg=None):
        super().__init__(init_cfg)
        # kept for compatibility with fp16 training hooks
        self.fp16_enabled = False

    def _parse_losses(self, losses):
        """Reduce a dict of raw losses to a total loss plus logging values.

        Args:
            losses (dict): maps a loss name to a tensor or list of tensors.

        Returns:
            tuple: ``(loss, log_vars)``; ``loss`` is the sum of every entry
            whose name contains ``'loss'``.

        Raises:
            TypeError: if a value is neither a tensor nor a list of tensors.
        """
        log_vars = OrderedDict()
        for (loss_name, loss_value) in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum((_loss.mean() for _loss in loss_value))
            else:
                raise TypeError(f'{loss_name} is not a tensor or list of tensors')
        # only entries whose name contains 'loss' contribute to the total
        loss = sum((_value for (_key, _value) in log_vars.items() if ('loss' in _key)))
        log_vars['loss'] = loss
        # hoisted out of the loop: availability/initialization of the
        # process group cannot change between iterations
        is_distributed = dist.is_available() and dist.is_initialized()
        for (loss_name, loss_value) in log_vars.items():
            if is_distributed:
                # average across workers so every rank logs the same value
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
                # NOTE(review): .item() is applied only on the distributed
                # path here, so non-distributed log_vars keep tensors --
                # confirm downstream loggers accept that
                log_vars[loss_name] = loss_value.item()
        return (loss, log_vars)

    def train_step(self, data, optimizer):
        """One training step: forward, aggregate losses, package outputs."""
        losses = self(**data)
        (loss, log_vars) = self._parse_losses(losses)
        outputs = dict(loss=loss, log_vars=log_vars, num_samples=len(data['metas']))
        return outputs

    def val_step(self, data, optimizer):
        """Validation step; mirrors train_step (optimizer is unused)."""
        losses = self(**data)
        (loss, log_vars) = self._parse_losses(losses)
        outputs = dict(loss=loss, log_vars=log_vars, num_samples=len(data['metas']))
        return outputs
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register ns3::Ipv6PrefixValue constructors/methods on *cls*.

    Auto-generated pybindgen registration code mirroring the C++
    AttributeValue interface (Copy/Get/Set/Serialize/Deserialize);
    *root_module* is unused here but kept for the generator's uniform
    registration signature.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return
def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs):
    """Build a scripted nn.Module test case and its inputs, or return None.

    Returns ``(scripted_module, expected_args)`` for the module test
    described by *kwargs*, or ``None`` when the case is skipped
    (eval-mode descs, excluded script modules, functional modules).
    """
    name = get_nn_module_name_from_kwargs(**kwargs)
    if (('desc' in kwargs) and ('eval' in kwargs['desc'])):
        # eval-mode variants are not scripted here
        return
    # NOTE: a provisional test name built from `name`/`desc` was computed
    # here and immediately overwritten; that dead code was removed.
    test_name = get_nn_mod_test_name(**kwargs)
    if (test_name in EXCLUDE_SCRIPT_MODULES):
        return
    if ('constructor' in kwargs):
        nn_module = kwargs['constructor']
    else:
        nn_module = getattr(torch.nn, name)
    if ('FunctionalModule' in str(nn_module)):
        return
    if ('constructor_args_fn' in kwargs):
        constructor_args = kwargs['constructor_args_fn']()
    else:
        constructor_args = kwargs.get('constructor_args', ())
    # default to double precision; switch to complex double only when
    # every provided input tensor is complex
    input_dtype = torch.double
    if ('input_fn' in kwargs):
        input = kwargs['input_fn']()
        if isinstance(input, torch.Tensor):
            input = (input,)
        if all((tensor.is_complex() for tensor in input)):
            input_dtype = torch.cdouble
    else:
        input = (kwargs['input_size'],)
    # append optional extra args / targets to the input tuple
    if ('extra_args' in kwargs):
        input = (input + kwargs['extra_args'])
    if ('target_size' in kwargs):
        input = (input + (kwargs['target_size'],))
    elif ('target_fn' in kwargs):
        if torch.is_tensor(input):
            input = (input,)
        input = (input + (kwargs['target_fn'](),))
    (args_variable, kwargs_variable) = create_input(input, dtype=input_dtype)
    f_args_variable = deepcopy(unpack_variables(args_variable))
    # snapshot the inputs before the scripted module (possibly) mutates them
    out_var = deepcopy(f_args_variable)
    (args, mod) = (f_args_variable, create_script_module(None, nn_module, constructor_args, *f_args_variable)(*f_args_variable))
    return (mod, out_var)
def get_embedder(multires, input_dims=3):
    """Create a positional-encoding embedder.

    Returns ``(embed_fn, out_dim)`` where ``embed_fn`` maps an input tensor
    through the Embedder (sin/cos frequency bands plus the raw input).
    """
    config = dict(
        include_input=True,
        input_dims=input_dims,
        max_freq_log2=multires - 1,
        num_freqs=multires,
        log_sampling=True,
        periodic_fns=[torch.sin, torch.cos],
    )
    embedder = Embedder(**config)
    # bind the embedder instance as a default arg so the lambda is self-contained
    return (lambda x, eo=embedder: eo.embed(x)), embedder.out_dim
def basic_bn_shortcut(model, prefix, blob_in, dim_in, dim_out, stride):
    """Residual shortcut branch: identity when dims match, else 1x1 conv + affine."""
    if (dim_in == dim_out):
        # channel counts agree: plain identity shortcut
        return blob_in
    branch_name = prefix + '_branch1'
    conv = model.Conv(
        blob_in,
        branch_name,
        dim_in,
        dim_out,
        kernel=1,
        stride=stride,
        no_bias=1,
    )
    # AffineChannel stands in for a (frozen) batch norm
    return model.AffineChannel(conv, branch_name + '_bn', dim=dim_out)
def set_cycles_renderer(scene: bpy.types.Scene, camera_object: bpy.types.Object, num_samples: int, use_denoising: bool=True, use_motion_blur: bool=False, use_transparent_bg: bool=False) -> None:
    """Configure *scene* to render PNGs with Cycles through *camera_object*."""
    scene.camera = camera_object
    # render-level settings
    render = scene.render
    render.image_settings.file_format = 'PNG'
    render.engine = 'CYCLES'
    render.use_motion_blur = use_motion_blur
    render.film_transparent = use_transparent_bg
    # denoising lives on the view layer, sampling on the scene's cycles settings
    scene.view_layers[0].cycles.use_denoising = use_denoising
    scene.cycles.samples = num_samples
def setup_dataset(args, dataset_clazz, data_config, main_gpu, is_training_data):
    """Instantiate *dataset_clazz* from a (possibly string-encoded) config.

    Fills in a missing/None batch size from *args*, and (re)computes the
    device whenever none is set or a run is being resumed. Note that
    *data_config* is mutated in place.
    """
    if not isinstance(data_config, dict):
        # the config may arrive as a string literal, e.g. from the CLI
        data_config = literal_eval(data_config)
    if data_config.get('batch_size') is None:
        data_config['batch_size'] = args['batch_size']
    if 'device' not in data_config or args['resume']:
        # GPU when CUDA is allowed, explicitly requested, or resuming
        wants_gpu = (not args['no_cuda']) or data_config.get('cuda', False) or args['resume']
        data_config['device'] = main_gpu if wants_gpu else 'cpu'
    return dataset_clazz(
        dataset_dir=args['dataset_dir'],
        is_training_data=is_training_data,
        **data_config,
        **args['experiment_settings'],
    )
class BasicRFB(nn.Module):
    """Receptive Field Block: three dilated-conv branches fused by a 1x1 conv.

    The branches use increasing dilation (vision+1, vision+2, vision+4) to
    enlarge the receptive field; their outputs are concatenated, projected,
    scaled by *scale*, and added to a 1x1 shortcut of the input.
    """

    def __init__(self, in_planes, out_planes, stride=1, scale=0.1, map_reduce=8, vision=1, groups=1):
        super(BasicRFB, self).__init__()
        self.scale = scale
        self.out_channels = out_planes
        # bottleneck width shared by all branches
        inter_planes = (in_planes // map_reduce)
        # each branch outputs 2*inter_planes channels
        self.branch0 = nn.Sequential(BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False), BasicConv(inter_planes, (2 * inter_planes), kernel_size=(3, 3), stride=stride, padding=(1, 1), groups=groups), BasicConv((2 * inter_planes), (2 * inter_planes), kernel_size=3, stride=1, padding=(vision + 1), dilation=(vision + 1), relu=False, groups=groups))
        self.branch1 = nn.Sequential(BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False), BasicConv(inter_planes, (2 * inter_planes), kernel_size=(3, 3), stride=stride, padding=(1, 1), groups=groups), BasicConv((2 * inter_planes), (2 * inter_planes), kernel_size=3, stride=1, padding=(vision + 2), dilation=(vision + 2), relu=False, groups=groups))
        self.branch2 = nn.Sequential(BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False), BasicConv(inter_planes, ((inter_planes // 2) * 3), kernel_size=3, stride=1, padding=1, groups=groups), BasicConv(((inter_planes // 2) * 3), (2 * inter_planes), kernel_size=3, stride=stride, padding=1, groups=groups), BasicConv((2 * inter_planes), (2 * inter_planes), kernel_size=3, stride=1, padding=(vision + 4), dilation=(vision + 4), relu=False, groups=groups))
        # fuse the 3 branches (3 * 2*inter_planes = 6*inter_planes channels)
        self.ConvLinear = BasicConv((6 * inter_planes), out_planes, kernel_size=1, stride=1, relu=False)
        self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        """Return ``relu(scale * fuse(branches(x)) + shortcut(x))``."""
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        out = torch.cat((x0, x1, x2), 1)
        out = self.ConvLinear(out)
        short = self.shortcut(x)
        out = ((out * self.scale) + short)
        out = self.relu(out)
        return out
def _lemmas_to_words(tokens):
lemma_to_word = {}
for (word, unit) in tokens.items():
lemma = unit.token
if (lemma in lemma_to_word):
lemma_to_word[lemma].append(word)
else:
lemma_to_word[lemma] = [word]
return lemma_to_word |
class MultiPage():
    """Registry of Streamlit app pages with sidebar navigation."""

    def __init__(self):
        # each entry: {'title': str, 'function': callable(database)}
        self.pages = []

    def add_page(self, title, function):
        """Register a page shown as *title* and rendered by *function*."""
        entry = {'title': title, 'function': function}
        self.pages.append(entry)

    def run(self, database):
        """Show the sidebar selector and render the chosen page."""
        selected = st.sidebar.selectbox('App Navigation', self.pages, format_func=(lambda entry: entry['title']))
        selected['function'](database)
class CnnC3_3(Convolution2DArchitectureBase, NeuralNetworkTrainingDefault):
    """Two-conv CNN for fixed (101, 6, 1) inputs with a softmax classifier."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # CPU backend by default; flip to True before build_model for cupy
        self.use_gpu = False

    def build_model(self, x_shape, y_shape):
        """Assemble the conv-conv-linear-softmax pipeline for these shapes."""
        self.assert_shapes(x_shape, y_shape)
        # architecture is hard-wired to this input geometry
        assert (x_shape[1:] == (101, 6, 1))
        n_classes = y_shape[1]
        conv1 = Convolution(filtersize=(3, 3, 1, 32), stride=(1, 3))
        conv2 = Convolution(filtersize=(3, 2, 32, 32), stride=(1, 1))
        classifier = Linear(3104, n_classes)
        self.model = Sequential([conv1, Rect(), conv2, Rect(), Flatten(), classifier, SoftMax()])
        if self.use_gpu:
            self.model.to_cupy()
        else:
            self.model.to_numpy()
class NezhaForMultipleChoice(metaclass=DummyObject):
    """Auto-generated placeholder for a torch-only model class.

    Transformers-style dummy object: instantiating it calls
    ``requires_backends``, which raises a helpful error when the 'torch'
    backend is not installed.
    """
    # backends required for the real implementation
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class Trainer(DefaultTrainer):
    """DefaultTrainer specialization for semantic-segmentation configs."""

    # NOTE(review): these methods take `cls` but carry no @classmethod
    # decorator here; upstream detectron2 declares them as classmethods --
    # confirm how this class is invoked before relying on either form.
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """Return the evaluator matching the dataset's evaluator_type."""
        if (output_folder is None):
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if (evaluator_type == 'sem_seg'):
            return SemSegEvaluator(dataset_name, distributed=True, num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, output_dir=output_folder)
        if (evaluator_type == 'cityscapes_sem_seg'):
            assert (torch.cuda.device_count() >= comm.get_rank()), 'CityscapesEvaluator currently do not work with multiple machines.'
            return CityscapesSemSegEvaluator(dataset_name)
        # NOTE(review): evaluator_list is never populated above, so for any
        # other evaluator_type this always raises NotImplementedError; the
        # two branches below are effectively dead scaffolding.
        if (len(evaluator_list) == 0):
            raise NotImplementedError('no Evaluator for the dataset {} with the type {}'.format(dataset_name, evaluator_type))
        if (len(evaluator_list) == 1):
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    def build_train_loader(cls, cfg):
        """Train loader; adds sem-seg augmentations for SemanticSegmentor models."""
        if ('SemanticSegmentor' in cfg.MODEL.META_ARCHITECTURE):
            mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg))
        else:
            # fall back to the default mapper
            mapper = None
        return build_detection_train_loader(cfg, mapper=mapper)

    def build_lr_scheduler(cls, cfg, optimizer):
        """Delegate to the module-level build_lr_scheduler helper."""
        return build_lr_scheduler(cfg, optimizer)
def vgg11(pretrained=False, **kwargs):
    """Build a VGG-11 model (configuration 'A').

    When *pretrained* is True, random initialization is skipped and
    ImageNet weights are loaded from the model zoo.
    """
    if pretrained:
        # pretrained weights make the built-in initialization redundant
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['A']), **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['vgg11'])
        model.load_state_dict(state)
    return model
class Chunking(TaggingTask):
    """Chunking task: a TaggingTask fixed to the 'chunk' label scheme."""

    def __init__(self, config, tokenizer):
        # final positional flag's meaning is defined by TaggingTask -- see base class
        super().__init__(config, 'chunk', tokenizer, False)
def inspecs_params():
    """Return the benchmark input specs, one single-Inspec group per shape."""
    shapes = [
        (64, 64, 224, 224),
        (64, 128, 112, 112),
        (64, 512, 14, 14),
    ]
    return [[Inspec(shape)] for shape in shapes]
class TestBasisUtilFunctions(unittest.TestCase):
    """Cross-check the fock basis helper functions against each other."""

    def test_basis_size(self):
        """basis_size must equal the length of the explicit basis."""
        for photons in range(1, (MAX_PHOTONS + 1)):
            for modes in range(1, (MAX_MODES + 1)):
                explicit = len(b.fock.basis(photons, modes))
                computed = b.fock.basis_size(photons, modes)
                self.assertEqual(explicit, computed)

    def test_lossy_basis_size(self):
        """lossy_basis_size must equal the length of the explicit lossy basis."""
        for photons in range(1, (MAX_PHOTONS + 1)):
            for modes in range(1, (MAX_MODES + 1)):
                explicit = len(b.fock.lossy_basis(photons, modes))
                computed = b.fock.lossy_basis_size(photons, modes)
                self.assertEqual(explicit, computed)

    def test_basis_array(self):
        """basis_array must match np.array over the explicit basis."""
        for photons in range(1, (MAX_PHOTONS + 1)):
            for modes in range(1, (MAX_MODES + 1)):
                arr = b.fock.basis_array(photons, modes)
                ref = np.array(b.fock.basis(photons, modes))
                mismatch = np.sum(np.abs((arr - ref)))
                self.assertEqual(mismatch, 0)

    def test_loossy_basis_array(self):
        """lossy_basis_array must match np.array over the explicit lossy basis."""
        # (method-name typo kept: renaming would change the reported test id)
        for photons in range(1, (MAX_PHOTONS + 1)):
            for modes in range(1, (MAX_MODES + 1)):
                arr = b.fock.lossy_basis_array(photons, modes)
                ref = np.array(b.fock.lossy_basis(photons, modes))
                mismatch = np.sum(np.abs((arr - ref)))
                self.assertEqual(mismatch, 0)

    def test_basis_lookup(self):
        """basis_lookup must map each basis state back to its index."""
        for photons in range(1, (MAX_PHOTONS + 1)):
            for modes in range(1, (MAX_MODES + 1)):
                states = b.fock.basis(photons, modes)
                lookup = b.fock.basis_lookup(photons, modes)
                for (position, state) in enumerate(states):
                    self.assertEqual(position, lookup[tuple(state)])

    def test_lossy_basis_lookup(self):
        """lossy_basis_lookup must map each lossy state back to its index."""
        for photons in range(1, (MAX_PHOTONS + 1)):
            for modes in range(1, (MAX_MODES + 1)):
                states = b.fock.lossy_basis(photons, modes)
                lookup = b.fock.lossy_basis_lookup(photons, modes)
                for (position, state) in enumerate(states):
                    self.assertEqual(position, lookup[tuple(state)])
def test_mmi(data_with_redundancy):
    """interaction_information should match the inclusion-exclusion formula.

    Builds all marginal/pairwise/joint entropy estimates over three
    ternary variables and compares the estimator against
    ``-(h0 + h1 + h2 - h01 - h02 - h12 + h012)`` to within 0.01.
    """
    random.seed(SEED)
    from ndd.nsb import interaction_information
    # single-variable entropies (alphabet size 3 each)
    h0 = ndd.from_data(data_with_redundancy[0], ks=[3])
    h1 = ndd.from_data(data_with_redundancy[1], ks=[3])
    h2 = ndd.from_data(data_with_redundancy[2], ks=[3])
    # pairwise joint entropies
    h01 = ndd.from_data(data_with_redundancy[[0, 1]], ks=([3] * 2))
    h02 = ndd.from_data(data_with_redundancy[[0, 2]], ks=([3] * 2))
    h12 = ndd.from_data(data_with_redundancy[[1, 2]], ks=([3] * 2))
    # full joint entropy
    h012 = ndd.from_data(data_with_redundancy, ks=([3] * 3))
    # multivariate mutual (interaction) information by inclusion-exclusion
    mmi = (- ((((((h0 + h1) + h2) - h01) - h02) - h12) + h012))
    estimate = interaction_information(data_with_redundancy, ks=([3] * 3))
    assert (estimate == approx(mmi, abs=0.01))
def mean_absolute_scaled_error(y_true, y_pred):
    """Return the Mean Absolute Scaled Error (MASE).

    The forecast's mean absolute error is scaled by the in-sample mean
    absolute error of the one-step naive forecast (first differences of
    *y_true*). Inputs are flattened before comparison.
    """
    y_true = np.array(y_true).flatten()
    y_pred = np.array(y_pred).flatten()
    n = y_true.shape[0]
    # mean absolute first difference = naive one-step forecast error
    scale = (np.abs(np.diff(y_true, axis=(- 1))).sum() / (n - 1))
    return (np.abs((y_true - y_pred)).mean() / scale)
class ModelBuilder():
    """Factory for segmentation encoders/decoders (switch-norm ResNet variants)."""

    def weights_init(self, m):
        """Kaiming-init conv weights; set BatchNorm weight=1, bias=1e-4."""
        classname = m.__class__.__name__
        if (classname.find('Conv') != (- 1)):
            nn.init.kaiming_normal_(m.weight.data)
        elif (classname.find('BatchNorm') != (- 1)):
            m.weight.data.fill_(1.0)
            m.bias.data.fill_(0.0001)

    def build_encoder(self, arch='resnet50_dilated8', fc_dim=512, weights=''):
        """Build the encoder named by *arch*; load checkpoint *weights* if given.

        Raises NotImplementedError for the disabled resnet34 variants and
        a plain Exception for unknown architectures.
        """
        # use pretrained ImageNet weights only when no checkpoint is supplied
        pretrained = (True if (len(weights) == 0) else False)
        if (arch == 'resnet34'):
            # NOTE(review): resnet34 variants are disabled; the lines after
            # each `raise` below are unreachable scaffolding kept from the
            # original and never execute.
            raise NotImplementedError
            orig_resnet = resnet_v1_sn.__dict__['resnetv1sn34'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnet)
        elif (arch == 'resnet34_dilated8'):
            raise NotImplementedError
            orig_resnet = resnet_v1_sn.__dict__['resnetv1sn34'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
        elif (arch == 'resnet34_dilated16'):
            raise NotImplementedError
            orig_resnet = resnet_v1_sn.__dict__['resnetv1sn34'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet, dilate_scale=16)
        elif (arch == 'resnet50'):
            orig_resnet = resnet_v1_sn.__dict__['resnetv1sn50'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnet)
        elif (arch == 'resnet50_dilated8'):
            orig_resnet = resnet_v1_sn.__dict__['resnetv1sn50'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
        elif (arch == 'resnet50_dilated16'):
            orig_resnet = resnet_v1_sn.__dict__['resnetv1sn50'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet, dilate_scale=16)
        elif (arch == 'resnet101'):
            orig_resnet = resnet_v1_sn.__dict__['resnetv1sn101'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnet)
        elif (arch == 'resnet101_dilated8'):
            orig_resnet = resnet_v1_sn.__dict__['resnetv1sn101'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
        elif (arch == 'resnet101_dilated16'):
            orig_resnet = resnet_v1_sn.__dict__['resnetv1sn101'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet, dilate_scale=16)
        else:
            raise Exception('Architecture undefined!')
        if (len(weights) > 0):
            print('Loading weights for net_encoder')
            # map_location keeps tensors on CPU; strict=False tolerates
            # missing/extra keys in the checkpoint
            net_encoder.load_state_dict(torch.load(weights, map_location=(lambda storage, loc: storage)), strict=False)
        return net_encoder

    def build_decoder(self, arch='ppm_bilinear_deepsup', fc_dim=512, num_class=150, weights='', use_softmax=False):
        """Build the decoder named by *arch*; load checkpoint *weights* if given."""
        if (arch == 'c1_bilinear_deepsup'):
            net_decoder = C1BilinearDeepSup(num_class=num_class, fc_dim=fc_dim, use_softmax=use_softmax)
        elif (arch == 'c1_bilinear'):
            net_decoder = C1Bilinear(num_class=num_class, fc_dim=fc_dim, use_softmax=use_softmax)
        elif (arch == 'ppm_bilinear'):
            net_decoder = PPMBilinear(num_class=num_class, fc_dim=fc_dim, use_softmax=use_softmax)
        elif (arch == 'ppm_bilinear_deepsup'):
            net_decoder = PPMBilinearDeepsup(num_class=num_class, fc_dim=fc_dim, use_softmax=use_softmax)
        elif (arch == 'upernet_lite'):
            net_decoder = UPerNet(num_class=num_class, fc_dim=fc_dim, use_softmax=use_softmax, fpn_dim=256)
        elif (arch == 'upernet'):
            net_decoder = UPerNet(num_class=num_class, fc_dim=fc_dim, use_softmax=use_softmax, fpn_dim=512)
        elif (arch == 'upernet_tmp'):
            net_decoder = UPerNetTmp(num_class=num_class, fc_dim=fc_dim, use_softmax=use_softmax, fpn_dim=512)
        else:
            raise Exception('Architecture undefined!')
        # decoders are always freshly initialized before optional loading
        net_decoder.apply(self.weights_init)
        if (len(weights) > 0):
            print('Loading weights for net_decoder')
            net_decoder.load_state_dict(torch.load(weights, map_location=(lambda storage, loc: storage)), strict=False)
        return net_decoder
def cinc_elbow2(coors, mode):
    """Select coordinates inside a thin tube at one of two elbow centres.

    *mode* 0 selects the tube centred at x=0.0, any other mode the one at
    x=0.2; the tube axis is +y with radius 0.029 and length 2e-05 (the
    -1.0 argument is passed through to get_coors_in_tube -- see its docs).
    """
    x_centre = 0.0 if mode == 0 else 0.2
    centre = nm.array([x_centre, (- 1e-05), 0.0], nm.float64)
    axis = nm.array([0, 1, 0], nm.float64)
    return get_coors_in_tube(coors, centre, axis, (- 1.0), 0.029, 2e-05)
def bilerp_impl(vf: ti.template(), p):
    """Bilinearly interpolate field *vf* at continuous position *p*.

    Samples are taken at cell centres (hence the 0.5 offset); the four
    neighbouring samples are blended first along u, then along v.
    """
    (u, v) = p
    s = u - 0.5
    t = v - 0.5
    iu = ti.floor(s)
    iv = ti.floor(t)
    # fractional offsets within the cell
    fu = s - iu
    fv = t - iv
    bottom_left = sample(vf, iu, iv)
    bottom_right = sample(vf, iu + 1, iv)
    top_left = sample(vf, iu, iv + 1)
    top_right = sample(vf, iu + 1, iv + 1)
    bottom = lerp(bottom_left, bottom_right, fu)
    top = lerp(top_left, top_right, fu)
    return lerp(bottom, top, fv)
def evaluate(args, model, tokenizer, prefix=''):
    """Evaluate a language model: mean loss over the eval set -> perplexity.

    Writes the results to '<output_dir>/eval_results.txt' (overwritten on
    each call) and returns the result dict.
    """
    eval_output_dir = args.output_dir
    eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
    # only the main process (or single-process runs) creates the directory
    if ((not os.path.exists(eval_output_dir)) and (args.local_rank in [(- 1), 0])):
        os.makedirs(eval_output_dir)
    args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
    eval_sampler = (SequentialSampler(eval_dataset) if (args.local_rank == (- 1)) else DistributedSampler(eval_dataset))
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    logger.info('***** Running evaluation {} *****'.format(prefix))
    logger.info(' Num examples = %d', len(eval_dataset))
    logger.info(' Batch size = %d', args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    for batch in tqdm(eval_dataloader, desc='Evaluating'):
        batch = batch.to(args.device)
        with torch.no_grad():
            # masked-LM models take masked_lm_labels; causal LMs take labels;
            # either way the inputs double as the targets
            outputs = (model(batch, masked_lm_labels=batch) if args.mlm else model(batch, labels=batch))
            lm_loss = outputs[0]
            eval_loss += lm_loss.mean().item()
        nb_eval_steps += 1
    eval_loss = (eval_loss / nb_eval_steps)
    perplexity = torch.exp(torch.tensor(eval_loss))
    # NOTE(review): perplexity is stored as a 0-d tensor, not a float --
    # confirm downstream consumers expect that
    result = {'perplexity': perplexity}
    output_eval_file = os.path.join(eval_output_dir, 'eval_results.txt')
    with open(output_eval_file, 'w') as writer:
        logger.info('***** Eval results {} *****'.format(prefix))
        for key in sorted(result.keys()):
            logger.info(' %s = %s', key, str(result[key]))
            writer.write(('%s = %s\n' % (key, str(result[key]))))
    return result
class VarGRU(VarRNNBase):
    """Variational-dropout GRU: VarRNNBase configured with GRU cells."""

    def __init__(self, *args, **kwargs):
        # fix mode/Cell; every other option passes straight through
        super().__init__(*args, mode='GRU', Cell=nn.GRUCell, **kwargs)

    def forward(self, x, hx=None):
        """Delegate to the base-class forward implementation."""
        return super().forward(x, hx)
def convert_to_float(image, preserve_range):
    """Return *image* as a floating-point array.

    float16 is always promoted to float32. With *preserve_range* the
    values are kept as-is (non-float dtypes are cast to float64);
    otherwise img_as_float rescales to the canonical float range.
    """
    if (image.dtype == np.float16):
        # float16 lacks precision for most downstream ops; promote it
        return image.astype(np.float32)
    if not preserve_range:
        from ..util.dtype import img_as_float
        return img_as_float(image)
    if (image.dtype.char not in 'df'):
        # non-float input: cast without rescaling
        image = image.astype(float)
    return image
def main(args, init_distributed=False):
    """Fairseq training driver: set up task/model/trainer, run the epoch loop.

    Handles optional distributed init, checkpoint loading/saving, periodic
    validation, early stopping, and LR scheduling until max epoch/update
    or the minimum LR is reached.
    """
    utils.import_user_module(args)
    # optional Facebook-internal path manager; absence is fine
    try:
        from fairseq.fb_pathmgr import fb_pathmgr
        global fb_pathmgr_registerd
        if (not fb_pathmgr_registerd):
            fb_pathmgr.register()
            fb_pathmgr_registerd = True
    except (ModuleNotFoundError, ImportError):
        pass
    assert ((args.max_tokens is not None) or (args.max_sentences is not None)), 'Must specify batch size either with --max-tokens or --max-sentences'
    if (torch.cuda.is_available() and (not args.cpu)):
        torch.cuda.set_device(args.device_id)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)
    # only the master rank verifies it can write checkpoints
    if distributed_utils.is_master(args):
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)
    print(args)
    task = tasks.setup_task(args)
    # validation splits are loaded up front; train split comes via the trainer
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=False, epoch=0)
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    print(model)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {} (num. trained: {})'.format(sum((p.numel() for p in model.parameters())), sum((p.numel() for p in model.parameters() if p.requires_grad))))
    trainer = Trainer(args, task, model, criterion)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(args.max_tokens, args.max_sentences))
    # restores model/optimizer state when a checkpoint exists
    (extra_state, epoch_itr) = checkpoint_utils.load_checkpoint(args, trainer)
    max_epoch = (args.max_epoch or math.inf)
    max_update = (args.max_update or math.inf)
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_subsets = args.valid_subset.split(',')
    # early-stop counter lives as an attribute on the save_checkpoint function
    if (not hasattr(checkpoint_utils.save_checkpoint, 'not_best')):
        checkpoint_utils.save_checkpoint.not_best = 0
    while ((lr > args.min_lr) and (epoch_itr.epoch < max_epoch) and (trainer.get_num_updates() < max_update)):
        train(args, trainer, task, epoch_itr)
        if ((not args.disable_validation) and ((epoch_itr.epoch % args.validate_interval) == 0)):
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
            if (args.early_stop > 0):
                # NOTE(review): comparison assumes lower-is-better validation
                # loss; 'best' is maintained by save_checkpoint
                if (hasattr(checkpoint_utils.save_checkpoint, 'best') and (valid_losses[0] > checkpoint_utils.save_checkpoint.best)):
                    checkpoint_utils.save_checkpoint.not_best += 1
                    print('| Not the best ckpt... not best:', checkpoint_utils.save_checkpoint.not_best)
                    if (checkpoint_utils.save_checkpoint.not_best > args.early_stop):
                        print('| Early stop...')
                        break
                else:
                    checkpoint_utils.save_checkpoint.not_best = 0
        else:
            valid_losses = [None]
        # scheduler may consume the validation loss (e.g. reduce-on-plateau)
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        if ((epoch_itr.epoch % args.save_interval) == 0):
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        # sharded data (path contains ':') must be reloaded each epoch
        reload_dataset = (':' in getattr(args, 'data', ''))
        epoch_itr = trainer.get_train_iterator(epoch_itr.epoch, load_dataset=reload_dataset)
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))
def makevocabs(line, ratio):
    """Split *line* into whitespace tokens and build *ratio* vocab sets.

    Token i goes into set ``i % ratio``; set k therefore holds the tokens
    at positions k, k+ratio, k+2*ratio, ...
    """
    toks = line.split()
    return [set(toks[offset::ratio]) for offset in range(ratio)]
def wer_details_for_batch(ids, refs, hyps, compute_alignments=False):
    """Compute per-utterance WER details for a batch.

    Converts the parallel id/reference/hypothesis lists to dict format
    and scores them in 'strict' mode.
    """
    refs_by_id = _batch_to_dict_format(ids, refs)
    hyps_by_id = _batch_to_dict_format(ids, hyps)
    return wer_details_by_utterance(refs_by_id, hyps_by_id, compute_alignments=compute_alignments, scoring_mode='strict')
def parse_version(string_version):
    """Parse a semver string into its named component dict.

    Raises ValueError when *string_version* does not match SEMVER_REGEX.
    """
    match = SEMVER_REGEX.match(string_version)
    if match is not None:
        return match.groupdict()
    msg = ('Invalid version: %s. Accepted versions must match the following regex pattern: %s' % (string_version, SEMVER_PATTERN))
    raise ValueError(msg)
def display_failures_for_single_test(context: ExecutionContext, result: SerializedTestResult) -> None:
    """Display all failed checks for a single test result.

    Failures are grouped by reproducible code sample; within each group the
    generic `not_a_server_error` check is shown first, and the captured
    response (status line + payload) is printed once after the last check.
    """
    from ...transports.responses import get_reason
    display_subsection(result)
    if result.is_flaky:
        click.secho(FLAKY_FAILURE_MESSAGE, fg='red')
        click.echo()
    for (idx, (code_sample, group)) in enumerate(group_by_case(result.checks, context.code_sample_style), 1):
        # Stable ordering: `not_a_server_error` failures sort first.
        checks = sorted(group, key=(lambda c: (c.name != 'not_a_server_error')))
        for (check_idx, check) in enumerate(checks):
            if (check_idx == 0):
                click.secho(f'{idx}. {TEST_CASE_ID_TITLE}: {check.example.id}', bold=True)
            if (check.context is not None):
                title = check.context.title
                if check.context.message:
                    message = check.context.message
                else:
                    message = None
            else:
                title = f'Custom check failed: `{check.name}`'
                message = check.message
            click.secho(f'''
- {title}''', fg='red', bold=True)
            if message:
                message = textwrap.indent(message, prefix='    ')
                click.secho(f'''
{message}''', fg='red')
            # Response details are only shown after the last check of the group.
            if ((check_idx + 1) == len(checks)):
                if (check.response is not None):
                    status_code = check.response.status_code
                    reason = get_reason(status_code)
                    response = bold(f'[{check.response.status_code}] {reason}')
                    click.echo(f'''
{response}:''')
                # FIX: only read `.body` when a response was captured.  Previously
                # `check.response.body` was evaluated unconditionally here, raising
                # AttributeError for checks with `check.response is None` (the guard
                # on the next line came too late).
                response_body = check.response.body if check.response is not None else None
                if ((check.response is not None) and (response_body is not None)):
                    if (not response_body):
                        click.echo('\n    <EMPTY>')
                    else:
                        encoding = (check.response.encoding or 'utf8')
                        try:
                            # Response bodies are stored base64-encoded in serialized results.
                            payload = base64.b64decode(response_body).decode(encoding)
                            payload = prepare_response_payload(payload)
                            payload = textwrap.indent(f'''
`{payload}`''', prefix='    ')
                            click.echo(payload)
                        except UnicodeDecodeError:
                            click.echo('\n    <BINARY>')
        click.echo(f'''
{bold('Reproduce with')}:

    {code_sample}
''')
class DeviceRootKeys_V1_1(DeviceRootKeys):
    """Device root keys, v1.1 schema: adds `joinEUI` and `nwkKey` fields.

    All other keyword arguments are forwarded unchanged to DeviceRootKeys.
    """

    def __init__(self, joinEUI=None, nwkKey=None, **kwargs):
        super().__init__(**kwargs)
        # Both fields default to None when not supplied.
        self.nwkKey = nwkKey
        self.joinEUI = joinEUI
class MemoryMeasurements(Measurements):
    """Memory sampler: snapshots /proc/<pid>/smaps for every monitored pid plus
    the global page-cache size, then post-processes the snapshots into summed
    USS/PSS/RSS values per sample.

    Inherited state used here (presumably set by Measurements.__init__ — confirm):
    _pids, _pids_num, _counter, _data, _output_dir.
    """

    def __init__(self, pids, output_dir):
        super().__init__(pids, output_dir)
        # One `cat` argument per monitored pid.
        smaps_files = ' '.join([' /proc/{pid}/smaps '.format(pid=pid) for pid in self._pids])
        # `{{id}}` survives this .format() as `{id}` so measure() can fill in
        # the per-sample counter later.
        self._copy_cmd = 'cat {smaps} > {output_dir}/smaps_{{id}}'.format(smaps=smaps_files, output_dir=output_dir)
        # NOTE(review): this string is never run through .format(), so the shell
        # receives literal `{{print $2}}`; awk parses the doubled braces as a
        # nested block and still prints field 2 — works, but the escaping looks
        # like a leftover. Confirm intent before simplifying.
        self._cached_cmd = "cat /proc/meminfo | grep ^Cached: | awk '{{print $2}}'"

    def measure(self):
        """Take one sample: dump smaps for all pids and record the Cached value.

        Appends [unix-timestamp, pid-count, cached-kB] to self._data.
        Raises RuntimeError when the shell pipeline fails.
        """
        timestamp = datetime.datetime.now()
        # shell=True is acceptable here: the command is built solely from
        # internal state (pids, output dir), not from untrusted input.
        ret = subprocess.run('{copy} && {cached}'.format(copy=self._copy_cmd.format(id=self._counter), cached=self._cached_cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        self._counter += 1
        if (ret.returncode != 0):
            print('Memory query failed!')
            print(ret.stderr.decode('utf-8'))
            raise RuntimeError()
        else:
            self._data.append([timestamp.strftime('%s.%f'), self._pids_num, int(ret.stdout.decode('utf-8').split('\n')[0])])

    def postprocess(self):
        """Extend every stored sample with summed Private (USS), Pss and Rss kB,
        read back from the smaps snapshots written by measure()."""
        self._samples_counter = len(self._data)
        # Here the doubled braces ARE collapsed by .format() below, yielding a
        # normal awk program per snapshot file.
        cmd = "awk '/Rss:/{{ sum3 += $2 }} /Pss:/{{ sum += $2 }} /Private/{{ sum2 += $2 }} END {{ print sum2, sum, sum3 }}' {dir}/smaps_{counter}"
        for i in range(0, self._samples_counter):
            ret = subprocess.run([cmd.format(dir=self._output_dir, counter=i)], stdout=subprocess.PIPE, shell=True)
            self._data[i].extend(map(int, ret.stdout.decode('utf-8').strip().split()))

    @staticmethod
    def header():
        """Column names matching the rows produced by measure()/postprocess().

        FIX: declared @staticmethod — previously `def header():` took no
        arguments, so `instance.header()` raised TypeError (self was passed to
        a zero-argument function). `MemoryMeasurements.header()` callers are
        unaffected.
        """
        return ['Timestamp', 'N', 'Cached', 'USS', 'PSS', 'RSS']
class TestHipify(TestCase):
    """Smoke test: the hipify tooling must be importable."""

    def test_import_hipify(self):
        """Importing torch.utils.hipify.hipify_python must not raise."""
        import torch.utils.hipify.hipify_python  # noqa: F401
def trapezoid_integration_caller(data, h, actual):
    """CUDA kernel wrapper: every thread computes the trapezoid integral of
    *data* with step *h* and stores it in its own slot of *actual*.
    """
    thread_id = cuda.grid(1)
    actual[thread_id] = formal_integral_cuda.trapezoid_integration_cuda(data, h)
def register_types(module):
    """Register all C++ types exposed by this pybindgen binding module.

    NOTE(review): this looks like auto-generated ns-3 Python binding glue for
    the sixlowpan module (classes without import_from_module are local; the
    rest are re-exported from ns.core / ns.network). Registration order
    matters — outer classes and parents must exist before dependents — so do
    not reorder; regenerate from the binding generator instead of hand-editing.
    """
    root_module = module.get_root()
    # --- Address / attribute-construction plumbing (ns.network / ns.core) ---
    module.add_class('Address', import_from_module='ns.network')
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator', u'ns3::AttributeConstructionList::CIterator')
    typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator*', u'ns3::AttributeConstructionList::CIterator*')
    typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator&', u'ns3::AttributeConstructionList::CIterator&')
    # --- Packet buffers and tag iteration helpers ---
    module.add_class('Buffer', import_from_module='ns.network')
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    module.add_class('ByteTagList', import_from_module='ns.network')
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
    module.add_class('CallbackBase', import_from_module='ns.core')
    # --- DefaultDeleter template instantiations used by SimpleRefCount below ---
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
    module.add_class('Hasher', import_from_module='ns.core')
    # --- Address value types; each implicitly converts to the generic Address ---
    module.add_class('Ipv4Address', import_from_module='ns.network')
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    module.add_class('Ipv6Address', import_from_module='ns.network')
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    module.add_class('Mac48Address', import_from_module='ns.network')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )', u'ns3::Mac48Address::TracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )*', u'ns3::Mac48Address::TracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )&', u'ns3::Mac48Address::TracedCallback&')
    root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('Mac8Address', import_from_module='ns.network')
    root_module['ns3::Mac8Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('NetDeviceContainer', import_from_module='ns.network')
    typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator', u'ns3::NetDeviceContainer::Iterator')
    typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator*', u'ns3::NetDeviceContainer::Iterator*')
    typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator&', u'ns3::NetDeviceContainer::Iterator&')
    # --- Core object model and packet metadata ---
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    module.add_class('ObjectFactory', import_from_module='ns.core')
    module.add_class('PacketMetadata', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    module.add_class('PacketTagList', import_from_module='ns.network')
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    # --- SixLowPan types local to this module (no import_from_module) ---
    module.add_class('SixLowPanDispatch')
    module.add_enum('Dispatch_e', ['LOWPAN_NALP', 'LOWPAN_NALP_N', 'LOWPAN_IPv6', 'LOWPAN_HC1', 'LOWPAN_BC0', 'LOWPAN_IPHC', 'LOWPAN_IPHC_N', 'LOWPAN_MESH', 'LOWPAN_MESH_N', 'LOWPAN_FRAG1', 'LOWPAN_FRAG1_N', 'LOWPAN_FRAGN', 'LOWPAN_FRAGN_N', 'LOWPAN_UNSUPPORTED'], outer_class=root_module['ns3::SixLowPanDispatch'])
    module.add_enum('NhcDispatch_e', ['LOWPAN_NHC', 'LOWPAN_NHC_N', 'LOWPAN_UDPNHC', 'LOWPAN_UDPNHC_N', 'LOWPAN_NHCUNSUPPORTED'], outer_class=root_module['ns3::SixLowPanDispatch'])
    module.add_class('SixLowPanHelper')
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    module.add_class('TagBuffer', import_from_module='ns.network')
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    module.add_class('TypeId', import_from_module='ns.core')
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    typehandlers.add_type_alias(u'uint32_t', u'ns3::TypeId::hash_t')
    typehandlers.add_type_alias(u'uint32_t*', u'ns3::TypeId::hash_t*')
    typehandlers.add_type_alias(u'uint32_t&', u'ns3::TypeId::hash_t&')
    module.add_class('empty', import_from_module='ns.core')
    module.add_class('int64x64_t', import_from_module='ns.core')
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    # --- SimpleRefCount instantiations for all ref-counted base types ---
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    # --- SixLowPan header classes and their enums ---
    module.add_class('SixLowPanFrag1', parent=root_module['ns3::Header'])
    module.add_class('SixLowPanFragN', parent=root_module['ns3::Header'])
    module.add_class('SixLowPanHc1', parent=root_module['ns3::Header'])
    module.add_enum('LowPanHc1Addr_e', ['HC1_PIII', 'HC1_PIIC', 'HC1_PCII', 'HC1_PCIC'], outer_class=root_module['ns3::SixLowPanHc1'])
    module.add_enum('LowPanHc1NextHeader_e', ['HC1_NC', 'HC1_UDP', 'HC1_ICMP', 'HC1_TCP'], outer_class=root_module['ns3::SixLowPanHc1'])
    module.add_class('SixLowPanIphc', parent=root_module['ns3::Header'])
    module.add_enum('TrafficClassFlowLabel_e', ['TF_FULL', 'TF_DSCP_ELIDED', 'TF_FL_ELIDED', 'TF_ELIDED'], outer_class=root_module['ns3::SixLowPanIphc'])
    module.add_enum('Hlim_e', ['HLIM_INLINE', 'HLIM_COMPR_1', 'HLIM_COMPR_64', 'HLIM_COMPR_255'], outer_class=root_module['ns3::SixLowPanIphc'])
    module.add_enum('HeaderCompression_e', ['HC_INLINE', 'HC_COMPR_64', 'HC_COMPR_16', 'HC_COMPR_0'], outer_class=root_module['ns3::SixLowPanIphc'])
    module.add_class('SixLowPanIpv6', parent=root_module['ns3::Header'])
    module.add_class('SixLowPanNhcExtension', parent=root_module['ns3::Header'])
    module.add_enum('Eid_e', ['EID_HOPBYHOP_OPTIONS_H', 'EID_ROUTING_H', 'EID_FRAGMENTATION_H', 'EID_DESTINATION_OPTIONS_H', 'EID_MOBILITY_H', 'EID_IPv6_H'], outer_class=root_module['ns3::SixLowPanNhcExtension'])
    module.add_class('SixLowPanUdpNhcExtension', parent=root_module['ns3::Header'])
    module.add_enum('Ports_e', ['PORTS_INLINE', 'PORTS_ALL_SRC_LAST_DST', 'PORTS_LAST_SRC_ALL_DST', 'PORTS_LAST_SRC_LAST_DST'], outer_class=root_module['ns3::SixLowPanUdpNhcExtension'])
    # --- Time and traced-callback aliases ---
    module.add_class('Time', import_from_module='ns.core')
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )', u'ns3::Time::TracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )*', u'ns3::Time::TracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )&', u'ns3::Time::TracedCallback&')
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    # --- Attribute accessor/checker/value hierarchy ---
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
    module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    # --- NetDevice and its callback type aliases ---
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    typehandlers.add_type_alias(u'void ( * ) ( )', u'ns3::NetDevice::LinkChangeTracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( )*', u'ns3::NetDevice::LinkChangeTracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( )&', u'ns3::NetDevice::LinkChangeTracedCallback&')
    typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::ReceiveCallback')
    typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::ReceiveCallback*')
    typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::ReceiveCallback&')
    typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::PromiscReceiveCallback')
    typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::PromiscReceiveCallback*')
    typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::PromiscReceiveCallback&')
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    # --- Packet and its traced-callback aliases ---
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )', u'ns3::Packet::TracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )*', u'ns3::Packet::TracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )&', u'ns3::Packet::TracedCallback&')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )', u'ns3::Packet::AddressTracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )*', u'ns3::Packet::AddressTracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )&', u'ns3::Packet::AddressTracedCallback&')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )', u'ns3::Packet::TwoAddressTracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )*', u'ns3::Packet::TwoAddressTracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )&', u'ns3::Packet::TwoAddressTracedCallback&')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )', u'ns3::Packet::Mac48AddressTracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )*', u'ns3::Packet::Mac48AddressTracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )&', u'ns3::Packet::Mac48AddressTracedCallback&')
    typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )', u'ns3::Packet::SizeTracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )*', u'ns3::Packet::SizeTracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )&', u'ns3::Packet::SizeTracedCallback&')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )', u'ns3::Packet::SinrTracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )*', u'ns3::Packet::SinrTracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )&', u'ns3::Packet::SinrTracedCallback&')
    # --- SixLowPanNetDevice (the main class of this module) ---
    module.add_class('SixLowPanNetDevice', parent=root_module['ns3::NetDevice'])
    module.add_enum('DropReason', ['DROP_FRAGMENT_TIMEOUT', 'DROP_FRAGMENT_BUFFER_FULL', 'DROP_UNKNOWN_EXTENSION'], outer_class=root_module['ns3::SixLowPanNetDevice'])
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::SixLowPanNetDevice >, uint32_t )', u'ns3::SixLowPanNetDevice::RxTxTracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::SixLowPanNetDevice >, uint32_t )*', u'ns3::SixLowPanNetDevice::RxTxTracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::SixLowPanNetDevice >, uint32_t )&', u'ns3::SixLowPanNetDevice::RxTxTracedCallback&')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::SixLowPanNetDevice::DropReason, ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::SixLowPanNetDevice >, uint32_t )', u'ns3::SixLowPanNetDevice::DropTracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::SixLowPanNetDevice::DropReason, ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::SixLowPanNetDevice >, uint32_t )*', u'ns3::SixLowPanNetDevice::DropTracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::SixLowPanNetDevice::DropReason, ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::SixLowPanNetDevice >, uint32_t )&', u'ns3::SixLowPanNetDevice::DropTracedCallback&')
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ptr<ns3::SixLowPanNetDevice>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', template_parameters=['void', 'ns3::SixLowPanNetDevice::DropReason', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ptr<ns3::SixLowPanNetDevice>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    # --- Nested C++ namespaces registered via their own helpers ---
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)
def getAllObjs(v):
    """Collect all object tokens attached to verb token *v*.

    Looks at direct objects among the verb's right children, objects reached
    through prepositions, objects inside an open clausal complement (which may
    also replace the verb itself), and finally conjoined objects.

    Returns (verb, objects).
    """
    right_children = list(v.rights)
    objs = [tok for tok in right_children if tok.dep_ in OBJECTS]
    objs.extend(getObjsFromPrepositions(right_children))
    xcomp_verb, xcomp_objs = getObjFromXComp(right_children)
    if xcomp_verb is not None and xcomp_objs:
        # The clausal complement supplies both the real verb and more objects.
        objs.extend(xcomp_objs)
        v = xcomp_verb
    if objs:
        objs.extend(getObjsFromConjunctions(objs))
    return (v, objs)
def all_newer(src_files, dst_files):
    """Return True iff every file in *dst_files* exists and is newer than
    every file in *src_files*.

    A destination also counts as newer than a source that does not exist,
    matching the semantics of ``distutils.dep_util.newer``.  FIX: distutils
    was deprecated in 3.10 and removed in Python 3.12, so the helper is
    inlined here instead of imported.
    """
    def _newer(dst, src):
        # distutils.dep_util.newer(dst, src) returned True for a missing src.
        if not os.path.exists(src):
            return True
        return os.path.getmtime(dst) > os.path.getmtime(src)

    return all(
        os.path.exists(dst) and _newer(dst, src)
        for dst in dst_files
        for src in src_files
    )
def get_precision(input_number):
    """Return the number of digits after the decimal point in str(input_number).

    Returns 0 when the string form has no single '.' separator (integers,
    scientific notation such as '1e-07', malformed input) or when the value
    cannot be inspected at all.
    """
    try:
        _, fractional = str(input_number).split('.')
    except Exception:
        # No '.' (or more than one) makes the unpack fail -> zero precision.
        return 0
    return len(fractional)
def _int_or_half_int(k):
    """Classify *k* as an integer or a half-integer.

    Returns (True, ZZ(k)) for integers, and (False, floor(k)) for rationals
    with denominator exactly 2.

    Raises ValueError for anything else.
    """
    if k in ZZ:
        return (True, ZZ(k))
    try:
        rational = QQ(k)
    except (ValueError, TypeError):
        pass
    else:
        if rational.denominator() == 2:
            return (False, rational.floor())
    raise ValueError('k must be an integer or an integer + 1/2')
def preProcess(data_path, use_preprocess, publish_time, filter_len):
    """Build click sessions from the MIND train+dev behaviour logs.

    When ``use_preprocess`` is True, previously pickled intermediate files are
    loaded from ../data/mind/ instead of re-parsing the TSVs.  Otherwise the
    behaviors.tsv files are parsed, clicked items are split from impressions,
    sessions are filtered, and the intermediates are pickled back out.

    ``publish_time`` presumably maps article id -> publish timestamp (it is
    indexed by clicked article ids) — confirm with the caller.
    ``filter_len`` is the minimum number of clicks a session must have.

    Returns (sess_clicks, sess_date_sorted).
    """
    if use_preprocess:
        # Fast path: reuse the pickled intermediates from a previous run.
        print('Loading preprocessed middle file...')
        sess_clicks = pickle.load(open('../data/mind/sess_clicks.mid.1', 'rb'))
        sess_date_sorted = pickle.load(open('../data/mind/sess_date_sorted.mid.1', 'rb'))
        sess_impressions = pickle.load(open('../data/mind/sess_impressions.mid.1', 'rb'))
    else:
        sess_clicks = {}      # session id -> list of (article_id, time_context)
        sess_date = {}        # session id -> click timestamp of first item
        sess_impressions = {} # session id -> shown-but-not-clicked article ids
        sess_userid = {}      # session id -> user id
        data1 = pd.read_csv((data_path + 'MIND_train/behaviors.tsv'), sep='\t', header=None)
        data1.columns = ['impressionId', 'userId', 'time', 'history', 'impressions']
        data2 = pd.read_csv((data_path + 'MIND_dev/behaviors.tsv'), sep='\t', header=None)
        data2.columns = ['impressionId', 'userId', 'time', 'history', 'impressions']
        data = pd.concat([data1, data2], ignore_index=True)
        data['time'] = pd.to_datetime(data['time'])
        print('Reloading origin {} users...'.format(len(data['userId'].unique().tolist())))
        sess_id = 1
        countMiss = 0
        MissSessionID = set()
        sessions = data['impressions'].tolist()
        userids = data['userId'].tolist()
        session_start_t = data['time'].tolist()
        # Sessions containing any of these article ids are discarded entirely
        # (hard-coded blacklist; reason not recorded here).
        badList = ['N51761', 'N18422', 'N36288', 'N74235']
        for (user_id, session, sess_time) in zip(userids, sessions, session_start_t):
            size_cnt = 0
            tmp_session = []
            tmp_impression = []
            isBad = False
            # Each impression entry has the form '<articleId>-<clickFlag>'.
            for item in session.split(' '):
                if (item.split('-')[0] in badList):
                    isBad = True
                if (item.split('-')[1] == '1'):
                    size_cnt += 1
                    tmp_session.append(item.split('-')[0])
                else:
                    tmp_impression.append(item.split('-')[0])
            # Keep only multi-click sessions that touched no blacklisted article.
            if ((len(tmp_session) > 1) and (not isBad)):
                for (index, click_article_id) in enumerate(tmp_session):
                    article_click_time = sess_time
                    article_publish_time = publish_time[click_article_id]
                    time_context = get_datetime(article_click_time, article_publish_time)
                    # A click that predates publication marks the whole session
                    # for removal below.
                    if (time_context['delta_h'] < 0):
                        countMiss += 1
                        MissSessionID.add(sess_id)
                    item = (click_article_id, time_context)
                    if (index >= 1):
                        sess_clicks[sess_id] += [item]
                    else:
                        # First click initialises all per-session records.
                        sess_userid[sess_id] = user_id
                        sess_impressions[sess_id] = tmp_impression
                        sess_clicks[sess_id] = [item]
                        sess_date[sess_id] = article_click_time
                sess_id += 1
        print('Length of session clicks: {}, number of bad case {}.'.format(len(sess_clicks), countMiss))
        # NOTE(review): `list(sess_clicks.items())[1]` selects the SECOND
        # (key, value) pair and iterates over that 2-tuple, printing the key
        # and then the value. Looks like debug output of one sample session —
        # confirm this is the intended slicing (vs. `[:1]`).
        for sess in list(sess_clicks.items())[1]:
            print(sess)
        # Drop sessions that are too short or were flagged as inconsistent.
        for s in list(sess_clicks):
            if (len(sess_clicks[s]) < filter_len):
                del sess_clicks[s]
                del sess_date[s]
                del sess_impressions[s]
            if ((s in MissSessionID) and (s in sess_clicks)):
                del sess_clicks[s]
                del sess_date[s]
                del sess_impressions[s]
        print('After filter out sessions whose length is less than {} and remove bad case, length of session clicks: {}.'.format(filter_len, len(sess_clicks)))
        sess_date_sorted = sorted(sess_date.items(), key=operator.itemgetter(1))
        print(sess_date_sorted[0])
        print(sess_date_sorted[(- 1)])
        print('Saving middle file...')
        pickle.dump(sess_clicks, open('../data/mind/sess_clicks.mid.1', 'wb'))
        pickle.dump(sess_date_sorted, open('../data/mind/sess_date_sorted.mid.1', 'wb'))
        pickle.dump(sess_impressions, open('../data/mind/sess_impressions.mid.1', 'wb'))
        pickle.dump(sess_userid, open('../data/mind/sess_userid.mid.1', 'wb'))
    return (sess_clicks, sess_date_sorted)
def main():
    """Train and evaluate an MLP on OGBN-papers100M node features.

    Loads either precomputed node features from ``data_dict.pt`` (optionally
    concatenated with node2vec embeddings) or SGC-propagated embeddings from
    ``sgc_dict.pt``, then performs ``--runs`` independent training runs and
    logs train/valid/test accuracy via ``Logger``.

    Raises:
        RuntimeError: if the required ``.pt`` file has not been generated yet.
        ValueError: if both ``--use_node_embedding`` and ``--use_sgc_embedding``
            are requested at the same time.
    """
    parser = argparse.ArgumentParser(description='OGBN-papers100M (MLP)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--use_node_embedding', action='store_true')
    parser.add_argument('--use_sgc_embedding', action='store_true')
    parser.add_argument('--num_sgc_iterations', type=int, default=3)
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--epochs', type=int, default=30)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    if not args.use_sgc_embedding:
        # Raw node features, optionally augmented with node2vec embeddings.
        try:
            data_dict = torch.load('data_dict.pt')
        except FileNotFoundError as err:
            # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
            # and unrelated errors propagate, and the cause is chained.
            raise RuntimeError('data_dict.pt not found. Need to run python node2vec.py first') from err
        x = data_dict['node_feat']
        split_idx = data_dict['split_idx']
        y = data_dict['label'].to(torch.long)
        if args.use_node_embedding:
            x = torch.cat([x, data_dict['node2vec_embedding']], dim=-1)
            print(x.shape)
    elif args.use_node_embedding:
        raise ValueError('No option to use node embedding and sgc embedding at the same time.')
    else:
        # SGC-propagated features at the requested number of propagation steps.
        try:
            sgc_dict = torch.load('sgc_dict.pt')
        except FileNotFoundError as err:
            raise RuntimeError('sgc_dict.pt not found. Need to run python sgc.py first') from err
        x = sgc_dict['sgc_embedding'][args.num_sgc_iterations]
        split_idx = sgc_dict['split_idx']
        y = sgc_dict['label'].to(torch.long)

    train_dataset = SimpleDataset(x[split_idx['train']], y[split_idx['train']])
    valid_dataset = SimpleDataset(x[split_idx['valid']], y[split_idx['valid']])
    test_dataset = SimpleDataset(x[split_idx['test']], y[split_idx['test']])
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    valid_loader = DataLoader(valid_dataset, batch_size=128, shuffle=False)
    test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)

    # 172 output classes — presumably the ogbn-papers100M label count; confirm
    # against the dataset metadata if this script is reused elsewhere.
    model = MLP(x.size(-1), args.hidden_channels, 172, args.num_layers, args.dropout).to(device)

    evaluator = Evaluator(name='ogbn-papers100M')
    logger = Logger(args.runs, args)
    for run in range(args.runs):
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for epoch in range(1, 1 + args.epochs):
            train(model, device, train_loader, optimizer)
            train_acc = test(model, device, train_loader, evaluator)
            valid_acc = test(model, device, valid_loader, evaluator)
            test_acc = test(model, device, test_loader, evaluator)
            logger.add_result(run, (train_acc, valid_acc, test_acc))
            if (epoch % args.log_steps) == 0:
                print(f'Run: {(run + 1):02d}, Epoch: {epoch:02d}, Train: {(100 * train_acc):.2f}%, Valid: {(100 * valid_acc):.2f}%, Test: {(100 * test_acc):.2f}%')
        logger.print_statistics(run)
    logger.print_statistics()
def tadgan_hyperparameters():
    """Return the TadGAN pipeline hyperparameters keyed by primitive name.

    The settings keep the run small (2 epochs, short windows), which is why
    this configuration is suitable for quick smoke tests.
    """
    aggregate_settings = {'interval': 1, 'time_column': 'timestamp'}
    window_settings = {'target_column': 0, 'window_size': 100, 'target_size': 1}
    tadgan_settings = {'epochs': 2, 'verbose': False}
    return {
        'mlstars.custom.timeseries_preprocessing.time_segments_aggregate#1': aggregate_settings,
        'mlstars.custom.timeseries_preprocessing.rolling_window_sequences#1': window_settings,
        'orion.primitives.tadgan.TadGAN#1': tadgan_settings,
    }
def test_arrow_struct_null():
    """A pyarrow struct array containing a null field value must round-trip
    through handle_arrow unchanged."""
    records = [{'x': 1, 'y': 1.1}, {'x': 2, 'y': None}, {'x': 3, 'y': 3.3}]
    arrow_array = pyarrow.array(records)
    converted = to_list(ak._connect.pyarrow.handle_arrow(arrow_array))
    assert converted == records
def register_Ns3UlCqiInfo_methods(root_module, cls):
    """Register the ns3::UlCqiInfo bindings: default/copy constructors and
    its public data members as mutable instance attributes."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UlCqiInfo const &', 'arg0')])
    # (attribute name, C++ type) pairs exposed on the wrapped struct.
    member_specs = (
        ('m_sinr', 'std::vector< double >'),
        ('m_type', 'ns3::UlCqiInfo::UlCqiType'),
    )
    for attr_name, cpp_type in member_specs:
        cls.add_instance_attribute(attr_name, cpp_type, is_const=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.