code
stringlengths
101
5.91M
class HomogenizationWorkerMultiMPI(HomogenizationWorkerMulti):
    def __call__(self, problem, options, post_process_hook, req_info, coef_info, micro_states, store_micro_idxs, chunks_per_worker, time_tag=''):
        """Compute homogenized requirements/coefficients using MPI workers.

        The master rank seeds a shared task queue with dependency-free tasks
        and runs the master loop; slave ranks pull tasks and compute them.
        Returns ``(dependencies, save_names)`` on the master and
        ``(dependencies, None)`` on slaves.
        """
        multiproc = multi.multiproc_mpi
        # Shared MPI-side containers.  NOTE(review): 'dependecies' is a typo
        # kept intact -- it is the key of the shared dict, not a comment.
        dependencies = multiproc.get_dict('dependecies', clear=True)
        save_names = multiproc.get_dict('save_names', clear=True)
        numdeps = multiproc.get_dict('numdeps', mutable=True, clear=True)
        remaining = multiproc.get_int_value('remaining', 0)
        tasks = multiproc.get_queue('tasks')
        if (micro_states is not None):
            # Split microstructure tasks into chunks distributed over workers.
            (micro_chunk_tab, req_info, coef_info) = self.chunk_micro_tasks(self.num_workers, len(micro_states['coors']), req_info, coef_info, chunks_per_worker, store_micro_idxs)
        else:
            micro_chunk_tab = None
        sorted_names = self.get_sorted_dependencies(req_info, coef_info, options.compute_only)
        # Build the inverse dependency graph and per-task dependency counts.
        inverse_deps = {}
        loc_numdeps = {}
        for name in sorted_names:
            if name.startswith('c.'):
                reqs = coef_info[name[2:]].get('requires', [])
            else:
                reqs = req_info[name].get('requires', [])
            loc_numdeps[name] = len(reqs)
            if (len(reqs) > 0):
                for req in reqs:
                    if (req in inverse_deps):
                        inverse_deps[req].append(name)
                    else:
                        inverse_deps[req] = [name]
        if (multiproc.mpi_rank == multiproc.mpi_master):
            # Master: publish dependency counts, enqueue ready tasks, run loop.
            for (k, v) in six.iteritems(loc_numdeps):
                numdeps[k] = v
            remaining.value = len(sorted_names)
            for name in sorted_names:
                if (numdeps[name] == 0):
                    tasks.put(name)
            multiproc.master_loop()
            multiproc.master_send_continue()
            if (micro_states is not None):
                # Reassemble per-chunk results into per-microstructure results.
                dependencies = self.dechunk_reqs_coefs(dependencies, len(micro_chunk_tab))
            multiproc.master_send_task('deps', dependencies)
            multiproc.master_send_continue()
            return (dependencies, save_names)
        else:
            # Slave: compute queued requirements until the master finishes,
            # then receive the final dependency dict from the master.
            lock = multiproc.RemoteLock()
            multiproc.slave_get_task('engine')
            self.calculate_req_multi(tasks, lock, remaining, numdeps, inverse_deps, problem, options, post_process_hook, req_info, coef_info, save_names, dependencies, micro_states, time_tag, micro_chunk_tab, str((multiproc.mpi_rank + 1)))
            multiproc.slave_task_done('engine')
            multiproc.wait_for_tag(multiproc.tags.CONTINUE)
            (task, deps) = multiproc.slave_get_task('get_deps')
            multiproc.wait_for_tag(multiproc.tags.CONTINUE)
            return (deps, None)
class TableauTuples(UniqueRepresentation, Parent):
    """Factory/parent class for tuples of tableaux of arbitrary level and size.

    ``__classcall_private__`` dispatches to the concrete parent matching the
    requested ``level``/``size``; level one degenerates to ordinary tableaux.
    """
    Element = TableauTuple
    level_one_parent_class = Tableaux_all
    options = Tableaux.options

    def __classcall_private__(cls, level=None, size=None):
        # NOTE(review): Sage conventionally decorates __classcall_private__
        # with @staticmethod; the decorator may have been lost in flattening.
        if (not ((level is None) or (level in PositiveIntegers()))):
            raise ValueError('the level must be a positive integer')
        if (not ((size is None) or (size in NN))):
            raise ValueError('the size must be a non-negative integer')
        if (level == 1):
            # Level-one tableau tuples are just ordinary tableaux.
            if (size is not None):
                return Tableaux_size(size)
            else:
                return Tableaux_all()
        elif ((level is not None) and (size is not None)):
            return TableauTuples_level_size(level=level, size=size)
        elif (level is not None):
            return TableauTuples_level(level=level)
        elif (size is not None):
            return TableauTuples_size(size=size)
        else:
            return TableauTuples_all()

    def _element_constructor_(self, t):
        """Construct an element of ``self`` from ``t``.

        Length-one results are demoted to the level-one parent class.
        """
        if (t not in self):
            raise ValueError(('%s is not an element of %s' % (t, self)))
        if ((t == []) or (t == [[]])):
            return self.level_one_parent_class().element_class(self.level_one_parent_class(), [])
        try:
            # Interpret t as a tuple of tableaux first...
            tab = [Tableau(s) for s in t]
        except (TypeError, ValueError):
            try:
                # ...and fall back to a single tableau.
                tab = [Tableau(t)]
            except ValueError:
                # NOTE(review): if both conversions fail, `tab` is unbound and
                # the next line raises NameError rather than the intended
                # ValueError -- confirm against upstream Sage.
                pass
        if (tab in self):
            if (len(tab) == 1):
                return self.level_one_parent_class().element_class(self.level_one_parent_class(), tab[0])
            else:
                return self.element_class(self, tab)
        raise ValueError(('%s is not an element of %s' % (t, self)))

    def __contains__(self, t):
        # Accept tableaux, tableau tuples, or raw (lists of) lists forming one.
        if isinstance(t, (Tableau, TableauTuple)):
            return True
        elif isinstance(t, (tuple, list)):
            return (all(((s in Tableaux()) for s in t)) or (t in Tableaux()))
        else:
            return False

    # Default level/size (None means "unrestricted").
    _level = None
    _size = None

    def level(self):
        """Return the (fixed) level of the tuples in ``self``, or None."""
        return self._level

    def size(self):
        """Return the (fixed) size of the tuples in ``self``, or None."""
        return self._size

    def list(self):
        """Return all elements as a list; only defined for finite parents."""
        if self.is_finite():
            return [y for y in self]
        else:
            raise NotImplementedError('this is an infinite set of tableaux')
def skipIfRocm(fn):
    """Decorator: skip the wrapped test when running on the ROCm stack.

    Fixes vs. original: the bare ``(fn)`` expression was a truncated
    ``@functools.wraps(fn)`` decorator (restored here so the wrapper keeps
    the test's name/metadata), and the wrapped function's return value is
    now propagated instead of being dropped.
    """
    import functools

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        # TEST_WITH_ROCM is a module-level flag set at import time.
        if TEST_WITH_ROCM:
            raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
        return fn(*args, **kwargs)
    return wrapper
def pprint(dump, hl=None):
    """Pretty-print a tab-separated dump, marking line ``hl`` with ``*``."""
    highlight = None if hl is None else int(hl)
    for idx, line in enumerate(dump.split('\n')):
        fields = line.split('\t')
        first = fields[0]
        second = fields[1] if len(fields) > 1 else ''
        marker = '*' if highlight == idx else ''
        print('{2}\t{0}\t{1}'.format(first, second, marker))
def test(input_to_constant=False, extensive=False):
    """Exercise the Convolution operator over several layer configurations.

    Each case runs ``evaluate`` in a fresh subprocess (isolating FPGA/SDK
    state) and asserts the reported error is below 1e-6.

    Fixes vs. original: a stray ``.fpga`` fragment preceded this ``def`` --
    almost certainly a truncated decorator (an FPGA test marker); it was a
    syntax error and has been removed.  TODO(review): restore the original
    decorator from upstream.  The repeated process boilerplate is factored
    into a helper.
    """
    def run_case(queue, eval_args):
        # One subprocess per case; the evaluation error comes back via queue.
        p = Process(target=evaluate, args=eval_args)
        p.start()
        p.join()
        assert (queue.get() < 1e-06)

    print(f' Testing Convolution (extensive: {extensive}) ')
    queue = Queue()
    run_case(queue, (1, 6, 5, 1, (100, 1, 28, 28), input_to_constant, False, queue))
    if extensive:
        run_case(queue, (10, 1, 5, 1, (100, 10, 20, 20), input_to_constant, False, queue))
        run_case(queue, (14, 8, 3, 1, (100, 14, 20, 20), input_to_constant, False, queue))
    run_case(queue, (1, 6, 5, 8, (100, 1, 28, 28), input_to_constant, False, queue))
    run_case(queue, (6, 16, 5, 8, (100, 6, 12, 12), input_to_constant, False, queue))
    if extensive:
        run_case(queue, (6, 4, 5, 4, (100, 6, 12, 12), input_to_constant, False, queue))
        run_case(queue, (3, 3, 3, 16, (100, 3, 34, 34), input_to_constant, False, queue))
    # Cases with a trailing extra argument use a fresh queue, as in the
    # original.
    queue = Queue()
    run_case(queue, (1, 6, 3, 1, (100, 1, 28, 28), input_to_constant, False, queue, 1))
    queue = Queue()
    run_case(queue, (1, 6, 3, 4, (100, 1, 28, 28), input_to_constant, False, queue, 1))
    if extensive:
        queue = Queue()
        run_case(queue, (1, 6, 5, 1, (100, 1, 12, 12), input_to_constant, False, queue, 2))
        queue = Queue()
        run_case(queue, (1, 6, 5, 2, (100, 1, 12, 12), input_to_constant, False, queue, 1))
    print(' Success! ')
def bleu_1(gold: str, pred: str) -> float:
    """Unigram-only BLEU between a gold string and a predicted string."""
    reference = [word_tokenize(gold)]
    hypothesis = word_tokenize(pred)
    unigram_weights = (1, 0, 0, 0)
    return sentence_bleu(reference, hypothesis, weights=unigram_weights)
def _separator(char, lengths): return [(char * separator_length) for separator_length in lengths]
def record_tabular_misc_stat(key, values, placement='back'):
    """Record Average/Std/Median/Min/Max of ``values`` under keys built from
    ``key`` (prefix when placement='back', suffix when 'front')."""
    if placement == 'front':
        prefix, suffix = '', key
    else:
        prefix, suffix = key, ''
    if len(values) > 0:
        stats = [
            ('Average', np.average(values)),
            ('Std', np.std(values)),
            ('Median', np.median(values)),
            ('Min', np.min(values)),
            ('Max', np.max(values)),
        ]
    else:
        # No samples: record NaN for every statistic.
        stats = [(name, np.nan) for name in ('Average', 'Std', 'Median', 'Min', 'Max')]
    for stat_name, stat_value in stats:
        record_tabular(prefix + stat_name + suffix, stat_value)
def load_depth(path):
    """Load a 16-bit PNG depth map at ``path`` as a float32 numpy array.

    Fixes vs. original: ``itertools.imap`` does not exist on Python 3 (this
    file uses f-strings elsewhere, so it targets Python 3); the rows are now
    materialized with a list comprehension, which also avoids passing a bare
    iterator to ``np.vstack``.
    """
    r = png.Reader(filename=path)
    # asDirect()[2] yields the image rows.
    rows = [np.uint16(row) for row in r.asDirect()[2]]
    return np.vstack(rows).astype(np.float32)
def get_ancestors(start_ops, end_ops=(), include_control_inputs=False):
    """Return the set of ops reachable backwards from ``start_ops``.

    Traversal follows each op's data inputs (and, when requested, control
    inputs) and stops expanding at any op in ``end_ops`` -- the boundary op
    itself is still included in the result.

    Fixes vs. original: the mutable default argument ``end_ops=[]`` is
    replaced with an immutable tuple, and a dead ``get_consumers(curr_op)``
    call (its result was never used) is removed.
    """
    ancestor_ops = set()
    queue = list(start_ops)
    while queue:
        curr_op = queue.pop()
        if curr_op in ancestor_ops:
            continue
        ancestor_ops.add(curr_op)
        if curr_op in end_ops:
            continue  # do not walk past a declared boundary op
        queue.extend(inp.op for inp in curr_op.inputs)
        if include_control_inputs:
            queue.extend(curr_op.control_inputs)
    return ancestor_ops
class Room():
    """Plain container for a room's position, size and its two door positions."""

    def __init__(self, top, size, entryDoorPos, exitDoorPos):
        # Store the geometry exactly as supplied by the maze builder.
        (self.top, self.size) = (top, size)
        (self.entryDoorPos, self.exitDoorPos) = (entryDoorPos, exitDoorPos)
class Vertex():
    """Vertex of a simplicial complex: caches its objective value,
    feasibility w.r.t. constraints, and a set of neighbour vertices."""

    def __init__(self, x, bounds=None, func=None, func_args=(), g_cons=None, g_cons_args=(), nn=None, index=None):
        self.x = x
        self.order = sum(x)
        x_a = np.array(x, dtype=float)
        if (bounds is not None):
            # Map unit-cube coordinates into the supplied [lb, ub] bounds.
            for (i, (lb, ub)) in enumerate(bounds):
                x_a[i] = ((x_a[i] * (ub - lb)) + lb)
        self.x_a = x_a
        if (func is not None):
            self.feasible = True
            if (g_cons is not None):
                # Any violated constraint (g < 0) marks the vertex infeasible
                # and pins f to +inf so it never looks like a minimiser.
                for (g, args) in zip(g_cons, g_cons_args):
                    if (g(self.x_a, *args) < 0.0):
                        self.f = np.inf
                        self.feasible = False
                        break
            if self.feasible:
                self.f = func(x_a, *func_args)
        if (nn is not None):
            self.nn = nn
        else:
            self.nn = set()
        self.fval = None
        # check_min: whether the cached minimiser flag must be recomputed.
        self.check_min = True
        if (index is not None):
            self.index = index

    def __hash__(self):
        return hash(self.x)

    def connect(self, v):
        """Symmetrically link this vertex with ``v`` and invalidate caches."""
        if ((v is not self) and (v not in self.nn)):
            self.nn.add(v)
            v.nn.add(self)
            # NOTE(review): v.check_min is set False here but immediately
            # re-set True two lines below, so the False assignment looks
            # dead -- kept byte-identical; confirm against upstream.
            if self.minimiser():
                v._min = False
                v.check_min = False
            self.check_min = True
            v.check_min = True

    def disconnect(self, v):
        """Remove the symmetric link with ``v``; both caches are invalidated."""
        if (v in self.nn):
            self.nn.remove(v)
            v.nn.remove(self)
            self.check_min = True
            v.check_min = True

    def minimiser(self):
        """True iff this vertex's f is strictly below all neighbours' (cached)."""
        if self.check_min:
            self._min = all(((self.f < v.f) for v in self.nn))
            self.check_min = False
        return self._min

    def print_out(self):
        """Debug dump: coordinates, neighbour coordinates, and order."""
        print('Vertex: {}'.format(self.x))
        constr = 'Connections: '
        for vc in self.nn:
            constr += '{} '.format(vc.x)
        print(constr)
        print('Order = {}'.format(self.order))
def adaptive_avg_pool2d(input, output_size):
    """Apply 2D adaptive average pooling producing ``output_size`` output.

    A single int (or partial spec) in ``output_size`` is expanded against
    the input's size via ``_list_with_default`` before dispatching to the
    native kernel.
    """
    full_size = _list_with_default(output_size, input.size())
    return torch._C._nn.adaptive_avg_pool2d(input, full_size)
class KitchenMicrowaveKettleLightTopLeftBurnerV0(KitchenBase):
    # The four kitchen subtasks composing this environment variant.
    TASK_ELEMENTS = ['microwave', 'kettle', 'light switch', 'top left burner']
    # Completed elements are removed from the remaining-task set
    # (behavior implemented by KitchenBase).
    REMOVE_TASKS_WHEN_COMPLETE = True
def evaluate(args, agents, ob_rms, env_name, seed, num_processes, eval_log_dir, device, n_agent, out_file):
    """Run evaluation episodes in AgarEnv with deterministic policies.

    Collects per-agent episode rewards until 10 have been seen, logging
    observations/actions to stdout and ``out_file``.  Returns the mean
    episode reward.
    """
    e_env = AgarEnv(args, eval=True)
    eval_episode_rewards = []
    obs = e_env.reset()
    # Observations are keyed 't0'..'t{n-1}', one entry per agent.
    for i in range(n_agent):
        obs[('t' + str(i))] = torch.Tensor(obs[('t' + str(i))]).to(device)
    eval_recurrent_hidden_states = [torch.zeros(1, agents[0].actor_critic.recurrent_hidden_state_size, device=device) for i in range(n_agent)]
    eval_masks = [torch.zeros(1, 1, device=device) for i in range(n_agent)]
    action = [[] for i in range(n_agent)]
    step = 0
    while (len(eval_episode_rewards) < 10):
        step += 1
        # NOTE(review): `render` is not a parameter or local -- it is
        # presumably a module-level flag; confirm it is defined at import.
        if render:
            e_env.render(0, mode='rgb_array', name=str(len(eval_episode_rewards)))
        for i in range(n_agent):
            with torch.no_grad():
                # All agents share agents[0]'s actor_critic (self-play eval).
                (_, action[i], r, eval_recurrent_hidden_states[i]) = agents[0].actor_critic.act(obs[('t' + str(i))].reshape(1, (- 1)), eval_recurrent_hidden_states[i], eval_masks[i], deterministic=True)
        (obs, r, done, infos) = e_env.step(torch.cat(action, (- 1)).reshape((- 1)).cpu())
        if (len(eval_episode_rewards) == 0):
            # Only log the very first episode's details.
            print(obs['t0'][(- 5):])
            out_file.write((str(obs['t0'][(- 5):]) + '\n'))
            print('action & reward in evaluation', action, r)
            out_file.write((((('action & reward in evaluation ' + str(action)) + ' ') + str(r)) + '\n'))
        for i in range(n_agent):
            obs[('t' + str(i))] = torch.Tensor(obs[('t' + str(i))]).to(device)
        for i in range(n_agent):
            # Mask resets the recurrent state for agents whose episode ended.
            eval_masks[i] = torch.tensor(([0.0] if done[i] else [1.0]), dtype=torch.float32, device=device)
        for i in range(n_agent):
            if ('episode' in infos[i].keys()):
                eval_episode_rewards.append(infos[i]['episode']['r'])
        done = (np.array(done) != 0).all()
        if done:
            # All agents finished: rebuild the environment for the next episode.
            step = 0
            e_env.close()
            e_env = AgarEnv(args, eval=True)
            obs = e_env.reset()
            for i in range(n_agent):
                obs[('t' + str(i))] = torch.Tensor(obs[('t' + str(i))]).to(device)
    ss = ' Evaluation using {} episodes: mean reward {:.5f}\n'.format(len(eval_episode_rewards), np.mean(eval_episode_rewards))
    print(ss)
    out_file.write((ss + '\n'))
    print('var: ', (np.var(eval_episode_rewards) / 10))
    out_file.write((('var: ' + str((np.var(eval_episode_rewards) / 10))) + '\n'))
    print(eval_episode_rewards)
    out_file.write((str(eval_episode_rewards) + '\n'))
    return np.mean(eval_episode_rewards)
def generate_spec(scenario, model, tokenizer, num_prompt_tokens, num_output_tokens, random):
    """Render one run-spec entry string for the given scenario parameters.

    When ``random`` is not None it is appended as an extra ``random=``
    argument.
    """
    suffix = f',random={random}' if random is not None else ''
    return f'"{scenario}:model={model},tokenizer={tokenizer},num_prompt_tokens={num_prompt_tokens},num_output_tokens={num_output_tokens}{suffix}": {{priority: 1}}'
class DemoTransformationTest(unittest.TestCase):
    """Tests for DemoTransformation's parsing and tokenisation of Java code."""

    def setUp(self) -> None:
        # Compile every bundled tree-sitter grammar into one shared library.
        sitter_lib_path = 'sitter-libs'
        grammar_dirs = [os.path.join(sitter_lib_path, entry) for entry in os.listdir(sitter_lib_path)]
        tree_sitter.Language.build_library('parser/languages.so', grammar_dirs)

    def test_parsing(self):
        """parse_code should return a tree-sitter root node."""
        code = '\n class A {\n public void foo(){\n int i=0;\n }\n }\n '
        transformer = DemoTransformation(parser='parser/languages.so', language='java')
        root_node = transformer.parse_code(code)
        self.assertTrue(isinstance(root_node, tree_sitter.Node))

    def test_tokens(self):
        """get_tokens_with_node_type should yield the expected token stream."""
        code = '\n class A {\n public void foo(){\n int i=0;\n }\n }\n '
        expected_tokens = 'class A { public void foo ( ) { int i = 0 ; } }'.split()
        transformer = DemoTransformation(parser='parser/languages.so', language='java')
        root_node = transformer.parse_code(code)
        (actual_tokens, _) = transformer.get_tokens_with_node_type(code.encode(), root_node)
        self.assertListEqual(actual_tokens, expected_tokens)
def register_Ns3FdNetDeviceFdReader_methods(root_module, cls):
    """Register Python-binding methods for ns3::FdNetDeviceFdReader
    (generated binding-registration code)."""
    # Copy constructor.
    cls.add_constructor([param('ns3::FdNetDeviceFdReader const &', 'arg0')])
    # Default constructor.
    cls.add_constructor([])
    cls.add_method('SetBufferSize', 'void', [param('uint32_t', 'bufferSize')])
    # Private virtual read hook.
    cls.add_method('DoRead', 'ns3::FdReader::Data', [], visibility='private', is_virtual=True)
    return
def rsync(src, dst):
    """Run an archive-mode rsync from ``src`` to ``dst``, echoing the command."""
    cmd = f'rsync -a {src} {dst}'
    print(cmd)
    run_command(cmd)
def filter_long_ex(dataset, use_span_clip, allowed_spanlen, notanfeid):
    """Drop (or, when ``use_span_clip``, clip) training examples containing
    FE spans longer than ``allowed_spanlen``; logs statistics to stderr.

    Returns the filtered dataset.
    """
    if (not use_span_clip):
        sys.stderr.write((('\nfiltering out training examples with spans longer than ' + str(allowed_spanlen)) + '...\n'))
    else:
        sys.stderr.write((('\nclipping spans longer than ' + str(allowed_spanlen)) + '...\n'))
    longestspan = 0
    longestfespan = 0
    tmpdataset = []
    for ex in dataset:
        haslongfe = False
        for feid in ex.invertedfes:
            # Per-FE flag: did any of this FE's spans exceed the limit?
            haslongspans = False
            for span in ex.invertedfes[feid]:
                spanlen = ((span[1] - span[0]) + 1)
                if (spanlen > allowed_spanlen):
                    haslongspans = True
                    haslongfe = True
                if (spanlen > longestspan):
                    longestspan = spanlen
                # Track the longest span of a *real* FE separately.
                if ((feid != notanfeid) and (spanlen > longestfespan)):
                    longestfespan = spanlen
            if (haslongspans and use_span_clip):
                clip_long_spans(ex.invertedfes[feid], allowed_spanlen)
        if (haslongfe and (not use_span_clip)):
            continue  # drop the whole example when not clipping
        tmpdataset.append(ex)
    sys.stderr.write((('longest span size: ' + str(longestspan)) + '\n'))
    sys.stderr.write((('longest FE span size: ' + str(longestfespan)) + '\n'))
    sys.stderr.write((('# train examples before filter: ' + str(len(dataset))) + '\n'))
    sys.stderr.write((('# train examples after filter: ' + str(len(tmpdataset))) + '\n\n'))
    return tmpdataset
def draw_circle(d, r, loc, color='white'):
    """Draw a filled circle of radius ``r`` centred at ``loc`` = (y, x).

    ``color`` may be a PIL color name (str) or an RGB(A) sequence.

    Fixes vs. original: ``tuple(color)`` was applied unconditionally, which
    exploded the default string ``'white'`` into ``('w','h','i','t','e')``
    -- a value PIL rejects.  String colors are now passed through unchanged;
    only sequence colors are converted to a tuple.
    """
    (y, x) = (loc[0], loc[1])
    fill = color if isinstance(color, str) else tuple(color)
    d.ellipse(((x - r), (y - r), (x + r), (y + r)), fill=fill)
def default_ids(n_layers):
    """Default layer id strings: input 'x', output 'y', hidden 't_{l}'."""
    names = ['t_{}'.format(layer) for layer in range(n_layers)]
    names[0] = 'x'
    if n_layers > 1:
        names[-1] = 'y'
    return names
class AverageMeter(object):
    """Track a running average of scalar values (tensors are .item()-ed)."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in ``val``, counted as ``n`` samples.

        NOTE(review): ``sum`` grows by ``val`` while ``count`` grows by
        ``n``, so for n != 1 the average treats ``val`` as a pre-summed
        total -- preserved exactly as in the original.
        """
        if isinstance(val, torch.Tensor):
            val = val.item()
        self.val = val / n
        self.sum += val
        self.count += n
        self.avg = self.sum / self.count
class Linear(torch.nn.Module):
    r"""Quantized linear layer operating on quantized input tensors.

    Weights/bias live in ``_packed_params``; ``scale``/``zero_point``
    describe the output quantization.
    """

    # State-dict serialization version; see _load_from_state_dict migration.
    _version = 3
    _FLOAT_MODULE = nn.Linear

    def __init__(self, in_features, out_features, bias_=True, dtype=torch.qint8):
        super(Linear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        bias = None
        if bias_:
            bias = torch.zeros(out_features, dtype=torch.float)
        if (dtype == torch.qint8):
            # Placeholder weight; real values arrive via set_weight_bias.
            qweight = torch._empty_affine_quantized([out_features, in_features], scale=1, zero_point=0, dtype=torch.qint8)
        elif (dtype == torch.float16):
            qweight = torch.zeros([out_features, in_features], dtype=torch.float)
        else:
            raise RuntimeError('Unsupported dtype specified for quantized Linear!')
        self._packed_params = LinearPackedParams(dtype)
        self._packed_params.set_weight_bias(qweight, bias)
        self.scale = 1.0
        self.zero_point = 0

    def _get_name(self):
        return 'QuantizedLinear'

    def extra_repr(self):
        return 'in_features={}, out_features={}, scale={}, zero_point={}, qscheme={}'.format(self.in_features, self.out_features, self.scale, self.zero_point, self.weight().qscheme())

    def __repr__(self):
        return hide_packed_params_repr(self, LinearPackedParams)

    def forward(self, x):
        """Run the quantized linear kernel on quantized input ``x``."""
        return torch.ops.quantized.linear(x, self._packed_params._packed_params, self.scale, self.zero_point)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # scale/zero_point are plain Python numbers; persist them as tensors.
        super(Linear, self)._save_to_state_dict(destination, prefix, keep_vars)
        destination[(prefix + 'scale')] = torch.tensor(self.scale)
        destination[(prefix + 'zero_point')] = torch.tensor(self.zero_point)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        self.scale = float(state_dict[(prefix + 'scale')])
        state_dict.pop((prefix + 'scale'))
        self.zero_point = int(state_dict[(prefix + 'zero_point')])
        state_dict.pop((prefix + 'zero_point'))
        version = local_metadata.get('version', None)
        if ((version is None) or (version == 1)):
            # Migrate v1 layouts: weight/bias moved under _packed_params.
            weight = state_dict.pop((prefix + 'weight'))
            bias = state_dict.pop((prefix + 'bias'))
            state_dict.update({(prefix + '_packed_params.weight'): weight, (prefix + '_packed_params.bias'): bias})
        super(Linear, self)._load_from_state_dict(state_dict, prefix, local_metadata, False, missing_keys, unexpected_keys, error_msgs)

    def _weight_bias(self):
        return self._packed_params._weight_bias()

    def weight(self):
        return self._weight_bias()[0]

    def bias(self):
        return self._weight_bias()[1]

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        self._packed_params.set_weight_bias(w, b)

    @classmethod
    def from_float(cls, mod):
        """Create a quantized module from an observed float module.

        Fix vs. original: this method uses ``cls`` throughout and is
        invoked on the class, so it must be a ``@classmethod`` -- the
        decorator was missing in the flattened source and is restored here.
        """
        if hasattr(mod, 'weight_fake_quant'):
            # QAT path: observers already attached by fake-quant modules.
            weight_post_process = mod.weight_fake_quant
            activation_post_process = mod.activation_post_process
        else:
            assert (type(mod) == cls._FLOAT_MODULE), (((' nnq.' + cls.__name__) + '.from_float only works for ') + cls._FLOAT_MODULE.__name__)
            assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
            if (type(mod) == nni.LinearReLU):
                # Fused Linear+ReLU: observe the ReLU's output, quantize the Linear.
                activation_post_process = mod[1].activation_post_process
                mod = mod[0]
            else:
                activation_post_process = mod.activation_post_process
            weight_post_process = mod.qconfig.weight()
        weight_post_process(mod.weight)
        dtype = weight_post_process.dtype
        (act_scale, act_zp) = activation_post_process.calculate_qparams()
        assert (dtype == torch.qint8), 'Weight observer must have dtype torch.qint8'
        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
        qlinear = cls(mod.in_features, mod.out_features, dtype=dtype)
        qlinear.set_weight_bias(qweight, mod.bias)
        qlinear.scale = float(act_scale)
        qlinear.zero_point = int(act_zp)
        return qlinear
def create_lmdb_for_gopro():
    """Build lmdb archives for the GoPro dataset (train crops, test pairs).

    Each source folder produces a sibling ``<folder>.lmdb`` archive.
    """
    folders = [
        './datasets/GoPro/train/blur_crops',
        './datasets/GoPro/train/sharp_crops',
        './datasets/GoPro/test/target',
        './datasets/GoPro/test/input',
    ]
    for folder_path in folders:
        lmdb_path = folder_path + '.lmdb'
        (img_path_list, keys) = prepare_keys(folder_path, 'png')
        make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
def get_compiled_model(model, steps_per_execution):
    """Compile ``model`` with RMSprop, JointsMSE loss and a PCK metric."""
    optimizer = tf.keras.optimizers.RMSprop()
    model.compile(
        optimizer=optimizer,
        loss=JointsMSE(),
        metrics=[PercentageOfCorrectKeypoints()],
        steps_per_execution=steps_per_execution,
    )
    return model
def get_sql_inference_query(model, table_name, round_digits=3, round_features=5, output_name='PROB', alias='WOE_TAB', bypass_encoded=True, template=None, nan_pattern_numbers="({0} IS NULL OR {0} = 'NaN')", nan_pattern_category="({0} IS NULL OR LOWER(CAST({0} AS VARCHAR(50))) = 'nan')", preprocessing=None, mark_values=None, mark_encoding=None):
    """Build the full SQL inference query for ``model`` over ``table_name``:
    an encoding subquery wrapped by the model-weights scoring query.

    ``template='td'`` switches the NULL/NaN detection patterns to
    Teradata-compatible ones.
    """
    assert ((template in ['td']) or (template is None)), 'Unknown template'
    if (template == 'td'):
        # Teradata: plain IS NULL checks instead of the 'NaN' comparisons.
        nan_pattern_numbers = '{0} IS NULL'
        nan_pattern_category = '{0} IS NULL'
    # Inner encoded table, parenthesized so it can be used as a subquery.
    encode_table = '({0})'.format(get_encoded_table(model, table_name, round_digits, round_features, nan_pattern_numbers, nan_pattern_category, preprocessing, mark_values, mark_encoding))
    encode_table = ('\n ' + set_indent(encode_table))
    query = get_weights_query(model, encode_table, output_name=output_name, bypass_encoded=bypass_encoded, alias=alias, round_wts=round_digits)
    return query
class AnnotatedNestedModel(torch.nn.Module):
    """Nested quantization-test model: sub1 (linear+relu), sub2 (two linear
    layers with fc1 wrapped and annotated) and an annotated fc3."""

    def __init__(self, qengine):
        super().__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = TwoLayerLinearModel()
        self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
        self.fc3.qconfig = default_qconfig
        self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
        # fbgemm supports per-channel weight quantization; others fall back.
        self.sub2.fc1.qconfig = (default_per_channel_qconfig if qengine == 'fbgemm' else default_qconfig)

    def forward(self, x):
        out = self.sub1(x)
        out = self.sub2(out)
        return self.fc3(out)
def encode_sequence(x, alphabet):
    """Uppercase and UTF-8-encode string ``x``, then encode it via ``alphabet``."""
    raw = x.encode('utf-8').upper()
    return alphabet.encode(raw)
def task_mlm_itm_mpp():
    """Experiment config: joint MLM + ITM + MPP pretraining.

    Fixes vs. original: a stray ``_config`` token preceded this ``def`` --
    almost certainly a truncated config-registering decorator (e.g. a
    sacred named config); it was a syntax/NameError hazard and has been
    removed.  TODO(review): restore the original decorator from upstream.
    The locals below are read by the config framework, not returned.
    """
    exp_name = 'mlm_itm_mpp'
    datasets = ['coco', 'vg', 'sbu', 'gcc']
    loss_names = _loss_names({'itm': 1, 'mlm': 1, 'mpp': 1})
    batch_size = 4096
    max_epoch = 10
    max_image_len = -1
def adjust_learning_rate(optimizer, args):
    """Linear warmup followed by polynomial decay; updates both
    ``args.running_lr`` and every optimizer param group's lr."""
    if args.cur_iter < args.warmup_iters:
        # Linear ramp from warmup_lr up to lr.
        progress = args.cur_iter / args.warmup_iters
        args.running_lr = args.warmup_lr + (args.lr - args.warmup_lr) * progress
    else:
        # Polynomial ("poly") decay after warmup, clamped at zero.
        progress = (float(args.cur_iter) - args.warmup_iters) / (args.max_iters - args.warmup_iters)
        args.running_lr = args.lr * max(1.0 - progress, 0.0) ** args.lr_pow
    for group in optimizer.param_groups:
        group['lr'] = args.running_lr
def to_sparse_tensor(M, value=False):
    """Convert matrix ``M`` to a tf.SparseTensor (or SparseTensorValue)."""
    M = sp.coo_matrix(M)
    indices = np.vstack((M.row, M.col)).T
    sparse_cls = tf.SparseTensorValue if value else tf.SparseTensor
    return sparse_cls(indices, M.data, M.shape)
class CFiniteSequences_generic(Parent, UniqueRepresentation):
    """Ring of C-finite sequences, represented by ordinary generating
    functions (rational functions over the base polynomial ring)."""
    Element = CFiniteSequence

    def __init__(self, polynomial_ring, category):
        base_ring = polynomial_ring.base_ring()
        self._polynomial_ring = polynomial_ring
        # Elements are stored as elements of this fraction field (their OGF).
        self._fraction_field = FractionField(self._polynomial_ring)
        if (category is None):
            category = Rings().Commutative()
        Parent.__init__(self, base_ring, names=self._polynomial_ring.gens(), category=category)

    def _repr_(self):
        return 'The ring of C-Finite sequences in {} over {}'.format(self.gen(), self.base_ring())

    def _element_constructor_(self, ogf):
        """Construct a sequence from its ordinary generating function."""
        ogf = self.fraction_field()(ogf)
        return self.element_class(self, ogf)

    def ngens(self) -> int:
        """There is exactly one generator."""
        return 1

    def gen(self, i=0):
        if i:
            raise ValueError('{} has only one generator (i=0)'.format(self))
        return self.polynomial_ring().gen()

    def gens(self) -> tuple:
        return (self.gen(0),)

    def an_element(self):
        # The Lucas-like sequence with OGF (2 - x) / (1 - x - x^2).
        x = self.gen()
        return self(((2 - x) / ((1 - x) - (x ** 2))))

    def __contains__(self, x) -> bool:
        return self.has_coerce_map_from(parent(x))

    def fraction_field(self):
        return self._fraction_field

    def polynomial_ring(self):
        return self._polynomial_ring

    def _coerce_map_from_(self, S):
        # Anything coercing into the fraction field coerces into self.
        if self.fraction_field().has_coerce_map_from(S):
            return True

    def from_recurrence(self, coefficients, values):
        """Build a sequence from a linear recurrence and initial values.

        ``coefficients`` are the recurrence coefficients, ``values`` the
        starting values; the OGF num/den are assembled directly.
        """
        if (not isinstance(coefficients, list)):
            raise ValueError('Wrong type for recurrence coefficient list.')
        if (not isinstance(values, list)):
            raise ValueError('Wrong type for recurrence start value list.')
        deg = len(coefficients)
        co = (coefficients[::(- 1)] + ([0] * (len(values) - deg)))
        R = self.polynomial_ring()
        den = R(([(- 1)] + co[:deg]))
        num = R(([(- values[0])] + [((- values[n]) + sum(((values[k] * co[((n - 1) - k)]) for k in range(n)))) for n in range(1, len(values))]))
        return self((num / den))

    def guess(self, sequence, algorithm='sage'):
        """Guess a C-finite sequence matching ``sequence``.

        Algorithms: 'bm' (Berlekamp-Massey), 'pari' (gp lattice heuristic),
        or the default 'sage' kernel-based method.  Returns 0 on failure.
        """
        S = self.polynomial_ring()
        if (algorithm == 'bm'):
            from sage.matrix.berlekamp_massey import berlekamp_massey
            if (len(sequence) < 2):
                raise ValueError('sequence too short for guessing')
            R = PowerSeriesRing(QQ, 'x')
            # Berlekamp-Massey needs an even number of terms.
            if (len(sequence) % 2):
                sequence.pop()
            l = (len(sequence) - 1)
            denominator = S(berlekamp_massey(sequence).reverse())
            numerator = R((S(sequence) * denominator), prec=l).truncate()
            return CFiniteSequence((numerator / denominator))
        if (algorithm == 'pari'):
            global _gp
            if (len(sequence) < 6):
                raise ValueError('sequence too short for guessing')
            if (_gp is None):
                # Lazily start a gp interpreter and define the ggf helper.
                _gp = Gp()
                _gp('ggf(v)=local(l,m,p,q,B);l=length(v);B=floor(l/2); if(B<3,return(0));m=matrix(B,B,x,y,v[x-y+B+1]); q=qflll(m,4)[1];if(length(q)==0,return(0)); p=sum(k=1,B,x^(k-1)*q[k,1]); q=Pol(Pol(vector(l,n,v[l-n+1]))*p+O(x^(B+1))); if(polcoeff(p,0)<0,q=-q;p=-p);q=q/p;p=Ser(q+O(x^(l+1))); for(m=1,l,if(polcoeff(p,m-1)!=v[m],return(0)));q')
            _gp.set('gf', sequence)
            _gp('gf=ggf(gf)')
            num = S(sage_eval(_gp.eval('Vec(numerator(gf))'))[::(- 1)])
            den = S(sage_eval(_gp.eval('Vec(denominator(gf))'))[::(- 1)])
            if (num == 0):
                return 0
            return CFiniteSequence((num / den))
        # Default 'sage' algorithm: Hankel-matrix kernel.
        from sage.matrix.constructor import matrix
        from sage.arith.misc import integer_ceil as ceil
        from numpy import trim_zeros
        seq = sequence[:]
        # NOTE(review): this tests sequence[-1] (unchanged) rather than
        # seq[-1], so one trailing zero empties seq entirely -- likely a
        # typo for seq[-1]; kept byte-identical, confirm against upstream.
        while (seq and (sequence[(- 1)] == 0)):
            seq.pop()
        l = len(seq)
        if (l == 0):
            return 0
        if (l < 6):
            raise ValueError('sequence too short for guessing')
        hl = ceil((ZZ(l) / 2))
        A = matrix([sequence[k:(k + hl)] for k in range(hl)])
        K = A.kernel()
        if (K.dimension() == 0):
            return 0
        R = PolynomialRing(QQ, 'x')
        den = R(trim_zeros(K.basis()[(- 1)].list()[::(- 1)]))
        if (den == 1):
            return 0
        # Skip leading zeros of the sequence when choosing the precision.
        offset = next((i for (i, x) in enumerate(sequence) if x), None)
        S = PowerSeriesRing(QQ, 'x', default_prec=(l - offset))
        num = S((R(sequence) * den)).truncate(((ZZ(l) // 2) + 1))
        # Verify the guessed OGF actually reproduces the input sequence.
        if ((num == 0) or (sequence != S((num / den)).list())):
            return 0
        return CFiniteSequence((num / den))
class Config():
    """Path/name constants used by the surrounding helpers."""
    # NOTE(review): values look like directory or package names relative to
    # `root` -- confirm against the code that consumes this config.
    root = '.'
    meka = 'meka'
    skmultilearn = 'skmultilearn'
    tests = 'tests'
    utils = 'utils'
def clean_dir(dir_path):
    """Delete every file matching ``dir_path/*.*`` (non-recursive)."""
    for path in glob.glob(dir_path + '/*.*'):
        os.remove(path)
def register_Ns3SimpleRefCount__Ns3LteHarqPhy_Ns3Empty_Ns3DefaultDeleter__lt__ns3LteHarqPhy__gt___methods(root_module, cls):
    """Register Python-binding constructors for the SimpleRefCount<LteHarqPhy>
    instantiation (generated binding-registration code)."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::LteHarqPhy, ns3::empty, ns3::DefaultDeleter< ns3::LteHarqPhy > > const &', 'o')])
    return
class MaxClipGradScaler(GradScaler):
    """AMP GradScaler whose loss scale is capped at ``max_scale``."""

    def __init__(self, init_scale, max_scale: float, growth_interval=100):
        super().__init__(init_scale=init_scale, growth_interval=growth_interval)
        self.max_scale = max_scale

    def scale_clip(self):
        """Freeze scale growth at the cap; clamp back down on overshoot."""
        if (self.get_scale() == self.max_scale):
            self.set_growth_factor(1)
        elif (self.get_scale() < self.max_scale):
            self.set_growth_factor(2)
        elif (self.get_scale() > self.max_scale):
            self._scale.fill_(self.max_scale)
            self.set_growth_factor(1)

    def scale(self, outputs):
        """Multiply ``outputs`` (tensor or nested iterable of CUDA tensors)
        by the current loss scale, clipping the scale first."""
        if (not self._enabled):
            return outputs
        self.scale_clip()
        if isinstance(outputs, torch.Tensor):
            assert outputs.is_cuda
            if (self._scale is None):
                self._lazy_init_scale_growth_tracker(outputs.device)
            assert (self._scale is not None)
            return (outputs * self._scale.to(device=outputs.device, non_blocking=True))
        # One-element stash so the per-device replicator is built lazily on
        # the first tensor encountered.
        stash: List[_MultiDeviceReplicator] = []

        def apply_scale(val):
            if isinstance(val, torch.Tensor):
                assert val.is_cuda
                if (len(stash) == 0):
                    if (self._scale is None):
                        self._lazy_init_scale_growth_tracker(val.device)
                    assert (self._scale is not None)
                    stash.append(_MultiDeviceReplicator(self._scale))
                return (val * stash[0].get(val.device))
            elif isinstance(val, container_abcs.Iterable):
                # Recurse into nested containers, preserving list/tuple type;
                # other iterables come back as a lazy map object.
                iterable = map(apply_scale, val)
                if (isinstance(val, list) or isinstance(val, tuple)):
                    return type(val)(iterable)
                else:
                    return iterable
            else:
                raise ValueError('outputs must be a Tensor or an iterable of Tensors')
        return apply_scale(outputs)
class CNNModelWithMaxPooling(Model):
    """CNN model in which every convolutional layer is followed by max
    pooling; hyperparameters are stashed at construction and consumed by
    ``_build``."""

    def __init__(self, filters, strides, name=None, padding='SAME', pool_strides=(2, 2), pool_shapes=(2, 2), hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer()):
        super().__init__(name)
        # Convolution hyperparameters.
        self._filters = filters
        self._strides = strides
        self._padding = padding
        # Pooling hyperparameters.
        self._pool_strides = pool_strides
        self._pool_shapes = pool_shapes
        # Layer initialisation / activation.
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init

    def _build(self, state_input, name=None):
        """Build the pooled CNN graph over ``state_input``; ``name`` unused."""
        del name
        return cnn_with_max_pooling(
            input_var=state_input,
            filters=self._filters,
            hidden_nonlinearity=self._hidden_nonlinearity,
            hidden_w_init=self._hidden_w_init,
            hidden_b_init=self._hidden_b_init,
            strides=self._strides,
            padding=self._padding,
            pool_shapes=self._pool_shapes,
            pool_strides=self._pool_strides,
            name='cnn')
class Parser(BaseParser):
    """Biaffine dependency parser head: arc and relation scoring built on
    top of the shared recurrent encoder from BaseParser."""

    def __call__(self, vocabs, moving_params=None):
        top_recur = super(Parser, self).__call__(vocabs, moving_params=moving_params)
        int_tokens_to_keep = tf.to_int32(self.tokens_to_keep)
        with tf.variable_scope('MLP'):
            # One shared MLP, split into arc/rel halves for dep and head roles.
            (dep_mlp, head_mlp) = self.MLP(top_recur, (self.arc_mlp_size + self.rel_mlp_size), n_splits=2)
            (arc_dep_mlp, rel_dep_mlp) = tf.split(dep_mlp, [self.arc_mlp_size, self.rel_mlp_size], axis=2)
            (arc_head_mlp, rel_head_mlp) = tf.split(head_mlp, [self.arc_mlp_size, self.rel_mlp_size], axis=2)
        with tf.variable_scope('Arc'):
            # Biaffine arc scoring (one score per head candidate).
            arc_logits = self.bilinear(arc_dep_mlp, arc_head_mlp, 1, add_bias2=False)
            arc_probs = tf.nn.softmax(arc_logits)
            arc_preds = tf.to_int32(tf.argmax(arc_logits, axis=(- 1)))
            arc_targets = self.vocabs['heads'].placeholder
            arc_correct = (tf.to_int32(tf.equal(arc_preds, arc_targets)) * int_tokens_to_keep)
            arc_loss = tf.losses.sparse_softmax_cross_entropy(arc_targets, arc_logits, self.tokens_to_keep)
        with tf.variable_scope('Rel'):
            rel_logits = self.bilinear(rel_dep_mlp, rel_head_mlp, len(self.vocabs['rels']))
            rel_probs = tf.nn.softmax(rel_logits, dim=2)
            # Select relation logits along the predicted arcs at inference
            # (moving_params set), or along gold arcs during training.
            one_hot = tf.one_hot((arc_preds if (moving_params is not None) else arc_targets), self.bucket_size)
            one_hot = tf.expand_dims(one_hot, axis=3)
            select_rel_logits = tf.matmul(rel_logits, one_hot)
            select_rel_logits = tf.squeeze(select_rel_logits, axis=3)
            rel_preds = tf.to_int32(tf.argmax(select_rel_logits, axis=(- 1)))
            rel_targets = self.vocabs['rels'].placeholder
            rel_correct = (tf.to_int32(tf.equal(rel_preds, rel_targets)) * int_tokens_to_keep)
            rel_loss = tf.losses.sparse_softmax_cross_entropy(rel_targets, select_rel_logits, self.tokens_to_keep)
        n_arc_correct = tf.reduce_sum(arc_correct)
        n_rel_correct = tf.reduce_sum(rel_correct)
        # A token counts as correct only when both arc and rel are correct.
        correct = (arc_correct * rel_correct)
        n_correct = tf.reduce_sum(correct)
        # A sequence is correct when all its (non-root) tokens are correct.
        n_seqs_correct = tf.reduce_sum(tf.to_int32(tf.equal(tf.reduce_sum(correct, axis=1), (self.sequence_lengths - 1))))
        loss = (arc_loss + rel_loss)
        outputs = {'arc_logits': arc_logits, 'arc_probs': arc_probs, 'arc_preds': arc_preds, 'arc_targets': arc_targets, 'arc_correct': arc_correct, 'arc_loss': arc_loss, 'n_arc_correct': n_arc_correct, 'rel_logits': rel_logits, 'rel_probs': rel_probs, 'rel_preds': rel_preds, 'rel_targets': rel_targets, 'rel_correct': rel_correct, 'rel_loss': rel_loss, 'n_rel_correct': n_rel_correct, 'n_tokens': self.n_tokens, 'n_seqs': self.batch_size, 'tokens_to_keep': self.tokens_to_keep, 'n_correct': n_correct, 'n_seqs_correct': n_seqs_correct, 'loss': loss}
        return outputs
class ANDescr(SageObject):
    """Abstract base for algebraic-number descriptors: provides the generic
    unary operations, which wrap the number ``n`` in an ANUnaryExpr (real
    descriptors short-circuit where the operation is the identity)."""

    def is_simple(self):
        """Generic descriptors are not 'simple'."""
        return False

    def neg(self, n):
        return ANUnaryExpr(n, '-')

    def invert(self, n):
        return ANUnaryExpr(n, '~')

    def abs(self, n):
        return ANUnaryExpr(n, 'abs')

    def real(self, n):
        # For a real descriptor, the real part is the descriptor itself.
        return ANUnaryExpr(n, 'real') if self.is_complex() else self

    def imag(self, n):
        # A real descriptor's imaginary part is exactly zero.
        return ANUnaryExpr(n, 'imag') if self.is_complex() else ANRational(0)

    def conjugate(self, n):
        # Conjugation is the identity on real descriptors.
        return ANUnaryExpr(n, 'conjugate') if self.is_complex() else self

    def norm(self, n):
        if self.is_complex():
            return ANUnaryExpr(n, 'norm')
        # Real case: norm(n) == n^2; return its descriptor.
        return (n * n)._descr
class AnnotatedConvModel(torch.nn.Module): def __init__(self, qengine): super().__init__() self.qconfig = torch.quantization.get_default_qconfig(qengine) self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) self.quant = QuantStub() self.dequant = DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv(x) x = self.dequant(x) return x
class DataTrainingArguments():
    """Arguments that control which data is used for seq2seq training/eval.

    NOTE(review): in the HF examples this class is normally decorated with
    ``@dataclass``; the decorator is not visible in this chunk — confirm.
    """
    # Required: directory containing the task's data files.
    data_dir: str = field(metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
    task: Optional[str] = field(default='summarization', metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'})
    # Source/target truncation lengths (tokens).
    max_source_length: Optional[int] = field(default=1024, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    max_target_length: Optional[int] = field(default=128, metadata={'help': 'The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    # val/test target lengths also override model.generate's max_length.
    val_max_target_length: Optional[int] = field(default=142, metadata={'help': 'The maximum total sequence length for validation target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    test_max_target_length: Optional[int] = field(default=142, metadata={'help': 'The maximum total sequence length for test target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    # Dataset subsampling; -1 keeps every example.
    n_train: Optional[int] = field(default=(- 1), metadata={'help': '# training examples. -1 means use all.'})
    n_val: Optional[int] = field(default=(- 1), metadata={'help': '# validation examples. -1 means use all.'})
    n_test: Optional[int] = field(default=(- 1), metadata={'help': '# test examples. -1 means use all.'})
    # Translation-only language ids.
    src_lang: Optional[str] = field(default=None, metadata={'help': 'Source language id for translation.'})
    tgt_lang: Optional[str] = field(default=None, metadata={'help': 'Target language id for translation.'})
    eval_beams: Optional[int] = field(default=None, metadata={'help': '# num_beams to use for evaluation.'})
    ignore_pad_token_for_loss: bool = field(default=True, metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'})
def download(url, filename, cookies=None): with open(filename, 'wb') as f: response = requests.get(url, stream=True, cookies=cookies) total = response.headers.get('content-length') if (total is None): f.write(response.content) else: downloaded = 0 total = int(total) for data in response.iter_content(chunk_size=max(int((total / 1000)), (1024 * 1024))): downloaded += len(data) f.write(data) completed = int(((50 * downloaded) / total)) sys.stdout.write('\r[{}{}]'.format(('' * completed), ('.' * (50 - completed)))) sys.stdout.flush() sys.stdout.write('\n')
class LocationMatcher(RegexMatchEach): def __init__(self, *children, **kwargs): kwargs['attrib'] = 'ner_tags' kwargs['rgx'] = 'LOCATION|LOC' super(LocationMatcher, self).__init__(*children, **kwargs)
def main(): midi_path = '/home/joann8512/NAS_189/home/PEmoDataset/midis/Q1__8v0MFBZoco_0.mid' key_data = '../src/key_mode_tempo.csv' path_outdir = '../test/events' os.makedirs(path_outdir, exist_ok=True) fn = midi_path.split('/')[(- 1)] key = get_key(key_data, os.path.splitext(fn)[0]) midi_obj = analyzer(midi_path) song_data = corpus(midi_obj) final_sequence = event(song_data, key) pickle.dump(final_sequence, open(os.path.join(path_outdir, (fn + '.pkl')), 'wb'))
def scope_aware_topological_sort(G: SDFGState, sources: Optional[Sequence[Node]]=None, condition: Optional[Callable[([Node, Node], bool)]]=None, reverse: bool=False, visited: Optional[Set[Node]]=None): if reverse: source_nodes = 'sink_nodes' predecessors = G.successors neighbors = G.predecessors else: source_nodes = 'source_nodes' predecessors = G.predecessors neighbors = G.successors if (sources is None): src_nodes = getattr(G, source_nodes, (lambda : G)) nodes = list(src_nodes()) if (len(nodes) == 0): nodes = G else: try: nodes = iter(sources) except TypeError: nodes = [sources] visited = (visited if (visited is not None) else set()) for start in nodes: if (start in visited): continue (yield start) visited.add(start) stack = [(start, iter(neighbors(start)))] while stack: (parent, children) = stack[(- 1)] try: child = next(children) if (child not in visited): skip = False for pred in predecessors(child): if (pred not in visited): skip = True break if skip: continue visited.add(child) if ((reverse and isinstance(child, dace.nodes.ExitNode)) or ((not reverse) and isinstance(child, dace.nodes.EntryNode))): if reverse: entry = G.entry_node(child) scope_subgraph = G.scope_subgraph(entry) else: scope_subgraph = G.scope_subgraph(child) (yield from scope_aware_topological_sort(scope_subgraph, sources=[child], condition=condition, reverse=reverse, visited=visited)) if ((condition is None) or condition(parent, child)): (yield child) stack.append((child, iter(neighbors(child)))) except StopIteration: stack.pop() return visited
class FlattenLayer(Layer): def get_output_shape_for(self, input_shape): return (input_shape[0], int(np.prod(input_shape[1:]))) def get_output_for(self, input, **kwargs): return input.flatten(2)
def main():
    """Entry point: parse CLI arguments, assemble the PMA experiment variant
    dictionary, derive a descriptive run name, and launch the experiment."""
    parser = argparse.ArgumentParser()
    # Run bookkeeping.
    parser.add_argument('--run_group', type=str, default='Debug')
    parser.add_argument('--memo', type=str, default=None)
    parser.add_argument('--algo_name', type=str, default=None)
    # Environment selection and rollout limits.
    parser.add_argument('--env', type=str, default='maze', choices=['maze', 'half_cheetah', 'ant-v3', 'hopper-v3', 'walker2d-v3', 'ip', 'idp', 'reacher'])
    parser.add_argument('--tasks', type=str, default=['default'], nargs='*')
    parser.add_argument('--max_path_length', type=int, default=200)
    parser.add_argument('--use_gpu', type=int, default=0, choices=[0, 1])
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    # Training schedule.
    parser.add_argument('--num_epochs', type=int, default=1000000)
    parser.add_argument('--num_epochs_per_save', type=int, default=5000)
    parser.add_argument('--num_epochs_per_eval', type=int, default=500)
    parser.add_argument('--num_epochs_per_log', type=int, default=1)
    parser.add_argument('--plot_axis', type=float, default=None, nargs='*')
    parser.add_argument('--video_skip_frames', type=int, default=1)
    # Model / latent-option sizes.
    parser.add_argument('--model_master_dim', type=int, default=512)
    parser.add_argument('--dyn_num_layers', type=int, default=2)
    parser.add_argument('--dim_option', type=int, default=2)
    # Optimization hyperparameters.
    parser.add_argument('--collect_steps', type=int, default=2000)
    parser.add_argument('--num_policy_updates', type=int, default=64)
    parser.add_argument('--num_discrim_updates', type=int, default=None)
    parser.add_argument('--reward_scale', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--learning_rate', type=float, default=0.0003)
    parser.add_argument('--replay_buffer_size', type=int, default=None)
    parser.add_argument('--normalize_observations', type=int, default=1)
    parser.add_argument('--fix_variance', type=int, default=1)
    parser.add_argument('--det_fix_variance', type=int, default=1)
    parser.add_argument('--z_eq_a', type=int, default=0, choices=[0, 1])
    parser.add_argument('--sample_latent_every', type=int, default=1)
    parser.add_argument('--aux_reward_type', type=str, default='none', choices=['none', 'disagreement'])
    parser.add_argument('--aux_reward_coef', type=float, default=0.0)
    # Checkpoint restore / cross-policy (cp) evaluation options.
    parser.add_argument('--restore_path', type=str, default=None)
    parser.add_argument('--restore_idx', type=int, default=None)
    parser.add_argument('--restore_epoch', type=int, default=None)
    parser.add_argument('--cp_path', type=str, default=None)
    parser.add_argument('--cp_idx', type=int, default=None)
    parser.add_argument('--cp_epoch', type=int, default=None)
    parser.add_argument('--cp_z_eq_a', type=int, default=0, choices=[0, 1])
    parser.add_argument('--cp_always_use_true_env', type=int, default=0, choices=[0, 1])
    parser.add_argument('--cp_min_zero', type=int, default=0, choices=[0, 1])
    # MBPO-style model-based rollouts.
    parser.add_argument('--mbpo', type=int, default=0)
    parser.add_argument('--mbpo_reset_ratio', type=float, default=0)
    parser.add_argument('--mbpo_max_path_length', type=int, default=0)
    parser.add_argument('--train_model_determ', type=str, default='sepmod', choices=['off', 'sepmod'])
    parser.add_argument('--ensemble_size', type=int, default=1)
    # MPPI planner settings.
    parser.add_argument('--mppi_num_evals', type=int, default=2)
    parser.add_argument('--mppi_planning_horizon', type=int, default=5)
    parser.add_argument('--mppi_num_candidate_sequences', type=int, default=50)
    parser.add_argument('--mppi_refine_steps', type=int, default=10)
    parser.add_argument('--mppi_gamma', type=float, default=1.0)
    parser.add_argument('--mppi_action_std', type=float, default=1.0)
    parser.add_argument('--penalty_type', type=str, default='none', choices=['none', 'disagreement'])
    parser.add_argument('--penalty_lambdas', type=float, default=[0.0], nargs='*')
    args = parser.parse_args(sys.argv[1:])
    # Per-environment kwargs plus termination/observation flags.
    if (args.env == 'maze'):
        env_kwargs = dict(n=args.max_path_length)
    else:
        env_kwargs = dict()
    if (args.env == 'half_cheetah'):
        done_ground = 0
        last_ground = 0
        omit_input_size = 1
    elif (args.env == 'ant-v3'):
        done_ground = 1
        last_ground = 0
        omit_input_size = 2
    elif (args.env == 'hopper-v3'):
        done_ground = 1
        last_ground = 0
        omit_input_size = 1
        env_kwargs.update(action_repetition=5)
    elif (args.env == 'walker2d-v3'):
        done_ground = 1
        last_ground = 0
        omit_input_size = 1
        env_kwargs.update(action_repetition=5)
    elif (args.env == 'ip'):
        done_ground = 1
        last_ground = 1
        omit_input_size = 0
    elif (args.env == 'idp'):
        done_ground = 1
        last_ground = 1
        omit_input_size = 0
    elif (args.env == 'reacher'):
        done_ground = 0
        last_ground = 0
        omit_input_size = 0
    elif (args.env == 'maze'):
        done_ground = 0
        last_ground = 0
        omit_input_size = 0
    else:
        raise NotImplementedError()
    # When a cp checkpoint is given, run against the learned model env.
    if (args.cp_path is not None):
        env_kwargs.update(cp_info=dict(cp_path=args.cp_path, cp_epoch=args.cp_epoch, cp_z_eq_a=args.cp_z_eq_a, cp_action_range=1.0, cp_multi_step=1, cp_num_truncate_obs=0, use_true_env=(True if args.cp_always_use_true_env else False), mbpo=args.mbpo, mbpo_reset_ratio=args.mbpo_reset_ratio, mbpo_max_path_length=args.mbpo_max_path_length, penalty_type=args.penalty_type, penalty_lambda=args.penalty_lambdas[0], cp_min_zero=args.cp_min_zero))
    use_gpu = args.use_gpu
    # Maze evaluation is plotted instead of recorded as video.
    eval_record_video = (args.env != 'maze')
    replay_buffer_size = (args.collect_steps if (args.replay_buffer_size is None) else args.replay_buffer_size)
    # Full experiment specification passed to the launcher.
    # NOTE(review): reward_bounds=((- .0), .0) is a degenerate (-0.0, 0.0)
    # interval — confirm this is intentional and not a truncated constant.
    variant = dict(seed=args.seed, memo=args.memo, algo_name=args.algo_name, algorithm='PMA', collector_type='batch_latent', replay_buffer_size=replay_buffer_size, generated_replay_buffer_size=replay_buffer_size, sample_latent_every=args.sample_latent_every, z_eq_a=args.z_eq_a, latent_dim=args.dim_option, aux_reward_type=args.aux_reward_type, aux_reward_coef=args.aux_reward_coef, ensemble_size=args.ensemble_size, done_ground=done_ground, last_ground=last_ground, restore_path=args.restore_path, restore_epoch=args.restore_epoch, env_name=args.env, env_kwargs=env_kwargs, policy_kwargs=dict(layer_size=args.model_master_dim, latent_dim=args.dim_option, omit_input_size=omit_input_size), discriminator_kwargs=dict(layer_size=args.model_master_dim, num_layers=args.dyn_num_layers, restrict_input_size=0, omit_input_size=omit_input_size, normalize_observations=args.normalize_observations, fix_variance=args.fix_variance, det_fix_variance=args.det_fix_variance), trainer_kwargs=dict(num_prior_samples=100, num_discrim_updates=((args.num_policy_updates // 2) if (args.num_discrim_updates is None) else args.num_discrim_updates), num_policy_updates=args.num_policy_updates, discrim_learning_rate=args.learning_rate, policy_batch_size=args.batch_size, reward_bounds=((- .0), .0), reward_scale=args.reward_scale), policy_trainer_kwargs=dict(discount=0.995, policy_lr=args.learning_rate, qf_lr=args.learning_rate, soft_target_tau=0.005), algorithm_kwargs=dict(num_epochs=args.num_epochs, num_eval_steps_per_epoch=args.collect_steps, num_trains_per_train_loop=1, num_expl_steps_per_train_loop=args.collect_steps, min_num_steps_before_training=0, max_path_length=args.max_path_length, save_snapshot_freq=args.num_epochs_per_save, num_epochs_per_eval=args.num_epochs_per_eval, num_epochs_per_log=args.num_epochs_per_log, plot_axis=args.plot_axis, eval_record_video=eval_record_video, video_skip_frames=args.video_skip_frames, train_model_determ=args.train_model_determ, mppi_num_evals=args.mppi_num_evals, penalty_type=args.penalty_type, penalty_lambdas=args.penalty_lambdas, tasks=args.tasks, mppi_kwargs=dict(planning_horizon=args.mppi_planning_horizon, primitive_horizon=1, num_candidate_sequences=args.mppi_num_candidate_sequences, refine_steps=args.mppi_refine_steps, gamma=args.mppi_gamma, action_std=args.mppi_action_std, smoothing_beta=0.0), mbpo=args.mbpo, mbpo_max_path_length=args.mbpo_max_path_length))
    experiment_config = dict()
    if (get_config is not None):
        experiment_config['get_config'] = get_config
    if (get_algorithm is not None):
        experiment_config['get_algorithm'] = get_algorithm
    # Build a run name: seed + SLURM identifiers + timestamp + key options.
    g_start_time = int(datetime.now().timestamp())
    exp_name = ''
    exp_name += f'sd{args.seed:03d}_'
    if ('SLURM_JOB_ID' in os.environ):
        exp_name += f"s_{os.environ['SLURM_JOB_ID']}."
    if ('SLURM_PROCID' in os.environ):
        exp_name += f"{os.environ['SLURM_PROCID']}."
    if ('SLURM_RESTART_COUNT' in os.environ):
        exp_name += f"rs_{os.environ['SLURM_RESTART_COUNT']}."
    exp_name += f'{g_start_time}'

    def list_to_str(arg_list):
        # Render lists without commas/spaces/quotes for filesystem safety.
        return str(arg_list).replace(',', '|').replace(' ', '').replace("'", '')

    def add_name(abbr, argument, value_dict=None, max_length=None, log_only_if_changed=False):
        """Append `_<abbr><value>` of CLI argument *argument* to exp_name."""
        nonlocal exp_name
        value = getattr(args, argument)
        if (log_only_if_changed and (parser.get_default(argument) == value)):
            return
        if isinstance(value, list):
            if (value_dict is not None):
                value = [value_dict.get(v) for v in value]
            value = list_to_str(value)
        elif (value_dict is not None):
            value = value_dict.get(value)
        if (value is None):
            value = 'X'
        if (max_length is not None):
            value = str(value)[:max_length]
        if isinstance(value, str):
            value = value.replace('/', '-')
        exp_name += f'_{abbr}{value}'
    add_name('', 'env', {'maze': 'MZ', 'half_cheetah': 'CH', 'ant-v3': 'ANT3', 'hopper-v3': 'HP3', 'walker2d-v3': 'WK3', 'ip': 'IP', 'idp': 'IDP', 'reacher': 'RC'}, log_only_if_changed=False)
    add_name('mm', 'memo')
    add_name('do', 'dim_option')
    add_name('sl', 'sample_latent_every')
    add_name('za', 'z_eq_a')
    run_experiment(experiment_config=experiment_config, run_group=args.run_group, exp_prefix=exp_name, variant=variant, gpu_kwargs={'mode': use_gpu}, log_to_wandb=('WANDB_API_KEY' in os.environ))
def validate_lei(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]: if isinstance(df, (pd.Series, dd.Series)): return df.apply(lei.is_valid) elif isinstance(df, (pd.DataFrame, dd.DataFrame)): if (column != ''): return df[column].apply(lei.is_valid) else: return df.applymap(lei.is_valid) return lei.is_valid(df)
def get_unknown_model_metadata(helm_model_name: str) -> ModelMetadata: return ModelMetadata(name=helm_model_name, creator_organization_name='Unknown', display_name=helm_model_name, description=helm_model_name, access='open', release_date=date.today(), tags=[TEXT_MODEL_TAG, FULL_FUNCTIONALITY_TEXT_MODEL_TAG])
def update_learning_rate(scheduler, optimizer): scheduler.step() lr = optimizer.param_groups[0]['lr'] print(('learning rate = %.7f' % lr))
def collect_env_info():
    """Collect a human-readable table describing the runtime environment:
    Python/PyTorch versions, CUDA/ROCm availability, detectron2 build info,
    compilers, GPUs, and key dependencies.

    :return: the tabulated report string, followed by torch's own env report.
    """
    has_gpu = torch.cuda.is_available()
    torch_version = torch.__version__
    from torch.utils.cpp_extension import CUDA_HOME
    has_rocm = False
    # ROCM_HOME only exists in torch >= 1.5.
    if (tuple(map(int, torch_version.split('.')[:2])) >= (1, 5)):
        from torch.utils.cpp_extension import ROCM_HOME
        if ((getattr(torch.version, 'hip', None) is not None) and (ROCM_HOME is not None)):
            has_rocm = True
    has_cuda = (has_gpu and (not has_rocm))
    data = []
    data.append(('sys.platform', sys.platform))
    data.append(('Python', sys.version.replace('\n', '')))
    data.append(('numpy', np.__version__))
    try:
        import detectron2
        data.append(('detectron2', ((detectron2.__version__ + ' ') + os.path.dirname(detectron2.__file__))))
    except ImportError:
        data.append(('detectron2', 'failed to import'))
    try:
        from detectron2 import _C
    except ImportError:
        data.append(('detectron2._C', 'failed to import'))
        # NOTE(review): when this import fails, `_C` stays unbound and the
        # win32 branch / arch-flag lines below would raise NameError —
        # confirm against the original file whether a guard was dropped.
    if (sys.platform != 'win32'):
        # Query the host C++ compiler version via the shell.
        try:
            cxx = os.environ.get('CXX', 'c++')
            cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True)
            cxx = cxx.decode('utf-8').strip().split('\n')[0]
        except subprocess.SubprocessError:
            cxx = 'Not found'
        data.append(('Compiler', cxx))
        if (has_cuda and (CUDA_HOME is not None)):
            try:
                nvcc = os.path.join(CUDA_HOME, 'bin', 'nvcc')
                nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True)
                nvcc = nvcc.decode('utf-8').strip().split('\n')[(- 1)]
            except subprocess.SubprocessError:
                nvcc = 'Not found'
            data.append(('CUDA compiler', nvcc))
    else:
        # On Windows, read compiler info from the compiled extension.
        data.append(('Compiler', _C.get_compiler_version()))
        data.append(('CUDA compiler', _C.get_cuda_version()))
    if has_cuda:
        data.append(('detectron2 arch flags', detect_compute_compatibility(CUDA_HOME, _C.__file__)))
    data.append(get_env_module())
    data.append(('PyTorch', ((torch_version + ' ') + os.path.dirname(torch.__file__))))
    data.append(('PyTorch debug build', torch.version.debug))
    data.append(('GPU available', has_gpu))
    if has_gpu:
        # Group device ids by device name, e.g. "GPU 0,1 -> Tesla V100".
        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(k)].append(str(k))
        for (name, devids) in devices.items():
            data.append((('GPU ' + ','.join(devids)), name))
        if has_rocm:
            data.append(('ROCM_HOME', str(ROCM_HOME)))
        else:
            data.append(('CUDA_HOME', str(CUDA_HOME)))
            cuda_arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
            if cuda_arch_list:
                data.append(('TORCH_CUDA_ARCH_LIST', cuda_arch_list))
    data.append(('Pillow', PIL.__version__))
    try:
        data.append(('torchvision', ((str(torchvision.__version__) + ' ') + os.path.dirname(torchvision.__file__))))
        if has_cuda:
            try:
                torchvision_C = importlib.util.find_spec('torchvision._C').origin
                msg = detect_compute_compatibility(CUDA_HOME, torchvision_C)
                data.append(('torchvision arch flags', msg))
            except ImportError:
                data.append(('torchvision._C', 'failed to find'))
    except AttributeError:
        data.append(('torchvision', 'unknown'))
    # Optional dependencies: report versions when importable, else skip.
    try:
        import fvcore
        data.append(('fvcore', fvcore.__version__))
    except ImportError:
        pass
    try:
        import cv2
        data.append(('cv2', cv2.__version__))
    except ImportError:
        pass
    env_str = (tabulate(data) + '\n')
    env_str += collect_torch_env()
    return env_str
_model def ecaresnetlight(pretrained=False, **kwargs): model_args = dict(block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) return _create_resnet('ecaresnetlight', pretrained, **model_args)
def main(): (hparams_file, run_opts, overrides) = sb.parse_arguments(sys.argv[1:]) with open(hparams_file) as fin: hparams = load_hyperpyyaml(fin, overrides) sb.utils.distributed.ddp_init_group(run_opts) sb.create_experiment_directory(experiment_directory=hparams['output_folder'], hyperparams_to_save=hparams_file, overrides=overrides) from ljspeech_prepare import prepare_ljspeech sb.utils.distributed.run_on_main(prepare_ljspeech, kwargs={'data_folder': hparams['data_folder'], 'save_folder': hparams['save_folder'], 'splits': hparams['splits'], 'split_ratio': hparams['split_ratio'], 'model_name': hparams['model'].__class__.__name__, 'seed': hparams['seed'], 'pitch_n_fft': hparams['n_fft'], 'pitch_hop_length': hparams['hop_length'], 'pitch_min_f0': hparams['min_f0'], 'pitch_max_f0': hparams['max_f0'], 'skip_prep': hparams['skip_prep'], 'use_custom_cleaner': True, 'device': 'cuda'}) datasets = dataio_prepare(hparams) fastspeech2_brain = FastSpeech2Brain(modules=hparams['modules'], opt_class=hparams['opt_class'], hparams=hparams, run_opts=run_opts, checkpointer=hparams['checkpointer']) fastspeech2_brain.fit(fastspeech2_brain.hparams.epoch_counter, datasets['train'], datasets['valid'], train_loader_kwargs=hparams['train_dataloader_opts'], valid_loader_kwargs=hparams['valid_dataloader_opts'])
# NOTE(review): the leading `.parametrize` looks like the tail of a mangled
# `@pytest.mark.parametrize` decorator — confirm against the original file.
.parametrize('passthrough', [None, 'passthrough'])
def test_set_pipeline_step_passthrough(passthrough):
    """Setting a pipeline step to None/'passthrough' must skip that step for
    fit/transform/predict and restore correctly when set back."""
    X = np.array([[1]])
    y = np.array([1])
    mult2 = Mult(mult=2)
    mult3 = Mult(mult=3)
    mult5 = Mult(mult=5)

    def make():
        return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])
    pipeline = make()
    # All three multipliers active: 2 * 3 * 5.
    exp = ((2 * 3) * 5)
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    # Disable the middle step: only 2 * 5 remains.
    pipeline.set_params(m3=passthrough)
    exp = (2 * 5)
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    # get_params must report the passthrough value for the disabled step.
    expected_params = {'steps': pipeline.steps, 'm2': mult2, 'm3': passthrough, 'last': mult5, 'memory': None, 'm2__mult': 2, 'last__mult': 5, 'verbose': False}
    assert (pipeline.get_params(deep=True) == expected_params)
    # Disable the first step too: only the final 5x remains.
    pipeline.set_params(m2=passthrough)
    exp = 5
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    # All estimator methods must still be reachable through the pipeline.
    other_methods = ['predict_proba', 'predict_log_proba', 'decision_function', 'transform', 'score']
    for method in other_methods:
        getattr(pipeline, method)(X)
    # Re-enable the first step.
    pipeline.set_params(m2=mult2)
    exp = (2 * 5)
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    # Disable the FINAL step: transform works, predict must not exist.
    pipeline = make()
    pipeline.set_params(last=passthrough)
    exp = 6
    pipeline.fit(X, y)
    pipeline.transform(X)
    assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    with raises(AttributeError, match="has no attribute 'predict'"):
        getattr(pipeline, 'predict')
    # Pipeline constructed directly with a passthrough middle step.
    exp = (2 * 5)
    pipeline = Pipeline([('m2', mult2), ('m3', passthrough), ('last', mult5)])
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
class TestHamming(): def test_basic(self): assert_allclose(windows.hamming(6, False), [0.08, 0.31, 0.77, 1.0, 0.77, 0.31]) assert_allclose(windows.hamming(7, sym=False), [0.08, 0., 0., 0., 0., 0., 0.]) assert_allclose(windows.hamming(6), [0.08, 0., 0., 0., 0., 0.08]) assert_allclose(windows.hamming(7, sym=True), [0.08, 0.31, 0.77, 1.0, 0.77, 0.31, 0.08])
def affect_conv_init(real_weight, imag_weight, kernel_size, init_func, criterion): in_channels = real_weight.size(1) out_channels = real_weight.size(0) (a, b) = init_func(in_channels, out_channels, kernel_size=kernel_size, criterion=criterion) (a, b) = (torch.from_numpy(a), torch.from_numpy(b)) real_weight.data = a.type_as(real_weight.data) imag_weight.data = b.type_as(imag_weight.data)
class CMP_reg(atomic_reg):
    """ctypes bit-field layout of the CMP atomic command register.

    Each ``_fields_`` entry is (name, base type, bit width); the widths sum
    to the 1024-bit command length declared at the bottom. Field groups:
    command id/dependency header, task type/flags, operand sign/precision/
    stride descriptors, padding and insertion controls, shape (n/c/h/w) and
    stride descriptors for result and operands, and 32-bit addresses.
    """
    OP_NAME = 'CMP'
    _fields_ = [
        # Command header: short-form flag, own id, dependency id.
        ('cmd_short', ctypes.c_uint64, 1), ('cmd_id', ctypes.c_uint64, 20), ('cmd_id_dep', ctypes.c_uint64, 20),
        # Task type and execution flags.
        ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5), ('eu_half_en', ctypes.c_uint64, 1),
        ('tsk_opd_num', ctypes.c_uint64, 2), ('pad_mode', ctypes.c_uint64, 2), ('cmd_id_en', ctypes.c_uint64, 4),
        ('pwr_step', ctypes.c_uint64, 4), ('intr_en', ctypes.c_uint64, 1), ('res_add', ctypes.c_uint64, 1),
        ('relu', ctypes.c_uint64, 1), ('left_tran', ctypes.c_uint64, 1), ('rsvd1', ctypes.c_uint64, 1),
        ('kernel_rotate', ctypes.c_uint64, 1),
        # Operand signedness and precision codes.
        ('opd0_sign', ctypes.c_uint64, 1), ('opd1_sign', ctypes.c_uint64, 1), ('opd2_sign', ctypes.c_uint64, 1),
        ('res0_prec', ctypes.c_uint64, 3), ('opd0_prec', ctypes.c_uint64, 3), ('opd1_prec', ctypes.c_uint64, 3),
        ('opd2_prec', ctypes.c_uint64, 3),
        # Constant-operand flags and stride-mode codes.
        ('opd0_const', ctypes.c_uint64, 1), ('opd1_const', ctypes.c_uint64, 1), ('opd2_const', ctypes.c_uint64, 1),
        ('res0_str', ctypes.c_uint64, 3), ('opd0_str', ctypes.c_uint64, 3), ('opd1_str', ctypes.c_uint64, 3),
        ('opd2_str', ctypes.c_uint64, 3), ('res_add_sign', ctypes.c_uint64, 1),
        # Reserved gaps and extra constant flag.
        ('rsvd2', ctypes.c_uint64, 25), ('rsvd3', ctypes.c_uint64, 1), ('opd3_const', ctypes.c_uint64, 1),
        ('rsvd4', ctypes.c_uint64, 1),
        # Insertion (dilation) and padding controls.
        ('opd0_x_ins0', ctypes.c_uint64, 4), ('opd0_y_ins0', ctypes.c_uint64, 4), ('opd1_x_ins0', ctypes.c_uint64, 4),
        ('opd1_y_ins0', ctypes.c_uint64, 4), ('opd0_up_pad', ctypes.c_uint64, 4), ('opd0_dn_pad', ctypes.c_uint64, 4),
        ('opd0_lf_pad', ctypes.c_uint64, 4), ('opd0_rt_pad', ctypes.c_uint64, 4),
        # Result op strides and shift amounts.
        ('res_op_x_str', ctypes.c_uint64, 4), ('res_op_y_str', ctypes.c_uint64, 4), ('res0_h_shift', ctypes.c_uint64, 4),
        ('res0_w_shift', ctypes.c_uint64, 4), ('opd0_h_shift', ctypes.c_uint64, 4), ('opd0_w_shift', ctypes.c_uint64, 4),
        ('opd1_h_shift', ctypes.c_uint64, 4), ('opd1_w_shift', ctypes.c_uint64, 4),
        # Lane mask (full 64-bit field).
        ('tsk_lane_num', ctypes.c_uint64, 64),
        # Result and operand shapes (n/c/h/w), 16 bits each.
        ('res0_n', ctypes.c_uint64, 16), ('res0_c', ctypes.c_uint64, 16), ('res0_h', ctypes.c_uint64, 16),
        ('res0_w', ctypes.c_uint64, 16), ('opd0_n', ctypes.c_uint64, 16), ('opd0_c', ctypes.c_uint64, 16),
        ('opd0_h', ctypes.c_uint64, 16), ('opd0_w', ctypes.c_uint64, 16), ('opd1_n', ctypes.c_uint64, 16),
        ('opd1_c', ctypes.c_uint64, 16), ('opd1_h', ctypes.c_uint64, 16), ('opd1_w', ctypes.c_uint64, 16),
        # n/c strides, 16 bits each.
        ('res0_n_str', ctypes.c_uint64, 16), ('res0_c_str', ctypes.c_uint64, 16), ('opd0_n_str', ctypes.c_uint64, 16),
        ('opd0_c_str', ctypes.c_uint64, 16), ('opd1_n_str', ctypes.c_uint64, 16), ('opd1_c_str', ctypes.c_uint64, 16),
        ('opd2_n_str', ctypes.c_uint64, 16), ('opd2_c_str', ctypes.c_uint64, 16),
        # Addresses and h/w strides, 32 bits each.
        ('res0_addr', ctypes.c_uint64, 32), ('opd0_addr', ctypes.c_uint64, 32), ('opd1_addr', ctypes.c_uint64, 32),
        ('opd2_addr', ctypes.c_uint64, 32), ('res0_h_str', ctypes.c_uint64, 32), ('res0_w_str', ctypes.c_uint64, 32),
        ('opd0_h_str', ctypes.c_uint64, 32), ('opd0_w_str', ctypes.c_uint64, 32), ('opd1_h_str', ctypes.c_uint64, 32),
        ('opd1_w_str', ctypes.c_uint64, 32), ('opd2_h_str', ctypes.c_uint64, 32), ('opd2_w_str', ctypes.c_uint64, 32),
        ('res1_addr', ctypes.c_uint64, 32), ('opd3_addr', ctypes.c_uint64, 32)]
    # Typed mirrors of the bit fields above (for static type checkers).
    cmd_short: int
    cmd_id: int
    cmd_id_dep: int
    tsk_typ: int
    tsk_eu_typ: int
    eu_half_en: int
    tsk_opd_num: int
    pad_mode: int
    cmd_id_en: int
    pwr_step: int
    intr_en: int
    res_add: int
    relu: int
    left_tran: int
    rsvd1: int
    kernel_rotate: int
    opd0_sign: int
    opd1_sign: int
    opd2_sign: int
    res0_prec: int
    opd0_prec: int
    opd1_prec: int
    opd2_prec: int
    opd0_const: int
    opd1_const: int
    opd2_const: int
    res0_str: int
    opd0_str: int
    opd1_str: int
    opd2_str: int
    res_add_sign: int
    rsvd2: int
    rsvd3: int
    opd3_const: int
    rsvd4: int
    opd0_x_ins0: int
    opd0_y_ins0: int
    opd1_x_ins0: int
    opd1_y_ins0: int
    opd0_up_pad: int
    opd0_dn_pad: int
    opd0_lf_pad: int
    opd0_rt_pad: int
    res_op_x_str: int
    res_op_y_str: int
    res0_h_shift: int
    res0_w_shift: int
    opd0_h_shift: int
    opd0_w_shift: int
    opd1_h_shift: int
    opd1_w_shift: int
    tsk_lane_num: int
    res0_n: int
    res0_c: int
    res0_h: int
    res0_w: int
    opd0_n: int
    opd0_c: int
    opd0_h: int
    opd0_w: int
    opd1_n: int
    opd1_c: int
    opd1_h: int
    opd1_w: int
    res0_n_str: int
    res0_c_str: int
    opd0_n_str: int
    opd0_c_str: int
    opd1_n_str: int
    opd1_c_str: int
    opd2_n_str: int
    opd2_c_str: int
    res0_addr: int
    opd0_addr: int
    opd1_addr: int
    opd2_addr: int
    res0_h_str: int
    res0_w_str: int
    opd0_h_str: int
    opd0_w_str: int
    opd1_h_str: int
    opd1_w_str: int
    opd2_h_str: int
    opd2_w_str: int
    res1_addr: int
    opd3_addr: int
    # Total command length in bits.
    length: int = 1024
def draw_net(caffe_net, rankdir, ext='png'): return get_pydot_graph(caffe_net, rankdir).create(format=ext)
def validate(args): model = create_model(args.model, pretrained=True) print(f'Created {args.model} model. Validating...') eval_step = objax.Jit((lambda images, labels: eval_forward(model, images, labels)), model.vars()) image_size = model.default_cfg['input_size'][(- 1)] (test_ds, num_batches) = imagenet_data.load(imagenet_data.Split.TEST, is_training=False, image_size=image_size, batch_dims=[args.batch_size], chw=True, mean=tuple([(x * 255) for x in model.default_cfg['mean']]), std=tuple([(x * 255) for x in model.default_cfg['std']]), tfds_data_dir=args.data) batch_time = AverageMeter() (correct_top1, correct_top5) = (0, 0) total_examples = 0 start_time = prev_time = time.time() for (batch_index, batch) in enumerate(test_ds): (images, labels) = (batch['images'], batch['labels']) (top1_count, top5_count) = eval_step(images, labels) correct_top1 += int(top1_count) correct_top5 += int(top5_count) total_examples += images.shape[0] batch_time.update((time.time() - prev_time)) if (((batch_index % 20) == 0) and (batch_index > 0)): print(f'Test: [{batch_index:>4d}/{num_batches}] Rate: {(images.shape[0] / batch_time.val):>5.2f}/s ({(images.shape[0] / batch_time.avg):>5.2f}/s) : {((100 * correct_top1) / total_examples):>7.3f} : {((100 * correct_top5) / total_examples):>7.3f}') prev_time = time.time() acc_1 = ((100 * correct_top1) / total_examples) acc_5 = ((100 * correct_top5) / total_examples) print(f'Validation complete. {(total_examples / (prev_time - start_time)):>5.2f} img/s. {acc_1:>7.3f}, {acc_5:>7.3f}') return dict(top1=float(acc_1), top5=float(acc_5))
.spark def test_tf_idf(weighting_log, tf_idf_model): train_dataset = create_dataset(weighting_log) tf_idf_model.fit(train_dataset) idf = tf_idf_model._get_idf(train_dataset.interactions).toPandas() assert np.allclose(idf[(idf['user_idx'] == 1)]['idf'], np.log1p((2 / 1))) assert np.allclose(idf[(idf['user_idx'] == 0)]['idf'], np.log1p((2 / 2))) assert np.allclose(idf[(idf['user_idx'] == 2)]['idf'], np.log1p((2 / 2))) tf_idf_model.fit(train_dataset) recs = tf_idf_model.predict(train_dataset, k=1, queries=[0, 1]).toPandas() assert (recs.loc[((recs['user_idx'] == 1), 'item_idx')].iloc[0] == 0)
def _constant_speed_and_yaw_rate(kinematics_data: KinematicsData, sec_from_now: float, sampled_at: int) -> np.ndarray: (x, y, vx, vy, _, _, speed, yaw_rate, _, yaw) = kinematics_data preds = [] time_step = (1.0 / sampled_at) distance_step = (time_step * speed) yaw_step = (time_step * yaw_rate) for _ in np.arange(time_step, (sec_from_now + time_step), time_step): x += (distance_step * np.cos(yaw)) y += (distance_step * np.sin(yaw)) preds.append((x, y)) yaw += yaw_step return np.array(preds)
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = BartphoTokenizer test_rust_tokenizer = False test_sentencepiece = True def setUp(self): super().setUp() vocab = ['This', 'is', 'a', 't', 'est'] vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.special_tokens_map = {'unk_token': '<unk>'} self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['monolingual_vocab_file']) with open(self.monolingual_vocab_file, 'w', encoding='utf-8') as fp: for token in vocab_tokens: fp.write(f'''{token} {vocab_tokens[token]} ''') tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map) tokenizer.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = 'This is a la test' output_text = 'This is a<unk><unk> test' return (input_text, output_text) def test_full_tokenizer(self): tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map) text = 'This is a la test' bpe_tokens = 'This is a l a t est'.split() tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = (tokens + [tokenizer.unk_token]) input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def main(): args = parser.parse_args() args.pretrained = True if args.checkpoint: args.pretrained = False print('==> Creating PyTorch {} model'.format(args.model)) model = geffnet.create_model(args.model, num_classes=args.num_classes, in_chans=3, pretrained=args.pretrained, checkpoint_path=args.checkpoint, exportable=True) model.eval() example_input = torch.randn((args.batch_size, 3, (args.img_size or 224), (args.img_size or 224)), requires_grad=True) model(example_input) print("==> Exporting model to ONNX format at '{}'".format(args.output)) input_names = ['input0'] output_names = ['output0'] dynamic_axes = {'input0': {0: 'batch'}, 'output0': {0: 'batch'}} if args.dynamic_size: dynamic_axes['input0'][2] = 'height' dynamic_axes['input0'][3] = 'width' if args.aten_fallback: export_type = torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK else: export_type = torch.onnx.OperatorExportTypes.ONNX torch_out = torch.onnx._export(model, example_input, args.output, export_params=True, verbose=True, input_names=input_names, output_names=output_names, keep_initializers_as_inputs=args.keep_init, dynamic_axes=dynamic_axes, opset_version=args.opset, operator_export_type=export_type) print("==> Loading and checking exported model from '{}'".format(args.output)) onnx_model = onnx.load(args.output) onnx.checker.check_model(onnx_model) print('==> Passed') if (args.keep_init and args.aten_fallback): import caffe2.python.onnx.backend as onnx_caffe2 print('==> Loading model into Caffe2 backend and comparing forward pass.'.format(args.output)) caffe2_backend = onnx_caffe2.prepare(onnx_model) B = {onnx_model.graph.input[0].name: x.data.numpy()} c2_out = caffe2_backend.run(B)[0] np.testing.assert_almost_equal(torch_out.data.numpy(), c2_out, decimal=5) print('==> Passed')
class TestNNLinker(unittest.TestCase):
    """Tests for nearest-neighbor based link prediction."""

    def test_link_prediction(self):
        k, thresh = 5, 0.2
        for adjacency in [test_graph(), test_digraph(), test_bigraph()]:
            # Explicit neighborhood size and similarity threshold.
            linker = NNLinker(n_neighbors=k, threshold=thresh)
            predicted = linker.fit_predict(adjacency)
            self.assertTrue(predicted.shape == adjacency.shape)
            # No node may gain more than k links, all above the threshold.
            self.assertTrue(np.all(get_degrees(predicted) <= k))
            self.assertTrue(np.all(predicted.data >= thresh))
            # Custom embedding method should also produce a same-shaped result.
            linker = NNLinker(embedding_method=Spectral(2))
            predicted = linker.fit_predict(adjacency)
            self.assertTrue(predicted.shape == adjacency.shape)
# BUG FIX: the `@pytest.mark` prefix was lost from the three parametrize
# decorators, leaving syntactically invalid `.parametrize(...)` lines; restored.
@pytest.mark.parametrize('variable_batch_size', [False, True])
@pytest.mark.parametrize('batch_size', [1, 4])
@pytest.mark.parametrize('shape', [(10, 32, (- 1)), ((- 1), 32, 8)])
def test_nnp_graph_reshape(tmpdir, variable_batch_size, batch_size, shape):
    """Round-trip an NNP graph containing a reshape and run the reloaded graph."""
    x = nn.Variable([10, 2, 10, 10])
    h = PF.convolution(x, 4, kernel=(3, 3), stride=(1, 1))
    y = F.reshape(h, shape=shape)
    (x2, y2) = check_nnp_graph_save_load(tmpdir, x, y, batch_size, variable_batch_size)
    if (not variable_batch_size):
        return
    # With a variable batch size the leading dimension is rewritten to batch_size.
    shape2 = list(y.shape)
    shape2[0] = batch_size
    x2.d = np.random.randn(*x2.shape)
    # Forward pass must succeed on the reloaded, re-batched graph.
    y2.forward()
def register_methods(root_module):
    """Register Python method wrappers for every wrapped ns-3 C++ type.

    Auto-generated pybindgen glue for the ns-3 energy module bindings: each
    call attaches the methods of one C++ class (looked up by its fully
    qualified name in ``root_module``) to its Python wrapper. The call order
    mirrors the generator's output and is preserved verbatim.
    """
    # Core value/containers/iterator types.
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    # DefaultDeleter template instantiations.
    register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
    register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
    register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
    register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
    register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
    register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
    register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
    register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
    register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
    # Energy-module helper classes.
    register_Ns3DeviceEnergyModelContainer_methods(root_module, root_module['ns3::DeviceEnergyModelContainer'])
    register_Ns3DeviceEnergyModelHelper_methods(root_module, root_module['ns3::DeviceEnergyModelHelper'])
    register_Ns3EnergyHarvesterHelper_methods(root_module, root_module['ns3::EnergyHarvesterHelper'])
    register_Ns3EnergySourceHelper_methods(root_module, root_module['ns3::EnergySourceHelper'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3LiIonEnergySourceHelper_methods(root_module, root_module['ns3::LiIonEnergySourceHelper'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3RvBatteryModelHelper_methods(root_module, root_module['ns3::RvBatteryModelHelper'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TracedValue__Double_methods(root_module, root_module['ns3::TracedValue< double >'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3BasicEnergyHarvesterHelper_methods(root_module, root_module['ns3::BasicEnergyHarvesterHelper'])
    register_Ns3BasicEnergySourceHelper_methods(root_module, root_module['ns3::BasicEnergySourceHelper'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    # SimpleRefCount template instantiations.
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3TracedValue__Ns3Time_methods(root_module, root_module['ns3::TracedValue< ns3::Time >'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    # Random variable stream subclasses.
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    # Attribute system types.
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
    register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3DeviceEnergyModel_methods(root_module, root_module['ns3::DeviceEnergyModel'])
    register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
    register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EnergyHarvester_methods(root_module, root_module['ns3::EnergyHarvester'])
    register_Ns3EnergyHarvesterContainer_methods(root_module, root_module['ns3::EnergyHarvesterContainer'])
    register_Ns3EnergySource_methods(root_module, root_module['ns3::EnergySource'])
    register_Ns3EnergySourceContainer_methods(root_module, root_module['ns3::EnergySourceContainer'])
    register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
    register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LiIonEnergySource_methods(root_module, root_module['ns3::LiIonEnergySource'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3RvBatteryModel_methods(root_module, root_module['ns3::RvBatteryModel'])
    register_Ns3SimpleDeviceEnergyModel_methods(root_module, root_module['ns3::SimpleDeviceEnergyModel'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3BasicEnergyHarvester_methods(root_module, root_module['ns3::BasicEnergyHarvester'])
    register_Ns3BasicEnergySource_methods(root_module, root_module['ns3::BasicEnergySource'])
    # CallbackImpl template instantiations.
    register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Double_Double_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Time_Ns3Time_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Time, ns3::Time, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    # Hash implementations.
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def splat(vs, dim):
    """Split a mixed function ``vs`` into a scalar part and a ``dim``-sized vector part."""
    element = vs.function_space().ufl_element()
    if element.num_sub_elements() == dim:
        # Flat element: component 0 is the scalar, the remaining components
        # form the vector.
        v = vs[0]
        s = vs[1] if dim == 2 else as_vector([vs[i] for i in range(1, dim)])
    else:
        # Proper mixed element: let UFL split it.
        v, s = split(vs)
    return v, s
# BUG FIX: the click decorator markers were garbled (the `@click.argument` /
# `@click.option` prefixes were lost, leaving syntactically invalid bare
# keyword-arg tuples); restored so the CLI command is actually registered.
@_group.command(name='train')
@click.argument('corpus_file', type=click.Path(exists=True))
@click.argument('out_file', type=click.Path())
@click.option('--mode', type=click.Choice(['sg', 'cbow']), default='sg')
@click.option('--dim-size', default=300)
@click.option('--window', default=10)
@click.option('--min-count', default=3)
@click.option('--negative', default=5)
@click.option('--epoch', default=5)
@click.option('--pool-size', default=multiprocessing.cpu_count())
@click.option('--chunk-size', default=30)
def train_word2vec(corpus_file, out_file, **kwargs):
    """Train a word2vec model on CORPUS_FILE and write vectors to OUT_FILE."""
    word2vec.train(corpus_file, out_file, **kwargs)
def tadgan_pipline(tadgan_hyperparameters):
    """Load the 'tadgan' pipeline configured with the given hyperparameters."""
    return analysis._load_pipeline('tadgan', tadgan_hyperparameters)
def annotate_and_time(client, text, properties=None):
    """Annotate *text* with *client* and record wall-clock start/end times.

    Args:
        client: object exposing ``annotate(text, properties=..., output_format=...)``.
        text: the text to annotate.
        properties: optional annotator properties; defaults to an empty dict.

    Returns:
        dict with keys ``annotation``, ``start_time`` and ``end_time``
        (times from ``time.time()``).
    """
    # BUG FIX: the default was a mutable dict ({}) shared across all calls;
    # use the None sentinel and build a fresh dict per call.
    if properties is None:
        properties = {}
    start = time.time()
    ann = client.annotate(text, properties=properties, output_format='text')
    end = time.time()
    return {'annotation': ann, 'start_time': start, 'end_time': end}
# BUG FIX: the leading `@` was lost from the registry decorator, turning it into
# a no-op bare expression so the dataset was never registered; restored.
@_REGISTRY.register()
class ImageNetV2(DatasetBase):
    """ImageNet-V2 (matched-frequency) dataset.

    Evaluation-only: the same item list is passed as both train_x and test.
    """

    dataset_dir = 'imagenetv2'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        image_dir = 'imagenetv2-matched-frequency-format-val'
        self.image_dir = os.path.join(self.dataset_dir, image_dir)
        # Class names are shared with the base ImageNet dataset.
        text_file = os.path.join(self.dataset_dir, 'classnames.txt')
        classnames = ImageNet.read_classnames(text_file)
        data = self.read_data(classnames)
        super().__init__(train_x=data, test=data)

    def read_data(self, classnames):
        """Build Datum items from per-class folders named '0'..'N-1'.

        classnames maps WordNet folder id -> human-readable class name; folder
        order defines the integer labels.
        """
        image_dir = self.image_dir
        folders = list(classnames.keys())
        items = []
        # Generalized from a hard-coded range(1000): iterate over however many
        # classes the classnames file actually lists.
        for label in range(len(folders)):
            class_dir = os.path.join(image_dir, str(label))
            imnames = listdir_nohidden(class_dir)
            folder = folders[label]
            classname = classnames[folder]
            for imname in imnames:
                impath = os.path.join(class_dir, imname)
                item = Datum(impath=impath, label=label, classname=classname)
                items.append(item)
        return items
def test_attack_directions(model, testset, adversarialset, points=51, ord=float('inf'), cuda=False):
    """Sweep each adversarial perturbation direction and record model probabilities.

    For every example, walks from -2x to +2x the adversarial perturbation in
    ``points`` steps (odd, so the clean input sits exactly at the midpoint) and
    evaluates the model's softmax output at each step.

    :param model: network in eval mode
    :param testset: loader over clean examples (sequential order required)
    :param adversarialset: loader over matching adversarial examples
    :param points: number of interpolation points (must be odd)
    :param ord: vector norm order used to measure perturbation size
    :param cuda: whether model and tensors live on GPU (must match the model)
    :return: (probabilities, norms) as numpy arrays, one row per example
    """
    # Preconditions: eval mode, non-empty aligned loaders, odd point count,
    # and device flag consistent with where the model actually lives.
    assert (model.training is False)
    assert (len(testset) > 0)
    assert isinstance(testset, torch.utils.data.DataLoader)
    assert isinstance(testset.sampler, torch.utils.data.SequentialSampler)
    assert (len(adversarialset) > 0)
    assert (len(testset) >= len(adversarialset))
    assert isinstance(adversarialset, torch.utils.data.DataLoader)
    assert isinstance(adversarialset.sampler, torch.utils.data.SequentialSampler)
    assert ((points % 2) == 1)
    assert ((cuda and common.torch.is_cuda(model)) or ((not cuda) and (not common.torch.is_cuda(model))))
    probabilities = []
    norms = []
    # Clean and adversarial loaders are iterated in lockstep; batch indices
    # must agree since the sets are sequentially sampled.
    for (testdata, adversarialdata) in zip(enumerate(testset), enumerate(adversarialset)):
        testb = testdata[0]
        adversarialb = adversarialdata[0]
        assert (testb == adversarialb)
        assert isinstance(testdata[1], list)
        assert isinstance(adversarialdata[1], list)
        testdata = testdata[1]
        adversarialdata = adversarialdata[1]
        inputs = testdata[0]
        adversarial_inputs = adversarialdata[0]
        batch_size = inputs.shape[0]
        # Perturbation direction per example, plus its norm under `ord`.
        adversarial_directions = (adversarial_inputs - inputs)
        adversarial_directions = adversarial_directions.numpy()
        adversarial_norms = numpy.linalg.norm(adversarial_directions.reshape(batch_size, (- 1)), axis=1, ord=ord)
        for b in range(batch_size):
            # Scale factors from -2 to +2; midpoint 0 recovers the clean input.
            factors = numpy.linspace((- 2), 2, points).astype(numpy.float32)
            adversarial_input_sequence = numpy.repeat(numpy.expand_dims(adversarial_directions[b], axis=0), points, axis=0)
            adversarial_input_sequence = (adversarial_input_sequence * common.numpy.expand_as(factors, adversarial_input_sequence))
            adversarial_input_sequence = (common.torch.as_variable(inputs[b], cuda) + common.torch.as_variable(adversarial_input_sequence, cuda))
            # Sanity checks on the interpolation endpoints and midpoint.
            assert numpy.isclose(factors[0], (- 2))
            assert numpy.isclose(factors[(points // 2)], 0)
            assert numpy.isclose(factors[(- 1)], 2)
            numpy.testing.assert_almost_equal(adversarial_input_sequence[(points // 2)].cpu().numpy(), inputs[b].cpu().numpy(), 4)
            numpy.testing.assert_almost_equal(adversarial_input_sequence[0].cpu().numpy(), (inputs[b].cpu().numpy() - (2 * adversarial_directions[b])), 4)
            numpy.testing.assert_almost_equal(adversarial_input_sequence[(- 1)].cpu().numpy(), (inputs[b].cpu().numpy() + (2 * adversarial_directions[b])), 4)
            # HWC -> CHW for the model, clamped to the valid image range.
            adversarial_input_sequence = adversarial_input_sequence.permute(0, 3, 1, 2)
            adversarial_input_sequence = torch.clamp(adversarial_input_sequence, min=0, max=1)
            logit_sequence = model.forward(adversarial_input_sequence)
            probability_sequence = torch.nn.functional.softmax(logit_sequence, dim=1)
            probabilities.append(probability_sequence.detach().cpu().numpy())
            # Signed distance along the direction for each interpolation point.
            norms.append((adversarial_norms[b] * factors))
            progress(((adversarialb * batch_size) + b), (len(adversarialset) * batch_size))
    return (numpy.array(probabilities), numpy.array(norms))
def GreedyDecoder(output, labels, label_lengths, blank_label=28, collapse_repeated=True):
    """Greedy CTC decoding: take the argmax per frame, drop blanks, and
    optionally collapse consecutive repeats; also decode the reference labels.

    Returns (decoded_texts, target_texts).
    """
    best_paths = torch.argmax(output, dim=2)
    decodes, targets = [], []
    for i, path in enumerate(best_paths):
        # Reference transcript for this example, trimmed to its true length.
        targets.append(text_transform.int_to_text(labels[i][:label_lengths[i]].tolist()))
        decoded_ids = []
        for j, idx in enumerate(path):
            if idx == blank_label:
                continue
            # Skip a repeat of the previous frame's symbol when collapsing.
            if collapse_repeated and j != 0 and idx == path[j - 1]:
                continue
            decoded_ids.append(idx.item())
        decodes.append(text_transform.int_to_text(decoded_ids))
    return decodes, targets
# NOTE(review): the attributes below use ``dataclasses.field`` and this class is
# normally parsed by HfArgumentParser, which requires a ``@dataclass`` decorator;
# the decorator appears to have been lost from this source — confirm upstream.
class ModelArguments():
    """Arguments selecting which pretrained model/config/feature extractor to use."""

    # Hub id or local path of the pretrained wav2vec2 model.
    model_name_or_path: str = field(default='facebook/wav2vec2-base', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    # Optional separate config name/path.
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    feature_extractor_name: Optional[str] = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    freeze_feature_encoder: bool = field(default=True, metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
    attention_mask: bool = field(default=True, metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'})
    # Deprecated alias of freeze_feature_encoder; handled in __post_init__.
    freeze_feature_extractor: Optional[bool] = field(default=None, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})

    def __post_init__(self):
        """Back-compat handling for the deprecated --freeze_feature_extractor flag."""
        # Warn when the deprecated flag is absent/falsy but the new one is set.
        if ((not self.freeze_feature_extractor) and self.freeze_feature_encoder):
            warnings.warn('The argument `--freeze_feature_extractor` is deprecated and will be removed in a future version. Use `--freeze_feature_encoder`instead. Setting `freeze_feature_encoder==True`.', FutureWarning)
        # Reject contradictory use of old and new flags.
        if (self.freeze_feature_extractor and (not self.freeze_feature_encoder)):
            raise ValueError('The argument `--freeze_feature_extractor` is deprecated and should not be used in combination with `--freeze_feature_encoder`.Only make use of `--freeze_feature_encoder`.')
def inference(data_dir: str, is_query: bool, encoder: Encoder, prefix: str, max_length: int, output_dir: str=None, batch_size: int=1024, enable_rewrite: bool=True, dataparallel: bool=True, return_vecs: bool=False, save_to_memmap: bool=True):
    """Encode the file identified by (data_dir, prefix) and optionally persist
    the vectors, either as a memmap or a plain file, then run inference_dataset.
    """
    encoding_dataset = DatasetForEncoding(data_dir=data_dir, prefix=prefix, max_length=max_length)
    # Decide whether (and where) to write the encoded vectors.
    if output_dir is None:
        target_path = None
    else:
        target_path = os.path.join(output_dir, f'{prefix}.memmap' if save_to_memmap else f'{prefix}')
    return inference_dataset(encoder=encoder, dataset=encoding_dataset, is_query=is_query, output_file=target_path, batch_size=batch_size, enable_rewrite=enable_rewrite, dataparallel=dataparallel, return_vecs=return_vecs, save_to_memmap=save_to_memmap)
class ResNet(nn.Module):
    """ResNet backbone with an L2-normalized, scaled embedding head (fc1) and a
    classifier head (fc2).

    ``mode`` selects the output: 'encode' -> normalized embedding,
    'classify' -> class scores, 'both' -> (embedding, scores).
    """

    def __init__(self, block, layers, mode, num_classes):
        super(ResNet, self).__init__()
        valid_modes = {'encode', 'classify', 'both'}
        if (mode not in valid_modes):
            raise Exception(('mode should be one of ' + str(valid_modes)))
        self.mode = mode
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=True)
        self.bn1 = nn.BatchNorm2d(64, track_running_stats=True)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=7)
        self.fc1 = nn.Linear((512 * block.expansion), 512)
        # Fixed scale applied to the unit-norm embedding before classification.
        self.multiply = Multiply(50.0)
        self.fc2 = nn.Linear(512, num_classes)
        # He-style initialization for convolutions; BatchNorm to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                if (m.bias is not None):
                    m.bias.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, use_conv=False):
        """Stack `blocks` residual blocks; downsample via 1x1 conv or
        avg-pool + channel concatenation when the resolution/width changes."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            if use_conv:
                downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion), track_running_stats=True))
            else:
                # Parameter-free shortcut: pool spatially, pad channels by
                # concatenating copies of the input.
                downsample = nn.Sequential(nn.AvgPool2d(kernel_size=stride, stride=stride), ConcatChannels((((planes * block.expansion) // self.inplanes) - 1)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x, mode=None):
        """Forward pass; ``mode`` overrides the instance-level mode when given.

        Inputs may flow through as a dict with 'An'/'Xn' entries (presumably a
        two-stream variant — confirm with the block implementation) or as a
        plain tensor.
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        if isinstance(x, dict):
            x['An'] = x['An'].view(x['An'].size(0), (- 1))
            x['Xn'] = x['Xn'].view(x['Xn'].size(0), (- 1))
        else:
            x = x.view(x.size(0), (- 1))
        x = self.fc1(x)
        if isinstance(x, dict):
            xnorm = {'An': F.normalize(x['An'], p=2, dim=1), 'Xn': F.normalize(x['Xn'], p=2, dim=1)}
        else:
            xnorm = F.normalize(x, p=2, dim=1)
        xnorm = self.multiply(xnorm)
        # BUG FIX: was `mode = (self.mode if None else mode)` — `if None` is
        # always falsy, so self.mode was never used and the default call
        # forward(x) fell through to `'Invalid mode: ' + None` and crashed.
        if mode is None:
            mode = self.mode
        if (mode == 'encode'):
            return xnorm
        else:
            scores = self.fc2(xnorm)
            if (mode == 'classify'):
                return scores
            elif (mode == 'both'):
                return (xnorm, scores)
            else:
                raise Exception(('Invalid mode: ' + mode))
def test_closure_over_workspace_build(simplemodels_model_data):
    """Round-tripping (model, data) through Workspace.build must preserve
    inference results and produce a stable digest."""
    (model, data) = simplemodels_model_data
    expected = pyhf.infer.hypotest(1.0, (data + model.config.auxdata), model)
    workspace = pyhf.Workspace.build(model, data)
    # The built workspace must be JSON-serializable.
    assert json.dumps(workspace)
    rebuilt_model = workspace.model()
    rebuilt_data = workspace.data(rebuilt_model)
    observed = pyhf.infer.hypotest(1.0, rebuilt_data, rebuilt_model)
    assert (expected == observed)
    # Building again from the rebuilt pieces yields an identical workspace.
    rebuilt_workspace = pyhf.Workspace.build(rebuilt_model, rebuilt_data)
    assert (pyhf.utils.digest(rebuilt_workspace) == pyhf.utils.digest(workspace))
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel's weights as a TF1 checkpoint under ``ckpt_dir``.

    Variable names are rewritten from the PyTorch naming scheme to the original
    TF BERT scheme, and dense/attention weight matrices are transposed to match
    TF's layout.
    """
    # PyTorch stores these as (out, in); TF expects (in, out), so transpose.
    tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
    # Ordered name rewrites applied in sequence (order matters: '.'->'/' must
    # run before the LayerNorm renames).
    var_map = (('layer.', 'layer_'), ('word_embeddings.weight', 'word_embeddings'), ('position_embeddings.weight', 'position_embeddings'), ('token_type_embeddings.weight', 'token_type_embeddings'), ('.', '/'), ('LayerNorm/weight', 'LayerNorm/gamma'), ('LayerNorm/bias', 'LayerNorm/beta'), ('weight', 'kernel'))
    if (not os.path.isdir(ckpt_dir)):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # Map a PyTorch parameter name to its TF BERT variable name.
        for (patt, repl) in iter(var_map):
            name = name.replace(patt, repl)
        return f'bert/{name}'

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        # Create a zero-initialized TF variable matching the tensor's dtype/shape.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any([(x in var_name) for x in tensors_to_transpose]):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            # Copy the PyTorch weights into the TF variable, then read back to verify.
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, (model_name.replace('-', '_') + '.ckpt')))
def requires_datasets(obj):
    """Raise ImportError naming *obj* when the `datasets` library is unavailable."""
    # Prefer the function/class __name__; fall back to the instance's class name.
    target = obj.__name__ if hasattr(obj, '__name__') else obj.__class__.__name__
    if not is_datasets_available():
        raise ImportError(DATASETS_IMPORT_ERROR.format(target))
# BUG FIX: the route decorator was garbled into the syntactically invalid bare
# tuple `('/chat', methods=['POST'])`.
# NOTE(review): restored assuming the module-level Flask app object is named
# `app` — confirm against the file header.
@app.route('/chat', methods=['POST'])
def chat():
    """Handle one user turn: compute the agent reply plus a per-turn debug log."""
    logger.info('Entered /chat')
    request_args = req_parser.parse_args()
    logger.info('Input arguments received: %s', str(request_args))
    user_utterance = request_args['new_user_utterance']
    dialog_id = request_args['dialog_id']
    turn_id = request_args['turn_id']
    system_name = request_args['system_name']
    experiment_id = request_args['experiment_id']
    # Sentinel utterance marking the end of the dialog: return the dialog id.
    if (user_utterance == 'FINISHED'):
        return {'agent_utterance': f'{dialog_id} is your dialog ID. Please submit it in the Google Form.', 'log_object': {}}
    dlgItem = connection.compute_next(dialog_id, user_utterance, turn_id, system_name, experiment_id)
    # Assemble the debug log: intermediate SQL, DB results, and latencies.
    log = {}
    log['1st_sql'] = dlgItem.user_target
    log['2nd_sql'] = dlgItem.temp_target
    log['db_results'] = (json.loads(dlgItem.genie_utterance) if (dlgItem.genie_utterance is not None) else None)

    def pp_time(time_statement):
        # Pretty-print the per-stage latency breakdown.
        return ['First classifier: {:.2f}s'.format(time_statement['first_classification']), 'Semantic parser: {:.2f}s'.format(time_statement['semantic_parser']), 'SUQL execution: {:.2f}s'.format(time_statement['suql_execution']), 'Final response: {:.2f}s'.format(time_statement['final_response'])]

    log['Elapsed Time'] = pp_time(dlgItem.time_statement)
    return {'agent_utterance': dlgItem.agent_utterance, 'log_object': log}
def tensor_to_shm(array, data_type='float32', lock=False):
    """Copy a tensor into a multiprocessing shared-memory Array.

    Args:
        array: tensor to copy (flattened into the shared buffer).
        data_type: 'float32' or 'int64' — element type of the shared Array.
        lock: whether the shared Array is protected by a lock.

    Returns:
        The multiprocessing.Array holding a flat copy of *array*.

    Raises:
        ValueError: if data_type is not one of the supported types.
    """
    array1d = array.view(array.numel())
    if (data_type == 'float32'):
        c_type = ctypes.c_float
    elif (data_type == 'int64'):
        # NOTE(review): ctypes.c_long is 32-bit on some platforms (e.g. Windows);
        # presumably this targets LP64 Linux — confirm, else use c_int64.
        c_type = ctypes.c_long
    else:
        # BUG FIX: previously fell through with c_type undefined, crashing
        # with a NameError; fail fast with a clear message instead.
        raise ValueError(f'unsupported data_type: {data_type!r}')
    result = mp.Array(c_type, array.numel(), lock=lock)
    shm_as_tensor(result)[:] = array1d
    return result
def to_dag(node):
    """Wrap *node* in a DiGraph with placeholder leaves/roots for each open slot."""
    graph = nx.DiGraph()
    graph.add_node(node)
    # One leaf placeholder per outgoing slot.
    graph.add_edges_from((node, LeafPlaceHolder()) for _ in range(node.n_next))
    # One root placeholder per incoming slot.
    graph.add_edges_from((RootPlaceHolder(), node) for _ in range(node.n_prev))
    return graph
def test_varlen_string():
    """A var-length string type must round-trip through print-then-parse."""
    char_array = NumpyType('uint8', {'__array__': 'char'})
    string_type = ListType(char_array, {'__array__': 'string'})
    rendered = str(string_type)
    assert str(parser.parse(rendered)) == rendered
def setup_args(current_time):
    """Return an eval arg parser preconfigured for the EmoCause test split.

    `current_time` is accepted for interface compatibility but unused here.
    """
    parser = eval_setupargs()
    defaults = dict(
        task='tasks.emocause',
        datapath=os.path.join(__PATH__, 'data'),
        context_length=-1,
        metrics='default',
        batchsize=8,
        display_examples=True,
        display_add_fields='emotion',
        datatype='test',
    )
    parser.set_defaults(**defaults)
    return parser
class QueryOnTrilineGradFeature(PythonFunction):
    """Custom NNabla function computing the gradient w.r.t. a tri-line
    feature grid from an upstream gradient and query coordinates.

    Forward produces grad_feature; backward propagates through the custom
    CUDA kernels in `triline_feature_cuda`.
    """

    def __init__(self, ctx, min_, max_, boundary_check=False, G=None):
        # min_/max_: coordinate bounds of the grid; G: grid shape (tuple);
        # boundary_check: whether the kernels validate out-of-range queries.
        super(QueryOnTrilineGradFeature, self).__init__(ctx)
        self._min = min_
        self._max = max_
        self._boundary_check = boundary_check
        self._G = G

    def name(self):
        return self.__class__.__name__

    def min_outputs(self):
        return 1

    def setup_impl(self, inputs, outputs):
        # Output grad_feature has shape G + (D,), where D is the trailing
        # dimension of the incoming gradient.
        grad_output = inputs[0]
        D = grad_output.shape[(- 1)]
        outputs[0].reset_shape((self._G + (D,)), True)

    def forward_impl(self, inputs, outputs):
        """Launch the CUDA kernel accumulating grad_feature from grad_output
        at the query positions."""
        grad_feature = outputs[0]
        grad_output = inputs[0]
        query = inputs[1]
        # All leading query dims are flattened into one batch size B.
        batch_sizes = query.shape[:(- 1)]
        B = np.prod(batch_sizes)
        D = grad_output.shape[(- 1)]
        grad_feature_ptr = grad_feature.data.data_ptr(np.float32, self.ctx)
        grad_output_ptr = grad_output.data.data_ptr(np.float32, self.ctx)
        query_ptr = query.data.data_ptr(np.float32, self.ctx)
        # (B * D) * 3 is the kernel's total work-item count (3 lines per query).
        triline_feature_cuda.grad_feature(((B * D) * 3), grad_feature_ptr, grad_output_ptr, query_ptr, self._G, D, self._min, self._max, self._boundary_check, False)

    def backward_impl(self, inputs, outputs, propagate_down, accum):
        """Propagate the gradient of grad_feature back to grad_output (input
        0) and/or query (input 1), honoring propagate_down/accum flags."""
        grad_feature = outputs[0]
        grad_output = inputs[0]
        query = inputs[1]
        batch_sizes = query.shape[:(- 1)]
        B = np.prod(batch_sizes)
        D = grad_output.shape[(- 1)]
        grad_grad_feature_ptr = grad_feature.grad.data_ptr(np.float32, self.ctx)
        grad_output_ptr = grad_output.data.data_ptr(np.float32, self.ctx)
        query_ptr = query.data.data_ptr(np.float32, self.ctx)
        grad_grad_output_ptr = grad_output.grad.data_ptr(np.float32, self.ctx)
        grad_query_ptr = query.grad.data_ptr(np.float32, self.ctx)
        if propagate_down[0]:
            triline_feature_cuda.grad_feature_grad_grad_output(((B * D) * 3), grad_grad_output_ptr, grad_grad_feature_ptr, query_ptr, self._G, D, self._min, self._max, self._boundary_check, accum[0])
        if propagate_down[1]:
            triline_feature_cuda.grad_feature_grad_query(((B * D) * 3), grad_query_ptr, grad_grad_feature_ptr, grad_output_ptr, query_ptr, self._G, D, self._min, self._max, self._boundary_check, accum[1])

    def grad_depends_output_data(self, i, o):
        # Backward never reads the forward output's data.
        return False

    def grad_depends_input_data(self, i, j):
        # Gradient w.r.t. grad_output (i=0) needs the query (j=1);
        # gradient w.r.t. query (i=1) needs both inputs.
        if ((i == 0) and (j == 1)):
            return True
        if (i == 1):
            return True
        return False
class _FunctionCorrelation(torch.autograd.Function):
    """Cost-volume correlation (49 displacements) via inline cupy kernels.

    NOTE(review): this uses the legacy autograd.Function API (instance
    methods, `self.save_for_backward`, `self.intStride`) rather than the
    static `forward`/`backward` with `ctx` — verify it still runs on the
    installed torch version.
    """

    def forward(self, one, two, intStride):
        # rbot0/rbot1: channels-last copies of the inputs padded by
        # 6*intStride on each spatial side, as the kernels expect.
        rbot0 = one.new_zeros([one.shape[0], (one.shape[2] + (6 * intStride)), (one.shape[3] + (6 * intStride)), one.shape[1]])
        rbot1 = one.new_zeros([one.shape[0], (one.shape[2] + (6 * intStride)), (one.shape[3] + (6 * intStride)), one.shape[1]])
        self.intStride = intStride
        one = one.contiguous()
        assert (one.is_cuda == True)
        two = two.contiguous()
        assert (two.is_cuda == True)
        # 49 = 7x7 displacement window; spatial dims are strided down.
        output = one.new_zeros([one.shape[0], 49, int(math.ceil((one.shape[2] / intStride))), int(math.ceil((one.shape[3] / intStride)))])
        if (one.is_cuda == True):
            # Rearrange both inputs into the padded channels-last buffers.
            n = (one.shape[2] * one.shape[3])
            cupy_launch('kernel_Correlation_rearrange', cupy_kernel('kernel_Correlation_rearrange', {'intStride': self.intStride, 'input': one, 'output': rbot0}))(grid=tuple([int((((n + 16) - 1) / 16)), one.shape[1], one.shape[0]]), block=tuple([16, 1, 1]), args=[cupy.int32(n), one.data_ptr(), rbot0.data_ptr()])
            n = (two.shape[2] * two.shape[3])
            cupy_launch('kernel_Correlation_rearrange', cupy_kernel('kernel_Correlation_rearrange', {'intStride': self.intStride, 'input': two, 'output': rbot1}))(grid=tuple([int((((n + 16) - 1) / 16)), two.shape[1], two.shape[0]]), block=tuple([16, 1, 1]), args=[cupy.int32(n), two.data_ptr(), rbot1.data_ptr()])
            # Compute the correlation volume; shared memory holds one
            # channel vector (one.shape[1] floats) per block.
            n = ((output.shape[1] * output.shape[2]) * output.shape[3])
            cupy_launch('kernel_Correlation_updateOutput', cupy_kernel('kernel_Correlation_updateOutput', {'intStride': self.intStride, 'rbot0': rbot0, 'rbot1': rbot1, 'top': output}))(grid=tuple([output.shape[3], output.shape[2], output.shape[0]]), block=tuple([32, 1, 1]), shared_mem=(one.shape[1] * 4), args=[cupy.int32(n), rbot0.data_ptr(), rbot1.data_ptr(), output.data_ptr()])
        elif (one.is_cuda == False):
            raise NotImplementedError()
        self.save_for_backward(one, two, rbot0, rbot1)
        return output

    def backward(self, gradOutput):
        (one, two, rbot0, rbot1) = self.saved_tensors
        gradOutput = gradOutput.contiguous()
        assert (gradOutput.is_cuda == True)
        # Only allocate gradients that are actually requested.
        gradOne = (one.new_zeros([one.shape[0], one.shape[1], one.shape[2], one.shape[3]]) if (self.needs_input_grad[0] == True) else None)
        gradTwo = (one.new_zeros([one.shape[0], one.shape[1], one.shape[2], one.shape[3]]) if (self.needs_input_grad[1] == True) else None)
        if (one.is_cuda == True):
            if (gradOne is not None):
                # The grad kernels are launched once per sample in the batch.
                for intSample in range(one.shape[0]):
                    n = ((one.shape[1] * one.shape[2]) * one.shape[3])
                    cupy_launch('kernel_Correlation_updateGradOne', cupy_kernel('kernel_Correlation_updateGradOne', {'intStride': self.intStride, 'rbot0': rbot0, 'rbot1': rbot1, 'gradOutput': gradOutput, 'gradOne': gradOne, 'gradTwo': None}))(grid=tuple([int((((n + 512) - 1) / 512)), 1, 1]), block=tuple([512, 1, 1]), args=[cupy.int32(n), intSample, rbot0.data_ptr(), rbot1.data_ptr(), gradOutput.data_ptr(), gradOne.data_ptr(), None])
            if (gradTwo is not None):
                for intSample in range(one.shape[0]):
                    n = ((one.shape[1] * one.shape[2]) * one.shape[3])
                    cupy_launch('kernel_Correlation_updateGradTwo', cupy_kernel('kernel_Correlation_updateGradTwo', {'intStride': self.intStride, 'rbot0': rbot0, 'rbot1': rbot1, 'gradOutput': gradOutput, 'gradOne': None, 'gradTwo': gradTwo}))(grid=tuple([int((((n + 512) - 1) / 512)), 1, 1]), block=tuple([512, 1, 1]), args=[cupy.int32(n), intSample, rbot0.data_ptr(), rbot1.data_ptr(), gradOutput.data_ptr(), None, gradTwo.data_ptr()])
        elif (one.is_cuda == False):
            raise NotImplementedError()
        # Third forward argument (intStride) gets no gradient.
        return (gradOne, gradTwo, None)
class ReductionRT(ExecutableOperation):
    """Runtime wrapper that emits, configures, and launches a CUTLASS
    reduction kernel.

    `KernelTemplate` is the device-side entry point; `HostTemplate` exposes
    C helpers to query params size, shared-memory size, and serialize the
    params struct to bytes.
    """
    KernelTemplate = '\nextern "C"\n__global__ void\n${operation_name}(${operation_name}${operation_suffix}::Params params) {\n\n // Dynamic shared memory base pointer\n extern __shared__ int SharedStorageBase[];\n\n // Declare pointer to dynamic shared memory.\n ${operation_name}${operation_suffix}::SharedStorage *shared_storage =\n reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);\n\n ${operation_name}${operation_suffix} op;\n\n op(params, *shared_storage);\n}\n '
    HostTemplate = '\nextern "C" {\n // Get the size of params in bytes\n int ${operation_name}_get_param_size(){\n return sizeof(${operation_name}${operation_suffix}::Params);\n }\n\n // Get the size of dynamic shared memory in bytes\n int ${operation_name}_shared_memory_size() {\n return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));\n }\n\n // Get the params as byte array\n char* ${operation_name}_get_params(${operation_name}${operation_suffix}::Params* params){\n char *bytes = ((char*)(params));\n char *output = new char[sizeof(${operation_name}${operation_suffix}::Params)];\n for (unsigned int i = 0; i < sizeof(${operation_name}${operation_suffix}::Params); i ++)\n output[i] = bytes[i];\n\n return output;\n }\n}\n '

    def __init__(self, operation: ReductionOperation):
        super().__init__(operation)
        self.operation: ReductionOperation = operation
        self.emitter = EmitReductionInstance('_type')
        # Vector width of each thread's access; divides the block's columns.
        self.elements_per_access = self.operation.count
        (self.argument_type, self.epilogue_type) = get_reduction_params(operation.epilogue_functor)
        self.argtype = [ctypes.POINTER(self.argument_type)]

    def emit(self):
        """Emit the C++ source for this reduction instance."""
        return self.emitter.emit(self.operation)

    def plan(self, arguments: ReductionArguments):
        """Compute the launch configuration for the given problem size.

        Grid dims are the problem size divided by the tile shape, rounded up.
        """
        block_shape = [(self.operation.shape.column() // self.elements_per_access), self.operation.shape.row(), 1]
        grid_shape = [(((arguments.problem_size.row + self.operation.shape.row()) - 1) // self.operation.shape.row()), (((arguments.problem_size.column + self.operation.shape.column()) - 1) // self.operation.shape.column()), 1]
        return LaunchConfiguration(grid_shape, block_shape, self.shared_memory_capacity)

    def initialize(self):
        """Raise the kernel's dynamic shared-memory limit to what it needs."""
        (err,) = cuda.cuFuncSetAttribute(self.kernel, attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, value=self.shared_memory_capacity)
        if (err != cuda.CUresult.CUDA_SUCCESS):
            raise RuntimeError('Cuda Error: {}'.format(err))
class FuncEntry:
    """Reference-counted wrapper over a single Z3 function-interpretation
    entry: a tuple of argument values and the value the function maps them to.
    """

    def __init__(self, entry, ctx):
        self.entry = entry
        self.ctx = ctx
        Z3_func_entry_inc_ref(self.ctx.ref(), self.entry)

    def __deepcopy__(self, memo={}):
        # Share the underlying native handle; __init__ bumps its refcount.
        return FuncEntry(self.entry, self.ctx)

    def __del__(self):
        # The context may already be gone during interpreter shutdown.
        if self.ctx.ref() is not None:
            Z3_func_entry_dec_ref(self.ctx.ref(), self.entry)

    def num_args(self):
        """Number of arguments recorded in this entry."""
        return int(Z3_func_entry_get_num_args(self.ctx.ref(), self.entry))

    def arg_value(self, idx):
        """Return argument `idx` as an expression; IndexError when out of range."""
        if idx >= self.num_args():
            raise IndexError
        return _to_expr_ref(Z3_func_entry_get_arg(self.ctx.ref(), self.entry, idx), self.ctx)

    def value(self):
        """Return the mapped-to value of this entry as an expression."""
        return _to_expr_ref(Z3_func_entry_get_value(self.ctx.ref(), self.entry), self.ctx)

    def as_list(self):
        """Return [arg_0, ..., arg_{n-1}, value]."""
        items = [self.arg_value(position) for position in range(self.num_args())]
        items.append(self.value())
        return items

    def __repr__(self):
        return repr(self.as_list())
def histogram(name: str, data: (TensorType | Callable[([], TensorType)]), **kwargs: Any) -> bool:
    """Best-effort write of a TensorBoard histogram summary.

    :param name: summary tag; skipped entirely unless `include_summary(name)`.
    :param data: tensor, or a zero-arg callable producing one (resolved by
        `evaluate_data`).
    :returns: whatever `tf.summary.histogram` returns on success (a bool),
        False when the tag is filtered out or the write fails.
    """
    if include_summary(name):
        try:
            return tf.summary.histogram(name, evaluate_data(data), **kwargs)
        except Exception as e:
            # Summary failures must never crash the caller; log and fall through.
            tf.print(f'''Failed to write tensorboard histogram summary '{name}': {e}''', output_stream=absl.logging.INFO)
    return False
def test_ASGDA_optimizer_decrese():
    """Smoke-test that the ASGDA optimizer decreases the unbiased partial-AUC
    loss over a short training run on two CIFAR-derived binary datasets.

    Requires a CUDA device and the XCurve package.
    """
    from XCurve.AUROC.optimizer import ASGDA
    from XCurve.AUROC.losses.PartialAUROC import UnbiasedPAUCLoss
    hyper_param = {'mini-batch': 1024, 'alpha': 1.0, 'beta': 0.3, 'weight_decay': 1e-05, 'init_lr': 0.001}
    args = edict({'model_type': 'resnet18', 'num_classes': 2, 'pretrained': None})
    model = generate_net(args).cuda()
    for dataset in ['cifar-10-b', 'cifar-100-b']:
        args = load_cfg(dataset)
        batch_size = 32
        (train_set, val_set, test_set) = get_datasets(args)
        (trainloader, valloader, testloader) = get_data_loaders(train_set, val_set, test_set, train_batch_size=batch_size, test_batch_size=batch_size)
        criterion = UnbiasedPAUCLoss(hyper_param['alpha'], hyper_param['beta'], 'cuda')
        # Per-dataset ASGDA hyper-parameters (regularization and buffer size m).
        if (dataset == 'cifar-10-b'):
            hparams = {'k': 1, 'c1': 3, 'c2': 3, 'lam': 0.02, 'nu': 0.02, 'm': 500, 'device': 'cuda'}
        elif (dataset == 'cifar-100-b'):
            hparams = {'k': 1, 'c1': 3, 'c2': 3, 'lam': 0.035, 'nu': 0.035, 'm': 1000, 'device': 'cuda'}
        # Each named parameter group carries its own clipping range.
        optimizer = ASGDA([{'params': model.parameters(), 'name': 'net'}, {'params': [criterion.a, criterion.b], 'clip': (0, 1), 'name': 'ab'}, {'params': criterion.s_n, 'clip': (0, 5), 'name': 'sn'}, {'params': criterion.s_p, 'clip': ((- 4), 1), 'name': 'sp'}, {'params': criterion.lam_b, 'clip': (0, .0), 'name': 'lamn'}, {'params': criterion.lam_a, 'clip': (0, .0), 'name': 'lamp'}, {'params': criterion.g, 'clip': ((- 1), 1), 'name': 'g'}], weight_decay=hyper_param['weight_decay'], hparams=hparams)
        train_loss = []
        for (i, (images, targets)) in enumerate(trainloader):
            (images, targets) = (images.cuda(), targets.cuda().reshape(((- 1), 1)))
            feats = torch.sigmoid(model(images))
            loss = criterion(feats, targets)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss.append(loss.item())
            # Cap the run at 100 batches.
            if (((i + 1) % 100) == 0):
                break
        # Early losses should sum larger than late losses (loss decreased).
        assert (sum(train_loss[:min((len(train_loss) // 2), 10)]) > sum(train_loss[((- min((len(train_loss) // 2), 10)) // 2):])), train_loss
def _format(val: Any, output_format: str = 'standard', errors: str = 'coarse') -> Any:
    """Normalize a single euro-banknote value into a one-element list.

    Null-like inputs map to [nan]. Invalid values raise, pass through, or
    become nan according to `errors` ('raise' / 'ignore' / anything else).
    Valid values are compacted when `output_format` is 'compact' or 'standard'.
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_eu_banknote(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        return [val if errors == 'ignore' else np.nan]
    result: Any = []
    if output_format in {'compact', 'standard'}:
        result = [banknote.compact(val)] + result
    return result
class MMDistributedDataParallel(nn.Module):
    """Single-device distributed wrapper: broadcasts parameters/buffers from
    rank 0 at construction and runs the wrapped module on the current GPU.
    """

    def __init__(self, module, dim=0, broadcast_buffers=True, bucket_cap_mb=25):
        # dim: scatter dimension; bucket_cap_mb: coalescing bucket size (MB).
        super(MMDistributedDataParallel, self).__init__()
        self.module = module
        self.dim = dim
        self.broadcast_buffers = broadcast_buffers
        self.broadcast_bucket_size = ((bucket_cap_mb * 1024) * 1024)
        self._sync_params()

    def _dist_broadcast_coalesced(self, tensors, buffer_size):
        # Broadcast tensors from rank 0 in flattened buckets of at most
        # `buffer_size` bytes, then copy the synced values back in place.
        for tensors in _take_tensors(tensors, buffer_size):
            flat_tensors = _flatten_dense_tensors(tensors)
            dist.broadcast(flat_tensors, 0)
            for (tensor, synced) in zip(tensors, _unflatten_dense_tensors(flat_tensors, tensors)):
                tensor.copy_(synced)

    def _sync_params(self):
        # Sync all state-dict entries (parameters) and, optionally, buffers.
        module_states = list(self.module.state_dict().values())
        if (len(module_states) > 0):
            self._dist_broadcast_coalesced(module_states, self.broadcast_bucket_size)
        if self.broadcast_buffers:
            # NOTE(review): lexicographic string compare of torch.__version__
            # is brittle (e.g. '1.10' < '1.0' is False but '0.4.1' patterns
            # vary) — works for the <1.0 split here, but verify on upgrades.
            if (torch.__version__ < '1.0'):
                buffers = [b.data for b in self.module._all_buffers()]
            else:
                buffers = [b.data for b in self.module.buffers()]
            if (len(buffers) > 0):
                self._dist_broadcast_coalesced(buffers, self.broadcast_bucket_size)

    def scatter(self, inputs, kwargs, device_ids):
        """Scatter inputs/kwargs across the given devices along `self.dim`."""
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def forward(self, *inputs, **kwargs):
        # Scatter targets only the current device, so take element [0].
        (inputs, kwargs) = self.scatter(inputs, kwargs, [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])
class A005843(SloaneSequence):
    """OEIS A005843: the nonnegative even numbers, a(n) = 2*n."""

    def __init__(self):
        # Sequence is indexed starting from n = 0.
        SloaneSequence.__init__(self, offset=0)

    def _repr_(self):
        return 'The even numbers: a(n) = 2n.'

    def _eval(self, n):
        """Return the n-th term, 2*n, as an integer-ring element."""
        return ZZ(n * 2)
def test_lof_values(global_dtype):
    """LOF scores on a fixed 3-point set must equal the closed-form values,
    with and without an explicit contamination setting."""
    X_train = np.asarray([[1, 1], [1, 2], [2, 1]], dtype=global_dtype)
    clf1 = neighbors.LocalOutlierFactor(n_neighbors=2, contamination=0.1, novelty=True).fit(X_train)
    clf2 = neighbors.LocalOutlierFactor(n_neighbors=2, novelty=True).fit(X_train)
    # Hand-derived local-outlier-factor values for this geometry.
    root2 = sqrt(2.0)
    s_0 = 2.0 * root2 / (1.0 + root2)
    s_1 = (1.0 + root2) * (1.0 / (4.0 * root2) + 1.0 / (2.0 + 2.0 * root2))
    expected_train = [s_0, s_1, s_1]
    assert_allclose(-clf1.negative_outlier_factor_, expected_train)
    assert_allclose(-clf2.negative_outlier_factor_, expected_train)
    assert_allclose(-clf1.score_samples([[2.0, 2.0]]), [s_0])
    assert_allclose(-clf2.score_samples([[2.0, 2.0]]), [s_0])
    assert_allclose(-clf1.score_samples([[1.0, 1.0]]), [s_1])
    assert_allclose(-clf2.score_samples([[1.0, 1.0]]), [s_1])
# NOTE(review): "_model" below looks like a truncated decorator line (the
# leading "@register" portion appears lost) — confirm against the original.
_model
def ecaresnet50d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build an ECA-ResNet50-D: ResNet-50 with deep stem (stem_width=32),
    average-pool downsampling, and ECA attention in each bottleneck block.

    :param pretrained: load pretrained weights via `load_pretrained` if True.
    :param num_classes: classifier output size.
    :param in_chans: number of input channels.
    """
    default_cfg = default_cfgs['ecaresnet50d']
    model = ResNet(Bottleneck, [3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, num_classes=num_classes, in_chans=in_chans, block_args=dict(attn_layer='eca'), **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def load_data(fname='../data/CC-MAIN-2018-34-bios.pkl'):
    """Unpickle and return the dataset stored at `fname`.

    WARNING: `pickle.load` can execute arbitrary code from the file — only
    use with trusted data.
    """
    with open(fname, 'rb') as handle:
        return pickle.load(handle)
def test_numba_arraybuilder():
    """ArrayBuilder must keep its attrs identity through a pass-through call."""
    numba = pytest.importorskip('numba')
    builder = ak.ArrayBuilder(attrs=SOME_ATTRS)
    assert builder.attrs is SOME_ATTRS

    def passthrough(array):
        return array

    assert passthrough(builder).attrs is SOME_ATTRS
class DatasetWithTimeContext(StereoHdfDataset):
    """Stereo HDF dataset whose features are augmented with `tau` frames of
    left and right temporal context, zero-padded at sequence boundaries.

    Each output frame is the concatenation
    [frame(t-tau) .. frame(t-1), frame(t), frame(t+1) .. frame(t+tau)],
    so the feature dimension grows by a factor of (2*tau + 1).
    """

    def __init__(self, hdfFile, tau=1, **kwargs):
        # tau: number of context frames on each side; must be positive.
        if (tau <= 0):
            raise ValueError('context parameter tau should be greater than zero')
        self._tau = tau
        super(DatasetWithTimeContext, self).__init__(hdfFile, **kwargs)

    def _collect_single_seq(self, seq_idx):
        """Return the seq_idx-th sequence with time context added, or None
        when seq_idx is out of range."""
        if (seq_idx >= self.num_seqs):
            return None
        originalSeq = super(DatasetWithTimeContext, self)._collect_single_seq(seq_idx)
        inputFeatures = originalSeq.get_data('data')
        (frames, bins) = inputFeatures.shape
        # Sliding windows of the tau previous / next frames.
        leftContext = deque()
        rightContext = deque()
        inFeatWithContext = []
        # Prime the windows for t = 0: left side is all zeros, right side
        # holds frames 1..tau (zero-padded past the end of the sequence).
        for i in range(self._tau):
            leftContext.append(np.zeros(bins))
            if ((i + 1) < frames):
                rightContext.append(inputFeatures[((i + 1), ...)])
            else:
                rightContext.append(np.zeros(bins))
        for t in range(frames):
            f = inputFeatures[(t, ...)]
            newFeature = np.concatenate([np.concatenate(leftContext, axis=0), f, np.concatenate(rightContext, axis=0)], axis=0)
            inFeatWithContext.append(newFeature)
            # Slide both windows one frame forward.
            leftContext.popleft()
            leftContext.append(f)
            rightContext.popleft()
            if (((t + 1) + self._tau) < frames):
                rightContext.append(inputFeatures[(((t + 1) + self._tau), ...)])
            else:
                rightContext.append(np.zeros(bins))
        inputFeatures = np.array(inFeatWithContext)
        # Targets ('classes') are passed through unchanged when present.
        targets = None
        if ('classes' in originalSeq.get_data_keys()):
            targets = originalSeq.get_data('classes')
        return DatasetSeq(seq_idx, inputFeatures, targets)
class BoundarySpace_wtk_g0(BoundarySpace):
    """Space of boundary modular symbols of weight k for Gamma0(level),
    with an optional sign (+1/-1) quotient over the base ring F.
    """

    def __init__(self, level, weight, sign, F):
        level = int(level)
        sign = int(sign)
        weight = int(weight)
        if (sign not in [(- 1), 0, 1]):
            raise ArithmeticError('sign must be an int in [-1,0,1]')
        if (level <= 0):
            raise ArithmeticError('level must be positive')
        BoundarySpace.__init__(self, weight=weight, group=arithgroup.Gamma0(level), sign=sign, base_ring=F)

    def _repr_(self):
        return ('Space of Boundary Modular Symbols for %s of weight %s over %s' % (self.group(), self.weight(), self.base_ring()))

    def _coerce_cusp(self, c):
        """Coerce the cusp `c` into this space, registering it as a new
        generator (or as a zero cusp) the first time it is seen.

        Convention used below: _cusp_index returns -1 for "not seen yet"
        and -2 for a cusp known to map to zero.
        """
        # Odd weight: every boundary symbol vanishes.
        if (self.weight() % 2):
            return self(0)
        i = self._cusp_index(c)
        if (i != (- 1)):
            if (i == (- 2)):
                return self(0)
            return BoundarySpaceElement(self, {i: 1})
        sign = self.sign()
        if (sign != 0):
            # In a signed quotient, -c may already index this cusp class.
            i2 = self._cusp_index((- c))
            if (i2 != (- 1)):
                if (i2 == (- 2)):
                    return self(0)
                else:
                    return BoundarySpaceElement(self, {i2: sign})
        # New cusp: append it as a generator.
        g = self._known_gens
        g.append(c)
        if (sign == (- 1)):
            # In the minus quotient, a cusp equivalent to its negative is
            # killed: record it as a zero cusp and retract the generator.
            if self._is_equiv(c, (- c)):
                self._zero_cusps.append(c)
                del self._known_gens[(- 1)]
                return self(0)
        return BoundarySpaceElement(self, {(len(g) - 1): 1})

    def _is_equiv(self, c1, c2):
        """True when c1 and c2 are Gamma0(level)-equivalent cusps."""
        return c1.is_gamma0_equiv(c2, self.level())