import functools

import tensorflow as tf


def log_t(u, t):
    # Tempered logarithm: log_t(u) = (u^(1-t) - 1) / (1 - t), which reduces
    # to the natural log in the limit t -> 1 (handled via tf.cond).
    def _internal_log_t(u, t):
        return (u ** (1.0 - t) - 1.0) / (1.0 - t)

    return tf.cond(tf.equal(t, 1.0), lambda: tf.math.log(u), functools.partial(_internal_log_t, u, t))
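# A minimal numerical sanity check of the t -> 1 limit above (assumes only
# numpy; the TensorFlow function computes the same quantity on tensors).
import numpy as np

u = 2.5
for t in (0.5, 0.9, 0.999):
    approx = (u ** (1.0 - t) - 1.0) / (1.0 - t)
    print(t, approx, np.log(u))  # approaches ln(u) ~ 0.9163 as t -> 1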
def test_get_constant_for(pool):
    pool.add_constant(42)
    assert pool.get_constant_for(int) == 42
# The original decorator prefixes were elided to a leading "."; assuming
# numba.extending.lower_builtin here.
@numba.extending.lower_builtin('real', ArrayBuilderType, numba.types.Integer)
@numba.extending.lower_builtin('real', ArrayBuilderType, numba.types.Float)
def lower_real(context, builder, sig, args):
    arraybuildertype, xtype = sig.args
    arraybuilderval, xval = args
    proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
    # Convert the argument to float64 before calling into the C library.
    if isinstance(xtype, numba.types.Integer) and xtype.signed:
        x = builder.sitofp(xval, context.get_value_type(numba.types.float64))
    elif isinstance(xtype, numba.types.Integer):
        x = builder.uitofp(xval, context.get_value_type(numba.types.float64))
    elif xtype.bitwidth < 64:
        x = builder.fpext(xval, context.get_value_type(numba.types.float64))
    elif xtype.bitwidth > 64:
        x = builder.fptrunc(xval, context.get_value_type(numba.types.float64))
    else:
        x = xval
    call(context, builder, libawkward.ArrayBuilder_real, (proxyin.rawptr, x))
    return context.get_dummy_value()
class AttentionGRUEncoder(GRUEncoder):

    def __init__(self, n_layers, n_vocab, n_genre, pretrained_w2v, is_update_w2v, dropout, genre_units=5):
        super(AttentionGRUEncoder, self).__init__(n_layers=n_layers, n_vocab=n_vocab, n_genre=n_genre, pretrained_w2v=pretrained_w2v, is_update_w2v=is_update_w2v, dropout=dropout, genre_units=genre_units)
        with self.init_scope():
            self.attn_title = L.Linear(self.base_embedding_layer.n_units, 1)
            self.attn_content = L.Linear(self.base_embedding_layer.n_units, 1)

    def calc_attention(self, xs, ys, attn_linear):
        concat_ys = F.concat(ys, axis=0)
        attn_ys = attn_linear(F.tanh(concat_ys))
        cumsum_ys = self.xp.cumsum(self.xp.array([len(x) for x in xs], dtype=self.xp.int32))
        split_attn_ys = F.split_axis(attn_ys, cumsum_ys[:-1].tolist(), axis=0)
        split_attn_ys_pad = F.pad_sequence(split_attn_ys, padding=-1024)
        attn_softmax = F.softmax(split_attn_ys_pad, axis=1)
        return attn_softmax

    def apply_attention(self, ys, attn_softmax):
        batchsize = len(ys)
        ys_pad = F.pad_sequence(ys, padding=0.0)
        ys_pad_reshape = F.reshape(ys_pad, (-1, ys_pad.shape[-1]))
        attn_softmax_reshape = F.broadcast_to(F.reshape(attn_softmax, (-1, attn_softmax.shape[-1])), ys_pad_reshape.shape)
        attn_hidden = ys_pad_reshape * attn_softmax_reshape
        attn_hidden_reshape = F.reshape(attn_hidden, (batchsize, -1, attn_hidden.shape[-1]))
        return F.sum(attn_hidden_reshape, axis=1)

    def forward(self, genre_xs, gender_xs, title_xs, content_xs, **kwargs):
        embedding = self.base_embedding_layer(title_xs=title_xs, content_xs=content_xs, genre_xs=genre_xs)
        title_exs, content_exs, genre_exs = embedding
        gender_exs = F.stack(gender_xs)
        last_title_h, title_ys = self.title_encoder(None, title_exs)
        last_content_h, content_ys = self.content_encoder(None, content_exs)
        attn_title = self.calc_attention(title_xs, title_ys, self.attn_title)
        attn_title_h = self.apply_attention(title_ys, attn_title)
        attn_content = self.calc_attention(content_xs, content_ys, self.attn_content)
        attn_content_h = self.apply_attention(content_ys, attn_content)
        concat_outputs = F.concat((genre_exs, gender_exs, attn_title_h, attn_content_h))
        return concat_outputs
def main():
    style = {'border': {'color': 'red', 'linewidth': 0.5}}
    world, reward, terminal = setup_mdp()
    ax = plt.figure(num='Original Reward').add_subplot(111)
    P.plot_state_values(ax, world, reward, **style)
    plt.draw()
    trajectories, expert_policy = generate_trajectories(world, reward, terminal)
    ax = plt.figure(num='Expert Trajectories and Policy').add_subplot(111)
    P.plot_stochastic_policy(ax, world, expert_policy, **style)
    for t in trajectories:
        P.plot_trajectory(ax, world, t, lw=5, color='white', alpha=0.025)
    plt.draw()
    reward_maxent = maxent(world, terminal, trajectories)
    ax = plt.figure(num='MaxEnt Reward').add_subplot(111)
    P.plot_state_values(ax, world, reward_maxent, **style)
    plt.draw()
    plt.show()
def polish():
    output_dir = 'data'
    KLEJ = ''  # KLEJ download URL template elided in the source
    path = lambda p: os.path.join(output_dir, p)
    get_data(KLEJ.format('klej_nkjp-ner'), path('KLEJ/NKJP-NER'), 'NKJP-NER')
    get_data(KLEJ.format('klej_cdsc-e'), path('KLEJ/CDSC-E'), 'CDSC-E')
    get_data(KLEJ.format('klej_cdsc-r'), path('KLEJ/CDSC-R'), 'CDSC-R')
    get_data(KLEJ.format('klej_cbd'), path('KLEJ/CBD'), 'CBD')
    get_data(KLEJ.format('klej_polemo2.0-in'), path('KLEJ/POLEMO2.0-IN'), 'POLEMO2.0-IN')
    get_data(KLEJ.format('klej_polemo2.0-out'), path('KLEJ/POLEMO2.0-OUT'), 'POLEMO2.0-OUT')
    get_data(KLEJ.format('klej_dyk'), path('KLEJ/DYK'), 'DYK')
    get_data(KLEJ.format('klej_psc'), path('KLEJ/PSC'), 'PSC')
    get_data(KLEJ.format('klej_ar'), path('KLEJ/ECR'), 'ECR')
    PSE = ''  # second download URL template elided in the source
    get_data(PSE.format('8TAGS'), output_dir, '8TAGS', dataset_dir=os.path.join(output_dir, '8TAGS'))
    get_data(PSE.format('SICK'), output_dir, 'SICK', dataset_dir=os.path.join(output_dir, 'SICK'))
    get_data(PSE.format('WCCRS_HOTELS'), output_dir, 'WCCRS_HOTELS', dataset_dir=os.path.join(output_dir, 'WCCRS_HOTELS'))
    get_data(PSE.format('WCCRS_MEDICINE'), output_dir, 'WCCRS_MEDICINE', dataset_dir=os.path.join(output_dir, 'WCCRS_MEDICINE'))
class Manifolds(Category_over_base_ring):

    def __init__(self, base, name=None):
        if base not in Fields().Topological():
            raise ValueError('base must be a topological field')
        Category_over_base_ring.__init__(self, base, name)

    # "_method" stood in for an elided decorator below; assuming Sage's
    # @cached_method.
    @cached_method
    def super_categories(self):
        return [Sets().Topological()]

    def additional_structure(self):
        return None

    class ParentMethods:

        @cached_method
        def dimension(self):
            pass  # body elided in the source

    class SubcategoryMethods:

        @cached_method
        def Connected(self):
            return self._with_axiom('Connected')

        @cached_method
        def FiniteDimensional(self):
            return self._with_axiom('FiniteDimensional')

        @cached_method
        def Differentiable(self):
            return self._with_axiom('Differentiable')

        @cached_method
        def Smooth(self):
            return self._with_axiom('Smooth')

        @cached_method
        def Analytic(self):
            return self._with_axiom('Analytic')

        @cached_method
        def AlmostComplex(self):
            return self._with_axiom('AlmostComplex')

        @cached_method
        def Complex(self):
            return ComplexManifolds(self.base())._with_axioms(self.axioms())

    class Differentiable(CategoryWithAxiom_over_base_ring):
        pass  # body elided in the source

    class Smooth(CategoryWithAxiom_over_base_ring):

        def extra_super_categories(self):
            return [Manifolds(self.base()).Differentiable()]

    class Analytic(CategoryWithAxiom_over_base_ring):

        def extra_super_categories(self):
            return [Manifolds(self.base()).Smooth()]

    class AlmostComplex(CategoryWithAxiom_over_base_ring):

        def extra_super_categories(self):
            return [Manifolds(self.base()).Smooth()]

    class FiniteDimensional(CategoryWithAxiom_over_base_ring):
        pass  # body elided in the source

    class Connected(CategoryWithAxiom_over_base_ring):
        pass  # body elided in the source
class NeuralOptimizer1(BaseCustomOptimizer):

    def __init__(self, beta1=0.9, decrease_factor=0.1, **kwargs):
        super(NeuralOptimizer1, self).__init__(**kwargs)
        self._beta1 = beta1
        self._decrease_factor = decrease_factor

    def _prepare(self):
        super(NeuralOptimizer1, self)._prepare()
        self._beta1_t = tf.convert_to_tensor(self._beta1, name='beta1')

    def _create_slots(self, var_list):
        for v in var_list:
            self._zeros_slot(v, 'm', self._name)

    def _apply(self, grad, var, indices=None):
        lr = tf.cast(self._learning_rate_tensor, var.dtype.base_dtype)
        m = self.get_slot(var, 'm')
        beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = tf_compat.v1.assign(m, m * beta1_t, use_locking=self._use_locking)
        with tf.control_dependencies([m_t]):
            m_t = self._assign_add(m, updates=m_scaled_g_values, indices=indices)
        m_gathered = self._gather(m_t, indices=indices)
        # Scale the step down where the momentum and gradient signs disagree.
        ones = tf.ones_like(grad)
        update = lr * grad * tf.where(tf.equal(tf.sign(m_gathered), tf.sign(grad)), ones, ones * self._decrease_factor)
        var_update = self._assign_sub(ref=var, updates=update, indices=indices)
        return tf.group(*[var_update, m_t])
def test_module_field_field(static_module_field_mock):
    ref = vr.StaticModuleFieldReference(static_module_field_mock)
    assert ref.field == static_module_field_mock
def test_dict(capture, doc):
    d = m.get_dict()
    assert d == {'key': 'value'}
    with capture:
        d['key2'] = 'value2'
        m.print_dict(d)
    assert capture.unordered == '\n key: key, value=value\n key: key2, value=value2\n '
    assert doc(m.get_dict) == 'get_dict() -> dict'
    assert doc(m.print_dict) == 'print_dict(arg0: dict) -> None'
    assert m.dict_keyword_constructor() == {'x': 1, 'y': 2, 'z': 3}
# Decorator elided to "(scope='module')" in the source; assuming a pytest
# fixture.
@pytest.fixture(scope='module')
def df_null_headers() -> pd.DataFrame:
    df = pd.DataFrame({'': [], np.nan: ['How Google Works'], None: ['Eric Schmidt, Jonathan Rosenberg'], 'N/A': [2014]})
    return df
def cora_pandas_parts(include_nodes):
    if include_nodes:
        nodes = pd.read_csv(cora_content_path, header=None, sep='\t', index_col=0, usecols=range(0, 1433 + 1), dtype=cora_dtypes, na_filter=False)
    else:
        nodes = None
    edges = pd.read_csv(cora_cites_path, header=None, sep='\t', names=['source', 'target'], dtype=int, na_filter=False)
    return (nodes, edges, {})
# Decorator elided to "_function_api('dummy')" in the source; assuming
# nnabla's @parametric_function_api.
@parametric_function_api('dummy')
def dummy_parametric_function(shape, f=10, i=1, s='dummy', fix_parameters=False):
    from nnabla import Variable
    from nnabla.parameter import get_parameter_or_create
    from nnabla.initializer import UniformInitializer
    p1 = get_parameter_or_create('p1', shape, UniformInitializer((-1, 1)))
    p2 = get_parameter_or_create('p2', shape + (1,), UniformInitializer((-1, 1)))
    return Variable(shape)
def print_final_metrics(metrics: TrackingMetrics) -> None:
    print('\n### Final results ###')
    metric_names = metrics.label_metrics.keys()
    print('\nPer-class results:')
    print('\t\t', end='')
    print('\t'.join([m.upper() for m in metric_names]))
    class_names = metrics.class_names
    max_name_length = 7
    for class_name in class_names:
        print_class_name = class_name[:max_name_length].ljust(max_name_length + 1)
        print('%s' % print_class_name, end='')
        for metric_name in metric_names:
            val = metrics.label_metrics[metric_name][class_name]
            print_format = '%f' if np.isnan(val) else metric_name_to_print_format(metric_name)
            print('\t%s' % (print_format % val), end='')
        print()
    print('\nAggregated results:')
    for metric_name in metric_names:
        val = metrics.compute_metric(metric_name, 'all')
        print_format = metric_name_to_print_format(metric_name)
        print('%s\t%s' % (metric_name.upper(), print_format % val))
    print('Eval time: %.1fs' % metrics.eval_time)
    print()
def main():
    exit_success = 0
    exit_failure = 1
    cargs = parse_cmdline()
    if cargs.version:
        print('afl-cov-' + __version__)
        return exit_success
    if cargs.gcov_check or cargs.gcov_check_bin:
        if is_gcov_enabled(cargs):
            return exit_success
        else:
            return exit_failure
    if not check_requirements(cargs):
        return exit_failure
    if cargs.stop_afl:
        return not stop_afl(cargs)
    if not validate_cargs(cargs):
        return exit_failure
    if cargs.validate_args:
        return exit_success
    if cargs.func_search or cargs.line_search:
        return not search_cov(cargs)
    if cargs.background:
        run_in_background()
    if cargs.live:
        is_afl_running(cargs)
    return not process_afl_test_cases(cargs)
def get_shape_nodedict(finaltree, prefix, nodedict):
    nodename = prefix
    nodeval = finaltree['id']
    nodedict[nodeval] = nodename
    treeshape = [nodename]
    if 'children' in finaltree:
        for childid in range(len(finaltree['children'])):
            childshape, nodedict = get_shape_nodedict(finaltree['children'][childid], nodename + str(childid + 1), nodedict)
            treeshape.append(childshape)
    return (treeshape, nodedict)
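# Hypothetical usage sketch of the recursion above: the function walks a
# {'id': ..., 'children': [...]} tree, naming each node by its 1-based path
# from the root prefix.
tree = {'id': 'a', 'children': [{'id': 'b'}, {'id': 'c', 'children': [{'id': 'd'}]}]}
shape, names = get_shape_nodedict(tree, 'T', {})
assert names == {'a': 'T', 'b': 'T1', 'c': 'T2', 'd': 'T21'}
assert shape == ['T', ['T1'], ['T2', ['T21']]]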
def test_TaskSystem_Pickler():
    from returnn.util.task_system import Pickler
    from io import BytesIO
    stream = BytesIO()
    pickler = Pickler(stream)
    obj = {'foo': 'bar'}
    pickler.dump(obj)
def construct_raw_transaction(sender, recipient, nonce, amount, data):
    tx = {
        'nonce': nonce,
        'from': sender,
        'to': recipient,
        'value': Web3.toWei(amount, 'ether'),
        'gas': 2000000,
        'chainId': 10,
        'gasPrice': Web3.toWei('50', 'gwei'),
        'data': data,
    }
    return tx
def tetrad_graph_to_pcalg(g):
    # pcalg adjacency-matrix endpoint encoding: 0 = none, 1 = circle,
    # 2 = arrowhead, 3 = tail.
    endpoint_map = {'NULL': 0, 'CIRCLE': 1, 'ARROW': 2, 'TAIL': 3}
    nodes = g.getNodes()
    p = g.getNumNodes()
    A = np.zeros((p, p), dtype=int)
    for edge in g.getEdges():
        i = nodes.indexOf(edge.getNode1())
        j = nodes.indexOf(edge.getNode2())
        A[j][i] = endpoint_map[edge.getEndpoint1().name()]
        A[i][j] = endpoint_map[edge.getEndpoint2().name()]
    columns_ = []
    for name in nodes:
        columns_.append(str(name))
    return pd.DataFrame(A, columns=columns_)
class TestCaseToAstVisitor(TestCaseVisitor):

    def __init__(self, module_aliases: ns.NamingScope, common_modules: set[str], exec_result: ex.ExecutionResult | None = None) -> None:
        self._module_aliases: ns.NamingScope = module_aliases
        self._common_modules: set[str] = common_modules
        self._exec_result = exec_result
        self._test_case_ast: list[stmt] = []
        self._is_failing_test: bool = False

    def visit_default_test_case(self, test_case: dtc.DefaultTestCase) -> None:
        self._test_case_ast = []
        return_type_trace = None if self._exec_result is None else self._exec_result.proper_return_type_trace
        variables = ns.VariableTypeNamingScope(return_type_trace=return_type_trace)
        for idx, statement in enumerate(test_case.statements):
            store_call_return = True
            if self._exec_result is not None and self._exec_result.get_first_position_of_thrown_exception() == idx:
                store_call_return = False
            statement_visitor = stmt_to_ast.StatementToAstVisitor(self._module_aliases, variables, store_call_return=store_call_return)
            statement.accept(statement_visitor)
            assertion_visitor = ata.PyTestAssertionToAstVisitor(variables, self._module_aliases, self._common_modules, statement_node=statement_visitor.ast_node)
            for assertion in statement.assertions:
                if self.__should_assertion_be_generated(assertion, statement):
                    assertion.accept(assertion_visitor)
                else:
                    self._common_modules.add('pytest')
                    self._is_failing_test = True
            self._test_case_ast.extend(assertion_visitor.nodes)

    def test_case_ast(self) -> list[stmt]:
        return self._test_case_ast

    def is_failing_test(self) -> bool:
        return self._is_failing_test

    # Signature lacks `self`; assuming an elided @staticmethod decorator.
    @staticmethod
    def __should_assertion_be_generated(assertion, statement) -> bool:
        if isinstance(assertion, ass.ExceptionAssertion) and isinstance(statement, statmt.ParametrizedStatement):
            return assertion.exception_type_name in statement.raised_exceptions
        return True
def loss_calc_(y_true, y_pred, gain_type, sigma, N, device):
    # Rank positions (1-based) of each document when sorted by relevance.
    rank_df = pd.DataFrame({'y': y_true, 'doc': np.arange(y_true.shape[0])})
    rank_df = rank_df.sort_values('y').reset_index(drop=True)
    rank_order = rank_df.sort_values('doc').index.values + 1
    pos_pairs_score_diff = 1.0 + torch.exp(-sigma * (y_pred - y_pred.t()))
    y_tensor = torch.tensor(y_true, dtype=torch.float32, device=device).view(-1, 1)
    rel_diff = y_tensor - y_tensor.t()
    pos_pairs = (rel_diff > 0).type(torch.float32)
    neg_pairs = (rel_diff < 0).type(torch.float32)
    Sij = pos_pairs - neg_pairs
    if gain_type == 'exp2':
        gain_diff = torch.pow(2.0, y_tensor) - torch.pow(2.0, y_tensor.t())
    elif gain_type == 'identity':
        gain_diff = y_tensor - y_tensor.t()
    else:
        # The source referenced an undefined `ndcg_gain_in_train` here.
        raise ValueError('NDCG_gain method not supported yet {}'.format(gain_type))
    rank_order_tensor = torch.tensor(rank_order, dtype=torch.float32, device=device).view(-1, 1)
    decay_diff = 1.0 / torch.log2(rank_order_tensor + 1.0) - 1.0 / torch.log2(rank_order_tensor.t() + 1.0)
    loss = 0.5 * sigma * (1 - Sij) * (y_pred - y_pred.t()) + torch.log(pos_pairs_score_diff)
    loss = torch.sum(loss, 1, keepdim=True)
    return loss
def get_supported(version=None, platform=None, impl=None, abi=None):
    supported = []
    python_version = None
    if version is not None:
        python_version = _get_python_version(version)
    interpreter = _get_custom_interpreter(impl, version)
    abis = None
    if abi is not None:
        abis = [abi]
    platforms = None
    if platform is not None:
        platforms = _get_custom_platforms(platform)
    is_cpython = (impl or interpreter_name()) == 'cp'
    if is_cpython:
        supported.extend(cpython_tags(python_version=python_version, abis=abis, platforms=platforms))
    else:
        supported.extend(generic_tags(interpreter=interpreter, abis=abis, platforms=platforms))
    supported.extend(compatible_tags(python_version=python_version, interpreter=interpreter, platforms=platforms))
    return supported
def download_glove(data_dir_path=None):
    if data_dir_path is None:
        data_dir_path = 'very_large_data'
    glove_model_path = data_dir_path + '/glove.6B.' + str(GLOVE_EMBEDDING_SIZE) + 'd.txt'
    if not os.path.exists(glove_model_path):
        glove_zip = data_dir_path + '/glove.6B.zip'
        if not os.path.exists(data_dir_path):
            os.makedirs(data_dir_path)
        if not os.path.exists(glove_zip):
            print('glove file does not exist, downloading from internet')
            glove_url = ''  # download URL elided in the source; glove_url is a stand-in name
            urllib.request.urlretrieve(url=glove_url, filename=glove_zip, reporthook=reporthook)
        print('unzipping glove file')
        zip_ref = zipfile.ZipFile(glove_zip, 'r')
        zip_ref.extractall(data_dir_path)
        zip_ref.close()
def main(args):
    print('Loading models...')
    TOKENIZER_GPT2 = load_tokenizer_for_causal_lm('gpt2')
    MODEL_GPT2 = load_model_for_causal_lm('gpt2', device)
    MODEL_GPT2_XL = load_model_for_causal_lm('gpt2-xl', device)
    print('GPT2 and GPT2-XL models loaded!')
    seq_len = 256
    top_k = 40
    num_batches = int(math.ceil(args.N / args.batch_size))
    new_tot = num_batches * args.batch_size
    generated_samples = []
    scores = defaultdict(list)
    with tqdm(total=new_tot) as pbar:
        for batch in range(num_batches):
            prompts = [TOKENIZER_GPT2.eos_token] * args.batch_size
            inputs = TOKENIZER_GPT2(prompts, return_tensors='pt', padding=True).to(device)
            generated_sequences = MODEL_GPT2_XL.generate(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, max_length=seq_len, do_sample=True, top_k=top_k, top_p=1.0)
            generated_texts = TOKENIZER_GPT2.batch_decode(generated_sequences, skip_special_tokens=True)
            for text in generated_texts:
                perplexity_gpt2_xl = calculate_perplexity(text, MODEL_GPT2_XL, TOKENIZER_GPT2, device)
                perplexity_gpt2 = calculate_perplexity(text, MODEL_GPT2, TOKENIZER_GPT2, device)
                perplexity_gpt2_xl_lower = calculate_perplexity(text.lower(), MODEL_GPT2_XL, TOKENIZER_GPT2, device)
                zlib_entropy = len(zlib.compress(bytes(text, 'utf-8')))
                perplexity_gpt2_xl_window = calculate_perplexity(text.lower(), MODEL_GPT2_XL, TOKENIZER_GPT2, device)
                generated_samples.append(text)
                scores['XL'].append(perplexity_gpt2_xl.cpu())
                scores['SMALL'].append(perplexity_gpt2.cpu())
                scores['ZLIB'].append(zlib_entropy)
                scores['LOWER'].append(perplexity_gpt2_xl_lower.cpu())
                scores['WINDOW'].append(perplexity_gpt2_xl_window.cpu())
            pbar.update(args.batch_size)
    print(len(scores['XL']))
    scores['XL'] = np.asarray(scores['XL'])
    scores['SMALL'] = np.asarray(scores['SMALL'])
    scores['ZLIB'] = np.asarray(scores['ZLIB'])
    scores['LOWER'] = np.asarray(scores['LOWER'])
    scores['WINDOW'] = np.asarray(scores['WINDOW'])
    # Deduplicate generated samples, keeping the score arrays aligned.
    idxs = pd.Index(generated_samples)
    idxs_mask = ~idxs.duplicated()
    print(idxs_mask)
    generated_samples_clean = np.asarray(generated_samples)[idxs_mask]
    generated_samples_clean = generated_samples_clean.tolist()
    scores['XL'] = scores['XL'][idxs_mask]
    scores['SMALL'] = scores['SMALL'][idxs_mask]
    scores['ZLIB'] = scores['ZLIB'][idxs_mask]
    scores['LOWER'] = scores['LOWER'][idxs_mask]
    scores['WINDOW'] = scores['WINDOW'][idxs_mask]
    assert len(generated_samples_clean) == len(scores['XL'])
    assert len(scores['SMALL']) == len(scores['XL'])
    print('Num duplicates:', len(generated_samples) - len(generated_samples_clean))
    metric = np.log(scores['XL'])
    print('top samples by XL perplexity:')
    print_best(metric, generated_samples_clean, 'Sort by perplexity of GPT2-XL', 'PPL-XL', scores['XL'], lower_better=True)
    print_best_to_file(args.outfile, metric, generated_samples_clean, 'Sort by perplexity of GPT2-XL', 'PPL-XL', scores['XL'], lower_better=True)
    print()
    print()
    metric = np.log(scores['XL']) / np.log(scores['SMALL'])
    print('top samples by ratio of XL and SMALL perplexity:')
    print_best(metric, generated_samples_clean, 'Sort by ratio of perplexity of GPT2-XL and GPT2-Small', 'PPL-XL', scores['XL'], 'PPL-SMALL', scores['SMALL'], lower_better=True)
    print_best_to_file(args.outfile, metric, generated_samples_clean, 'Sort by ratio of perplexity of GPT2-XL and GPT2-Small', 'PPL-XL', scores['XL'], 'PPL-SMALL', scores['SMALL'], lower_better=True)
    print()
    print()
    metric = np.log(scores['XL']) / np.log(scores['ZLIB'])
    print('top samples by ratio of XL perplexity and ZLIB entropy:')
    print_best(metric, generated_samples_clean, 'Sort by ratio of XL perplexity and ZLIB entropy', 'PPL-XL', scores['XL'], 'Entropy-Zlib', scores['ZLIB'], lower_better=True)
    print_best_to_file(args.outfile, metric, generated_samples_clean, 'Sort by ratio of XL perplexity and ZLIB entropy', 'PPL-XL', scores['XL'], 'Entropy-Zlib', scores['ZLIB'], lower_better=True)
    print()
    print()
    metric = np.log(scores['XL']) / np.log(scores['LOWER'])
    print('top samples by ratio of perplexity of GPT2-XL on normal and lower-cased sample:')
    print_best(metric, generated_samples_clean, 'Sort by ratio of perplexity of GPT2-XL on normal and lower-cased sample', 'PPL-XL', scores['XL'], 'PPL-XL-Lower', scores['LOWER'], lower_better=True)
    print_best_to_file(args.outfile, metric, generated_samples_clean, 'Sort by ratio of perplexity of GPT2-XL on normal and lower-cased sample', 'PPL-XL', scores['XL'], 'PPL-XL-Lower', scores['LOWER'], lower_better=True)
    print()
    print()
    metric = np.log(scores['WINDOW'])
    print('top samples by minimum XL perplexity across a sliding window of size 50:')
    print_best(metric, generated_samples_clean, 'Sort by minimum perplexity of GPT2-XL on window of size 50', 'PPL-WINDOW', scores['WINDOW'], lower_better=True)
    print_best_to_file(args.outfile, metric, generated_samples_clean, 'Sort by minimum perplexity of GPT2-XL on window of size 50', 'PPL-WINDOW', scores['WINDOW'], lower_better=True)
    print()
    print()
class NeuralNet:

    def __init__(self, game):
        pass

    def train(self, examples):
        pass

    def predict(self, board):
        pass

    def save_checkpoint(self, folder, filename):
        pass

    def load_checkpoint(self, folder, filename):
        pass
def get_bootstrap_dataset_config() -> CN:
    _C = CN()
    _C.DATASET = ''
    _C.RATIO = 0.1
    _C.IMAGE_LOADER = CN(new_allowed=True)
    _C.IMAGE_LOADER.TYPE = ''
    _C.IMAGE_LOADER.BATCH_SIZE = 4
    _C.IMAGE_LOADER.NUM_WORKERS = 4
    _C.IMAGE_LOADER.CATEGORIES = []
    _C.IMAGE_LOADER.MAX_COUNT_PER_CATEGORY = 1000000
    _C.IMAGE_LOADER.CATEGORY_TO_CLASS_MAPPING = CN(new_allowed=True)
    _C.INFERENCE = CN()
    _C.INFERENCE.INPUT_BATCH_SIZE = 4
    _C.INFERENCE.OUTPUT_BATCH_SIZE = 2
    _C.DATA_SAMPLER = CN(new_allowed=True)
    _C.DATA_SAMPLER.TYPE = ''
    _C.DATA_SAMPLER.USE_GROUND_TRUTH_CATEGORIES = False
    _C.FILTER = CN(new_allowed=True)
    _C.FILTER.TYPE = ''
    return _C
class CBNet(CBNetBase):

    def __init__(self, subnet, cb_inplanes, cb_zero_init=True, cb_del_stages=0, cb_num_modules=2, **kwargs):
        super(CBNet, self).__init__()
        self.cb_zero_init = cb_zero_init
        self.cb_del_stages = cb_del_stages
        self.cb_num_modules = cb_num_modules
        assert cb_num_modules >= 2
        self.cb_modules = nn.ModuleList()
        for cb_idx in range(self.cb_num_modules):
            cb_module = subnet(**kwargs)
            if cb_idx > 0:
                cb_module.del_layers(self.cb_del_stages)
            self.cb_modules.append(cb_module)
        self.out_indices = self.cb_modules[0].out_indices
        self.cb_linears = nn.ModuleList()
        self.num_layers = len(self.cb_modules[0].stage_blocks)
        norm_cfg = self.cb_modules[0].norm_cfg
        for i in range(self.num_layers):
            linears = nn.ModuleList()
            if i >= self.cb_del_stages:
                jrange = 4 - i
                for j in range(jrange):
                    linears.append(nn.Sequential(nn.Conv2d(cb_inplanes[i + j + 1], cb_inplanes[i], 1, bias=False), build_norm_layer(norm_cfg, cb_inplanes[i])[1]))
            self.cb_linears.append(linears)

    def init_cb_weights(self):
        if self.cb_zero_init:
            for ls in self.cb_linears:
                for m in ls:
                    if isinstance(m, nn.Sequential):
                        constant_init(m[-1], 0)
                    else:
                        constant_init(m, 0)

    def _forward_cb_feats(self, feats, spatial_info):
        cb_feats = []
        for i in range(self.num_layers):
            if i >= self.cb_del_stages:
                h, w = spatial_info[i]
                feeds = []
                jrange = 4 - i
                for j in range(jrange):
                    tmp = self.cb_linears[i][j](feats[j + i + 1])
                    tmp = F.interpolate(tmp, size=(h, w), mode='nearest')
                    feeds.append(tmp)
                feed = torch.sum(torch.stack(feeds, dim=-1), dim=-1)
            else:
                feed = 0
            cb_feats.append(feed)
        return cb_feats
def get_minified_adata_scrna(adata: AnnData, minified_data_type: MinifiedDataType) -> AnnData:
    if minified_data_type != ADATA_MINIFY_TYPE.LATENT_POSTERIOR:
        raise NotImplementedError(f'Unknown MinifiedDataType: {minified_data_type}')
    all_zeros = csr_matrix(adata.X.shape)
    layers = {layer: all_zeros.copy() for layer in adata.layers}
    bdata = AnnData(X=all_zeros, layers=layers, uns=adata.uns.copy(), obs=adata.obs, var=adata.var, varm=adata.varm, obsm=adata.obsm, obsp=adata.obsp)
    del bdata.uns[_SCVI_UUID_KEY]
    bdata.uns[_ADATA_MINIFY_TYPE_UNS_KEY] = minified_data_type
    return bdata
class ContentChecker(object):

    def feed(self, block):
        return

    def is_valid(self):
        return True

    def report(self, reporter, template):
        return
def configure_pipeline(cfg: DictConfig) -> Pipeline:
    if cfg.model == 'flat':
        classifier = configure_flat[cfg.classifier]
        classifier.set_params(**delete_non_hyperparameters(cfg))
    else:
        local_classifier = configure_flat[cfg.classifier]
        local_classifier.set_params(**delete_non_hyperparameters(cfg))
        local_classifier.set_params(n_jobs=1)
        classifier = configure_hierarchical[cfg.model]
        classifier.set_params(local_classifier=local_classifier, n_jobs=cfg.n_jobs, verbose=30)
    pipeline = Pipeline([('count', CountVectorizer()), ('tfidf', TfidfTransformer()), ('classifier', classifier)])
    return pipeline
def save_checkpoint(args, state, is_best, filename='checkpoint.pth.tar'):
    savedir = args.snapshot_dir
    if not os.path.exists(savedir):
        os.makedirs(savedir)
    savepath = os.path.join(savedir, filename)
    torch.save(state, savepath)
    if is_best:
        shutil.copyfile(savepath, os.path.join(savedir, 'model_best.pth.tar'))
def item_frequency(data_tr, power):
    item_counts = {}
    item_population = set([])
    for u, i, _ in data_tr:
        item_counts[i] = 1 if i not in item_counts else item_counts[i] + 1
        item_population.add(i)
    item_population = list(item_population)
    counts = [item_counts[v] for v in item_population]
    count_sum = sum(counts) * 1.0
    p_item_unormalized = [np.power(c / count_sum, power) for c in counts]
    p_item_sum = sum(p_item_unormalized)
    p_item = [f / p_item_sum for f in p_item_unormalized]
    return (item_population, p_item)
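# Hypothetical usage sketch of the sampling distribution above: a power < 1
# flattens the empirical item-frequency distribution, as in word2vec-style
# negative sampling.
import numpy as np

data_tr = [(0, 'a', 1), (1, 'a', 1), (2, 'a', 1), (3, 'b', 1)]
items, p_item = item_frequency(data_tr, power=0.75)
assert abs(sum(p_item) - 1.0) < 1e-9
sampled = np.random.choice(items, size=10, p=p_item)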
def main_train():
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    x1 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    x2 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    x3 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    x5 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    is_training = tf.placeholder_with_default(False, shape=())
    PSNR_0 = cal_PSNR(x2, x5)
    x1to2 = warp_img(tf.shape(x2)[0], x2, x1, False)
    x3to2 = warp_img(tf.shape(x2)[0], x2, x3, True)
    FlowLoss_1 = cal_MSE(x1to2, x2)
    FlowLoss_2 = cal_MSE(x3to2, x2)
    flow_loss = FlowLoss_1 + FlowLoss_2
    x2_enhanced = net_MFCNN.network(x1to2, x2, x3to2, is_training)
    MSE = cal_MSE(x2_enhanced, x5)
    PSNR = cal_PSNR(x2_enhanced, x5)
    delta_PSNR = PSNR - PSNR_0
    OptimizeLoss_1 = flow_loss + ratio_small * MSE
    OptimizeLoss_2 = ratio_small * flow_loss + MSE
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        Training_step1 = tf.train.AdamOptimizer(lr_ori).minimize(OptimizeLoss_1)
        Training_step2 = tf.train.AdamOptimizer(lr_ori).minimize(OptimizeLoss_2)
    tf.summary.scalar('PSNR improvement', delta_PSNR)
    tf.summary.scalar('PSNR before enhancement', PSNR_0)
    tf.summary.scalar('PSNR after enhancement', PSNR)
    tf.summary.scalar('MSE loss of motion compensation', flow_loss)
    tf.summary.scalar('MSE loss of final quality enhancement', MSE)
    tf.summary.scalar('MSE loss for training step1 (mainly MC-subnet)', OptimizeLoss_1)
    tf.summary.scalar('MSE loss for training step2 (mainly QE-subnet)', OptimizeLoss_2)
    tf.summary.image('cmp', x2)
    tf.summary.image('enhanced', x2_enhanced)
    tf.summary.image('raw', x5)
    tf.summary.image('x1to2', x1to2)
    tf.summary.image('x3to2', x3to2)
    summary_writer = tf.summary.FileWriter(dir_model, sess.graph)
    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=None)
    num_params = 0
    for variable in tf.trainable_variables():
        shape = variable.get_shape()
        num_params += reduce(mul, [dim.value for dim in shape], 1)
    print('# num of parameters: %d #' % num_params)
    file_object.write('# num of parameters: %d #\n' % num_params)
    file_object.flush()
    stack_name = os.path.join(dir_stack, 'stack_tra_pre_*')
    num_TrainingStack = len(glob.glob(stack_name))
    stack_name = os.path.join(dir_stack, 'stack_val_pre_*')
    num_ValidationStack = len(glob.glob(stack_name))
    saver_res = tf.train.Saver()
    saver_res.restore(sess, model_res_path)
    print('successfully restore model %d!' % (int(res_index) + 1))
    file_object.write('successfully restore model %d!\n' % (int(res_index) + 1))
    file_object.flush()
    print('##### Start running! #####')
    num_TrainingBatch_count = 0
    for ite_epoch in range(epoch_step2):
        for ite_stack in range(num_TrainingStack):
            if ite_epoch == 0 and ite_stack == 0:
                pre_list, cmp_list, sub_list, raw_list = load_stack('tra', ite_stack)
            num_batch = int(len(pre_list) / BATCH_SIZE)
            for ite_batch in range(num_batch):
                print('\rstep 2 - epoch %2d/%2d - training stack %2d/%2d - batch %3d/%3d' % (ite_epoch + 1, epoch_step2, ite_stack + 1, num_TrainingStack, ite_batch + 1, num_batch), end='')
                start_index = ite_batch * BATCH_SIZE
                next_start_index = (ite_batch + 1) * BATCH_SIZE
                Training_step2.run(session=sess, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: True})
                num_TrainingBatch_count += 1
                if (ite_batch + 1) == int(num_batch / 2) or (ite_batch + 1) == num_batch:
                    summary, delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch = sess.run([summary_op, delta_PSNR, PSNR_0, flow_loss, MSE], feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: False})
                    summary_writer.add_summary(summary, num_TrainingBatch_count + (int(res_index) + 21) * num_batch)
                    print('\rstep 2 - epoch %2d - imp PSNR: %.3f - ori PSNR: %.3f - MSE loss of MC: %.5f - MSE loss of QE: %.8f' % (ite_epoch + int(res_index) + 1, delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch))
                    file_object.write('step 2 - epoch %2d - imp PSNR: %.3f - ori PSNR: %.3f - MSE loss of MC: %.5f - MSE loss of QE: %.8f\n' % (ite_epoch + int(res_index) + 1, delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch))
                    file_object.flush()
        CheckPoint_path = os.path.join(dir_model, 'model_step2.ckpt')
        saver.save(sess, CheckPoint_path, global_step=ite_epoch + int(res_index) + 1)
        sum_improved_PSNR = 0
        num_patch_count = 0
        for ite_stack in range(num_ValidationStack):
            pre_list, cmp_list, sub_list, raw_list = [], [], [], []
            gc.collect()
            pre_list, cmp_list, sub_list, raw_list = load_stack('val', ite_stack)
            gc.collect()
            num_batch = int(len(pre_list) / BATCH_SIZE)
            for ite_batch in range(num_batch):
                print('\rstep 2 - epoch %2d/%2d - validation stack %2d/%2d ' % (ite_epoch + 1 + int(res_index) + 1, epoch_step2 + int(res_index) + 1, ite_stack + 1, num_ValidationStack), end='')
                start_index = ite_batch * BATCH_SIZE
                next_start_index = (ite_batch + 1) * BATCH_SIZE
                delta_PSNR_batch = sess.run(delta_PSNR, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: False})
                sum_improved_PSNR += delta_PSNR_batch * BATCH_SIZE
                num_patch_count += BATCH_SIZE
        if num_patch_count != 0:
            print('\n### imp PSNR by model after step 2 - epoch %2d/%2d: %.3f ###\n' % (ite_epoch + 1 + int(res_index) + 1, epoch_step2 + int(res_index) + 1, sum_improved_PSNR / num_patch_count))
            file_object.write('### imp PSNR by model after step 2 - epoch %2d/%2d: %.3f ###\n' % (ite_epoch + 1, epoch_step2 + int(res_index) + 1, sum_improved_PSNR / num_patch_count))
            file_object.flush()
def test_persistDockerImage2():
    designerUrl = designerIp + '/dockerimage'
    headers = {'Content-Type': 'application/json'}
    r = requests.post(designerUrl, data=json.dumps(data.test201), headers=headers)
    assert r.status_code == 200
def exec_bfs_compact(G, workers, calcUntilLayer):
    futures = {}
    degreeList = {}
    t0 = time()
    vertices = G.keys()
    parts = workers
    chunks = partition(vertices, parts)
    logging.info('Capturing larger degree...')
    maxDegree = 0
    for v in vertices:
        if len(G[v]) > maxDegree:
            maxDegree = len(G[v])
    logging.info('Larger degree captured')
    with ProcessPoolExecutor(max_workers=workers) as executor:
        part = 1
        for c in chunks:
            job = executor.submit(getCompactDegreeListsVertices, G, c, maxDegree, calcUntilLayer)
            futures[job] = part
            part += 1
        for job in as_completed(futures):
            dl = job.result()
            v = futures[job]
            degreeList.update(dl)
    logging.info('Saving degreeList on disk...')
    saveVariableOnDisk(degreeList, 'compactDegreeList')
    t1 = time()
    logging.info('Execution time - BFS: {}m'.format((t1 - t0) / 60))
    return
def test_reshape_behavior():
    xp = _NumPyAPIWrapper()
    X = xp.asarray([[1, 2, 3], [3, 4, 5]])
    X_no_copy = xp.reshape(X, (-1,), copy=False)
    assert X_no_copy.base is X
    X_copy = xp.reshape(X, (6, 1), copy=True)
    assert X_copy.base is not X.base
    with pytest.raises(TypeError, match='shape must be a tuple'):
        xp.reshape(X, -1)
def prepare_hypothesis_settings(database: str | None = None, deadline: int | NotSet | None = None, derandomize: bool | None = None, max_examples: int | None = None, phases: list[hypothesis.Phase] | None = None, report_multiple_bugs: bool | None = None, suppress_health_check: list[hypothesis.HealthCheck] | None = None, verbosity: hypothesis.Verbosity | None = None) -> hypothesis.settings:
    import hypothesis
    from hypothesis.database import DirectoryBasedExampleDatabase, InMemoryExampleDatabase
    kwargs = {key: value for key, value in (('derandomize', derandomize), ('max_examples', max_examples), ('phases', phases), ('report_multiple_bugs', report_multiple_bugs), ('suppress_health_check', suppress_health_check), ('verbosity', verbosity)) if value is not None}
    if deadline is not None:
        if isinstance(deadline, NotSet):
            kwargs['deadline'] = None
        else:
            kwargs['deadline'] = deadline
    if database is not None:
        if database.lower() == 'none':
            kwargs['database'] = None
        elif database == HYPOTHESIS_IN_MEMORY_DATABASE_IDENTIFIER:
            kwargs['database'] = InMemoryExampleDatabase()
        else:
            kwargs['database'] = DirectoryBasedExampleDatabase(database)
    kwargs.setdefault('deadline', DEFAULT_DEADLINE)
    return hypothesis.settings(print_blob=False, **kwargs)
class PointPillar(Detector3DTemplate):

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)
        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss()
            ret_dict = {'loss': loss}
            return ret_dict, tb_dict, disp_dict
        else:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

    def get_training_loss(self):
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {'loss_rpn': loss_rpn.item(), **tb_dict}
        loss = loss_rpn
        return loss, tb_dict, disp_dict
class ElementWithLabel:

    def __init__(self, element, label):
        self.element = element
        self.label = label

    def _latex_(self):
        return latex(self.label)

    def __str__(self):
        return str(self.label)

    def __repr__(self):
        return repr(self.label)

    def __hash__(self):
        return hash(self.element)

    def __eq__(self, other):
        if not (isinstance(self, ElementWithLabel) and isinstance(other, ElementWithLabel)):
            return False
        return self.element == other.element and self.label == other.label

    def __ne__(self, other):
        return not (self == other)
def dbladd(A: dace.float64[1000, 1000], B: dace.float64[1000, 1000]):
    dbl = B
    return A + dbl * B
class Human:

    def __init__(self, name: str, number: int | float) -> None:
        self._name = name
        self._number = number

    def __str__(self):
        return super().__str__()

    def get_name(self) -> str:
        return self._name

    def get_number(self) -> int | float:
        return self._number

    def static_state(self) -> float:
        global static_state
        static_state += 1
        return static_state * self._number
def configure_output(options):
    output_screen = options.get('output_screen', True)
    output_log_name = options.get('output_log_name', None)
    output.set_output(filename=output_log_name, quiet=not output_screen, combined=output_screen and output_log_name is not None)
def test_enm_6():
    SBP_enm = enm.Enm(fname, sparse=True)
    import mdtraj as md
    traj_mode = SBP_enm.get_mode_traj(6)
    traj_mode.save_pdb('%s/enm_14.test.pdb' % outdir)
    comp('%s/enm_14.test.pdb' % refdir)
class FC(nn.Module):

    def __init__(self, in_features, out_features, NL='relu'):
        super(FC, self).__init__()
        self.fc = nn.Linear(in_features, out_features)
        if NL == 'relu':
            self.relu = nn.ReLU(inplace=True)
        elif NL == 'prelu':
            self.relu = nn.PReLU()
        else:
            self.relu = None

    def forward(self, x):
        x = self.fc(x)
        if self.relu is not None:
            x = self.relu(x)
        return x
class onlyOn(object):

    def __init__(self, device_type):
        self.device_type = device_type

    def __call__(self, fn):
        # "(fn)" in the source suggests an elided @wraps(fn) decorator;
        # assuming functools.wraps.
        @functools.wraps(fn)
        def only_fn(slf, device, *args, **kwargs):
            if self.device_type != slf.device_type:
                reason = 'Only runs on {0}'.format(self.device_type)
                raise unittest.SkipTest(reason)
            return fn(slf, device, *args, **kwargs)

        return only_fn
def trim_sigfig(x: float, n: int) -> float:
    # Round x to n significant figures.
    assert n == int(n)
    magnitude = int(np.ceil(np.log10(np.abs(x))))
    scale = 10 ** (magnitude - n)
    return np.round(x / scale) * scale
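# Worked example of the rounding above: for x = 123.456 and n = 2, the
# magnitude is ceil(log10(123.456)) = 3, the scale is 10^(3-2) = 10, and
# round(12.3456) * 10 = 120.0.
assert trim_sigfig(123.456, 2) == 120.0
assert abs(trim_sigfig(0.04567, 2) - 0.046) < 1e-12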
def test_maxpool_agg_constructor_1():
    agg = MaxPoolingAggregator(output_dim=4, bias=True, act=lambda x: x + 1)
    assert agg.output_dim == 4
    assert agg.hidden_dim == 4
    assert agg.has_bias
    assert agg.act(2) == 3
def test_crt():
    assert crt([0, 1, 2, 4], [2, 3, 4, 5]) == 34
    assert crt([3, 5], [6, 21]) is None
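# A minimal sketch of a crt(residues, moduli) consistent with the test above
# (an assumption; the tested implementation may differ). It folds the
# congruences pairwise, handles non-coprime moduli via the gcd condition, and
# returns None when the system is inconsistent. Requires Python 3.8+ for
# pow(x, -1, m).
from math import gcd

def crt(residues, moduli):
    a, m = 0, 1
    for b, n in zip(residues, moduli):
        g = gcd(m, n)
        if (b - a) % g != 0:
            return None  # incompatible congruences
        lcm = m // g * n
        t = ((b - a) // g * pow(m // g, -1, n // g)) % (n // g)
        a, m = (a + m * t) % lcm, lcm
    return a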
def PbLe(args, k):
    _z3_check_cint_overflow(k, 'k')
    ctx, sz, _args, _coeffs, args = _pb_args_coeffs(args)
    return BoolRef(Z3_mk_pble(ctx.ref(), sz, _args, _coeffs, k), ctx)
def eval_sl2z_word(w):
    mat = [Lm, Rm]
    w0 = Idm
    w1 = w
    return w0 * prod((mat[a[0]] ** a[1] for a in w1), Idm)
# Decorator prefix elided to "." in the source; assuming pytest.mark.
@pytest.mark.parametrize('checked', [True, False])
def test_write_label_html(checked):
    name = 'LogisticRegression'
    tool_tip = 'hello-world'
    with closing(StringIO()) as out:
        _write_label_html(out, name, tool_tip, checked=checked)
        html_label = out.getvalue()
    p = '<label for="sk-estimator-id-[0-9]*" class="sk-toggleable__label (fitted)? sk-toggleable__label-arrow ">LogisticRegression'
    re_compiled = re.compile(p)
    assert re_compiled.search(html_label)
    assert html_label.startswith('<div class="sk-label-container">')
    assert '<pre>hello-world</pre>' in html_label
    if checked:
        assert 'checked>' in html_label
def write_json_to_file(json_object, json_file, mode='w', encoding='utf-8'):
    with open(json_file, mode, encoding=encoding) as outfile:
        json.dump(json_object, outfile, indent=4, sort_keys=True, ensure_ascii=False)
class DeploymentConfig(object):

    def __init__(self, num_clones=1, clone_on_cpu=False, replica_id=0, num_replicas=1, num_ps_tasks=0, worker_job_name='worker', ps_job_name='ps'):
        if num_replicas > 1:
            if num_ps_tasks < 1:
                raise ValueError('When using replicas num_ps_tasks must be positive')
        if num_replicas > 1 or num_ps_tasks > 0:
            if not worker_job_name:
                raise ValueError('Must specify worker_job_name when using replicas')
            if not ps_job_name:
                raise ValueError('Must specify ps_job_name when using parameter server')
        if replica_id >= num_replicas:
            raise ValueError('replica_id must be less than num_replicas')
        self._num_clones = num_clones
        self._clone_on_cpu = clone_on_cpu
        self._replica_id = replica_id
        self._num_replicas = num_replicas
        self._num_ps_tasks = num_ps_tasks
        self._ps_device = '/job:' + ps_job_name if num_ps_tasks > 0 else ''
        self._worker_device = '/job:' + worker_job_name if num_ps_tasks > 0 else ''

    def num_clones(self):
        return self._num_clones

    def clone_on_cpu(self):
        return self._clone_on_cpu

    def replica_id(self):
        return self._replica_id

    def num_replicas(self):
        return self._num_replicas

    def num_ps_tasks(self):
        return self._num_ps_tasks

    def ps_device(self):
        return self._ps_device

    def worker_device(self):
        return self._worker_device

    def caching_device(self):
        if self._num_ps_tasks > 0:
            return lambda op: op.device
        else:
            return None

    def clone_device(self, clone_index):
        if clone_index >= self._num_clones:
            raise ValueError('clone_index must be less than num_clones')
        device = ''
        if self._num_ps_tasks > 0:
            device += self._worker_device
        if self._clone_on_cpu:
            device += '/device:CPU:0'
        elif self._num_clones > 1:
            device += '/device:GPU:%d' % clone_index
        return device

    def clone_scope(self, clone_index):
        if clone_index >= self._num_clones:
            raise ValueError('clone_index must be less than num_clones')
        scope = ''
        if self._num_clones > 1:
            scope = 'clone_%d' % clone_index
        return scope

    def optimizer_device(self):
        if self._num_ps_tasks > 0 or self._num_clones > 0:
            return self._worker_device + '/device:CPU:0'
        else:
            return ''

    def inputs_device(self):
        device = ''
        if self._num_ps_tasks > 0:
            device += self._worker_device
        device += '/device:CPU:0'
        return device  # return was missing in the flattened source

    def variables_device(self):
        device = ''
        if self._num_ps_tasks > 0:
            device += self._ps_device
        device += '/device:CPU:0'

        class _PSDeviceChooser(object):
            # Round-robin assignment of Variable ops across ps tasks.

            def __init__(self, device, tasks):
                self._device = device
                self._tasks = tasks
                self._task = 0

            def choose(self, op):
                if op.device:
                    return op.device
                node_def = op if isinstance(op, tf.NodeDef) else op.node_def
                if node_def.op == 'Variable':
                    t = self._task
                    self._task = (self._task + 1) % self._tasks
                    d = '%s/task:%d' % (self._device, t)
                    return d
                else:
                    return op.device

        if not self._num_ps_tasks:
            return device
        else:
            chooser = _PSDeviceChooser(device, self._num_ps_tasks)
            return chooser.choose
# Decorator prefix elided to "." in the source; assuming pytest.mark.
@pytest.mark.parametrize('data, lower_bound, upper_bound', [(np.geomspace(0.1, 1, 5), 5, 6), (-np.geomspace(0.1, 1, 10), 7, 8), (np.linspace(0, 1, 5), 0.9, 1.1), ([1, 2, 5, 10, 20, 50], 20, 40)])
def test_inverval_max_min_ratio(data, lower_bound, upper_bound):
    assert lower_bound < _interval_max_min_ratio(data) < upper_bound
def test_ListOffsetArray_RecordArray_NumpyArray():
    v2a = ak.contents.listoffsetarray.ListOffsetArray(ak.index.Index(np.array([1, 4, 4, 6], np.int64)), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7]))], ['nest']))
    resultv2 = v2a[np.array([1, 2], np.int64)]
    assert to_list(resultv2) == [[], [{'nest': 4.4}, {'nest': 5.5}]]
    assert v2a.to_typetracer()[np.array([1, 2], np.int64)].form == resultv2.form
def rouge_l_sentence_level(evaluated_sentences, reference_sentences):
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise ValueError('Collections must contain at least 1 sentence.')
    reference_words = _split_into_words(reference_sentences)
    evaluated_words = _split_into_words(evaluated_sentences)
    m = len(reference_words)
    n = len(evaluated_words)
    lcs = _len_lcs(evaluated_words, reference_words)
    return _f_p_r_lcs(lcs, m, n)
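# A hypothetical sketch of the helpers assumed above. _len_lcs is the classic
# longest-common-subsequence DP; _f_p_r_lcs is the standard ROUGE-L F-score
# with beta = P/R.
def _len_lcs(x, y):
    table = [[0] * (len(y) + 1) for _ in range(len(x) + 1)]
    for i in range(1, len(x) + 1):
        for j in range(1, len(y) + 1):
            if x[i - 1] == y[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[len(x)][len(y)]

def _f_p_r_lcs(llcs, m, n):
    r_lcs = llcs / m
    p_lcs = llcs / n
    beta = p_lcs / (r_lcs + 1e-12)
    return ((1 + beta ** 2) * r_lcs * p_lcs) / (r_lcs + beta ** 2 * p_lcs + 1e-12)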
class SKLearnEmbedder(BaseEstimator):

    def __init__(self, embedder=None, pass_input_space=False):
        super(BaseEstimator, self).__init__()
        self.embedder = embedder
        self.pass_input_space = pass_input_space

    def fit(self, X, y):
        self.embedder.fit(X, y)

    def fit_transform(self, X, y):
        if self.pass_input_space:
            result = self.embedder.fit_transform(X, y)
        else:
            result = self.embedder.fit_transform(y)
        return (X, result)
def test_batch_norm():
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):

        def __init__(self):
            super().__init__()
            self.bn = rf.BatchNorm(in_dim, use_mask=False)

        def __call__(self, out: Tensor) -> Tensor:
            out = self.bn(out)
            return out

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, lambda *, epoch, step: _Net(), _forward_step)
class VertexFeatureEmbedder(nn.Module):

    def __init__(self, num_vertices: int, feature_dim: int, embed_dim: int, train_features: bool = False):
        super(VertexFeatureEmbedder, self).__init__()
        if train_features:
            self.features = nn.Parameter(torch.Tensor(num_vertices, feature_dim))
        else:
            self.register_buffer('features', torch.Tensor(num_vertices, feature_dim))
        self.embeddings = nn.Parameter(torch.Tensor(feature_dim, embed_dim))
        self.reset_parameters()

    # "_grad()" in the source suggests an elided @torch.no_grad() decorator.
    @torch.no_grad()
    def reset_parameters(self):
        self.features.zero_()
        self.embeddings.zero_()

    def forward(self) -> torch.Tensor:
        return normalize_embeddings(torch.mm(self.features, self.embeddings))

    @torch.no_grad()
    def load(self, fpath: str):
        with PathManager.open(fpath, 'rb') as hFile:
            data = pickle.load(hFile)
            for name in ['features', 'embeddings']:
                if name in data:
                    getattr(self, name).copy_(torch.tensor(data[name]).float().to(device=getattr(self, name).device))
# Decorator prefixes elided to "." in the source; assuming pytest.mark.
@pytest.mark.parametrize('observation_shape', [(4, 84, 84)])
@pytest.mark.parametrize('action_size', [2])
@pytest.mark.parametrize('discrete_action', [False, True])
def test_pixel_encoder_factory(observation_shape: Sequence[int], action_size: int, discrete_action: bool) -> None:
    factory = PixelEncoderFactory()
    encoder = factory.create(observation_shape)
    assert isinstance(encoder, PixelEncoder)
    encoder = factory.create_with_action(observation_shape, action_size, discrete_action)
    assert isinstance(encoder, PixelEncoderWithAction)
    assert encoder._discrete_action == discrete_action
    assert factory.get_type() == 'pixel'
    PixelEncoderFactory.deserialize(factory.serialize())
class ForeignKeyConstraint:
    imported_key_cascade = '0'
    imported_key_restrict = '1'
    imported_key_set_null = '2'
    imported_key_no_action = '3'

    def __init__(self, child: Table, name: str, delete_rule: str, update_rule: str):
        self.name = name.replace("'", '')
        self.delete_rule = delete_rule
        self.update_rule = update_rule
        self.parent_columns = []
        self.child_columns = []
        self.parent_table = None
        self.child_table = child

    def add_parent_column(self, column):
        if column is not None:
            self.parent_columns.append(column)
            self.parent_table = column.table

    def add_child_column(self, column):
        if column is not None:
            self.child_columns.append(column)

    def get_parent_table(self):
        return self.parent_table

    def get_child_table(self):
        return self.child_table

    def is_cascade_on_delete(self):
        return self.delete_rule == self.imported_key_cascade

    def is_restrict_delete(self):
        return self.delete_rule == self.imported_key_no_action or self.delete_rule == self.imported_key_restrict

    def is_null_on_delete(self):
        return self.delete_rule == self.imported_key_set_null

    def get_delete_rule_name(self):
        if self.delete_rule == self.imported_key_cascade:
            return 'Cascade on delete'
        elif self.delete_rule == self.imported_key_restrict or self.delete_rule == self.imported_key_no_action:
            return 'Restrict delete'
        elif self.delete_rule == self.imported_key_set_null:
            return 'Null on delete'
        else:
            return ''

    def get_delete_rule_description(self):
        if self.delete_rule == self.imported_key_cascade:
            return 'Cascade on delete:\nDeletion of parent deletes child'
        elif self.delete_rule == self.imported_key_restrict or self.delete_rule == self.imported_key_no_action:
            return 'Restrict delete:\nParent cannot be deleted if children exist'
        elif self.delete_rule == self.imported_key_set_null:
            return 'Null on delete:\nForeign key to parent set to NULL when parent deleted'
        else:
            return ''

    def get_delete_rule_alias(self):
        if self.delete_rule == self.imported_key_cascade:
            return 'C'
        elif self.delete_rule == self.imported_key_restrict or self.delete_rule == self.imported_key_no_action:
            return 'R'
        elif self.delete_rule == self.imported_key_set_null:
            return 'N'
        else:
            return ''


# Module-level helper (it takes a list of tables, not self).
def get_all_foreign_key_constraints(tables):
    constraints = []
    for table in tables:
        constraints.extend(table.get_foreign_keys())
    return constraints
def __setstate__(state):
    g = globals()
    for k, v in state.items():
        g['_sset_' + _state_vars[k]](k, g[k], v)
    return state
def infer_trainer_type(trainer_type):
    if trainer_type == 'si':
        return TrainerTypes.SILOG
    if trainer_type == 'silog_chamfer':
        return TrainerTypes.SILOG_CHAMFER
class LSQUnivariateSpline(UnivariateSpline):

    def __init__(self, x, y, t, w=None, bbox=[None] * 2, k=3, ext=0, check_finite=False):
        x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None, ext, check_finite)
        if not np.all(diff(x) >= 0.0):
            raise ValueError('x must be increasing')
        xb = bbox[0]
        xe = bbox[1]
        if xb is None:
            xb = x[0]
        if xe is None:
            xe = x[-1]
        t = concatenate(([xb] * (k + 1), t, [xe] * (k + 1)))
        n = len(t)
        if not np.all((t[k + 1:n - k] - t[k:n - k - 1]) > 0, axis=0):
            raise ValueError('Interior knots t must satisfy Schoenberg-Whitney conditions')
        if not dfitpack.fpchec(x, t, k) == 0:
            raise ValueError(_fpchec_error_string)
        data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
        self._data = data[:-3] + (None, None, data[-1])
        self._reset_class()
class DechunkedInput(io.RawIOBase):

    def __init__(self, rfile):
        self._rfile = rfile
        self._done = False
        self._len = 0

    def readable(self):
        return True

    def read_chunk_len(self):
        try:
            line = self._rfile.readline().decode('latin1')
            _len = int(line.strip(), 16)
        except ValueError:
            raise IOError('Invalid chunk header')
        if _len < 0:
            raise IOError('Negative chunk length not allowed')
        return _len

    def readinto(self, buf):
        read = 0
        while not self._done and read < len(buf):
            if self._len == 0:
                self._len = self.read_chunk_len()
            if self._len == 0:
                self._done = True
            if self._len > 0:
                n = min(len(buf), self._len)
                buf[read:read + n] = self._rfile.read(n)
                self._len -= n
                read += n
            if self._len == 0:
                terminator = self._rfile.readline()
                if terminator not in (b'\n', b'\r\n', b'\r'):
                    raise IOError('Missing chunk terminating newline')
        return read
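# Hypothetical usage sketch of the class above: decode an HTTP
# chunked-transfer-encoded body by wrapping any binary file object.
import io

src = io.BytesIO(b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n')
assert DechunkedInput(src).read() == b'Wikipedia'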
def test_constructor_statement_accept(test_case_mock, variable_reference_mock, constructor_mock):
    statement = stmt.ConstructorStatement(test_case_mock, constructor_mock)
    visitor = MagicMock(stmt.StatementVisitor)
    statement.accept(visitor)
    visitor.visit_constructor_statement.assert_called_once_with(statement)
def _peel(G, A):
    Acomp = set(G)
    Acomp.difference_update(A)
    peeling = []
    H = copy(G)
    H.delete_vertices(list(Acomp))
    del Acomp
    while H:
        ui = next(H.vertex_iterator())
        Vi = set(H)
        peeling.append((ui, Vi))
        H.delete_vertices(H.neighbor_iterator(ui, closed=True))
    peeling.append((None, set()))
    peeling.reverse()
    return peeling
def create_summarized_columns_node(columns):
    count_dict = defaultdict(int)
    for column_name, meta in columns.items():
        sdtype = 'other' if meta['sdtype'] not in DEFAULT_SDTYPES else meta['sdtype']
        count_dict[sdtype] += 1
    count_dict = dict(sorted(count_dict.items()))
    columns = ['Columns']
    columns.extend([f'&nbsp; &nbsp; {sdtype} : {count}' for sdtype, count in count_dict.items()])
    return '\\l'.join(columns)
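# Hypothetical usage sketch, assuming DEFAULT_SDTYPES contains 'numerical'
# and 'categorical' but not 'point'; the result is a Graphviz-style
# left-justified ("\l"-separated) label.
columns = {'age': {'sdtype': 'numerical'}, 'name': {'sdtype': 'categorical'}, 'geo': {'sdtype': 'point'}}
label = create_summarized_columns_node(columns)
# -> 'Columns\l&nbsp; &nbsp; categorical : 1\l&nbsp; &nbsp; numerical : 1\l&nbsp; &nbsp; other : 1'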
# Decorator prefix elided to "." in the source; assuming pytest.mark.
@pytest.mark.parametrize('CurveDisplay, specific_params', [(ValidationCurveDisplay, {'param_name': 'max_depth', 'param_range': [1, 3, 5]}), (LearningCurveDisplay, {'train_sizes': [0.3, 0.6, 0.9]})])
def test_curve_display_negate_score(pyplot, data, CurveDisplay, specific_params):
    X, y = data
    estimator = DecisionTreeClassifier(max_depth=1, random_state=0)
    negate_score = False
    display = CurveDisplay.from_estimator(estimator, X, y, **specific_params, negate_score=negate_score)
    positive_scores = display.lines_[0].get_data()[1]
    assert (positive_scores >= 0).all()
    assert display.ax_.get_ylabel() == 'Score'
    negate_score = True
    display = CurveDisplay.from_estimator(estimator, X, y, **specific_params, negate_score=negate_score)
    negative_scores = display.lines_[0].get_data()[1]
    assert (negative_scores <= 0).all()
    assert_allclose(negative_scores, -positive_scores)
    assert display.ax_.get_ylabel() == 'Negative score'
    negate_score = False
    display = CurveDisplay.from_estimator(estimator, X, y, **specific_params, negate_score=negate_score)
    assert display.ax_.get_ylabel() == 'Score'
    display.plot(negate_score=not negate_score)
    assert display.ax_.get_ylabel() == 'Score'
    assert (display.lines_[0].get_data()[1] < 0).all()
def plot_acc(model_dir):
    file_dir = os.path.join(model_dir, 'acc.csv')
    data = pd.read_csv(file_dir)
    epochs = data['epoch'].ravel()
    acc_train = data['acc_train'].ravel()
    acc_test = data['acc_test'].ravel()
    fig, ax = plt.subplots(1, 1, figsize=(7, 5), sharey=True, sharex=True, dpi=400)
    ax.plot(epochs, acc_train, label='train', color='green', alpha=0.8)
    ax.plot(epochs, acc_test, label='test', color='red', alpha=0.8)
    ax.set_ylabel('Accuracy', fontsize=10)
    ax.set_xlabel('Epoch', fontsize=10)
    ax.legend(loc='lower right', prop={'size': 15}, ncol=3, framealpha=0.5)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.tight_layout()
    acc_dir = os.path.join(model_dir, 'figures', 'acc')
    os.makedirs(acc_dir, exist_ok=True)
    file_name = os.path.join(acc_dir, 'accuracy.png')
    plt.savefig(file_name, dpi=400)
    print('Plot saved to: {}'.format(file_name))
    file_name = os.path.join(acc_dir, 'accuracy.pdf')
    plt.savefig(file_name, dpi=400)
    plt.close()
    print('Plot saved to: {}'.format(file_name))
def get_end_time(list_: List, is_sorted: bool = False, attr: str = 'time'):
    if not list_:
        return 0
    if is_sorted:
        return getattr(list_[-1], attr)
    return max(getattr(item, attr) for item in list_)
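# Hypothetical usage sketch: with a pre-sorted list the last element is
# returned in O(1); otherwise all items are scanned.
from collections import namedtuple

Event = namedtuple('Event', 'time')
events = [Event(1.0), Event(2.5), Event(2.0)]
assert get_end_time(events) == 2.5
assert get_end_time(sorted(events), is_sorted=True) == 2.5
assert get_end_time([]) == 0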
class HasNUWPred(FunPred):
    sig = (WrappingBinaryOperator,)
    code = 'hasNUW'
    type_constraints = _none
# The class decorator and the "_property"/"_required" markers below were
# elided in the source; assuming @dataclass, @cached_property, @property and
# transformers' @tf_required.
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = ['no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process']

    def __init__(self, **kwargs):
        # Translate deprecated "no_*" flags into their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(f'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or {positive_arg}={kwargs[positive_arg]}')
        self.tpu_name = kwargs.pop('tpu_name', self.tpu_name)
        self.device_idx = kwargs.pop('device_idx', self.device_idx)
        self.eager_mode = kwargs.pop('eager_mode', self.eager_mode)
        self.use_xla = kwargs.pop('use_xla', self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(default=None, metadata={'help': 'Name of TPU'})
    device_idx: int = field(default=0, metadata={'help': 'CPU / GPU device index. Defaults to 0.'})
    eager_mode: bool = field(default=False, metadata={'help': 'Benchmark models in eager model.'})
    use_xla: bool = field(default=False, metadata={'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'})

    @cached_property
    @tf_required
    def _setup_tpu(self) -> Tuple['tf.distribute.cluster_resolver.TPUClusterResolver']:
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    @tf_required
    def _setup_strategy(self) -> Tuple['tf.distribute.Strategy', 'tf.distribute.cluster_resolver.TPUClusterResolver']:
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        elif self.is_gpu:
            tf.config.set_visible_devices(self.gpu_list[self.device_idx], 'GPU')
            strategy = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}')
        else:
            tf.config.set_visible_devices([], 'GPU')
            strategy = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}')
        return strategy

    @property
    @tf_required
    def is_tpu(self) -> bool:
        return self._setup_tpu is not None

    @property
    @tf_required
    def strategy(self) -> 'tf.distribute.Strategy':
        return self._setup_strategy

    @property
    @tf_required
    def gpu_list(self):
        return tf.config.list_physical_devices('GPU')

    @property
    @tf_required
    def n_gpu(self) -> int:
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
def report_speed(outputs, speed_meters):
    total_time = 0
    for key in outputs:
        if 'time' in key:
            total_time += outputs[key]
            speed_meters[key].update(outputs[key])
            print('%s: %.4f' % (key, speed_meters[key].avg))
    speed_meters['total_time'].update(total_time)
    print('FPS: %.1f' % (1.0 / speed_meters['total_time'].avg))
def monkey_patch_RMSprop(RMSProp_class):

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()
        effective_lrs = {}
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('RMSprop does not support sparse gradients')
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(p.data)
                    if group['momentum'] > 0:
                        state['momentum_buffer'] = torch.zeros_like(p.data)
                    if group['centered']:
                        state['grad_avg'] = torch.zeros_like(p.data)
                square_avg = state['square_avg']
                alpha = group['alpha']
                state['step'] += 1
                if group['weight_decay'] != 0:
                    grad = grad.add(group['weight_decay'], p.data)
                square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)
                if group['centered']:
                    grad_avg = state['grad_avg']
                    grad_avg.mul_(alpha).add_(1 - alpha, grad)
                    avg = square_avg.addcmul(-1, grad_avg, grad_avg).sqrt().add_(group['eps'])
                else:
                    avg = square_avg.sqrt().add_(group['eps'])
                if group['momentum'] > 0:
                    buf = state['momentum_buffer']
                    buf.mul_(group['momentum']).addcdiv_(grad, avg)
                    p.data.add_(-group['lr'], buf)
                else:
                    p.data.addcdiv_(-group['lr'], grad, avg)
                effective_lrs[p] = group['lr'] * grad / avg
        return (loss, effective_lrs)

    RMSProp_class.step = step
class FormanRicci():
    def __init__(self, G: nx.Graph, weight='weight', method='augmented', verbose='ERROR'):
        self.G = G.copy()
        self.weight = weight
        self.method = method
        if not nx.get_edge_attributes(self.G, self.weight):
            logger.info('Edge weight not detected in graph, use "weight" as default edge weight.')
            for (v1, v2) in self.G.edges():
                self.G[v1][v2][self.weight] = 1.0
        if not nx.get_node_attributes(self.G, self.weight):
            logger.info('Node weight not detected in graph, use "weight" as default node weight.')
            for v in self.G.nodes():
                self.G.nodes[v][self.weight] = 1.0
        if self.G.is_directed():
            logger.info('Forman-Ricci curvature is not supported for directed graph yet, convert input graph to undirected.')
            self.G = self.G.to_undirected()
        set_verbose(verbose)

    def compute_ricci_curvature(self):
        if self.method == '1d':
            for (v1, v2) in self.G.edges():
                v1_nbr = set(self.G.neighbors(v1))
                v1_nbr.remove(v2)
                v2_nbr = set(self.G.neighbors(v2))
                v2_nbr.remove(v1)
                w_e = self.G[v1][v2][self.weight]
                w_v1 = self.G.nodes[v1][self.weight]
                w_v2 = self.G.nodes[v2][self.weight]
                ev1_sum = sum([w_v1 / math.sqrt(w_e * self.G[v1][v][self.weight]) for v in v1_nbr])
                ev2_sum = sum([w_v2 / math.sqrt(w_e * self.G[v2][v][self.weight]) for v in v2_nbr])
                self.G[v1][v2]['formanCurvature'] = w_e * (((w_v1 / w_e) + (w_v2 / w_e)) - (ev1_sum + ev2_sum))
                logger.debug('Source: %s, target: %s, Forman-Ricci curvature = %f ' % (v1, v2, self.G[v1][v2]['formanCurvature']))
        elif self.method == 'augmented':
            for (v1, v2) in self.G.edges():
                v1_nbr = set(self.G.neighbors(v1))
                v1_nbr.remove(v2)
                v2_nbr = set(self.G.neighbors(v2))
                v2_nbr.remove(v1)
                face = v1_nbr & v2_nbr
                w_e = self.G[v1][v2][self.weight]
                w_f = 1
                w_v1 = self.G.nodes[v1][self.weight]
                w_v2 = self.G.nodes[v2][self.weight]
                sum_ef = sum([w_e / w_f for _ in face])
                sum_ve = sum([(w_v1 / w_e) + (w_v2 / w_e)])
                sum_ehef = 0
                sum_veeh = sum([w_v1 / math.sqrt(w_e * self.G[v1][v][self.weight]) for v in (v1_nbr - face)] + [w_v2 / math.sqrt(w_e * self.G[v2][v][self.weight]) for v in (v2_nbr - face)])
                self.G[v1][v2]['formanCurvature'] = w_e * ((sum_ef + sum_ve) - math.fabs(sum_ehef - sum_veeh))
                logger.debug('Source: %s, target: %s, Forman-Ricci curvature = %f ' % (v1, v2, self.G[v1][v2]['formanCurvature']))
        else:
            # `assert True` in the original could never fire; the unsupported-method
            # branch must actually fail
            assert False, 'Method %s not available. Support methods: {"1d","augmented"}' % self.method
        for n in self.G.nodes():
            fcsum = 0
            if self.G.degree(n) != 0:
                for nbr in self.G.neighbors(n):
                    if 'formanCurvature' in self.G[n][nbr]:
                        fcsum += self.G[n][nbr]['formanCurvature']
                self.G.nodes[n]['formanCurvature'] = fcsum / self.G.degree(n)
            else:
                self.G.nodes[n]['formanCurvature'] = fcsum
            logger.debug('node %s, Forman Curvature = %f' % (n, self.G.nodes[n]['formanCurvature']))
        logger.debug('Forman curvature (%s) computation done.' % self.method)
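# Usage sketch, assuming networkx plus the module's logger/set_verbose helpers
# are in scope: curvature values land in the 'formanCurvature' edge and node
# attributes of the copied graph.
import networkx as nx

frc = FormanRicci(nx.karate_club_graph(), method='1d')
frc.compute_ricci_curvature()
print(frc.G[0][1]['formanCurvature'])      # curvature of edge (0, 1)
print(frc.G.nodes[0]['formanCurvature'])   # average curvature around node 0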
def get_data(file):
    x = []
    y = []
    with open(file) as fin:
        for i in range(3):  # skip the three header lines
            fin.readline()
        for line in fin.readlines():
            line = line.strip().split(' ')
            if len(line) < 4:  # fields 2 and 3 are read below, so require at least 4
                break
            x.append(float(line[2]))
            line = line[3].split('[')[1].split(',')[0]
            y.append(float(line))
    return (x, y)
@pytest.mark.corpus  # assumption: the bare '.corpus' prefix was a stripped pytest marker
def test_speech_commands():
    env = dotenv_values()
    corpus = SpeechCommandsV1(env['GSC1'], env['GSC1_TEST'])
    all_data = corpus.all_data
    classes = set([value['class_name'] for (key, value) in all_data.items()])
    assert len(classes) == 12, f'{classes}'
    (train, valid, test) = corpus.data_split
    train_class_counter = _class_counter(train)
    valid_class_counter = _class_counter(valid)
    test_class_counter = _class_counter(test)
    assert train_class_counter == Counter({'_unknown_': 32550, 'stop': 1885, 'on': 1864, 'go': 1861, 'yes': 1860, 'no': 1853, 'right': 1852, 'up': 1843, 'down': 1842, 'left': 1839, 'off': 1839, '_silence_': 6})
    assert valid_class_counter == Counter({'_unknown_': 4221, 'stop': 246, 'on': 257, 'go': 260, 'yes': 261, 'no': 270, 'right': 256, 'up': 260, 'down': 264, 'left': 247, 'off': 256, '_silence_': 6})
    assert test_class_counter == Counter({'_unknown_': 257, 'stop': 249, 'on': 246, 'go': 251, 'yes': 256, 'no': 252, 'right': 259, 'up': 272, 'down': 253, 'left': 267, 'off': 262, '_silence_': 257})
class RandomVerticalFlip(object): def __init__(self, prob: float=0.5): self.prob = prob def __call__(self, img, mask=None): if (mask is not None): if (random.random() < self.prob): return (img.transpose(Image.FLIP_TOP_BOTTOM), mask.transpose(Image.FLIP_TOP_BOTTOM)) else: return (img, mask) elif (random.random() < self.prob): return img.transpose(Image.FLIP_TOP_BOTTOM) else: return img
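# Usage sketch for RandomVerticalFlip, assuming Pillow is available: when a mask
# is passed, image and mask are flipped together so they stay aligned.
from PIL import Image

flip = RandomVerticalFlip(prob=1.0)  # prob=1.0 makes the flip deterministic for the demo
img = Image.new('RGB', (4, 4))
mask = Image.new('L', (4, 4))
img_f, mask_f = flip(img, mask)   # both flipped
img_only = flip(img)              # single-image mode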
def input_user() -> str: try: user_utterance = input(((bcolors.OKCYAN + bcolors.BOLD) + 'User: ')) while (not user_utterance.strip()): user_utterance = input(((bcolors.OKCYAN + bcolors.BOLD) + 'User: ')) finally: print(bcolors.ENDC) return user_utterance
def get_qd_to_answer(data): key_to_answer = {} for datum in data['Data']: for page in (datum.get('EntityPages', []) + datum.get('SearchResults', [])): qd_tuple = get_question_doc_string(datum['QuestionId'], page['Filename']) key_to_answer[qd_tuple] = datum['Answer'] return key_to_answer
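# Usage sketch on a TriviaQA-style record; get_question_doc_string is assumed to
# come from the same module and to join a question id and filename into one key.
data = {'Data': [{'QuestionId': 'q1',
                  'Answer': {'Value': 'Paris'},
                  'EntityPages': [{'Filename': 'Paris.txt'}],
                  'SearchResults': []}]}
key_to_answer = get_qd_to_answer(data)
print(list(key_to_answer.values()))  # [{'Value': 'Paris'}]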
def test_fetch(fetch_california_housing_fxt): data = fetch_california_housing_fxt() assert ((20640, 8) == data.data.shape) assert ((20640,) == data.target.shape) assert data.DESCR.startswith('.. _california_housing_dataset:') fetch_func = partial(fetch_california_housing_fxt) check_return_X_y(data, fetch_func)
class DatasetLoader(): supported_datasets = {'reddit': partial(Reddit, transform=Compose([RandomNodeSplit(num_val=0.1, num_test=0.15), FilterClassByCount(min_count=10000, remove_unlabeled=True)])), 'amazon': partial(Amazon, transform=Compose([RandomNodeSplit(num_val=0.1, num_test=0.15), FilterClassByCount(min_count=100000, remove_unlabeled=True)])), 'facebook': partial(Facebook, name='UIllinois20', target='year', transform=Compose([RandomNodeSplit(num_val=0.1, num_test=0.15), FilterClassByCount(min_count=1000, remove_unlabeled=True)]))} def __init__(self, dataset: Annotated[(str, ArgInfo(help='name of the dataset', choices=supported_datasets))]='facebook', data_dir: Annotated[(str, ArgInfo(help='directory to store the dataset'))]='./datasets'): self.name = dataset self.data_dir = data_dir def load(self, verbose=False) -> Data: data = self.supported_datasets[self.name](root=os.path.join(self.data_dir, self.name))[0] data = Compose([RemoveSelfLoops(), RemoveIsolatedNodes(), ToSparseTensor()])(data) if verbose: self.print_stats(data) return data def print_stats(self, data: Data): nodes_degree: torch.Tensor = data.adj_t.sum(dim=1) baseline: float = ((data.y[data.test_mask].unique(return_counts=True)[1].max().item() * 100) / data.test_mask.sum().item()) train_ratio: float = ((data.train_mask.sum().item() / data.num_nodes) * 100) val_ratio: float = ((data.val_mask.sum().item() / data.num_nodes) * 100) test_ratio: float = ((data.test_mask.sum().item() / data.num_nodes) * 100) stat = {'nodes': f'{data.num_nodes:,}', 'edges': f'{data.num_edges:,}', 'features': f'{data.num_features:,}', 'classes': f'{int((data.y.max() + 1))}', 'mean degree': f'{nodes_degree.mean():.2f}', 'median degree': f'{nodes_degree.median()}', 'train/val/test (%)': f'{train_ratio:.1f}/{val_ratio:.1f}/{test_ratio:.1f}', 'baseline acc (%)': f'{baseline:.2f}'} table = dict2table(stat, num_cols=2, title=f'dataset: [yellow]{self.name}[/yellow]') console.info(table) console.print()
def test(): j1 = ak.from_numpy(np.empty(0, np.int32)) assert (str(ak.Record({'d': j1}).type) == '{d: 0 * int32}')
def prologue_opt(args, OUTD_OPTMASKS, SHARED_OPT_MASKS):
    subs = ['learning', 'gifs', 'tmp', 'bin_masks', 'continuous_masks', 'final_masks']
    for fd in subs:
        if not os.path.exists(join(OUTD_OPTMASKS, fd)):
            os.makedirs(join(OUTD_OPTMASKS, fd))
    if args.share_masks:
        fd_masks = SHARED_OPT_MASKS
    else:
        fd_masks = join(OUTD_OPTMASKS, 'bin_masks')
    # the error message now names the directory that is actually checked
    msg = '{} is not a valid directory.'.format(fd_masks)
    assert os.path.isdir(fd_masks), msg
    metrics_fd = join(fd_masks, 'metrics')
    if not os.path.isdir(metrics_fd):
        os.makedirs(metrics_fd)
    return metrics_fd
class ScaledValuation_generic(DiscreteValuation): def __init__(self, parent, base_valuation, s): DiscreteValuation.__init__(self, parent) self._base_valuation = base_valuation self._scale = s def _repr_(self): return ('%r * %r' % (self._scale, self._base_valuation)) def residue_ring(self): return self._base_valuation.residue_ring() def uniformizer(self): return self._base_valuation.uniformizer() def _call_(self, f): return (self._scale * self._base_valuation(f)) def reduce(self, f): return self._base_valuation.reduce(f) def lift(self, F): return self._base_valuation.lift(F) def extensions(self, ring): return [ScaledValuation(w, self._scale) for w in self._base_valuation.extensions(ring)] def restriction(self, ring): restricted = self._base_valuation.restriction(ring) if restricted.is_trivial(): return restricted return ScaledValuation(restricted, self._scale) def _strictly_separating_element(self, other): return self._base_valuation._strictly_separating_element(other) def _weakly_separating_element(self, other): return self._base_valuation._weakly_separating_element(other) def _ge_(self, other): if (self == other): return True if isinstance(other, ScaledValuation_generic): return (((self._scale / other._scale) * self._base_valuation) >= other._base_valuation) if (self._scale >= 1): if (self._base_valuation >= other): return True else: assert (not self.is_trivial()) if (self._base_valuation <= other): return False return super()._ge_(other) def _le_(self, other): return ((other / self._scale) >= self._base_valuation) def value_semigroup(self): return (self._scale * self._base_valuation.value_semigroup())
class PrecoDocumentState(BaseDocumentState): def __init__(self, key): super().__init__(key) def final_process(self): all_mentions = flatten(self.clusters) self.sentence_map = get_sentence_map(self.segments, self.sentence_end) self.subtoken_map = flatten(self.segment_subtoken_map) assert (len(all_mentions) == len(set(all_mentions))) num_words = len(flatten(self.segments)) assert (num_words == len(self.subtoken_map)), (num_words, len(self.subtoken_map)) assert (num_words == len(self.sentence_map)), (num_words, len(self.sentence_map)) def finalize(self): self.final_process() return {'doc_key': self.doc_key, 'sentences': self.segments, 'clusters': self.clusters, 'sentence_map': self.sentence_map, 'subtoken_map': self.subtoken_map}
def shap_explain(booster, datasource, dataset, summary_params, result_table='', is_pai=False, oss_dest=None, oss_ak=None, oss_sk=None, oss_endpoint=None, oss_bucket_name=None): tree_explainer = shap.TreeExplainer(booster) shap_values = tree_explainer.shap_values(dataset) if result_table: if is_pai: conn = PaiIOConnection.from_table(result_table) else: conn = db.connect_with_data_source(datasource) if isinstance(shap_values, list): to_write = shap_values[0] else: to_write = shap_values columns = list(dataset.columns) with db.buffered_db_writer(conn, result_table, columns) as w: for row in to_write: w.write(list(row)) conn.close() if (summary_params.get('plot_type') == 'decision'): shap_interaction_values = tree_explainer.shap_interaction_values(dataset) expected_value = tree_explainer.expected_value if isinstance(shap_interaction_values, list): shap_interaction_values = shap_interaction_values[0] if isinstance(expected_value, list): expected_value = expected_value[0] plot_func = (lambda : shap.decision_plot(expected_value, shap_interaction_values, dataset, show=False, feature_display_range=slice(None, (- 40), (- 1)), alpha=1)) else: plot_func = (lambda : shap.summary_plot(shap_values, dataset, show=False, **summary_params)) explainer.plot_and_save(plot_func, oss_dest=oss_dest, oss_ak=oss_ak, oss_sk=oss_sk, oss_endpoint=oss_endpoint, oss_bucket_name=oss_bucket_name, filename='summary')
def wrightomega_exp_error(x): exponential_approx = mpmath.exp(x) desired = mpmath_wrightomega(x) return (abs((exponential_approx - desired)) / desired)
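# Quick check of the helper, assuming mpmath and the module's mpmath_wrightomega
# are available: for large negative x, wrightomega(x) ~ exp(x), so the relative
# error of the exponential approximation should shrink as x decreases.
for x in [-10, -100, -1000]:
    print(x, wrightomega_exp_error(x))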
class TFMPNetPreTrainedModel(metaclass=DummyObject): _backends = ['tf'] def __init__(self, *args, **kwargs): requires_backends(self, ['tf'])
def remove_leading_spaces(data): print('Removing leading spaces ...') for i in range(len(data)): for j in range(len(data[i])): data[i][j] = data[i][j].strip() return data
def fit_predict_selected(model, train_log, inf_log, user_features, queries): train_dataset = create_dataset(train_log, user_features=user_features) pred_dataset = create_dataset(inf_log, user_features=user_features) model.fit(train_dataset) return model.predict(dataset=pred_dataset, queries=queries, k=1)
def unpad_seqs(seqs, seq_lens):
    # accept any tensor of lengths; isinstance(x, torch.LongTensor) only matches
    # CPU long tensors and silently misses CUDA tensors
    if isinstance(seq_lens, torch.Tensor):
        seq_lens = seq_lens.cpu().tolist()
    return [seq[:seq_len] for (seq, seq_len) in zip(seqs.cpu().tolist(), seq_lens)]
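# Usage sketch: strip padding from a batch of padded sequences.
import torch

seqs = torch.tensor([[5, 7, 0, 0], [1, 2, 3, 0]])
seq_lens = torch.tensor([2, 3])
print(unpad_seqs(seqs, seq_lens))  # [[5, 7], [1, 2, 3]]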
class UpBottleneck(nn.Module): def __init__(self, in_places, places, stride=2, expansion=4, is_relu=True, p=0.01): super(UpBottleneck, self).__init__() mid_channels = (in_places // expansion) self.bottleneck = nn.Sequential(Conv1x1BNReLU(in_places, mid_channels, is_relu), TransposeConv3x3BNReLU(mid_channels, mid_channels, stride, is_relu), Conv1x1BNReLU(mid_channels, places, is_relu), nn.Dropout2d(p=p)) self.upsample_conv = Conv1x1BN(in_places, places) self.upsample_unpool = nn.MaxUnpool2d(kernel_size=2) self.relu = (nn.ReLU(inplace=True) if is_relu else nn.PReLU()) def forward(self, x, indices): out = self.bottleneck(x) residual = self.upsample_conv(x) residual = self.upsample_unpool(residual, indices) out += residual out = self.relu(out) return out
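# Shape sketch for UpBottleneck, assuming Conv1x1BNReLU / TransposeConv3x3BNReLU /
# Conv1x1BN come from the same module: the unpool indices must come from a 2x2
# max-pool of a tensor with `places` channels at the target resolution.
import torch
import torch.nn as nn

pool = nn.MaxPool2d(kernel_size=2, return_indices=True)
_, indices = pool(torch.randn(1, 16, 64, 64))   # indices: (1, 16, 32, 32)
up = UpBottleneck(in_places=64, places=16)
y = up(torch.randn(1, 64, 32, 32), indices)     # y: (1, 16, 64, 64)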
@pytest.mark.torch  # assumption: the bare '.torch' prefix was a stripped pytest marker
def test_item_id_feature_not_specified(small_dataset):
    schema = (
        TensorSchemaBuilder()
        .categorical('item_id', cardinality=6, is_seq=True, feature_source=TensorFeatureSource(FeatureSource.INTERACTIONS, 'item_id'))
        .categorical('user_id', cardinality=6, is_seq=True, feature_source=TensorFeatureSource(FeatureSource.INTERACTIONS, 'user_id'), feature_hint=FeatureHint.QUERY_ID)
        .build()
    )
    with pytest.raises(ValueError) as exc:
        SequenceTokenizer._check_if_tensor_schema_matches_data(small_dataset, schema)
    assert str(exc.value) == 'Tensor schema must have item id feature defined'
def find_sage_dangling_links(app, env, node, contnode): debug_inf(app, ' find_sage_dangling_links ') reftype = node['reftype'] reftarget = node['reftarget'] try: doc = node['refdoc'] except KeyError: debug_inf(app, ('-- no refdoc in node %s' % node)) return None debug_inf(app, ('Searching %s from %s' % (reftarget, doc))) if ((reftarget in base_class_as_func) and (reftype == 'class')): node['reftype'] = 'func' res = call_intersphinx(app, env, node, contnode) if res: debug_inf(app, ('++ DONE %s' % res['refuri'])) return res if (node.get('refdomain') != 'py'): return None try: module = node['py:module'] cls = node['py:class'] except KeyError: debug_inf(app, ('-- no module or class for :%s:%s' % (reftype, reftarget))) return None basename = reftarget.split('.')[0] try: target_module = getattr(sys.modules['sage.all'], basename).__module__ debug_inf(app, ('++ found %s using sage.all in %s' % (basename, target_module))) except AttributeError: try: target_module = getattr(sys.modules[node['py:module']], basename).__module__ debug_inf(app, ('++ found %s in this module' % (basename,))) except AttributeError: debug_inf(app, ('-- %s not found in sage.all or this module' % basename)) return None except KeyError: target_module = None if (target_module is None): target_module = '' debug_inf(app, '?? found in None !!!') newtarget = ((target_module + '.') + reftarget) node['reftarget'] = newtarget builder = app.builder searchmode = ((node.hasattr('refspecific') and 1) or 0) matches = builder.env.domains['py'].find_obj(builder.env, module, cls, newtarget, reftype, searchmode) if (not matches): debug_inf(app, ('?? no matching doc for %s' % newtarget)) return call_intersphinx(app, env, node, contnode) elif (len(matches) > 1): env.warn(target_module, ('more than one target found for cross-reference %r: %s' % (newtarget, ', '.join((match[0] for match in matches)))), node.line) (name, obj) = matches[0] debug_inf(app, ('++ match = %s %s' % (name, obj))) from docutils import nodes newnode = nodes.reference('', '', internal=True) if (name == target_module): newnode['refid'] = name else: newnode['refuri'] = builder.get_relative_uri(node['refdoc'], obj[0]) newnode['refuri'] += ('#' + name) debug_inf(app, ('++ DONE at URI %s' % newnode['refuri'])) newnode['reftitle'] = name newnode.append(contnode) return newnode
class InstanceWhitening(nn.Module): def __init__(self, dim): super(InstanceWhitening, self).__init__() self.instance_standardization = nn.InstanceNorm2d(dim, affine=False) def forward(self, x): x = self.instance_standardization(x) w = x return (x, w)
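# Minimal sketch: InstanceNorm2d standardizes each (sample, channel) plane to
# zero mean and unit variance; the module returns the standardized features
# twice, matching callers that expect an (output, whitened) pair.
import torch

iw = InstanceWhitening(dim=8)
x, w = iw(torch.randn(2, 8, 16, 16))
print(x.mean().abs().item() < 1e-4, x is w)  # per-plane mean ~0; both outputs alias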
class QuantumCliffordAlgebraGeneric(QuantumCliffordAlgebra):
    def __init__(self, n, k, q, F):
        psi = cartesian_product([(-1, 0, 1)] * n)
        indices = [(tuple(p), tuple(w)) for p in psi for w in product(*[list(range((4 - 2 * abs(p[i])) * k)) for i in range(n)])]
        super().__init__(n, k, q, F, psi, indices)

    def _repr_term(self, m):
        (p, v) = m
        rp = '*'.join((('psi%s' % i) if (p[i] > 0) else ('psid%s' % i)) for i in range(self._n) if (p[i] != 0))
        gen_str = lambda e: ('' if (e == 1) else ('^%s' % e))
        rv = '*'.join((('w%s' % i) + gen_str(v[i])) for i in range(self._n) if (v[i] != 0))
        if rp:
            if rv:
                return rp + '*' + rv
            return rp
        if rv:
            return rv
        return '1'

    def _latex_term(self, m):
        (p, v) = m
        rp = ''.join((('\\psi_{%s}' % i) if (p[i] > 0) else ('\\psi^{\\dagger}_{%s}' % i)) for i in range(self._n) if (p[i] != 0))
        gen_str = lambda e: ('' if (e == 1) else ('^{%s}' % e))
        rv = ''.join((('\\omega_{%s}' % i) + gen_str(v[i])) for i in range(self._n) if (v[i] != 0))
        if (not rp) and (not rv):
            return '1'
        return rp + rv

    @cached_method  # assumption: the bare '_method' prefix was a stripped @cached_method
    def product_on_basis(self, m1, m2):
        (p1, w1) = m1
        (p2, w2) = m2
        if any((p1[i] != 0) and (p1[i] == p2[i]) for i in range(self._n)):
            return self.zero()
        k = self._k
        q_power = 0
        sign = 1
        pairings = []
        supported = []
        p = [0] * self._n
        for i in range(self._n):
            if p2[i] != 0:
                supported.append(i)
                q_power += w1[i] * p2[i]
                if p1[i] != 0:
                    pairings.append((i + 1) * p1[i])
            p[i] = p1[i] + p2[i]
        supported.append(self._n - 1)
        for i in reversed(range(1, len(supported))):
            if (i % 2) != 0:
                for j in reversed(range(supported[i - 1] + 1, supported[i] + 1)):
                    if p1[j] != 0:
                        sign = ((-1) ** i) * sign
        vp = self._w_poly.gens()
        poly = self._w_poly.one()
        q = self._q
        for i in pairings:
            if i < 0:
                i = -i - 1
                vpik = (-(q ** (2 * k))) * (vp[i] ** (3 * k)) + (1 + q ** (2 * k)) * (vp[i] ** k)
                poly *= (-((vp[i] ** k) - vpik)) / (q ** k - q ** (-k))
            else:
                i -= 1
                vpik = (-(q ** (2 * k))) * (vp[i] ** (3 * k)) + (1 + q ** (2 * k)) * (vp[i] ** k)
                poly *= ((q ** k) * (vp[i] ** k) - (q ** (-k)) * vpik) / (q ** k - q ** (-k))
        v = list(w1)
        for i in range(self._n):
            v[i] += w2[i]
        for i in range(self._n):
            if (p[i] > 0) and (v[i] != 0):
                q_power -= 2 * k * (v[i] // (2 * k))
                v[i] = v[i] % (2 * k)
            if (p[i] < 0) and (v[i] != 0):
                v[i] = v[i] % (2 * k)
        poly *= self._w_poly.monomial(*v)
        poly = poly.reduce([(vp[i] ** (4 * k)) - (1 + q ** (-2 * k)) * (vp[i] ** (2 * k)) + q ** (-2 * k) for i in range(self._n)])
        pdict = poly.dict()
        ret = {(self._psi(p), tuple(e)): pdict[e] * (q ** q_power) * sign for e in pdict}
        return self._from_dict(ret)

    class Element(CombinatorialFreeModule.Element):
        def inverse(self):
            if not self:
                raise ZeroDivisionError
            if len(self) != 1:
                return super().__invert__()
            Cl = self.parent()
            ((p, w), coeff), = list(self._monomial_coefficients.items())
            if any(p[i] != 0 for i in range(Cl._n)):
                return super().__invert__()
            poly = Cl._w_poly.monomial(*w)
            wp = Cl._w_poly.gens()
            q = Cl._q
            k = Cl._k
            poly = poly.subs({wi: (-(q ** (2 * k))) * (wi ** (4 * k - 1)) + (1 + q ** (2 * k)) * (wi ** (2 * k - 1)) for wi in wp})
            poly = poly.reduce([(wi ** (4 * k)) - (1 + q ** (-2 * k)) * (wi ** (2 * k)) + q ** (-2 * k) for wi in wp])
            pdict = poly.dict()
            coeff = coeff.inverse_of_unit()
            ret = {(p, tuple(e)): coeff * c for (e, c) in pdict.items()}
            return Cl.element_class(Cl, ret)
        __invert__ = inverse
class Lark(Serialize):
    def __init__(self, grammar, **options):
        self.options = LarkOptions(options)
        use_regex = self.options.regex
        if use_regex:
            if regex:
                re_module = regex
            else:
                raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.')
        else:
            re_module = re
        if self.options.source_path is None:
            try:
                self.source_path = grammar.name
            except AttributeError:
                self.source_path = '<string>'
        else:
            self.source_path = self.options.source_path
        try:
            read = grammar.read
        except AttributeError:
            pass
        else:
            grammar = read()
        cache_fn = None
        cache_md5 = None
        if isinstance(grammar, STRING_TYPE):
            self.source_grammar = grammar
            if self.options.use_bytes:
                if not isascii(grammar):
                    raise ConfigurationError('Grammar must be ascii only, when use_bytes=True')
                if sys.version_info[0] == 2 and self.options.use_bytes != 'force':
                    raise ConfigurationError("`use_bytes=True` may have issues on python2. Use `use_bytes='force'` to use it at your own risk.")
            if self.options.cache:
                if self.options.parser != 'lalr':
                    raise ConfigurationError("cache only works with parser='lalr' for now")
                unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals')
                options_str = ''.join(k + str(v) for (k, v) in options.items() if k not in unhashable)
                from . import __version__
                s = grammar + options_str + __version__ + str(sys.version_info[:2])
                cache_md5 = hashlib.md5(s.encode('utf8')).hexdigest()
                if isinstance(self.options.cache, STRING_TYPE):
                    cache_fn = self.options.cache
                else:
                    if self.options.cache is not True:
                        raise ConfigurationError('cache argument must be bool or str')
                    cache_fn = tempfile.gettempdir() + ('/.lark_cache_%s_%s_%s.tmp' % ((cache_md5,) + sys.version_info[:2]))
                if FS.exists(cache_fn):
                    logger.debug('Loading grammar from cache: %s', cache_fn)
                    for name in set(options) - _LOAD_ALLOWED_OPTIONS:
                        del options[name]
                    with FS.open(cache_fn, 'rb') as f:
                        old_options = self.options
                        try:
                            file_md5 = f.readline().rstrip(b'\n')
                            cached_used_files = pickle.load(f)
                            if file_md5 == cache_md5.encode('utf8') and verify_used_files(cached_used_files):
                                cached_parser_data = pickle.load(f)
                                self._load(cached_parser_data, **options)
                                return
                        except Exception:
                            logger.exception('Failed to load Lark from cache: %r. We will try to carry on.' % cache_fn)
                            self.options = old_options
            (self.grammar, used_files) = load_grammar(grammar, self.source_path, self.options.import_paths, self.options.keep_all_tokens)
        else:
            assert isinstance(grammar, Grammar)
            self.grammar = grammar
        if self.options.lexer == 'auto':
            if self.options.parser == 'lalr':
                self.options.lexer = 'contextual'
            elif self.options.parser == 'earley':
                if self.options.postlex is not None:
                    logger.info("postlex can't be used with the dynamic lexer, so we use standard instead. Consider using lalr with contextual instead of earley")
                    self.options.lexer = 'standard'
                else:
                    self.options.lexer = 'dynamic'
            elif self.options.parser == 'cyk':
                self.options.lexer = 'standard'
            else:
                assert False, self.options.parser
        lexer = self.options.lexer
        if isinstance(lexer, type):
            assert issubclass(lexer, Lexer)
        else:
            assert_config(lexer, ('standard', 'contextual', 'dynamic', 'dynamic_complete'))
            if self.options.postlex is not None and 'dynamic' in lexer:
                raise ConfigurationError("Can't use postlex with a dynamic lexer. Use standard or contextual instead")
        if self.options.ambiguity == 'auto':
            if self.options.parser == 'earley':
                self.options.ambiguity = 'resolve'
        else:
            assert_config(self.options.parser, ('earley', 'cyk'), "%r doesn't support disambiguation. Use one of these parsers instead: %s")
        if self.options.priority == 'auto':
            self.options.priority = 'normal'
        if self.options.priority not in _VALID_PRIORITY_OPTIONS:
            raise ConfigurationError(f'invalid priority option: {self.options.priority!r}. Must be one of {_VALID_PRIORITY_OPTIONS!r}')
        assert self.options.ambiguity not in ('resolve__antiscore_sum',), 'resolve__antiscore_sum has been replaced with the option priority="invert"'
        if self.options.ambiguity not in _VALID_AMBIGUITY_OPTIONS:
            raise ConfigurationError(f'invalid ambiguity option: {self.options.ambiguity!r}. Must be one of {_VALID_AMBIGUITY_OPTIONS!r}')
        if self.options.parser is None:
            terminals_to_keep = '*'
        elif self.options.postlex is not None:
            terminals_to_keep = set(self.options.postlex.always_accept)
        else:
            terminals_to_keep = set()
        (self.terminals, self.rules, self.ignore_tokens) = self.grammar.compile(self.options.start, terminals_to_keep)
        if self.options.edit_terminals:
            for t in self.terminals:
                self.options.edit_terminals(t)
        self._terminals_dict = {t.name: t for t in self.terminals}
        if self.options.priority == 'invert':
            for rule in self.rules:
                if rule.options.priority is not None:
                    rule.options.priority = -rule.options.priority
        elif self.options.priority is None:
            for rule in self.rules:
                if rule.options.priority is not None:
                    rule.options.priority = None
        self.lexer_conf = LexerConf(self.terminals, re_module, self.ignore_tokens, self.options.postlex, self.options.lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes)
        if self.options.parser:
            self.parser = self._build_parser()
        elif lexer:
            self.lexer = self._build_lexer()
        if cache_fn:
            logger.debug('Saving grammar to cache: %s', cache_fn)
            with FS.open(cache_fn, 'wb') as f:
                f.write(cache_md5.encode('utf8') + b'\n')
                pickle.dump(used_files, f)
                self.save(f)

    if __doc__:
        __doc__ += '\n\n' + LarkOptions.OPTIONS_DOC

    __serialize_fields__ = ('parser', 'rules', 'options')

    def _build_lexer(self, dont_ignore=False):
        lexer_conf = self.lexer_conf
        if dont_ignore:
            from copy import copy
            lexer_conf = copy(lexer_conf)
            lexer_conf.ignore = ()
        return TraditionalLexer(lexer_conf)

    def _prepare_callbacks(self):
        self._callbacks = {}
        if self.options.ambiguity != 'forest':
            self._parse_tree_builder = ParseTreeBuilder(self.rules, self.options.tree_class or Tree, self.options.propagate_positions, self.options.parser != 'lalr' and self.options.ambiguity == 'explicit', self.options.maybe_placeholders)
            self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer)
        self._callbacks.update(_get_lexer_callbacks(self.options.transformer, self.terminals))

    def _build_parser(self):
        self._prepare_callbacks()
        parser_class = get_frontend(self.options.parser, self.options.lexer)
        parser_conf = ParserConf(self.rules, self._callbacks, self.options.start)
        return parser_class(self.lexer_conf, parser_conf, options=self.options)

    def save(self, f):
        (data, m) = self.memo_serialize([TerminalDef, Rule])
        pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL)

    @classmethod
    def load(cls, f):
        inst = cls.__new__(cls)
        return inst._load(f)

    def _deserialize_lexer_conf(self, data, memo, options):
        lexer_conf = LexerConf.deserialize(data['lexer_conf'], memo)
        lexer_conf.callbacks = options.lexer_callbacks or {}
        lexer_conf.re_module = regex if options.regex else re
        lexer_conf.use_bytes = options.use_bytes
        lexer_conf.g_regex_flags = options.g_regex_flags
        lexer_conf.skip_validation = True
        lexer_conf.postlex = options.postlex
        return lexer_conf

    def _load(self, f, **kwargs):
        if isinstance(f, dict):
            d = f
        else:
            d = pickle.load(f)
        memo_json = d['memo']
        data = d['data']
        assert memo_json
        memo = SerializeMemoizer.deserialize(memo_json, {'Rule': Rule, 'TerminalDef': TerminalDef}, {})
        options = dict(data['options'])
        if (set(kwargs) - _LOAD_ALLOWED_OPTIONS) & set(LarkOptions._defaults):
            raise ConfigurationError('Some options are not allowed when loading a Parser: {}'.format(set(kwargs) - _LOAD_ALLOWED_OPTIONS))
        options.update(kwargs)
        self.options = LarkOptions.deserialize(options, memo)
        self.rules = [Rule.deserialize(r, memo) for r in data['rules']]
        self.source_path = '<deserialized>'
        parser_class = get_frontend(self.options.parser, self.options.lexer)
        self.lexer_conf = self._deserialize_lexer_conf(data['parser'], memo, self.options)
        self.terminals = self.lexer_conf.terminals
        self._prepare_callbacks()
        self._terminals_dict = {t.name: t for t in self.terminals}
        self.parser = parser_class.deserialize(data['parser'], memo, self.lexer_conf, self._callbacks, self.options)
        return self

    @classmethod
    def _load_from_dict(cls, data, memo, **kwargs):
        inst = cls.__new__(cls)
        return inst._load({'data': data, 'memo': memo}, **kwargs)

    @classmethod
    def open(cls, grammar_filename, rel_to=None, **options):
        if rel_to:
            basepath = os.path.dirname(rel_to)
            grammar_filename = os.path.join(basepath, grammar_filename)
        with open(grammar_filename, encoding='utf8') as f:
            return cls(f, **options)

    @classmethod
    def open_from_package(cls, package, grammar_path, search_paths=('',), **options):
        package_loader = FromPackageLoader(package, search_paths)
        (full_path, text) = package_loader(None, grammar_path)
        options.setdefault('source_path', full_path)
        options.setdefault('import_paths', [])
        options['import_paths'].append(package_loader)
        return cls(text, **options)

    def __repr__(self):
        return f'Lark(open({self.source_path!r}), parser={self.options.parser!r}, lexer={self.options.lexer!r}, ...)'

    def lex(self, text, dont_ignore=False):
        if not hasattr(self, 'lexer') or dont_ignore:
            lexer = self._build_lexer(dont_ignore)
        else:
            lexer = self.lexer
        lexer_thread = LexerThread(lexer, text)
        stream = lexer_thread.lex(None)
        if self.options.postlex:
            return self.options.postlex.process(stream)
        return stream

    def get_terminal(self, name):
        return self._terminals_dict[name]

    def parse_interactive(self, text=None, start=None):
        return self.parser.parse_interactive(text, start=start)

    def parse(self, text, start=None, on_error=None):
        return self.parser.parse(text, start=start, on_error=on_error)

    @property
    def source(self):
        warn('Attribute Lark.source was renamed to Lark.source_path', DeprecationWarning)
        return self.source_path

    @source.setter
    def source(self, value):
        self.source_path = value

    @property
    def grammar_source(self):
        warn('Attribute Lark.grammar_source was renamed to Lark.source_grammar', DeprecationWarning)
        return self.source_grammar

    @grammar_source.setter
    def grammar_source(self, value):
        self.source_grammar = value
def assign_pyramid(roi, k0=4, size=224): roi_width = (roi[3] - roi[1]) roi_height = (roi[4] - roi[2]) return np.ceil((np.log2((np.sqrt(float((roi_width * roi_height))) / float(size))) + k0))
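# Worked example of the FPN-style level assignment k = ceil(k0 + log2(sqrt(w*h)/224)),
# with ROIs given as (idx, x1, y1, x2, y2):
import numpy as np

print(assign_pyramid(np.array([0, 0, 0, 224, 224])))  # sqrt(224*224)/224 = 1 -> k = 4
print(assign_pyramid(np.array([0, 0, 0, 448, 448])))  # log2(2) = 1 -> k = 5
print(assign_pyramid(np.array([0, 0, 0, 112, 112])))  # log2(0.5) = -1 -> k = 3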