code
stringlengths
101
5.91M
class BytecodeCache(object):
    """Abstract base class for template bytecode caches.

    Concrete subclasses implement ``load_bytecode`` / ``dump_bytecode``
    against a real storage backend; ``clear`` is optional.
    """

    def load_bytecode(self, bucket):
        """Load compiled bytecode into *bucket* from the backing store."""
        raise NotImplementedError()

    def dump_bytecode(self, bucket):
        """Write the bytecode held by *bucket* to the backing store."""
        raise NotImplementedError()

    def clear(self):
        """Remove all cached bytecode.  No-op by default.

        BUG FIX: the original had ``def clear(self):`` with no body at all,
        which is a syntax error; an optional no-op is the intended contract.
        """

    def get_cache_key(self, name, filename=None):
        """Return a stable hex cache key derived from template *name* and
        optional *filename*."""
        digest = sha1(name.encode('utf-8'))  # renamed: `hash` shadowed the builtin
        if filename is not None:
            # '|' separates name and filename so distinct pairs cannot collide
            filename = '|' + filename
            if isinstance(filename, text_type):
                filename = filename.encode('utf-8')
            digest.update(filename)
        return digest.hexdigest()

    def get_source_checksum(self, source):
        """Return a checksum of the template *source* text."""
        return sha1(source.encode('utf-8')).hexdigest()

    def get_bucket(self, environment, name, filename, source):
        """Return a Bucket for the given template, populated from the cache."""
        key = self.get_cache_key(name, filename)
        checksum = self.get_source_checksum(source)
        bucket = Bucket(environment, key, checksum)
        self.load_bytecode(bucket)
        return bucket

    def set_bucket(self, bucket):
        """Persist *bucket*'s bytecode via ``dump_bytecode``."""
        self.dump_bytecode(bucket)
class CloseConstituent(Transition):
    # Parser transition that closes the currently open constituent: pops
    # children off the constituent stack down to the Dummy placeholder left
    # by the matching Open transition.

    def delta_opens(self):
        # Closing a constituent reduces the open count by one.
        return (- 1)

    def update_state(self, state, model):
        # Collect children until the Dummy marker pushed by Open is on top.
        children = []
        constituents = state.constituents
        while (not isinstance(model.get_top_constituent(constituents), Dummy)):
            children.append(constituents.value)
            constituents = constituents.pop()
        # The Dummy carries the label chosen by the Open transition.
        label = model.get_top_constituent(constituents).label
        constituents = constituents.pop()
        if (not model.is_top_down()):
            # In-order schemes: the item below the Open marker is also a child.
            children.append(constituents.value)
            constituents = constituents.pop()
        children.reverse()
        return (state.word_position, constituents, (label, children), CloseConstituent)

    def build_constituents(model, data):
        # NOTE(review): no self/cls parameter — presumably intended to be a
        # @staticmethod; the decorator may have been lost in extraction. Confirm.
        (labels, children_lists) = map(list, zip(*data))
        new_constituents = model.build_constituents(labels, children_lists)
        return new_constituents

    def is_legal(self, state, model):
        # A Close is only legal when it cannot strand unread words or produce
        # degenerate (over-unary) trees; rules differ per transition scheme.
        if (state.num_opens <= 0):
            return False
        if model.is_top_down():
            # Cannot close an Open that has no children yet.
            if isinstance(model.get_top_transition(state.transitions), OpenConstituent):
                return False
            # Closing the last open with words remaining would strand them.
            if ((state.num_opens <= 1) and (not state.empty_word_queue())):
                return False
            if (model.transition_scheme() == TransitionScheme.TOP_DOWN_COMPOUND):
                if ((state.num_opens == 1) and (not state.empty_word_queue())):
                    return False
            elif (not model.has_unary_transitions()):
                if ((state.num_opens == 2) and (not state.empty_word_queue())):
                    return False
        elif (model.transition_scheme() == TransitionScheme.IN_ORDER):
            if (not isinstance(model.get_top_transition(state.transitions), OpenConstituent)):
                return True
            if (isinstance(model.get_top_transition(state.transitions), OpenConstituent) and ((model.transition_scheme() is TransitionScheme.IN_ORDER_UNARY) or (model.transition_scheme() is TransitionScheme.IN_ORDER_COMPOUND))):
                return False
            if ((state.num_opens > 1) or state.empty_word_queue()):
                return True
            # Guard against runaway unary chains at the end of the parse.
            node = model.get_top_constituent(state.constituents.pop())
            if too_many_unary_nodes(node, model.unary_limit()):
                return False
        elif (model.transition_scheme() == TransitionScheme.IN_ORDER_COMPOUND):
            if isinstance(model.get_top_transition(state.transitions), OpenConstituent):
                return False
        return True

    def short_name(self):
        return 'Close'

    def __repr__(self):
        return 'CloseConstituent'

    def __eq__(self, other):
        # All CloseConstituent instances are interchangeable.
        if (self is other):
            return True
        if isinstance(other, CloseConstituent):
            return True
        return False

    def __hash__(self):
        # Fixed hash: consistent with __eq__ treating all instances as equal.
        return hash(93)
def _do_matlab_eval(json_dataset, salt, output_dir='output'):
    """Invoke the official MATLAB VOC evaluation wrapper via the shell.

    Builds the ``matlab -r "... voc_eval(...)"`` command line from the
    dataset's devkit info and runs it synchronously.
    """
    import subprocess
    logger.info('')
    logger.info('Computing results with the official MATLAB eval code.')
    logger.info('')
    info = voc_info(json_dataset)
    path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets', 'VOCdevkit-matlab-wrapper')
    # Assemble the command piecewise; the joined string is identical to the
    # original incremental concatenation.
    pieces = [
        'cd {} && '.format(path),
        '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB),
        '-r "dbstop if error; ',
        'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"'.format(info['devkit_path'], 'comp4' + salt, info['image_set'], output_dir),
    ]
    cmd = ''.join(pieces)
    logger.info('Running:\n{}'.format(cmd))
    # NOTE(review): shell=True with interpolated paths — acceptable for trusted
    # configs, but flagged in case any component can come from untrusted input.
    subprocess.call(cmd, shell=True)
# BUG FIX: a stray `.torch` token preceded this def (extraction residue);
# removed so the module parses.
def test_sasrec_predictions(tensor_schema, simple_masks):
    """Predicting scores for an explicit candidate set must produce output of
    the same shape as scoring all items (fixtures supply schema and masks)."""
    model = SasRecModel(tensor_schema.subset(['item_id']), hidden_size=64, max_len=5)
    (item_sequences, padding_mask, _, _) = simple_masks
    inputs = {'item_id': item_sequences}
    # Score only candidates 0..3 ...
    predictions_by_one = model.predict(inputs, padding_mask, torch.tensor([0, 1, 2, 3]))
    # ... and the full catalogue; shapes must agree.
    predictions_all = model.predict(inputs, padding_mask)
    assert (predictions_all.size() == predictions_by_one.size())
def logical_or(a, b):
    """Element-wise logical OR of *a* and *b* via the binary-op machinery."""

    def _python_or(lhs, rhs):
        # Plain-Python fallback mirroring `or` short-circuit semantics.
        return lhs or rhs

    return _binary_operation(_ti_core.expr_logical_or, _python_or, a, b)
class RemBertConfig(PretrainedConfig):
    """Configuration class for the RemBERT model.

    RemBERT decouples the input embedding size from the hidden size and from
    the (larger) output embedding size, so word embeddings are deliberately
    not tied.
    """

    model_type = 'rembert'

    def __init__(self, vocab_size=250300, hidden_size=1152, num_hidden_layers=32,
                 num_attention_heads=18, input_embedding_size=256,
                 output_embedding_size=1664, intermediate_size=4608,
                 hidden_act='gelu', hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True,
                 pad_token_id=0, bos_token_id=312, eos_token_id=313, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id,
                         eos_token_id=eos_token_id, **kwargs)
        # Vocabulary and embedding geometry.
        self.vocab_size = vocab_size
        self.input_embedding_size = input_embedding_size
        self.output_embedding_size = output_embedding_size
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        # Transformer body dimensions.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        # Regularization.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        # Initialization / numerics / caching.
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # Input and output embeddings differ in size, so they cannot be tied.
        self.tie_word_embeddings = False
class TestSearchBitwidthConfiguration(unittest.TestCase):
    # Integration test: builds a tiny Keras model, prepares a quantization
    # graph, and runs the mixed-precision bit-width search facade.

    def run_search_bitwidth_config_test(self, core_config):
        # Weights-only mixed-precision target platform capabilities.
        (base_config, mixed_precision_cfg_list) = get_op_quantization_configs()
        base_config = base_config.clone_and_edit(enable_activation_quantization=False)
        tpc = get_weights_only_mp_tpc_keras(base_config=base_config, mp_bitwidth_candidates_list=[(c.weights_n_bits, c.activation_n_bits) for c in mixed_precision_cfg_list], name='bitwidth_cfg_test')
        fw_info = DEFAULT_KERAS_INFO
        # Tiny Conv -> BN -> ReLU model keeps the search fast.
        input_shape = (1, 8, 8, 3)
        input_tensor = keras.layers.Input(shape=input_shape[1:])
        conv = keras.layers.Conv2D(3, 3)(input_tensor)
        bn = keras.layers.BatchNormalization()(conv)
        relu = keras.layers.ReLU()(bn)
        in_model = keras.Model(inputs=input_tensor, outputs=relu)
        keras_impl = KerasImplementation()

        def dummy_representative_dataset():
            # Placeholder: model_reader requires a callable here.
            return None
        graph = keras_impl.model_reader(in_model, dummy_representative_dataset)
        graph.set_fw_info(fw_info)
        graph.set_tpc(tpc)
        graph = set_quantization_configuration_to_graph(graph=graph, quant_config=core_config.quantization_config, mixed_precision_enable=core_config.mixed_precision_enable)
        for node in graph.nodes:
            node.prior_info = keras_impl.get_node_prior_info(node=node, fw_info=fw_info, graph=graph)
        analyzer_graph(keras_impl.attach_sc_to_node, graph, fw_info)
        # Collect statistics with a single random batch.
        mi = ModelCollector(graph, fw_info=DEFAULT_KERAS_INFO, fw_impl=keras_impl)
        for i in range(1):
            mi.infer([np.random.randn(*input_shape)])

        def representative_data_gen():
            (yield [np.random.random(input_shape)])
        calculate_quantization_params(graph, fw_info, fw_impl=keras_impl)
        keras_impl.get_sensitivity_evaluator(graph, core_config.mixed_precision_config, representative_data_gen, fw_info=fw_info)
        # With an unbounded KPI and integer programming the search must succeed...
        cfg = search_bit_width(graph_to_search_cfg=graph, fw_info=DEFAULT_KERAS_INFO, fw_impl=keras_impl, target_kpi=KPI(np.inf), mp_config=core_config.mixed_precision_config, representative_data_gen=representative_data_gen, search_method=BitWidthSearchMethod.INTEGER_PROGRAMMING)
        # ...and raise when the search method or the target KPI is missing.
        with self.assertRaises(Exception):
            cfg = search_bit_width(graph_to_search_cfg=graph, fw_info=DEFAULT_KERAS_INFO, fw_impl=keras_impl, target_kpi=KPI(np.inf), mp_config=core_config.mixed_precision_config, representative_data_gen=representative_data_gen, search_method=None)
        with self.assertRaises(Exception):
            cfg = search_bit_width(graph_to_search_cfg=graph, fw_info=DEFAULT_KERAS_INFO, fw_impl=keras_impl, target_kpi=None, mp_config=core_config.mixed_precision_config, representative_data_gen=representative_data_gen, search_method=BitWidthSearchMethod.INTEGER_PROGRAMMING)

    def test_mixed_precision_search_facade(self):
        # Exercise the search with average-weights and last-layer weighting.
        core_config_avg_weights = CoreConfig(quantization_config=DEFAULTCONFIG, mixed_precision_config=MixedPrecisionQuantizationConfigV2(compute_mse, get_average_weights, num_of_images=1, use_hessian_based_scores=False))
        self.run_search_bitwidth_config_test(core_config_avg_weights)
        core_config_last_layer = CoreConfig(quantization_config=DEFAULTCONFIG, mixed_precision_config=MixedPrecisionQuantizationConfigV2(compute_mse, get_last_layer_weights, num_of_images=1, use_hessian_based_scores=False))
        self.run_search_bitwidth_config_test(core_config_last_layer)
def get_datasets_for_test(P):
    """Build (source_ds, target_ds, n_classes) for open-set evaluation.

    The target split is the held-out test domain; the source split name and
    the number of known classes depend on the benchmark.
    """
    test_transform = get_test_transform()
    benchmark = P.dataset
    target_path = f'data/data_txt/{benchmark}/{P.test_domain}.txt'
    target_ds = FileDataset(benchmark, target_path, test_transform, add_idx=True)
    # Per-benchmark (source split name, number of known classes).
    benchmark_specs = {
        'OfficeHome': (f'no_{P.test_domain}OpenSet', 45),
        'Office31': (f'no_{P.test_domain}OpenSet', 20),
        'DomainNet': ('OpenSet_source_train', 100),
    }
    if benchmark not in benchmark_specs:
        raise NotImplementedError(f'Unknown benchmark {benchmark}')
    (source_name, n_classes) = benchmark_specs[benchmark]
    source_path = f'data/data_txt/{benchmark}/{source_name}.txt'
    source_ds = FileDataset(benchmark, source_path, test_transform, add_idx=True)
    return (source_ds, target_ds, n_classes)
def p2_2partitions(model='wrn_28x10_c100_dr03_p2'):
    """Plot epoch-200 test accuracy per algorithm for *model* on CIFAR-100
    and save the bar chart as ``<model>_output.png``."""
    csv = '2partitions.csv'
    out_file_name = os.path.join('.', f'{model}_output.png')
    # BUG FIX: the original query string was truncated ("... and model == ")
    # which pandas rejects; filter on the `model` argument via @-local syntax.
    df = pd.read_csv(csv).query("dataset == 'cifar100' and model == @model").query('epoch == 200')
    ax = sns.barplot(x='epoch', y='test_acc', hue='alg', data=df)
    # Sanity check: after filtering, exactly one model must remain.
    model = pd.unique(df.model)
    assert (len(model) == 1)
    model = model[0]
    ax.set_ylim(80, 83)
    ax.set_title(model)
    fig = ax.get_figure()
    fig.savefig(out_file_name)
    print(f'saving file to {out_file_name}')
def create_model_4(input_shape):
    """Build Conv2D -> BN -> BN -> ReLU test model with randomized BN stats."""
    uniform_init = initializers.random_uniform(0, 1)
    # Both BN layers share the same initializer configuration.
    bn_opts = dict(
        gamma_initializer='random_normal',
        beta_initializer='random_normal',
        moving_mean_initializer='random_normal',
        moving_variance_initializer=uniform_init,
    )
    inputs = Input(shape=input_shape)
    features = Conv2D(2, 3, padding='same', name='conv2d')(inputs)
    normed = BatchNormalization(name='bn1', **bn_opts)(features)
    normed = BatchNormalization(name='bn2', **bn_opts)(normed)
    outputs = ReLU()(normed)
    return keras.Model(inputs=inputs, outputs=outputs)
# BUG FIX: the dispatch wrapper lost its '@' and sat as a bare statement
# before the def; restored as a decorator.
@_function_dispatch(_linspace_dispatcher)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0):
    """Return *num* evenly spaced samples over [start, stop].

    If *endpoint*, the interval is closed; *retstep* additionally returns the
    step; *axis* places the sample axis in broadcast results.
    """
    num = _index_deprecate(num)
    if num < 0:
        raise ValueError('Number of samples, %s, must be non-negative.' % num)
    div = (num - 1) if endpoint else num
    # Promote to float so integer inputs still produce float samples.
    start = asanyarray(start) * 1.0
    stop = asanyarray(stop) * 1.0
    dt = result_type(start, stop, float(num))
    if dtype is None:
        dtype = dt
    delta = stop - start
    # Sample indices reshaped to broadcast against delta's dimensionality.
    y = _nx.arange(0, num, dtype=dt).reshape(((-1,) + (1,) * ndim(delta)))
    _mult_inplace = _nx.isscalar(delta)
    if num > 1:
        step = delta / div
        if _nx.any(step == 0):
            # Denormal-number handling: divide first, then multiply.
            y /= div
            if _mult_inplace:
                y *= delta
            else:
                y = y * delta
        elif _mult_inplace:
            y *= step
        else:
            y = y * step
    else:
        # Undefined step for a single sample.
        step = NaN
        y = y * delta
    y += start
    if endpoint and num > 1:
        # Force exact endpoint despite float accumulation.
        y[-1] = stop
    if axis != 0:
        y = _nx.moveaxis(y, 0, axis)
    if retstep:
        return (y.astype(dtype, copy=False), step)
    else:
        return y.astype(dtype, copy=False)
def create_dummy_data(data_dir, num_examples=1000, maxlen=20, alignment=False):
    """Write dummy parallel-text fixtures under *data_dir*.

    Creates train/valid/test ``.in``/``.out`` files of random lowercase
    letters (one example per line, 1..maxlen tokens). With *alignment*, also
    writes matching ``.align`` files of random "src-tgt" index pairs.
    """

    def _write_split(filename):
        # floor(26 * U[0,1)) + 97 yields codepoints for 'a'..'z'.
        letters = (97 + torch.floor(26 * torch.rand(num_examples * maxlen)).int())
        with open(os.path.join(data_dir, filename), 'w') as handle:
            cursor = 0
            for _ in range(num_examples):
                length = random.randint(1, maxlen)
                tokens = map(chr, letters[cursor:(cursor + length)])
                print(' '.join(tokens), file=handle)
                cursor += length

    def _write_alignment(src_name, tgt_name, out_name):
        # One alignment line per sentence pair; index count scales with the
        # average sentence length, indices uniform over each side.
        with open(os.path.join(data_dir, src_name), 'r') as src_f, \
                open(os.path.join(data_dir, tgt_name), 'r') as tgt_f, \
                open(os.path.join(data_dir, out_name), 'w') as handle:
            for (src_line, tgt_line) in zip(src_f, tgt_f):
                n_src = len(src_line.split())
                n_tgt = len(tgt_line.split())
                mean_len = (n_src + n_tgt) // 2
                n_pairs = random.randint(mean_len // 2, 2 * mean_len)
                src_idx = torch.floor(torch.rand(n_pairs) * n_src).int()
                tgt_idx = torch.floor(torch.rand(n_pairs) * n_tgt).int()
                pairs = ['{}-{}'.format(s, t) for (s, t) in zip(src_idx, tgt_idx)]
                print(' '.join(pairs), file=handle)

    for split in ('train', 'valid', 'test'):
        _write_split(split + '.in')
        _write_split(split + '.out')
    if alignment:
        for split in ('train', 'valid', 'test'):
            _write_alignment(split + '.in', split + '.out', split + '.align')
def evaluate_on_saved_data(args, data_loader, epoch):
    """Average LSD and VISQOL over a saved evaluation set.

    Zero metric values are treated as "not computed" and excluded from the
    averages; returns (avg_lsd, avg_visqol).
    """
    totals = {'lsd': 0, 'visqol': 0}
    counts = {'lsd': 0, 'visqol': 0}
    total_cnt = 0
    files_to_log = []
    # Support both nested (args.wandb.n_files_to_log) and flat configs.
    wandb_n_files_to_log = (args.wandb.n_files_to_log if ('wandb' in args) else args.wandb_n_files_to_log)
    with torch.no_grad():
        progress = LogProgress(logger, data_loader, name='Eval estimates')
        for data in progress:
            metrics_i = evaluate_lr_hr_pr_data(data, wandb_n_files_to_log, files_to_log, epoch, args)
            for key in ('lsd', 'visqol'):
                totals[key] += metrics_i[key]
                if metrics_i[key] != 0:
                    counts[key] += 1
            total_cnt += 1
    lsd_count = counts['lsd']
    visqol_count = counts['visqol']
    avg_lsd = (totals['lsd'] / lsd_count) if lsd_count != 0 else 0
    avg_visqol = (totals['visqol'] / visqol_count) if visqol_count != 0 else 0
    logger.info(bold(f'{args.experiment.name}, {args.experiment.lr_sr}->{args.experiment.hr_sr}. Test set performance:LSD={avg_lsd} ({lsd_count}/{total_cnt}), VISQOL={avg_visqol} ({visqol_count}/{total_cnt}).'))
    return (avg_lsd, avg_visqol)
def test_rpad_listoffset_array():
    # Case 1: ListOffsetArray over a plain float64 NumpyArray.
    content = ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
    offsets = ak.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
    listoffsetarray = ak.contents.listoffsetarray.ListOffsetArray(offsets, content)
    assert (to_list(listoffsetarray) == [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], []])
    # axis 0, target shorter than the array: contents unchanged, type becomes optional.
    assert (to_list(ak._do.pad_none(listoffsetarray, 3, 0)) == [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], []])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 3, 0).form == ak._do.pad_none(listoffsetarray, 3, 0).form)
    assert (ak.types.ArrayType(ak.types.OptionType(ak.types.ListType(ak.types.NumpyType('float64'))), 6) == ak.operations.type(ak._do.pad_none(listoffsetarray, 3, 0)))
    # axis 0, target longer than the array: None entries are appended.
    assert (to_list(ak._do.pad_none(listoffsetarray, 7, 0)) == [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], [], None])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 7, 0).form == ak._do.pad_none(listoffsetarray, 7, 0).form)
    assert (ak.types.ArrayType(ak.types.OptionType(ak.types.ListType(ak.types.NumpyType('float64'))), 7) == ak.operations.type(ak._do.pad_none(listoffsetarray, 7, 0)))
    # axis 1: every inner list is padded with None up to the target length.
    assert (to_list(ak._do.pad_none(listoffsetarray, 5, 1)) == [[0.0, 1.1, 2.2, None, None], [None, None, None, None, None], [3.3, 4.4, None, None, None], [5.5, None, None, None, None], [6.6, 7.7, 8.8, 9.9, None], [None, None, None, None, None]])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 5, 1).form == ak._do.pad_none(listoffsetarray, 5, 1).form)
    assert (ak.operations.type(ak._do.pad_none(listoffsetarray, 5, 1)) == ak.types.ArrayType(ak.types.ListType(ak.types.OptionType(ak.types.NumpyType('float64'))), 6))
    # axis 1, target 1: only empty lists gain a single None.
    assert (to_list(ak._do.pad_none(listoffsetarray, 1, 1)) == [[0.0, 1.1, 2.2], [None], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], [None]])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 1, 1).form == ak._do.pad_none(listoffsetarray, 1, 1).form)
    # Case 2: ListOffsetArray over an IndexedOptionArray (negative index -> None).
    content = ak.contents.numpyarray.NumpyArray(np.array([1.5, 3.3]))
    index = ak.index.Index64(np.array([0, (- 3), 1, (- 2), 1, 0, 0, (- 3), (- 13), 0, 1, 1, 0, 1, 1, 1, 1, (- 10), 0, (- 1), 0, 0, 0, 1, (- 1), 1, 1]))
    indexedarray = ak.contents.indexedoptionarray.IndexedOptionArray(index, content)
    offsets = ak.index.Index64(np.array([14, 15, 15, 15, 26, 26, 26]))
    listoffsetarray = ak.contents.listoffsetarray.ListOffsetArray(offsets, indexedarray)
    assert (to_list(listoffsetarray) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], []])
    # axis 0 padding on the option-typed array.
    assert (to_list(ak._do.pad_none(listoffsetarray, 1, 0)) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], []])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 1, 0).form == ak._do.pad_none(listoffsetarray, 1, 0).form)
    assert (ak.types.ArrayType(ak.types.OptionType(ak.types.ListType(ak.types.OptionType(ak.types.NumpyType('float64')))), 6) == ak.operations.type(ak._do.pad_none(listoffsetarray, 1, 0)))
    assert (to_list(ak._do.pad_none(listoffsetarray, 6, 0)) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], []])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 6, 0).form == ak._do.pad_none(listoffsetarray, 6, 0).form)
    assert (ak.types.ArrayType(ak.types.OptionType(ak.types.ListType(ak.types.OptionType(ak.types.NumpyType('float64')))), 6) == ak.operations.type(ak._do.pad_none(listoffsetarray, 6, 0)))
    assert (to_list(ak._do.pad_none(listoffsetarray, 7, 0)) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], [], None])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 7, 0).form == ak._do.pad_none(listoffsetarray, 7, 0).form)
    assert (ak.types.ArrayType(ak.types.OptionType(ak.operations.type(listoffsetarray).content), 7) == ak.operations.type(ak._do.pad_none(listoffsetarray, 7, 0)))
    assert (to_list(ak._do.pad_none(listoffsetarray, 9, 0)) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], [], None, None, None])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 9, 0).form == ak._do.pad_none(listoffsetarray, 9, 0).form)
    assert (ak.types.ArrayType(ak.types.OptionType(ak.operations.type(listoffsetarray).content), 9) == ak.operations.type(ak._do.pad_none(listoffsetarray, 9, 0)))
    # axis 1 padding on the option-typed array.
    assert (to_list(ak._do.pad_none(listoffsetarray, 1, 1)) == [[3.3], [None], [None], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [None], [None]])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 1, 1).form == ak._do.pad_none(listoffsetarray, 1, 1).form)
    assert (ak.types.ArrayType(ak.types.ListType(ak.types.OptionType(ak.types.NumpyType('float64'))), 6) == ak.operations.type(ak._do.pad_none(listoffsetarray, 1, 1)))
    assert (to_list(ak._do.pad_none(listoffsetarray, 4, 1)) == [[3.3, None, None, None], [None, None, None, None], [None, None, None, None], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [None, None, None, None], [None, None, None, None]])
    # Contents were already option-typed, so the overall type is unchanged.
    assert (ak.operations.type(listoffsetarray) == ak.operations.type(ak._do.pad_none(listoffsetarray, 4, 1)))
class ManglingDomainBase(object):
    """Mixin that wraps selected directives with a name-mangling wrapper.

    Subclasses populate ``directive_mangling_map`` with directive-name ->
    object-type entries; matching directives are wrapped on construction.
    """

    # directive name -> object type passed to wrap_mangling_directive
    directive_mangling_map = {}

    def __init__(self, *a, **kw):
        super(ManglingDomainBase, self).__init__(*a, **kw)
        self.wrap_mangling_directives()

    def wrap_mangling_directives(self):
        # Replace each mapped directive with its mangling wrapper in place.
        for (name, objtype) in self.directive_mangling_map.items():
            wrapped = wrap_mangling_directive(self.directives[name], objtype)
            self.directives[name] = wrapped
class DepLabelDataset(PosTagDataset):
    """Dataset of (dependent, head) pairs labeled with their dependency relation.

    Tokens with relation '_' or 'root' are skipped; heads are 1-indexed in
    the UD files.
    """

    def load_data_index(self):
        """Collect raw (dependent word, head word) pairs and relation labels."""
        sentences = util.read_data((self.input_name_base % (self.mode, 'ud')))
        pairs = []
        labels = []
        for (sentence_ud, words) in sentences:
            for (position, token) in enumerate(sentence_ud):
                rel = token['rel']
                if rel in ('_', 'root'):
                    continue
                head = token['head']
                # head is 1-indexed, so head - 1 addresses the head word.
                pairs.append([words[position], words[(head - 1)]])
                labels.append(rel)
        return (np.array(pairs), np.array(labels))

    def load_index(self, x_raw, words=None):
        """Map word pairs to integer ids, extending *words* with unseen tokens."""
        if words is None:
            words = []
        unseen = sorted(list(set(np.unique(x_raw)) - set(words)))
        if unseen:
            words = np.concatenate([words, unseen])
        vocab = {word: i for (i, word) in enumerate(words)}
        indexed = np.array([[vocab[token] for token in tokens] for tokens in x_raw])
        self.x = torch.from_numpy(indexed)
        self.words = words
        self.n_words = len(words)

    def load_data(self):
        """Collect concatenated (dependent, head) embeddings and relation labels."""
        ud_data = util.read_data((self.input_name_base % (self.mode, 'ud')))
        emb_data = util.read_data((self.input_name_base % (self.mode, self.representation)))
        features = []
        labels = []
        for ((sentence_ud, _words), (sentence_emb, _)) in zip(ud_data, emb_data):
            for (position, token) in enumerate(sentence_ud):
                rel = token['rel']
                if rel in ('_', 'root'):
                    continue
                head = token['head']
                features.append(np.concatenate([sentence_emb[position], sentence_emb[(head - 1)]]))
                labels.append(rel)
        return (np.array(features), np.array(labels))
def get_checkpoints_for_epochs(experiment_folder: Path, epochs: Union[(List, str)]) -> List:
    """Return checkpoints whose epoch (parsed from the filename stem after
    '=') is in *epochs*; *epochs* may be a list or a comma-separated string."""
    if isinstance(epochs, str):
        epochs = epochs.split(',')
    wanted = [int(e) for e in epochs]

    def _epoch_of(checkpoint):
        # Checkpoint stems look like "...=<epoch>".
        return int(checkpoint.stem.split('=')[1])

    return [chk for chk in get_all_checkpoints(experiment_folder) if _epoch_of(chk) in wanted]
def auto_augment_transform(config_str, hparams):
    """Build an AutoAugment transform from a dash-separated config string.

    The first section names the policy; remaining "key<number>" sections set
    hyper-parameters (only 'mstd' -> magnitude_std is recognized).
    """
    policy_name, *options = config_str.split('-')
    for option in options:
        parts = re.split('(\\d.*)', option)
        if len(parts) < 2:
            continue
        (key, val) = parts[:2]
        if key == 'mstd':
            # Only set when the caller did not already provide a value.
            hparams.setdefault('magnitude_std', float(val))
        else:
            assert False, 'Unknown AutoAugment config section'
    aa_policy = auto_augment_policy(policy_name)
    return AutoAugment(aa_policy)
class IsotopicMassFraction(pd.DataFrame):
    """DataFrame of isotopic mass fractions indexed by
    (atomic_number, mass_number), with one column per shell/zone.

    Carries ``time_0`` (the epoch the fractions refer to) through pandas
    operations via ``_metadata``.
    """

    _metadata = ['time_0']

    def __init__(self, *args, **kwargs):
        # Pop our extra keyword before delegating to the DataFrame constructor.
        if 'time_0' in kwargs:
            time_0 = kwargs.pop('time_0')
        else:
            time_0 = (0 * u.d)  # default epoch: day zero
        super(IsotopicMassFraction, self).__init__(*args, **kwargs)
        self.time_0 = time_0

    @property
    def _constructor(self):
        # BUG FIX: pandas invokes ``self._constructor(data)``, so this must be
        # a property returning the class; as a plain method every derived-frame
        # operation (groupby, arithmetic, ...) would fail. Decorator restored.
        return IsotopicMassFraction

    def _update_inventory(self):
        """Rebuild per-column {nuclide symbol: mass fraction} dictionaries."""
        self.comp_dicts = [dict() for i in range(len(self.columns))]
        for ((atomic_number, mass_number), mass_fractions) in self.iterrows():
            nuclear_symbol = f'{Z_to_elem(atomic_number)}{mass_number}'
            for i in range(len(self.columns)):
                self.comp_dicts[i][nuclear_symbol] = mass_fractions[i]

    @classmethod
    def from_inventories(cls, inventories):
        """Build an IsotopicMassFraction from decay inventories (grams).

        BUG FIX: the first parameter is ``cls`` and the body calls ``cls(...)``
        — the @classmethod decorator was missing.
        """
        multi_index_tuples = set([])
        for inventory in inventories:
            multi_index_tuples.update([cls.id_to_tuple(key) for key in inventory.contents.keys()])
        index = pd.MultiIndex.from_tuples(multi_index_tuples, names=['atomic_number', 'mass_number'])
        abundances = pd.DataFrame(data=0.0, index=index, columns=range(len(inventories)))
        for (i, inventory) in enumerate(inventories):
            for (nuclide, abundance) in inventory.masses('g').items():
                abundances.loc[(cls.id_to_tuple(nuclide), i)] = abundance
        return cls(abundances)

    @staticmethod
    def id_to_tuple(atomic_id):
        """Convert a nuclide identifier into a (Z, A) tuple.

        BUG FIX: takes no self/cls — the @staticmethod decorator was missing.
        """
        nuclide = Nuclide(atomic_id)
        return (nuclide.Z, nuclide.A)

    def to_inventories(self, shell_masses=None):
        """Convert to one Inventory per column; with *shell_masses*, fractions
        are scaled to absolute masses in grams."""
        comp_dicts = [dict() for i in range(len(self.columns))]
        for ((atomic_number, mass_number), abundances) in self.iterrows():
            nuclear_symbol = f'{Z_to_elem(atomic_number)}{mass_number}'
            for i in range(len(self.columns)):
                if shell_masses is None:
                    comp_dicts[i][nuclear_symbol] = abundances[i]
                else:
                    comp_dicts[i][nuclear_symbol] = (abundances[i] * shell_masses[i].to(u.g).value)
        return [Inventory(comp_dict, 'g') for comp_dict in comp_dicts]

    def decay(self, t):
        """Return a new IsotopicMassFraction decayed from ``time_0`` to *t*
        (days or Quantity)."""
        inventories = self.to_inventories()
        t_second = (u.Quantity(t, u.day).to(u.s).value - self.time_0.to(u.s).value)
        logger.info(f'Decaying abundances for {t_second} seconds')
        if t_second < 0:
            logger.warning(f'Decay time {t_second} is negative. This could indicate a miss-specified input model. A negative decay time can potentially lead to negative abundances.')
        decayed_inventories = [item.decay(t_second) for item in inventories]
        df = IsotopicMassFraction.from_inventories(decayed_inventories)
        df.sort_index(inplace=True)
        # NOTE(review): assert is stripped under -O; kept as-is to preserve the
        # exception type callers may expect.
        assert df.ge(0.0).all().all(), 'Negative abundances detected. Please make sure your input abundances are correct.'
        return df

    def as_atoms(self):
        """Sum mass fractions over mass numbers, giving per-element totals."""
        return self.groupby('atomic_number').sum()

    def merge(self, other, normalize=True):
        """Combine elemental totals with *other*, optionally renormalizing
        each column to sum to 1.

        NOTE(review): this intentionally shadows ``DataFrame.merge`` with
        different semantics.
        """
        isotope_abundance = self.as_atoms()
        isotope_abundance = isotope_abundance.fillna(0.0)
        modified_df = isotope_abundance.add(other, fill_value=0)
        if normalize:
            norm_factor = modified_df.sum(axis=0)
            modified_df /= norm_factor
        return modified_df
class _BatchNorm(_NormBase):
    # Shared forward logic for BatchNorm1d/2d/3d; dimension checks live in
    # subclasses via _check_input_dim.

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True):
        super(_BatchNorm, self).__init__(num_features, eps, momentum, affine, track_running_stats)

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)
        # momentum=None means "cumulative moving average" rather than
        # exponential; start the factor at 0.0 in that case.
        if (self.momentum is None):
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum
        if (self.training and self.track_running_stats):
            if (self.num_batches_tracked is not None):
                self.num_batches_tracked = (self.num_batches_tracked + 1)
                if (self.momentum is None):
                    # Cumulative moving average: factor shrinks as 1/N.
                    exponential_average_factor = (1.0 / float(self.num_batches_tracked))
                else:
                    # Exponential moving average with fixed momentum.
                    exponential_average_factor = self.momentum
        '\n Decide whether the mini-batch stats should be used for normalization rather than the buffers.\n Mini-batch stats are used in training mode, and in eval mode when buffers are None.\n '
        if self.training:
            bn_training = True
        else:
            bn_training = ((self.running_mean is None) and (self.running_var is None))
        '\n Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be\n passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are\n used for normalization (i.e. in eval mode when buffers are not None).\n '
        # Buffers are passed only when tracked or when used for normalization.
        return F.batch_norm(input, (self.running_mean if ((not self.training) or self.track_running_stats) else None), (self.running_var if ((not self.training) or self.track_running_stats) else None), self.weight, self.bias, bn_training, exponential_average_factor, self.eps)
def is_nominal(dtype: Any) -> bool:
    """Return True if *dtype* represents a nominal (categorical) type.

    Continuous and datetime dtypes are never nominal; numpy dtypes are
    checked by scalar-type subclassing, pandas extension dtypes by instance.
    """
    if is_continuous(dtype) or is_datetime(dtype):
        return False
    if not isinstance(dtype, np.dtype):
        # pandas extension dtype: instance check against known categoricals.
        return any(isinstance(dtype, c) for c in CATEGORICAL_PANDAS_DTYPES)
    scalar_type = dtype.type
    return any(issubclass(scalar_type, c) for c in CATEGORICAL_NUMPY_DTYPES)
def make_lr_scheduler(cfg, optimizer):
    """Build the warmup multi-step LR scheduler from the solver config."""
    solver = cfg.SOLVER
    return WarmupMultiStepLR(
        optimizer,
        solver.STEPS,
        solver.GAMMA,
        warmup_factor=solver.WARMUP_FACTOR,
        warmup_iters=solver.WARMUP_ITERS,
        warmup_method=solver.WARMUP_METHOD,
    )
class AstToTestCaseTransformer(ast.NodeVisitor):
    # Walks a parsed test module and rebuilds each function as a
    # DefaultTestCase; functions containing unparsable statements are dropped.

    def __init__(self, test_cluster: ModuleTestCluster, create_assertions: bool, constant_provider: ConstantProvider):
        # Test case currently being assembled for the visited function.
        self._current_testcase: dtc.DefaultTestCase = dtc.DefaultTestCase(test_cluster)
        # Flipped to False as soon as any statement cannot be parsed.
        self._current_parsable: bool = True
        # Source-level variable name -> statement variable reference.
        self._var_refs: dict[(str, vr.VariableReference)] = {}
        self._testcases: list[dtc.DefaultTestCase] = []
        self._number_found_testcases: int = 0
        self._test_cluster = test_cluster
        self._create_assertions = create_assertions
        self._constant_provider = constant_provider

    def visit_FunctionDef(self, node: ast.FunctionDef) -> Any:
        # Start a fresh test case per function; keep it only if fully parsable.
        self._number_found_testcases += 1
        self._current_testcase = dtc.DefaultTestCase(self._test_cluster)
        self._current_parsable = True
        self._var_refs.clear()
        self.generic_visit(node)
        if self._current_parsable:
            self._testcases.append(self._current_testcase)

    def visit_Assign(self, node: ast.Assign) -> Any:
        # Translate an assignment into a variable-creating statement, or mark
        # the whole function unparsable if translation fails.
        if self._current_parsable:
            if ((result := create_assign_stmt(node, self._current_testcase, self._var_refs, self._test_cluster, self._constant_provider)) is None):
                self._current_parsable = False
            else:
                (ref_id, stm) = result
                var_ref = self._current_testcase.add_variable_creating_statement(stm)
                self._var_refs[ref_id] = var_ref

    def visit_Assert(self, node: ast.Assert) -> Any:
        # Attach the assertion to the statement that created the asserted variable.
        if (self._current_parsable and self._create_assertions):
            if ((result := create_assert_stmt(self._var_refs, node)) is not None):
                (assertion, var_ref) = result
                self._current_testcase.get_statement(var_ref.get_statement_position()).add_assertion(assertion)

    def testcases(self) -> list[dtc.DefaultTestCase]:
        # NOTE(review): likely intended as a @property (accessor with no
        # arguments); the decorator may have been lost in extraction — confirm
        # against callers before relying on attribute access.
        return self._testcases
def boundary_handle(pos: ti.types.ndarray(ndim=1), vel: ti.types.ndarray(ndim=1), boundary_box: ti.types.ndarray(ndim=1)):
    # Clamp particle positions into the axis-aligned boundary box and reflect
    # velocity (with damping) along the accumulated collision normal.
    # NOTE(review): ti.static / ndarray annotations imply a Taichi kernel; the
    # @ti.kernel decorator may have been lost in extraction — confirm.
    # `particle_num`, `eps`, `damping` are module-level values defined elsewhere.
    for i in range(particle_num):
        collision_normal = ti.Vector([0.0, 0.0, 0.0])
        # Lower bound: clamp and accumulate an outward (-1) normal component.
        for j in ti.static(range(3)):
            if (pos[i][j] < boundary_box[0][j]):
                pos[i][j] = boundary_box[0][j]
                collision_normal[j] += (- 1.0)
        # Upper bound: clamp and accumulate a (+1) normal component.
        for j in ti.static(range(3)):
            if (pos[i][j] > boundary_box[1][j]):
                pos[i][j] = boundary_box[1][j]
                collision_normal[j] += 1.0
        collision_normal_length = collision_normal.norm()
        if (collision_normal_length > eps):
            # Normalize, then remove the (damped) normal velocity component.
            collision_normal /= collision_normal_length
            vel[i] -= (((1.0 + damping) * collision_normal.dot(vel[i])) * collision_normal)
def psp_block(prev_layer, level, feature_map_shape, input_shape):
    """One pyramid-pooling branch: average-pool at the given *level*, 1x1
    conv to 512 channels, BN, ReLU, then upsample back to *feature_map_shape*.

    Only a (512, 512) input shape has defined pooling parameters.
    """
    if input_shape == (512, 512):
        # level -> [kernel, stride] for the average pooling.
        kernel_strides_map = {1: [64, 64], 2: [32, 32], 3: [22, 21], 6: [11, 9]}
    else:
        # BUG FIX: the original concatenated a str with the input_shape tuple,
        # raising TypeError instead of the intended ValueError; format instead.
        raise ValueError('Pooling parameters for input shape {} are not defined.'.format(input_shape))
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    names = ['class_psp_' + str(level) + '_conv', 'class_psp_' + str(level) + '_bn']
    kernel = (kernel_strides_map[level][0], kernel_strides_map[level][0])
    strides = (kernel_strides_map[level][1], kernel_strides_map[level][1])
    prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
    prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0], use_bias=False)(prev_layer)
    prev_layer = resnet.BN(bn_axis, name=names[1])(prev_layer)
    prev_layer = Activation('relu')(prev_layer)
    prev_layer = Upsampling(feature_map_shape)(prev_layer)
    return prev_layer
def count_params(model: tf.keras.models.Model) -> int:
    """Return the total number of scalar values across the model's trainable
    weights."""
    total = 0
    for weight in model.trainable_weights:
        # Product of the weight's static shape = number of scalars it holds.
        total += np.prod(weight.shape.as_list())
    return int(total)
class DEAPQDAlgorithm(object):
    """Driver for a DEAP-based Quality-Diversity evolutionary algorithm.

    Holds the toolbox, container, statistics and logging configuration, runs
    the chosen EA function (`ea_fn`), and periodically pickles results.
    """

    def __init__(self, toolbox, container=None, stats=None, halloffame=None, iteration_filename='iteration-%i.p', final_filename='final.p', ea_fn=qdSimple, cxpb=0.0, mutpb=1.0, verbose=False, results_infos=None, log_base_path='.', save_period=None, iteration_callback_fn=None, **kwargs):
        # kwargs may carry extra run parameters (init_batch_size, batch_size, niter, ...).
        self._update_params(**kwargs)
        self.toolbox = toolbox
        self.halloffame = halloffame
        self.iteration_filename = iteration_filename
        self.final_filename = final_filename
        self.ea_fn = ea_fn
        self.cxpb = cxpb  # crossover probability
        self.mutpb = mutpb  # mutation probability
        self.verbose = verbose
        self.log_base_path = log_base_path
        self.save_period = save_period
        self.iteration_callback_fn = iteration_callback_fn
        self._init_container(container)
        self._init_stats(stats)
        self._results_infos = {}
        if (results_infos != None):
            self.add_results_infos(results_infos)
        self.total_elapsed = 0.0

    def _init_container(self, container=None):
        """Use the provided container or fall back to an empty default Container."""
        if (container == None):
            self.container = Container()
        else:
            self.container = container

    def _init_stats(self, stats=None):
        """Default statistics: per-dimension mean/std/min/max of fitness values."""
        if (stats == None):
            self.stats = deap.tools.Statistics((lambda ind: ind.fitness.values))
            self.stats.register('avg', np.mean, axis=0)
            self.stats.register('std', np.std, axis=0)
            self.stats.register('min', np.min, axis=0)
            self.stats.register('max', np.max, axis=0)
        else:
            self.stats = stats

    def gen_init_batch(self, init_batch_size=None):
        """Create the initial population; also starts the wall-clock timer."""
        if ((not hasattr(self, 'start_time')) or (self.start_time == None)):
            self.start_time = timer()
        if (init_batch_size != None):
            self.init_batch_size = init_batch_size
        if ((not hasattr(self, 'init_batch_size')) or (self.init_batch_size == None)):
            raise ValueError("Please specify 'init_batch_size'")
        self.init_batch = self.toolbox.population(n=self.init_batch_size)

    def _update_params(self, **kwargs):
        """Copy whitelisted, non-None keyword parameters onto the instance."""
        for (k, v) in kwargs.items():
            if (v != None):
                if ((k == 'init_batch_size') or (k == 'batch_size') or (k == 'niter') or (k == 'start_time') or (k == 'save_period') or (k == 'iteration_filename') or (k == 'final_filename') or (k == 'log_base_path')):
                    setattr(self, k, v)

    def _iteration_callback(self, iteration, batch, container, logbook):
        """Per-iteration hook: forward to the user callback and save checkpoints."""
        self.current_iteration = iteration
        self.current_batch = batch
        self.logbook = logbook
        if (self.iteration_callback_fn is not None):
            self.iteration_callback_fn(iteration, batch, container, logbook)
        if ((self.save_period == None) or (self.save_period == 0)):
            return
        # Skip the final iteration: run() writes the final results itself.
        if (((iteration % self.save_period) == 0) and (iteration != self.niter) and (self.iteration_filename != None) and (self.iteration_filename != '')):
            self.save(os.path.join(self.log_base_path, (self.iteration_filename % self.current_iteration)))

    def run(self, init_batch=None, **kwargs):
        """Execute the EA; returns total elapsed wall-clock time in seconds."""
        self._update_params(**kwargs)
        if (init_batch == None):
            if ((not hasattr(self, 'init_batch')) or (self.init_batch == None)):
                self.gen_init_batch()
        else:
            self.init_batch = init_batch
        (batch, logbook) = self.ea_fn(self.init_batch, self.toolbox, self.container, self.batch_size, self.niter, cxpb=self.cxpb, mutpb=self.mutpb, stats=self.stats, halloffame=self.halloffame, verbose=self.verbose, start_time=self.start_time, iteration_callback=self._iteration_callback)
        if ((self.final_filename != None) and (self.final_filename != '')):
            self.save(os.path.join(self.log_base_path, self.final_filename))
        self.total_elapsed = (timer() - self.start_time)
        return self.total_elapsed

    def data_archive(self):
        """Collect run state plus user-supplied infos into a picklable dict."""
        results = {}

        def copy_attr(obj, names):
            # Copy only the attributes that actually exist on obj.
            for name in names:
                if hasattr(obj, name):
                    results[name] = getattr(obj, name)
        copy_attr(self, ['init_batch_size', 'batch_size', 'niter', 'container', 'current_iteration', 'current_batch', 'logbook', 'container'])
        results = {**results, **self._results_infos}
        return results

    def save(self, outputFile):
        """Pickle the archive dict to *outputFile*."""
        results = self.data_archive()
        with open(outputFile, 'wb') as f:
            pickle.dump(results, f)

    def add_results_infos(self, *args):
        """Attach extra info either as a dict or as a (key, value) pair."""
        if (len(args) == 1):
            self._results_infos = {**self._results_infos, **args[0]}
        elif (len(args) == 2):
            self._results_infos[args[0]] = args[1]
        else:
            raise ValueError('Please either pass a dictionary or key, value as parameter.')
def loss_fn(x, y):
    """Negative-cosine-similarity loss (BYOL/SimSiam style).

    Both inputs are L2-normalised along the last dimension; the result is
    ``2 - 2 * cos(x, y)``: 0 for parallel vectors, 2 for orthogonal ones.
    """
    x_unit = F.normalize(x, dim=-1, p=2)
    y_unit = F.normalize(y, dim=-1, p=2)
    cosine = (x_unit * y_unit).sum(dim=-1)
    return 2 - 2 * cosine
def construct_optimizer(model: torch.nn.Module, cfg: OmegaConf):
    """Build the optimizer selected by ``cfg.train.optimizer``.

    Parameters are split into three groups: regular weights (with weight
    decay), bias/coordinate parameters (no decay), and radius parameters
    (no decay, learning rate scaled by ``radius_lr_factor``).
    """
    optimizer_type = cfg.train.optimizer
    lr = cfg.train.lr
    radius_lr_factor = cfg.train.radius_lr_factor
    momentum = cfg.train.optimizer_params.momentum
    nesterov = cfg.train.optimizer_params.nesterov
    regular, radius_params, undecayed = [], [], []
    for name, param in model.named_parameters():
        if len(param.shape) == 1 or name.endswith('.bias'):
            # 1-D tensors (biases, norm scales) are excluded from decay.
            undecayed.append(param)
        elif 'weight_coord' in name:
            undecayed.append(param)
        elif 'radius' in name:
            radius_params.append(param)
        else:
            regular.append(param)
    param_groups = [
        {'params': regular},
        {'params': undecayed, 'weight_decay': 0.0},
        {'params': radius_params, 'weight_decay': 0.0, 'lr': radius_lr_factor * lr},
    ]
    if optimizer_type == 'SGD':
        return torch.optim.SGD(param_groups, lr=lr, momentum=momentum, nesterov=nesterov, weight_decay=cfg.train.weight_decay)
    if optimizer_type == 'Adam':
        return torch.optim.Adam(param_groups, lr=lr, weight_decay=cfg.train.weight_decay)
    if optimizer_type == 'RMSprop':
        # RMSprop intentionally uses a single flat parameter list.
        return torch.optim.RMSprop(model.parameters(), lr=lr)
    raise ValueError(f'Unexpected value for type of optimizer (cfg.train.optimizer): {optimizer_type}')
def _calc_estimate_time(timeinfo, max_iter, last_iter, iter): timeinfo.past_time = (time.time() - timeinfo.start_time) timeinfo.estimate_time = ((timeinfo.past_time * (max_iter - last_iter)) / (iter - last_iter)) timeinfo.remain_time = (timeinfo.estimate_time - timeinfo.past_time) timeinfo.last_past_time = timeinfo.past_time return timeinfo
def test_random(env, nb_episodes, nb_dims=2, gif=False, score_step=1000, verbose=True, params={}):
    """Baseline curriculum: sample task parameters uniformly at random.

    Returns the list of scores recorded every *score_step* episodes. With
    *gif* set (2-D only), competence snapshots are collected every 250
    episodes and rendered into toy_env_gifs/.

    NOTE(review): *params* is unused and has a mutable default ({}) —
    confirm with callers before removing it.
    """
    scores = []
    gif_step_size = 250
    # Bookkeeping for the gif: competence grids plus their x/y bin boundaries.
    bk = {'comp_grids': [], 'comp_xs': [], 'comp_ys': [], 'tasks': []}
    for i in range((nb_episodes + 1)):
        if ((i % score_step) == 0):
            scores.append(env.get_score())
            if (nb_dims == 2):
                if verbose:
                    print(env.cube_competence)
            elif verbose:
                print('it:{}, score:{}'.format(i, scores[(- 1)]))
        if ((i > 100) and ((i % gif_step_size) == 0) and (gif is True)):
            if (nb_dims == 2):
                bk['comp_grids'].append(env.cube_competence.copy())
                bk['comp_xs'].append(env.bnds[0].copy())
                bk['comp_ys'].append(env.bnds[1].copy())
        # Uniformly random task parameters in [0, 1)^nb_dims.
        p = np.random.random(nb_dims)
        env.episode(p)
    if (gif and (nb_dims == 2)):
        bk['tasks'] = env.params
        print('Creating gif...')
        random_plot_gif(bk, gifname=('random_' + str(time.time())), gifdir='toy_env_gifs/')
        print('Done (see graphics/toy_env_gifs/ folder)')
    return scores
class AlgebraicReal(AlgebraicNumber_base):
    """An element of the field AA of real algebraic numbers.

    Values carry an exact symbolic descriptor plus a cached real interval
    that is refined on demand until predicates (sign, comparison, rounding)
    can be decided exactly.
    """

    def __init__(self, x):
        AlgebraicNumber_base.__init__(self, AA, x)
        self._ensure_real()

    def _ensure_real(self):
        """Force the cached interval to be real (not a complex interval)."""
        if is_ComplexIntervalFieldElement(self._value):
            self._value = self._value.real()

    def _more_precision(self):
        """Refine the cached interval, keeping it real afterwards."""
        AlgebraicNumber_base._more_precision(self)
        self._ensure_real()

    def __reduce__(self):
        # Pickle via the exact descriptor; the interval cache is recomputed.
        return (AlgebraicReal, (self._descr,))

    def _richcmp_(self, other, op):
        """Rich comparison, escalating from cheap interval tests to exact ones."""
        if (self is other):
            return rich_to_bool(op, 0)
        sd = self._descr
        od = other._descr
        # Both exact rationals: compare values directly.
        if ((type(sd) is ANRational) and (type(od) is ANRational)):
            return richcmp(sd._value, od._value, op)
        # Disjoint intervals decide any comparison immediately.
        if (not self._value.overlaps(other._value)):
            return self._value._richcmp_(other._value, op)
        if ((op == op_EQ) or (op == op_NE)):
            # Cheap (in)equality shortcuts: comparison against a zero
            # rational, or two elements of the same generator.
            if ((type(sd) is ANRational) and (not sd._value)):
                return (bool(other) == (op == op_NE))
            elif ((type(od) is ANRational) and (not od._value)):
                return (bool(self) == (op == op_NE))
            elif ((type(sd) is ANExtensionElement) and (type(od) is ANExtensionElement) and (sd._generator is od._generator)):
                return ((sd._value == od._value) if (op == op_EQ) else (sd._value != od._value))
            else:
                try:
                    # Different minimal polynomials imply different numbers.
                    if (self._minimal_polynomial != other._minimal_polynomial):
                        return (op == op_NE)
                except AttributeError:
                    pass
        try:
            # NOTE(review): this guard looks suspicious — upstream Sage calls
            # cmp_elements_with_same_minpoly only when the minimal polynomials
            # are EQUAL; confirm the '!=' condition here.
            if (self._minimal_polynomial != other._minimal_polynomial):
                c = cmp_elements_with_same_minpoly(self, other, self.minpoly())
                if (c is not None):
                    return rich_to_bool(op, c)
        except AttributeError:
            pass
        # One cheap precision bump for each side before the expensive
        # exact subtraction-and-sign fallback.
        if (self._value.prec() < 128):
            self._more_precision()
        if (other._value.prec() < 128):
            other._more_precision()
        if (not self._value.overlaps(other._value)):
            return self._value._richcmp_(other._value, op)
        return rich_to_bool(op, (self - other).sign())

    def _integer_(self, Z=None):
        """Convert to a Sage Integer; raise ValueError if not an integer."""
        if (self._value.lower().ceiling() > self._value.upper().floor()):
            # The interval provably contains no integer.
            raise ValueError(lazy_string('Cannot coerce non-integral Algebraic Real %s to Integer', self))
        self.exactify()
        if (not isinstance(self._descr, ANRational)):
            raise ValueError(lazy_string('Cannot coerce irrational Algebraic Real %s to Integer', self))
        return ZZ(self._descr._value)

    def _floor_ceil(self, method):
        """Apply rounding *method*, refining precision until unambiguous."""
        for i in itertools.count():
            candidate = method(self._value.lower())
            if (candidate == method(self._value.upper())):
                return candidate
            self._more_precision()
            # After a few refinements, try the exact rational value — cheap
            # unless the number lives in a proper extension field.
            if ((i == 2) and (not isinstance(self._descr, ANExtensionElement))):
                try:
                    return method(self._rational_())
                except (ValueError, TypeError):
                    pass

    def floor(self):
        """Largest integer <= self."""
        return self._floor_ceil((lambda x: x.floor()))

    def ceil(self):
        """Smallest integer >= self."""
        return self._floor_ceil((lambda x: x.ceil()))

    def round(self):
        """Nearest integer to self."""
        return self._floor_ceil((lambda x: x.round()))

    def trunc(self):
        """Truncate toward zero."""
        return self._floor_ceil((lambda x: x.trunc()))

    def _rational_(self):
        """Convert to a Rational; raise ValueError if irrational."""
        self.exactify()
        if (not isinstance(self._descr, ANRational)):
            raise ValueError(lazy_string('Cannot coerce irrational Algebraic Real %s to Rational', self))
        return QQ(self._descr._value)

    def real(self):
        """Real part: self (already real)."""
        return self

    def imag(self):
        """Imaginary part: always zero for a real number."""
        return AA_0

    def conjugate(self):
        """Complex conjugate: self (already real)."""
        return self

    def multiplicative_order(self):
        """Order in the multiplicative group: 1, 2, or infinity."""
        # If |self| is provably != 1, the order is infinite.
        if (1 not in RIF(self).abs()):
            return infinity.infinity
        if (self == 1):
            return 1
        elif (self == (- 1)):
            return 2
        else:
            return infinity.infinity

    def sign(self):
        """Return -1, 0, or 1, deciding exactly (may trigger exactification)."""
        # Fast path: the cached interval already excludes zero.
        if (not self._value.contains_zero()):
            return self._value.unique_sign()
        sd = self._descr
        if isinstance(self._descr, ANRational):
            return sd._value.sign()
        elif isinstance(self._descr, ANExtensionElement):
            # A nonzero number-field element: refinement must eventually
            # exclude zero from the interval.
            while self._value.contains_zero():
                self._more_precision()
            return self._value.unique_sign()
        elif (type(sd) is ANBinaryExpr):
            # Try to derive the sign from the operand signs.
            ls = sd._left.sign()
            rs = sd._right.sign()
            if ((sd._op is operator.mul) or (sd._op is operator.truediv)):
                return (ls * rs)
            elif (sd._op is operator.add):
                if (ls == rs):
                    return ls
            else:
                # Subtraction: several shortcuts settle the sign and let us
                # simplify the descriptor at the same time.
                if (ls == (- rs)):
                    return ls
                elif (not ls):
                    self._set_descr((- sd._right)._descr)
                    return (- rs)
                elif (not rs):
                    self._set_descr(sd._left._descr)
                    return ls
                elif (sd._left is sd._right):
                    self._set_descr(ANRational(QQ.zero()))
                    return 0
        elif (type(sd) is ANUnaryExpr):
            if (sd._op == 'abs'):
                c = (1 if bool(sd._arg) else 0)
                if (not c):
                    self._set_descr(ANRational(QQ.zero()))
                return c
            elif (sd._op == '-'):
                return (- sd._arg.sign())
            elif (sd._op == '~'):
                return sd._arg.sign()
        # One precision doubling before the expensive exactify.
        if (self._value.prec() < 128):
            self._more_precision()
            if (not self._value.contains_zero()):
                return self._value.unique_sign()
        if (type(sd) is ANBinaryExpr):
            # Exactify the operands and retry cheap exact comparisons.
            sd._left.exactify()
            sd._right.exactify()
            if ((type(sd._left._descr) is ANRational) and (type(sd._right._descr) is ANRational)):
                ret = sd._op(sd._left._descr._value, sd._right._descr._value)
                if (ret == 0):
                    self._set_descr(ANRational(QQ.zero()))
                    return 0
                return ret.sign()
            if (sd._left.minpoly() == sd._right.minpoly()):
                # Conjugate-comparison trick for equal minimal polynomials.
                right = (sd._right if (sd._op is operator.sub) else (- sd._right))
                c = cmp_elements_with_same_minpoly(sd._left, right, sd._left.minpoly())
                if (c == 0):
                    self._set_descr(ANRational(QQ.zero()))
                    return 0
                elif (c is not None):
                    return c
            ret = sd._op(sd._left._value, sd._right._value)
            if (not ret.contains_zero()):
                return ret.unique_sign()
            if (not ret):
                self._set_descr(ANRational(QQ.zero()))
                return 0
        # Last resort: exactify self and recompute.
        self.exactify()
        return self.sign()

    def _interval_fast(self, prec):
        """Quick interval approximation at *prec* bits (may be loose)."""
        return self.interval_fast(RealIntervalField(prec))

    def interval_exact(self, field):
        """Return the unique *field* element obtained by exact rounding.

        First tries interval refinement with a small extra-precision budget,
        then falls back to exactifying and refining until the rounding of
        both interval endpoints agrees (or brackets the value tightly).
        """
        for extra in (0, 40):
            target = (RR(1.0) >> (field.prec() + extra))
            pval = self.interval_diameter(target)
            pbot = pval.lower()
            ptop = pval.upper()
            val = field(pval)
            bot = val.lower()
            top = val.upper()
            prbot = pbot.parent()(bot)
            prtop = ptop.parent()(top)
            # Accept when both endpoints round identically, or they are
            # adjacent floats with the true value strictly between them.
            if ((bot == top) or ((bot.nextabove() == top) and (prbot < pbot) and (ptop < prtop))):
                return val
        self.exactify()
        while True:
            pval = self._value
            pbot = pval.lower()
            ptop = pval.upper()
            val = field(pval)
            bot = val.lower()
            top = val.upper()
            prbot = pbot.parent()(bot)
            prtop = ptop.parent()(top)
            if ((bot == top) or ((bot.nextabove() == top) and (prbot < pbot) and (ptop < prtop))):
                return val
            self._more_precision()

    def real_number(self, field):
        """Approximate self in *field* (rounding not guaranteed exact)."""
        v = self.interval(RealIntervalField(field.prec()))
        return field(v)
    # MPFR coercion reuses the interval-based approximation.
    _mpfr_ = real_number

    def __float__(self):
        return float(RR(self))

    def _complex_mpfr_field_(self, field):
        """Coerce into a complex (interval) field of matching precision."""
        if isinstance(field, sage.rings.abc.ComplexIntervalField):
            return field(self.interval(field._real_field()))
        else:
            return field(self.real_number(field._real_field()))

    def real_exact(self, field):
        """Return self rounded correctly to *field* precision."""
        for extra in (0, 40):
            target = (RR(1.0) >> (field.prec() + extra))
            val = self.interval_diameter(target)
            fbot = field(val.lower())
            ftop = field(val.upper())
            if (fbot == ftop):
                return ftop
        # Endpoints still round differently: round the exactly-computed
        # midpoint instead, using a couple of guard bits.
        rifp1 = RealIntervalField((field.prec() + 1))
        rifp2 = RealIntervalField((field.prec() + 2))
        val = self.interval_exact(rifp1)
        mid = rifp2(val).center()
        return field(mid)
def check_ieee_macros(config):
    """Probe availability of the IEEE-754 classification macros.

    Returns ``(priv, pub)`` definition lists: private ``decl_*`` tokens for
    macros that need declaring, and public ``NPY_*`` tokens for macros that
    are present (either already true in the headers or declarable).
    """
    private_defs = []
    public_defs = []
    needs_decl_check = []

    def _record(fname):
        # Register both the private and the public define for *fname*.
        token = fname2def('decl_%s' % fname)
        private_defs.append(token)
        public_defs.append('NPY_%s' % token)

    for fname in ['isnan', 'isinf', 'signbit', 'isfinite']:
        symbol = fname2def('decl_%s' % fname)
        if config.check_decl(symbol, headers=['Python.h', 'math.h']):
            # Python's headers already decided; advertise publicly only when
            # the macro actually evaluates to true.
            if config.check_macro_true(symbol, headers=['Python.h', 'math.h']):
                public_defs.append('NPY_%s' % symbol)
        else:
            needs_decl_check.append(fname)
    for fname in needs_decl_check:
        if config.check_decl(fname, headers=['Python.h', 'math.h']):
            _record(fname)
    return (private_defs, public_defs)
def GenerateSM60_Simt(manifest, cuda_version):
    """Register SM60 SIMT (CUDA-core) GEMM operations with the manifest.

    Enumerates layout combinations and threadblock tile shapes for an fp16
    multiply-accumulate SIMT math instruction targeting compute capability
    6.0 and above.
    """
    # (LayoutA, LayoutB, LayoutC) combinations; C is always column-major.
    layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor)]
    # Scalar (1x1x1) fp16 multiply-add on the SIMT (CUDA core) path.
    math_instructions = [MathInstruction([1, 1, 1], DataType.f16, DataType.f16, DataType.f16, OpcodeClass.Simt, MathOperation.multiply_add)]
    min_cc = 60
    max_cc = 1024  # effectively "no upper bound" on compute capability
    alignment_constraints = [1]
    for math_inst in math_instructions:
        # Threadblock tile [M, N, K], pipeline stages, warp arrangement.
        tile_descriptions = [TileDescription([256, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 256, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc), TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc), TileDescription([32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc)]
        # Element types for A, B, accumulator, and epilogue output.
        data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, math_inst.element_accumulator]
        CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints)
class stanford_params():
    """Static dataset parameters for the Stanford background segmentation set."""

    def __init__(self):
        # Per-class frequency in percent; the final entry is the catch-all class.
        frequencies = [19.203, 16.566, 27.329, 2.428, 2.132, 2.123, 5.494, 3.25, 4.079, 0.488, 4.726, 1.264, 10.918, 100.0]
        self.class_freq = np.asarray(frequencies)
        # Inverse-log-frequency weighting: rarer classes receive larger weights.
        self.class_weights = -np.log(self.class_freq / 100.0)
        # One extra slot beyond the frequency table (void/ignore label).
        self.num_classes = len(self.class_freq) + 1
        # RGB colour per class index, used when rendering predictions.
        self.color_map = [[255, 255, 255], [128, 128, 128], [124, 152, 0], [255, 225, 25], [0, 130, 200], [245, 130, 48], [145, 30, 180], [0, 130, 200], [0, 0, 128], [128, 0, 0], [250, 190, 190], [170, 110, 40], [0, 0, 0], [170, 110, 40], [128, 128, 128]]
class BasicBlock(nn.Module):
    """Residual basic block where striding is done by post-conv average pooling."""
    expansion = 1

    def __init__(self, inplanes, planes, norm_type='batch', stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv_stride1(inplanes, planes, kernel_size=3, norm_type=norm_type)
        self.bn1 = normalization(planes, norm_type)
        self.conv2 = conv_stride1(planes, planes, kernel_size=3, norm_type=norm_type)
        self.bn2 = normalization(planes, norm_type)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        # Downsample via pooling instead of a strided convolution.
        if self.stride != 1:
            out = F.avg_pool2d(out, kernel_size=self.stride, stride=self.stride)
        out = self.bn2(self.conv2(out))
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(out + shortcut)
        return out
def _get_resource(resources, resource_name): if ((resource_name not in resources) or (resources[resource_name] is None)): raise MissingResource(("Resource '%s' not found" % resource_name)) return resources[resource_name]
class EvaluateParser(JavaProtobufContext):
    """Context manager wrapping the CoreNLP Java parser-evaluation tool.

    Communicates with the Java process via protobuf requests/responses.
    """

    def __init__(self, classpath=None, kbest=None, silent=False):
        # Request top-K PCFG evaluation when a k is given.
        if (kbest is not None):
            extra_args = ['-evalPCFGkBest', '{}'.format(kbest), '-evals', 'pcfgTopK']
        else:
            extra_args = []
        if silent:
            # NOTE(review): when kbest is also set this appends a second
            # '-evals' flag — confirm the Java CLI merges repeated flags.
            extra_args.extend(['-evals', 'summary=False'])
        super(EvaluateParser, self).__init__(classpath, EvaluateParserResponse, EVALUATE_JAVA, extra_args=extra_args)

    def process(self, treebank):
        """Score *treebank* via the Java evaluator and return its response."""
        request = build_request(treebank)
        return self.process_request(request)
def test_gmm_e2e():
    """Smoke test: a GMM fitted on random data exposes its fitted parameters."""
    model = learn_gmm(np.random.random((100, 64)), n_modes=5)
    for fitted_attr in (model.means_, model.covariances_, model.weights_):
        assert fitted_attr is not None
class TestPadding(TestCase):
    """Quantized padding must equal padding the float tensor then re-quantizing."""

    # NOTE(review): the '@given' markers were missing, leaving bare keyword
    # expressions in the class body (a SyntaxError); restored from the
    # hypothesis strategies that were still present.
    @given(batch_size=st.integers(1, 64), channels=st.integers(1, 64), width=st.integers(16, 128), qtype=st.sampled_from(hu._ALL_QINT_TYPES))
    def test_reflection_pad1d(self, batch_size, channels, width, qtype):
        padding = width // 4
        x = torch.arange(batch_size * channels * width).to(torch.float)
        x = x.resize(batch_size, channels, width)
        (scale, zp) = _calculate_dynamic_qparams(x, qtype)
        qx = torch.quantize_per_tensor(x, scale, zp, qtype)
        padding_op = torch.nn.ReflectionPad1d(padding)
        # Reference: pad the float tensor, then quantize with the same params.
        y_ref = padding_op(x)
        qy_ref = torch.quantize_per_tensor(y_ref, scale, zp, qtype)
        qy_hat = padding_op(qx)
        self.assertEqual(qy_ref, qy_hat)

    @given(batch_size=st.integers(1, 64), channels=st.integers(1, 64), hwd=st.integers(1, 16), d=st.sampled_from([1, 2, 3]), value=st.floats(-5, 5, allow_nan=False, allow_infinity=False), qtype=st.sampled_from(hu._ALL_QINT_TYPES))
    def test_constant_padNd(self, batch_size, channels, d, hwd, value, qtype):
        padding = hwd // 4
        shape = [batch_size, channels, hwd]
        op = torch.nn.ConstantPad1d
        # Grow the shape and pick the ConstantPadNd matching d dimensions.
        if d >= 2:
            shape.append(hwd)
            op = torch.nn.ConstantPad2d
        if d == 3:
            shape.append(hwd)
            op = torch.nn.ConstantPad3d
        numel = np.prod(shape)
        x = torch.arange(numel).to(torch.float)
        x = x.resize(*shape)
        (scale, zp) = _calculate_dynamic_qparams(x, qtype)
        qx = torch.quantize_per_tensor(x, scale, zp, qtype)
        padding_op = op(padding, value)
        y_ref = padding_op(x)
        qy_ref = torch.quantize_per_tensor(y_ref, scale, zp, qtype)
        qy_hat = padding_op(qx)
        self.assertEqual(qy_ref, qy_hat)
def clipped_error(x):
    """Huber loss with delta = 1: quadratic inside |x| < 1, linear outside."""
    abs_x = tf.abs(x)
    quadratic = 0.5 * tf.square(x)
    linear = abs_x - 0.5
    return tf.where(abs_x < 1.0, quadratic, linear)
def test__extract_geometry(h3_tess):
    """The tessellator should reduce the bbox to a single Polygon geometry."""
    geometry = h3_tess._extract_geometry(bbox)
    assert geometry['type'] == 'Polygon'
def parse_args():
    """Parse command-line arguments; one required positional config path."""
    parser = argparse.ArgumentParser(description='Train a model')
    parser.add_argument('config', help='train config file path')
    return parser.parse_args()
class DWConv(nn.Module):
    """Depth-wise 3x3 convolution over a flattened token sequence.

    Input is (B, H*W, C); it is reshaped into an image, convolved per
    channel, and flattened back to (B, H*W, C).
    """

    def __init__(self, dim):
        super().__init__()
        # groups=dim makes the convolution depth-wise (one filter per channel).
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, groups=dim)

    def forward(self, x: Tensor, H, W) -> Tensor:
        batch, _, channels = x.shape
        image = x.transpose(1, 2).view(batch, channels, H, W)
        image = self.dwconv(image)
        return image.flatten(2).transpose(1, 2)
def evaluate(model, batches):
    """Run the autoencoder over *batches* without gradients, averaging losses.

    Returns a dict of AverageMeters, one per loss term plus an aggregate
    'loss' entry computed by model.loss on the averaged terms.
    """
    model.eval()
    meters = collections.defaultdict(AverageMeter)
    with torch.no_grad():
        for inputs, targets in batches:
            batch_losses = model.autoenc(inputs, targets)
            for name, value in batch_losses.items():
                # Weighted by batch size (dimension 1 holds the batch).
                meters[name].update(value.item(), inputs.size(1))
    averaged = {name: meter.avg for name, meter in meters.items()}
    meters['loss'].update(model.loss(averaged))
    return meters
def get(dataset, crop_size, batch_size, min_resize_value=None, max_resize_value=None, resize_factor=None, min_scale_factor=1.0, max_scale_factor=1.0, scale_factor_step_size=0, num_readers=1, num_threads=1, dataset_split=None, is_training=True, model_variant=None):
    """Build a batched input pipeline of preprocessed images (and labels).

    Reads samples through a slim DatasetDataProvider, normalises the label
    shape to [H, W, 1], applies scale/crop preprocessing, and batches with
    dynamic padding. Returns the dict produced by tf.train.batch.
    """
    if (dataset_split is None):
        raise ValueError('Unknown dataset split.')
    if (model_variant is None):
        tf.logging.warning('Please specify a model_variant. See feature_extractor.network_map for supported model variants.')
    # Evaluation reads the data exactly once and without shuffling.
    data_provider = dataset_data_provider.DatasetDataProvider(dataset, num_readers=num_readers, num_epochs=(None if is_training else 1), shuffle=is_training)
    (image, label, image_name, height, width) = _get_data(data_provider, dataset_split)
    if (label is not None):
        # Accept [H, W] or [H, W, 1]; anything else is an error.
        if (label.shape.ndims == 2):
            label = tf.expand_dims(label, 2)
        elif ((label.shape.ndims == 3) and (label.shape.dims[2] == 1)):
            pass
        else:
            raise ValueError('Input label shape must be [height, width], or [height, width, 1].')
        label.set_shape([None, None, 1])
    (original_image, image, label) = input_preprocess.preprocess_image_and_label(image, label, crop_height=crop_size[0], crop_width=crop_size[1], min_resize_value=min_resize_value, max_resize_value=max_resize_value, resize_factor=resize_factor, min_scale_factor=min_scale_factor, max_scale_factor=max_scale_factor, scale_factor_step_size=scale_factor_step_size, ignore_label=dataset.ignore_label, is_training=is_training, model_variant=model_variant)
    sample = {common.IMAGE: image, common.IMAGE_NAME: image_name, common.HEIGHT: height, common.WIDTH: width}
    if (label is not None):
        sample[common.LABEL] = label
    if (not is_training):
        # Keep the unprocessed image for visualisation; force one thread so
        # the evaluation order is deterministic.
        sample[common.ORIGINAL_IMAGE] = (original_image,)
        num_threads = 1
    return tf.train.batch(sample, batch_size=batch_size, num_threads=num_threads, capacity=(32 * batch_size), allow_smaller_final_batch=(not is_training), dynamic_pad=True)
def _workers(workers):
    """Normalise a worker-count argument.

    None maps to the configured default; negative values count from the end
    like an index (-1 means all CPUs, -_cpu_count means one worker); zero is
    invalid.

    Raises:
        ValueError: for zero or for negatives below -_cpu_count.
    """
    if workers is None:
        return getattr(_config, 'default_workers', 1)
    if workers == 0:
        raise ValueError('workers must not be zero')
    if workers > 0:
        return workers
    # Negative: interpret relative to the CPU count.
    if workers < -_cpu_count:
        raise ValueError('workers value out of range; got {}, must not be less than {}'.format(workers, (- _cpu_count)))
    return workers + 1 + _cpu_count
def _tested_estimators():
    """Yield instantiable estimators; NearMiss is expanded into its 3 versions."""
    for name, estimator_cls in all_estimators():
        try:
            instance = _construct_instance(estimator_cls)
            set_random_state(instance)
        except SkipTest:
            # Estimator cannot be constructed in this environment; skip it.
            continue
        if not isinstance(instance, NearMiss):
            yield instance
        else:
            for version in (1, 2, 3):
                yield clone(instance).set_params(version=version)
class DropboxDeleteItem(VirtualFunctionTool):
    """Virtual tool spec: delete a file or folder from the user's Dropbox."""
    name = 'DropboxDeleteItem'
    summary = "Delete a file or folder from the user's Dropbox account."
    # Single required argument: cloud path of the item to delete.
    parameters: List[ArgParameter] = [{'name': 'item_path', 'type': 'string', 'description': "The cloud file or folder path in the user's Dropbox account to be deleted.", 'required': True}]
    # Boolean success indicator.
    returns: List[ArgReturn] = [{'name': 'success', 'type': 'boolean', 'description': 'Whether the file or folder was successfully deleted.'}]
    # Raised when the given path does not exist.
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The 'item_path' does not exist."}]
class ParserImageInTar(Parser):
    """Dataset parser reading images from tar archives (possibly nested).

    Tarfile handles are optionally cached per state object for faster
    repeated access.
    """

    def __init__(self, root, class_map='', cache_tarfiles=True, cache_tarinfo=None):
        super().__init__()
        class_name_to_idx = None
        if class_map:
            class_name_to_idx = load_class_map(class_map, root)
        self.root = root
        (self.samples, self.targets, self.class_name_to_idx, tarfiles) = extract_tarinfos(self.root, class_name_to_idx=class_name_to_idx, cache_tarinfo=cache_tarinfo, extensions=IMG_EXTENSIONS)
        self.class_idx_to_name = {v: k for (k, v) in self.class_name_to_idx.items()}
        # A single entry with no filename means the root itself is a tar archive.
        if ((len(tarfiles) == 1) and (tarfiles[0][0] is None)):
            self.root_is_tar = True
            self.tar_state = tarfiles[0][1]
        else:
            self.root_is_tar = False
            self.tar_state = dict(tarfiles)
        self.cache_tarfiles = cache_tarfiles

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        """Return (open file object for the image bytes, target class index)."""
        sample = self.samples[index]
        target = self.targets[index]
        # sample = (member tarinfo, parent tar filename, child tarinfo or None).
        (sample_ti, parent_fn, child_ti) = sample
        parent_abs = (os.path.join(self.root, parent_fn) if parent_fn else self.root)
        tf = None
        cache_state = None
        if self.cache_tarfiles:
            cache_state = (self.tar_state if self.root_is_tar else self.tar_state[parent_fn])
            tf = cache_state.tf
        if (tf is None):
            tf = tarfile.open(parent_abs)
            if self.cache_tarfiles:
                cache_state.tf = tf
        if (child_ti is not None):
            # The image lives in a tar nested inside the parent tar.
            ctf = (cache_state.children[child_ti.name].tf if self.cache_tarfiles else None)
            if (ctf is None):
                ctf = tarfile.open(fileobj=tf.extractfile(child_ti))
                if self.cache_tarfiles:
                    cache_state.children[child_ti.name].tf = ctf
            tf = ctf
        return (tf.extractfile(sample_ti), target)

    def _filename(self, index, basename=False, absolute=False):
        """Return the archived filename for a sample (optionally basename only)."""
        filename = self.samples[index][0].name
        if basename:
            filename = os.path.basename(filename)
        return filename
def de_vectorize_field_ptr(vec_cpu, rev_vocab, memory, schema, table_po=None, field_po=None, post_process=None, return_tokens=False):
    """Decode a vector of token ids into tokens, resolving pointer ids.

    Ids below the vocabulary size map to vocabulary tokens; larger ids first
    index into *memory* and then (beyond it) into *schema* positions. A
    leading start symbol is skipped; EOS/PAD terminates decoding.

    Returns the raw token list when *return_tokens* is set, otherwise the
    result of *post_process* applied to the tokens.
    """
    tokens = []
    for position, raw_id in enumerate(vec_cpu):
        token_id = int(raw_id)
        # Skip a leading start-of-sequence symbol only.
        if position == 0 and token_id == rev_vocab.start_id:
            continue
        # Stop at end-of-sequence or padding.
        if token_id == rev_vocab.eos_id or token_id == rev_vocab.pad_id:
            break
        if token_id < rev_vocab.size:
            tokens.append(rev_vocab.to_token(token_id))
            continue
        pointer = token_id - rev_vocab.size
        if pointer < len(memory):
            tokens.append(memory[pointer])
        else:
            schema_pos = pointer - len(memory)
            tokens.append(schema.get_signature_by_schema_pos(schema_pos, table_po=table_po, field_po=field_po))
    if return_tokens:
        return tokens
    return post_process(tokens)
class BridgeLayer(nn.Module):
    """Adapts the encoder's final hidden state for the decoder.

    For bidirectional encoders, the forward/backward direction states are
    concatenated along the feature axis; the result passes through a ReLU.
    """

    def __init__(self, enc_hidden_size, dec_hidden_size):
        super(BridgeLayer, self).__init__()
        self.input_size = enc_hidden_size
        self.output_size = dec_hidden_size
        # NOTE: defined but not applied in forward(); kept for interface parity.
        self.proj_layer = nn.Linear(self.input_size, self.output_size)

    def forward(self, enc_final_hidden, enc_cell_type='GRU', bidirectional_encoder=True):
        if enc_cell_type == 'LSTM':
            # LSTM yields (h_n, c_n); only the hidden state is bridged.
            enc_final_hidden = enc_final_hidden[0]
        if bidirectional_encoder:
            # Layer dimension interleaves directions: even = fwd, odd = bwd.
            num_layers_dirs = enc_final_hidden.size(0)
            forward_states = enc_final_hidden[0:num_layers_dirs:2]
            backward_states = enc_final_hidden[1:num_layers_dirs:2]
            enc_final_hidden = torch.cat([forward_states, backward_states], 2)
        return F.relu(enc_final_hidden)
def get_strongly_connected_components(dependencies):
    """Compute SCCs over derived variables in a deterministic order.

    The dependency graph joins positive and negative dependencies; variables
    are sorted so the resulting component ordering is reproducible.
    """
    ordered_vars = sorted(dependencies.derived_variables)
    index_of = {variable: i for i, variable in enumerate(ordered_vars)}
    successor_lists = []
    for variable in ordered_vars:
        pos_deps = dependencies.positive_dependencies[variable]
        neg_deps = dependencies.negative_dependencies[variable]
        successor_lists.append([index_of[atom] for atom in sorted(pos_deps.union(neg_deps))])
    component_indices = sccs.get_sccs_adjacency_list(successor_lists)
    return [[ordered_vars[i] for i in component] for component in component_indices]
class Net(nn.Module):
    """Small CNN for 28x28 single-channel inputs (e.g. MNIST); 10 classes."""

    def __init__(self):
        super(Net, self).__init__()
        # Two conv+pool stages: spatial size 28 -> 26 -> 13 -> 11 -> 5.
        self.conv1 = nn.Conv2d(1, 16, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.fc1 = nn.Linear(32 * 5 * 5, 32)
        self.fc2 = nn.Linear(32, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
class GEM(keras.Model):
    """Graph embedding model stacking `hop` GEMLayer message-passing rounds.

    Masked nodes are classified with a learned projection followed by softmax;
    call() returns (loss, accuracy) for the masked subset.
    """

    def __init__(self, input_dim, output_dim, args):
        super().__init__()
        self.nodes_num = args.nodes_num
        self.class_size = args.class_size
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.device_num = args.device_num
        self.hop = args.hop  # number of message-passing rounds
        # Initial hidden state h_0 is all zeros.
        self.zero_init = tf.keras.initializers.Zeros()
        self.h_0 = tf.Variable(initial_value=self.zero_init(shape=(self.nodes_num, self.output_dim), dtype=tf.float32))
        self.layers_ = []
        self.input_layer = GEMLayer(self.nodes_num, self.input_dim, self.output_dim, self.device_num)
        # hop-1 additional rounds beyond the input layer.
        for _ in range((self.hop - 1)):
            self.layers_.append(GEMLayer(self.nodes_num, self.input_dim, self.output_dim, self.device_num))
        self.x_init = tf.keras.initializers.GlorotUniform()
        # Classification projection from embeddings to class scores.
        self.u = tf.Variable(initial_value=self.x_init(shape=(self.output_dim, self.class_size), dtype=tf.float32), trainable=True)

    def call(self, inputs):
        """Run message passing; return (loss, accuracy) on masked nodes."""
        (supports, x, label, idx_mask) = inputs
        outputs = [self.input_layer((x, supports, self.h_0))]
        for layer in self.layers_:
            hidden = layer((x, supports, outputs[(- 1)]))
            outputs.append(hidden)
        gem_out = outputs[(- 1)]
        masked_data = tf.gather(gem_out, idx_mask)
        masked_label = tf.gather(label, idx_mask)
        logits = tf.nn.softmax(tf.matmul(masked_data, self.u))
        # NOTE(review): applying sigmoid on top of softmax probabilities is
        # unusual for a cross-entropy-style loss — confirm this is intended.
        loss = (- tf.reduce_sum(tf.math.log(tf.nn.sigmoid((masked_label * logits)))))
        acc = accuracy(logits, masked_label)
        return (loss, acc)
def compute_head_information(attributes):
    """Determine the syntactic head of a mention.

    Returns (head tokens, Span of the head within the mention, head index).
    Falls back to the first token when the parse subtree does not cover the
    mention's tokens exactly.
    """
    mention_subtree = attributes['parse_tree']
    head_finder = head_finders.HeadFinder()
    # Defaults: the first token is the head.
    head_index = 0
    head = [attributes['tokens'][0]]
    if (len(mention_subtree.leaves()) == len(attributes['tokens'])):
        head_tree = head_finder.get_head(mention_subtree)
        head_index = get_head_index(head_tree, mention_subtree.pos())
        head = [head_tree[0]]
    in_mention_span = spans.Span(head_index, head_index)
    if attributes['pos'][head_index].startswith('NNP'):
        # Proper names: widen the head to the whole name span.
        (in_mention_span, head) = head_finders.HeadFinder.adjust_head_for_nam(attributes['tokens'], attributes['pos'], attributes['ner'][head_index], in_mention_span, head)
        head_index = in_mention_span.end
    if attributes['is_apposition']:
        if (len(mention_subtree) == 2):
            # Two-child apposition: the second child is the head phrase.
            head_tree = mention_subtree[1]
            head = head_tree.leaves()
            in_mention_span = spans.Span(len(mention_subtree[0].leaves()), (len(attributes['tokens']) - 1))
            head_index = in_mention_span.end
        else:
            # Otherwise, take the first child headed by a proper noun.
            start = 0
            for child in mention_subtree:
                if __head_pos_starts_with(child, 'NNP'):
                    end = min([(start + len(child.leaves())), len(attributes['tokens'])])
                    head_index = (end - 1)
                    (in_mention_span, head) = head_finders.HeadFinder.adjust_head_for_nam(attributes['tokens'][start:end], attributes['pos'][start:end], attributes['ner'][head_index], in_mention_span, head)
                    break
                start += len(child.leaves())
    return (head, in_mention_span, head_index)
@_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
def test_multiple_ib_deeper():
    """Autodiff through multiple independent blocks at increasing loop depth.

    NOTE(review): the '@' of the test decorator and the @ti.kernel decorator
    on compute_y appear to have been lost in extraction; restored so the
    kernel runs under Taichi and the test is registered.
    """
    x = ti.ndarray(float, (), needs_grad=True)
    y = ti.ndarray(float, (), needs_grad=True)

    @ti.kernel
    def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):
        for j in range(2):
            for i in range(3):
                y[None] += x[None]
            for i in range(3):
                for ii in range(2):
                    y[None] += x[None]
            for i in range(3):
                for ii in range(2):
                    for iii in range(2):
                        y[None] += x[None]

    x[None] = 1.0
    with ti.ad.Tape(y):
        compute_y(x, y)
    # 2 * (3 + 3*2 + 3*2*2) = 42 accumulations of x.
    assert y[None] == 42.0
    assert x.grad[None] == 42.0
class Theta(nn.Module):
    """Temporal hard-attention followed by a linear softmax classifier.

    Input H has shape (batch, n_comp, T); attention collapses the T axis and
    the classifier maps the n_comp scores to class probabilities.
    """

    def __init__(self, n_comp=100, T=431, num_classes=50):
        super().__init__()
        # One learned weight per time step; squeezing yields (batch, n_comp).
        self.hard_att = nn.Linear(T, 1, bias=False)
        self.classifier = nn.Sequential(nn.Linear(n_comp, num_classes, bias=False), nn.Softmax(dim=1))

    def forward(self, H):
        attended = self.hard_att(H).squeeze(2)
        return self.classifier(attended)
class RandomMaskingGenerator():
    """Random binary mask over a (frames, height, width) patch grid.

    A fixed fraction (*mask_ratio*) of patches is masked (value 1); the rest
    are kept (value 0). The mask is returned flattened and shuffled.
    """

    def __init__(self, input_size, mask_ratio):
        (self.frames, self.height, self.width) = input_size
        self.total_patches = self.frames * self.height * self.width
        self.num_masks = int(mask_ratio * self.total_patches)
        self.total_masks = self.num_masks

    def __repr__(self):
        # BUG FIX: corrected 'Maks' -> 'Mask' typo in the message.
        repr_str = 'Mask: total patches {}, mask patches {}'.format(self.total_patches, self.total_masks)
        return repr_str

    def __call__(self):
        # 0 = keep, 1 = mask; the shuffle decides which patches are masked.
        mask = np.hstack([np.zeros(self.total_patches - self.num_masks), np.ones(self.num_masks)])
        np.random.shuffle(mask)
        return mask
def parse_args():
    """Parse the list of transformation sections; default to all known ones."""
    parser = argparse.ArgumentParser()
    parser.add_argument('sections', type=str, nargs='*', help='Which transformations to use: {}'.format(' '.join(ARGUMENTS.keys())))
    args = parser.parse_args()
    # An empty selection means "run everything".
    if not args.sections:
        args.sections = list(ARGUMENTS.keys())
    return args
def get_optimizer(args, params_list, **options):
    """Build the optimizer selected by ``args.optim`` ('sgd', 'adam', or None).

    With ``args.optim`` unset, tinyimagenet defaults to Adam and everything
    else to SGD with momentum 0.9.

    Raises:
        NotImplementedError: for any other optimizer name.
    """
    def _sgd():
        return torch.optim.SGD(params_list, lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)

    def _adam():
        return torch.optim.Adam(params_list, lr=args.lr)

    if args.optim is None:
        # Dataset-dependent default.
        return _adam() if options['dataset'] == 'tinyimagenet' else _sgd()
    if args.optim == 'sgd':
        return _sgd()
    if args.optim == 'adam':
        return _adam()
    raise NotImplementedError
class PruningException(Exception):
    """Exception carrying the Node at which a search branch was pruned."""

    _node: Node

    def __init__(self, message, node):
        super().__init__(message)
        self._node = node

    def node(self):
        """Return the node that triggered the pruning."""
        return self._node
class TFLEDForConditionalGeneration():
    # Placeholder stub for the TensorFlow LED model: every entry point just
    # calls requires_tf (defined elsewhere), which presumably raises a helpful
    # error when TensorFlow is not installed -- confirm against requires_tf.

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): defined without @classmethod here, unlike the real
        # model API it mirrors -- confirm against the original source.
        requires_tf(self)
def test_scanvi_predict_use_posterior_mean():
    """Smoke-test SCANVI.predict with both values of use_posterior_mean."""
    adata = synthetic_iid()
    SCANVI.setup_anndata(adata, labels_key='labels', unlabeled_category='label_0')
    model = SCANVI(adata)
    # One epoch keeps the test fast; predictions are discarded -- the test
    # only checks that both code paths run without raising.
    model.train(max_epochs=1)
    _ = model.predict(use_posterior_mean=True)
    _ = model.predict(use_posterior_mean=False)
def compile_dense_field_aot_test(arch):
    """Compile three kernels and a dense field into a Taichi AOT module,
    saved to the directory given by $TAICHI_AOT_FOLDER_PATH."""
    ti.init(arch)
    if (ti.lang.impl.current_cfg().arch != arch):
        # Requested backend is unavailable on this machine; skip silently.
        return
    n = 10
    place = ti.field(ti.i32, shape=(n,))

    # NOTE(review): the three functions below are registered via
    # m.add_kernel(), so they are presumably @ti.kernel-decorated in the
    # original source; the decorators appear to be missing here -- confirm.
    def simple_return() -> ti.f32:
        sum = 0.2
        return sum

    def init():
        for index in range(n):
            place[index] = index

    def ret() -> ti.f32:
        sum = 0.0
        for index in place:
            sum += place[index]
        return sum

    assert ('TAICHI_AOT_FOLDER_PATH' in os.environ.keys())
    dir_name = str(os.environ['TAICHI_AOT_FOLDER_PATH'])
    m = ti.aot.Module()
    m.add_kernel(simple_return)
    m.add_kernel(init)
    m.add_kernel(ret)
    m.add_field('place', place)
    m.save(dir_name)
def mkdir_p(folder_path):
    """Create folder_path like `mkdir -p`: existing directories are fine.

    Re-raises any OSError other than "already exists as a directory".
    """
    try:
        makedirs(folder_path)
    except OSError as exc:
        # Only swallow EEXIST when the path really is a directory.
        if exc.errno != EEXIST or not path.isdir(folder_path):
            raise
def test_sanitize_output(case_factory, request_factory):
    """Sensitive header/query values are replaced with '[Filtered]' in place,
    on the case, the response, and the response's underlying request."""
    response = Response()
    response.headers = {'API-Key': 'secret'}
    response.request = request_factory(headers={'Custom-Token': 'custom_token_value'})
    case = case_factory(headers={'Authorization': 'Bearer token'}, query={'api_key': '12345'})
    sanitize_output(case, response=response)
    assert (case.headers == {'Authorization': '[Filtered]'})
    assert (case.query == {'api_key': '[Filtered]'})
    assert (response.headers == {'API-Key': '[Filtered]'})
    # Content-Length: '0' is presumably added by request_factory's prepared
    # request -- confirm against the fixture.
    assert (response.request.headers == {'Custom-Token': '[Filtered]', 'Content-Length': '0'})
class BaseBadSampler(BaseEstimator):
    """Deliberately minimal sampler: fit learns nothing and fit_resample
    passes the data through unchanged (after target validation)."""

    _sampling_type = 'bypass'

    def fit(self, X, y):
        # No state to learn; return self for sklearn-style chaining.
        return self

    def fit_resample(self, X, y):
        # Validate that y looks like classification targets, then bypass.
        check_classification_targets(y)
        self.fit(X, y)
        return (X, y)
def test_histosys_additional_properties():
    """A histosys modifier carrying an unknown extra key ('foo') must be
    rejected by pyhf's spec validation."""
    spec = {'channels': [{'name': 'channel', 'samples': [{'name': 'sample', 'data': [10.0], 'modifiers': [{'name': 'histosys', 'type': 'histosys', 'data': {'hi_data': [1.0], 'lo_data': [0.5], 'foo': 2.0}}]}]}]}
    with pytest.raises(pyhf.exceptions.InvalidSpecification):
        pyhf.Model(spec)
def build_param(ctx, py_arg, self_name, kwarg_only):
    """Convert one Python AST argument into a frontend Param node.

    Annotation resolution order: an explicit annotation wins; a bare `self`
    inside a class body is typed as the enclosing class; anything else gets
    an empty annotation.
    """
    name = py_arg.arg
    r = ctx.make_range(py_arg.lineno, py_arg.col_offset,
                       py_arg.col_offset + len(name))
    annotation = getattr(py_arg, 'annotation', None)
    if annotation is not None:
        annotation_expr = build_expr(ctx, annotation)
    elif self_name is not None and name == 'self':
        annotation_expr = Var(Ident(r, self_name))
    else:
        annotation_expr = EmptyTypeAnnotation(r)
    return Param(annotation_expr, Ident(r, name), kwarg_only)
def _decode_value(value): if (not isinstance(value, str)): return value if (value == 'None'): value = None try: value = literal_eval(value) except ValueError: pass except SyntaxError: pass return value
class MlpAttention(Attention):
    """Additive (MLP-style) attention: score = tanh(Wq·q + Wk·k) · v."""

    def __init__(self, query_size, key_size, out_size=100, dropout=0):
        super(MlpAttention, self).__init__(dropout)
        # Project queries and keys into a shared out_size space.
        self.query_projection = nn.Linear(query_size, out_size)
        self.key_projection = nn.Linear(key_size, out_size)
        # Scoring vector applied after the tanh combination; shape (out_size, 1).
        self.v = nn.Parameter(torch.FloatTensor(out_size, 1))
        init.xavier_uniform_(self.v)

    def _score(self, query, key):
        # Assumes query is (batch, num_queries, query_size) and key is
        # (batch, time_step, key_size) -- inferred from the size()/expand()
        # calls below; confirm with callers.
        (batch_size, num_queries, time_step, out_size) = (query.size(0), query.size(1), key.size(1), self.v.size(0))
        # Broadcast projected queries and keys to a common
        # (batch, num_queries, time_step, out_size) shape before combining.
        query = self.query_projection(query).unsqueeze((- 1)).expand(batch_size, num_queries, time_step, out_size)
        key = self.key_projection(key).unsqueeze(1).expand(batch_size, num_queries, time_step, out_size)
        # Result: one score per (query, key position) pair.
        score = torch.tanh((query + key)).matmul(self.v).squeeze((- 1))
        return score
_node_type() class PlaneWaveSource(optplan.EmSource): type = schema_utils.polymorphic_model_type('source.plane_wave') center = optplan.vec3d() extents = optplan.vec3d() normal = optplan.vec3d() theta = types.FloatType() psi = types.FloatType() polarization_angle = types.FloatType() overwrite_bloch_vector = types.BooleanType() border = types.ListType(types.FloatType()) power = types.FloatType() normalize_by_sim = types.BooleanType(default=False)
class HierarchyLinkage(Benchmark):
    """Benchmark scipy.cluster.hierarchy.linkage for every linkage method."""

    # One benchmark instance per method name.
    params = ['single', 'complete', 'average', 'weighted', 'centroid',
              'median', 'ward']
    param_names = ['method']

    def __init__(self):
        # Fixed seed keeps the timed workload identical across runs.
        rng = np.random.RandomState(0)
        self.X = rng.randn(2000, 2)

    def time_linkage(self, method):
        linkage(self.X, method=method)
def _memoize_get_funcs(func): memo = {} func.memo = memo (func) def getter(names, arrays=(), dtype=None): key = (names, dtype) for array in arrays: key += (array.dtype.char, array.flags.fortran) try: value = memo.get(key) except TypeError: key = None value = None if (value is not None): return value value = func(names, arrays, dtype) if (key is not None): memo[key] = value return value return getter
def meta_net(x, params):
    """Three-layer MLP using explicit (weight, bias) pairs from `params`.

    params holds [w1, b1, w2, b2, w3, b3]. Returns (logits, h2, h1) where
    h1 and h2 are the post-ReLU activations of the first two layers.
    """
    h1 = F.relu(F.linear(x, params[0], params[1]))
    h2 = F.relu(F.linear(h1, params[2], params[3]))
    logits = F.linear(h2, params[4], params[5])
    return (logits, h2, h1)
class DialogsReader(object):
    """Reads a VisDial dialogs JSON file and serves per-image dialog records.

    Each dialog is padded to 10 rounds; questions and answers are stored as
    indices into shared, word-tokenized question/answer lists.
    """

    def __init__(self, dialogs_jsonpath: str):
        with open(dialogs_jsonpath, 'r') as visdial_file:
            visdial_data = json.load(visdial_file)
            self._split = visdial_data['split']
            # Per-image lookup tables.
            self.captions = {}
            self.dialogs = {}
            self.num_rounds = {}
            for dialog_for_image in visdial_data['data']['dialogs']:
                self.captions[dialog_for_image['image_id']] = dialog_for_image['caption']
                # Record the true round count before padding below.
                self.num_rounds[dialog_for_image['image_id']] = len(dialog_for_image['dialog'])
                # Pad every dialog to exactly 10 rounds; -1 marks "missing".
                while (len(dialog_for_image['dialog']) < 10):
                    dialog_for_image['dialog'].append({'question': (- 1), 'answer': (- 1)})
                for i in range(len(dialog_for_image['dialog'])):
                    if ('answer' not in dialog_for_image['dialog'][i]):
                        dialog_for_image['dialog'][i]['answer'] = (- 1)
                    if ('answer_options' not in dialog_for_image['dialog'][i]):
                        dialog_for_image['dialog'][i]['answer_options'] = ([(- 1)] * 100)
                self.dialogs[dialog_for_image['image_id']] = dialog_for_image['dialog']
            print(f'[{self._split}] Tokenizing captions...')
            for (image_id, caption) in self.captions.items():
                self.captions[image_id] = word_tokenize(caption)
            if (self._split in ['val2018', 'train']):
                # Pre-tokenized questions/answers cached on disk for these splits.
                print((self._split + 'tokens load token from data/tokens.json'))
                with open('data/tokens.json', 'r') as f:
                    data = json.loads(f.read())
                    f.close()
                self.questions = data[('questions_' + self._split)]
                self.answers = data[('answers_' + self._split)]
                # Empty sentinel entry so index -1 maps to "no question/answer".
                self.questions.append('')
                self.answers.append('')
            else:
                self.questions = visdial_data['data']['questions']
                self.answers = visdial_data['data']['answers']
                self.questions.append('')
                self.answers.append('')
            print(f'[{self._split}] Tokenizing questions...')
            for i in range(len(self.questions)):
                self.questions[i] = word_tokenize((self.questions[i] + '?'))
            print(f'[{self._split}] Tokenizing answers...')
            for i in range(len(self.answers)):
                self.answers[i] = word_tokenize(self.answers[i])

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, image_id: int) -> Dict[(str, Union[(int, str, List)])]:
        """Return one record with indices resolved to tokenized text.

        Works on a deep copy so the index tables in self.dialogs stay intact.
        """
        caption_for_image = self.captions[image_id]
        dialog_for_image = copy.deepcopy(self.dialogs[image_id])
        num_rounds = self.num_rounds[image_id]
        opt_list = []
        for i in range(len(dialog_for_image)):
            # Keep the raw answer-option indices before resolving them.
            opt_list.append(copy.deepcopy(dialog_for_image[i]['answer_options']))
            dialog_for_image[i]['question'] = self.questions[dialog_for_image[i]['question']]
            dialog_for_image[i]['answer'] = self.answers[dialog_for_image[i]['answer']]
            for (j, answer_option) in enumerate(dialog_for_image[i]['answer_options']):
                dialog_for_image[i]['answer_options'][j] = self.answers[answer_option]
        return {'image_id': image_id, 'caption': caption_for_image, 'dialog': dialog_for_image, 'num_rounds': num_rounds, 'opt_list': opt_list}

    def keys(self) -> List[int]:
        return list(self.dialogs.keys())

    # NOTE(review): likely an @property in the original source -- confirm.
    def split(self):
        return self._split
class chamferDist(nn.Module):
    """nn.Module wrapper around the chamferFunction autograd op."""

    def __init__(self):
        super(chamferDist, self).__init__()

    def forward(self, input1, input2):
        # Delegate entirely to the custom autograd Function.
        distances = chamferFunction.apply(input1, input2)
        return distances
def split_sequence(sequence, window=None, pred_window=None):
    """Slice `sequence` into sliding (input, target) window pairs.

    Parameters
    ----------
    sequence : sequence of numbers
    window : int, optional
        Input window length. Defaults to the module-level global ``w``
        (the original hard-coded behavior), so existing callers are
        unaffected.
    pred_window : int, optional
        Target window length. Defaults to the module-level global ``p_w``.

    Returns
    -------
    (X, y) : numpy arrays of shape (num_windows, window) and
             (num_windows, pred_window).
    """
    if window is None:
        window = w
    if pred_window is None:
        pred_window = p_w
    X, y = [], []
    for i in range(len(sequence)):
        end_ix = i + window
        out_end_ix = end_ix + pred_window
        # Stop once the target window would run past the end.
        if out_end_ix > len(sequence):
            break
        X.append(sequence[i:end_ix])
        y.append(sequence[end_ix:out_end_ix])
    return (array(X), array(y))
def main(argv=None):
    """Build and train an NVDM topic model from FLAGS and CLI arguments."""
    # Resolve the configured activation function by name.
    if (FLAGS.non_linearity == 'tanh'):
        non_linearity = tf.nn.tanh
    elif (FLAGS.non_linearity == 'sigmoid'):
        non_linearity = tf.nn.sigmoid
    else:
        non_linearity = myrelu
    args = parseArgs()
    adam_beta1 = args.adam_beta1
    adam_beta2 = args.adam_beta2
    learning_rate = args.learning_rate
    dir_prior = args.dir_prior
    warm_up_period = args.warm_up_period
    n_sample = args.n_sample
    n_topic = args.n_topic
    # Load the vocabulary: one entry per line, first whitespace token is the word.
    lexicon = []
    vocab_path = os.path.join(args.data_dir, 'vocab.new')
    with open(vocab_path, 'r') as rf:
        for line in rf:
            word = line.split()[0]
            lexicon.append(word)
    vocab_size = len(lexicon)
    nvdm = NVDM(vocab_size=vocab_size, n_hidden=FLAGS.n_hidden, n_topic=n_topic, learning_rate=learning_rate, batch_size=FLAGS.batch_size, non_linearity=non_linearity, adam_beta1=adam_beta1, adam_beta2=adam_beta2, dir_prior=dir_prior)
    # TF1-style session setup.
    sess = tf.Session()
    init = tf.global_variables_initializer()
    result = sess.run(init)
    train_url = os.path.join(args.data_dir, 'train.feat')
    test_url = os.path.join(args.data_dir, 'test.feat')
    train(sess, nvdm, train_url, test_url, FLAGS.batch_size, vocab_size, lexicon=lexicon, result_file=None, warm_up_period=warm_up_period)
class A000302(SloaneSequence):
    """OEIS A000302: powers of 4, a(n) = 4^n."""

    def __init__(self):
        # Sequence is indexed from n = 0.
        SloaneSequence.__init__(self, offset=0)

    def _repr_(self):
        return 'Powers of 4: a(n) = 4^n.'

    def _eval(self, n):
        # Return a Sage integer, matching the other Sloane sequences.
        return ZZ((4 ** n))
def prepare_sentence(sent):
    """Join mixed tokens into a sentence string, extracting box lists.

    Any list element is replaced by BOXES_PLACEHOLDER in the text and
    collected (in order) into the returned box sequence.
    """
    tokens = []
    box_sequences = []
    for item in sent:
        if isinstance(item, list):
            tokens.append(BOXES_PLACEHOLDER)
            box_sequences.append(item)
        else:
            tokens.append(item)
    return (' '.join(tokens), box_sequences)
class TestCheckpointUtils(unittest.TestCase):
    """Tests for checkpoint_utils: ensemble loading and layer pruning."""

    def setUp(self):
        # Silence training noise during tests.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def _train_transformer(self, seed, extra_args=None):
        # NOTE(review): a generator consumed below via `with ... as`, so it is
        # presumably decorated with @contextlib.contextmanager in the original
        # source; the decorator appears to be missing here -- confirm.
        if (extra_args is None):
            extra_args = []
        with tempfile.TemporaryDirectory(f'_train_transformer_seed{seed}') as data_dir:
            # Train a tiny 3-layer model so the test stays fast.
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir)
            train_translation_model(data_dir, 'transformer_iwslt_de_en', (['--encoder-layers', '3', '--decoder-layers', '3', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--seed', str(seed)] + extra_args))
            (yield os.path.join(data_dir, 'checkpoint_last.pt'))

    def test_load_model_ensemble_and_task(self):
        """Two checkpoints load into one ensemble; cfg comes from the last one."""
        with self._train_transformer(seed=123) as model1:
            with self._train_transformer(seed=456) as model2:
                (ensemble, cfg, task) = checkpoint_utils.load_model_ensemble_and_task(filenames=[model1, model2])
                self.assertEqual(len(ensemble), 2)
                # Each model keeps its own training args...
                self.assertEqual(ensemble[0].args.seed, 123)
                self.assertEqual(ensemble[1].args.seed, 456)
                # ...while task/cfg reflect the first/last checkpoint respectively.
                self.assertTrue(('seed123' in task.cfg.data))
                self.assertEqual(cfg.common.seed, 456)

    def test_prune_state_dict(self):
        """layers_to_keep overrides prune encoder/decoder layers on load."""
        with contextlib.redirect_stdout(StringIO()):
            extra_args = ['--encoder-layerdrop', '0.01', '--decoder-layerdrop', '0.01']
            with self._train_transformer(seed=1, extra_args=extra_args) as model:
                (ensemble, cfg, task) = checkpoint_utils.load_model_ensemble_and_task(filenames=[model], arg_overrides={'encoder_layers_to_keep': '0,2', 'decoder_layers_to_keep': '1'})
                self.assertEqual(len(ensemble), 1)
                self.assertEqual(len(ensemble[0].encoder.layers), 2)
                self.assertEqual(len(ensemble[0].decoder.layers), 1)
class QuestionAnsweringPipeline(Pipeline):
    """Extractive question answering over SQuAD-style (question, context) pairs."""

    default_input_names = 'question,context'

    def __init__(self, model, tokenizer: Optional[PreTrainedTokenizer], modelcard: Optional[ModelCard], framework: Optional[str]=None, device: int=(- 1), **kwargs):
        super().__init__(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, args_parser=QuestionAnsweringArgumentHandler(), device=device, **kwargs)

    # NOTE(review): defined without `self`; presumably @staticmethod in the
    # original source -- confirm.
    def create_sample(question: Union[(str, List[str])], context: Union[(str, List[str])]) -> Union[(SquadExample, List[SquadExample])]:
        """Wrap raw question/context strings into SquadExample object(s)."""
        if isinstance(question, list):
            return [SquadExample(None, q, c, None, None, None) for (q, c) in zip(question, context)]
        else:
            return SquadExample(None, question, context, None, None, None)

    def __call__(self, *texts, **kwargs):
        """Run QA and return dict(s) with score/start/end/answer keys."""
        # Decoding / feature-extraction defaults.
        kwargs.setdefault('topk', 1)
        kwargs.setdefault('doc_stride', 128)
        kwargs.setdefault('max_answer_len', 15)
        kwargs.setdefault('max_seq_len', 384)
        kwargs.setdefault('max_question_len', 64)
        if (kwargs['topk'] < 1):
            raise ValueError('topk parameter should be >= 1 (got {})'.format(kwargs['topk']))
        if (kwargs['max_answer_len'] < 1):
            raise ValueError('max_answer_len parameter should be >= 1 (got {})'.format(kwargs['max_answer_len']))
        examples = self._args_parser(*texts, **kwargs)
        features = squad_convert_examples_to_features(examples, self.tokenizer, kwargs['max_seq_len'], kwargs['doc_stride'], kwargs['max_question_len'], False)
        fw_args = self.inputs_for_model([f.__dict__ for f in features])
        with self.device_placement():
            # Framework-specific forward pass; both paths end with numpy logits.
            if (self.framework == 'tf'):
                fw_args = {k: tf.constant(v) for (k, v) in fw_args.items()}
                (start, end) = self.model(fw_args)
                (start, end) = (start.numpy(), end.numpy())
            else:
                with torch.no_grad():
                    fw_args = {k: torch.tensor(v) for (k, v) in fw_args.items()}
                    (start, end) = self.model(**fw_args)
                    (start, end) = (start.cpu().numpy(), end.cpu().numpy())
        answers = []
        for (example, feature, start_, end_) in zip(examples, features, start, end):
            # Softmax-normalize the start/end logits.
            start_ = (np.exp(start_) / np.sum(np.exp(start_)))
            end_ = (np.exp(end_) / np.sum(np.exp(end_)))
            # Zero out positions masked by p_mask (tokens that cannot be answers).
            (start_, end_) = ((start_ * np.abs((np.array(feature.p_mask) - 1))), (end_ * np.abs((np.array(feature.p_mask) - 1))))
            # Never select the first token.
            start_[0] = end_[0] = 0
            (starts, ends, scores) = self.decode(start_, end_, kwargs['topk'], kwargs['max_answer_len'])
            # Map token spans back to character offsets in the original context.
            char_to_word = np.array(example.char_to_word_offset)
            answers += [{'score': score.item(), 'start': np.where((char_to_word == feature.token_to_orig_map[s]))[0][0].item(), 'end': np.where((char_to_word == feature.token_to_orig_map[e]))[0][(- 1)].item(), 'answer': ' '.join(example.doc_tokens[feature.token_to_orig_map[s]:(feature.token_to_orig_map[e] + 1)])} for (s, e, score) in zip(starts, ends, scores)]
        # Single-question convenience: unwrap the list.
        if (len(answers) == 1):
            return answers[0]
        return answers

    def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple:
        """Pick the top-k (start, end) spans from per-token probabilities."""
        if (start.ndim == 1):
            start = start[None]
        if (end.ndim == 1):
            end = end[None]
        # Outer product scores every possible span.
        outer = np.matmul(np.expand_dims(start, (- 1)), np.expand_dims(end, 1))
        # Keep only spans with start <= end and length < max_answer_len.
        candidates = np.tril(np.triu(outer), (max_answer_len - 1))
        scores_flat = candidates.flatten()
        if (topk == 1):
            idx_sort = [np.argmax(scores_flat)]
        elif (len(scores_flat) < topk):
            idx_sort = np.argsort((- scores_flat))
        else:
            # argpartition + argsort: top-k without a full sort.
            idx = np.argpartition((- scores_flat), topk)[0:topk]
            idx_sort = idx[np.argsort((- scores_flat[idx]))]
        (start, end) = np.unravel_index(idx_sort, candidates.shape)[1:]
        return (start, end, candidates[(0, start, end)])

    def span_to_answer(self, text: str, start: int, end: int):
        """Convert a token-index span into a character span within `text`."""
        words = []
        token_idx = char_start_idx = char_end_idx = chars_idx = 0
        for (i, word) in enumerate(text.split(' ')):
            token = self.tokenizer.tokenize(word)
            if (start <= token_idx <= end):
                if (token_idx == start):
                    char_start_idx = chars_idx
                if (token_idx == end):
                    char_end_idx = (chars_idx + len(word))
                words += [word]
            if (token_idx > end):
                break
            # Advance by sub-token count and raw character length (+1 space).
            token_idx += len(token)
            chars_idx += (len(word) + 1)
        return {'answer': ' '.join(words), 'start': max(0, char_start_idx), 'end': min(len(text), char_end_idx)}
class NnpExpander():
    """Expands repeat/recurrent constructs in an NNP protobuf into flat networks.

    RepeatStart/RepeatEnd/Delay/RecurrentInput/RecurrentOutput pseudo-functions
    are unrolled; unrolled variables get a `_<repeat_id>[<i>]` name suffix.
    """

    def __init__(self, nnp):
        self._nnp = nnp
        # Set of parameter variable names present in the NNP (value unused).
        self._parameters = {}
        for param in self._nnp.parameter:
            self._parameters[param.variable_name] = True

    def _expand_repeat(self, network):
        """Unroll the first repeat_info of `network`; recurses until none remain."""

        def _search_repeat_id(mes, rid):
            # Index of rid in the message's repeat_id list, or None if absent.
            return (list(mes.repeat_id).index(rid) if (rid in mes.repeat_id) else None)

        def _add_suffix(name, suffix, num):
            return '{}_{}[{}]'.format(name, suffix, num)

        net = nnabla_pb2.NNablaProtoBuf().network.add()
        net.CopyFrom(network)
        if (len(net.repeat_info) == 0):
            # Fully expanded.
            return net
        # Consume one repeat_info per recursion step.
        ri = net.repeat_info[0]
        del net.repeat_info[0]
        # --- variables ------------------------------------------------------
        net.ClearField('variable')
        for (vpos, var) in enumerate(network.variable):
            if (var.type == 'Parameter'):
                if (var.name not in self._parameter_original_names):
                    self._parameter_original_names[var.name] = []
            pos = _search_repeat_id(var, ri.id)
            if (pos is not None):
                # Variable participates in this repeat: clone it `times` times.
                for i in range(ri.times):
                    if (var.type == 'Parameter'):
                        # Parameter names may embed '{repeat_id}' placeholders.
                        pv_repeat_id = (('{' + ri.id) + '}')
                        if (not (pv_repeat_id in var.name)):
                            name = _add_suffix(var.name, ri.id, i)
                        else:
                            name = var.name.replace('{{{}}}'.format(ri.id), '[{}]'.format(i))
                        self._parameter_original_names[var.name].append(name)
                    else:
                        name = _add_suffix(var.name, ri.id, i)
                    v = net.variable.add()
                    v.CopyFrom(var)
                    v.name = name
                    del v.repeat_id[pos]
            else:
                # Untouched variable; remember non-repeated parameters once.
                if ((var.type == 'Parameter') and (len(var.repeat_id) == 0) and (len(self._parameter_original_names[var.name]) == 0)):
                    self._parameter_original_names[var.name].append(var.name)
                v = net.variable.add()
                v.CopyFrom(var)
        # --- Delay: map its output to the previous iteration's input -------
        delay_var = {}
        for (fpos, func) in enumerate(network.function):
            if (func.type == 'Delay'):
                if (func.recurrent_param.repeat_id == ri.id):
                    delay_var[func.output[0]] = []
                    for i in range(ri.times):
                        if (i == 0):
                            # Iteration 0 uses the initial-value input.
                            delay_var[func.output[0]].append(func.input[1])
                        else:
                            v = func.input[0]
                            v = _add_suffix(v, ri.id, (i - 1))
                            delay_var[func.output[0]].append(v)
        # --- RepeatEnd: output aliases each per-iteration input name --------
        repeat_end_var = {}
        for (fpos, func) in enumerate(network.function):
            if (func.type == 'RepeatEnd'):
                if (func.repeat_param.repeat_id == ri.id):
                    repeat_end_var[func.output[0]] = []
                    for i in range(func.repeat_param.times):
                        repeat_end_var[func.output[0]].append(_add_suffix(func.input[0], func.repeat_param.repeat_id, i))
        # --- RepeatStart: iteration 0 takes the external input, later
        #     iterations take the previous iteration's loop value -------------
        repeat_start_var = {}
        for (fpos, func) in enumerate(network.function):
            if (func.type == 'RepeatStart'):
                if (func.repeat_param.repeat_id == ri.id):
                    repeat_start_var[func.output[0]] = []
                    for i in range(ri.times):
                        if (i == 0):
                            v = func.input[0]
                            if (v in repeat_end_var):
                                v = repeat_end_var[v][(ri.times - 1)]
                            repeat_start_var[func.output[0]].append(v)
                        else:
                            v = func.input[1]
                            if (v in repeat_end_var):
                                v = repeat_end_var[v][(i - 1)]
                            else:
                                v = _add_suffix(v, ri.id, (i - 1))
                            repeat_start_var[func.output[0]].append(v)
        # --- functions ------------------------------------------------------
        net.ClearField('function')
        for (fpos, func) in enumerate(network.function):
            # Pseudo-functions belonging to this repeat are dropped entirely.
            if ((func.type == 'RepeatStart') or (func.type == 'RepeatEnd')):
                if (func.repeat_param.repeat_id == ri.id):
                    continue
            if (func.type == 'Delay'):
                if (func.recurrent_param.repeat_id == ri.id):
                    continue
            if (func.type == 'RecurrentInput'):
                if (func.recurrent_param.repeat_id == ri.id):
                    # Becomes a Split along the recurrent axis, one output
                    # per unrolled iteration.
                    f = net.function.add()
                    f.CopyFrom(func)
                    f.type = 'Split'
                    f.split_param.axis = func.recurrent_param.axis
                    f.ClearField('output')
                    for i in range(ri.times):
                        f.output.append(_add_suffix(func.output[0], ri.id, i))
                    pos = _search_repeat_id(func, ri.id)
                    del f.repeat_id[pos]
                    f.ClearField('recurrent_param')
                    continue
            if (func.type == 'RecurrentOutput'):
                if (func.recurrent_param.repeat_id == ri.id):
                    # Becomes a Stack gathering every iteration's output.
                    f = net.function.add()
                    f.CopyFrom(func)
                    f.type = 'Stack'
                    f.stack_param.axis = func.recurrent_param.axis
                    f.ClearField('input')
                    for i in range(ri.times):
                        f.input.append(_add_suffix(func.input[0], ri.id, i))
                    f.ClearField('recurrent_param')
                    continue
            pos = _search_repeat_id(func, ri.id)
            if (pos is not None):
                # Ordinary function inside the repeat: clone per iteration and
                # rewire its inputs/outputs to the unrolled names.
                for i in range(ri.times):
                    f = net.function.add()
                    f.CopyFrom(func)
                    del f.repeat_id[pos]
                    f.name = _add_suffix(func.name, ri.id, i)
                    for (n, v) in enumerate(func.input):
                        vname = None
                        if (v in self._parameter_original_names):
                            if (len(self._parameter_original_names[v]) == ri.times):
                                vname = self._parameter_original_names[v][i]
                            else:
                                # Shared (non-unrolled) parameter.
                                vname = v
                        elif (v in repeat_start_var):
                            vname = repeat_start_var[v][i]
                        elif (v in repeat_end_var):
                            vname = repeat_end_var[v][i]
                        elif (v in delay_var):
                            vname = delay_var[v][i]
                        else:
                            vname = _add_suffix(v, ri.id, i)
                        f.input[n] = vname
                    for (n, v) in enumerate(func.output):
                        vname = _add_suffix(v, ri.id, i)
                        f.output[n] = vname
            else:
                # Function outside the repeat: keep it, but a RepeatEnd output
                # used as input resolves to the final iteration's value.
                f = net.function.add()
                f.CopyFrom(func)
                for (n, v) in enumerate(func.input):
                    if (v in repeat_end_var):
                        vname = repeat_end_var[v][(ri.times - 1)]
                        f.input[n] = vname
        # Recurse to consume any remaining repeat_info entries.
        return self._expand_repeat(net)

    def _expand_network(self, network):
        """Expand one network and topologically re-sort its functions."""
        self._parameter_original_names = collections.OrderedDict()
        print(' Expanding {}.'.format(network.name))
        repeat_ids = collections.OrderedDict()
        for ri in network.repeat_info:
            repeat_ids[ri.id] = ri.times
        network = self._expand_repeat(network)
        # Build (name, type, inputs, outputs) tuples for the sorter.
        functions = []
        for func in network.function:
            functions.append((func.name, func.type, [n for n in func.input], [n for n in func.output]))
        sorted_functions = self._sort_functions(functions)
        func_list = []
        for f in functions:
            func_list.append(f[0])
        net = nnabla_pb2.NNablaProtoBuf().network.add()
        net.CopyFrom(network)
        net.ClearField('function')
        # Re-emit functions in topological order.
        for f in sorted_functions:
            func = net.function.add()
            func.CopyFrom(network.function[func_list.index(f[0])])
        return net

    def _sort_functions(self, orig_functions):
        # Delegates to the project's topological sorter.
        return _TopologicalSort(orig_functions).sorted()

    def _expand_parameter_variable(self, proto):
        """Rewrite a proto's parameter_variable list with expanded names, sorted."""
        names = []
        for pv in proto.parameter_variable:
            if (pv.variable_name in self._parameter_original_names):
                for n in self._parameter_original_names[pv.variable_name]:
                    names.append(n)
            else:
                names.append(pv.variable_name)
        proto.ClearField('parameter_variable')
        for n in sorted(names):
            pv = proto.parameter_variable.add()
            pv.variable_name = n

    def execute(self):
        """Return a copy of the NNP with all networks expanded and
        optimizer/executor parameter references updated to match."""
        nnp = nnabla_pb2.NNablaProtoBuf()
        nnp.CopyFrom(self._nnp)
        nnp.ClearField('network')
        for network in self._nnp.network:
            net = nnp.network.add()
            net.CopyFrom(self._expand_network(network))
        for optimizer in nnp.optimizer:
            self._expand_parameter_variable(optimizer)
        for executor in nnp.executor:
            self._expand_parameter_variable(executor)
        return nnp
def test_reassign():
    """Reassigning a transient array name inside a dace program must raise
    a DaceSyntaxError mentioning 'reassign'."""
    # NOTE(review): shouldfail is compiled via .to_sdfg() below, so it is
    # presumably decorated with @dace.program in the original source; the
    # decorator appears to be missing here -- confirm.
    def shouldfail(A: dace.float64[20], B: dace.float64[30], selector: dace.int32):
        if (selector == 0):
            tmp = np.empty_like(A)
            tmp[:] = A
            return tmp
        else:
            # `tmp` is re-bound with a different shape -- the illegal part.
            tmp = np.empty_like(B)
            tmp[:] = B
            return tmp[0:20]

    with pytest.raises(DaceSyntaxError, match='reassign'):
        shouldfail.to_sdfg()
class RecurrentCapsuleNetwork(CapsuleNetwork):
    """CapsuleNetwork whose sentence encoder is an aspect-aware (bi)GRU
    with a residual connection back to the raw embeddings."""

    def __init__(self, embedding, aspect_embedding, num_layers, bidirectional, capsule_size, dropout, num_categories):
        # hidden_size doubles when the GRU is bidirectional.
        super(RecurrentCapsuleNetwork, self).__init__(embedding=embedding, aspect_embedding=aspect_embedding, hidden_size=(embedding.embedding_dim * (2 if bidirectional else 1)), capsule_size=capsule_size, dropout=dropout, num_categories=num_categories)
        embed_size = embedding.embedding_dim
        # GRU consumes [word ; aspect] concatenations, hence embed_size * 2.
        self.rnn = nn.GRU(input_size=(embed_size * 2), hidden_size=embed_size, num_layers=num_layers, bidirectional=bidirectional, batch_first=True)
        self.bidirectional = bidirectional

    def _sentence_encode(self, sentence, aspect, mask=None):
        # Assumes sentence is (batch, time, embed) and aspect is (batch, embed)
        # -- inferred from the size()/expand() calls; confirm with callers.
        (batch_size, time_step, embed_size) = sentence.size()
        # Append the aspect vector to every time step.
        aspect_aware_sentence = torch.cat((sentence, aspect.unsqueeze(1).expand(batch_size, time_step, embed_size)), dim=(- 1))
        (output, _) = self.rnn(aspect_aware_sentence)
        if self.bidirectional:
            # Duplicate embeddings channel-wise so the residual add matches
            # the doubled width of the bidirectional GRU output.
            sentence = sentence.unsqueeze((- 1)).expand(batch_size, time_step, embed_size, 2)
            sentence = sentence.contiguous().view(batch_size, time_step, (embed_size * 2))
        # Residual connection followed by dropout.
        output = (output + sentence)
        output = F.dropout(output, p=self.dropout, training=self.training)
        return output
def test_indexedarray():
    """IndexedArray lengths with PlaceholderArray buffers: the outer length
    always comes from from_buffers' explicit length (3); the content length
    is only concrete when the index buffer holds real data."""
    # Real index, placeholder content: content length stays known (3).
    layout = ak.from_buffers({'class': 'IndexedArray', 'index': 'i64', 'content': {'class': 'NumpyArray', 'primitive': 'int64', 'form_key': 'node1'}, 'form_key': 'node0'}, 3, {'node0-index': np.array([0, 1, 2], dtype=np.int64), 'node1-data': PlaceholderArray(numpy, (3,), np.int64)}, highlevel=False)
    assert (layout.length == 3)
    assert (layout.content.length == 3)
    # Placeholder index, real content: content length becomes unknown.
    layout = ak.from_buffers({'class': 'IndexedArray', 'index': 'i64', 'content': {'class': 'NumpyArray', 'primitive': 'int64', 'form_key': 'node1'}, 'form_key': 'node0'}, 3, {'node0-index': PlaceholderArray(numpy, (3,), np.int64), 'node1-data': np.array([0, 1, 2, 3, 4, 5], dtype=np.int64)}, highlevel=False)
    assert (layout.length == 3)
    assert (layout.content.length is unknown_length)
    # Both placeholders: content length unknown.
    layout = ak.from_buffers({'class': 'IndexedArray', 'index': 'i64', 'content': {'class': 'NumpyArray', 'primitive': 'int64', 'form_key': 'node1'}, 'form_key': 'node0'}, 3, {'node0-index': PlaceholderArray(numpy, (3,), np.int64), 'node1-data': PlaceholderArray(numpy, (6,), np.int64)}, highlevel=False)
    assert (layout.length == 3)
    assert (layout.content.length is unknown_length)
    # Placeholder index with unknown shape: same result.
    layout = ak.from_buffers({'class': 'IndexedArray', 'index': 'i64', 'content': {'class': 'NumpyArray', 'primitive': 'int64', 'form_key': 'node1'}, 'form_key': 'node0'}, 3, {'node0-index': PlaceholderArray(numpy, (unknown_length,), np.int64), 'node1-data': PlaceholderArray(numpy, (6,), np.int64)}, highlevel=False)
    assert (layout.length == 3)
    assert (layout.content.length is unknown_length)
(name='kendalltau-scipy', pure=True) def kendalltau(a: np.ndarray, b: np.ndarray) -> np.ndarray: corr = kendalltau_(a, b).correlation return np.float64(corr)
def GetBfsEffDiam(tspec, *args):
    """Dispatch GetBfsEffDiam to the implementation for tspec's graph type.

    Raises TypeError when tspec is none of the supported SNAP graph types.
    """
    dispatch = (
        (PUNGraph, GetBfsEffDiam_PUNGraph),
        (PUndirNet, GetBfsEffDiam_PUndirNet),
        (PDirNet, GetBfsEffDiam_PDirNet),
        (PNGraph, GetBfsEffDiam_PNGraph),
        (PNEANet, GetBfsEffDiam_PNEANet),
        (PNGraphMP, GetBfsEffDiam_PNGraphMP),
        (PNEANetMP, GetBfsEffDiam_PNEANetMP),
    )
    for graph_type, impl in dispatch:
        # Exact type match (type() ==), mirroring the generated wrappers.
        if type(tspec) == graph_type:
            return impl(tspec, *args)
    raise TypeError('First argument has invalid type')
class Array(np.ndarray):
    """np.ndarray subclass that carries a dict of metadata alongside the data."""

    def __new__(cls, array, meta=None):
        if (not isinstance(array, np.ndarray)):
            raise ValueError('Array expects a numpy array.')
        if (not ((meta is None) or isinstance(meta, dict))):
            raise ValueError('Array expects meta data to be a dict.')
        meta = (meta if (meta is not None) else {})
        try:
            ob = array.view(cls)
        except AttributeError:
            # view() is unavailable on this input; return it unwrapped.
            return array
        ob._copy_meta(meta)
        return ob

    def _copy_meta(self, meta):
        # Shallow copy, converting nested plain dicts to the project Dict type.
        self._meta = Dict()
        for (key, val) in meta.items():
            if isinstance(val, dict):
                val = Dict(val)
            self._meta[key] = val

    # NOTE(review): __array_finalize__ below reads `ob.meta` without calling
    # it, so this is presumably an @property in the original source -- confirm.
    def meta(self):
        return self._meta

    def __array_finalize__(self, ob):
        # Runs on views/slices/copies: propagate metadata when available.
        if isinstance(ob, Array):
            self._copy_meta(ob.meta)
        else:
            self._copy_meta({})

    def __array_wrap__(self, out, context=None):
        # Ufunc results: 0-d -> scalar; changed shape -> plain ndarray;
        # otherwise keep the Array subclass (with metadata).
        if (not out.shape):
            return out.dtype.type(out)
        elif (out.shape != self.shape):
            return out.view(type=np.ndarray)
        else:
            return out
.gpu def test_batchmm(): (b, m, n, k) = tuple((dace.symbol(k) for k in 'bmnk')) with change_default(blas, 'cuBLAS'): def bmmtest(A: dace.float64[(b, m, k)], B: dace.float64[(b, k, n)], C: dace.float64[(b, m, n)]): C[:] = (A B) sdfg = bmmtest.to_sdfg() sdfg.apply_gpu_transformations() csdfg = sdfg.compile() (b, m, n, k) = (3, 32, 31, 30) x = np.random.rand(b, m, k) y = np.random.rand(b, k, n) z = np.zeros([b, m, n], np.float64) csdfg(A=x, B=y, C=z, b=b, m=m, n=n, k=k) ref = (x y) diff = np.linalg.norm((ref - z)) print('Difference:', diff) assert (diff < 1e-06)
def construct_train_loader(args, dataset=None):
    """Build the shuffled training data loader.

    Batches are split evenly across GPUs; the last partial batch is dropped
    only in distributed mode so every rank sees equal-sized batches.
    """
    per_gpu_batch = int(args.batch_size / args.num_gpus)
    return _construct_loader(
        args=args,
        split='train',
        batch_size=per_gpu_batch,
        shuffle=True,
        drop_last=bool(args.distributed),
        dataset=dataset if dataset else args.dataset,
    )
class PairProcessor(DataProcessor):
    """TSV-backed processor for binary sentence-pair classification.

    Expects rows of [guid, text_a, text_b, label]; rows missing fields are
    skipped with a warning.
    """

    def get_train_examples(self, data_dir):
        """Read train.tsv from data_dir."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        """Read dev.tsv from data_dir."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_test_examples(self, data_dir):
        """Read test.tsv; also returns the raw rows (see _create_test_examples)."""
        return self._create_test_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')

    def get_labels(self):
        return ['0', '1']

    def _create_examples(self, lines, set_type):
        """Build InputExamples from parsed TSV rows, lower-casing both texts."""
        examples = []
        for line in lines:
            guid = ('%s-%s' % (set_type, line[0]))
            try:
                text_a = line[1].lower()
                text_b = line[2].lower()
                label = line[3]
            except IndexError:
                # BUG FIX: `line` is a list, so 'str' + line raised TypeError
                # instead of printing the intended warning.
                print('cannot read the line: {}'.format(line))
                continue
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def _create_test_examples(self, lines, set_type):
        """Like _create_examples, but also returns the raw rows that parsed."""
        (examples, rows) = ([], [])
        for line in lines:
            guid = ('%s-%s' % (set_type, line[0]))
            try:
                text_a = line[1].lower()
                text_b = line[2].lower()
                label = line[3]
            except IndexError:
                # Same fix as in _create_examples.
                print('cannot read the line: {}'.format(line))
                continue
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
            rows.append(line)
        return (examples, rows)
def get_img_output_length(width, height):
    """Compute the (width, height) of the backbone's output feature map.

    Models four stride-2 stages with kernel sizes 7, 3, 1, 1 after adding
    6 pixels of padding to the input dimension.
    """
    def _reduce(length):
        length += 6  # padding applied before the first stage
        for filter_size in (7, 3, 1, 1):
            length = (length - filter_size + 2) // 2
        return length

    return (_reduce(width), _reduce(height))
def _infer_semantic_data_type(column: pd.Series) -> Any:
    """Infer a semantic type for a column (coordinate, email, country, phone,
    ip, URL, address) by majority vote over a sample, falling back to pandas'
    inferred dtype for anything else."""
    column_not_na = column[column.apply(_check_valid_values, 0)]
    # Validate at most ~500 values (10% of large columns) to keep this cheap.
    sample_size = (column_not_na.size if (column_not_na.size <= 100) else min(int((0.1 * column_not_na.size)), 500))
    column_not_na_subset = column_not_na.sample(n=sample_size, random_state=1)
    # Column-name hint: only attempt coordinate detection for lat/lon-ish names.
    lat_lon = ['lat', 'Lat', 'LAT', 'lon', 'Lon', 'LON']
    if any(((x in column.name) for x in lat_lon)):
        lat_long_infer_count = sum(pd.Series(validate_lat_long(column_not_na_subset, lat_long=False, lat=True)).tolist())
        # Majority vote: more than half the sample must validate.
        if (lat_long_infer_count > (column_not_na_subset.size / 2)):
            return 'coordinate'
    # Non-string columns: pandas' own dtype inference is the answer.
    if (infer_dtype(column_not_na_subset) != 'string'):
        return infer_dtype(column_not_na_subset)
    default_infer_dtype = infer_dtype(column_not_na_subset)
    # Count how many sampled values each validator accepts.
    semantic_data_type_dic = {'email': 0, 'country': 0, 'phone': 0, 'ip': 0, 'URL': 0, 'address': 0}
    semantic_data_type_dic['email'] = sum(pd.Series(validate_email(column_not_na_subset)).tolist())
    semantic_data_type_dic['country'] = sum(pd.Series(validate_country(column_not_na_subset)).tolist())
    semantic_data_type_dic['phone'] = sum(pd.Series(validate_phone(column_not_na_subset)).tolist())
    semantic_data_type_dic['ip'] = sum(pd.Series(validate_ip(column_not_na_subset)).tolist())
    semantic_data_type_dic['URL'] = sum(pd.Series(validate_url(column_not_na_subset)).tolist())
    semantic_data_type_dic['address'] = sum(pd.Series(validate_address(column_not_na_subset)).tolist())
    if all(((value == 0) for value in semantic_data_type_dic.values())):
        return default_infer_dtype
    # Best-scoring semantic type wins, but only with a strict majority.
    best_infer = max(semantic_data_type_dic.items(), key=operator.itemgetter(1))[0]
    best_infer_count = max(semantic_data_type_dic.items(), key=operator.itemgetter(1))[1]
    return (best_infer if (best_infer_count > (column_not_na_subset.size / 2)) else default_infer_dtype)
def convert_question_into_desc(qa_pair):
    """Return the predicate extracted from the qa_pair's question text."""
    return get_predicate(qa_pair['question'])
class LossScaler():
    """Static (fixed-scale) loss scaler: multiplies losses and gradients by a
    constant factor and never reports overflow.

    Fix: ``loss_scale`` is consumed as a value in scale_gradient/backward
    (``self.loss_scale * g``), so it must be an @property, not a plain
    method; likewise ``_has_inf_or_nan`` takes no self and is a
    @staticmethod. Both decorators are restored here.
    """

    def __init__(self, scale=1):
        self.cur_scale = scale

    def has_overflow(self, params):
        # Static scaling never detects overflow.
        return False

    @staticmethod
    def _has_inf_or_nan(x):
        # Kept for interface parity with dynamic scalers.
        return False

    def update_scale(self, overflow):
        # Scale is fixed; overflow notifications are ignored.
        pass

    @property
    def loss_scale(self):
        """Current scaling factor."""
        return self.cur_scale

    def scale_gradient(self, module, grad_in, grad_out):
        """Backward-hook helper: scale every incoming gradient."""
        return tuple(((self.loss_scale * g) for g in grad_in))

    def backward(self, loss, retain_graph=False):
        """Backpropagate the scaled loss."""
        scaled_loss = (loss * self.loss_scale)
        scaled_loss.backward(retain_graph=retain_graph)
class SimpleStem(CNNBlockBase):
    """Simple network stem: one stride-2 3x3 conv, then norm and activation."""

    def __init__(self, w_in, w_out, norm, activation_class):
        # Third argument declares the block's overall stride (2) to the base.
        super().__init__(w_in, w_out, 2)
        self.conv = conv2d(w_in, w_out, 3, stride=2)
        self.bn = get_norm(norm, w_out)
        self.af = activation_class()

    def forward(self, x):
        # Apply child modules in registration order: conv -> bn -> af.
        for layer in self.children():
            x = layer(x)
        return x
def create_r_distance(distance):
    """Factory for a rule function that reports a fixed distance.

    The returned r_distance(tn, t) yields a single ('distance', tn[0], fn)
    tuple whose fn() evaluates to the captured distance.
    """
    def r_distance(tn, t):
        def constant_distance():
            return distance
        return [('distance', tn[0], constant_distance)]

    return r_distance