code
stringlengths
101
5.91M
def convert_ontonotes_file(filename, simplify, bigger_first):
    """Write a copy of an OntoNotes JSON file with a `multi_ner` pair per word.

    The copy is saved next to the original with 'en_ontonotes' replaced by
    'en_ontonotes-multi' in the filename.  Each word gains a `multi_ner`
    tuple combining the original NER tag and a simplified tag ('-' when
    `simplify` is false).
    """
    assert 'en_ontonotes' in filename
    if not os.path.exists(filename):
        raise FileNotFoundError('Cannot convert missing file %s' % filename)
    target = filename.replace('en_ontonotes', 'en_ontonotes-multi')
    with open(filename) as fin:
        doc = json.load(fin)
    for sentence in doc:
        for word in sentence:
            tag = word['ner']
            # '-' marks "no simplified tag requested"
            reduced = simplify_ontonotes_to_worldwide(tag) if simplify else '-'
            word['multi_ner'] = (tag, reduced) if bigger_first else (reduced, tag)
    with open(target, 'w') as fout:
        json.dump(doc, fout, indent=2)
class ChamferDistanceFunction(torch.autograd.Function):
    """Autograd wrapper around the compiled Chamfer-distance kernels in `cd`.

    Returns, for two point clouds of shapes (B, n, 3) and (B, m, 3), the
    per-point nearest-neighbor squared distances in each direction.
    """

    # BUG FIX: torch.autograd.Function requires forward/backward to be
    # staticmethods; without the decorator, calling .apply() fails on
    # modern PyTorch releases.
    @staticmethod
    def forward(ctx, xyz1, xyz2):
        """Compute (dist1, dist2): nearest-neighbor distances both ways."""
        batchsize, n, _ = xyz1.size()
        _, m, _ = xyz2.size()
        xyz1 = xyz1.contiguous()
        xyz2 = xyz2.contiguous()
        dist1 = torch.zeros(batchsize, n)
        dist2 = torch.zeros(batchsize, m)
        idx1 = torch.zeros(batchsize, n, dtype=torch.int)
        idx2 = torch.zeros(batchsize, m, dtype=torch.int)
        if not xyz1.is_cuda:
            cd.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
        else:
            # output buffers must live on the GPU for the CUDA kernel
            dist1 = dist1.cuda()
            dist2 = dist2.cuda()
            idx1 = idx1.cuda()
            idx2 = idx2.cuda()
            cd.forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2)
        ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
        return dist1, dist2

    @staticmethod
    def backward(ctx, graddist1, graddist2):
        """Propagate gradients of both distance tensors back to the inputs."""
        xyz1, xyz2, idx1, idx2 = ctx.saved_tensors
        graddist1 = graddist1.contiguous()
        graddist2 = graddist2.contiguous()
        gradxyz1 = torch.zeros(xyz1.size())
        gradxyz2 = torch.zeros(xyz2.size())
        if not graddist1.is_cuda:
            cd.backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
        else:
            gradxyz1 = gradxyz1.cuda()
            gradxyz2 = gradxyz2.cuda()
            cd.backward_cuda(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
        return gradxyz1, gradxyz2
def build_transforms_head(cfg, is_train=True, PIXEL_MEAN=(0.485, 0.456, 0.406), PIXEL_STD=(0.229, 0.224, 0.225)):
    """Build the torchvision transform pipeline for head crops.

    Training adds padding, random cropping, color jitter and random erasing;
    evaluation only resizes and normalizes.

    NOTE: the mean/std defaults are tuples rather than lists to avoid the
    shared-mutable-default-argument pitfall; values are unchanged.
    """
    normalize_transform = T.Normalize(mean=PIXEL_MEAN, std=PIXEL_STD)
    if not is_train:
        return T.Compose([T.Resize([cfg.height, cfg.width]), T.ToTensor(), normalize_transform])
    return T.Compose([
        T.Resize([cfg.height, cfg.width]),
        T.Pad(10),
        T.RandomCrop([cfg.height, cfg.width]),
        T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
        T.ToTensor(),
        normalize_transform,
        RandomErasing(probability=0.5, mean=PIXEL_MEAN),
    ])
def register_Ns3LteRrcSapPhysCellIdRange_methods(root_module, cls):
    """Register constructors and attributes of ns3::LteRrcSap::PhysCellIdRange."""
    # default constructor and copy constructor
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LteRrcSap::PhysCellIdRange const &', 'arg0')])
    # plain-old-data members exposed on the binding
    for attr_name, attr_type in (('haveRange', 'bool'), ('range', 'uint16_t'), ('start', 'uint16_t')):
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
_kl(ContinuousBernoulli, ContinuousBernoulli) def _kl_continuous_bernoulli_continuous_bernoulli(p, q): t1 = (p.mean * (p.logits - q.logits)) t2 = (p._cont_bern_log_norm() + torch.log1p((- p.probs))) t3 = ((- q._cont_bern_log_norm()) - torch.log1p((- q.probs))) return ((t1 + t2) + t3)
# FCI orientation "rule 0": reset every endpoint to CIRCLE, re-apply background
# knowledge, then for every unshielded triple a *-* b *-* c (a, c non-adjacent,
# not already a definite collider) orient it as a collider a *-> b <-* c when
# b is NOT in the recorded separating set of (a, c) and knowledge allows both
# arrowheads.  ChoiceGenerator enumerates the unordered pairs of b's neighbors.
# NOTE(review): collapsed onto one line by an upstream tool; code left untouched.
def rule0(graph, nodes, sep_sets, knowledge, verbose): reorientAllWith(graph, Endpoint.CIRCLE) fci_orient_bk(knowledge, graph) for node_b in nodes: adjacent_nodes = graph.get_adjacent_nodes(node_b) if (len(adjacent_nodes) < 2): continue cg = ChoiceGenerator(len(adjacent_nodes), 2) combination = cg.next() while (combination is not None): node_a = adjacent_nodes[combination[0]] node_c = adjacent_nodes[combination[1]] combination = cg.next() if graph.is_adjacent_to(node_a, node_c): continue if graph.is_def_collider(node_a, node_b, node_c): continue sep_set = sep_sets.get((graph.node_map[node_a], graph.node_map[node_c])) if ((sep_set is not None) and (not sep_set.__contains__(graph.node_map[node_b]))): if (not is_arrow_point_allowed(node_a, node_b, graph, knowledge)): continue if (not is_arrow_point_allowed(node_c, node_b, graph, knowledge)): continue edge1 = graph.get_edge(node_a, node_b) graph.remove_edge(edge1) graph.add_edge(Edge(node_a, node_b, edge1.get_proximal_endpoint(node_a), Endpoint.ARROW)) edge2 = graph.get_edge(node_c, node_b) graph.remove_edge(edge2) graph.add_edge(Edge(node_c, node_b, edge2.get_proximal_endpoint(node_c), Endpoint.ARROW)) if verbose: print(((((('Orienting collider: ' + node_a.get_name()) + ' *-> ') + node_b.get_name()) + ' <-* ') + node_c.get_name()))
class EfficientFormerModel(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless `torch` is installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
# NOTE(review): the original line started with a bare `.parametrize(...)` —
# the `@pytest.mark` prefix was evidently lost upstream; restored here.
@pytest.mark.parametrize('dataset_type', [pytest.param('spark_dataframe_test', marks=pytest.mark.spark), pytest.param('pandas_dataframe_test', marks=pytest.mark.core)])
def test_with_session_ids(dataset_type, request):
    """A random split must partition the log: |train| + |test| == |log|."""
    log = request.getfixturevalue(dataset_type)
    splitter = RandomSplitter(test_size=0.3, drop_cold_items=False, drop_cold_users=False, seed=SEED)
    train, test = splitter.split(log)
    if isinstance(log, pd.DataFrame):
        assert train.shape[0] + test.shape[0] == log.shape[0]
    else:
        # Spark DataFrames: compare row counts instead of shapes
        assert train.count() + test.count() == log.count()
class _QPool2dBenchmarkBase(op_bench.TorchBenchmarkBase):
    """Shared setup for quantized 2-d pooling benchmarks."""

    def setup(self, N, C, H, W, dtype, contig):
        """Build a quantized input tensor; N == 0 selects a 3-d (C, H, W) input."""
        shape = (C, H, W) if N == 0 else (N, C, H, W)
        raw = (torch.rand(*shape) - 0.5) * 256
        self.q_input = torch.quantize_per_tensor(raw, scale=1.0, zero_point=0, dtype=dtype)
        if not contig:
            # Materialize the tensor channels-last, then view it back in the
            # original order so the benchmark sees a non-contiguous input.
            if N == 0:
                self.q_input = self.q_input.permute(1, 2, 0).contiguous().permute(2, 0, 1)
            else:
                self.q_input = self.q_input.permute(0, 2, 3, 1).contiguous().permute(0, 3, 1, 2)

    def forward(self):
        return self.pool_op(self.q_input)
class Mergeable(object):
    """Abstract interface for objects that can be merged with one another."""

    def getTypeName(self) -> str:
        """Return a human-readable type name for this object."""
        raise NotImplementedError('getTypeName not implemented.')

    # BUG FIX: the annotation is quoted — `Mergeable` is not yet bound while
    # the class body executes, so the unquoted form raised NameError (unless
    # the file used `from __future__ import annotations`).  The error message
    # also said 'equals' for a method named shouldMerge.
    def shouldMerge(self, other: 'Mergeable') -> bool:
        """Return True when `other` should be merged into this object."""
        raise NotImplementedError('shouldMerge not implemented.')
def test_transformer_decoder():
    """Smoke-test NRTRDecoder output shapes in train and test forward modes."""
    decoder = NRTRDecoder(num_classes=37, padding_idx=36, max_seq_len=5)
    decoder.init_weights()
    decoder.train()
    out_enc = torch.rand(1, 25, 512)
    tgt_dict = {'padded_targets': torch.LongTensor([[1, 1, 1, 1, 36]])}
    img_metas = [{'valid_ratio': 1.0}]
    # (Removed a no-op self-assignment of tgt_dict['padded_targets'].)
    out_train = decoder(None, out_enc, tgt_dict, img_metas, True)
    assert out_train.shape == torch.Size([1, 5, 36])
    out_test = decoder(None, out_enc, tgt_dict, img_metas, False)
    assert out_test.shape == torch.Size([1, 5, 36])
# Parse a tab-separated annotation stream into an annotations.Events object:
# columns are start-time, end-time, label; every event gets confidence 1.0.
# NOTE(review): the leading `_to_string_io` is the tail of a truncated
# decorator (presumably `@io.coerce_to_string_io` or similar) — confirm
# against the original source before relying on this block.
_to_string_io def load_events(fhandle: TextIO) -> annotations.Events: times = [] labels = [] confidence = [] reader = csv.reader(fhandle, delimiter='\t') for line in reader: times.append([float(line[0]), float(line[1])]) labels.append(line[2]) confidence.append(1.0) events_data = annotations.Events(np.array(times), 'seconds', labels, 'open', np.array(confidence)) return events_data
# Sentence-similarity scorer built on a word-averaging model (WordAveraging)
# plus a SentencePiece tokenizer.  make_example lowercases, Treebank-tokenizes
# and SentencePiece-encodes a sentence; find_similarity scores aligned pairs;
# find_similarity_batched/embed_texts batch over inputs.
# SECURITY NOTE(review): torch.load deserializes a pickle — only load model
# files from trusted sources.
# NOTE(review): collapsed onto one line by an upstream tool; code left untouched.
class SimilarityEvaluator(): def __init__(self, model_path='models/sim/sim.pt', tokenizer_path='models/sim/sim.sp.30k.model', gpu=False): self.model_path = model_path self.tokenizer_path = tokenizer_path self.tok = TreebankWordTokenizer() kw = {} if (not torch.cuda.is_available()): kw['map_location'] = torch.device('cpu') model = torch.load(self.model_path, **kw) state_dict = model['state_dict'] vocab_words = model['vocab_words'] args = model['args'] if (gpu is False): args.gpu = (- 1) self.model = WordAveraging(args, vocab_words) self.model.load_state_dict(state_dict, strict=True) self.sp = spm.SentencePieceProcessor() self.sp.Load(self.tokenizer_path) self.model.eval() def make_example(self, sentence): sentence = sentence.lower() sentence = ' '.join(self.tok.tokenize(sentence)) sentence = self.sp.EncodeAsPieces(sentence) wp1 = Example(' '.join(sentence)) wp1.populate_embeddings(self.model.vocab) return wp1 def find_similarity(self, s1, s2): with torch.no_grad(): s1 = [self.make_example(x) for x in s1] s2 = [self.make_example(x) for x in s2] (wx1, wl1, wm1) = self.model.torchify_batch(s1) (wx2, wl2, wm2) = self.model.torchify_batch(s2) scores = self.model.scoring_function(wx1, wm1, wl1, wx2, wm2, wl2) return [x.item() for x in scores] def find_similarity_batched(self, inputs, preds, batch_size=32): assert (len(inputs) == len(preds)) sim_scores = [] for i in range(0, len(inputs), batch_size): sim_scores.extend(self.find_similarity(inputs[i:(i + batch_size)], preds[i:(i + batch_size)])) return np.array(sim_scores) def embed_texts(self, texts, batch_size=128): result = [] for i in range(0, len(texts), batch_size): (wx, wl, wm) = self.model.torchify_batch([self.make_example(x) for x in texts[i:(i + batch_size)]]) with torch.no_grad(): tensors = torch.nn.functional.normalize(self.model.encode(wx, wm, wl)) result.append(tensors.cpu().numpy()) return np.concatenate(result)
# Autograd Function fusing dropout + residual-add + (RMS/Layer)Norm over a
# row subset, delegating to compiled kernels.  forward() aligns all inputs to
# 16 bytes, runs the fused kernel, stashes what backward needs on ctx, and
# optionally returns the pre-norm activation and/or the dropout mask (the
# mask is marked non-differentiable).  backward() replays the fused backward
# kernel and returns one gradient slot (or None) per forward argument.
# NOTE(review): this statement is split mid-assignment across two source
# lines by an upstream tool; code left untouched.
class DropoutAddLayerNormSubsetFn(torch.autograd.Function): def forward(ctx, x0, residual, gamma, beta, colscale, x0_subset, out_subset, dropout_p, epsilon, rowscale_const, out_numrows, residual_in_fp32=False, prenorm=False, is_rms_norm=False, return_dmask=False): x0 = maybe_align(x0.contiguous(), 16) residual = (maybe_align(residual.contiguous(), 16) if (residual is not None) else None) gamma = maybe_align(gamma.contiguous(), 16) beta = (maybe_align(beta.contiguous(), 16) if (beta is not None) else None) colscale = (maybe_align(colscale.contiguous(), 16) if (colscale is not None) else None) (zmat, xmat, dmask, mu, rsigma) = _dropout_add_layer_norm_subset_forward(x0, residual, gamma, beta, colscale, x0_subset, out_subset, dropout_p, epsilon, rowscale_const, out_numrows, residual_in_fp32, is_rms_norm) x0_saved = (x0 if (colscale is not None) else None) x_shape = ((- 1), *x0.shape[1:]) ctx.save_for_backward(xmat.view(x_shape), x0_saved, dmask, gamma, mu, rsigma, colscale, x0_subset, out_subset) ctx.prenorm = prenorm ctx.dropout_p = dropout_p ctx.rowscale_const = rowscale_const ctx.x0_numrows = x0.shape[:(- 1)].numel() ctx.has_residual = (residual is not None) ctx.is_rms_norm = is_rms_norm ctx.has_beta = (beta is not None) z_shape = ((- 1), *x0.shape[1:]) if (not return_dmask): return (zmat.view(z_shape) if (not prenorm) else (zmat.view(z_shape), xmat.view(x0.shape))) else: z = zmat.view(z_shape) dmask = (dmask.view(x0.shape) if (dropout_p > 0.0) else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)) ctx.mark_non_differentiable(dmask) return ((z, dmask) if (not prenorm) else (z, xmat.view(x_shape), dmask)) def backward(ctx, dz, *args): dz = maybe_align(dz.contiguous(), 16) dx = (maybe_align(args[0].contiguous(), 16) if ctx.prenorm else None) (x, x0, dmask, gamma, mu, rsigma, colscale, x0_subset, out_subset) = ctx.saved_tensors dropout_p = ctx.dropout_p has_residual = ctx.has_residual (dx0mat, dresidualmat, dgamma, dbeta, *rest) = 
_dropout_add_layer_norm_subset_backward(dz, dx, x, x0, dmask, mu, rsigma, gamma, colscale, x0_subset, out_subset, dropout_p, ctx.rowscale_const, ctx.x0_numrows, has_residual, ctx.is_rms_norm) dx0 = dx0mat.view((- 1), *x.shape[1:]) dresidual = (dresidualmat.view(x.shape) if (dresidualmat is not None) else None) dcolscale = (rest[0] if (colscale is not None) else None) return (dx0, dresidual, dgamma, (dbeta if ctx.has_beta else None), dcolscale, None, None, None, None, None, None, None, None, None, None)
# Run one ns-3 test job synchronously and return
# (returncode, stdout, stderr, elapsed_seconds).  Python jobs are launched
# through the configured interpreter; binaries are resolved under build_path
# or NS3_BUILDDIR; valgrind mode wraps the command with full leak checking
# (exit code 2 on error).  Undecodable stdout/stderr forces retval = 1.
# Relies on module globals: NS3_BASEDIR, NS3_BUILDDIR, PYTHON,
# VALGRIND_SUPPRESSIONS_FILE, options.
# NOTE(review): collapsed onto one line by an upstream tool; code left untouched.
def run_job_synchronously(shell_command, directory, valgrind, is_python, build_path=''): suppressions_path = os.path.join(NS3_BASEDIR, VALGRIND_SUPPRESSIONS_FILE) if is_python: path_cmd = ((PYTHON[0] + ' ') + os.path.join(NS3_BASEDIR, shell_command)) elif len(build_path): path_cmd = os.path.join(build_path, shell_command) else: path_cmd = os.path.join(NS3_BUILDDIR, shell_command) if valgrind: cmd = ('valgrind --suppressions=%s --leak-check=full --show-reachable=yes --error-exitcode=2 --errors-for-leak-kinds=all %s' % (suppressions_path, path_cmd)) else: cmd = path_cmd if options.verbose: print(('Synchronously execute %s' % cmd)) start_time = time.time() proc = subprocess.Popen(cmd, shell=True, cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout_results, stderr_results) = proc.communicate() elapsed_time = (time.time() - start_time) retval = proc.returncode try: stdout_results = stdout_results.decode() except UnicodeDecodeError: print(('Non-decodable character in stdout output of %s' % cmd)) print(stdout_results) retval = 1 try: stderr_results = stderr_results.decode() except UnicodeDecodeError: print(('Non-decodable character in stderr output of %s' % cmd)) print(stderr_results) retval = 1 if options.verbose: print('Return code = ', retval) print('stderr = ', stderr_results) return (retval, stdout_results, stderr_results, elapsed_time)
class WordSplitter(Registrable):
    """Registrable base class that splits a sentence string into Tokens."""

    default_implementation = 'spacy'

    def split_words(self, sentence: str) -> List[Token]:
        """Split `sentence` into tokens; implemented by subclasses."""
        raise NotImplementedError

    # BUG FIX: this factory uses `cls` (cls.list_available / cls.by_name) but
    # was missing @classmethod, so WordSplitter.from_params(params) bound
    # `params` to `cls` and failed.
    @classmethod
    def from_params(cls, params: Params) -> 'WordSplitter':
        """Instantiate the registered subclass named by params['type']."""
        choice = params.pop_choice('type', cls.list_available(), default_to_first_choice=True)
        return cls.by_name(choice).from_params(params)
def generate_test_cpp_sources(test_params, template):
    """Render the C++ test-case source for one functional variant."""
    stmts, _ = compute_cpp_args_construction_stmts_and_forward_arg_symbols(test_params)
    return template.substitute(
        functional_variant_name=test_params.functional_variant_name,
        cpp_args_construction_stmts=';\n  '.join(stmts),
        cpp_function_call=test_params.cpp_function_call,
    )
# NOTE(review): the original line began with a bare route tuple — the
# `@app.route` prefix was evidently lost upstream; restored here (confirm the
# Flask app object is named `app` in this module).
@app.route('/response-conformance/missing-field', methods=['GET'])
def missing_field():
    """Return a response deliberately missing a field, for conformance tests."""
    response_data = {'id': '123', 'name': 'Alice'}
    return jsonify(response_data), 200
def gs_link_prediction(g, edge_ids, edge_labels, num_samples, optimizer, batch_size=4, epochs=4, bias=True, dropout=0.0, normalize='l2', seed=0, shuffle=True):
    """Train a GraphSAGE link-prediction model on `g` and return it."""
    # seed every RNG involved for reproducibility
    set_seed(seed)
    tf.random.set_seed(seed)
    if shuffle:
        random.seed(seed)
    generator = GraphSAGELinkGenerator(g, batch_size, num_samples)
    train_flow = generator.flow(edge_ids, edge_labels, shuffle=True)
    model = gs_link_pred_model(num_samples, generator, optimizer, bias, dropout, normalize)
    model.fit(train_flow, epochs=epochs, verbose=1, use_multiprocessing=False, workers=4, shuffle=shuffle)
    return model
def HAT(max_byte_size=33554432, memory_estimate_period=1000000, grace_period=200, split_criterion='info_gain', split_confidence=1e-07, tie_threshold=0.05, binary_split=False, stop_mem_management=False, remove_poor_atts=False, no_preprune=False, leaf_prediction='nba', nb_threshold=0, nominal_attributes=None, bootstrap_sampling=True, random_state=None):
    """Deprecated alias for HoeffdingAdaptiveTreeClassifier.

    BUG FIX: the source had `max_byte_size=` with no value (a SyntaxError);
    restored to 33554432 (32 MB), the library's documented default.
    """
    warnings.warn("'HAT' has been renamed to 'HoeffdingAdaptiveTreeClassifier' in v0.5.0.\nThe old name will be removed in v0.7.0", category=FutureWarning)
    return HoeffdingAdaptiveTreeClassifier(max_byte_size=max_byte_size, memory_estimate_period=memory_estimate_period, grace_period=grace_period, split_criterion=split_criterion, split_confidence=split_confidence, tie_threshold=tie_threshold, binary_split=binary_split, stop_mem_management=stop_mem_management, remove_poor_atts=remove_poor_atts, no_preprune=no_preprune, leaf_prediction=leaf_prediction, nb_threshold=nb_threshold, nominal_attributes=nominal_attributes, bootstrap_sampling=bootstrap_sampling, random_state=random_state)
def load_data(path):
    """Read a TSV whose first column is a URL; map video id -> URL.

    The video id is taken as the last 11 characters of the URL (the YouTube
    id length — presumably; confirm against the data source).
    """
    urls = {}
    with open(path, 'r') as f:
        for line in f:
            # split at most once so extra tab-separated columns don't crash
            url, _ = line.split('\t', 1)
            vid = url[-11:]
            urls[vid] = url
    return urls
class MovingAverageDict(object):
    """Maintain one MovingAverage per key of the value dicts it is fed."""

    def __init__(self, decay=0.99):
        self.decay = decay
        self.ma_dict = {}

    def __call__(self, value_dict):
        """Feed one dict of values; non-float entries are silently ignored."""
        for key, val in value_dict.items():
            # normalize numpy float scalars / 0-d float32 arrays to plain floats
            if isinstance(val, (np.float32, np.float64, np.float16)) or (
                    isinstance(val, np.ndarray) and (val.dtype == 'float32') and (val.ndim == 0)):
                val = float(val)
            if not isinstance(val, float):
                continue
            if key not in self.ma_dict:
                self.ma_dict[key] = MovingAverage()
            self.ma_dict[key](val)

    def get_val_dict(self):
        """Return {key: current moving-average value}."""
        return {key: ma_obj.value for key, ma_obj in self.ma_dict.items()}

    def get_val_str(self):
        """Return 'key: v.vvvv' pairs, key-sorted and comma-joined."""
        pairs = sorted(self.get_val_dict().items(), key=lambda item: item[0])
        return ', '.join('%s: %.4f' % (key, val) for key, val in pairs)
def get_polynomial_decay_schedule_with_warmup(*args, **kwargs):
    """Placeholder that raises a helpful error unless the `torch` backend is available."""
    requires_backends(get_polynomial_decay_schedule_with_warmup, ['torch'])
def _is_this_machine(host): try: machine_ips = [addr[4][0] for addr in socket.getaddrinfo(socket.gethostname(), None)] host_ip = socket.gethostbyname(host) except socket.gaierror: return False return any(((host_ip == machine_ip) for machine_ip in machine_ips))
class EncoderLayer(tf.keras.layers.Layer):
    """Transformer encoder block: self-attention then a position-wise FFN,
    each followed by dropout, a residual add, and LayerNorm."""

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(EncoderLayer, self).__init__()
        self.mha = MultiHeadAttention(d_model, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-06)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-06)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask=None):
        attn, _ = self.mha(x, x, x, mask)
        attn = self.dropout1(attn, training=training)
        normed = self.layernorm1(x + attn)
        ff = self.ffn(normed)
        ff = self.dropout2(ff, training=training)
        return self.layernorm2(normed + ff)
def shfl_down_f32(mask, val, offset):
    """Emit a CUDA shuffle-down-sync on an f32 lane value.

    The constant 31 is the fixed final operand expected by the runtime's
    `cuda_shfl_down_sync_f32` — presumably warp-size related; see the
    kernel implementation to confirm.
    """
    last_operand = 31
    return impl.call_internal('cuda_shfl_down_sync_f32', mask, val, offset, last_operand, with_runtime_context=False)
def _maybe_download_dataset(dataset_path):
    """Download and unpack the CLRS dataset unless it is already present.

    Returns the dataset folder path.
    """
    dataset_folder = os.path.join(dataset_path, clrs.get_clrs_folder())
    if os.path.isdir(dataset_folder):
        logging.info('Dataset found at %s. Skipping download.', dataset_folder)
        return dataset_folder
    logging.info('Dataset not found in %s. Downloading...', dataset_folder)
    clrs_url = clrs.get_dataset_gcp_url()
    request = requests.get(clrs_url, allow_redirects=True)
    clrs_file = os.path.join(dataset_path, os.path.basename(clrs_url))
    os.makedirs(dataset_folder)
    # BUG FIX: the archive was written via open(...).write(...) without
    # closing the handle; use a context manager so the file is flushed and
    # closed before unpacking.
    with open(clrs_file, 'wb') as f:
        f.write(request.content)
    shutil.unpack_archive(clrs_file, extract_dir=dataset_folder)
    os.remove(clrs_file)
    return dataset_folder
def _cross_val_metrics(args_namespace):
    """CLI adapter: forward the parsed arguments to `cross_val_metrics`."""
    ns = args_namespace
    return cross_val_metrics(ns.dataset_path, ns.output_path, ns.config_path,
                             ns.nb_folds, ns.train_size_ratio,
                             ns.exclude_slot_metrics, ns.include_errors,
                             ns.verbosity)
class RegNetForImageClassification(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless `torch` is installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class CamembertTokenizer:
    """Import-time placeholder raising an error unless `sentencepiece` is installed."""

    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    # BUG FIX: from_pretrained is called on the class, not an instance;
    # as a plain method the first positional argument was swallowed as
    # `self`.  Made a classmethod, matching the other dummy objects.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_sentencepiece(cls)
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register constructors and methods of ns3::ObjectBase for the bindings."""
    # constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    # attribute accessors
    cls.add_method('GetAttribute', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    cls.add_method('GetAttributeFailSafe', 'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetAttribute', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    cls.add_method('SetAttributeFailSafe', 'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # trace connection API
    cls.add_method('TraceConnect', 'bool',
                   [param('std::string', 'name'), param('std::string', 'context'),
                    param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceConnectWithoutContext', 'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnect', 'bool',
                   [param('std::string', 'name'), param('std::string', 'context'),
                    param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnectWithoutContext', 'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    # protected machinery
    cls.add_method('ConstructSelf', 'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    cls.add_method('NotifyConstructionCompleted', 'void', [],
                   visibility='protected', is_virtual=True)
    return
def gen_corrupt_batch_gpu(corruption, severity):
    """Return a closure that applies the named corruption to each image of a batch on GPU.

    The returned callable has signature (images, model) -> images; `model` is
    accepted for interface compatibility but unused here.
    """
    # Hoisted out of the per-image loop: the lookup is loop-invariant, and an
    # unknown corruption name now fails fast at construction time.
    corr_func = corruption_dict[corruption]

    def corrupt_batch_gpu(images, model):
        for i in range(images.size(0)):
            images[i] = corr_func(images[i], severity, gpu=True)
        return images

    return corrupt_batch_gpu
def replicate_small_cp_cmd(src, dst, recursive=True) -> Optional[str]:
    """Return the provider-native copy command when src and dst share a cloud provider.

    Returns None for cross-provider pairs or unrecognized providers.
    """
    provider_src, _, _ = parse_path(src)
    provider_dst, _, _ = parse_path(dst)
    if provider_src != provider_dst:
        return None
    handlers = {
        'aws': fallback_cmd_s3_cp,
        'gcp': fallback_cmd_gcp_cp,
        'azure': fallback_cmd_azure_cp,
    }
    handler = handlers.get(provider_src)
    return handler(src, dst, recursive) if handler is not None else None
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
    """Randomly interleave ground-truth and generated examples in one batch.

    A random permutation assigns `num_ground_truth` slots to ground-truth
    examples and the rest to generated ones; dynamic_stitch restores batch order.
    """
    perm = tf.random_shuffle(tf.range(int(batch_size)))
    gt_idx = tf.gather(perm, tf.range(num_ground_truth))
    gen_idx = tf.gather(perm, tf.range(num_ground_truth, int(batch_size)))
    gt_examples = tf.gather(ground_truth_x, gt_idx)
    gen_examples = tf.gather(generated_x, gen_idx)
    return tf.dynamic_stitch([gt_idx, gen_idx], [gt_examples, gen_examples])
class STSDataReader:
    """Read STS-style delimited files into InputExamples.

    Column indices, delimiter and quoting are configurable; scores are
    optionally min-max normalized to [0, 1].
    """

    def __init__(self, dataset_folder, s1_col_idx=5, s2_col_idx=6, score_col_idx=4, delimiter='\t', quoting=csv.QUOTE_NONE, normalize_scores=True, min_score=0, max_score=5):
        self.dataset_folder = dataset_folder
        self.score_col_idx = score_col_idx
        self.s1_col_idx = s1_col_idx
        self.s2_col_idx = s2_col_idx
        self.delimiter = delimiter
        self.quoting = quoting
        self.normalize_scores = normalize_scores
        self.min_score = min_score
        self.max_score = max_score

    def get_examples(self, filename, max_examples=0):
        """Read `filename` from the dataset folder; stop after `max_examples` (> 0)."""
        examples = []
        filepath = os.path.join(self.dataset_folder, filename)
        # BUG FIX: the file handle was opened inline and never closed;
        # a context manager guarantees closure.  Also renamed the loop
        # variable, which shadowed the builtin `id`.
        with open(filepath, encoding='utf-8') as fin:
            reader = csv.reader(fin, delimiter=self.delimiter, quoting=self.quoting)
            for row_id, row in enumerate(reader):
                score = float(row[self.score_col_idx])
                if self.normalize_scores:
                    score = (score - self.min_score) / (self.max_score - self.min_score)
                s1 = row[self.s1_col_idx]
                s2 = row[self.s2_col_idx]
                examples.append(InputExample(guid=filename + str(row_id), texts=[s1, s2], label=score))
                if max_examples > 0 and len(examples) >= max_examples:
                    break
        return examples
def test_class_splitter_for_fold_overlaps():
    """Across repeated splits, train and test folds must never share samples."""
    class DemoTask(Task):
        def __init__(self):
            super(DemoTask, self).__init__(index=0, num_classes=None)
            self._inputs = np.arange(10)

        def __len__(self):
            return len(self._inputs)

        def __getitem__(self, index):
            return self._inputs[index]

    splitter = ClassSplitter(shuffle=True, num_train_per_class=5, num_test_per_class=5)
    task = DemoTask()
    all_train_samples = []
    all_test_samples = []
    for _ in range(10):
        tasks_split = splitter(task)
        train_task = tasks_split['train']
        test_task = tasks_split['test']
        train_samples = set(train_task[i] for i in range(len(train_task)))
        # BUG FIX: the test fold was indexed over len(train_task); use the
        # test task's own length.
        test_samples = set(test_task[i] for i in range(len(test_task)))
        assert len(train_samples.intersection(test_samples)) == 0
        all_train_samples.append(train_samples)
        # BUG FIX: the original appended train_samples a second time and never
        # collected any test folds, so the final assertion was vacuous.
        all_test_samples.append(test_samples)
    samples_in_all_train_splits = set().union(*all_train_samples)
    samples_in_all_test_splits = set().union(*all_test_samples)
    assert len(samples_in_all_test_splits.intersection(samples_in_all_train_splits)) == 0
def write_recip_lattice(f, recip_lattice):
    """Write the three reciprocal-lattice vectors as a Wannier90-style block."""
    f.write('begin recip_lattice\n')
    # exactly the first three rows, one vector per line
    for vec in recip_lattice[:3]:
        f.write(' {0:>11.7f} {1:>11.7f} {2:>11.7f}\n'.format(*vec))
    f.write('end recip_lattice\n\n')
class UnsetValue(object):
    """Falsy sentinel meaning "no value was provided"."""

    def __str__(self):
        return '<unset value>'

    def __repr__(self):
        return '<unset value>'

    def __bool__(self):
        # always falsy (Python 3)
        return False

    # Python 2 truthiness alias
    __nonzero__ = __bool__
class RGBImgObsWrapper(gym.core.ObservationWrapper):
    """Replace the 'image' observation with a full RGB rendering of the env."""

    def __init__(self, env, tile_size=8):
        super().__init__(env)
        self.tile_size = tile_size
        width = self.env.width * tile_size
        height = self.env.height * tile_size
        self.observation_space.spaces['image'] = spaces.Box(
            low=0, high=255, shape=(width, height, 3), dtype='uint8')

    def observation(self, obs):
        frame = self.unwrapped.render(mode='rgb_array', highlight=False, tile_size=self.tile_size)
        return {'mission': obs['mission'], 'image': frame}
def validate_cn_ric(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate Chinese RIC codes in a scalar, Series, or DataFrame.

    With a DataFrame, `column` selects a single column; otherwise every cell
    is validated element-wise.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(ric.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if column != '':
            return df[column].apply(ric.is_valid)
        return df.applymap(ric.is_valid)
    # scalar (string) input
    return ric.is_valid(df)
class FlaxGPTNeoModel(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless `flax` is installed."""

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
# NOTE(review): the original line started with a bare `.parametrize(...)` —
# the `@pytest.mark` prefix was evidently lost upstream; restored here.  The
# expected scores read `0.` and may themselves be truncated constants —
# verify against the project's test history.
@pytest.mark.parametrize('forest_cls, expected_oob_score', [(RandomSurvivalForest, 0.), (ExtraSurvivalTrees, 0.)])
def test_oob_score(make_whas500, forest_cls, expected_oob_score):
    """OOB scoring requires bootstrap=True; with it, shapes and score match."""
    whas500 = make_whas500(to_numeric=True)
    forest = forest_cls(oob_score=True, bootstrap=False, random_state=2)
    with pytest.raises(ValueError, match='Out of bag estimation only available if bootstrap=True'):
        forest.fit(whas500.x, whas500.y)
    forest.set_params(bootstrap=True)
    forest.fit(whas500.x, whas500.y)
    assert forest.oob_prediction_.shape == (whas500.x.shape[0],)
    assert forest.oob_score_ == pytest.approx(expected_oob_score)
def register_Ns3NoOpHandoverAlgorithm_methods(root_module, cls):
    """Register constructors and methods of ns3::NoOpHandoverAlgorithm."""
    # copy constructor first (matching the original registration order), then default
    cls.add_constructor([param('ns3::NoOpHandoverAlgorithm const &', 'arg0')])
    cls.add_constructor([])
    # public SAP accessors
    cls.add_method('GetLteHandoverManagementSapProvider',
                   'ns3::LteHandoverManagementSapProvider *', [], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetLteHandoverManagementSapUser', 'void',
                   [param('ns3::LteHandoverManagementSapUser *', 's')], is_virtual=True)
    # protected lifecycle / measurement hooks
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoReportUeMeas', 'void',
                   [param('uint16_t', 'rnti'), param('ns3::LteRrcSap::MeasResults', 'measResults')],
                   visibility='protected', is_virtual=True)
    return
def get_batch_size(tensor_shape):
    """Return the static batch size (dimension 0) of a rank-4 TF1 TensorShape."""
    tensor_shape.assert_has_rank(rank=4)
    batch_dim = tensor_shape[0]
    return batch_dim.value
def register_all_hico(root):
    """Register every predefined HICO split with its metadata and data paths."""
    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_HICO.items():
        for key, (image_root, json_file) in splits_per_dataset.items():
            # leave fully-qualified URIs untouched; join plain paths onto root
            ann_path = json_file if '://' in json_file else os.path.join(root, json_file)
            register_hico_instances(key,
                                    _get_builtin_metadata(dataset_name),
                                    ann_path,
                                    os.path.join(root, image_root),
                                    evaluator_type=dataset_name)
class ViTMAEModel(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless `torch` is installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class ENetMinus(ENet):
    """Trimmed ENet variant: shorter bottleneck stack than the parent ENet.

    NOTE(review): max_input_h / max_input_w are kept for interface
    compatibility but are unused here (the original also computed unused
    locals h, w and r = 0.86 from them — removed).
    """

    def __init__(self, n_classes=19, max_input_h=512, max_input_w=1024):
        # Deliberately bypasses ENet.__init__ and initializes the ModuleList
        # base directly with this variant's layer sequence.
        nn.ModuleList.__init__(self, [
            Downsampler(3, 16),
            Bottleneck(16, 64, 0.01, downsample=True),
            Bottleneck(64, 64, 0.01),
            Bottleneck(64, 64, 0.01),
            Bottleneck(64, 128, 0.1, downsample=True),
            Bottleneck(128, 128, 0.1),
            Bottleneck(128, 128, 0.1, asymmetric_ksize=5),
            Bottleneck(128, 128, 0.1),
            Bottleneck(128, 128, 0.1, asymmetric_ksize=5),
            Bottleneck(128, 128, 0.1),
            Bottleneck(128, 128, 0.1, asymmetric_ksize=5),
            Bottleneck(128, 128, 0.1),
            Bottleneck(128, 128, 0.1, asymmetric_ksize=5),
            Upsampler(128, 64),
            Bottleneck(64, 64, 0.1),
            Upsampler(64, 16),
            nn.ConvTranspose2d(16, (n_classes + 1), (2, 2), (2, 2)),
        ])
# Swedish Traffic Signs dataset loader.  Picks inner folder Set1/Set2 from
# (seed, train), parses annotations.txt ("file: sign;sign;..." lines, each
# sign "visibility, x1, y1, x2, y2, type, name"), skips MISC_SIGNS and empty
# entries, and yields (image_path, [Sign, ...]) pairs via __getitem__.
# _read_bbox._float strips trailing non-numeric characters until the value
# parses.  NOTE(review): collapsed onto one line upstream; code left untouched.
class STS(): def __init__(self, directory, train=True, seed=0): cwd = os.getcwd().replace('dataset', '') directory = path.join(cwd, directory) ensure_dataset_exists(directory) self._directory = directory self._inner = 'Set{}'.format((1 + (((seed + 1) + int(train)) % 2))) self._data = self._load_signs(self._directory, self._inner) def _load_files(self, directory, inner): files = set() with open(path.join(directory, inner, 'annotations.txt')) as f: for l in f: files.add(l.split(':', 1)[0]) return sorted(files) def _read_bbox(self, parts): def _float(x): try: return float(x) except ValueError: if (len(x) > 0): return _float(x[:(- 1)]) raise return [_float(x) for x in parts] def _load_signs(self, directory, inner): with open(path.join(directory, inner, 'annotations.txt')) as f: lines = [l.strip() for l in f] (keys, values) = zip(*(l.split(':', 1) for l in lines)) all_signs = [] for v in values: signs = [] for sign in v.split(';'): if ((sign == ['']) or (sign == '')): continue parts = [s.strip() for s in sign.split(',')] if (parts[0] == 'MISC_SIGNS'): continue signs.append(Sign(visibility=parts[0], bbox=self._read_bbox(parts[1:5]), type=parts[5], name=parts[6])) all_signs.append(signs) images = [path.join(directory, inner, f) for f in keys] return list(zip(images, all_signs)) def __len__(self): return len(self._data) def __getitem__(self, i): return self._data[i]
def generate_datasets(data_root):
    """Build (train, test) example lists from the Stanford-Dogs-style .mat split files.

    Each example is {'filename': absolute image path, 'label': int class index},
    where the class index is the sorted position of the image's top-level folder
    under `Images`.
    """
    class_names = sorted(os.listdir(os.path.join(data_root, 'Images')))

    def _build_split(mat_name):
        # file_list is a MATLAB cell array: entry [i][0][0] holds the
        # relative image path, e.g. 'n02085620-Chihuahua/n02085620_10074.jpg'.
        file_list = sio.loadmat(os.path.join(data_root, mat_name))['file_list']
        split = []
        for entry in file_list:
            images_file = str(entry[0][0])
            label = class_names.index(images_file.split('/')[0])
            split.append({
                'filename': os.path.join(data_root, 'Images', images_file),
                'label': int(label),
            })
        return split

    # deduplicated: the original repeated this loop verbatim for each split
    return (_build_split('train_list.mat'), _build_split('test_list.mat'))
def postprocess_one(pred_sql, schema):
    """Normalize one predicted SQL string, then post-process it against `schema`.

    Keyword placeholders (group_by, order_by, limit_value, _EOS, value,
    distinct) are rewritten first; nested queries (more than one 'select')
    go through postprocess_nested, otherwise postprocess_single.
    """
    pred_sql = pred_sql.replace('group_by', 'group by').replace('order_by', 'order by').replace('limit_value', 'limit 1').replace('_EOS', '').replace(' value ', ' 1 ').replace('distinct', '').strip(',').strip()
    if pred_sql.endswith('value'):
        pred_sql = pred_sql[:-len('value')] + '1'
    try:
        format_sql = sqlparse.format(pred_sql, reindent=True)
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt /
        # SystemExit are no longer swallowed; unparsable SQL is still
        # returned as-is (best effort preserved).
        return pred_sql
    format_sql_2 = normalize_space(format_sql)
    num_select = format_sql_2.count('select')
    if num_select > 1:
        final_sql = postprocess_nested(format_sql_2, schema)
    else:
        final_sql, _ = postprocess_single(format_sql_2, schema)
    return final_sql
class ResidualStack(nn.Module):
    """A stack of `num_residual_layers` Residual blocks followed by a final ReLU."""

    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens, use_kaiming_normal):
        super(ResidualStack, self).__init__()
        self._num_residual_layers = num_residual_layers
        # BUG FIX: the original built the list as [Residual(...)] * N, which
        # registers the SAME module object N times — every "layer" shared one
        # set of parameters.  Construct a distinct Residual per layer.
        self._layers = nn.ModuleList([
            Residual(in_channels, num_hiddens, num_residual_hiddens, use_kaiming_normal)
            for _ in range(self._num_residual_layers)
        ])

    def forward(self, x):
        for i in range(self._num_residual_layers):
            x = self._layers[i](x)
        return F.relu(x)
def read_train_split_to_str(dataset_dir):
    """Load the 'bbox_train' split of `dataset_dir` via the shared directory reader."""
    return read_train_test_directory_to_str(os.path.join(dataset_dir, 'bbox_train'))
def compute_on_dataset(model, data_loader, device, timer=None):
    """Run stereo inference over `data_loader`; return {image_id: result on CPU}."""
    model.eval()
    results_dict = {}
    cpu_device = torch.device('cpu')
    for _, batch in enumerate(tqdm(data_loader)):
        images_left, images_right, targets, calib, image_ids = batch
        with torch.no_grad():
            if timer:
                timer.tic()
            images_left = images_left.to(device)
            images_right = images_right.to(device)
            output = model(images_left, images_right, calib=calib)
            if timer:
                # CUDA kernels are async; synchronize so the timing is real
                if cfg.MODEL.DEVICE != 'cpu':
                    torch.cuda.synchronize()
                timer.toc()
            output = [o.to(cpu_device) for o in output]
        results_dict.update(zip(image_ids, output))
    return results_dict
def parse_notes_and_chords(stream: Stream,
                           resolution: int = DEFAULT_RESOLUTION) -> Tuple[List[Note], List[Chord]]:
    """Convert a music21 stream into parallel note and chord event lists.

    Offsets and durations are quantized to ``resolution`` ticks per quarter
    note. Tied notes are merged into a single Note whose duration accumulates
    across the tie; grace notes and rests are skipped. Chords are collected
    as-is (no tie merging).
    """
    notes: List[Note] = []
    chords: List[Chord] = []
    # pitch -> index into `notes` of a note whose tie is still open.
    active_ties: Dict[int, int] = {}
    for element in stream.flat.notesAndRests:
        if not (element.isNote or element.isChord):
            continue
        if element.duration.isGrace:
            continue
        onset = round(float(element.offset * resolution))
        length = round(float(element.quarterLength) * resolution)
        velocity = element.volume.velocity
        if velocity is not None:
            velocity = round(velocity)
        if element.isNote:
            pitch = int(element.pitch.midi)
            tie_continues = bool(element.tie and
                                 element.tie.type in ('start', 'continue'))
            if pitch in active_ties:
                # Extend the previously started note instead of adding one.
                idx = active_ties[pitch]
                notes[idx].duration += length
                if not tie_continues:
                    del active_ties[pitch]
            else:
                notes.append(Note(time=onset, pitch=pitch,
                                  duration=length, velocity=velocity))
                if tie_continues:
                    active_ties[pitch] = len(notes) - 1
        else:
            chords.append(Chord(time=onset,
                                pitches=[int(n.pitch.midi) for n in element.notes],
                                duration=length, velocity=velocity))
    return (notes, chords)
def sturm_bound(level, weight=2):
    """Return the Sturm bound for modular forms of the given level and weight.

    Args:
        level: either an arithmetic subgroup or an integer N (interpreted as
            Gamma0(N)).
        weight: weight of the modular forms (default 2).

    Raises:
        ValueError: for noncongruence arithmetic subgroups.
        TypeError: if ``level`` is neither an arithmetic subgroup nor an
            integer. (Previously the function silently returned None here.)
    """
    if is_ArithmeticSubgroup(level):
        if level.is_congruence():
            return level.sturm_bound(weight)
        raise ValueError('no Sturm bound defined for noncongruence subgroups')
    if isinstance(level, (int, Integer)):
        return Gamma0(level).sturm_bound(weight)
    raise TypeError(
        'level must be an arithmetic subgroup or an integer, got %r' % (level,))
def visit_forward(variables, callback, fclosed=None):
    """Recursively visit parent functions of ``variables`` in forward order.

    For each variable's parent function (visited at most once, tracked in
    ``fclosed``), the function's own inputs are visited first. If any input
    chain signals stop, the function is disabled and the callback is skipped;
    otherwise ``callback(f)`` fires. Returns the aggregated stop flag.
    """
    if fclosed is None:
        fclosed = set()
    stop_any = False
    for var in variables:
        stop_any |= var.stop
        func = var.parent
        if func is None or func in fclosed:
            continue
        fclosed.add(func)
        inputs_stopped = visit_forward(func.inputs, callback, fclosed)
        stop_any |= inputs_stopped
        if inputs_stopped:
            print('Skip {} by stop signal.'.format(func.name))
            func.disable()
            continue
        callback(func)
        print(func.name)
    return stop_any
class Partition3(nn.Module):
    """Pipeline-parallel partition holding BERT encoder layers 8-11 (partial).

    NOTE(review): this class appears auto-generated by a model-partitioning
    tool (scope strings and the flat ``t_0``..``t_9`` register-style forward
    body are machine-produced) — do not hand-edit the forward logic; confirm
    with the generator before changing anything here.

    The partition covers the tail of layer 8 (attention output LayerNorm
    through output LayerNorm), all of layers 9 and 10, and the self-attention
    of layer 11 (through its dropout). The head-splitting constants (16, 64)
    and hidden size 1024 correspond to a BERT-large configuration.
    """

    # Full traced scope names, in execution order; index i maps to module l_i.
    LAYER_SCOPES = [
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]']

    # This partition owns no free-standing parameters or buffers.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:3'):
        """Wire traced layers/tensors into this partition and move it to ``device``.

        Args:
            layers: mapping from traced scope name -> nn.Module.
            tensors: mapping from tensor scope name -> Parameter or buffer.
            device: target device for this pipeline stage (default 'cuda:3').
        """
        super().__init__()
        # Register each traced layer under the short name l_<idx>.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Flat structure of the three forward inputs (see unflatten in forward).
        self.input_structure = [1, 1, 1]
        # Short name -> original huggingface-style parameter path, used by the
        # state_dict/load_state_dict wrappers below.
        self.lookup = {'l_0': 'bert.encoder.8.attention.output.LayerNorm',
                       'l_1': 'bert.encoder.8.intermediate.dense',
                       'l_2': 'bert.encoder.8.output.dense',
                       'l_3': 'bert.encoder.8.output.dropout',
                       'l_4': 'bert.encoder.8.output.LayerNorm',
                       'l_5': 'bert.encoder.9.attention.self.query',
                       'l_6': 'bert.encoder.9.attention.self.key',
                       'l_7': 'bert.encoder.9.attention.self.value',
                       'l_8': 'bert.encoder.9.attention.self.softmax',
                       'l_9': 'bert.encoder.9.attention.self.dropout',
                       'l_10': 'bert.encoder.9.attention.output.dense',
                       'l_11': 'bert.encoder.9.attention.output.dropout',
                       'l_12': 'bert.encoder.9.attention.output.LayerNorm',
                       'l_13': 'bert.encoder.9.intermediate.dense',
                       'l_14': 'bert.encoder.9.output.dense',
                       'l_15': 'bert.encoder.9.output.dropout',
                       'l_16': 'bert.encoder.9.output.LayerNorm',
                       'l_17': 'bert.encoder.10.attention.self.query',
                       'l_18': 'bert.encoder.10.attention.self.key',
                       'l_19': 'bert.encoder.10.attention.self.value',
                       'l_20': 'bert.encoder.10.attention.self.softmax',
                       'l_21': 'bert.encoder.10.attention.self.dropout',
                       'l_22': 'bert.encoder.10.attention.output.dense',
                       'l_23': 'bert.encoder.10.attention.output.dropout',
                       'l_24': 'bert.encoder.10.attention.output.LayerNorm',
                       'l_25': 'bert.encoder.10.intermediate.dense',
                       'l_26': 'bert.encoder.10.output.dense',
                       'l_27': 'bert.encoder.10.output.dropout',
                       'l_28': 'bert.encoder.10.output.LayerNorm',
                       'l_29': 'bert.encoder.11.attention.self.query',
                       'l_30': 'bert.encoder.11.attention.self.key',
                       'l_31': 'bert.encoder.11.attention.self.value',
                       'l_32': 'bert.encoder.11.attention.self.softmax',
                       'l_33': 'bert.encoder.11.attention.self.dropout'}
        self.to(self.device)

    def forward(self, *args):
        """Run layers 8(tail)-11(self-attention) on the flattened inputs.

        ``args`` flattens (attention_mask, x0, x1) where x0/x1 are the two
        residual-stream tensors handed over from the previous partition.
        Returns ``[t_0, t_7]``: the layer-10 output hidden states and the
        layer-11 attention context, for the next partition to consume.
        Generated code — variable names t_0..t_9 are reused registers.
        """
        (attention_mask, x0, x1) = unflatten(args, self.input_structure)
        # --- tail of layer 8: residual add, LayerNorm, FFN ---
        t_0 = (x1 + x0)
        t_0 = self.l_0(t_0)
        t_1 = self.l_1(t_0)
        t_1 = torch.nn.functional.gelu(t_1)
        t_1 = self.l_2(t_1)
        t_1 = self.l_3(t_1)
        t_0 = (t_1 + t_0)
        t_0 = self.l_4(t_0)
        # --- layer 9 self-attention (Q/K/V -> heads -> scaled dot-product) ---
        t_1 = self.l_5(t_0)
        t_2 = self.l_6(t_0)
        t_3 = self.l_7(t_0)
        t_4 = t_1.size()
        t_5 = t_2.size()
        t_6 = t_3.size()
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_7 = t_4[0]
        t_8 = t_4[1]
        t_9 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_1.view(t_7, t_8, t_9, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_9 = t_5[0]
        t_8 = t_5[1]
        t_7 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_2.view(t_9, t_8, t_7, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_7 = t_6[0]
        t_8 = t_6[1]
        t_9 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_3.view(t_7, t_8, t_9, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_4, t_5)
        t_4 = math.sqrt(64)
        t_4 = (t_5 / t_4)
        t_4 = (t_4 + attention_mask)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_6 = torch.matmul(t_4, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_4 = t_6.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (1024,))
        t_5 = t_4[0]
        t_9 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_6.view(t_5, t_9, t_4)
        # --- layer 9: attention output projection + residual + FFN ---
        t_4 = self.l_10(t_4)
        t_4 = self.l_11(t_4)
        t_0 = (t_4 + t_0)
        t_0 = self.l_12(t_0)
        t_4 = self.l_13(t_0)
        t_4 = torch.nn.functional.gelu(t_4)
        t_4 = self.l_14(t_4)
        t_4 = self.l_15(t_4)
        t_0 = (t_4 + t_0)
        t_0 = self.l_16(t_0)
        # --- layer 10 self-attention ---
        t_4 = self.l_17(t_0)
        t_9 = self.l_18(t_0)
        t_5 = self.l_19(t_0)
        t_6 = t_4.size()
        t_8 = t_9.size()
        t_7 = t_5.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_3 = t_6[0]
        t_2 = t_6[1]
        t_1 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_3, t_2, t_1, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_1 = t_8[0]
        t_2 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_1, t_2, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_3 = t_7[0]
        t_2 = t_7[1]
        t_1 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_3, t_2, t_1, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_8 = t_8.transpose((- 1), (- 2))
        t_8 = torch.matmul(t_6, t_8)
        t_6 = math.sqrt(64)
        t_6 = (t_8 / t_6)
        t_6 = (t_6 + attention_mask)
        t_6 = self.l_20(t_6)
        t_6 = self.l_21(t_6)
        t_7 = torch.matmul(t_6, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_6 = t_7.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (1024,))
        t_8 = t_6[0]
        t_1 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_7.view(t_8, t_1, t_6)
        # --- layer 10: attention output projection + residual + FFN ---
        t_6 = self.l_22(t_6)
        t_6 = self.l_23(t_6)
        t_0 = (t_6 + t_0)
        t_0 = self.l_24(t_0)
        t_6 = self.l_25(t_0)
        t_6 = torch.nn.functional.gelu(t_6)
        t_6 = self.l_26(t_6)
        t_6 = self.l_27(t_6)
        t_0 = (t_6 + t_0)
        t_0 = self.l_28(t_0)
        # --- layer 11 self-attention (ends mid-layer; projection is in the
        # next partition) ---
        t_6 = self.l_29(t_0)
        t_1 = self.l_30(t_0)
        t_8 = self.l_31(t_0)
        t_7 = t_6.size()
        t_2 = t_1.size()
        t_3 = t_8.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_5 = t_7[0]
        t_9 = t_7[1]
        t_4 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_6.view(t_5, t_9, t_4, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_2 = t_2[slice(None, (- 1), None)]
        t_2 = (t_2 + (16, 64))
        t_4 = t_2[0]
        t_9 = t_2[1]
        t_5 = t_2[2]
        t_2 = t_2[3]
        t_2 = t_1.view(t_4, t_9, t_5, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_5 = t_3[0]
        t_9 = t_3[1]
        t_4 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_8.view(t_5, t_9, t_4, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_2 = t_2.transpose((- 1), (- 2))
        t_2 = torch.matmul(t_7, t_2)
        t_7 = math.sqrt(64)
        t_7 = (t_2 / t_7)
        t_7 = (t_7 + attention_mask)
        t_7 = self.l_32(t_7)
        t_7 = self.l_33(t_7)
        t_3 = torch.matmul(t_7, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_3 = t_3.contiguous()
        t_7 = t_3.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (1024,))
        t_2 = t_7[0]
        t_4 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_3.view(t_2, t_4, t_7)
        return list(flatten((t_0, t_7)))

    # The wrappers below delegate to module-level helpers (defined elsewhere)
    # that translate between partition-local names (l_*/p_*/b_*) and the
    # original model's parameter paths via ``self.lookup``.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class TestConstant(unittest.TestCase):
    """Unit tests for the Constant objective: value, gradient, and printing."""

    def test_objective_function(self):
        """Scalar constants evaluate to their stored value regardless of input."""
        self.assertEqual(objective.Constant(1).calculate_objective_function(None), 1)
        # The value is captured at construction time; later rebinding of the
        # local has no effect on the objective.
        captured = 2
        obj = objective.Constant(captured)
        captured = 3
        self.assertEqual(obj.calculate_objective_function(None), 2)
        # Complex values are supported as well.
        self.assertEqual(Constant((3 + 2j)).calculate_objective_function(None), (3 + 2j))

    def test_gradient(self):
        """Gradient of a constant is zero for every parameter entry."""
        param = DirectParam([1, 2])
        self.assertEqual(list(objective.Constant(3).calculate_gradient(param)), [0, 0])
        self.assertEqual(list(Constant((3 + 2j)).calculate_gradient(param)), [0, 0])

    def test_objective_function_matrix(self):
        """Array-valued constants are returned unchanged."""
        for value in (np.array([1, 2]), np.array([[1, 2], [3, 4]])):
            obj = Constant(value)
            np.testing.assert_array_equal(obj.calculate_objective_function(None), value)

    def test_gradient_matrix(self):
        """Array-constant gradients are all-zero, shaped value_shape + param_shape."""
        param = DirectParam([1, 2, 3])
        np.testing.assert_array_equal(
            Constant(np.array([1, 2])).calculate_gradient(param),
            np.zeros((2, 3)))
        np.testing.assert_array_equal(
            Constant(np.array([[1, 2], [3, 4], [5, 6]])).calculate_gradient(param),
            np.zeros((3, 2, 3)))

    def test_string(self):
        """str() of a Constant renders its value."""
        self.assertEqual(str(objective.Constant(12)), '12')
def rotx(t):
    """Return the 3x3 rotation matrix for a rotation of ``t`` radians about x."""
    cos_t, sin_t = np.cos(t), np.sin(t)
    return np.array([
        [1.0, 0.0, 0.0],
        [0.0, cos_t, -sin_t],
        [0.0, sin_t, cos_t],
    ])
class TextCapsCapEvalDataset(CaptionEvalDataset):
    """Caption-evaluation dataset for TextCaps annotations.

    Deliberately calls ``BaseDataset.__init__`` (not super()) to reuse the
    base loading while skipping the intermediate class's init, then adapts
    the TextCaps record format to the common caption schema.
    """

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        BaseDataset.__init__(self, vis_processor, text_processor, vis_root, ann_paths)
        # TextCaps stores its records under the 4th entry's 'data' key; keep
        # only records that actually carry a caption.
        self.annotation = [ann for ann in self.annotation[3]['data']
                           if 'caption_str' in ann]
        self.img_ids = {}
        next_id = 0
        for ann in self.annotation:
            image_id = ann['image_id']
            if image_id not in self.img_ids:
                self.img_ids[image_id] = next_id
                next_id += 1
            # Normalize to the common schema: 'image' filename + 'caption'.
            ann['image'] = image_id + '.jpg'
            ann['caption'] = ann.pop('caption_str')
        self._add_instance_ids()
def main(args):
    """Load, process, and dump the gold/unlabeled MIMIC-III note splits.

    Args:
        args: parsed CLI namespace with ``mimic_notes``, ``anno_data`` and
            ``outputdir`` attributes.
    """
    mimic_notes_fpath = args.mimic_notes
    anno_data_path = args.anno_data
    outputdir = args.outputdir
    # Split name -> row-id TSV filename (replaced below by loaded documents).
    dataset = {'gold': 'gold.pain_complications.mimic.row_ids.tsv',
               'unlabeled': 'unlabeled.pain_complications.mimic.row_ids.tsv'}
    print('Loading MIMIC-III notes ...')
    for name, row_ids_fname in list(dataset.items()):
        row_ids = load_row_ids(f'{anno_data_path}/{row_ids_fname}')
        dataset[name] = load_clinical_notes(row_ids, mimic_notes_fpath)
        print('... loaded ' + f'{len(dataset[name])} {name}' + ' documents.')
    print('Processing MIMIC-III notes...')
    process_clinical_notes(dataset['gold'])
    process_clinical_notes(dataset['unlabeled'])
    print('Writing TSV output ...')
    dump_tsvs(dataset, outputdir)
    print('Done!')
def main():
    """CLI entry point: ``<waves> <seeds>`` — run conditional ALAAM estimation
    on a snowball-sampled n=500 network with binary and continuous attributes.
    """
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], '')
    except getopt.GetoptError:
        # Narrowed from a bare `except:` (which also caught SystemExit and
        # KeyboardInterrupt); only argument-parsing errors show usage.
        usage(sys.argv[0])
    for (opt, arg) in opts:
        # No options are supported; any supplied option is a usage error.
        usage(sys.argv[0])
    if (len(args) != 2):
        usage(sys.argv[0])
    waves = int(args[0])
    seeds = int(args[1])
    print('Conditional estimation on snowball sample with ' + str(waves) +
          ' waves and ' + str(seeds) + ' seeds')
    # Filename fragment shared by every input file of this configuration.
    sampled_filenames_part = '_waves' + str(waves) + '_seeds' + str(seeds)
    estimateALAAMEE.run_on_network_attr(
        ('n500_kstar_simulate' + sampled_filenames_part + '_num6700000.txt'),
        [changeDensity, changeActivity, changeContagion,
         partial(changeoOb, 'binaryAttribute'),
         partial(changeoOc, 'continuousAttribute')],
        ['Density', 'Activity', 'Contagion', 'Binary', 'Continuous'],
        ('sample-n500_bin_cont6700000' + sampled_filenames_part + '.txt'),
        ('binaryAttribute_50_50_n500' + sampled_filenames_part + '_num6700000.txt'),
        ('continuousAttributes_n500' + sampled_filenames_part + '_num6700000.txt'),
        catattr_filename=None,
        sampler_func=conditionalALAAMsampler,
        zone_filename=('snowball_zonefile' + sampled_filenames_part + '_num6700000.txt'))
class GooglenetModel(model.Model):
    """GoogLeNet (Inception v1) benchmark model: 224px input, batch 32, lr 0.005."""

    def __init__(self):
        super(GooglenetModel, self).__init__('googlenet', 224, 32, 0.005)

    def add_inference(self, cnn):
        """Build the GoogLeNet tower on ``cnn``."""

        def inception_v1(cnn, n1x1, n3x3r, n3x3, n5x5r, n5x5, npool):
            # Four parallel branches: 1x1, 1x1->3x3, 1x1->5x5, pool->1x1.
            branches = [[('conv', n1x1, 1, 1)],
                        [('conv', n3x3r, 1, 1), ('conv', n3x3, 3, 3)],
                        [('conv', n5x5r, 1, 1), ('conv', n5x5, 5, 5)],
                        [('mpool', 3, 3, 1, 1, 'SAME'), ('conv', npool, 1, 1)]]
            cnn.inception_module('incept_v1', branches)

        # Stem.
        cnn.conv(64, 7, 7, 2, 2)
        cnn.mpool(3, 3, 2, 2, mode='SAME')
        cnn.conv(64, 1, 1)
        cnn.conv(192, 3, 3)
        cnn.mpool(3, 3, 2, 2, mode='SAME')
        # Inception 3a-3b.
        inception_v1(cnn, 64, 96, 128, 16, 32, 32)
        inception_v1(cnn, 128, 128, 192, 32, 96, 64)
        cnn.mpool(3, 3, 2, 2, mode='SAME')
        # Inception 4a-4e.
        inception_v1(cnn, 192, 96, 208, 16, 48, 64)
        inception_v1(cnn, 160, 112, 224, 24, 64, 64)
        inception_v1(cnn, 128, 128, 256, 24, 64, 64)
        inception_v1(cnn, 112, 144, 288, 32, 64, 64)
        inception_v1(cnn, 256, 160, 320, 32, 128, 128)
        cnn.mpool(3, 3, 2, 2, mode='SAME')
        # Inception 5a-5b.
        inception_v1(cnn, 256, 160, 320, 32, 128, 128)
        inception_v1(cnn, 384, 192, 384, 48, 128, 128)
        cnn.apool(7, 7, 1, 1, mode='VALID')
        cnn.reshape([-1, 1024])
.script
def batch_select(data, mask, dims, dim, index):
    """Select slice ``index`` along ``dim`` from data/mask, dropping that dim.

    NOTE(review): the leading ``.script`` above is the tail of a decorator
    (likely ``@torch.jit.script``) whose start lies outside this chunk —
    confirm against the full file.

    ``dims`` appears to be a 1-D flag tensor indexed with ``dim - 1``
    (1-based dim convention — TODO confirm): when the flag is set the mask is
    broadcast (slice 0 taken), otherwise the matching mask slice is taken.
    The entry for the selected dim is removed from ``dims``.
    """
    data = data.select(dim, index)
    if dims[(dim - 1)]:
        mask = mask.select(dim, 0)
    else:
        mask = mask.select(dim, index)
    dims = torch.cat((dims[:(dim - 1)], dims[dim:dims.size(0)]))
    return (data, mask, dims)
class IBertForMaskedLM():
    """Import-time placeholder for ``IBertForMaskedLM`` when PyTorch is absent.

    Both construction and ``from_pretrained`` defer to ``requires_pytorch``
    (defined elsewhere), which presumably raises an informative error about
    the missing PyTorch dependency — confirm against its definition.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def test_version_1_point_10():
    """NumpyVersion must order 1.10 after 1.9 (numeric, not lexicographic)."""
    for older, newer in [('1.9.0', '1.10.0'),
                         ('1.11.0', '1.11.1'),
                         ('1.99.11', '1.99.12')]:
        assert_(NumpyVersion(older) < newer)
    assert_(NumpyVersion('1.11.0') == '1.11.0')
def calc_wer_on_dataset(dataset, refs, options, hyps):
    """Compute WER of ``hyps`` against references from a dataset or a dict.

    Exactly one reference source is used: ``dataset`` (a RETURNN-style dataset
    providing tagged reference strings) or ``refs`` (a mapping seq_tag -> ref
    string, processed in order of increasing ref length). Hypotheses are looked
    up by sequence tag in ``hyps``. WER is accumulated incrementally through
    ``wer_compute.step(session, ...)`` (module-level globals).

    Args:
        dataset: dataset object, or falsy to use ``refs`` instead.
        refs: dict of seq_tag -> reference string (used when no dataset).
        options: namespace with ``startseq``, ``endseq``, ``key`` and
            ``expect_full`` attributes.
        hyps: dict of seq_tag -> hypothesis string; every processed seq tag
            must be present.

    Returns:
        The final accumulated WER (float; 1.0 if nothing was processed).
    """
    assert (dataset or refs)
    start_time = time.time()
    seq_len_stats = {'refs': Stats(), 'hyps': Stats()}
    seq_idx = options.startseq
    if (options.endseq < 0):
        # Negative endseq means "no upper bound".
        options.endseq = float('inf')
    wer = 1.0
    # Tags not yet consumed; used for the expect_full check at the end.
    remaining_hyp_seq_tags = set(hyps.keys())
    interactive = (util.is_tty() and (not log.verbose[5]))
    collected = {'hyps': [], 'refs': []}
    # Flush to wer_compute after this many collected pairs (currently each one).
    max_num_collected = 1
    if dataset:
        dataset.init_seq_order(epoch=1)
    else:
        # Process shortest references first.
        refs = sorted(refs.items(), key=(lambda item: len(item[1])))
    while True:
        if (seq_idx > options.endseq):
            break
        if dataset:
            if (not dataset.is_less_than_num_seqs(seq_idx)):
                break
            dataset.load_seqs(seq_idx, (seq_idx + 1))
            complete_frac = dataset.get_complete_frac(seq_idx)
            seq_tag = dataset.get_tag(seq_idx)
            assert isinstance(seq_tag, str)
            ref = dataset.get_data(seq_idx, options.key)
            # References may arrive as 0-d numpy arrays or bytes; normalize to str.
            if isinstance(ref, numpy.ndarray):
                assert (ref.shape == ())
                ref = ref.flatten()[0]
            if isinstance(ref, bytes):
                ref = ref.decode('utf8')
            assert isinstance(ref, str)
            # Total count may be unknown or only estimated for some datasets.
            try:
                num_seqs_s = str(dataset.num_seqs)
            except NotImplementedError:
                try:
                    num_seqs_s = ('~%i' % dataset.estimated_num_seqs)
                except TypeError:
                    num_seqs_s = '?'
        else:
            if (seq_idx >= len(refs)):
                break
            complete_frac = ((seq_idx + 1) / float(len(refs)))
            (seq_tag, ref) = refs[seq_idx]
            assert isinstance(seq_tag, str)
            assert isinstance(ref, str)
            num_seqs_s = str(len(refs))
        # Progress/ETA reporting.
        start_elapsed = (time.time() - start_time)
        progress_prefix = ('%i/%s (WER %.02f%%)' % (seq_idx, num_seqs_s, (wer * 100)))
        progress = ('%s (%.02f%%)' % (progress_prefix, (complete_frac * 100)))
        if (complete_frac > 0):
            total_time_estimated = (start_elapsed / complete_frac)
            remaining_estimated = (total_time_estimated - start_elapsed)
            progress += (' (%s)' % hms(remaining_estimated))
        # KeyError/ValueError here means a seq tag without (or with a duplicate)
        # hypothesis.
        remaining_hyp_seq_tags.remove(seq_tag)
        hyp = hyps[seq_tag]
        seq_len_stats['hyps'].collect([len(hyp)])
        seq_len_stats['refs'].collect([len(ref)])
        collected['hyps'].append(hyp)
        collected['refs'].append(ref)
        if (len(collected['hyps']) >= max_num_collected):
            wer = wer_compute.step(session, **collected)
            del collected['hyps'][:]
            del collected['refs'][:]
        if interactive:
            util.progress_bar_with_time(complete_frac, prefix=progress_prefix)
        elif log.verbose[5]:
            print(progress_prefix, ('seq tag %r, ref/hyp len %i/%i chars' % (seq_tag, len(ref), len(hyp))))
        seq_idx += 1
    # Flush any remainder (no-op with max_num_collected == 1 unless the loop
    # exited between collect and flush).
    if (len(collected['hyps']) > 0):
        wer = wer_compute.step(session, **collected)
    print(('Done. Num seqs %i. Total time %s.' % (seq_idx, hms((time.time() - start_time)))), file=log.v1)
    print(('Remaining num hyp seqs %i.' % (len(remaining_hyp_seq_tags),)), file=log.v1)
    if dataset:
        print(('More seqs which we did not dumped: %s.' % dataset.is_less_than_num_seqs(seq_idx)), file=log.v1)
    for key in ['hyps', 'refs']:
        seq_len_stats[key].dump(stream_prefix=('Seq-length %r %r ' % (key, options.key)), stream=log.v2)
    if options.expect_full:
        assert (not remaining_hyp_seq_tags), 'There are still remaining hypotheses.'
    return wer
def _get_long_description():
    """Read the README next to this file, for use as the package long description."""
    readme_path = Path(__file__).parent / 'README.md'
    with readme_path.open('r') as fh:
        return fh.read()
def depthwise_net_for_pruning(image, threshold, with_bias=False,
                              channel_last=False, name_scope='net1'):
    """Build a small conv/depthwise/affine net whose weights are prepared for
    pruning tests.

    After each layer is created, ``reset_the_weight_value`` (defined
    elsewhere; presumably zeroes/clamps weights against ``threshold`` along
    the given axis — confirm) is applied to that layer's inputs.
    """
    def _prepare(var, axis):
        # Post-process the just-created layer's parameters for pruning.
        reset_the_weight_value(var.parent.inputs, axis, threshold)

    with nn.parameter_scope(name_scope):
        h = image
        h /= 255.0
        h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), with_bias=False,
                           channel_last=channel_last, name='conv')
        _prepare(h, 0)
        h = PF.depthwise_convolution(h, kernel=(3, 3), with_bias=with_bias,
                                     name='depthwise_conv')
        _prepare(h, 0)
        h = PF.depthwise_deconvolution(h, kernel=(3, 3), with_bias=with_bias,
                                       name='depthwise_deconv')
        _prepare(h, 0)
        pred = PF.affine(h, 10, name='fc')
        # Affine weights are pruned along axis 1 (output units).
        _prepare(pred, 1)
        return pred
class Layout(Enum):
    """Identifiers for supported data layout modes.

    Values are grouped in decades by family; they are stable IDs, so do not
    renumber. Calling a member (e.g. ``Layout.stride(args)``) wraps it in an
    ``ExtEnum`` carrying extra per-layout arguments.
    """

    # 0-3: basic layouts.
    alignEU = 0
    compact = 1
    offset = 2
    stride = 3
    # 10-11: matrix layouts.
    matrix = 10
    matrix2 = 11
    # 20-23: IC-grouped layouts (leading underscore because identifiers
    # cannot start with a digit).
    _64IC = 20
    _32IC = 21
    _1IC = 22
    _16IC = 23
    # 30-32: T-series layouts.
    T3 = 30
    T4 = 31
    T5 = 32
    # 40-43: DMA layouts.
    DMAstride = 40
    DMA4Bank = 41
    DMAmatrix = 42
    DMAlinear = 43
    # 50-51, 60: XN variants.
    alignEU_XN = 50
    compact_XN = 51
    continuous_XN = 60

    def __call__(self, *args, **kargs):
        # Wrap this member plus layout parameters in an ExtEnum
        # (ExtEnum is defined elsewhere in the project).
        return ExtEnum(self, *args, **kargs)
class LassoBenchmark(Predictor, Estimator, Benchmark):
    """ASV benchmark for sklearn Lasso over dense/sparse data and precompute on/off."""

    param_names = ['representation', 'precompute']
    params = (['dense', 'sparse'], [True, False])

    def setup_cache(self):
        super().setup_cache()

    def make_data(self, params):
        """Synthesize a regression dataset matching the representation param."""
        representation, _ = params
        if representation == 'dense':
            return _synth_regression_dataset(n_samples=1000000, n_features=100)
        return _synth_regression_sparse_dataset(n_samples=50000,
                                                n_features=5000, density=0.01)

    def make_estimator(self, params):
        """Build a Lasso estimator with the benchmarked precompute setting."""
        _, precompute = params
        return Lasso(precompute=precompute, alpha=0.001, random_state=0)

    def make_scorers(self):
        make_gen_reg_scorers(self)

    def skip(self, params):
        """Skip sparse + precompute=False (combination not benchmarked)."""
        representation, precompute = params
        return representation == 'sparse' and precompute is False
class Datagen_set():
    """Mini-batch generator over pickled code trees and target token sequences.

    ``X`` holds pickle paths of trees; ``Y`` holds target token lists. Batches
    are prefetched in the background via BackgroundGenerator and wrapped in
    GeneratorLen so ``len()`` works.
    """

    def __init__(self, X, Y, batch_size, code_dic, nl_dic, train=True):
        self.X = X
        self.Y = Y
        self.batch_size = batch_size
        self.code_dic = code_dic
        self.nl_dic = nl_dic
        self.train = train

    def __len__(self):
        # Number of batches per epoch.
        return len(range(0, len(self.X), self.batch_size))

    def __call__(self, epoch=0):
        return GeneratorLen(BackgroundGenerator(self.gen(epoch), 1), len(self))

    def gen(self, epoch):
        """Yield (code ids, padded targets, raw labels, mapped targets) batches."""
        if self.train:
            # Deterministic per-epoch shuffle.
            np.random.seed(epoch)
            order = list(np.random.permutation(len(self.X)))
            xs = [self.X[i] for i in order]
            ys = [self.Y[i] for i in order]
        else:
            xs = [item for item in self.X]
            ys = [item for item in self.Y]
        for start in range(0, len(self.X), self.batch_size):
            batch_x = xs[start:(start + self.batch_size)]
            batch_y = ys[start:(start + self.batch_size)]
            trees = [read_pickle(path) for path in batch_x]
            y_raw = [[self.nl_dic[tok] for tok in seq] for seq in batch_y]
            labels = [traverse_label(tree) for tree in trees]
            x_ids = [np.array([self.code_dic[tok] for tok in lab], 'int32')
                     for lab in labels]
            x_raw = [traverse_label(tree) for tree in trees]
            # Pad/truncate targets to the batch max length, capped at 100.
            maxlen = min(max([len(seq) for seq in batch_y]), 100)
            y_padded = tf.constant(tf.keras.preprocessing.sequence.pad_sequences(
                batch_y, maxlen, padding='post', truncating='post', value=(-1.0)))
            yield (x_ids, y_padded, x_raw, y_raw)
class PairedEvaluationDataset(Dataset):
    """Dataset of (prediction, reference) image file pairs for paired metrics."""

    def __init__(self, pair_file_list, image_size=512):
        self.image_size = image_size
        self.pair_file_list = pair_file_list

    def __len__(self):
        return len(self.pair_file_list)

    def __getitem__(self, item):
        pred_path, ref_path = self.pair_file_list[item]
        return {
            'pred': load_img(pred_path, self.image_size),
            'ref': load_img(ref_path, self.image_size),
        }
class ScipyLBFGSBTuner(Tuner):
    """Tuner that optimizes a continuous search space with SciPy L-BFGS-B."""

    def tune_impl(self, **kwargs):
        """Run L-BFGS-B over the search space within ``self.budget`` evaluations.

        Keyword Args:
            init_method: how to pick the starting point when there is no
                ``start_config``: 'average' (midpoint of each range; default)
                or 'random'.

        Bug fixes: ``init_method is 'average'`` / ``is 'random'`` compared
        string identity instead of equality (fragile, SyntaxWarning on modern
        Python) — now ``==``; the list branch assigned ``config[k]`` where
        ``k`` was undefined (NameError) — now ``config[coordinate]``.
        """
        init_method = kwargs.get('init_method', 'average')
        if self.start_config is not None:
            config = self.start_config
        elif init_method == 'average':
            # Midpoint of every ranged parameter; first entry of list params.
            config = {}
            for coordinate in list(self.search_space.keys()):
                param = self.search_space[coordinate]
                if isinstance(param, dict):
                    if 'range' in param:
                        minval, maxval = param['range']
                        config[coordinate] = (maxval + minval) / 2
                elif isinstance(param, list):
                    config[coordinate] = param[0]
        elif init_method == 'random':
            config = RandomTuner.generate_configs(self.search_space, 1)[0]
        else:
            print('{} is invalid init_method!'.format(init_method))
            return
        # Fix a variable ordering so configs <-> flat arrays are consistent.
        sorted_vars = sorted(list(config.keys()))

        def config_to_array(cfg):
            return [cfg[var] for var in sorted_vars]

        def array_to_config(arr):
            return {var: value for var, value in zip(sorted_vars, arr)}

        def optimization_function(arr):
            score = self.evaluate_configs([array_to_config(arr)])
            # L-BFGS-B minimizes; negate when the objective is maximized.
            if self.maximize:
                score = (-1) * score
            return score

        bounds = [self.search_space[var]['range'] for var in sorted_vars]
        optimize.minimize(optimization_function, config_to_array(config),
                          method='L-BFGS-B', bounds=bounds,
                          options={'maxfun': self.budget})
(ipyvuetify=_HAS_IPYVUETIFY, IPython=_HAS_IPYTHON)
def init_filename_textfield():
    """Create the "Save As" text field, pre-filled with ``<cwd>/plot.pdf``.

    NOTE(review): the parenthesized line above is the tail of a decorator
    (presumably an optional-dependency guard taking availability flags)
    whose start lies outside this chunk — confirm against the full file.
    """
    return v.TextField(class_='ml-3 pl-3', style_='max-width: 600px',
                       v_model=str(Path.cwd().joinpath('plot.pdf')),
                       label='Save As')
class SwitchWhiten2d(Module):
    """Switchable Whitening over 2D feature maps.

    Combines batch (BN), instance (IN) and, depending on ``sw_type``,
    layer (LN) statistics with learned softmax weights, then whitens the
    features with the inverse square root of the combined covariance
    (computed by Newton-Schulz iteration).

    Args:
        num_features: total number of channels C.
        num_pergroup: channels per whitening group (C must be divisible).
        sw_type: which statistics are mixed — 2 (BN+IN), 3 (BN+IN+LN) or
            5 (BN+IN means with split weights, plus diagonal BN/IN variances
            and LN variance).
        T: number of Newton-Schulz iterations for the inverse square root.
        tie_weight: reuse the mean mixing weights for the variances.
        eps: ridge added to the covariance diagonal.
        momentum: EMA decay for running mean/covariance (training only).
        affine: apply a learned per-channel scale and shift after whitening.
    """

    def __init__(self, num_features, num_pergroup=16, sw_type=2, T=5,
                 tie_weight=False, eps=1e-05, momentum=0.99, affine=True):
        super(SwitchWhiten2d, self).__init__()
        if (sw_type not in [2, 3, 5]):
            raise ValueError('sw_type should be in [2, 3, 5], but got {}'.format(sw_type))
        assert ((num_features % num_pergroup) == 0)
        self.num_features = num_features
        self.num_pergroup = num_pergroup
        self.num_groups = (num_features // num_pergroup)
        self.sw_type = sw_type
        self.T = T
        self.tie_weight = tie_weight
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        # One mixing weight per statistic source (2, 3 or 5 components).
        num_components = sw_type
        self.sw_mean_weight = Parameter(torch.ones(num_components))
        if (not self.tie_weight):
            self.sw_var_weight = Parameter(torch.ones(num_components))
        else:
            self.register_parameter('sw_var_weight', None)
        if self.affine:
            self.weight = Parameter(torch.ones(num_features))
            self.bias = Parameter(torch.zeros(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        # Running stats are kept per group: mean (g, c, 1), covariance (g, c, c).
        self.register_buffer('running_mean', torch.zeros(self.num_groups, num_pergroup, 1))
        self.register_buffer('running_cov', torch.eye(num_pergroup).unsqueeze(0).repeat(self.num_groups, 1, 1))
        self.reset_parameters()

    def reset_parameters(self):
        # NOTE(review): running_cov is zeroed here although the buffer was
        # registered as identity above — eval-mode whitening before any
        # training step therefore sees a (ridge-regularized) zero covariance.
        # Confirm whether identity was intended.
        self.running_mean.zero_()
        self.running_cov.zero_()
        nn.init.ones_(self.sw_mean_weight)
        if (not self.tie_weight):
            nn.init.ones_(self.sw_var_weight)
        if self.affine:
            nn.init.ones_(self.weight)
            nn.init.zeros_(self.bias)

    def __repr__(self):
        return '{name}({num_features}, num_pergroup={num_pergroup}, sw_type={sw_type}, T={T}, tie_weight={tie_weight}, eps={eps}, momentum={momentum}, affine={affine})'.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, x):
        """Whiten ``x`` of shape (N, C, H, W) and return the same shape."""
        (N, C, H, W) = x.size()
        (c, g) = (self.num_pergroup, self.num_groups)
        # Group view for batch statistics: (g, c, N*H*W).
        in_data_t = x.transpose(0, 1).contiguous()
        in_data_t = in_data_t.view(g, c, (- 1))
        if self.training:
            # Batch mean/covariance per group, plus EMA update of running stats.
            mean_bn = in_data_t.mean((- 1), keepdim=True)
            in_data_bn = (in_data_t - mean_bn)
            cov_bn = torch.bmm(in_data_bn, in_data_bn.transpose(1, 2)).div(((H * W) * N))
            self.running_mean.mul_(self.momentum)
            self.running_mean.add_(((1 - self.momentum) * mean_bn.data))
            self.running_cov.mul_(self.momentum)
            self.running_cov.add_(((1 - self.momentum) * cov_bn.data))
        else:
            mean_bn = torch.autograd.Variable(self.running_mean)
            cov_bn = torch.autograd.Variable(self.running_cov)
        # Broadcast BN stats to every sample: (N*g, c, 1) and (N*g, c, c).
        mean_bn = mean_bn.view(1, g, c, 1).expand(N, g, c, 1).contiguous()
        mean_bn = mean_bn.view((N * g), c, 1)
        cov_bn = cov_bn.view(1, g, c, c).expand(N, g, c, c).contiguous()
        cov_bn = cov_bn.view((N * g), c, c)
        # Per-sample group view for instance statistics: (N*g, c, H*W).
        in_data = x.view((N * g), c, (- 1))
        eye = in_data.data.new().resize_(c, c)
        eye = torch.nn.init.eye_(eye).view(1, c, c).expand((N * g), c, c)
        mean_in = in_data.mean((- 1), keepdim=True)
        x_in = (in_data - mean_in)
        cov_in = torch.bmm(x_in, torch.transpose(x_in, 1, 2)).div((H * W))
        if (self.sw_type in [3, 5]):
            # Layer statistics: scalar mean/variance per sample, broadcast to
            # every group; variance enters as a diagonal matrix.
            x = x.view(N, (- 1))
            mean_ln = x.mean((- 1), keepdim=True).view(N, 1, 1, 1)
            mean_ln = mean_ln.expand(N, g, 1, 1).contiguous().view((N * g), 1, 1)
            var_ln = x.var((- 1), keepdim=True).view(N, 1, 1, 1)
            var_ln = var_ln.expand(N, g, 1, 1).contiguous().view((N * g), 1, 1)
            var_ln = (var_ln * eye)
        if (self.sw_type == 5):
            # Diagonal-only versions of the BN/IN covariances.
            var_bn = torch.diag_embed(torch.diagonal(cov_bn, dim1=(- 2), dim2=(- 1)))
            var_in = torch.diag_embed(torch.diagonal(cov_in, dim1=(- 2), dim2=(- 1)))
        # Softmax-normalized mixing weights over the statistic sources.
        softmax = nn.Softmax(0)
        mean_weight = softmax(self.sw_mean_weight)
        if (not self.tie_weight):
            var_weight = softmax(self.sw_var_weight)
        else:
            var_weight = mean_weight
        if (self.sw_type == 2):
            mean = ((mean_weight[0] * mean_bn) + (mean_weight[1] * mean_in))
            cov = (((var_weight[0] * cov_bn) + (var_weight[1] * cov_in)) + (self.eps * eye))
        elif (self.sw_type == 3):
            mean = (((mean_weight[0] * mean_bn) + (mean_weight[1] * mean_in)) + (mean_weight[2] * mean_ln))
            cov = ((((var_weight[0] * cov_bn) + (var_weight[1] * cov_in)) + (var_weight[2] * var_ln)) + (self.eps * eye))
        elif (self.sw_type == 5):
            mean = ((((mean_weight[0] + mean_weight[2]) * mean_bn) + ((mean_weight[1] + mean_weight[3]) * mean_in)) + (mean_weight[4] * mean_ln))
            cov = ((((((var_weight[0] * cov_bn) + (var_weight[1] * cov_in)) + (var_weight[0] * var_bn)) + (var_weight[1] * var_in)) + (var_weight[4] * var_ln)) + (self.eps * eye))
        # Newton-Schulz iteration for cov^{-1/2}: P_{k+1} = 1.5*P_k - 0.5*P_k^3*cov_N,
        # with cov_N the trace-normalized covariance.
        (Ng, c, _) = cov.size()
        P = torch.eye(c).to(cov).expand(Ng, c, c)
        rTr = (cov * P).sum((1, 2), keepdim=True).reciprocal_()
        cov_N = (cov * rTr)
        for k in range(self.T):
            P = torch.baddbmm(1.5, P, (- 0.5), torch.matrix_power(P, 3), cov_N)
        # Undo the trace normalization to obtain the whitening matrix.
        wm = P.mul_(rTr.sqrt())
        x_hat = torch.bmm(wm, (in_data - mean))
        x_hat = x_hat.view(N, C, H, W)
        if self.affine:
            x_hat = ((x_hat * self.weight.view(1, self.num_features, 1, 1)) + self.bias.view(1, self.num_features, 1, 1))
        return x_hat
def run_full_influence_functions(mode: str, num_examples_to_test: int, s_test_num_samples: int = 1000) -> Dict[int, Dict[str, Any]]:
    """Compute influence functions for MNLI evaluation examples and save each result.

    Iterates over the MNLI eval set one example at a time, keeping only examples
    whose prediction matches `mode` ('only-correct' keeps correctly-predicted
    examples, 'only-incorrect' keeps mispredicted ones), computes train-example
    influences for each kept example, mirrors every per-example result to a
    remote host, and returns all results keyed by eval index.

    Args:
        mode: 'only-correct' or 'only-incorrect' — which eval examples to test.
        num_examples_to_test: stop after this many examples have been processed.
        s_test_num_samples: number of samples for the s_test (inverse-HVP) estimate.

    Returns:
        Dict mapping eval-set index -> per-example outputs dict
        ('test_index', 'influences', 's_test', 'time', 'correct').

    Raises:
        ValueError: if `mode` is not one of the two recognized values.
    """
    if (mode not in ['only-correct', 'only-incorrect']):
        raise ValueError(f'Unrecognized mode {mode}')

    # Model/tokenizer and the MNLI train/eval datasets come from project helpers.
    (tokenizer, model) = misc_utils.create_tokenizer_and_model(constants.MNLI_MODEL_PATH)
    (mnli_train_dataset, mnli_eval_dataset) = misc_utils.create_datasets(task_name='mnli', tokenizer=tokenizer)

    # Three loaders over the same data: batched+shuffled train (for s_test sampling),
    # per-instance train (for per-example gradients), per-instance eval (examples to test).
    batch_train_data_loader = misc_utils.get_dataloader(mnli_train_dataset, batch_size=128, random=True)
    instance_train_data_loader = misc_utils.get_dataloader(mnli_train_dataset, batch_size=1, random=False)
    eval_instance_data_loader = misc_utils.get_dataloader(dataset=mnli_eval_dataset, batch_size=1, random=False)

    output_mode = glue_output_modes['mnli']

    def build_compute_metrics_fn(task_name: str):
        # Standard GLUE metric adapter: argmax for classification, squeeze for regression.
        def compute_metrics_fn(p):
            if (output_mode == 'classification'):
                preds = np.argmax(p.predictions, axis=1)
            elif (output_mode == 'regression'):
                preds = np.squeeze(p.predictions)
            return glue_compute_metrics(task_name, preds, p.label_ids)
        return compute_metrics_fn

    # Trainer is only used for prediction-correctness checks below, not for training.
    trainer = transformers.Trainer(
        model=model,
        args=TrainingArguments(output_dir='./tmp-output', per_device_train_batch_size=128, per_device_eval_batch_size=128, learning_rate=5e-05, logging_steps=100),
        data_collator=default_data_collator,
        train_dataset=mnli_train_dataset,
        eval_dataset=mnli_eval_dataset,
        compute_metrics=build_compute_metrics_fn('mnli'))

    # Frozen parameters are excluded from influence computation and weight decay.
    params_filter = [n for (n, p) in model.named_parameters() if (not p.requires_grad)]
    weight_decay_ignores = (['bias', 'LayerNorm.weight'] + [n for (n, p) in model.named_parameters() if (not p.requires_grad)])

    model.cuda()  # influence computation below assumes a CUDA device
    num_examples_tested = 0
    outputs_collections = {}
    for (test_index, test_inputs) in enumerate(eval_instance_data_loader):
        if (num_examples_tested >= num_examples_to_test):
            break
        prediction_is_correct = misc_utils.is_prediction_correct(trainer=trainer, model=model, inputs=test_inputs)
        # Skip examples that don't match the requested correctness filter.
        if ((mode == 'only-correct') and (prediction_is_correct is False)):
            continue
        if ((mode == 'only-incorrect') and (prediction_is_correct is True)):
            continue
        with Timer() as timer:
            (influences, _, s_test) = nn_influence_utils.compute_influences(
                n_gpu=1,
                device=torch.device('cuda'),
                batch_train_data_loader=batch_train_data_loader,
                instance_train_data_loader=instance_train_data_loader,
                model=model,
                test_inputs=test_inputs,
                params_filter=params_filter,
                weight_decay=constants.WEIGHT_DECAY,
                weight_decay_ignores=weight_decay_ignores,
                s_test_damp=0.005,
                s_test_scale=10000.0,
                s_test_num_samples=s_test_num_samples,
                train_indices_to_include=None,
                s_test_iterations=1,
                precomputed_s_test=None)
            # NOTE(review): original indentation was lost in extraction; the result
            # packaging is kept inside the timer block (timer.elapsed is readable
            # mid-block with contexttimer-style timers) — confirm against upstream.
            outputs = {'test_index': test_index, 'influences': influences, 's_test': s_test, 'time': timer.elapsed, 'correct': prediction_is_correct}
            num_examples_tested += 1
            outputs_collections[test_index] = outputs
            # Persist each example's result immediately so partial runs are recoverable.
            remote_utils.save_and_mirror_scp_to_remote(object_to_save=outputs, file_name=f'KNN-recall.{mode}.{num_examples_to_test}.{test_index}.pth')
        print(f'Status: #{test_index} | {num_examples_tested} / {num_examples_to_test}')
    return outputs_collections
# NOTE(review): the extraction dropped the '@' of this decorator; restored here so the
# sacred ingredient actually captures the function's arguments from the config.
@_INGREDIENT.capture
def build_model(graph_adj, node_features, labels, dataset_indices_placeholder, train_feed, trainval_feed, val_feed, test_feed, weight_decay, normalize_features, num_layers, hidden_size, dropout_prob):
    """Build a GCN model and wire its dropout placeholder into the feed dicts.

    A scalar dropout placeholder is created and registered in all four feed dicts:
    training uses `dropout_prob`, while the trainval/val/test feeds disable dropout
    (False feeds as 0.0 for a float placeholder).

    Returns:
        A GCN instance configured with the given architecture/regularization settings.
    """
    dropout = tf.placeholder(dtype=tf.float32, shape=[])
    train_feed[dropout] = dropout_prob
    trainval_feed[dropout] = False
    val_feed[dropout] = False
    test_feed[dropout] = False
    return GCN(node_features, graph_adj, labels, dataset_indices_placeholder,
               num_layers=num_layers, hidden_size=hidden_size, dropout_prob=dropout,
               weight_decay=weight_decay, normalize_features=normalize_features)
# NOTE(review): extraction artifact — "(precision=4)" below was the tail of a truncated
# module-level statement (possibly np.set_printoptions(precision=4)); left commented out
# because the call's name was lost. TODO: restore the original statement.
# (precision=4)

def backward_difference():
    """Demo/benchmark: fit and transform the mushroom dataset with a BackwardDifferenceEncoder.

    Prints the DataFrame info before and after encoding; `del` at the end releases
    the large intermediates explicitly (this is a memory-profiling style harness).
    """
    X, _, _ = get_mushroom_data()
    print(X.info())
    enc = ce.BackwardDifferenceEncoder()
    enc.fit(X, None)  # unsupervised encoder: no target needed
    out = enc.transform(X)
    print(out.info())
    del enc, _, X, out
class UpSample(nn.Module):
    """Nearest-neighbor upsampling with three modes.

    'none' passes the input through unchanged, 'timepreserve' doubles only the
    height axis (scale (2, 1)), and 'half' doubles both spatial axes.
    Any other mode raises RuntimeError.
    """

    def __init__(self, layer_type):
        super().__init__()
        self.layer_type = layer_type

    def forward(self, x):
        mode = self.layer_type
        if mode == 'none':
            return x
        if mode == 'timepreserve':
            return F.interpolate(x, scale_factor=(2, 1), mode='nearest')
        if mode == 'half':
            return F.interpolate(x, scale_factor=2, mode='nearest')
        raise RuntimeError(('Got unexpected upsampletype %s, expected is [none, timepreserve, half]' % self.layer_type))
def get_rotated_mnist_loaders(angle, data_path, model_class='LeNet', download=False):
    """Build val/test loaders over the MNIST test split rotated by `angle` degrees.

    For `model_class == 'MLP'` each image is additionally flattened to a vector
    after tensor conversion; all other model classes keep the 2D image layout.
    Returns a (val_loader, test_loader) pair from a 2000-example validation split.
    """
    tform_list = [RotationTransform(angle), transforms.ToTensor()]
    if model_class == 'MLP':
        # MLPs consume flat vectors, so flatten after ToTensor.
        tform_list.append(ReshapeTransform((-1,)))
    shift_tforms = transforms.Compose(tform_list)
    rotated_set = datasets.MNIST(data_path, train=False, transform=shift_tforms, download=download)
    shift_val_loader, shift_test_loader = val_test_split(rotated_set, val_size=2000)
    return (shift_val_loader, shift_test_loader)
def to_gif(video, duration, event):
    """Convert the first `duration` seconds of `video` into a palette-optimized GIF.

    Uses ffmpeg's palettegen/paletteuse split-filter pipeline at 10 fps, 320px wide.
    `event` is accepted for handler-signature compatibility but unused here.
    Returns the output path under /tmp.
    """
    output = '/tmp/processed-{}.gif'.format(os.path.basename(video))
    ffmpeg_args = [
        '-i', video,
        '-t', '{0}'.format(duration),
        '-vf', 'fps=10,scale=320:-1:flags=lanczos,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse',
        '-loop', '0',
        output,
    ]
    call_ffmpeg(ffmpeg_args)
    return output
def load_checkpoints(directory, is_gpu=True):
    """Recursively load every `*-<epoch>.pt` checkpoint found under `directory`.

    Args:
        directory: root directory to walk.
        is_gpu: forwarded to `load_model`.

    Returns:
        List of (epoch_idx, model) tuples in os.walk order (unsorted).
    """
    # Fix: the previous pattern '.*?-([0-9].*?).pt' used an unescaped '.' and a lazy
    # group that could capture non-digits (e.g. 'model-1a.pt' -> int('1a') raised
    # ValueError). Require an all-digit epoch and a literal '.pt' suffix.
    epoch_pattern = re.compile(r'-([0-9]+)\.pt$')
    checkpoints = []
    for (root, _, filenames) in os.walk(directory):
        for filename in filenames:
            match = epoch_pattern.search(filename)
            if match is None:
                continue
            epoch_idx = int(match.group(1))
            model = load_model(root, is_gpu=is_gpu, filename=filename)
            checkpoints.append((epoch_idx, model))
    return checkpoints
# NOTE(review): extraction artifact — "_model" below is the tail of a truncated
# decorator (likely "@register_model"); left commented out because the full name was
# lost. TODO: restore the decorator so the variant is registered.
# _model

def hrnet_w18_small(pretrained=True, **kwargs):
    """Construct the hrnet_w18_small model variant.

    Args:
        pretrained: whether to load pretrained weights.
        **kwargs: extra model-construction options forwarded to `_create_model`.
    """
    # NOTE(review): kwargs is passed as a dict positionally — confirm that
    # _create_model's third parameter expects a dict rather than **kwargs.
    return _create_model('hrnet_w18_small', pretrained, kwargs)
# NOTE(review): extraction artifact — "_utils.test()" below is the tail of a truncated
# decorator (likely "@test_utils.test()" from the taichi test harness); left commented
# out because the module name was lost. TODO: restore the decorator.
# _utils.test()

def test_nested_subscript():
    """Nested field subscript: x[x[i]] += 1 must increment x[0] exactly once."""
    x = ti.field(ti.i32)
    y = ti.field(ti.i32)  # placed but unused — kept: placement shapes the field tree
    ti.root.dense(ti.i, 1).place(x)
    ti.root.dense(ti.i, 1).place(y)
    x[0] = 0

    # NOTE(review): `inc` likely lost a "@ti.kernel" decorator during extraction —
    # verify against the original test file.
    def inc():
        for i in range(1):
            x[x[i]] += 1  # x[0] == 0, so this resolves to x[0] += 1

    inc()
    assert (x[0] == 1)
def test_mixed(spark_session):
    """Smoke-test the 'mixed' job configuration end to end on the given Spark session."""
    job_config = dict(
        spark=spark_session,
        name='mixed',
        data_cols=['Weekly_Sales', 'Temperature', 'CPI'],
        hierarchical=True,
        agg_dict={'Weekly_Sales': 'sum'},
        predict_on_train=True,
        robust=False,
    )
    _run_job(**job_config)
class LightConv3x3(nn.Module):
    """Lightweight 3x3 convolution block.

    Factorizes a standard 3x3 conv into a 1x1 pointwise conv (channel mixing)
    followed by a 3x3 depthwise conv (groups == out_channels, spatial mixing),
    then BatchNorm and ReLU.
    """

    def __init__(self, in_channels, out_channels):
        super(LightConv3x3, self).__init__()
        # 1x1 pointwise: maps in_channels -> out_channels, no bias (BN follows).
        self.conv1 = nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False)
        # 3x3 depthwise: one filter per channel, padding keeps spatial size.
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=False, groups=out_channels)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.bn(out)
        out = self.relu(out)
        return out
class TestAI21TokenCounter():
    """Unit tests for AI21TokenCounter's token accounting."""

    def setup_method(self, method):
        # pytest hook: fresh counter before each test (`method` is the test about to run).
        self.token_counter = AI21TokenCounter()

    def test_count_tokens(self):
        # Fixture captured from a real completion: the prompt plus one completion
        # sequence containing 32 Token entries. The expected count (32) equals the
        # number of completion tokens, i.e. the prompt is not billed here.
        request = Request(model='openai/text-davinci-002', model_deployment='openai/text-davinci-002', prompt='The Center for Research on Foundation Models (CRFM) is an interdisciplinary initiative born out of the Stanford Institute for Human-Centered Artificial Intelligence (HAI) that aims to make fundamental advances in the study, development, and deployment of foundation models.')
        completions: List[Sequence] = [Sequence(
            text='\n\nFoundation models are an artificial intelligence paradigm that emphasizes: (1) reasoning about symbolic, structured knowledge, (2) learning to perform tasks from observation, ',
            logprob=(-66.),
            tokens=[
                Token(text='\n', logprob=(-1.), top_logprobs={'\n': (-1.)}),
                Token(text='\n', logprob=(-2.), top_logprobs={' Foundation': (-1.)}),
                Token(text=' Foundation', logprob=(-1.), top_logprobs={' Foundation': (-1.)}),
                Token(text=' models are', logprob=(-0.), top_logprobs={' models are': (-0.)}),
                Token(text=' an', logprob=(-5.), top_logprobs={' a class of': (-2.)}),
                Token(text=' artificial intelligence', logprob=(-2.), top_logprobs={' increasingly popular': (-1.)}),
                Token(text=' paradigm', logprob=(-2.), top_logprobs={' ': (-1.)}),
                Token(text=' that', logprob=(-1.), top_logprobs={' that': (-1.)}),
                Token(text=' emphasizes', logprob=(-4.), top_logprobs={' attempts to': (-3.)}),
                Token(text=':', logprob=(-5.), top_logprobs={' reasoning': (-2.)}),
                Token(text=' ', logprob=(-1.), top_logprobs={'\n': (-0.)}),
                Token(text='(', logprob=(-0.), top_logprobs={'(': (-0.)}),
                Token(text='1', logprob=(-0.), top_logprobs={'1': (-0.)}),
                Token(text=')', logprob=(-0.), top_logprobs={')': (-0.)}),
                Token(text=' reasoning', logprob=(-3.), top_logprobs={' the': (-2.)}),
                Token(text=' about', logprob=(-1.), top_logprobs={' about': (-1.)}),
                Token(text=' symbolic', logprob=(-7.), top_logprobs={' and': (-2.)}),
                Token(text=',', logprob=(-2.), top_logprobs={' knowledge': (-1.)}),
                Token(text=' structured', logprob=(-2.), top_logprobs={' structured': (-2.)}),
                Token(text=' knowledge', logprob=(-0.), top_logprobs={' knowledge': (-0.)}),
                Token(text=',', logprob=(-1.), top_logprobs={',': (-1.)}),
                Token(text=' ', logprob=(-0.), top_logprobs={' ': (-0.)}),
                Token(text='(', logprob=(-0.), top_logprobs={'(': (-0.)}),
                Token(text='2', logprob=(-0.), top_logprobs={'2': (-0.)}),
                Token(text=')', logprob=(-5.e-05), top_logprobs={')': (-5.e-05)}),
                Token(text=' learning', logprob=(-2.), top_logprobs={' learning': (-2.)}),
                Token(text=' to perform', logprob=(-5.), top_logprobs={' through': (-1.)}),
                Token(text=' tasks', logprob=(-1.), top_logprobs={' complex': (-1.)}),
                Token(text=' from', logprob=(-1.), top_logprobs={' from': (-1.)}),
                Token(text=' observation', logprob=(-4.), top_logprobs={' human': (-2.)}),
                Token(text=',', logprob=(-0.), top_logprobs={',': (-0.)}),
                Token(text=' ', logprob=(-1.), top_logprobs={' and': (-0.)})])]
        assert (self.token_counter.count_tokens(request, completions) == 32)
def clean_graph_item(graph_item):
    """Return a deep copy of `graph_item` with its annotation keys stripped.

    Removes 'optional' and 'required' from the item itself and from every entry
    of its 'objects' list. The input is never mutated. A missing 'objects' key
    still raises KeyError, matching the previous behavior.
    """
    # Previous version shadowed the function's own name with the local copy and
    # used `if key in d: del d[key]` pairs; pop(key, None) does the same in one step.
    cleaned = copy.deepcopy(graph_item)
    cleaned.pop('optional', None)
    cleaned.pop('required', None)
    for obj in cleaned['objects']:
        obj.pop('optional', None)
        obj.pop('required', None)
    return cleaned
def getConnection(db=None, driver=None, user=None, password=None, host=None):
    """Open a psycopg2 connection from keyword-style DSN parts.

    `db` and `host` are always included in the DSN (even when None, which
    formats as the string 'None'); user/password are appended only when given.
    `driver` is accepted for signature compatibility but unused.
    """
    parts = ['dbname=%s host=%s' % (db, host)]
    if user is not None:
        parts.append(' user=%s' % user)
    if password is not None:
        parts.append(' password=%s' % password)
    conn = psycopg2.connect(''.join(parts))
    return conn
def _is_stopped(demo, i, obs, stopped_buffer, delta=0.1): next_is_not_final = (i == (len(demo) - 2)) gripper_state_no_change = ((i < (len(demo) - 2)) and ((obs.gripper_open == demo[(i + 1)].gripper_open) and (obs.gripper_open == demo[(i - 1)].gripper_open) and (demo[(i - 2)].gripper_open == demo[(i - 1)].gripper_open))) small_delta = np.allclose(obs.joint_velocities, 0, atol=delta) stopped = ((stopped_buffer <= 0) and small_delta and (not next_is_not_final) and gripper_state_no_change) return stopped
def truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Trim two token lists in place until their combined length fits `max_length`.

    Each round pops one token from the FRONT of whichever list is currently
    longer (ties trim `tokens_b`), so the tails of both sequences are preserved.
    """
    while (len(tokens_a) + len(tokens_b)) > max_length:
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        longer.pop(0)
class A002113(SloaneSequence):
    """Sloane sequence A002113: palindromes in base 10."""

    def __init__(self):
        # The sequence is 0-offset: a(0) = 0 is the first palindrome.
        SloaneSequence.__init__(self, offset=0)

    def _repr_(self):
        return 'Palindromes in base 10.'

    def _precompute(self, how_many=150):
        # Lazily initialize the cache on first use (EAFP): self._b holds the
        # palindromes found so far, self._n the next candidate integer to scan.
        try:
            self._b
            self._n
        except AttributeError:
            self._b = []
            self._n = self.offset
        # n is a base-10 palindrome iff its digit reversal (A004086) equals itself.
        self._b += [i for i in range(self._n, (self._n + how_many)) if (sloane.A004086(i) == i)]
        self._n += how_many

    def _eval(self, n):
        # Grow the cache (150 candidates per round) until index n exists, then retry.
        try:
            return self._b[n]
        except (AttributeError, IndexError):
            self._precompute()
            return self._eval(n)

    def list(self, n):
        # First n terms; mirrors _eval's grow-and-retry pattern.
        # NOTE(review): requires len(self._b) > n (strictly more than n cached
        # terms) before returning — one extra precompute round in the edge case.
        try:
            if (len(self._b) <= n):
                raise IndexError
            else:
                return self._b[:n]
        except (AttributeError, IndexError):
            self._precompute()
            return self.list(n)
class Decoder(layers.Layer):
    """Two-layer dense decoder: tanh projection, then linear output of `original_dim`.

    Both layers use GlorotNormal initialization and L2 kernel regularization
    with weight `regularization_lambda`; the global TF seed is set from
    `random_seed` at construction time.
    """

    def __init__(self, original_dim, intermediate_dim=600, name='decoder', regularization_lambda=0.01, random_seed=42, **kwargs):
        super().__init__(name=name, **kwargs)
        tf.random.set_seed(random_seed)

        def _dense(units, **dense_kwargs):
            # Fresh initializer/regularizer instances per layer, as before.
            return layers.Dense(
                units,
                kernel_initializer=keras.initializers.GlorotNormal(),
                kernel_regularizer=keras.regularizers.l2(regularization_lambda),
                **dense_kwargs)

        self.dense_proj = _dense(intermediate_dim, activation='tanh')
        self.dense_output = _dense(original_dim)

    def call(self, inputs, **kwargs):
        hidden = self.dense_proj(inputs)
        return self.dense_output(hidden)
def save_models(path: str, net, *, write_layers=True, file_format=None):
    """Save `net` to `path`, and optionally every layer model it contains.

    The net itself is saved under its own name. When `write_layers` is set,
    each flattened sub-model is saved under its declared name if unique so far,
    otherwise (unnamed, or a duplicate name) under an index-prefixed name
    '%04d_<name>'.
    """
    os.makedirs(path, exist_ok=True)
    net_name = net.get_name()
    _save_net_file(path, net_name, net, file_format=file_format)
    if not write_layers:
        return
    models = bb.get_model_list(net, flatten=True)
    used_fnames = []  # file names assigned so far; raw names are checked against it
    for (i, model) in enumerate(models):
        name = model.get_name()
        if model.is_named() and (name not in used_fnames):
            fname = ('%s' % name)
        else:
            if model.is_named() and (name in used_fnames):
                # typo fix: message previously read '[warrning]'
                print(('[warning] duplicate model name : %s' % name))
            fname = ('%04d_%s' % (i, name))
        used_fnames.append(fname)
        _save_net_file(path, fname, model, file_format=file_format)
# NOTE(review): extraction artifact — "_properties" below is the tail of a truncated
# decorator (likely dace's "@make_properties"); left commented out because the full
# name was lost. TODO: restore the decorator.
# _properties

class StencilTiling(transformation.SubgraphTransformation):
    """Subgraph transformation that stencil-tiles a set of parallel maps.

    Infers, per map parameter, how much each producer map over-computes relative
    to its consumers (the stencil halo), enlarges/strip-mines the maps so the
    tiles line up, and optionally unrolls the resulting inner loops.

    NOTE(review): coverage_dicts/topology/can_be_applied take no `self` and are
    invoked as StencilTiling.<fn>(...) — they likely lost @staticmethod
    decorators during extraction; verify against upstream.
    """

    debug = Property(desc='Debug mode', dtype=bool, default=False)
    prefix = Property(dtype=str, default='stencil', desc='Prefix for new inner tiled range symbols')
    strides = ShapeProperty(dtype=tuple, default=(1,), desc='Tile stride')
    schedule = Property(dtype=dace.dtypes.ScheduleType, default=dace.dtypes.ScheduleType.Default, desc='Dace.Dtypes.ScheduleType of Inner Maps')
    unroll_loops = Property(desc='Unroll Inner Loops if they have Size > 1', dtype=bool, default=False)

    def coverage_dicts(sdfg, graph, map_entry, outer_range=True):
        """Return (input_coverage, output_coverage) dicts keyed by data name.

        With outer_range=True the memlet subsets are substituted with the map's
        min/max parameter values (i.e. total coverage over the whole map range);
        otherwise raw per-iteration subsets are returned.
        """
        map_exit = graph.exit_node(map_entry)
        map = map_entry.map
        entry_coverage = {}
        exit_coverage = {}
        # Substitution tables: map parameter symbol -> range endpoint.
        map_min = {dace.symbol(param): e for (param, e) in zip(map.params, map.range.min_element())}
        map_max = {dace.symbol(param): e for (param, e) in zip(map.params, map.range.max_element())}
        # Inputs: edges leaving the map entry.
        for e in graph.out_edges(map_entry):
            if not e.data.subset:
                continue  # empty memlets carry no coverage
            if outer_range:
                min_element = [m.subs(map_min) for m in e.data.subset.min_element()]
                max_element = [m.subs(map_max) for m in e.data.subset.max_element()]
                rng = subsets.Range(((min_e, max_e, 1) for (min_e, max_e) in zip(min_element, max_element)))
            else:
                rng = dcpy(e.data.subset)
            if e.data.data not in entry_coverage:
                entry_coverage[e.data.data] = rng
            else:
                old_coverage = entry_coverage[e.data.data]
                entry_coverage[e.data.data] = subsets.union(old_coverage, rng)
        # Outputs: edges entering the map exit.
        for e in graph.in_edges(map_exit):
            if outer_range:
                min_element = [m.subs(map_min) for m in e.data.subset.min_element()]
                max_element = [m.subs(map_max) for m in e.data.subset.max_element()]
                rng = subsets.Range(((min_e, max_e, 1) for (min_e, max_e) in zip(min_element, max_element)))
            else:
                rng = dcpy(e.data.subset)
            if e.data.data not in exit_coverage:
                exit_coverage[e.data.data] = rng
            else:
                # Bug fix: previously indexed exit_coverage[e.data] (the Memlet
                # object) although the dict is keyed by data names — KeyError.
                old_coverage = exit_coverage[e.data.data]
                exit_coverage[e.data.data] = subsets.union(old_coverage, rng)
        return (entry_coverage, exit_coverage)

    def topology(sdfg, graph, map_entries):
        """Build the producer/consumer DAG over `map_entries`.

        Returns (children_dict, parent_dict, sink_maps) where edges go through
        intermediate AccessNodes and sink maps have no consumer in the set.
        """
        sink_maps = set()
        children_dict = defaultdict(set)
        parent_dict = defaultdict(set)
        map_exits = {graph.exit_node(entry): entry for entry in map_entries}
        for map_entry in map_entries:
            map_exit = graph.exit_node(map_entry)
            # Producers: access nodes feeding this map that are fed by another map exit.
            for e in graph.in_edges(map_entry):
                if isinstance(e.src, nodes.AccessNode):
                    for ie in graph.in_edges(e.src):
                        if ie.src in map_exits:
                            other_entry = map_exits[ie.src]
                            children_dict[other_entry].add(map_entry)
                            parent_dict[map_entry].add(other_entry)
            # Consumers: access nodes written by this map that feed another map entry.
            out_counter = 0
            for e in graph.out_edges(map_exit):
                if isinstance(e.dst, nodes.AccessNode):
                    for oe in graph.out_edges(e.dst):
                        if oe.dst in map_entries:
                            other_entry = oe.dst
                            children_dict[map_entry].add(other_entry)
                            parent_dict[other_entry].add(map_entry)
                            out_counter += 1
            if out_counter == 0:
                sink_maps.add(map_entry)
        return (children_dict, parent_dict, sink_maps)

    def can_be_applied(sdfg, subgraph) -> bool:
        """Check whether the subgraph's outermost maps form a tilable stencil chain."""
        graph = subgraph.graph
        map_entries = helpers.get_outermost_scope_maps(sdfg, graph, subgraph)
        map_exits = [graph.exit_node(entry) for entry in map_entries]
        if len(map_entries) <= 1:
            return False
        # All maps must agree on parameters, strides and schedule.
        first_map = next(iter(map_entries))
        params = dcpy(first_map.map.params)
        strides = first_map.map.range.strides()
        schedule = first_map.map.schedule
        for map_entry in map_entries:
            if map_entry.map.params != params:
                return False
            if map_entry.map.range.strides() != strides:
                return False
            if map_entry.map.schedule != schedule:
                return False
        # Range endpoints may differ only by constants, and must differ somewhere
        # (otherwise there is no stencil halo and nothing to tile).
        max_amount = 0
        first_entry = next(iter(map_entries))
        for map_entry in map_entries:
            for (r1, r2) in zip(map_entry.map.range, first_entry.map.range):
                if len((r1[0] - r2[0]).free_symbols) > 0:
                    return False
                else:
                    max_amount = max(max_amount, abs((r1[0] - r2[0])))
                if len((r1[1] - r2[1]).free_symbols) > 0:
                    return False
                else:
                    max_amount = max(max_amount, abs((r1[1] - r2[1])))
        if max_amount == 0:
            return False
        # Reuse SubgraphFusion's feasibility machinery for node topology checks.
        try:
            node_config = SubgraphFusion.get_adjacent_nodes(sdfg, graph, map_entries)
            (_, intermediate_nodes, out_nodes) = node_config
        except NotImplementedError:
            return False
        if not SubgraphFusion.check_topo_feasibility(sdfg, graph, map_entries, intermediate_nodes, out_nodes):
            return False
        if len((intermediate_nodes & out_nodes)) > 0:
            return False
        subgraph_contains_data = SubgraphFusion.determine_compressible_nodes(sdfg, graph, intermediate_nodes, map_entries, map_exits)
        if any([(s == False) for s in subgraph_contains_data.values()]):
            return False
        # Outer-range and per-iteration coverage per map.
        coverages = {}
        memlets = {}
        for map_entry in map_entries:
            coverages[map_entry] = StencilTiling.coverage_dicts(sdfg, graph, map_entry)
            memlets[map_entry] = StencilTiling.coverage_dicts(sdfg, graph, map_entry, outer_range=False)
        dag_neighbors = StencilTiling.topology(sdfg, graph, map_entries)
        (children_dict, _, sink_maps) = dag_neighbors
        # Every producer's output coverage must match what its children read,
        # dimension by dimension, each dimension indexed by exactly one parameter.
        for map_entry in map_entries:
            map_coverage = coverages[map_entry][1]
            param_parent_coverage = {p: None for p in map_entry.params}
            param_children_coverage = {p: None for p in map_entry.params}
            for child_entry in children_dict[map_entry]:
                for (data_name, cov) in map_coverage.items():
                    parent_coverage = cov
                    children_coverage = None
                    if data_name in coverages[child_entry][0]:
                        children_coverage = subsets.union(children_coverage, coverages[child_entry][0][data_name])
                    for (i, (p_subset, c_subset)) in enumerate(zip(parent_coverage, children_coverage)):
                        p_subset = subsets.Range((p_subset,))
                        c_subset = subsets.Range((c_subset,))
                        params1 = symbolic.symlist(memlets[map_entry][1][data_name][i]).keys()
                        params2 = symbolic.symlist(memlets[child_entry][0][data_name][i]).keys()
                        if params1 != params2:
                            return False
                        params = params1
                        if len(params) > 1:
                            return False
                        try:
                            symbol = next(iter(params))
                            param_parent_coverage[symbol] = subsets.union(param_parent_coverage[symbol], p_subset)
                            param_children_coverage[symbol] = subsets.union(param_children_coverage[symbol], c_subset)
                        except StopIteration:
                            # Bug fix: f-string used '{{data_name}}', which printed
                            # literal braces instead of the data name.
                            warnings.warn(f'StencilTiling::In map {map_entry}, there is a dimension belonging to {data_name} that has no map parameter associated.')
                            pass
                        except KeyError:
                            return False
            if param_parent_coverage != param_children_coverage:
                return False
        # All sink maps must have the same range size (they define the reference tile).
        assert (len(sink_maps) > 0)
        first_sink_map = next(iter(sink_maps))
        if not all([(map.range.size() == first_sink_map.range.size()) for map in sink_maps]):
            return False
        return True

    def apply(self, sdfg):
        """Apply stencil tiling to the subgraph's outermost maps."""
        graph = sdfg.node(self.state_id)
        subgraph = self.subgraph_view(sdfg)
        map_entries = helpers.get_outermost_scope_maps(sdfg, graph, subgraph)
        result = StencilTiling.topology(sdfg, graph, map_entries)
        (children_dict, parent_dict, sink_maps) = result
        inferred_ranges = defaultdict(dict)
        # Reverse-topological order: emit a map only once all its children are done.
        topo_reversed = []
        queue = set(sink_maps.copy())
        while len(queue) > 0:
            element = next((e for e in queue if (not (children_dict[e] - set(topo_reversed)))))
            topo_reversed.append(element)
            queue.remove(element)
            for parent in parent_dict[element]:
                queue.add(parent)
        coverage = {}
        for map_entry in map_entries:
            coverage[map_entry] = StencilTiling.coverage_dicts(sdfg, graph, map_entry, outer_range=True)
        # variable_mapping: data name -> per-dimension map parameter (or None).
        variable_mapping = defaultdict(list)
        for map_entry in topo_reversed:
            map = map_entry.map
            for e in itertools.chain(graph.out_edges(map_entry), graph.in_edges(graph.exit_node(map_entry))):
                mapping = []
                for dim in e.data.subset:
                    syms = set()
                    for d in dim:
                        syms |= symbolic.symlist(d).keys()
                    if len(syms) > 1:
                        raise NotImplementedError('One incoming or outgoing stencil subset is indexed by multiple map parameters. This is not supported yet.')
                    try:
                        mapping.append(syms.pop())
                    except KeyError:
                        mapping.append(None)  # dimension not indexed by any parameter
                # Bug fix: membership test previously used the Memlet object
                # (e.data) while the dict is keyed by data names (e.data.data),
                # so the consistency assert was effectively never reached.
                if e.data.data in variable_mapping:
                    assert variable_mapping[e.data.data] == mapping
                else:
                    variable_mapping[e.data.data] = mapping
            # Per-data local range: own output coverage unioned with children's reads.
            local_ranges = {dn: None for dn in coverage[map_entry][1].keys()}
            for (data_name, cov) in coverage[map_entry][1].items():
                local_ranges[data_name] = subsets.union(local_ranges[data_name], cov)
                for child_map in children_dict[map_entry]:
                    if data_name in coverage[child_map][0]:
                        local_ranges[data_name] = subsets.union(local_ranges[data_name], coverage[child_map][0][data_name])
            # Project data ranges back onto map parameters.
            inferred_ranges[map_entry] = {p: None for p in map.params}
            for (data_name, ranges) in local_ranges.items():
                for (param, r) in zip(variable_mapping[data_name], ranges):
                    rng = subsets.Range((r,))
                    if param:
                        inferred_ranges[map_entry][param] = subsets.union(inferred_ranges[map_entry][param], rng)
        params = next(iter(map_entries)).map.params.copy()
        # Sink maps define the reference (smallest) per-parameter range.
        self.reference_range = inferred_ranges[next(iter(sink_maps))]
        if self.debug:
            print('StencilTiling::Reference Range', self.reference_range)
        # Parameters whose inferred range is the same everywhere carry no stencil halo.
        invariant_dims = []
        for (idx, p) in enumerate(params):
            different = False
            if self.reference_range[p] is None:
                invariant_dims.append(idx)
                warnings.warn(f'StencilTiling::No Stencil pattern detected for parameter {p}')
                continue
            for m in map_entries:
                if inferred_ranges[m][p] != self.reference_range[p]:
                    different = True
                    break
            if not different:
                invariant_dims.append(idx)
                warnings.warn(f'StencilTiling::No Stencil pattern detected for parameter {p}')
        self._outer_entries = set()
        for map_entry in map_entries:
            map = map_entry.map
            stripmine_subgraph = {StripMining.map_entry: graph.node_id(map_entry)}
            sdfg_id = sdfg.sdfg_id
            last_map_entry = None
            original_schedule = map_entry.schedule
            self.tile_sizes = []
            self.tile_offset_lower = []
            self.tile_offset_upper = []
            removed_maps = 0
            for (dim_idx, param) in enumerate(map_entry.map.params):
                # Strides beyond the provided tuple repeat the last entry.
                if dim_idx >= len(self.strides):
                    tile_stride = symbolic.pystr_to_symbolic(self.strides[-1])
                else:
                    tile_stride = symbolic.pystr_to_symbolic(self.strides[dim_idx])
                trivial = False
                if dim_idx in invariant_dims:
                    # No halo: tile size equals the stride, zero offsets.
                    self.tile_sizes.append(tile_stride)
                    self.tile_offset_lower.append(0)
                    self.tile_offset_upper.append(0)
                else:
                    # Halo sizes: constant differences to the reference range.
                    target_range_current = inferred_ranges[map_entry][param]
                    reference_range_current = self.reference_range[param]
                    min_diff = symbolic.SymExpr((reference_range_current.min_element()[0] - target_range_current.min_element()[0]))
                    max_diff = symbolic.SymExpr((target_range_current.max_element()[0] - reference_range_current.max_element()[0]))
                    try:
                        min_diff = symbolic.evaluate(min_diff, {})
                        max_diff = symbolic.evaluate(max_diff, {})
                    except TypeError:
                        raise RuntimeError('Symbolic evaluation of map ranges failed. Please check your parameters and match.')
                    self.tile_sizes.append(((tile_stride + max_diff) + min_diff))
                    self.tile_offset_lower.append(symbolic.pystr_to_symbolic(str(min_diff)))
                    self.tile_offset_upper.append(symbolic.pystr_to_symbolic(str(max_diff)))
                tile_size = self.tile_sizes[-1]
                dim_idx -= removed_maps  # account for dimensions removed as trivial
                if (tile_size == map.range.size()[dim_idx]) and ((dim_idx + removed_maps) in invariant_dims):
                    continue
                if map.range.size()[dim_idx] in [0, 1]:
                    continue
                if (tile_size == 1) and (tile_stride == 1) and ((dim_idx + removed_maps) in invariant_dims):
                    trivial = True
                    removed_maps += 1
                # Shrink the inner range by the halo before strip-mining.
                range_tuple = ((map.range[dim_idx][0] + self.tile_offset_lower[-1]), (map.range[dim_idx][1] - self.tile_offset_upper[-1]), map.range[dim_idx][2])
                map.range[dim_idx] = range_tuple
                stripmine = StripMining()
                stripmine.setup_match(sdfg, sdfg_id, self.state_id, stripmine_subgraph, 0)
                stripmine.tiling_type = dtypes.TilingType.CeilRange
                stripmine.dim_idx = dim_idx
                stripmine.new_dim_prefix = (self.prefix if (not trivial) else '')
                stripmine.tile_size = str(tile_stride)
                stripmine.tile_stride = str(tile_stride)
                outer_map = stripmine.apply(graph, sdfg)
                outer_map.schedule = original_schedule
                if not trivial:
                    if tile_stride == 1:
                        # Drop ceiling approximations introduced by CeilRange tiling.
                        map_entry.map.range[dim_idx] = tuple(((symbolic.SymExpr(el._approx_expr) if isinstance(el, symbolic.SymExpr) else el) for el in map_entry.map.range[dim_idx]))
                    # Re-extend the inner range by the halo.
                    old_range = map_entry.map.range[dim_idx]
                    map_entry.map.range[dim_idx] = ((old_range[0] - self.tile_offset_lower[-1]), (old_range[1] + self.tile_offset_upper[-1]), old_range[2])
                _propagate_node(graph, map_entry)
                _propagate_node(graph, graph.exit_node(map_entry))
                # Collapse each newly created outer map into the previous one.
                if last_map_entry:
                    new_map_entry = graph.in_edges(map_entry)[0].src
                    mapcollapse_subgraph = {MapCollapse.outer_map_entry: graph.node_id(last_map_entry), MapCollapse.inner_map_entry: graph.node_id(new_map_entry)}
                    mapcollapse = MapCollapse()
                    mapcollapse.setup_match(sdfg, sdfg_id, self.state_id, mapcollapse_subgraph, 0)
                    mapcollapse.apply(graph, sdfg)
                last_map_entry = graph.in_edges(map_entry)[0].src
            if last_map_entry:
                self._outer_entries.add(last_map_entry)
            map_entry.map.schedule = self.schedule
            # Optional inner-loop unrolling: only with unit strides and at least
            # one non-degenerate inner dimension.
            if (self.unroll_loops and all(((s == 1) for s in self.strides)) and any(((s not in [0, 1]) for s in map_entry.range.size()))):
                l = len(map_entry.params)
                if l > 1:
                    subgraph = {MapExpansion.map_entry: graph.node_id(map_entry)}
                    trafo_expansion = MapExpansion()
                    trafo_expansion.setup_match(sdfg, sdfg.sdfg_id, sdfg.nodes().index(graph), subgraph, 0)
                    trafo_expansion.apply(graph, sdfg)
                maps = [map_entry]
                for _ in range((l - 1)):
                    map_entry = graph.out_edges(map_entry)[0].dst
                    maps.append(map_entry)
                for map in reversed(maps):
                    # Convert each inner map to a for-loop, then unroll it.
                    subgraph = {MapToForLoop.map_entry: graph.node_id(map)}
                    trafo_for_loop = MapToForLoop()
                    trafo_for_loop.setup_match(sdfg, sdfg.sdfg_id, sdfg.nodes().index(graph), subgraph, 0)
                    trafo_for_loop.apply(graph, sdfg)
                    nsdfg = trafo_for_loop.nsdfg
                    guard = trafo_for_loop.guard
                    end = trafo_for_loop.after_state
                    begin = next((e.dst for e in nsdfg.out_edges(guard) if (e.dst != end)))
                    subgraph = {DetectLoop.loop_guard: nsdfg.node_id(guard), DetectLoop.loop_begin: nsdfg.node_id(begin), DetectLoop.exit_state: nsdfg.node_id(end)}
                    # Renamed from 'transformation', which shadowed the imported module.
                    unroller = LoopUnroll()
                    unroller.setup_match(nsdfg, 0, (-1), subgraph, 0)
                    unroller.apply(nsdfg, nsdfg)
            elif self.unroll_loops:
                warnings.warn('StencilTiling::Did not unroll loops. Either all ranges are equal to one or range difference is symbolic.')
        self._outer_entries = list(self._outer_entries)
class TFConvBertForTokenClassification():
    """Placeholder ("dummy") object standing in for the real TF model class.

    Every entry point simply invokes requires_tf, which reports that the
    TensorFlow backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Constructing the dummy immediately reports the missing backend.
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        # Mirrors the real class's loader entry point; same backend report.
        requires_tf(self)
def test_recordarray_7():
    """JVP and VJP of f(x) = 2 * x.y[2, 0, 1] + 10 over the shared record-array fixture."""
    # Renamed the inner function (it previously shadowed this test's own name).
    def fun(x):
        return ((2 * x.y[(2, 0, 1)]) + 10)

    primal_jvp, tangent_out = jax.jvp(fun, (test_recordarray,), (test_recordarray_tangent,))
    primal_vjp, pullback = jax.vjp(fun, test_recordarray)
    assert ak.to_list(primal_jvp) == 14.0
    assert ak.to_list(primal_vjp) == 14.0
    assert ak.to_list(tangent_out) == 1.0
    assert ak.to_list(pullback(primal_vjp)[0]) == [[{'x': 0.0, 'y': [0.0]}, {'x': 0.0, 'y': [0.0, 0.0]}], [], [{'x': 0.0, 'y': [0.0, 28.0, 0.0]}]]
def load_dataset(choice, data_dir='./data/'):
    """Resolve `choice` to one of the MNIST dataset loaders and invoke it.

    Imports stay inside the thunks so only the selected dataset module is
    loaded. Raises NameError for an unrecognized choice.
    """
    def _mnist2d_10class():
        from datasets.mnist import mnist2d_10class
        return mnist2d_10class(data_dir)

    def _mnist2d_2class():
        from datasets.mnist import mnist2d_2class
        return mnist2d_2class(data_dir)

    def _mnistvector_10class():
        from datasets.mnist import mnistvector_10class
        return mnistvector_10class(data_dir)

    loaders = {
        'mnist2d': _mnist2d_10class,
        'mnist2d_2class': _mnist2d_2class,
        'mnistvector': _mnistvector_10class,
    }
    if choice not in loaders:
        raise NameError(f'Dataset {choice} not found.')
    return loaders[choice]()
class Sampler(torch.utils.data.Sampler):
    """Bucketed batch sampler with optional distributed sharding.

    Sentences are pre-grouped into size buckets; each bucket is split into
    roughly batch_size-worth chunks. In distributed mode, batches are dealt
    round-robin to replicas by rank; with `force_even` (distributed evaluation)
    the batch list is padded/split so every replica receives the same number
    of batches.
    """

    def __init__(self, buckets, batch_size, shuffle=False, distributed=False, evaluate=False):
        self.batch_size = batch_size
        self.shuffle = shuffle
        # sizes[i]: representative sentence size of bucket i; buckets[i]: its indices.
        (self.sizes, self.buckets) = zip(*[(size, bucket) for (size, bucket) in buckets.items()])
        # chunks[i]: number of batches bucket i is split into — at least 1,
        # at most one batch per element.
        self.chunks = [min(len(bucket), max(round(((size * len(bucket)) / batch_size)), 1)) for (size, bucket) in zip(self.sizes, self.buckets)]
        self.rank = (dist.get_rank() if distributed else 0)
        # NOTE(review): duplicate of the line above — dead statement, kept as-is.
        self.rank = (dist.get_rank() if distributed else 0)
        self.replicas = (dist.get_world_size() if distributed else 1)
        # Even sharding is only forced for distributed evaluation.
        self.force_even = (distributed and evaluate)
        # NOTE(review): leftover debug print — consider removing.
        print(self.force_even)
        self.samples = ((sum(self.chunks) // self.replicas) + ((self.replicas * int((sum(self.chunks) % self.replicas))) if self.force_even else 0))
        self.epoch = 0

    def __iter__(self):
        # Seed per-epoch so shuffling is deterministic yet changes every epoch.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        range_fn = torch.arange
        if self.shuffle:
            def range_fn(x):
                return torch.randperm(x, generator=g)
        (total, count) = (0, 0)
        if self.force_even:
            # Materialize every batch first so the total can be padded to a
            # multiple of the replica count.
            all = []
            for i in range_fn(len(self.buckets)).tolist():
                # Split bucket i into chunks[i] near-equal parts (ceil division).
                split_sizes = [((((len(self.buckets[i]) - j) - 1) // self.chunks[i]) + 1) for j in range(self.chunks[i])]
                for batch in range_fn(len(self.buckets[i])).split(split_sizes):
                    all.append([self.buckets[i][j] for j in batch.tolist()])
            if ((len(all) % self.replicas) != 0):
                # Pad: split one sufficiently large batch into l pieces so the
                # total batch count becomes a multiple of replicas.
                for batch in all:
                    if (len(batch) < self.replicas):
                        continue
                    all.remove(batch)
                    l = ((self.replicas - ((len(all) + 1) % self.replicas)) + 1)
                    for i in range(l):
                        if (i < (l - 1)):
                            all.append([batch[i]])
                        else:
                            all.append(batch[(l - 1):])
                    break
            length = 0  # NOTE(review): accumulated but never read after the loop
            assert ((len(all) % self.replicas) == 0), f'{len(all)}, {self.replicas}'
            for (i, batch) in enumerate(all):
                # Deal batches round-robin; this replica yields every replicas-th one.
                if ((i % self.replicas) == self.rank):
                    length += len(batch)
                    (yield batch)
        else:
            for i in range_fn(len(self.buckets)).tolist():
                split_sizes = [((((len(self.buckets[i]) - j) - 1) // self.chunks[i]) + 1) for j in range(self.chunks[i])]
                # NOTE(review): dead branch — this is the `else` of
                # `if self.force_even`, so the condition below is always False.
                if self.force_even:
                    if ((len(split_sizes) % self.replicas) != 0):
                        top = split_sizes.pop()
                        v = (top // (self.replicas + 1))
                        while (top != 0):
                            split_sizes.append(min(v, top))
                            top -= min(v, top)
                for batch in range_fn(len(self.buckets[i])).split(split_sizes):
                    if (count == self.samples):
                        break
                    if ((total % self.replicas) == self.rank):
                        count += 1
                        (yield [self.buckets[i][j] for j in batch.tolist()])
                    total += 1
        self.epoch += 1

    def __len__(self):
        # Number of batches this replica will yield per epoch.
        return self.samples