code
stringlengths
101
5.91M
def multihead_attention(sizes, output):
    """Benchmark every einsum contraction appearing in a multi-head
    attention layer (forward passes, fused QKV/KV variants, and the
    corresponding weight/input gradients).

    Each case is forwarded to ``runtest`` with *output* and *sizes*
    unchanged; the iteration order matches the original call sequence.
    """
    cases = [
        ('Q', 'phi,ibj->phbj'),
        ('lin1', 'bji,ui->bju'),
        ('lin2', 'bju,iu->bji'),
        ('out', 'phi,phbj->bij'),
        ('QKT', 'phbk,phbj->hbjk'),
        ('gamma', 'phbk,hbjk->phbj'),
        ('QKV-fused', 'qphi,ibj->qphbj'),
        ('KV-fused', 'vphi,ibj->vphbj'),
        ('dWlin2', 'bji,bju->iu'),
        ('dXlin2', 'bji,iu->bju'),
        ('dWlin1', 'bju,bji->ui'),
        ('dXlin1', 'bju,ui->bji'),
        ('dWout', 'phbj,bij->phi'),
        ('dXout', 'phi,bij->phbj'),
        ('dX2gamma', 'phbj,hbjk->phbk'),
        ('dX1gamma', 'phbk,phbj->hbjk'),
        ('dX2QKT', 'phbj,hbjk->phbk'),
        ('dX1QKT', 'phbk,hbjk->phbj'),
        ('dWQ', 'ibj,phbj->phi'),
        ('dXQ', 'phi,phbj->ibj'),
        ('dWQK-fused', 'ibj,vphbj->vphi'),
        ('dXQK-fused', 'vphi,vphbj->ibj'),
        ('dWQKV-fused', 'ibj,qphbj->qphi'),
        ('dXQKV-fused', 'qphi,qphbj->ibj'),
    ]
    for label, spec in cases:
        runtest(label, spec, output, sizes=sizes)
def test_remove_entity_by_name():
    """Removing an entity by name deregisters it and clears its back-link."""
    timeline = Timeline()
    entity = Dummy('e1', timeline)
    # Freshly created entity is registered and linked to its timeline.
    assert 'e1' in timeline.entities
    assert entity.timeline == timeline
    timeline.remove_entity_by_name('e1')
    # After removal it is gone from the registry and detached.
    assert 'e1' not in timeline.entities
    assert entity.timeline is None
def test_ufunc_add_where1_true():
    """With an all-true where-mask, the add ufunc equals plain addition."""
    lhs = np.random.randint(1, 10, size=(1,), dtype=np.int32)
    rhs = np.random.randint(1, 10, size=(1,), dtype=np.int32)
    result = ufunc_add_where1_true(lhs, rhs)
    assert np.array_equal(lhs + rhs, result)
class LabeledEvaluator(Evaluator):
    """Evaluator variant that records gold labels next to the predictions."""

    def get_evaluation(self, sess, batch):
        """Run the model on one batch and bundle step, ids, predictions and labels.

        Returns a ``LabeledEvaluation``; predictions are trimmed to the real
        number of examples (the tail may be padding).
        """
        idxs, data_set = batch
        feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
        global_step, yp = sess.run(
            [self.model.global_step, self.model.yp], feed_dict=feed_dict)
        # Drop any padding examples appended to fill the batch.
        yp = yp[:data_set.num_examples]
        # Gold labels come straight from the feed dict that was just built.
        y = feed_dict[self.model.y]
        return LabeledEvaluation(
            data_set.data_type, int(global_step), idxs, yp.tolist(), y.tolist())
@_task('denoising')  # NOTE(review): bare `_task('denoising')` looked like a stripped decorator (presumably @register_task); restored to decorator form so the task is actually registered — confirm the original name.
class DenoisingTask(LegacyFairseqTask):
    """Denoising pretraining task (BART-style): sentences are corrupted with
    masking / insertion / permutation / rotation noise and the model is
    trained to reconstruct the original text."""

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the argument *parser*."""
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--tokens-per-sample', default=512, type=int,
                            help='max number of total tokens over all segments per sample for dataset')
        parser.add_argument('--sample-break-mode', default='complete_doc', type=str,
                            help='mode for breaking sentence')
        parser.add_argument('--mask', default=0.0, type=float,
                            help='fraction of words/subwords that will be masked')
        parser.add_argument('--mask-random', default=0.0, type=float,
                            help='instead of using [MASK], use random token this often')
        parser.add_argument('--insert', default=0.0, type=float,
                            help='insert this percentage of additional random tokens')
        parser.add_argument('--permute', default=0.0, type=float,
                            help='take this proportion of subwords and permute them')
        parser.add_argument('--rotate', default=0.5, type=float,
                            help='rotate this proportion of inputs')
        parser.add_argument('--poisson-lambda', default=3.0, type=float,
                            help='randomly shuffle sentences for this proportion of inputs')
        parser.add_argument('--permute-sentences', default=0.0, type=float,
                            help='shuffle this proportion of sentences in all inputs')
        parser.add_argument('--mask-length', default='subword', type=str,
                            choices=['subword', 'word', 'span-poisson'],
                            help='mask length to choose')
        parser.add_argument('--replace-length', default=(- 1), type=int,
                            help='when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--shorten-method', default='none',
                            choices=['none', 'truncate', 'random_crop'],
                            help='if not none, shorten sequences that exceed --tokens-per-sample')
        parser.add_argument('--shorten-data-split-list', default='',
                            help='comma-separated list of dataset splits to apply shortening to, e.g., "train,valid" (default: all dataset splits)')

    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed
        # Register the sentinel used by the masking noise.
        self.mask_idx = self.dictionary.add_symbol('<mask>')

    @classmethod  # FIX: was defined with a `cls` first parameter but no @classmethod, so calls through the class would fail
    def setup_task(cls, args, **kwargs):
        """Load the dictionary from the first data path and build the task."""
        paths = utils.split_paths(args.data)
        assert (len(paths) > 0)
        dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
        logger.info('dictionary: {} types'.format(len(dictionary)))
        if (not hasattr(args, 'shuffle_instance')):
            args.shuffle_instance = False
        return cls(args, dictionary)

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load *split* and wrap it in a DenoisingDataset.

        Pipeline: indexed dataset -> strip EOS -> optional shortening ->
        token-block chunking -> prepend BOS / append EOS -> denoising noise.
        Raises FileNotFoundError if the split is missing.
        """
        paths = utils.split_paths(self.args.data)
        assert (len(paths) > 0)
        # Round-robin over data shards by epoch.
        data_path = paths[((epoch - 1) % len(paths))]
        split_path = os.path.join(data_path, split)
        dataset = data_utils.load_indexed_dataset(
            split_path, self.dictionary, self.args.dataset_impl, combine=combine)
        if (dataset is None):
            raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
        dataset = StripTokenDataset(dataset, self.dictionary.eos())
        dataset = maybe_shorten_dataset(
            dataset, split, self.args.shorten_data_split_list,
            self.args.shorten_method, self.args.tokens_per_sample, self.args.seed)
        # Reserve 2 positions for the BOS/EOS added below.
        dataset = TokenBlockDataset(
            dataset, dataset.sizes, (self.args.tokens_per_sample - 2),
            pad=self.dictionary.pad(), eos=self.dictionary.eos(),
            break_mode=self.args.sample_break_mode, document_sep_len=0)
        logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
        dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
        dataset = AppendTokenDataset(dataset, self.source_dictionary.eos())
        # Whole-word masking only when masking units larger than subwords.
        mask_whole_words = (get_whole_word_mask(self.args, self.source_dictionary)
                            if (self.args.mask_length != 'subword') else None)
        self.datasets[split] = DenoisingDataset(
            dataset, dataset.sizes, self.dictionary, self.mask_idx,
            mask_whole_words, shuffle=self.args.shuffle_instance,
            seed=self.seed, args=self.args)
        logger.info('Split: {0}, Loaded {1} samples of denoising_dataset'.format(
            split, len(self.datasets[split])))

    def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
        """Build a batching-ready dataset for inference on raw token tensors."""
        pad = self.source_dictionary.pad()
        eos = self.source_dictionary.eos()
        src_dataset = TokenBlockDataset(
            src_tokens, src_lengths,
            block_size=(self.args.tokens_per_sample - 2),
            pad=pad, eos=eos,
            break_mode=self.args.sample_break_mode, document_sep_len=0)
        # Teacher-forcing inputs: EOS moved to the front, per convention.
        prev_output_tokens = PrependTokenDataset(StripTokenDataset(src_dataset, eos), eos)
        src_dataset = PadDataset(src_dataset, pad_idx=pad, left_pad=False)
        return NestedDictionaryDataset(
            {'id': IdDataset(),
             'net_input': {'src_tokens': src_dataset,
                           'src_lengths': NumelDataset(src_dataset, reduce=False),
                           'prev_output_tokens': PadDataset(prev_output_tokens, pad_idx=pad, left_pad=False)},
             'target': src_dataset},
            sizes=[np.array(src_lengths)])

    def max_positions(self):
        """Return (max source positions, max target positions)."""
        return (self.args.max_source_positions, self.args.max_target_positions)

    @property  # FIX: load_dataset accesses self.source_dictionary.bos() as an attribute, so this must be a property
    def source_dictionary(self):
        return self.dictionary

    @property  # FIX: same reasoning as source_dictionary; tasks share one dictionary
    def target_dictionary(self):
        return self.dictionary
def normal_attention(tensor_base, tensor_to_attend, mask_for_tensor_base, mask_for_tensor_to_attend, similarity_method='inner', hn=100, use_pooling=False, pooling_method='max', reverse=False, scope=None):
    """Attend `tensor_to_attend` from `tensor_base` via a similarity matrix.

    Args:
        tensor_base: "main" tensor; shape indexed as [bs, sl, vec] below
            (assumed rank-3 — TODO confirm with callers).
        tensor_to_attend: "secondary" tensor, assumed [bs, ql, vec].
        mask_for_tensor_base / mask_for_tensor_to_attend: boolean validity
            masks over the sl / ql axes respectively.
        similarity_method: 'inner' (dot product), 'tri_linear' (via
            get_logits), or 'map_linear' (project both sides, then dot).
        hn: hidden size for the 'map_linear' projections.
        use_pooling: if True, pool the similarity matrix over the main axis
            and softsel once; otherwise softsel per main position.
        pooling_method: 'max' or 'mean' (only used when use_pooling).
        reverse: choose reverse_softsel instead of normal_softsel
            (only in the non-pooling branch).
        scope: variable-scope name (defaults to 'normal_attention').

    Raises:
        AttributeError: for an unknown similarity or pooling method.
    """
    with tf.variable_scope((scope or 'normal_attention')):
        t_main = tensor_base
        t_sec = tensor_to_attend
        mask_main = mask_for_tensor_base
        mask_sec = mask_for_tensor_to_attend
        (bs, sl, vec) = (tf.shape(t_main)[0], tf.shape(t_main)[1], tf.shape(t_main)[2])
        ql = tf.shape(t_sec)[1]
        # Pairwise validity mask: [bs, sl, ql] — entry true iff both positions valid.
        mask_main_etd = tf.expand_dims(mask_main, 2)
        mask_sec_etd = tf.expand_dims(mask_sec, 1)
        mask_similarity_mat = tf.logical_and(mask_main_etd, mask_sec_etd)
        if (similarity_method == 'inner'):
            # Plain dot-product similarity via broadcasting.
            t_main_etd = tf.expand_dims(t_main, 2)
            t_sec_etd = tf.expand_dims(t_sec, 1)
            similarity_mat = tf.reduce_sum((t_main_etd * t_sec_etd), (- 1))
        elif (similarity_method == 'tri_linear'):
            # Tile both sides to [bs, sl, ql, vec] and score with the
            # tri-linear logits helper.
            t_main_tiled = tf.tile(tf.expand_dims(t_main, 2), [1, 1, ql, 1])
            t_sec_tiled = tf.tile(tf.expand_dims(t_sec, 1), [1, sl, 1, 1])
            similarity_mat = get_logits([t_main_tiled, t_sec_tiled], None, False, scope='tri_linear_tri_linear', func='tri_linear')
        elif (similarity_method == 'map_linear'):
            # Project each side to hn dims with ReLU, then dot product.
            t_main_map = tf.nn.relu(linear([t_main], hn, True, scope='linear_map_main'))
            t_sec_map = tf.nn.relu(linear([t_sec], hn, True, scope='linear_map_sec'))
            t_main_map_etd = tf.expand_dims(t_main_map, 2)
            t_sec_map_etd = tf.expand_dims(t_sec_map, 1)
            similarity_mat = tf.reduce_sum((t_main_map_etd * t_sec_map_etd), (- 1))
        else:
            raise AttributeError(("No similarity matrix calculation method '%s'" % similarity_method))
        if use_pooling:
            if (pooling_method == 'max'):
                # exp_mask pushes invalid entries to -inf before the max.
                pooling_out = tf.reduce_max(exp_mask(similarity_mat, mask_similarity_mat), (- 2))
            elif (pooling_method == 'mean'):
                # Masked mean: zero invalid entries, divide by the count of
                # valid ones (clamped to 1 to avoid division by zero).
                sum_out = tf.reduce_sum(normal_mask(similarity_mat, mask_similarity_mat), (- 2))
                num = tf.reduce_sum(tf.cast(mask_similarity_mat, tf.int32), (- 2))
                num = tf.where(tf.equal(num, tf.zeros_like(num, tf.int32)), tf.ones_like(num, tf.int32), num)
                pooling_out = (sum_out / tf.cast(num, tf.float32))
            else:
                raise AttributeError(("No pooling method '%s'" % pooling_method))
            return softsel(t_sec, pooling_out, mask_sec)
        else:
            # One softsel per main position over the tiled secondary tensor.
            t_sec_tiled = tf.tile(tf.expand_dims(t_sec, 1), [1, sl, 1, 1])
            if (not reverse):
                out = normal_softsel(t_sec_tiled, similarity_mat, mask_similarity_mat)
            else:
                out = reverse_softsel(t_sec_tiled, similarity_mat, mask_similarity_mat)
            return out
def test_clean_input_format_tuple(df_countries: pd.DataFrame) -> None:
    """clean_country with input_format=('name', 'alpha-2') only matches those formats."""
    df_clean = clean_country(df_countries, 'messy_country', input_format=('name', 'alpha-2'))
    expected = df_countries.copy()
    expected['messy_country_clean'] = [
        'Canada', 'Canada', np.nan, np.nan, 'Ireland', 'DR Congo',
        'Congo Republic', np.nan, np.nan, np.nan, 'American Samoa',
        'Turkey', 'Belize', np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
    ]
    assert df_clean.equals(expected)
def collect_generated_testcases(root_dir=test_onnx_common.pytorch_converted_dir, verbose=False, fail_dir=None, expect=True):
    """Run every generated ONNX test case under *root_dir* and regenerate expect files.

    Each subdirectory is expected to hold a 'model.onnx' plus one or more
    'test_data_set_*' directories. Failing cases are deleted (fail_dir is
    None) or moved into fail_dir.

    Args:
        root_dir: directory of generated test-case subdirectories.
        verbose: print tracebacks for failing cases.
        fail_dir: destination for failing cases; None means delete them.
        expect: also (re)write the '.expect' pbtxt next to each passing case.
    """
    total_pass = 0
    total_fail = 0
    for d in os.listdir(root_dir):
        dir_name = os.path.join(root_dir, d)
        if os.path.isdir(dir_name):
            failed = False  # NOTE(review): set but never used
            try:
                model_file = os.path.join(dir_name, 'model.onnx')
                data_dir_pattern = os.path.join(dir_name, 'test_data_set_*')
                # Run each data set on every available device type.
                for data_dir in glob.glob(data_dir_pattern):
                    for device in torch.testing.get_all_device_types():
                        run_generated_test(model_file, data_dir, device)
                if expect:
                    # _expect_dir is a module-level global — TODO confirm.
                    expect_file = os.path.join(_expect_dir, 'PyTorch-generated-{}.expect'.format(d))
                    with open(expect_file, 'w') as text_file:
                        model = onnx.load(model_file)
                        onnx.checker.check_model(model)
                        # Strip doc strings so the pbtxt is stable across runs.
                        onnx.helper.strip_doc_string(model)
                        text_file.write(google.protobuf.text_format.MessageToString(model))
                total_pass += 1
            except Exception as e:
                if verbose:
                    print('The test case in {} failed!'.format(dir_name))
                    traceback.print_exc()
                if (fail_dir is None):
                    # No quarantine directory: just drop the broken case.
                    shutil.rmtree(dir_name)
                else:
                    target_dir = os.path.join(fail_dir, d)
                    if os.path.exists(target_dir):
                        shutil.rmtree(target_dir)
                    shutil.move(dir_name, target_dir)
                total_fail += 1
    print('Successfully generated/updated {} test cases from PyTorch.'.format(total_pass))
    if expect:
        print('Expected pbtxt files are generated in {}.'.format(_expect_dir))
    # NOTE(review): reports _fail_test_dir (module global) even when fail_dir
    # was given or cases were deleted — verify this matches intent.
    print('Failed {} testcases are moved to {}.'.format(total_fail, _fail_test_dir))
class SemanticPreservingTransformation():
    """Applies one randomly chosen semantics-preserving code transformation.

    All candidate transformation objects are instantiated up front;
    ``transform_code`` then tries them in random order until one succeeds.
    """

    def __init__(self, parser_path: str, language: str, transform_functions: Dict[(Callable, int)]=None):
        self.language = language
        if transform_functions is not None:
            self.transform_functions = transform_functions
        else:
            # Default pool: one instance of each known transformation class.
            self.transform_functions = {
                BlockSwap: 1,
                ConfusionRemover: 1,
                DeadCodeInserter: 1,
                ForWhileTransformer: 1,
                OperandSwap: 1,
                SyntacticNoisingTransformation: 1,
            }
        self.transformations = []
        if self.language == 'nl':
            # Natural language only supports syntactic noising.
            self.transformations.append(
                SyntacticNoisingTransformation(parser_path=parser_path, language='nl'))
        else:
            for transform_cls in self.transform_functions:
                for _ in range(self.transform_functions[transform_cls]):
                    self.transformations.append(
                        transform_cls(parser_path=parser_path, language=language))

    def transform_code(self, code: str):
        """Try transformations in random order.

        Returns (transformed_code, transformation_class_name) on success,
        or (code, None) when every candidate fails.
        """
        transformed, applied_name = None, None
        candidates = list(range(len(self.transformations)))
        # Shuffle kept before the choice loop to preserve the original
        # np.random call sequence (RNG-state compatibility).
        np.random.shuffle(candidates)
        succeeded = False
        while not succeeded and candidates:
            pick = np.random.choice(candidates)
            candidates.remove(pick)
            transform = self.transformations[pick]
            transformed, metadata = transform.transform_code(code)
            succeeded = metadata['success']
            if succeeded:
                applied_name = type(transform).__name__
        if not succeeded:
            return (code, None)
        return (transformed, applied_name)
class SurfaceRegularization(ShapeRegularizationTerm):
    """Shape regularization penalizing deviation of the boundary surface area
    from a target value: J = mu/2 * (|Gamma| - target)^2."""

    def __init__(self, db: database.Database) -> None:
        super().__init__(db)
        # Boundary measure used for all surface integrals.
        self.ds = fenics.Measure('ds', self.mesh)
        self.mu = self.config.getfloat('Regularization', 'factor_surface')
        self.target_surface = self.config.getfloat('Regularization', 'target_surface')
        if self.config.getboolean('Regularization', 'use_initial_surface'):
            # Use the current geometry's surface as the target instead.
            self.target_surface = self._compute_surface()
        if (self.mu > 0.0):
            self.is_active = True
        self.scale()
        # Placeholder expression updated each iteration via update().
        self.current_surface = fenics.Expression('val', degree=0, val=1.0)

    def compute_shape_derivative(self) -> ufl.Form:
        """Return the shape derivative form of the surface term.

        Inactive terms return a trivially-zero derivative so the caller can
        always sum forms uniformly.
        """
        if self.is_active:
            n = fenics.FacetNormal(self.mesh)
            # mu * (|Gamma| - target) * div_Gamma(V) integrated over ds.
            shape_form = (((fenics.Constant(self.mu) * (self.current_surface - fenics.Constant(self.target_surface))) * t_div(self.test_vector_field, n)) * self.ds)
            return shape_form
        else:
            return fenics.derivative((fenics.Constant(0.0) * self.dx), fenics.SpatialCoordinate(self.mesh), self.test_vector_field)

    def update(self) -> None:
        """Refresh the cached surface value after a geometry change."""
        self.current_surface.val = self._compute_surface()

    def compute_objective(self) -> float:
        """Return mu/2 * (|Gamma| - target)^2, or 0.0 when inactive."""
        value = 0.0
        if self.is_active:
            surface = self._compute_surface()
            value = ((0.5 * self.mu) * pow((surface - self.target_surface), 2))
        return value

    def scale(self) -> None:
        """Normalize mu so the term's initial value is O(1) (relative scaling).

        On restarts the pre-computed factor is taken from the temp dict
        instead of being recomputed.
        """
        if (self.use_relative_scaling and self.is_active):
            if (not self.db.parameter_db.temp_dict):
                surface = fenics.assemble((fenics.Constant(1.0) * self.ds))
                value = (0.5 * pow((surface - self.target_surface), 2))
                if (abs(value) < 1e-15):
                    # Initial value is (numerically) zero; scaling would blow up.
                    _loggers.info('The surface regularization vanishes for the initial iteration. Multiplying this term with the factor you supplied as weight.')
                else:
                    self.mu /= abs(value)
            else:
                self.mu = self.db.parameter_db.temp_dict['Regularization']['mu_surface']

    def _compute_surface(self) -> float:
        """Assemble and return the current boundary surface area."""
        surface: float = fenics.assemble((fenics.Constant(1) * self.ds))
        return surface
class TestToys(unittest.TestCase):
    """Sanity-check shapes and metadata of the toy graph loaders."""

    def test_undirected(self):
        adj = house()
        self.assertEqual(adj.shape, (5, 5))
        g = house(metadata=True)
        self.assertEqual(g.position.shape, (5, 2))
        adj = bow_tie()
        self.assertEqual(adj.shape, (5, 5))
        g = bow_tie(metadata=True)
        self.assertEqual(g.position.shape, (5, 2))
        g = karate_club(True)
        self.assertEqual(g.adjacency.shape, (34, 34))
        self.assertEqual(len(g.labels), 34)
        g = miserables(True)
        self.assertEqual(g.adjacency.shape, (77, 77))
        self.assertEqual(len(g.names), 77)

    def test_directed(self):
        # Adjacency-only form, then the metadata form, for each loader.
        for loader, size in ((painters, 14), (art_philo_science, 30)):
            self.assertEqual(loader().shape, (size, size))
        for loader, size in ((painters, 14), (art_philo_science, 30)):
            g = loader(True)
            self.assertEqual(g.adjacency.shape, (size, size))
            self.assertEqual(len(g.names), size)

    def test_bipartite(self):
        g = star_wars(True)
        self.assertEqual(g.biadjacency.shape, (4, 3))
        self.assertEqual(len(g.names), 4)
        self.assertEqual(len(g.names_col), 3)
        g = movie_actor(True)
        self.assertEqual(g.biadjacency.shape, (15, 16))
        self.assertEqual(len(g.names), 15)
        self.assertEqual(len(g.names_col), 16)
        g = hourglass(True)
        self.assertEqual(g.biadjacency.shape, (2, 2))
        g = art_philo_science(True)
        self.assertEqual(g.biadjacency.shape, (30, 11))
        self.assertEqual(len(g.names), 30)
        self.assertEqual(len(g.names_col), 11)
class TestFunctions(DebugTestCase):
    """Exercise the cygdb convenience functions inside a gdb session."""

    def test_functions(self):
        self.break_and_run('c = 2')
        # $cy_cname maps a Cython-level name to its generated C name.
        out = gdb.execute('print $cy_cname("b")', to_string=True)
        assert re.search('__pyx_.*b', out), out
        # $cy_lineno reports the current Cython source line.
        out = gdb.execute('print $cy_lineno()', to_string=True)
        expected_lineno = test_libcython.source_to_lineno['c = 2']
        assert (str(expected_lineno) in out), (expected_lineno, out)
        # $cy_cvalue evaluates a Cython variable's current value.
        out = gdb.execute('print $cy_cvalue("b")', to_string=True)
        assert ('= 1' in out)
class heapdict(MutableMapping):
    """Mutable mapping backed by a binary min-heap keyed on values.

    Each entry is stored as a 3-element wrapper list [value, key, heap_pos];
    self.d maps key -> wrapper and self.heap holds the wrappers in heap
    order, with wrapper[2] kept in sync with the wrapper's heap index.

    NOTE(review): the bare '(dict.clear)'-style expressions before several
    methods look like stripped '@doc(dict.xxx)' decorators from the original
    source — confirm. As written they are harmless no-op expressions.
    """
    __marker = object()

    def __init__(self, *args, **kw):
        self.heap = []
        self.d = {}
        self.update(*args, **kw)
    (dict.clear)
    def clear(self):
        del self.heap[:]
        self.d.clear()
    (dict.__setitem__)
    def __setitem__(self, key, value):
        # Replace semantics: drop any existing entry first.
        if (key in self.d):
            self.pop(key)
        # len(self) before the append equals the wrapper's final heap index.
        wrapper = [value, key, len(self)]
        self.d[key] = wrapper
        self.heap.append(wrapper)
        # Restore the heap invariant by sifting the new entry up.
        self._decrease_key((len(self.heap) - 1))

    def _min_heapify(self, i):
        # Sift the entry at index i down until both children are >= it.
        n = len(self.heap)
        h = self.heap
        while True:
            l = ((i << 1) + 1)
            r = ((i + 1) << 1)
            if ((l < n) and (h[l][0] < h[i][0])):
                low = l
            else:
                low = i
            if ((r < n) and (h[r][0] < h[low][0])):
                low = r
            if (low == i):
                break
            self._swap(i, low)
            i = low

    def _decrease_key(self, i):
        # Sift the entry at index i up while its parent is not smaller.
        while i:
            parent = ((i - 1) >> 1)
            if (self.heap[parent][0] < self.heap[i][0]):
                break
            self._swap(i, parent)
            i = parent

    def _swap(self, i, j):
        # Swap two heap slots and fix the wrappers' stored positions.
        h = self.heap
        (h[i], h[j]) = (h[j], h[i])
        h[i][2] = i
        h[j][2] = j
    (dict.__delitem__)
    def __delitem__(self, key):
        wrapper = self.d[key]
        # Force the wrapper up to the root (ignoring ordering), then pop it.
        while wrapper[2]:
            parentpos = ((wrapper[2] - 1) >> 1)
            parent = self.heap[parentpos]
            self._swap(wrapper[2], parent[2])
        self.popitem()
    (dict.__getitem__)
    def __getitem__(self, key):
        return self.d[key][0]
    (dict.__iter__)
    def __iter__(self):
        return iter(self.d)

    def popitem(self):
        """Remove and return the (key, value) pair with the smallest value."""
        wrapper = self.heap[0]
        if (len(self.heap) == 1):
            self.heap.pop()
        else:
            # Move the last entry to the root and sift it down.
            self.heap[0] = self.heap.pop()
            self.heap[0][2] = 0
            self._min_heapify(0)
        del self.d[wrapper[1]]
        return (wrapper[1], wrapper[0])
    (dict.__len__)
    def __len__(self):
        return len(self.d)

    def peekitem(self):
        """Return (key, value) with the smallest value without removing it."""
        return (self.heap[0][1], self.heap[0][0])

    def __str__(self):
        return str(self.d)
class SelectionMethod():
    """Abstract model-selection strategy over sweep run records.

    Subclasses implement run_acc; the base class ranks hparam settings by
    validation accuracy and reports the best one's test accuracy.
    """

    def __init__(self):
        # Not instantiable: this class is a namespace for classmethod-style use.
        raise TypeError

    def run_acc(self, run_records):
        # Subclass hook: return an accuracy dict (with at least 'val_acc'
        # and 'test_acc') for one group of run records, or None.
        raise NotImplementedError

    def hparams_accs(self, records):
        """Group records by hparams seed, score each group, and sort by
        validation accuracy, best first. Groups scoring None are dropped."""
        return records.group('args.hparams_seed').map((lambda _, run_records: (self.run_acc(run_records), run_records))).filter((lambda x: (x[0] is not None))).sorted(key=(lambda x: x[0]['val_acc']))[::(- 1)]

    def sweep_acc(self, records, return_extra=False):
        """Return the test accuracy of the best (by val acc) hparam setting.

        With return_extra, also unpack any 'ext_acc' values into the tuple.
        Returns None when no group produced a score.
        """
        _hparams_accs = self.hparams_accs(records)
        if len(_hparams_accs):
            if (return_extra and ('ext_acc' in _hparams_accs[0][0])):
                return (_hparams_accs[0][0]['test_acc'], *_hparams_accs[0][0]['ext_acc'])
            else:
                return _hparams_accs[0][0]['test_acc']
        else:
            return None

    def best_record(self, records):
        """Return the single record of the best hparam setting, or None."""
        _hparams_accs = self.hparams_accs(records)
        if len(_hparams_accs):
            # The best group is expected to contain exactly one record here.
            assert (len(_hparams_accs[0][1]) == 1)
            return _hparams_accs[0][1][0]
        else:
            return None
def _exact_1_norm(A):
    """Exactly compute the induced 1-norm of A (max absolute column sum)."""
    if scipy.sparse.issparse(A):
        # scipy sparse: column sums come back as a (1, n) matrix; scan flat.
        return max(abs(A).sum(axis=0).flat)
    if is_pydata_spmatrix(A):
        return max(abs(A).sum(axis=0))
    # Dense ndarray: delegate to numpy's matrix 1-norm.
    return np.linalg.norm(A, 1)
class BackboneUtilsTester(unittest.TestCase):
    """Tests for the backbone out_features / out_indices utilities."""

    def test_get_aligned_output_features_output_indices(self):
        stages = ['a', 'b', 'c']
        # Neither given: defaults to the last stage.
        feats, idxs = get_aligned_output_features_output_indices(None, None, stages)
        self.assertEqual(feats, ['c'])
        self.assertEqual(idxs, [2])
        # Features given: indices are derived from them.
        feats, idxs = get_aligned_output_features_output_indices(['a', 'c'], None, stages)
        self.assertEqual(feats, ['a', 'c'])
        self.assertEqual(idxs, [0, 2])
        # Indices given: features are derived from them.
        feats, idxs = get_aligned_output_features_output_indices(None, [0, 2], stages)
        self.assertEqual(feats, ['a', 'c'])
        self.assertEqual(idxs, [0, 2])
        # Negative indices are resolved for features but preserved as-is.
        feats, idxs = get_aligned_output_features_output_indices(None, [-3, -1], stages)
        self.assertEqual(feats, ['a', 'c'])
        self.assertEqual(idxs, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Each tuple is an invalid (out_features, out_indices, stage_names) combo.
        invalid_combos = [
            (['a', 'b'], (0, 1), None),
            (('a', 'b'), (0, 1), ['a', 'b']),
            (['a', 'b'], (0, 1), ['a']),
            (None, 0, ['a', 'b']),
            (None, (0, 1), ['a']),
            (['a', 'b'], (0,), ['a', 'b', 'c']),
            (['a', 'b'], (0, 2), ['a', 'b', 'c']),
            (['b', 'a'], (0, 1), ['a', 'b']),
        ]
        for feats, idxs, names in invalid_combos:
            with self.assertRaises(ValueError):
                verify_out_features_out_indices(feats, idxs, names)
        # A consistent combination (including a negative index) is accepted.
        verify_out_features_out_indices(['a', 'b', 'd'], (0, 1, -1), ['a', 'b', 'c', 'd'])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()
        backbone.stage_names = ['a', 'b', 'c']
        backbone._out_features = ['a', 'c']
        backbone._out_indices = [0, 2]
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Assigning out_features re-derives out_indices.
        backbone.out_features = ['a', 'b']
        self.assertEqual(backbone.out_features, ['a', 'b'])
        self.assertEqual(backbone.out_indices, [0, 1])
        # Assigning out_indices re-derives out_features.
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [-3, -1])
def seed_torch(seed=1029):
    """Seed every RNG in use (hash, python, numpy, torch CPU and CUDA) and
    configure cuDNN for deterministic, reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    for torch_seeder in (torch.manual_seed,
                         torch.cuda.manual_seed,
                         torch.cuda.manual_seed_all):
        torch_seeder(seed)
    # Trade kernel-autotuning speed for bitwise determinism.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def get_evaluator(cfg, dataset_name, output_folder=None):
    """Build the evaluator(s) matching the dataset's registered evaluator_type.

    Args:
        cfg: config node; only OUTPUT_DIR is read here.
        dataset_name: name registered in MetadataCatalog.
        output_folder: where evaluators write results; defaults to
            <OUTPUT_DIR>/inference.

    Returns:
        A single evaluator, or a DatasetEvaluators wrapping several
        (e.g. 'coco_panoptic_seg' accumulates three evaluators).

    Raises:
        NotImplementedError: when no evaluator is known for the type.
    """
    if (output_folder is None):
        output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    # Accumulating types: panoptic may combine sem_seg + coco + panoptic.
    if (evaluator_type in ['sem_seg', 'coco_panoptic_seg']):
        evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
    if (evaluator_type in ['coco', 'coco_panoptic_seg']):
        evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
    if (evaluator_type == 'coco_panoptic_seg'):
        evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
    # Exclusive types: return immediately.
    if (evaluator_type == 'cityscapes_instance'):
        assert (torch.cuda.device_count() >= comm.get_rank()), 'CityscapesEvaluator currently do not work with multiple machines.'
        return CityscapesInstanceEvaluator(dataset_name)
    if (evaluator_type == 'cityscapes_sem_seg'):
        assert (torch.cuda.device_count() >= comm.get_rank()), 'CityscapesEvaluator currently do not work with multiple machines.'
        return CityscapesSemSegEvaluator(dataset_name)
    if (evaluator_type == 'pascal_voc'):
        return PascalVOCDetectionEvaluator(dataset_name)
    if (evaluator_type == 'lvis'):
        return LVISEvaluator(dataset_name, cfg, True, output_folder)
    if (len(evaluator_list) == 0):
        raise NotImplementedError('no Evaluator for the dataset {} with the type {}'.format(dataset_name, evaluator_type))
    if (len(evaluator_list) == 1):
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Standard train/val loop for a re-ID classifier.

    Relies on module-level globals: dataloaders, dataset_sizes, opt
    (batchsize, PCB flags), use_gpu, fp16, version, y_loss, y_err, and the
    save_network / draw_curve helpers — TODO confirm they are defined in
    this module.

    Returns the model loaded with the weights from the last 'val' phase.
    """
    since = time.time()
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, (num_epochs - 1)))
        print(('-' * 10))
        for phase in ['train', 'val']:
            if (phase == 'train'):
                scheduler.step()
                model.train(True)  # training mode (dropout/batchnorm active)
            else:
                model.train(False)  # evaluation mode
            running_loss = 0.0
            running_corrects = 0.0
            for data in dataloaders[phase]:
                (inputs, labels) = data
                (now_batch_size, c, h, w) = inputs.shape
                # Skip ragged final batches so batch statistics stay stable.
                if (now_batch_size < opt.batchsize):
                    continue
                if use_gpu:
                    inputs = Variable(inputs.cuda().detach())
                    labels = Variable(labels.cuda().detach())
                else:
                    (inputs, labels) = (Variable(inputs), Variable(labels))
                if fp16:
                    inputs = inputs.half()
                optimizer.zero_grad()
                # No autograd graph needed during validation.
                if (phase == 'val'):
                    with torch.no_grad():
                        outputs = model(inputs)
                else:
                    outputs = model(inputs)
                if (not opt.PCB):
                    (_, preds) = torch.max(outputs.data, 1)
                    loss = criterion(outputs, labels)
                else:
                    # PCB head: six part classifiers; predictions use the
                    # summed softmax scores, the loss sums per-part losses.
                    part = {}
                    sm = nn.Softmax(dim=1)
                    num_part = 6
                    for i in range(num_part):
                        part[i] = outputs[i]
                    score = (((((sm(part[0]) + sm(part[1])) + sm(part[2])) + sm(part[3])) + sm(part[4])) + sm(part[5]))
                    (_, preds) = torch.max(score.data, 1)
                    loss = criterion(part[0], labels)
                    for i in range((num_part - 1)):
                        loss += criterion(part[(i + 1)], labels)
                if (phase == 'train'):
                    if fp16:
                        # apex-style optimizer owns the backward pass.
                        optimizer.backward(loss)
                    else:
                        loss.backward()
                    optimizer.step()
                # PyTorch >= 0.4 exposes loss.item(); older versions use .data[0].
                if ((int(version[0]) > 0) or (int(version[2]) > 3)):
                    running_loss += (loss.item() * now_batch_size)
                else:
                    running_loss += (loss.data[0] * now_batch_size)
                running_corrects += float(torch.sum((preds == labels.data)))
            epoch_loss = (running_loss / dataset_sizes[phase])
            epoch_acc = (running_corrects / dataset_sizes[phase])
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # Record curves for plotting.
            y_loss[phase].append(epoch_loss)
            y_err[phase].append((1.0 - epoch_acc))
            if (phase == 'val'):
                last_model_wts = model.state_dict()
                # Checkpoint every 10th epoch.
                if ((epoch % 10) == 9):
                    save_network(model, epoch)
                draw_curve(epoch)
        time_elapsed = (time.time() - since)
        print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
        print()
    time_elapsed = (time.time() - since)
    print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
    # Restore the weights snapshotted at the end of the last val phase.
    model.load_state_dict(last_model_wts)
    save_network(model, 'last')
    return model
class BertAbsConfig(PretrainedConfig):
    """Configuration for the BertAbs abstractive-summarization model.

    Stores the geometry of the BERT-based encoder (enc_*) and of the
    transformer decoder (dec_*); extra kwargs are forwarded to
    PretrainedConfig.
    """

    pretrained_config_archive_map = BERTABS_FINETUNED_CONFIG_MAP

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super(BertAbsConfig, self).__init__(**kwargs)
        # Shared vocabulary / positional limits.
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        # Encoder geometry.
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        # Decoder geometry.
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
class TestNumPyFunctions(object):
    """Tests for numpy's __array_function__ protocol and module attribution.

    NOTE(review): the bare '_array_function' expressions before several tests
    and the bare '(np.sum)' before 'def _' look like stripped decorators
    (presumably a requires-array-function marker and '@implements(np.sum)')
    — confirm against the original numpy test source. As written they are
    plain name/attribute expressions.
    """

    def test_set_module(self):
        # Public callables should report their canonical module.
        assert_equal(np.sum.__module__, 'numpy')
        assert_equal(np.char.equal.__module__, 'numpy.char')
        assert_equal(np.fft.fft.__module__, 'numpy.fft')
        assert_equal(np.linalg.solve.__module__, 'numpy.linalg')

    def test_inspect_sum(self):
        # np.sum must expose a real, introspectable signature.
        signature = inspect.signature(np.sum)
        assert_(('axis' in signature.parameters))
    _array_function
    def test_override_sum(self):
        # A duck type registering an np.sum implementation overrides dispatch.
        (MyArray, implements) = _new_duck_type_and_implements()
        (np.sum)
        def _(array):
            return 'yes'
        assert_equal(np.sum(MyArray()), 'yes')
    _array_function
    def test_sum_on_mock_array(self):
        # Proxy forwards protocol calls to a mock so the dispatch call
        # signature can be asserted exactly.
        class ArrayProxy():
            def __init__(self, value):
                self.value = value

            def __array_function__(self, *args, **kwargs):
                return self.value.__array_function__(*args, **kwargs)

            def __array__(self, *args, **kwargs):
                return self.value.__array__(*args, **kwargs)
        proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
        proxy.value.__array_function__.return_value = 1
        result = np.sum(proxy)
        assert_equal(result, 1)
        proxy.value.__array_function__.assert_called_once_with(np.sum, (ArrayProxy,), (proxy,), {})
        # Coercion via __array__ must not happen when the protocol handles it.
        proxy.value.__array__.assert_not_called()
    _array_function
    def test_sum_forwarding_implementation(self):
        # An ndarray subclass deferring to super() still hits its own .sum.
        class MyArray(np.ndarray):
            def sum(self, axis, out):
                return 'summed'

            def __array_function__(self, func, types, args, kwargs):
                return super().__array_function__(func, types, args, kwargs)
        array = np.array(1).view(MyArray)
        assert_equal(np.sum(array), 'summed')
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False, output_loading_info=False):
    """Load TF2 weight variables into a PyTorch model.

    Builds a name -> numpy array state dict from *tf_weights* and delegates
    the actual loading to load_tf2_state_dict_in_pytorch_model.

    Raises:
        ImportError: when TensorFlow or PyTorch is not installed.
    """
    try:
        # Both frameworks are needed for the conversion.
        import tensorflow as tf  # noqa: F401
        import torch  # noqa: F401
    except ImportError:
        logger.error('Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see and for installation instructions.')
        raise
    tf_state_dict = {}
    for tf_weight in tf_weights:
        tf_state_dict[tf_weight.name] = tf_weight.numpy()
    return load_tf2_state_dict_in_pytorch_model(
        pt_model, tf_state_dict,
        allow_missing_keys=allow_missing_keys,
        output_loading_info=output_loading_info)
class Speech2TextForConditionalGeneration():
    """Import-time placeholder: raises an informative error via
    requires_pytorch when PyTorch is unavailable."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        # Mirrors the real API surface so callers fail with a clear message.
        requires_pytorch(self)
class AKICodeLabeler(OMOPConceptCodeLabeler):
    """Labeler keyed on OMOP concept codes for acute kidney injury (AKI).

    NOTE(review): the code list looks truncated by data extraction — each
    entry is a bare 'SNOMED/' prefix with the concept id missing; recover
    the actual SNOMED codes from the original source.
    """
    original_omop_concept_codes = ['SNOMED/', 'SNOMED/', 'SNOMED/']
# NOTE(review): '_kl(Beta, Beta)' is a bare call whose result is discarded —
# it looks like a stripped decorator (presumably '@register_kl(Beta, Beta)'
# from torch.distributions.kl); confirm against the original source.
_kl(Beta, Beta)
def _kl_beta_beta(p, q):
    """Closed-form KL(p || q) for two Beta distributions.

    Uses the standard identity in terms of log-gamma and digamma of the
    concentration parameters; returns an elementwise tensor result.
    """
    sum_params_p = (p.concentration1 + p.concentration0)
    sum_params_q = (q.concentration1 + q.concentration0)
    # log B(q) - log B(p), split across t1/t2 (B is the Beta function).
    t1 = ((q.concentration1.lgamma() + q.concentration0.lgamma()) + sum_params_p.lgamma())
    t2 = ((p.concentration1.lgamma() + p.concentration0.lgamma()) + sum_params_q.lgamma())
    # Expectation terms involving digamma of p's parameters.
    t3 = ((p.concentration1 - q.concentration1) * torch.digamma(p.concentration1))
    t4 = ((p.concentration0 - q.concentration0) * torch.digamma(p.concentration0))
    t5 = ((sum_params_q - sum_params_p) * torch.digamma(sum_params_p))
    return ((((t1 - t2) + t3) + t4) + t5)
class FlaxAlbertForTokenClassification(metaclass=DummyObject):
    """Import-time placeholder used when Flax is not installed.

    Instantiating raises through ``requires_backends``; the ``DummyObject``
    metaclass presumably intercepts class-level access too -- confirm
    against the DummyObject implementation.
    """

    # Backend(s) whose absence this dummy reports.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
@require_torch
@require_retrieval
@require_sentencepiece
@require_tokenizers
@require_torch_non_multi_gpu
class RagModelIntegrationTests(unittest.TestCase):
    """Integration tests for RAG sequence/token models against pinned outputs.

    NOTE(review): every decorator in this class was garbled during
    extraction (bare residue such as ``_torch`` and ``_property``). They
    are restored following the transformers RAG test conventions
    (``require_*`` skip markers, ``cached_property`` model fixtures) --
    confirm against upstream. ``test_data_questions`` is restored as a
    ``@property`` because the methods below read it as an attribute
    (``self.test_data_questions``).
    """

    @cached_property
    def sequence_model(self):
        # Built lazily and cached for the lifetime of the test instance.
        return RagSequenceForGeneration.from_pretrained_question_encoder_generator(
            'facebook/dpr-question_encoder-single-nq-base', 'facebook/bart-large-cnn'
        ).to(torch_device).eval()

    @cached_property
    def token_model(self):
        return RagTokenForGeneration.from_pretrained_question_encoder_generator(
            'facebook/dpr-question_encoder-single-nq-base', 'facebook/bart-large-cnn'
        ).to(torch_device).eval()

    def get_rag_config(self):
        """Build a RagConfig pairing a DPR question encoder with a BART generator."""
        question_encoder_config = AutoConfig.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        generator_config = AutoConfig.from_pretrained('facebook/bart-large-cnn')
        return RagConfig.from_question_encoder_generator_configs(
            question_encoder_config,
            generator_config,
            bos_token_id=0,
            decoder_start_token_id=2,
            eos_token_id=2,
            is_encoder_decoder=True,
            pad_token_id=1,
            vocab_size=50264,
            title_sep=' / ',
            doc_sep=' // ',
            n_docs=5,
            max_combined_length=300,
            dataset='wiki_dpr',
            dataset_split='train',
            index_name='exact',
            index_path=None,
            use_dummy_dataset=True,
            retrieval_vector_size=768,
            retrieval_batch_size=8,
        )

    def test_rag_sequence_inference(self):
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
            'facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(
            rag_config,
            question_encoder_tokenizer=rag_question_encoder_tokenizer,
            generator_tokenizer=rag_decoder_tokenizer,
        )
        rag_sequence = self.sequence_model
        rag_sequence.set_retriever(rag_retriever)

        input_ids = rag_question_encoder_tokenizer(
            'who sings does he love me with reba', return_tensors='pt').input_ids
        decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='pt').input_ids
        input_ids = input_ids.to(torch_device)
        decoder_input_ids = decoder_input_ids.to(torch_device)

        with torch.no_grad():
            output = rag_sequence(input_ids, labels=decoder_input_ids)

        # (n_docs, target_len, vocab_size) -- pinned reference values below.
        expected_shape = torch.Size([5, 5, 50264])
        self.assertEqual(output.logits.shape, expected_shape)
        expected_doc_scores = torch.tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]).to(torch_device)
        _assert_tensors_equal(expected_doc_scores, output.doc_scores, atol=TOLERANCE)
        expected_loss = torch.tensor([36.7368]).to(torch_device)
        _assert_tensors_equal(expected_loss, output.loss, atol=TOLERANCE)

    def test_rag_token_inference(self):
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
            'facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(
            rag_config,
            question_encoder_tokenizer=rag_question_encoder_tokenizer,
            generator_tokenizer=rag_decoder_tokenizer,
        )
        rag_token = self.token_model
        rag_token.set_retriever(rag_retriever)

        input_ids = rag_question_encoder_tokenizer(
            'who sings does he love me with reba', return_tensors='pt').input_ids
        decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='pt').input_ids
        input_ids = input_ids.to(torch_device)
        decoder_input_ids = decoder_input_ids.to(torch_device)

        with torch.no_grad():
            output = rag_token(input_ids, labels=decoder_input_ids)

        expected_shape = torch.Size([5, 5, 50264])
        self.assertEqual(output.logits.shape, expected_shape)
        expected_doc_scores = torch.tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]).to(torch_device)
        _assert_tensors_equal(expected_doc_scores, output.doc_scores, atol=TOLERANCE)
        expected_loss = torch.tensor([36.3557]).to(torch_device)
        _assert_tensors_equal(expected_loss, output.loss, atol=TOLERANCE)

    def test_rag_token_generate_beam(self):
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
            'facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(
            rag_config,
            question_encoder_tokenizer=rag_question_encoder_tokenizer,
            generator_tokenizer=rag_decoder_tokenizer,
        )
        rag_token = self.token_model
        rag_token.set_retriever(rag_retriever)

        input_ids = rag_question_encoder_tokenizer(
            'who sings does he love me with reba', return_tensors='pt').input_ids
        input_ids = input_ids.to(torch_device)

        output_ids = rag_token.generate(
            input_ids,
            decoder_start_token_id=rag_token.generator.config.decoder_start_token_id,
            num_beams=2,
            num_return_sequences=2,
        )
        output_text_1 = rag_decoder_tokenizer.decode(output_ids[0], skip_special_tokens=True)
        output_text_2 = rag_decoder_tokenizer.decode(output_ids[1], skip_special_tokens=True)

        EXPECTED_OUTPUT_TEXT_1 = '"She\'s My Kind of Girl'
        EXPECTED_OUTPUT_TEXT_2 = '"She\'s My Kind of Love'
        self.assertEqual(output_text_1, EXPECTED_OUTPUT_TEXT_1)
        self.assertEqual(output_text_2, EXPECTED_OUTPUT_TEXT_2)

    def test_rag_sequence_generate_beam(self):
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
            'facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(
            rag_config,
            question_encoder_tokenizer=rag_question_encoder_tokenizer,
            generator_tokenizer=rag_decoder_tokenizer,
        )
        rag_sequence = self.sequence_model
        rag_sequence.set_retriever(rag_retriever)

        input_ids = rag_question_encoder_tokenizer(
            'who sings does he love me with reba', return_tensors='pt').input_ids
        input_ids = input_ids.to(torch_device)

        output_ids = rag_sequence.generate(
            input_ids,
            decoder_start_token_id=rag_sequence.generator.config.decoder_start_token_id,
            num_beams=2,
            num_return_sequences=2,
        )
        output_text_1 = rag_decoder_tokenizer.decode(output_ids[0], skip_special_tokens=True)
        output_text_2 = rag_decoder_tokenizer.decode(output_ids[1], skip_special_tokens=True)

        EXPECTED_OUTPUT_TEXT_1 = '"She\'s My Kind of Girl" was released through Epic Records in Japan in March 1972, giving the duo a Top 10 hit. Two more singles were released in Japan, "En Carousel" and "Love Has Its Ways" Ulvaeus and Andersson persevered with their songwriting and experimented with new sounds and vocal arrangements.'
        EXPECTED_OUTPUT_TEXT_2 = 'In September 2018, Bjorn Ulvaeus revealed that the two new songs, "I Still Have Faith In You" and "Don\'t Shut Me Down", would be released no earlier than March 2019. The two new tracks will feature in a TV special set to air later in the year.'
        self.assertEqual(output_text_1, EXPECTED_OUTPUT_TEXT_1)
        self.assertEqual(output_text_2, EXPECTED_OUTPUT_TEXT_2)

    @property
    def test_data_questions(self):
        # Shared question set for the batch-generation tests below.
        return [
            'who got the first nobel prize in physics',
            'when is the next deadpool movie being released',
            'which mode is used for short wave broadcast service',
            'who is the owner of reading football club',
            'when is the next scandal episode coming out',
            'when is the last time the philadelphia won the superbowl',
            'what is the most current adobe flash player version',
            'how many episodes are there in dragon ball z',
        ]

    def test_rag_sequence_generate_batch(self):
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-sequence-nq')
        retriever = RagRetriever.from_pretrained(
            'facebook/rag-sequence-nq', index_name='exact', use_dummy_dataset=True)
        rag_sequence = RagSequenceForGeneration.from_pretrained(
            'facebook/rag-sequence-nq', retriever=retriever).to(torch_device)

        input_dict = tokenizer(self.test_data_questions, return_tensors='pt', padding=True, truncation=True)
        input_ids = input_dict.input_ids.to(torch_device)
        attention_mask = input_dict.attention_mask.to(torch_device)

        output_ids = rag_sequence.generate(input_ids, attention_mask=attention_mask)
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)

        EXPECTED_OUTPUTS = [' albert einstein', ' june 22, 2018', ' amplitude modulation', ' tim besley ( chairman )', ' june 20, 2018', ' 1980', ' 7.0', ' 8']
        self.assertListEqual(outputs, EXPECTED_OUTPUTS)

    def test_rag_sequence_generate_batch_from_context_input_ids(self):
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-sequence-nq')
        retriever = RagRetriever.from_pretrained(
            'facebook/rag-sequence-nq', index_name='exact', use_dummy_dataset=True)
        rag_sequence = RagSequenceForGeneration.from_pretrained(
            'facebook/rag-sequence-nq', retriever=retriever).to(torch_device)

        input_dict = tokenizer(self.test_data_questions, return_tensors='pt', padding=True, truncation=True)
        input_ids = input_dict.input_ids.to(torch_device)
        attention_mask = input_dict.attention_mask.to(torch_device)

        # Run retrieval manually and feed the retrieved contexts directly.
        question_hidden_states = rag_sequence.question_encoder(input_ids, attention_mask=attention_mask)[0]
        docs_dict = retriever(
            input_ids.cpu().detach().numpy(),
            question_hidden_states.cpu().detach().numpy(),
            return_tensors='pt',
        )
        doc_scores = torch.bmm(
            question_hidden_states.unsqueeze(1),
            docs_dict['retrieved_doc_embeds'].to(torch_device).float().transpose(1, 2),
        ).squeeze(1)

        output_ids = rag_sequence.generate(
            context_input_ids=docs_dict['context_input_ids'].to(torch_device),
            context_attention_mask=docs_dict['context_attention_mask'].to(torch_device),
            doc_scores=doc_scores.to(torch_device),
            do_deduplication=True,
        )
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)

        EXPECTED_OUTPUTS = [' albert einstein', ' june 22, 2018', ' amplitude modulation', ' tim besley ( chairman )', ' june 20, 2018', ' 1980', ' 7.0', ' 8']
        self.assertListEqual(outputs, EXPECTED_OUTPUTS)

    def test_rag_token_generate_batch(self):
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq')
        retriever = RagRetriever.from_pretrained(
            'facebook/rag-token-nq', index_name='exact', use_dummy_dataset=True)
        rag_token = RagTokenForGeneration.from_pretrained(
            'facebook/rag-token-nq', retriever=retriever).to(torch_device)

        if torch_device == 'cuda':
            # Half precision keeps the GPU memory footprint manageable.
            rag_token.half()

        input_dict = tokenizer(self.test_data_questions, return_tensors='pt', padding=True, truncation=True)
        input_ids = input_dict.input_ids.to(torch_device)
        attention_mask = input_dict.attention_mask.to(torch_device)

        output_ids = rag_token.generate(input_ids, attention_mask=attention_mask)
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)

        EXPECTED_OUTPUTS = [' albert einstein', ' september 22, 2017', ' amplitude modulation', ' stefan persson', ' april 20, 2018', ' the 1970s', ' 7.1. 2', ' 13']
        self.assertListEqual(outputs, EXPECTED_OUTPUTS)
class BaseCleaningSampler(BaseSampler):
    """Base class for "cleaning" under-samplers.

    Cleaning samplers remove samples judged noisy/borderline rather than
    resampling classes to explicit target counts, so per-class sample
    counts are not guaranteed to be equal after resampling.
    """

    # Marker consumed by the base machinery to select the resampling logic.
    _sampling_type = 'clean-sampling'

    # Docstring fragment substituted into subclass docstrings; this is
    # runtime data used for documentation injection -- do not edit the text.
    _sampling_strategy_docstring = "sampling_strategy : str, list or callable\n    Sampling information to sample the data set.\n\n    - When ``str``, specify the class targeted by the resampling. Note the\n      the number of samples will not be equal in each. Possible choices\n      are:\n\n        ``'majority'``: resample only the majority class;\n\n        ``'not minority'``: resample all classes but the minority class;\n\n        ``'not majority'``: resample all classes but the majority class;\n\n        ``'all'``: resample all classes;\n\n        ``'auto'``: equivalent to ``'not minority'``.\n\n    - When ``list``, the list contains the classes targeted by the\n      resampling.\n\n    - When callable, function taking ``y`` and returns a ``dict``. The keys\n      correspond to the targeted classes. The values correspond to the\n      desired number of samples for each class.\n    ".rstrip()

    # scikit-learn-style parameter validation constraints.
    _parameter_constraints: dict = {'sampling_strategy': [Interval(numbers.Real, 0, 1, closed='right'), StrOptions({'auto', 'majority', 'not minority', 'not majority', 'all'}), list, callable]}
def R_to_euler(R):
    """Convert a 3x3 rotation matrix to Euler angles in degrees.

    Returns ``[e3, e2, e1]`` (innermost angle first), where the angles are
    extracted from what appears to be a ZYX-style factorization -- confirm
    the convention against the callers.
    """
    tol = sys.float_info.epsilon * 10
    r00 = R[0, 0]
    r10 = R[1, 0]
    if abs(r00) < tol and abs(r10) < tol:
        # Degenerate (gimbal-lock) case: the first angle is unobservable,
        # so pin it to zero and recover the remaining two directly.
        eul1 = 0
        eul2 = m.atan2(-R[2, 0], r00)
        eul3 = m.atan2(-R[1, 2], R[1, 1])
    else:
        eul1 = m.atan2(r10, r00)
        sp = m.sin(eul1)
        cp = m.cos(eul1)
        eul2 = m.atan2(-R[2, 0], cp * r00 + sp * r10)
        eul3 = m.atan2(sp * R[0, 2] - cp * R[1, 2], cp * R[1, 1] - sp * R[0, 1])
    return [np.rad2deg(eul3), np.rad2deg(eul2), np.rad2deg(eul1)]
class IdentityHash:
    """Mapping-like object whose lookup returns the key unchanged.

    Drop-in stand-in wherever dict-style indexing is expected but no
    translation should occur: ``IdentityHash()[k] == k`` for any ``k``.
    """

    def __init__(self):
        # No state to initialize; kept for explicitness.
        pass

    def __getitem__(self, key):
        return key
def kullback_leibler_divergence(nk, qk, estimator='nsb'):
    """Estimate the KL divergence of the distribution behind counts ``nk``
    from the reference distribution ``qk``.

    Computed as estimated cross-entropy minus estimated entropy, using the
    requested entropy estimator (default ``'nsb'``).
    """
    est = as_estimator(estimator)
    counts = numpy.asarray(nk)
    n_bins = len(qk)
    # A single-bin distribution carries no information: KL is exactly zero.
    if n_bins == 1:
        return 0.0
    return cross_entropy(counts, qk) - est.fit(counts, k=n_bins).estimate_
class Block(nn.Module):
    """Xception-style residual block of depthwise-separable convolutions.

    The residual branch stacks [ReLU -> SeparableConv2d -> BN] units; a 1x1
    projection shortcut is added when the block changes channel count or
    stride, otherwise the identity is used.

    NOTE(review): ``nn.SynchronizedBatchNorm2d`` is not part of stock
    ``torch.nn``; presumably patched onto ``nn`` elsewhere -- confirm.
    """

    def __init__(self, in_filters, out_filters, reps, strides=1, start_with_relu=True, grow_first=True):
        super(Block, self).__init__()
        # Projection shortcut only when shapes differ between input/output.
        if ((out_filters != in_filters) or (strides != 1)):
            self.skip = nn.Conv2d(in_filters, out_filters, 1, stride=strides, bias=False)
            self.skipbn = nn.SynchronizedBatchNorm2d(out_filters)
        else:
            self.skip = None
        # NOTE(review): this single inplace ReLU instance is appended to
        # ``rep`` multiple times, so the Sequential shares one module
        # across positions -- presumably intentional (stateless), confirm.
        self.relu = nn.ReLU(inplace=True)
        rep = []
        filters = in_filters
        if grow_first:
            # Grow channels in the first separable conv of the stack.
            rep.append(self.relu)
            rep.append(SeparableConv2d(in_filters, out_filters, 3, stride=1, padding=1, bias=False))
            rep.append(nn.SynchronizedBatchNorm2d(out_filters))
            filters = out_filters
        for i in range((reps - 1)):
            rep.append(self.relu)
            rep.append(SeparableConv2d(filters, filters, 3, stride=1, padding=1, bias=False))
            rep.append(nn.SynchronizedBatchNorm2d(filters))
        if (not grow_first):
            # Channel growth deferred to the last separable conv instead.
            rep.append(self.relu)
            rep.append(SeparableConv2d(in_filters, out_filters, 3, stride=1, padding=1, bias=False))
            rep.append(nn.SynchronizedBatchNorm2d(out_filters))
        if (not start_with_relu):
            rep = rep[1:]
        else:
            # The very first activation must not be inplace: it would
            # overwrite the input tensor still needed by the skip path.
            rep[0] = nn.ReLU(inplace=False)
        if (strides != 1):
            # Spatial downsampling at the end of the residual branch.
            rep.append(nn.MaxPool2d(3, strides, 1))
        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        # Residual branch plus (projected or identity) shortcut.
        x = self.rep(inp)
        if (self.skip is not None):
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp
        x += skip
        return x
class RepGhostNet(nn.Module):
    """RepGhostNet backbone built from RepGhost bottlenecks.

    Can act either as a feature extractor (returning the feature maps of
    ``out_stages``) or, with ``classifier=True``, as a full classification
    network with a global-pool + conv head + linear classifier.
    """

    # Per-stage bottleneck configs; each inner entry is
    # [kernel, expansion size, out channels, SE ratio, stride].
    cfgs = [[[3, 8, 16, 0, 1]], [[3, 24, 24, 0, 2]], [[3, 36, 24, 0, 1]], [[5, 36, 40, 0.25, 2]], [[5, 60, 40, 0.25, 1]], [[3, 120, 80, 0, 2]], [[3, 100, 80, 0, 1], [3, 120, 80, 0, 1], [3, 120, 80, 0, 1], [3, 240, 112, 0.25, 1], [3, 336, 112, 0.25, 1]], [[5, 336, 160, 0.25, 2]], [[5, 480, 160, 0, 1], [5, 480, 160, 0.25, 1], [5, 480, 160, 0, 1], [5, 480, 160, 0.25, 1]]]

    # NOTE(review): ``out_stages`` uses a mutable default list; harmless as
    # long as callers never mutate it, but worth fixing upstream.
    def __init__(self, subtype='repghostnet_0.5', out_stages=[5, 7, 9], output_stride=16, classifier=False, num_classes=1000, pretrained=False, backbone_path=None, shortcut=True, reparam=True, reparam_bn=True, reparam_identity=False, deploy=False):
        super(RepGhostNet, self).__init__()
        self.subtype = subtype
        self.out_stages = out_stages
        self.output_stride = output_stride
        self.classifier = classifier
        self.num_classes = num_classes
        self.pretrained = pretrained
        self.backbone_path = backbone_path
        # Width multiplier is parsed from the subtype name,
        # e.g. 'repghostnet_0.5' -> 0.5.
        width = float(self.subtype.split('_')[1])
        # Stem: 3x3 stride-2 conv -> BN -> ReLU.
        output_channel = _make_divisible((16 * width), 4)
        self.stem = nn.Sequential(nn.Conv2d(3, output_channel, 3, 2, 1, bias=False), nn.BatchNorm2d(output_channel), nn.ReLU(inplace=True))
        input_channel = output_channel
        # Build stage1..stage9 from the configs above.
        for (i, cfg) in enumerate(self.cfgs):
            layers = []
            for (k, exp_size, c, se_ratio, s) in cfg:
                output_channel = _make_divisible((c * width), 4)
                hidden_channel = _make_divisible((exp_size * width), 4)
                layers.append(RepGhostBottleneck(input_channel, hidden_channel, output_channel, k, s, se_ratio=se_ratio, shortcut=shortcut, reparam=reparam, reparam_bn=reparam_bn, reparam_identity=reparam_identity, deploy=deploy))
                input_channel = output_channel
            setattr(self, ('stage%d' % (i + 1)), nn.Sequential(*layers))
        # Final 1x1 conv doubling the last expansion width
        # (uses exp_size left over from the last config entry).
        output_channel = _make_divisible(((exp_size * width) * 2), 4)
        self.last_conv = ConvBnAct(input_channel, output_channel, 1)
        input_channel = output_channel
        if self.classifier:
            # Classification head: global pool -> 1x1 conv to 1280 -> ReLU
            # -> dropout -> linear classifier.
            self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
            self.conv_head = nn.Sequential(nn.Conv2d(input_channel, 1280, 1, 1, 0, bias=True), nn.ReLU(inplace=True))
            self.dropout = nn.Dropout2d(0.2)
            self.fc = nn.Linear(1280, num_classes)

    def convert_to_deploy(self):
        # Fuse the RepGhost reparameterized branches in place for inference.
        repghost_model_convert(self, do_copy=False)

    def forward(self, x):
        x = self.stem(x)
        output = []
        # Run stage1..stage9, collecting requested intermediate features.
        for i in range(1, 10):
            stage = getattr(self, 'stage{}'.format(i))
            x = stage(x)
            if ((i in self.out_stages) and (not self.classifier)):
                output.append(x)
        if self.classifier:
            x = self.last_conv(x)
            x = self.global_pool(x)
            x = self.conv_head(x)
            x = x.view(x.size(0), (- 1))
            x = self.dropout(x)
            x = self.fc(x)
            return x
        # Single requested stage -> bare tensor; several -> list of tensors.
        return (output if (len(self.out_stages) > 1) else output[0])
def read_fix_video(name_label_stride_setting_queue, offset_str, config):
    """Build a TF1 graph that reads a fixed-length clip of ``n_steps`` frames
    from a video, with optional second modality and horizontal mirroring.

    NOTE(review): uses ``xrange`` and TF1 graph-mode ops
    (``tf.to_int32``, ``tf.cond``), so this is Python 2 / TF 1.x code.
    """
    # Unpack the queue tuple: name, label, stride, frame offset,
    # crop offsets (ho, wo), and a mirroring flag (a boolean tensor).
    video_name = name_label_stride_setting_queue[0]
    label = name_label_stride_setting_queue[1]
    read_stride = name_label_stride_setting_queue[2]
    offset = name_label_stride_setting_queue[3]
    ho = name_label_stride_setting_queue[4]
    wo = name_label_stride_setting_queue[5]
    mirror = name_label_stride_setting_queue[6]
    if config['merge_label']:
        # One label for the whole clip.
        labels = label
    else:
        # Replicate the label per time step.
        labels = tf.fill([config['n_steps']], label)
    images = []
    images2 = []
    for i in xrange(config['n_steps']):
        # Frame index advances by floor(i * stride) from the base offset.
        image = one_image(config['modality'], tf.add(config['data_path1'], video_name), offset_str, (offset + tf.to_int32(tf.floor((i * read_stride)))), config['height'], config['width'], 1, ho, wo, config['crop_size'], config['crop_size'], config['crop_size'], config['preprocessing_fn_1'], False, config['length1'])
        # Graph-level conditional mirroring (tf.cond evaluates immediately
        # at graph-construction time, so the lambda capture is safe).
        image = tf.cond(mirror, (lambda : tf.image.flip_left_right(image)), (lambda : image))
        images.append(image)
        if (config['modality2'] is not None):
            # Optional second modality read with the same frame schedule.
            image = one_image(config['modality2'], tf.add(config['data_path2'], video_name), offset_str, (offset + tf.to_int32(tf.floor((i * read_stride)))), config['height'], config['width'], 1, ho, wo, config['crop_size'], config['crop_size'], config['crop_size'], config['preprocessing_fn_2'], False, config['length2'])
            image = tf.cond(mirror, (lambda : tf.image.flip_left_right(image)), (lambda : image))
            images2.append(image)
    images = tf.stack(images)
    if (config['modality2'] is not None):
        images2 = tf.stack(images2)
    # When modality2 is absent, images2 remains the (empty) Python list.
    return (images, images2, labels)
class ReshapeLayer(Layer):
    """Layer that reshapes its input to a given shape specification.

    Each entry of ``shape`` is either a positive int (fixed size), ``-1``
    (at most one; inferred from the total size), or a single-element list
    ``[i]`` meaning "copy dimension ``i`` of the input".
    """

    def __init__(self, incoming, shape, **kwargs):
        super(ReshapeLayer, self).__init__(incoming, **kwargs)
        shape = tuple(shape)
        # Validate each spec entry: int must be positive or -1; [i] must be
        # a single non-negative int referencing an input dimension.
        for s in shape:
            if isinstance(s, int):
                if ((s == 0) or (s < (- 1))):
                    raise ValueError('`shape` integers must be positive or -1')
            elif isinstance(s, list):
                if ((len(s) != 1) or (not isinstance(s[0], int)) or (s[0] < 0)):
                    raise ValueError('`shape` input references must be single-element lists of int >= 0')
            else:
                raise ValueError('`shape` must be a tuple of int and/or [int]')
        # At most one dimension may be inferred.
        if (sum(((s == (- 1)) for s in shape)) > 1):
            raise ValueError('`shape` cannot contain multiple -1')
        self.shape = shape

    def get_output_shape_for(self, input_shape, **kwargs):
        # Resolve [i] references, infer the -1 dimension from total size,
        # and check that total sizes agree where statically known.
        output_shape = list(self.shape)
        # Masked copies: referenced-but-unknown dims are treated as 1 so
        # the remaining size bookkeeping still works out.
        masked_input_shape = list(input_shape)
        masked_output_shape = list(output_shape)
        for (dim, o) in enumerate(output_shape):
            if isinstance(o, list):
                if (o[0] >= len(input_shape)):
                    raise ValueError(('specification contains [%d], but input shape has %d dimensions only' % (o[0], len(input_shape))))
                output_shape[dim] = input_shape[o[0]]
                masked_output_shape[dim] = input_shape[o[0]]
                if ((input_shape[o[0]] is None) and (masked_input_shape[o[0]] is None)):
                    # Unknown referenced dim: mask it out of both products.
                    masked_input_shape[o[0]] = 1
                    masked_output_shape[dim] = 1
        # Total sizes are None whenever any remaining dim is unknown.
        input_size = (None if any(((x is None) for x in masked_input_shape)) else np.prod(masked_input_shape))
        output_size = (None if any(((x is None) for x in masked_output_shape)) else np.prod(masked_output_shape))
        del masked_input_shape, masked_output_shape
        if ((- 1) in output_shape):
            dim = output_shape.index((- 1))
            if ((input_size is None) or (output_size is None)):
                output_shape[dim] = None
                output_size = None
            else:
                # output_size currently includes the -1 factor; cancel it
                # before dividing to infer the missing dimension.
                output_size *= (- 1)
                output_shape[dim] = (input_size // output_size)
                output_size *= output_shape[dim]
        if ((input_size is not None) and (output_size is not None) and (input_size != output_size)):
            raise ValueError(('%s cannot be reshaped to specification %s. The total size mismatches.' % (input_shape, self.shape)))
        return tuple(output_shape)

    def get_output_for(self, input, **kwargs):
        # At run time only the [i] references need resolving; -1 is handled
        # natively by the backend reshape.
        output_shape = list(self.shape)
        for (dim, o) in enumerate(output_shape):
            if isinstance(o, list):
                output_shape[dim] = input.shape[o[0]]
        return input.reshape(tuple(output_shape))
def test(arch=None, exclude=None, require=None, **options):
    """Decorator that parameterizes a test over supported archs and
    feature-option combinations, honoring exclusions and required
    extensions.

    ``arch`` restricts which archs to try (default: all supported);
    ``exclude`` lists archs or (arch, system) pairs to skip; ``require``
    lists extensions that must be supported; remaining ``options`` pin
    feature values.
    """

    def exclude_arch_platform(arch, system, exclude):
        # Normalize `exclude` into (arch, system) constraints and report
        # whether the given pair is excluded. A bare entry excludes an
        # arch on every platform; a 2-tuple excludes the exact pair.
        if (exclude is None):
            exclude = []
        if (not isinstance(exclude, (list, tuple))):
            exclude = [exclude]
        for pair in exclude:
            exclude_arch = None
            exclude_sys = None
            if isinstance(pair, (list, tuple)):
                if (len(pair) == 1):
                    exclude_arch = pair[0]
                else:
                    assert (len(pair) == 2)
                    exclude_arch = pair[0]
                    exclude_sys = pair[1]
            else:
                exclude_arch = pair
            assert ((exclude_arch is not None) or (exclude_sys is not None))
            if (exclude_arch and exclude_sys):
                if ((exclude_arch == arch) and (exclude_sys == system)):
                    return True
            elif (exclude_arch and (exclude_arch == arch)):
                return True
            elif (exclude_sys and (exclude_sys == system)):
                return True
        return False

    # Normalize arch/require to lists.
    if (arch is None):
        arch = []
    if (require is None):
        require = []
    if (not isinstance(arch, (list, tuple))):
        arch = [arch]
    if (not isinstance(require, (list, tuple))):
        require = [require]
    # Intersect the requested archs with what this machine supports.
    archs_expected = expected_archs()
    if (len(arch) == 0):
        arch = archs_expected
    else:
        arch = [v for v in arch if (v in archs_expected)]
    marks = []
    if (len(arch) == 0):
        marks.append(pytest.mark.skip(reason='No supported archs'))
    else:
        # Cartesian product of archs with every feature's parameter values.
        arch_params_sets = [arch, *_test_features.values()]
        parameters = []
        for (req_arch, *req_params) in itertools.product(*arch_params_sets):
            if (req_arch not in arch):
                continue
            curr_system = platform.system()
            if exclude_arch_platform(req_arch, curr_system, exclude):
                continue
            # Skip combos missing any explicitly required extension.
            if (not all((_ti_core.is_extension_supported(req_arch, e) for e in require))):
                continue
            current_options = copy.deepcopy(options)
            for (feature, param) in zip(_test_features, req_params):
                value = param.value
                required_extensions = param.required_extensions
                # Reject the combo if a caller-pinned option conflicts
                # with this parameter value, or its extensions are missing.
                if ((current_options.setdefault(feature, value) != value) or any(((not _ti_core.is_extension_supported(req_arch, e)) for e in required_extensions))):
                    break
            else:
                parameters.append((req_arch, current_options))
        if (not parameters):
            marks.append(pytest.mark.skip(reason='No all required extensions are supported'))
        else:
            # One pytest param per surviving (arch, options) combo; suffix
            # ids with an index only when an arch appears more than once.
            marks.append(pytest.mark.parametrize('req_arch,req_options', parameters, ids=[(f'arch={arch.name}-{i}' if (len(parameters) > 1) else f'arch={arch.name}') for (i, (arch, _)) in enumerate(parameters)]))

    def decorator(func):
        # Tag the function so the test harness can recognize it, then
        # apply the collected pytest marks outermost-last.
        func.__ti_test__ = True
        for mark in reversed(marks):
            func = mark(func)
        return func

    return decorator
def boolean(arg):
    """argparse type converter: map a truthy/falsy token to a bool.

    Accepts 'true'/'True'/'yes'/'1'/1 for True and
    'false'/'False'/'no'/'0'/0 for False; anything else raises
    ``argparse.ArgumentTypeError``.
    """
    truthy = ('true', 'True', 'yes', '1', 1)
    falsy = ('false', 'False', 'no', '0', 0)
    if arg in truthy:
        return True
    if arg in falsy:
        return False
    raise argparse.ArgumentTypeError(('could not interpret "%s" as true or false' % (arg,)))
@click.command()
@click.option('--batch_size', type=int, default=4000)
@click.option('--max_path_length', type=int, default=100)
@wrap_experiment
def trpo_cartpole_batch_sampler(ctxt=None, seed=1, batch_size=4000, max_path_length=100):
    """Train TRPO on CartPole-v1 using the multiprocessing BatchSampler.

    NOTE(review): the decorators were garbled during extraction (bare
    ``()``, ``('--batch_size', ...)`` and ``_experiment`` residue);
    restored as the click command/options plus ``wrap_experiment`` used by
    the garage examples -- confirm against upstream.
    """
    set_seed(seed)
    # One sampler worker environment per full-length path in a batch.
    n_envs = (batch_size // max_path_length)
    with LocalTFRunner(ctxt, max_cpus=n_envs) as runner:
        env = GarageEnv(env_name='CartPole-v1')
        policy = CategoricalMLPPolicy(name='policy', env_spec=env.spec, hidden_sizes=(32, 32))
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        algo = TRPO(env_spec=env.spec, policy=policy, baseline=baseline,
                    max_path_length=max_path_length, discount=0.99, max_kl_step=0.01)
        runner.setup(algo=algo, env=env, sampler_cls=BatchSampler, sampler_args={'n_envs': n_envs})
        runner.train(n_epochs=100, batch_size=4000, plot=False)
class EncoderDecoderLFramework(LFramework):
    """Encoder-decoder learning framework for text-to-SQL semantic parsing.

    Wraps one of three model families selected by ``self.model_id``:
    BRIDGE (schema-aware pointer model), SEQ2SEQ_PG (pointer-generator) and
    SEQ2SEQ (plain sequence-to-sequence), providing loss computation,
    batch formatting, beam-search inference and output post-processing.
    """

    def __init__(self, args):
        # Load shared text/program vocabularies and instantiate the model
        # matching self.model_id (set by the LFramework base from args).
        super().__init__(args)
        vocabs = data_loader.load_vocabs(args)
        self.in_vocab = vocabs['text']
        self.out_vocab = vocabs['program']
        if (self.model_id == BRIDGE):
            self.mdl = Bridge(args, self.in_vocab, self.out_vocab)
        elif (self.model_id == SEQ2SEQ_PG):
            self.mdl = PointerGenerator(args, self.in_vocab, self.out_vocab)
        elif (self.model_id == SEQ2SEQ):
            self.mdl = Seq2Seq(args, self.in_vocab, self.out_vocab)
        else:
            raise NotImplementedError
        if (self.args.loss == 'cross_entropy'):
            # Masked so that PAD positions do not contribute to the loss.
            self.loss_fun = MaskedCrossEntropyLoss(self.mdl.out_vocab.pad_id)
        else:
            raise NotImplementedError
        self.define_optimizer()
        self.define_lr_scheduler()
        # Only the output post-processor (detokenizer) is kept from the tokenizer tuple.
        (_, _, self.output_post_process, _) = tok.get_tokenizers(args)
        print('{} module created'.format(self.model))

    def get_text_masks(self, encoder_input_ids):
        """Return the mask component of an (ids, masks) encoder input pair."""
        return encoder_input_ids[1]

    def get_schema_masks(self, encoder_input_ptr_ids, transformer_output_masks=None):
        """Mark positions in the encoder pointer input that are schema markers.

        Marker positions (table/field/asterisk markers) identify schema
        entries in the serialized input; the marker id set depends on whether
        typed field markers are in use.
        """
        if (transformer_output_masks is not None):
            # Compact the ids to the masked transformer output positions first.
            (encoder_input_ptr_ids, _) = ops.batch_binary_lookup(encoder_input_ptr_ids, transformer_output_masks, pad_value=self.in_vocab.pad_id)
        if self.args.use_typed_field_markers:
            schema_masks = (((((((encoder_input_ptr_ids == self.tu.table_marker_id) | (encoder_input_ptr_ids == self.tu.text_field_marker_id)) | (encoder_input_ptr_ids == self.tu.number_field_marker_id)) | (encoder_input_ptr_ids == self.tu.time_field_marker_id)) | (encoder_input_ptr_ids == self.tu.boolean_field_marker_id)) | (encoder_input_ptr_ids == self.tu.other_field_marker_id)) | (encoder_input_ptr_ids == self.tu.asterisk_marker_id))
        else:
            schema_masks = ((((encoder_input_ptr_ids == self.tu.table_marker_id) | (encoder_input_ptr_ids == self.tu.field_marker_id)) | (encoder_input_ptr_ids == self.tu.primary_key_marker_id)) | (encoder_input_ptr_ids == self.tu.asterisk_marker_id))
        return schema_masks

    def loss(self, formatted_batch):
        """Compute the (gradient-accumulation-scaled) training loss for a batch.

        Targets are the decoder inputs shifted left by one position
        (standard teacher forcing), taken from the pointer-value ids for
        pointer models and plain decoder ids otherwise.
        """
        outputs = self.forward(formatted_batch)
        if (self.model_id in [SEQ2SEQ_PG, BRIDGE]):
            (decoder_ptr_value_ids, _) = formatted_batch[4]
            left_shift_targets = ops.left_shift_pad(decoder_ptr_value_ids, self.out_vocab.pad_id)
        else:
            (decoder_input_ids, _) = formatted_batch[1]
            left_shift_targets = ops.left_shift_pad(decoder_input_ids, self.out_vocab.pad_id)
        loss = self.loss_fun(outputs, left_shift_targets)
        # Scale down so accumulated gradients match a full-batch update.
        loss /= self.num_accumulation_steps
        return loss

    def forward(self, formatted_batch, model_ensemble=None):
        """Run the underlying model on a batch produced by format_batch.

        At training time decoder inputs are fed (teacher forcing); at
        inference time they are None and the model decodes (beam search).
        ``model_ensemble`` triggers ensemble beam search (BRIDGE only).
        """
        encoder_input_ids = formatted_batch[0]
        decoder_input_ids = (formatted_batch[1][0] if self.training else None)
        if (self.model_id in [SEQ2SEQ_PG, BRIDGE]):
            encoder_ptr_input_ids = formatted_batch[2]
            (encoder_ptr_value_ids, _) = formatted_batch[3]
            decoder_ptr_value_ids = (formatted_batch[4][0] if self.training else None)
            text_masks = self.get_text_masks(encoder_input_ids)
            if (self.model_id in [BRIDGE]):
                transformer_output_value_masks = formatted_batch[5][0]
                schema_masks = self.get_schema_masks(encoder_ptr_input_ids[0])
                schema_memory_masks = formatted_batch[6][0]
                feature_ids = formatted_batch[8]
                if model_ensemble:
                    assert (not self.training)
                    outputs = ensemble_beam_search(model_ensemble, encoder_ptr_input_ids, encoder_ptr_value_ids, text_masks, schema_masks, feature_ids, None, transformer_output_value_masks, schema_memory_masks)
                else:
                    outputs = self.mdl(encoder_ptr_input_ids, encoder_ptr_value_ids, text_masks, schema_masks, feature_ids, transformer_output_value_masks=transformer_output_value_masks, schema_memory_masks=schema_memory_masks, decoder_input_ids=decoder_input_ids, decoder_ptr_value_ids=decoder_ptr_value_ids)
            else:
                outputs = self.mdl(encoder_ptr_input_ids, encoder_ptr_value_ids, text_masks, decoder_input_ids=decoder_input_ids, decoder_ptr_value_ids=decoder_ptr_value_ids)
        elif (self.model_id == SEQ2SEQ):
            outputs = self.mdl(encoder_input_ids, decoder_input_ids)
        else:
            raise NotImplementedError
        return outputs

    def inference(self, examples, decode_str_output=True, restore_clause_order=False, pred_restored_cache=None, check_schema_consistency_=True, engine=None, inline_eval=False, model_ensemble=None, verbose=False):
        """Decode predictions for ``examples`` in mini-batches.

        Optionally restores clause order (execution order -> written order),
        checks schema consistency, performs execution-guided filtering via
        ``engine``, and evaluates inline. Returns a dict with raw predictions,
        scores and (optionally) decoded SQL strings per example.

        NOTE(review): ``hardness`` is only assigned under ``inline_eval``;
        calling with verbose=True and inline_eval=False would hit an
        unbound local in print_predictions — confirm intended usage.
        """
        if (self.args.leaderboard_submission or self.args.demo):
            assert ((not verbose) and (not inline_eval) and (not self.args.use_oracle_tables))
        (pred_list, pred_score_list, pred_decoded_list, pred_decoded_score_list) = ([], [], [], [])
        if restore_clause_order:
            if (pred_restored_cache is None):
                pred_restored_cache = dict()
        if self.save_vis:
            (text_ptr_weights_vis, pointer_vis) = ([], [])
        num_error_cases = 0
        for batch_start_id in tqdm(range(0, len(examples), self.dev_batch_size)):
            mini_batch = examples[batch_start_id:(batch_start_id + self.dev_batch_size)]
            formatted_batch = self.format_batch(mini_batch)
            outputs = self.forward(formatted_batch, model_ensemble)
            if (self.model_id in [SEQ2SEQ_PG, BRIDGE]):
                (preds, pred_scores, text_p_pointers, text_ptr_weights, seq_len) = outputs
                # Turn the scalar generate-vs-copy probability into a 2-way distribution.
                text_p_pointers.unsqueeze_(2)
                p_pointers = torch.cat([(1 - text_p_pointers), text_p_pointers], dim=2)
            elif (self.model_id == SEQ2SEQ):
                (preds, pred_scores, text_ptr_weights, seq_len) = outputs
                p_pointers = None
            else:
                raise NotImplementedError
            pred_list.append(preds)
            pred_score_list.append(pred_scores)
            if (decode_str_output or verbose):
                for i in range(len(mini_batch)):
                    example = mini_batch[i]
                    db_name = example.db_name
                    schema = self.schema_graphs[db_name]
                    (table_po, field_po) = (None, None)
                    if self.args.use_oracle_tables:
                        if (self.args.num_random_tables_added > 0):
                            # Perceived table/field order was stored as the last batch element.
                            (table_po, field_po) = formatted_batch[(- 1)][i]
                    (exp_output_strs, exp_output_scores, exp_seq_lens, exp_correct) = ([], [], [], [])
                    if inline_eval:
                        if (example.dataset_id == SPIDER):
                            gt_program_list = example.program_list
                            gt_program_ast = (example.program_ast_list_[0] if example.program_ast_list_ else example.program)
                            hardness = spider_eval_tools.Evaluator().eval_hardness(gt_program_ast, db_dir=self.args.db_dir, db_name=example.db_name)
                        elif (example.dataset_id == WIKISQL):
                            gt_program_list = example.program_ast_list_
                        else:
                            raise NotImplementedError
                        if (example.dataset_id == WIKISQL):
                            hardness = 'easy'
                    if (self.decoding_algorithm == 'beam-search'):
                        # Walk this example's beam entries best-first; stop at
                        # the first correct one when evaluating inline.
                        for j in range(self.beam_size):
                            beam_id = ((i * self.beam_size) + j)
                            post_processed_output = self.post_process_nn_output(beam_id, example.dataset_id, example, preds, schema, text_ptr_weights, p_pointers, table_po=table_po, field_po=field_po, verbose=verbose)
                            if post_processed_output:
                                pred_sql = post_processed_output[0]
                                if restore_clause_order:
                                    if (pred_restored_cache and (db_name in pred_restored_cache) and (pred_sql in pred_restored_cache[db_name])):
                                        (restored_pred, grammatical, schema_consistent) = pred_restored_cache[db_name][pred_sql]
                                    else:
                                        (restored_pred, grammatical, schema_consistent) = moz_sp.restore_clause_order(pred_sql, schema, check_schema_consistency_=check_schema_consistency_, verbose=verbose)
                                        if (pred_restored_cache and check_schema_consistency_):
                                            if (db_name not in pred_restored_cache):
                                                pred_restored_cache[db_name] = dict()
                                            pred_restored_cache[db_name][pred_sql] = (restored_pred, grammatical, schema_consistent)
                                    if (check_schema_consistency_ and (not schema_consistent)):
                                        restored_pred = None
                                    pred_sql = restored_pred
                                elif check_schema_consistency_:
                                    if (not moz_sp.check_schema_consistency(pred_sql, schema, in_execution_order=self.args.process_sql_in_execution_order)):
                                        pred_sql = None
                                if (pred_sql and self.args.execution_guided_decoding):
                                    # Discard candidates whose execution fails or returns nothing.
                                    assert (engine is not None)
                                    try:
                                        pred_query = Query.from_dict(pred_sql, ordered=False)
                                        pred_ex = engine.execute_query(example.db_name, pred_query, lower=True)
                                        if (not pred_ex):
                                            pred_sql = None
                                    except Exception:
                                        pred_sql = None
                            else:
                                pred_sql = None
                            if pred_sql:
                                exp_output_strs.append(pred_sql)
                                exp_output_scores.append(float(pred_scores[beam_id]))
                                exp_seq_lens.append(int(seq_len[beam_id]))
                                if self.save_vis:
                                    self.save_vis_parameters(post_processed_output, text_ptr_weights_vis, pointer_vis)
                                if inline_eval:
                                    results = eval_tools.eval_prediction(pred=pred_sql, gt_list=gt_program_list, dataset_id=example.dataset_id, db_name=example.db_name, in_execution_order=(self.args.process_sql_in_execution_order and (not restore_clause_order)))
                                    (correct, _, _) = results
                                    exp_correct.append(correct)
                                    correct_ = (correct[1] if isinstance(correct, tuple) else correct)
                                    if correct_:
                                        break
                    else:
                        raise NotImplementedError
                    num_preds = len(exp_output_strs)
                    pred_decoded_list.append(exp_output_strs)
                    pred_decoded_score_list.append(exp_output_scores[:num_preds])
                    if verbose:
                        predictions = zip(exp_output_strs, exp_output_scores, exp_seq_lens, exp_correct)
                        is_error_case = self.print_predictions((batch_start_id + i), example, hardness, predictions, schema)
                        if is_error_case:
                            num_error_cases += 1
                            print('Error Case {}'.format(num_error_cases))
                            print()
                    if ((not pred_decoded_list[(- 1)]) and (not self.args.demo)):
                        # No beam candidate survived filtering: emit a dummy
                        # query so downstream evaluation has one per example.
                        pred_decoded_list[(- 1)].append(self.get_dummy_prediction(schema))
                        pred_decoded_score_list[(- 1)].append((- ops.HUGE_INT))
        out_dict = dict()
        out_dict['preds'] = ops.pad_and_cat(pred_list, self.out_vocab.pad_id)
        out_dict['pred_scores'] = torch.cat(pred_score_list)
        if decode_str_output:
            out_dict['pred_decoded'] = pred_decoded_list
            out_dict['pred_decoded_scores'] = pred_decoded_score_list
            if restore_clause_order:
                out_dict['pred_restored_cache'] = pred_restored_cache
        if self.save_vis:
            vis_dict = dict()
            vis_dict['text_attention_vis'] = text_ptr_weights_vis
            vis_dict['text_pointer_vis'] = pointer_vis
            for key in vis_dict:
                if key.endswith('_vis'):
                    if key.endswith('_attention_vis'):
                        attn_target_label = key.split('_')[0]
                        self.vis_writer.save_cross_attention(vis_dict[key], attn_target_label)
                    if key.endswith('_pointer_vis'):
                        self.vis_writer.save_pointer(vis_dict[key], 'all')
        return out_dict

    def format_batch(self, mini_batch):
        """Vectorize a mini-batch of examples into padded model inputs.

        The returned tuple layout depends on ``self.model_id`` and is the
        contract consumed by :meth:`forward` / :meth:`loss`.
        """

        def get_decoder_input_ids():
            # Teacher-forcing targets exist only at training time.
            if self.training:
                if (self.model_id in [BRIDGE]):
                    X = [exp.program_singleton_field_input_ids for exp in mini_batch]
                else:
                    X = [exp.program_input_ids for exp in mini_batch]
                return ops.pad_batch(X, self.mdl.out_vocab.pad_id)
            else:
                return None

        def get_encoder_attn_mask(table_names, table_masks):
            # Build a 0/1 attention mask over the serialized input that keeps
            # text tokens, the leading separator, the selected tables and
            # (via is_selected_table) the fields following each selected table.
            schema_pos = [schema_graph.get_schema_pos(table_name) for table_name in table_names]
            encoder_attn_mask = [1 for _ in range(exp.num_text_tokens)]
            encoder_attn_mask.append(1)
            is_selected_table = False
            for j in range(1, len(table_masks)):
                if (j in schema_pos):
                    encoder_attn_mask.append(1)
                    is_selected_table = True
                elif (table_masks[j] == 1):
                    encoder_attn_mask.append(0)
                    is_selected_table = False
                elif is_selected_table:
                    encoder_attn_mask.append(1)
                else:
                    encoder_attn_mask.append(0)
            return encoder_attn_mask

        super().format_batch(mini_batch)
        encoder_input_ids = ops.pad_batch([exp.text_ids for exp in mini_batch], self.mdl.in_vocab.pad_id)
        decoder_input_ids = get_decoder_input_ids()
        table_samples = []
        if (self.model_id == SEQ2SEQ):
            return (encoder_input_ids, decoder_input_ids)
        elif (self.model_id in [BRIDGE]):
            (encoder_ptr_input_ids, encoder_ptr_value_ids, decoder_ptr_value_ids) = ([], [], [])
            (primary_key_ids, foreign_key_ids, field_type_ids, table_masks, table_positions, table_field_scopes, field_table_pos, transformer_output_value_masks, schema_memory_masks) = ([], [], [], [], [], [], [], [], [])
            for exp in mini_batch:
                schema_graph = self.schema_graphs.get_schema(exp.db_id)
                if self.training:
                    # Training path: (re-)serialize the schema per example with
                    # optional table sampling/shuffling for augmentation.
                    if exp.gt_table_names_list:
                        gt_tables = set([schema_graph.get_table_id(t_name) for t_name in exp.gt_table_names])
                    else:
                        # Recover ground-truth tables from program tokens of type 0 (table tokens).
                        gt_table_names = [token for (token, t) in zip(exp.program_singleton_field_tokens, exp.program_singleton_field_token_types) if (t == 0)]
                        gt_tables = set([schema_graph.get_table_id(t_name) for t_name in gt_table_names])
                    if schema_graph.name.startswith('baseball'):
                        # Very large schema: keep gt tables plus a few random distractors.
                        tables = list(gt_tables)
                        tables += random.sample([i for i in range(schema_graph.num_tables) if (i not in gt_tables)], k=min(random.randint(1, 7), (schema_graph.num_tables - len(gt_tables))))
                    else:
                        tables = list(range(schema_graph.num_tables))
                    if self.args.table_shuffling:
                        # Randomly drop one non-gt table 30% of the time, then randomize order.
                        table_to_drop = random.choice(tables)
                        if (table_to_drop not in gt_tables):
                            if (random.uniform(0, 1) < 0.3):
                                tables = [x for x in tables if (x != table_to_drop)]
                        (table_po, field_po) = schema_graph.get_schema_perceived_order(tables, random_table_order=True, random_field_order=self.args.random_field_order)
                    else:
                        (table_po, field_po) = schema_graph.get_schema_perceived_order(tables, random_table_order=False, random_field_order=self.args.random_field_order)
                    question_encoding = (exp.text if self.args.use_picklist else None)
                    (schema_features, matched_values) = schema_graph.get_serialization(self.tu, flatten_features=True, table_po=table_po, field_po=field_po, use_typed_field_markers=self.args.use_typed_field_markers, use_graph_encoding=self.args.use_graph_encoding, question_encoding=question_encoding, top_k_matches=self.args.top_k_picklist_matches, num_values_per_field=self.args.num_values_per_field, no_anchor_text=self.args.no_anchor_text, verbose=False)
                    (ptr_input_tokens, ptr_input_values, num_excluded_tables, num_excluded_fields) = get_table_aware_transformer_encoder_inputs(exp.text_ptr_values, exp.text_tokens, schema_features, self.tu)
                    assert (len(ptr_input_tokens) <= self.tu.tokenizer.max_len)
                    if (num_excluded_fields > 0):
                        print('Warning: training input truncated')
                    # +1 accounts for the asterisk node; excluded nodes were truncated away.
                    num_included_nodes = (((schema_graph.get_num_perceived_nodes(tables) + 1) - num_excluded_tables) - num_excluded_fields)
                    encoder_ptr_input_ids.append(self.tu.tokenizer.convert_tokens_to_ids(ptr_input_tokens))
                    if self.args.read_picklist:
                        (exp.transformer_output_value_mask, value_features, value_tokens) = get_transformer_output_value_mask(ptr_input_tokens, matched_values, self.tu)
                        transformer_output_value_masks.append(exp.transformer_output_value_mask)
                    primary_key_ids.append(schema_graph.get_primary_key_ids(num_included_nodes, table_po, field_po))
                    foreign_key_ids.append(schema_graph.get_foreign_key_ids(num_included_nodes, table_po, field_po))
                    field_type_ids.append(schema_graph.get_field_type_ids(num_included_nodes, table_po, field_po))
                    table_masks.append(schema_graph.get_table_masks(num_included_nodes, table_po, field_po))
                    if self.args.read_picklist:
                        # Copyable memory = question tokens plus matched picklist values.
                        constant_memory_features = (exp.text_tokens + value_features)
                        constant_memory = (exp.text_ptr_values + value_tokens)
                        exp.text_ptr_values = constant_memory
                    else:
                        constant_memory_features = exp.text_tokens
                    (constant_ptr_value_ids, constant_unique_input_ids) = vec.vectorize_ptr_in(constant_memory_features, self.out_vocab)
                    # Schema nodes are addressed after vocab and text-memory ids.
                    encoder_ptr_value_ids.append((constant_ptr_value_ids + [((self.out_vocab.size + len(constant_memory_features)) + x) for x in range(num_included_nodes)]))
                    program_field_ptr_value_ids = vec.vectorize_field_ptr_out(exp.program_singleton_field_tokens, exp.program_singleton_field_token_types, self.out_vocab, constant_unique_input_ids, max_memory_size=len(constant_memory_features), schema=schema_graph, num_included_nodes=num_included_nodes)
                    decoder_ptr_value_ids.append(program_field_ptr_value_ids)
                else:
                    # Inference path: features were precomputed and cached on
                    # each example during preprocessing.
                    encoder_ptr_input_ids = [exp.ptr_input_ids for exp in mini_batch]
                    encoder_ptr_value_ids = [exp.ptr_value_ids for exp in mini_batch]
                    decoder_ptr_value_ids = ([exp.program_text_and_field_ptr_value_ids for exp in mini_batch] if self.training else None)
                    primary_key_ids = [exp.primary_key_ids for exp in mini_batch]
                    foreign_key_ids = [exp.foreign_key_ids for exp in mini_batch]
                    field_type_ids = [exp.field_type_ids for exp in mini_batch]
                    table_masks = [exp.table_masks for exp in mini_batch]
                    (table_pos, table_field_scope) = schema_graph.get_table_scopes(schema_graph.num_nodes)
                    table_positions.append(table_pos)
                    table_field_scopes.append(table_field_scope)
                    if self.args.read_picklist:
                        transformer_output_value_masks.append(exp.transformer_output_value_mask)
            encoder_ptr_input_ids = ops.pad_batch(encoder_ptr_input_ids, self.mdl.in_vocab.pad_id)
            encoder_ptr_value_ids = ops.pad_batch(encoder_ptr_value_ids, self.mdl.in_vocab.pad_id)
            schema_memory_masks = (ops.pad_batch(schema_memory_masks, pad_id=0) if (self.args.use_pred_tables and (not self.training)) else (None, None))
            decoder_ptr_value_ids = (ops.pad_batch(decoder_ptr_value_ids, self.mdl.out_vocab.pad_id) if self.training else None)
            primary_key_ids = ops.pad_batch(primary_key_ids, self.mdl.in_vocab.pad_id)
            foreign_key_ids = ops.pad_batch(foreign_key_ids, self.mdl.in_vocab.pad_id)
            field_type_ids = ops.pad_batch(field_type_ids, self.mdl.in_vocab.pad_id)
            table_masks = ops.pad_batch(table_masks, pad_id=0)
            transformer_output_value_masks = (ops.pad_batch(transformer_output_value_masks, pad_id=0, dtype=torch.uint8) if self.args.read_picklist else (None, None))
            if (not self.training):
                # Table scopes are only needed for execution-order decoding.
                table_positions = (ops.pad_batch(table_positions, pad_id=(- 1)) if self.args.process_sql_in_execution_order else (None, None))
                table_field_scopes = (ops.pad_batch_2D(table_field_scopes, pad_id=0) if self.args.process_sql_in_execution_order else (None, None))
            graphs = None
            return (encoder_input_ids, decoder_input_ids, encoder_ptr_input_ids, encoder_ptr_value_ids, decoder_ptr_value_ids, transformer_output_value_masks, schema_memory_masks, graphs, (primary_key_ids, foreign_key_ids, field_type_ids, table_masks, table_positions, table_field_scopes, field_table_pos), table_samples)
        elif (self.model_id in [SEQ2SEQ_PG]):
            encoder_ptr_input_ids = [exp.ptr_input_ids for exp in mini_batch]
            encoder_ptr_value_ids = [exp.ptr_value_ids for exp in mini_batch]
            decoder_ptr_value_ids = [exp.program_text_ptr_value_ids for exp in mini_batch]
            encoder_ptr_input_ids = ops.pad_batch(encoder_ptr_input_ids, self.mdl.in_vocab.pad_id)
            encoder_ptr_value_ids = ops.pad_batch(encoder_ptr_value_ids, self.mdl.in_vocab.pad_id)
            decoder_ptr_value_ids = ops.pad_batch(decoder_ptr_value_ids, self.mdl.out_vocab.pad_id)
            return (encoder_input_ids, decoder_input_ids, encoder_ptr_input_ids, encoder_ptr_value_ids, decoder_ptr_value_ids)
        else:
            raise NotImplementedError

    def post_process_nn_output(self, idx, dataset_id, example, decoder_outputs, schema=None, text_ptr_weights=None, p_pointers=None, table_po=None, field_po=None, verbose=False):
        """De-vectorize one decoded sequence into an output string (or struct).

        Returns a 1-to-3 tuple whose first element is the output (None on
        failure); extra elements are visualization payloads when
        ``self.save_vis`` is set. Returns None if join-condition insertion fails.
        """
        decoder_output = ops.var_to_numpy(decoder_outputs[idx])
        if (dataset_id == WIKISQL):
            try:
                output_str = tok.wikisql_vec_to_struct(decoder_output, self.out_vocab, example.text_ptr_values, example.text_token_starts, example.text_token_ends, example.text, self.tu)
            except Exception:
                output_str = None
        else:
            out_tokens = self.de_vectorize(decoder_output, self.out_vocab, example.text_ptr_values, schema, table_po=table_po, field_po=field_po, return_tokens=True)
            if self.args.no_join_condition:
                assert (schema is not None)
                try:
                    out_tokens = moz_sp.add_join_condition(out_tokens, schema)
                except ValueError as e:
                    if verbose:
                        print(str(e))
                    return None
            output_str = self.output_post_process(out_tokens)
            # Replace placeholder number/string tokens with executable literals.
            output_str = output_str.replace(self.out_vocab.num_token, '1').replace('<NUM>', '1')
            output_str = output_str.replace(self.out_vocab.str_token, '"string"').replace('<STRING>', 'string')
        if self.save_vis:
            text_ptr_weights_vis = (example.text_ptr_values, out_tokens, ops.var_to_numpy(text_ptr_weights[idx]))
            if (self.model_id in [SEQ2SEQ_PG, BRIDGE]):
                pointer_vis = (out_tokens, ops.var_to_numpy(p_pointers[idx]))
                return (output_str, text_ptr_weights_vis, pointer_vis)
            else:
                return (output_str, text_ptr_weights_vis)
        else:
            return (output_str,)

    def get_dummy_prediction(self, schema):
        """Fallback query guaranteed to parse: select everything from table 0."""
        return 'SELECT * FROM {}'.format(schema.table_rev_index[0].name)

    def de_vectorize(self, p_cpu, out_vocab, input_ptr_values, schema=None, table_po=None, field_po=None, return_tokens=False):
        """Dispatch to the de-vectorizer matching the active model family."""
        if (self.model_id in [SEQ2SEQ_PG]):
            return vec.de_vectorize_ptr(p_cpu, out_vocab, memory=input_ptr_values, post_process=self.output_post_process, return_tokens=return_tokens)
        elif (self.model_id in [BRIDGE]):
            return vec.de_vectorize_field_ptr(p_cpu, out_vocab, memory=input_ptr_values, schema=schema, table_po=table_po, field_po=field_po, post_process=self.output_post_process, return_tokens=return_tokens)
        elif (self.model_id == SEQ2SEQ):
            return vec.de_vectorize(p_cpu, out_vocab, post_process=self.output_post_process, return_tokens=return_tokens)
        else:
            raise NotImplementedError

    def print_predictions(self, example_id, example, hardness, predictions, schema):
        """Pretty-print beam predictions for one example.

        Returns True when the top beam entry is wrong (an "error case"),
        in which case the full example is also printed.
        """
        inspect_error_cases = False
        output_strs = []
        for (i, prediction) in enumerate(predictions):
            (pred_sql, pred_score, sql_len, correct) = prediction
            if isinstance(correct, tuple):
                # (exact-set-match, execution) pair: report the execution result.
                correct = correct[1]
            correct_badge = ('[CORRE]' if correct else '[WRONG]')
            if (example.dataset_id == WIKISQL):
                pred_sql = str(pred_sql)
            output_strs.append('{} {} Pred {}:\t{} ({:.3f}) (length={})'.format(correct_badge, '[{}]'.format(hardness), i, pred_sql.encode('utf-8'), float(pred_score), int(sql_len)))
            if ((i == 0) and (not correct)):
                inspect_error_cases = True
        if (not output_strs):
            output_strs.append('{} {} Pred {}:\t{} ({:.3f})'.format('[WRONG]', hardness, 0, 'No valid output!', 0))
            inspect_error_cases = True
        if ((not output_strs) or output_strs[0].startswith('[WRONG]')):
            example.pretty_print(example_id=example_id, schema=schema, de_vectorize_ptr=vec.de_vectorize_ptr, de_vectorize_field_ptr=vec.de_vectorize_field_ptr, rev_vocab=self.out_vocab, post_process=self.output_post_process, use_table_aware_te=(self.model_id in [BRIDGE]))
        for output_str in output_strs:
            print(output_str)
        return inspect_error_cases

    def save_vis_parameters(self, outputs, text_ptr_weights_vis, pointer_vis):
        """Accumulate visualization payloads emitted by post_process_nn_output."""
        text_ptr_weights_vis.append(outputs[1])
        if (self.model_id in [SEQ2SEQ_PG, BRIDGE]):
            pointer_vis.append(outputs[2])
class _FFTModule(sys.modules[__name__].__class__): def __call__(*args, **kwargs): from scipy import _dep_fft return _dep_fft(*args, **kwargs)
class RemoteMonitor(Callback):
    """Callback that streams epoch-end logs to a server via HTTP POST.

    Events are posted to ``root + path`` as a form field named ``field``
    containing a JSON-encoded dict of the epoch number and log values.

    Args:
        root: base URL of the target server. (Bug fix: the default literal was
            corrupted to ``' path='`` in the source; restored to the standard
            ``'http://localhost:9000'``.)
        path: path, relative to ``root``, events are posted to.
        field: form field name under which the JSON payload is sent.
        headers: optional dict of custom HTTP headers.
    """

    def __init__(self, root='http://localhost:9000', path='/publish/epoch/end/', field='data', headers=None):
        super(RemoteMonitor, self).__init__()
        self.root = root
        self.path = path
        self.field = field
        self.headers = headers

    def on_epoch_end(self, epoch, logs=None):
        """POST the epoch's logs; warn (don't crash training) if the server is unreachable."""
        if (requests is None):
            raise ImportError('RemoteMonitor requires the `requests` library.')
        logs = (logs or {})
        send = {}
        send['epoch'] = epoch
        for (k, v) in logs.items():
            send[k] = v
        try:
            requests.post((self.root + self.path), {self.field: json.dumps(send)}, headers=self.headers)
        except requests.exceptions.RequestException:
            warnings.warn(('Warning: could not reach RemoteMonitor root server at ' + str(self.root)))
def load(dataset, split='train'):
    """Load one of the supported corpora as a pandas DataFrame.

    Args:
        dataset: 'atomic10x*' (parquet), 'names' (csv) or 'soda' (HF dataset).
        split: split name, used for the 'soda' dataset only.

    Raises:
        NotImplementedError: for any unrecognized dataset name.
    """
    if dataset.startswith('atomic10x'):
        root = build_data(ATOMIC10X)
        return pd.read_parquet(os.path.join(root, 'ATOMIC10X_with_literals.parquet'))
    if (dataset == 'names'):
        root = build_data(NAMES)
        return pd.read_csv(os.path.join(root, 'names_1990-2021.csv'))
    if (dataset == 'soda'):
        # Pull the requested split from the HuggingFace hub and convert it.
        return load_dataset('allenai/soda', split=split).to_pandas()
    raise NotImplementedError
class RegNetXBlock(nn.Module):
    """RegNet-X style bottleneck residual block using GroupNorm.

    1x1 -> grouped 3x3 (carries the stride) -> 1x1 convolutions, each followed
    by GroupNorm with ``out_channels // group_width`` groups; the shortcut is
    projected (optionally average-pooled when strided) whenever shape changes.
    """

    def __init__(self, in_channels, out_channels, group_width, stride=1):
        super().__init__()
        num_groups = out_channels // group_width

        def norm():
            # GroupNorm sized so each group holds `group_width` channels.
            return nn.GroupNorm(num_groups, out_channels)

        shortcut = []
        if stride != 1 or in_channels != out_channels:
            if stride != 1:
                shortcut.append(nn.AvgPool2d(kernel_size=3, stride=2, padding=1))
            shortcut.append(nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False))
            shortcut.append(norm())
        self.downsample = nn.Sequential(*shortcut)
        self.convs = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            norm(),
            nn.ReLU(True),
            nn.Conv2d(out_channels, out_channels, groups=num_groups, kernel_size=3,
                      padding=1, stride=stride, bias=False),
            norm(),
            nn.ReLU(True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=False),
            norm(),
        )
        self.relu = nn.ReLU(True)

    def _combine(self, x, skip):
        # Residual addition followed by the final activation.
        return self.relu(x + skip)

    def forward(self, x):
        residual = self.downsample(x)
        out = self.convs(x)
        return self._combine(out, residual)
class FairseqTask(object):
    """Base class for fairseq tasks: dataset loading, batching, model/criterion
    construction, and train/valid/inference step hooks.

    Bug fix: the @staticmethod/@classmethod/@property decorators had been
    stripped (e.g. ``FairseqTask.setup_task(args)`` would bind ``args`` to
    ``cls``); restored below.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the argument parser (no-op by default)."""
        pass

    def __init__(self, args):
        self.args = args
        # split name -> FairseqDataset, populated by load_dataset().
        self.datasets = {}

    @classmethod
    def load_dictionary(cls, filename):
        """Load a :class:`Dictionary` from *filename*."""
        return Dictionary.load(filename)

    @classmethod
    def build_dictionary(cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8):
        """Build a dictionary from *filenames*.

        Args:
            workers: number of worker processes for counting tokens.
            threshold: minimum occurrence count to keep a word (-1 = keep all).
            nwords: cap on dictionary size (-1 = unlimited).
            padding_factor: pad the dictionary size to a multiple of this.
        """
        d = Dictionary()
        for filename in filenames:
            Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers)
        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
        return d

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Instantiate the task from parsed command-line *args*."""
        return cls(args, **kwargs)

    def load_dataset(self, split, combine=False, **kwargs):
        """Load a dataset split into ``self.datasets`` (subclass responsibility)."""
        raise NotImplementedError

    def dataset(self, split):
        """Return the loaded FairseqDataset for *split*; raises if missing or wrong type."""
        from fairseq.data import FairseqDataset
        if split not in self.datasets:
            raise KeyError('Dataset not loaded: ' + split)
        if not isinstance(self.datasets[split], FairseqDataset):
            raise TypeError('Datasets are expected to be of type FairseqDataset')
        return self.datasets[split]

    def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None,
                           ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1,
                           num_shards=1, shard_id=0, num_workers=0, epoch=0):
        """Build a sharded, epoch-aware batch iterator over *dataset*.

        Orders indices (seeded for reproducibility), optionally filters out
        examples exceeding *max_positions*, then packs indices into batches
        bounded by *max_tokens*/*max_sentences*.
        """
        assert isinstance(dataset, FairseqDataset)
        with data_utils.numpy_seed(seed):
            indices = dataset.ordered_indices()
        if max_positions is not None:
            indices = data_utils.filter_by_size(
                indices, dataset.size, max_positions,
                raise_exception=(not ignore_invalid_inputs))
        batch_sampler = data_utils.batch_by_size(
            indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
            required_batch_size_multiple=required_batch_size_multiple)
        return iterators.EpochBatchIterator(
            dataset=dataset, collate_fn=dataset.collater, batch_sampler=batch_sampler,
            seed=seed, num_shards=num_shards, shard_id=shard_id,
            num_workers=num_workers, epoch=epoch)

    def build_model(self, args):
        """Construct the model for this task."""
        from fairseq import models
        return models.build_model(args, self)

    def build_criterion(self, args):
        """Construct the training criterion for this task."""
        from fairseq import criterions
        return criterions.build_criterion(args, self)

    def build_generator(self, args):
        """Construct a sequence scorer (--score-reference) or beam-search generator."""
        if getattr(args, 'score_reference', False):
            from fairseq.sequence_scorer import SequenceScorer
            return SequenceScorer(self.target_dictionary)
        else:
            from fairseq.sequence_generator import SequenceGenerator
            return SequenceGenerator(
                self.target_dictionary,
                beam_size=getattr(args, 'beam', 5),
                max_len_a=getattr(args, 'max_len_a', 0),
                max_len_b=getattr(args, 'max_len_b', 200),
                min_len=getattr(args, 'min_len', 1),
                normalize_scores=(not getattr(args, 'unnormalized', False)),
                len_penalty=getattr(args, 'lenpen', 1),
                unk_penalty=getattr(args, 'unkpen', 0),
                sampling=getattr(args, 'sampling', False),
                sampling_topk=getattr(args, 'sampling_topk', -1),
                sampling_topp=getattr(args, 'sampling_topp', -1.0),
                temperature=getattr(args, 'temperature', 1.0),
                diverse_beam_groups=getattr(args, 'diverse_beam_groups', -1),
                diverse_beam_strength=getattr(args, 'diverse_beam_strength', 0.5),
                match_source_len=getattr(args, 'match_source_len', False),
                no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0))

    def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
        """One forward/backward pass; zeroes the loss when *ignore_grad* (dummy batch)."""
        model.train()
        (loss, sample_size, logging_output) = criterion(model, sample)
        if ignore_grad:
            loss *= 0
        optimizer.backward(loss)
        return (loss, sample_size, logging_output)

    def valid_step(self, sample, model, criterion):
        """One gradient-free forward pass for validation."""
        model.eval()
        with torch.no_grad():
            (loss, sample_size, logging_output) = criterion(model, sample)
        return (loss, sample_size, logging_output)

    def inference_step(self, generator, models, sample, prefix_tokens=None):
        """Generate hypotheses for *sample* with *generator* (no gradients)."""
        with torch.no_grad():
            return generator.generate(models, sample, prefix_tokens=prefix_tokens)

    def update_step(self, num_updates):
        """Hook called after every parameter update (no-op by default)."""
        pass

    def grad_denom(self, sample_sizes, criterion):
        """Delegate gradient-denominator computation to the criterion class."""
        return criterion.__class__.grad_denom(sample_sizes)

    def aggregate_logging_outputs(self, logging_outputs, criterion):
        """Delegate logging-output aggregation to the criterion class."""
        return criterion.__class__.aggregate_logging_outputs(logging_outputs)

    def max_positions(self):
        """Maximum input length supported by the task (None = unlimited)."""
        return None

    @property
    def source_dictionary(self):
        """Source-side :class:`Dictionary` (subclass responsibility)."""
        raise NotImplementedError

    @property
    def target_dictionary(self):
        """Target-side :class:`Dictionary` (subclass responsibility)."""
        raise NotImplementedError
def angle_in_degree(angle, smaller=True):
    """Return the angle in degrees, delegating the measurement to angle_in_radian.

    The ``smaller`` flag is forwarded unchanged to ``angle_in_radian``.
    """
    radians = angle_in_radian(angle, smaller=smaller)
    # Same operation order as the radian->degree conversion formula 180*r/pi.
    return (180 * radians) / np.pi
# NOTE(review): stray token "_method" below looks like the tail of a
# "@matrix_method" decorator lost in extraction — confirm against the original.
_method
def block_matrix(*args, **kwds):
    """Assemble a matrix from blocks (Sage's ``block_matrix``).

    Accepts an optional leading base ring and nrows/ncols (positionally or as
    keywords), then either a single matrix, a list of rows of blocks, or a
    flat list of blocks reshaped via nrows/ncols. Scalars broadcast to zero
    or scalar-diagonal blocks. ``subdivide=True`` records block boundaries.
    """
    args = list(args)
    sparse = kwds.get('sparse', None)
    if (not args):
        # No arguments at all: return the 0x0 matrix over ZZ.
        if (sparse is not None):
            return matrix_space.MatrixSpace(ZZ, 0, 0, sparse=sparse)([])
        else:
            return matrix_space.MatrixSpace(ZZ, 0, 0)([])
    # Optional leading ring argument; must agree with ring= if both given.
    if ((len(args) >= 1) and is_Ring(args[0])):
        if (kwds.get('ring', args[0]) != args[0]):
            raise ValueError('base ring specified twice and they are different')
        ring = args[0]
        args.pop(0)
    else:
        ring = kwds.get('ring', None)
    # Optional leading nrows, then ncols (TypeError from int() means "not a count").
    if (len(args) >= 1):
        try:
            nrows = int(args[0])
            args.pop(0)
            if (kwds.get('nrows', nrows) != nrows):
                raise ValueError('number of rows specified twice and they are different')
        except TypeError:
            nrows = kwds.get('nrows', None)
    else:
        nrows = kwds.get('nrows', None)
    if (len(args) >= 1):
        try:
            ncols = int(args[0])
            args.pop(0)
            if (kwds.get('ncols', ncols) != ncols):
                raise ValueError('number of columns specified twice and they are different')
        except TypeError:
            ncols = kwds.get('ncols', None)
    else:
        ncols = kwds.get('ncols', None)
    if (not args):
        args = [[]]
    if (len(args) > 1):
        print(args)
        raise TypeError('invalid block_matrix invocation')
    sub_matrices = args[0]
    if is_Matrix(sub_matrices):
        # A single matrix: just coerce ring/sparseness and return it.
        M = sub_matrices
        if (((nrows is not None) and (nrows != 1)) or ((ncols is not None) and (ncols != 1))):
            raise ValueError('invalid nrows/ncols passed to block_matrix')
        if (ring is not None):
            M = M.change_ring(ring)
        if ((sparse is not None) and (M.is_sparse() != sparse)):
            M = (M.sparse_matrix() if sparse else M.dense_matrix())
        return M
    if (not isinstance(sub_matrices, (list, tuple))):
        raise TypeError('invalid block_matrix invocation')
    subdivide = kwds.get('subdivide', True)
    try_grid = True  # whether blocks form a regular grid (enables subdivision)
    if (not sub_matrices):
        if (((nrows is not None) and (nrows != 0)) or ((ncols is not None) and (ncols != 0))):
            raise ValueError('invalid nrows/ncols passed to block_matrix')
    elif isinstance(sub_matrices[0], (list, tuple)):
        # List-of-rows form: validate row count/length against nrows/ncols.
        if ((nrows is not None) and (len(sub_matrices) != nrows)):
            raise ValueError('invalid nrows passed to block_matrix')
        first_len = len(sub_matrices[0])
        if ((ncols is not None) and (first_len != ncols)):
            raise ValueError('invalid ncols passed to block_matrix')
        same_length = all(((isinstance(v, (list, tuple)) and (len(v) == first_len)) for v in sub_matrices))
        if (subdivide and (not same_length)):
            raise ValueError('list of rows is not valid (rows are wrong types or lengths)')
        try_grid = same_length
    else:
        # Flat list form: reshape into nrows x ncols using whichever was given.
        n = len(sub_matrices)
        if (nrows is None):
            if (ncols is None):
                raise ValueError('must specify either nrows or ncols')
            else:
                nrows = (n // ncols)
        elif (ncols is None):
            ncols = (n // nrows)
        if ((nrows * ncols) != n):
            raise ValueError(('given number of rows (%s), columns (%s) incompatible with number of submatrices (%s)' % (nrows, ncols, n)))
        sub_matrices = [sub_matrices[(i * ncols):((i + 1) * ncols)] for i in range(nrows)]
    if (ring is None):
        # Infer a common base ring via pushout of all entry rings.
        ring = ZZ
        for row in sub_matrices:
            for M in row:
                R = (M.base_ring() if is_Matrix(M) else parent(M))
                if (R is not ZZ):
                    ring = sage.categories.pushout.pushout(ring, R)
    if (sparse is None):
        # Result is sparse only when every matrix block is sparse.
        sparse = True
        for row in sub_matrices:
            for M in row:
                if (sparse and is_Matrix(M) and (not M.is_sparse())):
                    sparse = False
    row_heights = None
    col_widths = None
    zero_widths = None
    total_width = None
    if try_grid:
        try:
            (row_heights, col_widths) = _determine_block_matrix_grid(sub_matrices)
        except ValueError as e:
            if subdivide:
                raise ValueError(e)
    if (col_widths is None):
        # Irregular layout: fall back to per-row sizing with stretchable zeros.
        (row_heights, zero_widths, total_width) = _determine_block_matrix_rows(sub_matrices)
    big = None
    for i in range(len(sub_matrices)):
        R = sub_matrices[i]
        row = None
        for j in range(len(R)):
            M = R[j]
            if is_Matrix(M):
                if (M.base_ring() is not ring):
                    M = M.change_ring(ring)
                if (M.is_sparse() != sparse):
                    M = (M.sparse_matrix() if sparse else M.dense_matrix())
            elif ((not M) and (zero_widths is not None)):
                # Scalar zero absorbs the row's remaining free width (once).
                if (zero_widths[i] > 0):
                    M = matrix(ring, row_heights[i], zero_widths[i], 0, sparse=sparse)
                    zero_widths[i] = 0
                else:
                    continue
            elif (zero_widths is not None):
                # Nonzero scalar in irregular layout: must be a square diagonal block.
                M = matrix(ring, row_heights[i], row_heights[i], M, sparse=sparse)
            else:
                M = matrix(ring, row_heights[i], col_widths[j], M, sparse=sparse)
            if (row is None):
                row = M
            else:
                row = row.augment(M)
        if (big is None):
            big = row
        else:
            big = big.stack(row)
    if (big is None):
        if (ring is None):
            ring = ZZ
        big = matrix(ring, 0, 0)
    if subdivide:
        # Record cumulative block boundaries (excluding the final edge).
        big.subdivide(running_total(row_heights[:(- 1)]), running_total(col_widths[:(- 1)]))
    return big
def generate_json(comp1k_json_path, target_list_path, save_json_path):
    """Write the subset of Composition-1k annotations matching a target list.

    Loads the full annotation list from ``comp1k_json_path``, keeps every
    entry whose ``alpha_path`` ends with one of the suffixes listed (one per
    line) in ``target_list_path``, and dumps the filtered list to
    ``save_json_path``.
    """
    all_infos = mmcv.load(comp1k_json_path)
    suffixes = mmcv.list_from_file(target_list_path)
    kept = [
        info for info in all_infos
        if any(info['alpha_path'].endswith(suffix) for suffix in suffixes)
    ]
    mmcv.dump(kept, save_json_path)
def test_prefixsum_idx2():
    """find_prefixsum_idx boundaries on a non-uniform tree [0.5, 1, 1, 3]."""
    tree = SumSegmentTree(4)
    for leaf, weight in enumerate([0.5, 1.0, 1.0, 3.0]):
        tree[leaf] = weight
    # (prefix sum queried, leaf index expected)
    cases = [(0.0, 0), (0.55, 1), (0.99, 1), (1.51, 2), (3.0, 3), (5.5, 3)]
    for prefixsum, expected_idx in cases:
        assert tree.find_prefixsum_idx(prefixsum) == expected_idx
def main():
    """Train a binary MNIST classifier with BayesBiNN, STE, or Adam.

    Parses CLI flags, builds MNIST train/val/test loaders, constructs the
    requested binary-connect MLP, trains it via the project's train_model(),
    and saves the config, model history, and timing to args.out_dir.
    """
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--model', type=str, default='MLPBinaryConnect', help='Model name: MLPBinaryNet, MLPBinaryConnect_STE')
    parser.add_argument('--bnmomentum', type=float, default=0.15, help='BN layer momentum value')
    parser.add_argument('--optim', type=str, default='BayesBiNN', help='Optimizer: BayesBiNN, STE, or Adam')
    parser.add_argument('--val-split', type=float, default=0.1, help='Random validation set ratio')
    parser.add_argument('--criterion', type=str, default='cross-entropy', help='loss funcion: square-hinge or cross-entropy')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N', help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', help='input batch size for testing (default: 1000)')
    parser.add_argument('--train-samples', type=int, default=1, metavar='N', help='number of Monte Carlo samples used in BayesBiNN (default: 1)')
    parser.add_argument('--test-samples', type=int, default=0, metavar='N', help='number of Monte Carlo samples used in evaluation for BayesBiNN (default: 1), if 0, point estimate using meanis applied, which is similar to the Bop optimizer')
    parser.add_argument('--epochs', type=int, default=500, metavar='N', help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR', help='learning rate (default: 0.0001)')
    parser.add_argument('--lr-end', type=float, default=1e-16, metavar='LR-end', help='learning rate (default: 0.01)')
    parser.add_argument('--lr-decay', type=float, default=0.9, metavar='LR-decay', help='learning rated decay factor for each epoch (default: 0.9)')
    parser.add_argument('--decay-steps', type=int, default=1, metavar='N', help='LR rate decay steps (default: 1)')
    parser.add_argument('--momentum', type=float, default=0.0, metavar='M', help='BayesBiNN momentum (default: 0.0)')
    parser.add_argument('--data-augmentation', action='store_true', default=False, help='Enable data augmentation')
    parser.add_argument('--log-interval', type=int, default=500, metavar='N', help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False, help='For Saving the current Model')
    parser.add_argument('--experiment-id', type=int, default=0, help='Experiment ID for log files (int)')
    parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
    parser.add_argument('--lrschedular', type=str, default='Cosine', help='Mstep,Expo,Cosine')
    parser.add_argument('--drop-prob', type=float, default=0.2, help='dropout rate')
    parser.add_argument('--trainset_scale', type=int, default=1, help='scale of training set')
    parser.add_argument('--lamda', type=float, default=10, metavar='lamda-init', help='initial mean value of the natural parameter lamda(default: 10)')
    parser.add_argument('--lamda-std', type=float, default=0, metavar='lamda-init', help='linitial std value of the natural parameter lamda(default: 0)')
    parser.add_argument('--temperature', type=float, default=1e-10, metavar='temperature', help='temperature for BayesBiNN (default: 1e-8)')
    parser.add_argument('--kl-reweight', type=float, default=1.0, metavar='min temperature', help='initial temperature for BayesBiNN (default: 1)')
    parser.add_argument('--bn-affine', type=float, default=0, metavar='bn-affine', help='whether there is bn learnable parameters, 1: learnable, 0: no (default: 0)')
    args = parser.parse_args()
    # The STE model variant forces the STE optimizer regardless of --optim.
    if (args.model == 'MLPBinaryConnect_STE'):
        args.optim = 'STE'
    # NOTE(review): this guard checks lr_decay but the message talks about the
    # end learning rate — confirm the intended condition.
    if (args.lr_decay > 1):
        raise ValueError('The end learning rate should be smaller than starting rate!!')
    args.use_cuda = ((not args.no_cuda) and torch.cuda.is_available())
    # Seed offset by experiment id so repeated runs differ deterministically.
    torch.manual_seed((args.seed + args.experiment_id))
    np.random.seed((args.seed + args.experiment_id))
    now = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))
    args.out_dir = os.path.join('./outputs', 'mnist_{}_{}_lr{}_{}_id{}'.format(args.model, args.optim, args.lr, now, args.experiment_id))
    os.makedirs(args.out_dir, exist_ok=True)
    # Persist the full argument set for reproducibility.
    config_save_path = os.path.join(args.out_dir, 'configs', 'config_{}.json'.format(args.experiment_id))
    os.makedirs(os.path.dirname(config_save_path), exist_ok=True)
    with open(config_save_path, 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    args.device = torch.device(('cuda' if args.use_cuda else 'cpu'))
    print('Running on', args.device)
    print('')
    for (key, val) in vars(args).items():
        print('{}: {}'.format(key, val))
    print('\n')
    # NOTE(review): RandomCrop(32, ...) on 28x28 MNIST images looks like a
    # CIFAR leftover — confirm intended crop size.
    if args.data_augmentation:
        transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    else:
        transform_train = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    kwargs = ({'num_workers': 2, 'pin_memory': True, 'drop_last': True} if args.use_cuda else {})
    train_dataset = datasets.MNIST('./data', train=True, download=True, transform=transform_train)
    if ((args.val_split > 0) and (args.val_split < 1)):
        # Carve a validation split out of the training set; the val copy uses
        # the (non-augmented) test transform.
        val_dataset = datasets.MNIST('./data', train=True, download=True, transform=transform_test)
        num_train = len(train_dataset)
        indices = list(range(num_train))
        split = int(np.floor((args.val_split * num_train)))
        np.random.shuffle(indices)
        (train_idx, val_idx) = (indices[split:], indices[:split])
        train_sampler = SubsetRandomSampler(train_idx)
        val_sampler = SubsetRandomSampler(val_idx)
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, sampler=train_sampler, **kwargs)
        val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, sampler=val_sampler, **kwargs)
        print('{} train and {} validation datapoints.'.format(len(train_loader.sampler), len(val_loader.sampler)))
    else:
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = None
        print('{} train and {} validation datapoints.'.format(len(train_loader.sampler), 0))
    test_dataset = datasets.MNIST('./data', train=False, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=True, **kwargs)
    print('{} test datapoints.\n'.format(len(test_loader.sampler)))
    (in_features, out_features) = ((28 * 28), 10)
    num_units = 2048
    if (args.model == 'MLPBinaryConnect'):
        model = MLPBinaryConnect(in_features, out_features, num_units, eps=0.0001, drop_prob=args.drop_prob, momentum=args.bnmomentum, batch_affine=(args.bn_affine == 1))
    elif (args.model == 'MLPBinaryConnect_STE'):
        model = MLPBinaryConnect_STE(in_features, out_features, num_units, eps=0.0001, drop_prob=args.drop_prob, momentum=args.bnmomentum, batch_affine=(args.bn_affine == 1))
        args.optim = 'STE'
    else:
        raise ValueError('Please select a network out of {MLP, BinaryConnect, BinaryNet}')
    print(model)
    model = model.to(args.device)
    if ((args.optim == 'Adam') or (args.optim == 'STE')):
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
    elif (args.optim == 'BayesBiNN'):
        # train_set_size scaled by --trainset_scale controls the KL weight.
        effective_trainsize = (len(train_loader.sampler) * args.trainset_scale)
        optimizer = BayesBiNN(model, lamda_init=args.lamda, lamda_std=args.lamda_std, temperature=args.temperature, train_set_size=effective_trainsize, lr=args.lr, betas=args.momentum, num_samples=args.train_samples, reweight=args.kl_reweight)
    if (args.criterion == 'square-hinge'):
        criterion = SquaredHingeLoss()
    elif (args.criterion == 'cross-entropy'):
        criterion = nn.CrossEntropyLoss()
    else:
        raise ValueError('Please select loss criterion in {square-hinge, cross-entropy}')
    start = time.time()
    results = train_model(args, model, [train_loader, val_loader, test_loader], criterion, optimizer)
    (model, train_loss, train_acc, val_loss, val_acc, test_loss, test_acc) = results
    save_train_history(args, train_loss, train_acc, val_loss, val_acc, test_loss, test_acc)
    time_total = timeSince(start)
    print('Task completed in {:.0f}m {:.0f}s'.format((time_total // 60), (time_total % 60)))
def build_ood_binary_dataset(is_train):
    """Render one synthetic scene and generate binary relational questions.

    Draws one rectangle set and two circle sets (one per color in the
    module-level `colors`), then builds `nb_questions` one-hot question
    vectors with answers about closest/furthest/count relations.  The
    train/eval split (`is_train`) swaps which object subset the
    closest/furthest questions are answered against (the OOD split).

    Relies on module-level globals: img_size, size, colors, nb_questions,
    question_size, q_type_idx, sub_q_type_idx, center_generate.
    Returns (img, (questions, answers)) with img scaled to [0, 1].
    """
    sub_typ_objects1 = []
    sub_typ_objects2 = []
    objects = []
    # White canvas; drawing is done in-place by cv2.
    img = (np.ones((img_size, img_size, 3)) * 255)
    # One rectangle per color; objects recorded as (color_id, center, shape).
    for (color_id, color) in enumerate(colors):
        center = center_generate(sub_typ_objects1)
        start = ((center[0] - size), (center[1] - size))
        end = ((center[0] + size), (center[1] + size))
        cv2.rectangle(img, start, end, color, (- 1))
        sub_typ_objects1.append((color_id, center, 'r'))
    # First circle set.
    for (color_id_2, color_2) in enumerate(colors):
        center = center_generate(sub_typ_objects2)
        center_ = (center[0], center[1])
        cv2.circle(img, center_, size, color_2, (- 1))
        sub_typ_objects2.append((color_id_2, center, 'c'))
    # Second circle set, used only by the counting question (subtype 2).
    for (color_id_3, color_3) in enumerate(colors):
        center = center_generate(objects)
        center_ = (center[0], center[1])
        cv2.circle(img, center_, size, color_3, (- 1))
        objects.append((color_id_3, center, 'c'))
    binary_questions = []
    binary_answers = []
    for _ in range(nb_questions):
        # Question encoding: one-hot color, relational-question flag at
        # q_type_idx + 1, one-hot subtype at sub_q_type_idx + subtype.
        question = np.zeros(question_size)
        color = random.randint(0, 5)
        question[color] = 1
        question[(q_type_idx + 1)] = 1
        subtype = random.randint(0, 2)
        question[(subtype + sub_q_type_idx)] = 1
        binary_questions.append(question)
        if (subtype == 0):
            # Closest-object question; the object set depends on the split.
            if is_train:
                my_obj = sub_typ_objects1[color][1]
                # Squared distances to all objects; the self-distance (0) is
                # overwritten with a large sentinel so it is never the min.
                # NOTE(review): assumes centers are numpy arrays so the
                # subtraction broadcasts — confirm center_generate's return.
                dist_list = [((my_obj - obj[1]) ** 2).sum() for obj in sub_typ_objects1]
                dist_list[dist_list.index(0)] = 999
                closest = dist_list.index(min(dist_list))
                if (sub_typ_objects1[closest][2] == 'r'):
                    answer = 2
                else:
                    answer = 3
            else:
                my_obj = sub_typ_objects2[color][1]
                dist_list = [((my_obj - obj[1]) ** 2).sum() for obj in sub_typ_objects2]
                dist_list[dist_list.index(0)] = 999
                closest = dist_list.index(min(dist_list))
                if (sub_typ_objects2[closest][2] == 'r'):
                    answer = 2
                else:
                    answer = 3
        elif (subtype == 1):
            # Furthest-object question (no self-distance masking needed; the
            # self distance 0 can never be the max in a multi-object scene).
            if is_train:
                my_obj = sub_typ_objects2[color][1]
                dist_list = [((my_obj - obj[1]) ** 2).sum() for obj in sub_typ_objects2]
                furthest = dist_list.index(max(dist_list))
                if (sub_typ_objects2[furthest][2] == 'r'):
                    answer = 2
                else:
                    answer = 3
            else:
                my_obj = sub_typ_objects1[color][1]
                dist_list = [((my_obj - obj[1]) ** 2).sum() for obj in sub_typ_objects1]
                furthest = dist_list.index(max(dist_list))
                if (sub_typ_objects1[furthest][2] == 'r'):
                    answer = 2
                else:
                    answer = 3
        elif (subtype == 2):
            # Count objects sharing this object's shape tag; start at -1 so
            # the object itself is excluded, then offset answers by 4.
            my_obj = objects[color][2]
            count = (- 1)
            for obj in objects:
                if (obj[2] == my_obj):
                    count += 1
            answer = (count + 4)
        binary_answers.append(answer)
    binary_relations = (binary_questions, binary_answers)
    img = (img / 255.0)
    dataset = (img, binary_relations)
    return dataset
def write_stamps(pyx_defs, stamp_fname='pyx-stamps'):
    """Write SHA1 stamp lines for each pyx file and its generated C file.

    Each entry of ``pyx_defs`` maps a pyx filename to a dict holding
    ``pyx_hash``, ``c_filename`` and ``c_hash``; every file/hash pair is
    written as a ``name, hash`` line after a two-line header.
    """
    lines = [
        '# SHA1 hashes for pyx files and generated c files\n',
        '# Auto-generated file, do not edit\n',
    ]
    for pyx_fname, info in pyx_defs.items():
        lines.append('%s, %s\n' % (pyx_fname, info['pyx_hash']))
        lines.append('%s, %s\n' % (info['c_filename'], info['c_hash']))
    with open(stamp_fname, 'wt') as stamp_file:
        stamp_file.writelines(lines)
class CopyRnnPredictor(BasePredictor):
    """Keyphrase predictor wrapping a trained CopyRNN model with beam search."""

    def __init__(self, model_info, vocab_info, beam_size, max_target_len, max_src_length):
        """Load the vocabulary, config, and model, and build the beam searcher.

        vocab_info may be a vocabulary file path (str) or an already-loaded
        vocab2id dict; anything else raises ValueError.
        """
        super().__init__(model_info)
        if isinstance(vocab_info, str):
            self.vocab2id = load_vocab(vocab_info)
        elif isinstance(vocab_info, dict):
            self.vocab2id = vocab_info
        else:
            raise ValueError('vocab info type error')
        # Inverse mapping for decoding predicted ids back to tokens.
        self.id2vocab = dict(zip(self.vocab2id.values(), self.vocab2id.keys()))
        self.config = self.load_config(model_info)
        self.model = self.load_model(model_info, CopyRNN(self.config, self.vocab2id))
        self.model.eval()
        self.beam_size = beam_size
        self.max_target_len = max_target_len
        self.max_src_len = max_src_length
        self.beam_searcher = BeamSearch(model=self.model, beam_size=self.beam_size, max_target_len=self.max_target_len, id2vocab=self.id2vocab, bos_idx=self.vocab2id[BOS_WORD], unk_idx=self.vocab2id[UNK_WORD], args=self.config)
        # Fixed loader settings shared by predict() calls.
        self.pred_base_config = {'max_oov_count': self.config.max_oov_count, 'max_src_len': self.max_src_len, 'max_target_len': self.max_target_len, 'prefetch': False, 'shuffle_in_batch': False, 'token_field': TOKENS, 'keyphrase_field': 'keyphrases'}

    def predict(self, text_list, batch_size=10, delimiter=None, tokenized=False):
        """Run beam-search keyphrase prediction over a list of texts.

        text_list entries are tokenized with token_char_tokenize unless
        `tokenized` is True (then each entry is assumed to be a token list).
        Returns a flat list with one beam-search result per input text.
        """
        self.model.eval()
        # Shrink the batch so a short input list still forms one full batch.
        if (len(text_list) < batch_size):
            batch_size = len(text_list)
        if tokenized:
            text_list = [{TOKENS: i} for i in text_list]
        else:
            text_list = [{TOKENS: token_char_tokenize(i)} for i in text_list]
        args = Munch({'batch_size': batch_size, **self.config._asdict(), **self.pred_base_config})
        loader = KeyphraseDataLoader(data_source=text_list, vocab2id=self.vocab2id, mode=INFERENCE_MODE, args=args)
        result = []
        for batch in loader:
            with torch.no_grad():
                result.extend(self.beam_searcher.beam_search(batch, delimiter=delimiter))
        return result

    def eval_predict(self, src_filename, dest_filename, args, model=None, remove_existed=False):
        """Predict keyphrases for an evaluation file and append JSON lines.

        Each input record is written to dest_filename with an added
        'pred_keyphrases' field.  If `model` is given, the beam searcher is
        rebuilt around it (e.g. to evaluate a mid-training checkpoint).
        """
        args_dict = vars(args)
        # Evaluation uses its own batch size setting.
        args_dict['batch_size'] = args_dict['eval_batch_size']
        args = Munch(args_dict)
        loader = KeyphraseDataLoader(data_source=src_filename, vocab2id=self.vocab2id, mode=EVAL_MODE, args=args)
        if os.path.exists(dest_filename):
            print('destination filename {} existed'.format(dest_filename))
            if remove_existed:
                os.remove(dest_filename)
        if (model is not None):
            model.eval()
            self.beam_searcher = BeamSearch(model=model, beam_size=self.beam_size, max_target_len=self.max_target_len, id2vocab=self.id2vocab, bos_idx=self.vocab2id[BOS_WORD], unk_idx=self.vocab2id[UNK_WORD], args=self.config)
        for batch in loader:
            with torch.no_grad():
                batch_result = self.beam_searcher.beam_search(batch, delimiter=None)
                final_result = []
                assert (len(batch_result) == len(batch[RAW_BATCH]))
                for (item_input, item_output) in zip(batch[RAW_BATCH], batch_result):
                    item_input['pred_keyphrases'] = item_output
                    final_result.append(item_input)
                append_jsonlines(dest_filename, final_result)
def _set_rpc_done(ctx_id, rank_distance):
    """Record that the RPC at `rank_distance` completed with context `ctx_id`.

    Updates the module-level bookkeeping used by the distributed autograd
    tests: marks the slot done, stores its context id, and adds the id to
    the set of all known context ids.
    """
    global rpc_done, ctx_ids, known_context_ids
    rpc_done[rank_distance] = True
    ctx_ids[rank_distance] = ctx_id
    known_context_ids.add(ctx_id)
def train_teacher(dataset, nb_teachers, teacher_id):
    """Train one teacher model on its partition of the training data.

    Loads the requested dataset, takes partition `teacher_id` out of
    `nb_teachers`, trains a CNN checkpoint, and prints its test accuracy.
    Returns True on success, False for an unknown dataset name.
    """
    assert input.create_dir_if_needed(FLAGS.data_dir)
    assert input.create_dir_if_needed(FLAGS.train_dir)
    # Dispatch table instead of an if/elif chain over dataset names.
    loaders = {
        'svhn': lambda: input.ld_svhn(extended=True),
        'cifar10': input.ld_cifar10,
        'mnist': input.ld_mnist,
    }
    if dataset not in loaders:
        print('Check value of dataset flag')
        return False
    (train_data, train_labels, test_data, test_labels) = loaders[dataset]()
    # This teacher only sees its own shard of the training set.
    (data, labels) = input.partition_dataset(train_data, train_labels, nb_teachers, teacher_id)
    print('Length of training data: ' + str(len(labels)))
    suffix = '_deep.ckpt' if FLAGS.deeper else '.ckpt'
    filename = str(nb_teachers) + '_teachers_' + str(teacher_id) + suffix
    ckpt_path = FLAGS.train_dir + '/' + str(dataset) + '_' + filename
    assert deep_cnn.train(data, labels, ckpt_path)
    # Evaluate against the checkpoint written at the final training step.
    ckpt_path_final = ckpt_path + '-' + str(FLAGS.max_steps - 1)
    teacher_preds = deep_cnn.softmax_preds(test_data, ckpt_path_final)
    precision = metrics.accuracy(teacher_preds, test_labels)
    print('Precision of teacher after training: ' + str(precision))
    return True
def test_prod_fun_and_const_reverse():
    """Left-multiplying a Parameter by a scalar yields a two-term Product."""
    param = optplan.Parameter()
    product = 2 * param
    assert isinstance(product, optplan.Product)
    terms = product.functions
    assert len(terms) == 2
    # First factor is the parameter itself; second is the scalar constant.
    assert terms[0] == param
    const = terms[1].value
    assert const.real == 2
    assert const.imag == 0
def main(label_dir, n_comb=1, n_cands=8):
    """Build retrieval test cases (query set + candidate answer sets).

    For every outfit i in <label_dir>/test.json, combines it with
    (n_comb - 1) random other outfits, splits each outfit's item features
    into a query half (X) and an answer half (Y), then samples (n_cands - 1)
    negative answer sets from the remaining outfits.  Each case is written
    as a gzip'd JSON file under OUT_DIR/test_ncand<n_cands>/.

    Relies on module-level INPUT_DIR, OUT_DIR and _read_feature.
    Uses np.random; seed externally for reproducibility.
    """
    test_data = json.load(open(os.path.join(label_dir, 'test.json')))
    for i in range(len(test_data)):
        data = {}
        # Candidate pool excluding the anchor outfit i.
        lst = np.delete(np.arange(len(test_data)), i)
        others = np.random.choice(lst, (n_comb - 1), replace=False).tolist()
        target = ([i] + others)
        (setX_images, setY_images) = ([], [])
        for j in target:
            items = test_data[j]['items']
            images = []
            for img in items:
                path = os.path.join(INPUT_DIR, (str(img['item_id']) + '.json.gz'))
                images.append(_read_feature(path))
            images = np.array(images)
            # Random half/half (floor on Y) split into query and answer parts.
            y_size = (len(images) // 2)
            xy_mask = (([True] * (len(images) - y_size)) + ([False] * y_size))
            xy_mask = np.random.permutation(xy_mask)
            setX_images.extend(images[xy_mask].tolist())
            setY_images.extend(images[(~ xy_mask)].tolist())
        data['query'] = setX_images
        # First answer is the positive (true) completion.
        answers = [setY_images]
        for j in range((n_cands - 1)):
            # Exclude everything already used so negatives never repeat.
            lst = np.delete(np.arange(len(test_data)), target)
            negatives = np.random.choice(lst, n_comb, replace=False).tolist()
            assert (len((set(target) & set(negatives))) == 0)
            target += negatives
            setY_images = []
            for k in negatives:
                items = test_data[k]['items']
                images = []
                for img in items:
                    path = os.path.join(INPUT_DIR, (str(img['item_id']) + '.json.gz'))
                    images.append(_read_feature(path))
                images = np.random.permutation(images)
                y_size = (len(images) // 2)
                setY_images.extend(images[:y_size].tolist())
            answers.append(setY_images)
        data['answers'] = answers
        path = os.path.join(OUT_DIR, 'test_ncand{}'.format(n_cands), os.path.basename(label_dir))
        if (not os.path.isdir(path)):
            os.makedirs(path)
        path = os.path.join(path, '{0:05d}.json.gz'.format(i))
        data = json.dumps(data)
        with gzip.open(path, mode='wt') as f:
            f.write(data)
class TimeStep(collections.namedtuple('TimeStep', ['env_spec', 'observation', 'action', 'reward', 'next_observation', 'terminal', 'env_info', 'agent_info'])):
    """A single validated agent-environment transition.

    Construction checks every field against the environment spec and raises
    ValueError on the first violation: observations/actions must lie in
    their spaces, info fields must be dicts, reward must be a float, and
    terminal must be a bool.
    """

    def __new__(cls, env_spec, observation, action, reward, next_observation, terminal, env_info, agent_info):
        obs_space = env_spec.observation_space
        act_space = env_spec.action_space
        # Space membership checks first, mirroring the order callers rely on.
        if not obs_space.contains(observation):
            raise ValueError('observation must conform to observation_space {}, but got data with shape {} instead.'.format(obs_space, observation))
        if not obs_space.contains(next_observation):
            raise ValueError('next_observation must conform to observation_space {}, but got data with shape {} instead.'.format(obs_space, next_observation))
        if not act_space.contains(action):
            raise ValueError('action must conform to action_space {}, but got data with shape {} instead.'.format(act_space, action))
        # Then structural checks on the remaining fields.
        if not isinstance(agent_info, dict):
            raise ValueError('agent_info must be type {}, but got type {} instead.'.format(dict, type(agent_info)))
        if not isinstance(env_info, dict):
            raise ValueError('env_info must be type {}, but got type {} instead.'.format(dict, type(env_info)))
        if not isinstance(reward, float):
            raise ValueError('reward must be type {}, but got type {} instead.'.format(float, type(reward)))
        if not isinstance(terminal, bool):
            raise ValueError('terminal must be dtype bool, but got dtype {} instead.'.format(type(terminal)))
        return super().__new__(TimeStep, env_spec, observation, action, reward, next_observation, terminal, env_info, agent_info)
def save_checkpoint(state, save_dir, is_best=False, remove_module_from_keys=False):
    """Save a training checkpoint dict to save_dir.

    The file is named 'model.pth.tar-<epoch>'.  With
    remove_module_from_keys=True, DataParallel's 'module.' prefix is
    stripped from every state_dict key first.  With is_best=True the file
    is also copied to 'model-best.pth.tar' in the same directory.
    """
    mkdir_if_missing(save_dir)
    if remove_module_from_keys:
        # Strip the 'module.' prefix that torch.nn.DataParallel adds.
        stripped = OrderedDict()
        for key, value in state['state_dict'].items():
            if key.startswith('module.'):
                key = key[7:]
            stripped[key] = value
        state['state_dict'] = stripped
    fpath = osp.join(save_dir, 'model.pth.tar-' + str(state['epoch']))
    torch.save(state, fpath)
    print('Checkpoint saved to "{}"'.format(fpath))
    if is_best:
        shutil.copy(fpath, osp.join(osp.dirname(fpath), 'model-best.pth.tar'))
# NOTE(review): the decorator lines were garbled in the source to
# `.corpus .parametrize(...)`; they most plausibly read as the pytest marks
# restored below — confirm against version control.
@pytest.mark.corpus
@pytest.mark.parametrize('use_cache', [False, True])
def test_voxceleb1sid(use_cache):
    """Split the VoxCeleb1 speaker-ID corpus into train/valid/test sets.

    Reads the corpus location from the `VoxCeleb1` key of the local .env
    file and raises ValueError when the path is missing or not a directory.
    """
    config = dotenv_values()
    voxceleb1 = Path(config['VoxCeleb1'])
    if voxceleb1.is_dir():
        (train_data, valid_data, test_data) = VoxCeleb1SID(voxceleb1).data_split
    else:
        raise ValueError('Please set the VoxCeleb1 path in .env')
def recommend(abstract: str):
    """Sample up to three candidate titles for a paper abstract.

    Loads the fine-tuned T5 checkpoint on CPU and generates titles with
    top-k/top-p sampling from the 'summarize:'-prefixed abstract.
    """
    from simpletransformers.t5 import T5Model

    generation_args = {
        'reprocess_input_data': True,
        'overwrite_output_dir': True,
        'max_seq_length': 256,
        'eval_batch_size': 128,
        'num_train_epochs': 1,
        'save_eval_checkpoints': False,
        'use_multiprocessing': False,
        # Sampling (not beam search): top-k/top-p with 3 returned sequences.
        'num_beams': None,
        'do_sample': True,
        'max_length': 50,
        'top_k': 50,
        'top_p': 0.95,
        'num_return_sequences': 3,
    }
    model = T5Model('t5', './checkpoint_15000_1', args=generation_args, use_cuda=False)
    return model.predict(['summarize: ' + abstract])
def recursiveAdd(t1, val=1, t2=None):
    """Recursively compute ``t1 += val * t2`` over nested lists of tensors.

    Call forms:
        recursiveAdd(t1, t2)       -> t1 += t2       (val defaults to 1)
        recursiveAdd(t1, val, t2)  -> t1 += val * t2

    Tensors are modified in place; list structure is recursed element-wise
    (a bare tensor paired with a list is wrapped into a one-element list).

    Returns:
        (t1, t2) after the in-place update.

    Raises:
        RuntimeError: if the leaves are not both torch.Tensor.
    """
    if t2 is None:
        # Two-argument form: the second positional argument is the addend.
        t2 = val
        val = 1
    if isinstance(t2, list):
        t1 = t1 if isinstance(t1, list) else [t1]
        for i, _ in enumerate(t2):
            t1[i], t2[i] = recursiveAdd(t1[i], val, t2[i])
    elif isinstance(t1, torch.Tensor) and isinstance(t2, torch.Tensor):
        # BUGFIX: t1.add_(val, t2) uses the deprecated positional
        # (Number, Tensor) overload; the supported spelling is the
        # alpha keyword, with identical semantics (t1 += val * t2).
        t1.add_(t2, alpha=val)
    else:
        raise RuntimeError('expecting nested tensors or tables. Got ' + type(t1).__name__ + ' and ' + type(t2).__name__ + ' instead')
    return (t1, t2)
class MackeyGlass(SequenceDataset):
    """Mackey-Glass chaotic time-series forecasting dataset.

    Generates train/val splits of the Mackey-Glass delay differential
    equation and exposes them as TensorDatasets; the task predicts the
    series l_predict steps ahead at every position of a length-l_seq input.
    """

    # Registry key for this dataset.
    _name_ = 'mackey'

    # NOTE(review): in SequenceDataset subclasses these accessors are
    # conventionally @property-decorated; the decorators may have been lost
    # in this copy — confirm against the original file.
    def init_defaults(self):
        # l_seq: sequence length; l_predict: forecast horizon; tau: MG delay;
        # washout: discarded transient; delta_t: sampling interval.
        return {'l_seq': 5000, 'l_predict': 15, 'tau': 17, 'washout': 100, 'delta_t': 10, 'n_train': 1024, 'n_eval': 64}

    def d_input(self):
        # Scalar series in, ...
        return 1

    def d_output(self):
        # ... scalar prediction out.
        return 1

    def l_output(self):
        # One prediction per input position.
        return self.l_seq

    def setup(self):
        """Generate the train/val splits; there is no test split."""
        from .datasets.mackey import mackey_glass
        (train_X, train_Y) = mackey_glass(n_samples=self.n_train, l_seq=self.l_seq, l_predict=self.l_predict, tau=self.tau, washout=self.washout, delta_t=self.delta_t)
        (train_X, train_Y) = (torch.FloatTensor(train_X), torch.FloatTensor(train_Y))
        (val_X, val_Y) = mackey_glass(n_samples=self.n_eval, l_seq=self.l_seq, l_predict=self.l_predict, tau=self.tau, washout=self.washout, delta_t=self.delta_t)
        (val_X, val_Y) = (torch.FloatTensor(val_X), torch.FloatTensor(val_Y))
        self.dataset_train = torch.utils.data.TensorDataset(train_X, train_Y)
        self.dataset_val = torch.utils.data.TensorDataset(val_X, val_Y)
        self.dataset_test = None

    def __str__(self):
        return f'{self._name_}'
class Parsing(torch.nn.Module):
    """Human-parsing branch: head + output layers with optional parsing-IoU.

    The concrete head/output modules are looked up in the project registries
    by the names configured under cfg.PARSING.
    """

    def __init__(self, cfg, dim_in, spatial_in):
        super(Parsing, self).__init__()
        self.dim_in = dim_in
        self.spatial_in = spatial_in
        # Whether to also predict a parsing-IoU quality score.
        self.parsingiou_on = cfg.PARSING.PARSINGIOU_ON
        head = registry.PARSING_HEADS[cfg.PARSING.PARSING_HEAD]
        self.Head = head(cfg, self.dim_in, self.spatial_in)
        output = registry.PARSING_OUTPUTS[cfg.PARSING.PARSING_OUTPUT]
        self.Output = output(cfg, self.Head.dim_out, self.Head.spatial_out)
        self.loss_evaluator = parsing_loss_evaluator(cfg)
        if self.parsingiou_on:
            self.ParsingIoU = ParsingIoU(cfg, self.Head.dim_out, self.Head.spatial_out)
        self.dim_out = self.Output.dim_out
        self.spatial_out = self.Output.spatial_out

    def forward(self, conv_features, targets=None):
        """Dispatch to the train (loss) or test (inference) path."""
        if self.training:
            return self._forward_train(conv_features, targets)
        else:
            return self._forward_test(conv_features)

    def _forward_train(self, conv_features, targets=None):
        """Compute parsing (and optional parsing-IoU) losses.

        Returns (None, losses) — no feature output is propagated in training.
        """
        losses = dict()
        x = self.Head(conv_features)
        logits = self.Output(x)
        (loss_parsing, parsingiou_targets) = self.loss_evaluator(logits, targets['parsing'])
        losses.update(dict(loss_parsing=loss_parsing))
        if self.parsingiou_on:
            (loss_parsingiou, _) = self.ParsingIoU(x, parsingiou_targets)
            losses.update(dict(loss_parsingiou=loss_parsingiou))
        return (None, losses)

    def _forward_test(self, conv_features):
        """Run inference; returns (results, {}) with per-pixel probabilities.

        parsing_iou_scores defaults to all-ones and is overwritten by the
        ParsingIoU prediction when that sub-module is enabled.
        """
        x = self.Head(conv_features)
        logits = self.Output(x)
        # NOTE(review): softmax is applied to logits[-1] — presumably the
        # output module returns a list/tuple of logits; confirm.
        output = F.softmax(logits[(- 1)], dim=1)
        results = dict(probs=output, parsing_iou_scores=torch.ones(output.size()[0], dtype=torch.float32, device=output.device))
        if self.parsingiou_on:
            (_, parsingiou) = self.ParsingIoU(x, None)
            results.update(dict(parsing_iou_scores=parsingiou.squeeze(1)))
        return (results, {})
def get_augmentations():
    """Return the MNIST training augmentation pipeline (28x28 grayscale).

    Each geometric augmentation is applied independently with probability
    0.5; normalization uses the standard MNIST mean/std; RandomErasing runs
    after ToTensor since it operates on tensors.
    """
    two_px = 2 / 28.0   # translate by up to 2 pixels
    four_px = 4 / 28.0  # erase a 4x4-pixel patch
    rotate = transforms.RandomApply([transforms.RandomRotation(30)], p=0.5)
    translate = transforms.RandomApply(
        [transforms.RandomAffine(degrees=0, translate=(two_px, two_px))], p=0.5)
    rescale = transforms.RandomApply(
        [transforms.RandomAffine(degrees=0, translate=(0, 0), scale=(0.75, 1))], p=0.5)
    erase = transforms.RandomErasing(
        p=0.5, scale=(four_px, four_px), ratio=(1.0, 1.0), value=0, inplace=False)
    return [
        rotate,
        translate,
        rescale,
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
        erase,
    ]
class SawyerHammerV2Policy(Policy):
    """Scripted policy for the Sawyer hammer task: grab the hammer, lift it,
    and drive it toward the nail/goal position.

    NOTE(review): the decorators below were garbled in the source (a bare
    `_fully_parsed` token and helpers taking no `self`); restored as
    `@staticmethod` / `@assert_fully_parsed` per the standard scripted-policy
    layout — confirm against version control.
    """

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        """Split the flat observation into named components."""
        return {'hand_pos': obs[:3], 'hammer_pos': obs[3:6], 'unused_info': obs[6:]}

    def get_action(self, obs):
        """Compute the 4-dof action (xyz delta + gripper effort) for obs."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        # P-controller toward the current waypoint.
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.0)
        action['grab_effort'] = self._grab_effort(o_d)
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Waypoint sequencing: hover over hammer -> descend -> carry to goal."""
        pos_curr = o_d['hand_pos']
        # Offset from hammer center to its grasp point.
        pos_puck = o_d['hammer_pos'] + np.array([(- 0.04), 0.0, (- 0.01)])
        pos_goal = np.array([0.24, 0.71, 0.11]) + np.array([(- 0.19), 0.0, 0.05])
        if np.linalg.norm(pos_curr[:2] - pos_puck[:2]) > 0.04:
            # Not yet above the hammer: hover 10cm over the grasp point.
            return pos_puck + np.array([0.0, 0.0, 0.1])
        elif abs(pos_curr[2] - pos_puck[2]) > 0.05 and pos_puck[(- 1)] < 0.03:
            # Above but not at grasp height (hammer still on the table).
            return pos_puck + np.array([0.0, 0.0, 0.03])
        elif np.linalg.norm(pos_curr[[0, 2]] - pos_goal[[0, 2]]) > 0.02:
            # Carry toward the goal in the x-z plane, keeping current y.
            return np.array([pos_goal[0], pos_curr[1], pos_goal[2]])
        else:
            return pos_goal

    @staticmethod
    def _grab_effort(o_d):
        """Open the gripper until aligned with the hammer, then grip firmly."""
        pos_curr = o_d['hand_pos']
        pos_puck = o_d['hammer_pos'] + np.array([(- 0.04), 0.0, (- 0.01)])
        if (np.linalg.norm(pos_curr[:2] - pos_puck[:2]) > 0.04) or (abs(pos_curr[2] - pos_puck[2]) > 0.1):
            return 0.0
        else:
            return 0.8
def make_remote(f: Callable, num_gpus: int) -> Callable:
    """Build a Ray remote wrapper that runs a bound method in a fresh worker.

    The returned remote function expects args[0] to be an object and args[1]
    the name of one of its methods; it calls that method and returns the
    (possibly mutated) object.  `max_calls=1` forces a new worker process per
    invocation so GPU memory is released between calls.

    NOTE(review): the decorator line was garbled in the source to
    `(f) (num_gpus=num_gpus, max_calls=1)`; restored as
    `@ray.remote(num_gpus=num_gpus, max_calls=1)` (the stray `(f)` residue is
    consistent with a lost `@wraps(f)`) — confirm against version control.
    """
    f = ray.put(f)  # pin the original callable in the object store

    @ray.remote(num_gpus=num_gpus, max_calls=1)
    def wrapper(*args, **kwargs):
        # Resolve the method named args[1] on the object args[0].
        f = getattr(args[0], args[1])
        print(f'''
Running {f.__name__} in a new process''')
        f()
        return args[0]
    return wrapper
def test_merge_option_of_records_3():
    """merge_option_of_records pushes an outer option into the record fields."""
    layout = ak.Array([[[[None, {'a': 1, 'b': 2}]]]])
    # Option sits outside the record before the merge ...
    assert str(layout.type) == '1 * var * var * var * ?{a: int64, b: int64}'
    merged = ak.merge_option_of_records(layout, axis=-1)
    expected = [[[[{'a': None, 'b': None}, {'a': 1, 'b': 2}]]]]
    assert merged.tolist() == expected
    # ... and on each field afterwards.
    assert str(merged.type) == '1 * var * var * var * {a: ?int64, b: ?int64}'
def _divide_by_count(a, b, out=None): with np.errstate(invalid='ignore', divide='ignore'): if isinstance(a, np.ndarray): if (out is None): return np.divide(a, b, out=a, casting='unsafe') else: return np.divide(a, b, out=out, casting='unsafe') elif (out is None): return a.dtype.type((a / b)) else: return np.divide(a, b, out=out, casting='unsafe')
def to_execution_step(step_or_nets, default_name=None):
    """Coerce an ExecutionStep, NetBuilder, or net(s) into an ExecutionStep.

    An ExecutionStep passes through untouched.  A NetBuilder contributes
    its stop blob and its built nets.  The object's own `name` attribute
    supplies the step name when `default_name` is falsy.
    """
    from caffe2.python.net_builder import NetBuilder
    if isinstance(step_or_nets, ExecutionStep):
        return step_or_nets
    stop_blob = None
    # Falsy check on purpose: an empty name also falls back to the attribute.
    if not default_name and hasattr(step_or_nets, 'name'):
        default_name = step_or_nets.name
    if isinstance(step_or_nets, NetBuilder):
        stop_blob = step_or_nets._stop_blob
        step_or_nets = step_or_nets.get()
    return execution_step(default_name, step_or_nets, should_stop_blob=stop_blob)
def test_max_two_array():
    """Elementwise max of two variables, checked via a dot with [1, -1]."""
    with goos.OptimizationPlan() as plan:
        a = goos.Variable([2, 1])
        b = goos.Variable([3, 0.5])
        # max(a, b) = [3, 1]; dot with [1, -1] gives 2.
        res = goos.dot(goos.max(a, b), [1, -1])
        np.testing.assert_array_equal(res.get().array, 2)
        # Gradient routes to whichever input won the elementwise max.
        np.testing.assert_array_equal(res.get_grad([a])[0].array_grad, [0, -1])
        np.testing.assert_array_equal(res.get_grad([b])[0].array_grad, [1, 0])
class BaxterGripper(Gripper):
    """Baxter parallel gripper with its two actuated finger joints."""

    def __init__(self, count: int = 0):
        joint_names = ['BaxterGripper_leftJoint', 'BaxterGripper_rightJoint']
        super().__init__(count, 'BaxterGripper', joint_names)
class hf_optimizer():
    """Hessian-free (truncated Newton) optimizer for Theano models.

    Implements Martens-style HF training: batched gradients, damped
    Gauss-Newton curvature products, preconditioned conjugate gradient with
    CG backtracking, Levenberg-Marquardt damping adaptation, and optional
    structural damping for recurrent models.

    Python 3 fixes relative to the original: the removed ``file()`` builtin
    is replaced with ``open()`` context managers, ``collections.Callable``
    (removed in 3.10) with the ``callable()`` builtin, and the CG stopping
    window now uses integer division so it can index a list.
    """

    def __init__(self, _p, inputs, s, costs, h=None, ha=None):
        """Compile gradient, cost, and Gauss-Newton product functions.

        _p: list of Theano shared variables (the parameters).
        inputs: symbolic inputs of the model.
        s: symbolic output the Gauss-Newton approximation linearizes around.
        costs: list of symbolic costs; costs[0] is the one optimized.
        h, ha: optional symbolic hidden states enabling structural damping.
        """
        self.p = _p
        self.shapes = [i.get_value().shape for i in _p]
        self.sizes = list(map(numpy.prod, self.shapes))
        # Offset of each flattened parameter inside the big flat vector.
        self.positions = numpy.cumsum([0] + self.sizes)[:-1]
        g = T.grad(costs[0], _p)
        g = list(map(T.as_tensor_variable, g))
        self.f_gc = compile_function(inputs, g + costs)   # gradients + costs
        self.f_cost = compile_function(inputs, costs)     # costs only
        symbolic_types = (T.scalar, T.vector, T.matrix, T.tensor3, T.tensor4)
        # One symbolic direction per parameter, matching its rank.
        v = [symbolic_types[len(i)]() for i in self.shapes]
        Gv = gauss_newton_product(costs[0], _p, v, s)
        coefficient = T.scalar()
        if h is not None:
            # Structural damping: cross-entropy penalty on the hidden state,
            # scaled by the (damping-dependent) coefficient.
            h_constant = symbolic_types[h.ndim]()
            structural_damping = (coefficient * (((-h_constant) * T.log(h + 1e-10)) - ((1 - h_constant) * T.log((1 - h) + 1e-10))).sum()) / h.shape[0]
            if ha is None:
                ha = h
            Gv_damping = gauss_newton_product(structural_damping, _p, v, ha)
            Gv = [(a + b) for (a, b) in zip(Gv, Gv_damping)]
            givens = {h_constant: h}
        else:
            givens = {}
        self.function_Gv = compile_function(inputs + v + [coefficient], Gv, givens=givens)

    def quick_cost(self, delta=0):
        """Mean cost over the CG dataset at p + delta, restoring p afterwards."""
        if isinstance(delta, numpy.ndarray):
            delta = self.flat_to_list(delta)
        if type(delta) in (list, tuple):
            for i, d in zip(self.p, delta):
                i.set_value(i.get_value() + d)
        cost = numpy.mean([self.f_cost(*i)[0] for i in self.cg_dataset.iterate(update=False)])
        if type(delta) in (list, tuple):
            # Undo the temporary parameter shift.
            for i, d in zip(self.p, delta):
                i.set_value(i.get_value() - d)
        return cost

    def cg(self, b, verbose=False):
        """Preconditioned CG solve of (G + lambda I) x = b with backtracking.

        Returns (cost, x, iteration_found, total_iterations) for the
        backtracked candidate with the lowest quick_cost.
        """
        if self.preconditioner:
            # Martens-style diagonal preconditioner built from squared
            # per-batch gradients, plus the damping term.
            M = self.lambda_ * numpy.ones_like(b)
            for inputs in self.cg_dataset.iterate(update=False):
                M += self.list_to_flat(self.f_gc(*inputs)[:len(self.p)]) ** 2
            M **= -0.75
            sys.stdout.flush()
        else:
            M = 1.0
        # Warm start from the previous CG solution when available.
        x = self.cg_last_x if hasattr(self, 'cg_last_x') else numpy.zeros_like(b)
        r = b - self.batch_Gv(x)
        d = M * r
        delta_new = numpy.dot(r, d)
        phi = []           # quadratic-model values, for the stopping test
        backtracking = []  # (cost, x copy, iteration) snapshots
        backspaces = 0
        for i in range(1, 1 + self.max_cg_iterations):
            q = self.batch_Gv(d)
            dq = numpy.dot(d, q)
            alpha = delta_new / dq
            x = x + alpha * d
            r = r - alpha * q
            s = M * r
            delta_old = delta_new
            delta_new = numpy.dot(r, s)
            d = s + (delta_new / delta_old) * d
            # Snapshot candidates at geometrically spaced iterations.
            if i >= int(numpy.ceil(1.3 ** len(backtracking))):
                backtracking.append((self.quick_cost(x), x.copy(), i))
            phi_i = (-0.5) * numpy.dot(x, r + b)
            phi.append(phi_i)
            if verbose:
                progress = ' [CG iter %i, phi=%+.5f, cost=%.5f]' % (i, phi_i, backtracking[-1][0])
                sys.stdout.write(('\x08' * backspaces) + progress)
                sys.stdout.flush()
                backspaces = len(progress)
            # Martens' relative-progress stopping criterion.
            # BUGFIX: use floor division; under Python 3, `i / 10` is a float
            # and `phi[-k - 1]` would raise TypeError.
            k = max(10, i // 10)
            if i > k and phi_i < 0 and (phi_i - phi[-k - 1]) / phi_i < k * 0.0005:
                break
        self.cg_last_x = x.copy()
        if self.global_backtracking:
            # Pick the globally cheapest snapshot.
            j = numpy.argmin([candidate[0] for candidate in backtracking])
        else:
            # Walk back while cost was still improving.
            j = len(backtracking) - 1
            while j > 0 and backtracking[j - 1][0] < backtracking[j][0]:
                j -= 1
        if verbose:
            print(' backtracked %i/%i' % (backtracking[j][2], i), end=' ')
            sys.stdout.flush()
        return backtracking[j] + (i,)

    def flat_to_list(self, vector):
        """Split a flat vector into per-parameter arrays of the right shapes."""
        return [vector[position:position + size].reshape(shape) for (shape, size, position) in zip(self.shapes, self.sizes, self.positions)]

    def list_to_flat(self, l):
        """Concatenate a list of arrays into one flat vector."""
        return numpy.concatenate([i.flatten() for i in l])

    def batch_Gv(self, vector, lambda_=None):
        """Damped Gauss-Newton product (G + lambda I) v over the CG dataset."""
        v = self.flat_to_list(vector)
        if lambda_ is None:
            lambda_ = self.lambda_
        result = lambda_ * vector  # Tikhonov damping term
        for inputs in self.cg_dataset.iterate(False):
            result += self.list_to_flat(self.function_Gv(*(inputs + v + [lambda_ * self.mu]))) / self.cg_dataset.number_batches
        return result

    def train(self, gradient_dataset, cg_dataset, initial_lambda=0.1, mu=0.03, global_backtracking=False, preconditioner=False, max_cg_iterations=250, num_updates=100, validation=None, validation_frequency=1, patience=numpy.inf, save_progress=None, itr_callback=None, verbose=False):
        """Run HF training for up to num_updates outer iterations.

        validation may be a dataset (with .iterate()) or a callable returning
        costs; patience stops training after that many updates without a new
        best validation cost; save_progress names a pickle file used to
        checkpoint and resume.  Returns the best parameter values found
        (a list of arrays), falling back to the current parameters when no
        validation improvement was recorded.
        """
        self.lambda_ = initial_lambda
        self.mu = mu
        self.global_backtracking = global_backtracking
        self.cg_dataset = cg_dataset
        self.preconditioner = preconditioner
        self.max_cg_iterations = max_cg_iterations
        best = [0, numpy.inf, None]  # [update index, val cost, param copies]
        first_iteration = 1
        if isinstance(save_progress, str) and os.path.isfile(save_progress):
            # BUGFIX: the Python 2 file() builtin does not exist in Python 3;
            # use open() in a context manager.
            with open(save_progress, 'rb') as fp:
                save = pickle.load(fp)
            (self.cg_last_x, best, self.lambda_, first_iteration, init_p) = save
            first_iteration += 1
            if verbose:
                print('* recovered saved model')
        try:
            for u in range(first_iteration, 1 + num_updates):
                if verbose:
                    print('update %i/%i,' % (u, num_updates), end=' ')
                    sys.stdout.flush()
                # Accumulate the mean gradient over the gradient dataset.
                gradient = numpy.zeros(sum(self.sizes), dtype=theano.config.floatX)
                costs = []
                for inputs in gradient_dataset.iterate(update=True):
                    result = self.f_gc(*inputs)
                    gradient += self.list_to_flat(result[:len(self.p)]) / gradient_dataset.number_batches
                    costs.append(result[len(self.p):])
                if verbose:
                    print('cost=', numpy.mean(costs, axis=0), end=' ')
                    print('lambda=%.5f,' % self.lambda_, end=' ')
                    sys.stdout.flush()
                (after_cost, flat_delta, backtracking, num_cg_iterations) = self.cg(-gradient)
                # Predicted reduction under the (undamped) quadratic model.
                delta_cost = numpy.dot(flat_delta, gradient + 0.5 * self.batch_Gv(flat_delta, lambda_=0))
                before_cost = self.quick_cost()
                for i, delta in zip(self.p, self.flat_to_list(flat_delta)):
                    i.set_value(i.get_value() + delta)
                cg_dataset.update()
                if itr_callback is not None:
                    itr_callback()
                # Levenberg-Marquardt damping adjustment from the reduction
                # ratio (actual / predicted).
                rho = (after_cost - before_cost) / delta_cost
                if rho < 0.25:
                    self.lambda_ *= 1.5
                elif rho > 0.75:
                    self.lambda_ /= 1.5
                if validation is not None and (u % validation_frequency) == 0:
                    if hasattr(validation, 'iterate'):
                        costs = numpy.mean([self.f_cost(*i) for i in validation.iterate()], axis=0)
                    elif callable(validation):
                        # BUGFIX: collections.Callable was removed in
                        # Python 3.10; the callable() builtin is equivalent.
                        costs = validation()
                    if verbose:
                        print('validation=', costs, end=' ')
                    if costs[0] < best[1]:
                        best = (u, costs[0], [i.get_value().copy() for i in self.p])
                        if verbose:
                            print('*NEW BEST', end=' ')
                if isinstance(save_progress, str):
                    save = (self.cg_last_x, best, self.lambda_, u, [i.get_value().copy() for i in self.p])
                    # BUGFIX: file() -> open() (Python 3).
                    with open(save_progress, 'wb') as fp:
                        pickle.dump(save, fp, pickle.HIGHEST_PROTOCOL)
                if (u - best[0]) > patience:
                    if verbose:
                        print('PATIENCE ELAPSED, BAILING OUT')
                    break
                if verbose:
                    print()
                    sys.stdout.flush()
        except KeyboardInterrupt:
            if verbose:
                print('Interrupted by user.')
        if best[2] is None:
            best[2] = [i.get_value().copy() for i in self.p]
        return best[2]
def run_hyperopt(kwargs: dict, score_function: Callable, param_space_function: Callable, initial_points: list):
    """Run an Optuna-backed Ray Tune hyperparameter search and persist results.

    Args:
        kwargs: experiment configuration; keys used here include 'save_dir',
            'debug', 'seed', 'gpus_per_trial', 'cpus_per_trial', 'num_workers',
            'max_concurrent', 'num_h_samples', 'grace_period', and optionally
            'tune_checkpoint' for resuming.
        score_function: per-trial trainable; expected to report 'val_loss'.
        param_space_function: Optuna search-space definition passed to OptunaSearch.
        initial_points: configurations to evaluate before the search proper.

    Side effects: writes args.yaml, best_trial.yaml and full_res_tbl.tsv under
    save_dir, plus Tune's own trial directories.
    """
    ray.init('local')
    # Trials only request GPUs when the config asks for them.
    kwargs['gpu'] = (kwargs.get('gpus_per_trial', 0) > 0)
    if kwargs['debug']:
        # Shrink the search for quick smoke tests.
        kwargs['num_h_samples'] = 10
        kwargs['max_epochs'] = 5
    save_dir = kwargs['save_dir']
    common.setup_logger(save_dir, log_name='hyperopt.log', debug=kwargs.get('debug', False))
    pl.utilities.seed.seed_everything(kwargs.get('seed'))
    # Bind static experiment args to the trainable; orig_dir lets trials
    # resolve paths relative to the launch directory.
    trainable = tune.with_parameters(score_function, base_args=kwargs, orig_dir=Path().resolve())
    yaml_args = yaml.dump(kwargs)
    logging.info(f'''
{yaml_args}''')
    with open((Path(save_dir) / 'args.yaml'), 'w') as fp:
        fp.write(yaml_args)
    metric = 'val_loss'
    # Two resource bundles per trial: one for the trial process itself,
    # one for its dataloader workers; PACK keeps them co-located.
    trainable = tune.with_resources(trainable, resources=tune.PlacementGroupFactory([{'CPU': kwargs.get('cpus_per_trial'), 'GPU': kwargs.get('gpus_per_trial')}, {'CPU': kwargs.get('num_workers')}], strategy='PACK'))
    search_algo = OptunaSearch(metric=metric, mode='min', points_to_evaluate=initial_points, space=param_space_function)
    search_algo = ConcurrencyLimiter(search_algo, max_concurrent=kwargs['max_concurrent'])
    # ASHA prunes by wall-clock time with a 24h cap per trial.
    tuner = tune.Tuner(trainable, tune_config=tune.TuneConfig(mode='min', metric=metric, search_alg=search_algo, scheduler=ASHAScheduler(max_t=((24 * 60) * 60), time_attr='time_total_s', grace_period=kwargs.get('grace_period'), reduction_factor=2), num_samples=kwargs.get('num_h_samples')), run_config=RunConfig(name=None, local_dir=kwargs['save_dir']))
    if (kwargs.get('tune_checkpoint') is not None):
        # Resume a previous run, re-running any errored trials.
        ckpt = str(Path(kwargs['tune_checkpoint']).resolve())
        tuner = tuner.restore(path=ckpt, restart_errored=True)
    results = tuner.fit()
    best_trial = results.get_best_result()
    output = {'score': best_trial.metrics[metric], 'config': best_trial.config}
    out_str = yaml.dump(output, indent=2)
    logging.info(out_str)
    with open((Path(save_dir) / 'best_trial.yaml'), 'w') as f:
        f.write(out_str)
    results.get_dataframe().to_csv((Path(save_dir) / 'full_res_tbl.tsv'), sep='\t', index=None)
def broadcast(tensor, devices=None, *, out=None):
    """Broadcast a tensor to multiple devices.

    Exactly one of `devices` (a sequence of device specifiers) or `out`
    (pre-allocated destination tensors) must be given; a RuntimeError is
    raised otherwise.  Returns the per-device copies.
    """
    have_devices = devices is not None
    have_out = out is not None
    # Reject both-given and neither-given in one check.
    if have_devices == have_out:
        raise RuntimeError(
            "Exactly one of 'devices' and 'out' must be specified, but got devices={} and out={}".format(devices, out))
    if have_devices:
        # Normalize arbitrary device specifiers to integer indices first.
        indices = [_get_device_index(d) for d in devices]
        return torch._C._broadcast(tensor, indices)
    return torch._C._broadcast_out(tensor, out)
def _transformation_determine_affected_nodes(sdfg: SDFG, transformation: Union[(PatternTransformation, SubgraphTransformation)], strict: bool=False) -> Set[Union[(nd.Node, SDFGState)]]:
    """Determine which graph elements (nodes / states) a transformation touches.

    For pattern transformations this is the set of matched pattern nodes
    (with extra handling for DetectLoop); for subgraph transformations it is
    the subgraph's nodes.  Unless `strict` is True, the set is expanded to
    whole map scopes so that anything inside an affected map is included.
    """
    target_sdfg = sdfg
    affected_nodes = set()
    if isinstance(transformation, PatternTransformation):
        # Resolve the (possibly nested) SDFG the transformation applies to.
        if ((transformation.sdfg_id >= 0) and target_sdfg.sdfg_list):
            target_sdfg = target_sdfg.sdfg_list[transformation.sdfg_id]
        for (k, _) in transformation._get_pattern_nodes().items():
            try:
                affected_nodes.add(getattr(transformation, k))
            except KeyError:
                # Pattern node not bound in this particular match; skip it.
                pass
        if isinstance(transformation, DetectLoop):
            # The guard's predecessor states feed the loop and are affected.
            if ((transformation.loop_guard is not None) and (transformation.loop_guard in target_sdfg.nodes())):
                for iedge in target_sdfg.in_edges(transformation.loop_guard):
                    affected_nodes.add(iedge.src)
            # BFS over the loop body starting at loop_begin, stopping at the
            # guard, collecting every state inside the loop.
            if ((transformation.loop_begin is not None) and (transformation.loop_begin in target_sdfg.nodes())):
                to_visit = [transformation.loop_begin]
                while to_visit:
                    state = to_visit.pop(0)
                    for (_, dst, _) in target_sdfg.out_edges(state):
                        if ((dst not in affected_nodes) and (dst is not transformation.loop_guard)):
                            to_visit.append(dst)
                    affected_nodes.add(state)
        # An SDFG-level transformation on a nested SDFG affects the parent
        # nested-SDFG node in the enclosing graph.
        if ((len(affected_nodes) == 0) and (transformation.state_id < 0) and (target_sdfg.parent_nsdfg_node is not None)):
            affected_nodes.add(target_sdfg.parent_nsdfg_node)
    else:
        # SubgraphTransformation: the affected set is the subgraph itself.
        if ((transformation.sdfg_id >= 0) and target_sdfg.sdfg_list):
            target_sdfg = target_sdfg.sdfg_list[transformation.sdfg_id]
        subgraph = transformation.get_subgraph(target_sdfg)
        for n in subgraph.nodes():
            affected_nodes.add(n)
    if strict:
        return affected_nodes
    # Non-strict: expand each affected map entry/exit to its full scope.
    if (hasattr(transformation, 'state_id') and (transformation.state_id >= 0)):
        state = target_sdfg.node(transformation.state_id)
        expanded = set()
        for node in affected_nodes:
            expanded.add(node)
            scope_entry = None
            if isinstance(node, nd.MapEntry):
                scope_entry = node
            elif isinstance(node, nd.MapExit):
                scope_entry = state.entry_node(node)
            if (scope_entry is not None):
                scope = state.scope_subgraph(scope_entry, include_entry=True, include_exit=True)
                for n in scope.nodes():
                    expanded.add(n)
        return expanded
    return affected_nodes
class InfoManager():
    """Coordinates a set of NLU model clients (intent, NER, negation, coref, ...)
    and merges their per-sentence outputs into a single final intent.
    """

    def __init__(self, info_config_file, task_config: TaskConfig=None, faq_config: FAQConfig=None):
        # models_info: per-model config (init_args / call_args / context_args).
        self.models_info = load_info_logic(info_config_file)
        self.models = {}
        # Minimum probability gain (negation-removed vs. original) required to
        # flip an intent to 'negative' in intent_resolution().
        self.negate_intent_threshold = 0.2
        for m_name in self.models_info:
            init_args = (self.models_info[m_name]['init_args'] if ('init_args' in self.models_info[m_name]) else {})
            # The intent model additionally receives task/FAQ configuration.
            if (task_config and (m_name == 'intent')):
                init_args['task_config'] = task_config
                init_args['faq_config'] = faq_config
            self.models[m_name] = client_dict[m_name](**init_args)

    def collect_info(self, utt: str, model_names: list, ctx: DialogContext):
        """Run each named model on the utterance (or on dialog-history context
        when the model declares 'context_args'); the intent model is also run
        per sentence under the 'intent_seg' key."""
        res = {}
        context = utt
        for name in model_names:
            assert (name in client_dict)
            if ('context_args' in self.models_info[name]):
                context = ctx.user_history.extract_utt(**self.models_info[name]['context_args'])
            call_args = (self.models_info[name]['call_args'] if ('call_args' in self.models_info[name]) else {})
            res[name] = self.models[name](context, **call_args)
            if (name == 'intent'):
                sents_after_seg = sent_tokenize(utt)
                res['intent_seg'] = [self.models[name](sent, **call_args) for sent in sents_after_seg]
        return res

    def intent_resolution(self, intent_res_1, intent_res_2, negation_flag=False):
        """Merge the original-sentence intent (intent_res_1) with the intent of
        the rewritten sentence (negation removed or coref resolved,
        intent_res_2) into one {'intent', 'prob', 'uncertain'} result."""
        got_intent_1 = (intent_res_1['success'] if intent_res_1 else False)
        got_intent_2 = (intent_res_2['success'] if intent_res_2 else False)
        intent_1 = (intent_res_1['intent'] if got_intent_1 else '')
        intent_2 = (intent_res_2['intent'] if got_intent_2 else '')
        prob1 = (intent_res_1['prob'] if got_intent_1 else 0.0)
        prob2 = (intent_res_2['prob'] if got_intent_2 else 0.0)
        res = {'intent': intent_1, 'prob': prob1, 'uncertain': False}
        if negation_flag:
            # Same intent but markedly more confident once negation words are
            # removed => the original sentence was a negation of that intent.
            if (intent_1 and intent_2 and (intent_1 == intent_2) and ((prob2 - prob1) > self.negate_intent_threshold)):
                negate_intent = True
            else:
                negate_intent = False
        else:
            negate_intent = False
        if negate_intent:
            res['intent'] = 'negative'
        if (not negation_flag):
            # Coref-resolved rewrite: prefer its intent when it differs and is
            # more confident; matching non-polar intents are marked uncertain.
            if (intent_2 and (intent_2 != intent_1) and (prob2 > prob1)):
                res['intent'] = intent_2
                res['prob'] = prob2
            elif (intent_2 and (intent_2 == intent_1)):
                if ((intent_1 != 'positive') and (intent_1 != 'negative')):
                    res['uncertain'] = True
        return res

    def info_pipeline(self, asr_origin: str, asr_norm: str, ctx: DialogContext, models=['intent', 'ner']):
        """Full per-turn pipeline: store the utterance, run all models, rewrite
        the text (negation removal + coreference replacement), re-run intent on
        rewritten sentences, and merge everything into res['final_intent']."""
        tokenized_utt = word_tokenize(asr_norm)
        pos_tags = nltk.pos_tag(tokenized_utt)
        # NOTE(review): the second argument looks like a strftime format string
        # rather than an actual timestamp — confirm store_utt's contract.
        ctx.store_utt('spk1', '%Y-%m-%d %H:%M:%S', asr_norm, tokenized_text=tokenized_utt, pos_tags=pos_tags)
        models = ([m_name for m_name in self.models_info] if self.models_info else models)
        res = self.collect_info(asr_norm, models, ctx)
        cur_mes = ctx.user_history.messages_buffer[(- 1)]
        raw_sents = sent_tokenize(asr_norm)
        negation_flags = ([False] * len(raw_sents))
        intent_2nd_res = ([None] * len(raw_sents))
        negation_placeholder = '##'

        def remove_negation_word(negation_res, negation_placeholder='##'):
            # Replace each detected negation trigger word with a placeholder,
            # then flag the sentences whose text changed once the placeholder
            # is stripped out.
            words = negation_res['wordlist'][:]
            triplets = negation_res['triplets']
            for triplet in triplets:
                i = triplet[0]
                words[i] = negation_placeholder
            text_with_negation_words_removed = ' '.join(words)
            tokenized_text_with_negation_placeholder = words
            sents_with_negation_words_removed = sent_tokenize(text_with_negation_words_removed)
            negation_flags = ([False] * len(sents_with_negation_words_removed))
            for i in range(len(negation_flags)):
                sents_with_negation_words_removed[i] = sents_with_negation_words_removed[i].replace(negation_placeholder, '')
                if (sents_with_negation_words_removed[i] != raw_sents[i]):
                    negation_flags[i] = True
            text_with_negation_words_removed = ' '.join(sents_with_negation_words_removed)
            return (text_with_negation_words_removed, tokenized_text_with_negation_placeholder, sents_with_negation_words_removed)
        # Negation model hit: triplets present and the first index is valid.
        got_negation = (True if (('negation' in res) and ('triplets' in res['negation']) and res['negation']['triplets'] and (res['negation']['triplets'][0][0] != (- 1))) else False)
        if got_negation:
            (cur_mes.text_with_negation_words_removed, tokenized_text_with_negation_placeholder, sents_with_negation_words_removed) = remove_negation_word(res['negation'])
        else:
            (cur_mes.text_with_negation_words_removed, tokenized_text_with_negation_placeholder, sents_with_negation_words_removed) = (' '.join(tokenized_utt), tokenized_utt, sent_tokenize(asr_norm))

        def find_entity(coref_res, cluster):
            # Pick the first mention in the cluster that is not a skip word and
            # whose POS tags are not all pronoun-like, to use as the referent.
            words = coref_res['words']
            entity = ''
            for (span_s, span_e) in cluster:
                pos_tags = nltk.pos_tag(words)
                if (' '.join(words[span_s:span_e]).lower() not in COREFERENCE_SKIP):
                    span_tags = set([t[1] for t in pos_tags[span_s:span_e]])
                    if (not span_tags.issubset(SELECTED_POS_TAGS)):
                        entity = words[span_s:span_e]
                        break
            return entity

        def replace_corefed_entity(coref_res, tokenized_text_with_negation_placeholder):
            # Replace pronoun-like mentions in the current utterance with the
            # concrete entity found for their coreference cluster.  Spans are
            # marked with '*', '**', ... signs and expanded afterwards.
            predicted_clusters = coref_res['predicted_clusters']
            utt_replaced_coref = []
            words = coref_res['words'][:]
            cur_utt_length = len(tokenized_text_with_negation_placeholder)
            # The coref model sees history + current turn; start_ind is where
            # the current utterance begins in its word list.
            start_ind = (len(words) - len(tokenized_text_with_negation_placeholder))
            replaced_token_flags = [n for n in range(cur_utt_length)]
            replaced_span_dict = {}
            sig = '*'
            for cluster in predicted_clusters:
                entity_flag = False
                for (span_s, span_e) in cluster:
                    if ((span_s >= start_ind) and (span_e <= (start_ind + cur_utt_length)) and (' '.join(words[span_s:span_e]).lower() not in COREFERENCE_SKIP)):
                        # pos_tags here is the outer (current-utterance) tagging.
                        span_tags = set([t[1] for t in pos_tags[(span_s - start_ind):(span_e - start_ind)]])
                        if (SELECTED_POS_TAGS & span_tags):
                            if (not entity_flag):
                                entity = find_entity(coref_res, cluster)
                                entity_flag = True
                            if entity:
                                for ind in range((span_s - start_ind), (span_e - start_ind)):
                                    replaced_token_flags[ind] = sig
                                replaced_span_dict[sig] = entity
                                sig += '*'
            # Rebuild the utterance: one entity per marked span (last_sign
            # collapses multi-token spans), original tokens elsewhere.
            last_sign = ''
            for (ind, tok) in enumerate(replaced_token_flags):
                if (tok in replaced_span_dict):
                    if (last_sign != tok):
                        utt_replaced_coref.extend(replaced_span_dict[tok])
                else:
                    utt_replaced_coref.append(tokenized_text_with_negation_placeholder[ind])
                last_sign = tok
            utt_replaced_coref = ' '.join(utt_replaced_coref)
            return utt_replaced_coref
        got_coref = (True if (('coref' in res) and ('predicted_clusters' in res['coref']) and res['coref']['predicted_clusters']) else False)
        cur_mes.utt_replaced_coref = (replace_corefed_entity(res['coref'], tokenized_text_with_negation_placeholder) if got_coref else ' '.join(tokenized_text_with_negation_placeholder))
        sents_to_adjust_for_coref = sent_tokenize(cur_mes.utt_replaced_coref)
        sents_tokenized_text_with_negation_placeholder = sent_tokenize(' '.join(tokenized_text_with_negation_placeholder))
        coref_flags = ([False] * len(sents_to_adjust_for_coref))
        # Re-run intent on every sentence that was rewritten (by coref
        # replacement or negation removal) to obtain the second opinion.
        for i in range(len(coref_flags)):
            sent_to_adjust_for_coref = sents_to_adjust_for_coref[i]
            if (sent_to_adjust_for_coref != sents_tokenized_text_with_negation_placeholder[i]):
                coref_flags[i] = True
                intent_2nd_res[i] = self.models['intent'](sent_to_adjust_for_coref.replace(negation_placeholder, ''))
            elif negation_flags[i]:
                intent_2nd_res[i] = self.models['intent'](sents_with_negation_words_removed[i])
        cur_mes.utt_replaced_coref = cur_mes.utt_replaced_coref.replace(negation_placeholder, '')
        assert (len(res['intent_seg']) == len(intent_2nd_res) == len(negation_flags))
        intent_res = []
        for i in range(len(negation_flags)):
            intent_res.append(self.intent_resolution(res['intent_seg'][i], intent_2nd_res[i], negation_flags[i]))
        # Merge per-sentence intents: prefer certain over uncertain, specific
        # intents over bare positive/negative, then higher probability.
        res['final_intent'] = (None if (not intent_res) else intent_res[0])
        for i in range(1, len(intent_res)):
            if res['final_intent']['uncertain']:
                if (not intent_res[i]['uncertain']):
                    res['final_intent'] = intent_res[i]
                elif (intent_res[i]['prob'] > res['final_intent']['prob']):
                    res['final_intent'] = intent_res[i]
            elif ((res['final_intent']['intent'] == 'positive') or (res['final_intent']['intent'] == 'negative')):
                if (intent_res[i]['intent'] != 'positive'):
                    res['final_intent'] = intent_res[i]
            elif ((not intent_res[i]['uncertain']) and (intent_res[i]['intent'] != 'positive') and (intent_res[i]['intent'] != 'negative') and (intent_res[i]['prob'] > res['final_intent']['prob'])):
                res['final_intent'] = intent_res[i]
        return res
# The decorator was mangled to a bare "(frozen=True)" (a syntax error);
# restore the dataclass decorator that produces an immutable module bundle.
@dataclasses.dataclass(frozen=True)
class BCQModules(DDPGBaseModules):
    """Immutable container for the networks/optimizers BCQ adds on top of DDPG:
    a perturbation (residual) policy, its target copy, and the conditional-VAE
    action imitator with its optimizer."""
    policy: DeterministicResidualPolicy
    targ_policy: DeterministicResidualPolicy
    imitator: ConditionalVAE
    imitator_optim: Optimizer
class AdjustedRandIndex(ConfusionMatrixMetric):
    """Adjusted Rand Index computed from a binary confusion matrix.

    Treats tp/tn/fp/fn as a 2x2 contingency table, derives the pair counts,
    and returns the chance-corrected agreement.  Returns 0 when the corrected
    denominator vanishes.
    """

    def __init__(self, metric: str='ADJRIND'):
        super().__init__(metric)

    def calculate(self):
        cm = self.confusion_matrix
        tp = cm.tp
        tn = cm.tn
        fp = cm.fp
        fn = cm.fn
        n = cm.n
        # Marginal sums of the implied contingency table.
        fp_tn = tn + fp
        tp_fn = fn + tp
        tn_fn = tn + fn
        tp_fp = fp + tp
        nis = tn_fn * tn_fn + tp_fp * tp_fp
        njs = fp_tn * fp_tn + tp_fn * tp_fn
        sum_of_squares = tp * tp + tn * tn + fp * fp + fn * fn
        # Pair counts: a/d agreements, b/c disagreements.
        a = (tp * (tp - 1) + fp * (fp - 1) + tn * (tn - 1) + fn * (fn - 1)) / 2.0
        b = (njs - sum_of_squares) / 2.0
        c = (nis - sum_of_squares) / 2.0
        d = (n * n + sum_of_squares - nis - njs) / 2.0
        # ARI = (index - expected) / (max - expected).
        x1 = a - ((a + c) * (a + b)) / (a + b + c + d)
        x2 = ((a + c) + (a + b)) / 2.0
        x3 = ((a + c) * (a + b)) / (a + b + c + d)
        denominator = x2 - x3
        return x1 / denominator if denominator != 0 else 0
class EnvironmentState(MsgpackMixin):
    """Msgpack-serializable snapshot of the simulated environment state.

    Class-level values are defaults overwritten on deserialization.
    """
    position = Vector3r()        # position in the world frame
    geo_point = GeoPoint()       # geographic location (lat/lon/alt)
    gravity = Vector3r()         # gravity vector
    air_pressure = np.float32(0)   # ambient pressure (units per producer — confirm)
    temperature = np.float32(0)    # ambient temperature (units per producer — confirm)
    air_density = np.float32(0)    # ambient air density (units per producer — confirm)
class Parser(object):
    """Rule-based utterance parser: tags utterances as question / greeting /
    negative using simple word lists, and maps raw events to logical forms."""
    greeting_words = set(['hi', 'hello', 'hey', 'hiya', 'howdy'])
    question_words = set(['what', 'when', 'where', 'why', 'which', 'who', 'whose', 'how', 'do', 'did', 'does', 'are', 'is', 'would', 'will', 'can', 'could', 'any'])
    neg_words = set(['no', 'not', "n't"])

    def __init__(self, agent, kb, lexicon):
        self.agent = agent
        self.partner = (1 - agent)  # two-agent dialogue: the other index
        self.kb = kb
        self.lexicon = lexicon

    # The three predicates below take `cls` — they were written as
    # classmethods; the decorators are restored here (without them the
    # utterance argument would be bound to `cls`).
    @classmethod
    def is_negative(cls, utterance):
        """True if any token is a negation word."""
        for token in utterance.tokens:
            if (token in cls.neg_words):
                return True
        return False

    @classmethod
    def is_question(cls, utterance):
        """True if the utterance ends with '?' or starts with a question word."""
        tokens = utterance.tokens
        if (len(tokens) < 1):
            return False
        last_word = tokens[(- 1)]
        first_word = tokens[0]
        return ((last_word == '?') or (first_word in cls.question_words))

    @classmethod
    def is_greeting(cls, utterance):
        """True if any token is a greeting word."""
        for token in utterance.tokens:
            if (token in cls.greeting_words):
                return True
        return False

    def tag_utterance(self, utterance):
        """Collect all applicable tags for the utterance."""
        tags = []
        if self.is_question(utterance):
            tags.append('question')
        if self.is_greeting(utterance):
            tags.append('greeting')
        if self.is_negative(utterance):
            tags.append('negative')
        return tags

    def parse(self, event, dialogue_state, update_state=False):
        raise NotImplementedError

    def parse_action(self, event):
        """Wrap a raw action event in an Utterance with a bare logical form."""
        intent = event.action
        return Utterance(logical_form=LogicalForm(intent), template=['<{}>'.format(intent)])
# The "@pytest.mark" prefix of the decorator was stripped, leaving a bare
# ".parametrize(...)" (a syntax error); restore the full decorator.
@pytest.mark.parametrize('use_attention,sparse_feature_num,dense_feature_num', [(True, 3, 0)])
def test_AFM(use_attention, sparse_feature_num, dense_feature_num):
    """Smoke-test the AFM model: build synthetic features, fit, and check it."""
    model_name = 'AFM'
    sample_size = SAMPLE_SIZE
    (x, y, feature_columns) = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=dense_feature_num)
    model = AFM(feature_columns, feature_columns, use_attention=use_attention, afm_dropout=0.5)
    check_model(model, model_name, x, y)
def unique_max(unique_x, x, values, marker_2D=None):
    """For every element of `unique_x`, the maximum of `values` over the
    positions where `x` equals that element (and the argmax index).

    Processed in chunks of 100 unique elements to bound the size of the
    intermediate (chunk x len(x)) comparison matrix.  Non-matching positions
    are pushed to -HUGE_INT before the max.
    """
    chunk_size = 100
    max_chunks, argmax_chunks = [], []
    for start in range(0, len(unique_x), chunk_size):
        chunk = unique_x[start:(start + chunk_size)]
        # One row per unique value, one column per position of x.
        marker_2D = (chunk.unsqueeze(1) == x.unsqueeze(0)).float()
        masked_values = ((marker_2D * values.unsqueeze(0)) - ((1 - marker_2D) * HUGE_INT))
        (chunk_max, chunk_argmax) = masked_values.max(dim=1)
        max_chunks.append(chunk_max)
        argmax_chunks.append(chunk_argmax)
    return (torch.cat(max_chunks), torch.cat(argmax_chunks))
def draw_mask_on_image_array(image, mask, color='red', alpha=0.7):
    """Alpha-blend a binary mask onto `image` in place.

    `image` is HxWx3 uint8; `mask` is HxW uint8 with values in {0, 1}.
    Raises ValueError on any dtype/value violation.
    """
    if (image.dtype != np.uint8):
        raise ValueError('`image` not of type np.uint8')
    if (mask.dtype != np.uint8):
        raise ValueError('`mask` not of type np.uint8')
    if np.any(np.logical_and((mask != 1), (mask != 0))):
        raise ValueError('`mask` elements should be in [0, 1]')
    rgb = ImageColor.getrgb(color)
    base = Image.fromarray(image)
    # A full-frame plane of the requested color, masked down below.
    color_plane = (np.expand_dims(np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3]))
    overlay = Image.fromarray(np.uint8(color_plane)).convert('RGBA')
    blend_mask = Image.fromarray(np.uint8(((255.0 * alpha) * mask))).convert('L')
    composited = Image.composite(overlay, base, blend_mask)
    # Write the blended result back into the caller's array.
    np.copyto(image, np.array(composited.convert('RGB')))
def optimizeForMKLDNN(net, training_mode=False):
    """Rewrite `net`'s proto in place with the MKL-DNN graph transform.

    Serializes the NetDef, runs the C++ transform, and parses the result
    back into the same proto object.
    """
    serialized = net.Proto().SerializeToString()
    transformed = C.transform_optimizeForMKLDNN(serialized, training_mode)
    net.Proto().ParseFromString(transformed)
def test():
    """Slicing a RecordArray with (..., np.newaxis) must wrap each field's
    content in a length-1 RegularArray."""
    field_x = ak.contents.ListOffsetArray(
        ak.index.Index64([0, 3]),
        ak.contents.NumpyArray(np.array([1, 2, 3], dtype=np.uint16)),
    )
    field_y = ak.contents.ListOffsetArray(
        ak.index.Index64([0, 2]),
        ak.contents.NumpyArray(np.array([4, 5], dtype=np.uint16)),
    )
    layout = ak.contents.RecordArray([field_x, field_y], ['x', 'y'])
    sliced = layout[(..., np.newaxis)]
    expected = ak.contents.RecordArray(
        [ak.contents.RegularArray(field_x, 1), ak.contents.RegularArray(field_y, 1)],
        ['x', 'y'],
    )
    assert sliced.is_equal_to(expected)
# The "@" of the decorator was stripped, leaving "_dispatch def hfft" (a
# syntax error); restore the dispatch decorator used by the scipy.fft
# uarray multimethod layer.
@_dispatch
def hfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *, plan=None):
    """Multimethod stub for the Hermitian FFT: the body only declares which
    argument is dispatchable; the active backend provides the implementation."""
    return (Dispatchable(x, np.ndarray),)
def test_load_fake_lfw_people_too_restrictive():
    # With downloads disabled, a min_faces_per_person filter that no identity
    # in the fake dataset satisfies must raise ValueError rather than return
    # an empty dataset.
    with pytest.raises(ValueError):
        fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
class GUI_Sim():
    """Builds and runs a quantum-network simulation from a GUI-supplied config.

    The config provides a node/edge graph, hardware templates, and a
    classical-channel delay table; results are written next to DIRECTORY.
    """

    def __init__(self, sim_time: int, time_scale: int, logging: str, sim_name, config):
        self.sim_name = sim_name
        self.time_scale = time_scale
        self.timeline = Timeline((sim_time * time_scale))
        self.timeline.seed(0)
        self.topology = Topology(sim_name, self.timeline)
        # Flatten all template groups into one lookup on top of the defaults.
        self.sim_templates = config.defaults.copy()
        self.apps = []
        temp = config.templates.copy()
        self.logging = logging
        for (key, val) in temp.items():
            self.sim_templates.update(val)
        # Instantiate every node according to its declared type + template.
        nodes = list(config.data.nodes.data())
        for node in nodes:
            node_name = node[1]['data']['name']
            node_type = node[1]['data']['type']
            node_temp = self.sim_templates[node[1]['data']['template']].copy()
            if (node_type == 'QKDNode'):
                node_in = QKDNode(node_name, self.timeline, **node_temp)
            elif (node_type == 'QuantumRouter'):
                mem = node_temp.pop('mem_type')
                mem_config = self.sim_templates[mem].copy()
                node_in = QuantumRouter(node_name, self.timeline, **node_temp)
                for (key, val) in mem_config.items():
                    node_in.memory_array.update_memory_params(key, val)
            else:
                # BUG FIX: this branch previously assigned to `node`, leaving
                # `node_in` unbound (NameError) or stale from the previous
                # loop iteration when add_node() ran below.
                node_in = Node(node_name, self.timeline)
            self.topology.add_node(node_in)
        # Connections from the edge list.
        edges = list(config.data.edges.data())
        for edge in edges:
            edge_data = edge[2]['data'].copy()
            link_type = edge_data.pop('link_type')
            source = edge_data.pop('source')
            target = edge_data.pop('target')
            if (link_type == 'Quantum'):
                self.topology.add_quantum_connection(source, target, **edge_data)
            else:
                self.topology.add_classical_connection(source, target, **edge_data)
        # Classical-channel delays: zero entries mean "no channel"; the table
        # value is halved for the per-channel delay (presumably round-trip to
        # one-way — confirm against the config's convention).
        labels = config.cc_delays.columns
        table = config.cc_delays.copy()
        table = table.to_numpy(dtype=int)
        for i in range(len(table)):
            for j in range(len(table[i])):
                if (table[i][j] == 0):
                    continue
                delay = (table[i][j] / 2)
                cchannel_params = {'delay': delay, 'distance': 1000.0}
                self.topology.add_classical_channel(labels[i], labels[j], **cchannel_params)
        # Apply detector hardware parameters to every BSM node.
        bsm_hard = self.sim_templates['default_detector']
        for node in self.topology.get_nodes_by_type('BSMNode'):
            for (key, val) in bsm_hard.items():
                node.bsm.update_detectors_params(key, val)
        # Entanglement-swapping parameters on every router.
        entanglement = self.sim_templates['default_entanglement']
        for node in self.topology.get_nodes_by_type('QuantumRouter'):
            node.network_manager.protocol_stack[1].set_swapping_success_rate(entanglement['succ_prob'])
            node.network_manager.protocol_stack[1].set_swapping_degradation(entanglement['degredation'])
        # Static routing: install forwarding tables derived from the topology.
        for node in self.topology.get_nodes_by_type('QuantumRouter'):
            table = self.topology.generate_forwarding_table(node.name)
            for (dst, next_node) in table.items():
                node.network_manager.protocol_stack[0].add_forwarding_rule(dst, next_node)

    def init_logging(self):
        """Route simulation logs to <DIRECTORY>/<sim_name>_log.txt."""
        set_logger('sim_logging', self.timeline, (((DIRECTORY + '/') + self.sim_name) + '_log.txt'))
        set_logger_level(self.logging)

    def random_request_simulation(self):
        """Attach a RandomRequestApp to every router and initialize the timeline."""
        node_names = []
        for node in self.topology.get_nodes_by_type('QuantumRouter'):
            node_names.append(node.name)
        apps_new = []
        for (i, name) in enumerate(node_names):
            other_nodes = node_names[:]
            other_nodes.remove(name)  # a node never requests itself
            app = RandomRequestApp(self.topology.nodes[name], other_nodes, i)
            apps_new.append(app)
            app.start()
        self.apps = apps_new
        self.timeline.init()

    def getSimTime(self):
        """Human-readable 'elapsed / stop' simulation-time string."""
        tl = self.timeline
        ns = tl.convert_to_nanoseconds(tl.time)
        simulation_time = tl.ns_to_human_time(ns)
        if (tl.stop_time == float('inf')):
            stop_time = 'NaN'
        else:
            ns = tl.convert_to_nanoseconds(tl.stop_time)
            stop_time = tl.ns_to_human_time(ns)
        new_simtime = f'{simulation_time} / {stop_time}'
        return new_simtime

    def write_to_file(self):
        """Dump per-app statistics and the reservation table to a results file."""
        # NOTE(review): tick is taken immediately before the elapsed-time
        # write, so the reported execution time is ~0; the timer should
        # probably start before the simulation run.
        tick = time.time()
        with open((((DIRECTORY + '/') + self.sim_name) + '_results.txt'), 'w') as output:
            output.write(('execution time %.2f sec' % (time.time() - tick)))
            for app in self.apps:
                output.write((('node ' + app.node.name) + '\n'))
                val = len(app.get_wait_time())
                output.write((('\tnumber of wait times: %d' % val) + '\n'))
                wait = app.get_wait_time()
                wait_string = (('[' + ' '.join((str(e) for e in wait))) + ']\n')
                output.write(('\twait times: ' + wait_string))
                reserve_string = (('[' + ' '.join((str(e) for e in app.reserves))) + ']\n')
                output.write(('\treservations: ' + reserve_string))
                throughput = app.get_throughput()
                throughput_string = (('[' + ' '.join((str(e) for e in throughput))) + ']\n')
                output.write(('\tthroughput: ' + throughput_string))
            output.write('Reservations Table:\n')
            node_names = []
            start_times = []
            end_times = []
            memory_sizes = []
            for node in self.topology.get_nodes_by_type('QuantumRouter'):
                node_name = node.name
                acc = node.network_manager.protocol_stack[1].accepted_reservation
                for reservation in acc:
                    (s_t, e_t, size) = (reservation.start_time, reservation.end_time, reservation.memory_size)
                    # Intermediate (relay) nodes commit memories in both
                    # directions, hence double the size.
                    cond_1 = (reservation.initiator != node.name)
                    cond_2 = (reservation.responder != node.name)
                    if (cond_1 and cond_2):
                        size *= 2
                    node_names.append(node_name)
                    start_times.append(s_t)
                    end_times.append(e_t)
                    memory_sizes.append(size)
            log = {'Node': node_names, 'Start_time': start_times, 'End_time': end_times, 'Memory_size': memory_sizes}
            df = pd.DataFrame(log)
            output.write(df.to_string())
class StateProcessor():
    """TF1 graph that turns a raw 210x160x3 Atari frame into an 84x84
    grayscale image (crop the play area, then nearest-neighbor resize)."""

    def __init__(self):
        with tf.variable_scope('state_processor'):
            self.input_state = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8)
            gray = tf.image.rgb_to_grayscale(self.input_state)
            # Drop the score bar: keep the 160x160 play area starting at row 34.
            cropped = tf.image.crop_to_bounding_box(gray, 34, 0, 160, 160)
            resized = tf.image.resize_images(cropped, (84, 84), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            self.output = tf.squeeze(resized)

    def process(self, sess, state):
        """Run the preprocessing graph on a single frame."""
        return sess.run(self.output, {self.input_state: state})
def main():
    """Tokenize a text dump with a BERT tokenizer and pickle the id sequences."""
    parser = argparse.ArgumentParser(description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).')
    parser.add_argument('--file_path', type=str, default='data/dump.txt', help='The path to the data.')
    parser.add_argument('--bert_tokenizer', type=str, default='bert-base-uncased', help='The tokenizer to use.')
    parser.add_argument('--dump_file', type=str, default='data/dump', help='The dump file prefix.')
    args = parser.parse_args()

    logger.info(f'Loading Tokenizer ({args.bert_tokenizer})')
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer)

    logger.info(f'Loading text from {args.file_path}')
    with open(args.file_path, 'r', encoding='utf8') as fp:
        data = fp.readlines()

    logger.info(f'Start encoding')
    logger.info(f'{len(data)} examples to process.')

    rslt = []
    interval = 10000
    start = time.time()
    # enumerate replaces the manual counter (which shadowed builtin `iter`).
    for line_count, text in enumerate(data, start=1):
        token_ids = bert_tokenizer.encode(f'[CLS] {text.strip()} [SEP]')
        rslt.append(token_ids)
        if (line_count % interval) == 0:
            end = time.time()
            logger.info(f'{line_count} examples processed. - {((end - start) / interval):.2f}s/expl')
            start = time.time()

    logger.info('Finished binarization')
    logger.info(f'{len(data)} examples processed.')

    dp_file = f'{args.dump_file}.{args.bert_tokenizer}.pickle'
    # uint16 is enough for BERT vocab ids and halves the pickle size.
    rslt_ = [np.uint16(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f'Dump to {dp_file}')
    with open(dp_file, 'wb') as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
def test_arrow_null_nested_struct():
    """A pyarrow list-of-struct array with a null entry must round-trip
    through handle_arrow unchanged (null preserved, empty list preserved)."""
    records = [
        [{'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}, {'x': 3, 'y': 3.3}],
        None,
        [],
        [{'x': 4, 'y': 4.4}, {'x': 5, 'y': 5.5}],
    ]
    arrow_array = pyarrow.array(records)
    assert to_list(ak._connect.pyarrow.handle_arrow(arrow_array)) == records
def clean_object_attributes(scenes):
    """Strip every object in every scene down to a fixed attribute whitelist.

    Mutates `scenes` in place (each object dict is replaced by a filtered
    copy) and returns it for convenience.
    """
    keys = ('shape', 'size', 'material', 'color', 'id')
    for scene in scenes['scenes']:
        objects = scene['objects']
        for obj_id, obj in enumerate(objects):
            objects[obj_id] = {key: obj[key] for key in keys}
    return scenes
def p_const_member_type_group(p):
    # Grammar action: a const-member group declares one type (p[2]) shared by
    # several "name = value" assignments (p[3]); expand each assignment into
    # its own ConstMember AST node.
    type_ref = p[2]
    assignments = p[3]
    p[0] = [syntax_tree.ConstMember(type_ref, name, value) for (name, value) in assignments]
class BlissToZipDataset(Job):
    """Pack a Bliss corpus into a zip dataset: audio files plus a '<name>.txt'
    metadata list with duration/text/seq_name per segment.

    Only recordings with exactly one segment are supported; an optional
    segment whitelist restricts which segments are included.
    """

    def __init__(self, name, corpus_file, segment_file=None, use_full_seq_name=False, no_audio=False):
        self.name = name
        self.corpus_file = corpus_file
        self.segment_file_path = segment_file
        self.use_full_seq_name = use_full_seq_name
        self.no_audio = no_audio
        self.out = self.output_path(('%s.zip' % name))

    def tasks(self):
        (yield Task('run', mini_task=True))

    def run(self):
        import zipfile
        zip_file = zipfile.ZipFile(tk.uncached_path(self.out), mode='w', compression=zipfile.ZIP_STORED)
        dict_file_path = (self.name + '.txt')
        dict_file = open(dict_file_path, 'wt')
        dict_file.write('[\n')
        c = corpus.Corpus()
        assert (len(c.subcorpora) == 0)
        c.load(tk.uncached_path(self.corpus_file))
        segments = None
        if self.segment_file_path:
            # BUG FIX: this used to read `self.segment_file`, an attribute that
            # is never set (it is stored as `segment_file_path`), and the
            # plain-text branch opened in text mode but then called .decode()
            # on str lines.  Open binary in both cases and decode uniformly;
            # `with` also guarantees the file is closed.
            segment_path = tk.uncached_path(self.segment_file_path)
            opener = gzip.open if segment_path.endswith('gz') else open
            with opener(segment_path, 'rb') as segment_file:
                segments = [line.decode().strip() for line in segment_file]
        for recording in c.recordings:
            if (not recording.segments):
                continue
            # Multi-segment recordings are not supported by this packer.
            assert (len(recording.segments) == 1)
            segment = recording.segments[0]
            segment_name = '/'.join([c.name, recording.name, segment.name])
            if (self.segment_file_path and (segment_name not in segments)):
                continue
            if (not self.use_full_seq_name):
                segment_name = segment.name
            if self.no_audio:
                dict_file.write(('{"duration": %f, "text": "%s", "seq_name": "%s"},\n' % (segment.end, segment.orth.replace('"', '\\"'), segment_name)))
            else:
                audio_path = recording.audio
                arc_path = os.path.join(self.name, os.path.basename(audio_path))
                zip_file.write(audio_path, arcname=arc_path)
                dict_file.write(('{"file": "%s", "duration": %f, "text": "%s", "seq_name": "%s"},\n' % (os.path.basename(audio_path), segment.end, segment.orth.replace('"', '\\"'), segment_name)))
        dict_file.write(']\n')
        dict_file.close()
        # The metadata list itself goes into the archive as well.
        zip_file.write(dict_file_path, dict_file_path)
        zip_file.close()
def _model_name_to_cost(model_name: str) -> float:
    """Return the per-1000-token inference cost for a GPT-3 model name.

    Matches by substring against the known model families; raises ValueError
    for unrecognized names.
    """
    # Iterate items() directly instead of .keys() + a second dict lookup.
    for model_family, cost in inference_cost_per_1000_tokens.items():
        if model_family in model_name:
            return cost
    raise ValueError(('Did not recognize GPT-3 model name %s' % model_name))
class FPExtCnxp(FunCnxp):
    """Constant expression for the LLVM `fpext` cast (float widening)."""
    sig = (Constant,)    # single constant operand
    code = 'fpext'

    def type_constraints(self, tcs):
        # Operand and result are both floating point, with the result at
        # least as wide as the operand.
        tcs.float(self)
        tcs.float(self._args[0])
        tcs.width_order(self._args[0], self)
class Task():
    """A learnable task: a pool of modules, the operation sequence that wires
    them together, plus loss/output functions and a scorer.

    Defaults: cross-entropy loss and a dim-1 softmax output head.
    """

    def __init__(self, name: str, module_pool: nn.ModuleDict, op_sequence: Sequence[Operation], scorer: Scorer=Scorer(metrics=['accuracy']), loss_func: Optional[Callable[(..., torch.Tensor)]]=None, output_func: Optional[Callable[(..., torch.Tensor)]]=None) -> None:
        self.name = name
        self.module_pool = module_pool
        self.op_sequence = op_sequence
        # Explicit None-checks; callables are always truthy so this is
        # equivalent to the `x or default` idiom.
        if loss_func is None:
            loss_func = F.cross_entropy
        self.loss_func = loss_func
        if output_func is None:
            output_func = partial(F.softmax, dim=1)
        self.output_func = output_func
        self.scorer = scorer
        logging.info(f'Created task: {self.name}')

    def __repr__(self) -> str:
        return f'{type(self).__name__}(name={self.name})'
def make_multi_attrgetter(environment, attribute, postprocess=None):
    """Build a getter for several (comma-separated) attribute paths at once.

    The returned callable resolves each path through environment.getitem,
    applies `postprocess` per value if given, and returns the values as a
    list in spec order.
    """
    if isinstance(attribute, string_types):
        raw_parts = attribute.split(',')
    else:
        raw_parts = [attribute]
    specs = [_prepare_attribute_parts(raw_part) for raw_part in raw_parts]

    def attrgetter(item):
        results = [None] * len(specs)
        for idx, spec in enumerate(specs):
            value = item
            for step in spec:
                value = environment.getitem(value, step)
            if postprocess is not None:
                value = postprocess(value)
            results[idx] = value
        return results
    return attrgetter
def srwl_uti_read_data_cols(_file_path, _str_sep, _i_col_start=0, _i_col_end=(- 1), _n_line_skip=0):
    """Read selected numeric columns from a separated-value text file.

    Args:
        _file_path: path to the text file.
        _str_sep: column separator passed to str.split.
        _i_col_start, _i_col_end: inclusive column window among non-empty
            fields; a negative value means "unbounded" on that side.
        _n_line_skip: number of header lines to skip.

    Returns:
        A list of columns, each a list of floats.
    """
    # `with` guarantees the file is closed even if a parse error is raised
    # below (the original leaked the handle on exception).
    with open(_file_path, 'r') as f:
        lines = f.readlines()
    resCols = []
    nRows = (len(lines) - _n_line_skip)
    for i in range(nRows):
        curLineParts = lines[(_n_line_skip + i)].split(_str_sep)
        colCount = 0        # index among non-empty fields on this line
        colCountTrue = 0    # index among *selected* fields on this line
        for curPart in curLineParts:
            if (len(curPart) > 0):
                if (((_i_col_start <= colCount) or (_i_col_start < 0)) and ((colCount <= _i_col_end) or (_i_col_end < 0))):
                    # Grow the column list lazily on first use.
                    if (len(resCols) < (colCountTrue + 1)):
                        resCols.append([])
                    resCols[colCountTrue].append(float(curPart))
                    colCountTrue += 1
                colCount += 1
    return resCols
class HasNSZPred(FunPred):
    """Predicate testing whether a fast-math instruction carries the
    'no signed zeros' (nsz) flag."""
    sig = (FastMathInst,)     # applies to fast-math instructions only
    code = 'hasNSZ'
    type_constraints = _none  # no additional type constraints