code
stringlengths
101
5.91M
class PoolMemoryManager():
    """Install an RMM pooled CUDA allocator, wrapped in a tracking adaptor,
    as the current device memory resource."""

    def __init__(self, init_pool_size: int, max_pool_size: int) -> None:
        base_resource = rmm.mr.CudaMemoryResource()
        # Pool allocator over the raw CUDA resource.
        self.pool = rmm.mr.PoolMemoryResource(
            base_resource,
            initial_pool_size=init_pool_size,
            maximum_pool_size=max_pool_size,
        )
        # Tracking adaptor records outstanding allocation totals.
        self.mr = rmm.mr.TrackingResourceAdaptor(self.pool)
        rmm.mr.set_current_device_resource(self.mr)

    def get_allocated_size(self):
        """Bytes currently allocated through the tracking adaptor."""
        return self.mr.get_allocated_bytes()

    def pool_size(self):
        """Current total size of the underlying pool."""
        return self.pool.pool_size()
def main(args):
    """CLI entry point: generate a header file from a pattern database.

    Returns 0 on success, 1 if the database file does not exist.
    """
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('db_file', help='pattern database file')
    parser.add_argument('output_file', help='output header file path')
    opts = parser.parse_args(args)
    # Guard: refuse to run against a missing database file.
    if not os.path.exists(opts.db_file):
        logging.error('"{}" does not exist'.format(opts.db_file))
        return 1
    mk_genfile_common.mk_pat_db_internal(opts.db_file, opts.output_file)
    logging.info('Generated "{}"'.format(opts.output_file))
    return 0
# NOTE(review): the decorator head was mangled by text extraction (all
# '@'-prefixed tokens in this file were stripped); restored here as the dace
# op-replacement registration that leaves exactly `('numpy.einsum')` behind.
@oprepo.replaces('numpy.einsum')
def create_einsum_sdfg(pv: 'dace.frontend.python.newast.ProgramVisitor',
                       sdfg: SDFG,
                       state: SDFGState,
                       einsum_string: StringLiteral,
                       *arrays: str,
                       dtype: Optional[dtypes.typeclass] = None,
                       optimize: bool = False,
                       output: Optional[str] = None,
                       alpha: Optional[symbolic.SymbolicType] = 1.0,
                       beta: Optional[symbolic.SymbolicType] = 0.0):
    """Replacement handler for numpy.einsum: build the einsum in `state`.

    Delegates to _create_einsum_internal and returns only the first element
    of its result (the output array name).
    """
    return _create_einsum_internal(sdfg, state, str(einsum_string), *arrays,
                                   dtype=dtype, optimize=optimize, output=output,
                                   alpha=alpha, beta=beta)[0]
def hashtag(text):
    """Normalize a regex hashtag match.

    All-caps tags become a lowercased, space-padded token; mixed-case tags
    are split on capital letters and prefixed with the '<hashtag>' marker.
    """
    body = text.group()[1:]  # drop the leading '#'
    if body.isupper():
        return ' {} '.format(body.lower())
    pieces = ['<hashtag>'] + re.split('(?=[A-Z])', body, flags=FLAGS)
    return ' '.join(pieces)
def get_path_uid(path):
    """Return the uid of the owner of `path`, refusing to follow symlinks.

    Uses O_NOFOLLOW where the platform supports it so the check is
    race-free; otherwise falls back to an islink() check.

    Raises:
        OSError: if `path` is a symlink and O_NOFOLLOW is unsupported, or
            if the underlying open/stat fails.
    """
    if hasattr(os, 'O_NOFOLLOW'):
        fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
        try:
            file_uid = os.fstat(fd).st_uid
        finally:
            # Bug fix: close the descriptor even if fstat raises, so the
            # fd never leaks.
            os.close(fd)
    elif not os.path.islink(path):
        file_uid = os.stat(path).st_uid
    else:
        raise OSError('{} is a symlink; Will not return uid for symlinks'.format(path))
    return file_uid
class CustomImportPickler(_Pickler):
    # Pickler that resolves globals through a caller-supplied `import_module`
    # callable instead of the standard importlib machinery; otherwise mirrors
    # the stdlib Pickler.save_global logic.

    def __init__(self, import_module, *args, **kwargs):
        self.import_module = import_module
        super().__init__(*args, **kwargs)

    def save_global(self, obj, name=None):
        # Emit the pickle opcodes that reference `obj` by module + qualified
        # name, verifying that the name round-trips back to the same object.
        write = self.write
        memo = self.memo  # kept for parity with the stdlib implementation; unused
        if (name is None):
            name = getattr(obj, '__qualname__', None)
        if (name is None):
            name = obj.__name__
        module_name = whichmodule(obj, name)
        try:
            # Custom import hook instead of importlib.import_module.
            module = self.import_module(module_name)
            (obj2, parent) = _getattribute(module, name)
        except (ImportError, KeyError, AttributeError):
            raise PicklingError(("Can't pickle %r: it's not found as %s.%s" % (obj, module_name, name))) from None
        else:
            if (obj2 is not obj):
                raise PicklingError(("Can't pickle %r: it's not the same object as %s.%s" % (obj, module_name, name)))
        if (self.proto >= 2):
            # Extension registry shortcut: emit a compact EXT opcode.
            code = _extension_registry.get((module_name, name))
            if code:
                assert (code > 0)
                if (code <= 255):
                    write((EXT1 + pack('<B', code)))
                elif (code <= 65535):
                    write((EXT2 + pack('<H', code)))
                else:
                    write((EXT4 + pack('<i', code)))
                return
        lastname = name.rpartition('.')[2]
        if (parent is module):
            name = lastname
        if (self.proto >= 4):
            # Protocol 4+: STACK_GLOBAL takes module and name off the stack.
            self.save(module_name)
            self.save(name)
            write(STACK_GLOBAL)
        elif (parent is not module):
            # Nested attribute (e.g. a method): pickle as getattr(parent, name).
            self.save_reduce(getattr, (parent, lastname))
        elif (self.proto >= 3):
            write(((((GLOBAL + bytes(module_name, 'utf-8')) + b'\n') + bytes(name, 'utf-8')) + b'\n'))
        else:
            if self.fix_imports:
                # Map Python 3 names back to their Python 2 equivalents.
                r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
                r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
                if ((module_name, name) in r_name_mapping):
                    (module_name, name) = r_name_mapping[(module_name, name)]
                elif (module_name in r_import_mapping):
                    module_name = r_import_mapping[module_name]
            try:
                write(((((GLOBAL + bytes(module_name, 'ascii')) + b'\n') + bytes(name, 'ascii')) + b'\n'))
            except UnicodeEncodeError:
                raise PicklingError(("can't pickle global identifier '%s.%s' using pickle protocol %i" % (module, name, self.proto))) from None
        self.memoize(obj)
class _SubprocessThread(Thread):
    # Runs a subprocess on a worker thread, optionally feeding stdin and
    # tee-ing stdout/stderr to the console while capturing them.

    def __init__(self, executable_name, args, env, shell, cwd, verbose, stdout, stderr, stdin_input):
        Thread.__init__(self, name=('Subprocess %s' % executable_name))
        self._args = args
        self._shell = shell
        self._cwd = cwd
        self._verbose = verbose
        self._stdout = stdout
        self._stderr = stderr
        self._stdin_input = stdin_input
        self._env = env
        # PID of the child once spawned; guarded by _started_cv.
        self._pid = None
        self._started_cv = Condition()
        # Results populated by run().
        self.stdout_result = None
        self.stderr_result = None
        self.returncode = None
        self._exception = None

    def exception(self):
        # Exception raised inside run(), if any (threads cannot propagate).
        return self._exception

    def run(self):
        try:
            self._started_cv.acquire()
            stdin = (PIPE if self._stdin_input else None)
            proc = Popen(self._args, shell=self._shell, cwd=self._cwd, stdin=stdin, stdout=self._stdout, stderr=self._stderr, env=self._env)
            self._pid = proc.pid
            # Wake any get_pid() waiter now that the child exists.
            # NOTE(review): if Popen itself raises, the condition variable is
            # never notified/released, so a concurrent get_pid() would block
            # forever — confirm whether callers can hit this.
            self._started_cv.notify()
            self._started_cv.release()
            if self._stdin_input:
                proc.stdin.write(self._stdin_input)
                proc.stdin.flush()
            self.process_output(proc)
            self.returncode = proc.returncode
        except Exception as err:
            # Stored for exception() rather than raised on this thread.
            self._exception = err

    def get_pid(self):
        # Block until run() has spawned the child, then return its PID.
        self._started_cv.acquire()
        while (self._pid is None):
            self._started_cv.wait()
        self._started_cv.release()
        return self._pid

    def process_output(self, proc):
        if (self._verbose and (self._stdout == PIPE) and (self._stderr in (PIPE, STDOUT))):
            # Verbose path: select() over the pipes, echoing each line to the
            # console while accumulating it.
            self.stdout_result = ''
            self.stderr_result = ''
            while True:
                reads = [proc.stdout.fileno()]
                if (self._stderr == PIPE):
                    reads.append(proc.stderr.fileno())
                ret = select(reads, [], [])
                for file_no in ret[0]:
                    if (file_no == proc.stdout.fileno()):
                        read = output_as_str(proc.stdout.readline())
                        sys.stdout.write(read)
                        self.stdout_result += read
                    if ((self._stderr == PIPE) and (file_no == proc.stderr.fileno())):
                        read = output_as_str(proc.stderr.readline())
                        sys.stderr.write(read)
                        self.stderr_result += read
                if (proc.poll() is not None):
                    break
        else:
            # Quiet path: let communicate() drain both streams to completion.
            (stdout_r, stderr_r) = proc.communicate()
            self.stdout_result = output_as_str(stdout_r)
            self.stderr_result = output_as_str(stderr_r)
class RoIAlign(Module):
    """RoI Align layer: pools each RoI onto a fixed-size grid."""

    def __init__(self, aligned_height, aligned_width, spatial_scale):
        super(RoIAlign, self).__init__()
        # Coerced to plain int/float so the functional op gets scalars.
        self.aligned_width = int(aligned_width)
        self.aligned_height = int(aligned_height)
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        align = RoIAlignFunction(self.aligned_height, self.aligned_width,
                                 self.spatial_scale)
        return align(features, rois)
class BoxOpsTests(tf.test.TestCase):
    """Checks np_box_ops area/intersection/iou/ioa against hand-computed values."""

    def setUp(self):
        # Two small box sets shared by most of the tests below.
        self.boxes1 = np.array([[4.0, 3.0, 7.0, 5.0],
                                [5.0, 6.0, 10.0, 7.0]], dtype=float)
        self.boxes2 = np.array([[3.0, 4.0, 6.0, 8.0],
                                [14.0, 14.0, 15.0, 15.0],
                                [0.0, 0.0, 20.0, 20.0]], dtype=float)

    def testArea(self):
        expected = np.array([6.0, 5.0], dtype=float)
        self.assertAllClose(expected, np_box_ops.area(self.boxes1))

    def testIntersection(self):
        expected = np.array([[2.0, 0.0, 6.0],
                             [1.0, 0.0, 5.0]], dtype=float)
        self.assertAllClose(np_box_ops.intersection(self.boxes1, self.boxes2), expected)

    def testIOU(self):
        expected = np.array([[2.0 / 16.0, 0.0, 6.0 / 400.0],
                             [1.0 / 16.0, 0.0, 5.0 / 400.0]], dtype=float)
        self.assertAllClose(np_box_ops.iou(self.boxes1, self.boxes2), expected)

    def testIOA(self):
        boxes1 = np.array([[0.25, 0.25, 0.75, 0.75],
                           [0.0, 0.0, 0.5, 0.75]], dtype=np.float32)
        boxes2 = np.array([[0.5, 0.25, 1.0, 1.0],
                           [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
        expected = np.array([[0.5, 0.0],
                             [1.0, 1.0]], dtype=np.float32)
        self.assertAllClose(np_box_ops.ioa(boxes2, boxes1), expected)
def _getscaleoffset(expr):
    """Recover (scale, offset) from a lambda evaluated over an _E stub.

    Accepts expressions of the shape ``stub * a``, ``stub + b`` or
    ``(stub * a) + b`` and returns the matching (scale, offset) pair;
    anything else raises ValueError.
    """
    stub = ['stub']
    data = expr(_E(stub)).data
    # Single operation: either a scale (multiply) or an offset (add).
    try:
        target, op, operand = data
        if target is stub and isinstance(operand, numbers.Number):
            if op == '__mul__':
                return (operand, 0.0)
            if op == '__add__':
                return (1.0, operand)
    except TypeError:
        pass
    # Composite form: a multiply whose result is then added to.
    try:
        (target, op1, scale), op2, offset = data
        if (target is stub and op1 == '__mul__'
                and isinstance(scale, numbers.Number)
                and op2 == '__add__'
                and isinstance(offset, numbers.Number)):
            return (scale, offset)
    except TypeError:
        pass
    raise ValueError('illegal expression')
def NMTCriterion(vocabSize):
    """Build the NMT training criterion (cross-entropy), on GPU when enabled.

    Note: `vocabSize` is accepted for interface compatibility but unused here.
    """
    criterion = nn.CrossEntropyLoss()
    if opt.gpus:
        criterion.cuda()
    return criterion
def _legacy_key(s):
    # Build a sort key for a pre-PEP-440 ("legacy") version string: numeric
    # parts are zero-padded so string order equals numeric order, and
    # alphabetic parts get a '*' prefix so they sort before digits.

    def get_parts(s):
        result = []
        for p in _VERSION_PART.split(s.lower()):
            # Normalize common aliases (e.g. 'pre' -> canonical form).
            p = _VERSION_REPLACE.get(p, p)
            if p:
                if ('0' <= p[:1] <= '9'):
                    # Numeric component: pad to fixed width.
                    p = p.zfill(8)
                else:
                    # Alphabetic component: '*' sorts before any digit.
                    p = ('*' + p)
            result.append(p)
        # Sentinel marking the end of the version string.
        result.append('*final')
        return result

    result = []
    for p in get_parts(s):
        if p.startswith('*'):
            if (p < '*final'):
                # A pre-release tag cancels any preceding '*final-' marker.
                while (result and (result[(- 1)] == '*final-')):
                    result.pop()
            # Drop trailing empty components before appending a tag.
            while (result and (result[(- 1)] == '')):
                result.pop()
        result.append(p)
    return tuple(result)
def get_file_sess_idx(file_name):
    """Read a JSON-lines session file and return the set of session indices."""
    with open(file_name) as handle:
        return {get_sess_idx(json.loads(line)) for line in handle}
class TS_Trainer(Trainer):
    # Teacher-student distillation trainer: wraps a frozen teacher model and
    # a trainable student, adding sparse2dense feature-distillation losses on
    # top of the student's own losses.
    # NOTE(review): __init__ forwards log_level=logging.INFO to the parent
    # instead of the `log_level` argument — looks like a bug; confirm.

    def __init__(self, teacher_model, student_model, batch_processor, optimizer=None, lr_scheduler=None, work_dir=None, log_level=logging.INFO, logger=None, **kwargs):
        super(TS_Trainer, self).__init__(student_model, batch_processor, optimizer=optimizer, lr_scheduler=lr_scheduler, work_dir=work_dir, log_level=logging.INFO, logger=logger)
        self.T_model = teacher_model
        self.g_step = 0

    def load_teacher_checkpoint(self, filename, map_location='cpu', strict=False):
        # Load weights into the teacher model only.
        self.logger.info('load checkpoint from %s', filename)
        return load_checkpoint(self.T_model, filename, map_location, strict, self.logger)

    def save_checkpoint(self, out_dir, filename_tmpl='epoch_{}.pth', save_optimizer=True, meta=None):
        # Save a per-epoch student checkpoint and refresh the 'latest.pth' link.
        if (meta is None):
            meta = dict(epoch=(self.epoch + 1), iter=self.iter)
        else:
            meta.update(epoch=(self.epoch + 1), iter=self.iter)
        filename = filename_tmpl.format((self.epoch + 1))
        filepath = osp.join(out_dir, filename)
        linkpath = osp.join(out_dir, 'latest.pth')
        optimizer = (self.optimizer if save_optimizer else None)
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
        torchie.symlink(filename, linkpath)

    def save_iter_checkpoint(self, out_dir, filename_tmpl='epoch_{}_iter_{}.pth', save_optimizer=True, meta=None):
        # Like save_checkpoint, but keyed by (epoch, inner iteration).
        if (meta is None):
            meta = dict(epoch=(self.epoch + 1), iter=self._inner_iter)
        else:
            meta.update(epoch=(self.epoch + 1), iter=self._inner_iter)
        filename = filename_tmpl.format((self.epoch + 1), (self._inner_iter + 1))
        filepath = osp.join(out_dir, filename)
        linkpath = osp.join(out_dir, 'latest.pth')
        optimizer = (self.optimizer if save_optimizer else None)
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
        torchie.symlink(filename, linkpath)

    def batch_processor_inline(self, T_model, S_model, data, train_mode, global_step, **kwargs):
        # Forward one batch through teacher and student; in train mode, add
        # distillation terms (feature MSE + heatmap/regression KD) to the
        # student's losses. The branch taken depends on the teacher backbone.
        if ('local_rank' in kwargs):
            device = torch.device(kwargs['local_rank'])
        else:
            device = None
        example = example_to_device(data, torch.cuda.current_device(), non_blocking=False)
        self.call_hook('after_data_to_device')
        if train_mode:
            if (T_model.backbone._get_name() == 'PointPillarsScatter'):
                # Pillar-based teacher: distill max-pooled feature maps a/b.
                (T_preds, F_D_a, F_D_b) = T_model(example, return_loss=False)
                (losses, F_S_a, F_S_b, S_preds, mask_loss, offset_loss) = S_model(example, return_loss=True)
                F_S_a = F.max_pool2d(F_S_a, 2, 2)
                F_D_a = F.max_pool2d(F_D_a, 2, 2)
                inds = (F_D_a > 0)
                sparse2dense_loss = (F.mse_loss(F_S_a[(~ inds)], F_D_a[(~ inds)].detach()) * 10)
                sparse2dense_loss += (F.mse_loss(F_S_a[inds], F_D_a[inds].detach()) * 10)
                F_D_b = F.max_pool2d(F_D_b, 2, 2)
                F_S_b = F.max_pool2d(F_S_b, 2, 2)
                inds = (F_D_b > 0)
                # NOTE(review): this line indexes the a-features with the
                # b-mask; possibly a copy-paste slip for F_S_b/F_D_b — confirm.
                sparse2dense_loss += (F.mse_loss(F_S_a[inds], F_D_a[inds].detach()) * 10)
                sparse2dense_loss += (F.mse_loss(F_S_b[inds], F_D_b[inds].detach()) * 10)
                sparse2dense_loss += (F.mse_loss(F_S_b[(~ inds)], F_D_b[(~ inds)].detach()) * 10)
                KD_hm_loss = fastfocalloss(S_preds[0]['hm'], F.sigmoid(T_preds[0]['hm']).detach(), example['ind'][0], example['mask'][0], example['cat'][0])
                Distill_Loss = (sparse2dense_loss + KD_hm_loss)
                losses['loss'][0] += (Distill_Loss + ((mask_loss + offset_loss) * 0.5))
                losses['sparse2dense_loss'] = [sparse2dense_loss.detach().cpu()]
                losses['mask_loss'] = [mask_loss.detach().cpu()]
                losses['reconstruction_loss'] = [offset_loss.detach().cpu()]
                losses['T_hm_loss'] = [fastfocalloss(F.sigmoid(T_preds[0]['hm']).detach(), example['hm'][0], example['ind'][0], example['mask'][0], example['cat'][0]).detach().cpu()]
                losses['kd_hm_loss'] = [KD_hm_loss.detach().cpu()]
            elif (T_model.backbone._get_name() == 'SpMiddleResNetFHD'):
                # Voxel-based teacher: distill features plus box regression.
                (T_preds, F_D_a, F_D_b) = T_model(example, return_loss=False, return_feature=True, return_recon_feature=True)
                (losses, F_S_a, F_S_b, S_preds, mask_loss, offset_loss) = S_model(example, return_loss=True, return_feature=True)
                inds = (F_D_a > 0)
                sparse2dense_loss = (F.mse_loss(F_S_a[inds], F_D_a[inds]) * 10)
                sparse2dense_loss += (F.mse_loss(F_S_a[(~ inds)], F_D_a[(~ inds)]) * 20)
                inds = (F_D_b > 0)
                sparse2dense_loss += (F.mse_loss(F_S_b[inds], F_D_b[inds]) * 5)
                sparse2dense_loss += (F.mse_loss(F_S_b[(~ inds)], F_D_b[(~ inds)]) * 20)
                KD_hm_loss = fastfocalloss(S_preds[0]['hm'], F.sigmoid(T_preds[0]['hm']), example['ind'][0], example['mask'][0], example['cat'][0])
                # Assemble the teacher's regression target in the student's layout.
                T_preds[0]['anno_box'] = torch.cat((T_preds[0]['reg'], T_preds[0]['height'], T_preds[0]['dim'], T_preds[0]['rot']), dim=1)
                KD_reg_loss = distill_reg_loss(S_preds[0]['anno_box'], T_preds[0]['anno_box'], example['mask'][0], example['ind'][0])
                if hasattr(S_model, 'module'):
                    # DistributedDataParallel wraps the real model in .module.
                    KD_reg_loss = ((KD_reg_loss * KD_reg_loss.new_tensor(S_model.module.bbox_head.code_weights)).sum() * S_model.module.bbox_head.weight)
                else:
                    KD_reg_loss = ((KD_reg_loss * KD_reg_loss.new_tensor(S_model.bbox_head.code_weights)).sum() * S_model.bbox_head.weight)
                Distill_Loss = ((KD_hm_loss + KD_reg_loss) + sparse2dense_loss)
                losses['loss'][0] += (Distill_Loss + (mask_loss + offset_loss))
                losses['sparse2dense_loss'] = [sparse2dense_loss.detach().cpu()]
                losses['kd_hm_loss'] = [KD_hm_loss.detach().cpu()]
                losses['kd_reg_loss'] = [KD_reg_loss.detach().cpu()]
                losses['mask_loss'] = [mask_loss.detach().cpu()]
                losses['reconstruction_loss'] = [offset_loss.detach().cpu()]
                losses['T_hm_loss'] = [fastfocalloss(F.sigmoid(T_preds[0]['hm']), example['hm'][0], example['ind'][0], example['mask'][0], example['cat'][0]).detach().cpu()]
            else:
                # Generic teacher: feature distillation only; the teacher's
                # own losses are logged for reference.
                (T_losses, F_D_a, F_D_b) = T_model(example, return_loss=True, return_feature=True, return_recon_feature=True)
                (losses, F_S_a, F_S_b, S_preds, mask_loss, offset_loss) = S_model(example, return_loss=True, return_feature=True)
                inds = (F_D_a > 0)
                sparse2dense_loss = (F.mse_loss(F_S_a[inds], F_D_a[inds].detach()) * 10)
                sparse2dense_loss += (F.mse_loss(F_S_a[(~ inds)], F_D_a[(~ inds)].detach()) * 20)
                inds = (F_D_b > 0)
                sparse2dense_loss += (F.mse_loss(F_S_b[inds], F_D_b[inds].detach()) * 5)
                sparse2dense_loss += (F.mse_loss(F_S_b[(~ inds)], F_D_b[(~ inds)].detach()) * 20)
                Distill_Loss = sparse2dense_loss
                losses['loss'][0] += (Distill_Loss + ((mask_loss + offset_loss) * 0.5))
                losses['sparse2dense_loss'] = [sparse2dense_loss.detach().cpu()]
                losses['mask_loss'] = [mask_loss.detach().cpu()]
                losses['reconstruction_loss'] = [offset_loss.detach().cpu()]
                losses['T_loc_loss_elem'] = [T_losses['loc_loss_elem'][0]]
                losses['T_dir_loss_reduced'] = [T_losses['dir_loss_reduced'][0]]
                losses['T_loss'] = [T_losses['loss'][0].detach().cpu()]
            self.call_hook('after_forward')
            (loss, log_vars) = parse_second_losses(losses)
            del losses
            outputs = dict(loss=loss, log_vars=log_vars, num_samples=(- 1))
            self.call_hook('after_parse_loss')
            return outputs
        else:
            # Eval mode: plain student forward pass.
            return S_model(example, return_loss=False)

    def train(self, data_loader, epoch, **kwargs):
        # One training epoch: freeze the teacher, then run the inline batch
        # processor over the loader, firing the usual trainer hooks.
        self.T_model.eval()
        for param in self.T_model.parameters():
            param.requires_grad = False
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        self.length = len(data_loader)
        self._max_iters = (self._max_epochs * self.length)
        self.call_hook('before_train_epoch')
        base_step = (epoch * self.length)
        for (i, data_batch) in enumerate(data_loader):
            global_step = (base_step + i)
            if (self.lr_scheduler is not None):
                self.lr_scheduler.step(global_step)
            self._inner_iter = i
            self.call_hook('before_train_iter')
            outputs = self.batch_processor_inline(self.T_model, self.model, data_batch, train_mode=True, global_step=(base_step + i), **kwargs)
            if (not isinstance(outputs, dict)):
                raise TypeError('batch_processor() must return a dict')
            if ('log_vars' in outputs):
                self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
            self.outputs = outputs
            self.call_hook('after_train_iter')
            self._iter += 1
        self.call_hook('after_train_epoch')
        self._epoch += 1
def test_one_level_record():
    """Both without_field and `del` should drop field 'x' from a record array."""
    base = ak.zip({'x': [1, 2, 3], 'y': [8, 9, 10]})
    expected = [{'y': 8}, {'y': 9}, {'y': 10}]
    # Functional removal leaves the original array untouched.
    assert ak.without_field(base, where=['x']).to_list() == expected
    assert ak.fields(base) == ['x', 'y']
    # In-place removal via del.
    del base['x']
    assert base.to_list() == expected
def record_kwargs(kwargs, top_level):
    # Trace a kwargs dict: wrap every value in a TracedValue (recursing into
    # containers) and record graph edges from container nodes to their
    # children. Returns (rebuilt kwargs, key -> trace-id map). When
    # top_level is False the rebuilt dict holds raw data, not TracedValues.
    new_kwargs = dict()
    new_kwargs_ids = dict()
    for (k, v) in kwargs.items():
        assert isinstance(k, (int, bool, str, float, type(None))), f'unsupported kwargs {type(k)}'
        if isinstance(v, (list, tuple, set)):
            # Sequence container: trace children, then emit a construct-op node.
            (traced_children, children_ids) = record_args(v, top_level=False)
            traced_value = TracedValue(NodeTypes.PRIMITIVE, ('/' + container_construct_op_name(type(v))))
            for id in children_ids:
                record_arg(traced_value.id, id)
            # Rebuild the container with the (raw) traced children.
            traced_value.set_data(type(v)(traced_children))
        elif isinstance(v, dict):
            # Nested dict: recurse; children edges carry their keys.
            (traced_children, traced_ids) = record_kwargs(v, top_level=False)
            traced_value = TracedValue(NodeTypes.PRIMITIVE, ('/' + container_construct_op_name(type(v))))
            for (key, id) in traced_ids.items():
                record_kwarg(traced_value.id, key, id)
            traced_value.set_data(type(v)(traced_children))
        elif isinstance(v, TracedValue):
            # Already traced; reuse as-is.
            traced_value = v
        else:
            # Plain constant. Tensors must arrive already traced.
            assert (not isinstance(v, Tensor)), 'tensor constants should not happen'
            traced_value = TracedValue(NodeTypes.CONSTANT, f'/prim::Constant')
            traced_value.set_data(v)
            traced_value.node.constant_value = v
        new_kwargs_ids[k] = traced_value.id
        if top_level:
            new_kwargs[k] = traced_value
        else:
            new_kwargs[k] = traced_value._data
    return (new_kwargs, new_kwargs_ids)
def get_concept_relax(concept, concepts_full, concepts_relax):
    """Map `concept` to its relaxed form.

    `concepts_full` entries ending in '*' are prefix wildcards; other entries
    match exactly. The first matching entry selects the parallel element of
    `concepts_relax`. Unmatched concepts are returned unchanged.

    Improvements over the original: iterates the two lists in lockstep with
    zip instead of re-scanning via list.index (O(n^2) -> O(n)), and no longer
    raises IndexError on an empty pattern string.
    """
    for pattern, relaxed in zip(concepts_full, concepts_relax):
        if pattern.endswith('*'):
            # Wildcard: match on the prefix before '*'.
            if concept.startswith(pattern[:-1]):
                return relaxed
        elif concept == pattern:
            return relaxed
    return concept
def test_fastica_nowhiten():
    """FastICA with whiten=False must warn that n_components is ignored and
    still expose the mixing_ attribute after fitting."""
    data = [[0, 1], [1, 0]]
    ica = FastICA(n_components=1, whiten=False, random_state=0)
    with pytest.warns(UserWarning, match='Ignoring n_components with whiten=False.'):
        ica.fit(data)
    assert hasattr(ica, 'mixing_')
def test_iht_reproducibility():
    """InstanceHardnessThreshold with a fixed random_state must select the
    same sample indices regardless of the estimator's seed."""
    from sklearn.datasets import load_digits
    X, y = load_digits(return_X_y=True)
    all_indices = []
    for seed in range(5):
        forest = RandomForestClassifier(n_estimators=10, random_state=seed)
        sampler = InstanceHardnessThreshold(estimator=forest, random_state=RND_SEED)
        sampler.fit_resample(X, y)
        all_indices.append(sampler.sample_indices_.copy())
    for first, second in zip(all_indices, all_indices[1:]):
        assert_array_equal(first, second)
class ChainableUndefined(Undefined):
    # Undefined subclass that allows attribute and item chaining
    # (``foo.bar.baz`` / ``foo['bar']``) without raising immediately:
    # every access just yields the same undefined object.
    __slots__ = ()

    def __html__(self):
        # HTML rendering delegates to the string form.
        return self.__str__()

    def __getattr__(self, _):
        # Any attribute name resolves to this same undefined object.
        return self

    # Item access chains identically to attribute access.
    __getitem__ = __getattr__
def multinomial(list):
    """Return sum(list) when the multinomial coefficient of `list` is odd,
    otherwise None.

    Works bit-by-bit (Kummer-style): adding the terms in base 2 must produce
    no carries, i.e. no bit may be set both in the running partial sum and
    in the next term.
    """
    values = list
    running = values[0]
    for term in values[1:]:
        bit = 1
        while bit <= min(running, term):
            # A bit shared by the partial sum and the term means a carry.
            if (bit & running) == bit and (bit & term) != 0:
                return None
            bit <<= 1
        running += term
    return running
class NeuralEmbeddings():
    """Look up embeddings for a list of tokens (or a raw string, which is
    tokenized first).

    NOTE(review): `embedding_layer` and `vocabulary_dict` are not initialized
    here — presumably provided by a subclass or set externally; confirm.
    """

    def __init__(self, tokenizer: Tokenizer = None) -> None:
        # Bug fix: a caller-supplied tokenizer was previously dropped —
        # only the default branch assigned self.tokenizer.
        self.tokenizer = tokenizer if tokenizer is not None else Tokenizer(word_delimiter=' ')

    def __call__(self, tokens: Union[List[str], str]) -> Tensor:
        """Embed `tokens`; a plain string is tokenized before lookup."""
        if isinstance(tokens, str):
            tokens = self.tokenizer.tokenize(tokens)
        ids = torch.tensor([self.vocabulary_dict[token] for token in tokens])
        return self.embedding_layer(ids)

    def get_embedding(self, tokens: Union[List[str], str]) -> Tensor:
        """Alias for __call__."""
        return self.__call__(tokens)
def test_equality(f, cmp_key): obj1 = f() obj2 = jit.script(f)() return (cmp_key(obj1), cmp_key(obj2))
def dump_json(obj, fname, indent=None):
    """Serialize `obj` as JSON to the file at `fname`.

    `indent` is forwarded to json.dump for pretty-printing.
    """
    with open(fname, 'w') as handle:
        json.dump(obj, handle, indent=indent)
def nom_comps(srs: dd.Series, cfg: Config) -> Dict[(str, Any)]:
    # Compute the intermediates needed to render plots/stats for a nominal
    # (categorical) column, sharing the grouped counts across plots whose
    # configurations coincide to avoid recomputation.
    data: Dict[(str, Any)] = dict()
    data['nrows'] = srs.shape[0]
    srs = srs.dropna()
    grps = srs.value_counts(sort=False)
    data['geo'] = grps
    data['nuniq'] = grps.shape[0]
    if (cfg.bar.enable or cfg.pie.enable or cfg.value_table.enable):
        data['bar'] = (grps.nlargest(cfg.bar.bars) if cfg.bar.sort_descending else grps.nsmallest(cfg.bar.bars))
        # Reuse the bar selection for the pie chart when configured identically.
        if ((cfg.bar.bars == cfg.pie.slices) and (cfg.bar.sort_descending == cfg.pie.sort_descending)):
            data['pie'] = data['bar']
        else:
            data['pie'] = (grps.nlargest(cfg.pie.slices) if cfg.pie.sort_descending else grps.nsmallest(cfg.pie.slices))
        # Likewise reuse the bar/pie results for the value table when possible.
        if ((cfg.bar.bars == cfg.value_table.ngroups) and cfg.bar.sort_descending):
            data['value_table'] = data['bar']
        elif ((cfg.pie.slices == cfg.value_table.ngroups) and cfg.pie.sort_descending):
            data['value_table'] = data['pie']
        else:
            data['value_table'] = grps.nlargest(cfg.value_table.ngroups)
    if cfg.insight.enable:
        # Chi-squared uniformity test over the group counts.
        data['chisq'] = chisquare(grps.values)
    df = grps.reset_index()
    if (cfg.stats.enable or cfg.value_table.enable):
        data.update(_calc_nom_stats(srs, df, data['nrows'], data['nuniq']))
    elif (cfg.wordfreq.enable and cfg.insight.enable):
        data['len_stats'] = {'Minimum': srs.str.len().min(), 'Maximum': srs.str.len().max()}
    if cfg.wordlen.enable:
        lens = srs.str.len()
        data['len_hist'] = da.histogram(lens, cfg.wordlen.bins, (lens.min(), lens.max()))
    if (cfg.wordcloud.enable or cfg.wordfreq.enable):
        # If cloud and frequency plots share all text-processing options,
        # one word-frequency computation serves both.
        if all(((getattr(cfg.wordcloud, att) == getattr(cfg.wordfreq, att)) for att in ('top_words', 'stopword', 'stem', 'lemmatize'))):
            word_freqs = _calc_word_freq(df, cfg.wordfreq.top_words, cfg.wordfreq.stopword, cfg.wordfreq.lemmatize, cfg.wordfreq.stem)
            data['word_cnts_cloud'] = word_freqs['word_cnts']
            data['nuniq_words_cloud'] = word_freqs['nuniq_words']
        else:
            # df is consumed twice; copy so the first call cannot mutate it.
            word_freqs = _calc_word_freq(df.copy(), cfg.wordfreq.top_words, cfg.wordfreq.stopword, cfg.wordfreq.lemmatize, cfg.wordfreq.stem)
            word_freqs_cloud = _calc_word_freq(df, cfg.wordcloud.top_words, cfg.wordcloud.stopword, cfg.wordcloud.lemmatize, cfg.wordcloud.stem)
            data['word_cnts_cloud'] = word_freqs_cloud['word_cnts']
            # NOTE(review): this takes the unique-word count from the
            # frequency config rather than word_freqs_cloud — possibly
            # intentional, but worth confirming.
            data['nuniq_words_cloud'] = word_freqs['nuniq_words']
        data['word_cnts_freq'] = word_freqs['word_cnts']
        data['nwords_freq'] = word_freqs['nwords']
    return data
def test_negative_input():
    """remove_small_objects must reject arrays containing negative values."""
    negatives = np.random.randint(-4, -1, size=(5, 5))
    with testing.raises(ValueError):
        remove_small_objects(negatives)
_test()


def test_kernels_lns_inside_component():
    """FPGA codegen test: two matrix-vector products feeding a dot product
    should be detected as three concurrent kernels.

    NOTE(review): the '@dace.program' decorator and the '@' matmul operators
    were stripped by text extraction (this file lost all '@'-prefixed
    tokens); both are restored here.
    """
    # Temporarily disable unique-functions so every kernel copy is visible.
    unique_functions_conf = dace.config.Config.get('compiler', 'unique_functions')
    dace.config.Config.set('compiler', 'unique_functions', value='none')

    @dace.program
    def kernels_lns_inside_component(A: dace.float32[(8, 8)], x: dace.float32[8],
                                     B: dace.float32[(8, 8)], y: dace.float32[8]):
        tmp1 = (A @ x)
        tmp2 = (B @ y)
        return np.dot(tmp1, tmp2)

    A = np.random.rand(8, 8).astype(np.float32)
    B = np.random.rand(8, 8).astype(np.float32)
    x = np.random.rand(8).astype(np.float32)
    y = np.random.rand(8).astype(np.float32)
    sdfg = kernels_lns_inside_component.to_sdfg()
    sdfg.apply_transformations([FPGATransformSDFG, InlineSDFG])
    with config.set_temporary('compiler', 'fpga', 'concurrent_kernel_detection', value=True):
        program = sdfg.compile()
    assert (count_kernels(sdfg) == 3)
    z = program(A=A, x=x, B=B, y=y)
    ref = np.dot((A @ x), (B @ y))
    assert np.allclose(z, ref)
    # Restore the global configuration.
    dace.config.Config.set('compiler', 'unique_functions', value=unique_functions_conf)
    return sdfg
# NOTE(review): the '@st.cache(' decorator head was stripped by text
# extraction (this file lost all '@'-prefixed tokens); restored as the
# Streamlit cache with a custom hash function for torch Parameters.
@st.cache(hash_funcs={torch.nn.parameter.Parameter: (lambda parameter: parameter.data.detach().cpu().numpy())}, allow_output_mutation=True)
def load_blip_itm_model(device, model_type='base'):
    """Load the BLIP image-text-matching model in eval mode on `device`."""
    model = load_model('blip_image_text_matching', model_type, is_eval=True, device=device)
    return model
class DenseNet(nn.Module):
    """3D DenseNet for video clips.

    Args:
        sample_size: spatial size of the input frames.
        sample_duration: number of input frames.
        growth_rate, block_config, num_init_features, bn_size, drop_rate:
            standard DenseNet hyper-parameters.
        num_classes: output dimension of the final classifier.
        last_fc: when False, forward() returns pooled features, not logits.
    """

    def __init__(self, sample_size, sample_duration, growth_rate=32,
                 block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4,
                 drop_rate=0, num_classes=1000, last_fc=True):
        super(DenseNet, self).__init__()
        self.last_fc = last_fc
        self.sample_size = sample_size
        self.sample_duration = sample_duration
        # Stem: 3D conv that downsamples spatially but keeps time resolution.
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv3d(3, num_init_features, kernel_size=7,
                                stride=(1, 2, 2), padding=(3, 3, 3), bias=False)),
            ('norm0', nn.BatchNorm3d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool3d(kernel_size=3, stride=2, padding=1)),
        ]))
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate,
                                drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + (num_layers * growth_rate)
            if i != (len(block_config) - 1):
                # Transition halves the channel count between dense blocks.
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=(num_features // 2))
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2
        # Bug fix: the final norm was nn.BatchNorm2d, which raises at runtime
        # on the 5D activations of this 3D network; it must be BatchNorm3d.
        self.features.add_module('norm5', nn.BatchNorm3d(num_features))
        self.classifier = nn.Linear(num_features, num_classes)

    def forward(self, x):
        features = self.features(x)
        out = F.relu(features, inplace=True)
        # Average-pool over the remaining (time, height, width) extent.
        last_duration = math.ceil(self.sample_duration / 16)
        last_size = math.floor(self.sample_size / 32)
        out = F.avg_pool3d(out, kernel_size=(last_duration, last_size, last_size)).view(features.size(0), -1)
        if self.last_fc:
            out = self.classifier(out)
        return out
def randomize_hyperparameters(object: gpflow.Module) -> None:
    # Resample every trainable parameter of `object` in place: from its prior
    # when one is set, otherwise uniformly within its Sigmoid bijector bounds.
    for param in object.trainable_parameters:
        if (param.prior is not None):
            if ((param.prior.batch_shape == param.prior.event_shape == []) and (len(param.shape) == 1)):
                # Scalar prior on a vector parameter: draw one sample per element.
                sample = param.prior.sample(tf.shape(param))
            else:
                sample = param.prior.sample()
            if (param.prior_on is gpflow.base.PriorOn.UNCONSTRAINED):
                # Prior lives in unconstrained space; assign there directly.
                param.unconstrained_variable.assign(sample)
            else:
                param.assign(sample)
        elif isinstance(param.bijector, tfp.bijectors.Sigmoid):
            # No prior, but bounded support: sample uniformly between bounds.
            sample = tf.random.uniform(param.bijector.low.shape, minval=param.bijector.low, maxval=param.bijector.high, dtype=param.bijector.low.dtype)
            param.assign(sample)
def get_major_bit_number(n):
    """Locate the most significant set bit of a byte-sized value.

    Returns (position, rest) where `position` counts down from the MSB
    (0 for bit 7) and `rest` is `n` with that bit cleared.

    Raises:
        Exception: when `n` is falsy (e.g. 0).
    """
    if not n:
        raise Exception('Bad number')
    mask = 128
    position = 0
    # Walk the mask down from bit 7 until it hits a set bit.
    while not (n & mask):
        position += 1
        mask >>= 1
    return (position, n & ~mask)
class EmbeddingWithLinear(torch.nn.Module):
    """Toy module pairing an Embedding with a Linear layer.

    The embedding carries a float-qparams weight-only qconfig while the
    module as a whole uses the default qconfig.
    """

    def __init__(self):
        super().__init__()
        self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)
        self.fc = torch.nn.Linear(5, 5)
        self.emb.qconfig = float_qparams_weight_only_qconfig
        self.qconfig = default_qconfig

    def forward(self, indices, linear_in):
        embedded = self.emb(indices)
        projected = self.fc(linear_in)
        return embedded, projected
def other_ordering_pre(I, option_set, kwds):
    # Pre-processing hook: compute a reduced Groebner basis of I in an
    # alternative monomial ordering first; when the resulting variety is
    # small enough, arrange (via kwds) for conversion back with FGLM,
    # otherwise map the basis straight back into the original ring.
    if (not I):
        return (I, None)
    main_kwds = kwds
    options = option_set
    old_ring = next(iter(I)).ring()
    try:
        new_ring = old_ring.clone(ordering=options['switch_to'])
        # Forward all options except the ones that control this hook itself.
        kwds = {k: options[k] for k in options if (k not in ('other_ordering_first', 'switch_to', 'I'))}
        kwds['redsb'] = True
        I = groebner_basis([new_ring(poly) for poly in I], **kwds)
        variety_size = variety_size_from_gb(I)
        # Fall back to the global default bound when none was given.
        fglm_bound = (options.get('fglm_bound') or groebner_basis.options['fglm_bound'])
        if (variety_size < fglm_bound):
            # Small variety: let the main call convert orderings via FGLM.
            main_kwds['convert_with_fglm_from_ring'] = new_ring
            main_kwds['convert_with_fglm_to_ring'] = old_ring
        else:
            I = [old_ring(poly) for poly in I]
    finally:
        pass
    return (I, None)
class PolicyType(enum.Enum):
    """Enumerates the supported policy kinds."""

    CONTEXT_FREE = enum.auto()
    CONTEXTUAL = enum.auto()
    OFFLINE = enum.auto()

    def __repr__(self) -> str:
        # Make repr identical to str, e.g. 'PolicyType.OFFLINE'.
        return str(self)
class FSMState(SageObject):
    """A state of a finite state machine.

    NOTE(review): the '@property' and setter decorators on
    ``final_word_out`` and ``is_final`` were mangled by text extraction
    (this file lost all '@'-prefixed tokens, leaving the stray
    ``_word_out.setter`` / ``_final.setter`` fragments); they are restored
    here to match the upstream Sage implementation.
    """

    # Class-level defaults, overridden per instance in __init__.
    is_initial = False
    initial_probability = None

    def __init__(self, label, word_out=None, is_initial=False, is_final=False,
                 final_word_out=None, initial_probability=None, hook=None,
                 color=None, allow_label_None=False):
        if not allow_label_None and label is None:
            raise ValueError('Label None reserved for a special state, choose another label.')
        self._label_ = label
        # Normalize word_out to a list.
        if isinstance(word_out, list):
            self.word_out = word_out
        elif word_out is not None:
            self.word_out = [word_out]
        else:
            self.word_out = []
        self.is_initial = is_initial
        # is_final and final_word_out are linked via the properties below;
        # set is_final first so the final_word_out setter can validate.
        self._final_word_out_ = None
        self.is_final = is_final
        self.final_word_out = final_word_out
        self.initial_probability = initial_probability
        if hook is not None:
            if callable(hook):
                self.hook = hook
            else:
                raise TypeError('Wrong argument for hook.')
        self.color = color

    def __lt__(self, other):
        """Order states by their labels."""
        return self.label() < other.label()

    @property
    def final_word_out(self):
        """Output word emitted when the machine halts in this (final) state."""
        return self._final_word_out_

    @final_word_out.setter
    def final_word_out(self, final_word_out):
        if not self.is_final:
            if final_word_out is not None:
                raise ValueError('Only final states can have a final output word, but state %s is not final.' % (self.label(),))
            else:
                self._final_word_out_ = None
        elif isinstance(final_word_out, list):
            self._final_word_out_ = final_word_out
        elif final_word_out is not None:
            self._final_word_out_ = [final_word_out]
        else:
            self._final_word_out_ = []

    @property
    def is_final(self):
        """A state is final iff it has a (possibly empty) final output word."""
        return self.final_word_out is not None

    @is_final.setter
    def is_final(self, is_final):
        if is_final and self.final_word_out is None:
            self._final_word_out_ = []
        elif not is_final:
            if not self.final_word_out:
                self._final_word_out_ = None
            else:
                raise ValueError('State %s cannot be non-final, because it has a final output word. Only final states can have a final output word. ' % (self.label(),))

    def label(self):
        """Return the state's label."""
        return self._label_

    def __copy__(self):
        new = FSMState(self.label(), self.word_out, self.is_initial,
                       self.is_final, color=self.color,
                       final_word_out=self.final_word_out,
                       initial_probability=self.initial_probability)
        if hasattr(self, 'hook'):
            new.hook = self.hook
        return new

    copy = __copy__

    def __deepcopy__(self, memo):
        try:
            # relabeled() smuggles the replacement label in via this attribute.
            label = self._deepcopy_relabel_
        except AttributeError:
            label = deepcopy(self.label(), memo)
        new = FSMState(label, deepcopy(self.word_out, memo), self.is_initial, self.is_final)
        if hasattr(self, 'hook'):
            new.hook = deepcopy(self.hook, memo)
        new.color = deepcopy(self.color, memo)
        new.final_word_out = deepcopy(self.final_word_out, memo)
        new.initial_probability = deepcopy(self.initial_probability, memo)
        return new

    def deepcopy(self, memo=None):
        return deepcopy(self, memo)

    def relabeled(self, label, memo=None):
        """Return a deep copy of this state carrying a different label."""
        self._deepcopy_relabel_ = label
        new = deepcopy(self, memo)
        del self._deepcopy_relabel_
        return new

    def __getstate__(self):
        odict = self.__dict__.copy()
        try:
            # Transitions reference other states; not pickled with the state.
            del odict['transitions']
        except KeyError:
            pass
        return odict

    def __hash__(self):
        return hash(self.label())

    def _repr_(self):
        return pretty(self.label())

    def __eq__(self, other):
        # States compare equal by label only.
        if not is_FSMState(other):
            return False
        return self.label() == other.label()

    def __ne__(self, other):
        return not (self == other)

    def fully_equal(self, other, compare_color=True):
        """Equality on all attributes, optionally ignoring color."""
        color = (not compare_color) or (self.color == other.color)
        return ((self == other) and (self.is_initial == other.is_initial)
                and (self.is_final == other.is_final)
                and (self.final_word_out == other.final_word_out)
                and (self.word_out == other.word_out) and color
                and (self.initial_probability == other.initial_probability))

    def __bool__(self):
        return True

    def _epsilon_successors_(self, fsm=None):
        """Map each state reachable by epsilon transitions to its output words."""
        if not hasattr(self, 'transitions'):
            raise ValueError('State %s does not belong to a finite state machine.' % (self,))
        it = _FSMProcessIteratorEpsilon_(fsm, input_tape=[], initial_state=self)
        for _ in it:
            pass
        _epsilon_successors_dict_ = it.visited_states
        # Drop the trivial empty path back to self.
        _epsilon_successors_dict_[self].remove([])
        if not _epsilon_successors_dict_[self]:
            del _epsilon_successors_dict_[self]
        for (s, outputs) in _epsilon_successors_dict_.items():
            # Deduplicate the output words for each successor.
            _epsilon_successors_dict_[s] = [t for (t, _) in itertools.groupby(sorted(outputs))]
        return _epsilon_successors_dict_

    def _in_epsilon_cycle_(self, fsm=None):
        return self in self._epsilon_successors_(fsm)

    def _epsilon_cycle_output_empty_(self, fsm=None):
        """True iff the epsilon cycle through this state produces no output."""
        try:
            return not any(self._epsilon_successors_(fsm)[self])
        except KeyError:
            raise ValueError('State %s is not in an epsilon cycle.' % (self,))
def forward_one_multilayer(rnns, lstm_input, layer_states, dropout_amount=0.0):
    """Advance a stack of RNN cells one time step.

    Args:
        rnns: per-layer cell modules mapping (input, (h, c)) -> (h', c').
        lstm_input: input vector for the bottom layer.
        layer_states: per-layer (h, c) state tuples.
        dropout_amount: dropout applied between layers (not after the top).

    Returns:
        ((cell_states, hidden_states), top_hidden, new_states)
    """
    total_layers = len(layer_states)
    new_states = []
    cell_states = []
    hidden_states = []
    current = lstm_input
    for layer in range(total_layers):
        h, c = rnns[layer](torch.unsqueeze(current, 0), layer_states[layer])
        new_states.append((h, c))
        h = h.squeeze()
        c = c.squeeze()
        current = h
        # Inter-layer dropout only; the topmost output is left untouched.
        if layer < total_layers - 1:
            current = F.dropout(current, p=dropout_amount)
        cell_states.append(c)
        hidden_states.append(h)
    return ((cell_states, hidden_states), current, new_states)
def cer(s1, s2):
    """Character error rate: Levenshtein distance between the two strings
    after removing every space character from both."""
    stripped_a = s1.replace(' ', '')
    stripped_b = s2.replace(' ', '')
    return Lev.distance(stripped_a, stripped_b)
class SteinWatkinsAllData():
    """Iterator over one file of the Stein-Watkins elliptic curve database.

    File ``a.<num>.bz2`` (zero-padded to three digits) under
    ``SAGE_SHARE/stein_watkins`` is streamed lazily; each record parsed from
    the text becomes a ``SteinWatkinsIsogenyClass``.
    """

    def __init__(self, num):
        num = int(num)
        self.num = num
        if (num < 0):
            raise RuntimeError(('num (=%s) must be a nonnegative integer' % num))
        # Zero-pad the file index to three digits, e.g. 7 -> "007".
        name = str(num)
        name = (('0' * (3 - len(name))) + name)
        self._file = os.path.join(SAGE_SHARE, 'stein_watkins', ('a.%s.bz2' % name))
        self._iter = iter(self)

    def __repr__(self):
        return ('Stein-Watkins Database a.%s Iterator' % self.num)

    def __iter__(self):
        """Yield each isogeny class parsed from the (bz2-compressed) data file."""
        try:
            file = bz2.open(self._file, 'rt', encoding='utf-8')
        except IOError:
            raise IOError(('The Stein-Watkins data file %s must be installed.' % self._file))
        C = None
        for L in file:
            if (len(L) == 0):
                continue
            # Lines not starting with '[' open a new isogeny-class record;
            # lines starting with '[' list individual curves of the class.
            if (L[0] != '['):
                if (C is not None):
                    (yield C)
                x = L.split()
                N = int(x[0])
                C = SteinWatkinsIsogenyClass(N)
                C.rank = int(x[2])
                C.leading_coefficient = x[3]
                C.isogeny_number = x[4]
                C.modular_degree = x[5]
                C.curves = []
                C.data = x
            else:
                w = L.split()
                # NOTE(review): eval() on file content — trusted local data only.
                C.curves.append([eval(w[0]), w[1], w[2], w[3]])
        # Emit the final record after the file is exhausted.
        (yield C)

    def __next__(self):
        return next(self._iter)
    next = __next__

    def __getitem__(self, N):
        """Return classes of conductor N, or within a slice of conductors."""
        X = []
        if isinstance(N, slice):
            (min_level, max_level, step) = N.indices(len(list(self)))
            for C in self:
                M = C.conductor
                if ((M >= min_level) and (M <= max_level)):
                    X.append(C)
                elif (M > max_level):
                    # Records are sorted by conductor, so we can stop early.
                    return X
        else:
            for C in self:
                M = C.conductor
                if (M == N):
                    X.append(C)
                elif (M > N):
                    return X
        return X

    def iter_levels(self):
        """Iterate over the curve classes grouped by conductor (level)."""
        it = iter(self)
        C = []
        N = 0
        while True:
            try:
                E = next(it)
            except StopIteration:
                if C:
                    (yield C)
                return
            if (E.conductor != N):
                if C:
                    (yield C)
                C = [E]
                N = E.conductor
            else:
                C.append(E)
        # NOTE(review): unreachable (loop above only exits via return); kept as-is.
        (yield C)
def conv_flop_count(x_shape: typing.List[int], w_shape: typing.List[int], out_shape: typing.List[int]) -> typing.Counter[str]:
    """Count multiply-accumulate FLOPs of a convolution.

    Shapes follow the usual NCHW-style convention: ``x_shape[0]`` is the batch
    size, ``w_shape[1]`` the input channels, ``out_shape[1]`` the output
    channels, and trailing dimensions are spatial.

    Returns a Counter with the total under the key ``'conv'``.
    """
    batch = x_shape[0]
    c_in = w_shape[1]
    c_out = out_shape[1]
    spatial_out = prod(out_shape[2:])
    kernel_elems = prod(w_shape[2:])
    total = batch * spatial_out * c_out * c_in * kernel_elems
    return Counter({'conv': total})
def isogenies_sporadic_Q(E, l=None, minimal_models=True):
    """Return the l-isogenies from E for the finitely many sporadic rational
    j-invariants (table ``sporadic_j``), or ``[]`` if none apply.

    Args:
        E: an elliptic curve.
        l: optional prime; if given, only return the isogeny when the sporadic
            degree for ``j(E)`` equals ``l``.
        minimal_models: compute a minimal model of the codomain when the base
            field is a number field.
    """
    j = E.j_invariant()
    j = QQ(j)
    if ((j not in sporadic_j) or ((l is not None) and (sporadic_j[j] != l))):
        return []
    F = E.base_field()
    # Precomputed (a4, a6) coefficients and kernel-polynomial data for this j.
    data = _sporadic_Q_data(j)
    Ew = E.short_weierstrass_model()
    E_to_Ew = E.isomorphism_to(Ew)
    (c4, c6) = Ew.c_invariants()
    ((a4, a6), f) = data
    # Twisting scale relating the stored model to Ew.
    # NOTE(review): assumes c4 and a6 are nonzero for all sporadic j — confirm.
    d = ((c6 * a4) / ((18 * c4) * a6))
    R = PolynomialRing(F, 'X')
    n = len(f)
    # Rescale the stored kernel polynomial coefficients by powers of d.
    ker = R([((d ** ((n - i) - 1)) * f[i]) for i in range(n)])
    from sage.rings.number_field.number_field_base import NumberField
    model = ('minimal' if (minimal_models and isinstance(F, NumberField)) else None)
    isog = Ew.isogeny(kernel=ker, degree=l, model=model, check=False)
    # Precompose with the isomorphism so the returned isogeny starts at E.
    isog = (isog * E_to_Ew)
    return [isog]
def initialize_weights(*models):
    """Initialize every module of the given models in place.

    Conv1d/Conv2d/Linear layers get Kaiming-normal weights (ReLU gain) and
    zeroed biases; BatchNorm/GroupNorm/SyncBatchNorm layers get unit weights
    and zeroed biases. Other module types are left untouched.
    """
    conv_like = (nn.Conv2d, nn.Linear, nn.Conv1d)
    norm_like = (nn.BatchNorm2d, nn.BatchNorm1d, nn.GroupNorm, nn.SyncBatchNorm)
    for net in models:
        for layer in net.modules():
            if isinstance(layer, conv_like):
                nn.init.kaiming_normal_(layer.weight, nonlinearity='relu')
                if layer.bias is not None:
                    layer.bias.data.zero_()
            elif isinstance(layer, norm_like):
                layer.weight.data.fill_(1)
                layer.bias.data.zero_()
def check_tensorflow_version():
    """Raise EnvironmentError when the installed TensorFlow is too old.

    The '-dev' suffix makes 1.4 development builds acceptable under
    LooseVersion's lexical comparison.
    """
    min_tf_version = '1.4.0-dev'
    if (version.LooseVersion(tf.__version__) < version.LooseVersion(min_tf_version)):
        raise EnvironmentError(('Tensorflow version must >= %s' % min_tf_version))
def dump_cfg():
    """Serialize the global config ``_C`` to ``_C.OUT_DIR/_C.CFG_DEST``."""
    cfg_file = os.path.join(_C.OUT_DIR, _C.CFG_DEST)
    with open(cfg_file, 'w') as f:
        _C.dump(stream=f)
class TestCWorkspace(htu.HypothesisTestCase):
    """Tests for the C-extension Workspace bindings (nets, blobs, serde).

    Fixed: the checked-in text had bare keyword-argument tuples such as
    ``(name=st.text(), ...)`` before several test methods — syntax errors left
    behind by stripped hypothesis ``@given(...)`` decorators. The decorators
    are restored below (requires ``from hypothesis import given`` at file top
    — confirm it is present).
    """

    def test_net_execution(self):
        ws = workspace.C.Workspace()
        self.assertEqual(ws.nets, {})
        self.assertEqual(ws.blobs, {})
        net = core.Net('test-net')
        net.ConstantFill([], 'testblob', shape=[1, 2, 3, 4], value=1.0)
        ws.create_net(net)
        # Creating the same net twice must fail unless overwrite is requested.
        with self.assertRaises(RuntimeError):
            ws.create_net(net)
        ws.create_net(net, True)
        ws.create_net(net, overwrite=True)
        self.assertIn('testblob', ws.blobs)
        self.assertEqual(len(ws.nets), 1)
        net_name = net.Proto().name
        self.assertIn('test-net', net_name)
        net = ws.nets[net_name].run()
        blob = ws.blobs['testblob']
        np.testing.assert_array_equal(np.ones((1, 2, 3, 4), dtype=np.float32), blob.fetch())

    @given(name=st.text(), value=st.floats(min_value=(- 1), max_value=1.0))
    def test_operator_run(self, name, value):
        ws = workspace.C.Workspace()
        op = core.CreateOperator('ConstantFill', [], [name], shape=[1], value=value)
        ws.run(op)
        self.assertIn(name, ws.blobs)
        np.testing.assert_allclose([value], ws.blobs[name].fetch(), atol=0.0001, rtol=0.0001)

    @given(blob_name=st.text(), net_name=st.text(), value=st.floats(min_value=(- 1), max_value=1.0))
    def test_net_run(self, blob_name, net_name, value):
        ws = workspace.C.Workspace()
        net = core.Net(net_name)
        net.ConstantFill([], [blob_name], shape=[1], value=value)
        ws.run(net)
        self.assertIn(blob_name, ws.blobs)
        # Running (as opposed to creating) a net must not register it.
        self.assertNotIn(net_name, ws.nets)
        np.testing.assert_allclose([value], ws.blobs[blob_name].fetch(), atol=0.0001, rtol=0.0001)

    @given(blob_name=st.text(), net_name=st.text(), plan_name=st.text(), value=st.floats(min_value=(- 1), max_value=1.0))
    def test_plan_run(self, blob_name, plan_name, net_name, value):
        ws = workspace.C.Workspace()
        plan = core.Plan(plan_name)
        net = core.Net(net_name)
        net.ConstantFill([], [blob_name], shape=[1], value=value)
        plan.AddStep(core.ExecutionStep('step', nets=[net], num_iter=1))
        ws.run(plan)
        self.assertIn(blob_name, ws.blobs)
        self.assertIn(net.Name(), ws.nets)
        np.testing.assert_allclose([value], ws.blobs[blob_name].fetch(), atol=0.0001, rtol=0.0001)

    @given(blob_name=st.text(), net_name=st.text(), value=st.floats(min_value=(- 1), max_value=1.0))
    def test_net_create(self, blob_name, net_name, value):
        ws = workspace.C.Workspace()
        net = core.Net(net_name)
        net.ConstantFill([], [blob_name], shape=[1], value=value)
        ws.create_net(net).run()
        self.assertIn(blob_name, ws.blobs)
        self.assertIn(net.Name(), ws.nets)
        np.testing.assert_allclose([value], ws.blobs[blob_name].fetch(), atol=0.0001, rtol=0.0001)

    @given(name=st.text(), value=htu.tensor(), device_option=st.sampled_from(htu.device_options))
    def test_array_serde(self, name, value, device_option):
        ws = workspace.C.Workspace()
        ws.create_blob(name).feed(value, device_option=device_option)
        self.assertIn(name, ws.blobs)
        blob = ws.blobs[name]
        np.testing.assert_equal(value, ws.blobs[name].fetch())
        # Round-trip the blob through serialize/deserialize.
        serde_blob = ws.create_blob('{}_serde'.format(name))
        serde_blob.deserialize(blob.serialize(name))
        np.testing.assert_equal(value, serde_blob.fetch())

    @given(name=st.text(), value=st.text())
    def test_string_serde(self, name, value):
        value = value.encode('ascii', 'ignore')
        ws = workspace.C.Workspace()
        ws.create_blob(name).feed(value)
        self.assertIn(name, ws.blobs)
        blob = ws.blobs[name]
        self.assertEqual(value, ws.blobs[name].fetch())
        serde_blob = ws.create_blob('{}_serde'.format(name))
        serde_blob.deserialize(blob.serialize(name))
        self.assertEqual(value, serde_blob.fetch())

    def test_exception(self):
        ws = workspace.C.Workspace()
        with self.assertRaises(TypeError):
            ws.create_net('...')
# NOTE(review): the line below is the tail of a truncated decorator — most
# likely ``@registry.autoregister_params(type=...)``; confirm against the
# original file.
_params(type=dtypes.DataInstrumentationType.Save)
class SaveProvider(InstrumentationProvider, DataInstrumentationProviderMixin):
    """Code-generation hooks that emit calls to a ``dace::DataSerializer`` so
    instrumented data containers and state symbols are saved to disk at
    runtime (under ``<build_folder>/data``).

    NOTE(review): ``_setup_gpu_runtime`` and ``_generate_copy_to_host`` are
    referenced but not defined in this excerpt — presumably defined elsewhere
    on the class/mixin.
    """

    def __init__(self):
        super().__init__()
        self.gpu_runtime_init = False
        from dace.codegen.targets.framecode import DaCeCodeGenerator
        self.codegen: DaCeCodeGenerator = None

    def on_sdfg_begin(self, sdfg: SDFG, local_stream: CodeIOStream, global_stream: CodeIOStream, codegen: 'DaCeCodeGenerator'):
        # Only the top-level SDFG owns the serializer object.
        if (sdfg.parent is None):
            self.codegen = codegen
            # Forward slashes keep the generated C++ string valid on Windows.
            path = os.path.abspath(os.path.join(sdfg.build_folder, 'data')).replace('\\', '/')
            codegen.statestruct.append('dace::DataSerializer *serializer;')
            sdfg.append_init_code(f'''__state->serializer = new dace::DataSerializer("{path}");
''')

    def on_sdfg_end(self, sdfg: SDFG, local_stream: CodeIOStream, global_stream: CodeIOStream):
        if (sdfg.parent is None):
            sdfg.append_exit_code('delete __state->serializer;\n')

    def on_state_begin(self, sdfg: SDFG, state: SDFGState, local_stream: CodeIOStream, global_stream: CodeIOStream):
        """Emit (optionally condition-guarded) saves of all defined symbols."""
        if (state.symbol_instrument == dtypes.DataInstrumentationType.No_Instrumentation):
            return
        (condition_preamble, condition_postamble) = ('', '')
        condition: Optional[CodeBlock] = state.symbol_instrument_condition
        # A condition of '1' is always-true, so no guard is emitted for it.
        if ((condition is not None) and (not (condition.as_string == '1'))):
            cond_string = None
            if (condition.language == dtypes.Language.CPP):
                cond_string = condition.as_string
            elif (condition.language == dtypes.Language.Python):
                cond_string = cppunparse.py2cpp(condition.code[0], expr_semicolon=False)
            else:
                warnings.warn(('Unrecognized language %s in codeblock' % condition.language))
                cond_string = condition.as_string
            condition_preamble = (f'if ({cond_string})' + ' {')
            condition_postamble = '}'
        state_id = sdfg.node_id(state)
        local_stream.write(condition_preamble, sdfg, state_id)
        defined_symbols = state.defined_symbols()
        for (sym, _) in defined_symbols.items():
            local_stream.write(f'''__state->serializer->save_symbol("{sym}", "{state_id}", {cpp.sym2cpp(sym)});
''', sdfg, state_id)
        local_stream.write(condition_postamble, sdfg, state_id)

    def on_node_end(self, sdfg: SDFG, state: SDFGState, node: nodes.AccessNode, outer_stream: CodeIOStream, inner_stream: CodeIOStream, global_stream: CodeIOStream):
        """Emit a save of the access node's data container after it is written."""
        from dace.codegen.dispatcher import DefinedType
        # Device-level code paths are handled elsewhere; skip them here.
        if (is_devicelevel_gpu(sdfg, state, node) or is_devicelevel_fpga(sdfg, state, node)):
            return
        (condition_preamble, condition_postamble) = ('', '')
        condition: Optional[CodeBlock] = node.instrument_condition
        if ((condition is not None) and (not (condition.as_string == '1'))):
            cond_string = None
            if (condition.language == dtypes.Language.CPP):
                cond_string = condition.as_string
            elif (condition.language == dtypes.Language.Python):
                cond_string = cppunparse.py2cpp(condition.code[0], expr_semicolon=False)
            else:
                warnings.warn(('Unrecognized language %s in codeblock' % condition.language))
                cond_string = condition.as_string
            condition_preamble = (f'if ({cond_string})' + ' {')
            condition_postamble = '}'
        desc = node.desc(sdfg)
        ptrname = cpp.ptr(node.data, desc, sdfg, self.codegen)
        (defined_type, _) = self.codegen.dispatcher.defined_vars.get(ptrname)
        # Scalars are saved by address, not by value.
        if (defined_type == DefinedType.Scalar):
            ptrname = ('&' + ptrname)
        state_id = sdfg.node_id(state)
        node_id = state.node_id(node)
        uuid = f'{sdfg.sdfg_id}_{state_id}_{node_id}'
        (preamble, postamble) = ('', '')
        if (desc.storage == dtypes.StorageType.GPU_Global):
            # GPU-resident data must be copied back to the host before saving.
            self._setup_gpu_runtime(sdfg, global_stream)
            (preamble, postamble, ptrname) = self._generate_copy_to_host(node, desc, ptrname)
        shape = ', '.join((cpp.sym2cpp(s) for s in desc.shape))
        strides = ', '.join((cpp.sym2cpp(s) for s in desc.strides))
        inner_stream.write(condition_preamble, sdfg, state_id, node_id)
        inner_stream.write(preamble, sdfg, state_id, node_id)
        inner_stream.write(f'''__state->serializer->save({ptrname}, {cpp.sym2cpp((desc.total_size - desc.start_offset))}, "{node.data}", "{uuid}", {shape}, {strides});
''', sdfg, state_id, node_id)
        inner_stream.write(postamble, sdfg, state_id, node_id)
        inner_stream.write(condition_postamble, sdfg, state_id, node_id)
def eliminate_rhs_duplicates(expr_list_list, ref_node_sequence):
    """Replace subexpressions appearing more than once across the right-hand
    sides of a cascaded assignment with shared ``LetRefNode`` temporaries.

    Args:
        expr_list_list: list of expression lists; the last element of each is
            the RHS that is scanned and rewritten in place.
        ref_node_sequence: output list; created LetRefNodes are appended here
            in evaluation order.
    """
    seen_nodes = set()
    ref_nodes = {}

    def find_duplicates(node):
        # Literals and names are cheap to re-evaluate; never extract them.
        if (node.is_literal or node.is_name):
            return
        if (node in seen_nodes):
            # Second sighting: allocate a temporary (once per node).
            if (node not in ref_nodes):
                ref_node = LetRefNode(node)
                ref_nodes[node] = ref_node
                ref_node_sequence.append(ref_node)
        else:
            seen_nodes.add(node)
            # Only recurse into sequence constructors, matching substitution below.
            if node.is_sequence_constructor:
                for item in node.args:
                    find_duplicates(item)
    for expr_list in expr_list_list:
        rhs = expr_list[(- 1)]
        find_duplicates(rhs)
    if (not ref_nodes):
        return

    def substitute_nodes(node):
        # Swap duplicated nodes for their LetRefNode; rewrite children of
        # sequence constructors in place.
        if (node in ref_nodes):
            return ref_nodes[node]
        elif node.is_sequence_constructor:
            node.args = list(map(substitute_nodes, node.args))
        return node
    # Also substitute within the extracted nodes themselves, so nested
    # duplicates inside a temporary are shared too.
    for node in ref_nodes:
        if node.is_sequence_constructor:
            node.args = list(map(substitute_nodes, node.args))
    for expr_list in expr_list_list:
        expr_list[(- 1)] = substitute_nodes(expr_list[(- 1)])
def blockUNet(in_c, out_c, name, transposed=False, bn=True, relu=True, dropout=False):
    """Build one U-Net block: activation, (transposed) 4x4 stride-2 conv,
    optional batch norm and optional dropout, as an nn.Sequential.

    Submodule names are prefixed with ``name`` (e.g. ``<name>.conv``).
    """
    layers = nn.Sequential()
    # Activation first (decoder blocks use plain ReLU, encoder blocks leaky).
    if relu:
        layers.add_module(('%s.relu' % name), nn.ReLU(inplace=True))
    else:
        layers.add_module(('%s.leakyrelu' % name), nn.LeakyReLU(0.2, inplace=True))
    # 4x4 kernel, stride 2, padding 1: halves (conv) or doubles (tconv) H/W.
    if transposed:
        layers.add_module(('%s.tconv' % name), nn.ConvTranspose2d(in_c, out_c, 4, 2, 1, bias=False))
    else:
        layers.add_module(('%s.conv' % name), nn.Conv2d(in_c, out_c, 4, 2, 1, bias=False))
    if bn:
        layers.add_module(('%s.bn' % name), nn.BatchNorm2d(out_c))
    if dropout:
        layers.add_module(('%s.dropout' % name), nn.Dropout2d(0.5, inplace=True))
    return layers
def array2complex(Z):
    """Combine a stacked real/imag array into a single complex array.

    ``Z[0]`` holds the real parts and ``Z[1]`` the imaginary parts.

    Raises:
        ValueError: if the first axis of Z does not have length 2.
    """
    if Z.shape[0] != 2:
        raise ValueError('First axis of Z must be of length 2')
    real_part, imag_part = Z[0], Z[1]
    return real_part + 1j * imag_part
def _norm(p, dim):
    """Compute the norm of tensor ``p`` reduced over all dimensions except
    ``dim`` (weight-norm style).

    ``dim is None`` returns the full norm; the result otherwise keeps
    singleton dimensions so it broadcasts against ``p``.
    """
    if (dim is None):
        return p.norm()
    elif (dim == 0):
        # Shape (d0, 1, 1, ...): norm over everything but the first dim.
        output_size = ((p.size(0),) + ((1,) * (p.dim() - 1)))
        return p.contiguous().view(p.size(0), (- 1)).norm(dim=1).view(*output_size)
    elif (dim == (p.dim() - 1)):
        # Shape (1, ..., 1, d_last): norm over everything but the last dim.
        output_size = (((1,) * (p.dim() - 1)) + (p.size((- 1)),))
        return p.contiguous().view((- 1), p.size((- 1))).norm(dim=0).view(*output_size)
    else:
        # General case: move `dim` to the front, recurse, then move it back.
        return _norm(p.transpose(0, dim), 0).transpose(0, dim)
def add_gaps_back(sequence: str, gap_mask: list) -> str:
    """Reinsert gap characters into an ungapped sequence.

    ``gap_mask`` holds ``None`` where a residue from ``sequence`` belongs and
    a gap character (e.g. ``'-'``) everywhere else. Residues are consumed from
    ``sequence`` in order.
    """
    rebuilt = []
    cursor = 0
    for slot in gap_mask:
        if slot is None:
            rebuilt.append(sequence[cursor])
            cursor += 1
        else:
            rebuilt.append(slot)
    return ''.join(rebuilt)
def get_scheduler(optimizer, opt):
    """Return a learning-rate scheduler for ``optimizer`` per ``opt.lr_policy``.

    Supported policies: 'linear' (constant then linear decay to zero),
    'step', 'plateau', and 'cosine'.

    Raises:
        NotImplementedError: for an unknown ``opt.lr_policy``.
    """
    if (opt.lr_policy == 'linear'):
        def lambda_rule(epoch):
            # Constant LR for the first `niter` epochs (offset by epoch_count),
            # then linear decay to zero over `niter_decay` epochs.
            lr_l = (1.0 - (max(0, ((epoch + opt.epoch_count) - opt.niter)) / float((opt.niter_decay + 1))))
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif (opt.lr_policy == 'step'):
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif (opt.lr_policy == 'plateau'):
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif (opt.lr_policy == 'cosine'):
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        # Fixed: the original *returned* an un-raised NotImplementedError
        # instance (with its message never %-formatted), so callers silently
        # received an exception object instead of a scheduler.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
class AvenueDataset(AnomalibVideoDataset):
    """CUHK Avenue video anomaly dataset.

    Wraps AnomalibVideoDataset with Avenue-specific sample indexing; frames
    live under ``root`` and ground-truth masks under ``gt_dir``.
    """

    def __init__(self, task: TaskType, root: (Path | str), gt_dir: (Path | str), transform: A.Compose, split: Split, clip_length_in_frames: int=1, frames_between_clips: int=1) -> None:
        super().__init__(task, transform, clip_length_in_frames, frames_between_clips)
        # Normalize both locations to Path objects regardless of input type.
        self.root = (root if isinstance(root, Path) else Path(root))
        self.gt_dir = (gt_dir if isinstance(gt_dir, Path) else Path(gt_dir))
        self.split = split
        self.indexer_cls: Callable = AvenueClipsIndexer

    def _setup(self) -> None:
        # Build the samples DataFrame lazily (called by the base class).
        self.samples = make_avenue_dataset(self.root, self.gt_dir, self.split)
# NOTE(review): the line below is the tail of a truncated decorator —
# presumably ``@pytest.mark.mpi``; confirm against the original file.
.mpi
def test_isnot_commworld_1():
    """Check that ``is not MPI.COMM_WORLD`` evaluates consistently inside and
    outside the (presumed) dace program."""
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    comm2 = comm.Dup()

    # NOTE(review): the inner function uses dace type annotations and is
    # invoked like a compiled dace program; its ``@dace.program`` decorator
    # appears to have been stripped — confirm.
    def isnot_commworld_1(out: dace.bool[1]):
        out[0] = (comm2 is not MPI.COMM_WORLD)
    res = np.zeros((1,), dtype=np.bool_)
    isnot_commworld_1(res)
    assert (res[0] == (comm2 is not MPI.COMM_WORLD))
def get_visualizer(renderer_name, renderer_config):
    """Factory: return the renderer instance named by ``renderer_name``.

    Raises:
        Exception: when ``renderer_name`` is not a known visualizer.
    """
    if renderer_name != 'MultiPathPPRenderer':
        raise Exception(f'Unknown visualizer {renderer_name}')
    return MultiPathPPRenderer(renderer_config)
def test_parse_regions():
    """Feed a battery of region-selection expressions through the sfepy BNF
    grammar and assert that every one of them parses."""
    from sfepy.discrete.parse_regions import create_bnf
    # Representative region expressions: set operators (+v/-v/*c/...),
    # coordinate predicates, group selectors and function-based selectors.
    test_strs = ['vertices of surface -v r.Omega', 'r.Y_2 +v copy r.Y_1', 'vertices in (y <= 0.00001) & (x < 0.11)', 'vertices in ((y <= 0.00001) & (x < 0.11))', 'vertices in (((y <= 0.00001) & (x < 0.11)))', 'vertices in (((0.00001 < y) & (x < 0.11)))', 'vertices of group 0', 'vertices of group 0 +v vertices of group 1\n +c cells by afun', 'all -v vertices in (y == 0.00001)', 'all -v vertices of surface', 'all -c r.DOmega_100', 'r.Y_1 -v vertices of surface *c r.Z_8\n *v vertices in (y > 0)', 'vertices of surface +v vertices by pokus', 'cells of group 6 +c vertices by fn2_3c', 'r.Y_1 *v (r.Y_2 +c (vertices in (y > 0) *v r.Y_32))\n -v vertices of surface -c r.Y_5', 'copy r.ab2-b-c +v r.d12-23', 'vertices by afun', 'vertex in r.Gamma_3', 'vertex 10', 'vertex 10, 20, 30', 'cell 10', 'cell 10, 20, 30', 'vertex 10, 20 +v cell 30, 40', '(vertex 10, 20) +v (cell 30, 40)', 'cell 10, 20, 30 +v vertex 10', 'cells by afun']
    stack = []
    bnf = create_bnf(stack)
    n_fail = 0
    for test_str in test_strs:
        # The grammar pushes parse results onto `stack`; clear it per string.
        stack[:] = []
        try:
            bnf.parseString(test_str)
        except:
            # NOTE(review): bare except deliberately treats any parser error
            # as a test failure for this string.
            tst.report(('failed: %s' % test_str))
            n_fail += 1
    tst.report(('%d failures' % n_fail))
    assert (n_fail == 0)
class ConditionalBatchNormalization(chainer.Chain):
    """Batch normalization whose scale/shift are supplied per call.

    Normalization uses fixed unit gamma and zero beta internally; the
    caller-provided ``gamma``/``beta`` (e.g. category-conditional embeddings)
    are broadcast and applied afterwards.
    """

    def __init__(self, size, n_cat, decay=0.9, eps=2e-05, dtype=numpy.float32):
        super(ConditionalBatchNormalization, self).__init__()
        # Running statistics, persisted with the chain.
        self.avg_mean = numpy.zeros(size, dtype=dtype)
        self.register_persistent('avg_mean')
        self.avg_var = numpy.zeros(size, dtype=dtype)
        self.register_persistent('avg_var')
        # Number of finetune batches seen (drives the cumulative-average decay).
        self.N = 0
        self.register_persistent('N')
        self.decay = decay
        self.eps = eps
        self.n_cat = n_cat

    def __call__(self, x, gamma, beta, **kwargs):
        """Normalize ``x`` and apply the conditional ``gamma``/``beta``.

        ``finetune=True`` switches to a cumulative running-average decay.
        """
        argument.check_unexpected_kwargs(kwargs, test='test argument is not supported anymore. Use chainer.using_config')
        (finetune,) = argument.parse_kwargs(kwargs, ('finetune', False))
        # Internal normalization uses identity scale and zero shift; the real
        # conditioning is applied after normalization below.
        with cuda.get_device_from_id(self._device_id):
            _gamma = variable.Variable(self.xp.ones(self.avg_mean.shape, dtype=x.dtype))
        with cuda.get_device_from_id(self._device_id):
            _beta = variable.Variable(self.xp.zeros(self.avg_mean.shape, dtype=x.dtype))
        if configuration.config.train:
            if finetune:
                self.N += 1
                decay = (1.0 - (1.0 / self.N))
            else:
                decay = self.decay
            ret = chainer.functions.batch_normalization(x, _gamma, _beta, eps=self.eps, running_mean=self.avg_mean, running_var=self.avg_var, decay=decay)
        else:
            # Inference: use the stored running statistics.
            mean = variable.Variable(self.avg_mean)
            var = variable.Variable(self.avg_var)
            ret = batch_normalization.fixed_batch_normalization(x, _gamma, _beta, mean, var, self.eps)
        shape = ret.shape
        ndim = len(shape)
        # Append singleton dims so gamma/beta broadcast over spatial axes.
        gamma = F.broadcast_to(F.reshape(gamma, (list(gamma.shape) + ([1] * (ndim - len(gamma.shape))))), shape)
        beta = F.broadcast_to(F.reshape(beta, (list(beta.shape) + ([1] * (ndim - len(beta.shape))))), shape)
        return ((gamma * ret) + beta)
class VGG(nn.Module):
    """VGG-style embedding network: a caller-supplied feature extractor
    followed by an MLP head producing 128-dimensional embeddings.

    The head expects the flattened features to have 512 * 4 * 6 elements.
    """

    def __init__(self, features):
        super(VGG, self).__init__()
        self.features = features
        self.embeddings = nn.Sequential(
            nn.Linear(((512 * 4) * 6), 4096), nn.ReLU(True),
            nn.Linear(4096, 4096), nn.ReLU(True),
            nn.Linear(4096, 128), nn.ReLU(True))

    def forward(self, x):
        feat = self.features(x)
        # (N, C, H, W) -> (N, H, W, C); permute(0, 2, 3, 1) is equivalent to
        # the chained transpose(1, 3).transpose(1, 2) of the original layout.
        feat = feat.permute(0, 2, 3, 1).contiguous()
        flat = feat.view(feat.size(0), -1)
        return self.embeddings(flat)
def get_params(mode, attn_map, train_outputs, features, labels):
    """Resolve the parameter specs in ``attn_map['params']`` to values.

    Each spec selects its value from labels ('label' key), features
    ('feature'), a layer's outputs ('layer' + 'output'), or a literal
    ('value'). The result always carries ``mode`` under the 'mode' key.
    """
    def _resolve(spec):
        if 'label' in spec:
            return labels[spec['label']]
        if 'feature' in spec:
            return features[spec['feature']]
        if 'layer' in spec:
            return train_outputs[spec['layer']][spec['output']]
        return spec['value']

    resolved = {'mode': mode}
    for key, spec in attn_map['params'].items():
        resolved[key] = _resolve(spec)
    return resolved
def mkdir_or_exist(dir_name, mode=511):
    """Create ``dir_name`` (with parents) if missing; no-op for '' or an
    existing directory.

    ``mode`` defaults to 511 == 0o777 (subject to the process umask).
    """
    if (dir_name == ''):
        return
    dir_name = osp.expanduser(dir_name)
    if six.PY3:
        os.makedirs(dir_name, mode=mode, exist_ok=True)
    elif (not osp.isdir(dir_name)):
        # Python 2 has no exist_ok; check-then-create instead (races possible).
        os.makedirs(dir_name, mode=mode)
def test_security_definition_parameter(testdir, empty_open_api_2_schema):
    """Check that a schema with a global apiKey security definition still
    generates and runs a passing test."""
    empty_open_api_2_schema['paths'] = {'/test': {'post': {'parameters': [{'name': 'body', 'in': 'body', 'schema': {'type': 'object', 'example': {'foo': 'bar'}}}], 'responses': {'200': {'description': 'OK'}}}}}
    # Declare an API-key header auth scheme and require it globally.
    empty_open_api_2_schema['securityDefinitions'] = {'token': {'type': 'apiKey', 'name': 'Authorization', 'in': 'header'}}
    empty_open_api_2_schema['security'] = [{'token': []}]
    # NOTE(review): the template string below looks garbled by extraction —
    # the schemathesis parametrize decorator appears mangled into '\()\('.
    # Confirm against the original test file; left byte-identical here.
    testdir.make_test('\()\(phases=[Phase.explicit])\ndef test_(case):\n    pass\n    ', schema=empty_open_api_2_schema)
    result = testdir.runpytest('-v')
    result.assert_outcomes(passed=1)
def distributed_main(device_id, configuration, predict=False):
    """Per-process entry point for distributed runs.

    Pins the process to ``device_id``, derives its global rank from
    ``start_rank`` when not already set, then delegates to ``main``.
    """
    config = configuration.get_config()
    config.device_id = device_id
    if (config.distributed.rank is None):
        # Rank = node's starting rank + local device index.
        config.distributed.rank = (config.start_rank + device_id)
    main(configuration, init_distributed=True, predict=predict)
class SplinterConfig(PretrainedConfig):
    """Configuration for Splinter models (BERT-like hyperparameters plus a
    dedicated [QUESTION] token id)."""
    model_type = 'splinter'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, question_token_id=104, **kwargs):
        """Store the transformer hyperparameters; extra kwargs go to the base
        PretrainedConfig (which also receives pad_token_id)."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # Id of the special [QUESTION] token used for span selection.
        self.question_token_id = question_token_id
def parse_variable_value(value):
    """Parse a string into the most specific value it represents.

    'True'/'False'/'None' become the corresponding singletons, pure digit
    strings become ints, float-parsable strings become floats, and anything
    else is returned unchanged.
    """
    literals = {'True': True, 'False': False, 'None': None}
    if value in literals:
        return literals[value]
    if value.isdigit():
        return int(value)
    try:
        return float(value)
    except Exception:
        return value
def make_sdfg(implementation, dtype, id=0, in_shape=[n, n], out_shape=[n, n], in_subset='0:n, 0:n', out_subset='0:n, 0:n', overwrite=False, getri=True):
    """Build an SDFG exercising the matrix-inverse (Inv) library node.

    Args:
        implementation: which Inv implementation to select.
        dtype: dace element type of the arrays.
        id: suffix to keep generated SDFG names unique.
        in_shape/out_shape, in_subset/out_subset: array shapes and memlet
            subsets for input and output.
        overwrite: invert in place into 'xin' instead of writing 'xout'.
        getri: use the GETRI-based inversion path of the node.

    NOTE(review): the mutable list defaults ([n, n]) are shared across calls;
    harmless here since they are only read, but worth confirming.
    """
    sdfg = dace.SDFG('linalg_inv_{}_{}_{}'.format(implementation, dtype.__name__, id))
    sdfg.add_symbol('n', dace.int64)
    state = sdfg.add_state('dataflow')
    sdfg.add_array('xin', in_shape, dtype)
    if (not overwrite):
        sdfg.add_array('xout', out_shape, dtype)
    xin = state.add_read('xin')
    # In-place inversion writes back to 'xin'.
    if overwrite:
        xout = state.add_write('xin')
    else:
        xout = state.add_write('xout')
    inv_node = Inv('inv', overwrite_a=overwrite, use_getri=getri)
    inv_node.implementation = implementation
    state.add_memlet_path(xin, inv_node, dst_conn='_ain', memlet=Memlet.simple(xin, in_subset, num_accesses=(n * n)))
    state.add_memlet_path(inv_node, xout, src_conn='_aout', memlet=Memlet.simple(xout, out_subset, num_accesses=(n * n)))
    return sdfg
class BigDataset(torch.utils.data.Dataset):
    """Dataset over every file in a flat image folder.

    Images are read with imageio and returned as CHW uint8 tensors.
    """

    def __init__(self, folder):
        self.folder = folder
        # NOTE: listdir order is arbitrary; indexing is stable only within a run.
        self.image_paths = os.listdir(folder)

    def __getitem__(self, index):
        # Fixed: os.path.join replaces raw string concatenation, which broke
        # when `folder` was given without a trailing separator.
        path = os.path.join(self.folder, self.image_paths[index])
        img = imageio.imread(path)
        # HWC -> CHW for torch consumers.
        img = torch.from_numpy(img).permute(2, 0, 1)
        return img

    def __len__(self):
        return len(self.image_paths)
def import_nose():
    """Import and return the nose module, requiring version >= 1.0.0.

    Raises:
        ImportError: if nose is missing or older than the minimum version.
    """
    nose_is_good = True
    minimum_nose_version = (1, 0, 0)
    try:
        import nose
    except ImportError:
        nose_is_good = False
    else:
        if (nose.__versioninfo__ < minimum_nose_version):
            nose_is_good = False
    if (not nose_is_good):
        # Fixed: the original message was an unterminated string literal
        # ("... - see % minimum_nose_version"), a syntax error; restored to a
        # formatted two-part message.
        msg = ('Need nose >= %d.%d.%d for tests - see '
               'https://pypi.org/project/nose' % minimum_nose_version)
        raise ImportError(msg)
    return nose
def create_emb_for_encoder_and_decoder(share_vocab, src_vocab_size, tgt_vocab_size, src_embed_size, tgt_embed_size, dtype=tf.float32, num_partitions=0, src_vocab_file=None, tgt_vocab_file=None, src_embed_file=None, tgt_embed_file=None, scope=None):
    """Create (or load pretrained) embedding variables for encoder and decoder.

    With ``share_vocab`` a single embedding matrix is shared by both sides
    (vocab and embedding sizes must match); otherwise separate variables are
    created under 'encoder'/'decoder' sub-scopes.

    Returns:
        (embedding_encoder, embedding_decoder) variables.

    Raises:
        ValueError: if partitioning is combined with pretrained embedding
            files, or shared vocab sizes differ.
    """
    partitioner = None
    if (num_partitions > 0):
        partitioner = parallax.get_partitioner(num_partitions)
    # Pretrained embeddings cannot be variable-partitioned.
    if ((src_embed_file or tgt_embed_file) and partitioner):
        raise ValueError("Can't set num_partitions > 1 when using pretrained embedding")
    with tf.variable_scope((scope or 'embeddings'), dtype=dtype, partitioner=partitioner) as scope:
        if share_vocab:
            if (src_vocab_size != tgt_vocab_size):
                raise ValueError(('Share embedding but different src/tgt vocab sizes %d vs. %d' % (src_vocab_size, tgt_vocab_size)))
            assert (src_embed_size == tgt_embed_size)
            utils.print_out('# Use the same embedding for source and target')
            # Prefer the source file; fall back to the target one.
            vocab_file = (src_vocab_file or tgt_vocab_file)
            embed_file = (src_embed_file or tgt_embed_file)
            embedding_encoder = _create_or_load_embed('embedding_share', vocab_file, embed_file, src_vocab_size, src_embed_size, dtype)
            embedding_decoder = embedding_encoder
        else:
            with tf.variable_scope('encoder', partitioner=partitioner):
                embedding_encoder = _create_or_load_embed('embedding_encoder', src_vocab_file, src_embed_file, src_vocab_size, src_embed_size, dtype)
            with tf.variable_scope('decoder', partitioner=partitioner):
                embedding_decoder = _create_or_load_embed('embedding_decoder', tgt_vocab_file, tgt_embed_file, tgt_vocab_size, tgt_embed_size, dtype)
    return (embedding_encoder, embedding_decoder)
def add_preprocess_args(parser):
    """Register the data-preprocessing CLI options on ``parser``.

    All options are added to a dedicated 'Preprocessing' argument group;
    returns the parser for chaining.
    """
    group = parser.add_argument_group('Preprocessing')
    group.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')
    group.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')
    group.add_argument('--trainpref', metavar='FP', default=None, help='train file prefix (also used to build dictionaries)')
    group.add_argument('--validpref', metavar='FP', default=None, help='comma separated, valid file prefixes (words missing from train set are replaced with <unk>)')
    group.add_argument('--testpref', metavar='FP', default=None, help='comma separated, test file prefixes (words missing from train set are replaced with <unk>)')
    group.add_argument('--align-suffix', metavar='FP', default=None, help='alignment file suffix')
    group.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir')
    group.add_argument('--thresholdtgt', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown')
    group.add_argument('--thresholdsrc', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown')
    group.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary')
    group.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary')
    group.add_argument('--nwordstgt', metavar='N', default=(- 1), type=int, help='number of target words to retain')
    group.add_argument('--nwordssrc', metavar='N', default=(- 1), type=int, help='number of source words to retain')
    group.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)')
    # Fixed: this option was added via `parser` while every sibling option
    # uses the 'Preprocessing' group, which scattered it in --help output.
    group.add_argument('--dataset-impl', metavar='FORMAT', default='mmap', choices=get_available_dataset_impl(), help='output dataset implementation')
    group.add_argument('--joined-dictionary', action='store_true', help='Generate joined dictionary')
    # Fixed: the help text previously contained a hard line break and stray
    # indentation introduced by source-line wrapping.
    group.add_argument('--only-source', action='store_true', help='Only process the source language')
    group.add_argument('--padding-factor', metavar='N', default=8, type=int, help='Pad dictionary size to be multiple of N')
    group.add_argument('--workers', metavar='N', default=1, type=int, help='number of parallel workers')
    return parser
def make_attention_dot(func):
    """Wrap a config-building function so the produced config uses dot
    attention.

    The wrapper sets the decoder attention type to 'dot' and renames the
    experiment directory by substituting 'tanh' with 'dot'.
    """
    def wrapped(dataset):
        config = func(dataset)
        config['model']['decoder']['attention']['type'] = 'dot'
        exp_dirname = config['training']['exp_dirname']
        config['training']['exp_dirname'] = exp_dirname.replace('tanh', 'dot')
        return config
    return wrapped
def _write_properties_to_json(buf, min_weight, max_weight, sign, direct, self_loop, multigraph):
    """Write the opening '"properties"' object of the graph JSON to ``buf``.

    Booleans are lowercased to valid JSON literals; 'weighted' is derived via
    ``is_weighted``. The emitted bytes match the original hand-concatenated
    layout exactly (tabs, commas, trailing '},').
    """
    buf.write('{\n\t"properties": {\n')
    flags = [
        ('directed', direct),
        ('signed', sign),
        ('multigraph', multigraph),
        ('weighted', is_weighted(max_weight, min_weight, sign)),
    ]
    for key, flag in flags:
        buf.write('\t\t"' + key + '": ' + str(flag).lower() + ',\n')
    # Last entry has no trailing comma and closes the properties object.
    buf.write('\t\t"self_loop": ' + str(self_loop).lower() + '\n\t},')
class ViltFeatureExtractor(ViltImageProcessor):
    """Deprecated alias of ``ViltImageProcessor``.

    Kept for backward compatibility only; instantiating it emits a
    FutureWarning and forwards all arguments to the parent class.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn('The class ViltFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use ViltImageProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs)
def test_kde(seed: int, dataset: str, version: str, workload: str, params: Dict[(str, Any)], overwrite: bool):
    """Train a feedback-KDE cardinality estimator on the workload's training
    queries and run the standard evaluation."""
    # An explicit 'version' in params overrides the argument.
    table = load_table(dataset, (params.get('version') or version))
    train_num = params['train_num']
    L.info('load training workload...')
    queries = load_queryset(dataset, workload)['train'][:train_num]
    L.info('construct postgres estimator...')
    estimator = FeedbackKDE(table, ratio=params['ratio'], train_num=train_num, seed=seed)
    L.info(f'start training with {train_num} queries...')
    start_stmp = time.time()
    estimator.train_batch(queries)
    dur_min = ((time.time() - start_stmp) / 60)
    # NOTE(review): '{dur_min:1f}' was probably meant to be '{dur_min:.1f}'
    # (one decimal place); left as-is since it is a runtime string.
    L.info(f'built kde estimator: {estimator}, using {dur_min:1f} minutes')
    run_test(dataset, version, workload, estimator, overwrite)
# NOTE(review): the line below is the tail of a truncated decorator —
# presumably ``@test_utils.test(require=ti.extension.quant)``; confirm
# against the original file.
_utils.test(require=ti.extension.quant)
def test_struct_for_quant():
    """Struct-for over a bit-packed 13-bit signed quantized field."""
    n = 8
    qi13 = ti.types.quant.int(13, True)
    x = ti.field(dtype=qi13)
    # Pack the quantized field into a 32-bit container.
    bitpack = ti.BitpackedFields(max_num_bits=32)
    bitpack.place(x)
    ti.root.dense(ti.i, n).place(bitpack)

    # NOTE(review): this function sums loop *indices* (0..7 == 28) and appears
    # to be missing its ``@ti.kernel`` decorator — confirm.
    def count() -> int:
        tot = 0
        for i in x:
            tot += i
        return tot
    assert (count() == 28)
class TestNext(DebugStepperTestCase):
    """Exercise the ``cy next`` gdb command of the Cython debugger."""

    def test_cython_next(self):
        # Stop at the 'c = 2' line, then step over the subsequent statements.
        self.break_and_run('c = 2')
        lines = ('int(10)', 'puts("spam")', 'os.path.join("foo", "bar")', 'some_c_function()')
        for line in lines:
            gdb.execute('cy next')
            # Each step must land on the expected source line.
            self.lineno_equals(line)
def pbar_logger(iterable=None, desc='train', **tqdm_mkwargs):
    """Create a tqdm progress bar and return a step callback updating it.

    Explicit keys in ``tqdm_mkwargs`` win over the ``iterable``/``desc``
    arguments. The callback advances the bar to ``step.next_step`` and shows
    the current loss as a postfix.
    """
    # Shallow-copy so the caller's kwargs dict is not mutated.
    kwargs = copy.copy(tqdm_mkwargs)
    if ('desc' not in kwargs):
        kwargs['desc'] = desc
    if ('iterable' not in kwargs):
        kwargs['iterable'] = iterable
    pbar = tqdm(**kwargs)

    def update_pbar(step: StepInfo):
        # Advance by the delta so repeated calls stay consistent.
        pbar.update((step.next_step - pbar.n))
        pbar.set_postfix(loss=jnp_to_python(step.loss))
    return update_pbar
def get_candidate_tables(format_sql, schema):
    """Extract candidate table ids referenced by dotted tokens in a SQL string.

    Any whitespace-separated token containing '.' is treated as
    ``table.column``; the table prefixes are matched case-insensitively
    against ``schema['table_names_original']``.

    Returns:
        (candidate_table_ids, original_table_names).
    """
    # Unique table prefixes of all dotted tokens.
    prefixes = {tok.split('.')[0] for tok in format_sql.split() if '.' in tok}
    lowered = [name.lower() for name in schema['table_names_original']]
    # list.index raises ValueError for unknown tables, so -1 can never occur;
    # the assert is kept to mirror the original intent.
    ids = [lowered.index(name) for name in prefixes]
    assert (-1) not in ids
    return (ids, schema['table_names_original'])
class Seq2SeqTrainingArguments(TrainingArguments):
    """TrainingArguments extended with sequence-to-sequence options.

    NOTE(review): the ``field(...)`` defaults imply this class is decorated
    with ``@dataclass`` at the definition site (not visible in this excerpt)
    — confirm.
    """
    # Label-smoothing epsilon; 0.0 disables smoothing.
    label_smoothing: Optional[float] = field(default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'})
    # Use the sortish sampler (length-bucketed batching).
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to SortishSamler or not.'})
    # Run generate() during evaluation for ROUGE/BLEU-style metrics.
    predict_with_generate: bool = field(default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'})
    # The following None defaults mean "keep the model config's value".
    encoder_layerdrop: Optional[float] = field(default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'})
    decoder_layerdrop: Optional[float] = field(default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'})
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'})
    attention_dropout: Optional[float] = field(default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'})
    lr_scheduler: Optional[str] = field(default='linear', metadata={'help': f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}'})
def dict_mean_and_std_err(d):
    """Apply ``mean_and_std_err`` to every value of ``d``.

    Every value must be an iterable of samples; returns a new dict mapping
    each key to its (mean, standard-error) summary.
    """
    assert all(isinstance(samples, Iterable) for samples in d.values())
    return {key: mean_and_std_err(samples) for (key, samples) in d.items()}
def test_pyro_bayesian_train_sample_mixin():
    """Smoke-test training and posterior sampling of the Pyro Bayesian
    regression model on synthetic data."""
    adata = synthetic_iid()
    BayesianRegressionModel.setup_anndata(adata)
    mod = BayesianRegressionModel(adata)
    mod.train(max_epochs=2, batch_size=128, lr=0.01)
    # Guide parameters should have the expected (1, n_features) shape.
    assert (list(mod.module.guide.state_dict()['locs.linear.weight_unconstrained'].shape) == [1, 100])
    # One posterior draw per requested sample.
    samples = mod.sample_posterior(num_samples=10, batch_size=None, return_samples=True)
    assert (len(samples['posterior_samples']['sigma']) == 10)
def get(config_path, trained: bool = False, device: Optional[str] = None):
    """Build a model from a model-zoo config path, optionally loading trained weights.

    Args:
        config_path: config file path understood by ``get_config``.
        trained: whether ``get_config`` should point at pretrained weights.
        device: target device; defaults to CUDA when available, else 'cpu'.

    Returns:
        The constructed model with weights loaded per the config.
    """
    cfg = get_config(config_path, trained)
    if device is None and not torch.cuda.is_available():
        # No GPU present and no explicit request: fall back to CPU.
        device = 'cpu'
    if device is not None and isinstance(cfg, CfgNode):
        cfg.MODEL.DEVICE = device
    # Old-style yacs config: build + load through the config fields.
    if isinstance(cfg, CfgNode):
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
        return model
    # New-style (lazy) config: instantiate, then move/load explicitly.
    model = instantiate(cfg.model)
    if device is not None:
        model = model.to(device)
    if 'train' in cfg and 'init_checkpoint' in cfg.train:
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    return model
def load_data_dblp(path: str = 'dataset/DBLP4057_GAT_with_idx_tra200_val_800.mat',
                   train_size: float = 0.8,
                   meta: bool = True) -> Tuple[list, np.ndarray, list, np.ndarray]:
    """Load the DBLP heterogeneous-graph dataset from a ``.mat`` file and split it.

    Fixes two annotation defects from the original: ``train_size`` was annotated
    ``int`` despite its float default, and the return annotation used
    ``np.array`` (a function) instead of the type ``np.ndarray``.

    Args:
        path: path to the MATLAB file containing labels, features and meta-path
            adjacency matrices.
        train_size: fraction of samples kept out of the test split.
        meta: if True, load all three meta-path networks (APA, APCPA, APTPA);
            otherwise only APA.

    Returns:
        (rownetworks, features, split_ids, labels) where split_ids is
        [X_train, y_train, X_val, y_val, X_test, y_test].
    """
    data = sio.loadmat(path)
    (truelabels, features) = (data['label'], data['features'].astype(float))
    N = features.shape[0]
    # Subtracting the identity removes self-loops from each adjacency matrix.
    if not meta:
        rownetworks = [(data['net_APA'] - np.eye(N))]
    else:
        rownetworks = [(data['net_APA'] - np.eye(N)), (data['net_APCPA'] - np.eye(N)), (data['net_APTPA'] - np.eye(N))]
    y = truelabels
    index = np.arange(len(y))
    # Stratified split: carve out the test set first, then 20% of the remaining
    # training indices become the validation set (fixed seed for reproducibility).
    (X_train, X_test, y_train, y_test) = train_test_split(index, y, stratify=y, test_size=(1 - train_size), random_state=48, shuffle=True)
    (X_train, X_val, y_train, y_val) = train_test_split(X_train, y_train, stratify=y_train, test_size=0.2, random_state=48, shuffle=True)
    split_ids = [X_train, y_train, X_val, y_val, X_test, y_test]
    return (rownetworks, features, split_ids, np.array(y))
class RotatedCOCOEvaluator(COCOEvaluator):
    """COCO-style evaluator specialized for rotated bounding boxes.

    Only the 'bbox' task is supported; boxes are kept in their 5-element
    rotated form (no XYWH conversion) unless a prediction arrives as a
    4-element axis-aligned box.
    """

    def process(self, inputs, outputs):
        # Convert each (input, output) pair into a JSON-serializable prediction
        # record and accumulate it on self._predictions for later evaluation.
        for (input, output) in zip(inputs, outputs):
            prediction = {'image_id': input['image_id']}
            if ('instances' in output):
                # Move predictions off the GPU before serialization.
                instances = output['instances'].to(self._cpu_device)
                prediction['instances'] = self.instances_to_json(instances, input['image_id'])
            if ('proposals' in output):
                prediction['proposals'] = output['proposals'].to(self._cpu_device)
            self._predictions.append(prediction)

    def instances_to_json(self, instances, img_id):
        """Convert an Instances object into a list of COCO-style result dicts."""
        num_instance = len(instances)
        if (num_instance == 0):
            return []
        boxes = instances.pred_boxes.tensor.numpy()
        # 4-element boxes are axis-aligned; convert XYXY -> XYWH for COCO.
        # 5-element rotated boxes are left as-is.
        if (boxes.shape[1] == 4):
            boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
        boxes = boxes.tolist()
        scores = instances.scores.tolist()
        classes = instances.pred_classes.tolist()
        results = []
        for k in range(num_instance):
            result = {'image_id': img_id, 'category_id': classes[k], 'bbox': boxes[k], 'score': scores[k]}
            results.append(result)
        return results

    def _eval_predictions(self, predictions, img_ids=None):
        # Flatten per-image instance lists into one list of COCO results.
        self._logger.info('Preparing results for COCO format ...')
        coco_results = list(itertools.chain(*[x['instances'] for x in predictions]))
        # Map contiguous training category ids back to original dataset ids.
        if hasattr(self._metadata, 'thing_dataset_id_to_contiguous_id'):
            reverse_id_mapping = {v: k for (k, v) in self._metadata.thing_dataset_id_to_contiguous_id.items()}
            for result in coco_results:
                result['category_id'] = reverse_id_mapping[result['category_id']]
        if self._output_dir:
            file_path = os.path.join(self._output_dir, 'coco_instances_results.json')
            self._logger.info('Saving results to {}'.format(file_path))
            with PathManager.open(file_path, 'w') as f:
                f.write(json.dumps(coco_results))
                f.flush()
        if (not self._do_evaluation):
            self._logger.info('Annotations are not available for evaluation.')
            return
        self._logger.info('Evaluating predictions ...')
        # Rotated evaluation only implements the bbox task.
        assert ((self._tasks is None) or (set(self._tasks) == {'bbox'})), '[RotatedCOCOEvaluator] Only bbox evaluation is supported'
        coco_eval = (self._evaluate_predictions_on_coco(self._coco_api, coco_results) if (len(coco_results) > 0) else None)
        task = 'bbox'
        res = self._derive_coco_results(coco_eval, task, class_names=self._metadata.get('thing_classes'))
        self._results[task] = res

    def _evaluate_predictions_on_coco(self, coco_gt, coco_results):
        """Run RotatedCOCOeval on the given ground truth and detection results."""
        assert (len(coco_results) > 0)
        coco_dt = coco_gt.loadRes(coco_results)
        coco_eval = RotatedCOCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        return coco_eval
def create_window_3d(window_size, channel=1):
    """Build a 3-D Gaussian window for SSIM-style volumetric filtering.

    Fix: the source was missing the matrix-multiplication operator between
    ``_2D_window.unsqueeze(2)`` and ``_1D_window.t()`` (a syntax error);
    restoring ``@`` yields the outer product (W, W, 1) @ (1, W) -> (W, W, W).

    Args:
        window_size: side length W of the cubic window.
        channel: number of channels the window is expanded over.

    Returns:
        A CUDA tensor of shape (1, channel, W, W, W).
    """
    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)          # (W, 1)
    _2D_window = _1D_window.mm(_1D_window.t())                    # (W, W)
    _3D_window = _2D_window.unsqueeze(2) @ _1D_window.t()         # (W, W, W)
    window = _3D_window.expand(1, channel, window_size, window_size, window_size).contiguous().cuda()
    return window
class Dataset(GraphDatasetBase, DatasetBase):
    """Graph dataset exposing record metadata and padding conventions.

    Missing node/edge features are padded with ``mask_value``; ragged
    dimensions use the sentinel values declared in ``get_paddings``.
    """

    def __init__(self, max_length=75, mask_value=(- 1.0), **kwargs):
        super().__init__(max_length=max_length, **kwargs)
        # Sentinel written into padded feature slots.
        self.mask_value = mask_value

    def get_dataset_name(self):
        return dataset_name

    def get_record_names(self):
        return record_names

    def get_record_keys(self):
        return record_keys

    def get_record_types(self):
        return record_types

    def get_record_shapes(self):
        return record_shapes

    def get_paddings(self):
        # Per-field padding values used when batching ragged records.
        return {
            'record_name': b'',
            'num_nodes': 0,
            'edges': (- 1),
            'node_features': self.mask_value,
            'edge_features': self.mask_value,
            'target': 0,
        }

    def get_padded_shapes(self):
        # Per-field padded shapes; None marks a ragged (variable) dimension.
        return {
            'record_name': [],
            'num_nodes': [],
            'edges': [None, 2],
            'node_features': [self.max_length, 3],
            'edge_features': [None, 1],
            'target': [],
        }
def vstack_surfaces(surfaces, background_color=None):
    """Stack pygame surfaces vertically onto one new surface.

    The result is as wide as the widest input and as tall as the sum of all
    input heights; each surface is blitted flush-left, top to bottom.
    """
    heights = [s.get_height() for s in surfaces]
    widest = max(s.get_width() for s in surfaces)
    stacked = pygame.surface.Surface((widest, sum(heights)))
    if background_color:
        stacked.fill(background_color)
    y = 0
    for surface, height in zip(surfaces, heights):
        stacked.blit(surface, (0, y))
        y += height
    return stacked
def get_bond_feature_dims():
    """Return the cardinality of each bond-level categorical feature list."""
    bond_feature_keys = [
        'possible_bond_type_list',
        'possible_bond_stereo_list',
        'possible_is_conjugated_list',
    ]
    return [len(allowable_features[key]) for key in bond_feature_keys]
def evaluate(documents: List[Document], feature_extractor_cls: Type[BaseFeatureExtractor], k_folds: int, prediction: bool = False):
    """Extract features, run k-fold prediction, and score structure and labels.

    Returns the metrics dict, or ``(metrics, prediction_jsons)`` when
    ``prediction`` is True.
    """
    print('Extracting features from documents')
    documents = [feature_extractor_cls.append_features_to_document(document) for document in tqdm.tqdm(documents)]
    print(f'Extracted {sum(map((lambda d: d.n_blocks), documents))} lines from {len(documents)} documents with label distribution: {Counter(sum(map((lambda d: d.labels), documents), []))} for evaluation.')
    print(f'Extracted {documents[0].n_features}-dim features and {documents[0].n_pointer_features}-dim pointer features.')
    predicted_docs = predictor.k_fold_train_predict(documents, n_splits=k_folds)
    metrics = {
        'structure': evaluate_structure(documents, predicted_docs),
        'labels': evaluate_labels(documents, predicted_docs),
    }
    if not prediction:
        return metrics
    return (metrics, _create_prediction_jsons(documents, predicted_docs))
# NOTE(review): the bare `.core` below is the remnant of a decorator whose
# leading text was lost in extraction (plausibly `@pytest.mark.core`) —
# restore it from the original file; as written it is a syntax error.
.core
def test_init_args(model):
    # `model` is presumably a fixture exposing the constructor arguments it
    # was built with via `_init_args` — TODO confirm the fixture definition.
    args = model._init_args
    assert (args['rank'] == 2)
    assert (args['implicit_prefs'] is False)
    assert (args['seed'] == 42)
def process_book(header_annot_dir, lemma_dir, tree_dir, book_id):
    """Sentencize and paragraphize one book through a running CoreNLP server.

    Writes ``<tree_dir>/<book_id>.xml`` (the paragraphized tree) and
    ``<lemma_dir>/<book_id>.pkl`` (the pickled lemma dict).

    Returns:
        (book_id, status): status is 'Exists' when outputs already exist,
        'Success' on completion, or the caught exception on failure.
    """
    # Skip books whose tree and lemma outputs both already exist.
    if (os.path.exists(os.path.join(tree_dir, (book_id + '.xml'))) and os.path.exists(os.path.join(lemma_dir, (book_id + '.pkl')))):
        return (book_id, 'Exists')
    os.environ['CORENLP_HOME'] = '~/stanford_corenlp/stanford-corenlp-full-2018-10-05'
    try:
        # Fix: `max_char_length=` had no value in the source (a syntax error).
        # 100000 is the stanza CoreNLPClient default — confirm the original
        # value against the source repository.
        with CoreNLPClient(annotators=['tokenize', 'lemma'], timeout=30000, max_char_length=100000, be_quiet=True, start_server=False) as client:
            (tree, para_end_sentences, lemma_dict) = sentencize(header_annot_dir, client, book_id)
            tree2 = paragraphize(tree, para_end_sentences)
            filename = os.path.join(tree_dir, (book_id + '.xml'))
            tree2.write(filename, pretty_print=True)
            with open(os.path.join(lemma_dir, (book_id + '.pkl')), 'wb') as f:
                pickle.dump(lemma_dict, f)
    except Exception as e:
        # Best-effort batch processing: report the failure, don't propagate.
        print(book_id, e)
        return (book_id, e)
    print(book_id, 'Success!')
    return (book_id, 'Success')
class AspectRatioGroupedDataset(data.IterableDataset):
    """Group a stream of image dicts into batches of similar aspect ratio.

    Items are routed into one of two buckets — landscape (w > h) or
    portrait/square — and a bucket is emitted as a batch whenever it reaches
    ``batch_size``.  Trailing items that never fill a bucket are dropped.
    """

    def __init__(self, dataset, batch_size):
        self.dataset = dataset
        self.batch_size = batch_size
        # bucket 0: landscape items; bucket 1: everything else.
        self._buckets = [[] for _ in range(2)]

    def __iter__(self):
        for item in self.dataset:
            width, height = item['width'], item['height']
            bucket = self._buckets[0 if width > height else 1]
            bucket.append(item)
            if len(bucket) == self.batch_size:
                # Yield a copy, then empty the bucket in place.
                yield bucket[:]
                del bucket[:]
# NOTE(review): the tuple expression below is the argument list of a lost
# skip decorator — plausibly `@unittest.skipIf(not have_sympy, 'SymPy not
# installed')` or the pytest equivalent; restore it from the original file.
((not have_sympy), 'SymPy not installed')
def test_conv6b():
    # Verify that SymPy expressions sympify to equivalent native Symbol
    # expressions for division, multiplication, addition and subtraction.
    x = sympy.Symbol('x')
    y = sympy.Symbol('y')
    assert (sympify((x / 3)) == (Symbol('x') / 3))
    assert (sympify((3 * x)) == (3 * Symbol('x')))
    assert (sympify((3 + x)) == (3 + Symbol('x')))
    assert (sympify((3 - x)) == (3 - Symbol('x')))
    assert (sympify((x / y)) == (Symbol('x') / Symbol('y')))
class ASPP2d(Model):
    """Atrous Spatial Pyramid Pooling head over 2-D feature maps.

    Runs four parallel atrous convolutions (one 1x1 plus three dilated 3x3)
    and a global-average-pool branch, concatenates the five branch outputs,
    and projects them to a single channel.

    NOTE(review): `emb_dim` is accepted but never used in this class — confirm
    whether it belongs to a removed feature.  Also, when `dense=True` none of
    the `aspp*` submodules are created, so `forward` would raise
    AttributeError — confirm the intended dense path.  The mutable default
    `dilations=[1, 6, 12, 18]` is shared across calls; harmless here since it
    is only read, but worth noting.
    """

    def __init__(self, inplanes, emb_dim, dilations=[1, 6, 12, 18], fmaps=48, dense=False):
        super(ASPP2d, self).__init__()
        if (not dense):
            # 1x1 conv plus three 3x3 convs with increasing dilation; padding
            # equals dilation so spatial size is preserved.
            self.aspp1 = _ASPPModule2d(inplanes, fmaps, 1, padding=0, dilation=dilations[0])
            self.aspp2 = _ASPPModule2d(inplanes, fmaps, 3, padding=dilations[1], dilation=dilations[1])
            self.aspp3 = _ASPPModule2d(inplanes, fmaps, 3, padding=dilations[2], dilation=dilations[2])
            self.aspp4 = _ASPPModule2d(inplanes, fmaps, 3, padding=dilations[3], dilation=dilations[3])
        # Image-level context branch: global average pool -> 1x1 conv.
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Conv2d(inplanes, fmaps, 1, stride=1, bias=False), nn.BatchNorm2d(fmaps), nn.ReLU())
        # Fuse the five concatenated branches down to a single channel.
        self.conv1 = nn.Conv2d((fmaps * 5), 1, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(1)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self._init_weight()

    def forward(self, x):
        # Input arrives without a channel dim; add one (B, H, W) -> (B, 1, H, W).
        x = x.unsqueeze(1)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        # Upsample the pooled branch back to the spatial size of the others.
        x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # Drop the channel dim again on the way out: (B, 1, H, W) -> (B, H, W).
        x = self.dropout(x).squeeze(1)
        return x

    def _init_weight(self):
        # Kaiming init for convs; BatchNorm starts as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def unique_tensors(Xs):
    """Deduplicate tensors by `.name`, keeping the first occurrence of each
    name in original order.

    (Equivalent to the np.unique(return_index=True) + sorted-indices approach:
    the sorted first-occurrence indices enumerate names in order of first
    appearance.)
    """
    if len(Xs) <= 1:
        return Xs
    seen = set()
    deduped = []
    for x in Xs:
        if x.name not in seen:
            seen.add(x.name)
            deduped.append(x)
    return deduped
def test_rpad_numpy_array():
    """pad_none on NumpyArray: padding with None along a chosen axis, checking
    both eager results and that the typetracer (shape-only) path yields the
    same Form as the eager path.  Pads whose target length does not exceed the
    existing length are no-ops."""
    # 1-D array: axis 0 padding appends missing values.
    array = ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5]))
    assert (to_list(ak._do.pad_none(array, 10, 0)) == [1.1, 2.2, 3.3, 4.4, 5.5, None, None, None, None, None])
    assert (ak._do.pad_none(array.to_typetracer(), 10, 0).form == ak._do.pad_none(array, 10, 0).form)
    # 2-D array: axis 0 pads rows; axis 1 pads within each row.
    array = ak.contents.numpyarray.NumpyArray(np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]]))
    assert (to_list(ak._do.pad_none(array, 5, 0)) == [[1.1, 2.2, 3.3], [4.4, 5.5, 6.6], None, None, None])
    assert (ak._do.pad_none(array.to_typetracer(), 5, 0).form == ak._do.pad_none(array, 5, 0).form)
    assert (to_list(ak._do.pad_none(array, 5, 1)) == [[1.1, 2.2, 3.3, None, None], [4.4, 5.5, 6.6, None, None]])
    assert (ak._do.pad_none(array.to_typetracer(), 5, 1).form == ak._do.pad_none(array, 5, 1).form)
    # 3-D array (2 x 3 x 5): exercise all three axes.
    array = ak.contents.numpyarray.NumpyArray(np.arange(((2 * 3) * 5), dtype=np.int64).reshape(2, 3, 5))
    assert (to_list(array) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])
    # axis 0: lengths 1 and 2 are no-ops; 3..5 append None entries.
    assert (to_list(ak._do.pad_none(array, 1, 0)) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])
    assert (ak._do.pad_none(array.to_typetracer(), 1, 0).form == ak._do.pad_none(array, 1, 0).form)
    assert (to_list(ak._do.pad_none(array, 2, 0)) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])
    assert (ak._do.pad_none(array.to_typetracer(), 2, 0).form == ak._do.pad_none(array, 2, 0).form)
    assert (to_list(ak._do.pad_none(array, 3, 0)) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]], None])
    assert (ak._do.pad_none(array.to_typetracer(), 3, 0).form == ak._do.pad_none(array, 3, 0).form)
    assert (to_list(ak._do.pad_none(array, 4, 0)) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]], None, None])
    assert (ak._do.pad_none(array.to_typetracer(), 4, 0).form == ak._do.pad_none(array, 4, 0).form)
    assert (to_list(ak._do.pad_none(array, 5, 0)) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]], None, None, None])
    assert (ak._do.pad_none(array.to_typetracer(), 5, 0).form == ak._do.pad_none(array, 5, 0).form)
    # axis 1: lengths 2 and 3 are no-ops; 4 and 5 append None per outer entry.
    assert (to_list(ak._do.pad_none(array, 2, 1)) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])
    assert (to_list(ak._do.pad_none(array, 3, 1)) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])
    assert (ak._do.pad_none(array.to_typetracer(), 3, 1).form == ak._do.pad_none(array, 3, 1).form)
    assert (to_list(ak._do.pad_none(array, 4, 1)) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], None], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29], None]])
    assert (ak._do.pad_none(array.to_typetracer(), 4, 1).form == ak._do.pad_none(array, 4, 1).form)
    assert (to_list(ak._do.pad_none(array, 5, 1)) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], None, None], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29], None, None]])
    assert (ak._do.pad_none(array.to_typetracer(), 5, 1).form == ak._do.pad_none(array, 5, 1).form)
    # axis 2 (innermost): lengths 2 and 3 are no-ops; 7 pads each inner list.
    assert (to_list(ak._do.pad_none(array, 3, 2)) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])
    assert (ak._do.pad_none(array.to_typetracer(), 3, 2).form == ak._do.pad_none(array, 3, 2).form)
    assert (to_list(ak._do.pad_none(array, 7, 2)) == [[[0, 1, 2, 3, 4, None, None], [5, 6, 7, 8, 9, None, None], [10, 11, 12, 13, 14, None, None]], [[15, 16, 17, 18, 19, None, None], [20, 21, 22, 23, 24, None, None], [25, 26, 27, 28, 29, None, None]]])
    assert (ak._do.pad_none(array.to_typetracer(), 7, 2).form == ak._do.pad_none(array, 7, 2).form)
    assert (to_list(ak._do.pad_none(array, 2, 2)) == [[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])
    assert (ak._do.pad_none(array.to_typetracer(), 2, 2).form == ak._do.pad_none(array, 2, 2).form)
def create_wiki_data(tokenizer_name: str, max_seq_length: int, short_seq_prob: float = 0.0):
    """Build a pre-training corpus from English Wikipedia: sentence-split each
    article, tokenize sentence-by-sentence, then greedily pack whole sentences
    into token blocks of roughly ``max_seq_length`` (minus special tokens).

    Args:
        tokenizer_name: Hugging Face tokenizer identifier.
        max_seq_length: maximum sequence length including special tokens.
        short_seq_prob: probability of targeting a shorter random block length
            (between 3 and the full target) instead of the full length.

    Returns:
        A datasets.Dataset whose 'token_ids' column holds the packed blocks.
    """
    import nltk
    nltk.download('punkt')
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    # Room left after reserving space for single-sequence special tokens.
    target_length = (max_seq_length - tokenizer.num_special_tokens_to_add(pair=False))

    def wiki_tokenize_function(examples):
        # Tokenize each article's sentences without special tokens or
        # truncation, keeping only the input ids.
        sentences = []
        for sents in examples['sentences']:
            sentences.append(tokenizer(sents, add_special_tokens=False, truncation=False, return_attention_mask=False, return_token_type_ids=False)['input_ids'])
        return {'input_ids': sentences}

    def sentence_wiki(examples):
        # Split raw article text into sentences with NLTK punkt.
        sentences = nltk.sent_tokenize(examples['text'])
        return {'sentences': sentences}

    def wiki_pad_each_line(examples):
        # Greedily pack whole sentences into blocks; a block is flushed once it
        # reaches its target length, and a new (possibly shorter, randomized)
        # target is drawn.  Non-deterministic due to random.random().
        blocks = []
        for sents in examples['input_ids']:
            curr_block = []
            curr_tgt_len = (target_length if (random.random() > short_seq_prob) else random.randint(3, target_length))
            for sent in sents:
                if (len(curr_block) >= curr_tgt_len):
                    blocks.append(curr_block)
                    curr_block = []
                    curr_tgt_len = (target_length if (random.random() > short_seq_prob) else random.randint(3, target_length))
                curr_block.extend(sent)
            # Flush the trailing partial block for this article, if any.
            if (len(curr_block) > 0):
                blocks.append(curr_block)
        return {'token_ids': blocks}

    # NOTE(review): the config name '.en' looks truncated — Hugging Face
    # 'wikipedia' configs are dated (e.g. '20220301.en'); confirm against the
    # original file.
    wiki = load_dataset('wikipedia', '.en', split='train')
    wiki = wiki.map(sentence_wiki, num_proc=8, remove_columns=['title', 'text'])
    tokenized_wiki = wiki.map(wiki_tokenize_function, num_proc=8, batched=True, remove_columns=['sentences'])
    processed_wiki = tokenized_wiki.map(wiki_pad_each_line, num_proc=8, batched=True, remove_columns=['input_ids'])
    return processed_wiki
class RAG(dspy.Module):
    """Retrieve-then-answer DSPy pipeline: fetch passages for the question,
    then generate an answer from them with chain-of-thought."""

    def __init__(self, num_passages=3):
        super().__init__()
        # Retrieval depth is fixed at construction time.
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)

    def forward(self, question):
        passages = self.retrieve(question).passages
        answer = self.generate_answer(context=passages, question=question).answer
        return dspy.Prediction(context=passages, answer=answer)
def calculate_params_per_stage(params_per_node: Dict[(SimpleNode, float)]) -> Dict[(int, float)]:
    """Sum per-node parameter counts into per-stage totals keyed by stage_id."""
    totals = {}
    for node, param_count in params_per_node.items():
        totals[node.stage_id] = totals.get(node.stage_id, 0) + param_count
    return totals
class EmbeddingNetwork2(nn.Module):
    """Map integer indices to a scalar in (0, 1): embedding -> linear -> sigmoid."""

    def __init__(self, in_space=10, dim=3):
        super().__init__()
        # Keep the embedding as a named attribute as well as the first stage
        # of the sequential head, matching the original layout.
        self.embedding = nn.Embedding(in_space, dim)
        self.seq = nn.Sequential(
            self.embedding,
            nn.Linear(dim, 1),
            nn.Sigmoid(),
        )

    def forward(self, indices):
        # (...,) index tensor -> (..., dim) -> (..., 1) squashed into (0, 1).
        return self.seq(indices)
class Reader(object):
    """Base class for record readers in the net/execution-step framework.

    Subclasses implement `read` (and optionally `reset`); this base provides
    schema plumbing and helpers that wrap `read` into nets and execution steps.
    """

    def __init__(self, schema=None):
        # Schema is optional at construction; if given it must be a Field.
        if (schema is not None):
            assert isinstance(schema, Field)
        self._schema = schema

    def schema(self):
        """Return the reader's schema; it must have been set by now."""
        assert (self._schema is not None), 'Schema not provided for this reader.'
        return self._schema

    def _set_schema(self, schema):
        # Internal hook for subclasses/factories to attach a schema late.
        self._schema = schema

    def setup_ex(self, init_net, finish_net):
        """Hook for adding one-time setup ops; the base reader needs none."""
        pass

    def read_ex(self, local_init_net, local_finish_net):
        # Wrap `read` in a dedicated body net; returns ([nets], should_stop, fields).
        read_net = core.Net('reader_body')
        return (([read_net],) + self.read(read_net))

    def read_record_ex(self, local_init_net, local_finish_net):
        # Like read_ex, but packages the raw blob list into the schema's
        # record structure when a schema is available.
        (nets, should_stop, fields) = self.read_ex(local_init_net, local_finish_net)
        if self._schema:
            fields = from_blob_list(self._schema, fields)
        return (nets, should_stop, fields)

    def read(self, read_net):
        """Add ops to `read_net` producing (should_stop, fields). Abstract."""
        raise NotImplementedError('Readers must implement `read`.')

    def reset(self, net):
        """Rewind the reader to the beginning; optional for subclasses."""
        raise NotImplementedError('This reader cannot be resetted.')

    def read_record(self, read_net):
        # Schema-aware variant of `read`: fields come back as a record.
        (should_stop, fields) = self.read(read_net)
        if self._schema:
            fields = from_blob_list(self._schema, fields)
        return (should_stop, fields)

    def execution_step(self, reader_net_name=None, external_should_stop=None):
        """Build an execution step that runs this reader until it stops.

        If `external_should_stop` is given, the step also stops when that
        blob becomes true (OR-ed with the reader's own stop signal).
        Returns (read_step, fields).
        """
        reader_net = core.Net((reader_net_name or 'reader'))
        (should_stop, fields) = self.read_record(reader_net)
        if (external_should_stop is not None):
            should_stop = reader_net.Or([external_should_stop, should_stop])
        read_step = core.execution_step('{}_step'.format(reader_net_name), reader_net, should_stop_blob=should_stop)
        return (read_step, fields)
def register_Ns3LteRlcTm_methods(root_module, cls):
    """Register Python-binding constructors and methods for ns-3's LteRlcTm
    (transparent-mode RLC) class.

    NOTE(review): this is PyBindGen-style auto-generated registration code —
    do not hand-edit; regenerate from the ns-3 API scan instead.
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::LteRlcTm const &', 'arg0')])
    cls.add_constructor([])
    # Virtual methods overridden from the RLC base class.
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('DoNotifyHarqDeliveryFailure', 'void', [], is_virtual=True)
    cls.add_method('DoNotifyTxOpportunity', 'void', [param('uint32_t', 'bytes'), param('uint8_t', 'layer'), param('uint8_t', 'harqId')], is_virtual=True)
    cls.add_method('DoReceivePdu', 'void', [param('ns3::Ptr< ns3::Packet >', 'p')], is_virtual=True)
    cls.add_method('DoSendMcPdcpSdu', 'void', [param('ns3::EpcX2Sap::UeDataParams', 'params')], is_virtual=True)
    cls.add_method('DoTransmitPdcpPdu', 'void', [param('ns3::Ptr< ns3::Packet >', 'p')], is_virtual=True)
    # Static TypeId accessor.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return