code
stringlengths
101
5.91M
def train_and_val(config, model, callbacks, mixture_num, sub_model_name):
    """Train one sub-model and validate it, reporting total wall-clock time.

    NOTE(review): relies on module-level globals defined elsewhere in the file:
    `model_name`, `num_mon_sites`, `num_mon_inst_train`, `num_unmon_sites_train`,
    `batch_size`, `epochs`, `data_generator`, and `time`.
    """
    print(('training %s %s model' % (model_name, sub_model_name)))
    # 95% / 5% split of all (monitored + unmonitored) training instances.
    train_size = int((((num_mon_sites * num_mon_inst_train) + num_unmon_sites_train) * 0.95))
    train_steps = (train_size // batch_size)
    val_size = int((((num_mon_sites * num_mon_inst_train) + num_unmon_sites_train) * 0.05))
    val_steps = (val_size // batch_size)
    train_time_start = time.time()
    # Step counts are rounded up when the split size is not an exact multiple
    # of the batch size, so the final partial batch is still consumed.
    model.fit_generator(
        data_generator.generate(config, 'training_data', mixture_num),
        steps_per_epoch=(train_steps if ((train_size % batch_size) == 0) else (train_steps + 1)),
        epochs=epochs,
        verbose=2,
        callbacks=callbacks,
        validation_data=data_generator.generate(config, 'validation_data', mixture_num),
        validation_steps=(val_steps if ((val_size % batch_size) == 0) else (val_steps + 1)),
        shuffle=False)
    train_time_end = time.time()
    print(('Total training time: %f' % (train_time_end - train_time_start)))
@_ASSIGNERS.register_module()
class UniformAssigner(BaseAssigner):
    """Uniform matching between anchors/predictions and ground-truth boxes.

    Each gt box is matched to its `match_times` closest predictions and its
    `match_times` closest anchors (L1 distance in cxcywh space). Candidates
    whose anchor IoU is below `pos_ignore_thr` are ignored; unmatched
    predictions whose max IoU exceeds `neg_ignore_thr` are ignored rather
    than treated as negatives.

    Fix vs. original: the registry call was a bare statement immediately
    before `class` (a syntax error) — restored as a decorator.
    """

    def __init__(self, pos_ignore_thr, neg_ignore_thr, match_times=4,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        # k: number of top candidates taken per gt from each cost matrix.
        self.match_times = match_times
        # Positive candidates with anchor IoU below this are ignored.
        self.pos_ignore_thr = pos_ignore_thr
        # Unassigned predictions with max IoU above this are ignored.
        self.neg_ignore_thr = neg_ignore_thr
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self, bbox_pred, anchor, gt_bboxes, gt_bboxes_ignore=None,
               gt_labels=None):
        """Assign each prediction to a gt (index+1), background (0) or ignore (-1)."""
        (num_gts, num_bboxes) = (gt_bboxes.size(0), bbox_pred.size(0))
        # Default: every prediction is background; labels start unset (-1).
        assigned_gt_inds = bbox_pred.new_full((num_bboxes,), 0, dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes,), -1, dtype=torch.long)
        if (num_gts == 0) or (num_bboxes == 0):
            if num_gts == 0:
                assigned_gt_inds[:] = 0
            assign_result = AssignResult(num_gts, assigned_gt_inds, None,
                                         labels=assigned_labels)
            assign_result.set_extra_property(
                'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))
            assign_result.set_extra_property(
                'pos_predicted_boxes', bbox_pred.new_empty((0, 4)))
            assign_result.set_extra_property(
                'target_boxes', bbox_pred.new_empty((0, 4)))
            return assign_result
        # L1 center/size distance costs: predictions-vs-gt and anchors-vs-gt.
        cost_bbox = torch.cdist(bbox_xyxy_to_cxcywh(bbox_pred),
                                bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
        cost_bbox_anchors = torch.cdist(bbox_xyxy_to_cxcywh(anchor),
                                        bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
        C = cost_bbox.cpu()
        C1 = cost_bbox_anchors.cpu()
        # Top-k cheapest candidates per gt from each cost matrix.
        index = torch.topk(C, k=self.match_times, dim=0, largest=False)[1]
        index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]
        indexes = torch.cat((index, index1), dim=1).reshape(-1).to(bbox_pred.device)
        pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)
        anchor_overlaps = self.iou_calculator(anchor, gt_bboxes)
        (pred_max_overlaps, _) = pred_overlaps.max(dim=1)
        (anchor_max_overlaps, _) = anchor_overlaps.max(dim=0)
        # Predictions overlapping a gt too strongly become ignored, not negative.
        ignore_idx = (pred_max_overlaps > self.neg_ignore_thr)
        assigned_gt_inds[ignore_idx] = -1
        # gt index repeated for the 2*k candidate lists (pred-cost + anchor-cost).
        pos_gt_index = torch.arange(
            0, C1.size(1), device=bbox_pred.device).repeat(self.match_times * 2)
        pos_ious = anchor_overlaps[indexes, pos_gt_index]
        pos_ignore_idx = (pos_ious < self.pos_ignore_thr)
        # Stored assignments are 1-based; low-IoU candidates are ignored (-1).
        pos_gt_index_with_ignore = (pos_gt_index + 1)
        pos_gt_index_with_ignore[pos_ignore_idx] = -1
        assigned_gt_inds[indexes] = pos_gt_index_with_ignore
        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes,), -1)
            pos_inds = torch.nonzero(assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None
        assign_result = AssignResult(num_gts, assigned_gt_inds,
                                     anchor_max_overlaps, labels=assigned_labels)
        assign_result.set_extra_property('pos_idx', ~pos_ignore_idx)
        assign_result.set_extra_property('pos_predicted_boxes', bbox_pred[indexes])
        assign_result.set_extra_property('target_boxes', gt_bboxes[pos_gt_index])
        return assign_result
def check_cdf_logcdf(distfn, args, msg):
    """Verify that logcdf(x) agrees with log(cdf(x)) on a grid of quantiles."""
    quantiles = np.array([0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])
    support = distfn.ppf(quantiles, *args)
    support = support[np.isfinite(support)]
    cdf_vals = distfn.cdf(support, *args)
    logcdf_vals = distfn.logcdf(support, *args)
    # Drop points where cdf is exactly 0 (log undefined) or logcdf is -inf/nan.
    cdf_vals = cdf_vals[cdf_vals != 0]
    logcdf_vals = logcdf_vals[np.isfinite(logcdf_vals)]
    msg += ' - logcdf-log(cdf) relationship'
    npt.assert_almost_equal(np.log(cdf_vals), logcdf_vals, decimal=7, err_msg=msg)
def eval_file_stats(target_db):
    """Count documents that have results; collect the ids of those that do not.

    Returns (number of documents with non-empty `results`, list of ids whose
    `results` field equals the empty list).
    """
    with_results = 0
    ids_without_results = []
    for document in target_db.find():
        # Compare against [] explicitly (matches the original contract).
        if document['results'] != []:
            with_results += 1
        else:
            ids_without_results.append(document['id'])
    return (with_results, ids_without_results)
class Filter(abc.ABC):
    """Abstract base class for image filters operating on SimpleITK images."""

    def __init__(self):
        # When True, subclasses may emit progress/diagnostic output.
        self.verbose = False

    def execute(self, image: sitk.Image, params: FilterParams = None) -> sitk.Image:
        """Apply the filter to `image` and return the result.

        Subclasses must override; the base implementation is abstract-by-raise.
        """
        raise NotImplementedError()
def main():
    """setup.py entry point: read __version__ from lidarnerf/__init__.py and install."""
    cmdclass = dict()
    version = None
    init_path = ((_pwd / 'lidarnerf') / '__init__.py')
    with open(init_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    # Scan for the first line of the form: __version__ = "<version>"
    for line in lines:
        match_res = re.match('^__version__ = "(.*)"', line)
        if match_res:
            version = match_res.group(1)
            break
    if (version is None):
        raise RuntimeError(f'Cannot find version from {init_path}')
    print(f'Detected lidarnerf version: {version}')
    _ = setup(name='lidarnerf', version=version,
              description='LiDAR-NeRF: Novel LiDAR View Synthesis via Neural Radiance Fields',
              packages=['lidarnerf', 'lidarnvs'], cmdclass=cmdclass,
              include_package_data=True)
def leaky_integrate_and_fire(mem, cur=0, threshold=1, time_step=0.001, R=5.1, C=0.005):
    """One forward-Euler step of a leaky integrate-and-fire neuron.

    The spike flag is computed from the membrane potential *before* the
    update; the returned membrane value is the post-update potential.
    Returns (new_mem, spike).
    """
    tau_mem = R * C
    # Spike if the pre-update potential exceeds threshold.
    spk = mem > threshold
    # dV = (dt / tau) * (-V + I*R)
    new_mem = mem + (time_step / tau_mem) * (-mem + cur * R)
    return (new_mem, spk)
def _subdivide_interval(args):
    """Bisect one adaptive-quadrature interval and return the resulting deltas.

    `args` is (interval, f, norm_func, _quadrature) where interval is
    (old_err, a, b, old_int). Returns (dint, derr, dround_err, subintervals,
    dneval): the changes in integral estimate, error estimate and rounding
    error, the two child intervals, and the number of function evaluations.

    NOTE(review): `_quadrature.num_eval` is read immediately after each call —
    presumably it reports evaluations of the most recent invocation; confirm
    against the quadrature implementation.
    """
    (interval, f, norm_func, _quadrature) = args
    (old_err, a, b, old_int) = interval
    c = (0.5 * (a + b))
    # Optionally memoize f so shared endpoint evaluations are not recomputed.
    if (getattr(_quadrature, 'cache_size', 0) > 0):
        f = functools.lru_cache(_quadrature.cache_size)(f)
    (s1, err1, round1) = _quadrature(a, c, f, norm_func)
    dneval = _quadrature.num_eval
    (s2, err2, round2) = _quadrature(c, b, f, norm_func)
    dneval += _quadrature.num_eval
    if (old_int is None):
        # First visit: the parent estimate was never computed, so compute it now.
        (old_int, _, _) = _quadrature(a, b, f, norm_func)
        dneval += _quadrature.num_eval
    if (getattr(_quadrature, 'cache_size', 0) > 0):
        # With caching, only cache misses correspond to real evaluations.
        dneval = f.cache_info().misses
    dint = ((s1 + s2) - old_int)
    derr = ((err1 + err2) - old_err)
    dround_err = (round1 + round2)
    subintervals = ((a, c, s1, err1), (c, b, s2, err2))
    return (dint, derr, dround_err, subintervals, dneval)
def read_test_file(test_file, prediction_topk):
    """Read tab-separated (query, candidate, rank) rows from `test_file`.

    Keeps only rows whose integer rank (3rd column) is <= `prediction_topk`
    and returns them as a list of (query, candidate) tuples.

    Fix vs. original: the file handle was opened inline in the for-loop and
    never closed — now managed with a context manager.
    """
    test_data = []
    with open(test_file, encoding='utf-8') as f:
        for line in f:
            fields = line.strip('\n').split('\t')
            rank = int(fields[2])
            if rank <= prediction_topk:
                test_data.append((fields[0], fields[1]))
    return test_data
def test_hyperkalemia(tmp_path: pathlib.Path):
    """End-to-end check of the hyperkalemia lab-value labeler ('severe' threshold)."""
    outcome_codes = {'child_1_1', 'child_2', 'child_1', 'LOINC/LP386618-5',
                     'LOINC/LG10990-6', 'LOINC/LG7931-1', 'LOINC/6298-4',
                     'LOINC/2823-3'}
    labeler = _create_specific_labvalue_labeler(
        HyperkalemiaLabValueLabeler, 'severe', outcome_codes)
    _assert_value_to_label_correct(labeler, 7.1, 6.1, 5.55, 5.49, 'mmol/l')
    # One representative value per severity band (mixed units, incl. mg/dL).
    severe_values = [(10, 'mmol/L'), (7.01 * 18, 'mg/dL')]
    moderate_values = [(6.1, 'mmol/L'), (6.9, 'mmol/L')]
    mild_values = [(5.6, 'mEq/L'), (5.99, 'mmol/L')]
    normal_values = [(0, 'mEq/L'), (4 * 18, 'mg/dL')]
    _run_specific_labvalue_test(labeler, outcome_codes, severe_values,
                                moderate_values, mild_values, normal_values,
                                'test_hyperkalemia')
class Test_Frontend(unittest.TestCase):
    """GEMM frontend tests: run a CUTLASS SIMT FP32 GEMM and compare against
    torch / cupy reference computations.

    Fixes vs. original: both reference computations were missing the
    matrix-multiply operator (`(alpha * A) B` is a syntax error) — restored
    as `@`; bare `except:` clauses narrowed to ImportError.
    """

    def setUp(self) -> None:
        cc = device_cc()
        math_inst = MathInstruction([1, 1, 1], cutlass.float32, cutlass.float32,
                                    cutlass.float32, cutlass.OpClass.Simt,
                                    MathOperation.multiply_add)
        stages = 2
        tile_description = TileDescription([128, 128, 8], stages, [2, 4, 1], math_inst)
        A = TensorDescription(cutlass.float32, cutlass.RowMajor, 1)
        B = TensorDescription(cutlass.float32, cutlass.RowMajor, 1)
        C = TensorDescription(cutlass.float32, cutlass.RowMajor, 1)
        epilogue_functor = LinearCombination(C.element, C.alignment,
                                             math_inst.element_accumulator,
                                             cutlass.float32)
        self.operation = GemmOperationUniversal(
            arch=cc, tile_description=tile_description, A=A, B=B, C=C,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass.IdentitySwizzle1)
        pycutlass.compiler.add_module([self.operation])

    def test_torch_frontend(self):
        try:
            import torch
        except ImportError:
            self.assertTrue(False, 'Unable to import torch')
        problem_size = cutlass.gemm.GemmCoord(512, 256, 128)
        # Integer-valued float tensors so the device result matches exactly.
        tensor_A = torch.ceil(torch.empty(size=(problem_size.m(), problem_size.k()),
                                          dtype=torch.float32, device='cuda').uniform_(-8.5, 7.5))
        tensor_B = torch.ceil(torch.empty(size=(problem_size.k(), problem_size.n()),
                                          dtype=torch.float32, device='cuda').uniform_(-8.5, 7.5))
        tensor_C = torch.ceil(torch.empty(size=(problem_size.m(), problem_size.n()),
                                          dtype=torch.float32, device='cuda').uniform_(-8.5, 7.5))
        tensor_D = torch.empty_like(tensor_C)
        alpha = 1.0
        beta = 0.0
        arguments = GemmArguments(operation=self.operation, problem_size=problem_size,
                                  A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
                                  output_op=self.operation.epilogue_type(alpha, beta),
                                  gemm_mode=cutlass.gemm.Mode.Gemm, split_k_splices=1)
        self.operation.run(arguments)
        arguments.sync()
        # Reference: D = alpha * A @ B + beta * C ('@' was missing in the source).
        tensor_D_ref = (alpha * tensor_A) @ tensor_B + (beta * tensor_C)
        self.assertTrue(torch.equal(tensor_D, tensor_D_ref))

    def test_cupy_frontend(self):
        try:
            import cupy as cp
        except ImportError:
            self.assertTrue(False, 'Unable to import cupy')
        cp.cuda.set_allocator(rmm.rmm_cupy_allocator)
        problem_size = cutlass.gemm.GemmCoord(512, 256, 128)
        tensor_A = cp.ceil(cp.random.uniform(low=-8.5, high=7.5,
                                             size=(problem_size.m(), problem_size.k()),
                                             dtype=cp.float32))
        tensor_B = cp.ceil(cp.random.uniform(low=-8.5, high=7.5,
                                             size=(problem_size.k(), problem_size.n()),
                                             dtype=cp.float32))
        tensor_C = cp.ceil(cp.random.uniform(low=-8.5, high=7.5,
                                             size=(problem_size.m(), problem_size.n()),
                                             dtype=cp.float32))
        # D starts at ones; beta=1.0 means C contributes to the reference too.
        tensor_D = cp.ones_like(tensor_C)
        alpha = 1.0
        beta = 1.0
        tensor_D_ref = (alpha * tensor_A) @ tensor_B + (beta * tensor_C)
        arguments = GemmArguments(operation=self.operation, problem_size=problem_size,
                                  A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
                                  output_op=self.operation.epilogue_type(alpha, beta),
                                  gemm_mode=cutlass.gemm.Mode.Gemm, split_k_splices=1)
        self.operation.run(arguments)
        arguments.sync()
        self.assertTrue(cp.array_equal(tensor_D, tensor_D_ref))
def produceImgList():
    """Append the relative paths of all stage PNGs to <stage>_2.txt list files.

    Fix vs. original: the list file was opened without a context manager and
    only closed on the happy path — now `with` guarantees closure.
    NOTE(review): the file is opened in append mode, so re-running duplicates
    entries; presumably intentional, confirm with the data pipeline.
    """
    root_path = '/home/lmin/data/portrait/'
    stages = ['train', 'val']
    for stage in stages:
        with open((root_path + stage) + '_2.txt', 'a') as seg_txt:
            imgpath = glob(os.path.join(root_path, stage, 'images/*.png'))
            for imgline in imgpath:
                relative = imgline.replace(root_path, '')
                print(relative)
                seg_txt.write(relative + '\n')
def _cfg(url='', **kwargs):
    """Build a timm-style pretrained-model config dict.

    Starts from the ImageNet defaults below and lets `kwargs` override or
    extend any field.
    """
    config = {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': (7, 7),
        'crop_pct': 0.875,
        'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'conv_stem',
        'classifier': 'classifier',
    }
    config.update(kwargs)
    return config
def convert_file(input_file, output_file):
    """Convert a FASTA file into an HDF5 file of per-base byte ('S1') arrays.

    Each FASTA record becomes one dataset named after the record key, holding
    the upper-cased sequence one byte per element.

    Fixes vs. original: the sequence is written in a single vectorized call
    instead of one HDF5 write per base (O(n) round-trips), and the HDF5 file
    is closed via a context manager even on error.
    """
    fasta = pyfaidx.Fasta(input_file)
    with h5py.File(output_file, 'w') as h5:
        for k in fasta.keys():
            s = str(fasta[k][:].seq).upper()
            # One bulk write: view the ASCII bytes as an S1 array.
            data = numpy.frombuffer(s.encode('ascii'), dtype='S1')
            h5.create_dataset(k, data=data)
def register_Ns3EpcX2SapHandoverRequestParams_methods(root_module, cls):
    """Register pybindgen bindings for ns3::EpcX2Sap::HandoverRequestParams.

    Generated-style binding code: declares the constructors and each public
    instance attribute of the X2 handover-request parameter struct.
    """
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::EpcX2Sap::HandoverRequestParams const &', 'arg0')])
    cls.add_instance_attribute('bearers', 'std::vector< ns3::EpcX2Sap::ErabToBeSetupItem >', is_const=False)
    cls.add_instance_attribute('cause', 'uint16_t', is_const=False)
    cls.add_instance_attribute('isMc', 'bool', is_const=False)
    cls.add_instance_attribute('mmeUeS1apId', 'uint32_t', is_const=False)
    cls.add_instance_attribute('oldEnbUeX2apId', 'uint16_t', is_const=False)
    cls.add_instance_attribute('rlcRequests', 'std::vector< ns3::EpcX2Sap::RlcSetupRequest >', is_const=False)
    cls.add_instance_attribute('rrcContext', 'ns3::Ptr< ns3::Packet >', is_const=False)
    cls.add_instance_attribute('sourceCellId', 'uint16_t', is_const=False)
    cls.add_instance_attribute('targetCellId', 'uint16_t', is_const=False)
    cls.add_instance_attribute('ueAggregateMaxBitRateDownlink', 'uint64_t', is_const=False)
    cls.add_instance_attribute('ueAggregateMaxBitRateUplink', 'uint64_t', is_const=False)
    return
def interpolant_attempt():
    """Run interpolant-based model checking on the simple-ALU transition system.

    Builds the system on a MathSAT solver, runs pono's InterpolantMC for up to
    20 frames, and asserts the property is proven.
    """
    solver = ss.create_msat_solver(False)
    # Model production and incrementality are required by InterpolantMC.
    for opt in ('produce-models', 'incremental'):
        solver.set_opt(opt, 'true')
    (prop, fts) = build_simple_alu_fts(solver)
    print('\n Running Interpolant-based Model Checking ')
    print('INIT\n\t{}'.format(fts.init))
    print('TRANS\n\t{}'.format(fts.trans))
    print('PROP\n\t{}'.format(prop.prop))
    checker = pono.InterpolantMC(prop, fts, solver)
    res = checker.check_until(20)
    print(res)
    assert res is True, 'Expecting InterpolantMC to prove the property'
    print('InterpolantMC returned true')
def prepare_dir(ted_dir):
    """Convert a TED-LIUM release directory into per-utterance wav/txt pairs.

    Expects `ted_dir` to contain `sph/` audio and matching `stm/` transcripts.
    Writes results under `<ted_dir>/converted/{wav,txt}`.

    NOTE(review): depends on a module-level `args.sample_rate` and on helpers
    (`get_utterances_from_stm`, `filter_short_utterances`, `cut_utterance`,
    `_preprocess_transcript`) defined elsewhere in the file.
    """
    converted_dir = os.path.join(ted_dir, 'converted')
    wav_dir = os.path.join(converted_dir, 'wav')
    if (not os.path.exists(wav_dir)):
        os.makedirs(wav_dir)
    txt_dir = os.path.join(converted_dir, 'txt')
    if (not os.path.exists(txt_dir)):
        os.makedirs(txt_dir)
    counter = 0
    entries = os.listdir(os.path.join(ted_dir, 'sph'))
    for sph_file in tqdm(entries, total=len(entries)):
        # Talk name is the .sph basename; its transcript is <name>.stm.
        speaker_name = sph_file.split('.sph')[0]
        sph_file_full = os.path.join(ted_dir, 'sph', sph_file)
        stm_file_full = os.path.join(ted_dir, 'stm', '{}.stm'.format(speaker_name))
        assert (os.path.exists(sph_file_full) and os.path.exists(stm_file_full))
        all_utterances = get_utterances_from_stm(stm_file_full)
        all_utterances = filter(filter_short_utterances, all_utterances)
        for (utterance_id, utterance) in enumerate(all_utterances):
            target_wav_file = os.path.join(wav_dir, '{}_{}.wav'.format(utterance['filename'], str(utterance_id)))
            target_txt_file = os.path.join(txt_dir, '{}_{}.txt'.format(utterance['filename'], str(utterance_id)))
            # Slice the utterance audio out of the talk-level sph file.
            cut_utterance(sph_file_full, target_wav_file, utterance['start_time'],
                          utterance['end_time'], sample_rate=args.sample_rate)
            with io.FileIO(target_txt_file, 'w') as f:
                f.write(_preprocess_transcript(utterance['transcript']).encode('utf-8'))
            counter += 1
def simulate_from_network_attr(arclist_filename, param_func_list, labels, theta,
                               binattr_filename=None, contattr_filename=None,
                               catattr_filename=None,
                               sampler_func=basicALAAMsampler, numSamples=100,
                               iterationInStep=None, burnIn=None):
    """Simulate ALAAM outcome vectors on a network and stream stats to stdout.

    Writes a whitespace-separated header then one row per sample: iteration t,
    the theta values, the sampled statistics, the sampler acceptance rate, and
    mean/variance of degree for nodes with outcome 1 and outcome 0.
    """
    assert (len(param_func_list) == len(labels))
    G = Graph(arclist_filename, binattr_filename, contattr_filename, catattr_filename)
    # Degree sequence indexed by node, used to summarize each simulated outcome.
    degseq = np.array([G.degree(v) for v in G.nodeIterator()])
    sys.stdout.write((' '.join((((['t'] + [('theta_' + z) for z in labels]) + labels)
                                + ['acceptance_rate', 'meanDegree1', 'varDegree1',
                                   'meanDegree0', 'varDegree0'])) + '\n'))
    for (simvec, stats, acceptance_rate, t) in simulateALAAM(
            G, param_func_list, theta, numSamples, iterationInStep, burnIn,
            sampler_func=sampler_func):
        # Degree summaries split by simulated outcome (1 vs 0).
        meanDegree1 = np.mean(degseq[np.nonzero(simvec)[0]])
        varDegree1 = np.var(degseq[np.nonzero(simvec)[0]])
        meanDegree0 = np.mean(degseq[np.nonzero((simvec == 0))[0]])
        varDegree0 = np.var(degseq[np.nonzero((simvec == 0))[0]])
        sys.stdout.write((' '.join((((([str(t)] + [str(th) for th in list(theta)])
                                      + [str(x) for x in list(stats)])
                                     + [str(acceptance_rate)])
                                    + [str(x) for x in [meanDegree1, varDegree1,
                                                        meanDegree0, varDegree0]])) + '\n'))
def _paired_bootstrap_trial(per_doc1, per_doc2):
    """One paired-bootstrap resample of two per-document score collections.

    Draws document indices with replacement, sums the corresponding Matrix
    entries from each system, and returns the score difference.
    """
    n = len(per_doc1)
    sample = [random.randint(0, n - 1) for _ in range(n)]
    # The same index sample is applied to both systems (paired bootstrap).
    resampled1 = sum((per_doc1[i] for i in sample), Matrix())
    resampled2 = sum((per_doc2[i] for i in sample), Matrix())
    return _result_diff(resampled1, resampled2)
def init_cuda_not_in_main_proc_check():
    """Guard against CUDA being initialized in the main process before forking.

    If Theano's CUDA backend is already initialized, just report it; otherwise
    monkey-patch `cuda.use` so each later call logs the device and the PID it
    runs in (useful to verify CUDA init happens in worker processes only).
    """
    import theano.sandbox.cuda as cuda
    if (cuda.use.device_number is not None):
        print(('CUDA already initialized in proc %i' % os.getpid()))
        return
    use_original = cuda.use

    def use_wrapped(device, **kwargs):
        # Log which process actually initializes CUDA before delegating.
        print(('CUDA.use %s in proc %i' % (device, os.getpid())))
        use_original(device=device, **kwargs)

    cuda.use = use_wrapped
    # Preserve the attribute the guard above checks on future calls.
    cuda.use.device_number = None
# Fix vs. original: the decorator was garbled to a bare `.parametrize(...)`
# (a syntax error); restored as @pytest.mark.parametrize.
@pytest.mark.parametrize('wrapper', [_ArrayAPIWrapper, _NumPyAPIWrapper])
def test_get_namespace_array_api_isdtype(wrapper):
    """Check isdtype() classification for both array-API wrapper flavours."""
    if wrapper == _ArrayAPIWrapper:
        xp_ = pytest.importorskip('numpy.array_api')
        xp = _ArrayAPIWrapper(xp_)
    else:
        xp = _NumPyAPIWrapper()
    assert xp.isdtype(xp.float32, xp.float32)
    assert xp.isdtype(xp.float32, 'real floating')
    assert xp.isdtype(xp.float64, 'real floating')
    assert not xp.isdtype(xp.int32, 'real floating')
    for dtype in supported_float_dtypes(xp):
        assert xp.isdtype(dtype, 'real floating')
    assert xp.isdtype(xp.bool, 'bool')
    assert not xp.isdtype(xp.float32, 'bool')
    assert xp.isdtype(xp.int16, 'signed integer')
    assert not xp.isdtype(xp.uint32, 'signed integer')
    assert xp.isdtype(xp.uint16, 'unsigned integer')
    assert not xp.isdtype(xp.int64, 'unsigned integer')
    assert xp.isdtype(xp.int64, 'numeric')
    assert xp.isdtype(xp.float32, 'numeric')
    assert xp.isdtype(xp.uint32, 'numeric')
    assert not xp.isdtype(xp.float32, 'complex floating')
    if wrapper == _NumPyAPIWrapper:
        # Complex dtypes exist only in the plain-NumPy wrapper.
        assert not xp.isdtype(xp.int8, 'complex floating')
        assert xp.isdtype(xp.complex64, 'complex floating')
        assert xp.isdtype(xp.complex128, 'complex floating')
    with pytest.raises(ValueError, match='Unrecognized data type'):
        assert xp.isdtype(xp.int16, 'unknown')
class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration for CamemBERT.

    Fix vs. original: `inputs` lost its `@property` decorator in the source —
    restored to match the OnnxConfig convention of a read-only property.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the exported model's inputs."""
        if self.task == 'multiple-choice':
            # Multiple-choice adds a per-question 'choice' axis.
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict([('input_ids', dynamic_axis),
                            ('attention_mask', dynamic_axis)])
def get_sparse_lookup_trainer_version(version):
    """Validate and return the sparse_lookup precision version ('fp32' or 'fp16')."""
    supported = {'fp32', 'fp16'}
    assert version in supported, \
        'Unexpected version of sparse_lookup layer {0}'.format(version)
    return version
class FakeRegNetVisslWrapper(nn.Module):
    """Wrap a RegNet so its features are exposed VISSL-style.

    The stem is registered as 'conv1' and each trunk stage as 'res2', 'res3',
    ... so `get_trunk_forward_outputs` can address them by name.
    """

    def __init__(self, model: nn.Module):
        super().__init__()
        named_blocks: List[Tuple[str, nn.Module]] = [('conv1', model.stem)]
        for (name, stage) in model.trunk_output.named_children():
            assert name.startswith('block'), f'Unexpected layer name {name}'
            # First stage becomes 'res2' (index = current length + 1).
            named_blocks.append((f'res{len(named_blocks) + 1}', stage))
        self._feature_blocks = nn.ModuleDict(named_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(x, out_feat_keys=None,
                                         feature_blocks=self._feature_blocks)
def batch_llm_generate(template_file: str, prompt_parameter_values: List[dict],
                       engine, max_tokens, temperature, stop_tokens, top_p=0.9,
                       frequency_penalty=0, presence_penalty=0, postprocess=True,
                       max_tries=1, ban_line_break_start=False, max_num_threads=10):
    """Instantiate the prompt template once per parameter dict and generate
    completions concurrently, preserving input order in the returned list."""
    generate = partial(_generate, engine=engine, max_tokens=max_tokens,
                       temperature=temperature, stop_tokens=stop_tokens,
                       top_p=top_p, frequency_penalty=frequency_penalty,
                       presence_penalty=presence_penalty, postprocess=postprocess,
                       max_tries=max_tries,
                       ban_line_break_start=ban_line_break_start)
    with ThreadPoolExecutor(max_num_threads) as executor:
        futures = [executor.submit(generate, _fill_template(template_file, params))
                   for params in prompt_parameter_values]
        # Collect in submission order so outputs align with the inputs.
        return [future.result() for future in futures]
def start_training():
    """Sparse (ITC-only) pre-training entry point: horovod + apex AMP loop.

    Sets up model/optimizer/dataloaders, restores from checkpoint if present,
    then runs gradient-accumulated training with periodic validation and
    checkpointing. Rank 0 owns logging, TensorBoard and saving.
    """
    cfg = shared_configs.get_sparse_pretraining_args()
    set_random_seed(cfg.seed)
    n_gpu = hvd.size()
    # One GPU per process; horovod local rank selects the visible device.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(hvd.local_rank())
    device = torch.device('cuda', 0)
    torch.cuda.set_device(0)
    if (hvd.rank() != 0):
        LOGGER.disabled = True
    LOGGER.info(f'device: {device} n_gpu: {n_gpu}, rank: {hvd.rank()}, 16-bits training: {cfg.fp16}')
    model = setup_model(cfg, device=device)
    model.train()
    optimizer = setup_e2e_optimizer(model, cfg)
    compression = hvd.Compression.none
    optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters(), compression=compression)
    compression = hvd.Compression.none
    # Make all workers start from rank 0's weights and optimizer state.
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(optimizer, root_rank=0)
    (model, optimizer) = amp.initialize(model, optimizer, enabled=cfg.fp16, opt_level='O2', keep_batchnorm_fp32=True)
    tokenizer = BertTokenizerFast.from_pretrained(cfg.tokenizer_dir)
    (train_loaders, val_loaders) = setup_dataloaders(cfg, tokenizer)
    train_loader = MetaLoader(train_loaders, accum_steps=cfg.gradient_accumulation_steps, distributed=(n_gpu > 1))
    img_norm = ImageNorm(mean=cfg.img_pixel_mean, std=cfg.img_pixel_std)
    train_loader = PrefetchLoader(train_loader, img_norm)
    val_loaders = {k: PrefetchLoader(v, img_norm) for (k, v) in val_loaders.items()}
    total_train_batch_size = int((((n_gpu * cfg.train_batch_size) * cfg.gradient_accumulation_steps) * cfg.max_n_example_per_group))
    total_n_epochs = cfg.num_train_epochs
    # Derive step budget and validation cadence from epochs and loader size.
    cfg.num_train_steps = int(math.ceil((((1.0 * train_loader.n_batches_in_epoch) * total_n_epochs) / (n_gpu * cfg.gradient_accumulation_steps))))
    cfg.valid_steps = (int(math.ceil((((1.0 * cfg.num_train_steps) / cfg.num_valid) / cfg.min_valid_steps))) * cfg.min_valid_steps)
    actual_num_valid = (int(math.floor(((1.0 * cfg.num_train_steps) / cfg.valid_steps))) + 1)
    restorer = TrainingRestorer(cfg, model, optimizer)
    global_step = restorer.global_step
    TB_LOGGER.global_step = global_step
    if (hvd.rank() == 0):
        # Rank 0 owns all persistence and progress reporting.
        LOGGER.info('Saving training meta...')
        save_training_meta(cfg)
        LOGGER.info('Saving training done...')
        TB_LOGGER.create(join(cfg.output_dir, 'log'))
        pbar = tqdm(total=cfg.num_train_steps)
        model_saver = ModelSaver(join(cfg.output_dir, 'ckpt'))
        add_log_to_file(join(cfg.output_dir, 'log', 'log.txt'))
    else:
        LOGGER.disabled = True
        pbar = NoOp()
        model_saver = NoOp()
        restorer = NoOp()
    if (global_step > 0):
        pbar.update(global_step)
    LOGGER.info(cfg)
    LOGGER.info('Starting training...')
    LOGGER.info(f'***** Running training with {n_gpu} GPUs *****')
    LOGGER.info(f'  Single-GPU Non-Accumulated batch size = {cfg.train_batch_size}')
    LOGGER.info(f'  max_n_example_per_group = {cfg.max_n_example_per_group}')
    LOGGER.info(f'  Accumulate steps = {cfg.gradient_accumulation_steps}')
    LOGGER.info(f'  Total batch size = #GPUs * Single-GPU batch size * max_n_example_per_group * Accumulate steps [Image] = {total_train_batch_size}')
    LOGGER.info(f'  Total #batches - single epoch = {train_loader.n_batches_in_epoch}.')
    LOGGER.info(f'  Total #steps = {cfg.num_train_steps}')
    LOGGER.info(f'  Total #epochs = {total_n_epochs}.')
    LOGGER.info(f'  Validate every {cfg.valid_steps} steps, in total {actual_num_valid} times')
    with optimizer.skip_synchronize():
        optimizer.zero_grad()
        if (global_step == 0):
            optimizer.step()
    debug_step = 5
    tasks = []
    # Only the image-text contrastive (ITC) task is enabled in this script.
    for (name, flag) in zip(['itc'], [cfg.use_itc]):
        if flag:
            tasks.append(name)
    task2loss = {t: RunningMeter(f'train_loss/{t}') for t in tasks}
    task2loss['loss'] = RunningMeter('train_loss/loss')
    train_log = {'train/i2t_acc': 0, 'train/t2i_acc': 0}
    for (step, (task, batch)) in enumerate(train_loader):
        outputs = forward_step(cfg, model, batch)
        itc_loss = 0
        assert ((not cfg.use_mlm) and (not cfg.use_itm))
        if cfg.use_itc:
            # In-batch retrieval accuracy in both directions (t2i, i2t).
            n_itc_ex = len(outputs['itc_labels'])
            n_t2i_corrects = (outputs['t2i_scores'].max(dim=(- 1))[1] == outputs['itc_labels']).sum().item()
            n_i2t_corrects = (outputs['i2t_scores'].max(dim=(- 1))[1] == outputs['itc_labels']).sum().item()
            train_log.update({'train/t2i_acc': float((n_t2i_corrects / n_itc_ex)), 'train/i2t_acc': float((n_i2t_corrects / n_itc_ex))})
            itc_loss = outputs['itc_loss']
            task2loss['itc'](itc_loss.item())
        loss = itc_loss
        task2loss['loss'](loss.item())
        # Skip allreduce on accumulation micro-steps; sync on the last one.
        delay_unscale = (((step + 1) % cfg.gradient_accumulation_steps) != 0)
        with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale) as scaled_loss:
            scaled_loss.backward()
            zero_none_grad(model)
            optimizer.synchronize()
        if (((step + 1) % cfg.gradient_accumulation_steps) == 0):
            global_step += 1
            if (((step + 1) % cfg.log_interval) == 0):
                TB_LOGGER.log_scalar_dict({l.name: l.val for l in task2loss.values() if (l.val is not None)})
            # Learning-rate schedule, computed from the effective epoch.
            n_epoch = int(((((1.0 * n_gpu) * cfg.gradient_accumulation_steps) * global_step) / train_loader.n_batches_in_epoch))
            lr_this_step = get_lr_sched(global_step, cfg.decay, cfg.learning_rate, cfg.num_train_steps, warmup_ratio=cfg.warmup_ratio, decay_epochs=cfg.step_decay_epochs, multi_step_epoch=n_epoch)
            for (pg_n, param_group) in enumerate(optimizer.param_groups):
                param_group['lr'] = lr_this_step
            if (((step + 1) % cfg.log_interval) == 0):
                TB_LOGGER.add_scalar('train/lr', lr_this_step, global_step)
            if (cfg.grad_norm != (- 1)):
                grad_norm = clip_grad_norm_(amp.master_params(optimizer), cfg.grad_norm)
                if (((step + 1) % cfg.log_interval) == 0):
                    TB_LOGGER.add_scalar('train/grad_norm', grad_norm, global_step)
            TB_LOGGER.step()
            # Every trainable parameter must have received a gradient.
            none_grads = [p[0] for p in model.named_parameters() if (p[1].requires_grad and (p[1].grad is None))]
            assert (len(none_grads) == 0), f'{none_grads}'
            with optimizer.skip_synchronize():
                optimizer.step()
                optimizer.zero_grad()
            restorer.step()
            pbar.update(1)
            if ((global_step % cfg.valid_steps) == 0):
                LOGGER.info(f'Step {global_step}: start validation')
                validate(model, val_loaders, cfg)
                model_saver.save(step=global_step, model=model)
        if (global_step >= cfg.num_train_steps):
            break
        if (cfg.debug and (global_step >= debug_step)):
            break
    # Final validation/checkpoint if training did not end on a valid boundary.
    if ((global_step % cfg.valid_steps) != 0):
        LOGGER.info(f'Step {global_step}: start validation')
        validate(model, val_loaders, cfg)
        model_saver.save(step=global_step, model=model)
def test_unbox():
    """A GrowableBuffer argument should pass through (unbox) without error.

    NOTE(review): the inner function looks like it originally carried a
    numba `@njit` decorator that was stripped by extraction — confirm.
    """
    def probe(value):
        value  # touch the argument so it is actually consumed
        return 3.14

    buffer = GrowableBuffer(np.int32)
    probe(buffer)
class DummySurvivalRegressor(DummyRegressor):
    """DummyRegressor adapted to survival data: fits on observed event times."""

    def __init__(self, strategy='mean', constant=None, quantile=None):
        super().__init__(strategy=strategy, constant=constant, quantile=quantile)
        # NOTE(review): this deletes the attribute from the DummyRegressor
        # CLASS itself (not the instance), globally disabling sklearn's
        # n_features_in_ machinery for all DummyRegressor uses — presumably a
        # cross-version compatibility shim; confirm the side effect is intended.
        if hasattr(DummyRegressor, 'n_features_in_'):
            delattr(DummyRegressor, 'n_features_in_')

    def fit(self, X, y, sample_weight=None):
        # y is a structured survival array; train only on the time component.
        (_, time) = check_array_survival(X, y)
        return super().fit(X, time)
def parse_opt():
    """Parse command-line options for regressor-model training.

    Fix vs. original: user-facing help-string typo 'Aplha' corrected to 'Alpha'.
    """
    parser = argparse.ArgumentParser(description='Regressor Model Training')
    parser.add_argument('--epochs', type=int, default=10, help='Number of epochs')
    parser.add_argument('--batch_size', type=int, default=32, help='Number of batch size')
    parser.add_argument('--alpha', type=float, default=0.6, help='Alpha default=0.6 DONT CHANGE')
    parser.add_argument('--w', type=float, default=0.4, help='w DONT CHANGE')
    parser.add_argument('--num_workers', type=int, default=2, help='Total # workers, for colab & kaggle use 2')
    parser.add_argument('--lr', type=float, default=0.0001, help='Learning rate')
    parser.add_argument('--save_epoch', type=int, default=10, help='Save model every # epochs')
    # Paths are anchored at the repository ROOT (module-level constant).
    parser.add_argument('--train_path', type=str, default=(ROOT / 'dataset/KITTI/training'), help='Training path KITTI')
    parser.add_argument('--model_path', type=str, default=(ROOT / 'weights'), help='Weights path, for load and save model')
    parser.add_argument('--select_model', type=str, default='resnet18', help='Model selection: {resnet18, vgg11}')
    parser.add_argument('--api_key', type=str, default='', help='API key for comet.ml')
    opt = parser.parse_args()
    return opt
def get_state_embedding_network_args(env, embedding_dim):
    """Assemble the keyword arguments for building the state-embedding network."""
    return dict(
        name='state_embedding_network',
        input_shape=env.observation_space.shape,
        output_dim=embedding_dim,
        hidden_sizes=(64, 32),
        hidden_nonlinearity=get_nonlinearity_for_embedding(),
        output_nonlinearity=None,
        batch_normalization=False,
    )
def get_name_scope_ops(ops, scope):
    """Return the ops whose name is `scope` itself or lives under `scope/`."""
    # Normalize away a single trailing slash before building the regex.
    if scope and scope.endswith('/'):
        scope = scope[:-1]
    return filter_ops_from_regex(ops, '^{}(/.*)?$'.format(scope))
def test_find_by_tag(testdir):
    """Generate a petstore test module hitting /pet/findByTags and run it.

    NOTE(review): the embedded test-module source below appears garbled by
    extraction — the `\\(` sequences look like stripped decorators (e.g.
    `@schema.parametrize(endpoint=...)` / `@settings(max_examples=5, ...)`).
    Confirm against the original schemathesis test suite before relying on it.
    """
    testdir.make_petstore_test('\(endpoint="/pet/findByTags$")\(max_examples=5, deadline=None)\ndef test_(request, case):\n request.config.HYPOTHESIS_CASES += 1\n assert_list(case.query["tags"])\n assert_requests_call(case)\n')
    testdir.assert_petstore()
def _get_format_from_name(name: str) -> str: try: int(name) return 'numeric' except ValueError: return ('alpha-2' if (len(name) == 2) else ('alpha-3' if (len(name) == 3) else 'regex'))
class TestDistanceRepresentation(TestCase):
    """representation() should compute the Euclidean distance between points."""

    def test_call_value_should_be_distance(self):
        left = tf.constant([[[[1, 2, 3]]]], dtype=tf.float32)
        right = tf.constant([[[[4, 5, 6]]]], dtype=tf.float32)
        result = representation(left, right)
        # |(4,5,6) - (1,2,3)| = sqrt(9 + 9 + 9) = sqrt(27)
        self.assertAlmostEqual(float(result[0][0][0]), math.sqrt(27), places=6)
def add_to_total_cost(amount: float):
    """Add `amount` to the module-level running total under the module lock.

    Uses the module-level `thread_lock` so concurrent callers do not lose
    updates to `total_cost`.
    """
    global total_cost
    with thread_lock:
        total_cost += amount
def AFM(linear_feature_columns, dnn_feature_columns, fm_group=DEFAULT_GROUP_NAME,
        use_attention=True, attention_factor=8, l2_reg_linear=1e-05,
        l2_reg_embedding=1e-05, l2_reg_att=1e-05, afm_dropout=0, seed=1024,
        task='binary'):
    """Build an Attentional Factorization Machine (AFM) Keras model.

    Combines a linear logit with either attention-weighted pairwise
    interactions (AFMLayer, when `use_attention`) or a plain FM logit,
    then applies the task-specific prediction head.
    """
    features = build_input_features((linear_feature_columns + dnn_feature_columns))
    inputs_list = list(features.values())
    # Sparse embeddings grouped by feature group; dense features unsupported here.
    (group_embedding_dict, _) = input_from_feature_columns(
        features, dnn_feature_columns, l2_reg_embedding, seed,
        support_dense=False, support_group=True)
    linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed,
                                    prefix='linear', l2_reg=l2_reg_linear)
    if use_attention:
        # One attention-weighted interaction logit per selected feature group.
        fm_logit = add_func([AFMLayer(attention_factor, l2_reg_att, afm_dropout, seed)(list(v))
                             for (k, v) in group_embedding_dict.items() if (k in fm_group)])
    else:
        fm_logit = add_func([FM()(concat_func(v, axis=1))
                             for (k, v) in group_embedding_dict.items() if (k in fm_group)])
    final_logit = add_func([linear_logit, fm_logit])
    output = PredictionLayer(task)(final_logit)
    model = Model(inputs=inputs_list, outputs=output)
    return model
def _triplet_mate_frontalpose_nonmate_top1_probe_mixedpose(n_subjects=32):
    """Build (matelist, nonmatelist, probelist) triplets from VGGFace2.

    Mates are frontal-pose images of the first `n_subjects` subjects; each
    non-mate is that subject's top-ranked frontal non-mate (deduplicated);
    probes are `n_subjects` mixed-pose images per mate subject.
    """
    np.random.seed(42)
    vggface2 = VGGFace2('/proj/janus6/vggface2')
    frontalset = [im for im in vggface2.frontalset(n_frontal=1)]
    matelist = frontalset[0:n_subjects]
    if (n_subjects == 16):
        # Hand-picked substitutions for the 16-subject protocol — presumably
        # replacing unsuitable subjects; kept exactly as curated.
        matelist[3] = frontalset[(n_subjects + 1)]
        matelist[5] = frontalset[(n_subjects + 5)]
        matelist[6] = frontalset[(n_subjects + 4)]
        matelist[11] = frontalset[(n_subjects + 7)]
        matelist[12] = frontalset[(n_subjects + 2)]
        matelist[13] = frontalset[(n_subjects + 9)]
        matelist[15] = frontalset[(n_subjects + 6)]
    # Precomputed top-k frontal non-mates per subject id.
    d_subjectid_to_topk_frontal_nonmates = vipy.util.load('_vggface2_topk_frontal_nonmates.pkl')
    nonmateidlist = []
    for m in matelist:
        # Take each mate's best-ranked non-mate not already used.
        for n in d_subjectid_to_topk_frontal_nonmates[m.category()]:
            if (n not in nonmateidlist):
                nonmateidlist.append(n)
                break
    d_frontalset = {x.category(): x for x in frontalset}
    nonmatelist = [d_frontalset[k] for k in nonmateidlist]
    probelist = [vggface2.take(n_subjects, im_mate.category()) for im_mate in matelist]
    assert (len(nonmatelist) == n_subjects)
    assert (len(probelist) == n_subjects)
    assert (len(probelist[0]) == n_subjects)
    assert (len(matelist) == n_subjects)
    return (matelist, nonmatelist, probelist)
def init_np_seed(worker_id):
    """DataLoader worker_init_fn: derive a per-worker NumPy seed from torch's seed.

    np.random.seed requires a value in [0, 2**32), while torch.initial_seed()
    can exceed it, so the product is reduced modulo 2**32.

    Fix vs. original: the modulus operand was missing from the source
    (`% )` — a syntax error); restored as `% (2 ** 32)`, the conventional
    reduction for np.random.seed.
    """
    seed = torch.initial_seed()
    np.random.seed((seed * worker_id) % (2 ** 32))
def groupwise(iterable: Iterable[_UT0], n: int, fill: bool=True, fillvalue: _UT1=None) -> Iterator[Tuple[(Union[(_UT0, _UT1)], ...)]]:
    """Yield sliding windows of width `n` over `iterable`.

    With fill=True the windows are padded: copy i is prefixed with
    (n - i - 1) fillvalues and the tail is padded via zip_longest, so every
    element appears in every window position. With fill=False only full
    (strict) windows are produced.
    """
    copies = []
    for offset, it in enumerate(itertools.tee(iterable, n)):
        if not fill:
            # Strict mode: advance copy #offset so windows align element-wise.
            for _ in range(offset):
                next(it, None)
        if fill and offset < n - 1:
            # Padded mode: lead-in fillvalues for the earlier window positions.
            it = itertools.chain([fillvalue] * (n - offset - 1), it)
        copies.append(it)
    if fill:
        return itertools.zip_longest(*copies, fillvalue=fillvalue)
    return zip(*copies)
def get_model():
    """Build a ResNet20-family CIFAR-10 classifier with in-graph augmentation."""
    n = 2
    depth = n * 9 + 2
    n_blocks = (depth - 2) // 9 - 1
    inputs = layers.Input(shape=(32, 32, 3))
    # Augmentation runs inside the graph, before the convolutional stem.
    x = get_augmentation_layers()(inputs)
    x = resnet20.stem(x)
    x = resnet20.learner(x, n_blocks)
    outputs = resnet20.classifier(x, 10)
    return tf.keras.Model(inputs, outputs)
def lr_func_steps_with_relative_lrs(cfg, cur_epoch):
    """Piecewise-constant schedule: relative LR of the current step x base LR."""
    step = get_step_index(cfg, cur_epoch)
    return cfg.SOLVER.LRS[step] * cfg.SOLVER.BASE_LR
def word_tokenize(tokens):
    """NLTK-tokenize `tokens`, mapping PTB quote conventions back to plain quotes."""
    normalized = []
    for token in nltk.word_tokenize(tokens):
        # The Penn Treebank tokenizer emits `` and '' for quotes.
        normalized.append(token.replace("''", '"').replace('``', '"'))
    return normalized
def vec_vec_wise_multiplication(q, p):
    """Component-wise quaternion product of vectors q and p.

    Expands q into its four quaternion component matrices and multiplies each
    with p, concatenating the four products along dim=1.
    """
    (q_r, q_i, q_j, q_k) = make_wise_quaternion(q)
    products = [get_quaternion_wise_mul(component * p)
                for component in (q_r, q_i, q_j, q_k)]
    return torch.cat(products, dim=1)
def rebuild_tensor(cls, storage, metadata):
    """Reconstruct a tensor (or nn.Parameter) from storage plus metadata.

    `metadata` is (storage_offset, size, stride, requires_grad). Parameters
    are rebuilt through the Parameter constructor; plain tensors just have
    requires_grad restored.
    """
    (storage_offset, size, stride, requires_grad) = metadata
    tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
    if cls == torch.nn.parameter.Parameter:
        return torch.nn.parameter.Parameter(tensor, requires_grad=requires_grad)
    tensor.requires_grad = requires_grad
    return tensor
def VFE():
    """Video feature extraction: run the full per-video analysis pipeline
    over every crash video in the videos directory.

    Side effect: changes the process working directory to the Code folder
    (the subsequent relative paths depend on it).

    Fix vs. original: removed the unused local `pathOut` (assigned, never read).
    """
    os.chdir('./medirl-master/Code/')
    VideoDir = './medirl-master/videos/crash-video'
    videos = glob.glob(VideoDir + '/*.mp4')
    for v in videos:
        objectDection(v, VideoDir)
        generateFrame(v, VideoDir)
        combineCSV(v, VideoDir)
        showLight(v, VideoDir)
        show_hsv_equalized(v, VideoDir)
        hsvThreshold(v, VideoDir)
        LuminosityStat(v, VideoDir)
        detectRed(v, VideoDir)
class ViT(nn.Module):
    """Vision Transformer backbone returning a 2D feature map.

    Patch-embeds the image, adds absolute position embeddings (resized from the
    pretraining grid), runs ``depth`` transformer blocks and reshapes the tokens
    back to (B, embed_dim, Hp, Wp). Per-block LoRA / ToMe / RepAdapter insertion
    is controlled by the ``*_info`` dicts; ``freeze`` freezes everything except
    the adapter parameters.
    """

    def __init__(self, img_size=1024, patch_size=16, in_chans=3, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True, drop_path_rate=0.0, init_values=None, norm_pre_=False, norm_post=True, norm_layer=nn.LayerNorm, act_layer=nn.GELU, swiglu=False, use_abs_pos=True, use_rel_pos=False, use_act_checkpoint=False, pretrain_img_size=224, pretrain_use_cls_token=True, xformers=True, freeze=False, lora_info=dict, tome_info=dict, repadapter_info=dict):
        # NOTE(review): lora_info/tome_info/repadapter_info default to the ``dict``
        # *type*, not a dict instance — the subscripting below would fail unless
        # callers always pass real dicts; confirm the intended defaults.
        super().__init__()
        self.pretrain_use_cls_token = pretrain_use_cls_token
        self.norm_pre_ = norm_pre_
        self.norm_post = norm_post
        self.patch_embed = PatchEmbed(kernel_size=(patch_size, patch_size), stride=(patch_size, patch_size), in_chans=in_chans, embed_dim=embed_dim)
        if use_abs_pos:
            # position table sized for the *pretraining* grid; resized in forward()
            num_patches = ((pretrain_img_size // patch_size) * (pretrain_img_size // patch_size))
            num_positions = ((num_patches + 1) if pretrain_use_cls_token else num_patches)
            self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
        else:
            self.pos_embed = None
        if self.norm_pre_:
            self.norm_pre = norm_layer(embed_dim)
        # stochastic depth: drop-path rate increases linearly per block
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList()
        for i in range(depth):
            block = Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_path=dpr[i], init_values=init_values, norm_layer=norm_layer, act_layer=act_layer, swiglu=swiglu, use_rel_pos=use_rel_pos, input_size=((img_size // patch_size), (img_size // patch_size)), xformers=xformers, use_lora=(i in lora_info['lora_block_indexes']), lora_info=lora_info, use_tome=(i in tome_info['merge_attn_indexes']), tome_info=tome_info, use_repadapter=(i in repadapter_info['adapter_block_indexes']), repadapter_info=repadapter_info)
            if use_act_checkpoint:
                # trade compute for memory via activation checkpointing
                block = checkpoint_wrapper(block)
            self.blocks.append(block)
        if self.norm_post:
            self.norm = norm_layer(embed_dim)
        if (self.pos_embed is not None):
            trunc_normal_(self.pos_embed, std=0.02)
        self.apply(self._init_weights)
        if freeze:
            self._freeze_backbone()

    def _init_weights(self, m):
        """Truncated-normal init for Linear layers (LoRA layers excluded), unit LayerNorm."""
        if (isinstance(m, nn.Linear) and (not isinstance(m, LoRALinear))):
            trunc_normal_(m.weight, std=0.02)
            if (isinstance(m, nn.Linear) and (m.bias is not None)):
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def _freeze_backbone(self):
        """Freeze all backbone parameters, then re-enable LoRA / RepAdapter /
        adapter-MLP parameters so only the adapters train."""
        for param in self.patch_embed.parameters():
            param.requires_grad = False
        if (self.pos_embed is not None):
            self.pos_embed.requires_grad = False
        if self.norm_pre_:
            for param in self.norm_pre.parameters():
                param.requires_grad = False
        for param in self.blocks.parameters():
            param.requires_grad = False
        if self.norm_post:
            for param in self.norm.parameters():
                param.requires_grad = False
        # selectively re-enable the adapter parameters
        for (module_name, module) in self.named_modules():
            for (module_param_name, value) in module.named_parameters(recurse=False):
                if (isinstance(module, LoRALinear) and ('lora' in module_param_name)):
                    value.requires_grad = True
                if (isinstance(module, RepAdapter) or ('adapter_mlp' in module_name)):
                    value.requires_grad = True

    def forward(self, x):
        """(B, C, H, W) image -> (B, embed_dim, Hp, Wp) feature map."""
        (B, _, _, _) = x.size()
        (x, (Hp, Wp)) = self.patch_embed(x)
        if (self.pos_embed is not None):
            # resize the pretraining position table to the current patch grid
            x = (x + resize_pos_embed(self.pos_embed, (1 if self.pretrain_use_cls_token else 0), (Hp, Wp)))
        if self.norm_pre_:
            x = self.norm_pre(x)
        for (idx, blk) in enumerate(self.blocks):
            x = blk(x, Hp, Wp)
        if self.norm_post:
            x = self.norm(x)
        # tokens -> channels-first spatial map
        x = x.contiguous().view(B, Hp, Wp, (- 1)).permute(0, 3, 1, 2)
        return x
def test_assert_file_exists():
    """Exercise common.assert_file_exists: missing file, md5 match, mismatch, alternate md5."""
    with tempfile.TemporaryDirectory(dir=TEST_WORKING_DIR) as test_dir:
        filename = os.path.join(test_dir, 'test.txt')
        # nothing written yet -> must raise
        with pytest.raises(FileNotFoundError):
            common.assert_file_exists(filename)
        with open(filename, 'w', encoding='utf-8') as handle:
            handle.write('Unban mox opal!')
        expected_md5 = '44dbf21b4e89cea5184615a72a825a36'
        # plain existence and correct md5 both pass
        common.assert_file_exists(filename)
        common.assert_file_exists(filename, md5=expected_md5)
        # wrong md5 (with or without an equally wrong alternate) fails
        with pytest.raises(ValueError):
            common.assert_file_exists(filename, md5='12345')
        with pytest.raises(ValueError):
            common.assert_file_exists(filename, md5='12345', alternate_md5='12345')
        # a correct alternate md5 rescues a wrong primary md5
        common.assert_file_exists(filename, md5='12345', alternate_md5=expected_md5)
def test():
    """Categorical arrays survive to_packed, both whole and sliced."""
    pytest.importorskip('pyarrow')
    categorical = ak.str.to_categorical(['one', 'two', 'one', 'three', 'one', 'four'])
    assert ak.is_categorical(categorical)
    packed = ak.to_packed(categorical)
    assert packed.type == categorical.type
    assert ak.all(ak.categories(packed) == ak.categories(categorical))
    # packing a slice must keep the categorical behavior and full category set
    subset_packed = ak.to_packed(categorical[:-1])
    assert ak.is_categorical(subset_packed)
    assert subset_packed.type.content == categorical.type.content
    assert ak.all(ak.categories(subset_packed) == ak.categories(categorical))
_to_string  # NOTE(review): looks like a decorator that lost its '@' prefix during extraction (jinja2 uses @implements_to_string here) — confirm against the original file
class TemplateNotFound(IOError, LookupError, TemplateError):
    """Raised when a template does not exist.

    Inherits both IOError and LookupError so callers catching either type work.
    """

    # default until __init__ fills it in
    message = None

    def __init__(self, name, message=None):
        IOError.__init__(self, name)
        if (message is None):
            from .runtime import Undefined
            # an Undefined template name raises its own, more informative error
            if isinstance(name, Undefined):
                name._fail_with_undefined_error()
            message = name
        self.message = message
        self.name = name
        # subclasses (e.g. TemplatesNotFound) may list several candidates
        self.templates = [name]

    def __str__(self):
        return self.message
def from_pretty_midi_time_signature(time_signature: PmTimeSignature) -> TimeSignature:
    """Convert a pretty_midi time signature into the local TimeSignature type.

    RuntimeWarnings raised during the conversion are suppressed.
    """
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=RuntimeWarning)
        seconds = float(time_signature.time)
        return TimeSignature(
            time=seconds,
            numerator=time_signature.numerator,
            denominator=time_signature.denominator,
        )
def resample_folder(input_folder, output_folder, fs, regex):
    """Resample every audio file under *input_folder* matching *regex* to *fs* Hz,
    peak-normalize it, and write it under *output_folder* with the same relative path.

    Args:
        input_folder: root folder to scan for audio files.
        output_folder: destination root; sub-directories are created as needed.
        fs: target sampling rate in Hz.
        regex: filename pattern passed to ``get_all_files(match_and=...)``.
    """
    files = get_all_files(input_folder, match_and=[regex])
    for f in tqdm.tqdm(files):
        # BUG FIX: the original unpacked the returned sample rate into ``fs``,
        # shadowing the target-rate parameter; keep them separate.
        audio, out_fs = torchaudio.sox_effects.apply_effects_file(f, [['rate', str(fs)]])
        # peak-normalize per channel
        audio = audio / torch.max(torch.abs(audio), dim=-1, keepdim=True)[0]
        rel_path = Path(f).relative_to(Path(input_folder))
        out_path = os.path.join(output_folder, rel_path)
        os.makedirs(Path(out_path).parent, exist_ok=True)
        torchaudio.save(out_path, audio, out_fs)
class PAN(nn.Module):
    """DeepLab-style segmentation head: backbone features -> ASPP -> fuse with a
    low-level shortcut -> per-pixel class scores at input resolution."""

    def __init__(self, cfg):
        super(PAN, self).__init__()
        self.backbone = build_backbone(cfg.MODEL_BACKBONE)
        self.backbone_layers = self.backbone.get_layers()
        # NOTE(review): hard-coded to a backbone whose last stage outputs 1024
        # channels — confirm this matches cfg.MODEL_BACKBONE.
        input_channel = 1024
        self.aspp = ASPP(dim_in=input_channel, dim_out=cfg.MODEL_ASPP_OUTDIM, resolution_in=cfg.MODEL_ASPP_RESOLUTION)
        # used twice in forward(): once after ASPP, once after the head
        self.upsample = nn.UpsamplingBilinear2d(scale_factor=4)
        # project the shallow (64-channel) feature map for fusion
        self.shortcut_conv = nn.Sequential(nn.Conv2d(64, cfg.MODEL_SHORTCUT_DIM, 1, 1, padding=0), nn.ReLU(inplace=True))
        # fuse ASPP + shortcut, then predict class scores
        self.cat_conv = nn.Sequential(nn.Conv2d((cfg.MODEL_ASPP_OUTDIM + cfg.MODEL_SHORTCUT_DIM), cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1), nn.ReLU(inplace=True), nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1), nn.ReLU(inplace=True), nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0))

    def forward(self, x):
        x = self.backbone(x)
        # intermediate feature maps recorded during the backbone pass
        layers = self.backbone.get_layers()
        feature_aspp = self.aspp(layers[(- 1)])
        feature_aspp = self.upsample(feature_aspp)
        feature_shallow = self.shortcut_conv(layers[0])
        feature_cat = torch.cat([feature_aspp, feature_shallow], 1)
        result = self.cat_conv(feature_cat)
        result = self.upsample(result)
        return result
def search(keyword, per_search=100, offset=0):
    """Query the 3D Warehouse search endpoint for Industrial-domain models.

    Args:
        keyword: free-text search query.
        per_search: maximum number of records to return.
        offset: pagination offset into the result set.

    Returns:
        The decoded JSON response body.

    Raises:
        ConnectionError: if the server does not respond with HTTP 200.
    """
    payload = {'count': per_search, 'recordEvent': 'false', 'q': keyword, 'fq': 'attribute:categories:domain:string=="Industrial";binaryNames=exists=true', 'showBinaryMetadata': 'true', 'showAttributes': 'false', 'showBinaryAttributes': 'true', 'offset': offset, 'contentType': '3dw'}
    r = requests.get(url, params=payload)
    if (r.status_code == 200):
        return r.json()
    # BUG FIX: the original raised a bare ``ConnectionError`` class with no
    # context; include the failing status code so callers can diagnose.
    raise ConnectionError('search request failed with HTTP status {}'.format(r.status_code))
def train_epoch_with_interactions(interaction_batches, params, model, randomize=True):
    """Run one training epoch over interaction batches and return the mean loss.

    Two known-problematic examples (a specific ATIS interaction, and the
    baseball_1 database when training on SParC) are skipped.
    """
    if randomize:
        random.shuffle(interaction_batches)
    progbar = get_progressbar('train ', len(interaction_batches))
    progbar.start()
    loss_sum = 0.0
    for batch_index, batch in enumerate(interaction_batches):
        assert len(batch) == 1
        interaction = batch.items[0]
        if interaction.identifier == 'raw/atis2/12-1.1/ATIS2/TEXT/TEST/NOV92/770/5':
            continue
        if 'sparc' in params.data_directory and 'baseball_1' in interaction.identifier:
            continue
        loss_sum += model.train_step(interaction, params.train_maximum_sql_length)
        # release cached GPU memory between interactions
        torch.cuda.empty_cache()
        progbar.update(batch_index)
    progbar.finish()
    return loss_sum / len(interaction_batches)
class BaseYOLODetect(BaseDetDetect):
    """Base YOLO detection head: scales channel/block counts by the
    depth/width multipliers of the selected model subtype and initializes
    weights Conv/BN-style.

    BUG FIX: ``norm_cfg``/``act_cfg`` previously used mutable ``dict``
    defaults, shared across all instantiations; they now default to ``None``
    and are filled in per call with the same values.
    """

    def __init__(self, subtype='yolov6_s', cfg=None, num_classes=80, in_channels=None, channels=None, out_channels=None, num_blocks=None, depthwise=False, conv_cfg=None, norm_cfg=None, act_cfg=None):
        super(BaseYOLODetect, self).__init__()
        # per-call defaults, equivalent to the previous dict(...) defaults
        if norm_cfg is None:
            norm_cfg = dict(type='BN', requires_grad=True)
        if act_cfg is None:
            act_cfg = dict(type='ReLU')
        self.subtype = subtype
        self.cfg = cfg
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.channels = channels
        self.out_channels = out_channels
        self.num_blocks = num_blocks
        self.depthwise = depthwise
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        if (self.cfg is not None) and (self.subtype is not None):
            # e.g. 'yolov6_s' -> multipliers registered under 's'
            depth_mul, width_mul = self.cfg[self.subtype.split('_')[1]]

            def _scale(values, mul):
                # scale each count, never dropping below 1
                return list(map(lambda x: max(round(x * mul), 1), values))

            self.in_channels = _scale(self.in_channels, width_mul)
            if self.channels is not None:
                self.channels = _scale(self.channels, width_mul)
            if self.out_channels is not None:
                self.out_channels = _scale(self.out_channels, width_mul)
            if self.num_blocks is not None:
                self.num_blocks = _scale(self.num_blocks, depth_mul)
        self.init_weights()

    def init_weights(self):
        """Kaiming-uniform init for convs; YOLO-style eps/momentum for BN."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                m.eps = 0.001
                m.momentum = 0.03

    def forward(self, x):
        # implemented by concrete subclasses
        pass
def collect_vocabs(all_instances):
    """Count source/target token and edge-type frequencies over sentence pairs.

    Each instance is a (sent1, sent2) pair whose ``graph`` dicts provide
    'backbone_sequence' (tokens) and 'edges' (type-first tuples).
    """
    src_counts = Counter()
    tgt_counts = Counter()
    edge_counts = Counter()
    for src_sent, tgt_sent in all_instances:
        src_counts.update(src_sent.graph['backbone_sequence'])
        tgt_counts.update(tgt_sent.graph['backbone_sequence'])
        # edge[0] is the edge type; both sides contribute to one vocabulary
        edge_counts.update(edge[0] for edge in src_sent.graph['edges'])
        edge_counts.update(edge[0] for edge in tgt_sent.graph['edges'])
    return (src_counts, tgt_counts, edge_counts)
def PercentDegree_PDirNet(Graph, Threshold=0):
    """Thin wrapper delegating to the SNAP C++ binding ``_snap.PercentDegree_PDirNet``."""
    return _snap.PercentDegree_PDirNet(Graph, Threshold)
class MyScriptModuleWithRRefs(torch.jit.ScriptModule):
    """TorchScript module that sums tensors fetched from four remote RRefs."""

    def __init__(self, dst_worker):
        super().__init__()
        self.rrefs = []
        for _ in range(4):
            # each call asks dst_worker to create and return an RRef
            self.rrefs.append(rpc_return_rref(dst_worker))

    # NOTE(review): the line below appears to be '@torch.jit.script_method' with
    # the '@torch.jit' prefix lost in extraction — as written it is a syntax
    # error; confirm against the original file.
    .script_method
    def forward(self) -> Tensor:
        res_tensor = torch.ones(2, 2)
        for rref in self.rrefs:
            # to_here() blocks until the remote value is transferred
            res_tensor += rref.to_here()
        return res_tensor
def compute_f1(a_gold, a_pred):
    """SQuAD-style token-level F1 between a gold and a predicted answer.

    Returns 1/0 when either side tokenizes to empty (exact-match semantics),
    0 when there is no token overlap, otherwise the harmonic mean of
    precision and recall over the multiset intersection.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    overlap = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(overlap.values())
    if not gold_toks or not pred_toks:
        # empty answers: F1 is 1 only if both are empty
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)
def register_Ns3MultiModelSpectrumChannel_methods(root_module, cls):
    """Register constructors and methods of ns3::MultiModelSpectrumChannel on the
    PyBindGen class wrapper. Auto-generated binding code — edit with care."""
    cls.add_constructor([param('ns3::MultiModelSpectrumChannel const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AddRx', 'void', [param('ns3::Ptr< ns3::SpectrumPhy >', 'phy')], is_virtual=True)
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('std::size_t', 'i')], is_const=True, is_virtual=True)
    cls.add_method('GetNDevices', 'std::size_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('StartTx', 'void', [param('ns3::Ptr< ns3::SpectrumSignalParameters >', 'params')], is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('StartRx', 'void', [param('ns3::Ptr< ns3::SpectrumSignalParameters >', 'params'), param('ns3::Ptr< ns3::SpectrumPhy >', 'receiver')], visibility='private', is_virtual=True)
    return
def main(file_path):
    """Evaluate the saved SVM model on both subjects and print accuracies.

    Prints the per-subject accuracies (s0, s1) and their mean, in percent.
    (Removed the unused accumulator ``r`` from the original.)
    """
    pred_s0, pred_s1, test_label_s0, test_label_s1 = test_model(file_path)
    acc0 = metrics.accuracy_score(test_label_s0, pred_s0)
    acc1 = metrics.accuracy_score(test_label_s1, pred_s1)
    print('SVM:', 's0', (acc0 * 100), 's1', (acc1 * 100), 'mean', (((acc0 + acc1) / 2) * 100))
def make_mujoco_environment(task: str, use_envpool: bool=False, use_vec_env=False, num_envs: int=2):
    """Build a MuJoCo environment via envpool, a vectorized env, or plain gym,
    then apply the matching wrapper stack (always ending in SinglePrecisionWrapper)."""
    if use_envpool:
        env = envpool.make(task, env_type='gym', num_envs=num_envs)
        env_wrappers = [BatchEnvWrapper]
    elif use_vec_env:
        env = make_vec_env(task, n_envs=num_envs)
        env_wrappers = [BatchEnvWrapper]
    else:
        env = gym.make(task)
        env_wrappers = [wrappers.GymWrapper]
    env_wrappers.append(wrappers.SinglePrecisionWrapper)
    return wrappers.wrap_all(env, env_wrappers)
def test_NCF():
    """Smoke-test: the NCF model compiles and fits on the tiny fixture data."""
    model_name = 'NCF'
    x, y, user_feature_columns, item_feature_columns = get_xy_fd_ncf(False)
    ncf_model = NCF(user_feature_columns, item_feature_columns)
    ncf_model.compile('adam', 'binary_crossentropy')
    ncf_model.fit(x, y, batch_size=10, epochs=2, validation_split=0.5)
class PermutoFunction(torch.autograd.Function):
    """Autograd wrapper around the permutohedral-lattice C++ extension.

    NOTE(review): torch.autograd.Function's forward/backward are normally
    decorated with @staticmethod; the decorators may have been stripped during
    extraction — confirm against the original file.
    """

    def forward(ctx, q_in, features):
        q_out = permuto_cpp.forward(q_in, features)[0]
        # only ``features`` is needed for the backward pass
        ctx.save_for_backward(features)
        return q_out

    def backward(ctx, grad_q_out):
        feature_saved = ctx.saved_tensors[0]
        grad_q_back = permuto_cpp.backward(grad_q_out.contiguous(), feature_saved.contiguous())[0]
        # gradient w.r.t. q_in only; ``features`` receives no gradient
        return (grad_q_back, None)
class CellAssignModule(BaseModuleClass):
    """CellAssign model: probabilistic assignment of cells to known cell types.

    Uses a negative-binomial likelihood per (cell, gene, label) with a marker
    prior ``rho`` (genes x labels), optional batch/covariate design matrix, and
    a radial-basis parameterization of the NB dispersion.

    NOTE(review): ``B``, ``LOWER_BOUND`` and ``THETA_LOWER_BOUND`` are read as
    module-level constants not visible in this chunk — confirm their values.
    """

    def __init__(self, n_genes: int, rho: torch.Tensor, basis_means: torch.Tensor, b_g_0: Optional[torch.Tensor]=None, random_b_g_0: bool=True, n_batch: int=0, n_cats_per_cov: Optional[Iterable[int]]=None, n_continuous_cov: int=0):
        super().__init__()
        self.n_genes = n_genes
        self.n_labels = rho.shape[1]
        self.n_batch = n_batch
        self.n_cats_per_cov = n_cats_per_cov
        self.n_continuous_cov = n_continuous_cov
        # total width of the covariate design matrix (one-hot batch + continuous + one-hot cats)
        design_matrix_col_dim = (n_batch + n_continuous_cov)
        design_matrix_col_dim += (0 if (n_cats_per_cov is None) else sum(n_cats_per_cov))
        self.register_buffer('rho', rho)
        self.min_delta = 2
        # symmetric Dirichlet prior over label proportions
        dirichlet_concentration = torch.tensor(([0.01] * self.n_labels))
        self.register_buffer('dirichlet_concentration', dirichlet_concentration)
        self.shrinkage = True
        if ((b_g_0 is None) or (random_b_g_0 is True)):
            self.b_g_0 = torch.nn.Parameter(torch.randn(n_genes))
        else:
            self.b_g_0 = torch.nn.Parameter(b_g_0)
        # unnormalized log label proportions
        self.theta_logit = torch.nn.Parameter(torch.randn(self.n_labels))
        # log over-expression of marker genes per label
        self.delta_log = torch.nn.Parameter(torch.FloatTensor(self.n_genes, self.n_labels).uniform_((- 2), 2))
        self.delta_log_mean = torch.nn.Parameter(torch.zeros(1))
        self.delta_log_log_scale = torch.nn.Parameter(torch.zeros(1))
        # RBF mixture weights for the dispersion; B is a module-level constant
        self.log_a = torch.nn.Parameter(torch.zeros(B))
        if (design_matrix_col_dim == 0):
            self.beta = None
        else:
            beta_init = torch.zeros(self.n_genes, design_matrix_col_dim)
            self.beta = torch.nn.Parameter(beta_init)
        self.register_buffer('basis_means', torch.tensor(basis_means))

    def _get_inference_input(self, tensors):
        # no amortized inference network in CellAssign
        return {}

    def _get_generative_input(self, tensors, inference_outputs):
        """Assemble x, size factor and the concatenated covariate design matrix."""
        x = tensors[REGISTRY_KEYS.X_KEY]
        size_factor = tensors[REGISTRY_KEYS.SIZE_FACTOR_KEY]
        to_cat = []
        if (self.n_batch > 0):
            to_cat.append(one_hot(tensors[REGISTRY_KEYS.BATCH_KEY], self.n_batch))
        cont_key = REGISTRY_KEYS.CONT_COVS_KEY
        if (cont_key in tensors.keys()):
            to_cat.append(tensors[cont_key])
        cat_key = REGISTRY_KEYS.CAT_COVS_KEY
        if (cat_key in tensors.keys()):
            # one-hot each categorical covariate column separately
            for (cat_input, n_cat) in zip(torch.split(tensors[cat_key], 1, dim=1), self.n_cats_per_cov):
                to_cat.append(one_hot(cat_input, n_cat))
        design_matrix = (torch.cat(to_cat, dim=1) if (len(to_cat) > 0) else None)
        input_dict = {'x': x, 'size_factor': size_factor, 'design_matrix': design_matrix}
        return input_dict

    # NOTE(review): ``_move_data`` looks like a decorator (e.g. @auto_move_data)
    # that lost its '@' during extraction — confirm against the original file.
    _move_data
    def inference(self):
        return {}

    _move_data
    def generative(self, x, size_factor, design_matrix=None):
        """Compute NB likelihood terms and assignment probabilities gamma.

        Builds log-mean = log(size factor) + covariates + delta*rho + b_g_0 per
        (cell, gene, label), an RBF-parameterized dispersion phi, and the
        posterior label probabilities gamma via logsumexp normalization.
        """
        delta = torch.exp(self.delta_log)
        theta_log = F.log_softmax(self.theta_logit, dim=(- 1))
        n_cells = size_factor.shape[0]
        base_mean = torch.log(size_factor)
        base_mean = base_mean.unsqueeze((- 1)).expand(n_cells, self.n_genes, self.n_labels)
        if (design_matrix is not None):
            covariates = torch.einsum('np,gp->gn', design_matrix, self.beta)
            covariates = torch.transpose(covariates, 0, 1).unsqueeze((- 1))
            covariates = covariates.expand(n_cells, self.n_genes, self.n_labels)
            base_mean = (base_mean + covariates)
        b_g_0 = self.b_g_0.unsqueeze((- 1)).expand(n_cells, self.n_genes, self.n_labels)
        # marker over-expression applies only where rho > 0
        delta_rho = (delta * self.rho)
        delta_rho = delta_rho.expand(n_cells, self.n_genes, self.n_labels)
        log_mu_ngc = ((base_mean + delta_rho) + b_g_0)
        mu_ngc = torch.exp(log_mu_ngc)
        a = torch.exp(self.log_a)
        a = a.expand(n_cells, self.n_genes, self.n_labels, B)
        # RBF bandwidth from the basis-mean spacing
        b_init = (2 * ((self.basis_means[1] - self.basis_means[0]) ** 2))
        b = torch.exp((torch.ones(B, device=x.device) * (- torch.log(b_init))))
        b = b.expand(n_cells, self.n_genes, self.n_labels, B)
        mu_ngcb = mu_ngc.unsqueeze((- 1)).expand(n_cells, self.n_genes, self.n_labels, B)
        basis_means = self.basis_means.expand(n_cells, self.n_genes, self.n_labels, B)
        # dispersion as an RBF mixture over the mean, floored at LOWER_BOUND
        phi = (torch.sum((a * torch.exp(((- b) * torch.square((mu_ngcb - basis_means))))), 3) + LOWER_BOUND)
        nb_pdf = NegativeBinomial(mu=mu_ngc, theta=phi)
        x_ = x.unsqueeze((- 1)).expand(n_cells, self.n_genes, self.n_labels)
        x_log_prob_raw = nb_pdf.log_prob(x_)
        theta_log = theta_log.expand(n_cells, self.n_labels)
        # unnormalized log p(x, c): sum over genes plus log prior over labels
        p_x_c = (torch.sum(x_log_prob_raw, 1) + theta_log)
        normalizer_over_c = torch.logsumexp(p_x_c, 1)
        normalizer_over_c = normalizer_over_c.unsqueeze((- 1)).expand(n_cells, self.n_labels)
        gamma = torch.exp((p_x_c - normalizer_over_c))
        return {'mu': mu_ngc, 'phi': phi, 'gamma': gamma, 'p_x_c': p_x_c, 's': size_factor}

    def loss(self, tensors, inference_outputs, generative_outputs, n_obs: int=1.0):
        """Expected negative log-likelihood plus Dirichlet(theta) and Normal(delta) priors."""
        p_x_c = generative_outputs['p_x_c']
        gamma = generative_outputs['gamma']
        # per-cell expected negative log-likelihood under gamma
        q_per_cell = torch.sum((gamma * (- p_x_c)), 1)
        theta_log = F.log_softmax(self.theta_logit, dim=(- 1))
        theta_log_prior = Dirichlet(self.dirichlet_concentration)
        theta_log_prob = (- theta_log_prior.log_prob((torch.exp(theta_log) + THETA_LOWER_BOUND)))
        prior_log_prob = theta_log_prob
        # shrinkage prior on delta_log, applied only at marker positions (rho > 0)
        delta_log_prior = Normal(self.delta_log_mean, self.delta_log_log_scale.exp().sqrt())
        delta_log_prob = torch.masked_select(delta_log_prior.log_prob(self.delta_log), (self.rho > 0))
        prior_log_prob += (- torch.sum(delta_log_prob))
        loss = (((torch.mean(q_per_cell) * n_obs) + prior_log_prob) / n_obs)
        return LossOutput(loss=loss, reconstruction_loss=q_per_cell, kl_local=torch.zeros_like(q_per_cell), kl_global=prior_log_prob)

    # NOTE(review): ``_mode()`` looks like a decorator call (e.g.
    # @torch.inference_mode()) that lost its prefix during extraction — confirm.
    _mode()
    def sample(self, tensors, n_samples=1, library_size=1):
        raise NotImplementedError('No sampling method for CellAssign')
def _apply_commands(custom_options, ebase, images_dir):
    """Apply 'command*' and 'image*' entries from *custom_options*.

    'command*' values are executed as subprocesses (failing loudly on a
    non-zero exit); 'image*' values are copied into *images_dir* under a
    figure name derived from *ebase*.
    """
    for key, val in custom_options.items():
        if key.startswith('command'):
            subprocess.run(val.split()).check_returncode()
        if key.startswith('image'):
            destination = os.path.join(images_dir, _make_fig_name(ebase2fbase(ebase), val))
            shutil.copy(val, destination)
def _seg_35():
    """Auto-generated IDNA/UTS#46 mapping-table segment.

    Each tuple is (codepoint, status[, mapping]); statuses: 'V' valid,
    'M' mapped, 'X' disallowed. Generated data — do not edit by hand.
    """
    return [(13270, 'M', u'mol'), (13271, 'M', u'ph'), (13272, 'X'), (13273, 'M', u'ppm'), (13274, 'M', u'pr'), (13275, 'M', u'sr'), (13276, 'M', u'sv'), (13277, 'M', u'wb'), (13278, 'M', u'vm'), (13279, 'M', u'am'), (13280, 'M', u'1'), (13281, 'M', u'2'), (13282, 'M', u'3'), (13283, 'M', u'4'), (13284, 'M', u'5'), (13285, 'M', u'6'), (13286, 'M', u'7'), (13287, 'M', u'8'), (13288, 'M', u'9'), (13289, 'M', u'10'), (13290, 'M', u'11'), (13291, 'M', u'12'), (13292, 'M', u'13'), (13293, 'M', u'14'), (13294, 'M', u'15'), (13295, 'M', u'16'), (13296, 'M', u'17'), (13297, 'M', u'18'), (13298, 'M', u'19'), (13299, 'M', u'20'), (13300, 'M', u'21'), (13301, 'M', u'22'), (13302, 'M', u'23'), (13303, 'M', u'24'), (13304, 'M', u'25'), (13305, 'M', u'26'), (13306, 'M', u'27'), (13307, 'M', u'28'), (13308, 'M', u'29'), (13309, 'M', u'30'), (13310, 'M', u'31'), (13311, 'M', u'gal'), (13312, 'V'), (19894, 'X'), (19904, 'V'), (40944, 'X'), (40960, 'V'), (42125, 'X'), (42128, 'V'), (42183, 'X'), (42192, 'V'), (42540, 'X'), (42560, 'M', u''), (42561, 'V'), (42562, 'M', u''), (42563, 'V'), (42564, 'M', u''), (42565, 'V'), (42566, 'M', u''), (42567, 'V'), (42568, 'M', u''), (42569, 'V'), (42570, 'M', u''), (42571, 'V'), (42572, 'M', u''), (42573, 'V'), (42574, 'M', u''), (42575, 'V'), (42576, 'M', u''), (42577, 'V'), (42578, 'M', u''), (42579, 'V'), (42580, 'M', u''), (42581, 'V'), (42582, 'M', u''), (42583, 'V'), (42584, 'M', u''), (42585, 'V'), (42586, 'M', u''), (42587, 'V'), (42588, 'M', u''), (42589, 'V'), (42590, 'M', u''), (42591, 'V'), (42592, 'M', u''), (42593, 'V'), (42594, 'M', u''), (42595, 'V'), (42596, 'M', u''), (42597, 'V'), (42598, 'M', u''), (42599, 'V'), (42600, 'M', u''), (42601, 'V'), (42602, 'M', u''), (42603, 'V'), (42604, 'M', u''), (42605, 'V'), (42624, 'M', u''), (42625, 'V')]
def test_significance(estimate: float, simulations: List) -> float: mean_refute_value = np.mean(simulations) std_dev_refute_values = np.std(simulations) z_score = ((estimate - mean_refute_value) / std_dev_refute_values) if (z_score > 0): p_value = (1 - st.norm.cdf(z_score)) else: p_value = st.norm.cdf(z_score) return p_value
class PunktSentenceSplitter():
    """Sentence splitter backed by NLTK's Punkt models.

    Loads the Punkt pickle for the requested language (or an explicit data
    path); load failures are logged rather than raised.
    """

    def __init__(self, language='en', punkt_data_path=None):
        self.lang2datapath = {'en': 'tokenizers/punkt/english.pickle'}
        self.log = log.get_global_console_logger()
        try:
            import nltk.data
        except ImportError:
            self.log.error("Cannot import NLTK data for the sentence splitter. Please check if the 'punkt' NLTK-package is installed correctly.")
        try:
            if (not punkt_data_path):
                punkt_data_path = self.lang2datapath[language]
            self.sent_detector = nltk.data.load(punkt_data_path)
        except KeyError:
            # unknown language with no explicit data path
            self.log.error('No sentence splitter data for language {}.'.format(language))
        except Exception:
            # was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt propagate
            self.log.error('Could not load sentence splitter data: {}'.format(self.lang2datapath[language]))

    def split(self, text):
        """Return the list of sentences in *text* (cleaned and stripped first)."""
        text = cleanup(text)
        return self.sent_detector.tokenize(text.strip())


def split_files(input_dir, output_dir, lang='en', punkt_data_path=None):
    """Sentence-split every file in *input_dir*, writing results to *output_dir*.

    BUG FIX: the original passed the undefined names ``inputdir``/``outputdir``
    (NameError at runtime) instead of the ``input_dir``/``output_dir`` parameters.
    """
    ss = PunktSentenceSplitter(lang, punkt_data_path)
    DirectoryProcessor.process(input_dir, output_dir, ss.split)
class SMORE(AbstractFormulation):
    """SMORE-style traffic-engineering formulation: route demands over a
    precomputed set of Raeke (oblivious-routing) paths, solved as a Gurobi LP.

    NOTE(review): several methods look like they lost decorators during
    extraction — new_max_link_util/new_total_flow/fib_entries take ``cls``
    (presumably @classmethod), paths_full_fname_txt/pkl take no ``self``
    (presumably @staticmethod), and runtime/obj_val are read as attributes
    elsewhere (presumably @property). Confirm against the original file.
    """

    def new_max_link_util(cls, num_paths, out=sys.stdout):
        # Alternate constructor: minimize maximum link utilization.
        return cls(objective=Objective.MIN_MAX_LINK_UTIL, num_paths=num_paths, DEBUG=True, VERBOSE=False, out=out)

    def new_total_flow(cls, num_paths, out=sys.stdout):
        # Alternate constructor: maximize total routed flow.
        return cls(objective=Objective.TOTAL_FLOW, num_paths=num_paths, DEBUG=True, VERBOSE=False, out=out)

    def __init__(self, *, objective, num_paths, DEBUG, VERBOSE, out=None):
        super().__init__(objective, DEBUG, VERBOSE, out)
        # number of candidate paths per commodity in the path files
        self._num_paths = num_paths

    def _construct_total_flow_lp(self, G, edge_to_paths, num_total_paths):
        """Build the max-total-flow LP: one nonnegative flow variable per path."""
        m = Model('total-flow')
        path_vars = m.addVars(num_total_paths, vtype=GRB.CONTINUOUS, lb=0.0, name='f')
        obj = quicksum(path_vars)
        m.setObjective(obj, GRB.MAXIMIZE)
        commod_id_to_path_inds = {}
        for (k, d_k, path_inds) in self.commodities:
            commod_id_to_path_inds[k] = path_inds
            # demand cap: a commodity's total path flow cannot exceed its demand
            m.addConstr((quicksum((path_vars[p] for p in path_inds)) <= d_k))
        for (u, v, c_e) in G.edges.data('capacity'):
            paths = edge_to_paths[(u, v)]
            constr_vars = [path_vars[p] for p in paths]
            # capacity constraint per edge
            m.addConstr((quicksum(constr_vars) <= c_e))
        return LpSolver(m, None, self.DEBUG, self.VERBOSE, self.out)

    def _construct_smore_lp(self, G, edge_to_paths, num_total_paths):
        """Build the min-max-link-utilization LP over per-path split ratios in [0, 1]."""
        m = Model('min-edge-util')
        path_vars = m.addVars(num_total_paths, vtype=GRB.CONTINUOUS, lb=0.0, ub=1.0, name='f')
        max_link_util_var = m.addVar(vtype=GRB.CONTINUOUS, lb=0.0, name='z')
        m.update()
        m.setObjective(max_link_util_var, GRB.MINIMIZE)
        for (k, d_k, path_ids) in self.commodities:
            # split ratios sum to 1: all demand is routed
            m.addConstr((quicksum((path_vars[p] for p in path_ids)) == 1))
        for (u, v, c_e) in G.edges.data('capacity'):
            paths = edge_to_paths[(u, v)]
            # edge utilization = sum of (ratio * demand) / capacity; commodities[..][-2] is d_k
            constr_vars = [(path_vars[p] * self.commodities[self._path_to_commod[p]][(- 2)]) for p in paths]
            m.addConstr(((quicksum(constr_vars) / c_e) <= max_link_util_var))
        return LpSolver(m, None, self.DEBUG, self.VERBOSE, self.out)

    def paths_full_fname_txt(problem, num_paths):
        # per-problem Raeke path file (text form)
        return os.path.join(PATHS_DIR, '{}-{}-paths-rrt.txt'.format(problem.name, num_paths))

    def paths_full_fname_pkl(problem, num_paths):
        # cached, already-parsed path dict (pickle form)
        return os.path.join(PATHS_DIR, '{}-{}-paths-rrt.pkl'.format(problem.name, num_paths))

    def pre_solve(self, problem=None):
        """Load/parse the Raeke path set and index it for LP construction.

        Populates self.commodities, self._all_paths and self._path_to_commod,
        and returns (edge -> path-id list, total path count). Parsed text files
        are cached as pickles for subsequent runs.
        """
        if (problem is None):
            problem = self.problem
        paths_fname_txt = SMORE.paths_full_fname_txt(problem, self._num_paths)
        paths_fname_pkl = SMORE.paths_full_fname_pkl(problem, self._num_paths)
        if os.path.exists(paths_fname_pkl):
            self._print('Loading Raeke paths from pickle file', paths_fname_pkl)
            with open(paths_fname_pkl, 'rb') as r:
                paths_dict = pickle.load(r)
        else:
            self._print('Loading Raeke paths from text file', paths_fname_txt)
            try:
                with open(paths_fname_txt, 'r') as f:
                    new_src_and_sink = True
                    (src, target) = (None, None)
                    paths_dict = {}
                    for line in f:
                        line = line.strip()
                        if (line == ''):
                            # blank line separates (src, sink) groups
                            new_src_and_sink = True
                            continue
                        if new_src_and_sink:
                            # header line like "hX -> hY:" — parse the endpoints
                            parts = line[:(- 2)].split(' -> ')
                            (src, target) = (int(parts[0][1:]), int(parts[1][1:]))
                            paths_dict[(src, target)] = []
                            new_src_and_sink = False
                        else:
                            # path line: list of edges; rebuild the node sequence
                            path = [src]
                            path_str = line[1:line.rindex(']')]
                            for edge_str in path_str.split(', '):
                                v = int(edge_str.split(',')[(- 1)][1:(- 1)])
                                path.append(v)
                            paths_dict[(src, target)].append(remove_cycles(path))
                self._print('Saving Raeke paths to pickle file')
                with open(paths_fname_pkl, 'wb') as w:
                    pickle.dump(paths_dict, w)
            except FileNotFoundError as e:
                self._print('Unable to find {}'.format(paths_fname_txt))
                raise e
        self.commodities = []
        edge_to_paths = defaultdict(list)
        self._path_to_commod = {}
        self._all_paths = []
        path_i = 0
        for (k, (s_k, t_k, d_k)) in problem.commodity_list:
            paths = paths_dict[(s_k, t_k)]
            path_ids = []
            for path in paths:
                self._all_paths.append(path)
                for edge in path_to_edge_list(path):
                    edge_to_paths[edge].append(path_i)
                path_ids.append(path_i)
                self._path_to_commod[path_i] = k
                path_i += 1
            self.commodities.append((k, d_k, path_ids))
        self._print('pre_solve done')
        return (edge_to_paths, path_i)

    def _construct_lp(self, sat_flows=[]):
        """Dispatch to the LP builder matching the configured objective.

        NOTE(review): mutable default argument; ``sat_flows`` is unused here.
        """
        (edge_to_paths, num_paths) = self.pre_solve()
        if (self._objective == Objective.TOTAL_FLOW):
            self._print('Constructing Total Flow LP')
            return self._construct_total_flow_lp(self.problem.G, edge_to_paths, num_paths)
        elif (self._objective == Objective.MIN_MAX_LINK_UTIL):
            self._print('Constructing SMORE LP')
            return self._construct_smore_lp(self.problem.G, edge_to_paths, num_paths)

    def sol_dict(self):
        """Map each commodity key to its [(edge, flow)] solution list (cached)."""
        if (not hasattr(self, '_sol_dict')):
            sol_dict_def = defaultdict(list)
            for var in self.model.getVars():
                if (var.varName.startswith('f[') and (var.x != 0.0)):
                    match = re.match('f\\[(\\d+)\\]', var.varName)
                    p = int(match.group(1))
                    commod_key = self.problem.commodity_list[self._path_to_commod[p]]
                    d_k = commod_key[(- 1)][(- 1)]
                    # under MIN_MAX_LINK_UTIL the variable is a split ratio; scale by demand
                    flow_val = ((var.x * d_k) if (self._objective == Objective.MIN_MAX_LINK_UTIL) else var.x)
                    sol_dict_def[commod_key] += [(edge, flow_val) for edge in path_to_edge_list(self._all_paths[p])]
            self._sol_dict = {}
            sol_dict_def = dict(sol_dict_def)
            # ensure every commodity has an entry, even with zero flow
            for commod_key in self.problem.commodity_list:
                if (commod_key in sol_dict_def):
                    self._sol_dict[commod_key] = sol_dict_def[commod_key]
                else:
                    self._sol_dict[commod_key] = []
        return self._sol_dict

    def sol_mat(self):
        """Dense flow matrix indexed [edge_idx, commodity id] from solved variables."""
        edge_idx = self.problem.edge_idx
        sol_mat = np.zeros((len(edge_idx), len(self._path_to_commod)), dtype=np.float32)
        for var in self.model.getVars():
            if (var.varName.startswith('f[') and (var.x != 0.0)):
                match = re.match('f\\[(\\d+)\\]', var.varName)
                p = int(match.group(1))
                commod_key = self.problem.commodity_list[self._path_to_commod[p]]
                (k, d_k) = (commod_key[0], commod_key[(- 1)][(- 1)])
                for edge in path_to_edge_list(self._all_paths[p]):
                    sol_mat[(edge_idx[edge], k)] += (var.x * d_k)
        return sol_mat

    def total_flow(self):
        """Total routed flow; recomputed from the solution when the objective differs."""
        if (self._objective == Objective.TOTAL_FLOW):
            return self.obj_val
        else:
            sol_dict = self.sol_dict()
            total_flow = 0.0
            for commod_key in self.problem.commodity_list:
                flow_list = sol_dict[commod_key]
                # out-flow at the source node equals the commodity's routed flow
                flow = compute_in_or_out_flow(flow_list, 0, {commod_key[(- 1)][0]})
                assert (flow <= commod_key[(- 1)][(- 1)])
                total_flow += flow
            return total_flow

    def fib_entries(cls, problem, num_paths):
        """FIB entry counts for the path set of *problem* (takes ``cls`` — see class note)."""
        assert problem.is_traffic_matrix_full
        smore = cls.new_max_link_util(num_paths=num_paths)
        smore.pre_solve(problem)
        return smore.num_fib_entries_for_path_set()

    def num_fib_entries_for_path_set(self):
        """Count per-node forwarding entries; returns (total, max per node)."""
        self.fib_dict = defaultdict(dict)
        for (k, _, path_ids) in self.commodities:
            commod_id_str = 'k-{}'.format(k)
            # the source node stores the commodity -> path-set mapping
            src = list(path_to_edge_list(self._all_paths[path_ids[0]]))[0][0]
            self.fib_dict[src][commod_id_str] = path_ids
            for path_id in path_ids:
                # every node on the path stores a path-id -> next-hop entry
                for (u, v) in path_to_edge_list(self._all_paths[path_id]):
                    assert (path_id not in self.fib_dict[u])
                    self.fib_dict[u][path_id] = v
        self.fib_dict = dict(self.fib_dict)
        fib_dict_counts = [len(self.fib_dict[k]) for k in self.fib_dict.keys()]
        return (sum(fib_dict_counts), max(fib_dict_counts))

    def runtime(self):
        # NOTE(review): presumably @property — confirm.
        return self._solver.model.Runtime

    def obj_val(self):
        # NOTE(review): total_flow() reads ``self.obj_val`` without calling it —
        # presumably @property with the decorator stripped; confirm.
        return self._solver.model.objVal
class ResNet(nn.Module):
    """ResNet classifier with a dilated final stage.

    layer4 keeps stride 1 and assigns dilation rates [2, 2, 2] to its blocks,
    preserving the spatial resolution of layer3 (as used for dense prediction).
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # stride 1 + dilation: no further downsampling in the last stage
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilate=[2, 2, 2])
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He (fan-out) normal initialization
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilate=None):
        """Stack ``blocks`` residual blocks for one stage.

        When ``dilate`` is given, blocks after the first get a dilation rate
        from it, grouped in ceil(blocks / len(dilate))-sized chunks.
        """
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # 1x1 projection to match shape for the residual addition
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            if dilate:
                l = math.ceil((blocks / len(dilate)))
                r = dilate[(i // l)]
                layers.append(block(self.inplanes, planes, dilate=r))
            else:
                layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Standard ResNet forward: stem -> 4 stages -> global pool -> fc logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
def load_generated_package(name: str, path: T.Openable, evict: bool=True) -> T.Any:
    """Import a code-generated package from *path* under module name *name*.

    With ``evict`` (the default) the load is isolated: any already-imported
    modules sharing the root package name are temporarily removed from
    ``sys.modules``, the freshly registered modules are evicted afterwards, and
    the saved modules are restored — so the import leaves ``sys.modules``
    unchanged. With ``evict=False`` the load goes straight through, except that
    hotloading a root package named 'sym' is rejected.
    """
    if (not evict):
        if (name.split('.')[0] == 'sym'):
            raise ValueError('Attempted to hotload a generated package called `sym` - see `help(load_generated_package)` for more information')
        return _load_generated_package_internal(name, Path(path))[0]
    root_package_name = name.split('.')[0]
    callee_saved_modules: T.List[T.Tuple[(str, T.Any)]] = []
    # stash (and remove) every loaded module under the same root package
    for module_name in tuple(sys.modules.keys()):
        if (root_package_name == module_name.split('.')[0]):
            try:
                conflicting_module = sys.modules[module_name]
                del sys.modules[module_name]
                callee_saved_modules.append((module_name, conflicting_module))
            except KeyError:
                # may disappear concurrently; ignore
                pass
    (module, added_module_names) = _load_generated_package_internal(name, Path(path))
    # evict the modules this load registered ...
    for added_name in added_module_names:
        try:
            del sys.modules[added_name]
        except KeyError:
            pass
    # ... then restore the previously loaded ones
    for (removed_name, removed_module) in callee_saved_modules:
        sys.modules[removed_name] = removed_module
    return module
def tbLogWritter(summaryInfo):
    """Write the entries of *summaryInfo* to a per-epoch TensorBoard log.

    Keys containing 'Image' are logged as image grids, 'Loss' as scalars, and
    'Model' as a graph traced with summaryInfo['Input Image']. Expected
    bookkeeping keys: 'Path', 'Epoch', 'Step', 'Input Image'.
    """
    createDir(summaryInfo['Path'])
    writer = SummaryWriter((summaryInfo['Path'] + 'epoch_{}'.format(summaryInfo['Epoch'])))
    for k in summaryInfo:
        if ('Image' in k):
            writer.add_image(k, torchvision.utils.make_grid(summaryInfo[k]), summaryInfo['Step'])
        elif ('Loss' in k):
            # BUG FIX: add_scalar was called without a global step, unlike
            # add_image above, so scalar curves could not be plotted over steps.
            writer.add_scalar(k, summaryInfo[k], summaryInfo['Step'])
        elif ('Model' in k):
            writer.add_graph(summaryInfo[k], summaryInfo['Input Image'])
    writer.close()
def test_dace_unroll():
    """Check that ``dace.unroll`` statically expands a 3-iteration loop."""

    def tounroll(A: dace.float64[1]):
        # i runs over 1, 2, 3; the unroller should clone the body three times.
        for i in dace.unroll(range(1, 4)):
            A[0] += (i * i)

    # Run the LoopUnroller directly over the function's AST.
    (src_ast, fname, _, _) = astutils.function_to_ast(tounroll.f)
    lu = LoopUnroller(tounroll.global_vars, fname, None)
    unrolled = lu.visit(src_ast)
    # The single for-loop must have been replaced by 3 unrolled statements.
    assert (len(unrolled.body[0].body) == 3)
    a = np.zeros([1])
    tounroll(a)
    # 1*1 + 2*2 + 3*3 = 14
    assert (a[0] == 14)
def warm_start_model(checkpoint_path, model, ignore_layers):
    """Initialise `model` from a checkpoint, optionally keeping some layers.

    Parameters named in `ignore_layers` retain the weights `model` already
    has; everything else is overwritten from the checkpoint's 'state_dict'.
    When `ignore_layers` is empty the checkpoint state dict is loaded
    strictly and unmodified.  Returns the (mutated) model.
    """
    assert os.path.isfile(checkpoint_path)
    print(f"Warm starting model from checkpoint '{checkpoint_path}'")
    loaded = torch.load(checkpoint_path, map_location='cpu')
    pretrained = loaded['state_dict']
    if len(ignore_layers) > 0:
        # Drop the ignored entries, then merge over the model's own state so
        # the ignored layers keep their current values.
        pretrained = {name: tensor for name, tensor in pretrained.items()
                      if name not in ignore_layers}
        merged = model.state_dict()
        merged.update(pretrained)
        pretrained = merged
    model.load_state_dict(pretrained)
    return model
# NOTE(review): the source had a bare `.skip(reason='Shared function')` here,
# which is not valid Python -- reconstructed as the usual pytest marker; confirm
# against the original file.
@pytest.mark.skip(reason='Shared function')
def run_quota_tests(tests):
    """Shared driver that checks planner VM-type/instance decisions per quota.

    Each test is a ``(quota_limits, expected_vm_types, expected_n_instances)``
    triple: the limits are written to QUOTA_FILE, a MulticastDirectPlanner is
    built on top of them, and the VM type / instance-count decision is checked
    for every (source, destinations) region combination.
    """
    for quota_limits, expected_vm_types, expected_n_instances in tests:
        with open(QUOTA_FILE, 'w') as f:
            f.write(json.dumps(quota_limits, indent=2))
        transfer_config = TransferConfig()
        planner = MulticastDirectPlanner(n_instances=8, n_connections=100,
                                         transfer_config=transfer_config,
                                         quota_limits_file=QUOTA_FILE)
        region_tags = [f'{p}:{REGIONS[p]}' for p in REGIONS.keys()]
        for i, src_region_tag in enumerate(region_tags):
            # Every region other than the source acts as a destination.
            dst_region_tags = region_tags[:i] + region_tags[i + 1:]
            vm_types, n_instances = planner._get_vm_type_and_instances(
                src_region_tag=src_region_tag, dst_region_tags=dst_region_tags)
            assert vm_types == expected_vm_types, f'vm types are calculated wrong - expected: {expected_vm_types}, calculated: {vm_types}'
            assert n_instances == expected_n_instances, f'n_instances are calculated wrong - expected: {expected_n_instances}, calculated: {n_instances}'
    # Clean up the temporary quota file.
    if os.path.exists(QUOTA_FILE):
        os.remove(QUOTA_FILE)
def fibonacci_sphere(N: int, *, dtype=np.float32) -> Tuple[(np.ndarray, np.ndarray)]:
    """Return (lat, lon) for N points spread over the sphere on the
    golden-angle (Fibonacci) lattice.

    Latitudes are arcsine-spaced in [-pi/2, pi/2] (uniform area per point)
    and longitudes wrap into [0, 2*pi).
    """
    golden_ratio = (np.sqrt(5.0) + 1.0) / 2.0
    golden_angle = (2 - golden_ratio) * (2 * np.pi)
    idx = np.arange(1, N + 1, dtype=dtype)
    latitudes = np.arcsin(-1 + (2 * idx) / (N + 1))
    longitudes = np.remainder(golden_angle * idx, 2 * np.pi)
    return latitudes, longitudes
def test_dont_blow_up_without_validation_set():
    """A config with no validation URLs must yield a None validation set."""
    with tempfile.TemporaryDirectory() as tmpdir:
        cfg = LMDatasetConfig(train_urls=['kaa'], validation_urls=[], cache_dir=tmpdir)
        assert cfg.validation_set(10) is None
class TemporalDifferenceModel(TorchRLAlgorithm, metaclass=abc.ABCMeta):
    """Temporal-difference model (TDM): goal- and horizon-conditioned
    Q-learning where every transition is tagged with the number of steps
    remaining ("tau").

    NOTE(review): ``__init__`` never calls ``super().__init__()`` and reads
    attributes such as ``self.env``, ``self.eval_policy``, ``self.qf1`` and
    ``self.num_steps_per_eval`` that are not assigned here -- this class is
    evidently used as a mixin whose cooperating base class initialises those
    first; confirm usage before refactoring.
    """

    def __init__(self, max_tau=10, max_tau_for_rollout=None,
                 epoch_max_tau_schedule=None, vectorized=True,
                 cycle_taus_for_rollout=True, dense_rewards=False,
                 finite_horizon=True, tau_sample_strategy='uniform',
                 goal_reached_epsilon=0.001, terminate_when_goal_reached=False,
                 truncated_geom_factor=2.0, square_distance=False,
                 goal_weights=None, normalize_distance=False,
                 observation_key=None, desired_goal_key=None, discount=1.0):
        """
        :param max_tau: largest horizon used when (re)sampling taus for training.
        :param max_tau_for_rollout: horizon used when collecting rollouts
            (defaults to ``max_tau``).
        :param epoch_max_tau_schedule: schedule mapping epoch -> max_tau;
            defaults to a constant schedule at ``max_tau``.
        :param tau_sample_strategy: one of 'no_resampling', 'uniform',
            'truncated_geometric', 'all_valid'.
        """
        assert (tau_sample_strategy in ['no_resampling', 'uniform', 'truncated_geometric', 'all_valid'])
        if (epoch_max_tau_schedule is None):
            epoch_max_tau_schedule = ConstantSchedule(max_tau)
        if (not finite_horizon):
            # Infinite-horizon mode: tau is pinned to 0 and never cycled.
            max_tau = 0
            epoch_max_tau_schedule = ConstantSchedule(max_tau)
            cycle_taus_for_rollout = False
        self.discount = discount
        self.max_tau = max_tau
        self.epoch_max_tau_schedule = epoch_max_tau_schedule
        if (max_tau_for_rollout is None):
            self.max_tau_for_rollout = self.max_tau
        else:
            self.max_tau_for_rollout = max_tau_for_rollout
        self.vectorized = vectorized
        self.cycle_taus_for_rollout = cycle_taus_for_rollout
        self.dense_rewards = dense_rewards
        self.finite_horizon = finite_horizon
        self.tau_sample_strategy = tau_sample_strategy
        self.goal_reached_epsilon = goal_reached_epsilon
        self.terminate_when_goal_reached = terminate_when_goal_reached
        self.square_distance = square_distance
        self._current_path_goal = None
        self._rollout_tau = np.array([self.max_tau_for_rollout])
        self.truncated_geom_factor = float(truncated_geom_factor)
        self.goal_weights = goal_weights
        if (self.goal_weights is not None):
            self.goal_weights = np.array(self.goal_weights)
            assert (self.goal_weights.size == self.env.goal_dim)
        self.normalize_distance = normalize_distance
        self.observation_key = observation_key
        self.desired_goal_key = desired_goal_key
        self.eval_sampler = MultigoalSimplePathSampler(env=self.env, policy=self.eval_policy, qf=self.qf1, max_samples=self.num_steps_per_eval, max_path_length=self.max_path_length, tau_sampling_function=self._sample_max_tau_for_rollout, cycle_taus_for_rollout=self.cycle_taus_for_rollout, render=self.render_during_eval, observation_key=self.observation_key, desired_goal_key=self.desired_goal_key)
        self.pretrain_obs = None
        # Local import, presumably to avoid a circular module dependency.
        from railrl.samplers.rollout_functions import create_rollout_function, tdm_rollout, tau_sampling_tdm_rollout
        self.train_rollout_function = create_rollout_function(tdm_rollout, init_tau=self.max_tau_for_rollout, cycle_tau=self.cycle_taus_for_rollout, decrement_tau=self.cycle_taus_for_rollout, observation_key=self.observation_key, desired_goal_key=self.desired_goal_key)
        self.eval_rollout_function = self.train_rollout_function

    def update_sampler_and_rollout_function(self):
        """Rebuild the eval sampler and rollout functions from current attributes."""
        self.eval_sampler = MultigoalSimplePathSampler(env=self.env, policy=self.eval_policy, qf=self.qf1, max_samples=self.num_steps_per_eval, max_path_length=self.max_path_length, tau_sampling_function=self._sample_max_tau_for_rollout, cycle_taus_for_rollout=self.cycle_taus_for_rollout, render=self.render_during_eval, observation_key=self.observation_key, desired_goal_key=self.desired_goal_key)
        from railrl.samplers.rollout_functions import create_rollout_function, tdm_rollout
        self.train_rollout_function = create_rollout_function(tdm_rollout, init_tau=self.max_tau_for_rollout, cycle_tau=self.cycle_taus_for_rollout, decrement_tau=self.cycle_taus_for_rollout, observation_key=self.observation_key, desired_goal_key=self.desired_goal_key)
        self.eval_rollout_function = self.train_rollout_function

    def _start_epoch(self, epoch):
        # Anneal/schedule the training horizon at each epoch boundary.
        self.max_tau = self.epoch_max_tau_schedule.get_value(epoch)
        super()._start_epoch(epoch)

    def get_batch(self):
        """Sample a replay batch, (re)sample taus, and apply TDM relabelling:
        terminate at tau == 0, optionally at goal-reach, sparsify rewards,
        and rescale rewards by the discount."""
        batch = self.replay_buffer.random_batch(self.batch_size)
        num_steps_left = self._sample_taus_for_training(batch)
        obs = batch['observations']
        actions = batch['actions']
        next_obs = batch['next_observations']
        goals = batch['resampled_goals']
        rewards = batch['rewards']
        terminals = batch['terminals']
        if (self.tau_sample_strategy == 'all_valid'):
            # Duplicate every transition once per tau value in [0, max_tau].
            obs = np.repeat(obs, (self.max_tau + 1), 0)
            actions = np.repeat(actions, (self.max_tau + 1), 0)
            next_obs = np.repeat(next_obs, (self.max_tau + 1), 0)
            goals = np.repeat(goals, (self.max_tau + 1), 0)
            rewards = np.repeat(rewards, (self.max_tau + 1), 0)
            terminals = np.repeat(terminals, (self.max_tau + 1), 0)
        if self.finite_horizon:
            # A transition is terminal whenever the tau countdown hits zero.
            terminals = (1 - ((1 - terminals) * (num_steps_left != 0)))
        if self.terminate_when_goal_reached:
            diff = (self.env.convert_obs_to_goals(next_obs) - goals)
            goal_not_reached = (np.linalg.norm(diff, axis=1, keepdims=True) > self.goal_reached_epsilon)
            terminals = (1 - ((1 - terminals) * goal_not_reached))
        if (not self.dense_rewards):
            # Sparse mode: only terminal transitions keep a reward.
            rewards = (rewards * terminals)
        scaling = (self.discount ** (self.max_tau - num_steps_left))
        rewards = (rewards / scaling)
        batch['rewards'] = rewards
        batch['terminals'] = terminals
        batch['actions'] = actions
        batch['num_steps_left'] = num_steps_left
        batch['goals'] = goals
        batch['observations'] = obs
        batch['next_observations'] = next_obs
        return np_to_pytorch_batch(batch)

    def _sample_taus_for_training(self, batch):
        """Return a (batch, 1)-shaped array of "steps remaining" values drawn
        per ``tau_sample_strategy`` (all zeros in infinite-horizon mode)."""
        if self.finite_horizon:
            if (self.tau_sample_strategy == 'uniform'):
                num_steps_left = np.random.randint(0, (self.max_tau + 1), (self.batch_size, 1))
            elif (self.tau_sample_strategy == 'truncated_geometric'):
                num_steps_left = truncated_geometric(p=(self.truncated_geom_factor / self.max_tau), truncate_threshold=self.max_tau, size=(self.batch_size, 1), new_value=0)
            elif (self.tau_sample_strategy == 'no_resampling'):
                # Reuse the taus recorded when the transitions were collected.
                num_steps_left = batch['num_steps_left']
            elif (self.tau_sample_strategy == 'all_valid'):
                num_steps_left = np.tile(np.arange(0, (self.max_tau + 1)), self.batch_size)
                num_steps_left = np.expand_dims(num_steps_left, 1)
            else:
                raise TypeError('Invalid tau_sample_strategy: {}'.format(self.tau_sample_strategy))
        else:
            num_steps_left = np.zeros((self.batch_size, 1))
        return num_steps_left

    def _sample_max_tau_for_rollout(self):
        # Rollouts always start from the full horizon (or 0 if infinite).
        if self.finite_horizon:
            return self.max_tau_for_rollout
        else:
            return 0

    def offline_evaluate(self, epoch):
        raise NotImplementedError()

    def _start_new_rollout(self):
        """Reset the policy, the tau countdown and the environment; record the
        episode goal for relabelling during the rollout."""
        self.exploration_policy.reset()
        self._rollout_tau = np.array([self.max_tau_for_rollout])
        obs = self.training_env.reset()
        self._current_path_goal = self.training_env.get_goal()
        return obs

    def _handle_step(self, observation, action, reward, next_observation, terminal, agent_info, env_info):
        """Record one environment step, tagged with the current tau; the
        countdown cycles back to max_tau_for_rollout when it underflows."""
        if self.vectorized:
            reward = reward[0]
        self._current_path_builder.add_all(observations=observation, actions=action, rewards=reward, next_observations=next_observation, terminals=terminal, agent_infos=agent_info, env_infos=env_info, num_steps_left=self._rollout_tau)
        if self.cycle_taus_for_rollout:
            self._rollout_tau -= 1
            if (self._rollout_tau[0] < 0):
                self._rollout_tau = np.array([self.max_tau_for_rollout])

    def _get_action_and_info(self, observation):
        """Query the exploration policy with (obs, goal, tau); if the policy
        reports a subgoal, substitute it into the observation dict."""
        full_observation = observation
        self.exploration_policy.set_num_steps_total(self._n_env_steps_total)
        goal = self._current_path_goal
        if self.observation_key:
            observation = observation[self.observation_key]
        if self.desired_goal_key:
            goal = self._current_path_goal[self.desired_goal_key]
        (ac, info) = self.exploration_policy.get_action(observation, goal, self._rollout_tau)
        if ('current_subgoal' in info):
            # Hierarchical policies expose the subgoal they are pursuing.
            full_observation[self.desired_goal_key] = info['current_subgoal']
        return (ac, info)

    def _handle_rollout_ending(self):
        self._n_rollouts_total += 1
        if (len(self._current_path_builder) > 0):
            path = self._current_path_builder.get_all_stacked()
            self.replay_buffer.add_path(path)
            self._exploration_paths.append(path)
            self._current_path_builder = PathBuilder()

    def _handle_path(self, path):
        self._n_rollouts_total += 1
        self.replay_buffer.add_path(path)
        self._exploration_paths.append(path)

    def evaluate(self, epoch, eval_paths=None):
        # Surface the current horizons in the evaluation statistics.
        self.eval_statistics['Max Tau'] = self.max_tau
        self.eval_statistics['Max Tau for Rollout'] = self.max_tau_for_rollout
        super().evaluate(epoch, eval_paths=eval_paths)
def cythonize_extensions(extension):
    """Cythonize the given extension(s) with scikit-learn's directives.

    Verifies the Cython toolchain and the build environment first, records
    OpenMP availability on the ``sklearn`` package, and parallelises
    cythonization across all cores when joblib is importable.
    """
    _check_cython_version()
    from Cython.Build import cythonize

    # Fail fast on a broken compiler/build environment, and record whether
    # OpenMP is usable so the rest of the build can adapt.
    basic_check_build()
    sklearn._OPENMP_SUPPORTED = check_openmp_support()

    n_jobs = 1
    with contextlib.suppress(ImportError):
        import joblib
        n_jobs = joblib.cpu_count()

    # Opt-in runtime bounds checking for debugging, toggled via environment.
    debug_checks = os.environ.get('SKLEARN_ENABLE_DEBUG_CYTHON_DIRECTIVES', '0') != '0'
    directives = {
        'language_level': 3,
        'boundscheck': debug_checks,
        'wraparound': False,
        'initializedcheck': False,
        'nonecheck': False,
        'cdivision': True,
        'profile': False,
    }
    return cythonize(extension, nthreads=n_jobs,
                     compiler_directives=directives, annotate=False)
def get_2hop_relations_from_2entities(entity0: str, entity1: str):
    """Build a SPARQL query for the two relations on the 2-hop path
    ``?x1 --?x0--> entity0`` and ``?x1 --?y--> entity1``.

    NOTE(review): this function looks damaged or unfinished -- the PREFIX
    IRIs and the FILTER regex patterns are empty (each ``<`` / ``"`` is
    followed by nothing, presumably stripped during extraction), and the
    assembled query is never executed or returned: the function currently
    returns None.  Confirm against the original source before use.
    """
    query = ((((((('\n PREFIX rdf: < PREFIX rdfs: < PREFIX : < SELECT distinct ?x0 as ?r0 ?y as ?r1 WHERE {\n ?x1 ?x0 ' + ':') + entity0) + ' .\n') + '?x1 ?y ') + ':') + entity1) + ' .\n FILTER regex(?x0, " FILTER regex(?y, " }\n ')
    pass
_nplike  # NOTE(review): stray bare name -- almost certainly the tail of a garbled decorator (e.g. ``@register_nplike``); confirm against the original source.
class Numpy(ArrayModuleNumpyLike['NDArray']):
    """Eager nplike backend implemented directly on the ``numpy`` module."""

    # Arrays are evaluated eagerly; structured dtypes are supported.
    is_eager: Final = True
    supports_structured_dtypes: Final = True

    def __init__(self):
        # All array operations delegate to the real numpy module.
        self._module = numpy

    # NOTE(review): ``ma``, ``char`` and ``ndarray`` read like @property
    # accessors and ``is_own_array_type`` takes ``cls`` like a @classmethod --
    # the decorators appear to have been lost in extraction; confirm.
    def ma(self):
        return self._module.ma

    def char(self):
        return self._module.char

    def ndarray(self):
        return self._module.ndarray

    def is_own_array_type(cls, type_: type) -> bool:
        """True if ``type_`` is numpy.ndarray or a subclass of it."""
        return issubclass(type_, numpy.ndarray)

    def is_c_contiguous(self, x: (NDArray | PlaceholderArray)) -> bool:
        """C-contiguity check; placeholders are treated as trivially contiguous."""
        if isinstance(x, PlaceholderArray):
            return True
        else:
            return x.flags['C_CONTIGUOUS']

    def packbits(self, x: NDArray, *, axis: (int | None)=None, bitorder: Literal[('big', 'little')]='big'):
        # Placeholders carry no data, so bit packing is undefined for them.
        assert (not isinstance(x, PlaceholderArray))
        return numpy.packbits(x, axis=axis, bitorder=bitorder)

    def unpackbits(self, x: NDArray, *, axis: (int | None)=None, count: (int | None)=None, bitorder: Literal[('big', 'little')]='big'):
        assert (not isinstance(x, PlaceholderArray))
        return numpy.unpackbits(x, axis=axis, count=count, bitorder=bitorder)
def test_case45():
    """GET an NGSI-LD entity by id and verify its id plus a 200 status."""
    url = brokerIp + '/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:B990'
    headers = {'Content-Type': 'application/ld+json', 'Accept': 'application/ld+json'}
    response = requests.get(url, headers=headers)
    print(response.content)
    # Normalise quotes before parsing the JSON body.
    body = response.content.decode('utf8').replace("'", '"')
    parsed = json.loads(body)
    print(parsed)
    if parsed['id'] == 'urn:ngsi-ld:Vehicle:B990':
        print('\nValidated')
    else:
        print('\nNot Validated')
    print(response.status_code)
    assert response.status_code == 200
# NOTE(review): the source had the bare name `_clip_fps_by_default` in front of
# the def -- reconstructed here as the decorator application (it presumably
# fills in `fps` from the clip when None); confirm against the original.
@_clip_fps_by_default
def find_video_period(clip, fps=None, tmin=0.3):
    """Estimate the period (in seconds) of a looping video clip.

    Correlates the flattened frame at t=0 with frames sampled every 1/fps
    seconds, and returns the sample time with the highest correlation.
    `tmin` (plus skipping the first sample) excludes the trivial match
    near t=0.
    """
    frame = lambda t: clip.get_frame(t).flatten()
    # Candidate times: a 1/fps grid over (tmin, duration), first sample dropped.
    tt = np.arange(tmin, clip.duration, 1.0 / fps)[1:]
    ref = frame(0)
    corrs = [np.corrcoef(ref, frame(t))[0, 1] for t in tt]
    return tt[np.argmax(corrs)]
def reduce_dict(input_dict, average=True):
    """Reduce a dict of tensors across processes onto rank 0.

    Keys are sorted so every rank stacks values in the same order.  With
    ``average=True`` rank 0 divides the reduced values by the world size;
    note that non-zero ranks receive partial values, per ``dist.reduce``
    semantics.  With a single process the dict is returned untouched.
    """
    world_size = comm.world_size
    if world_size < 2:
        # Nothing to reduce in single-process mode.
        return input_dict
    with torch.no_grad():
        names = sorted(input_dict.keys())
        values = torch.stack([input_dict[name] for name in names], dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            values /= world_size
        reduced_dict = dict(zip(names, values))
    return reduced_dict
def get_depth_gasda(dataset, file, phase=None):
    """Load a 16-bit depth map for GASDA-style training/testing.

    For phase 'test' the resized map is returned as a torch tensor; for
    'train' it is scaled from [0, 65536) into [-1, 1) with negative values
    clamped to the lower bound.  A falsy phase raises.
    """
    if not phase:
        raise NotImplementedError('phase value is none!!')
    depth = cv2.imread(str(file), flags=cv2.IMREAD_ANYDEPTH).astype(np.float32)
    # Nearest-neighbour keeps depth values unblended at the label resolution.
    depth = cv2.resize(depth, tuple(dataset.labels_size), interpolation=cv2.INTER_NEAREST)
    if phase == 'test':
        to_tensor = transforms.ToTensor()
        return to_tensor(depth)
    if phase == 'train':
        scaled = np.array(depth, dtype=np.float32)
        scaled /= 65536.0
        scaled[scaled < 0.0] = 0.0
        # Map [0, 1) onto [-1, 1).
        scaled = scaled * 2.0
        scaled -= 1.0
        return scaled
def filter_recursive(x_or_iterable):
    """Recursively prune ``None`` entries and (post-pruning) empty lists.

    Non-list inputs are returned unchanged.  Falsy values other than None
    and the empty list (0, '', False) are preserved.
    """
    if not isinstance(x_or_iterable, list):
        return x_or_iterable
    kept = []
    for element in x_or_iterable:
        cleaned = filter_recursive(element)
        if cleaned is None:
            continue
        # Lists that became empty after cleaning are dropped as well.
        if isinstance(cleaned, list) and len(cleaned) == 0:
            continue
        kept.append(cleaned)
    return kept
def match(speech, mode):
    """Scan 'Keypoints' for OpenPose JSON files, render each pose to an image
    for the GUI, classify it (mode 0: alphabet, mode 1: word), and optionally
    play the matched label's mp3 (speech == 1).

    Returns the last classified label.

    NOTE(review): `label` is a module-level global -- if no .json file is ever
    processed the trailing `return label` relies on a previous call having set
    it.  The bare `except:` blocks silently swallow all errors, including the
    classifier's.  Paths are hard-coded with Windows separators.  Confirm all
    of this is intentional before refactoring.
    """
    global label, lastLabel
    for entry in os.scandir('Keypoints'):
        if entry.is_file():
            if (os.path.splitext(entry)[1] == '.json'):
                filePlotName = entry.name
                try:
                    js = json.loads(open(('Keypoints\\' + filePlotName)).read())
                    # One OpenPose detection per person in the frame.
                    for items in js['people']:
                        pose = items['pose_keypoints_2d']
                        handRight = items['hand_right_keypoints_2d']
                        handLeft = items['hand_left_keypoints_2d']
                        # Drop low-confidence points, then pair up (x, y) coords.
                        pose_points = helper.removePoints(pose)
                        posePoints = helper.join_points(pose_points)
                        hand_right_Points = helper.removePoints(handRight)
                        handRightPoints = helper.join_points(hand_right_Points)
                        hand_left_points = helper.removePoints(handLeft)
                        handLeftPoints = helper.join_points(hand_left_points)
                        # Render the skeleton and save a preview for the GUI.
                        frame = plotPose(posePoints, handRightPoints, handLeftPoints)
                        cv2.imwrite((('gui\\Learn_images\\' + filePlotName) + '.jpg'), frame)
                        frame = cv2.imread('PSL\\BLACK_background.jpg')
                        eel.get_fileName(filePlotName)
                except:
                    print('Decoding JSON has failed')
                    pass
                try:
                    # mode 0 -> letter classifier, mode 1 -> word classifier.
                    if (mode == 0):
                        label = alphabet.match_ann(('Keypoints\\' + filePlotName))
                    if (mode == 1):
                        label = word.match_ann(('Keypoints\\' + filePlotName))
                    print(label)
                except Exception:
                    pass
                # Only react to confident, newly-changed labels.
                if ((label != 'no match') and (label != 'no confidence') and (label != lastLabel)):
                    lastLabel = label
                    if (speech == 1):
                        try:
                            # Play the pre-recorded pronunciation for the label.
                            mp3 = (('data\\speech\\' + label) + '.mp3')
                            mixer.init()
                            mixer.music.load(mp3)
                            mixer.music.play()
                        except:
                            pass
    return label
def test_check_symmetric():
    """check_symmetric: a 1-D input raises; an asymmetric matrix warns (or
    raises on request) and is symmetrized to (A + A.T) / 2, preserving the
    sparse format when the input was sparse."""
    arr_sym = np.array([[0, 1], [1, 2]])
    arr_bad = np.ones(2)
    arr_asym = np.array([[0, 2], [0, 2]])
    asym_by_format = {
        'dense': arr_asym,
        'dok': sp.dok_matrix(arr_asym),
        'csr': sp.csr_matrix(arr_asym),
        'csc': sp.csc_matrix(arr_asym),
        'coo': sp.coo_matrix(arr_asym),
        'lil': sp.lil_matrix(arr_asym),
        'bsr': sp.bsr_matrix(arr_asym),
    }
    # A 1-D array is not a valid square matrix.
    with pytest.raises(ValueError):
        check_symmetric(arr_bad)
    for arr_format, arr in asym_by_format.items():
        # Default behaviour: warn and symmetrize; raising is opt-in.
        with pytest.warns(UserWarning):
            check_symmetric(arr)
        with pytest.raises(ValueError):
            check_symmetric(arr, raise_exception=True)
        output = check_symmetric(arr, raise_warning=False)
        if sp.issparse(output):
            # The sparse storage format must survive symmetrization.
            assert output.format == arr_format
            assert_array_equal(output.toarray(), arr_sym)
        else:
            assert_array_equal(output, arr_sym)
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
    """Register the ns3::SequentialRandomVariable bindings on `cls`.

    Mirrors the C++ API: static GetTypeId, a default constructor, const
    getters for the distribution parameters, and the virtual
    GetValue/GetInteger overrides.
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Const accessors for the distribution's configuration.
    for getter_name, return_type in (
            ('GetMin', 'double'),
            ('GetMax', 'double'),
            ('GetIncrement', 'ns3::Ptr< ns3::RandomVariableStream >'),
            ('GetConsecutive', 'uint32_t')):
        cls.add_method(getter_name, return_type, [], is_const=True)
    # Virtual draw methods inherited from RandomVariableStream.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
class InceptionE(nn.Module):
    """Inception-v3 block E: four parallel branches concatenated on the
    channel axis (320 + 768 + 768 + 192 = 2048 output channels)."""

    def __init__(self, input_channels):
        super().__init__()
        self.branch1x1 = BasicConv2d(input_channels, 320, kernel_size=1)
        # 3x3 branch expanded into a pair of asymmetric 1x3 / 3x1 convolutions.
        self.branch3x3_1 = BasicConv2d(input_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        # Double-3x3 stack whose last conv is likewise split asymmetrically.
        self.branch3x3stack_1 = BasicConv2d(input_channels, 448, kernel_size=1)
        self.branch3x3stack_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3stack_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3stack_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        # Pooling branch: 3x3 average pool followed by a 1x1 projection.
        self.branch_pool = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(input_channels, 192, kernel_size=1),
        )

    def forward(self, x):
        b1 = self.branch1x1(x)

        b2 = self.branch3x3_1(x)
        b2 = torch.cat([self.branch3x3_2a(b2), self.branch3x3_2b(b2)], 1)

        b3 = self.branch3x3stack_2(self.branch3x3stack_1(x))
        b3 = torch.cat([self.branch3x3stack_3a(b3), self.branch3x3stack_3b(b3)], 1)

        b4 = self.branch_pool(x)
        return torch.cat([b1, b2, b3, b4], 1)
def _onnx_unsupported(op_name): raise RuntimeError('Unsupported: ONNX export of operator {}. Please open a bug to request ONNX export support for the missing operator.'.format(op_name))
def test_false_str_estimator() -> None:
    """An unknown calibrator string must be rejected with a ValueError."""
    with pytest.raises(ValueError, match='.*Please provide a string in*'):
        calibrator = MapieCalibrator(calibrator='not_estimator')
        calibrator.fit(X, y)
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): zip_filename = (base_name + '.zip') archive_dir = os.path.dirname(base_name) if (not os.path.exists(archive_dir)): if (logger is not None): logger.info('creating %s', archive_dir) if (not dry_run): os.makedirs(archive_dir) try: import zipfile except ImportError: zipfile = None if (zipfile is None): _call_external_zip(base_dir, zip_filename, verbose, dry_run) else: if (logger is not None): logger.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) if (not dry_run): zip = zipfile.ZipFile(zip_filename, 'w', compression=zipfile.ZIP_DEFLATED) for (dirpath, dirnames, filenames) in os.walk(base_dir): for name in filenames: path = os.path.normpath(os.path.join(dirpath, name)) if os.path.isfile(path): zip.write(path, path) if (logger is not None): logger.info("adding '%s'", path) zip.close() return zip_filename
class MMFToPLCheckpointUpdater():
    """Rewrites legacy MMF checkpoints, in place, into the layout expected by
    PyTorch Lightning (``state_dict`` / ``optimizer_states`` /
    ``lr_schedulers`` / ``global_step`` / ``epoch``)."""

    def __init__(self):
        pass

    def update_checkpoint(self, checkpoint: Dict[(str, Any)], model: torch.nn.Module) -> None:
        """Dispatch on checkpoint flavour:
        1. model-only checkpoint -> rewritten to PL model-checkpoint format;
        2. non-PL (MMF) trainer checkpoint -> rewritten to PL trainer format;
        3. already a PL trainer checkpoint -> left untouched.
        """
        if is_model_only_checkpoint(checkpoint):
            self._update_model_checkpoint(checkpoint=checkpoint, model=model)
            return
        if (not is_pl_trainer_checkpoint(checkpoint)):
            self._update_trainer_checkpoint_from_mmf(checkpoint=checkpoint, model=model)

    def _update_trainer_checkpoint_from_mmf(self, checkpoint: Dict[(str, Any)], model: Any) -> None:
        """Rewrite an MMF trainer checkpoint into PL trainer format, in place."""
        # Drop MMF bookkeeping keys that have no PL equivalent.
        remove_keys_inplace(checkpoint, {'best_iteration', 'current_iteration', 'best_update', 'best_metric_value', 'fp16_scaler', 'config', 'git/branch', 'git/commit_hash', 'git/commit_author', 'git/commit_message', 'git/diff'})
        if ('model' in checkpoint):
            # MMF stores weights under 'model'; PL expects 'state_dict'.
            model_checkpoint = checkpoint.pop('model')
            checkpoint['state_dict'] = model_checkpoint
            self._update_model_format_state_keys(checkpoint['state_dict'], model=model)
            config = registry.get('config')
            if config.checkpoint.get('resume_pretrained', False):
                self._update_pretrained_state_mapping(checkpoint=checkpoint['state_dict'], model=model, config=config)
        if ('optimizer' in checkpoint):
            # PL keeps a list of optimizer states (one per optimizer).
            optimizer = checkpoint.pop('optimizer')
            checkpoint['optimizer_states'] = [optimizer]
        if ('lr_scheduler' in checkpoint):
            lr_scheduler = checkpoint.pop('lr_scheduler')
            checkpoint['lr_schedulers'] = [lr_scheduler]
        else:
            checkpoint['lr_schedulers'] = []
        if ('num_updates' in checkpoint):
            global_step = checkpoint.pop('num_updates')
            checkpoint['global_step'] = global_step
        if ('current_epoch' in checkpoint):
            epoch = checkpoint.pop('current_epoch')
            checkpoint['epoch'] = epoch

    def _update_model_checkpoint(self, checkpoint: Dict[(str, Any)], model: torch.nn.Module) -> None:
        """Rewrite a model-only checkpoint into PL model-checkpoint format."""
        if (not is_pl_model_checkpoint(checkpoint)):
            self._update_model_checkpoint_from_mmf(checkpoint)
        self._update_model_format_state_keys(checkpoint['state_dict'], model=model)
        config = registry.get('config')
        if config.checkpoint.get('resume_pretrained', False):
            self._update_pretrained_state_mapping(checkpoint=checkpoint['state_dict'], model=model, config=config)

    def _update_pretrained_state_mapping(self, checkpoint: Dict[(str, Any)], model: torch.nn.Module, config: Dict[(str, Any)]) -> None:
        """Keep only state-dict keys selected by the pretrained-state mapping;
        every other key is removed from the checkpoint."""
        ckpt_update_dict = get_pretrained_state_mapping_checkpoint(checkpoint=checkpoint, model=model, config=config)
        accepted_keys = set()
        for (own_attr, attr) in ckpt_update_dict.items():
            # NOTE(review): the assert message is a 2-tuple of strings, not a
            # single string -- harmless (any non-empty tuple is shown as the
            # message) but probably an unintended missing concatenation.
            assert (own_attr == attr), ('Since `_update_model_format_state_keys` was run ', 'before, this has to be held true')
            logger.info(((('Copying ' + own_attr) + ' from ') + attr))
            accepted_keys.add(attr)
        # Iterate over a snapshot so we can pop from the live dict safely.
        tmp_checkpoint = dict(checkpoint)
        for key in tmp_checkpoint:
            if (key not in accepted_keys):
                checkpoint.pop(key)

    def _update_model_format_state_keys(self, checkpoint: Dict[(str, Any)], model: torch.nn.Module) -> None:
        """Rename state-dict keys to the current model's naming scheme."""
        tmp_state_dict = dict(checkpoint)
        for attr in tmp_state_dict:
            new_attr = _format_state_key(model, attr)
            if (attr != new_attr):
                logger.info(f'checkpoint: rewriting {attr} into {new_attr}')
                value = checkpoint.pop(attr)
                checkpoint[new_attr] = value

    def _update_model_checkpoint_from_mmf(self, checkpoint: Dict[(str, Any)]) -> None:
        """Wrap a raw MMF state dict under a 'state_dict' key, in place."""
        tmp_checkpoint = dict(checkpoint)
        checkpoint.clear()
        checkpoint['state_dict'] = tmp_checkpoint
class Unexpectedness(Metric):
    """Unexpectedness: the fraction of a user's top-k recommendations that do
    NOT appear in the top-k of a baseline recommender."""

    def _get_enriched_recommendations(self, recommendations: SparkDataFrame, base_recommendations: SparkDataFrame) -> SparkDataFrame:
        """Join the per-user ranked item lists of the main and base recommenders."""
        sorted_by_score_recommendations = self._get_items_list_per_user(recommendations)
        sorted_by_score_base_recommendations = self._get_items_list_per_user(base_recommendations).withColumnRenamed('pred_item_id', 'base_pred_item_id')
        # Left join: users with no base recommendations keep a null base list.
        enriched_recommendations = sorted_by_score_recommendations.join(sorted_by_score_base_recommendations, how='left', on=self.query_column)
        return self._rearrange_columns(enriched_recommendations)

    def __call__(self, recommendations: MetricsDataFrameLike, base_recommendations: MetricsDataFrameLike) -> MetricsReturnType:
        """Compute the metric.  Both inputs must share the same container type
        (Spark dataframe, pandas dataframe, or dict)."""
        self._check_dataframes_equal_types(recommendations, base_recommendations)
        if isinstance(recommendations, SparkDataFrame):
            self._check_duplicates_spark(recommendations)
            self._check_duplicates_spark(base_recommendations)
            assert isinstance(base_recommendations, SparkDataFrame)
            return self._spark_call(recommendations, base_recommendations)
        # Normalise pandas / plain-dict input into {user: [(item, score)]}.
        recommendations = (self._convert_pandas_to_dict_with_score(recommendations) if isinstance(recommendations, PandasDataFrame) else self._convert_dict_to_dict_with_score(recommendations))
        self._check_duplicates_dict(recommendations)
        assert isinstance(base_recommendations, (dict, PandasDataFrame))
        base_recommendations = (self._convert_pandas_to_dict_with_score(base_recommendations) if isinstance(base_recommendations, PandasDataFrame) else self._convert_dict_to_dict_with_score(base_recommendations))
        self._check_duplicates_dict(base_recommendations)
        return self._dict_call(list(recommendations), recs=recommendations, base_recs=base_recommendations)

    # NOTE(review): defined without `self` -- the @staticmethod decorator was
    # likely lost in extraction; confirm against the original source.
    def _get_metric_value_by_user(ks: List[int], base_recs: Optional[List], recs: Optional[List]) -> List[float]:
        """Per-user unexpectedness at each cutoff k:
        1 - |top-k(recs) ∩ top-k(base_recs)| / k; 0.0 if either list is empty."""
        if ((not base_recs) or (not recs)):
            return [0.0 for _ in ks]
        res = []
        for k in ks:
            res.append((1.0 - (len((set(recs[:k]) & set(base_recs[:k]))) / k)))
        return res
def build_argparse():
    """Build the argument parser for the neural tokenizer (train/predict).

    NOTE(review): several help strings below were wrapped mid-literal in the
    extracted source; they have been rejoined on their visible trailing
    spaces -- confirm the exact wording against the original file.
    """
    parser = argparse.ArgumentParser()
    # --- Input / output files ---
    parser.add_argument('--txt_file', type=str, help='Input plaintext file')
    parser.add_argument('--label_file', type=str, default=None, help='Character-level label file')
    parser.add_argument('--mwt_json_file', type=str, default=None, help='JSON file for MWT expansions')
    parser.add_argument('--conll_file', type=str, default=None, help='CoNLL file for output')
    parser.add_argument('--dev_txt_file', type=str, help='(Train only) Input plaintext file for the dev set')
    parser.add_argument('--dev_label_file', type=str, default=None, help='(Train only) Character-level label file for the dev set')
    parser.add_argument('--dev_conll_gold', type=str, default=None, help='(Train only) CoNLL-U file for the dev set for early stopping')
    # --- Language / treebank identification and run mode ---
    parser.add_argument('--lang', type=str, help='Language')
    parser.add_argument('--shorthand', type=str, help='UD treebank shorthand')
    parser.add_argument('--mode', default='train', choices=['train', 'predict'])
    parser.add_argument('--skip_newline', action='store_true', help='Whether to skip newline characters in input. Particularly useful for languages like Chinese.')
    # --- Model architecture ---
    parser.add_argument('--emb_dim', type=int, default=32, help='Dimension of unit embeddings')
    parser.add_argument('--hidden_dim', type=int, default=64, help='Dimension of hidden units')
    parser.add_argument('--conv_filters', type=str, default='1,9', help='Configuration of conv filters. ,, separates layers and , separates filter sizes in the same layer.')
    parser.add_argument('--no-residual', dest='residual', action='store_false', help='Add linear residual connections')
    parser.add_argument('--no-hierarchical', dest='hierarchical', action='store_false', help='"Hierarchical" RNN tokenizer')
    parser.add_argument('--hier_invtemp', type=float, default=0.5, help='Inverse temperature used in propagating tokenization predictions between RNN layers')
    parser.add_argument('--input_dropout', action='store_true', help='Dropout input embeddings as well')
    parser.add_argument('--conv_res', type=str, default=None, help='Convolutional residual layers for the RNN')
    parser.add_argument('--rnn_layers', type=int, default=1, help='Layers of RNN in the tokenizer')
    parser.add_argument('--use_dictionary', action='store_true', help='Use dictionary feature. The lexicon is created using the training data and external dict (if any) expected to be found under the same folder of training dataset, formatted as SHORTHAND-externaldict.txt where each line in this file is a word. For example, data/tokenize/zh_gsdsimp-externaldict.txt')
    # --- Optimisation / regularisation ---
    parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Maximum gradient norm to clip to')
    parser.add_argument('--anneal', type=float, default=0.999, help='Anneal the learning rate by this amount when dev performance deteriorate')
    parser.add_argument('--anneal_after', type=int, default=2000, help='Anneal the learning rate no earlier than this step')
    parser.add_argument('--lr0', type=float, default=0.002, help='Initial learning rate')
    parser.add_argument('--dropout', type=float, default=0.33, help='Dropout probability')
    parser.add_argument('--unit_dropout', type=float, default=0.33, help='Unit dropout probability')
    parser.add_argument('--feat_dropout', type=float, default=0.05, help='Features dropout probability for each element in feature vector')
    parser.add_argument('--feat_unit_dropout', type=float, default=0.33, help='The whole feature of units dropout probability')
    parser.add_argument('--tok_noise', type=float, default=0.02, help='Probability to induce noise to the input of the higher RNN')
    parser.add_argument('--sent_drop_prob', type=float, default=0.2, help='Probability to drop sentences at the end of batches during training uniformly at random. Idea is to fake paragraph endings.')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay')
    # --- Training schedule ---
    parser.add_argument('--max_seqlen', type=int, default=100, help='Maximum sequence length to consider at a time')
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size to use')
    parser.add_argument('--epochs', type=int, default=10, help='Total epochs to train the model for')
    parser.add_argument('--steps', type=int, default=50000, help='Steps to train the model for, if unspecified use epochs')
    parser.add_argument('--report_steps', type=int, default=20, help='Update step interval to report loss')
    parser.add_argument('--shuffle_steps', type=int, default=100, help='Step interval to shuffle each paragraph in the generator')
    parser.add_argument('--eval_steps', type=int, default=200, help='Step interval to evaluate the model on the dev set for early stopping')
    parser.add_argument('--max_steps_before_stop', type=int, default=5000, help='Early terminates after this many steps if the dev scores are not improving')
    # --- Model persistence ---
    parser.add_argument('--save_name', type=str, default=None, help='File name to save the model')
    parser.add_argument('--load_name', type=str, default=None, help='File name to load a saved model')
    parser.add_argument('--save_dir', type=str, default='saved_models/tokenize', help='Directory to save models in')
    utils.add_device_args(parser)
    parser.add_argument('--seed', type=int, default=1234)
    # --- MWT handling (tri-state via paired flags; None = auto-detect) ---
    parser.add_argument('--use_mwt', dest='use_mwt', default=None, action='store_true', help='Whether or not to include mwt output layers. If set to None, this will be determined by examining the training data for MWTs')
    parser.add_argument('--no_use_mwt', dest='use_mwt', action='store_false', help='Whether or not to include mwt output layers')
    # --- Experiment tracking ---
    parser.add_argument('--wandb', action='store_true', help='Start a wandb session and write the results of training. Only applies to training. Use --wandb_name instead to specify a name')
    parser.add_argument('--wandb_name', default=None, help='Name of a wandb session to start when training. Will default to the dataset short name')
    return parser
class KVT_Dataset(Dataset):
    """Dataset for the KVT audio-tagging benchmark.

    Loads pre-extracted ``.npy`` waveforms (memory-mapped) and multi-hot tag
    labels. TRAIN/VALID items return one random fixed-length crop; TEST/ALL
    items return ``num_chunks`` evenly spaced crops for chunk-level inference.

    Fixes vs. the previous version: JSON files are opened with ``with`` so the
    file handles are closed (they used to leak), and the misspelled local
    variable ``bainry`` was renamed.
    """

    def __init__(self, data_path, split, sr, duration, num_chunks):
        """
        Args:
            data_path: Root directory containing the ``kvt`` subfolder.
            split: One of 'TRAIN', 'VALID', 'TEST', or 'ALL'.
            sr: Sample rate of the stored audio in Hz.
            duration: Crop length in seconds.
            num_chunks: Number of crops per track at evaluation time.
        """
        self.data_path = data_path
        self.split = split
        self.sr = sr
        # Number of samples per crop.
        self.input_length = int(sr * duration)
        self.num_chunks = num_chunks
        self.get_split()
        self.get_file_list()

    def get_split(self):
        """Load the train/valid/test track-id partition from disk."""
        with open(os.path.join(self.data_path, 'kvt', 'track_split.json'), 'r') as f:
            track_split = json.load(f)
        self.train_track = track_split['train_track']
        self.valid_track = track_split['valid_track']
        self.test_track = track_split['test_track']

    def get_file_list(self):
        """Build the item list for the requested split and the tag vocabulary.

        Raises:
            ValueError: If ``self.split`` is not a recognized split name.
        """
        with open(os.path.join(self.data_path, 'kvt', 'annotation.json'), 'r') as f:
            annotation = json.load(f)
        with open(os.path.join(self.data_path, 'kvt', 'kvt_tags.json'), 'r') as f:
            self.list_of_label = json.load(f)
        self.tag_to_idx = {tag: idx for idx, tag in enumerate(self.list_of_label)}
        if self.split == 'TRAIN':
            self.fl = [annotation[str(i)] for i in self.train_track]
        elif self.split == 'VALID':
            self.fl = [annotation[str(i)] for i in self.valid_track]
        elif self.split == 'TEST':
            self.fl = [annotation[str(i)] for i in self.test_track]
        elif self.split == 'ALL':
            self.fl = list(annotation.values())
        else:
            raise ValueError(f'Unexpected split name: {self.split}')
        # Free the full annotation dict; only the selected split is kept.
        del annotation

    def audio_load(self, track_id):
        """Return a random ``input_length``-sample crop of the track as a tensor.

        NOTE(review): assumes every stored waveform has at least
        ``input_length`` samples — shorter tracks would make ``randint``
        raise. TODO confirm against the dataset preprocessing.
        """
        audio = np.load(os.path.join(self.data_path, 'kvt', 'npy', track_id + '.npy'),
                        mmap_mode='r')
        random_idx = random.randint(0, audio.shape[-1] - self.input_length)
        # np.array(...) copies the mmap slice into memory before wrapping.
        audio = torch.from_numpy(np.array(audio[random_idx:random_idx + self.input_length]))
        return audio

    def tag_to_binary(self, text):
        """Convert a tag (str) or list of tags into a multi-hot float32 vector."""
        binary = np.zeros([len(self.list_of_label)], dtype=np.float32)
        if isinstance(text, str):
            binary[self.tag_to_idx[text]] = 1.0
        elif isinstance(text, list):
            for tag in text:
                binary[self.tag_to_idx[tag]] = 1.0
        return binary

    def get_train_item(self, index):
        """Return one randomly-cropped example for TRAIN/VALID."""
        item = self.fl[index]
        tag_list = item['tag']
        binary = self.tag_to_binary(tag_list)
        audio_tensor = self.audio_load(item['track_id'])
        return {'audio': audio_tensor, 'binary': binary}

    def get_eval_item(self, index):
        """Return ``num_chunks`` evenly spaced crops plus metadata for evaluation."""
        item = self.fl[index]
        tag_list = item['tag']
        binary = self.tag_to_binary(tag_list)
        text = ', '.join(tag_list)
        tags = self.list_of_label
        track_id = item['track_id']
        audio = np.load(os.path.join(self.data_path, 'kvt', 'npy', track_id + '.npy'),
                        mmap_mode='r')
        # Stride between chunk start positions.
        hop = (len(audio) - self.input_length) // self.num_chunks
        audio = np.stack(
            [np.array(audio[i * hop:i * hop + self.input_length]) for i in range(self.num_chunks)]
        ).astype('float32')
        return {'audio': audio,
                'track_id': track_id,
                'tags': tags,
                'binary': binary,
                'text': text}

    def __getitem__(self, index):
        if self.split == 'TRAIN' or self.split == 'VALID':
            return self.get_train_item(index)
        return self.get_eval_item(index)

    def __len__(self):
        return len(self.fl)
def test_hashtag_container(tweet_segmenter):
    """Check that ``build_hashtag_container`` segments a Spanish hashtag.

    The conditions were previously folded into one ``assert all([...])``,
    which made pytest report only "False is not true" on failure. They are
    now individual asserts so the failing condition is identified directly.
    """
    original_tweet = 'esto es #UnaGenialidad'
    hashtag_container, word_segmenter_output = tweet_segmenter.build_hashtag_container([original_tweet])
    assert hashtag_container.hashtags == [['UnaGenialidad']]
    assert hashtag_container.hashtag_set == ['UnaGenialidad']
    assert hashtag_container.replacement_dict == {'#UnaGenialidad': 'Una Genialidad'}
    assert isinstance(word_segmenter_output, hashformers.segmenter.WordSegmenterOutput)
class TableSemanticParsingExample(Example):
    """An example for table-aware semantic parsing (text-to-program) tasks.

    Extends ``Example`` with database/schema metadata and the ground-truth
    tables and fields referenced by each target program.

    Bug fix: ``pretty_print`` now tests ``example_id is not None`` instead of
    truthiness, so a legitimate id of 0 is no longer silently skipped.
    """

    def __init__(self, dataset_id, db_name, db_id):
        super().__init__(dataset_id)
        self.db_name = db_name
        self.db_id = db_id
        self.schema_features = None
        self.schema_M = None
        self.M = None
        self.gt_tables_list = []        # per-program ground-truth table ids
        self.gt_table_names_list = []   # per-program ground-truth table names
        self.gt_fields_list = []        # per-program ground-truth field ids
        self.transformer_output_value_masks = None
        self.pred_tables = None
        self.table_ids_list = []

    def add_gt_tables(self, gt_tables, gt_table_names):
        """Record the ground-truth tables (ids and names) for one target program."""
        self.gt_tables_list.append(gt_tables)
        self.gt_table_names_list.append(gt_table_names)

    def pretty_print(self, example_id=None, schema=None, de_vectorize_ptr=None,
                     rev_vocab=None, post_process=None, use_table_aware_te=True):
        """Print a human-readable dump of the example for debugging.

        Args:
            example_id: Optional id to print as a header (printed even if 0).
            schema: Optional schema object; its ``pretty_print`` is invoked.
            de_vectorize_ptr: Callable that maps pointer-value ids back to
                tokens; only used for targets that have pointer-value ids.
            rev_vocab: Reverse vocabulary passed through to ``de_vectorize_ptr``.
            post_process: Post-processing hook passed to ``de_vectorize_ptr``.
            use_table_aware_te: If True, decode pointers against the
                table-aware input tokens, otherwise against the raw text tokens.
        """
        if example_id is not None:
            print('Example {}'.format(example_id))
        if schema:
            schema.pretty_print()
        print('NL: {}'.format(self.text.encode('utf-8')))
        print('NL tokens: {}'.format([t.encode('utf-8') for t in self.text_tokens]))
        print('NL tokens (original): {}'.format([t.encode('utf-8') for t in self.text_ptr_values]))
        for i, program in enumerate(self.program_list):
            print('Target {}: {}'.format(i, program.encode('utf-8')))
            if i < len(self.program_tokens_list):
                program_tokens = self.program_tokens_list[i]
                print('Target tokens: {}'.format([t.encode('utf-8') for t in program_tokens]))
            if i < len(self.program_text_ptr_value_ids_list):
                input_tokens = (self.input_ptr_values if use_table_aware_te
                                else self.text_ptr_values)
                program_tokens = de_vectorize_ptr(self.program_text_ptr_value_ids_list[i],
                                                  rev_vocab, input_tokens, post_process,
                                                  return_tokens=True)
                print('Target T-P tokens: {}'.format(program_tokens))
        # NOTE(review): trailing blank line placed after the target loop; the
        # flattened source made the original indentation ambiguous — confirm.
        print()

    def gt_tables(self):
        """Ground-truth table ids for the currently selected program."""
        return self.gt_tables_list[self.program_id]

    def gt_table_names(self):
        """Ground-truth table names for the currently selected program."""
        return self.gt_table_names_list[self.program_id]

    def gt_fields(self):
        """Ground-truth field ids for the currently selected program."""
        return self.gt_fields_list[self.program_id]

    def table_ids(self):
        """Table ids for the currently selected program."""
        return self.table_ids_list[self.program_id]