code
stringlengths
101
5.91M
def standardize_otpmizers_params(optm_dict):
    """Flatten 'optn__<name>' keys of an optimizer-config dict down to '<name>'.

    Operates on a deep copy, so the caller's dict is left untouched.
    """
    msg = "'optm_dict' must be of type dict. found {}.".format(type(optm_dict))
    assert isinstance(optm_dict, dict), msg
    flattened = copy.deepcopy(optm_dict)
    # Snapshot the keys up front: we mutate the dict while scanning.
    for key in tuple(flattened):
        if not key.startswith('optn'):
            continue
        msg = "'{}' is a dict. it must not be the case.otherwise, we have to do a recursive thing....".format(key)
        assert (not isinstance(flattened[key], dict)), msg
        # 'optn__lr' -> 'lr'
        flattened[key.split('__')[1]] = flattened.pop(key)
    return flattened
class AsyncNextNode(AtomicExprNode):
    # Expression node representing the implicit "fetch next item" step of an
    # ``async for`` loop (the ``__anext__`` call on the async iterator).
    type = py_object_type  # always produces a generic Python object
    is_temp = 1  # the result lives in a temporary

    def __init__(self, iterator):
        AtomicExprNode.__init__(self, iterator.pos)
        self.iterator = iterator

    def infer_type(self, env):
        return py_object_type

    def analyse_types(self, env):
        # Already fully typed; nothing to analyse.
        return self

    def generate_result_code(self, code):
        # Emit a call to the C utility that drives the async-iteration protocol,
        # jumping to the error label if it returns NULL.
        code.globalstate.use_utility_code(UtilityCode.load_cached('AsyncIter', 'Coroutine.c'))
        code.putln(('%s = __Pyx_Coroutine_AsyncIterNext(%s); %s' % (self.result(), self.iterator.py_result(), code.error_goto_if_null(self.result(), self.pos))))
        code.put_gotref(self.result())
def resnet101(pretrained=True, **kwargs):
    """Build a ResNet-101 backbone, optionally loading local ImageNet weights."""
    net = ResNet3X3(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return net
    print(' pretrained ')
    # Load to CPU first; the caller decides device placement.
    state = torch.load('./pretrained/resnet101-imagenet.pth', map_location='cpu')
    net.load_state_dict(state)
    return net
def eval_ppl_epoch(args, eval_data, eval_examples, model, tokenizer):
    """Run one evaluation pass and return corpus perplexity rounded to 5 dp."""
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True)
    logger.info((' ' + '***** Running ppl evaluation *****'))
    logger.info(' Num examples = %d', len(eval_examples))
    logger.info(' Batch size = %d', args.eval_batch_size)
    model.eval()
    (eval_loss, batch_num) = (0, 0)
    for batch in tqdm(eval_dataloader, total=len(eval_dataloader), desc='Eval ppl'):
        batch = tuple((t.to(args.device) for t in batch))
        (source_ids, target_ids) = batch
        # Attention masks: attend to every non-padding token.
        source_mask = source_ids.ne(tokenizer.pad_token_id)
        target_mask = target_ids.ne(tokenizer.pad_token_id)
        with torch.no_grad():
            outputs = model(input_ids=source_ids, attention_mask=source_mask, labels=target_ids, decoder_attention_mask=target_mask)
            loss = outputs.loss
        eval_loss += loss.item()
        batch_num += 1
    # Perplexity = exp(mean cross-entropy loss over batches).
    eval_loss = (eval_loss / batch_num)
    eval_ppl = round(np.exp(eval_loss), 5)
    return eval_ppl
def make_hdf5(model_config, train_config, mode):
    """Dump a dataset split into a chunked, optionally compressed HDF5 file.

    Returns the path of the (possibly pre-existing) HDF5 file; an existing
    file is never overwritten.
    """
    if ('hdf5' in model_config['dataset_name']):
        raise ValueError('Reading from an HDF5 file which you will probably be about to overwrite! Override this error only if you know what youre doing!')
    file_name = '{dataset_name}_{size}_{mode}.hdf5'.format(dataset_name=model_config['dataset_name'], size=model_config['img_size'], mode=mode)
    file_path = os.path.join(model_config['data_path'], file_name)
    train = (True if (mode == 'train') else False)
    if os.path.isfile(file_path):
        # Already dumped: just report where it lives.
        print('{file_name} exist!\nThe file are located in the {file_path}'.format(file_name=file_name, file_path=file_path))
    else:
        dataset = LoadDataset(model_config['dataset_name'], model_config['data_path'], train=train, download=True, resize_size=model_config['img_size'], hdf5_path=None, random_flip=False)
        loader = DataLoader(dataset, batch_size=model_config['batch_size4prcsing'], shuffle=False, pin_memory=False, num_workers=train_config['num_workers'], drop_last=False)
        print(('Starting to load %s into an HDF5 file with chunk size %i and compression %s...' % (model_config['dataset_name'], model_config['chunk_size'], model_config['compression'])))
        for (i, (x, y)) in enumerate(tqdm(loader)):
            # Map images from [-1, 1] floats back to uint8 [0, 255].
            x = (255 * ((x + 1) / 2.0)).byte().numpy()
            y = y.numpy()
            if (i == 0):
                # First batch: create resizable datasets sized via maxshape.
                with h5.File(file_path, 'w') as f:
                    print(('Producing dataset of len %d' % len(loader.dataset)))
                    imgs_dset = f.create_dataset('imgs', x.shape, dtype='uint8', maxshape=(len(loader.dataset), 3, model_config['img_size'], model_config['img_size']), chunks=(model_config['chunk_size'], 3, model_config['img_size'], model_config['img_size']), compression=model_config['compression'])
                    print(('Image chunks chosen as ' + str(imgs_dset.chunks)))
                    imgs_dset[...] = x
                    labels_dset = f.create_dataset('labels', y.shape, dtype='int64', maxshape=(len(loader.dataset),), chunks=(model_config['chunk_size'],), compression=model_config['compression'])
                    print(('Label chunks chosen as ' + str(labels_dset.chunks)))
                    labels_dset[...] = y
            else:
                # Subsequent batches: grow both datasets and append at the end.
                with h5.File(file_path, 'a') as f:
                    f['imgs'].resize((f['imgs'].shape[0] + x.shape[0]), axis=0)
                    f['imgs'][(- x.shape[0]):] = x
                    f['labels'].resize((f['labels'].shape[0] + y.shape[0]), axis=0)
                    f['labels'][(- y.shape[0]):] = y
    return file_path
def use_transformers(sentences=('', '')):
    """Embed *sentences* with the shibing624/text2vec-base-chinese BERT model.

    Returns mean-pooled sentence embeddings (one row per sentence).
    """
    from transformers import BertTokenizer, BertModel
    import torch

    def mean_pooling(model_output, attention_mask):
        # Average token embeddings, ignoring padded positions.
        token_embeddings = model_output[0]
        mask = attention_mask.unsqueeze((- 1)).expand(token_embeddings.size()).float()
        summed = torch.sum((token_embeddings * mask), 1)
        denom = torch.clamp(mask.sum(1), min=1e-09)
        return summed / denom

    tokenizer = BertTokenizer.from_pretrained('shibing624/text2vec-base-chinese')
    model = BertModel.from_pretrained('shibing624/text2vec-base-chinese')
    encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
    with torch.no_grad():
        model_output = model(**encoded_input)
    sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
    print(sentence_embeddings.shape)
    return sentence_embeddings
def test_siblings_policy_negative_examples_3(digraph, features_1d, labels):
    """Negative-example mask for node '2.1' under the siblings policy."""
    policy = SiblingsPolicy(digraph, features_1d, labels)
    expected = [False, False, False, True, False, False, True, True]
    assert_array_equal(expected, policy.negative_examples('2.1'))
class CompaqVisualFCompiler(FCompiler):
    """numpy.distutils driver for the DIGITAL / Compaq Visual Fortran compiler (Windows)."""
    compiler_type = 'compaqv'
    description = 'DIGITAL or Compaq Visual Fortran Compiler'
    version_pattern = '(DIGITAL|Compaq) Visual Fortran Optimizing Compiler Version (?P<version>[^\\s]*).*'
    compile_switch = '/compile_only'
    object_switch = '/object:'
    library_switch = '/OUT:'
    static_lib_extension = '.lib'
    static_lib_format = '%s%s'
    module_dir_switch = '/module:'
    module_include_switch = '/I'
    ar_exe = 'lib.exe'  # fallback archiver
    fc_exe = 'DF'
    if (sys.platform == 'win32'):
        from numpy.distutils.msvccompiler import MSVCCompiler
        # Prefer MSVC's lib.exe when it can be located; each except clause
        # below works around a known failure mode of msvccompiler.initialize().
        try:
            m = MSVCCompiler()
            m.initialize()
            ar_exe = m.lib
        except DistutilsPlatformError:
            pass
        except AttributeError:
            msg = get_exception()
            if ('_MSVCCompiler__root' in str(msg)):
                # Known msvccompiler.py bug; safe to ignore.
                print(('Ignoring "%s" (I think it is msvccompiler.py bug)' % msg))
            else:
                raise
        except IOError:
            e = get_exception()
            if (not ('vcvarsall.bat' in str(e))):
                print('Unexpected IOError in', __file__)
                raise e
        except ValueError:
            e = get_exception()
            if (not ("path']" in str(e))):
                print('Unexpected ValueError in', __file__)
                raise e
    executables = {'version_cmd': ['<F90>', '/what'], 'compiler_f77': [fc_exe, '/f77rtl', '/fixed'], 'compiler_fix': [fc_exe, '/fixed'], 'compiler_f90': [fc_exe], 'linker_so': ['<F90>'], 'archiver': [ar_exe, '/OUT:'], 'ranlib': None}

    def get_flags(self):
        return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', '/names:lowercase', '/assume:underscore']

    def get_flags_opt(self):
        return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']

    def get_flags_arch(self):
        return ['/threads']

    def get_flags_debug(self):
        return ['/debug']
class NaiveSyncBatchNorm(nn.BatchNorm2d):
    # BatchNorm2d variant that synchronises batch statistics across all
    # distributed workers by all-reducing per-channel mean and mean-of-squares.

    def forward(self, input):
        if ((get_world_size() == 1) or (not self.training)):
            # Single process or eval mode: plain BatchNorm2d suffices.
            return super().forward(input)
        assert (input.shape[0] > 0), 'SyncBatchNorm does not support empty inputs'
        C = input.shape[1]
        mean = torch.mean(input, dim=[0, 2, 3])
        meansqr = torch.mean((input * input), dim=[0, 2, 3])
        # All-reduce both statistics in one tensor, then average over workers.
        vec = torch.cat([mean, meansqr], dim=0)
        vec = (AllReduce.apply(vec) * (1.0 / dist.get_world_size()))
        (mean, meansqr) = torch.split(vec, C)
        var = (meansqr - (mean * mean))  # Var[x] = E[x^2] - E[x]^2
        # Update running statistics with an exponential moving average.
        self.running_mean += (self.momentum * (mean.detach() - self.running_mean))
        self.running_var += (self.momentum * (var.detach() - self.running_var))
        invstd = torch.rsqrt((var + self.eps))
        # Fuse normalisation and the affine transform into one scale/bias pair.
        scale = (self.weight * invstd)
        bias = (self.bias - (mean * scale))
        scale = scale.reshape(1, (- 1), 1, 1)
        bias = bias.reshape(1, (- 1), 1, 1)
        return ((input * scale) + bias)
def multi_gpu_test(model, data_loader, tmpdir=None):
    """Distributed inference: every rank runs its shard, rank 0 shows progress,
    and per-rank results are gathered via ``collect_results``."""
    model.eval()
    dataset = data_loader.dataset
    (rank, world_size) = get_dist_info()
    prog_bar = mmcv.ProgressBar(len(dataset)) if (rank == 0) else None
    results = []
    for data in data_loader:
        with torch.no_grad():
            results.append(model(return_loss=False, rescale=True, **data))
        if prog_bar is not None:
            # Advance by the global batch size (all ranks process in lockstep).
            batch_size = data['img'][0].size(0)
            for _ in range(batch_size * world_size):
                prog_bar.update()
    return collect_results(results, len(dataset), tmpdir)
class ModulatedDeformConvFunction(Function):
    # autograd Function for modulated deformable convolution (DCNv2),
    # delegating compute to the `deform_conv_cuda` extension.
    # NOTE(review): `forward`/`backward` appear without @staticmethod in this
    # dump — confirm the decorators were not lost upstream.

    def forward(ctx, input, offset, mask, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1):
        # Remember conv hyper-parameters for the backward pass.
        ctx.stride = stride
        ctx.padding = padding
        ctx.dilation = dilation
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.with_bias = (bias is not None)
        if (not ctx.with_bias):
            bias = input.new_empty(1)  # placeholder; kernel ignores it when with_bias is False
        if (not input.is_cuda):
            raise NotImplementedError
        if (weight.requires_grad or mask.requires_grad or offset.requires_grad or input.requires_grad):
            ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new_empty(ModulatedDeformConvFunction._infer_shape(ctx, input, weight))
        ctx._bufs = [input.new_empty(0), input.new_empty(0)]  # scratch buffers for the CUDA kernel
        deform_conv_cuda.modulated_deform_conv_cuda_forward(input, weight, bias, ctx._bufs[0], offset, mask, output, ctx._bufs[1], weight.shape[2], weight.shape[3], ctx.stride, ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, ctx.groups, ctx.deformable_groups, ctx.with_bias)
        return output

    def backward(ctx, grad_output):
        if (not grad_output.is_cuda):
            raise NotImplementedError
        (input, offset, mask, weight, bias) = ctx.saved_tensors
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        deform_conv_cuda.modulated_deform_conv_cuda_backward(input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1], grad_input, grad_weight, grad_bias, grad_offset, grad_mask, grad_output, weight.shape[2], weight.shape[3], ctx.stride, ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, ctx.groups, ctx.deformable_groups, ctx.with_bias)
        if (not ctx.with_bias):
            grad_bias = None
        # One gradient per forward argument; non-tensor args get None.
        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, None, None, None, None, None)

    def _infer_shape(ctx, input, weight):
        # Standard convolution output-size arithmetic.
        n = input.size(0)
        channels_out = weight.size(0)
        (height, width) = input.shape[2:4]
        (kernel_h, kernel_w) = weight.shape[2:4]
        height_out = ((((height + (2 * ctx.padding)) - ((ctx.dilation * (kernel_h - 1)) + 1)) // ctx.stride) + 1)
        width_out = ((((width + (2 * ctx.padding)) - ((ctx.dilation * (kernel_w - 1)) + 1)) // ctx.stride) + 1)
        return (n, channels_out, height_out, width_out)
def get_language_modeling_adapter_spec() -> AdapterSpec:
    """Adapter spec for pure language modeling: no prompt scaffolding, no
    in-context examples, and zero generated tokens (scoring only)."""
    return AdapterSpec(
        method=ADAPT_LANGUAGE_MODELING,
        instructions='',
        input_prefix='',
        input_suffix='',
        output_prefix='',
        output_suffix='',
        max_train_instances=0,
        num_outputs=1,
        max_tokens=0,
        temperature=0.0,
    )
def main(start_epoch, epochs):
    """Evaluate AutoDeeplab checkpoints `start_epoch .. start_epoch+epochs-1`
    on Cityscapes with multi-scale inference; append per-class IoU and mIoU
    to log/result.txt.

    NOTE(review): indentation of the collapsed source was reconstructed —
    confirm the per-epoch vs per-batch nesting against the upstream script.
    """
    assert torch.cuda.is_available(), NotImplementedError('No cuda available ')
    if (not osp.exists('data/')):
        os.mkdir('data/')
    if (not osp.exists('log/')):
        os.mkdir('log/')
    args = obtain_evaluate_args()
    torch.backends.cudnn.benchmark = True
    # Checkpoint filename template; %d is filled with the epoch number below.
    model_fname = 'data/deeplab_{0}_{1}_v3_{2}_epoch%d.pth'.format(args.backbone, args.dataset, args.exp)
    if (args.dataset == 'cityscapes'):
        dataset = CityscapesSegmentation(args=args, root=Path.db_root_dir(args.dataset), split='reval')
    else:
        # NOTE(review): returns (not raises) NotImplementedError — likely a bug upstream.
        return NotImplementedError
    if (args.backbone == 'autodeeplab'):
        model = Retrain_Autodeeplab(args)
    else:
        raise ValueError('Unknown backbone: {}'.format(args.backbone))
    if (not args.train):
        val_dataloader = DataLoader(dataset, batch_size=16, shuffle=False)
        model = torch.nn.DataParallel(model).cuda()
        print('start evaluate')
        for epoch in range(epochs):
            print('evaluate epoch {:}'.format((epoch + start_epoch)))
            checkpoint_name = (model_fname % (epoch + start_epoch))
            print(checkpoint_name)
            checkpoint = torch.load(checkpoint_name)
            # Strip the 'module.' prefix added by DataParallel and drop
            # num_batches_tracked buffers.
            state_dict = {k[7:]: v for (k, v) in checkpoint['state_dict'].items() if ('tracked' not in k)}
            model.module.load_state_dict(state_dict)
            inter_meter = AverageMeter()
            union_meter = AverageMeter()
            for (i, sample) in enumerate(val_dataloader):
                (inputs, target) = (sample['image'], sample['label'])
                (N, H, W) = target.shape
                total_outputs = torch.zeros((N, dataset.NUM_CLASSES, H, W)).cuda()
                with torch.no_grad():
                    # Multi-scale inference: accumulate logits resized back to (H, W).
                    for (j, scale) in enumerate(args.eval_scales):
                        new_scale = [int((H * scale)), int((W * scale))]
                        inputs = F.upsample(inputs, new_scale, mode='bilinear', align_corners=True)
                        inputs = inputs.cuda()
                        outputs = model(inputs)
                        outputs = F.upsample(outputs, (H, W), mode='bilinear', align_corners=True)
                        total_outputs += outputs
                    (_, pred) = torch.max(total_outputs, 1)
                    pred = pred.detach().cpu().numpy().squeeze().astype(np.uint8)
                    mask = target.numpy().astype(np.uint8)
                    print('eval: {0}/{1}'.format((i + 1), len(val_dataloader)))
                    (inter, union) = inter_and_union(pred, mask, len(dataset.CLASSES))
                    inter_meter.update(inter)
                    union_meter.update(union)
            # Per-class IoU over the whole split (epsilon avoids div-by-zero).
            iou = (inter_meter.sum / (union_meter.sum + 1e-10))
            miou = 'epoch: {0} Mean IoU: {1:.2f}'.format(epoch, (iou.mean() * 100))
            f = open('log/result.txt', 'a')
            for (i, val) in enumerate(iou):
                class_iou = 'IoU {0}: {1:.2f}\n'.format(dataset.CLASSES[i], (val * 100))
                f.write(class_iou)
            f.write('\n')
            f.write(miou)
            f.write('\n')
            f.close()
class SubData(NamedTuple):
    # A sampled-subgraph bundle: the sliced graph data plus the bookkeeping
    # needed to map results back to the original graph.
    data: Data
    batch_size: int
    n_id: Tensor
    offset: Tensor
    count: Tensor

    def to(self, *args, **kwargs):
        """Move only `data`; the index bookkeeping tensors are left as-is."""
        return self._replace(data=self.data.to(*args, **kwargs))
def get_bar_order(plot_params):
    """Return bar keys, in drawing order, for the requested plot flavour."""
    if plot_params['detailed']:
        order = ['neg_s_neg_p', 'neg_s_pos_p', 'pos_s_neg_p', 'pos_s_pos_p']
        if plot_params['show_score_diffs']:
            # Score-difference bars go in front of the four component bars.
            order = ['neg_s', 'pos_s'] + order
    elif plot_params['all_pos_contributions']:
        order = ['all_pos_pos', 'all_pos_neg']
    else:
        order = ['neg_total', 'pos_total']
    if plot_params['show_total']:
        order = ['total'] + order
    return order
def _wrapper(args=None):
    # Legacy console-script entry point kept for old pip script wrappers:
    # warn on stderr, then delegate to the real CLI main().
    sys.stderr.write("WARNING: pip is being invoked by an old script wrapper. This will fail in a future version of pip.\nPlease see for advice on fixing the underlying issue.\nTo avoid this problem you can invoke Python with '-m pip' instead of running pip directly.\n")
    return main(args)
class TestSave(TestCase):
    # Round-trip tests for imsave/imread, written in the old nose
    # generator-test style (the yields hand (func, *args) pairs to the runner).

    def roundtrip(self, x, scaling=1):
        # Save `x` to a temporary PNG and check it reads back (up to scaling).
        with NamedTemporaryFile(suffix='.png') as f:
            fname = f.name
            imsave(fname, x)
            y = imread(fname)
            assert_array_almost_equal((x * scaling).astype(np.int32), y)

    def test_imsave_roundtrip(self):
        dtype = np.uint8
        np.random.seed(0)  # deterministic pixel data
        for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]:
            x = (np.ones(shape, dtype=dtype) * np.random.rand(*shape))
            if np.issubdtype(dtype, np.floating):
                # Float images are stored as 8-bit, hence the 255 scaling.
                (yield (self.roundtrip, x, 255))
            else:
                x = (x * 255).astype(dtype)
                (yield (self.roundtrip, x))
def from_representation(array: ndarray, kind: str, **kwargs) -> Music:
    """Decode an array in the representation named by `kind` into a Music object.

    Args:
        array: encoded music data.
        kind: one of 'pitch', 'pianoroll', 'event' or 'note' (aliases accepted).
        **kwargs: forwarded to the specific decoder.

    Raises:
        ValueError: if `kind` is not a recognised representation name.
    """
    # Lower-case once instead of on every comparison.
    kind_lower = kind.lower()
    if kind_lower in ('pitch', 'pitch-based'):
        return from_pitch_representation(array, **kwargs)
    if kind_lower in ('pianoroll', 'piano-roll', 'piano roll'):
        return from_pianoroll_representation(array, **kwargs)
    if kind_lower in ('event', 'event-based'):
        return from_event_representation(array, **kwargs)
    if kind_lower in ('note', 'note-based'):
        return from_note_representation(array, **kwargs)
    # Fixed garbled message (was "butgot : ").
    raise ValueError(f"Expect `kind` to be 'pitch', 'pianoroll', 'event' or 'note', but got: {kind}.")
def test_random_noise():
    """RandomNoise augmentation: gaussian and poisson paths, prob=0 no-op, repr."""
    results = {}
    results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
    # Gaussian noise branch (shape must be preserved).
    model = RandomNoise(params=dict(noise_type=['gaussian'], noise_prob=[1], gaussian_sigma=[0, 50], gaussian_gray_noise_prob=1), keys=['lq'])
    results = model(results)
    assert (results['lq'].shape == (8, 8, 3))
    # Poisson noise branch.
    model = RandomNoise(params=dict(noise_type=['poisson'], noise_prob=[1], poisson_scale=[0, 1], poisson_gray_noise_prob=1), keys=['lq'])
    results = model(results)
    assert (results['lq'].shape == (8, 8, 3))
    # prob=0 must leave the inputs untouched.
    params = dict(noise_type=['gaussian'], noise_prob=[1], gaussian_sigma=[0, 50], gaussian_gray_noise_prob=1, prob=0)
    model = RandomNoise(params=params, keys=['lq'])
    assert (model(results) == results)
    assert (repr(model) == ((model.__class__.__name__ + f'(params={params}, ') + "keys=['lq'])"))
# NOTE(review): restored the `@pytest.mark` prefixes — the source showed bare
# `.parametrize(...)` lines, which are a syntax error. Assumes `import pytest`
# exists at the top of this file (standard for these test modules) — confirm.
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('inplace', [False, True])
def test_div2_double_backward(inplace, seed, ctx, func_name):
    """Second-order (double-backward) gradient check for element-wise division."""
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    # Scale the denominator by 2 to keep it away from near-zero values.
    inputs = [rng.randn(2, 3).astype(np.float32), (rng.randn(2, 3).astype(np.float32) * 2)]
    backward_function_tester(rng, F.div2, inputs=inputs, func_args=[inplace], func_kwargs={}, atol_accum=0.1, dstep=0.0001, ctx=ctx)
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a quadratic spline with coefficients `cj` at the points `newx`.

    Samples are assumed at x0 + n*dx; points outside [x0, x0+(N-1)*dx] are
    handled by mirror-symmetric extension (via recursion on reflected points).
    """
    newx = ((asarray(newx) - x0) / dx)  # map to index coordinates
    res = zeros_like(newx)
    if (res.size == 0):
        return res
    N = len(cj)
    cond1 = (newx < 0)
    cond2 = (newx > (N - 1))
    cond3 = (~ (cond1 | cond2))  # points strictly inside the support
    # Mirror boundary conditions: reflect about 0 and about N-1.
    res[cond1] = qspline1d_eval(cj, (- newx[cond1]))
    res[cond2] = qspline1d_eval(cj, ((2 * (N - 1)) - newx[cond2]))
    newx = newx[cond3]
    if (newx.size == 0):
        return res
    result = zeros_like(newx)
    # A quadratic B-spline has support over 3 neighbouring knots: sum their
    # contributions, clamping indices at the array edges.
    jlower = (floor((newx - 1.5)).astype(int) + 1)
    for i in range(3):
        thisj = (jlower + i)
        indj = thisj.clip(0, (N - 1))
        result += (cj[indj] * _quadratic((newx - thisj)))
    res[cond3] = result
    return res
def format_code_example(code: str, max_len: int, in_docstring: bool=False):
    """Black-format a (possibly doctest-style) code example.

    Returns (formatted_code, error) where `error` is non-empty when black
    failed and the original code was kept.
    """
    code_lines = code.split('\n')
    # Skip leading blank lines.
    idx = 0
    while ((idx < len(code_lines)) and is_empty_line(code_lines[idx])):
        idx += 1
    if (idx >= len(code_lines)):
        return ('', '')
    # Dedent everything to the indent of the first non-empty line.
    indent = find_indent(code_lines[idx])
    code_lines = [l[indent:] for l in code_lines[idx:]]
    has_doctest = (code_lines[0][:3] in DOCTEST_PROMPTS)
    (code_samples, outputs) = parse_code_example(code_lines)
    # Join all samples so black formats them in one pass; re-split afterwards.
    delimiter = '\n\n### New code sample ###\n'
    full_code = delimiter.join(code_samples)
    line_length = (max_len - indent)
    if has_doctest:
        line_length -= 4  # room for the '>>> ' prompt
    # Temporarily swap out patterns black would mangle.
    for (k, v) in BLACK_AVOID_PATTERNS.items():
        full_code = full_code.replace(k, v)
    try:
        formatted_code = black.format_str(full_code, mode=black.FileMode([black.TargetVersion.PY37], line_length=line_length))
        error = ''
    except Exception as e:
        # Keep the original code and report the black failure to the caller.
        formatted_code = full_code
        error = f'''Code sample: {full_code} Error message: {e}'''
    # Undo the pattern substitutions.
    for (k, v) in BLACK_AVOID_PATTERNS.items():
        formatted_code = formatted_code.replace(v, k)
    if in_docstring:
        # Triple-double quotes would terminate the enclosing docstring.
        formatted_code = formatted_code.replace('"""', "'''")
    code_samples = formatted_code.split(delimiter)
    if (len(outputs) == (len(code_samples) - 1)):
        outputs.append('')
    formatted_lines = []
    for (code_sample, output) in zip(code_samples, outputs):
        code_sample = code_sample.strip()
        in_triple_quotes = False
        for line in code_sample.strip().split('\n'):
            if (has_doctest and (not is_empty_line(line))):
                # Continuation lines (indented, closers, inside strings) get
                # the '... ' prompt; fresh statements get '>>> '.
                prefix = ('... ' if (line.startswith(' ') or (line in [')', ']', '}']) or in_triple_quotes) else '>>> ')
            else:
                prefix = ''
            indent_str = ('' if is_empty_line(line) else (' ' * indent))
            formatted_lines.append(((indent_str + prefix) + line))
            if ('"""' in line):
                in_triple_quotes = (not in_triple_quotes)
        formatted_lines.extend([((' ' * indent) + line) for line in output.split('\n')])
        if (not output.endswith('===PT-TF-SPLIT===')):
            formatted_lines.append('')
    result = '\n'.join(formatted_lines)
    return (result.rstrip(), error)
def parse_filenames(dirname, pattern='*conll'):
    """Yield paths of all files under `dirname` whose names match `pattern`."""
    for root, _dirs, filenames in os.walk(dirname):
        matched = (name for name in filenames if fnmatch(name, pattern))
        for name in matched:
            yield os.path.join(root, name)
_utils.test()  # NOTE(review): looks like a truncated decorator (probably `@test_utils.test()`), and `func` below likely lost a `@ti.kernel` decorator — confirm against upstream.
def test_atomic_xor_evaluated():
    pass
def test_atomic_xor_expr_evaled():
    # XOR-ing 1023 (= 0b1111111111) with 2**i for i in 0..9 clears every bit,
    # so the field must end at 0.
    c = ti.field(ti.i32)
    step = 42  # unused here; presumably leftover — verify
    ti.root.place(c)

    def func():
        c[None] = 1023
        for i in range(10):
            ti.atomic_xor(c[None], (2 ** i))

    func()
    assert (c[None] == 0)
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
    """Normalise a single BTW value into a one-element list.

    Null-like inputs map to [nan]; invalid values follow the `errors` mode
    ('raise', 'ignore', or anything else -> coerce to nan).
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_nl_btw(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        return [val if errors == 'ignore' else np.nan]
    result: Any = []
    if output_format in {'compact', 'standard'}:
        result = [btw.compact(val)] + result
    return result
class WeightNormConv2d(nn.Conv2d):
    # Conv2d with weight normalisation (w = g * V / ||V||, Salimans & Kingma
    # 2016), plus Polyak-averaged copies of V, g and b for use at eval time.

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, init_scale=1.0, polyak_decay=0.9995):
        super(WeightNormConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups)
        # Reinterpret the conv parameters: V is the direction, g the magnitude.
        self.V = self.weight
        self.g = Parameter(torch.Tensor(out_channels))
        self.b = self.bias
        # Polyak (exponential moving) averages of the three parameters.
        self.register_buffer('V_avg', torch.zeros(self.V.size()))
        self.register_buffer('g_avg', torch.zeros(out_channels))
        self.register_buffer('b_avg', torch.zeros(out_channels))
        self.init_scale = init_scale
        self.polyak_decay = polyak_decay
        self.reset_parameters()

    def reset_parameters(self):
        # Intentionally a no-op: real initialisation is data-dependent and
        # happens in forward(init=True).
        return

    def forward(self, x, init=False):
        if (init is True):
            # Data-dependent init: sample V, normalise it, then pick g and b so
            # the pre-activation has zero mean and `init_scale` std on this batch.
            self.V.data.copy_((torch.randn(self.V.data.size()).type_as(self.V.data) * 0.05))
            v_norm = (self.V.data / self.V.data.view(self.out_channels, (- 1)).norm(2, 1).view(self.out_channels, *([1] * (len(self.kernel_size) + 1))).expand_as(self.V.data))
            x_init = F.conv2d(x, Variable(v_norm), None, self.stride, self.padding, self.dilation, self.groups).data
            # Per-output-channel mean and variance over batch and spatial dims.
            t_x_init = x_init.transpose(0, 1).contiguous().view(self.out_channels, (- 1))
            (m_init, v_init) = (t_x_init.mean(1).squeeze(1), t_x_init.var(1).squeeze(1))
            scale_init = (self.init_scale / torch.sqrt((v_init + 1e-10)))
            self.g.data.copy_(scale_init)
            self.b.data.copy_(((- m_init) * scale_init))
            scale_init_shape = scale_init.view(1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            m_init_shape = m_init.view(1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            x_init = (scale_init_shape.expand_as(x_init) * (x_init - m_init_shape.expand_as(x_init)))
            # Seed the Polyak averages with the freshly initialised values.
            self.V_avg.copy_(self.V.data)
            self.g_avg.copy_(self.g.data)
            self.b_avg.copy_(self.b.data)
            return Variable(x_init)
        else:
            # Live parameters during training, Polyak averages at eval time.
            (v, g, b) = get_vars_maybe_avg(self, ['V', 'g', 'b'], self.training, polyak_decay=self.polyak_decay)
            scalar = torch.norm(v.view(self.out_channels, (- 1)), 2, 1)
            if (len(scalar.size()) == 2):
                scalar = (g / scalar.squeeze(1))
            else:
                scalar = (g / scalar)
            # w = g * v / ||v||, broadcast over the kernel dimensions.
            w = (scalar.view(self.out_channels, *([1] * (len(v.size()) - 1))).expand_as(v) * v)
            x = F.conv2d(x, w, b, self.stride, self.padding, self.dilation, self.groups)
            return x
def construct_transduction(example, para_generator, hallu_generator):
    """Paraphrase `example['text']` and hallucinate on the paraphrase.

    Returns None if either stage fails, otherwise a dict bundling the
    original text with the hallucination outputs.
    """
    para = para_generator.generate(input_text=example['text'])
    if para is None:
        return None
    hallu = hallu_generator.hallucinate(input_text=para)
    if hallu is None:
        return None
    return dict(
        text=example['text'],
        para=hallu['original_text'],
        template=hallu['template'],
        hallu=hallu['gen_text'],
        answers=hallu['answers'],
        fillings=hallu['fillings'],
    )
def test_water_filling():
    """Stress the max-min-fairness water-filling policy on a random job mix.

    Returns the wall-clock time (seconds) of the allocation call.
    """
    policy = max_min_fairness_water_filling.MaxMinFairnessWaterFillingPolicyWithPerf(priority_reweighting_policies=None)
    worker_types = ['k80', 'p100', 'v100']
    cluster_spec = {worker_type: 64 for worker_type in worker_types}
    num_jobs = 300
    print(('Total number of jobs: %d' % num_jobs))
    unflattened_throughputs = {}
    scale_factors = {}
    unflattened_priority_weights = {}
    num_workers_requested = 0
    for i in range(num_jobs):
        # Sorted random throughputs so faster GPU types get larger values.
        throughputs = [random.random() for i in range(len(worker_types))]
        throughputs.sort()
        unflattened_throughputs[i] = {worker_types[i]: throughputs[i] for i in range(len(worker_types))}
        scale_factors[i] = (2 ** random.randint(0, 2))  # 1, 2 or 4 workers
        num_workers_requested += scale_factors[i]
        unflattened_priority_weights[i] = random.randint(1, 5)
        print(('Job %d: Throughputs=%s, Priority=%d, Scale factor=%d' % (i, unflattened_throughputs[i], unflattened_priority_weights[i], scale_factors[i])))
    print(('Total number of workers requested: %d' % num_workers_requested))
    start_time = time.time()
    allocation = policy.get_allocation(unflattened_throughputs, scale_factors, unflattened_priority_weights, cluster_spec, verbose=True)
    print()
    return (time.time() - start_time)
def _unique_impl(input: Tensor, sorted: bool=True, return_inverse: bool=False, return_counts: bool=False, dim: Optional[int]=None) -> _unique_impl_out:
    """Shared implementation behind torch.unique.

    Dispatches to the dim-wise or flattened C++ kernel and always returns the
    full (output, inverse_indices, counts) triple; the public wrapper decides
    which pieces to surface.
    """
    if (not torch.jit.is_scripting()):
        # Respect __torch_function__ overrides on tensor-like subclasses
        # (skipped under TorchScript, which handles dispatch itself).
        if ((type(input) is not Tensor) and has_torch_function((input,))):
            return handle_torch_function(unique, (input,), input, sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim)
    if (dim is not None):
        (output, inverse_indices, counts) = _VF.unique_dim(input, dim, sorted=sorted, return_inverse=return_inverse, return_counts=return_counts)
    else:
        (output, inverse_indices, counts) = torch._unique2(input, sorted=sorted, return_inverse=return_inverse, return_counts=return_counts)
    return (output, inverse_indices, counts)
def test_RegularArray_NumpyArray():
    """Element access into RegularArray-of-NumpyArray, including the
    EmptyArray-content / zeros_length case.

    NOTE(review): the inner `f` helpers look like they may have lost a numba
    jit decorator in this dump — confirm against upstream.
    """
    # 6 floats grouped into regular lists of 3 -> two sublists.
    v2a = ak.contents.regulararray.RegularArray(ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])), 3)

    def f(out, obj):
        out[0] = len(obj)
        out[1] = obj[0][0]
        out[2] = obj[0][1]
        out[3] = obj[1][0]
        out[4] = obj[1][1]
        out[5] = len(obj[1])

    out = np.zeros(6, dtype=np.float64)
    f(out, ak.highlevel.Array(v2a))
    assert (out.tolist() == [2.0, 0.0, 1.1, 3.3, 4.4, 3.0])
    # Zero-size regular lists over an empty content: length comes from zeros_length.
    v2b = ak.contents.regulararray.RegularArray(ak.contents.emptyarray.EmptyArray().to_NumpyArray(np.dtype(np.float64)), 0, zeros_length=10)

    def f(out, obj):
        out[0] = len(obj)
        out[1] = len(obj[0])
        out[2] = len(obj[1])

    out = np.zeros(3, dtype=np.float64)
    f(out, ak.highlevel.Array(v2b))
    assert (out.tolist() == [10.0, 0.0, 0.0])
def vis_with_legend(indir_list, raw_rgb_dir, outdir, raw_gray_dir=None, gt_dir=None, ext='png'):
    """Tile raw RGB / optional gray / optional GT / predicted label maps for
    each image and save one figure per image with a shared class legend.

    NOTE(review): relies on module globals `N_CLASS`, `values` and
    `label_list` (class ids and names) defined elsewhere in the file — confirm.
    """
    n_imgs = (1 + len(indir_list))
    if raw_gray_dir:
        n_imgs += 1
    if gt_dir:
        n_imgs += 1
    mkdir_if_not_exist(outdir)
    # Arrange panels on a two-row grid.
    n_row = 2
    n_col = int(round((float(n_imgs) / n_row)))
    img_fn_list = os.listdir(indir_list[0])
    for one_img_fn in tqdm(img_fn_list):
        fig = plt.figure()
        ax_list = []
        # Panel 1: raw RGB image.
        ax_list.append(fig.add_subplot(n_row, n_col, 1))
        raw_img = Image.open(os.path.join(raw_rgb_dir, one_img_fn))
        ax_list[0].imshow(raw_img)
        ax_list[0].axis('off')
        ax_list[0].set_xticklabels([])
        ax_list[0].set_yticklabels([])
        offset = 1
        if raw_gray_dir:
            # Optional grayscale panel.
            ax_list.append(fig.add_subplot(n_row, n_col, (offset + 1)))
            raw_img = Image.open(os.path.join(raw_gray_dir, one_img_fn))
            ax_list[offset].imshow(raw_img, cmap='gray')
            ax_list[offset].axis('off')
            ax_list[offset].set_xticklabels([])
            ax_list[offset].set_yticklabels([])
            offset += 1
        if gt_dir:
            # Optional ground-truth panel (Cityscapes file-naming scheme).
            ax_list.append(fig.add_subplot(n_row, n_col, (offset + 1)))
            gt_img = Image.open(os.path.join(gt_dir, one_img_fn.replace('leftImg8bit', 'gtFine_gtlabels')))
            ax_list[offset].imshow(gt_img, vmin=0, vmax=(N_CLASS - 1), interpolation='none', cmap='jet')
            ax_list[offset].axis('off')
            ax_list[offset].set_xticklabels([])
            ax_list[offset].set_yticklabels([])
            offset += 1
        # One panel per prediction directory.
        for (i, indir) in enumerate(indir_list):
            hard_to_see_img = Image.open(os.path.join(indir, one_img_fn)).resize(raw_img.size)
            hard_to_see_img = np.array(hard_to_see_img)
            ax_list.append(fig.add_subplot(n_row, n_col, ((i + offset) + 1)))
            im = ax_list[(i + offset)].imshow(hard_to_see_img.astype(np.uint8), vmin=0, vmax=(N_CLASS - 1), interpolation='none', cmap='jet')
            ax_list[(i + offset)].axis('off')
            ax_list[(i + offset)].set_xticklabels([])
            ax_list[(i + offset)].set_yticklabels([])
            ax_list[(i + offset)].set_title(indir.replace('outputs/', '').replace('/label', '').replace('/', '\n'), fontsize=4)
        fig.subplots_adjust(wspace=0, hspace=0)
        # Build a colour patch per class from the last imshow's colormap.
        colors = [im.cmap(im.norm(value)) for value in values]
        patches = [mpatches.Patch(color=colors[i], label=label_list[i]) for i in range(len(values))]
        if ((n_col * 2) <= N_CLASS):
            n_legend_col = (n_col * 2)
        else:
            n_legend_col = N_CLASS
        lgd = plt.legend(patches, label_list, loc='lower center', bbox_to_anchor=(0, 0, 1, 1), bbox_transform=plt.gcf().transFigure, ncol=n_legend_col, fontsize=5)
        outfn = os.path.join(outdir, one_img_fn)
        outfn = (os.path.splitext(outfn)[0] + ('.%s' % ext))
        fig.savefig(outfn, transparent=True, bbox_inches='tight', pad_inches=0, bbox_extra_artists=(lgd,), dpi=300)
        plt.close()
class EigenCAM(BaseCAM):
    """CAM variant that projects activations onto their first principal
    component; no gradients are needed."""

    def __init__(self, model, target_layers, use_cuda=False, reshape_transform=None):
        super().__init__(model, target_layers, use_cuda, reshape_transform)

    def get_cam_image(self, input_tensor, target_layer, target_category, activations, grads, eigen_smooth):
        # Gradients are deliberately ignored: only the activations matter.
        return get_2d_projection(activations)
class Inception1d(nn.Module):
    """1d InceptionTime-style classifier: Inception backbone plus a pooling head."""

    def __init__(self, num_classes=2, input_channels=8, kernel_size=40, depth=6, bottleneck_size=32, nb_filters=32, use_residual=True, lin_ftrs_head=None, ps_head=0.5, bn_final_head=False, bn_head=True, act_head='relu', concat_pooling=True):
        super().__init__()
        assert (kernel_size >= 40)
        # Three branch kernel sizes (k, k/2, k/4), forced odd so padding is symmetric.
        kernel_size = [((k - 1) if ((k % 2) == 0) else k) for k in [kernel_size, (kernel_size // 2), (kernel_size // 4)]]
        layers = [InceptionBackbone(input_channels=input_channels, kss=kernel_size, depth=depth, bottleneck_size=bottleneck_size, nb_filters=nb_filters, use_residual=use_residual)]
        # Backbone output width: one filter set per kernel branch + the pooling branch.
        n_ks = (len(kernel_size) + 1)
        head = create_head1d((n_ks * nb_filters), nc=num_classes, lin_ftrs=lin_ftrs_head, ps=ps_head, bn_final=bn_final_head, bn=bn_head, act=act_head, concat_pooling=concat_pooling)
        layers.append(head)
        self.layers = nn.Sequential(*layers)

    def forward(self, x, *args, **kwargs):
        # Swap the last two axes before the backbone (channels-last input to
        # channels-first, presumably — confirm against the data pipeline).
        y = self.layers(x.transpose((- 1), (- 2)))
        return (y, None)

    def get_layer_groups(self):
        # Parameter groups for discriminative learning rates.
        depth = self.layers[0].depth
        if (depth > 3):
            return ((self.layers[0].im[3:], self.layers[0].sk[1:]), self.layers[(- 1)])
        else:
            return self.layers[(- 1)]

    def get_output_layer(self):
        return self.layers[(- 1)][(- 1)]

    def set_output_layer(self, x):
        self.layers[(- 1)][(- 1)] = x
def accimage_loader(path):
    # Fast image loading via accimage, falling back to the PIL loader when
    # accimage cannot decode the file (it raises IOError on e.g. corrupt
    # images). The import stays inside the try so failures are localised.
    try:
        import accimage
        return accimage.Image(path)
    except IOError:
        return pil_loader(path)
def gen_web_cov_report(cov_paths, cargs):
    """Render the final lcov HTML report into the configured web directory."""
    genhtml_opts = ''
    if cargs.enable_branch_coverage:
        genhtml_opts += ' --branch-coverage'
    # Assemble the genhtml command line in one pass.
    cmd = '{}{} --output-directory {} {}'.format(cargs.genhtml_path, genhtml_opts, cov_paths['web_dir'], cov_paths['lcov_info_final'])
    run_cmd(cmd, cov_paths['log_file'], cargs, LOG_ERRORS)
    logr(('[+] Final lcov web report: %s/%s' % (cov_paths['web_dir'], 'index.html')), cov_paths['log_file'], cargs)
    return
def get_performance_per_query(per_query_baseline, measure):
    """Extract one measurement per query.

    Args:
        per_query_baseline: mapping of query -> mapping of measure name -> value.
        measure: name of the measurement to pull out.

    Returns:
        dict mapping each query to its value for `measure` (None when the
        query's measurements lack that key, matching dict.get semantics).
    """
    # Dict comprehension replaces the original per-item dict.update() calls.
    return {query: measurements.get(measure) for (query, measurements) in per_query_baseline.items()}
def trieste_deep_gaussian_process(data: Dataset, search_space: SearchSpace, num_layers: int, num_inducing_points: int, learning_rate: float, batch_size: int, epochs: int, fix_noise: bool=False) -> Tuple[(DeepGaussianProcess, Dict[(str, Any)])]:
    """Build a Trieste DeepGaussianProcess with an Adam optimizer whose
    learning rate is dropped 10x halfway through training.

    Returns the model together with the Keras fit arguments used.
    """
    dgp = build_vanilla_deep_gp(data, search_space, num_layers, num_inducing_points)
    if fix_noise:
        # Pin the observation noise to a small constant and freeze it.
        dgp.likelihood_layer.likelihood.variance.assign(1e-05)
        set_trainable(dgp.likelihood_layer, False)

    def scheduler(epoch: int, lr: float) -> float:
        # Single step decay exactly at the halfway epoch.
        if (epoch == (epochs // 2)):
            return (lr * 0.1)
        else:
            return lr

    fit_args = {'batch_size': batch_size, 'epochs': epochs, 'verbose': 0, 'callbacks': tf.keras.callbacks.LearningRateScheduler(scheduler)}
    optimizer = KerasOptimizer(tf.optimizers.Adam(learning_rate), fit_args)
    model = DeepGaussianProcess(dgp, optimizer)
    return (model, fit_args)
class PPROutputData(genpy.Message):
    """Generated ROS message class for ``quadrotor_msgs/PPROutputData``.

    Carries a stamped header, the onboard quad_time, desired thrust/attitude,
    estimated attitude / angular velocity / acceleration, and the four motor
    PWM values.

    NOTE: generator-style serialization code — the struct formats
    (``_struct_3I``/``_struct_H13d``/``_struct_4H``, defined at module level)
    must stay in sync with ``__slots__``/``_slot_types``.
    """
    # Message identity / introspection metadata (generated).
    _md5sum = '732c0e3ca36f241464f8c445e78a0d0a'
    _type = 'quadrotor_msgs/PPROutputData'
    _has_header = True
    _full_text = "Header header\nuint16 quad_time\nfloat64 des_thrust\nfloat64 des_roll\nfloat64 des_pitch\nfloat64 des_yaw\nfloat64 est_roll\nfloat64 est_pitch\nfloat64 est_yaw\nfloat64 est_angvel_x\nfloat64 est_angvel_y\nfloat64 est_angvel_z\nfloat64 est_acc_x\nfloat64 est_acc_y\nfloat64 est_acc_z\nuint16[4] pwm\n\n\nMSG: std_msgs/Header\n# Standard metadata for higher-level stamped data types.\n# This is generally used to communicate timestamped data \n# in a particular coordinate frame.\n# \n# sequence ID: consecutively increasing ID \nuint32 seq\n#Two-integer timestamp that is expressed as:\n# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n# time-handling sugar is provided by the client library\ntime stamp\n#Frame this data is associated with\n# 0: no frame\n# 1: global frame\nstring frame_id\n\n"
    __slots__ = ['header', 'quad_time', 'des_thrust', 'des_roll', 'des_pitch', 'des_yaw', 'est_roll', 'est_pitch', 'est_yaw', 'est_angvel_x', 'est_angvel_y', 'est_angvel_z', 'est_acc_x', 'est_acc_y', 'est_acc_z', 'pwm']
    _slot_types = ['std_msgs/Header', 'uint16', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'uint16[4]']

    def __init__(self, *args, **kwds):
        """Constructor; fields may be passed positionally or as keywords.

        Any field left unset is initialized to its zero/default value.
        """
        if (args or kwds):
            super(PPROutputData, self).__init__(*args, **kwds)
            # Backfill defaults for any fields the caller did not supply.
            if (self.header is None):
                self.header = std_msgs.msg.Header()
            if (self.quad_time is None):
                self.quad_time = 0
            if (self.des_thrust is None):
                self.des_thrust = 0.0
            if (self.des_roll is None):
                self.des_roll = 0.0
            if (self.des_pitch is None):
                self.des_pitch = 0.0
            if (self.des_yaw is None):
                self.des_yaw = 0.0
            if (self.est_roll is None):
                self.est_roll = 0.0
            if (self.est_pitch is None):
                self.est_pitch = 0.0
            if (self.est_yaw is None):
                self.est_yaw = 0.0
            if (self.est_angvel_x is None):
                self.est_angvel_x = 0.0
            if (self.est_angvel_y is None):
                self.est_angvel_y = 0.0
            if (self.est_angvel_z is None):
                self.est_angvel_z = 0.0
            if (self.est_acc_x is None):
                self.est_acc_x = 0.0
            if (self.est_acc_y is None):
                self.est_acc_y = 0.0
            if (self.est_acc_z is None):
                self.est_acc_z = 0.0
            if (self.pwm is None):
                self.pwm = [0, 0, 0, 0]
        else:
            # No arguments: initialize every field to its default.
            self.header = std_msgs.msg.Header()
            self.quad_time = 0
            self.des_thrust = 0.0
            self.des_roll = 0.0
            self.des_pitch = 0.0
            self.des_yaw = 0.0
            self.est_roll = 0.0
            self.est_pitch = 0.0
            self.est_yaw = 0.0
            self.est_angvel_x = 0.0
            self.est_angvel_y = 0.0
            self.est_angvel_z = 0.0
            self.est_acc_x = 0.0
            self.est_acc_y = 0.0
            self.est_acc_z = 0.0
            self.pwm = [0, 0, 0, 0]

    def _get_types(self):
        # Used internally by genpy for type checking / introspection.
        return self._slot_types

    def serialize(self, buff):
        """Serialize the message fields into the buffer ``buff``."""
        try:
            _x = self
            # Header: seq, stamp.secs, stamp.nsecs as three uint32.
            buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
            # frame_id: length-prefixed, utf-8 encoded string.
            _x = self.header.frame_id
            length = len(_x)
            if (python3 or (type(_x) == unicode)):
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack(('<I%sB' % length), length, *_x))
            else:
                buff.write(struct.pack(('<I%ss' % length), length, _x))
            _x = self
            # Fixed-size payload: one uint16 + 13 float64.
            buff.write(_struct_H13d.pack(_x.quad_time, _x.des_thrust, _x.des_roll, _x.des_pitch, _x.des_yaw, _x.est_roll, _x.est_pitch, _x.est_yaw, _x.est_angvel_x, _x.est_angvel_y, _x.est_angvel_z, _x.est_acc_x, _x.est_acc_y, _x.est_acc_z))
            # Four uint16 PWM values.
            buff.write(_struct_4H.pack(*self.pwm))
        except struct.error as se:
            self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x)))))
        except TypeError as te:
            self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x)))))

    def deserialize(self, str):
        """Deserialize fields from the byte string ``str`` (generated name shadows the builtin)."""
        try:
            if (self.header is None):
                self.header = std_msgs.msg.Header()
            end = 0
            _x = self
            start = end
            end += 12
            (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _struct_3I.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.header.frame_id = str[start:end].decode('utf-8')
            else:
                self.header.frame_id = str[start:end]
            _x = self
            start = end
            # 106 bytes = 2 (uint16) + 13 * 8 (float64).
            end += 106
            (_x.quad_time, _x.des_thrust, _x.des_roll, _x.des_pitch, _x.des_yaw, _x.est_roll, _x.est_pitch, _x.est_yaw, _x.est_angvel_x, _x.est_angvel_y, _x.est_angvel_z, _x.est_acc_x, _x.est_acc_y, _x.est_acc_z) = _struct_H13d.unpack(str[start:end])
            start = end
            end += 8
            self.pwm = _struct_4H.unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)

    def serialize_numpy(self, buff, numpy):
        """Serialize into ``buff`` using the caller-provided numpy module for arrays."""
        try:
            _x = self
            buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
            _x = self.header.frame_id
            length = len(_x)
            if (python3 or (type(_x) == unicode)):
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack(('<I%sB' % length), length, *_x))
            else:
                buff.write(struct.pack(('<I%ss' % length), length, _x))
            _x = self
            buff.write(_struct_H13d.pack(_x.quad_time, _x.des_thrust, _x.des_roll, _x.des_pitch, _x.des_yaw, _x.est_roll, _x.est_pitch, _x.est_yaw, _x.est_angvel_x, _x.est_angvel_y, _x.est_angvel_z, _x.est_acc_x, _x.est_acc_y, _x.est_acc_z))
            # pwm is a numpy array here, written raw.
            buff.write(self.pwm.tostring())
        except struct.error as se:
            self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x)))))
        except TypeError as te:
            self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x)))))

    def deserialize_numpy(self, str, numpy):
        """Deserialize from ``str`` using the caller-provided numpy module for arrays."""
        try:
            if (self.header is None):
                self.header = std_msgs.msg.Header()
            end = 0
            _x = self
            start = end
            end += 12
            (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _struct_3I.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.header.frame_id = str[start:end].decode('utf-8')
            else:
                self.header.frame_id = str[start:end]
            _x = self
            start = end
            # 106 bytes = 2 (uint16) + 13 * 8 (float64).
            end += 106
            (_x.quad_time, _x.des_thrust, _x.des_roll, _x.des_pitch, _x.des_yaw, _x.est_roll, _x.est_pitch, _x.est_yaw, _x.est_angvel_x, _x.est_angvel_y, _x.est_angvel_z, _x.est_acc_x, _x.est_acc_y, _x.est_acc_z) = _struct_H13d.unpack(str[start:end])
            start = end
            end += 8
            self.pwm = numpy.frombuffer(str[start:end], dtype=numpy.uint16, count=4)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)
def parse_args():
    """Parse command-line arguments for the segmentor FLOPs script."""
    arg_parser = argparse.ArgumentParser(description='Get the FLOPs of a segmentor')
    arg_parser.add_argument('config', help='train config file path')
    # Input resolution; one or more ints (width/height).
    arg_parser.add_argument('--shape', type=int, nargs='+', default=[2048, 1024],
                            help='input image size')
    return arg_parser.parse_args()
def ground_truth(r):
    """Reference Levinson-style recursion over the coefficients ``r``.

    Produces the coefficient vector built by the classic update
    (reflection coefficient + error term), used as a ground truth for
    testing faster implementations.
    """
    coeffs = np.empty_like(r)
    reflection = (- r[0])      # current reflection coefficient
    err = 1.0                  # running prediction-error term
    coeffs[0] = (- r[0])
    for order in range(1, r.shape[0]):
        err *= (1.0 - (reflection * reflection))
        reflection = ((- (r[order] + np.dot(np.flip(r[:order]), coeffs[:order]))) / err)
        # RHS allocates a fresh array, so the in-place update is safe.
        coeffs[:order] += (reflection * np.flip(coeffs[:order]))
        coeffs[order] = reflection
    return coeffs
def resize(image, size, max_size=None):
    """Resize a PIL image to ``size``, optionally capping the longer side.

    When ``size`` is an int, the shorter side is scaled to ``size`` while
    preserving aspect ratio and ``max_size`` bounds the resulting longer
    side. When ``size`` is a list/tuple it is reversed and used directly
    (presumably (w, h) -> (h, w) for F.resize — TODO confirm with callers).
    """

    def _aspect_preserving_size(image_size, size, max_size=None):
        (w, h) = image_size
        if (max_size is not None):
            shorter = float(min((w, h)))
            longer = float(max((w, h)))
            # Shrink the target if scaling the short side would push the
            # long side past max_size.
            if (((longer / shorter) * size) > max_size):
                size = int(round(((max_size * shorter) / longer)))
        # Short side already matches the target -> keep dimensions as-is.
        if (((w <= h) and (w == size)) or ((h <= w) and (h == size))):
            return (h, w)
        if (w < h):
            ow = size
            oh = int(((size * h) / w))
        else:
            oh = size
            ow = int(((size * w) / h))
        return (oh, ow)

    if isinstance(size, (list, tuple)):
        target = size[::(- 1)]
    else:
        target = _aspect_preserving_size(image.size, size, max_size)
    return F.resize(image, target)
class TestSequenceGenerator(TestSequenceGeneratorBase):
    """Tests beam search behavior of SequenceGenerator on a tiny fixed model.

    The setup from ``test_utils.sequence_generator_setup`` provides a toy
    vocabulary (w1, w2) and a model with known per-step probabilities, so
    every hypothesis's token sequence and positional scores are asserted
    exactly via the base-class helpers.
    """

    def setUp(self):
        (self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model) = test_utils.sequence_generator_setup()
        self.sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}}

    def test_with_normalization(self):
        """Default settings: scores are normalized by hypothesis length."""
        generator = SequenceGenerator(self.tgt_dict, beam_size=2)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        # Sentence 1: best is the short hypothesis, then the longer one.
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
        # Sentence 2: normalization favors the longer hypothesis here.
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])

    def test_without_normalization(self):
        """With normalize_scores=False the sentence-2 ordering flips."""
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, normalize_scores=False)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)

    def test_with_lenpen_favoring_short_hypos(self):
        """A length penalty < 1 prefers shorter hypotheses."""
        lenpen = 0.6
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)

    def test_with_lenpen_favoring_long_hypos(self):
        """A length penalty > 1 prefers longer hypotheses."""
        lenpen = 5.0
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[0][1], [w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)

    def test_maxlen(self):
        """max_len_b=2 truncates the search, forcing eos by step 3."""
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, max_len_b=2)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
        self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])
('/download')  # NOTE(review): bare expression — almost certainly a stripped route decorator (e.g. ``@app.route('/download')``); restore against the original source.
def download():
    """Serve a file identified by the ``file`` query parameter.

    The parameter encodes a relative path with '_' as the path separator
    (e.g. ``reports_2020_out.csv`` -> ``reports/2020/out.csv``).

    SECURITY FIX: the original passed the reconstructed path straight to
    ``send_file``, so a request such as ``file=.._.._etc_passwd`` could read
    arbitrary files (path traversal). The path is now resolved and must stay
    inside the current working directory.
    """
    import os
    file = request.args['file']
    filepath = '/'.join(file.split('_'))
    base = os.path.realpath(os.getcwd())
    resolved = os.path.realpath(os.path.join(base, filepath))
    # Reject anything that escapes the serving root.
    if ((resolved != base) and (not resolved.startswith((base + os.sep)))):
        return ('Invalid file path', 400)
    return send_file(resolved, as_attachment=True)
def plot_confusion_matrix(cmtx, num_classes, class_names=None, figsize=None):
    """Render a confusion matrix as a matplotlib figure.

    Args:
        cmtx: confusion matrix array of shape (num_classes, num_classes).
        num_classes: number of classes, used for default tick labels.
        class_names: optional list of class names; falls back to "0".."n-1".
        figsize: optional (width, height) of the figure in inches.

    Returns:
        The matplotlib Figure containing the rendered matrix.
    """
    if ((class_names is None) or (type(class_names) != list)):
        class_names = [str(i) for i in range(num_classes)]
    fig = plt.figure(figsize=figsize)
    plt.imshow(cmtx, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title('Confusion matrix')
    plt.colorbar()
    ticks = np.arange(len(class_names))
    plt.xticks(ticks, class_names, rotation=45)
    plt.yticks(ticks, class_names)
    # Use white text on dark cells and black on light ones; zero cells
    # get a '.' placeholder instead of '0.00'.
    threshold = (cmtx.max() / 2.0)
    for (row, col) in itertools.product(range(cmtx.shape[0]), range(cmtx.shape[1])):
        shade = ('white' if (cmtx[(row, col)] > threshold) else 'black')
        label = (format(cmtx[(row, col)], '.2f') if (cmtx[(row, col)] != 0) else '.')
        plt.text(col, row, label, horizontalalignment='center', color=shade)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return fig
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('epsilon', [0.001, 1])
def test_epsilon_insensitive_loss_forward_backward(seed, ctx, func_name, epsilon):
    """Forward/backward test of ``F.epsilon_insensitive_loss`` against its reference.

    NOTE(review): the original decorators were garbled into bare
    ``.parametrize(...)`` lines (a syntax error); restored here as
    ``@pytest.mark.parametrize`` following the surrounding test-suite
    convention — confirm against the original source.
    """
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Two random float32 inputs (prediction and target), scaled x2 so values
    # land on both sides of the epsilon tube.
    inputs = [(rng.randn(2, 3, 4).astype(np.float32) * 2) for _ in range(2)]
    function_tester(rng, F.epsilon_insensitive_loss, ref_epsilon_insensitive_loss_forward, inputs, func_args=[epsilon], atol_b=0.01, ctx=ctx, func_name=func_name)
def _get_split_ranges(nnp, args, supported_set):
    """Compute the (start, end) function-index ranges used to split a network.

    If ``args.split`` is set, ranges come from that spec string
    ("x0-y0,x1-y1,..."); otherwise they are the maximal runs of consecutive
    functions whose type is in ``supported_set``.

    Returns:
        list of inclusive ``(start, end)`` index tuples into ``network.function``.
    """

    def get_ranges_from_param(split_spec):
        # Parse a comma-separated list of "start-end" items; an empty start
        # means 0 and an empty end means the last function index. Invalid
        # specs abort the tool via sys.exit.
        ranges = []
        for srange in split_spec.split(','):
            srange_s = srange.split('-')
            if (len(srange_s) == 2):
                if (srange_s[0] == ''):
                    pos_start = 0
                else:
                    pos_start = int(srange_s[0])
                if (srange_s[1] == ''):
                    pos_end = (len(network.function) - 1)
                else:
                    pos_end = int(srange_s[1])
                if ((pos_end < pos_start) or (pos_end > (len(network.function) - 1))):
                    print('[ERROR] range must be in 0 to {}'.format((len(network.function) - 1)))
                    sys.exit((- 1))
            else:
                print('[ERROR] range must be "x0-y0,x1-y1,..."')
                sys.exit((- 1))
            ranges.append((pos_start, pos_end))
        return ranges

    def get_ranges_from_func_set(support_set):
        # Greedily collect maximal runs of functions whose type is supported.
        pos_start = 0
        pos_end = 0
        ranges = []
        for (pos, func) in enumerate(network.function):
            if (func.type in support_set):
                pos_end = pos
            else:
                if (pos_end >= pos_start):
                    ranges.append((pos_start, pos_end))
                pos_start = (pos + 1)
        if (pos_end >= pos_start):
            # Trailing run that reaches the end of the network.
            ranges.append((pos_start, pos_end))
        return ranges

    # Locate the network referenced by the first executor.
    # NOTE(review): if no name matches, ``network`` stays None and the helpers
    # above would fail — presumably every executor references an existing
    # network; confirm.
    network = None
    for n in nnp.protobuf.network:
        if (n.name == nnp.protobuf.executor[0].network_name):
            network = n
    if args.split:
        return get_ranges_from_param(args.split)
    return get_ranges_from_func_set(supported_set)
def test_container_add():
    """Exercise Container `+` / `+=` and the resulting name bookkeeping."""
    from sfepy.base.base import Struct, Container
    item_a = Struct(name='a')
    item_b = Struct(name='b')
    c1 = Container()
    # Adding an empty container to itself stays empty.
    c1 = (c1 + c1)
    assert_((c1.names == []))
    # In-place addition appends the new items' names.
    c1 += Container([item_a, item_b])
    assert_((c1.names == ['a', 'b']))
    # Addition and in-place addition both duplicate entries.
    c2 = (c1 + c1)
    assert_((c2.names == (2 * ['a', 'b'])))
    c2 += c2
    assert_((c2.names == (4 * ['a', 'b'])))
def check_psenac(lamada, w, k):
    """Validate the PseNAC parameters, raising ValueError on bad input.

    Args:
        lamada: correlation rank; must be a positive int.
        w: weight factor; must lie in [0, 1].
        k: k-mer size; must be a positive int.

    Raises:
        ValueError: if any parameter is out of range.

    NOTE(review): the original wrapped these checks in a no-op
    ``try/except ValueError: raise`` (removed), and the lamada message
    claimed "larger than and equal to 0" although the check rejects 0
    (message corrected to "larger than 0").
    """
    if ((not isinstance(lamada, int)) or (lamada <= 0)):
        raise ValueError('Error, parameter lamada must be an int type and larger than 0.')
    if ((w > 1) or (w < 0)):
        raise ValueError('Error, parameter w must be ranged from 0 to 1.')
    if ((not isinstance(k, int)) or (k <= 0)):
        raise ValueError('Error, parameter k must be an int type and larger than 0.')
def copy_dory_sig():
    """Copy the dory-subset signature fixture into the current working directory."""
    source = relative_file('data/dory-subset.fq.sig')
    shutil.copyfile(source, 'dory-subset.fq.sig')
class TestBamfilter(unittest.TestCase):
    """Unit tests for ``bamfilter.BamFilter``.

    Each test loads a fixture BAM from ``data_dir``, runs one private helper
    (or the full ``run()``), and compares the produced FASTA/FASTQ against a
    golden expected file with ``filecmp``; temporary outputs are deleted on
    success.
    """

    def test_get_ref_lengths(self):
        """Reference name -> length mapping is read correctly from the BAM header."""
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_get_ref_lengths.bam'), 'out')
        expected = {'ref1': 41, 'ref2': 42, 'ref3': 43}
        self.assertEqual(expected, b._get_ref_lengths())

    def test_get_contigs_to_use(self):
        """Contigs-to-use accepts a filename, None (-> empty set) or a set (passthrough)."""
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_get_contigs_to_use.bam'), 'out')
        test_file = os.path.join(data_dir, 'bamfilter_test_get_contigs_to_use.infile')
        self.assertEqual(b._get_contigs_to_use(test_file), {'contig42', 'contig4444244'})
        self.assertEqual(b._get_contigs_to_use(None), set())
        self.assertEqual(b._get_contigs_to_use({'42', '43'}), {'42', '43'})

    def test_check_contigs_to_use(self):
        """Requested contigs must exist among the BAM references; unknown ones raise."""
        input_bam = os.path.join(data_dir, 'bamfilter_test_check_contigs_to_use.bam')
        b = bamfilter.BamFilter(input_bam, 'out')
        ref_lengths = b._get_ref_lengths()
        self.assertTrue(b._check_contigs_to_use(ref_lengths))
        b = bamfilter.BamFilter(input_bam, 'out', contigs_to_use={'1'})
        self.assertTrue(b._check_contigs_to_use(ref_lengths))
        b = bamfilter.BamFilter(input_bam, 'out', contigs_to_use={'1', '2'})
        self.assertTrue(b._check_contigs_to_use(ref_lengths))
        with self.assertRaises(bamfilter.Error):
            # 'contig42' is not a reference in this BAM.
            b = bamfilter.BamFilter(input_bam, 'out', contigs_to_use={'42'})
            self.assertTrue(b._check_contigs_to_use(ref_lengths))

    def test_all_reads_from_contig(self):
        """All reads mapped to a contig are written to the output file."""
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_all_reads_from_contig.bam'), 'out')
        tmp = 'tmp.test_all_reads_from_contig.out.fa'
        f = pyfastaq.utils.open_file_write(tmp)
        expected = os.path.join(data_dir, 'bamfilter_test_all_reads_from_contig.reads.fa')
        b._all_reads_from_contig('1', f)
        pyfastaq.utils.close(f)
        self.assertTrue(filecmp.cmp(expected, tmp, shallow=False))
        os.unlink(tmp)

    def test_get_all_unmapped_reads(self):
        """All unmapped reads in the BAM are written to the output file."""
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_get_all_unmapped_reads.bam'), 'out')
        expected = os.path.join(data_dir, 'bamfilter_test_get_all_unmapped_reads.reads.fa')
        tmp = 'tmp.test_get_all_unmapped_reads.out.fa'
        f = pyfastaq.utils.open_file_write(tmp)
        b._get_all_unmapped_reads(f)
        pyfastaq.utils.close(f)
        self.assertTrue(filecmp.cmp(expected, tmp, shallow=False))
        os.unlink(tmp)

    def test_break_reads(self):
        """Reads spanning the given position are split; fragments shorter than min_read_length are dropped."""
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_break_reads.bam'), 'out')
        expected = os.path.join(data_dir, 'bamfilter_test_break_reads.broken_reads.fa')
        tmp = 'tmp.test_break_reads.out.fa'
        f = pyfastaq.utils.open_file_write(tmp)
        b._break_reads('contig1', 390, f, min_read_length=5)
        pyfastaq.utils.close(f)
        self.assertTrue(filecmp.cmp(expected, tmp, shallow=False))
        os.unlink(tmp)

    def test_exclude_region(self):
        """Reads overlapping the excluded region are omitted from the output."""
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_exclude_region.bam'), 'out')
        expected = os.path.join(data_dir, 'bamfilter_test_exclude_region.reads.fa')
        tmp = 'tmp.test_exclude_reads.out.fa'
        f = pyfastaq.utils.open_file_write(tmp)
        b._exclude_region('1', 500, 700, f)
        pyfastaq.utils.close(f)
        self.assertTrue(filecmp.cmp(expected, tmp, shallow=False))
        os.unlink(tmp)

    def test_get_region_start(self):
        """Region extraction at the start of a contig honors min_length."""
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_get_region_start.bam'), 'out')
        expected = os.path.join(data_dir, 'bamfilter_test_get_region_start.reads.fa')
        tmp = 'tmp.test_get_region.out.fa'
        f = pyfastaq.utils.open_file_write(tmp)
        b._get_region('1', 0, 64, f, min_length=20)
        pyfastaq.utils.close(f)
        self.assertTrue(filecmp.cmp(expected, tmp, shallow=False))
        os.unlink(tmp)

    def test_get_region_end(self):
        """Region extraction at the end of a contig honors min_length."""
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_get_region_end.bam'), 'out')
        expected = os.path.join(data_dir, 'bamfilter_test_get_region_end.reads.fa')
        tmp = 'tmp.test_get_region.out.fa'
        f = pyfastaq.utils.open_file_write(tmp)
        b._get_region('2', 379, 499, f, min_length=20)
        pyfastaq.utils.close(f)
        self.assertTrue(filecmp.cmp(expected, tmp, shallow=False))
        os.unlink(tmp)

    def test_run_keep_unmapped_no_quals(self):
        """Full run on a BAM without qualities keeps unmapped reads; fasta and fastq outputs agree."""
        outprefix = 'tmp.bamfilter_run'
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_run_no_qual.bam'), outprefix, length_cutoff=600, min_read_length=100, contigs_to_use={'contig1', 'contig3', 'contig4'})
        b.run()
        expected = os.path.join(data_dir, 'bamfilter_test_run_keep_unmapped.out.reads.fa')
        self.assertTrue(filecmp.cmp(expected, (outprefix + '.fasta'), shallow=False))
        os.unlink((outprefix + '.fasta'))
        os.unlink((outprefix + '.log'))
        # fastq_out=True with no qualities still produces the same sequences.
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_run_no_qual.bam'), outprefix, fastq_out=True, length_cutoff=600, min_read_length=100, contigs_to_use={'contig1', 'contig3', 'contig4'})
        b.run()
        expected = os.path.join(data_dir, 'bamfilter_test_run_keep_unmapped.out.reads.fa')
        self.assertTrue(filecmp.cmp(expected, (outprefix + '.fastq'), shallow=False))
        os.unlink((outprefix + '.fastq'))
        os.unlink((outprefix + '.log'))

    def test_run_keep_unmapped_with_quals(self):
        """Full run on a BAM with qualities: fasta output ignores them, fastq output keeps them."""
        outprefix = 'tmp.bamfilter_run'
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_run_with_qual.bam'), outprefix, fastq_out=False, length_cutoff=600, min_read_length=100, contigs_to_use={'contig1', 'contig3', 'contig4'})
        b.run()
        expected = os.path.join(data_dir, 'bamfilter_test_run_keep_unmapped.out.reads.fa')
        self.assertTrue(filecmp.cmp(expected, (outprefix + '.fasta'), shallow=False))
        os.unlink((outprefix + '.fasta'))
        os.unlink((outprefix + '.log'))
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_run_with_qual.bam'), outprefix, fastq_out=True, length_cutoff=600, min_read_length=100, contigs_to_use={'contig1', 'contig3', 'contig4'})
        b.run()
        expected = os.path.join(data_dir, 'bamfilter_test_run_keep_unmapped.out.reads.fq')
        self.assertTrue(filecmp.cmp(expected, (outprefix + '.fastq'), shallow=False))
        os.unlink((outprefix + '.fastq'))
        os.unlink((outprefix + '.log'))

    def test_run_discard_unmapped_no_quals(self):
        """Full run with discard_unmapped=True drops the unmapped reads from the output."""
        outprefix = 'tmp.bamfilter_run'
        b = bamfilter.BamFilter(os.path.join(data_dir, 'bamfilter_test_run_no_qual.bam'), outprefix, length_cutoff=600, min_read_length=100, contigs_to_use={'contig1', 'contig3', 'contig4'}, discard_unmapped=True)
        b.run()
        expected = os.path.join(data_dir, 'bamfilter_test_run_discard_unmapped.out.reads.fa')
        self.assertTrue(filecmp.cmp(expected, (outprefix + '.fasta'), shallow=False))
        os.unlink((outprefix + '.fasta'))
        os.unlink((outprefix + '.log'))
class Unet(nn.Module):
    """Conditional U-Net for reference-guided image retouching.

    A 4-level (downscale 16) encoder/decoder of depthwise+pointwise convs.
    A conditioning network ``netC`` embeds the input and a reference image;
    the two codes are merged into a "retouch code" that modulates every
    decoder level via ``RetouchBlock``; ``SpatialOffsetBlock`` fuses the
    skip connections.
    """

    def __init__(self, in_ch, out_ch, nf=3, cond_nf=64, norm_layer=nn.InstanceNorm2d):
        super(Unet, self).__init__()
        # Total spatial reduction of the encoder (4 stride-2 stages).
        self.downscale = 16
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.nf = nf
        self.cond_nf = cond_nf
        # Merge (input code, ref - input delta) pairs into multiplicative and
        # additive corrections of the condition code.
        self.merge_cond_mult = nn.Sequential(nn.Conv1d(cond_nf, cond_nf, 2, 1, 0, bias=True), nn.LeakyReLU(0.1, inplace=True), nn.Conv1d(cond_nf, cond_nf, 1, 1, 0, bias=True))
        self.merge_cond_offset = nn.Sequential(nn.Conv1d(cond_nf, cond_nf, 2, 1, 0, bias=True), nn.LeakyReLU(0.1, inplace=True), nn.Conv1d(cond_nf, cond_nf, 1, 1, 0, bias=True))
        self.merge_cond = nn.Linear(cond_nf, cond_nf, bias=True)
        # Optional 1x1 stem when the working width differs from the input channels.
        if (self.nf != self.in_ch):
            self.conv_in = nn.Conv2d(in_ch, nf, 1, 1, 0, bias=False)
        # Encoder: each stage is a 1x1 channel expansion followed by a
        # depthwise stride-2 4x4 conv.
        self.down_conv_0 = nn.Conv2d(nf, (nf * 2), 1, 1, 0, padding_mode='reflect', bias=False)
        self.down_0 = nn.Conv2d((nf * 2), (nf * 2), 4, 2, 1, groups=(nf * 2), padding_mode='reflect', bias=False)
        self.down_conv_1 = nn.Conv2d((nf * 2), (nf * 4), 1, 1, 0, padding_mode='reflect', bias=False)
        self.down_1 = nn.Conv2d((nf * 4), (nf * 4), 4, 2, 1, groups=(nf * 4), padding_mode='reflect', bias=False)
        self.down_conv_2 = nn.Conv2d((nf * 4), (nf * 8), 1, 1, 0, padding_mode='reflect', bias=False)
        self.down_2 = nn.Conv2d((nf * 8), (nf * 8), 4, 2, 1, groups=(nf * 8), padding_mode='reflect', bias=False)
        # Deepest stage only exists at downscale 16.
        if (self.downscale == 16):
            self.down_conv_3 = nn.Conv2d((nf * 8), (nf * 16), 1, 1, 0, padding_mode='reflect', bias=False)
            self.down_3 = nn.Conv2d((nf * 16), (nf * 16), 4, 2, 1, groups=(nf * 16), padding_mode='reflect', bias=False)
            self.up_3 = nn.Conv2d((nf * 16), (nf * 16), 3, 1, 1, padding_mode='reflect', groups=(nf * 16), bias=False)
            self.conv_up_3 = nn.Conv2d((nf * 16), (nf * 8), 1, 1, 0, padding_mode='reflect', bias=False)
            self.modulate_3 = SpatialOffsetBlock((nf * 8), (nf * 8), ks=3)
            self.retouch_3 = RetouchBlock((nf * 8), (nf * 8), base_nf=cond_nf, cond_nf=cond_nf)
        # Decoder: nearest-neighbor upsample, depthwise 3x3, 1x1 channel
        # reduction, then skip fusion and condition-driven retouching.
        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        self.up_2 = nn.Conv2d((nf * 8), (nf * 8), 3, 1, 1, padding_mode='reflect', groups=(nf * 8), bias=False)
        self.conv_up_2 = nn.Conv2d((nf * 8), (nf * 4), 1, 1, 0, padding_mode='reflect', bias=False)
        self.modulate_2 = SpatialOffsetBlock((nf * 4), (nf * 4), ks=3)
        self.retouch_2 = RetouchBlock((nf * 4), (nf * 4), base_nf=cond_nf, cond_nf=cond_nf)
        self.up_1 = nn.Conv2d((nf * 4), (nf * 4), 3, 1, 1, padding_mode='reflect', groups=(nf * 4), bias=False)
        self.conv_up_1 = nn.Conv2d((nf * 4), (nf * 2), 1, 1, 0, padding_mode='reflect', bias=False)
        self.modulate_1 = SpatialOffsetBlock((nf * 2), (nf * 2), ks=5)
        self.retouch_1 = RetouchBlock((nf * 2), (nf * 2), base_nf=cond_nf, cond_nf=cond_nf)
        self.up_0 = nn.Conv2d((nf * 2), (nf * 2), 3, 1, 1, padding_mode='reflect', groups=(nf * 2), bias=False)
        self.conv_up_0 = nn.Conv2d((nf * 2), (nf * 1), 1, 1, 0, padding_mode='reflect', bias=False)
        self.modulate_0 = SpatialOffsetBlock((nf * 1), (nf * 1), ks=5)
        self.retouch_0 = RetouchBlock((nf * 1), (nf * 1), base_nf=cond_nf, cond_nf=cond_nf)
        # Optional 1x1 head when the working width differs from the output channels.
        if (self.nf != self.out_ch):
            self.conv_out = nn.Conv2d(nf, out_ch, 1, 1, 0, bias=False)

    def forward(self, netC, x, ref):
        """Retouch ``x`` towards the style of ``ref`` using condition network ``netC``."""
        # Global-average-pooled condition codes for input and reference.
        cond_x_code = torch.mean(netC(x), dim=[2, 3], keepdim=False)
        cond_ref_code = torch.mean(netC(ref), dim=[2, 3], keepdim=False)
        # Stack (input code, ref - input delta) and derive multiplicative /
        # additive corrections, then the final retouch code.
        cond_stack = torch.stack([cond_x_code, (cond_ref_code - cond_x_code)], dim=2)
        cond_code_offset = self.merge_cond_offset(cond_stack).squeeze(2)
        cond_code_mult = F.relu(self.merge_cond_mult(cond_stack)).squeeze(2)
        cond_retouch_code = self.merge_cond(((cond_x_code * cond_code_mult) + cond_code_offset))
        # Pad so spatial dims are divisible by the total downscale factor.
        (x, pad_left, pad_right, pad_top, pad_bottom) = pad_tensor(x, divide=self.downscale)
        if (self.nf != self.in_ch):
            x0 = self.conv_in(x)
        else:
            x0 = x
        # Encoder path.
        x1 = self.down_0(self.down_conv_0(x0))
        x2 = self.down_1(self.down_conv_1(x1))
        x3 = self.down_2(self.down_conv_2(x2))
        # Decoder path with skip fusion and per-level retouching.
        if (self.downscale == 16):
            x4 = self.down_3(self.down_conv_3(x3))
            up_x3 = self.conv_up_3(self.up_3(self.up(x4)))
            up_x3 = self.modulate_3(up_x3, x3)
            up_x3 = self.retouch_3(up_x3, cond_retouch_code)
        else:
            up_x3 = x3
        up_x2 = self.conv_up_2(self.up_2(self.up(up_x3)))
        up_x2 = self.modulate_2(up_x2, x2)
        up_x2 = self.retouch_2(up_x2, cond_retouch_code)
        up_x1 = self.conv_up_1(self.up_1(self.up(up_x2)))
        up_x1 = self.modulate_1(up_x1, x1)
        up_x1 = self.retouch_1(up_x1, cond_retouch_code)
        up_x0 = self.conv_up_0(self.up_0(self.up(up_x1)))
        up_x0 = self.modulate_0(up_x0, x0)
        up_x0 = self.retouch_0(up_x0, cond_retouch_code)
        # NOTE(review): ``conv_out`` is created when nf != out_ch but applied
        # when nf != in_ch — these differ whenever in_ch != out_ch, which
        # would raise AttributeError or skip the head; presumably in_ch ==
        # out_ch in all current uses. Confirm and align the conditions.
        if (self.nf != self.in_ch):
            out = self.conv_out(up_x0)
        else:
            out = up_x0
        # Undo the divisibility padding before returning.
        out = pad_tensor_back(out, pad_left, pad_right, pad_top, pad_bottom)
        return out
class ReluReplacementTest(SingleLayerReplacementTest):
    """Checks that the network editor replaces every ReLU with an Identity layer."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_debug_config(self):
        # Rewrite every torch.nn.ReLU node into an Identity layer.
        relu_to_identity = EditRule(filter=NodeTypeFilter(torch.nn.ReLU),
                                    action=ReplaceLayer(Identity, get_identity_params_from_relu))
        return mct.core.DebugConfig(network_editor=[relu_to_identity])

    def create_feature_network(self, input_shape):
        return TwoLayersReluNet()

    def compare(self, quantized_models, float_model, input_x=None, quantization_info=None):
        model = quantized_models.get('no_quantization')
        # With both ReLUs swapped for Identity, the network is a pass-through.
        self.unit_test.assertTrue(torch.all(torch.eq(model(input_x), input_x[0])))
        self.unit_test.assertTrue(isinstance(model.activation1, Identity))
        self.unit_test.assertTrue(isinstance(model.activation2, Identity))
class FlaxRobertaPreLayerNormForMaskedLM(metaclass=DummyObject):
    """Placeholder class raising a helpful error when the flax backend is missing."""

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Raises unless flax is installed.
        requires_backends(self, ['flax'])
class _TimeZone(datetime.tzinfo): def __init__(self, offset): self._offset = offset def utcoffset(self, dt): return self._offset def dst(self, dt): return None def tzname(self, dt): m = (self._offset.total_seconds() // 60) if (m < 0): res = '-' m = (- m) else: res = '+' h = (m // 60) m = (m - (h * 60)) return '{}{:.02}{:.02}'.format(res, h, m)
class NumpyType(LayoutBuilderType):
    """LayoutBuilder type wrapper for a Numpy builder with a fixed dtype."""

    def __init__(self, dtype, parameters):
        # Name encodes the dtype and parameters for numba type identity.
        super().__init__(name=f'ak.lb.Numpy({dtype!r}, parameters={parameters!r})')
        self._dtype = dtype
        self._init(parameters)

    def dtype(self):
        # The numpy dtype of the wrapped builder.
        return self._dtype

    def data(self):
        # Numba type of the backing growable buffer.
        return ak.numba.GrowableBufferType(self._dtype)
def test_submodule_trainable_variables():
    """trainable_variables must equal the trainable subset of own + submodule variables."""
    (trackable_layer, variables, modules, module_variables) = setup_layer_modules_variables()
    expected = [v for v in (variables + module_variables) if v.trainable]
    assert (trackable_layer.trainable_variables == expected)
def get_config_from_folder_or_ckpt(folder: str, ckpt: Dict[(str, Any)]=None) -> Dict[(str, Any)]:
    """Load a model config from a folder's yaml file, falling back to the checkpoint.

    Args:
        folder: directory possibly containing a single ``*.yaml`` config file.
        ckpt: loaded checkpoint dict; its ``'config'`` entry is used when the
            folder holds no yaml file.

    Returns:
        The configuration mapping.

    Raises:
        AssertionError: if the folder holds several yaml files, or if no yaml
            is present and the checkpoint is missing (or lacks) ``'config'``.
    """
    configs = glob.glob(os.path.join(folder, '*.yaml'))
    if (len(configs) > 0):
        assert (len(configs) <= 1), ('Multiple yaml files with the pretrained model. ' + "MMF doesn't know what to do.")
        config = load_yaml(configs[0])
    else:
        # Guard ckpt=None explicitly so the failure is the informative assert
        # below instead of ``TypeError: argument of type 'NoneType'``.
        assert ((ckpt is not None) and ('config' in ckpt)), "No configs provided with pretrained model while checkpoint also doesn't have configuration."
        config = ckpt['config']
    return config
@register_model_architecture('universal_transformer_lm', 'universal_transformer_lm_gpt2_medium')
def transformer_lm_gpt2_medium(args):
    """GPT-2 medium-scale hyperparameters for the universal transformer LM.

    Every value is a default only — ``getattr`` keeps any setting already
    present on ``args``; remaining fields are filled by
    ``base_lm_architecture``.

    NOTE(review): the original decorator was garbled into a bare
    ``_model_architecture(...)`` call; restored as fairseq's
    ``@register_model_architecture`` — confirm against the original source.
    """
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1280)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 5120)
    args.decoder_layers = getattr(args, 'decoder_layers', 36)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 20)
    args.dropout = getattr(args, 'dropout', 0.1)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    args.activation_fn = getattr(args, 'activation_fn', 'gelu')
    base_lm_architecture(args)
class ControlOutputs(object):
    """Reverse index from an operation to the ops listing it as a control input.

    TensorFlow graphs only store control *inputs*; this helper scans the
    graph once, caches the inverse mapping, and rebuilds lazily whenever the
    graph version changes.
    """

    def __init__(self, graph):
        """Build the control-output index for ``graph`` (must be a tf.Graph)."""
        if (not isinstance(graph, tf_ops.Graph)):
            raise TypeError('Expected a tf.Graph, got: {}'.format(type(graph)))
        self._control_outputs = {}
        self._graph = graph
        self._version = None
        self._build()

    def update(self):
        """Re-scan the graph if it changed since the last build; returns self."""
        if (self._version != self._graph.version):
            self._build()
        return self

    def _build(self):
        # Full rebuild: invert the op -> control_inputs relation, avoiding
        # duplicate entries per (control_input, op) pair.
        self._control_outputs.clear()
        for op in self._graph.get_operations():
            for control_input in op.control_inputs:
                consumers = self._control_outputs.setdefault(control_input, [])
                if (op not in consumers):
                    consumers.append(op)
        self._version = self._graph.version

    def get_all(self):
        """Return the whole mapping {op: [ops with op as control input]}."""
        return self._control_outputs

    def get(self, op):
        """Return the control outputs of ``op`` (empty tuple when none)."""
        if (op in self._control_outputs):
            return self._control_outputs[op]
        else:
            return ()

    def graph(self):
        """The graph this index was built for."""
        return self._graph
def add_model_to_main_init(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, frameworks: Optional[List[str]]=None, with_processing: bool=True):
    """Add the new model's entries to the main transformers ``__init__.py``.

    Scans the init file line by line; every import block mentioning the old
    model (``models.<old_lower_cased>``) is kept and immediately followed by
    a copy rewritten for the new model.

    Args:
        old_model_patterns: patterns describing the model being copied.
        new_model_patterns: patterns for the model being added.
        frameworks: restrict duplication to these frameworks ('pt'/'tf'/'flax');
            ``None`` means all frameworks.
        with_processing: when False, tokenizer/feature-extractor/processor
            classes are stripped from the duplicated block, and the block is
            skipped entirely if it still mentions a processing class.
    """
    with open((TRANSFORMERS_PATH / '__init__.py'), 'r', encoding='utf-8') as f:
        content = f.read()
    lines = content.split('\n')
    idx = 0
    new_lines = []
    framework = None
    while (idx < len(lines)):
        # Track which framework guard (if any) the current line sits under;
        # any unindented non-empty line resets the guard.
        if ((not is_empty_line(lines[idx])) and (find_indent(lines[idx]) == 0)):
            framework = None
        elif lines[idx].lstrip().startswith('if is_torch_available'):
            framework = 'pt'
        elif lines[idx].lstrip().startswith('if is_tf_available'):
            framework = 'tf'
        elif lines[idx].lstrip().startswith('if is_flax_available'):
            framework = 'flax'
        if ((framework is not None) and (frameworks is not None) and (framework not in frameworks)):
            # Inside a framework section the caller excluded: copy verbatim.
            new_lines.append(lines[idx])
            idx += 1
        elif (re.search(f'models.{old_model_patterns.model_lower_cased}( |")', lines[idx]) is not None):
            # Collect the whole indented block that mentions the old model.
            block = [lines[idx]]
            indent = find_indent(lines[idx])
            idx += 1
            while (find_indent(lines[idx]) > indent):
                block.append(lines[idx])
                idx += 1
            # Include a trailing closing bracket at the same indent level.
            if (lines[idx].strip() in [')', ']', '],']):
                block.append(lines[idx])
                idx += 1
            block = '\n'.join(block)
            # Keep the original block, then (maybe) append the rewritten copy.
            new_lines.append(block)
            add_block = True
            if (not with_processing):
                # Strip processing classes from the copied block; if a class
                # still remains after the removals, skip the copy entirely.
                processing_classes = [old_model_patterns.tokenizer_class, old_model_patterns.feature_extractor_class, old_model_patterns.processor_class]
                processing_classes = [c for c in processing_classes if (c is not None)]
                for processing_class in processing_classes:
                    block = block.replace(f' "{processing_class}",', '')
                    block = block.replace(f', "{processing_class}"', '')
                    block = block.replace(f' {processing_class},', '')
                    block = block.replace(f', {processing_class}', '')
                    if (processing_class in block):
                        add_block = False
            if add_block:
                new_lines.append(replace_model_patterns(block, old_model_patterns, new_model_patterns)[0])
        else:
            new_lines.append(lines[idx])
            idx += 1
    with open((TRANSFORMERS_PATH / '__init__.py'), 'w', encoding='utf-8') as f:
        f.write('\n'.join(new_lines))
def remove_bn_and_dropout(module):
    """Recursively replace all BatchNorm/Dropout children with no-op modules.

    Mutates ``module`` in place: every child whose class name contains
    'BatchNorm' or 'Dropout' becomes an empty ``nn.Sequential`` (an identity),
    e.g. to fold normalization/regularization away for export or analysis.
    """
    for (child_name, child) in module.named_children():
        child_type = str(type(child))
        if (('BatchNorm' in child_type) or ('Dropout' in child_type)):
            # Idiomatic setattr() instead of calling module.__setattr__ directly.
            setattr(module, child_name, torch.nn.Sequential())
        else:
            # Recurse into composite children (Sequential, custom blocks, ...).
            remove_bn_and_dropout(child)
class BaseDataset(data.Dataset):
    """Minimal dataset base class; subclasses override ``name``/``initialize``."""

    def __init__(self):
        super(BaseDataset, self).__init__()

    def name(self):
        # Human-readable dataset identifier.
        return 'BaseDataset'

    def initialize(self):
        # Hook for subclasses; the base implementation does nothing.
        pass
def compute_dual_line_graph(hypergraph, s=1, singleton_type='grey_out'):
    """Return the s-line graph of the dual of *hypergraph*.

    The dual swaps nodes and hyperedges; its incidence dict is then handed to
    the shared line-graph conversion with the given ``s`` and singleton policy.
    """
    dual = hypergraph.dual()
    return convert_to_line_graph(dual.incidence_dict, s, singleton_type)
def order_and_prune_files(file_paths, min_duration, max_duration):
    """Return *file_paths* sorted by audio duration (ascending, via ``soxi -D``).

    When both *min_duration* and *max_duration* are truthy, files whose
    duration (seconds) falls outside ``[min_duration, max_duration]`` are
    dropped before sorting.
    """
    print('Sorting manifests...')
    # Bug fix: the old call interpolated the path into a shell=True command
    # string, which broke on (and was injectable via) quotes/metacharacters
    # in file names. Pass an argv list with no shell instead.
    duration_file_paths = [(path, float(subprocess.check_output(['soxi', '-D', path.strip()]))) for path in file_paths]
    if (min_duration and max_duration):
        print(('Pruning manifests between %d and %d seconds' % (min_duration, max_duration)))
        duration_file_paths = [(path, duration) for (path, duration) in duration_file_paths if (min_duration <= duration <= max_duration)]
    # Sort by the duration element of each (path, duration) pair.
    duration_file_paths.sort(key=lambda element: element[1])
    return [x[0] for x in duration_file_paths]
class OrVerifier(Verifier):
    """Verifier for an OR composition of statements: delegates every check to
    one sub-verifier per clause."""

    def __init__(self, stmt, subverifiers):
        self.subs = subverifiers
        self.stmt = stmt

    def process_precommitment(self, precommitment):
        # Nothing to forward when the prover sent no precommitment.
        if precommitment is None:
            return
        for pos, sub in enumerate(self.subs):
            sub.process_precommitment(precommitment[pos])

    def check_responses_consistency(self, responses, responses_dict=None):
        # Each clause is checked against a fresh, empty binding dict: in an OR
        # proof the clauses are simulated independently, so no bindings are
        # shared between sub-verifiers.
        if responses_dict is None:
            responses_dict = {}
        return all(
            sub.check_responses_consistency(responses[1][pos], {})
            for pos, sub in enumerate(self.subs)
        )
def get_action(a):
    """Normalize an action to a plain Python scalar where possible.

    Plain ints pass through unchanged. Tensor/array actions of shape ``(1,)``
    are unwrapped with ``.item()``; any other shape is returned as-is.
    """
    if isinstance(a, int):
        return a
    # Bug fix: the old test was ``a.shape == [1]``, which is always False for
    # torch/numpy because their shapes are tuple-like and never compare equal
    # to a list — the ``.item()`` branch was unreachable. Compare the tuple
    # form of the shape instead.
    return a.item() if tuple(a.shape) == (1,) else a
@dataclass
class Account():
    """Account record: an API key plus membership/permission metadata.

    NOTE(review): the original chunk used ``field(default_factory=...)`` with
    no visible ``@dataclass`` decorator (it appears to have been lost in
    extraction); without it the ``field(...)`` sentinels would leak out as
    plain class attributes. Restored here — confirm against the full file.
    """
    api_key: str                       # credential identifying the account
    description: str = ''
    emails: List[str] = field(default_factory=list)
    groups: List[str] = field(default_factory=list)
    is_admin: bool = False
    # Usage records keyed twice by string (outer/inner key semantics not
    # visible here — presumably service then period; verify against callers).
    # The annotation is a string so the forward-referenced ``Usage`` type is
    # not evaluated at class-creation time.
    usages: 'Dict[str, Dict[str, Usage]]' = field(default_factory=dict)
def load_cpnet_vocab(cpnet_vocab_path):
    """Read the ConceptNet vocabulary file.

    One concept per line; whitespace is stripped and underscores are turned
    into spaces so entries read as natural-language phrases.
    """
    with open(cpnet_vocab_path, 'r', encoding='utf8') as fin:
        return [line.strip().replace('_', ' ') for line in fin]
def hash_seq(ls):
    """Combine the hashes of the items of *ls* into one stable integer.

    Uses the classic tuple-hash recurrence (seed 5381, multiplier 1000003),
    masking after each step so the value stays bounded instead of growing as
    an arbitrary-precision int.
    """
    v = 5381
    for x in ls:
        v = ((1000003 * v) + hash_obj(x))
        # NOTE(review): the mask constant after ``v &`` was lost in
        # extraction; 64-bit is the conventional width for this recurrence —
        # confirm against the original file.
        v = (v & 0xFFFFFFFFFFFFFFFF)
    return v
class Feature_Init():
    """Builds string-valued features for split/drop decisions over a boxer
    graph (sentence simplification)."""

    def get_split_feature(self, split_tuple, parent_sentence, children_sentence_list, boxer_graph):
        # Feature = split pattern joined with the computed iLength value.
        ilen = boxer_graph.calculate_iLength(parent_sentence, children_sentence_list)
        pattern = boxer_graph.get_pattern_4_split_candidate(split_tuple)
        return '{}_{}'.format(pattern, ilen)

    def get_drop_ood_feature(self, ood_node, nodeset, main_sent_dict, boxer_graph):
        # Feature = OOD word joined with whether its position touches the
        # boundary of the nodeset's span.
        word = boxer_graph.extract_oodword(ood_node, main_sent_dict)
        position = boxer_graph.nodes[ood_node]['positions'][0]
        span = boxer_graph.extract_span_min_max(nodeset)
        on_boundary = position <= span[0] or position >= span[1]
        return '{}_{}'.format(word, 'true' if on_boundary else 'false')

    def get_drop_rel_feature(self, rel_node, nodeset, main_sent_dict, boxer_graph):
        # Feature = relation predicate joined with the size of its span.
        predicate = boxer_graph.relations[rel_node]['predicates']
        span = boxer_graph.extract_span_for_nodeset_with_rel(rel_node, nodeset)
        return '{}_{}'.format(predicate, len(span))

    def get_drop_mod_feature(self, mod_cand, main_sent_dict, boxer_graph):
        # The feature is simply the surface word of the modifier candidate.
        return main_sent_dict[int(mod_cand[0])][0]
class GlobalAttention(nn.Module):
    """Luong-style global attention over an encoder context.

    Supports three score functions: 'dot' (q.k), 'general' (q.W.k) and 'mlp'
    (v.tanh(Wq + Uk)). ``forward`` returns the attentional hidden state and
    the attention weights.

    Args:
        dim: hidden size of both query and context vectors.
        attn_type: one of 'dot', 'general', 'mlp'.
        include_rnn: when True, concatenate the context vector with the query
            and project; when False, use the context vector alone.
        dropout: dropout applied to the attentional output.
    """

    def __init__(self, dim, attn_type='dot', include_rnn=True, dropout=0.0):
        super(GlobalAttention, self).__init__()
        self.dim = dim
        self.attn_type = attn_type
        self.include_rnn = include_rnn
        self.drop = nn.Dropout(dropout)
        assert (self.attn_type in ['dot', 'general', 'mlp']), 'Please select a valid attention type.'
        if (self.attn_type == 'general'):
            # Bilinear form: query is transformed before the dot product.
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif (self.attn_type == 'mlp'):
            # Additive (Bahdanau-style) scoring components.
            self.linear_context = BottleLinear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = BottleLinear(dim, 1, bias=False)
        # mlp wants it with bias; dot/general do not.
        out_bias = (self.attn_type == 'mlp')
        self.linear_out = nn.Linear((dim * 2), dim, bias=out_bias)
        self.sm = nn.Softmax(dim=1)
        self.tanh = nn.Tanh()
        self.mask = None

    def applyMask(self, mask):
        # Optional beam-search mask; only shape-checked in forward().
        self.mask = mask

    def score(self, h_t, h_s):
        """Compute raw (unnormalized) attention scores.

        h_t: (tgt_batch, tgt_len, dim) queries; h_s: (src_batch, src_len, dim)
        context. Returns (batch, tgt_len, src_len) scores.
        """
        (src_batch, src_len, src_dim) = h_s.size()
        (tgt_batch, tgt_len, tgt_dim) = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)
        if (self.attn_type in ['general', 'dot']):
            if (self.attn_type == 'general'):
                h_t_ = h_t.contiguous().view((tgt_batch * tgt_len), tgt_dim)
                h_t_ = self.linear_in(h_t_)
                h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
            # Batched dot product between queries and all context positions.
            h_s_ = h_s.transpose(1, 2)
            return torch.bmm(h_t, h_s_)
        else:
            # MLP score: v . tanh(W q + U k), broadcast over all (t, s) pairs.
            dim = self.dim
            wq = self.linear_query(h_t.view((- 1), dim))
            wq = wq.view(tgt_batch, tgt_len, 1, dim)
            wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
            uh = self.linear_context(h_s.contiguous().view((- 1), dim))
            uh = uh.view(src_batch, 1, src_len, dim)
            uh = uh.expand(src_batch, tgt_len, src_len, dim)
            wquh = self.tanh((wq + uh))
            return self.v(wquh.view((- 1), dim)).view(tgt_batch, tgt_len, src_len)

    def forward(self, input, context, context_lengths_or_mask):
        """Attend over *context* with *input* as queries.

        ``context_lengths_or_mask`` is either a 1-D lengths tensor (converted
        to a mask) or a 2-D mask; positions outside it get -inf scores.
        Returns (attn_h, align_vectors).
        """
        # A 2-D input is a single decoding step; lift it to (batch, 1, dim).
        if (input.dim() == 2):
            one_step = True
            input = input.unsqueeze(1)
        else:
            one_step = False
        (batch, sourceL, dim) = context.size()
        (batch_, targetL, dim_) = input.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        aeq(self.dim, dim)
        if (self.mask is not None):
            # Beam mask sanity checks only; the mask itself is not applied here.
            (beam_, batch_, sourceL_) = self.mask.size()
            aeq(batch, (batch_ * beam_))
            aeq(sourceL, sourceL_)
        align = self.score(input, context)
        if (context_lengths_or_mask is not None):
            if (context_lengths_or_mask.dim() == 1):
                mask = sequence_mask(context_lengths_or_mask.data)
            elif (context_lengths_or_mask.dim() == 2):
                mask = context_lengths_or_mask.data
            # Make it broadcastable over the target-length dimension.
            mask = mask.unsqueeze(1)
            # NOTE(review): ``1 - mask`` assumes a 0/1 integer mask; on newer
            # torch with bool masks this subtraction errors — confirm dtype.
            align.data.masked_fill_((1 - mask), (- float('inf')))
        # Softmax over source positions.
        align_vectors = self.sm(align.view((batch * targetL), sourceL))
        align_vectors = align_vectors.view(batch, targetL, sourceL)
        # Weighted sum of context vectors.
        c = torch.bmm(align_vectors, context)
        if self.include_rnn:
            # Concatenate [context; query] and project back to dim.
            concat_c = torch.cat([c, input], 2).view((batch * targetL), (dim * 2))
            attn_h = self.linear_out(concat_c).view(batch, targetL, dim)
        else:
            attn_h = c.view(batch, targetL, dim)
        attn_h = self.drop(attn_h)
        if (self.attn_type in ['general', 'dot']):
            attn_h = self.tanh(attn_h)
        if one_step:
            # NOTE(review): the single-step path is explicitly disabled with
            # ``assert False`` — the code below it is unreachable as written.
            assert False
            attn_h = attn_h.squeeze(1)
            align_vectors = align_vectors.squeeze(1)
            (batch_, dim_) = attn_h.size()
            aeq(batch, batch_)
            aeq(dim, dim_)
            (batch_, sourceL_) = align_vectors.size()
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)
        else:
            (batch_, targetL_, dim_) = attn_h.size()
            aeq(targetL, targetL_)
            aeq(batch, batch_)
            aeq(dim, dim_)
            (batch_, targetL_, sourceL_) = align_vectors.size()
            aeq(targetL, targetL_)
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)
        return (attn_h, align_vectors)
def get_b32_config():
    """Return the ViT-B/16 config adjusted to 32x32 patches (ViT-B/32)."""
    cfg = get_b16_config()
    cfg.patches.size = (32, 32)
    return cfg
def load_data_wikisql(args):
    """Load the WikiSQL train/dev/test splits plus schema graphs.

    Returns a dict with one entry per split and the schema graphs under the
    'schema' key.
    """
    in_dir = args.data_dir
    splits = ['train', 'dev', 'test']
    schema_graphs = load_schema_graphs_wikisql(in_dir, splits=splits)
    dataset = {split: load_data_split_wikisql(in_dir, split, schema_graphs) for split in splits}
    dataset['schema'] = schema_graphs
    return dataset
@pytest.mark.parametrize('dtype', [ti.f32, ti.f64])
def test_cast_default_fp(dtype):
    """Casting to the generic ``float`` must honor the configured default_fp.

    NOTE(review): the ``@pytest.mark`` decorator prefix (and, per the usual
    taichi test layout, a ``@ti.kernel`` decorator on the inner func) was lost
    in extraction — restored here; confirm against the original file.
    """
    ti.init(default_fp=dtype)

    @ti.kernel
    def func(x: int, y: int) -> float:
        return (ti.cast(x, float) * float(y))

    assert (func(23, 4) == pytest.approx((23.0 * 4.0)))
class ADGEncoder():
    """ADG (adaptive dynamic grouping) steganographic encoder.

    Hides a bit string inside text sampled from a language-model ``medium``:
    at each generation step the sorted next-token distribution is recursively
    partitioned into ``2**bit`` near-equal-mass groups and the next ``bit``
    secret bits select which group to sample the token from.

    NOTE(review): this block was recovered from a whitespace-flattened dump;
    the statement grouping/indentation below was reconstructed to match the
    published ADG encoding loop — confirm against the original file.
    """

    def __init__(self, medium, **kwargs):
        # Language-model wrapper that yields next-token distributions.
        self.medium = medium
        self.context = kwargs.get('context', None)
        self.finish_sent = kwargs.get('finish_sent')
        self.precision = kwargs.get('precision')
        self.is_sort = kwargs.get('is_sort')
        self.clean_up_output = kwargs.get('clean_up_output', False)
        self.input_key = kwargs.get('input_key')
        self.sample_seed_prefix = kwargs.get('sample_seed_prefix')
        self.input_nonce = kwargs.get('input_nonce')
        self.seed = kwargs.get('seed', None)
        # CPU RNG; seeded explicitly when a seed is supplied, for
        # reproducible sampling.
        self.g = th.Generator(device='cpu')
        if (self.seed is None):
            self.g.seed()
        else:
            self.g.manual_seed(self.seed)
        pass

    def encode(self, private_message_bit: bitarray.bitarray, context: str=None, verbose=False):
        """Embed *private_message_bit* into text generated from *context*.

        Returns a 3-tuple: (decoded stego text, generated token ids beyond the
        context, stats dict with efficiency/KL/timing diagnostics).
        """
        message = private_message_bit
        enc = self.medium.enc
        device = self.medium.device
        topk = self.medium.probs_top_k
        precision = self.precision
        is_sort = self.is_sort
        finish_sent = self.finish_sent
        if verbose:
            print('Starting reset...')
        # Interval bookkeeping kept for parity with arithmetic-coding
        # variants; the ADG path below does not consume it.
        max_val = (2 ** precision)
        threshold = (2 ** (- precision))
        cur_interval = [0, max_val]
        prev = context
        enc_context = th.LongTensor(self.medium.encode_context(context))
        output = enc_context
        past = None
        total_num = 0
        total_num_for_stats = 0
        total_log_probs = 0
        total_kl = 0
        total_entropy_ptau = 0
        total_num_sents = 0
        mask_generator = DRBG(self.input_key, (self.sample_seed_prefix + self.input_nonce))
        stats = {'message_len_bits': len(message), 'loop_error': 0.0}
        stats_traj = defaultdict(list)
        bit_index = 0
        with th.no_grad():
            i = 0  # number of message bits embedded so far
            j = 0  # number of generation steps taken
            sent_finish = False
            stega_bit = ''
            # Generate until the whole message is embedded (and, when
            # requested, the sentence has been brought to a finish).
            while ((i < len(message)) or (finish_sent and (not sent_finish))):
                if (j == 0):
                    (probs, info) = self.medium.reset(context=context)
                else:
                    t_medium_1 = time.time()
                    (probs, info) = self.medium.step(prev)
                    delta_t_medium = (time.time() - t_medium_1)
                    stats_traj['enc_t_medium_per_step'].append(delta_t_medium)
                j += 1
                probs = th.from_numpy(probs.astype(np.float64))
                (probs, indices) = probs.sort(descending=True)
                bit_tmp = 0  # bits consumed during this step's grouping
                t_iter_1 = time.time()
                # Optional per-step distribution diagnostics from the medium.
                if ('kl(sampled|true)' in info):
                    stats_traj['kl(sampled|true)'].append(info['kl(sampled|true)'])
                    stats_traj['kl(uniform|true)'].append(info['kl(uniform|true)'])
                    stats_traj['kl(sampled|uniform)'].append(info['kl(sampled|uniform)'])
                    stats_traj['kl(uniform|sampled)'].append(info['kl(uniform|sampled)'])
                    stats_traj['chisquare_p(sampled|true)'].append(info['chisquare_p(sampled|true)'])
                    stats_traj['chisquare_p(uniform|true)'].append(info['chisquare_p(uniform|true)'])
                stats_traj['medium_entropy_raw'].append(info['medium_entropy_raw'])
                stats_traj['medium_entropy'].append(info['medium_entropy'])
                stats_traj['medium_entropy_over_raw'].append((info['medium_entropy'] / info['medium_entropy_raw']))
                stats_traj['medium_logit_dim'].append(probs.shape[0])
                probs_temp = probs
                log_probs_temp = th.from_numpy(info['log_probs'].astype(np.float64))
                log_probs = th.from_numpy(info['log_probs_T=1'].astype(np.float64))
                entropy_in_this_distribution = entropy(probs_temp, log_probs_temp)
                total_entropy_ptau += entropy_in_this_distribution
                probs_temp_int = probs_temp
                indices_orig = th.from_numpy(info['indices'])
                if (i >= len(message)):
                    # Message exhausted: take the top token greedily until the
                    # sentence terminator is reached.
                    selection = 0
                    sent_finish = is_sent_finish(indices[selection].item(), enc)
                    print('Is Finished is true for i={}!'.format(i))
                else:
                    total_num_for_stats += 1
                    prob = probs.cpu().clone()
                    # q accumulates the implied ADG sampling distribution, used
                    # only for the KL diagnostic below.
                    q = probs.cpu().clone().zero_()
                    if (prob[0].item() > 0.5):
                        # Top token dominates: no bits can be embedded; sample
                        # from the raw distribution with original indices.
                        indices = indices_orig
                    while (prob[0].item() <= 0.5):
                        # Capacity: largest bit with 1/2**(bit+1) <= prob[0].
                        bit = 1
                        while ((1 / (2 ** (bit + 1))) > prob[0]):
                            bit += 1
                        mean = (1 / (2 ** bit))
                        prob = prob.tolist()
                        indices = indices_orig.tolist()
                        # Greedily partition tokens into 2**bit groups of
                        # roughly equal probability mass.
                        result = []
                        for _ in range((2 ** bit)):
                            result.append([[], []])
                        for i_ in range(((2 ** bit) - 1)):
                            result[i_][0].append(prob[0])
                            result[i_][1].append(indices[0])
                            del prob[0]
                            del indices[0]
                            while (sum(result[i_][0]) < mean):
                                delta = (mean - sum(result[i_][0]))
                                index = near(prob, delta)
                                if ((prob[index] - delta) < delta):
                                    result[i_][0].append(prob[index])
                                    result[i_][1].append(indices[index])
                                    del prob[index]
                                    del indices[index]
                                else:
                                    break
                            # Re-target the mean mass for the remaining groups.
                            mean = (sum(prob) / (((2 ** bit) - i_) - 1))
                        # Everything left over forms the final group.
                        result[((2 ** bit) - 1)][0].extend(prob)
                        result[((2 ** bit) - 1)][1].extend(indices)
                        # The next `bit` message bits select the group.
                        bit_embed = [int(_) for _ in message[(bit_index + bit_tmp):((bit_index + bit_tmp) + bit)]]
                        int_embed = bits2int(bit_embed)
                        # Renormalize within the chosen group and recurse
                        # (the outer while re-tests prob[0] <= 0.5).
                        prob = th.FloatTensor(result[int_embed][0]).to(device)
                        indices = th.LongTensor(result[int_embed][1]).to(device)
                        prob = (prob / prob.sum())
                        (prob, _) = prob.sort(descending=True)
                        indices = indices[_]
                        bit_tmp += bit
                        i += bit
                        # Accumulate the implied distribution q for the KL
                        # diagnostic: each group carries 1/2**bit of the mass.
                        for (_, g) in enumerate(result):
                            (ps, idxs) = g
                            g_sum = sum(ps)
                            for id_ in idxs:
                                pidx = indices_orig.cpu().tolist().index(id_)
                                q[pidx] = (probs[pidx] / (g_sum * (2 ** bit)))
                        kl = kl2((q / q.sum()), probs[:len(q)])
                        if (kl < 0.0):
                            # Numerical-noise case; kept as a breakpoint hook.
                            h = 1
                            pass
                        total_kl += kl
                        pass
                    # Runaway guard: abort if generation has taken far more
                    # steps than the message could plausibly need.
                    if (j > (len(private_message_bit) * 100)):
                        stats['loop_error'] = 1.0
                        break
                    # Sample the emitted token from the (possibly regrouped)
                    # distribution.
                    selection = int(th.multinomial(prob, 1))
                prev = indices[selection].view(1)
                output = th.cat((output, prev.to(output[0].device)))
                total_log_probs += log_probs[selection].item()
                total_num += 1
                # NOTE(review): ``len(context)`` is the *character* length of
                # the context string, used here to slice token ids — looks
                # inconsistent with ``len(enc_context)`` used later; confirm.
                partial = enc.decode(output[len(context):].tolist())
                if ('<eos>' in partial):
                    break
                if (j > 0):
                    delta_t_step_no_medium = (time.time() - t_iter_1)
                    stats_traj['enc_t_step_no_medium'].append(delta_t_step_no_medium)
        # Aggregate diagnostics over the whole encoding run.
        avg_NLL = ((- total_log_probs) / total_num_for_stats)
        avg_KL = (total_kl / total_num_for_stats)
        avg_Hq = (total_entropy_ptau / total_num_for_stats)
        words_per_bit = (total_num_for_stats / i)
        stats['avg_NLL'] = avg_NLL
        stats['avg_KL'] = avg_KL
        stats['avg_Hq'] = avg_Hq
        stats['words_per_bit'] = words_per_bit
        for (k, v) in stats_traj.items():
            # Raw KL trajectories are reported separately (subsampled below).
            if (k in ['kl(sampled|true)', 'kl(uniform|true)', 'kl(sampled|uniform)', 'kl(uniform|sampled)']):
                continue
            stats[(k + '/mean')] = np.array(v).mean()
            stats[(k + '/std')] = np.array(v).std()
            stats[(k + '/80')] = np.sort(np.array(v))[int((len(v) * 0.8))]
            stats[(k + '/20')] = np.sort(np.array(v))[int((len(v) * 0.2))]
            stats[(k + '/95')] = np.sort(np.array(v))[int((len(v) * 0.95))]
            stats[(k + '/5')] = np.sort(np.array(v))[int((len(v) * 0.05))]
        if ('kl(sampled|true)' in stats_traj):
            # Subsample the KL trajectories every 100 steps for logging.
            i = 0
            while (i < len(stats_traj['kl(sampled|true)'])):
                stats['kl(sampled|true)_it{}'.format(i)] = stats_traj['kl(sampled|true)'][i]
                stats['kl(uniform|true)_it{}'.format(i)] = stats_traj['kl(uniform|true)'][i]
                stats['kl(uniform|sampled)_it{}'.format(i)] = stats_traj['kl(uniform|sampled)'][i]
                stats['kl(sampled|uniform)_it{}'.format(i)] = stats_traj['kl(sampled|uniform)'][i]
                stats['chisquare_p(sampled|true)_it{}'.format(i)] = stats_traj['chisquare_p(sampled|true)'][i]
                stats['chisquare_p(uniform|true)_it{}'.format(i)] = stats_traj['chisquare_p(uniform|true)'][i]
                stats['(kl(sampled|true)-kl(uniform|true))_it{}'.format(i)] = (stats_traj['kl(sampled|true)'][i] - stats_traj['kl(uniform|true)'][i])
                i += 100
        stats['n_steps'] = j
        stats['bits_per_step'] = (len(private_message_bit) / float(j))
        stats['steps_per_bit'] = (j / float(len(private_message_bit)))
        stats['eff'] = (len(private_message_bit) / sum(stats_traj['medium_entropy']))
        stats['eff_raw'] = (len(private_message_bit) / sum(stats_traj['medium_entropy_raw']))
        stats['eff_output'] = (len(private_message_bit) / len(output[len(enc_context):]))
        return (self.medium.enc.decode(output[len(enc_context):].tolist()), output[len(enc_context):].tolist(), stats)
def register_all_voc_pgt(root):
    """Register every predefined VOC-PGT split as a COCO-format dataset.

    JSON paths containing '://' are treated as URIs and used verbatim; all
    other paths are resolved relative to *root*.
    """
    for splits_per_dataset in _PREDEFINED_SPLITS_VOC_PGT.values():
        for key, (image_root, json_file) in splits_per_dataset.items():
            json_path = json_file if ('://' in json_file) else os.path.join(root, json_file)
            register_coco_instances(key, _get_builtin_metadata(key), json_path, os.path.join(root, image_root))
class AssertionViolation(InterpreterError):
    """Interpreter error recording that an argument of a node failed a
    predicate check: which node, which argument position, the predicate that
    rejected it, and the capture indices involved."""

    _node: Node
    _index: int
    _reason: Callable[[Any], bool]
    _captures: Iterable[int]

    def __init__(self, node: Node, index: int, reason: Callable[[Any], bool], captures: Iterable[int]):
        super().__init__()
        self._node = node
        self._index = index
        self._reason = reason
        self._captures = captures

    def node(self) -> Node:
        """Node whose assertion was violated."""
        return self._node

    def arg(self) -> Node:
        """The child of the node at the failing position."""
        return self._node.children[self._index]

    def index(self) -> int:
        """Position of the failing argument."""
        return self._index

    def reason(self) -> Callable[[Any], bool]:
        """Predicate the argument failed to satisfy."""
        return self._reason

    def captures(self) -> Iterable[int]:
        """Capture indices associated with this assertion."""
        return self._captures
class build(_build):
    """Custom build command: compile Z3 from source unless a prebuilt
    RELEASE_DIR is being used, then run the standard build."""

    def run(self):
        # With a prebuilt release there is nothing to compile locally.
        if RELEASE_DIR is None:
            for action, msg in ((_configure_z3, 'Configuring Z3'),
                                (_build_z3, 'Building Z3'),
                                (_copy_bins, 'Copying binaries')):
                self.execute(action, (), msg=msg)
        _build.run(self)
def two_stages_kwargs():
    """Keyword arguments for building a two-stage recommender in tests."""
    first_level = [ALSWrap(rank=4), ItemKNN(num_neighbours=4), LightFMWrap(no_components=4)]
    second_params = {'timeout': 30, 'general_params': {'use_algos': ['lgb']}}
    return {
        'first_level_models': first_level,
        'train_splitter': TimeSplitter(time_threshold=0.1),
        'use_first_level_models_feat': True,
        'second_model_params': second_params,
        'num_negatives': 6,
        'negatives_type': 'first_level',
        'use_generated_features': True,
        'user_cat_features_list': ['gender'],
        'item_cat_features_list': ['class'],
        'custom_features_processor': None,
    }
def cast_tensor_type(inputs, src_type=None, dst_type=None):
    """Recursively convert tensors inside *inputs* to *dst_type*.

    ``dst_type`` may be a ``torch.device`` (tensors are moved) or a dtype
    (tensors are cast). When ``src_type`` is given, only tensors whose
    device/dtype equals it are converted. Mappings and iterables are rebuilt
    with the same container type; anything else passes through unchanged.
    """
    assert (dst_type is not None)
    if isinstance(inputs, torch.Tensor):
        if isinstance(dst_type, torch.device):
            # Device move: only when the source device matches (or no filter).
            movable = hasattr(inputs, 'to') and hasattr(inputs, 'device')
            if movable and ((inputs.device == src_type) or (src_type is None)):
                return inputs.to(dst_type)
            return inputs
        # Dtype cast: only when the source dtype matches (or no filter).
        castable = hasattr(inputs, 'to') and hasattr(inputs, 'dtype')
        if castable and ((inputs.dtype == src_type) or (src_type is None)):
            return inputs.to(dst_type)
        return inputs
    if isinstance(inputs, abc.Mapping):
        return type(inputs)({key: cast_tensor_type(value, src_type=src_type, dst_type=dst_type)
                             for key, value in inputs.items()})
    if isinstance(inputs, abc.Iterable):
        return type(inputs)(cast_tensor_type(item, src_type=src_type, dst_type=dst_type)
                            for item in inputs)
    return inputs
class TemplateNLG(NLG):
    """Template-based NLG: maps a dialog state to response sentences by
    filling ``$slot$`` placeholders in templates loaded from S3."""

    def __init__(self, nlg_template_path):
        self.nlg_template = read_s3_json('botsim', nlg_template_path)

    def generate(self, dialog_state, role):
        """Render the template whose inform/request slot sets match
        *dialog_state* for *role* ('agent' or 'user').

        Returns (sentences, sentences_slots); raises AssertionError when the
        role is invalid or no template matches the slot combination.
        """
        sentences = []
        sentences_slots = []
        matched = False
        assert ((role == 'agent') or (role == 'user'))
        act = dialog_state['action']
        if (act in self.nlg_template['dialog_act'].keys()):
            for ele in self.nlg_template['dialog_act'][act]:
                # A template matches only when both slot sets agree exactly.
                if ((set(ele['inform_slots']) == set(dialog_state['inform_slots'].keys())) and (set(ele['request_slots']) == set(dialog_state['request_slots'].keys()))):
                    (sentences, sentences_slots) = self.dialog_state_to_response_and_slot(dialog_state, ele['response'][role])
                    matched = True
                    break
        assert matched
        return (sentences, sentences_slots)

    def dialog_state_to_response_and_slot(self, dialog_state, template_sentences):
        """Fill ``$slot$`` placeholders with slot values.

        Returns (filled sentences, sentences with visible ``slot:value``
        annotations).

        Bug fix: this method was defined without ``self`` even though
        ``generate`` calls it as a bound method, so every call raised
        TypeError.
        """
        sentences = template_sentences
        sentences_slots = template_sentences
        for key in ['inform_slots', 'request_slots']:
            for slot in dialog_state[key].keys():
                slot_val = dialog_state[key][slot]
                sentences = [sentence.replace((('$' + slot) + '$'), str(slot_val), 1) for sentence in sentences]
                # NOTE(review): the annotated-replacement literal was garbled
                # in the extracted source; reconstructed as a plain
                # "slot:value" marker — confirm the intended delimiters
                # against the original file.
                sentences_slots = [sentence.replace((('$' + slot) + '$'), ((slot + ':') + str(slot_val)), 1) for sentence in sentences_slots]
        return (sentences, sentences_slots)
class MyDataset(torch.utils.data.Dataset):
    """Wraps pre-tokenized inputs plus title ids for retrieval training.

    RoBERTa-style models have no segment embeddings, so for them
    ``token_type_ids`` is neither stored nor returned.
    """

    def __init__(self, input_ids, attention_mask, token_type_ids, title_id, hn_title_ids, bert_model):
        self.bert_model = bert_model
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        if 'roberta' not in self.bert_model:
            self.token_type_ids = token_type_ids
        self.title_id = title_id
        self.hn_title_ids = hn_title_ids

    def __getitem__(self, idx):
        sample = {'input_ids': self.input_ids[idx], 'attention_mask': self.attention_mask[idx]}
        if 'roberta' not in self.bert_model:
            sample['token_type_ids'] = self.token_type_ids[idx]
        sample['title_id'] = self.title_id[idx]
        sample['hn_title_ids'] = self.hn_title_ids[idx]
        return sample

    def __len__(self):
        return len(self.input_ids)
def read_depth_png_tf(depth_dir):
    """TensorFlow wrapper: decode the tensor-wrapped path bytes to a string
    and load the depth PNG with the plain reader."""
    path = depth_dir.numpy().decode('utf-8')
    return read_depth_png(path)
def test_metric(log, log_to_pred, model):
    """Switching ``similarity_metric`` must be observable: 'confidence'
    matches the fit-time default predictions, while 'lift' must differ."""
    model.fit(create_dataset(log))
    pred_dataset = create_dataset(log.unionByName(log_to_pred))
    pairs = log_to_pred.select('user_idx', 'item_idx')
    baseline_pred = model.predict_pairs(pairs=pairs, dataset=pred_dataset)
    model.similarity_metric = 'confidence'
    confidence_pred = model.predict_pairs(pairs=pairs, dataset=pred_dataset)
    sparkDataFrameEqual(baseline_pred, confidence_pred)
    model.similarity_metric = 'lift'
    lift_pred = model.predict_pairs(pairs=pairs, dataset=pred_dataset)
    sparkDataFrameNotEqual(confidence_pred, lift_pred)
@overload_method(TupleType, '_length_get', inline='always')
def Tuple_length(builder):
    """Typing-time overload providing ``_length_get`` on TupleType builders.

    NOTE(review): the decorator name was truncated to ``.overload_method`` in
    extraction; restored assuming the usual
    ``from numba.extending import overload_method`` import at file top —
    confirm against the original file.
    """
    if isinstance(builder, TupleType):
        def getter(builder):
            # Length is the size of the builder's first contents list.
            return len(builder._contents[0])
        return getter
class NoVisualization(object):
    """Headless stand-in for a tracking visualizer: all drawing calls are
    no-ops, and ``run`` simply walks the frame range invoking the callback
    behind a progress bar."""

    def __init__(self, seq_info):
        self.frame_idx = seq_info['min_frame_idx']
        self.last_idx = seq_info['max_frame_idx']

    def set_image(self, image):
        pass

    def draw_groundtruth(self, track_ids, boxes):
        pass

    def draw_detections(self, detections):
        pass

    def draw_trackers(self, trackers):
        pass

    def run(self, frame_callback):
        # Progress-bar total mirrors the original's inclusive range "+ 2".
        progress = tqdm(total=(self.last_idx - self.frame_idx) + 2)
        while self.frame_idx <= self.last_idx:
            frame_callback(self, self.frame_idx)
            self.frame_idx += 1
            progress.update(1)
        progress.close()
def walk_files(root, extension):
    """Yield the path of every file under *root* (recursively) whose name
    ends with *extension*."""
    for dirpath, _, filenames in os.walk(root):
        yield from (os.path.join(dirpath, name) for name in filenames if name.endswith(extension))
def get_arrays(notes, labels, n_tracks, seq_len):
    """Pack note tuples and per-note track labels into fixed-length arrays.

    Args:
        notes: sequence of (time, pitch, duration, velocity) tuples.
        labels: per-note track labels, aligned with *notes*.
        n_tracks: number of tracks for the hint arrays.
        seq_len: fixed output length; unused tail entries stay zero (padding).

    Returns a dict of int arrays: per-note 'time'/'pitch'/'duration'/
    'velocity'/'label' plus per-track 'onset_hint' and 'pitch_hint'.
    """
    data = {'time': np.zeros((seq_len,), int), 'pitch': np.zeros((seq_len,), int), 'duration': np.zeros((seq_len,), int), 'velocity': np.zeros((seq_len,), int), 'label': np.zeros((seq_len,), int), 'onset_hint': np.zeros((n_tracks,), int), 'pitch_hint': np.zeros((n_tracks,), int)}
    for (i, (note, label)) in enumerate(zip(notes, labels)):
        data['time'][i] = note[0]
        # pitch and label are stored shifted by +1 so 0 can mean "padding".
        data['pitch'][i] = (note[1] + 1)
        data['duration'][i] = note[2]
        data['velocity'][i] = note[3]
        data['label'][i] = (label + 1)
    for i in range(n_tracks):
        # NOTE(review): labels were stored as label+1 above, yet this lookup
        # tests ``== i`` (unshifted): track i's notes are actually stored as
        # i+1, and i == 0 matches the zero padding value. Possible off-by-one
        # — confirm whether hints are intentionally keyed to shifted labels.
        nonzero = (data['label'] == i).nonzero()[0]
        if nonzero.size:
            # First matching position and the rounded mean (shifted) pitch.
            data['onset_hint'][i] = nonzero[0]
            data['pitch_hint'][i] = round(np.mean(data['pitch'][nonzero]))
    return data
class NTU_Feeder(Dataset):
    """Dataset feeder for NTU RGB+D skeleton files.

    Loads (sample_name, label) pairs from ``<path>/<phase>_label.pkl`` and
    parses each per-sample skeleton text file on access into a
    (3, T, V, M) array: 3 coordinates x T frames x V joints x M persons,
    as given by ``data_shape``.
    """

    def __init__(self, phase, path, data_shape, connect_joint, debug, **kwargs):
        # data_shape is (C?, ?, T, V, M); only T (frames), V (joints) and
        # M (max persons) are used here.
        (_, _, self.T, self.V, self.M) = data_shape
        self.conn = connect_joint
        label_path = '{}/{}_label.pkl'.format(path, phase)
        if os.path.exists(label_path):
            with open(label_path, 'rb') as f:
                # latin1 keeps Python-2-era pickles loadable.
                (self.sample_name, self.label) = pickle.load(f, encoding='latin1')
        else:
            logging.info('')
            logging.error('Error: Do NOT exist data files: {}!'.format(label_path))
            logging.info('Please generate data first!')
            raise ValueError()
        if debug:
            # Debug mode: keep only the first 300 samples for quick runs.
            self.sample_name = self.sample_name[:300]
            self.label = self.label[:300]

    def __len__(self):
        return len(self.sample_name)

    def __getitem__(self, idx):
        label = self.label[idx]
        name = self.sample_name[idx]
        data = np.zeros((3, self.T, self.V, self.M))
        # Skeleton text format: frame count, then per frame a person count,
        # per person a header line (skipped), a joint count, and per joint a
        # space-separated coordinate line (x y z ...).
        with open(name, 'r') as fr:
            frame_num = int(fr.readline())
            for frame in range(frame_num):
                # Frames beyond the fixed window T are dropped.
                if (frame >= self.T):
                    break
                person_num = int(fr.readline())
                for person in range(person_num):
                    fr.readline()
                    joint_num = int(fr.readline())
                    for joint in range(joint_num):
                        v = fr.readline().split(' ')
                        # Joints/persons beyond the fixed V/M are still read
                        # (to keep the file cursor aligned) but not stored.
                        if ((joint < self.V) and (person < self.M)):
                            data[(0, frame, joint, person)] = float(v[0])
                            data[(1, frame, joint, person)] = float(v[1])
                            data[(2, frame, joint, person)] = float(v[2])
        # Project the raw coordinates into the model's multi-branch input.
        data = multi_input(data, self.conn)
        return (data, label, name)
def test_wordvec_type():
    """``get_wordvec_file`` must honor an explicit wordvec_type and raise
    FileNotFoundError when none of the default locations exist."""
    with tempfile.TemporaryDirectory(dir=f'{TEST_WORKING_DIR}/out') as temp_dir:
        google_dir = os.path.join(temp_dir, 'google', 'English')
        os.makedirs(google_dir)
        fake_file = os.path.join(google_dir, 'en.vectors.txt')
        # Create an empty placeholder vectors file.
        with open(fake_file, 'w'):
            pass
        found = utils.get_wordvec_file(wordvec_dir=temp_dir, shorthand='en_foo', wordvec_type='google')
        assert found == fake_file
        with pytest.raises(FileNotFoundError):
            utils.get_wordvec_file(wordvec_dir=temp_dir, shorthand='en_foo')
def _expand_questions(datasets):
    """Flatten (img, ternary, relations, norelations) tuples into per-question
    samples.

    Each dataset entry carries three (questions, answers) pairs; images are
    converted to channel-last order once per entry. Returns three lists of
    (img, question, answer) triples: (ternary, relational, non-relational).
    """
    ternary_set = []
    rel_set = []
    norel_set = []
    for (img, ternary, relations, norelations) in datasets:
        img = np.swapaxes(img, 0, 2)
        for (qst, ans) in zip(ternary[0], ternary[1]):
            ternary_set.append((img, qst, ans))
        for (qst, ans) in zip(relations[0], relations[1]):
            rel_set.append((img, qst, ans))
        for (qst, ans) in zip(norelations[0], norelations[1]):
            norel_set.append((img, qst, ans))
    return (ternary_set, rel_set, norel_set)


def load_data(dirs='/miniscratch/mittalsa/data/data'):
    """Load the Sort-of-CLEVR pickle and expand it into question triples.

    Args:
        dirs: directory containing ``sort-of-clevr.pickle``. New optional
            parameter; the default preserves the previously hard-coded path,
            so existing ``load_data()`` callers are unaffected.

    Returns a 9-tuple:
        (ternary_train, ternary_val, ternary_test,
         rel_train, rel_val, rel_test,
         norel_train, norel_val, norel_test)
    """
    print('loading data...')
    filename = os.path.join(dirs, 'sort-of-clevr.pickle')
    # NOTE: pickle is only safe on trusted, locally generated data.
    with open(filename, 'rb') as f:
        (train_datasets, val_datasets, test_datasets) = pickle.load(f)
    print('processing data...')
    # The three splits share identical expansion logic (was triplicated).
    (ternary_train, rel_train, norel_train) = _expand_questions(train_datasets)
    (ternary_val, rel_val, norel_val) = _expand_questions(val_datasets)
    (ternary_test, rel_test, norel_test) = _expand_questions(test_datasets)
    return (ternary_train, ternary_val, ternary_test, rel_train, rel_val, rel_test, norel_train, norel_val, norel_test)
def rounds_to_string(rounds):
    """Format the three round counts (fast, medium, exhaustive) for display."""
    fast, medium, exhaustive = rounds[0], rounds[1], rounds[2]
    return f'\nFAST: {fast}\nMEDIUM: {medium}\nEXHAUSTIVE: {exhaustive}'
def broadcast_coalesced(tensors, devices, buffer_size=10485760):
    """Broadcast *tensors* to *devices*, coalescing small tensors into
    buffers of at most *buffer_size* bytes to reduce transfer overhead.

    NOTE(review): the default value after ``buffer_size=`` was lost in
    extraction; 10485760 (10 MiB) is the default used by
    ``torch.cuda.comm.broadcast_coalesced`` — confirm against the original
    file.
    """
    return torch._C._broadcast_coalesced(tensors, devices, buffer_size)
class ContinuousMeanQFunction(ContinuousQFunction):
    """Continuous-action Q-function producing a single mean Q-value via a
    linear head over the joint (observation, action) encoding."""

    _encoder: EncoderWithAction
    _fc: nn.Linear

    def __init__(self, encoder: EncoderWithAction, hidden_size: int):
        super().__init__()
        self._encoder = encoder
        # Scalar head: hidden representation -> one Q-value.
        self._fc = nn.Linear(hidden_size, 1)

    def forward(self, x: TorchObservation, action: torch.Tensor) -> QFunctionOutput:
        # A mean head has no distributional outputs, hence no quantiles/taus.
        hidden = self._encoder(x, action)
        return QFunctionOutput(q_value=self._fc(hidden), quantiles=None, taus=None)

    def encoder(self) -> EncoderWithAction:
        return self._encoder
def model_with_ann(tmp_path):
    """SLIM model wired to an nmslib HNSW ANN index stored on local disk."""
    index_params = NmslibHnswParam(space='negdotprod_sparse', m=10, ef_s=200, ef_c=200, post=0)
    index_store = SharedDiskIndexStore(warehouse_dir=str(tmp_path), index_dir='nmslib_hnsw_index')
    builder = ExecutorNmslibIndexBuilder(index_params=index_params, index_store=index_store)
    return SLIM(0.0, 0.01, seed=42, index_builder=builder)
class OffsetPlayerSpaceInvadersWorld(SpaceInvadersWorld):
    """Space Invaders variant: player ship lowered to y=100, with three
    evenly spaced shields at y=200."""

    def initial_shield_configuration(self):
        # Shields at 1/4, 2/4 and 3/4 of the world width, each with 20 health
        # ((k * width) // 4 reproduces the original integer arithmetic).
        return [{'health': 20, 'position': ((k * self._width) // 4, 200)} for k in (1, 2, 3)]

    def initial_player_ship_position(self):
        # Horizontal center; true division matches the original (may be float).
        return (self._width / 2, 100)
@app.route('/<path:path>')
def static_file(path):
    """Serve whitelisted static assets; map 'index.html' to the brat page;
    reject anything else with 403.

    NOTE(review): the ``@app.route`` decorator name was truncated in
    extraction; restored per the Flask routing convention — confirm against
    the original file.
    """
    if (path in ['stanza-brat.css', 'stanza-brat.js', 'stanza-parseviewer.js', 'loading.gif', 'favicon.png', 'stanza-logo.png']):
        return app.send_static_file(path)
    elif (path == 'index.html'):
        # Bug fix: ``path in 'index.html'`` was a substring test, so any
        # substring of 'index.html' (e.g. 'index', 'html', '') matched here.
        return app.send_static_file('stanza-brat.html')
    else:
        abort(403)
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    """Integration check: the clip-italian dual encoder reproduces known
    image/text logits on the standard COCO fixture image.

    NOTE(review): the decorator names (mangled to ``_flax _vision``) and the
    fixture filename (mangled to ``.png``) were damaged in extraction;
    restored to ``@require_flax``/``@require_vision`` and the standard
    ``000000039769.png`` fixture used across transformers tests — confirm
    against the original file.
    """

    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np')
        outputs = model(**inputs)
        # logits_per_image is (n_images, n_texts); logits_per_text is its transpose.
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]))
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=0.001))
def worker_init_fn(worker_id):
    """Seed numpy differently in each DataLoader worker.

    Mixes the wall-clock time (truncated to int32) with the worker id so
    workers do not share a RNG stream.
    """
    base_seed = np.array(time.time(), dtype=np.int32)
    np.random.seed(base_seed + worker_id)
class AbsPosAttentionBase(MultiHeadAttentionBase):
    """Multi-head attention using plain (absolute-position) dot-product scores."""

    def _attention(self, mask: Optional[torch.Tensor], q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
        # Raw scores are batched q · kᵀ; masking and value mixing happen in
        # the shared read step of the base class.
        scores = torch.bmm(q, k.transpose(1, 2))
        return self._attention_read(mask, scores, v)