code
stringlengths
101
5.91M
def protocol_decoder(protocol):
    """Map a face-anti-spoofing domain-generalization protocol name to
    its (train_datasets, test_datasets) lists.

    Args:
        protocol: protocol identifier, e.g. 'O_C_I_to_M' meaning train on
            OULU + CASIA_MFSD + Replay_attack and test on MSU_MFSD.

    Returns:
        Tuple (data_name_list_train, data_name_list_test).

    Raises:
        ValueError: for an unknown protocol name.  (The original mixed
        `if`/`elif` chain fell through silently and raised
        UnboundLocalError on the return line instead.)
    """
    _PROTOCOLS = {
        'O_C_I_to_M': (['OULU', 'CASIA_MFSD', 'Replay_attack'], ['MSU_MFSD']),
        'O_C_to_M': (['OULU', 'CASIA_MFSD'], ['MSU_MFSD']),
        'O_to_O': (['OULU'], ['OULU']),
        'O_M_I_to_C': (['OULU', 'MSU_MFSD', 'Replay_attack'], ['CASIA_MFSD']),
        'O_C_M_to_I': (['OULU', 'CASIA_MFSD', 'MSU_MFSD'], ['Replay_attack']),
        'I_C_M_to_O': (['MSU_MFSD', 'CASIA_MFSD', 'Replay_attack'], ['OULU']),
        'M_I_to_C': (['MSU_MFSD', 'Replay_attack'], ['CASIA_MFSD']),
        'M_I_to_O': (['MSU_MFSD', 'Replay_attack'], ['OULU']),
    }
    try:
        train, test = _PROTOCOLS[protocol]
    except KeyError:
        raise ValueError(f'Unknown protocol: {protocol!r}') from None
    # Return fresh lists so callers cannot mutate the shared table.
    return (list(train), list(test))
def parallel_firing_graph(S, eff):
    """Build the parallel-firing digraph on the given list of divisors.

    Vertex i corresponds to eff[i]; an edge i -> j is added when firing
    every vertex of S whose chip count meets its out-degree transforms
    eff[i] into eff[j].
    """
    graph = DiGraph()
    graph.add_vertices(range(len(eff)))
    for idx in graph.vertices(sort=True):
        fired_any = False
        next_div = deepcopy(eff[idx])
        for vtx in eff[idx]:
            # A vertex fires when it holds at least out-degree many chips.
            if eff[idx][vtx] >= S.out_degree(vtx):
                fired_any = True
                next_div[vtx] -= S.out_degree(vtx)
                for edge in S.outgoing_edges(vtx):
                    # edge = (tail, head, weight)
                    next_div[edge[1]] += edge[2]
        if fired_any and next_div in eff:
            graph.add_edge((idx, eff.index(next_div)))
    return graph
# NOTE(review): the leading `_test()` fragment looks like a decorator whose
# `@`-prefix was lost during extraction; reconstructed as a decorator — confirm.
@_test()
def test_ddr_reduce_red_2x40_6b_decouple_array_interfaces():
    """Run the 2x40, 6-bit DDR reduction test with decoupled array interfaces."""
    with set_temporary('compiler', 'xilinx', 'decouple_array_interfaces', value=True):
        return exec_test(2, 40, 6, 'ddr', 'red_2x40_6b_decoupled')
def get_parameter_list(row):
    """Extract parameter substrings from a raw log line by aligning it with
    its parsed template, where '*' marks a parameter slot.

    Args:
        row: mapping with string entries at constants.LOGLINE_NAME (raw
            line) and constants.PARSED_LOGLINE_NAME (template).

    Returns:
        List of space-joined parameter strings; empty when either field
        is not a string.
    """
    params = []
    if not isinstance(row[constants.LOGLINE_NAME], str) or \
            not isinstance(row[constants.PARSED_LOGLINE_NAME], str):
        return params
    raw_tokens = row[constants.LOGLINE_NAME].split()
    tpl_tokens = row[constants.PARSED_LOGLINE_NAME].split()
    buf = []
    r = t = 0
    prev_was_wildcard = False
    while r < len(raw_tokens) and t < len(tpl_tokens):
        if raw_tokens[r] == tpl_tokens[t]:
            # Literal match: flush any pending parameter tokens.
            if buf:
                params.append(' '.join(buf))
                buf = []
            prev_was_wildcard = False
            r += 1
            t += 1
        elif tpl_tokens[t] == '*':
            if prev_was_wildcard:
                # Back-to-back wildcards: close the previous parameter.
                params.append(' '.join(buf))
                buf = [raw_tokens[r]]
            else:
                buf.append(raw_tokens[r])
            prev_was_wildcard = True
            r += 1
            t += 1
        else:
            # Raw token with no template counterpart: part of the parameter.
            buf.append(raw_tokens[r])
            r += 1
    if buf:
        if r < len(raw_tokens):
            # Trailing raw tokens belong to the last parameter.
            params.append(' '.join(buf + raw_tokens[r:]))
        else:
            params.append(' '.join(buf))
    return params
def default_params():
    """Return the default experiment configuration as a fresh dict."""
    return {
        'dataset': 'adult',
        'iters': 10000,
        'epsilon': 1.0,
        'seed': 0,
    }
def parse_args(raw_args=None):
    """Parse fuzzing-campaign control arguments.

    Args:
        raw_args: optional argv list; defaults to sys.argv[1:].

    Returns:
        argparse.Namespace with a mandatory subcommand in `.command`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, help='afl input/seed dir', required=True)
    parser.add_argument('-o', '--output', type=str, help='afl output dir', required=True)
    parser.add_argument('-j', '--jobs', type=int, help='thread number', default=1)
    parser.add_argument('-g', '--group', type=str, help='group',
                        choices=['unibench', 'lava', 'fuzzer-test-suite'], required=True)
    parser.add_argument('-p', '--program', type=str, help='program', required=True)
    parser.add_argument('--args', type=str, help='program argument', required=True)
    parser.add_argument('-f', '--fuzzer', type=str, help='fuzzer', required=True)
    subparsers = parser.add_subparsers(dest='command', help='command', required=True)
    for name in ('start', 'stop', 'pause', 'resume'):
        subparsers.add_parser(name)
    # `scale` is the only subcommand with its own positional argument.
    scale_parser = subparsers.add_parser('scale')
    scale_parser.add_argument('scale_num', type=int)
    return parser.parse_args(raw_args)
def slerp(a, b, t):
    """Spherical linear interpolation between tensors a and b at fraction t.

    Both endpoints are normalized first; the result is renormalized.
    """
    a = normalize(a)
    b = normalize(b)
    # Cosine of the angle between the unit vectors, kept on the last dim.
    cos_omega = (a * b).sum(-1, keepdim=True)
    theta = t * torch.acos(cos_omega)
    # Component of b orthogonal to a, renormalized.
    ortho = normalize(b - cos_omega * a)
    result = a * torch.cos(theta) + ortho * torch.sin(theta)
    return normalize(result)
class WrapEpochValue():
    """Wrap a callable so a per-epoch schedule value can be queried."""

    def __init__(self, func):
        # func must accept a keyword argument named `epoch`.
        self.func = func

    def get_value(self, epoch):
        """Evaluate the wrapped schedule at the given epoch."""
        return self.func(epoch=epoch)
class OffsetPlayer150SpaceInvadersWorld(SpaceInvadersWorld):
    """Space Invaders variant with the player ship raised to y=150."""

    def initial_shield_configuration(self):
        """Three 20-health shields evenly spaced across the width at y=200."""
        shield_y = 200
        return [
            {'health': 20, 'position': (self._width // 4, shield_y)},
            {'health': 20, 'position': ((2 * self._width) // 4, shield_y)},
            {'health': 20, 'position': ((3 * self._width) // 4, shield_y)},
        ]

    def initial_player_ship_position(self):
        """Centered horizontally, offset to y=150 (float x, as in the base)."""
        return (self._width / 2, 150)
def _reduce_single_cache(args, k, res, metas):
    """Optionally truncate `res` to the configured subset size, then save it
    as a cache shard under `<output parent>/caches/<output name>`.

    Args:
        args: config namespace providing `subset` and `data.output.path`.
        k: cache key, used as the filename prefix and in logging.
        res: list of samples to save.
        metas: metadata passed through to save_output.

    Returns:
        Tuple (out_path, total_count): path of the last saved shard and the
        accumulated sample count across all saved lists.
    """
    # BUG FIX: the guard inspected args.subset.size but the slice used
    # args.subset_size; use the attribute that was actually validated.
    if hasattr(args.subset, 'size') and isinstance(args.subset.size, int):
        res = res[:args.subset.size]
    print('saving cache ({}), subset size: {}'.format(k, len(res)))
    results = [res]
    name = args.data.output.path.name
    possible_out_path = (args.data.output.path.parent / 'caches') / name
    out_path = None
    counts = 0
    for samples_list in results:
        (out_path, count) = save_output(samples_list, metas, possible_out_path, k + '_')
        counts += count
    # BUG FIX: return the accumulated total, not the last iteration's count.
    return (out_path, counts)
# NOTE(review): the leading `_grad()` fragment looks like a decorator whose
# `@`-prefix (likely `@torch.no_grad()`) was lost during extraction — confirm.
@_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path,
                                config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq wav2vec2 checkpoint into a HF Transformers model dir.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional HF config to start from.
        dict_path: fairseq dictionary path (fine-tuned models only).
        is_finetuned: True for a CTC model, False for a pretraining model.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # The pad/bos ids are intentionally crossed between fairseq and HF.
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='|',
                do_lower_case=False,
            )
            # Layer-norm feature extractors expect an attention mask.
            return_attention_mask = config.feat_extract_norm == 'layer'
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path],
            arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])},
        )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], task=task)

    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
def _patch_function(fn: FunctionType, nargs: int) -> FunctionType:
    """Return a copy of `fn` whose code object declares exactly `nargs`
    positional arguments and has the *args/**kwargs flags cleared.

    The function's globals, name, defaults and closure are preserved.
    """
    co = fn.__code__
    co_flags = co.co_flags & ~HAS_VARSTUFF
    co_args: tuple
    if hasattr(co, 'co_posonlyargcount'):
        # Python 3.8+ code objects carry a positional-only argument count.
        co_args = (nargs, 0, 0, co.co_nlocals, co.co_stacksize, co_flags,
                   co.co_code, co.co_consts, co.co_names, co.co_varnames,
                   co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab,
                   co.co_freevars, co.co_cellvars)
    else:
        co_args = (nargs, 0, co.co_nlocals, co.co_stacksize, co_flags,
                   co.co_code, co.co_consts, co.co_names, co.co_varnames,
                   co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab,
                   co.co_freevars, co.co_cellvars)
    new_code = CodeType(*co_args)
    return FunctionType(new_code, fn.__globals__, fn.__name__,
                        fn.__defaults__, fn.__closure__)
def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=()):
    """Interpret compile-time option nodes into (value, pos) pairs.

    Args:
        optlist: sequence of option nodes (positional options) or falsy.
        optdict: DictNode of keyword options or falsy.
        type_env: scope in which entries listed in `type_args` are analysed
            as types; if absent, a type argument raises CompileError.
        type_args: indices/keys whose values must be types.

    Returns:
        Tuple (optlist, optdict) with nodes replaced by (value, pos) pairs.

    Raises:
        CompileError: for an invalid or disallowed type argument.
    """
    def interpret(node, ix):
        if ix in type_args:
            if type_env:
                type = node.analyse_as_type(type_env)
                if not type:
                    raise CompileError(node.pos, 'Invalid type.')
                return (type, node.pos)
            else:
                raise CompileError(node.pos, 'Type not allowed here.')
        else:
            # Prefer the unicode value of string literals on Python 3.
            if (sys.version_info[0] >= 3) and isinstance(node, StringNode) and (node.unicode_value is not None):
                return (node.unicode_value, node.pos)
            return (node.compile_time_value(empty_scope), node.pos)

    if optlist:
        optlist = [interpret(x, ix) for (ix, x) in enumerate(optlist)]
    if optdict:
        assert isinstance(optdict, DictNode)
        new_optdict = {}
        for item in optdict.key_value_pairs:
            (new_key, dummy) = interpret(item.key, None)
            new_optdict[new_key] = interpret(item.value, item.key.value)
        optdict = new_optdict
    # BUG FIX: the original returned `new_optdict`, which is unbound
    # (NameError) whenever `optdict` is falsy; return `optdict` instead.
    return (optlist, optdict)
class GaussianConvBaseline(Baseline, Parameterized):
    """Value-function baseline backed by a Gaussian convolutional regressor."""

    def __init__(self, env_spec, subsample_factor=1.0, regressor_args=None):
        Serializable.quick_init(self, locals())
        super(GaussianConvBaseline, self).__init__(env_spec)
        if regressor_args is None:
            regressor_args = dict()
        self._regressor = GaussianConvRegressor(
            input_shape=env_spec.observation_space.shape,
            output_dim=1,
            name='vf',
            **regressor_args)

    def fit(self, paths):
        """Regress concatenated observations onto empirical returns."""
        observations = np.concatenate([p['observations'] for p in paths])
        returns = np.concatenate([p['returns'] for p in paths])
        self._regressor.fit(observations, returns.reshape((-1, 1)))

    def predict(self, path):
        """Predict a flat vector of values for one path's observations."""
        return self._regressor.predict(path['observations']).flatten()

    def get_param_values(self, **tags):
        return self._regressor.get_param_values(**tags)

    def set_param_values(self, flattened_params, **tags):
        self._regressor.set_param_values(flattened_params, **tags)
def check_conda_lock_version():
    """Fail fast when the installed conda-lock differs from the pinned one.

    The expected version is printed by sklearn/_min_dependencies.py; the
    installed version comes from package metadata.

    Raises:
        RuntimeError: on a version mismatch.
    """
    expected_conda_lock_version = execute_command(
        [sys.executable, 'sklearn/_min_dependencies.py', 'conda-lock']).strip()
    installed_conda_lock_version = version('conda-lock')
    if installed_conda_lock_version != expected_conda_lock_version:
        raise RuntimeError(
            f'Expected conda-lock version: {expected_conda_lock_version}, got: {installed_conda_lock_version}')
def row_str(rel_diff, diff_seconds, measurement):
    """Format one benchmark-comparison table row.

    Args:
        rel_diff: relative timing difference (fraction, printed as %).
        diff_seconds: absolute timing difference in seconds (printed in us).
        measurement: object exposing `.stmt` and `.metadata` with 'params'
            and 'tensor_parameters' entries — assumed per caller; confirm.
    """
    params = measurement.metadata['params']
    tensor_parameters = measurement.metadata['tensor_parameters']
    dim = params['dim']
    x_numel = tensor_parameters['x']['numel']
    steps = [params[f'x_step_{i}'] for i in range(dim)]
    order = tensor_parameters['x']['order']
    # Hide the order column when it is the identity permutation.
    identity_order = all(i == j for (i, j) in zip(order, range(dim)))
    order = str('' if identity_order else order)
    task_specific = ''
    if measurement.stmt.startswith('torch.topk'):
        (dim_str, k_str) = measurement.stmt[:-1].replace('torch.topk(x, ', '').split(', ')
        task_specific = f'{dim_str}, {k_str:<8}'
    elif measurement.stmt.startswith('torch.std'):
        pass
    elif measurement.stmt.startswith('torch.sort'):
        task_specific = measurement.stmt[:-1].replace('torch.sort(x, ', '')
    return f"{(rel_diff * 100):>5.0f}% {(abs(diff_seconds) * 1000000.0):>11.1f} us{'':>6}|{x_numel:>12} {params['dtype_str']:>10} {str([params[f'k{i}'] for i in range(dim)]):>17} {(str(steps) if (not all(((i == 1) for i in steps))) else ''):>12} {order:>12}{'':>8}{task_specific}"
def main():
    """CLI entry point: train and/or evaluate a text-matching model."""
    parser = argparse.ArgumentParser('Text Matching task')
    parser.add_argument('--model_arch', default='cosent', const='cosent', nargs='?',
                        choices=['cosent', 'sentencebert', 'bert'], help='model architecture')
    parser.add_argument('--model_name', default='hfl/chinese-macbert-base', type=str,
                        help='Transformers model model or path')
    parser.add_argument('--train_file', default='data/STS-B/STS-B.train.data', type=str, help='Train data path')
    parser.add_argument('--valid_file', default='data/STS-B/STS-B.valid.data', type=str, help='Valid data path')
    parser.add_argument('--test_file', default='data/STS-B/STS-B.test.data', type=str, help='Test data path')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_predict', action='store_true', help='Whether to run predict.')
    parser.add_argument('--output_dir', default='./outputs/STS-B-model', type=str, help='Model output directory')
    parser.add_argument('--max_seq_length', default=128, type=int, help='Max sequence length')
    parser.add_argument('--num_epochs', default=10, type=int, help='Number of training epochs')
    parser.add_argument('--batch_size', default=64, type=int, help='Batch size')
    parser.add_argument('--learning_rate', default=2e-05, type=float, help='Learning rate')
    parser.add_argument('--encoder_type', default='MEAN', type=(lambda t: EncoderType[t]),
                        choices=list(EncoderType), help='Encoder type, string name of EncoderType')
    parser.add_argument('--bf16', action='store_true', help='Whether to use bfloat16 amp training.')
    parser.add_argument('--data_parallel', action='store_true', help='Whether to use multi-gpu data parallel.')
    args = parser.parse_args()
    logger.info(args)

    # Dispatch table avoids repeating the three-way arch branch twice.
    arch_to_cls = {'cosent': CosentModel, 'sentencebert': SentenceBertModel}

    if args.do_train:
        model_cls = arch_to_cls.get(args.model_arch, BertMatchModel)
        model = model_cls(model_name_or_path=args.model_name,
                          encoder_type=args.encoder_type,
                          max_seq_length=args.max_seq_length)
        model.train_model(args.train_file, args.output_dir,
                          eval_file=args.valid_file,
                          num_epochs=args.num_epochs,
                          batch_size=args.batch_size,
                          lr=args.learning_rate,
                          bf16=args.bf16,
                          data_parallel=args.data_parallel)
        logger.info(f'Model saved to {args.output_dir}')

    if args.do_predict:
        model_cls = arch_to_cls.get(args.model_arch, BertMatchModel)
        # Prediction loads the freshly-saved model from output_dir.
        model = model_cls(model_name_or_path=args.output_dir,
                          encoder_type=args.encoder_type,
                          max_seq_length=args.max_seq_length)
        test_data = load_text_matching_test_data(args.test_file)
        srcs = []
        trgs = []
        labels = []
        for terms in test_data:
            (src, trg, label) = (terms[0], terms[1], terms[2])
            srcs.append(src)
            trgs.append(trg)
            labels.append(label)
        logger.debug(f'{test_data[0]}')
        sentence_embeddings = model.encode(srcs)
        logger.debug(f'{type(sentence_embeddings)}, {sentence_embeddings.shape}, {sentence_embeddings[0].shape}')
        calc_similarity_scores(model, srcs, trgs, labels)
class AnomalyMapGenerator(nn.Module):
    """Upscale patch scores to the input resolution and Gaussian-smooth them."""

    def __init__(self, input_size: (ListConfig | tuple), sigma: int = 4) -> None:
        super().__init__()
        self.input_size = input_size
        # Odd kernel covering roughly 4*sigma on each side.
        kernel_size = (2 * int((4.0 * sigma) + 0.5)) + 1
        self.blur = GaussianBlur2d(kernel_size=(kernel_size, kernel_size),
                                   sigma=(sigma, sigma), channels=1)

    def compute_anomaly_map(self, patch_scores: Tensor) -> Tensor:
        """Resize patch scores to (H, W) of the input, then blur."""
        resized = F.interpolate(patch_scores, size=(self.input_size[0], self.input_size[1]))
        return self.blur(resized)

    def forward(self, patch_scores: Tensor) -> Tensor:
        """Return the smoothed, full-resolution anomaly map."""
        return self.compute_anomaly_map(patch_scores)
def test_without_control():
    """to_json must reject NaN/inf/complex/bytes unless given converters."""
    array = ak.Array([
        {'ok': 1, 'x': 1.1, 'y': (1 + 1j), 'z': b'one'},
        {'ok': 2, 'x': 2.2, 'y': (2 + 2j), 'z': b'two'},
        {'ok': 3, 'x': 3.3, 'y': (3 + 3j), 'z': b'three'},
        {'ok': 4, 'x': float('nan'), 'y': float('nan'), 'z': b'four'},
        {'ok': 5, 'x': float('inf'), 'y': (float('inf') + 5j), 'z': b'five'},
        {'ok': 6, 'x': float('-inf'), 'y': (6 + (float('-inf') * 1j)), 'z': b'six'},
        {'ok': 7, 'x': 7.7, 'y': (7 + 7j), 'z': b'seven'},
        {'ok': 8, 'x': None, 'y': (8 + 8j), 'z': b'eight'},
        {'ok': 9, 'x': 9.9, 'y': (9 + 9j), 'z': b'nine'},
    ])
    # Plain ints serialize fine.
    assert ak.to_json(array.ok) == '[1,2,3,4,5,6,7,8,9]'
    # Non-finite floats require all three replacement strings.
    with pytest.raises(ValueError):
        ak.to_json(array.x)
    assert ak.to_json(array.x[:3]) == '[1.1,2.2,3.3]'
    with pytest.raises(ValueError):
        ak.to_json(array.x, nan_string='NAN')
    with pytest.raises(ValueError):
        ak.to_json(array.x, nan_string='NAN', posinf_string='INF')
    assert ak.to_json(array.x, nan_string='NAN', posinf_string='INF', neginf_string='-INF') == '[1.1,2.2,3.3,"NAN","INF","-INF",7.7,null,9.9]'
    # Complex values require complex_record_fields.
    with pytest.raises(TypeError):
        ak.to_json(array.y[:3])
    assert ak.to_json(array.y[:3], complex_record_fields=['R', 'I']) == '[{"R":1.0,"I":1.0},{"R":2.0,"I":2.0},{"R":3.0,"I":3.0}]'
    # Bytes require an explicit converter.
    with pytest.raises(TypeError):
        ak.to_json(array.z)
    assert ak.to_json(array.z, convert_bytes=(lambda x: x.decode())) == '["one","two","three","four","five","six","seven","eight","nine"]'
def parsingiou_post_processor():
    """Factory returning a fresh ParsingIoUPostProcessor instance."""
    return ParsingIoUPostProcessor()
def load_TAG_info(cf):
    """Load (or build once, on local rank 0) graph info for a TAG dataset.

    Rank 0 builds splits/labels/node-count from the raw graph and saves
    them; other local ranks poll until the saved info appears, then load it.
    """
    d = cf.data
    if not d.is_processed('g_info'):
        if cf.local_rank <= 0:
            dataset_type = d.md['type']
            if dataset_type == 'ogb':
                (g, labels, split_idx) = load_ogb_graph_structure_only(cf)
                splits = {**{f'{_}_x': split_idx[_].numpy() for _ in ['train', 'valid', 'test']}, 'labels': labels}
                g_info = SN(splits=splits, labels=labels, n_nodes=g.num_nodes())
            elif dataset_type == 'amazon':
                (g, labels, split_idx) = load_amazon_graph_structure_only(cf)
                splits = {'train_x': split_idx[0], 'valid_x': split_idx[1], 'test_x': split_idx[2]}
                g_info = SN(splits=splits, labels=labels, n_nodes=g.num_nodes())
            elif dataset_type == 'dblp':
                g = load_dblp_graph_structure_only(cf)
                g_info = SN(n_nodes=g.num_nodes())
            elif dataset_type == 'good':
                # NOTE(review): the 'good' branch reuses the dblp loader;
                # looks like a possible copy-paste — confirm intended.
                g = load_dblp_graph_structure_only(cf)
                g_info = SN(n_nodes=g.num_nodes())
            else:
                raise NotImplementedError
            d.save_g_info(g_info)
            del g
        else:
            # Non-zero local ranks wait for rank 0 to finish processing.
            print(f'Waiting for feature processing on LOCAL_RANK #{cf.local_rank}')
            while not d.is_processed('g_info'):
                time.sleep(2)
            print(f'Detected processed feature, LOCAL_RANK #{cf.local_rank} start loading!')
            # Grace period so the file is fully written before loading.
            time.sleep(5)
    g_info = uf.pickle_load(d._g_info_file)
    return g_info
class BaseSegNeck(nn.Module, metaclass=ABCMeta):
    """Base class for segmentation necks.

    FIX: `norm_cfg` and `act_cfg` previously used mutable dict literals as
    default arguments — a single dict shared by every instance (and stored
    on `self`), so mutating one instance's config silently changed all
    others.  Defaults are now created per instance; passing None selects
    the same defaults as before.
    """

    def __init__(self,
                 in_channels=None,
                 out_channels=None,
                 aux_out_channels=None,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None):
        super(BaseSegNeck, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aux_out_channels = aux_out_channels
        self.conv_cfg = conv_cfg
        # Fresh dicts per instance; same values as the old shared defaults.
        self.norm_cfg = dict(type='BN', requires_grad=True) if norm_cfg is None else norm_cfg
        self.act_cfg = dict(type='ReLU') if act_cfg is None else act_cfg

    def forward(self, x):
        # Subclasses implement the actual neck computation.
        pass
def _discrete_log_pgroup(p, vals, aa, b):
    """Discrete logarithm in an abelian p-group via recursive splitting.

    Args:
        p: the prime.
        vals: p-adic valuations of the orders of the generators.
        aa: the generators.
        b: target element.

    Returns:
        Coefficient vector x with sum(x[i]*aa[i]) == b.

    Raises:
        TypeError: when b is not in the subgroup generated by aa.
    """
    from itertools import product as iproduct
    # Scaling factors projecting generators into the slice [j, k).
    qq = lambda j, k: vector((p ** (j + max(0, v - k)) for (a, v) in zip(aa, vals)))
    subbasis = lambda j, k: [q * a for (q, a) in zip(qq(j, k), aa)]
    dotprod = lambda xs, ys: sum(x * y for (x, y) in zip(xs, ys))

    def _base(j, k, c):
        # Baby-step giant-step in an elementary abelian slice of width 1.
        assert (k - j) == 1
        aajk = subbasis(j, k)
        assert not any(p * a for a in aajk)
        idxs = [i for (i, a) in enumerate(aajk) if a]
        rs = [([0], [0]) for i in range(len(aajk))]
        # Alternate generators between the baby and giant sides.
        for i in range(len(idxs)):
            rs[idxs[i]] = (range(p), [0]) if (i % 2) else ([0], range(p))
        if len(idxs) % 2:
            # Odd count: split the last generator's range between both sides.
            m = p.isqrt() + 1
            rs[idxs[-1]] = (range(0, p, m), range(m))
        tab = {}
        for x in iproduct(*(r for (r, _) in rs)):
            key = dotprod(x, aajk)
            if hasattr(key, 'set_immutable'):
                key.set_immutable()
            tab[key] = vector(x)
        for y in iproduct(*(r for (_, r) in rs)):
            key = c - dotprod(y, aajk)
            if hasattr(key, 'set_immutable'):
                key.set_immutable()
            if key in tab:
                return tab[key] + vector(y)
        raise TypeError('Not in group')

    def _rec(j, k, c):
        assert 0 <= j < k
        if (k - j) <= 1:
            return _base(j, k, c)
        # Split [j, k) into w pieces and solve from the top slice down.
        w = 2
        js = list(range(j, k, ((k - j) + w - 1) // w)) + [k]
        assert len(js) == (w + 1)
        x = vector([0] * len(aa))
        for i in reversed(range(w)):
            gamma = (p ** (js[i] - j)) * c - dotprod(x, subbasis(js[i], k))
            v = _rec(js[i], js[i + 1], gamma)
            assert not any(q1 % q2 for (q1, q2) in zip(qq(js[i], js[i + 1]), qq(js[i], k)))
            x += vector(((q1 // q2) * r for (q1, q2, r) in zip(qq(js[i], js[i + 1]), qq(js[i], k), v)))
        return x

    return _rec(0, max(vals), b)
def get_name_from_path_name(p):
    """Extract the name segment from a basename shaped '<prefix>_<name>.<ext>'.

    Example: '/a/b/run_foo.txt' -> 'foo'.
    """
    basename = os.path.basename(p)
    after_underscore = basename.split('_')[1]
    return after_underscore.split('.')[0]
class TestCsqrt(object):
    """Tests for complex square root behavior."""

    def test_simple(self):
        check_complex_value(np.sqrt, 1, 0, 1, 0)
        # sqrt(i) = (1 + i) / sqrt(2)
        rres = 0.5 * np.sqrt(2)
        ires = rres
        check_complex_value(np.sqrt, 0, 1, rres, ires, False)
        check_complex_value(np.sqrt, -1, 0, 0, 1)

    def test_simple_conjugate(self):
        ref = np.conj(np.sqrt(complex(1, 1)))

        def f(z):
            return np.sqrt(np.conj(z))

        check_complex_value(f, 1, 1, ref.real, ref.imag, False)

    # NOTE(review): `_skip` looks like a decorator whose `@`-prefix was lost
    # during extraction; reconstructed as a decorator — confirm.
    @_skip
    def test_special_values(self):
        """C99 annex G special values for csqrt."""
        check = check_complex_value
        f = np.sqrt
        check(f, np.PZERO, 0, 0, 0)
        check(f, np.NZERO, 0, 0, 0)
        check(f, 1, np.inf, np.inf, np.inf)
        check(f, -1, np.inf, np.inf, np.inf)
        check(f, np.PZERO, np.inf, np.inf, np.inf)
        check(f, np.NZERO, np.inf, np.inf, np.inf)
        check(f, np.inf, np.inf, np.inf, np.inf)
        check(f, -np.inf, np.inf, np.inf, np.inf)
        check(f, -np.nan, np.inf, np.inf, np.inf)
        check(f, 1, np.nan, np.nan, np.nan)
        check(f, -1, np.nan, np.nan, np.nan)
        check(f, 0, np.nan, np.nan, np.nan)
        check(f, -np.inf, 1, np.PZERO, np.inf)
        check(f, np.inf, 1, np.inf, np.PZERO)

        def _check_ninf_nan(dummy):
            msgform = 'csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)'
            z = np.sqrt(np.array(complex(-np.inf, np.nan)))
            with np.errstate(invalid='ignore'):
                if not (np.isnan(z.real) and np.isinf(z.imag)):
                    raise AssertionError(msgform % (z.real, z.imag))

        _check_ninf_nan(None)
        check(f, np.inf, np.nan, np.inf, np.nan)
        check(f, np.nan, 0, np.nan, np.nan)
        check(f, np.nan, 1, np.nan, np.nan)
        check(f, np.nan, np.nan, np.nan, np.nan)
def dl_urls_concurrent(urls, outfolder, nthreads=1, timeout=1, quality=100, crop=False, resize=256):
    """Download images concurrently in chunks of `nthreads` at a time.

    Files are written as `<outfolder>dl_<index>.jpg`; returns the list of
    per-image results from dl_image.
    """
    os.makedirs(outfolder, exist_ok=True)
    num_dl = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=nthreads) as executor:
        # Submit in chunks so at most `nthreads` downloads are in flight.
        for start in range(0, len(urls), nthreads):
            end = min(len(urls), start + nthreads)
            futures = []
            for offset, url in enumerate(urls[start:end]):
                target = outfolder + f'dl_{(start + offset):03d}.jpg'
                futures.append(executor.submit(dl_image, url, timeout, target,
                                               quality, crop=crop, resize=resize))
            num_dl.extend(fut.result() for fut in futures)
    return num_dl
def get_rotation_matrix(image_shape: Tuple[(int, int, int)], yaw_in_radians: float) -> np.ndarray:
    """Build a 2x3 affine rotation matrix about the image center.

    Args:
        image_shape: (height, width, channels) of the target image.
        yaw_in_radians: heading to convert via angle_of_rotation.
    """
    rotation_in_degrees = angle_of_rotation(yaw_in_radians) * 180 / np.pi
    center = (image_shape[1] / 2, image_shape[0] / 2)
    return cv2.getRotationMatrix2D(center, rotation_in_degrees, 1)
def save_predictions(test_data, submission_file_path):
    """Write one `{'index': ..., 'prediction': ...}` dict repr per row.

    Args:
        test_data: DataFrame with 'index' and 'predictions' columns.
        submission_file_path: destination text file (overwritten).
    """
    with open(submission_file_path, 'w') as f:
        for _, row in test_data.iterrows():
            record = {'index': row['index'], 'prediction': row['predictions']}
            f.write('%s\n' % record)
def execute_unary(type: str) -> List[str]:
    """Query the SPARQL endpoint for all entities of the given type.

    NOTE(review): the PREFIX URLs in the query and the argument(s) of the
    final `.replace(' ')` appear to have been stripped by extraction —
    `.replace` with a single argument raises TypeError at runtime.  The
    mangled text is preserved as-is; restore the original namespace URLs
    before using this function.
    """
    query = (('\n    PREFIX rdf: < PREFIX rdfs: < PREFIX : < \n SELECT (?x0 AS ?value) WHERE {\n SELECT DISTINCT ?x0  WHERE {\n ?x0 :type.object.type :' + type) + '. \n }\n }\n ')
    sparql.setQuery(query)
    try:
        results = sparql.query().convert()
    except urllib.error.URLError:
        print(query)
        exit(0)
    rtn = []
    for result in results['results']['bindings']:
        rtn.append(result['value']['value'].replace(' '))
    return rtn
def _my_elliptic_e(*args): if (len(args) == 1): return scipy.special.ellipe(*args) else: return scipy.special.ellipeinc(*args)
class XLNetForMultipleChoice():
    """Import-time placeholder that raises a helpful error when PyTorch
    is not installed; every entry point defers to requires_pytorch."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def scatter_add_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, axis=None):
    """Backward pass for scatter_add.

    Raises:
        NotImplementedError: always — the gradient is not implemented yet.
    """
    dy = grad_inputs[0]
    x0 = inputs[0]
    raise NotImplementedError('scatter_add_backward is not implemented.')
def save_binary_img(img_name, save_dir, thresh):
    """Write `thresh` to save_dir under a .png name derived from img_name."""
    png_name = img_name[:-4] + '.png'
    # NOTE(review): img_arry (a 0/1 mask of thresh == 255) is computed but
    # never used — the raw `thresh` array is what gets written.  Possibly a
    # latent bug; behavior preserved as-is — confirm intent.
    img_arry = np.zeros(thresh.shape)
    img_arry = (thresh == 255)
    img_arry = img_arry * 1
    cv2.imwrite(os.path.join(save_dir, png_name), thresh)
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs plus identity shortcut."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when the shape changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        return F.relu(out)
class OurPrecomputedInpaintingResultsDataset(OurInpaintingDataset):
    """Inpainting dataset that also loads precomputed prediction images."""

    def __init__(self, datadir, predictdir, inpainted_suffix='png', **kwargs):
        super().__init__(datadir, **kwargs)
        # NOTE(review): this normalization mutates only the local `datadir`
        # after super().__init__ has already consumed it, and the local is
        # not used again — appears to be dead code; preserved as-is.
        if not datadir.endswith('/'):
            datadir += '/'
        self.predictdir = predictdir
        # Map each mask file to '<predictdir>/<stem>_inpainted.<suffix>'.
        self.pred_filenames = [
            os.path.join(predictdir,
                         os.path.basename(os.path.splitext(fname)[0]) + f'_inpainted.{inpainted_suffix}')
            for fname in self.mask_filenames
        ]

    def __getitem__(self, i):
        result = super().__getitem__(i)
        result['inpainted'] = self.file_loader(self.pred_filenames[i])
        if self.pad_out_to_modulo is not None and self.pad_out_to_modulo > 1:
            result['inpainted'] = pad_img_to_modulo(result['inpainted'], self.pad_out_to_modulo)
        return result
class Cleaner(Command):
    """setup.py command that removes build artifacts and caches."""

    user_options = []

    def initialize_options(self):
        # No options to initialize.
        pass

    def finalize_options(self):
        # No options to finalize.
        pass

    def run(self):
        # Shell out to rm; verbose so removed paths are printed.
        os.system('rm -vrf ./build ./dist ./*pyc ./*egg-info')
def count_arithmetic_ops_code(code):
    """Count arithmetic operations in code given as AST node(s) or source.

    Args:
        code: a list/tuple of AST statements, a source string, or a single
            AST node.

    Returns:
        The visitor's accumulated operation count.
    """
    counter = ArithmeticCounter()
    if isinstance(code, (tuple, list)):
        for stmt in code:
            counter.visit(stmt)
    elif isinstance(code, str):
        # Parse raw source before visiting.
        counter.visit(ast.parse(code))
    else:
        counter.visit(code)
    return counter.count
def parse_arguments():
    """Parse training hyperparameters from the command line.

    NOTE(review): the `type=bool` flags (--aug, --save_opt, --use_gpu)
    follow argparse's pitfall that bool('False') is True; any non-empty
    string enables them.  Preserved for interface compatibility.
    """
    parser = argparse.ArgumentParser(description='Parameters to train your model.')
    parser.add_argument('--epochs', default=391, help='Number of epochs to train the model for', type=int)
    parser.add_argument('--bs', default=6, help='Batch size', type=int)
    parser.add_argument('--lr', default=0.0004, help='Learning Rate', type=float)
    parser.add_argument('--wd', default=0.0, help='L2 Weight decay', type=float)
    parser.add_argument('--img_size', default=256, help='Image size to be used for training', type=int)
    parser.add_argument('--aug', default=True, help='Whether to use Image augmentation', type=bool)
    parser.add_argument('--n_worker', default=2, help='Number of workers to use for loading data', type=int)
    parser.add_argument('--test_interval', default=2, help='Number of epochs after which to test the weights', type=int)
    parser.add_argument('--save_interval', default=None, help='Number of epochs after which to save the weights. If None, does not save', type=int)
    parser.add_argument('--save_opt', default=False, help='Whether to save optimizer along with model weights or not', type=bool)
    parser.add_argument('--log_interval', default=250, help='Logging interval (in #batches)', type=int)
    parser.add_argument('--res_mod', default=None, help='Path to the model to resume from', type=str)
    parser.add_argument('--res_opt', default=None, help='Path to the optimizer to resume from', type=str)
    parser.add_argument('--use_gpu', default=True, help='Flag to use GPU or not', type=bool)
    parser.add_argument('--base_save_path', default='./models', help='Base path for the models to be saved', type=str)
    parser.add_argument('--alpha_sal', default=0.7, help='weight for saliency loss', type=float)
    parser.add_argument('--wbce_w0', default=1.0, help='w0 for weighted BCE Loss', type=float)
    parser.add_argument('--wbce_w1', default=1.15, help='w1 for weighted BCE Loss', type=float)
    return parser.parse_args()
def generateAprilTag(canvas, position, metricSize, tagSpacing, tagID, tagFamililyData,
                     rotation=2, symmCorners=True, borderBits=2, ccolor=color.rgb.black):
    """Draw a single AprilTag (border, code bits, optional corner squares).

    Args:
        canvas: unused here; drawing goes to the module-level `c` canvas.
        position: (x, y) lower-left corner of the tag.
        metricSize: tag edge length in metric units.
        tagSpacing: corner-square size as a fraction of metricSize.
        tagID: index into the tag family's code table.
        tagFamililyData: object with tagCodes / totalBits / chosenTagFamiliy.
        rotation: number of 90-degree rotations applied to the code matrix.
        symmCorners: draw the four symmetric corner squares.
        borderBits: border width in bit cells.
        ccolor: fill color.
    """
    try:
        tagCode = tagFamililyData.tagCodes[tagID]
    except:
        # NOTE(review): execution continues after this print, so tagCode is
        # unbound below and raises NameError — preserved as-is; confirm.
        print('[ERROR]: Requested tag ID of {0} not available in the {1} TagFamiliy'.format(tagID, tagFamililyData.chosenTagFamiliy))
    sqrtBits = math.sqrt(tagFamililyData.totalBits)
    bitSquareSize = metricSize / (sqrtBits + borderBits * 2)
    xPos = position[0]
    yPos = position[1]
    borderSize = borderBits * bitSquareSize
    # Four border rectangles: bottom, top, right, left.
    c.fill(path.rect(xPos, yPos, metricSize, borderSize), [ccolor])
    c.fill(path.rect(xPos, (yPos + metricSize) - borderSize, metricSize, borderSize), [ccolor])
    c.fill(path.rect((xPos + metricSize) - borderSize, yPos, borderSize, metricSize), [ccolor])
    c.fill(path.rect(xPos, yPos, borderSize, metricSize), [ccolor])
    # Decode the tag bits into a 0/1 matrix (1 = filled cell).
    codeMatrix = np.zeros((int(sqrtBits), int(sqrtBits)))
    for i in range(0, int(sqrtBits)):
        for j in range(0, int(sqrtBits)):
            if not (tagCode & (1 << ((int(sqrtBits) * i) + j))):
                codeMatrix[(i, j)] = 1
    codeMatrix = np.rot90(codeMatrix, rotation)
    for i in range(0, int(sqrtBits)):
        for j in range(0, int(sqrtBits)):
            if codeMatrix[(i, j)]:
                c.fill(path.rect(xPos + (j + borderBits) * bitSquareSize,
                                 yPos + (((borderBits - 1) + sqrtBits) - i) * bitSquareSize,
                                 bitSquareSize, bitSquareSize), [ccolor])
    if symmCorners:
        print('drawing corners!')
        metricSquareSize = tagSpacing * metricSize
        corners = [
            [xPos - metricSquareSize, yPos - metricSquareSize],
            [xPos + metricSize, yPos - metricSquareSize],
            [xPos + metricSize, yPos + metricSize],
            [xPos - metricSquareSize, yPos + metricSize],
        ]
        for point in corners:
            c.fill(path.rect(point[0], point[1], metricSquareSize, metricSquareSize), [ccolor])
def gae_step_epoch(value_net, optimizer_value, states, returns, l2_reg=0, vf_iter=3, mini_batch_size=128):
    """Fit the value network to empirical returns via mini-batch MSE
    regression with an optional L2 penalty, for `vf_iter` passes."""
    dataset = data_utils.TensorDataset(states, returns)
    loader = data_utils.DataLoader(dataset, batch_size=mini_batch_size, shuffle=True)
    for _ in range(vf_iter):
        for state_batch, returns_batch in loader:
            value_net.zero_grad()
            values_pred = value_net(state_batch)
            value_loss = (values_pred - returns_batch).pow(2).mean()
            # Manual L2 regularization over all parameters.
            for param in value_net.parameters():
                value_loss += param.pow(2).sum() * l2_reg
            value_loss.backward()
            optimizer_value.step()
    return
def crf_inference(img, probs, t=10, scale_factor=1, labels=21):
    """Refine softmax label probabilities with a dense CRF over the image.

    Args:
        img: HxWx3 uint8 image.
        probs: per-class softmax probabilities, shape (labels, H, W).
        t: number of mean-field inference iterations.
        scale_factor: divides the pairwise kernel spatial extents.
        labels: number of classes.

    Returns:
        Refined probabilities with shape (labels, H, W).
    """
    h, w = img.shape[:2]
    n_labels = labels
    d = dcrf.DenseCRF2D(w, h, n_labels)
    unary = np.ascontiguousarray(unary_from_softmax(probs))
    img_c = np.ascontiguousarray(img)
    d.setUnaryEnergy(unary)
    # Smoothness kernel (location only) + appearance kernel (location+color).
    d.addPairwiseGaussian(sxy=3 / scale_factor, compat=3)
    d.addPairwiseBilateral(sxy=80 / scale_factor, srgb=13, rgbim=np.copy(img_c), compat=10)
    Q = d.inference(t)
    return np.array(Q).reshape((n_labels, h, w))
class TachyonTriangleFactory(TriangleFactory):
    """Produces Tachyon triangles bound to a renderer and default texture."""

    def __init__(self, tach, tex):
        self._tachyon = tach
        self._texture = tex

    def triangle(self, a, b, c, color=None):
        """Flat triangle; falls back to the factory texture when no color."""
        if color is None:
            return TachyonTriangle(a, b, c, self._texture)
        return TachyonTriangle(a, b, c, color)

    def smooth_triangle(self, a, b, c, da, db, dc, color=None):
        """Smooth (per-vertex normal) triangle with the same color fallback."""
        if color is None:
            return TachyonSmoothTriangle(a, b, c, da, db, dc, self._texture)
        return TachyonSmoothTriangle(a, b, c, da, db, dc, color)

    def get_colors(self, list):
        """Recolor the factory texture for each entry of `list`."""
        return self._tachyon.texture_recolor(self._texture, list)
def interpolate_rocs(id_to_roc_dictionary, eval_fpr_points=None):
    """Interpolate each ROC curve's TPR onto a shared FPR grid.

    Args:
        id_to_roc_dictionary: maps id -> (fpr, tpr, thresholds).
        eval_fpr_points: FPR grid; defaults to 1000 log-spaced points in
            [1e-6, 1].

    Returns:
        (eval_fpr_points, {id: interpolated tpr array}).
    """
    if eval_fpr_points is None:
        eval_fpr_points = np.logspace(-6, 0, 1000)
    interpolated_tprs = {
        key: np.interp(eval_fpr_points, fpr, tpr)
        for key, (fpr, tpr, _) in id_to_roc_dictionary.items()
    }
    return (eval_fpr_points, interpolated_tprs)
def execute_binary(relation: str) -> List[Tuple[(str, str)]]:
    """Query the SPARQL endpoint for all (subject, object) pairs of a relation.

    NOTE(review): the PREFIX URLs in the query appear to have been stripped
    during extraction; restore the original namespace URLs before use.
    """
    query = (('\n     PREFIX rdf: < PREFIX rdfs: < PREFIX : < \n    SELECT DISTINCT ?x0 ?x1 WHERE {\n    ?x0 :' + relation) + ' ?x1. \n    }\n    ')
    sparql.setQuery(query)
    try:
        results = sparql.query().convert()
    except urllib.error.URLError:
        print(query)
        exit(0)
    rtn = []
    for result in results['results']['bindings']:
        rtn.append((result['x0']['value'], result['x1']['value']))
    return rtn
# NOTE(review): the leading `_spec_function('interactive_qa_mmlu')` fragment
# looks like a decorator whose `@`-prefix was lost during extraction;
# reconstructed as a decorator — confirm.
@_spec_function('interactive_qa_mmlu')
def get_interactive_qa_mmlu_spec(subject: str) -> RunSpec:
    """Build the RunSpec for the interactive-QA MMLU scenario of a subject."""
    scenario_spec = ScenarioSpec(
        class_name='helm.benchmark.scenarios.interactive_qa_mmlu_scenario.InteractiveQAMMLUScenario',
        args={'subject': subject})
    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions=f"The following are multiple choice questions (with answers) about {subject.replace('_', ' ')}.",
        input_noun='Question',
        output_noun='Answer')
    return RunSpec(
        name=f'interactive_qa_mmlu:subject={subject}',
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=['mmlu'])
def read_nli_split(split_dir):
    """Load premise/hypothesis pairs from a tab-separated split file.

    Each line holds two sentences; the one with more whitespace tokens
    becomes the premise (ties go to the second column's sentence).
    A missing file is reported and yields two empty lists.

    Returns:
        (premise_list, hypothesis_list)
    """
    premises = []
    hypotheses = []
    try:
        with open(split_dir, 'r', encoding='utf-8') as src:
            for raw_line in src:
                fields = raw_line.strip().split('\t')
                sent_1, sent_2 = fields[0], fields[1]
                # Longer sentence (token count) is the premise; tie -> sent_2.
                if len(sent_1.split(' ')) > len(sent_2.split(' ')):
                    premise, hypothesis = sent_1, sent_2
                else:
                    premise, hypothesis = sent_2, sent_1
                premises.append(premise)
                hypotheses.append(hypothesis)
    except FileNotFoundError:
        print(f"Sorry! The file {split_dir} can't be found.")
    return premises, hypotheses
# FIX/NOTE(review): the decorators were lost in extraction ('_utils.test(...)'
# and bare inner defs); restored as Taichi's test decorator plus @ti.func /
# @ti.kernel, which ti.template() parameters and ti.ad.Tape require — confirm.
@test_utils.test(exclude=[ti.vulkan, ti.opengl, ti.dx11])
def test_ad_frac():
    """Autodiff through a signed fractional-part function: the gradient of
    frac(x)**2 summed over a field must equal 2*frac(x) elementwise."""

    @ti.func
    def frac(x):
        # Signed fractional part: floor for positives, ceil for negatives.
        fractional = x - ti.floor(x) if x > 0.0 else x - ti.ceil(x)
        return fractional

    @ti.kernel
    def ti_frac(input_field: ti.template(), output_field: ti.template()):
        for i in input_field:
            output_field[i] = frac(input_field[i]) ** 2

    @ti.kernel
    def calc_loss(input_field: ti.template(), loss: ti.template()):
        for i in input_field:
            loss[None] += input_field[i]

    n = 10
    field0 = ti.field(dtype=ti.f32, shape=(n,), needs_grad=True)
    randoms = np.random.randn(10).astype(np.float32)
    field0.from_numpy(randoms)
    field1 = ti.field(dtype=ti.f32, shape=(n,), needs_grad=True)
    loss = ti.field(dtype=ti.f32, shape=(), needs_grad=True)
    with ti.ad.Tape(loss):
        ti_frac(field0, field1)
        calc_loss(field1, loss)
    grads = field0.grad.to_numpy()
    # d/dx frac(x)**2 = 2*frac(x); np.modf has the same sign convention.
    expected = np.modf(randoms)[0] * 2
    for i in range(n):
        assert grads[i] == test_utils.approx(expected[i], rel=0.0001)
def _save_output(args, shard_name, ids, data, name='features', suffix='.pkl', prefix=''):
    """Collect per-sample features from every extractor and pickle one shard.

    For each sample id, gathers one feature dict per model in `data`,
    routing it to the audio or video list depending on the model key, and
    copies the sample's file/shard metadata. List/tuple feature arrays are
    re-keyed as {'layer_i': value}.

    Returns:
        Path the pickle was written to.
    """
    video_key = 'video_{}'.format(name)
    audio_key = 'audio_{}'.format(name)
    meta_keys = ['filename', 'shard_size', 'shard_name']
    rows = []
    for idx in ids:
        row = {video_key: [], audio_key: []}
        for model_feat in data:
            point_feat = model_feat['data'][idx]
            feature = {
                'model_key': model_feat['model_key'],
                'extractor_name': model_feat['name'],
                'dataset': model_feat['dataset'],
                'array': point_feat[name],
            }
            # Multi-layer outputs become a dict keyed by layer index.
            if isinstance(feature['array'], (tuple, list)):
                feature['array'] = {'layer_{}'.format(i): v
                                    for i, v in enumerate(feature['array'])}
            for key in meta_keys:
                row[key] = point_feat[key]
            if feature['model_key'] in args.model_types.audio:
                row[audio_key].append(feature)
            else:
                row[video_key].append(feature)
        rows.append(row)
    out_path = get_out_path(args, prefix + shard_name + suffix)
    dump_pickle(rows, out_path)
    return out_path
def get_pseudo_label_NRL(args, logger):
    """Compute and cache NRL (node-relation) pseudo labels for all samples.

    For each hop k in [1, args.label_khop], writes one cumulative pickle
    'NRL-hop_<k>-...' under <howto100m_dir>/pseudo_labels. Hop 1 is seeded
    from the cached VNM (video-node matching) labels; hop k>1 extends the
    (k-1)-hop in/out neighbor sets through the procedural knowledge graph.
    Existing output files are skipped. Returns None.
    """
    logger.info('getting NRL pseudo labels...')
    from datasets.build_knowledge.get_nodes import get_nodes
    (node2step, step2node) = get_nodes(args, logger)
    from datasets.build_knowledge.get_edges import get_edges
    (pkg, _, _) = get_edges(args, logger)
    pkg_tr = np.transpose(pkg)
    # Sparse CSR forms of the PKG adjacency: pkg for out-edges, pkg_tr for in-edges.
    (pkg, pkg_tr) = (csr_matrix(pkg), csr_matrix(pkg_tr))
    sim_score_path = os.path.join(args.howto100m_dir, 'sim_scores')  # NOTE(review): appears unused here
    sample_pseudo_label_savedir = os.path.join(args.howto100m_dir, 'pseudo_labels')
    os.makedirs(sample_pseudo_label_savedir, exist_ok=True)
    for khop in range(1, (args.label_khop + 1)):
        logger.info('Hop {}...'.format(khop))
        sample_pseudo_label_savepath = os.path.join(sample_pseudo_label_savedir, 'NRL-hop_{}-criteria_{}-threshold_{}-topK_{}-size_{}.pickle'.format(khop, args.label_find_neighbors_criteria, args.label_find_neighbors_thresh, args.label_find_neighbors_topK, args.num_nodes))
        if (not os.path.exists(sample_pseudo_label_savepath)):
            pseudo_label_NRL = dict()
            if (khop == 1):
                load_time_start = time.time()
                with open(os.path.join(sample_pseudo_label_savedir, 'VNM-criteria_{}-threshold_{}-topK_{}-size_{}.pickle'.format(args.label_find_matched_nodes_criteria, args.label_find_matched_nodes_for_segments_thresh, args.label_find_matched_nodes_for_segments_topK, args.num_nodes)), 'rb') as f:
                    pseudo_label_VNM = pickle.load(f)
                logger.info('loading VNM pseudo labels for ALL {} samples took {} seconds'.format(len(pseudo_label_VNM), round((time.time() - load_time_start), 2)))
                for sample_index in tqdm(range(len(pseudo_label_VNM))):
                    # Hop 1: the matched nodes seed both in- and out-neighbor expansion.
                    pseudo_label_NRL[sample_index] = get_pseudo_label_NRL_for_one_segment(args, logger, khop, pseudo_label_VNM[sample_index], pseudo_label_VNM[sample_index], pkg, pkg_tr)
            else:
                load_time_start = time.time()
                with open(os.path.join(sample_pseudo_label_savedir, 'NRL-hop_{}-criteria_{}-threshold_{}-topK_{}-size_{}.pickle'.format((khop - 1), args.label_find_neighbors_criteria, args.label_find_neighbors_thresh, args.label_find_neighbors_topK, args.num_nodes)), 'rb') as f:
                    pseudo_label_NRL_previous_hop = pickle.load(f)
                logger.info('loading NRL pseudo labels PREVIOUS HOP for ALL {} samples took {} seconds'.format(len(pseudo_label_NRL_previous_hop), round((time.time() - load_time_start), 2)))
                for sample_index in tqdm(range(len(pseudo_label_NRL_previous_hop))):
                    in_neighbors_previous_hop = pseudo_label_NRL_previous_hop[sample_index]['{}-hop-in'.format((khop - 1))]
                    out_neighbors_previous_hop = pseudo_label_NRL_previous_hop[sample_index]['{}-hop-out'.format((khop - 1))]
                    pseudo_label_NRL[sample_index] = get_pseudo_label_NRL_for_one_segment(args, logger, khop, in_neighbors_previous_hop, out_neighbors_previous_hop, pkg, pkg_tr)
                    # Merge the previous hop's entries so each file is cumulative.
                    pseudo_label_NRL[sample_index].update(pseudo_label_NRL_previous_hop[sample_index])
            with open(sample_pseudo_label_savepath, 'wb') as f:
                pickle.dump(pseudo_label_NRL, f)
            logger.info('{} saved!'.format(sample_pseudo_label_savepath))
    logger.info('finished getting NRL pseudo labels!')
    return
class Test_Metropolis(object):
    """Unit tests for the Metropolis accept/reject criterion."""

    def setup_method(self):
        # Fresh criterion at temperature 2.0 before each test.
        self.T = 2.0
        self.met = Metropolis(self.T)

    def test_boolean_return(self):
        # The criterion must report a plain bool, not a numpy scalar.
        outcome = self.met(f_new=0.0, f_old=1.0)
        assert isinstance(outcome, bool)

    def test_lower_f_accepted(self):
        # A strictly better (lower) energy is always accepted.
        assert_(self.met(f_new=0.0, f_old=1.0))

    def test_KeyError(self):
        # Both energies are mandatory keyword arguments.
        assert_raises(KeyError, self.met, f_old=1.0)
        assert_raises(KeyError, self.met, f_new=1.0)

    def test_accept(self):
        # A worse move must be accepted sometimes and rejected sometimes.
        saw_accept = False
        saw_reject = False
        for _ in range(1000):
            if saw_accept and saw_reject:
                break
            if self.met(f_new=1.0, f_old=0.5):
                saw_accept = True
            else:
                saw_reject = True
        assert_(saw_accept)
        assert_(saw_reject)

    def test_GH7495(self):
        # Regression: a huge energy gap must not overflow exp() (gh-7495).
        met = Metropolis(2)
        with np.errstate(over='raise'):
            met.accept_reject(0, 2000)
class AlbrightPreprocesser(Preprocesser):
    """Preprocesser for the Albright past-tense data; every kept form is
    tagged V;PST."""

    def __init__(self, opt):
        super().__init__(opt)
        self.TAG = 'V;PST'

    def read_unimorph_data(self, file):
        """Read a UniMorph TSV, keeping only rows whose tag is V;PST.

        Returns (data, edit): lemma -> original line text, and
        lemma -> edit script from lemma to inflected form.
        """
        data, edit = dict(), dict()
        with open(file, 'r', encoding='utf-8') as fp:
            for line in fp.readlines():
                if line == '\n':
                    continue
                lemma, word, tags = line.strip().split('\t')
                if tags != self.TAG:
                    continue
                data[lemma] = '\t'.join([lemma, word, tags]) + '\n'
                edit[lemma] = edit_distance(lemma, word)
        return data, edit

    def read_data(self, file):
        """Read a lemma/word TSV; every row is (re)tagged V;PST.

        Returns (data, edit) with the same shapes as read_unimorph_data.
        """
        data, edit = dict(), dict()
        with open(file, 'r', encoding='utf-8') as fp:
            for line in fp.readlines():
                if line == '\n':
                    continue
                lemma, word, *_ = line.strip().split('\t')
                data[lemma] = '\t'.join([lemma, word, self.TAG]) + '\n'
                edit[lemma] = edit_distance(lemma, word)
        return data, edit

    def match_edit_script(self, short_script, long_script):
        """Delegate to the module-level script matcher."""
        return match_script(short_script, long_script)
class TestMetric(object):
    """Tests for chunk-level precision/recall/F1 reporting."""

    def _assert_scores_equal(self, ave_scores, expected_ave_scores):
        # Compare macro/micro P/R/F1 within a small float tolerance.
        for key in ave_scores:
            for score_key in ['precision', 'recall', 'f1']:
                assert numpy.abs(ave_scores[key][score_key] - expected_ave_scores[key][score_key]) < 1e-06

    # FIX/NOTE(review): the decorator was mangled to bare '.parametrize' in the
    # extracted source; restored as @pytest.mark.parametrize — confirm.
    @pytest.mark.parametrize('tags_gold_data, tags_pred_data, expected_ave_scores', [([['B-A', 'B-B', 'O', 'B-A']], [['O', 'B-B', 'B-C', 'B-A']], {'macro': {'precision': (2 / 3), 'recall': (1 / 2), 'f1': (5 / 9)}, 'micro': {'precision': (2 / 3), 'recall': (2 / 3), 'f1': (2 / 3)}}), ([['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']], [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']], {'macro': {'precision': 0.5, 'recall': 0.5, 'f1': 0.5}, 'micro': {'precision': 0.5, 'recall': 0.5, 'f1': 0.5}})])
    def test_example(self, tags_gold_data, tags_pred_data, expected_ave_scores):
        """Hand-checked BIO2 examples."""
        translator = ChunksTagsTranslator(scheme='BIO2')
        chunks_gold_data = [translator.tags2chunks(tags) for tags in tags_gold_data]
        chunks_pred_data = [translator.tags2chunks(tags) for tags in tags_pred_data]
        scores, ave_scores = precision_recall_f1_report(chunks_gold_data, chunks_pred_data)
        self._assert_scores_equal(ave_scores, expected_ave_scores)

    def test_conll2000(self):
        """Reproduce conlleval reference scores on the CoNLL-2000 sample."""
        gold_data = ConllIO(text_col_id=0, tag_col_id=2, scheme='BIO2').read('data/conlleval/output.txt')
        pred_data = ConllIO(text_col_id=0, tag_col_id=3, scheme='BIO2').read('data/conlleval/output.txt')
        chunks_gold_data = [ex['chunks'] for ex in gold_data]
        chunks_pred_data = [ex['chunks'] for ex in pred_data]
        # NOTE(review): the zero entries below (with one 0.5877642) look like
        # digits lost in extraction — verify against the conlleval reference.
        expected_ave_scores = {'macro': {'precision': 0., 'recall': 0.5877642, 'f1': 0.}, 'micro': {'precision': 0., 'recall': 0., 'f1': 0.}}
        scores, ave_scores = precision_recall_f1_report(chunks_gold_data, chunks_pred_data)
        self._assert_scores_equal(ave_scores, expected_ave_scores)
def parse_sql(toks, start_idx, tables_with_alias, schema):
    """Parse one (possibly parenthesized) SELECT statement from `toks`.

    Returns (next_idx, sql) where sql maps clause names
    ('from'/'select'/'where'/'groupBy'/'having'/'orderBy'/'limit') to their
    parsed forms, plus one key per set operator in SQL_OPS holding a nested
    parse_sql result or None.
    """
    isBlock = False  # True when the statement is wrapped in parentheses
    len_ = len(toks)
    idx = start_idx
    sql = {}
    if (toks[idx] == '('):
        isBlock = True
        idx += 1
    # FROM is parsed first regardless of textual clause order so that the
    # table aliases (default_tables) are known when resolving columns.
    (from_end_idx, table_units, conds, default_tables) = parse_from(toks, start_idx, tables_with_alias, schema)
    sql['from'] = {'table_units': table_units, 'conds': conds}
    if IN_EXECUTION_ORDER:
        # Clauses appear in execution order:
        # WHERE -> GROUP BY -> HAVING -> SELECT -> ORDER BY -> LIMIT.
        idx = from_end_idx
        (idx, where_conds) = parse_where(toks, idx, tables_with_alias, schema, default_tables)
        sql['where'] = where_conds
        (idx, group_col_units) = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
        sql['groupBy'] = group_col_units
        (idx, having_conds) = parse_having(toks, idx, tables_with_alias, schema, default_tables)
        sql['having'] = having_conds
        (idx, select_col_units) = parse_select(toks, idx, tables_with_alias, schema, default_tables)
        sql['select'] = select_col_units
        (idx, order_col_units) = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
        sql['orderBy'] = order_col_units
        (idx, limit_val) = parse_limit(toks, idx)
        sql['limit'] = limit_val
    else:
        # Written (SQL) order: SELECT is parsed from the start, then the
        # cursor jumps past FROM and continues with WHERE etc.
        (_, select_col_units) = parse_select(toks, idx, tables_with_alias, schema, default_tables)
        idx = from_end_idx
        sql['select'] = select_col_units
        (idx, where_conds) = parse_where(toks, idx, tables_with_alias, schema, default_tables)
        sql['where'] = where_conds
        (idx, group_col_units) = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
        sql['groupBy'] = group_col_units
        (idx, having_conds) = parse_having(toks, idx, tables_with_alias, schema, default_tables)
        sql['having'] = having_conds
        (idx, order_col_units) = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
        sql['orderBy'] = order_col_units
        (idx, limit_val) = parse_limit(toks, idx)
        sql['limit'] = limit_val
    idx = skip_semicolon(toks, idx)
    if isBlock:
        # A parenthesized statement must close before any set operator.
        assert (toks[idx] == ')')
        idx += 1
        idx = skip_semicolon(toks, idx)
    for op in SQL_OPS:
        sql[op] = None
    if ((idx < len_) and (toks[idx] in SQL_OPS)):
        # INTERSECT / UNION / EXCEPT: recursively parse the right-hand query.
        sql_op = toks[idx]
        idx += 1
        (idx, IUE_sql) = parse_sql(toks, idx, tables_with_alias, schema)
        sql[sql_op] = IUE_sql
    return (idx, sql)
class LibFuzzerWatcher(Watcher):
    """Watcher that follows libFuzzer's queue/ and crashes/ output
    directories and classifies new test cases by file-name prefix."""

    def __init__(self, config: WatcherConfig):
        watched = (config.output_dir / 'queue', config.output_dir / 'crashes')
        super().__init__(watched)

    def _manage_directories(self) -> None:
        # Block until every watched directory exists on disk.
        for directory in self._target_directories:
            logger.debug(f'Waiting on directory: {directory}')
            self._wait_for_dir(directory)

    def _ignore_test_case(self, test_case_path: Path) -> bool:
        # 'framework-' files are the harness's own bookkeeping, not seeds.
        return test_case_path.name.startswith('framework-')

    def _get_test_case_type(self, test_case_path: Path) -> SeedType:
        """Map libFuzzer file-name prefixes onto seed categories."""
        prefix_to_type = (
            ('crash-', SeedType.CRASH),
            ('leak-', SeedType.CRASH),
            ('timeout-', SeedType.HANG),
            ('oom-', SeedType.HANG),
        )
        for prefix, seed_type in prefix_to_type:
            if test_case_path.name.startswith(prefix):
                return seed_type
        return SeedType.NORMAL
def test_conditional_assignment(simple_module, tracer_mock):
    """Instrumenting conditional_assignment must register exactly one
    predicate and one code object, and record one executed comparison."""
    instrumentation = BranchCoverageInstrumentation(tracer_mock)
    transformer = InstrumentationTransformer(tracer_mock, [instrumentation])
    original_code = simple_module.conditional_assignment.__code__
    simple_module.conditional_assignment.__code__ = transformer.instrument_module(original_code)
    simple_module.conditional_assignment(10)
    tracer_mock.register_predicate.assert_called_once()
    tracer_mock.executed_compare_predicate.assert_called_once()
    assert tracer_mock.register_code_object.call_count == 1
    tracer_mock.executed_code_object.assert_has_calls([call(0)])
# FIX/NOTE(review): the decorator was truncated to '.expansion' in the
# extracted source; restored as DaCe's @dace.library.expansion, and
# `expansion` restored as a @staticmethod per the ExpandTransformation
# convention — confirm against the original file.
@dace.library.expansion
class ExpandAxpyFpga(ExpandTransformation):
    """FPGA expansion of the AXPY library node: reuses the vectorized
    expansion, pinned to the FPGA_Device schedule."""

    environments = []

    @staticmethod
    def expansion(node, parent_state: SDFGState, parent_sdfg: SDFG, **kwargs):
        # Delegate to the generic vectorized expansion with an FPGA schedule.
        return ExpandAxpyVectorized.expansion(
            node, parent_state, parent_sdfg,
            schedule=dace.ScheduleType.FPGA_Device, **kwargs)
def write_video(path, savepath, size):
    """Encode all .jpg/.png frames under *path* (sorted by file name) into
    an MJPG video at *savepath* at 20 fps.

    Args:
        path: directory containing the frame images.
        savepath: output video file path.
        size: (width, height) expected by the writer.
    """
    frame_rate = 20
    codec = cv2.VideoWriter_fourcc(*'MJPG')
    writer = cv2.VideoWriter(savepath, codec, float(frame_rate), size)
    for entry in sorted(os.listdir(path)):
        if not (entry.endswith('.jpg') or entry.endswith('.png')):
            continue
        frame = cv2.imread(path + '/' + entry)
        writer.write(frame)
    writer.release()
    cv2.destroyAllWindows()
def process_image(img, scale, isotropic, crop, mean, rescale, need_rescale):
    """Resize/crop an image tensor and normalize it (TF1 graph ops).

    When need_rescale: resize (isotropically to short side `scale`, or to
    scale x scale), then center-crop to crop x crop. Otherwise resize
    directly to crop x crop. Finally map pixel values from [0, 255] into
    [l, r] = rescale and subtract `mean`.
    """
    if need_rescale:
        if isotropic:
            # Scale the short side to `scale`, preserving aspect ratio.
            img_shape = tf.to_float(tf.shape(img)[:2])
            min_length = tf.minimum(img_shape[0], img_shape[1])
            new_shape = tf.to_int32((scale / min_length) * img_shape)
        else:
            new_shape = tf.stack([scale, scale])
        img = tf.image.resize_images(img, (new_shape[0], new_shape[1]))
        # Center-crop to crop x crop.
        offset = tf.to_int32((new_shape - crop) // 2)
        img = tf.slice(img, begin=tf.stack([offset[0], offset[1], 0]),
                       size=tf.stack([crop, crop, -1]))
    else:
        # BUG FIX: resize_images takes a single `size` argument; the previous
        # call resize_images(img, crop, crop) passed the second `crop` into
        # the `method` parameter.
        img = tf.image.resize_images(img, (crop, crop))
    img = tf.to_float(img)
    l, r = rescale
    img = (img / 255.0) * (r - l) + l
    img = img - mean
    return img
class RPN(nn.Module):
    """Standalone Region Proposal Network: backbone + torchvision
    RegionProposalNetwork + input transform, with a final per-image NMS
    capping detections at n_max_det.

    forward() returns RPN losses in training mode and
    (detections, proposals) in eval mode, both mapped back to the original
    image sizes.
    """

    def __init__(self, backbone, min_size=800, max_size=2000, image_mean=None,
                 image_std=None, rpn_anchor_generator=None, rpn_head=None,
                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
                 rpn_nms_thresh=0.7, rpn_fg_iou_thresh=0.7,
                 rpn_bg_iou_thresh=0.3, rpn_batch_size_per_image=256,
                 rpn_positive_fraction=0.5, weight_loss=False,
                 last_nms_iou_thres=0.3, n_max_det=30):
        super(RPN, self).__init__()
        if (not hasattr(backbone, 'out_channels')):
            raise ValueError('backbone should contain an attribute out_channels specifying the number of output channels (assumed to be the same for all the levels)')
        assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None)))
        self.backbone = backbone
        out_channels = backbone.out_channels
        if (rpn_anchor_generator is None):
            # One anchor size per feature level, three aspect ratios each.
            anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
            aspect_ratios = (((0.5, 1.0, 2.0),) * len(anchor_sizes))
            rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
        if (rpn_head is None):
            rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])
        rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)
        rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)
        rpn = RegionProposalNetwork(rpn_anchor_generator, rpn_head, rpn_fg_iou_thresh, rpn_bg_iou_thresh, rpn_batch_size_per_image, rpn_positive_fraction, rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh, weight_loss=weight_loss)
        self.rpn = rpn
        self.last_nms_iou_thres = last_nms_iou_thres
        self.n_max_det = n_max_det
        if (image_mean is None):
            # ImageNet normalization defaults.
            image_mean = [0.485, 0.456, 0.406]
        if (image_std is None):
            image_std = [0.229, 0.224, 0.225]
        transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
        self.transform = transform

    def filter_proposals(self, proposals, scores):
        """Per-image NMS at IoU last_nms_iou_thres, then keep top n_max_det."""
        final_boxes = []
        final_scores = []
        # NOTE(review): the loop variable `scores` shadows the argument of the
        # same name; correct for this loop but easy to misread.
        for (boxes, scores) in zip(proposals, scores):
            keep = box_ops.nms(boxes, scores, self.last_nms_iou_thres)
            keep = keep[:self.n_max_det]
            (boxes, scores) = (boxes[keep], scores[keep])
            final_boxes.append(boxes)
            final_scores.append(scores)
        return (final_boxes, final_scores)

    def forward(self, images, targets=None):
        """Run the backbone + RPN; see class docstring for return values."""
        if (self.training and (targets is None)):
            raise ValueError('In training mode, targets should be passed')
        original_image_sizes = [img.shape[(- 2):] for img in images]
        (images, targets) = self.transform(images, targets)
        features = self.backbone(images.tensors)
        if isinstance(features, torch.Tensor):
            # NOTE(review): torchvision conventionally uses the string key '0'
            # for single-level features; integer 0 is used here — confirm it
            # is intentional.
            features = OrderedDict([(0, features)])
        (proposals, scores, proposal_losses) = self.rpn(images, features, targets)
        (boxes, scores) = self.filter_proposals(proposals, scores)
        result = []
        for i in range(len(scores)):
            score = scores[i].cpu().numpy()
            eps = 0.05
            # Min-max normalize the per-image scores, shifted by eps so that
            # no kept detection ends up with score exactly 0.
            score = (((score - np.min(score)) / abs((np.max(score) - np.min(score)))) + eps)
            score = torch.tensor(score)
            result.append({'boxes': boxes[i], 'scores': score, 'labels': torch.tensor(([1] * len(scores[i])))})
        detections = self.transform.postprocess(result, images.image_sizes, original_image_sizes)
        # Map the raw (pre-filter) proposals back to original image coordinates.
        for (i, (pred, im_s, o_im_s)) in enumerate(zip(proposals, images.image_sizes, original_image_sizes)):
            boxes = resize_boxes(pred, im_s, o_im_s)
            proposals[i] = boxes
        losses = {}
        losses.update(proposal_losses)
        if self.training:
            return losses
        return (detections, proposals)
def test_clean_output_format(df_countries: pd.DataFrame) -> None:
    """clean_country must honor every supported output_format, appending a
    'messy_country_clean' column with the expected representation."""
    expected_by_format = {
        'official': ['Canada', 'Canada', np.nan, np.nan, 'Ireland',
                     'Democratic Republic of the Congo', 'Republic of the Congo',
                     'Greenland', 'Republic of Estonia', 'Republic of Yemen',
                     'American Samoa', 'Republic of Turkey', 'Belize',
                     'Argentine Republic', 'Bouvet Island', 'New Zealand',
                     np.nan, np.nan, np.nan],
        'alpha-2': ['CA', 'CA', np.nan, np.nan, 'IE', 'CD', 'CG', 'GL', 'EE',
                    'YE', 'AS', 'TR', 'BZ', 'AR', 'BV', 'NZ',
                    np.nan, np.nan, np.nan],
        'alpha-3': ['CAN', 'CAN', np.nan, np.nan, 'IRL', 'COD', 'COG', 'GRL',
                    'EST', 'YEM', 'ASM', 'TUR', 'BLZ', 'ARG', 'BVT', 'NZL',
                    np.nan, np.nan, np.nan],
        'numeric': ['124', '124', np.nan, np.nan, '372', '180', '178', '304',
                    '233', '887', '16', '792', '84', '32', '74', '554',
                    np.nan, np.nan, np.nan],
    }
    for output_format, expected_values in expected_by_format.items():
        cleaned = clean_country(df_countries, 'messy_country', output_format=output_format)
        reference = df_countries.copy()
        reference['messy_country_clean'] = expected_values
        assert cleaned.equals(reference)
class BasicBlock(nn.Module):
    """ResNet basic block (two 3x3 convs) using SELU activations instead of
    the usual ReLU. The shortcut is the identity unless the spatial stride
    or channel count changes, in which case a 1x1 conv + BN projection is
    used."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when the residual shape would mismatch.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.selu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.selu(out + residual)
def test_listoffsetarray():
    """ak.num must report element counts at every axis of a ListOffsetArray
    and reject an axis beyond the array depth."""
    content = ak.contents.NumpyArray(np.arange(2 * 3 * 5).reshape(5, 3, 2))
    offsets = ak.index.Index64(np.array([0, 3, 3, 5], dtype=np.int64))
    array = ak.contents.ListOffsetArray(offsets, content)
    assert to_list(array) == [
        [[[0, 1], [2, 3], [4, 5]],
         [[6, 7], [8, 9], [10, 11]],
         [[12, 13], [14, 15], [16, 17]]],
        [],
        [[[18, 19], [20, 21], [22, 23]],
         [[24, 25], [26, 27], [28, 29]]],
    ]
    expected_per_axis = {
        0: 3,
        1: [3, 0, 2],
        2: [[3, 3, 3], [], [3, 3]],
        3: [[[2, 2, 2], [2, 2, 2], [2, 2, 2]], [], [[2, 2, 2], [2, 2, 2]]],
    }
    for axis, expected in expected_per_axis.items():
        assert to_list(ak.num(array, axis)) == expected
    with pytest.raises(np.AxisError) as err:
        ak.num(array, 4)
    assert 'axis=4 exceeds the depth' in str(err.value)
class FFA(Mode):
    """Free-for-all game mode: every player spawns independently with a
    random color and position."""

    def __init__(self):
        Mode.__init__(self)
        self.ID = 0
        self.name = 'Free For All'
        # presumably spectators follow the leaderboard order — flag name only
        self.specByLeaderboard = True

    def onPlayerSpawn(self, gameServer, player):
        # Fresh random color and position on every (re)spawn.
        player.color = gameServer.getRandomColor()
        gameServer.spawnPlayer(player, gameServer.randomPos2())
def epoch_val(net, testloader):
    """Run one validation epoch over `testloader` on GPU.

    The model must return (logits, reconstruction, _); loss is
    cross-entropy on logits plus MSE between reconstruction and input.

    Returns:
        [accuracy_percent, mean_cls_loss, mean_reconst_loss, mean_total_loss]
        averaged over batches (accuracy over samples).
    """
    net.eval()
    correct = 0
    total = 0
    total_loss = 0.0
    total_cls_loss = 0.0
    total_reconst_loss = 0.0
    num_batches = 0  # FIX: renamed from `iter`, which shadowed the builtin
    cls_criterion = nn.CrossEntropyLoss()
    reconst_criterion = nn.MSELoss()
    with torch.no_grad():
        for images, labels in testloader:
            images = images.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)
            logits, reconstruct, _ = net(images)
            cls_loss = cls_criterion(logits, labels)
            reconst_loss = reconst_criterion(reconstruct, images)
            loss = cls_loss + reconst_loss
            total_loss += loss.item()
            total_cls_loss += cls_loss.item()
            total_reconst_loss += reconst_loss.item()
            _, predicted = torch.max(logits.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            num_batches += 1
    return [100 * (correct / total),
            total_cls_loss / num_batches,
            total_reconst_loss / num_batches,
            total_loss / num_batches]
# FIX/NOTE(review): the decorator was truncated to '_func' in the extracted
# source; restored as Taichi's @ti.func (the body uses Taichi vector math
# and swizzles) — confirm against the original file.
@ti.func
def calc_normal(obj: SDFObject, p: vec3) -> vec3:
    """Estimate the SDF surface normal at p with the 4-sample tetrahedron
    rule (central differences along four symmetric offsets)."""
    e = vec2(1, -1) * 0.5773 * 0.005
    return normalize(
        e.xyy * signed_distance(obj, p + e.xyy)
        + e.yyx * signed_distance(obj, p + e.yyx)
        + e.yxy * signed_distance(obj, p + e.yxy)
        + e.xxx * signed_distance(obj, p + e.xxx))
def train_step(input, target, model, loss_fn, optimizer, **unused):
    """Run one optimization step: forward pass, loss, backward, update.

    Note: the backward pass is delegated to the optimizer
    (optimizer.backward(loss)), not called on the loss tensor — the
    optimizer wrapper owns gradient computation. Returns None.
    """
    model.train()
    prediction = model(input)
    step_loss = loss_fn(prediction, target)
    optimizer.backward(step_loss)
    optimizer.step()
def test_glad(opts):
    """End-to-end run of the GLAD/AFSS pipeline on one anomaly dataset:
    build a LODA ensemble, take the top-ranked anomalies as feedback,
    train the AFSS weighting network for 10 rounds, and (optionally) plot
    scores before and after.

    NOTE(review): the grouping of the plotting calls under `if opts.plot`
    was inferred from a whitespace-collapsed source — confirm indentation
    against the original file.
    """
    set_results_dir(opts)
    dir_create(opts.results_dir)
    # Three related seeds so sub-generators are decorrelated but reproducible.
    set_random_seeds(opts.randseed, (opts.randseed + 1), (opts.randseed + 2))
    (x, y) = read_anomaly_dataset(opts.dataset, datafile=opts.datafile)
    ensemble = prepare_loda_ensemble(x, mink=opts.loda_mink, maxk=opts.loda_maxk, debug=(opts.loda_debug and (x.shape[1] == 2)), m=4)
    xx = yy = None
    # hf: indices of the top-ranked (presumed anomalous) instances used as feedback.
    (hf, _) = get_top_ranked_instances(x, ensemble, n=opts.n_anoms)
    if opts.plot:
        (xx, yy) = plot_ensemble_scores(x, y, ensemble, selected=None, dataset=opts.dataset, outpath=opts.results_dir)
    afss = get_afss_model(opts, n_output=ensemble.m)
    afss.init_network(x, prime_network=True)
    if opts.debug:
        afss.log_probability_ranges(x)
    # Snapshot the feedback instances' weighted scores before training.
    hf_scores_before = afss.get_weighted_scores(x[hf], ensemble.get_scores(x[hf]))
    if opts.plot:
        (xx, yy) = plot_weighted_scores(x, y, ensemble, afss, selected=None, xx=xx, yy=yy, contour_levels=20, name='_before', n_anoms=opts.n_anoms, dataset=opts.dataset, outpath=opts.results_dir)
        selected = None
        (xx, yy) = plot_afss_scores(x, y, ensemble, afss, selected=selected, plot_ensemble=False, cmap='jet', xx=xx, yy=yy, name='_before', dataset=opts.dataset, outpath=opts.results_dir)
        (xx, yy) = plot_afss_scores(x, y, ensemble, afss, selected=None, plot_ensemble=True, cmap='jet', xx=xx, yy=yy, name='_ensemble', dataset=opts.dataset, outpath=opts.results_dir)
    scores = ensemble.get_scores(x)
    logger.debug(('scores: %s' % str(scores.shape)))
    # Ten rounds of AFSS updates using the same feedback set.
    for i in range(10):
        afss.update_afss(x, y, hf, scores, tau=opts.afss_tau)
    if opts.debug:
        afss.log_probability_ranges(x)
    hf_scores_after = afss.get_weighted_scores(x[hf], ensemble.get_scores(x[hf]))
    logger.debug(('hf_scores_before:\n%s\nhf_scores_after:\n%s\ny:\n%s' % (str(list(hf_scores_before)), str(list(hf_scores_after)), str(list(y[hf])))))
    if opts.plot:
        (xx, yy) = plot_weighted_scores(x, y, ensemble, afss, selected=None, xx=xx, yy=yy, contour_levels=20, name='_after', n_anoms=opts.n_anoms, dataset=opts.dataset, outpath=opts.results_dir)
        (xx, yy) = plot_afss_scores(x, y, ensemble, afss, selected=None, plot_ensemble=False, cmap='jet', xx=xx, yy=yy, name='_after', dataset=opts.dataset, outpath=opts.results_dir)
    afss.close_session()
def _flatten_module(module, recursive, predicate, attribute_traversal_key, attributes_to_ignore, with_path, expand_composites, module_path=(), seen=None):
    """Yield attributes (leaves) of `module` that satisfy `predicate`,
    optionally recursing into every leaf as a submodule.

    Attributes are walked in sorted order (by attribute_traversal_key),
    skipping names in attributes_to_ignore; each attribute value is
    flattened via nest.flatten_with_tuple_paths. When with_path is True the
    yields are ((path tuple), leaf), otherwise just the leaf, with
    identity-based deduplication via `seen`.

    NOTE(review): statement grouping inside the leaf loop was inferred from
    a whitespace-collapsed source — confirm against the original file.
    """
    if (seen is None):
        # Seed with the root module's id so cycles never revisit it.
        seen = {id(module)}
    try:
        module_dict = vars(module)
    except TypeError:
        # Objects without __dict__ have no attributes to walk.
        module_dict = {}
    submodules = []
    for key in sorted(module_dict, key=attribute_traversal_key):
        if (key in attributes_to_ignore):
            continue
        prop = module_dict[key]
        try:
            leaves = nest.flatten_with_tuple_paths(prop, expand_composites=expand_composites)
        except Exception:
            # NOTE(review): broad catch silently skips attributes that nest
            # cannot flatten.
            leaves = []
        for (leaf_path, leaf) in leaves:
            leaf_path = ((key,) + leaf_path)
            if (not with_path):
                # Deduplicate by object identity only when paths are not
                # reported; with_path callers see every path to a leaf.
                leaf_id = id(leaf)
                if (leaf_id in seen):
                    continue
                seen.add(leaf_id)
            if predicate(leaf):
                if with_path:
                    (yield ((module_path + leaf_path), leaf))
                else:
                    (yield leaf)
            if recursive:
                submodules.append(((module_path + leaf_path), leaf))
    # Depth-first recursion into collected leaves, sharing `seen` so each
    # object is emitted at most once across the whole traversal.
    for (submodule_path, submodule) in submodules:
        subvalues = _flatten_module(submodule, recursive=recursive, predicate=predicate, attribute_traversal_key=attribute_traversal_key, attributes_to_ignore=_TF_MODULE_IGNORED_PROPERTIES, with_path=with_path, expand_composites=expand_composites, module_path=submodule_path, seen=seen)
        for subvalue in subvalues:
            (yield subvalue)
def register_methods(root_module):
    """Register the Python-binding methods for every wrapped ns-3 type.

    Auto-generated (pybindgen-style) dispatch: each helper attaches
    constructors/methods/attributes to the wrapper class retrieved from
    ``root_module`` by its fully qualified C++ name.  Call order follows the
    generator's topological ordering — do not reorder by hand.
    """
    # --- value types, containers and low-level helpers ---
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    # --- DefaultDeleter<T> instantiations ---
    register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
    register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
    register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
    register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
    register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
    register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
    register_Ns3DefaultDeleter__Ns3Ipv4Route_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Ipv4Route >'])
    register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
    register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
    register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Inet6SocketAddress_methods(root_module, root_module['ns3::Inet6SocketAddress'])
    register_Ns3InetSocketAddress_methods(root_module, root_module['ns3::InetSocketAddress'])
    register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
    register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
    register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
    register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
    register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
    register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
    register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv4RoutingHelper_methods(root_module, root_module['ns3::Ipv4RoutingHelper'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
    register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3DsdvHelper_methods(root_module, root_module['ns3::DsdvHelper'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    # --- SimpleRefCount<T, empty, DefaultDeleter<T>> instantiations ---
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    # --- sockets, tags, time and random variables ---
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketIpTosTag_methods(root_module, root_module['ns3::SocketIpTosTag'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketIpv6HopLimitTag_methods(root_module, root_module['ns3::SocketIpv6HopLimitTag'])
    register_Ns3SocketIpv6TclassTag_methods(root_module, root_module['ns3::SocketIpv6TclassTag'])
    register_Ns3SocketPriorityTag_methods(root_module, root_module['ns3::SocketPriorityTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
    register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4Interface_methods(root_module, root_module['ns3::Ipv4Interface'])
    register_Ns3Ipv4L3Protocol_methods(root_module, root_module['ns3::Ipv4L3Protocol'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
    register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
    register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    # --- CallbackImpl<...> instantiations ---
    register_Ns3CallbackImpl__Bool_Ns3Ptr__lt__ns3Socket__gt___Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< bool, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Const_ns3Ipv4Header___amp___Ns3Ptr__lt__const_ns3Packet__gt___Ns3Ipv4L3ProtocolDropReason_Ns3Ptr__lt__ns3Ipv4__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, const ns3::Ipv4Header &, ns3::Ptr<const ns3::Packet>, ns3::Ipv4L3Protocol::DropReason, ns3::Ptr<ns3::Ipv4>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Const_ns3Ipv4Header___amp___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, const ns3::Ipv4Header &, ns3::Ptr<const ns3::Packet>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Const_ns3Ipv4Header___amp___Ns3SocketSocketErrno_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, const ns3::Ipv4Header &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Ptr__lt__ns3Ipv4__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::Ptr<ns3::Ipv4>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Ipv4Route__gt___Ns3Ptr__lt__const_ns3Packet__gt___Const_ns3Ipv4Header___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<const ns3::Packet>, const ns3::Ipv4Header &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    # --- routing, hashing and DSDV module types ---
    register_Ns3Ipv4ListRouting_methods(root_module, root_module['ns3::Ipv4ListRouting'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    register_Ns3DsdvDsdvHeader_methods(root_module, root_module['ns3::dsdv::DsdvHeader'])
    register_Ns3DsdvPacketQueue_methods(root_module, root_module['ns3::dsdv::PacketQueue'])
    register_Ns3DsdvQueueEntry_methods(root_module, root_module['ns3::dsdv::QueueEntry'])
    register_Ns3DsdvRoutingProtocol_methods(root_module, root_module['ns3::dsdv::RoutingProtocol'])
    register_Ns3DsdvRoutingTable_methods(root_module, root_module['ns3::dsdv::RoutingTable'])
    register_Ns3DsdvRoutingTableEntry_methods(root_module, root_module['ns3::dsdv::RoutingTableEntry'])
    return
# NOTE(review): the call below looks like a decorator that lost its '@' and
# name prefix during extraction (taichi tests typically use
# '@test_utils.test()') — confirm against upstream before running.
_utils.test()
def test_explicit_local_atomic_max():
    """atomic_max on a function-local variable must fold the loop maximum (9)
    into it, observable via the 0-d field A."""
    # 0-dimensional f32 field used to read the result back out.
    A = ti.field(ti.f32, shape=())
    # NOTE(review): presumably this inner function lost a '@ti.kernel'
    # decorator in extraction — as plain Python it still computes max(0..9).
    def func():
        a = (- 1000)
        for i in range(10):
            # Explicit atomic max accumulates the running maximum into `a`.
            ti.atomic_max(a, i)
        A[None] = a
    func()
    # max over range(10) is 9.
    assert (A[None] == 9)
def register_Ns3QueueDiscItem_methods(root_module, cls):
    """Register constructor and member methods of ns3::QueueDiscItem on its
    wrapper class *cls* (pybindgen-generated binding code)."""
    # Constructor: QueueDiscItem(Ptr<Packet> p, const Address& addr, uint16_t protocol)
    cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Address const &', 'addr'), param('uint16_t', 'protocol')])
    # Const accessors.
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True)
    cls.add_method('GetProtocol', 'uint16_t', [], is_const=True)
    cls.add_method('GetTxQueueIndex', 'uint8_t', [], is_const=True)
    cls.add_method('SetTxQueueIndex', 'void', [param('uint8_t', 'txq')])
    cls.add_method('GetTimeStamp', 'ns3::Time', [], is_const=True)
    cls.add_method('SetTimeStamp', 'void', [param('ns3::Time', 't')])
    # Pure-virtual interface to be implemented by concrete item classes.
    cls.add_method('AddHeader', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Mark', 'bool', [], is_pure_virtual=True, is_virtual=True)
    return
def write_wave(path, audio, sample_rate):
    """Persist raw PCM bytes to *path* as a mono, 16-bit WAV file.

    Args:
        path: destination file path.
        audio: raw little-endian 16-bit PCM frames as a bytes-like object.
        sample_rate: sampling rate in Hz to record in the WAV header.
    """
    wav_out = wave.open(path, 'wb')
    try:
        wav_out.setnchannels(1)        # mono
        wav_out.setsampwidth(2)        # 2 bytes per sample -> 16-bit
        wav_out.setframerate(sample_rate)
        wav_out.writeframes(audio)
    finally:
        # Guarantee the header is finalized and the handle released.
        wav_out.close()
def test_nonzero_offset_fromarrow_ListOffsetArray_3():
    """A strided slice of the Arrow conversion of a ListOffsetArray must
    round-trip through handle_arrow with the same Python-list contents."""
    values = ak.contents.NumpyArray(
        np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1])
    )
    offsets = ak.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
    layout = ak.contents.ListOffsetArray(offsets, values)
    # Slice with a non-zero start and stride to exercise offset handling.
    arrow_slice = layout.to_arrow()[0:5:2]
    roundtrip = ak._connect.pyarrow.handle_arrow(arrow_slice)
    assert to_list(roundtrip) == pyarrow.Array.to_pylist(arrow_slice)
def create(lr_scheduler, num_sample, **kwargs):
    """Instantiate the learning-rate scheduler registered under *lr_scheduler*.

    Args:
        lr_scheduler: key into the module-level ``__scheduler`` registry.
        num_sample: sample count forwarded to the scheduler constructor.
        **kwargs: extra keyword arguments forwarded to the constructor.

    Returns:
        The constructed scheduler object.

    Raises:
        ValueError: if *lr_scheduler* is not a registered scheduler name.
    """
    if lr_scheduler not in __scheduler:
        # Blank info line kept as a visual spacer in the log output.
        logging.info('')
        logging.error('Error: Do NOT exist this lr_scheduler: {}!'.format(lr_scheduler))
        # Fix: carry the offending name in the exception instead of raising
        # a bare ValueError() with no diagnostic message.
        raise ValueError('Unknown lr_scheduler: {}'.format(lr_scheduler))
    return __scheduler[lr_scheduler](num_sample, **kwargs)
class Component():
    """A buildable source component: owns a source directory, its dependency
    list, and emits Makefile rules (header-tracking ``.node`` files and object
    compilation rules) for every C++ file it contains.

    Relies on module-level state: BUILD_DIR/SRC_DIR/REV_BUILD_DIR paths,
    the _ComponentNames/_Processed_Headers registries, and helpers such as
    get_component/find_all_deps/extract_c_includes.

    NOTE(review): this chunk arrived with collapsed line structure; the
    indentation below is reconstructed.  Several write strings begin with
    '\\ ' or '\\$' where a Makefile recipe would normally need a leading tab
    ('\\t') — this looks like extraction mangling; confirm against upstream.
    """

    def __init__(self, name, path, deps):
        global BUILD_DIR, SRC_DIR, REV_BUILD_DIR
        if (name in _ComponentNames):
            raise MKException(("Component '%s' was already defined." % name))
        if (path is None):
            # Default: component lives in a directory named after itself.
            path = name
        self.name = name
        path = norm_path(path)
        self.path = path
        # Transitive closure of dependencies.
        self.deps = find_all_deps(name, deps)
        self.build_dir = path
        self.src_dir = os.path.join(SRC_DIR, path)
        # Path from the build directory back to this component's sources.
        self.to_src_dir = os.path.join(REV_BUILD_DIR, self.src_dir)

    def get_link_name(self):
        # Static-library artifact name for this component.
        return (os.path.join(self.build_dir, self.name) + '$(LIB_EXT)')

    def find_file(self, fname, ownerfile, orig_include=None):
        """Locate which component (self or a dependency) provides include
        file *fname*; disambiguate multiple hits via the include's directory."""
        full_fname = os.path.join(self.src_dir, fname)
        possibilities = set()
        if os.path.exists(full_fname):
            possibilities.add(self)
        for dep in self.deps:
            c_dep = get_component(dep)
            full_fname = os.path.join(c_dep.src_dir, fname)
            if os.path.exists(full_fname):
                possibilities.add(c_dep)
        if possibilities:
            if (len(possibilities) > 1):
                # Ambiguous: match against the directory part of the include
                # as originally written in the source file.
                assert (orig_include is not None)
                orig_dir = os.path.dirname(orig_include)
                for possibility in possibilities:
                    path = possibility.path.replace('\\', '/')
                    if path.endswith(orig_dir):
                        return possibility
            return possibilities.pop()
        raise MKException(("Failed to find include file '%s' for '%s' when processing '%s'." % (fname, ownerfile, self.name)))

    def add_cpp_h_deps(self, out, basename):
        """Write the source file plus one '<header>.node' prerequisite per
        include found in it."""
        includes = extract_c_includes(os.path.join(self.src_dir, basename))
        out.write(os.path.join(self.to_src_dir, basename))
        for (include, orig_include) in includes.items():
            owner = self.find_file(include, basename, orig_include)
            out.write((' %s.node' % os.path.join(owner.build_dir, include)))

    def add_rule_for_each_include(self, out, basename):
        """Emit (once) the .node rule for every header included by *basename*."""
        fullname = os.path.join(self.src_dir, basename)
        includes = extract_c_includes(fullname)
        for (include, orig_include) in includes.items():
            owner = self.find_file(include, fullname, orig_include)
            owner.add_h_rule(out, include)

    def add_h_rule(self, out, include):
        """Emit the Makefile rule producing '<include>.node' (a timestamp file
        marking the header and its transitive includes as processed)."""
        include_src_path = os.path.join(self.to_src_dir, include)
        if (include_src_path in _Processed_Headers):
            # Each header is processed at most once globally.
            return
        _Processed_Headers.add(include_src_path)
        # Recurse first so prerequisite rules appear before this one.
        self.add_rule_for_each_include(out, include)
        include_node = ('%s.node' % os.path.join(self.build_dir, include))
        out.write(('%s: ' % include_node))
        self.add_cpp_h_deps(out, include)
        out.write('\n')
        # NOTE(review): recipe line — looks like '\t' was mangled to '\ '.
        out.write(('\ done > %s\n' % include_node))

    def add_cpp_rules(self, out, include_defs, cppfile):
        """Emit the compile rule turning *cppfile* into its object file."""
        self.add_rule_for_each_include(out, cppfile)
        objfile = ('%s$(OBJ_EXT)' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0]))
        srcfile = os.path.join(self.to_src_dir, cppfile)
        out.write(('%s: ' % objfile))
        self.add_cpp_h_deps(out, cppfile)
        out.write('\n')
        if SHOW_CPPS:
            # NOTE(review): probable mangled recipe prefix here as well.
            out.write(('\ %s\n' % os.path.join(self.src_dir, cppfile)))
        out.write(('\$(CXX) $(CXXFLAGS) $(%s) $(CXX_OUT_FLAG)%s %s\n' % (include_defs, objfile, srcfile)))

    def mk_makefile(self, out):
        """Emit this component's include-path variable and all of its
        header/object rules; batches compilation on Windows when VS_PAR is set."""
        include_defs = mk_fresh_name('includes')
        out.write(('%s =' % include_defs))
        for dep in self.deps:
            out.write((' -I%s' % get_component(dep).to_src_dir))
        out.write((' -I%s' % os.path.join(REV_BUILD_DIR, 'src')))
        out.write('\n')
        mk_dir(os.path.join(BUILD_DIR, self.build_dir))
        if (VS_PAR and IS_WINDOWS):
            # Parallel MSVC build: one combined rule compiling every cpp file
            # in a single /MP invocation.
            cppfiles = list(get_cpp_files(self.src_dir))
            dependencies = set()
            for cppfile in cppfiles:
                dependencies.add(os.path.join(self.to_src_dir, cppfile))
                self.add_rule_for_each_include(out, cppfile)
                includes = extract_c_includes(os.path.join(self.src_dir, cppfile))
                for (include, orig_include) in includes.items():
                    owner = self.find_file(include, cppfile, orig_include)
                    dependencies.add(('%s.node' % os.path.join(owner.build_dir, include)))
            # All object files share the same prerequisite list.
            for cppfile in cppfiles:
                out.write(('%s$(OBJ_EXT) ' % os.path.join(self.build_dir, os.path.splitext(cppfile)[0])))
            out.write(': ')
            for dep in dependencies:
                out.write(dep)
                out.write(' ')
            out.write('\n')
            out.write(('\$(CXX) $(CXXFLAGS) /MP%s $(%s)' % (VS_PAR_NUM, include_defs)))
            for cppfile in cppfiles:
                out.write(' ')
                out.write(os.path.join(self.to_src_dir, cppfile))
            out.write('\n')
            # cl.exe drops objects in the cwd; move them into the build dir.
            out.write(('\tmove *.obj %s\n' % self.build_dir))
        else:
            for cppfile in get_cpp_files(self.src_dir):
                self.add_cpp_rules(out, include_defs, cppfile)

    # --- capability predicates / hooks overridden by subclasses ---
    def main_component(self):
        return False

    def has_assembly_info(self):
        return False

    def require_install_tactics(self):
        return False

    def require_def_file(self):
        return False

    def require_mem_initializer(self):
        return False

    def mk_install_deps(self, out):
        return

    def mk_install(self, out):
        return

    def mk_uninstall(self, out):
        return

    def is_example(self):
        return False

    def mk_win_dist(self, build_path, dist_path):
        return

    def mk_unix_dist(self, build_path, dist_path):
        return

    def final_info(self):
        pass
class CnnClassifier(Classifier):
    """Two-block convolutional classifier.

    Two conv/ReLU/max-pool stages (each halving spatial size), a flatten,
    and a two-layer fully-connected head ending in log-softmax class scores.
    """

    def __init__(self, config, experts):
        super().__init__(config, experts)
        nf = config['cls_nf']
        h1_dim = 1 * nf
        h2_dim = 2 * nf
        fc_dim = 4 * nf
        # After two 2x2 max-pools, each spatial dimension is quartered.
        feature_volume = (config['x_h'] // 4) * (config['x_w'] // 4) * h2_dim
        layers = [
            nn.Conv2d(config['x_c'], h1_dim, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(h1_dim, h2_dim, kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU(),
            nn.MaxPool2d(2),
            # Flatten (batch, C, H, W) -> (batch, C*H*W).
            Lambda(lambda t: t.view(t.size(0), -1)),
            nn.Linear(feature_volume, fc_dim, bias=False),
            nn.ReLU(),
            nn.Linear(fc_dim, config['y_c']),
            nn.LogSoftmax(dim=1),
        ]
        self.net = nn.Sequential(*layers)
        self.to(self.device)
        self.setup_optimizer()

    def forward(self, x):
        """Return per-class log-probabilities for input batch *x*."""
        x = x.to(self.device)
        return self.net(x)
def format_kwargs(sep, pattern, **kwargs):
    """Render each keyword argument through a two-slot %-style *pattern*
    (fed ``(key, value)``) and join the pieces with *sep*.

    Keyword insertion order is preserved in the output.
    """
    rendered = (pattern % pair for pair in kwargs.items())
    return sep.join(rendered)
def get_child_state_dict(state_dict, key):
    """Extract the sub-state-dict belonging to child module *key*.

    Keeps only entries whose name starts with ``'<key>.'`` and strips the
    first dotted component from each surviving name.
    """
    prefix = '{}.'.format(key)
    child = {}
    for name, value in state_dict.items():
        if name.startswith(prefix):
            # Everything after the first '.' (guaranteed present here).
            child[name.partition('.')[2]] = value
    return child
def _variable_on_cpu(name, shape, initializer, use_fp16=False, trainable=True):
    """Create (or fetch, under variable reuse) a TF variable pinned to host
    memory.

    Args:
        name: variable name within the current variable scope.
        shape: variable shape.
        initializer: initializer passed to ``tf.get_variable``.
        use_fp16: store the variable as float16 instead of float32.
        trainable: whether the variable joins the trainable collection.

    Returns:
        The created ``tf.Variable``.
    """
    dtype = tf.float16 if use_fp16 else tf.float32
    # Pin placement to the CPU so the variable lives in host memory.
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer,
                               dtype=dtype, trainable=trainable)
def register_Ns3GroupInfo_methods(root_module, cls):
    """Register constructors and instance attributes of ns3::GroupInfo on its
    wrapper class *cls* (pybindgen binding code)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::GroupInfo const &', 'arg0')])
    # All attributes are plain mutable instance members.
    for attr_name, attr_type in (
        ('m_col', 'uint8_t'),
        ('m_index', 'uint8_t'),
        ('m_maxProbRate', 'uint16_t'),
        ('m_maxTpRate', 'uint16_t'),
        ('m_maxTpRate2', 'uint16_t'),
        ('m_ratesTable', 'ns3::HtMinstrelRate'),
        ('m_supported', 'bool'),
    ):
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def first_line_that(lines, f):
    """Return ``(index, line)`` for the first element of *lines* satisfying
    predicate *f*.

    Raises:
        RuntimeError: if no line satisfies the predicate.
    """
    hit = next(((idx, ln) for idx, ln in enumerate(lines) if f(ln)), None)
    if hit is None:
        raise RuntimeError('no line found that satisfies condition')
    return hit
def _memoize(func): memo = {} def wrapper(*a, **kw): key = repr((a, kw)) if (key not in memo): try: memo[key] = func(*a, **kw) except Exception as e: memo[key] = e raise ret = memo[key] if isinstance(ret, Exception): raise ret return ret wrapper.__name__ = func.__name__ return wrapper
class MethodViewType(type):
    """Metaclass for MethodView-style classes.

    Unless the class body declares ``methods`` itself, derives the set of
    supported HTTP methods from (a) the ``methods`` of its base classes and
    (b) the lower-case HTTP-verb handler methods defined on the class.
    """

    # Canonical HTTP verb handler names.  NOTE(review): the original loop
    # read ``for key in <missing> if hasattr(cls, key)`` — the iterable was
    # lost in extraction; restored here as the standard verb set used by
    # Flask's ``http_method_funcs``.  Confirm against upstream.
    _HTTP_METHOD_FUNCS = frozenset(
        ['get', 'post', 'head', 'options', 'delete', 'put', 'trace', 'patch'])

    def __init__(cls, name, bases, d):
        super(MethodViewType, cls).__init__(name, bases, d)
        if 'methods' not in d:
            methods = set()
            # Inherit every method advertised by the base classes.
            for base in bases:
                if getattr(base, 'methods', None):
                    methods.update(base.methods)
            # Add each verb for which this class provides a handler.
            for key in MethodViewType._HTTP_METHOD_FUNCS:
                if hasattr(cls, key):
                    methods.add(key.upper())
            # Leave ``methods`` unset when nothing was detected, so plain
            # helper base classes stay untouched.
            if methods:
                cls.methods = methods
def interpolate_fixed_common(args, e_common, e_separate_A, e_separate_B, decoder, imgA1, imgA2, imgB1, imgB2, content_img):
    """Save an interpolation grid with a fixed common code.

    The common code is taken from domain-B image *content_img*; the
    domain-A separate code is interpolated between *imgA1*/*imgA2* across
    columns and the domain-B separate code between *imgB1*/*imgB2* across
    rows.  The first row/column show the anchor images (black filler cells
    elsewhere).  Result is written to ``<args.out>/interpolation_fixed_C.png``.

    NOTE(review): this chunk arrived with collapsed line structure; the
    nesting below (in particular that the inner ``for j`` loop runs once per
    row ``i``) is a reconstruction — confirm against upstream.
    """
    (test_domA, test_domB) = get_test_imgs(args)
    exps = []
    # Shared content/common code from the chosen domain-B image.
    common = e_common(test_domB[content_img].unsqueeze(0))
    # Separate (style) codes for the four anchor images.
    a1 = e_separate_A(test_domA[imgA1].unsqueeze(0))
    a2 = e_separate_A(test_domA[imgA2].unsqueeze(0))
    b1 = e_separate_B(test_domB[imgB1].unsqueeze(0))
    b2 = e_separate_B(test_domB[imgB2].unsqueeze(0))
    with torch.no_grad():
        # Black placeholder cell.  NOTE: the same tensor object is appended
        # repeatedly after in-place fill_(0); torch.cat below copies, and all
        # placeholder cells are identically zero, so aliasing is harmless here.
        filler = test_domB[0].unsqueeze(0).clone()
        # Header row: blank corner, anchor A1, blanks, anchor A2.
        exps.append(filler.fill_(0))
        exps.append(test_domA[imgA1].unsqueeze(0))
        for i in range((args.num_display - 2)):
            exps.append(filler.fill_(0))
        exps.append(test_domA[imgA2].unsqueeze(0))
        for i in range(args.num_display):
            # Leading cell of each row: B anchors at the ends, blanks between.
            if (i == 0):
                exps.append(test_domB[imgB1].unsqueeze(0))
            elif (i == (args.num_display - 1)):
                exps.append(test_domB[imgB2].unsqueeze(0))
            else:
                exps.append(filler.fill_(0))
            for j in range(args.num_display):
                # Linear interpolation of the separate codes: columns blend
                # the A code, rows blend the B code.
                cur_sep_A = (((float(j) / (args.num_display - 1)) * a2) + ((1 - (float(j) / (args.num_display - 1))) * a1))
                cur_sep_B = (((float(i) / (args.num_display - 1)) * b2) + ((1 - (float(i) / (args.num_display - 1))) * b1))
                encoding = torch.cat([common, cur_sep_A, cur_sep_B], dim=1)
                decoding = decoder(encoding)
                exps.append(decoding)
    with torch.no_grad():
        exps = torch.cat(exps, 0)
    # num_display interpolation columns plus one leading anchor column.
    vutils.save_image(exps, ('%s/interpolation_fixed_C.png' % args.out), normalize=True, nrow=(args.num_display + 1))
class CyGlobals(CyLocals):
    """gdb command ``cy globals``: print the Python and C module-level
    globals visible from the currently selected Cython frame."""
    name = 'cy globals'
    command_class = gdb.COMMAND_STACK
    completer_class = gdb.COMPLETE_NONE

    # NOTE(review): the bare call below looks like a decorator that lost its
    # '@' and name prefix during extraction (upstream Cython decorates invoke
    # with '@dispatch_on_frame(...)') — confirm before running.
    _on_frame(c_command='info variables', python_command='py-globals')

    def invoke(self, args, from_tty):
        """Print Python globals first, then C globals not shadowed by them,
        with names padded to a common column width."""
        global_python_dict = self.get_cython_globals_dict()
        module_globals = self.get_cython_function().module.globals
        max_globals_len = 0
        max_globals_dict_len = 0
        if module_globals:
            max_globals_len = len(max(module_globals, key=len))
        if global_python_dict:
            # NOTE(review): unlike the line above there is no key=len here, so
            # this is the length of the lexicographically greatest key, not of
            # the longest one — possibly inherited upstream behavior; confirm.
            max_globals_dict_len = len(max(global_python_dict))
        max_name_length = max(max_globals_len, max_globals_dict_len)
        # Track Python-side names so C duplicates are not printed twice.
        seen = set()
        print('Python globals:')
        for (k, v) in sorted(global_python_dict.items(), key=sortkey):
            v = v.get_truncated_repr(libpython.MAX_OUTPUT_LEN)
            seen.add(k)
            print((' %-*s = %s' % (max_name_length, k, v)))
        print('C globals:')
        for (name, cyvar) in sorted(module_globals.items(), key=sortkey):
            if (name not in seen):
                try:
                    value = gdb.parse_and_eval(cyvar.cname)
                except RuntimeError:
                    # Symbol not evaluable in this inferior; skip silently.
                    pass
                else:
                    if (not value.is_optimized_out):
                        self.print_gdb_value(cyvar.name, value, max_name_length, ' ')
def train(args):
    """Train a DESIRE model.

    Saves the run configuration to ``save/config.pkl``, then runs
    ``args.num_epochs`` epochs of per-sequence forward passes (only the cost
    op is evaluated — no optimizer op is visible in this chunk), printing the
    mean batch loss and periodically checkpointing to
    ``save/social_model.ckpt``.
    """
    data_loader = dl.DataLoader(args.batch_size, args.seq_length, args.max_num_obj, args.leave_dataset, preprocess=False)
    with open(os.path.join('save', 'config.pkl'), 'wb') as config_file:
        pickle.dump(args, config_file)
    # Fix: bind the instance to a new name instead of `model = model.DESIREModel(args)`,
    # which shadowed the `model` module after the first call.
    net = model.DESIREModel(args)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        for epoch in range(args.num_epochs):
            # Exponential learning-rate decay per epoch.
            sess.run(tf.assign(net.learning_rate, (args.learning_rate * (args.decay_rate ** epoch))))
            data_loader.reset_batch_pointer()
            for batch in range(data_loader.num_batches):
                start = time.time()
                (xval, yval, dval) = data_loader.next_batch()
                loss_batch = 0
                # Fix: the inner loop previously reused the name `batch`,
                # clobbering the outer batch counter used by the progress
                # print and the save_every check below.
                for seq in range(data_loader.batch_size):
                    (x_batch, y_batch, d_batch) = (xval[seq], yval[seq], dval[seq])
                    x_batch = np.reshape(x_batch, [args.seq_length, args.max_num_obj, 3])
                    y_batch = np.reshape(y_batch, [args.seq_length, args.max_num_obj, 3])
                    feed = {net.input_data: x_batch, net.target_data: y_batch}
                    train_loss = sess.run(net.cost, feed)
                    loss_batch += train_loss
                end = time.time()
                loss_batch = (loss_batch / data_loader.batch_size)
                # Global step counter shared by the log line and checkpointing.
                step = (epoch * data_loader.num_batches) + batch
                print('{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}'.format(step, (args.num_epochs * data_loader.num_batches), epoch, loss_batch, (end - start)))
                sys.stdout.flush()
                if ((step % args.save_every) == 0) and (step > 0):
                    checkpoint_path = os.path.join('save', 'social_model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
                    print('model saved to {}'.format(checkpoint_path))
                    sys.stdout.flush()
def base_axis_1_broadcast(x):
    """Apply a 3-map 2x2 convolution (base_axis=1, no padding, scope 'c1')
    and broadcast the result to shape (2, 3, 3, 3)."""
    conv_out = PF.convolution(x, 3, (2, 2), pad=(0, 0), name='c1', base_axis=1)
    return F.broadcast(conv_out, shape=(2, 3, 3, 3))
# NOTE(review): the source began with the garbled tokens `_torch _vision`
# (a syntax error); restored as the standard transformers test markers —
# confirm against the original file.
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for the EfficientFormer image processor (backed by ViTImageProcessor):
    attribute presence and PIL / numpy / torch input handling."""

    image_processing_class = (ViTImageProcessor if is_vision_available() else None)

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    # BUG FIX: the test methods below read this as an attribute
    # (`**self.image_processor_dict`), so it must be a property, not a
    # plain method — otherwise ** would try to unpack a bound method.
    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        """The processor exposes the expected configuration attributes."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'image_mean'))
        self.assertTrue(hasattr(image_processor, 'image_std'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))

    def test_batch_feature(self):
        # Intentionally empty: batch-feature behavior is not exercised here.
        pass

    def test_call_pil(self):
        """PIL inputs: single image and batch produce correctly shaped tensors."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Single image -> batch dimension of 1.
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape,
                         (1,
                          self.image_proc_tester.num_channels,
                          self.image_proc_tester.size['height'],
                          self.image_proc_tester.size['width']))
        # Batched input -> full batch size.
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape,
                         (self.image_proc_tester.batch_size,
                          self.image_proc_tester.num_channels,
                          self.image_proc_tester.size['height'],
                          self.image_proc_tester.size['width']))

    def test_call_numpy(self):
        """numpy inputs: single image and batch produce correctly shaped tensors."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape,
                         (1,
                          self.image_proc_tester.num_channels,
                          self.image_proc_tester.size['height'],
                          self.image_proc_tester.size['width']))
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape,
                         (self.image_proc_tester.batch_size,
                          self.image_proc_tester.num_channels,
                          self.image_proc_tester.size['height'],
                          self.image_proc_tester.size['width']))

    def test_call_pytorch(self):
        """torch inputs: single image and batch produce correctly shaped tensors."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape,
                         (1,
                          self.image_proc_tester.num_channels,
                          self.image_proc_tester.size['height'],
                          self.image_proc_tester.size['width']))
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape,
                         (self.image_proc_tester.batch_size,
                          self.image_proc_tester.num_channels,
                          self.image_proc_tester.size['height'],
                          self.image_proc_tester.size['width']))
def detect_compiler_type() -> Optional[CompilerType]:
    """Identify the host C/C++ compiler family.

    Honors an explicit ``CXX`` environment variable first; otherwise probes
    ``cc -v`` and inspects its output.

    Returns:
        CompilerType.CLANG or CompilerType.GCC.

    Raises:
        RuntimeError: when neither heuristic yields a known compiler.
    """
    chosen = os.environ.get('CXX', None)
    if chosen:
        if chosen in ('clang', 'clang++'):
            return CompilerType.CLANG
        if chosen in ('gcc', 'g++'):
            return CompilerType.GCC
        raise RuntimeError(f'User specified compiler is not valid {chosen}')

    # No user override: ask the default `cc` to identify itself.
    probe = subprocess.check_output(['cc', '-v'], stderr=subprocess.STDOUT).decode('utf-8')
    if 'clang' in probe:
        return CompilerType.CLANG
    if 'gcc' in probe:
        return CompilerType.GCC
    raise RuntimeError(f'Auto detected compiler is not valid {probe}')
def contract(graph: Graph, matching: List[List[Node]], edge_weight_function: EdgeWeightFunction, uf: Optional[UnionFind]=None) -> Graph:
    """Return a copy of ``graph`` with every matched group contracted.

    Each entry of ``matching`` is a list of nodes; all members are merged
    into the first node of their group.  Groups are processed in order of
    decreasing root id.  When ``uf`` is given, each merge is also recorded
    in the union-find structure.
    """
    contracted = Graph.from_other(graph)
    for group in sorted(matching, key=lambda g: g[0].id, reverse=True):
        root, *rest = group
        for node in rest:
            contracted.merge(root.id, node.id,
                             edge_weight_function=edge_weight_function, uf=uf)
            if uf is not None:
                uf.union(x=root.id, y=node.id)
    return contracted
class MultibankGlobalAttention(nn.Module):
    """Runs one shared GlobalAttention over several memory banks.

    Per-bank attended vectors are summed; per-bank alignment vectors are
    concatenated along the last dimension.
    """

    def __init__(self, dim, coverage=False, attn_type='dot'):
        super(MultibankGlobalAttention, self).__init__()
        # One attention module, reused across all banks.
        self.attention = GlobalAttention(dim, coverage, attn_type)

    def forward(self, input, memory_banks, memory_lengths=None, coverage=None):
        attn_outputs = []
        alignments = []
        for idx, bank in enumerate(memory_banks):
            # Length masking only applies to the first bank (as in the
            # original: lengths are dropped from the second bank on).
            lengths = memory_lengths if idx == 0 else None
            attn_h, align = self.attention(input, bank, lengths, coverage)
            attn_outputs.append(attn_h)
            alignments.append(align)
        combined_h = torch.stack(attn_outputs).sum(dim=0)
        combined_align = torch.cat(alignments, dim=2)
        return (combined_h, combined_align)
def receiveNewFrame(frameNumber, markerSetCount, unlabeledMarkersCount,
                    rigidBodyCount, skeletonCount, labeledMarkerCount,
                    latency, timecode, timecodeSub, timestamp, isRecording,
                    trackedModelsChanged):
    """No-op per-frame callback; intentionally ignores all frame data.

    The signature matches the frame-listener callback contract
    (presumably a NatNet/motion-capture client — confirm against the
    registering code) and must not be changed.
    """
def get_soundness_func_block_arg_defs(ctx: LeanGenContext) -> List[str]:
    """Build the argument definitions for the block's soundness function,
    from the typed argument list recorded in the context's block info."""
    return create_arg_defs(ctx.block_info.get_args_with_type())
class TemporalBeginCrop(object):
    """Temporally crop a list of frame indices to its first ``size`` entries.

    If the input is shorter than ``size``, the kept prefix is repeated
    cyclically until ``size`` indices are available.  An empty input is
    returned unchanged.
    """

    def __init__(self, size):
        # Target number of frame indices per clip.
        self.size = size

    def __call__(self, frame_indices):
        # Slice copies, so the caller's list is never mutated.
        out = frame_indices[:self.size]
        if out:
            period = len(out)
            # Pad by cycling through the prefix.  The original appended to
            # the list while iterating over it — a fragile, CPython-specific
            # idiom; this loop produces the identical cyclic padding.
            while len(out) < self.size:
                out.append(out[len(out) % period])
        return out
def information_retrieval_agents(agent_test_config, memory_json_file, workspace: Workspace):
    """Build one information-retrieval Agent per predefined goal.

    Each agent shares the same command registry, name, and role, and is
    given exactly one of the three Tesla-revenue goals.
    """
    command_registry = get_command_registry(agent_test_config)
    goals = [
        "Write to a file called output.txt containing tesla's revenue in 2022 after searching for 'tesla revenue 2022'.",
        "Write to a file called output.txt containing tesla's revenue in 2022.",
        "Write to a file called output.txt containing tesla's revenue every year since its creation.",
    ]
    agents = []
    for goal in goals:
        ai_config = AIConfig(
            ai_name='Information Retrieval Agent',
            ai_role='an autonomous agent that specializes in retrieving information.',
            ai_goals=[goal],
        )
        ai_config.command_registry = command_registry
        system_prompt = ai_config.construct_full_prompt()
        # Continuous mode is disabled for each agent under test.
        Config().set_continuous_mode(False)
        agents.append(
            Agent(
                ai_name='Information Retrieval Agent',
                memory=memory_json_file,
                command_registry=command_registry,
                ai_config=ai_config,
                config=agent_test_config,
                next_action_count=0,
                system_prompt=system_prompt,
                triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
                workspace_directory=workspace.root,
            )
        )
    return agents
def run(local_rank, func, backend, cfg):
    """Initialize torch.distributed for this process, pin it to its GPU,
    and run ``func(cfg)``.

    On Philly clusters (detected via the PHILLY_HOME environment variable)
    the rendezvous URL is assembled explicitly from MASTER_ADDR/MASTER_PORT;
    elsewhere the default environment-based init is used.

    The original wrapped init in ``except Exception as e: raise e``, which
    does nothing but obscure the traceback; errors now propagate directly.
    """
    if 'PHILLY_HOME' in os.environ:
        master_url = 'tcp://' + os.environ['MASTER_ADDR'] + ':' + os.environ['MASTER_PORT']
        torch.distributed.init_process_group(
            backend=backend,
            init_method=master_url,
            rank=du.get_rank(),
            world_size=du.get_global_size())
    else:
        torch.distributed.init_process_group(backend=backend)
    torch.cuda.set_device(local_rank)
    func(cfg)
def calc_precision_and_jaccard(pred, gt, th=0.5):
    """Binarize ``pred`` (via generate_binary_map, 'mean+std' mode) and
    score it against the binary ground-truth mask ``gt``.

    Returns:
        (precision, jaccard) — NOTE(review): the first value is the
        fraction of *all* pixels classified correctly (pixel accuracy),
        not precision in the TP/(TP+FP) sense; confirm naming with callers.
    """
    binary = generate_binary_map(pred, 'mean+std', th)
    # Pixel-wise agreement over the whole map.
    correct = (binary == gt).sum()
    accuracy = correct / pred.size
    # Intersection-over-union with a small epsilon against empty masks.
    inter = (binary * gt).sum()
    union = binary.sum() + gt.sum() - inter
    iou = inter / (union + 1e-10)
    return (accuracy, iou)
class Block(nn.Module):
    """Feature block: Merge-Run dual path, a residual block, an efficient
    residual block, then channel attention — applied in sequence."""

    def __init__(self, in_channels, out_channels, group=1):
        # NOTE(review): `group` is accepted for interface parity but unused,
        # matching the original — confirm whether it should be forwarded.
        super(Block, self).__init__()
        self.r1 = ops.Merge_Run_dual(in_channels, out_channels)
        self.r2 = ops.ResidualBlock(in_channels, out_channels)
        self.r3 = ops.EResidualBlock(in_channels, out_channels)
        self.ca = CALayer(in_channels)

    def forward(self, x):
        out = self.r1(x)
        out = self.r2(out)
        out = self.r3(out)
        return self.ca(out)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int=1, last_epoch: int=(- 1)):
    """Create a LambdaLR schedule: linear warmup from 0 to the base LR over
    ``num_warmup_steps`` steps, then cosine decay with ``num_cycles`` hard
    restarts down to 0 by ``num_training_steps``; 0 thereafter.

    Args:
        optimizer: wrapped optimizer whose LR is scheduled.
        num_warmup_steps: steps of linear warmup.
        num_training_steps: total training steps.
        num_cycles: number of hard cosine restarts after warmup.
        last_epoch: index of the last epoch when resuming.

    Returns:
        torch.optim.lr_scheduler.LambdaLR with the schedule above.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # Linear warmup.
            return float(current_step) / float(max(1, num_warmup_steps))
        span = max(1, num_training_steps - num_warmup_steps)
        progress = float(current_step - num_warmup_steps) / float(span)
        if progress >= 1.0:
            # Past the end of training: hold the LR at zero.
            return 0.0
        # `% 1.0` restarts the cosine at the top of each cycle.
        cycle_progress = (float(num_cycles) * progress) % 1.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * cycle_progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def download_model_weights(args):
    """Download pretrained weights for every model named in ``args.models``,
    in sorted order, skipping classes with no ``download`` attribute.

    NOTE(review): ``args`` is rebound by each ``get_model_class`` call and
    threaded into the next iteration, exactly as in the original — confirm
    that is intended.
    """
    for model_name in sorted(args.models):
        model_class, args = get_model_class(model_name, args)
        if hasattr(model_class, 'download'):
            print('downloading {} weights'.format(model_name))
            model_class.download(args)