class TestIsscalar(object): def test_basic(self): assert_(np.isscalar(3)) assert_((not np.isscalar([3]))) assert_((not np.isscalar((3,)))) assert_(np.isscalar(3j)) assert_(np.isscalar(long(10))) assert_(np.isscalar(4.0))
def test_integer(): a = ak.highlevel.ArrayBuilder() a.integer(10) a.integer(9) a.integer(8) a.integer(7) a.integer(6) assert (to_list(a.snapshot()) == [10, 9, 8, 7, 6]) assert (to_list(a) == [10, 9, 8, 7, 6]) assert (to_list(a.snapshot()[1:(- 1)]) == [9, 8, 7])
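A minimal sketch of the builder pattern this test exercises, assuming the `awkward` package is installed and imported as `ak`:

```python
import awkward as ak

b = ak.ArrayBuilder()
for value in (10, 9, 8):
    b.integer(value)        # append one integer per call
arr = b.snapshot()          # immutable snapshot of the current contents
print(ak.to_list(arr))      # [10, 9, 8]
```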
class FEMUDev(PCIDevSim): def run_cmd(self, env: ExpEnv) -> str: cmd = f'{env.repodir}/sims/external/femu/femu-simbricks {env.dev_pci_path(self)} {env.dev_shm_path(self)}' return cmd
class _DataLoaderIter(object): def __init__(self, loader): self.dataset = loader.dataset self.collate_fn = loader.collate_fn self.batch_sampler = loader.batch_sampler self.num_workers = loader.num_workers self.pin_memory = (loader.pin_memory and torch.cuda.is_available()) self.timeout = loader.timeout self.done_event = threading.Event() self.memory_dir = loader.memory_dir self.sample_iter = iter(self.batch_sampler) base_seed = torch.LongTensor(1).random_().item() if (self.num_workers > 0): self.worker_init_fn = loader.worker_init_fn self.index_queues = [multiprocessing.Queue() for _ in range(self.num_workers)] self.worker_queue_idx = 0 self.worker_result_queue = multiprocessing.SimpleQueue() self.batches_outstanding = 0 self.worker_pids_set = False self.shutdown = False self.send_idx = 0 self.rcvd_idx = 0 self.reorder_dict = {} self.workers = [multiprocessing.Process(target=_worker_loop, args=(self.dataset, self.index_queues[i], self.worker_result_queue, self.collate_fn, (base_seed + i), self.worker_init_fn, i, self.memory_dir)) for i in range(self.num_workers)] if (self.pin_memory or (self.timeout > 0)): self.data_queue = queue.Queue() if self.pin_memory: maybe_device_id = torch.cuda.current_device() else: maybe_device_id = None self.worker_manager_thread = threading.Thread(target=_worker_manager_loop, args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory, maybe_device_id)) self.worker_manager_thread.daemon = True self.worker_manager_thread.start() else: self.data_queue = self.worker_result_queue for w in self.workers: w.daemon = True w.start() _update_worker_pids(id(self), tuple((w.pid for w in self.workers))) _set_SIGCHLD_handler() self.worker_pids_set = True for _ in range((2 * self.num_workers)): self._put_indices() def __len__(self): return len(self.batch_sampler) def _get_batch(self): if (self.timeout > 0): try: (batch_idx, item) = self.data_queue.get(timeout=self.timeout) except queue.Empty: raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout)) else: (batch_idx, item) = self.data_queue.get() if (self.memory_dir is not None): data = np.load(item).item() os.remove(item) else: data = item return (batch_idx, data) def __next__(self): if (self.num_workers == 0): indices = next(self.sample_iter) batch = self.collate_fn([self.dataset[i] for i in indices]) if self.pin_memory: batch = pin_memory_batch(batch) return batch if (self.rcvd_idx in self.reorder_dict): batch = self.reorder_dict.pop(self.rcvd_idx) return self._process_next_batch(batch) if (self.batches_outstanding == 0): self._shutdown_workers() raise StopIteration while True: assert ((not self.shutdown) and (self.batches_outstanding > 0)) (idx, batch) = self._get_batch() self.batches_outstanding -= 1 if (idx != self.rcvd_idx): self.reorder_dict[idx] = batch continue return self._process_next_batch(batch) next = __next__ def __iter__(self): return self def _put_indices(self): assert (self.batches_outstanding < (2 * self.num_workers)) indices = next(self.sample_iter, None) if (indices is None): return self.index_queues[self.worker_queue_idx].put((self.send_idx, indices)) self.worker_queue_idx = ((self.worker_queue_idx + 1) % self.num_workers) self.batches_outstanding += 1 self.send_idx += 1 def _process_next_batch(self, batch): self.rcvd_idx += 1 self._put_indices() if isinstance(batch, ExceptionWrapper): raise batch.exc_type(batch.exc_msg) return batch def __getstate__(self): raise NotImplementedError('_DataLoaderIter cannot be pickled') def _shutdown_workers(self): try: if (not self.shutdown): self.shutdown = True self.done_event.set() for q in self.index_queues: q.put(None) try: while (not self.worker_result_queue.empty()): self.worker_result_queue.get() except (FileNotFoundError, ImportError): pass self.worker_result_queue.put(None) finally: if self.worker_pids_set: _remove_worker_pids(id(self)) self.worker_pids_set = False def __del__(self): if (self.num_workers > 0): self._shutdown_workers()
class Inception(nn.Module): def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes): super(Inception, self).__init__() self.b1 = nn.Sequential(nn.Conv2d(in_planes, n1x1, kernel_size=1), nn.BatchNorm2d(n1x1), nn.ELU(True)) self.b2 = nn.Sequential(nn.Conv2d(in_planes, n3x3red, kernel_size=1), nn.BatchNorm2d(n3x3red), nn.ELU(True), nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1), nn.BatchNorm2d(n3x3), nn.ELU(True)) self.b3 = nn.Sequential(nn.Conv2d(in_planes, n5x5red, kernel_size=1), nn.BatchNorm2d(n5x5red), nn.ELU(True), nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1), nn.BatchNorm2d(n5x5), nn.ELU(True), nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1), nn.BatchNorm2d(n5x5), nn.ELU(True)) self.b4 = nn.Sequential(nn.MaxPool2d(3, stride=1, padding=1), nn.Conv2d(in_planes, pool_planes, kernel_size=1), nn.BatchNorm2d(pool_planes), nn.ELU(True)) def forward(self, x): y1 = self.b1(x) y2 = self.b2(x) y3 = self.b3(x) y4 = self.b4(x) return torch.cat([y1, y2, y3, y4], 1)
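The four branches are concatenated along the channel axis, so the output has n1x1 + n3x3 + n5x5 + pool_planes channels while the spatial size is preserved. A quick shape check, assuming the `Inception` class above is in scope:

```python
import torch

block = Inception(192, 64, 96, 128, 16, 32, 32)
y = block(torch.randn(2, 192, 28, 28))
print(y.shape)  # torch.Size([2, 256, 28, 28]); 64 + 128 + 32 + 32 = 256
```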
def tera_url(ckpt, refresh=False, *args, **kwargs): return tera_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
def osnet_x0_75(num_classes=1000, loss='softmax', **kwargs): return OSNet(num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2], channels=[48, 192, 288, 384], loss=loss, **kwargs)
@registry.KEYPOINT_HEADS.register('gce_head') class GCEHead(nn.Module): def __init__(self, cfg, dim_in, spatial_in): super(GCEHead, self).__init__() self.dim_in = dim_in[(- 1)] self.spatial_in = spatial_in[(- 1)] use_nl = cfg.KEYPOINT.GCE_HEAD.USE_NL norm = cfg.MASK.GCE_HEAD.NORM conv_dim = cfg.KEYPOINT.GCE_HEAD.CONV_DIM aspp_dim = cfg.KEYPOINT.GCE_HEAD.ASPP_DIM num_convs_before_aspp = cfg.KEYPOINT.GCE_HEAD.NUM_CONVS_BEFORE_ASPP aspp_dilation = cfg.KEYPOINT.GCE_HEAD.ASPP_DILATION num_convs_after_aspp = cfg.KEYPOINT.GCE_HEAD.NUM_CONVS_AFTER_ASPP before_aspp_list = [] for _ in range(num_convs_before_aspp): before_aspp_list.append(make_conv(self.dim_in, conv_dim, kernel_size=3, norm=make_norm(conv_dim, norm=norm), act=make_act())) self.dim_in = conv_dim self.conv_before_aspp = (nn.Sequential(*before_aspp_list) if len(before_aspp_list) else None) self.aspp = ASPP(self.dim_in, aspp_dim, dilations=aspp_dilation, norm=norm) self.dim_in = self.aspp.dim_out feat_list = [make_conv(self.dim_in, conv_dim, kernel_size=1, norm=make_norm(conv_dim, norm=norm), act=make_act())] if use_nl: feat_list.append(NonLocal2d(conv_dim, int((conv_dim * cfg.KRCNN.GCE_HEAD.NL_RATIO)), conv_dim, use_gn=True)) self.feat = nn.Sequential(*feat_list) self.dim_in = conv_dim assert (num_convs_after_aspp >= 1) after_aspp_list = [] for _ in range(num_convs_after_aspp): after_aspp_list.append(make_conv(self.dim_in, conv_dim, kernel_size=3, norm=make_norm(conv_dim, norm=norm), act=make_act())) self.dim_in = conv_dim self.conv_after_aspp = (nn.Sequential(*after_aspp_list) if len(after_aspp_list) else None) self.dim_out = [self.dim_in] self.spatial_out = [self.spatial_in] self._init_weights() def _init_weights(self): for m in self.modules(): if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if (m.bias is not None): nn.init.constant_(m.bias, 0) elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def forward(self, x): x = x[(- 1)] if (self.conv_before_aspp is not None): x = self.conv_before_aspp(x) x = self.aspp(x) x = self.feat(x) if (self.conv_after_aspp is not None): x = self.conv_after_aspp(x) return [x]
def ExampleGen(data_path, num_epochs=None): epoch = 0 while True: if ((num_epochs is not None) and (epoch >= num_epochs)): break filelist = glob.glob(data_path) assert filelist, 'Empty filelist.' random.shuffle(filelist) for f in filelist: reader = open(f, 'rb') while True: len_bytes = reader.read(8) if (not len_bytes): break str_len = struct.unpack('q', len_bytes)[0] example_str = struct.unpack(('%ds' % str_len), reader.read(str_len))[0] (yield example_pb2.Example.FromString(example_str)) epoch += 1
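ExampleGen expects length-prefixed records: an 8-byte native-endian ('q') length followed by a serialized Example proto. A hedged sketch of the matching writer (the helper name is ours, not from the source):

```python
import struct

def write_example(writer, example_proto):
    # One record: 8-byte length header, then the serialized payload.
    payload = example_proto.SerializeToString()
    writer.write(struct.pack('q', len(payload)))
    writer.write(payload)
```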
def _calculate(core_id): print(('Started calculating: %d' % core_id)) try: val = 0 for _ in range(1, 1000): for i in range(1, 1000000): val *= ((((i * i) / i) + i) - i) except KeyboardInterrupt: pass print(('Finished calculating: %d' % core_id))
class Downsample(nn.Module): def __init__(self, in_channels): super().__init__() self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) def forward(self, x): pad = (0, 1, 0, 1) x = torch.nn.functional.pad(x, pad, mode='constant', value=0) x = self.conv(x) return x
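The asymmetric (0, 1, 0, 1) pad plus a stride-2, padding-0 conv halves each spatial dimension exactly: for H = 32, (32 + 1 - 3) // 2 + 1 = 16. A quick check, assuming the `Downsample` class above is in scope:

```python
import torch

down = Downsample(8)
print(down(torch.randn(1, 8, 32, 32)).shape)  # torch.Size([1, 8, 16, 16])
```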
def validate_ec_ci(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]: if isinstance(df, (pd.Series, dd.Series)): return df.apply(ci.is_valid) elif isinstance(df, (pd.DataFrame, dd.DataFrame)): if (column != ''): return df[column].apply(ci.is_valid) else: return df.applymap(ci.is_valid) return ci.is_valid(df)
def main(): parser = get_parser() args = parser.parse_args() os.makedirs(args.save_dir, exist_ok=True) def create_files(dest): copyfile((osp.join(args.data, args.split) + '.tsv'), (dest + '.tsv')) if osp.exists((osp.join(args.data, args.split) + '.wrd')): copyfile((osp.join(args.data, args.split) + '.wrd'), (dest + '.wrd')) if osp.exists((osp.join(args.data, args.split) + '.phn')): copyfile((osp.join(args.data, args.split) + '.phn'), (dest + '.phn')) if osp.exists((dest + '.npy')): os.remove((dest + '.npy')) npaa = NpyAppendArray((dest + '.npy')) return npaa save_path = osp.join(args.save_dir, args.split) npaa = create_files(save_path) (generator, num) = get_iterator(args) iterator = generator() with open((save_path + '.lengths'), 'w') as l_f: for w2v_feats in tqdm.tqdm(iterator, total=num): print(len(w2v_feats), file=l_f) if (len(w2v_feats) > 0): npaa.append(w2v_feats.numpy())
def replaces_attribute(func: Callable[(..., Tuple[str])], classname: str, attr_name: str): Replacements._attr_rep[(classname, attr_name)] = func return func
@pytest.mark.experimental def test_indexer(df, tmp_path): path = (tmp_path / 'indexer').resolve() indexer = Indexer('user_idx', 'item_idx') df = convert2spark(df) indexer.fit(df, df) save_indexer(indexer, path) i = load_indexer(path) i.inverse_transform(i.transform(df)) assert (i.user_indexer.inputCol == indexer.user_indexer.inputCol)
def test_poissonvi(): adata = synthetic_iid(batch_size=100) POISSONVI.setup_anndata(adata) model = POISSONVI(adata) model.train(max_epochs=1) model.get_latent_representation() model.get_accessibility_estimates()
@click.command() @click.option('--seed', default=1) @click.option('--n_epochs', default=600) @click.option('--batch_size_per_task', default=1024) @wrap_experiment def te_ppo_pointenv(ctxt, seed, n_epochs, batch_size_per_task): set_seed(seed) tasks = TASKS latent_length = 2 inference_window = 6 batch_size = (batch_size_per_task * len(TASKS)) policy_ent_coeff = 0.001 encoder_ent_coeff = 0.001 inference_ce_coeff = 0.05 max_path_length = 100 embedding_init_std = 0.1 embedding_max_std = 0.2 embedding_min_std = 1e-06 policy_init_std = 1.0 policy_max_std = 2.0 policy_min_std = None task_names = sorted(tasks.keys()) task_args = [tasks[t]['args'] for t in task_names] task_kwargs = [tasks[t]['kwargs'] for t in task_names] with LocalTFRunner(snapshot_config=ctxt) as runner: task_envs = [GarageEnv(PointEnv(*t_args, **t_kwargs)) for (t_args, t_kwargs) in zip(task_args, task_kwargs)] env = MultiEnvWrapper(task_envs, round_robin_strategy, mode='vanilla') task_embed_spec = TEPPO.get_encoder_spec(env.task_space, latent_dim=latent_length) task_encoder = GaussianMLPEncoder(name='embedding', embedding_spec=task_embed_spec, hidden_sizes=(20, 20), std_share_network=True, init_std=embedding_init_std, max_std=embedding_max_std, output_nonlinearity=tf.nn.tanh, std_output_nonlinearity=tf.nn.tanh, min_std=embedding_min_std) traj_embed_spec = TEPPO.get_infer_spec(env.spec, latent_dim=latent_length, inference_window_size=inference_window) inference = GaussianMLPEncoder(name='inference', embedding_spec=traj_embed_spec, hidden_sizes=(20, 20), std_share_network=True, init_std=0.1, output_nonlinearity=tf.nn.tanh, std_output_nonlinearity=tf.nn.tanh, min_std=embedding_min_std) policy = GaussianMLPTaskEmbeddingPolicy(name='policy', env_spec=env.spec, encoder=task_encoder, hidden_sizes=(32, 16), std_share_network=True, max_std=policy_max_std, init_std=policy_init_std, min_std=policy_min_std) baseline = LinearMultiFeatureBaseline(env_spec=env.spec, features=['observations', 'tasks', 'latents']) algo = TEPPO(env_spec=env.spec, policy=policy, baseline=baseline, inference=inference, max_path_length=max_path_length, discount=0.99, lr_clip_range=0.2, policy_ent_coeff=policy_ent_coeff, encoder_ent_coeff=encoder_ent_coeff, inference_ce_coeff=inference_ce_coeff, use_softplus_entropy=True, optimizer_args=dict(batch_size=32, max_epochs=10, learning_rate=0.001), inference_optimizer_args=dict(batch_size=32, max_epochs=10, learning_rate=0.001), center_adv=True, stop_ce_gradient=True) runner.setup(algo, env, sampler_cls=LocalSampler, sampler_args=None, worker_class=TaskEmbeddingWorker) runner.train(n_epochs=n_epochs, batch_size=batch_size, plot=False)
def backend_of_obj(obj, default: (D | Sentinel)=UNSET) -> (Backend | D): cls = type(obj) try: lookup = _type_to_backend_lookup[cls] return lookup(obj) except KeyError: for factory in _backend_lookup_factories: maybe_lookup = factory(cls) if (maybe_lookup is not None): break else: if (default is UNSET): raise TypeError(f'cannot find backend for {cls.__name__}') else: return cast(D, default) _type_to_backend_lookup[cls] = maybe_lookup return maybe_lookup(obj)
def _isrecursive(pattern): if isinstance(pattern, binary_type): return (pattern == b'**') else: return (pattern == '**')
class Gulf(Benchmark): def __init__(self, dimensions=3): Benchmark.__init__(self, dimensions) self._bounds = list(zip(([0.0] * self.N), ([50.0] * self.N))) self.global_optimum = [[50.0, 25.0, 1.5]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 m = 99.0 i = arange(1.0, (m + 1)) u = (25 + (((- 50) * log((i / 100.0))) ** (2 / 3.0))) vec = (exp((- ((abs((u - x[1])) ** x[2]) / x[0]))) - (i / 100.0)) return sum((vec ** 2))
def collect_directories(root_dir: os.PathLike, recursive: bool=False, verbose: bool=False, ignore_dir_names: List[str]=[], check_root_dir: bool=False) -> List[os.PathLike]: dirs_list = [] dirs_to_check = (os.listdir(root_dir) + ([root_dir] if check_root_dir else [])) for exp_name in sorted(dirs_to_check): exp_path = os.path.join(root_dir, exp_name) if ((not os.path.isdir(exp_path)) or os.path.islink(exp_path) or (os.path.basename(exp_path) in ignore_dir_names)): continue elif recursive: dirs_list.extend(collect_directories(exp_path, recursive=recursive, verbose=verbose, ignore_dir_names=ignore_dir_names)) checkpoints_dir_path = os.path.join(exp_path, 'output') if ((not os.path.isdir(checkpoints_dir_path)) or os.path.islink(checkpoints_dir_path)): if verbose: print(f'Didnt find the checkpoints dir for {exp_name}') continue dirs_list.append(checkpoints_dir_path) return dirs_list
class BranchformerEncoder(nn.Module): def __init__(self, num_layers, d_model, nhead, kernel_size=31, kdim=None, vdim=None, activation=nn.GELU, dropout=0.0, attention_type='RelPosMHAXL', csgu_linear_units=3072, gate_activation=nn.Identity, use_linear_after_conv=False): super().__init__() self.layers = torch.nn.ModuleList([BranchformerEncoderLayer(nhead=nhead, d_model=d_model, kdim=kdim, vdim=vdim, dropout=dropout, activation=activation, kernel_size=kernel_size, attention_type=attention_type, csgu_linear_units=csgu_linear_units, gate_activation=gate_activation, use_linear_after_conv=use_linear_after_conv) for i in range(num_layers)]) self.norm = LayerNorm(d_model, eps=1e-06) self.attention_type = attention_type def forward(self, src, src_mask: Optional[torch.Tensor]=None, src_key_padding_mask: Optional[torch.Tensor]=None, pos_embs: Optional[torch.Tensor]=None): if (self.attention_type == 'RelPosMHAXL'): if (pos_embs is None): raise ValueError('The chosen attention type for the Branchformer is RelPosMHAXL. For this attention type, the positional embeddings are mandatory') output = src attention_lst = [] for enc_layer in self.layers: (output, attention) = enc_layer(output, src_mask=src_mask, src_key_padding_mask=src_key_padding_mask, pos_embs=pos_embs) attention_lst.append(attention) output = self.norm(output) return (output, attention_lst)
def test_constructor_clone_args(constructor_mock, default_test_case): ref = vr.VariableReference(default_test_case, default_test_case.test_cluster.type_system.convert_type_hint(float)) clone = vr.VariableReference(default_test_case, default_test_case.test_cluster.type_system.convert_type_hint(float)) const = stmt.ConstructorStatement(default_test_case, constructor_mock, {'a': ref}) assert (const._clone_args({ref: clone}) == {'a': clone})
def internal_to_external(name: str) -> tuple: und = name.rfind('_') meth = name[:und] ext = name[(und + 1):] if (meth not in FUSED_OPERATION_TO_SVE): raise NotSupportedError('Unknown internal function') return (((FUSED_OPERATION_TO_SVE[meth] + '_') + ext), SVE_SUFFIX_TO_TYPE[ext])
class VISEncoder(nn.Module): def __init__(self, d_model, N, heads, dropout): super().__init__() self.N = N self.embed = nn.Linear(2048, d_model) self.pe = PositionalEncoder(d_model, dropout=dropout) self.layers = get_clones(EncoderLayer(d_model, heads, dropout), N) self.norm = Norm(d_model) def forward(self, img, mask, mode): x = self.embed(img) x = self.pe(x, mode=mode) for i in range(self.N): x = self.layers[i](x, mask) return self.norm(x)
class sage__rings__padics(JoinFeature): def __init__(self): JoinFeature.__init__(self, 'sage.rings.padics', [PythonModule('sage.rings.padics.factory')], type='standard')
def kernel_shap_1000_meanref(model, data): return (lambda X: KernelExplainer(model.predict, kmeans(data, 1)).shap_values(X, nsamples=1000, l1_reg=0))
def combiners(n_combiners=1): assert _retry(_test_nodes, n_nodes=n_combiners, node_type='combiner')
@feature('copy') @before_method('process_source') def apply_copy(self): Utils.def_attrs(self, fun=copy_func) self.default_install_path = 0 lst = self.to_list(self.source) self.meths.remove('process_source') for filename in lst: node = self.path.find_resource(filename) if (not node): raise Errors.WafError(('cannot find input file %s for processing' % filename)) target = self.target if ((not target) or (len(lst) > 1)): target = node.name newnode = self.path.find_or_declare(target) tsk = self.create_task('copy', node, newnode) tsk.fun = self.fun tsk.chmod = getattr(self, 'chmod', Utils.O644) if (not tsk.env): tsk.debug() raise Errors.WafError('task without an environment')
@registry.make_registry class ONNXForward(abc.ABC): @staticmethod def forward_can_be_applied(node: ONNXOp, state: SDFGState, sdfg: SDFG) -> bool: return True @staticmethod def forward(node: ONNXOp, state: SDFGState, sdfg: SDFG) -> typing.Union[(Node, SDFG)]: ... @classmethod def registered_implementations(cls, op_name: str) -> typing.List[typing.Tuple[(str, 'ONNXForward')]]: impls = [] for (impl, args) in cls.extensions().items(): if (('op' in args) and (args['op'] == op_name)): impls.append((args['name'], impl)) return impls
def morphological_laplace(input, size=None, footprint=None, structure=None, output=None, mode='reflect', cval=0.0, origin=0): tmp1 = grey_dilation(input, size, footprint, structure, None, mode, cval, origin) if isinstance(output, numpy.ndarray): grey_erosion(input, size, footprint, structure, output, mode, cval, origin) numpy.add(tmp1, output, output) numpy.subtract(output, input, output) return numpy.subtract(output, input, output) else: tmp2 = grey_erosion(input, size, footprint, structure, None, mode, cval, origin) numpy.add(tmp1, tmp2, tmp2) numpy.subtract(tmp2, input, tmp2) numpy.subtract(tmp2, input, tmp2) return tmp2
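This mirrors SciPy's public API, so the same result (dilation + erosion - 2 * input) is available directly from `scipy.ndimage`:

```python
import numpy as np
from scipy.ndimage import morphological_laplace

a = np.zeros((5, 5))
a[2, 2] = 1.0
print(morphological_laplace(a, size=3))
```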
def bert_large_uncased_whole_word_maskings_384_2p_bw12_pipedream(): return dict(model_type='bert', model_name_or_path='bert-large-uncased-whole-word-masking', do_lower_case=True, output_past=False, stateless_tied=False, explicitly_set_dict={'precompute_attention_mask': False, 'return_dict': False}, do_resize_token_embedding=False)
@skip_if_no_torch def test_backpack_nano_compare(): import torch vocab_size = 5257 torch.manual_seed(0) converter = BackpackConfig.default_hf_checkpoint_converter cls = converter.HFAutoModelClass() config = converter.HfConfigClass(n_embd=32, n_positions=512, n_head=8, n_layer=2, vocab_size=vocab_size, resid_pdrop=0.0) model = cls(config) model.tie_weights() input = jax.random.randint(PRNGKey(0), (512,), 0, vocab_size) input_torch = torch.from_numpy(np.array(input)).to(torch.int64).unsqueeze(0) model.eval() with torch.no_grad(): torch_out = model(input_torch) torch_out = torch_out.logits[0].detach().cpu().numpy() with tempfile.TemporaryDirectory() as tmpdir: lev_config = converter.config_from_hf_config(config) model.save_pretrained(tmpdir) loaded_checkpoint = converter.load_state_dict(tmpdir) roundtrip_hf_config = converter.hf_config_from_config(lev_config) for (k, v) in roundtrip_hf_config.__dict__.items(): assert (getattr(roundtrip_hf_config, k) == v), f'{k} {getattr(roundtrip_hf_config, k)} != {v}' Vocab = haliax.Axis('vocab', vocab_size) lev_model = BackpackLMHeadModel.init(Vocab, lev_config, key=PRNGKey(0)) lev_model = lev_model.from_state_dict(loaded_checkpoint) lev_model = inference_mode(lev_model, True) hax_input = haliax.named(input, lev_config.Pos) attn_mask = hax.nn.attention.causal_mask(lev_config.Pos, lev_config.KeyPos) with jax.disable_jit(): lev_out = lev_model(hax_input, attn_mask=attn_mask, key=None).array np.testing.assert_allclose(torch_out, np.array(lev_out), atol=0.01, rtol=0.01) with tempfile.TemporaryDirectory() as tmpdir: converter.save_pretrained(lev_model, tmpdir) model = AutoModelForCausalLM.from_pretrained(tmpdir, trust_remote_code=True) model.eval() with torch.no_grad(): torch_out = model(input_torch) torch_out = torch_out.logits[0].detach().cpu().numpy() np.testing.assert_allclose(torch_out, np.array(lev_out), atol=0.001, rtol=0.001)
def concepts_to_adj_matrices_1hop_neighbours(data): (qc_ids, ac_ids) = data qa_nodes = (set(qc_ids) | set(ac_ids)) extra_nodes = set() for u in (set(qc_ids) | set(ac_ids)): if (u in cpnet.nodes): extra_nodes |= set(cpnet[u]) extra_nodes = (extra_nodes - qa_nodes) schema_graph = ((sorted(qc_ids) + sorted(ac_ids)) + sorted(extra_nodes)) arange = np.arange(len(schema_graph)) qmask = (arange < len(qc_ids)) amask = ((arange >= len(qc_ids)) & (arange < (len(qc_ids) + len(ac_ids)))) (adj, concepts) = concepts2adj(schema_graph) return (adj, concepts, qmask, amask)
@ops.RegisterGradient('BackwardWarp') def _BackwardWarpGrad(op, grad): grad0 = _backward_warp_module.backward_warp_grad(grad, op.inputs[0], op.inputs[1]) return [None, grad0]
def flatten_tables(incoming_base_situ, outgoing_base_situ, incoming_base_spect, outgoing_base_spect): def get_new_rows(base_table_incoming, base_table_outgoing): def get_candidates(schema_id): check_again = False head_cands = base_table_incoming[((base_table_incoming['parent_schema_id'] == schema_id) & (base_table_incoming['incoming_relation_id'] == 0))][['global_id', 'type', 'span_end']].drop_duplicates().get_values().tolist() if (len(head_cands) == 1): head = head_cands[0][0] if (head_cands[0][1] == 'Complex_discourse_unit'): check_again = True else: firste = min([h for h in head_cands], key=(lambda t: t[2])) head = firste[0] if (firste[1] == 'Complex_discourse_unit'): check_again = True return (head, check_again) def get_first_elem(schema_id): (node, check) = get_candidates(schema_id) if check: while check: schema_id = node (node, check) = get_candidates(schema_id) head_index = base_table_incoming[(base_table_incoming['global_id'] == node)].drop_duplicates().index.values[0] return head_index new_table_incoming = [] new_table_outgoing = [] in_drops = [] out_drops = [] cdus = base_table_incoming[(base_table_incoming['type'] == 'Complex_discourse_unit')]['global_id'].drop_duplicates().tolist() for cdu in cdus: first = get_first_elem(cdu) first_elem = base_table_incoming.loc[first].tolist() incs = base_table_incoming[(base_table_incoming['global_id'] == cdu)][['incoming_relation_id', 'relation_type']].get_values().tolist() outs = base_table_outgoing[(base_table_outgoing['global_id'] == cdu)][['outgoing_relation_id', 'relation_type']].get_values().tolist() for inc in incs: new_elem = first_elem[:16] new_elem.extend(inc) new_table_incoming.append(new_elem) for out in outs: new_elem = first_elem[:16] new_elem.extend(out) new_table_outgoing.append(new_elem) if (first_elem[16] == 0): in_drops.append(first) out_first = base_table_outgoing[(base_table_outgoing['parent_schema_id'] == cdu)]['span_end'].idxmin() out_elem = base_table_outgoing.loc[out_first].tolist() if (out_elem[16] == 0): out_drops.append(out_first) new_inc = pd.DataFrame(new_table_incoming, columns=list(base_table_incoming)) new_out = pd.DataFrame(new_table_outgoing, columns=list(base_table_outgoing)) return (new_inc, new_out, in_drops, out_drops) def clean_table(base_table): clean_table = base_table.drop(['schema_composition'], axis=1) clean_table = clean_table[(~ (clean_table['type'] == 'Complex_discourse_unit'))].reset_index(drop=True) clean_table['parent_schema_id'] = 0 return clean_table (add_inc, add_out, drop_in, drop_out) = get_new_rows(incoming_base_situ, outgoing_base_situ) incoming_base_situ = incoming_base_situ.drop(incoming_base_situ.index[drop_in]) outgoing_base_situ = outgoing_base_situ.drop(outgoing_base_situ.index[drop_out]) incoming_base_situ = pd.concat([incoming_base_situ, add_inc]) outgoing_base_situ = pd.concat([outgoing_base_situ, add_out]) flat_inc_situ = clean_table(incoming_base_situ) flat_out_situ = clean_table(outgoing_base_situ) picks = [add_inc, add_out, drop_in, drop_out] output = open((pickle_path + 'flat_changes_list.pkl'), 'wb') pickle.dump(picks, output) output.close() (add_inc, add_out, drop_in, drop_out) = get_new_rows(incoming_base_spect, outgoing_base_spect) incoming_base_spect = incoming_base_spect.drop(incoming_base_spect.index[drop_in]) outgoing_base_spect = outgoing_base_spect.drop(outgoing_base_spect.index[drop_out]) incoming_base_spect = pd.concat([incoming_base_spect, add_inc]) outgoing_base_spect = pd.concat([outgoing_base_spect, add_out]) flat_inc_spect = clean_table(incoming_base_spect) flat_out_spect = clean_table(outgoing_base_spect) return (flat_inc_situ, flat_out_situ, flat_inc_spect, flat_out_spect)
def axilite_read(sim, addr, basename='s_axi_control_'): _write_signal(sim, (basename + 'ARADDR'), addr) _write_signal(sim, (basename + 'ARVALID'), 1) wait_for_handshake(sim, 'AR', basename=basename) _write_signal(sim, (basename + 'ARVALID'), 0) _write_signal(sim, (basename + 'RREADY'), 1) ret_data = wait_for_handshake(sim, 'R', basename=basename) _write_signal(sim, (basename + 'RREADY'), 0) return ret_data
class CyEval(gdb.Function, CythonBase, EvaluateOrExecuteCodeMixin): @gdb_function_value_to_unicode def invoke(self, python_expression): input_type = libpython.PythonCodeExecutor.Py_eval_input return self.evalcode(python_expression, input_type)
def convert_EmailProperty(model, prop, kwargs): kwargs['validators'].append(validators.email()) return get_TextField(kwargs)
def get_custom_class_library_path(): library_filename = glob.glob('build/*custom_class*') assert (len(library_filename) == 1) library_filename = library_filename[0] path = os.path.abspath(library_filename) assert os.path.exists(path), path return path
def find_span(offsets, start, end): start_index = end_index = (- 1) for (i, offset) in enumerate(offsets): if ((start_index < 0) or (start >= offset[0])): start_index = i if ((end_index < 0) and (end <= offset[1])): end_index = i return (start_index, end_index)
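A small worked example of the span search, assuming `find_span` above is in scope and `offsets` holds per-token (start, end) character spans:

```python
offsets = [(0, 5), (6, 10), (11, 18)]
print(find_span(offsets, 6, 10))   # (1, 1): the answer covers the second token
print(find_span(offsets, 0, 18))   # (0, 2): the answer spans all three tokens
```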
def avgpp(cp, size): _check_params(len(cp), size) sample_count = _sample_count(cp, size) prevextremevalid = False prevextreme = None avg = 0 nextreme = 0 prevval = getsample(cp, size, 0) val = getsample(cp, size, 1) prevdiff = (val - prevval) for i in range(1, sample_count): val = getsample(cp, size, i) diff = (val - prevval) if ((diff * prevdiff) < 0): if prevextremevalid: avg += abs((prevval - prevextreme)) nextreme += 1 prevextremevalid = True prevextreme = prevval prevval = val if (diff != 0): prevdiff = diff if (nextreme == 0): return 0 return (avg / nextreme)
def test_ate_causal_graph_builder(): import whynot.traceable_numpy as wnp def covariate_builder(run): return wnp.array([run[0].x1, run[2].x2, run[3].x3]) def outcome_extractor(run): return wnp.sum(run.states[(- 1)].values()) def soft_threshold(x, tau, r=200): return (1.0 / (wnp.exp(((tau * r) - (r * x))) + 1)) def propensity_scores(untreated_run, intervention): run = untreated_run return (1.0 - (0.9 * soft_threshold((run[0].x1 + run[4].x2), tau=4))) intervention = SimpleIntervention(time=3, param=1.0) experiment = wn.DynamicsExperiment(name='test_experiment.', description='testing_ate', simulator=SimpleSimulator(), simulator_config=SimpleConfig(param=2.0, end_time=6), intervention=intervention, state_sampler=(lambda : SimpleState()), propensity_scorer=propensity_scores, outcome_extractor=outcome_extractor, covariate_builder=covariate_builder) dset = experiment.run(num_samples=10, causal_graph=True) graph = dset.causal_graph times = list(range(0, 7)) state_names = SimpleState.variable_names() config_names = SimpleConfig.parameter_names() nodes = copy.deepcopy(list(graph.nodes)) nodes.remove('Treatment') nodes.remove('Outcome') for time in times: for state in state_names: nodes.remove(f'{state}_{time}') for config_name in config_names: nodes.remove(f'PARAM:{config_name}_{time}') assert (len(nodes) == 0) assert (set(graph.graph['covariate_names']) == set(['x1_0', 'x2_2', 'x3_3'])) edges = copy.deepcopy(list(graph.edges)) for outcome_dep in [f'{name}_{times[(- 1)]}' for name in state_names]: edges.remove((outcome_dep, 'Outcome')) edges.remove(('x1_0', 'Treatment')) edges.remove(('x2_4', 'Treatment')) for time in times: if (time >= intervention.time): edges.remove(('Treatment', f'PARAM:param_{time}')) for time in times[:(- 1)]: if ((time % 3) == 0): start_nodes = [f'{name}_{time}' for name in state_names] end_nodes = [f'{name}_{(time + 1)}' for name in state_names] for edge in itertools.product(start_nodes, end_nodes): edges.remove(edge) edges.remove((f'PARAM:param_{time}', f'x2_{(time + 1)}')) elif ((time % 3) == 1): for name in state_names: edges.remove((f'{name}_{time}', f'{name}_{(time + 1)}')) else: edges.remove((f'PARAM:param_{time}', f'x1_{(time + 1)}')) for name in state_names: edges.remove((f'{name}_{time}', f'x2_{(time + 1)}')) assert (len(edges) == 0)
def test_get_workspace_model_nopoi(workspace_factory): w = workspace_factory() m = w.model(poi_name=None) assert (m.config.poi_name is None) assert (m.config.poi_index is None)
def generate_and_evaluate_baseline(out_dir: str, lidarseg_preds_dir: str, lidarseg_method_name: str, det_or_track_preds_dir: str, det_or_track_method_name: str, task: str='tracking', version: str='v1.0-test', dataroot: str='/data/sets/nuscenes', verbose: bool=False) -> None: nusc = NuScenes(version=version, dataroot=dataroot, verbose=verbose) eval_set = nusc.version.split('-')[(- 1)] dir_to_save_panoptic_preds_to = os.path.join(out_dir, task, 'panoptic_predictions', '{}_with_{}'.format(lidarseg_method_name, det_or_track_method_name)) os.makedirs(dir_to_save_panoptic_preds_to, exist_ok=True) dir_of_lidarseg_method_preds = os.path.join(lidarseg_preds_dir, lidarseg_method_name) json_of_preds_by_det_or_track_method = get_prediction_json_path(os.path.join(det_or_track_preds_dir, det_or_track_method_name)) generate_panoptic_labels(nusc, dir_of_lidarseg_method_preds, json_of_preds_by_det_or_track_method, eval_set=eval_set, task=task, out_dir=dir_to_save_panoptic_preds_to) dir_to_save_evaluation_results_to = os.path.join(out_dir, task, 'panoptic_eval_results', '{}_with_{}'.format(lidarseg_method_name, det_or_track_method_name)) os.makedirs(dir_to_save_evaluation_results_to, exist_ok=True) dir_of_panoptic_preds = dir_to_save_panoptic_preds_to evaluator = NuScenesPanopticEval(nusc=nusc, results_folder=dir_of_panoptic_preds, eval_set=eval_set, task=task, min_inst_points=15, out_dir=dir_to_save_evaluation_results_to, verbose=verbose) evaluator.evaluate() print('Evaluation for panoptic {} using predictions merged from {} and {} saved at {}.'.format(task, lidarseg_method_name, det_or_track_method_name, dir_to_save_evaluation_results_to))
def validate_password(actual_pw, typed_pw): if (len(actual_pw) != len(typed_pw)): return False for i in range(len(actual_pw)): if (actual_pw[i] != typed_pw[i]): return False return True
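The early-exit loop above leaks, via timing, how many leading characters matched. A constant-time variant (a different technique than the original, using only the standard library) looks like:

```python
import hmac

def validate_password_ct(actual_pw: str, typed_pw: str) -> bool:
    # hmac.compare_digest runs in time independent of where the inputs differ
    return hmac.compare_digest(actual_pw.encode(), typed_pw.encode())
```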
def test_person_name(): error_sentence_1 = '' import jieba.posseg print(jieba.posseg.lcut(error_sentence_1)) correct_sent = ct.correct(error_sentence_1) print('original sentence:{} => correct sentence:{}'.format(error_sentence_1, correct_sent)) error_sentence_1 = '' correct_sent = ct.correct(error_sentence_1) print('original sentence:{} => correct sentence:{}'.format(error_sentence_1, correct_sent))
class BaseIntervention(): def __init__(self, config_class, time, **kwargs): self.time = time config_args = set((f.name for f in dataclasses.fields(config_class))) for arg in kwargs: if (arg not in config_args): raise TypeError(f'__init__() got an unexpected keyword argument {arg}!') self.updates = kwargs
class _RepoWorkDir(): def __init__(self, repo, version): if (not isinstance(repo, _Repo)): repo = _get_repo(repo) _simple_validate_version(version) self.repo = repo self.version = version if version: (self.version_date, self.version_rev) = version.split('-') else: (self.version_date, self.version_rev) = (None, None) def get_path(self): return _repo_path(self.repo.name, self.version) def validate_version(self, version): if version: assert self.version (date, rev) = version.split('-') _simple_validate_date(date) _simple_validate_commit_rev(rev) if (not self.version_date.startswith(date)): raise common.InvalidVersion(('Version %s, date does not match in %s' % (self.version, version))) if (len(rev) <= len(self.version_rev)): if (not self.version_rev.startswith(rev)): raise common.InvalidVersion(('Version %s, revision does not match in %s' % (self.version, version))) elif (not rev.startswith(self.version_rev)): raise common.InvalidVersion(('Version %s, revision does not match in %s' % (self.version, version))) elif self.version: raise common.InvalidVersion(('Development version, got %s' % (version,)))
def load(model, opt): if (vars(opt).get('start_from', None) is not None): assert os.path.isdir(opt.start_from), (' %s must be a path' % opt.start_from) assert os.path.isfile(os.path.join(opt.start_from, (('infos_' + opt.id) + '.pkl'))), ('infos.pkl file does not exist in path %s' % opt.start_from) utils.load_state_dict(model, torch.load(os.path.join(opt.start_from, 'model.pth')))
class MelGeneralizedCepstrumToSpectrum(nn.Module): def __init__(self, cep_order, fft_length, alpha=0, gamma=0, norm=False, mul=False, out_format='power', n_fft=512): super(MelGeneralizedCepstrumToSpectrum, self).__init__() self.fft_length = fft_length assert (2 <= self.fft_length) if ((out_format == 0) or (out_format == 'db')): c = (20 / math.log(10)) self.convert = (lambda x: (x.real * c)) elif ((out_format == 1) or (out_format == 'log-magnitude')): self.convert = (lambda x: x.real) elif ((out_format == 2) or (out_format == 'magnitude')): self.convert = (lambda x: torch.exp(x.real)) elif ((out_format == 3) or (out_format == 'power')): self.convert = (lambda x: torch.exp((2 * x.real))) elif ((out_format == 4) or (out_format == 'cycle')): self.convert = (lambda x: (x.imag / math.pi)) elif ((out_format == 5) or (out_format == 'radian')): self.convert = (lambda x: x.imag) elif ((out_format == 6) or (out_format == 'degree')): c = (180 / math.pi) self.convert = (lambda x: (x.imag * c)) elif (out_format == 'complex'): self.convert = (lambda x: torch.polar(torch.exp(x.real), x.imag)) else: raise ValueError(f'out_format {out_format} is not supported') self.mgc2c = MelGeneralizedCepstrumToMelGeneralizedCepstrum(cep_order, (fft_length // 2), in_alpha=alpha, in_gamma=gamma, in_norm=norm, in_mul=mul, n_fft=n_fft) def forward(self, mc): c = self.mgc2c(mc) sp = torch.fft.rfft(c, n=self.fft_length) sp = self.convert(sp) return sp
class GoogleMapGeocoding(VirtualFunctionTool): name = 'GoogleMapGeocoding' summary = 'Convert a location address to geographic coordinates.' parameters: List[ArgParameter] = [{'name': 'location_address', 'type': 'string', 'description': "The address of the location, in the format of 'street address, city, zip code'.", 'required': True}] returns: List[ArgReturn] = [{'name': 'coordinates', 'type': 'string', 'description': "The geographic coordinates of the location in the format of 'latitude,longitude'."}] exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The 'location_address' does not exist."}, {'name': 'InvalidRequestException', 'description': "The 'location_address' is not in the valid format."}]
def train(nsteps: int, trainer: Trainer, beta: (float | torch.Tensor), nlog: int=1, nprint: int=1, x: Optional[torch.Tensor]=None, grab: Optional[bool]=None) -> tuple[(torch.Tensor, dict)]: beta = (torch.tensor(beta) if isinstance(beta, float) else beta) history = {} if (x is None): state = exp.trainer.dynamics.random_state(beta) x = state.x assert (x is not None) for step in range(nsteps): log.info(f'STEP: {step}') (x, metrics) = train_step(x, beta=beta, trainer=trainer) if ((step > 0) and ((step % nprint) == 0)): print_dict(metrics, grab=grab) if ((step > 0) and ((step % nlog) == 0)): for (key, val) in metrics.items(): try: history[key].append(val) except KeyError: history[key] = [val] return (x, history)
def create_cpg_net(train=True): logger = logging.getLogger(__name__) FREEZE_CONV_BODY = cfg.TRAIN.FREEZE_CONV_BODY FREEZE_AT = cfg.TRAIN.FREEZE_AT WSL_CSC = cfg.WSL.CSC CENTER_LOSS = cfg.WSL.CENTER_LOSS MIN_ENTROPY_LOSS = cfg.WSL.MIN_ENTROPY_LOSS MASK_ON = cfg.MODEL.MASK_ON EXECUTION_TYPE = cfg.MODEL.EXECUTION_TYPE cfg.immutable(False) cfg.TRAIN.FREEZE_CONV_BODY = False cfg.TRAIN.FREEZE_AT = 0 cfg.WSL.CSC = False cfg.WSL.CENTER_LOSS = False cfg.WSL.MIN_ENTROPY_LOSS = False cfg.MODEL.MASK_ON = False cfg.MODEL.EXECUTION_TYPE = b'simple' cfg.immutable(True) output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True) for gpu_id in range(cfg.NUM_GPUS): logger.info('Building model: {}'.format((((('gpu_' + str(gpu_id)) + '_') + cfg.MODEL.TYPE) + '_cpg'))) model = model_builder_wsl.create((((('gpu_' + str(gpu_id)) + '_') + cfg.MODEL.TYPE) + '_cpg'), train=train) if (cfg.MEMONGER and False): (start_op, end_op) = OP_surgery_head(model, gpu_id) optimize_memory_cpg(model, gpu_id) OP_surgery_back(model, gpu_id, start_op, end_op) namescope = 'gpu_{}/'.format(gpu_id) model.net._net.op[0].input[0] = ((namescope + cfg.WSL.CPG_PRE_BLOB) + '_grad') model.net._net.op[(- 1)].output[(- 1)] = ((namescope + cfg.WSL.CPG_DATA_BLOB) + '_grad') else: OP_surgery(model, gpu_id) Input_surgery(model, gpu_id) workspace.CreateBlob((((('gpu_' + str(gpu_id)) + '/') + cfg.WSL.CPG_PRE_BLOB) + '_grad')) optimize_memory_cpg(model, gpu_id) workspace.CreateNet(model.net) logger.info('Outputs saved to: {:s}'.format(os.path.abspath(output_dir))) dump_proto_files(model, output_dir) cfg.immutable(False) cfg.TRAIN.FREEZE_CONV_BODY = FREEZE_CONV_BODY cfg.TRAIN.FREEZE_AT = FREEZE_AT cfg.WSL.CSC = WSL_CSC cfg.WSL.CENTER_LOSS = CENTER_LOSS cfg.WSL.MIN_ENTROPY_LOSS = MIN_ENTROPY_LOSS cfg.MODEL.MASK_ON = MASK_ON cfg.MODEL.EXECUTION_TYPE = EXECUTION_TYPE cfg.immutable(True)
class ImageFolder(DatasetFolder): def __init__(self, root: str, transform: Optional[Callable]=None, target_transform: Optional[Callable]=None, loader: Callable[([str], Any)]=default_loader, is_valid_file: Optional[Callable[([str], bool)]]=None): super().__init__(root, loader, (IMG_EXTENSIONS if (is_valid_file is None) else None), transform=transform, target_transform=target_transform, is_valid_file=is_valid_file) self.imgs = self.samples
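Typical usage, with a placeholder root directory laid out as root/class_name/*.jpg:

```python
from torchvision import datasets, transforms

ds = datasets.ImageFolder('data/train', transform=transforms.ToTensor())  # 'data/train' is a placeholder path
img, label = ds[0]  # label indexes into ds.classes
```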
class Dict2Obj(object): def __init__(self, dictionary): for key in dictionary.keys(): setattr(self, key, dictionary[key]) def __repr__(self): attrs = str([x for x in self.__dict__]) return ('<Dict2Obj: %s>' % attrs)
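A usage sketch, assuming the `Dict2Obj` class above is in scope:

```python
cfg = Dict2Obj({'lr': 0.1, 'epochs': 3})
print(cfg.lr, cfg.epochs)  # 0.1 3
print(cfg)                 # <Dict2Obj: ['lr', 'epochs']>
```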
def TransformedLoader(loader, func, transforms, workers=None, batch_size=None, do_tqdm=False, augment=False, fraction=1.0): new_ims = [] new_targs = [] total_len = len(loader) enum_loader = enumerate(loader) it = (enum_loader if (not do_tqdm) else tqdm(enum_loader, total=total_len)) for (i, (im, targ)) in it: (new_im, new_targ) = func(im, targ) if (augment or ((i / float(total_len)) > fraction)): new_ims.append(im.cpu()) new_targs.append(targ.cpu()) if ((i / float(total_len)) <= fraction): new_ims.append(new_im.cpu()) new_targs.append(new_targ.cpu()) dataset = folder.TensorDataset(ch.cat(new_ims, 0), ch.cat(new_targs, 0), transform=transforms) return ch.utils.data.DataLoader(dataset, num_workers=workers, batch_size=batch_size)
@pytest.mark.parametrize('y_pred', [np.array(y_pred_list), y_pred_list]) def test_absolute_conformity_score_consistency(y_pred: NDArray) -> None: abs_conf_score = AbsoluteConformityScore() signed_conf_scores = abs_conf_score.get_signed_conformity_scores(X_toy, y_toy, y_pred) y_obs = abs_conf_score.get_estimation_distribution(X_toy, y_pred, signed_conf_scores) np.testing.assert_allclose(y_obs, y_toy)
def plot(runs, group, from_iter: int=0, loss_group=None): xs = [] ys = [] cs = [] loss_group = (loss_group or group) for r in runs: hist = r.history(keys=[f'validation/{group}/accuracy/total', f'validation/{loss_group}/loss', 'iteration'], pandas=False) for p in hist: if (p['iteration'] < from_iter): continue xs.append(p[f'validation/{loss_group}/loss']) ys.append((p[f'validation/{group}/accuracy/total'] * 100)) cs.append(p['iteration']) sc = plt.scatter(xs, ys, c=cs) plt.xscale('log') cbar = plt.colorbar(sc, ticks=[min(cs), max(cs)], pad=0.02) cbar.ax.set_yticklabels([f'{(min(cs) // 1000)}k', f'{(max(cs) // 1000)}k']) fig.axes[0].yaxis.set_label_coords((- 0.12), 0.45)
@pytest.mark.parametrize('direction', ['forwards', 'backwards']) @pytest.mark.parametrize('n', [10, 100]) def test_linear_union_sequence(n, direction): elements = get_elements(n) dis = DisjointSet(elements) assert (elements == list(dis)) indices = list(range((n - 1))) if (direction == 'backwards'): indices = indices[::(- 1)] for (it, i) in enumerate(indices): assert (not dis.connected(elements[i], elements[(i + 1)])) assert dis.merge(elements[i], elements[(i + 1)]) assert dis.connected(elements[i], elements[(i + 1)]) assert (dis.n_subsets == ((n - 1) - it)) roots = [dis[i] for i in elements] if (direction == 'forwards'): assert all(((elements[0] == r) for r in roots)) else: assert all(((elements[(- 2)] == r) for r in roots)) assert (not dis.merge(elements[0], elements[(- 1)]))
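The structure under test matches SciPy's public `DisjointSet`; a minimal sketch:

```python
from scipy.cluster.hierarchy import DisjointSet

dis = DisjointSet(['a', 'b', 'c'])
dis.merge('a', 'b')
print(dis.connected('a', 'b'), dis.n_subsets)  # True 2
```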
class SUN(): def __init__(self) -> None: super(SUN, self).__init__() def exp(self, x: Tensor, u: Tensor) -> Tensor: return (x @ expm((x.conj().transpose((- 2), (- 1)) @ u))) def log(self, x: Tensor, y: Tensor) -> Tensor: (_, n, _) = x.shape assert (n == 3), 'Operation supported only for SU(3)' return (x @ log3x3((x.conj().transpose((- 2), (- 1)) @ y))) def proju(self, x: Tensor, u: Tensor) -> Tensor: (_, n, _) = x.shape algebra_elem = torch.linalg.solve(u, x)[0] B = ((algebra_elem - algebra_elem.conj().transpose((- 2), (- 1))) / 2) trace = torch.einsum('bii->b', B) B = (B - (((1 / n) * trace.unsqueeze((- 1)).unsqueeze((- 1))) * torch.eye(n).repeat(x.shape[0], 1, 1))) assert (torch.abs(torch.mean(torch.einsum('bii->b', B))) < 1e-06) return B
class SchedulingConstraint(util.ContentHashClass): def __init__(self, topbat=0, topifm=0, topofm=0, update_dict=None): if any((((n < 0) or (not isinstance(n, numbers.Integral))) for n in [topbat, topifm, topofm])): raise ValueError('SchedulingConstraint: constrained factors must be positive integers.') if (not update_dict): update_dict = {} if (not isinstance(update_dict, dict)): raise TypeError('SchedulingConstraint: update_dict must be a dict instance.') update_dict = util.HashableDict.fromdict(update_dict) for val in update_dict.values(): if (not callable(val)): raise TypeError('SchedulingConstraint: values in update_dict must be callable.') self.topbat = topbat self.topifm = topifm self.topofm = topofm self.update_dict = update_dict def is_valid_top_bl(self, top_bl_t, top_bl_ord): if self.update_dict: raise ValueError('SchedulingConstraint: update_dict is not empty, rules have not been updated.') if (self.topbat and (self.topbat != top_bl_t[le.BAT])): return False if (self.topifm and (self.topifm != top_bl_t[le.IFM])): return False if (self.topofm and (self.topofm != top_bl_t[le.OFM])): return False del top_bl_ord return True def is_valid_part(self, part): if self.update_dict: raise ValueError('SchedulingConstraint: update_dict is not empty, rules have not been updated.') return True def filter_gen_ts(self, gen_tifm, gen_tofm, gen_tbat): return (self._filter_gen(gen_tifm, self.topifm), self._filter_gen(gen_tofm, self.topofm), self._filter_gen(gen_tbat, self.topbat)) def update_by_prev(self, prev_results): for layer_name in self.update_dict: self.update_dict[layer_name](self, prev_results[layer_name]) self.update_dict = util.HashableDict() @staticmethod def _filter_gen(gen, topt=0): for tpl in gen: if (topt in (0, tpl[0])): (yield tpl) def __repr__(self): return '{}({})'.format(self.__class__.__name__, ', '.join(['{}={}'.format(k, repr(v)) for (k, v) in self.__dict__.items()]))
def find_similarity(s1, s2): with torch.no_grad(): s1 = [make_example(x, model) for x in s1] s2 = [make_example(x, model) for x in s2] (wx1, wl1, wm1) = model.torchify_batch(s1) (wx2, wl2, wm2) = model.torchify_batch(s2) scores = model.scoring_function(wx1, wm1, wl1, wx2, wm2, wl2) return [x.item() for x in scores]
def webqsp_load_and_cache_gen_examples(args, tokenizer, evaluate=False): logger = args.logger if ((args.local_rank not in [(- 1), 0]) and (not evaluate)): torch.distributed.barrier() input_dir = (args.data_dir if args.data_dir else '.') split_file = (args.predict_file if evaluate else args.train_file) dataset_id = os.path.basename(split_file).split('_')[0] split_id = os.path.basename(split_file).split('_')[1] cached_features_file = os.path.join('feature_cache', 'gen_{}_{}_{}_{}'.format(dataset_id, split_id, args.model_type, args.top_k_candidates)) if (os.path.exists(cached_features_file) and (not args.overwrite_cache)): logger.info('Loading features from cached file %s', cached_features_file) features = torch.load(cached_features_file) else: logger.info('Creating features from dataset file at %s', input_dir) candidate_file = (args.predict_file if evaluate else args.train_file) dataset_file = join('outputs', f'WebQSP.{split_id}.expr.json') examples = webqsp_read_gen_examples_from_json(dataset_file, candidate_file, is_eval=evaluate) features = extract_gen_features_from_examples(args, tokenizer, examples) if (args.local_rank in [(- 1), 0]): logger.info('Saving features into cached file %s', cached_features_file) torch.save(features, cached_features_file) if ((args.local_rank == 0) and (not evaluate)): torch.distributed.barrier() return ListDataset(features)
def ensure_compatible_hparams(hparams, default_hparams, hparams_path): default_hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path) default_config = default_hparams.values() config = hparams.values() for key in default_config: if (key not in config): hparams.add_hparam(key, default_config[key]) if default_hparams.override_loaded_hparams: for key in default_config: if (getattr(hparams, key) != default_config[key]): utils.print_out(('# Updating hparams.%s: %s -> %s' % (key, str(getattr(hparams, key)), str(default_config[key])))) setattr(hparams, key, default_config[key]) return hparams
def bilinear_deconv2d(input, deconv_info, is_train, name='bilinear_deconv2d', info=False, activation_fn=tf.nn.relu, norm='batch'): with tf.variable_scope(name): output_shape = deconv_info[0] k = deconv_info[1] s = deconv_info[2] h = (int(input.get_shape()[1]) * s) w = (int(input.get_shape()[2]) * s) _ = tf.image.resize_bilinear(input, [h, w]) _ = conv2d(_, output_shape, is_train, k_h=k, k_w=k, s=1, norm=False, activation_fn=None) _ = bn_act(_, is_train, norm=norm, activation_fn=activation_fn) if info: log.info('{} {}'.format(name, _.get_shape().as_list())) return _
def setup_files_and_dirs(outdir, hdfs): hdfs.create_dir(f'/data') if (not os.path.exists(outdir)): os.mkdir(outdir) os.system(f'dd if=/dev/zero of={outdir}/10GBdata.bin bs=128KB count=78125')
def module_init(): root_module = Module('ns.propagation', cpp_namespace='::ns3') return root_module
def beta2(rce, T1, T2): r = rce[0] c = rce[1] e = rce[2] assert (T1[(r, c)] == e) assert (e >= 0) for x in range(T1.ncols()): if (T2[(r, x)] == e): return (r, x, e) raise ValueError
def get_test_dataset(data_args: Dict[(str, Any)]) -> Tuple[(Optional[PoseSegmentsDataset], Optional[DataLoader])]: if (not args.test): return (None, None) dataset = get_dataset(split='test', **data_args) loader = DataLoader(dataset, batch_size=args.batch_size_devtest, shuffle=False, collate_fn=zero_pad_collator) return (dataset, loader)
def labels_to_onehots(labels, num_classes): batch_size = labels.get_shape().as_list()[0] with tf.name_scope('one_hot'): labels = tf.expand_dims(labels, 1) indices = tf.expand_dims(tf.range(0, batch_size, 1), 1) sparse_ptrs = tf.concat(1, [indices, labels], name='ptrs') onehots = tf.sparse_to_dense(sparse_ptrs, [batch_size, num_classes], 1.0, 0.0) return onehots
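The `sparse_to_dense` construction predates `tf.one_hot`; in any modern TensorFlow the equivalent is simply:

```python
import tensorflow as tf

labels = tf.constant([0, 2, 1])
onehots = tf.one_hot(labels, depth=3)  # shape (3, 3), dtype float32
```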
def simplify(f, algorithm='maxima', **kwds): try: return f.simplify(algorithm=algorithm, **kwds) except (TypeError, AttributeError): pass try: return f.simplify() except AttributeError: return f
class CharacterTargets(Vocabulary): def __init__(self, vocab_file, seq_postfix=None, unknown_label='', labels=None, **kwargs): super(CharacterTargets, self).__init__(vocab_file=vocab_file, seq_postfix=seq_postfix, unknown_label=unknown_label, labels=labels, **kwargs) def get_seq(self, sentence): if (self.unknown_label is not None): seq = [self._vocab.get(k, self.unknown_label_id) for k in sentence] else: seq = [self._vocab[k] for k in sentence] return (seq + self.seq_postfix) def get_seq_labels(self, seq): return ''.join(map(self._labels.__getitem__, seq))
def readoutput(outtxt): output = {} with open(outtxt) as f: for line in f.readlines(): line = line.strip().split(' ') if (len(line) == 2): (idx, score) = (int(line[0]), float(line[1])) output[idx] = score return output
def build_transform(image_augmentation, backbone_name, size=SIZE, interpolation=INTERPOLATION, pixel_mean=IMAGENET_PIXEL_MEAN, pixel_std=IMAGENET_PIXEL_STD, crop_padding=CROP_PADDING, rrcrop_scale=RRCROP_SCALE): clip_mode = (backbone_name in CLIP_MODELS) if clip_mode: pixel_mean = CLIP_PIXEL_MEAN pixel_std = CLIP_PIXEL_STD if (backbone_name == 'RN50x4'): size = (288, 288) elif (backbone_name == 'RN50x16'): size = (384, 384) elif (backbone_name == 'ViT-L/'): size = (336, 336) normalize = Normalize(mean=pixel_mean, std=pixel_std) if (image_augmentation == 'none'): transform = Compose([Resize(size=max(size), interpolation=interpolation), CenterCrop(size=size), ToTensor(), normalize]) elif (image_augmentation == 'flip'): transform = Compose([Resize(size=max(size), interpolation=interpolation), CenterCrop(size=size), RandomHorizontalFlip(p=1.0), ToTensor(), normalize]) elif (image_augmentation == 'randomcrop'): transform = Compose([Resize(size=max(size), interpolation=interpolation), RandomCrop(size=size, padding=crop_padding), RandomHorizontalFlip(p=0.5), ToTensor(), normalize]) elif (image_augmentation == 'randomresizedcrop'): transform = Compose([RandomResizedCrop(size=size, scale=rrcrop_scale, interpolation=interpolation), RandomHorizontalFlip(p=0.5), ToTensor(), normalize]) elif (image_augmentation == 'twoCrops'): transform_ = Compose([RandomResizedCrop(size=size, scale=rrcrop_scale, interpolation=interpolation), RandomHorizontalFlip(p=0.5), ToTensor(), normalize]) transform = TransformTwice(transform_) else: raise ValueError('Invalid image augmentation method: {}'.format(image_augmentation)) return transform
def gen_lobatto(max_order): assert (max_order > 2) x = sm.symbols('x') lobs = [0, 1] lobs[0] = ((1 - x) / 2) lobs[1] = ((1 + x) / 2) dlobs = [lob.diff('x') for lob in lobs] legs = [sm.legendre(0, 'y')] clegs = [sm.ccode(legs[0])] dlegs = [sm.legendre(0, 'y').diff('y')] cdlegs = [sm.ccode(dlegs[0])] clobs = [sm.ccode(lob) for lob in lobs] cdlobs = [sm.ccode(dlob) for dlob in dlobs] denoms = [] for ii in range(2, (max_order + 1)): coef = sm.sympify(('sqrt(2 * (2 * %s - 1)) / 2' % ii)) leg = sm.legendre((ii - 1), 'y') pleg = leg.as_poly() coefs = pleg.all_coeffs() denom = max((sm.denom(val) for val in coefs)) cleg = sm.ccode((sm.horner((leg * denom)) / denom)) dleg = leg.diff('y') cdleg = sm.ccode((sm.horner((dleg * denom)) / denom)) lob = sm.simplify((coef * sm.integrate(leg, ('y', (- 1), x)))) lobnc = sm.simplify(sm.integrate(leg, ('y', (- 1), x))) plobnc = lobnc.as_poly() coefs = plobnc.all_coeffs() denom = (sm.denom(coef) * max((sm.denom(val) for val in coefs))) clob = sm.ccode((sm.horner((lob * denom)) / denom)) dlob = lob.diff('x') cdlob = sm.ccode((sm.horner((dlob * denom)) / denom)) legs.append(leg) clegs.append(cleg) dlegs.append(dleg) cdlegs.append(cdleg) lobs.append(lob) clobs.append(clob) dlobs.append(dlob) cdlobs.append(cdlob) denoms.append(denom) coef = sm.sympify(('sqrt(2 * (2 * %s - 1)) / 2' % (max_order + 1))) leg = sm.legendre(max_order, 'y') pleg = leg.as_poly() coefs = pleg.all_coeffs() denom = max((sm.denom(val) for val in coefs)) cleg = sm.ccode((sm.horner((leg * denom)) / denom)) dleg = leg.diff('y') cdleg = sm.ccode((sm.horner((dleg * denom)) / denom)) legs.append(leg) clegs.append(cleg) dlegs.append(dleg) cdlegs.append(cdleg) kerns = [] ckerns = [] dkerns = [] cdkerns = [] for (ii, lob) in enumerate(lobs[2:]): kern = sm.simplify((lob / (lobs[0] * lobs[1]))) dkern = kern.diff('x') denom = (denoms[ii] / 4) ckern = sm.ccode((sm.horner((kern * denom)) / denom)) cdkern = sm.ccode((sm.horner((dkern * denom)) / denom)) kerns.append(kern) ckerns.append(ckern) dkerns.append(dkern) cdkerns.append(cdkern) return (legs, clegs, dlegs, cdlegs, lobs, clobs, dlobs, cdlobs, kerns, ckerns, dkerns, cdkerns, denoms)
def CremonaModularSymbols(level, sign=0, cuspidal=False, verbose=0): from .homspace import ModularSymbols return ModularSymbols(level=level, sign=sign, cuspidal=cuspidal, verbose=verbose)
@parse_decorator(list()) def get_data(html): cont = get_weibo_infos_right(html) return get_weibo_list(cont)
class UCBSelectPolicy(SelectPolicy): def __init__(self, explore_coeff: float=1.0, fuzzer: GPTFuzzer=None): super().__init__(fuzzer) self.step = 0 self.last_choice_index = None self.explore_coeff = explore_coeff self.rewards = [0 for _ in range(len(self.fuzzer.prompt_nodes))] def select(self) -> PromptNode: if (len(self.fuzzer.prompt_nodes) > len(self.rewards)): self.rewards.extend([0 for _ in range((len(self.fuzzer.prompt_nodes) - len(self.rewards)))]) self.step += 1 scores = np.zeros(len(self.fuzzer.prompt_nodes)) for (i, prompt_node) in enumerate(self.fuzzer.prompt_nodes): smooth_visited_num = (prompt_node.visited_num + 1) scores[i] = ((self.rewards[i] / smooth_visited_num) + (self.explore_coeff * np.sqrt(((2 * np.log(self.step)) / smooth_visited_num)))) self.last_choice_index = np.argmax(scores) self.fuzzer.prompt_nodes[self.last_choice_index].visited_num += 1 return self.fuzzer.prompt_nodes[self.last_choice_index] def update(self, prompt_nodes: 'list[PromptNode]'): succ_num = sum([prompt_node.num_jailbreak for prompt_node in prompt_nodes]) self.rewards[self.last_choice_index] += (succ_num / len(self.fuzzer.questions))
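The selection score is the standard UCB1 rule with a +1-smoothed visit count; factored out as a standalone sketch (names are ours):

```python
import numpy as np

def ucb_score(total_reward, visits, step, c=1.0):
    # exploitation term + exploration bonus, as computed in select() above
    smooth = visits + 1
    return total_reward / smooth + c * np.sqrt(2 * np.log(step) / smooth)
```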
@pytest.mark.parametrize('loader_options, from_schema_options', (({'base_url': ...}, {}), ({}, {'hypothesis_settings': hypothesis.settings(deadline=1)}))) @pytest.mark.operations('slow') def test_exceptions(schema_url, app, loader_options, from_schema_options): schema = oas_loaders.from_uri(schema_url, **loader_options) results = from_schema(schema, **from_schema_options).execute() assert any(((event.status == Status.error) for event in results if isinstance(event, events.AfterExecution)))
class PyCUDAFunctionManager(CUDAFunctionManager):
    def __init__(self, num_agents: int = 1, num_envs: int = 1,
                 blocks_per_env: int = 1, process_id: int = 0):
        super().__init__(num_agents=num_agents, num_envs=num_envs,
                         blocks_per_env=blocks_per_env, process_id=process_id)
        self._CUDA_module = None
        self._cuda_functions = {}
        self._cuda_function_names = []
        cc = Context.get_device().compute_capability()
        self.arch = f'sm_{cc[0]}{cc[1]}'
        valid = validate_device_setup(arch=self.arch,
                                      num_blocks=self._grid[0],
                                      threads_per_block=self._block[0],
                                      blocks_per_env=self._blocks_per_env)
        if not valid:
            raise Exception('The simulation setup fails to pass the validation')

    def load_cuda_from_source_code(self, code: str, default_functions_included: bool = True):
        assert self._CUDA_module is None, \
            'CUDA module has already been loaded, not allowed to load twice'
        self._CUDA_module = SourceModule(code, no_extern_c=True)
        logging.info('Successfully built and loaded the source code')
        if default_functions_included:
            self.initialize_default_functions()

    def load_cuda_from_binary_file(self, cubin: str, default_functions_included: bool = True):
        assert self._CUDA_module is None, \
            'CUDA module has already been loaded, not allowed to load twice'
        self._CUDA_module = cuda_driver.module_from_file(cubin)
        logging.info(f'Successfully loaded the cubin_file from {cubin}')
        if default_functions_included:
            self.initialize_default_functions()

    def compile_and_load_cuda(self, env_name: str, template_header_file: str,
                              template_runner_file: str,
                              template_path: Optional[str] = None,
                              default_functions_included: bool = True,
                              customized_env_registrar: Optional[EnvironmentRegistrar] = None,
                              event_messenger=None):
        bin_path = f'{get_project_root()}/warp_drive/cuda_bin'
        cubin_file = f'{bin_path}/env_runner.fatbin'
        if self._process_id > 0:
            # Worker processes wait for the main process to finish compiling.
            assert event_messenger is not None, \
                'Event messenger is required to sync up the compilation status among processes.'
            event_messenger.wait(timeout=12)
            if not event_messenger.is_set():
                raise Exception(f'Process {self._process_id} fails to get the '
                                f'successful compilation message ...')
        else:
            header_path = f'{get_project_root()}/warp_drive/cuda_includes'
            if template_path is None:
                template_path = f'{get_project_root()}/warp_drive/cuda_includes'
            update_env_header(template_header_file=template_header_file,
                              path=template_path,
                              num_agents=self._num_agents,
                              num_envs=self._num_envs,
                              blocks_per_env=self._blocks_per_env)
            update_env_runner(template_runner_file=template_runner_file,
                              path=template_path,
                              env_name=env_name,
                              customized_env_registrar=customized_env_registrar)
            check_env_header(header_file='env_config.h', path=header_path,
                             num_envs=self._num_envs, num_agents=self._num_agents,
                             blocks_per_env=self._blocks_per_env)
            logging.debug(
                f'header file {header_path}/env_config.h has '
                f'number_agents: {self._num_agents}, '
                f'num_agents per block: {self.block[0]}, '
                f'num_envs: {self._num_envs}, '
                f'num of blocks: {self.grid[0]} and '
                f'blocks_per_env: {self._blocks_per_env} '
                f'that are consistent with the block and the grid'
            )
            main_file = f'{header_path}/env_runner.cu'
            logging.info(f'Compiling {main_file} -> {cubin_file}')
            self._compile(main_file, cubin_file, arch=self.arch)
            if event_messenger is not None:
                event_messenger.set()
        self.load_cuda_from_binary_file(cubin=cubin_file,
                                        default_functions_included=default_functions_included)

    # NOTE: declared without `self` in the source; assumed to be a staticmethod,
    # since it is invoked as self._compile(main_file, cubin_file, arch=...).
    @staticmethod
    def _compile(main_file, cubin_file, arch=None):
        bin_path = f'{get_project_root()}/warp_drive/cuda_bin'
        mkbin = f'mkdir -p {bin_path}'
        with subprocess.Popen(mkbin, shell=True, stderr=subprocess.STDOUT) as mkbin_process:
            if mkbin_process.wait() != 0:
                raise Exception('make bin file failed ...')
        logging.info(f'Successfully mkdir the binary folder {bin_path}')
        if os.path.exists(cubin_file):
            os.remove(cubin_file)
        try:
            if arch is None:
                cc = Context.get_device().compute_capability()
                arch = f'sm_{cc[0]}{cc[1]}'
            cmd = f'nvcc --fatbin -arch={arch} {main_file} -o {cubin_file}'
            with subprocess.Popen(cmd, shell=True, stderr=subprocess.STDOUT) as make_process:
                if make_process.wait() != 0:
                    raise Exception(
                        f'build failed when running the following build ...: {cmd}\n'
                        f'try to build the fatbin hybrid version of '
                        f'virtual PTX + gpu binary ...'
                    )
            logging.info(f'Running cmd: {cmd}')
            logging.info(f'Successfully built the cubin_file from {main_file} to {cubin_file}')
            return
        except Exception as err:
            logging.error(err)

        # Fallback: build a fatbin with progressively fewer embedded gpu-code targets.
        arch_codes = ['-code=sm_37', '-code=sm_50', '-code=sm_60',
                      '-code=sm_70', '-code=sm_80']
        compiler = 'nvcc --fatbin -arch=compute_37 -code=compute_37'
        in_out_fname = f'{main_file} -o {cubin_file}'
        build_success = False
        for i in range(len(arch_codes)):
            try:
                cmd = ' '.join([compiler] + arch_codes[:len(arch_codes) - i] + [in_out_fname])
                with subprocess.Popen(cmd, shell=True,
                                      stderr=subprocess.STDOUT) as make_process:
                    if make_process.wait() != 0:
                        raise Exception(
                            f'build failed when running the following build ...: {cmd}\n'
                            f'try to build the lower gpu-code version ...'
                        )
                logging.info(f'Running cmd: {cmd}')
                logging.info(f'Successfully built the cubin_file from '
                             f'{main_file} to {cubin_file}')
                build_success = True
                break
            except Exception as err:
                logging.error(err)
        if not build_success:
            raise Exception('build failed ...')

    def initialize_default_functions(self):
        default_func_names = [
            'reset_log_mask', 'update_log_mask',
            'log_one_step_in_float', 'log_one_step_in_int',
            'reset_in_float_when_done_2d', 'reset_in_int_when_done_2d',
            'reset_in_float_when_done_3d', 'reset_in_int_when_done_3d',
            'undo_done_flag_and_reset_timestep',
            'init_random', 'free_random', 'sample_actions',
        ]
        self.initialize_functions(default_func_names)
        self._default_functions_initialized = True
        logging.info('Successfully initialized the default CUDA functions '
                     'managed by the CUDAFunctionManager')

    def initialize_functions(self, func_names: Optional[list] = None):
        assert self._CUDA_module is not None, (
            'CUDA module has not yet been loaded, call load_cuda_from_source_code(code) '
            'or load_cuda_from_binary_file(file) first'
        )
        for fname in func_names:
            assert fname not in self._cuda_functions
            assert fname not in self._cuda_function_names
            logging.info(f'starting to load the cuda kernel function: {fname} '
                         f'from the CUDA module')
            self._cuda_functions[fname] = self._CUDA_module.get_function(fname)
            self._cuda_function_names.append(fname)
            logging.info(f'finished loading the cuda kernel function: {fname} '
                         f'from the CUDA module')

    def initialize_shared_constants(self, data_manager: PyCUDADataManager,
                                    constant_names: list):
        for cname in constant_names:
            constant_on_device, _ = self._CUDA_module.get_global(cname)
            cuda_driver.memcpy_htod(constant_on_device, data_manager.shared_constant(cname))
            logging.info(f'Successfully initialized the CUDA shared constant {cname} '
                         f'managed by the CUDAFunctionManager')

    def _get_function(self, fname):
        assert fname in self._cuda_function_names, f'{fname} is not defined'
        return self._cuda_functions[fname]

    def compile(self):
        return self._compile

    def cuda_function_names(self):
        return self._cuda_function_names
def test():
    vec = ak.Array(
        [{'x': 1, 'y': 2, 'z': 3}, {'x': 4, 'y': 5, 'z': 9}],
        with_name='vector',
        behavior=behavior,
    )
    assert ak.almost_equal(
        vec + vec,
        ak.Array(
            [{'x': 2, 'y': 4, 'z': 6}, {'x': 8, 'y': 10, 'z': 18}],
            with_name='vector',
            behavior=behavior,
        ),
    )
    # The behavior (and hence vector semantics) must survive a pickle round trip.
    assert ak.almost_equal(pickle.loads(pickle.dumps(vec)), vec)
def prepare_experiment(args, config):
    if args.run_folder is not None:
        run_folder = args.run_folder
    else:
        # NOTE: the config key is misspelled ('expirement') in the source; kept
        # as-is, since existing config files will use the same spelling.
        run_folder = prepare_experiment_folder(config['expirement_base_path'], args.run_name)
    save_config(os.path.join(run_folder, 'config.yaml'), config)
    # Snapshot the source tree alongside the run for reproducibility.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    shutil.copytree(dir_path, os.path.join(run_folder, 'matchmaker-src'),
                    ignore=shutil.ignore_patterns('__pycache__'))
    return run_folder
def test_statement_to_ast_dict_single(statement_to_ast_visitor, default_test_case,
                                      function_mock):
    dict_stmt = stmt.DictStatement(
        default_test_case,
        default_test_case.test_cluster.type_system.convert_type_hint(dict[int, int]),
        [
            (
                stmt.IntPrimitiveStatement(default_test_case, 5).ret_val,
                stmt.IntPrimitiveStatement(default_test_case, 5).ret_val,
            )
        ],
    )
    statement_to_ast_visitor.visit_dict_statement(dict_stmt)
    assert (__create_source_from_ast(statement_to_ast_visitor.ast_node)
            == 'var_0 = {var_1: var_2}')
class TFXLMModel:
    def __init__(self, *args, **kwargs):
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def _slice_arrays(arrays, start=None, stop=None):
    if arrays is None:
        return [None]
    elif isinstance(arrays, list):
        if hasattr(start, '__len__'):
            # Fancy indexing with a list/array of indices.
            if hasattr(start, 'shape'):
                start = start.tolist()
            return [None if x is None else x[start] for x in arrays]
        else:
            return [None if x is None else x[start:stop] for x in arrays]
    elif hasattr(start, '__len__'):
        if hasattr(start, 'shape'):
            start = start.tolist()
        return arrays[start]
    elif hasattr(start, '__getitem__'):
        return arrays[start:stop]
    else:
        return [None]
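# Illustrative calls to _slice_arrays; the arrays and indices are made-up
# example data.
import numpy as np

x = np.arange(10)
y = np.arange(10) * 2

print(_slice_arrays([x, y], 2, 5))       # contiguous slice of each array
print(_slice_arrays([x, y], [0, 3, 7]))  # fancy indexing with a list of indices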
class FEBlock(nn.Module):
    def __init__(self, num_feat, rep_scale=4):
        super(FEBlock, self).__init__()
        self.num_feat = num_feat
        self.conv_first = ConvRep3(3, num_feat, rep_scale=rep_scale)
        self.conv_up = ConvRep3(num_feat, num_feat * 4, rep_scale=rep_scale)
        self.conv_last = ConvRep3(2 * num_feat, 3, rep_scale=rep_scale)
        # PixelShuffle(2) turns 4x the channels into 2x the spatial resolution,
        # undoing the 2x max-pool below.
        self.downsample = nn.PixelShuffle(2)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, x):
        x = self.lrelu(self.conv_first(x))
        base = x
        x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        x = self.lrelu(self.conv_up(x))
        x = self.downsample(x)
        x = torch.cat((x, base), 1)
        x = self.conv_last(x)
        return x
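# A minimal shape check for FEBlock, using a plain 3x3 convolution as a
# stand-in for the project's ConvRep3 (a reparameterized conv defined
# elsewhere); the stand-in only preserves input/output shapes, which is all
# this check needs.
import torch
import torch.nn as nn
import torch.nn.functional as F

def ConvRep3(in_ch, out_ch, rep_scale=4):
    return nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1)

block = FEBlock(num_feat=16)
out = block(torch.randn(1, 3, 64, 64))
print(out.shape)  # torch.Size([1, 3, 64, 64]): the block is shape-preserving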
def register_Ns3DefaultDeleter__Ns3S1apConnectionInfo_methods(root_module, cls):
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::S1apConnectionInfo > const &',
                               'arg0')])
    cls.add_method('Delete', 'void',
                   [param('ns3::S1apConnectionInfo *', 'object')],
                   is_static=True)
    return
def compute(folder: str) -> Tuple[List[dict], List[list], List[np.ndarray], List[np.ndarray]]:
    results = glob.glob(f'{folder}/*.sav')
    if f'{folder}/trained_model.sav' in results:
        results.remove(f'{folder}/trained_model.sav')

    hyperparameters = []
    scores = []
    avg = []
    std = []
    for result in results:
        # Use a context manager so each pickle file handle is closed promptly.
        with open(result, 'rb') as fh:
            parameters, s = pickle.load(fh)
        hyperparameters.append(parameters)
        scores.append([round(i, 3) for i in s])
        avg.append(np.mean(s))
        std.append(np.std(s))
    return hyperparameters, scores, avg, std
def apply_gen_fixed_toolkit_prompt(prompt):
    prompt = removed_submodules(prompt, ['tool_gen_blacklist', 'brainstorm_toolkit_step'])
    return prompt
class Dataset(ABC):
    # NOTE: the @abstractmethod decorators are assumed; they appear to have been
    # stripped from the source, and an ABC with plain `pass` bodies would not
    # enforce the interface.
    @abstractmethod
    def load(self) -> Tuple[List[np.ndarray], List[np.ndarray], np.ndarray, np.ndarray]:
        pass

    @abstractmethod
    def download(self, destination: str):
        pass
class AFLBasedController(ControllerModel):
    def __init__(self, AFLClass, name, seed, output, group, program, argument, cgroup_path):
        self.AFLClass = AFLClass
        self.name = name
        self.db = None
        self.seed = seed
        self.output = output
        self.group = group
        self.program = program
        self.argument = argument
        self.cgroup_path = cgroup_path
        self.afls = []

    def init(self):
        db_proxy.initialize(self.db)
        self.db.connect()
        self.db.create_tables([AFLModel, ControllerModel])
        # Rehydrate fuzzer handles for rows already recorded in the database.
        for fuzzer in AFLModel.select():
            afl = self.AFLClass(seed=fuzzer.seed, output=fuzzer.output,
                                group=fuzzer.group, program=fuzzer.program,
                                argument=fuzzer.argument, master=fuzzer.master,
                                fuzzer_id=fuzzer.fuzzer_id,
                                cgroup_path=self.cgroup_path, pid=fuzzer.pid)
            self.afls.append(afl)

    def get_master(self):
        for afl in self.afls:
            if afl.is_master:
                return afl

    def get_current_active(self):
        return [afl for afl in self.afls if afl.is_active]

    def get_current_inactive(self):
        return [afl for afl in self.afls if afl.is_inactive]

    def start(self):
        if self.afls:
            print('already started', file=sys.stderr)
            return
        afl = self.AFLClass(seed=self.seed, output=self.output, group=self.group,
                            program=self.program, argument=self.argument,
                            cgroup_path=self.cgroup_path, master=True, fuzzer_id=1)
        afl.start()
        while not afl.is_ready:
            time.sleep(1)
        AFLModel.create(seed=self.seed, output=self.output, group=self.group,
                        program=self.program, argument=self.argument,
                        master=True, pid=afl.pid, fuzzer_id=1)
        ControllerModel.create(scale_num=1)
        ready_path = os.path.join(self.output, 'ready')
        pathlib.Path(ready_path).touch(mode=0o666, exist_ok=True)  # 0o666 == 438

    def scale(self, scale_num):
        if not self.afls:
            print('start first', file=sys.stderr)
            return
        num = scale_num
        assert num >= 0
        current_active = self.get_current_active()
        current_inactive = self.get_current_inactive()
        current_active_num = len(current_active)
        current_inactive_num = len(current_inactive)
        master = self.get_master()
        if current_active_num < num:
            # Scale up: resume paused fuzzers first (master before slaves),
            # then spawn new slave instances for the remainder.
            diff = num - current_active_num
            resume_num = min(diff, current_inactive_num)
            resumed = 0
            if resume_num and master.is_inactive:
                master.resume()
                resumed += 1
            for afl in current_inactive:
                if resumed == resume_num:
                    break
                if afl.is_active:
                    continue
                afl.resume()
                resumed += 1
            start_id = len(self.afls) + 1
            for i in range(start_id, start_id + diff - resume_num):
                afl = self.AFLClass(seed=self.seed, output=self.output,
                                    group=self.group, program=self.program,
                                    argument=self.argument, master=False,
                                    cgroup_path=self.cgroup_path, fuzzer_id=i)
                afl.start()
                AFLModel.create(seed=self.seed, output=self.output, group=self.group,
                                program=self.program, argument=self.argument,
                                master=False, pid=afl.pid, fuzzer_id=i)
        elif current_active_num > num:
            # Scale down: pause extra fuzzers, keeping the master alive
            # whenever at least one fuzzer should remain.
            diff = current_active_num - num
            paused = 0
            for afl in current_active:
                if paused == diff:
                    break
                if num >= 1 and afl.is_master:
                    continue
                afl.pause()
                paused += 1
        else:
            return
        controller = ControllerModel.get()
        controller.scale_num = scale_num
        controller.save()

    def pause(self):
        for afl in self.afls:
            afl.pause()

    def resume(self):
        controller = ControllerModel.get()
        self.scale(controller.scale_num)

    def stop(self):
        for afl in self.afls:
            afl.stop()
        self.db.drop_tables([AFLModel, ControllerModel])
def eq(a, b):
    if z3_debug():
        _z3_assert(is_ast(a) and is_ast(b), 'Z3 ASTs expected')
    return a.eq(b)
def alter2chord_prob(alter):
    # 0/1 flags over the 96 chord types: 0 selects alter[0], 1 selects alter[1].
    alter_list = [
        0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,
        0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
        0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,
    ]
    chord_prob = []
    alpha0 = 1
    alpha1 = 1
    for f in alter_list:
        if f == 0:
            chord_prob.append(alter[0] * alpha0)
        if f == 1:
            chord_prob.append(alter[1] * alpha1)
    return np.asarray(chord_prob)
def get_sgd_learning_rate(i, *, warmup):
    # Linear warmup to 1.0 at step `warmup`, then inverse square-root decay:
    # min(sqrt(warmup) / sqrt(i), i / warmup).
    i += 1
    return min(math.sqrt(warmup) / math.sqrt(i), i / warmup)
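# Worked example of the schedule with warmup=100 (an illustrative value): the
# rate rises linearly to 1.0 at step 100, then decays as sqrt(100 / step).
for step in (0, 49, 99, 399):
    print(step, round(get_sgd_learning_rate(step, warmup=100), 3))
# 0   -> 0.01  (1/100)
# 49  -> 0.5   (50/100)
# 99  -> 1.0   (peak: both terms equal 1)
# 399 -> 0.5   (sqrt(100/400))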
class BasicTokenizer(object):
    def __init__(self, do_lower_case=True):
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        text = _convert_to_unicode_or_throw(text)
        text = self._clean_text(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        output_tokens = whitespace_tokenize(' '.join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        # Decompose characters (NFD) and drop combining marks (category 'Mn').
        text = unicodedata.normalize('NFD', text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == 'Mn':
                continue
            output.append(char)
        return ''.join(output)

    def _run_split_on_punc(self, text):
        # Split into runs of non-punctuation, with each punctuation character
        # becoming its own token.
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return [''.join(x) for x in output]

    def _clean_text(self, text):
        # Remove invalid characters (NUL, U+FFFD, control chars) and
        # normalize all whitespace to single spaces.
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 65533 or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(' ')
            else:
                output.append(char)
        return ''.join(output)
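# Hypothetical usage of BasicTokenizer; assumes the helper functions it calls
# (whitespace_tokenize, _is_punctuation, _is_control, _is_whitespace,
# _convert_to_unicode_or_throw) are defined elsewhere in the module.
tokenizer = BasicTokenizer(do_lower_case=True)
print(tokenizer.tokenize(u'Hello, World! héllo'))
# expected: ['hello', ',', 'world', '!', 'hello']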
def pip_installed_packages(normalization=None):
    with open(os.devnull, 'w') as devnull:
        proc = subprocess.Popen(
            [sys.executable, '-m', 'pip', 'list', '--no-index', '--format', 'json'],
            stdout=subprocess.PIPE,
            stderr=devnull,
        )
        stdout = proc.communicate()[0].decode()

    def normalize(name: str) -> str:
        if normalization is None:
            return name
        elif normalization == 'spkg':
            return name.lower().replace('-', '_').replace('.', '_')
        else:
            raise NotImplementedError(f'normalization {normalization} is not implemented')

    try:
        return {normalize(package['name']): package['version']
                for package in json.loads(stdout)}
    except json.decoder.JSONDecodeError:
        # pip may emit warnings that corrupt its JSON output.
        return {}
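# Illustrative call: map installed distribution names to versions, normalized
# to spkg-style names ('scikit-learn' -> 'scikit_learn'); output depends on
# the local environment.
packages = pip_installed_packages(normalization='spkg')
print(packages.get('scikit_learn'))  # e.g. '1.4.2', or None if not installed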
def test_redirect(capfd):
    msg = 'Should not be in log!'

    # Without ostream_redirect, C-level output bypasses Python's sys.stdout.
    stream = StringIO()
    with redirect_stdout(stream):
        m.raw_output(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == msg
    assert stream.getvalue() == ''

    # With ostream_redirect, C-level output lands in the Python stream.
    stream = StringIO()
    with redirect_stdout(stream):
        with m.ostream_redirect():
            m.raw_output(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == ''
    assert stream.getvalue() == msg

    # After the redirect context exits, behavior reverts.
    stream = StringIO()
    with redirect_stdout(stream):
        m.raw_output(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == msg
    assert stream.getvalue() == ''
class ROIHeadsTest(unittest.TestCase):
    def test_roi_heads(self):
        torch.manual_seed(121)
        cfg = get_cfg()
        cfg.MODEL.ROI_BOX_HEAD.NAME = 'FastRCNNConvFCHead'
        cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
        cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = 'ROIAlignV2'
        cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5)
        cfg.MODEL.MASK_ON = True
        num_images = 2
        images_tensor = torch.rand(num_images, 20, 30)
        image_sizes = [(10, 10), (20, 30)]
        images = ImageList(images_tensor, image_sizes)
        num_channels = 1024
        features = {'res4': torch.rand(num_images, num_channels, 1, 2)}
        feature_shape = {'res4': ShapeSpec(channels=num_channels, stride=16)}

        image_shape = (15, 15)
        gt_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
        gt_instance0 = Instances(image_shape)
        gt_instance0.gt_boxes = Boxes(gt_boxes0)
        gt_instance0.gt_classes = torch.tensor([2, 1])
        gt_instance0.gt_masks = BitMasks(torch.rand((2,) + image_shape) > 0.5)
        gt_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32)
        gt_instance1 = Instances(image_shape)
        gt_instance1.gt_boxes = Boxes(gt_boxes1)
        gt_instance1.gt_classes = torch.tensor([1, 2])
        gt_instance1.gt_masks = BitMasks(torch.rand((2,) + image_shape) > 0.5)
        gt_instances = [gt_instance0, gt_instance1]

        proposal_generator = build_proposal_generator(cfg, feature_shape)
        roi_heads = StandardROIHeads(cfg, feature_shape)

        with EventStorage():
            proposals, proposal_losses = proposal_generator(images, features, gt_instances)
            _, detector_losses = roi_heads(images, features, proposals, gt_instances)

        detector_losses.update(proposal_losses)
        expected_losses = {
            'loss_cls': 4.0,
            'loss_box_reg': 0.0,
            'loss_mask': 0.0,
            'loss_rpn_cls': 0.0,
            'loss_rpn_loc': 0.0,
        }
        succ = all(
            torch.allclose(detector_losses[name], torch.tensor(expected_losses.get(name, 0.0)))
            for name in detector_losses.keys()
        )
        self.assertTrue(
            succ,
            'Losses have changed! New losses: {}'.format(
                {k: v.item() for k, v in detector_losses.items()}
            ),
        )

    def test_rroi_heads(self):
        torch.manual_seed(121)
        cfg = get_cfg()
        cfg.MODEL.PROPOSAL_GENERATOR.NAME = 'RRPN'
        cfg.MODEL.ANCHOR_GENERATOR.NAME = 'RotatedAnchorGenerator'
        cfg.MODEL.ROI_HEADS.NAME = 'RROIHeads'
        cfg.MODEL.ROI_BOX_HEAD.NAME = 'FastRCNNConvFCHead'
        cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
        cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
        cfg.MODEL.RPN.HEAD_NAME = 'StandardRPNHead'
        cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = 'ROIAlignRotated'
        cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1)
        num_images = 2
        images_tensor = torch.rand(num_images, 20, 30)
        image_sizes = [(10, 10), (20, 30)]
        images = ImageList(images_tensor, image_sizes)
        num_channels = 1024
        features = {'res4': torch.rand(num_images, num_channels, 1, 2)}
        feature_shape = {'res4': ShapeSpec(channels=num_channels, stride=16)}

        image_shape = (15, 15)
        # Rotated boxes: (cx, cy, w, h, angle).
        gt_boxes0 = torch.tensor([[2, 2, 2, 2, 30], [4, 4, 4, 4, 0]], dtype=torch.float32)
        gt_instance0 = Instances(image_shape)
        gt_instance0.gt_boxes = RotatedBoxes(gt_boxes0)
        gt_instance0.gt_classes = torch.tensor([2, 1])
        gt_boxes1 = torch.tensor([[1.5, 5.5, 1, 3, 0], [8.5, 4, 3, 2, -50]],
                                 dtype=torch.float32)
        gt_instance1 = Instances(image_shape)
        gt_instance1.gt_boxes = RotatedBoxes(gt_boxes1)
        gt_instance1.gt_classes = torch.tensor([1, 2])
        gt_instances = [gt_instance0, gt_instance1]

        proposal_generator = build_proposal_generator(cfg, feature_shape)
        roi_heads = build_roi_heads(cfg, feature_shape)

        with EventStorage():
            proposals, proposal_losses = proposal_generator(images, features, gt_instances)
            _, detector_losses = roi_heads(images, features, proposals, gt_instances)

        detector_losses.update(proposal_losses)
        expected_losses = {
            'loss_cls': 4.0,
            'loss_box_reg': 0.0,
            'loss_rpn_cls': 0.0,
            'loss_rpn_loc': 0.0,
        }
        succ = all(
            torch.allclose(detector_losses[name], torch.tensor(expected_losses.get(name, 0.0)))
            for name in detector_losses.keys()
        )
        self.assertTrue(
            succ,
            'Losses have changed! New losses: {}'.format(
                {k: v.item() for k, v in detector_losses.items()}
            ),
        )
# NOTE: the decorator name was truncated in the source ('_function_dispatch');
# in NumPy this function is wrapped with array_function_dispatch, restored here.
@array_function_dispatch(_kron_dispatcher)
def kron(a, b):
    b = asanyarray(b)
    a = array(a, copy=False, subok=True, ndmin=b.ndim)
    ndb, nda = b.ndim, a.ndim
    if nda == 0 or ndb == 0:
        return _nx.multiply(a, b)
    as_ = a.shape
    bs = b.shape
    if not a.flags.contiguous:
        a = reshape(a, as_)
    if not b.flags.contiguous:
        b = reshape(b, bs)
    nd = ndb
    if ndb != nda:
        # Pad the shorter shape with leading ones so both have nd dimensions.
        if ndb > nda:
            as_ = (1,) * (ndb - nda) + as_
        else:
            bs = (1,) * (nda - ndb) + bs
            nd = nda
    result = outer(a, b).reshape(as_ + bs)
    axis = nd - 1
    for _ in range(nd):
        result = concatenate(result, axis=axis)
    wrapper = get_array_prepare(a, b)
    if wrapper is not None:
        result = wrapper(result)
    wrapper = get_array_wrap(a, b)
    if wrapper is not None:
        result = wrapper(result)
    return result
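# Illustrative Kronecker product of two small matrices: each element of `a`
# scales a full copy of `b`.
import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([[0, 1], [1, 0]])
print(np.kron(a, b))
# [[0 1 0 2]
#  [1 0 2 0]
#  [0 3 0 4]
#  [3 0 4 0]]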