code
stringlengths
101
5.91M
# Builds a Network object from its protobuf description: expands repeat_info
# across variables/functions via itertools.product, creates/reuses variables in
# `all_variables`, wires function input/output maps, then optimizes the graph.
# NOTE(review): source arrived with newlines collapsed to spaces; kept verbatim.
def _network(proto, default_context, batch_size, all_variables, rng): network = Network() network.name = proto.name network.repeat_info = {} for r in proto.repeat_info: network.repeat_info[r.id] = r.times network.variables = OrderedDict() if (batch_size is None): network.batch_size = proto.batch_size else: network.batch_size = batch_size for v in proto.variable: for variable_index in itertools.product(*map(tuple, map(range, [network.repeat_info[id] for id in v.repeat_id]))): name = v.name for (index, i) in enumerate(variable_index): if ((('{' + v.repeat_id[index]) + '}') in name): name = name.replace((('{' + v.repeat_id[index]) + '}'), (('[' + str(i)) + ']')) else: name += (((('_' + v.repeat_id[index]) + '[') + str(i)) + ']') if (name in all_variables): variable = all_variables[name] else: shape = tuple([(d if (d >= 1) else network.batch_size) for d in v.shape.dim]) variable = _create_variable(v, name, shape, rng) all_variables[name] = variable network.variables[name] = variable logger.debug('{}'.format((name, variable.shape, (v.initializer.type if v.initializer.type else '-'), v.initializer.multiplier))) network.functions = OrderedDict() network.function_inputs = OrderedDict() network.function_outputs = OrderedDict() network.variable_inputs = OrderedDict() network.variable_outputs = OrderedDict() for f in proto.function: ctx = (default_context if (not f.context.backends) else _context(f.context)) for variable_index in itertools.product(*map(tuple, map(range, [network.repeat_info[id] for id in f.repeat_id]))): (function, input_variable_names, output_variable_names) = _create_function(ctx, network, f, variable_index) if (function is not None): network.functions[function.name] = function for v_name in output_variable_names: network.variable_inputs[network.variables[v_name]] = [function] for v_name in input_variable_names: if (not (network.variables[v_name] in network.variable_outputs)): network.variable_outputs[network.variables[v_name]] = [] 
network.variable_outputs[network.variables[v_name]].append(function) network.setup(optimize=True) return network
class Compose(object):
    """Apply a list of transforms to one target, collecting every result.

    Unlike a chaining Compose, each transform receives the ORIGINAL target;
    the per-transform outputs are returned together as a list.
    """

    def __init__(self, transforms):
        # Callables applied independently to the target.
        self.transforms = transforms

    def __call__(self, target):
        return [transform(target) for transform in self.transforms]
def set_color_by_material(mat_color: ti.types.ndarray()):
    # Assign every particle an RGBA color from the per-material RGB table;
    # alpha is fixed at 1.0.
    # NOTE(review): likely declared as a @ti.kernel at the original site —
    # the decorator is not visible in this chunk; confirm upstream.
    for particle in range(n_particles):
        material = F_materials[particle]
        F_colors[particle] = ti.Vector([
            mat_color[material, 0],
            mat_color[material, 1],
            mat_color[material, 2],
            1.0,
        ])
def get_update(x):
    """Return the value of the first 'update=' field in a '-'-separated string.

    Returns None when no segment contains 'update='.
    """
    for segment in x.split('-'):
        if 'update=' in segment:
            return segment.replace('update=', '')
    return None
def encode_prompt(prompt_instructions, classification=False):
    """Render numbered instructions into a single task-generation prompt.

    Each instruction is whitespace-normalised and stripped of a trailing ':';
    the prompt ends with the next item number so a model continues the list.
    """
    if classification:
        prompt = ('Come up with a series of classification tasks. '
                  'Try to specify the possible output labels when possible.\n')
    else:
        prompt = 'Come up with a series of tasks:\n'
    for idx, instruction in enumerate(prompt_instructions):
        cleaned = re.sub(r'\s+', ' ', instruction).strip().rstrip(':')
        prompt += f'{idx + 1}. {cleaned}\n'
    prompt += f'{len(prompt_instructions) + 1}.'
    return prompt
# Sage documentation "build everything" dispatcher: __getattr__ turns any
# attribute access into a _wrapper call that first builds the reference manual
# (inventory pass, then the requested pass) and then builds all other documents,
# serially on macOS and via build_many elsewhere.
# NOTE(review): source arrived with newlines collapsed to spaces; kept verbatim.
class AllBuilder(): def __getattr__(self, attr): from functools import partial return partial(self._wrapper, attr) def _wrapper(self, name, *args, **kwds): start = time.time() docs = self.get_all_documents() refs = [x for x in docs if x.endswith('reference')] others = [x for x in docs if (not x.endswith('reference'))] logger.warning('\nBuilding reference manual, first pass.\n') for document in refs: getattr(get_builder(document), 'inventory')(*args, **kwds) from sage.env import SAGE_DOC logger.warning('Building reference manual, second pass.\n') os.makedirs(os.path.join(SAGE_DOC, 'html', 'en', 'reference', '_static'), exist_ok=True) for document in refs: getattr(get_builder(document), name)(*args, **kwds) L = [((doc, name, kwds) + args) for doc in others] if (sys.platform == 'darwin'): for target in L: build_other_doc(target) else: build_many(build_other_doc, L) logger.warning(('Elapsed time: %.1f seconds.' % (time.time() - start))) logger.warning('Done building the documentation!') def get_all_documents(self): documents = [] for lang in build_options.LANGUAGES: for document in os.listdir(os.path.join(SAGE_DOC_SRC, lang)): if ((document not in build_options.OMIT) and os.path.isdir(os.path.join(SAGE_DOC_SRC, lang, document))): documents.append(os.path.join(lang, document)) if ('en/reference' in documents): documents.remove('en/reference') documents.insert(0, 'en/reference') return documents
# BPR-SLIM item-similarity model on a dense item-item matrix (_s_dense):
# train_step does one SGD update from a (user, pos-item, neg-item) triple using
# the user's seen items from the CSR indptr/indices arrays; predict sums
# similarities over seen items; get_user_recs top-k's via np.argpartition.
# NOTE(review): source arrived with newlines collapsed to spaces; kept verbatim.
class BPRSlimModel(object): def __init__(self, data, num_users, num_items, lr, lj_reg, li_reg, sampler, random_seed=42): self._data = data self._num_users = num_users self._num_items = num_items self._sp_i_train_ratings = self._data.sp_i_train_ratings self._lr = lr self._lj_reg = lj_reg self._li_reg = li_reg self._sampler = sampler self._random_seed = random_seed self._random_state = np.random.RandomState(self._random_seed) self._mask_indices = np.array(self._sp_i_train_ratings.indices, dtype=np.int32) self._mask_indptr = np.array(self._sp_i_train_ratings.indptr, dtype=np.int32) self._s_dense = np.empty((self._num_items, self._num_items), np.double) def train_step(self, batch): (u, i, j) = batch x_uij = 0.0 index = 0 seen_items_start_pos = self._mask_indptr[u] seen_items_end_pos = self._mask_indptr[(u + 1)] while (index < (seen_items_end_pos - seen_items_start_pos)): seenItem = self._mask_indices[(seen_items_start_pos + index)] index += 1 x_uij += (self._s_dense[(i, seenItem)] - self._s_dense[(j, seenItem)]) gradient = (1 / (1 + np.exp(x_uij))) loss = (np.sum(x_uij) ** 2) index = 0 while (index < (seen_items_end_pos - seen_items_start_pos)): seenItem = self._mask_indices[(seen_items_start_pos + index)] index += 1 if (seenItem != i): self._s_dense[(i, seenItem)] += (self._lr * (gradient - (self._li_reg * self._s_dense[(i, seenItem)]))) if (seenItem != j): self._s_dense[(j, seenItem)] -= (self._lr * (gradient - (self._lj_reg * self._s_dense[(j, seenItem)]))) return loss def predict(self, u, i): x_ui = 0.0 index = 0 seen_items_start_pos = self._mask_indptr[u] seen_items_end_pos = self._mask_indptr[(u + 1)] while (index < (seen_items_end_pos - seen_items_start_pos)): seenItem = self._mask_indices[(seen_items_start_pos + index)] index += 1 x_ui += self._s_dense[(i, seenItem)] return x_ui def get_user_recs(self, user, k=100): user_items = self._data.train_dict[user].keys() predictions = {i: self.predict(user, i) for i in self._data.items if (i not in user_items)} 
(indices, values) = zip(*predictions.items()) indices = np.array(indices) values = np.array(values) local_k = min(k, len(values)) partially_ordered_preds_indices = np.argpartition(values, (- local_k))[(- local_k):] real_values = values[partially_ordered_preds_indices] real_indices = indices[partially_ordered_preds_indices] local_top_k = real_values.argsort()[::(- 1)] return [(real_indices[item], real_values[item]) for item in local_top_k] def get_model_state(self): saving_dict = {} saving_dict['_s_dense'] = self._s_dense return saving_dict def set_model_state(self, saving_dict): self._s_dense = saving_dict['_s_dense']
def meta_learning_loss(player):
    """Sum the cross-entropy between each step's stored probs and its optimal action.

    Accumulates the loss on the player's GPU; `player.meta_learning_actions`
    maps step index -> optimal action id and `player.probs` holds the logits.
    """
    total_loss = torch.tensor(0)
    with torch.cuda.device(player.gpu_id):
        total_loss = total_loss.cuda()
    for step in player.meta_learning_actions:
        target = torch.tensor(player.meta_learning_actions[step]).reshape([1]).long()
        with torch.cuda.device(player.gpu_id):
            target = target.cuda()
        total_loss = total_loss + F.cross_entropy(player.probs[step], target)
    return total_loss
# Dataset for single-image translation: trainA/trainB must each hold exactly
# one image; pre-samples per-index random zoom levels and shuffled patch
# indices at init, then __getitem__ applies the indexed crop/flip transforms.
# __len__ is a fixed 100000 epoch length, not the real image count.
# NOTE(review): source arrived with newlines collapsed to spaces; kept verbatim.
class SingleImageDataset(BaseDataset): def __init__(self, opt): BaseDataset.__init__(self, opt) self.dir_A = os.path.join(opt.dataroot, 'trainA') self.dir_B = os.path.join(opt.dataroot, 'trainB') if (os.path.exists(self.dir_A) and os.path.exists(self.dir_B)): self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) self.A_size = len(self.A_paths) self.B_size = len(self.B_paths) assert ((len(self.A_paths) == 1) and (len(self.B_paths) == 1)), 'SingleImageDataset class should be used with one image in each domain' A_img = Image.open(self.A_paths[0]).convert('RGB') B_img = Image.open(self.B_paths[0]).convert('RGB') print(('Image sizes %s and %s' % (str(A_img.size), str(B_img.size)))) self.A_img = A_img self.B_img = B_img A_zoom = (1 / self.opt.random_scale_max) zoom_levels_A = np.random.uniform(A_zoom, 1.0, size=(((len(self) // opt.batch_size) + 1), 1, 2)) self.zoom_levels_A = np.reshape(np.tile(zoom_levels_A, (1, opt.batch_size, 1)), [(- 1), 2]) B_zoom = (1 / self.opt.random_scale_max) zoom_levels_B = np.random.uniform(B_zoom, 1.0, size=(((len(self) // opt.batch_size) + 1), 1, 2)) self.zoom_levels_B = np.reshape(np.tile(zoom_levels_B, (1, opt.batch_size, 1)), [(- 1), 2]) self.patch_indices_A = list(range(len(self))) random.shuffle(self.patch_indices_A) self.patch_indices_B = list(range(len(self))) random.shuffle(self.patch_indices_B) def __getitem__(self, index): A_path = self.A_paths[0] B_path = self.B_paths[0] A_img = self.A_img B_img = self.B_img if (self.opt.phase == 'train'): param = {'scale_factor': self.zoom_levels_A[index], 'patch_index': self.patch_indices_A[index], 'flip': (random.random() > 0.5)} transform_A = get_transform(self.opt, params=param, method=Image.BILINEAR) A = transform_A(A_img) param = {'scale_factor': self.zoom_levels_B[index], 'patch_index': self.patch_indices_B[index], 'flip': (random.random() > 0.5)} transform_B = get_transform(self.opt, params=param, 
method=Image.BILINEAR) B = transform_B(B_img) else: transform = get_transform(self.opt, method=Image.BILINEAR) A = transform(A_img) B = transform(B_img) return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path} def __len__(self): return 100000
def context_decoder_fn_train(encoder_state, context_vector, name=None):
    """Return a training-time decoder_fn that appends `context_vector` to each input.

    On the first step (cell_state is None) the returned state is
    `encoder_state`; afterwards the incoming cell_state is passed through.
    """
    with ops.name_scope(name, 'simple_decoder_fn_train', [encoder_state]):
        pass

    def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
        with ops.name_scope(name, 'simple_decoder_fn_train',
                            [time, cell_state, cell_input, cell_output, context_state]):
            if context_vector is not None:
                cell_input = tf.concat([cell_input, context_vector], axis=1)
            # Bootstrap from the encoder state on the very first step.
            state = encoder_state if cell_state is None else cell_state
            return (None, state, cell_input, cell_output, context_state)
    return decoder_fn
def hirose(input: Tensor, m_sqaure: float=1):
    """Hirose fully-complex activation: squash the magnitude with tanh, keep the phase.

    Computes tanh(|input| / m_sqaure) * input / |input|.  Fixes two defects of
    the naive form: `F.tanh` is deprecated (replaced with `torch.tanh`), and
    zero-magnitude entries previously produced NaN from `input / |input|`;
    they now map to exactly 0, the correct limit.

    Args:
        input: real or complex tensor.
        m_sqaure: tanh steepness parameter (misspelled name kept for
            backward compatibility with keyword callers).

    Returns:
        Tensor of the same shape as `input`.
    """
    mag_input = torch.abs(input)
    # Replace zero magnitudes by 1 in the denominator; the tanh numerator is 0
    # there, so the output is 0 instead of NaN.
    safe_mag = torch.where(mag_input > 0, mag_input, torch.ones_like(mag_input))
    return (torch.tanh(mag_input / m_sqaure) / safe_mag) * input
# Fill `tensor` in place from N(mean, std**2) without tracking gradients and
# return it; thin wrapper over the module's _no_grad_normal_ helper.
def normal_(tensor: Tensor, mean: float=0.0, std: float=1.0) -> Tensor: return _no_grad_normal_(tensor, mean, std)
def parse_arguments(argv=None):
    """Parse command-line options for the data-sharding preprocessor.

    Args:
        argv: optional list of argument strings; ``None`` keeps the original
            behaviour of reading ``sys.argv[1:]``.  Accepting an explicit list
            is a backward-compatible generalization that makes the parser
            unit-testable.

    Returns:
        argparse.Namespace with data/output/config paths and sharding knobs.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-path', type=str, required=True, help='Path to raw data')
    parser.add_argument('--output-path', type=str, required=True, help='Path to save data')
    parser.add_argument('--n-jobs', type=int, default=20, required=False, help='Number of threads')
    parser.add_argument('--n-shards', type=int, default=8, required=False, help='Use `1/n_shards` of full dataset')
    parser.add_argument('--shard-id', type=int, default=0, required=False, help='Take shard with given id')
    parser.add_argument('--config', type=str, required=True, help='Config file path')
    return parser.parse_args(argv)
# Offline-RL trainer combining behavior-cloning dynamics models (bcs), reward
# models (rews), a VAE, and twin-critic actor-critic training; train() runs
# BC pretraining, VAE training, then either latent-space or model-rollout
# policy optimization depending on args['latent'].
# NOTE(review): source arrived with newlines collapsed to spaces; kept verbatim.
class AlgoTrainer(BaseAlgo): def __init__(self, algo_init, args): super(AlgoTrainer, self).__init__(args) self.bcs = algo_init['bcs']['net'] self.bcs_opt = algo_init['bcs']['opt'] self.rews = algo_init['rews']['net'] self.rews_opt = algo_init['rews']['opt'] self.vae = algo_init['vae']['net'] self.vae_opt = algo_init['vae']['opt'] self.actor = algo_init['actor']['net'] self.actor_opt = algo_init['actor']['opt'] self.critic1 = algo_init['critic1']['net'] self.critic1_opt = algo_init['critic1']['opt'] self.critic2 = algo_init['critic2']['net'] self.critic2_opt = algo_init['critic2']['opt'] self.actor_target = copy.deepcopy(self.actor) self.critic1_target = copy.deepcopy(self.critic1) self.critic2_target = copy.deepcopy(self.critic2) self.args = args def _train_bc(self, buffer): bc_loss_list = [] rew_loss_list = [] for step in range(100000): for i in range(len(self.bcs)): batch = buffer.sample(256) batch = to_torch(batch, torch.float, device=self.args['device']) rew = batch.rew obs = batch.obs act = batch.act obs_next = batch.obs_next obs_act = torch.cat([obs, act], axis=1) (obs_next_pre, _) = self.bcs[i](obs_act) (rew_pre, _) = self.rews[i](torch.cat([obs, act, obs_next], axis=1)) bc_loss = F.mse_loss(obs_next_pre, obs_next) rew_loss = F.mse_loss(rew_pre, rew) self.bcs_opt[i].zero_grad() self.rews_opt[i].zero_grad() bc_loss.backward() rew_loss.backward() self.bcs_opt[i].step() self.rews_opt[i].step() bc_loss_list.append(bc_loss.item()) rew_loss_list.append(rew_loss.item()) if (((step + 1) % 1000) == 0): logger.info('BC Epoch : {}, bc_loss : {:.4}', ((step + 1) // 1000), np.mean(bc_loss_list)) logger.info('BC Epoch : {}, recon_loss : {:.4}', ((step + 1) // 1000), np.mean(rew_loss_list)) def _train_vae_step(self, batch): batch = to_torch(batch, torch.float, device=self.args['device']) obs = batch.obs act = batch.act (recon, mean, std) = self.vae(obs, act) recon_loss = F.mse_loss(recon, torch.cat([obs, act], axis=1)) KL_loss = ((- self.args['vae_kl_weight']) * (((1 + 
torch.log(std.pow(2))) - mean.pow(2)) - std.pow(2)).mean()) vae_loss = (recon_loss + (0.5 * KL_loss)) self.vae_opt.zero_grad() vae_loss.backward() self.vae_opt.step() return (vae_loss.cpu().data.numpy(), recon_loss.cpu().data.numpy(), KL_loss.cpu().data.numpy()) def _train_vae(self, replay_buffer): logs = {'vae_loss': [], 'recon_loss': [], 'kl_loss': []} for i in range(self.args['vae_iterations']): batch = replay_buffer.sample(self.args['vae_batch_size']) (vae_loss, recon_loss, KL_loss) = self._train_vae_step(batch) logs['vae_loss'].append(vae_loss) logs['recon_loss'].append(recon_loss) logs['kl_loss'].append(KL_loss) if (((i + 1) % 1000) == 0): logger.info('VAE Epoch : {}, KL_loss : {:.4}', ((i + 1) // 1000), KL_loss) logger.info('VAE Epoch : {}, recon_loss : {:.4}', ((i + 1) // 1000), recon_loss) logger.info('VAE Epoch : {}, Loss : {:.4}', ((i + 1) // 1000), vae_loss) logger.info('Save VAE Model -> {}', (('/tmp/vae_' + str(i)) + '.pkl')) def _train_policy(self, replay_buffer, eval_fn): for it in range(self.args['actor_iterations']): batch = replay_buffer.sample(self.args['actor_batch_size']) batch = to_torch(batch, torch.float, device=self.args['device']) rew = batch.rew done = batch.done obs = batch.obs act = batch.act obs_next = batch.obs_next rew_list = [] obs = [obs for _ in self.bcs] for i in range(5): act = [self.actor_target(o)[0] for o in obs] obs_act = [torch.cat([o, a], axis=1) for (o, a) in zip(obs, act)] obs_next = [net(oa)[0] for (net, oa) in zip(self.bcs, obs_act)] obs_act_obs = [torch.cat([oa, on], axis=1) for (oa, on) in zip(obs_act, obs_next)] rew = [net(oao)[0] for (net, oao) in zip(self.rews, obs_act_obs)] if (i == 0): r_p = [torch.mean(torch.abs((self.vae(o, a)[0] - oa)).detach(), axis=1) for (o, a, oa) in zip(obs, act, obs_act)] r_p = torch.mean(torch.cat(rew, axis=1), axis=1) rew = torch.cat(rew, axis=1) rew_list.append(rew) obs = obs_next r_e = None for index in range(len(rew_list)): if (r_e is None): r_e = rew_list[index] else: r_e += 
(rew_list[index] * (0.99 ** index)) (rew_min, _) = torch.min(r_e, axis=1) rew_mean = torch.mean(r_e, axis=1) rew = (((0.5 * rew_min) + (0.5 * rew_mean)) - (0.5 * r_p)) done = batch.done obs = batch.obs act = batch.act obs_next = batch.obs_next with torch.no_grad(): (action_next, _) = self.actor_target(obs_next) target_q1 = self.critic1_target(obs_next, action_next) target_q2 = self.critic2_target(obs_next, action_next) target_q = ((self.args['lmbda'] * torch.min(target_q1, target_q2)) + ((1 - self.args['lmbda']) * torch.max(target_q1, target_q2))) target_q = (rew + (((1 - done) * self.args['discount']) * target_q)) current_q1 = self.critic1(obs, act) current_q2 = self.critic2(obs, act) critic_loss = (F.mse_loss(current_q1, target_q) + F.mse_loss(current_q2, target_q)) self.critic1_opt.zero_grad() self.critic2_opt.zero_grad() critic_loss.backward() self.critic1_opt.step() self.critic2_opt.step() (action, _) = self.actor(obs) actor_loss = (- self.critic1(obs, action).mean()) self.actor.zero_grad() actor_loss.backward() self.actor_opt.step() self._sync_weight(self.actor_target, self.actor) self._sync_weight(self.critic1_target, self.critic1) self._sync_weight(self.critic2_target, self.critic2) if (((it + 1) % 1000) == 0): if (eval_fn is None): self.eval_policy() else: self.vae._actor = copy.deepcopy(self.actor) res = eval_fn(self.get_policy()) self.log_res(((it + 1) // 1000), res) def _train_policy_latent(self, replay_buffer, eval_fn): for it in range(self.args['actor_iterations']): batch = replay_buffer.sample(self.args['actor_batch_size']) batch = to_torch(batch, torch.float, device=self.args['device']) rew = batch.rew done = batch.done obs = batch.obs act = batch.act obs_next = batch.obs_next with torch.no_grad(): (_, _, next_action) = self.actor_target(obs_next, self.vae.decode) target_q1 = self.critic1_target(obs_next, next_action) target_q2 = self.critic2_target(obs_next, next_action) target_q = ((self.args['lmbda'] * torch.min(target_q1, target_q2)) + ((1 - 
self.args['lmbda']) * torch.max(target_q1, target_q2))) target_q = (rew + (((1 - done) * self.args['discount']) * target_q)) current_q1 = self.critic1(obs, act) current_q2 = self.critic2(obs, act) critic_loss = (F.mse_loss(current_q1, target_q) + F.mse_loss(current_q2, target_q)) self.critic1_opt.zero_grad() self.critic2_opt.zero_grad() critic_loss.backward() self.critic1_opt.step() self.critic2_opt.step() (latent_actions, mid_actions, actions) = self.actor(obs, self.vae.decode) actor_loss = (- self.critic1(obs, actions).mean()) self.actor.zero_grad() actor_loss.backward() self.actor_opt.step() self._sync_weight(self.actor_target, self.actor) self._sync_weight(self.critic1_target, self.critic1) self._sync_weight(self.critic2_target, self.critic2) if (((it + 1) % 1000) == 0): print('mid_actions :', torch.abs((actions - mid_actions)).mean()) if (eval_fn is None): self.eval_policy() else: self.vae._actor = copy.deepcopy(self.actor) res = eval_fn(self.get_policy()) self.log_res(((it + 1) // 1000), res) def get_model(self): pass def save_model(self): pass def get_policy(self): return self.actor def train(self, replay_buffer, callback_fn=None): self._train_bc(replay_buffer) self._train_vae(replay_buffer) self.vae.eval() if self.args['latent']: self._train_policy_latent(replay_buffer, callback_fn) else: self._train_policy(replay_buffer, callback_fn)
def create_attn(attn_type, channels, **kwargs):
    """Instantiate an attention module selected by name, flag, or class.

    `attn_type` may be a keyword string, a bool (True -> SEModule), a module
    class, or None/False to disable attention (returns None).
    """
    module_cls = None
    if attn_type is not None:
        if isinstance(attn_type, str):
            key = attn_type.lower()
            # Registry built lazily so disabled paths never touch these names.
            registry = {
                'se': SEModule,
                'ese': EffectiveSEModule,
                'eca': EcaModule,
                'ceca': CecaModule,
                'cbam': CbamModule,
                'lcbam': LightCbamModule,
            }
            assert key in registry, ('Invalid attn module (%s)' % key)
            module_cls = registry[key]
        elif isinstance(attn_type, bool):
            if attn_type:
                module_cls = SEModule
        else:
            module_cls = attn_type
    if module_cls is not None:
        return module_cls(channels, **kwargs)
    return None
# Auto-generated ns-3 Python-binding registration: adds the default and copy
# constructors for SimpleRefCount<SystemThread, ...>.  Kept verbatim — this
# code is emitted by the binding generator, not hand-maintained.
def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls): cls.add_constructor([]) cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')]) return
# Parameterized test: F.max/F.min with only_index=True followed by a scalar add
# must preserve the int64 index values.
# NOTE(review): the record is truncated — the leading '@pytest.mark' (or
# similar) of the '.parametrize' decorator is missing from this chunk; kept
# verbatim, confirm against the original file.
.parametrize('op, ctx, func_name', list_ctx_and_func_name(['max', 'min'])) def test_max_min_int64_add_scalar(op, ctx, func_name): expected = {'max': [100.0], 'min': [1.0]} nn.set_default_context(ctx) nn.set_auto_forward(True) x = nn.Variable((1, 100)) x.d = range(100) func = getattr(F, op) idx = func(x, axis=1, only_index=True) idx += 1.0 assert np.allclose(expected[op], idx.d), 'test long long copy failed!'
def prepare_nlc_data(data_dir, max_vocabulary_size, tokenizer=char_tokenizer, other_dev_path=None):
    """Tokenise NLC train/dev corpora into id files, building the vocab if needed.

    Returns (x_train_ids, y_train_ids, x_dev_ids, y_dev_ids, vocab_path).
    """
    train_path = get_nlc_train_set(data_dir)
    dev_path = (get_nlc_dev_set(data_dir) if other_dev_path is None
                else get_nlc_dev_set(other_dev_path))
    vocab_path = os.path.join(data_dir, 'vocab.dat')
    # BPE brings its own vocabulary, so only build one for other tokenizers.
    if tokenizer != bpe_tokenizer:
        create_vocabulary(vocab_path,
                          [train_path + '.y.txt', train_path + '.x.txt'],
                          max_vocabulary_size, tokenizer)
    y_train_ids_path = train_path + '.ids.y'
    x_train_ids_path = train_path + '.ids.x'
    data_to_token_ids(train_path + '.y.txt', y_train_ids_path, vocab_path, tokenizer)
    data_to_token_ids(train_path + '.x.txt', x_train_ids_path, vocab_path, tokenizer)
    y_dev_ids_path = dev_path + '.ids.y'
    x_dev_ids_path = dev_path + '.ids.x'
    data_to_token_ids(dev_path + '.y.txt', y_dev_ids_path, vocab_path, tokenizer)
    data_to_token_ids(dev_path + '.x.txt', x_dev_ids_path, vocab_path, tokenizer)
    return (x_train_ids_path, y_train_ids_path, x_dev_ids_path, y_dev_ids_path, vocab_path)
# Regression test of bipartite change statistics against hand-verified counts
# for the Inouye-Pyke pollinator network; depends on data files under
# ../examples/data, so it must run from the expected working directory.
# NOTE(review): source arrived with newlines collapsed to spaces; kept verbatim.
def test_bipartite_change_stats_inouye(): print('testing bipartrite change stats on Inouye-Pyke example...') start = time.time() g = BipartiteGraph('../examples/data/bipartite/Inouye_Pyke_pollinator_web/inouye_bipartite.net') assert (g.numNodes() == 133) assert (g.numEdges() == 281) assert (len(list(g.nodeModeIterator(MODE_A))) == 91) assert (len(list(g.nodeModeIterator(MODE_B))) == 42) g.printSummary() b2star2 = sum([(g.twoPaths(i, j) if (i < j) else 0) for i in g.nodeModeIterator(MODE_A) for j in g.nodeModeIterator(MODE_A)]) assert (b2star2 == 1437) b1star2 = sum([(g.twoPaths(i, j) if (i < j) else 0) for i in g.nodeModeIterator(MODE_B) for j in g.nodeModeIterator(MODE_B)]) assert (b1star2 == 877) twopaths = sum([(g.twoPaths(i, j) if (i < j) else 0) for i in g.nodeIterator() for j in g.nodeIterator()]) assert (twopaths == 2314) outcome_binvar = list(map(int_or_na, open('../examples/data/bipartite/Inouye_Pyke_pollinator_web/inouye_outcome.txt').read().split()[1:])) obs_stats = computeObservedStatistics(g, outcome_binvar, [partial(changeBipartiteDensity, MODE_A), partial(changeBipartiteActivity, MODE_A), partial(changeBipartiteEgoTwoStar, MODE_A), partial(changeBipartiteAlterTwoStar1, MODE_A), partial(changeBipartiteAlterTwoStar2, MODE_A), partial(changeBipartiteFourCycle1, MODE_A), partial(changeBipartiteFourCycle2, MODE_A)]) print(obs_stats) assert all((obs_stats == numpy.array([39, 129, 347, 1258, 266, 718, 122]))) print('OK,', (time.time() - start), 's') print()
def _get_init_fn(checkpoint_path, ignore_missing_vars):
    """Build a slim init function restoring variables from `checkpoint_path`.

    Returns None when no checkpoint path is given; resolves a directory to its
    latest checkpoint before building the restore fn.
    """
    if checkpoint_path is None:
        return None
    # NOTE(review): [1:] drops the first variable slim reports — presumably the
    # global step; confirm against the training graph.
    restore_vars = slim.get_variables_to_restore()[1:]
    for var in restore_vars:
        print(var)
    if tf.gfile.IsDirectory(checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
    return slim.assign_from_checkpoint_fn(
        checkpoint_path, restore_vars, ignore_missing_vars=ignore_missing_vars)
def gap_workspace_file(system='gap', name='workspace', dir=None):
    """Return a per-host, per-GAP-install workspace file path.

    The filename embeds a SHA1 digest of the GAP root paths plus each
    install's reported version/architecture, so stale workspaces are never
    reused after an upgrade.
    """
    if dir is None:
        dir = os.path.join(DOT_SAGE, 'gap')
    data = f'{GAP_ROOT_PATHS}'
    for root in GAP_ROOT_PATHS.split(';'):
        if not root:
            continue
        sysinfo = os.path.join(root, 'sysinfo.gap')
        if os.path.exists(sysinfo):
            # sysinfo.gap is a shell fragment defining GAP_VERSION / GAParch.
            data += subprocess.getoutput(f'. "{sysinfo}" && echo ":$GAP_VERSION:$GAParch"')
    digest = hashlib.sha1(data.encode('utf-8')).hexdigest()
    return os.path.join(dir, f'{system}-{name}-{HOSTNAME}-{digest}')
# Free group on an arbitrary index set; elements are tuples of (generator,
# exponent) pairs with adjacent same-generator pairs merged in _mul_.
# NOTE(review): the bare token '_method' before 'def one' is garbled — almost
# certainly a mangled '@cached_method' decorator; confirm against the original
# Sage source.  Kept verbatim otherwise.
class IndexedFreeGroup(IndexedGroup, Group): def __init__(self, indices, prefix, category=None, **kwds): category = Groups().or_subcategory(category) IndexedGroup.__init__(self, indices, prefix, category, **kwds) def _repr_(self): return 'Free group indexed by {}'.format(self._indices) _method def one(self): return self.element_class(self, ()) def gen(self, x): if (x not in self._indices): raise IndexError('{} is not in the index set'.format(x)) try: return self.element_class(self, ((self._indices(x), 1),)) except TypeError: return self.element_class(self, ((x, 1),)) class Element(IndexedFreeMonoidElement): def __len__(self): return sum((abs(exp) for (gen, exp) in self._monomial)) length = __len__ def _mul_(self, other): if (not self._monomial): return other if (not other._monomial): return self ret = list(self._monomial) rhs = list(other._monomial) while (ret and rhs and (ret[(- 1)][0] == rhs[0][0])): rhs[0] = (rhs[0][0], (rhs[0][1] + ret.pop()[1])) if (rhs[0][1] == 0): rhs.pop(0) ret += rhs return self.__class__(self.parent(), tuple(ret)) def __invert__(self): return self.__class__(self.parent(), tuple(((x[0], (- x[1])) for x in reversed(self._monomial)))) def to_word_list(self): sign = (lambda x: (1 if (x > 0) else (- 1))) return [(k, sign(e)) for (k, e) in self._sorted_items() for dummy in range(abs(e))]
def CalculateCompositionNormalizedVDWV(ProteinSequence):
    """Composition descriptor of the sequence under the normalized VDWV grouping."""
    return CalculateComposition(ProteinSequence, _NormalizedVDWV, '_NormalizedVDWV')
def validate_ean(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate EAN codes in a scalar, a Series, or a DataFrame.

    For frames, an empty `column` means every cell is validated; otherwise only
    the named column.  Scalars return a single bool.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(ean.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        # NOTE(review): DataFrame.applymap is deprecated in pandas 2.1+
        # (DataFrame.map) — confirm the supported pandas version.
        return df[column].apply(ean.is_valid) if column != '' else df.applymap(ean.is_valid)
    return ean.is_valid(df)
class ProxyAlreadyVisited(object):
    """Placeholder whose repr is a fixed marker for an already-visited object."""

    def __init__(self, rep):
        # Marker text shown in place of the real object.
        self._rep = rep

    def __repr__(self):
        return self._rep
def makeNewUserDir(username):
    """Create /home/<username> with temporarily raised privileges.

    Returns True on success; False when the username is invalid or the
    directory cannot be created.

    Bug fix: previously lowerPrivileges() was only reached on the success
    path, so an OSError from os.mkdir left the process running with raised
    privileges.  The finally block guarantees privileges are dropped.
    """
    if invalidUsername(username):
        print('Usernames cannot contain invalid characters')
        return False
    try:
        raisePrivileges()
        try:
            os.mkdir('/home/' + username)
        finally:
            # Always drop privileges, even when mkdir fails.
            lowerPrivileges()
    except OSError:
        print('Unable to create new user directory for user:' + username)
        return False
    return True
# Serialize one document's coreference clusters in CoNLL-2012 format: opening
# '#begin document' header, one line per token with '(id', 'id)' or '(id)'
# cluster markers, blank lines between sentences, closing '#end document'.
# NOTE(review): the triple-quoted f-strings originally contained newlines that
# the collapsed source shows as spaces; kept verbatim rather than guessed.
def write_conll(doc: Doc, clusters: List[List[Span]], f_obj: TextIO): placeholder = (' -' * 7) doc_id = doc['document_id'] words = doc['cased_words'] part_id = doc['part_id'] sents = doc['sent_id'] max_word_len = max((len(w) for w in words)) starts = defaultdict((lambda : [])) ends = defaultdict((lambda : [])) single_word = defaultdict((lambda : [])) for (cluster_id, cluster) in enumerate(clusters): for (start, end) in cluster: if ((end - start) == 1): single_word[start].append(cluster_id) else: starts[start].append(cluster_id) ends[(end - 1)].append(cluster_id) f_obj.write(f'''#begin document ({doc_id}); part {part_id:0>3d} ''') word_number = 0 for (word_id, word) in enumerate(words): cluster_info_lst = [] for cluster_marker in starts[word_id]: cluster_info_lst.append(f'({cluster_marker}') for cluster_marker in single_word[word_id]: cluster_info_lst.append(f'({cluster_marker})') for cluster_marker in ends[word_id]: cluster_info_lst.append(f'{cluster_marker})') cluster_info = ('|'.join(cluster_info_lst) if cluster_info_lst else '-') if ((word_id == 0) or (sents[word_id] != sents[(word_id - 1)])): f_obj.write('\n') word_number = 0 f_obj.write(f'''{doc_id} {part_id} {word_number:>2} {word:>{max_word_len}}{placeholder} {cluster_info} ''') word_number += 1 f_obj.write('#end document\n\n')
# Build {utt_id: "zip_path:byte_offset:size"} for every member of a zip of
# (presumably uncompressed) .npy payloads; offset skips the 30-byte local
# header plus filename.  A raw read then sanity-checks npy magic bytes.
# NOTE(review): after newline collapsing it is ambiguous whether the final
# open/seek/assert ran inside the loop (per entry) or once after it (last
# entry only) — confirm against the original before relying on the check.
def get_zip_manifest(zip_path: Path, zip_root: Optional[Path]=None): _zip_path = (zip_path if (zip_root is None) else Path.joinpath(zip_root, zip_path)) with zipfile.ZipFile(_zip_path, mode='r') as f: info = f.infolist() manifest = {} for i in tqdm(info): utt_id = Path(i.filename).stem (offset, file_size) = (((i.header_offset + 30) + len(i.filename)), i.file_size) manifest[utt_id] = f'{zip_path.as_posix()}:{offset}:{file_size}' with open(_zip_path, 'rb') as f: f.seek(offset) data = f.read(file_size) assert ((len(data) > 1) and is_npy_data(data)) return manifest
def test_control_bfgs_multiple(ocp):
    """BFGS under a tight iteration budget must still reach the relative tolerance."""
    ocp.solve(algorithm='bfgs', max_iter=11, rtol=0.01, atol=0.0)
    assert ocp.solver.relative_norm <= ocp.solver.rtol
def get_ebm(**model_cfg):
    """Build an EnergyBasedModel from keyword config.

    Consumes 'x_dim' (input width) and 'net' (sub-config for get_net), drops
    the informational 'arch' key, and forwards everything else to
    EnergyBasedModel.  Works on a deep copy so the caller's dict is untouched.
    """
    cfg = copy.deepcopy(model_cfg)
    cfg.pop('arch', None)  # metadata for the caller, not a model kwarg
    in_dim = cfg.pop('x_dim')
    net_cfg = cfg.pop('net')
    net = get_net(in_dim=in_dim, out_dim=1, **net_cfg)
    return EnergyBasedModel(net, **cfg)
# Benchmark-log parser: matches per-iteration runtime lines (converting
# microseconds to ms) plus actor/message/promise totals, flags known
# invalid-result patterns as errors, and groups measurements into DataPoints
# closed by each 'total' criterion.
# NOTE(review): source arrived with newlines collapsed to spaces; kept verbatim.
class ValidationLogAdapter(GaugeAdapter): re_log_line = re.compile('^(?:.*: )?([\\w\\.]+)( [\\w\\.]+)?: iterations=([0-9]+) runtime: ([0-9]+)([mu])s success: (true|false)') re_actors = re.compile('^\\[Total\\]\\s+A#([0-9]+)\\s+M#([0-9]+)\\s+P#([0-9]+)') re_NPB_partial_invalid = re.compile('.*Failed.*verification') re_NPB_invalid = re.compile('.*Benchmark done.*verification failed') re_incorrect = re.compile('.*incorrect.*') def __init__(self, include_faulty, executor): super(ValidationLogAdapter, self).__init__(include_faulty, executor) self._other_error_definitions = [self.re_NPB_partial_invalid, self.re_NPB_invalid, self.re_incorrect] def parse_data(self, data, run_id, invocation): iteration = 1 data_points = [] current = DataPoint(run_id) for line in data.split('\n'): if self.check_for_error(line): raise ResultsIndicatedAsInvalid('Output of bench program indicated error.') match = self.re_log_line.match(line) if match: time = float(match.group(4)) if (match.group(5) == 'u'): time /= 1000 criterion = (match.group(2) or 'total').strip() success_measure = Measurement(invocation, iteration, (match.group(6) == 'true'), 'bool', run_id, 'Success') measure = Measurement(invocation, iteration, time, 'ms', run_id, criterion) current.add_measurement(success_measure) current.add_measurement(measure) if measure.is_total(): data_points.append(current) current = DataPoint(run_id) iteration += 1 else: match = self.re_actors.match(line) if match: measure1 = Measurement(invocation, iteration, int(match.group(1)), 'count', run_id, 'Actors') measure2 = Measurement(invocation, iteration, int(match.group(2)), 'count', run_id, 'Messages') measure3 = Measurement(invocation, iteration, int(match.group(3)), 'count', run_id, 'Promises') measure4 = Measurement(invocation, iteration, 0, 'ms', run_id, 'total') current.add_measurement(measure1) current.add_measurement(measure2) current.add_measurement(measure3) current.add_measurement(measure4) data_points.append(current) current = 
DataPoint(run_id) iteration += 1 if (not data_points): raise OutputNotParseable(data) return data_points
class ParamHistoryManagerBase():
    """Base class for parameter-grid history filters.

    Concrete subclasses override filter(); this base implementation is a
    deliberate no-op (returns None).
    """

    def filter(self, param_grid: List[Dict[(str, Any)]]) -> Iterable[Dict]:
        """Return the subset of `param_grid` to keep; no-op in the base class."""
# Import-time stub: stands in for the real FlaxGPT2Model when flax is not
# installed; instantiating it raises via requires_backends.
class FlaxGPT2Model(metaclass=DummyObject): _backends = ['flax'] def __init__(self, *args, **kwargs): requires_backends(self, ['flax'])
def AB2(u0, u1, rhs, dt, tstep, solver, context):
    """Advance `u0` one step with second-order Adams-Bashforth.

    `u1` carries rhs*dt from the previous step; the very first step
    (tstep == 0) falls back to forward Euler since no history exists yet.
    Mutates `u0` and `u1` in place and returns (u0, dt, dt).
    """
    rhs = solver.ComputeRHS(rhs, u0, solver, **context)
    if tstep == 0:
        # Forward-Euler bootstrap.
        u0 += rhs * dt
    else:
        u0 += 1.5 * rhs * dt - 0.5 * u1
    # Stash this step's increment for the next AB2 update.
    u1[:] = rhs * dt
    return (u0, dt, dt)
def run_method(idx, args, file, method):
    """Run the named graph-summary/embedding method on `file` and return its result.

    Prints a message and exits(1) for an unknown method name, matching the
    original behaviour.
    """
    def _nog():
        graph = process_file_nog(file)
        return np.array([graph.number_of_nodes(), graph.number_of_edges()],
                        dtype=np.int64)

    # Lambdas defer all lookups/IO until the chosen method actually runs.
    dispatch = {
        'sf': lambda: sf(process_file_karate(file), args['n_eigen']),
        'ldp': lambda: ldp(process_file_karate(file)),
        'fgsd': lambda: fgsd(process_file_karate(file)),
        'feather': lambda: feather(process_file_karate(file), order=args['order']),
        'geo_scattering': lambda: geo_scattering(process_file_karate(file), order=args['order']),
        'g2v': lambda: g2v_document(idx, process_file_karate(file)),
        'lsd': lambda: netlsd_naive(process_file_slaq(file)),
        'lsd_slaq': lambda: netlsd(process_file_slaq(file), lanczos_steps=args['n_steps'], nvectors=args['n_vectors']),
        'vnge_slaq': lambda: vnge(process_file_slaq(file), lanczos_steps=args['n_steps'], nvectors=args['n_vectors']),
        'vnge': lambda: vnge_naive(process_file_slaq(file)),
        'nog': _nog,
    }
    runner = dispatch.get(method)
    if runner is None:
        print('Method {} not implemented'.format(method))
        exit(1)
    return runner()
def profile_kv(scopename):
    # Generator-style context manager body: accumulates wall-clock time spent
    # inside the scope under the 'wait_<scopename>' key of the current
    # logger's name2val mapping.
    # NOTE(review): presumably decorated with @contextmanager at its original
    # definition site (decorator not visible in this chunk) -- confirm.
    logkey = ('wait_' + scopename)
    tstart = time.time()
    try:
        (yield)
    finally:
        # Always record elapsed time, even if the scope raised.
        get_current().name2val[logkey] += (time.time() - tstart)
class LayerNormMLP(nn.Module):
    """MLP that applies LayerNorm after each hidden activation.

    The last Dense layer is left un-activated/un-normalised unless
    ``activate_final`` is set.
    NOTE(review): flax modules that create sublayers inline usually decorate
    __call__ with @nn.compact; the decorator is not visible here -- confirm.
    """
    hidden_dims: Sequence[int]  # output width of each Dense layer
    activations: Callable[([jnp.ndarray], jnp.ndarray)] = nn.gelu
    # Used strictly as a flag, so annotate as bool (was annotated `int`).
    activate_final: bool = False
    kernel_init: Callable[([PRNGKey, Shape, Dtype], Array)] = default_init()

    def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
        for (i, size) in enumerate(self.hidden_dims):
            x = nn.Dense(size, kernel_init=self.kernel_init)(x)
            # Activation + LayerNorm on every hidden layer; on the final
            # layer only when activate_final is set.
            if (((i + 1) < len(self.hidden_dims)) or self.activate_final):
                x = self.activations(x)
                x = nn.LayerNorm()(x)
        return x
def train(train_loader, model, optimizer, epoch, save_path):
    """Run one training epoch for the RGB-D saliency model.

    Relies on module-level globals: `step` (global optimisation counter),
    `opt` (CLI options), `total_step`, `writer` (TensorBoard) and `logging`.
    Saves a checkpoint every 5 epochs; on KeyboardInterrupt it saves a
    checkpoint and re-raises.
    """
    global step
    model.train()
    loss_all = 0
    epoch_step = 0
    try:
        for (i, (images, gts, depths)) in enumerate(train_loader, start=1):
            optimizer.zero_grad()
            images = images.cuda()
            gts = gts.cuda()
            depths = depths.cuda()
            # Model produces three side-output predictions; each is
            # supervised with the same structure loss.
            pre_res = model(images, depths)
            loss1 = structure_loss(pre_res[0], gts)
            loss2 = structure_loss(pre_res[1], gts)
            loss3 = structure_loss(pre_res[2], gts)
            loss_seg = ((loss1 + loss2) + loss3)
            loss = loss_seg
            loss.backward()
            clip_gradient(optimizer, opt.clip)
            optimizer.step()
            step += 1
            epoch_step += 1
            loss_all += loss.data
            # Log on the first batch, every 50 batches, and the last batch.
            if (((i % 50) == 0) or (i == total_step) or (i == 1)):
                print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], Loss1: {:.4f} Loss2: {:0.4f} Loss3: {:0.4f}'.format(datetime.now(), epoch, opt.epoch, i, total_step, loss1.data, loss2.data, loss3.data))
                logging.info('#TRAIN#:Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], Loss1: {:.4f} Loss2: {:0.4f} Loss3: {:0.4f}'.format(epoch, opt.epoch, i, total_step, loss1.data, loss2.data, loss3.data))
        loss_all /= epoch_step
        logging.info('#TRAIN#:Epoch [{:03d}/{:03d}], Loss_AVG: {:.4f}'.format(epoch, opt.epoch, loss_all))
        writer.add_scalar('Loss-epoch', loss_all, global_step=epoch)
        if ((epoch % 5) == 0):
            torch.save(model.state_dict(), (save_path + 'HyperNet_epoch_{}.pth'.format(epoch)))
    except KeyboardInterrupt:
        # Emergency checkpoint before propagating the interrupt.
        print('Keyboard Interrupt: save model and exit.')
        if (not os.path.exists(save_path)):
            os.makedirs(save_path)
        torch.save(model.state_dict(), (save_path + 'HyperNet_epoch_{}.pth'.format((epoch + 1))))
        print('save checkpoints successfully!')
        raise
class Recon3(Problem):
    """Six-node toy reconfiguration instance: 10 units of demand from 0 to 5."""

    def __init__(self):
        G = nx.DiGraph()
        # Node layout (label = str(id), pos used only for plotting).
        layout = {0: ((- 2), 0), 1: ((- 1), 0.5), 2: ((- 1), (- 0.5)),
                  3: (0, 0.5), 4: (0, (- 0.5)), 5: (1, 0)}
        for node_id, xy in layout.items():
            G.add_node(node_id, label=str(node_id), pos=xy)
        # Capacitated edges: two thin entry/exit paths bridged by fat links.
        for src, dst, cap in [(0, 1, 1), (0, 2, 5), (3, 5, 5), (4, 5, 1),
                              (1, 3, 100), (2, 4, 100), (2, 3, 3)]:
            G.add_edge(src, dst, capacity=cap)
        num_nodes = len(G.nodes)
        traffic_matrix = np.zeros((num_nodes, num_nodes), dtype=np.float32)
        traffic_matrix[0, 5] = 10
        super().__init__(G, traffic_matrix)

    def name(self):
        return 'recon3'
class ProGenConfig(PretrainedConfig):
    """Configuration for ProGen (GPT-J-style) causal language models.

    Defaults correspond to a large model variant (28 layers, 4096 hidden).
    NOTE(review): the accessor methods at the bottom look like they were
    meant to be @property (as in the analogous HF configs) -- callers here
    would invoke them as methods, so leaving them unchanged; confirm usage.
    """
    model_type = 'progen'

    def __init__(self, vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function='gelu_new', resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-05, initializer_range=0.02, scale_attn_weights=True, gradient_checkpointing=False, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner  # None means the framework default (4*n_embd)
        self.rotary_dim = rotary_dim  # rotary embeddings applied to this many dims
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.gradient_checkpointing = gradient_checkpointing
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def max_position_embeddings(self):
        # Alias for n_positions (HF-standard attribute name).
        return self.n_positions

    def hidden_size(self):
        return self.n_embd

    def num_attention_heads(self):
        return self.n_head

    def num_hidden_layers(self):
        return self.n_layer
def shape(tensor, dim=None):
    """Return the tensor's static shape as a list, or a single dimension of it.

    With ``dim=None`` the whole shape list is returned; otherwise the entry
    at index ``dim`` (negative indices allowed).
    """
    dims = tensor.shape.as_list()
    return dims if dim is None else dims[dim]
def self_convert(wav, vocoder):
    """Round-trip *wav* through mel extraction and the vocoder.

    No conversion model is applied: the waveform is re-synthesised from its
    own mel spectrogram, which makes this useful as a vocoder quality
    upper-bound / sanity check. Returns a flat numpy waveform.
    """
    mel = preprocess(wav)
    # Reorder to what the vocoder expects -- presumably (freq, time) from
    # (time, freq); TODO confirm against preprocess()'s output layout.
    c = mel.transpose((- 1), (- 2)).squeeze()
    with torch.no_grad():
        recon_hifi = vocoder.inference(c)
        recon_hifi = recon_hifi.view((- 1)).cpu().numpy()
    return recon_hifi
# NOTE(review): the original had a bare `_cache(maxsize=200)` statement here;
# the decorator form below is almost certainly what was intended -- confirm.
@_cache(maxsize=200)
def _read_leapfile(ls_fpath):
    """Parse a leap-second file.

    Each data line is whitespace-split; column 4 is taken as a Julian date
    and column 6 as the UTC-TAI offset (seconds).

    Returns:
        (jd, offset): two parallel lists of floats.
    """
    jd = []
    offset = []
    # `with` guarantees the file is closed even if a line fails to parse
    # (the original leaked the handle on exception).
    with open(ls_fpath, 'r') as f:
        for line in f:
            a = line.split()
            jd.append(float(a[4]))
            offset.append(float(a[6]))
    return (jd, offset)
class BERTFilter(object):
    """Multi-label BERT classifier used to validate dialogue turn labels."""

    def __init__(self, data_file):
        self.processor = DataProcessor(data_file)
        self.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        self.label_list = self.processor.get_labels()
        bert_config = BertConfig.from_pretrained('bert-base-uncased', num_labels=len(self.label_list))
        self.max_seq_length = 512
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
        self.model = BertForMultiLabelSequenceClassification.from_pretrained('bert-base-uncased', config=bert_config)
        # Fine-tuned weights are loaded on CPU first, then moved to device.
        self.model.load_state_dict(torch.load('./classifier_filter/filter/best_model.pt', map_location='cpu'))
        self.model.to(self.device)

    def query_filter(self, dialogue_idx, turn_id, user_utters, turn_label, thresh):
        """Return, per example, whether predicted labels are a subset of the
        gold turn labels (see evaluation())."""
        examples = self.processor.create_examples(dialogue_idx, turn_id, user_utters, turn_label)
        data = convert_examples_to_tensor(examples, self.label_list, self.max_seq_length, self.tokenizer)
        result = self.evaluation(data, thresh)
        return result

    def evaluation(self, data, thresh):
        """Run the model on a prepared tensor batch and compare predictions
        (sigmoid > thresh) against the gold label sets."""
        self.model.eval()
        prediction_list = []
        target_list = []
        (input_ids, input_mask, segment_ids, label_ids) = data
        input_ids = input_ids.to(self.device)
        input_mask = input_mask.to(self.device)
        segment_ids = segment_ids.to(self.device)
        label_ids = label_ids.to(self.device)
        with torch.no_grad():
            logits = self.model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
        probs = logits.sigmoid()
        (prediction_list, target_list) = self.acc_pred(probs, label_ids.view((- 1), len(self.label_list)), self.label_list, thresh)
        result = []
        for idx in range(len(prediction_list)):
            prediction_set = set(prediction_list[idx])
            target_set = set(target_list[idx])
            # True = every predicted label is also in the gold set.
            if prediction_set.issubset(target_set):
                result.append(True)
            else:
                result.append(False)
        return result

    def acc_pred(self, probs, labels, label_list, thresh):
        """Threshold probabilities and map active indices back to label names.

        Returns (prediction_list, target_list), each a per-example list of
        label-name lists.
        """
        batch_size = probs.size(0)
        preds = (probs > thresh)
        preds = preds.cpu().numpy()
        labels = labels.byte().cpu().numpy()
        prediction_list = []
        target_list = []
        for idx in range(batch_size):
            pred = preds[idx]
            label = labels[idx]
            prediction_list.append([])
            target_list.append([])
            # NOTE(review): the inner loops shadow the outer `idx`; harmless
            # because `idx` is re-bound at the top of each outer iteration,
            # but worth renaming if this code is touched.
            for (idx, each_pred) in enumerate(pred):
                if each_pred:
                    prediction_list[(- 1)].append(label_list[idx])
            for (idx, each_label) in enumerate(label):
                if each_label:
                    target_list[(- 1)].append(label_list[idx])
        return (prediction_list, target_list)
class ResnetStack(nn.Module):
    """IMPALA-style residual stack: one conv (optionally followed by a
    3x3/stride-2 max-pool) and then `num_blocks` two-conv residual blocks.

    NOTE(review): flax modules that create sublayers inline usually decorate
    __call__ with @nn.compact; the decorator is not visible here -- confirm.
    """
    num_ch: int        # number of channels for every conv in the stack
    num_blocks: int    # number of residual blocks
    use_max_pooling: bool = True

    def __call__(self, observations: jnp.ndarray) -> jnp.ndarray:
        initializer = nn.initializers.xavier_uniform()
        conv_out = nn.Conv(features=self.num_ch, kernel_size=(3, 3), strides=1, kernel_init=initializer, padding='SAME')(observations)
        if self.use_max_pooling:
            # Spatial downsampling happens only here, once per stack.
            conv_out = nn.max_pool(conv_out, window_shape=(3, 3), padding='SAME', strides=(2, 2))
        for _ in range(self.num_blocks):
            block_input = conv_out
            # Pre-activation residual block: relu-conv-relu-conv + skip.
            conv_out = nn.relu(conv_out)
            conv_out = nn.Conv(features=self.num_ch, kernel_size=(3, 3), strides=1, padding='SAME', kernel_init=initializer)(conv_out)
            conv_out = nn.relu(conv_out)
            conv_out = nn.Conv(features=self.num_ch, kernel_size=(3, 3), strides=1, padding='SAME', kernel_init=initializer)(conv_out)
            conv_out += block_input
        return conv_out
def from_inversion_vector(iv, parent=None):
    """Reconstruct the one-based permutation encoded by inversion vector *iv*.

    iv[i] is the number of remaining open positions to skip when placing
    value i+1. The result is wrapped by *parent* (a Permutations parent by
    default, but any callable accepting the list works).
    """
    open_positions = list(range(len(iv)))  # still-unfilled slots, in order
    perm = iv[:]                           # buffer of the same length as iv
    for value, skips in enumerate(iv, start=1):
        # Value `value` lands in the `skips`-th remaining open slot.
        perm[open_positions.pop(skips)] = value
    return (Permutations() if parent is None else parent)(perm)
class SmoothL1Criterion(Criterion):
    """Legacy torch.nn SmoothL1 (Huber) loss wrapping the C backend.

    `sizeAverage` follows the deprecated legacy-reduction convention
    (mean over elements when True, sum otherwise).
    """

    def __init__(self, sizeAverage=True):
        super(SmoothL1Criterion, self).__init__()
        self.sizeAverage = sizeAverage
        self.output_tensor = None  # lazily-allocated 1-element backend buffer

    def updateOutput(self, input, target):
        """Compute the scalar loss for (input, target)."""
        if (self.output_tensor is None):
            self.output_tensor = input.new(1)
        self._backend.SmoothL1Criterion_updateOutput(self._backend.library_state, input, target, self.output_tensor, _Reduction.legacy_get_enum(self.sizeAverage, True, emit_warning=False))
        self.output = self.output_tensor[0].item()
        return self.output

    def updateGradInput(self, input, target):
        """Compute d(loss)/d(input) into self.gradInput."""
        # Loss is scalar, so the upstream gradient is implicitly 1.
        implicit_gradOutput = torch.ones(1).type_as(input)
        self._backend.SmoothL1Criterion_updateGradInput(self._backend.library_state, input, target, implicit_gradOutput, self.gradInput, _Reduction.legacy_get_enum(self.sizeAverage, True, emit_warning=False))
        return self.gradInput
def settings_logreg(key):
    """Return the experiment settings for logistic regression on *key*.

    Returns:
        (module, (n_tr, n_val, n_test), (lr, decay, num_epoch, batch_size))
    Raises AssertionError for an unknown dataset key.
    """
    assert (key in ['mnist', '20news', 'adult'])
    split_sizes = (200, 200, 200)  # identical train/val/test sizes everywhere
    if key == 'mnist':
        module = MnistModule()
        optim_settings = (0.1, True, 5, 5)
    elif key == '20news':
        module = NewsModule()
        optim_settings = (0.01, True, 10, 5)
    else:  # 'adult' -- guaranteed by the assert above
        module = AdultModule(csv_path='./data')
        optim_settings = (0.1, True, 20, 5)
    module.append_one = False
    return (module, split_sizes, optim_settings)
def setup_loggers(filename, quiet):
    """Send DEBUG-level logs to *filename*; unless *quiet*, also mirror
    INFO-and-above records to the console in a bare '%(message)s' format."""
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        filename=filename,
        filemode='w',
    )
    if quiet:
        return
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter('%(message)s'))
    logging.getLogger().addHandler(console)
def get_default_config():
    """Build the default yacs config tree for the dual-model re-ID trainer."""
    cfg = CN()
    # --- model ---
    cfg.model = CN()
    cfg.model.name = 'resnet50'
    cfg.model.pretrained = True
    cfg.model.load_weights1 = ''   # two backbones (mutual learning setup)
    cfg.model.load_weights2 = ''
    cfg.model.resume1 = ''
    cfg.model.resume2 = ''
    cfg.model.deploy = 'model1'    # which of the two models is deployed
    # --- data ---
    cfg.data = CN()
    cfg.data.type = 'image'
    cfg.data.root = 'reid-data'
    cfg.data.sources = ['market1501']
    cfg.data.targets = ['market1501']
    cfg.data.workers = 4
    cfg.data.split_id = 0
    cfg.data.height = 256
    cfg.data.width = 128
    cfg.data.combineall = False
    cfg.data.transforms = ['random_flip']
    cfg.data.norm_mean = [0.485, 0.456, 0.406]   # ImageNet statistics
    cfg.data.norm_std = [0.229, 0.224, 0.225]
    cfg.data.save_dir = 'log'
    cfg.data.load_train_targets = False
    # --- dataset-specific options ---
    cfg.market1501 = CN()
    cfg.market1501.use_500k_distractors = False
    cfg.cuhk03 = CN()
    cfg.cuhk03.labeled_images = False
    cfg.cuhk03.classic_split = False
    cfg.cuhk03.use_metric_cuhk03 = False
    # --- sampler ---
    cfg.sampler = CN()
    cfg.sampler.train_sampler = 'RandomSampler'
    cfg.sampler.num_instances = 4
    # --- video re-ID ---
    cfg.video = CN()
    cfg.video.seq_len = 15
    cfg.video.sample_method = 'evenly'
    cfg.video.pooling_method = 'avg'
    # --- training ---
    cfg.train = CN()
    cfg.train.optim = 'adam'
    cfg.train.lr = 0.0003
    cfg.train.weight_decay = 0.0005
    cfg.train.max_epoch = 60
    cfg.train.start_epoch = 0
    cfg.train.batch_size = 32
    cfg.train.fixbase_epoch = 0
    cfg.train.open_layers = ['classifier']
    cfg.train.staged_lr = False
    cfg.train.new_layers = ['classifier']
    cfg.train.base_lr_mult = 0.1
    cfg.train.lr_scheduler = 'single_step'
    cfg.train.stepsize = [20]
    cfg.train.gamma = 0.1
    cfg.train.print_freq = 20
    cfg.train.seed = 1
    # --- optimizer-specific knobs ---
    cfg.sgd = CN()
    cfg.sgd.momentum = 0.9
    cfg.sgd.dampening = 0.0
    cfg.sgd.nesterov = False
    cfg.rmsprop = CN()
    cfg.rmsprop.alpha = 0.99
    cfg.adam = CN()
    cfg.adam.beta1 = 0.9
    cfg.adam.beta2 = 0.999
    # --- losses ---
    cfg.loss = CN()
    cfg.loss.name = 'triplet'
    cfg.loss.softmax = CN()
    cfg.loss.softmax.label_smooth = True
    cfg.loss.triplet = CN()
    cfg.loss.triplet.margin = 0.3
    cfg.loss.triplet.weight_t = 1.0   # triplet-loss weight
    cfg.loss.triplet.weight_x = 0.0   # softmax-loss weight
    cfg.loss.dml = CN()
    cfg.loss.dml.weight_ml = 1.0      # deep-mutual-learning weight
    # --- test ---
    cfg.test = CN()
    cfg.test.batch_size = 100
    cfg.test.dist_metric = 'euclidean'
    cfg.test.normalize_feature = False
    cfg.test.ranks = [1, 5, 10, 20]
    cfg.test.evaluate = False
    cfg.test.eval_freq = (- 1)   # -1 = evaluate only after training ends
    cfg.test.start_eval = 0
    cfg.test.rerank = False
    cfg.test.visrank = False
    cfg.test.visrank_topk = 10
    return cfg
def send_tokensregex_request(request):
    # Thin wrapper: forwards *request* to the Java TokensRegex processor
    # class and decodes the reply as a TokensRegexResponse protobuf.
    return send_request(request, TokensRegexResponse, 'edu.stanford.nlp.ling.tokensregex.ProcessTokensRegexRequest')
def parse_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace with `model_dir` (must be an existing path) and
        `base_analysis_dir` (defaults to '/tmp').
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=str, help='Model directory')
    parser.add_argument('--base_analysis_dir', type=str, default='/tmp', help='Analysis directory')
    args = parser.parse_args()
    # The original used a bare `assert`, which is silently stripped under
    # `python -O`; report a proper usage error through argparse instead.
    if (args.model_dir is None) or (not path.exists(args.model_dir)):
        parser.error('--model_dir must be an existing path, got: {!r}'.format(args.model_dir))
    return args
def segment_char_ngrams(args):
    """Split rare words into character n-grams (subword-nmt style).

    Words in the top-`args.shortlist` of `args.vocab` pass through unchanged;
    all others are emitted as `args.n`-sized character chunks, each chunk
    followed by `args.separator` (except the last) and a space.
    NOTE(review): indentation reconstructed from collapsed source -- the
    placement of the separator/space writes follows the subword-nmt
    convention ("ab@@ cd@@ ef "); confirm against the original.
    """
    # vocab file: "<word> <count>" per line; map word -> frequency rank.
    vocab = [line.split()[0] for line in args.vocab if (len(line.split()) == 2)]
    vocab = dict(((y, x) for (x, y) in enumerate(vocab)))
    for line in args.input:
        for word in line.split():
            if ((word not in vocab) or (vocab[word] > args.shortlist)):
                i = 0
                while ((i * args.n) < len(word)):
                    args.output.write(word[(i * args.n):((i * args.n) + args.n)])
                    i += 1
                    # Continuation marker between chunks of the same word.
                    if ((i * args.n) < len(word)):
                        args.output.write(args.separator)
                    args.output.write(' ')
            else:
                args.output.write((word + ' '))
        args.output.write('\n')
class FakelyQuantONNXPyTorchExporter(BasePyTorchExporter):
    """Exports a quantized PyTorch model to ONNX.

    Depending on `use_onnx_custom_quantizer_ops`, quantizers are exported
    either as custom MCTQ ops or folded into the graph as fake-quant nodes.
    """

    def __init__(self, model: torch.nn.Module, is_layer_exportable_fn: Callable, save_model_path: str, repr_dataset: Callable, use_onnx_custom_quantizer_ops: bool=False):
        super().__init__(model, is_layer_exportable_fn, save_model_path, repr_dataset)
        self._use_onnx_custom_quantizer_ops = use_onnx_custom_quantizer_ops

    def export(self) -> None:
        """Validate layers, prepare quantizers, and run torch.onnx.export
        with a dynamic batch dimension."""
        # Validation pass over the model's direct children.
        for layer in self.model.children():
            self.is_layer_exportable_fn(layer)
        if self._use_onnx_custom_quantizer_ops:
            self._enable_onnx_custom_ops_export()
        else:
            self._substitute_fully_quantized_model()
        if self._use_onnx_custom_quantizer_ops:
            Logger.info(f'Exporting onnx model with MCTQ quantizers: {self.save_model_path}')
        else:
            Logger.info(f'Exporting fake-quant onnx model: {self.save_model_path}')
        # One representative batch is enough to trace the graph.
        model_input = to_torch_tensor(next(self.repr_dataset())[0])
        torch.onnx.export(self.model, model_input, self.save_model_path, opset_version=OPSET_VERSION, verbose=False, input_names=['input'], output_names=['output'], dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})

    def _enable_onnx_custom_ops_export(self):
        # Switch every activation/weight quantizer to its custom-op ONNX
        # implementation before export.
        for (n, m) in self.model.named_children():
            if isinstance(m, PytorchActivationQuantizationHolder):
                assert isinstance(m.activation_holder_quantizer, pytorch_quantizers.BasePyTorchInferableQuantizer)
                m.activation_holder_quantizer.enable_custom_impl()
            if isinstance(m, PytorchQuantizationWrapper):
                for wq in m.weights_quantizers.values():
                    assert isinstance(wq, pytorch_quantizers.BasePyTorchInferableQuantizer)
                    wq.enable_custom_impl()
def text_model_inference(model, input_sentence):
    """Run a text model (e.g. NER) on a single raw sentence.

    Builds the test pipeline from the model's config, tokenises the sentence
    into BERT-style tensors, adds a batch dimension and runs a no-grad
    forward pass. Returns the model's prediction result.
    """
    assert isinstance(input_sentence, str)
    cfg = model.cfg
    # Fall back to the dataset-level pipeline if no explicit test pipeline.
    if (cfg.data.test.get('pipeline', None) is None):
        if is_2dlist(cfg.data.test.datasets):
            cfg.data.test.pipeline = cfg.data.test.datasets[0][0].pipeline
        else:
            cfg.data.test.pipeline = cfg.data.test.datasets[0].pipeline
    if is_2dlist(cfg.data.test.pipeline):
        cfg.data.test.pipeline = cfg.data.test.pipeline[0]
    test_pipeline = Compose(cfg.data.test.pipeline)
    data = {'text': input_sentence, 'label': {}}
    data = test_pipeline(data)
    # img_metas may be a plain dict or a DataContainer-style wrapper.
    if isinstance(data['img_metas'], dict):
        img_metas = data['img_metas']
    else:
        img_metas = data['img_metas'].data
    assert isinstance(img_metas, dict)
    # unsqueeze(0): single sentence -> batch of one.
    img_metas = {'input_ids': img_metas['input_ids'].unsqueeze(0), 'attention_masks': img_metas['attention_masks'].unsqueeze(0), 'token_type_ids': img_metas['token_type_ids'].unsqueeze(0), 'labels': img_metas['labels'].unsqueeze(0)}
    with torch.no_grad():
        result = model(None, img_metas, return_loss=False)
    return result
class GraphTransformerLayer(nn.Module):
    """Graph transformer layer updating node (h) and edge (e) features.

    Structure per feature stream: multi-head attention -> output projection
    -> residual -> norm -> 2-layer FFN -> residual -> norm. LayerNorm and
    BatchNorm are independently switchable.
    """

    def __init__(self, in_dim, out_dim, num_heads, dropout=0.0, layer_norm=False, batch_norm=True, residual=True, use_bias=False):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.residual = residual
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm
        # Per-head dimension is out_dim // num_heads; heads are re-merged
        # by the view() in forward().
        self.attention = MultiHeadAttentionLayer(in_dim, (out_dim // num_heads), num_heads, use_bias)
        self.O_h = nn.Linear(out_dim, out_dim)  # output projections
        self.O_e = nn.Linear(out_dim, out_dim)
        if self.layer_norm:
            self.layer_norm1_h = nn.LayerNorm(out_dim)
            self.layer_norm1_e = nn.LayerNorm(out_dim)
        if self.batch_norm:
            self.batch_norm1_h = nn.BatchNorm1d(out_dim)
            self.batch_norm1_e = nn.BatchNorm1d(out_dim)
        # Position-wise feed-forward blocks (2x expansion).
        self.FFN_h_layer1 = nn.Linear(out_dim, (out_dim * 2))
        self.FFN_h_layer2 = nn.Linear((out_dim * 2), out_dim)
        self.FFN_e_layer1 = nn.Linear(out_dim, (out_dim * 2))
        self.FFN_e_layer2 = nn.Linear((out_dim * 2), out_dim)
        if self.layer_norm:
            self.layer_norm2_h = nn.LayerNorm(out_dim)
            self.layer_norm2_e = nn.LayerNorm(out_dim)
        if self.batch_norm:
            self.batch_norm2_h = nn.BatchNorm1d(out_dim)
            self.batch_norm2_e = nn.BatchNorm1d(out_dim)

    def forward(self, g, h, e):
        """g: graph; h: node features; e: edge features. Returns (h, e)."""
        h_in1 = h  # saved for the first residual connection
        e_in1 = e
        (h_attn_out, e_attn_out) = self.attention(g, h, e)
        # Merge the attention heads back into a flat feature dimension.
        h = h_attn_out.view((- 1), self.out_channels)
        e = e_attn_out.view((- 1), self.out_channels)
        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)
        h = self.O_h(h)
        e = self.O_e(e)
        if self.residual:
            h = (h_in1 + h)
            e = (e_in1 + e)
        if self.layer_norm:
            h = self.layer_norm1_h(h)
            e = self.layer_norm1_e(e)
        if self.batch_norm:
            h = self.batch_norm1_h(h)
            e = self.batch_norm1_e(e)
        h_in2 = h  # saved for the second residual connection
        e_in2 = e
        h = self.FFN_h_layer1(h)
        h = F.relu(h)
        h = F.dropout(h, self.dropout, training=self.training)
        h = self.FFN_h_layer2(h)
        e = self.FFN_e_layer1(e)
        e = F.relu(e)
        e = F.dropout(e, self.dropout, training=self.training)
        e = self.FFN_e_layer2(e)
        if self.residual:
            h = (h_in2 + h)
            e = (e_in2 + e)
        if self.layer_norm:
            h = self.layer_norm2_h(h)
            e = self.layer_norm2_e(e)
        if self.batch_norm:
            h = self.batch_norm2_h(h)
            e = self.batch_norm2_e(e)
        return (h, e)

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__, self.in_channels, self.out_channels, self.num_heads, self.residual)
def get_cache_path(args, out_path):
    """Build (and create) the nested cache directory for *args* and return
    the full path of the clustering pickle inside it.

    Keys listed in the module-level `save_keys` become one directory level
    each; the remaining args (sorted by key) form the file-name suffix.
    """
    arg_map = get_name(args)
    base = Path(out_path)
    base.mkdir(exist_ok=True, parents=True)
    # save_keys entries are consumed (popped) so they don't reappear below.
    subdir = '/'.join('{}_{}'.format(key, arg_map.pop(key)) for key in save_keys)
    base = base / subdir
    base.mkdir(exist_ok=True, parents=True)
    suffix = '_'.join('{}_{}'.format(key, arg_map[key]) for key in sorted(arg_map))
    return base / 'clustering_{}.pkl'.format(suffix)
def Dataset_wrap_csv(k_fold='No', use_old_split=True, img_size=384, dataset_name='isic2018', split_ratio=[0.8, 0.2], train_aug=False, data_folder='/bigdata/siyiplace/data/skin_lesion'):
    """Build {'train': ..., 'test': ...} SkinDataset_csv datasets from CSVs.

    Two modes: k-fold ('k_fold' is a fold index, not 'No') and plain
    train/test split by `split_ratio`. In both modes, existing split CSVs
    are reused when `use_old_split` is True; otherwise (or if loading
    fails) new deterministic splits (seed 42) are created and written.
    NOTE(review): `split_ratio=[0.8, 0.2]` is a mutable default argument --
    it is only read here, but worth changing to a tuple if touched.
    NOTE(review): the bare `except:` clauses implement a deliberate
    "fall back to re-splitting" best-effort; they also hide unrelated
    errors (e.g. malformed CSVs).
    """
    data_dic = {}
    data_path = '{}/{}/'.format(data_folder, dataset_name)
    if (k_fold != 'No'):
        # ---- k-fold mode ----
        if use_old_split:
            try:
                train_df = pd.read_csv((data_path + 'train_meta_kfold_{}.csv'.format(k_fold)), dtype={'ID': str})
                test_df = pd.read_csv((data_path + 'test_meta_kfold_{}.csv'.format(k_fold)), dtype={'ID': str})
                data_dic['train'] = SkinDataset_csv(dataset_name, img_size, train_df, use_aug=train_aug, data_path=data_path)
                data_dic['test'] = SkinDataset_csv(dataset_name, img_size, test_df, use_aug=False, data_path=data_path)
                data_size = (len(data_dic['train']) + len(data_dic['test']))
                print('{} has {} samples, {} are used to train, {} are used to test. \n 5 Folder -- Use {}'.format(dataset_name, data_size, len(data_dic['train']), len(data_dic['test']), k_fold))
                return data_dic
            except:
                print('No existing k_folder files, start creating new splitting....')
        print('use new split')
        df = pd.read_csv((data_path + 'meta_{}.csv'.format(dataset_name)), dtype={'ID': str})
        data_size = len(df)
        index_list = list(range(data_size))
        # Fixed seed so the same folds are reproduced on every machine.
        random.Random(42).shuffle(index_list)
        split_size = int(((data_size / 5.0) + 0.5))
        split_ids = [0, split_size, (split_size * 2), (split_size * 3), (split_size * 4), len(index_list)]
        # Write out all five folds; fold i's test set is the i-th slice.
        for i in range(5):
            train_df = df.iloc[(index_list[:split_ids[i]] + index_list[split_ids[(i + 1)]:])]
            test_df = df.iloc[index_list[split_ids[i]:split_ids[(i + 1)]]]
            train_df.to_csv((data_path + 'train_meta_kfold_{}.csv'.format(i)), header=df.columns, index=False)
            test_df.to_csv((data_path + 'test_meta_kfold_{}.csv'.format(i)), header=df.columns, index=False)
        train_df = pd.read_csv((data_path + 'train_meta_kfold_{}.csv'.format(k_fold)), dtype={'ID': str})
        test_df = pd.read_csv((data_path + 'test_meta_kfold_{}.csv'.format(k_fold)), dtype={'ID': str})
        data_dic['train'] = SkinDataset_csv(dataset_name, img_size, train_df, use_aug=train_aug, data_path=data_path)
        data_dic['test'] = SkinDataset_csv(dataset_name, img_size, test_df, use_aug=False, data_path=data_path)
        assert (data_size == (len(data_dic['train']) + len(data_dic['test'])))
        print('Finish creating new 5 folders. {} has {} samples, {} are used to train, {} are used to test. \n 5 Folder -- Use {}'.format(dataset_name, data_size, len(train_df), len(test_df), k_fold))
        return data_dic
    # ---- plain ratio-split mode ----
    if use_old_split:
        try:
            train_df = pd.read_csv((data_path + 'train_meta_{}.csv'.format(int((split_ratio[0] * 100)))), dtype={'ID': str})
            test_df = pd.read_csv((data_path + 'test_meta_{}.csv'.format(int((split_ratio[1] * 100)))), dtype={'ID': str})
            data_dic['train'] = SkinDataset_csv(dataset_name, img_size, train_df, use_aug=train_aug, data_path=data_path)
            data_dic['test'] = SkinDataset_csv(dataset_name, img_size, test_df, use_aug=False, data_path=data_path)
            data_size = (len(data_dic['train']) + len(data_dic['test']))
            print('{} has {} samples, {} are used to train, {} are used to test. \n The split ratio is {}'.format(dataset_name, data_size, len(data_dic['train']), len(data_dic['test']), split_ratio))
            return data_dic
        except:
            print('No existing split files, start creating new splitting....')
    print('use new split')
    df = pd.read_csv((data_path + 'meta_{}.csv'.format(dataset_name)), dtype={'ID': str})
    data_size = len(df)
    index_list = list(range(data_size))
    random.Random(42).shuffle(index_list)
    train_df = df.iloc[index_list[:int((data_size * split_ratio[0]))]]
    test_df = df.iloc[index_list[int((data_size * split_ratio[0])):]]
    print('{} has {} samples, {} are used to train, {} are used to test. \n The split ratio is {}'.format(dataset_name, data_size, len(train_df), len(test_df), split_ratio))
    train_df.to_csv((data_path + 'train_meta_{}.csv'.format(int((split_ratio[0] * 100)))), header=df.columns, index=False)
    test_df.to_csv((data_path + 'test_meta_{}.csv'.format(int((split_ratio[1] * 100)))), header=df.columns, index=False)
    data_dic['train'] = SkinDataset_csv(dataset_name, img_size, train_df, use_aug=train_aug, data_path=data_path)
    data_dic['test'] = SkinDataset_csv(dataset_name, img_size, test_df, use_aug=False, data_path=data_path)
    return data_dic
class MulCorpusReader():
    """Reader that draws fixed shares of a batch from several corpora.

    Each call to next_batch splits `size` into `control_num` shares; the
    remainder goes to the first corpus. Epoch tracking mirrors corpus 0.
    """

    def __init__(self, *corpuses, control_num=3):
        self.corpuses = corpuses
        self.epoch = corpuses[0].epoch
        self.control_num = control_num

    def next_batch(self, size, noop=False):
        """Concatenate each corpus's share and return it twice as (src, src)."""
        base_share, leftover = divmod(size, self.control_num)
        combined = []
        for position, corpus in enumerate(self.corpuses):
            share = base_share + (leftover if position == 0 else 0)
            combined += corpus.next_batch(share, noop=noop)[0]
        self.epoch = self.corpuses[0].epoch
        return (combined, combined)
class LaST_Cloth(BaseDataset):
    """LaST cloth-changing re-ID dataset (train / val / test splits).

    Person ids are built as '<id>_<clothes>' so the same person in different
    clothes counts as a distinct identity; camera ids are synthesised from
    the image index (the dataset has no real camera labels).
    """
    dataset_dir = ''

    def __init__(self, root='data', verbose=True, **kwargs):
        super(LaST_Cloth, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'train')
        self.query_dir = osp.join(self.dataset_dir, 'val', 'query')
        self.gallery_dir = osp.join(self.dataset_dir, 'val', 'gallery')
        self.query_test_dir = osp.join(self.dataset_dir, 'test', 'query')
        self.gallery_test_dir = osp.join(self.dataset_dir, 'test', 'gallery')
        self._check_before_run()
        self.pid2label = self.get_pid2label(self.train_dir)
        self.train = self._process_dir(self.train_dir, pid2label=self.pid2label, relabel=True)
        self.query = self._process_dir(self.query_dir, relabel=False)
        # recam offsets gallery camids past the query's synthetic camids.
        self.gallery = self._process_dir(self.gallery_dir, relabel=False, recam=len(self.query))
        self.query_test = self._process_dir(self.query_test_dir, relabel=False)
        self.gallery_test = self._process_dir(self.gallery_test_dir, relabel=False, recam=len(self.query_test))
        if verbose:
            print('=> LaST loaded')
            self.print_dataset_statistics_movie(self.train, self.query, self.gallery, self.query_test, self.gallery_test)
        (self.num_train_pids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
        (self.num_query_pids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)
        (self.num_query_test_pids, self.num_query_test_imgs, self.num_query_test_cams) = self.get_imagedata_info(self.query_test)
        (self.num_gallery_test_pids, self.num_gallery_test_imgs, self.num_gallery_test_cams) = self.get_imagedata_info(self.gallery_test)

    def get_pid2label(self, dir_path):
        """Map each '<id>_<clothes>' pid string to a contiguous label index."""
        img_paths = glob.glob(osp.join(dir_path, '*/*.jpg'))
        pid_container = set()
        for img_path in img_paths:
            # File name fields: first = person id, last = clothes id.
            name_s = os.path.basename(img_path).split('.')[0].split('_')
            pid = ((name_s[0] + '_') + name_s[(- 1)])
            pid_container.add(pid)
        pid_container = np.sort(list(pid_container))
        pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
        return pid2label

    def _check_before_run(self):
        """Fail fast if any required split directory is missing."""
        if (not osp.exists(self.dataset_dir)):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if (not osp.exists(self.train_dir)):
            raise RuntimeError("'{}' is not available".format(self.train_dir))
        if (not osp.exists(self.query_dir)):
            raise RuntimeError("'{}' is not available".format(self.query_dir))
        if (not osp.exists(self.gallery_dir)):
            raise RuntimeError("'{}' is not available".format(self.gallery_dir))
        if (not osp.exists(self.query_test_dir)):
            raise RuntimeError("'{}' is not available".format(self.query_test_dir))
        if (not osp.exists(self.gallery_test_dir)):
            raise RuntimeError("'{}' is not available".format(self.gallery_test_dir))

    def _process_dir(self, dir_path, pid2label=None, relabel=False, recam=0):
        """Collect (img_path, pid, camid) triples from a split directory."""
        # Query directories are flat; the others have one folder per person.
        if ('query' in dir_path):
            img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        else:
            img_paths = glob.glob(osp.join(dir_path, '*/*.jpg'))
        img_paths = sorted(img_paths)
        dataset = []
        for (ii, img_path) in enumerate(img_paths):
            name_s = os.path.basename(img_path).split('.')[0].split('_')
            pid = ((name_s[0] + '_') + name_s[(- 1)])
            # Synthetic camera id: sequential index shifted by recam.
            camid = int((recam + ii))
            if (relabel and (pid2label is not None)):
                pid = pid2label[pid]
            else:
                # Evaluation splits keep the raw (numeric) person id.
                pid = int(pid.split('_')[0])
            dataset.append((img_path, pid, camid))
        return dataset

    def print_dataset_statistics_movie(self, train, query, gallery, query_test, gallery_test):
        """Print a summary table of ids/images for all five splits."""
        (num_train_pids, num_train_imgs, num_train_cams) = self.get_imagedata_info(train)
        (num_query_pids, num_query_imgs, num_query_cams) = self.get_imagedata_info(query)
        (num_gallery_pids, num_gallery_imgs, num_gallery_cams) = self.get_imagedata_info(gallery)
        (num_query_test_pids, num_query_test_imgs, num_query_test_cams) = self.get_imagedata_info(query_test)
        (num_gallery_test_pids, num_gallery_test_imgs, num_gallery_test_cams) = self.get_imagedata_info(gallery_test)
        print('Dataset statistics:')
        print(' ')
        print(' subset | # ids | # images')
        print(' ')
        print(' train | {:5d} | {:8d}'.format(num_train_pids, num_train_imgs))
        print(' query | {:5d} | {:8d}'.format(num_query_pids, num_query_imgs))
        print(' gallery | {:5d} | {:8d}'.format(num_gallery_pids, num_gallery_imgs))
        print(' query_test | {:5d} | {:8d}'.format(num_query_test_pids, num_query_test_imgs))
        print(' gallery_test | {:5d} | {:8d}'.format(num_gallery_test_pids, num_gallery_test_imgs))
def main():
    """Command-line entry point converting original TF/author checkpoints
    of several architectures (bert/gpt/transfo_xl/gpt2/xlnet/xlm) to
    PyTorch. Arguments are read positionally from sys.argv.
    """
    import sys
    # Global usage check: 3-5 positional args plus a known model family.
    if (((len(sys.argv) < 4) or (len(sys.argv) > 6)) or (sys.argv[1] not in ['bert', 'gpt', 'transfo_xl', 'gpt2', 'xlnet', 'xlm'])):
        print('This command line utility let you convert original (author released) model checkpoint to pytorch.\nIt should be used as one of: \n>> transformers bert TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT, \n>> transformers gpt OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG], \n>> transformers transfo_xl TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG] or \n>> transformers gpt2 TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG] or \n>> transformers xlnet TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT [FINETUNING_TASK_NAME] or \n>> transformers xlm XLM_CHECKPOINT_PATH PYTORCH_DUMP_OUTPUT')
    elif (sys.argv[1] == 'bert'):
        # Converters are imported lazily so TensorFlow is only required
        # when a TF-based conversion is actually requested.
        try:
            from .convert_bert_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
        except ImportError:
            print('transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see for installation instructions.')
            raise
        if (len(sys.argv) != 5):
            print('Should be used as `transformers bert TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`')
        else:
            # Consume args from the end of argv.
            PYTORCH_DUMP_OUTPUT = sys.argv.pop()
            TF_CONFIG = sys.argv.pop()
            TF_CHECKPOINT = sys.argv.pop()
            convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
    elif (sys.argv[1] == 'gpt'):
        from .convert_openai_original_tf_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
        if ((len(sys.argv) < 4) or (len(sys.argv) > 5)):
            print('Should be used as `transformers gpt OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`')
        else:
            OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2]
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
            if (len(sys.argv) == 5):
                OPENAI_GPT_CONFIG = sys.argv[4]
            else:
                OPENAI_GPT_CONFIG = ''
            convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH, OPENAI_GPT_CONFIG, PYTORCH_DUMP_OUTPUT)
    elif (sys.argv[1] == 'transfo_xl'):
        try:
            from .convert_transfo_xl_original_tf_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
        except ImportError:
            print('transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see for installation instructions.')
            raise
        if ((len(sys.argv) < 4) or (len(sys.argv) > 5)):
            print('Should be used as `transformers transfo_xl TF_CHECKPOINT/TF_DATASET_FILE PYTORCH_DUMP_OUTPUT [TF_CONFIG]`')
        else:
            # Second arg is either a TF checkpoint or a dataset file,
            # disambiguated by the 'ckpt' substring.
            if ('ckpt' in sys.argv[2].lower()):
                TF_CHECKPOINT = sys.argv[2]
                TF_DATASET_FILE = ''
            else:
                TF_DATASET_FILE = sys.argv[2]
                TF_CHECKPOINT = ''
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
            if (len(sys.argv) == 5):
                TF_CONFIG = sys.argv[4]
            else:
                TF_CONFIG = ''
            convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE)
    elif (sys.argv[1] == 'gpt2'):
        try:
            from .convert_gpt2_original_tf_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
        except ImportError:
            print('transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see for installation instructions.')
            raise
        if ((len(sys.argv) < 4) or (len(sys.argv) > 5)):
            print('Should be used as `transformers gpt2 TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [TF_CONFIG]`')
        else:
            TF_CHECKPOINT = sys.argv[2]
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
            if (len(sys.argv) == 5):
                TF_CONFIG = sys.argv[4]
            else:
                TF_CONFIG = ''
            convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
    elif (sys.argv[1] == 'xlnet'):
        try:
            from .convert_xlnet_original_tf_checkpoint_to_pytorch import convert_xlnet_checkpoint_to_pytorch
        except ImportError:
            print('transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see for installation instructions.')
            raise
        if ((len(sys.argv) < 5) or (len(sys.argv) > 6)):
            print('Should be used as `transformers xlnet TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT [FINETUNING_TASK_NAME]`')
        else:
            TF_CHECKPOINT = sys.argv[2]
            TF_CONFIG = sys.argv[3]
            PYTORCH_DUMP_OUTPUT = sys.argv[4]
            if (len(sys.argv) == 6):
                FINETUNING_TASK = sys.argv[5]
            else:
                FINETUNING_TASK = None
            convert_xlnet_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, FINETUNING_TASK)
    elif (sys.argv[1] == 'xlm'):
        # XLM checkpoints are already PyTorch; no TF guard needed.
        from .convert_xlm_original_pytorch_checkpoint_to_pytorch import convert_xlm_checkpoint_to_pytorch
        if (len(sys.argv) != 4):
            print('Should be used as `transformers xlm XLM_CHECKPOINT_PATH PYTORCH_DUMP_OUTPUT`')
        else:
            XLM_CHECKPOINT_PATH = sys.argv[2]
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
            convert_xlm_checkpoint_to_pytorch(XLM_CHECKPOINT_PATH, PYTORCH_DUMP_OUTPUT)
def gen_classifier_loader(name, d):
    """Build a zero-argument loader for a named classifier.

    The returned closure constructs an EfficientNet from the architecture
    string stored under ``d['arch']`` and restores the weights registered
    under ``name`` before handing the model back.
    """
    def classifier_loader():
        # Look up the architecture lazily so the dict's state at call time wins.
        arch_name = d['arch']
        net = EfficientNet.from_name(arch_name)
        load_model_state_dict(net, name)
        return net

    return classifier_loader
def test_check_num_rows_non_reject_sampling_error():
    """``check_num_rows`` must raise when no rows were produced outside reject sampling.

    With zero sampled rows, five expected rows, reject sampling disabled and a
    single allowed try, a ``ValueError`` carrying the out-of-bounds message is
    expected.
    """
    expected_message = 'Unable to sample any rows for the given conditions. This may be because the provided values are out-of-bounds in the current model.'
    with pytest.raises(ValueError, match=expected_message):
        check_num_rows(0, 5, False, 1)
class Agent():
    """Container that wires a list of modules into one trainable agent.

    The agent owns a config (an AttrDict), a folder on disk, and a dict of
    modules. Modules are registered both as attributes and in ``module_dict``;
    modules exposing ``_process_experience`` / ``_optimize`` are additionally
    collected into dispatch registries. Persistence is delegated to each
    module's ``save``/``load`` plus a pickled copy of the config.
    """

    def __init__(self, module_list: Iterable, config: AttrDict):
        self.config = config
        parent_folder = config.parent_folder
        assert parent_folder, "Setting the agent's parent folder is required!"
        # Fall back to a timestamp-derived name when none is configured.
        self.agent_name = (config.get('agent_name') or ('agent_' + short_timestamp()))
        self.agent_folder = os.path.join(parent_folder, self.agent_name)
        # An existing folder means a previous run saved state there: load it
        # after module registration instead of starting fresh.
        load_agent = False
        if os.path.exists(self.agent_folder):
            print('Detected existing agent! Loading agent from checkpoint...')
            load_agent = True
        else:
            os.makedirs(self.agent_folder, exist_ok=True)
        # Registries of modules that take part in experience processing /
        # optimization; filled by _register_module.
        self._process_experience_registry = []
        self._optimize_registry = []
        # Step counters live on the config so they are persisted with it.
        self.config.env_steps = 0
        self.config.opt_steps = 0
        module_list = flatten_modules(module_list)
        self.module_dict = AttrDict()
        # First pass: expose every module as an attribute so modules can look
        # each other up by name during the second (registration) pass.
        for module in module_list:
            assert module.module_name
            setattr(self, module.module_name, module)
            self.module_dict[module.module_name] = module
        for module in module_list:
            self._register_module(module)
        self.training = True
        if load_agent:
            self.load()
            print('Successfully loaded saved agent!')
        else:
            # Persist the freshly initialized state immediately.
            self.save()

    def train_mode(self):
        """Put the agent into training mode (mirrors torch's train/eval split)."""
        self.training = True

    def eval_mode(self):
        """Put the agent into evaluation mode."""
        self.training = False

    def process_experience(self, experience: AttrDict):
        """Dispatch a new experience to every module that consumes experiences.

        env_steps advances by the number of parallel envs when an ``env``
        module is present, else by one.
        """
        self.config.env_steps += (self.env.num_envs if hasattr(self, 'env') else 1)
        for module in self._process_experience_registry:
            module._process_experience(experience)

    def optimize(self):
        """Run one optimization step across all optimizing modules."""
        self.config.opt_steps += 1
        for module in self._optimize_registry:
            module._optimize()

    def _register_module(self, module):
        """Attach ``module`` to this agent and hook it into the registries."""
        self.module_dict[module.module_name] = module
        module.agent = module.agent = module.agent if False else self  # noqa: B018 -- see note below
        module.agent = self
        module.verify_agent_compatibility()
        module._setup()
        module.new_task()
        # Duck-typed registration: presence of the hook method opts the
        # module into the corresponding dispatch loop.
        if hasattr(module, '_process_experience'):
            self._process_experience_registry.append(module)
        if hasattr(module, '_optimize'):
            self._optimize_registry.append(module)

    def set_module(self, module_name, module):
        """Replace (or add) a module under ``module_name`` and register it."""
        setattr(self, module_name, module)
        self._register_module(module)

    def save(self, subfolder: Optional[str]=None):
        """Save every module plus the pickled config under ``agent_folder/subfolder``.

        Defaults to the 'checkpoint' subfolder.
        """
        save_folder = self.agent_folder
        subfolder = (subfolder or 'checkpoint')
        save_folder = os.path.join(save_folder, subfolder)
        if (not os.path.exists(save_folder)):
            os.makedirs(save_folder)
        for module in self.module_dict.values():
            module.save(save_folder)
        with open(os.path.join(save_folder, 'config.pickle'), 'wb') as f:
            pickle.dump(self.config, f)

    def load(self, subfolder: Optional[str]=None):
        """Restore the config and all modules from ``agent_folder/subfolder``.

        NOTE(review): ``self.config`` is replaced wholesale by the pickled
        copy; any config changes made since the save are discarded.
        """
        save_folder = self.agent_folder
        subfolder = (subfolder or 'checkpoint')
        save_folder = os.path.join(save_folder, subfolder)
        assert os.path.exists(save_folder), 'load path does not exist!'
        with open(os.path.join(save_folder, 'config.pickle'), 'rb') as f:
            self.config = pickle.load(f)
        for module in self.module_dict.values():
            print('Loading module {}'.format(module.module_name))
            module.load(save_folder)

    def save_checkpoint(self, checkpoint_dir):
        """Crash-safe checkpointing into two alternating slots ('1' and '2').

        A slot is considered valid only once its DONE marker exists; writes go
        to the slot without a marker, or, when both are valid, to the OLDER
        one (whose marker is removed first so a crash mid-write leaves the
        newer slot intact). Replay-buffer saving is force-enabled for the
        duration of the module saves.
        """
        if (not os.path.exists(checkpoint_dir)):
            os.makedirs(checkpoint_dir)
        with open(os.path.join(checkpoint_dir, 'INITIALIZED'), 'w') as f:
            f.write('INITIALIZED')
        subfolder1 = os.path.join(checkpoint_dir, '1')
        subfolder2 = os.path.join(checkpoint_dir, '2')
        os.makedirs(os.path.join(subfolder1, 'checkpoint'), exist_ok=True)
        os.makedirs(os.path.join(subfolder2, 'checkpoint'), exist_ok=True)
        done1 = os.path.join(subfolder1, 'DONE')
        done2 = os.path.join(subfolder2, 'DONE')
        if (not os.path.exists(done1)):
            savedir = subfolder1
            done_file = done1
        elif (not os.path.exists(done2)):
            savedir = subfolder2
            done_file = done2
        else:
            # Both slots valid: overwrite the older one.
            modtime1 = os.path.getmtime(done1)
            modtime2 = os.path.getmtime(done2)
            if (modtime1 < modtime2):
                savedir = subfolder1
                done_file = done1
            else:
                savedir = subfolder2
                done_file = done2
            # Invalidate the slot before writing into it.
            os.remove(done_file)
        savedir_checkpoint = os.path.join(savedir, 'checkpoint')
        old_save_replay_buf = self.config.save_replay_buf
        self.config.save_replay_buf = True
        for module in self.module_dict.values():
            module.save(savedir_checkpoint)
        self.config.save_replay_buf = old_save_replay_buf
        with open(os.path.join(savedir_checkpoint, 'config.pickle'), 'wb') as f:
            pickle.dump(self.config, f)
        # Mirror loose files (not folders) from the agent folder into the slot.
        files_and_folders = glob.glob(os.path.join(self.agent_folder, '*'))
        for file_or_folder in files_and_folders:
            if os.path.isfile(file_or_folder):
                shutil.copy(file_or_folder, savedir)
        # Writing the marker last commits the slot.
        with open(done_file, 'w') as f:
            f.write('DONE')

    def load_from_checkpoint(self, checkpoint_dir):
        """Load from the NEWER valid slot written by ``save_checkpoint``."""
        subfolder1 = os.path.join(checkpoint_dir, '1')
        subfolder2 = os.path.join(checkpoint_dir, '2')
        done1 = os.path.join(subfolder1, 'DONE')
        done2 = os.path.join(subfolder2, 'DONE')
        if (not os.path.exists(done1)):
            assert os.path.exists(done2)
            savedir = subfolder2
        elif (not os.path.exists(done2)):
            savedir = subfolder1
        else:
            # Both valid: pick the most recently committed slot.
            modtime1 = os.path.getmtime(done1)
            modtime2 = os.path.getmtime(done2)
            if (modtime1 > modtime2):
                savedir = subfolder1
            else:
                savedir = subfolder2
        savedir_checkpoint = os.path.join(savedir, 'checkpoint')
        with open(os.path.join(savedir_checkpoint, 'config.pickle'), 'rb') as f:
            self.config = pickle.load(f)
        for module in self.module_dict.values():
            print('Loading module {}'.format(module.module_name))
            module.load(savedir_checkpoint)
        # Copy loose files from the slot back into the live agent folder.
        files_and_folders = glob.glob(os.path.join(savedir, '*'))
        for file_or_folder in files_and_folders:
            if os.path.isfile(file_or_folder):
                shutil.copy(file_or_folder, self.agent_folder)

    def torch(self, x, type=torch.float):
        """Convert ``x`` to a tensor of the given dtype on the configured device.

        Tensors pass through untouched. NOTE(review): an unsupported ``type``
        falls off the end and returns None -- presumably never hit; confirm.
        """
        if isinstance(x, torch.Tensor):
            return x
        elif (type == torch.float):
            return torch.FloatTensor(x).to(self.config.device)
        elif (type == torch.long):
            return torch.LongTensor(x).to(self.config.device)
        elif (type == torch.bool):
            return torch.BoolTensor(x).to(self.config.device)

    def numpy(self, x):
        """Detach a tensor from the graph and return it as a numpy array."""
        return x.cpu().detach().numpy()
def setup_classifiers():
    """Build a trained random-forest pool plus DSEL/test splits.

    Uses a fixed seed (654321) for reproducibility; statement order is kept
    because every sklearn call consumes draws from the shared RNG.
    Returns (pool_classifiers, X_dsel, y_dsel, X_test, y_test).
    """
    rng = np.random.RandomState(654321)
    # Imbalanced two-class problem: 20% / 80%.
    X, y = make_classification(n_classes=2, n_samples=1000, weights=[0.2, 0.8], random_state=rng)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=rng)
    # Standardize using statistics fitted on the training portion only.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    # Carve a dynamic-selection (DSEL) set out of the training data.
    X_train, X_dsel, y_train, y_dsel = train_test_split(X_train, y_train, test_size=0.5, random_state=rng)
    pool_classifiers = RandomForestClassifier(n_estimators=10, n_jobs=-1, random_state=rng, max_depth=10)
    pool_classifiers.fit(X_train, y_train)
    return pool_classifiers, X_dsel, y_dsel, X_test, y_test
def force_list(x):
    """Coerce ``x`` into a list.

    Lists are returned unchanged (same object), tuples are converted, and
    any other value is wrapped in a single-element list.
    """
    if isinstance(x, list):
        return x
    if isinstance(x, tuple):
        return list(x)
    return [x]
def load_model_from_config(serialization_directory: str, weight_file: str = None):
    """Instantiate a LukeModel from a serialized experiment directory.

    Reads ``metadata.json`` from ``serialization_directory`` to recover the
    model configuration, builds a LukeConfig on top of the named BERT config,
    and then either loads full LUKE weights from ``weight_file`` (non-strict,
    so extra/missing keys are tolerated) or initializes from the pretrained
    BERT weights only.

    Args:
        serialization_directory: directory containing ``metadata.json``.
        weight_file: optional path to a saved state dict; when None, only
            BERT weights are loaded.

    Returns:
        The constructed (and weight-initialized) LukeModel.
    """
    serialization_directory = Path(serialization_directory)
    metadata_path = serialization_directory / 'metadata.json'
    # Fix: the original used json.load(open(...)) and leaked the file handle.
    with open(metadata_path, 'r') as metadata_file:
        model_config = json.load(metadata_file)['model_config']

    bert_config = AutoConfig.from_pretrained(model_config['bert_model_name'])
    config = LukeConfig(
        entity_vocab_size=model_config['entity_vocab_size'],
        bert_model_name=model_config['bert_model_name'],
        entity_emb_size=model_config['entity_emb_size'],
        **bert_config.to_dict(),
    )
    model = LukeModel(config)

    if weight_file is not None:
        # map_location='cpu' so checkpoints saved on GPU load anywhere.
        model_state_dict = torch.load(weight_file, map_location='cpu')
        model.load_state_dict(model_state_dict, strict=False)
    else:
        bert_model = AutoModel.from_pretrained(model_config['bert_model_name'])
        bert_state_dict = bert_model.state_dict()
        model.load_bert_weights(bert_state_dict)
    return model
def monomial_function(n, e):
    """Return the S-box realizing the monomial map X -> X**e over GF(2**n)."""
    from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
    from sage.rings.finite_rings.finite_field_constructor import GF

    field = GF((2, n), name='x')
    ring = PolynomialRing(field, name='X')
    monomial = ring.gen() ** e
    return SBox(monomial)
def argparser():
    """Build the command-line parser for the NN dataflow scheduling tool.

    Covers the network/batch selection, hardware resource description
    (nodes, PE array, register file, global buffer, memory type), cost
    model knobs, partitioning/pipelining switches, and search options.

    Returns:
        The configured argparse.ArgumentParser (not yet parsed).
    """
    ap = argparse.ArgumentParser()

    ap.add_argument('net', help='network name, should be a .py file under "nns". Choices: {}.'.format(', '.join(all_networks())))
    ap.add_argument('--batch', type=int, required=True, help='batch size')
    ap.add_argument('--word', type=int, default=16, help='word size in bits')

    # Hardware resources.
    ap.add_argument('--nodes', type=int, nargs=2, required=True, metavar=('H', 'W'), help='Parallel node partitioning dimensions')
    ap.add_argument('--array', type=int, nargs=2, required=True, metavar=('H', 'W'), help='PE array dimensions')
    ap.add_argument('--regf', type=int, required=True, help='register file size in bytes per PE')
    ap.add_argument('--gbuf', type=int, required=True, help='global buffer size in bytes')
    ap.add_argument('--bus-width', type=int, default=0, help='array bus width in bits. set 0 to ignore')
    # Fix: pass float('inf') explicitly rather than the string 'inf', which
    # only worked because argparse re-parses string defaults through `type`.
    ap.add_argument('--dram-bw', type=float, default=float('inf'), help='total DRAM bandwidth in bytes per cycle.')

    # Cost model.
    ap.add_argument('--op-cost', type=float, default=1, help='cost of arithmetic operation')
    ap.add_argument('--hier-cost', type=float, nargs=4, default=[200, 6, 2, 1], metavar=('DRAM_COST', 'GBUF_COST', 'ITCN_COST', 'REGF_COST'), help='cost of access to memory hierarchy')
    ap.add_argument('--hop-cost', type=float, default=10, help='cost of access through one NoC hop')
    ap.add_argument('--unit-idle-cost', type=float, default=0, help='static cost over all nodes for unit execution time')
    ap.add_argument('--mem-type', default='2D', choices=['2D', '3D'], help='memory type. "2D" has memory only on edge nodes; "3D" has memory vertically on top of all nodes.')
    ap.add_argument('--disable-bypass', nargs='*', default=[], choices=['i', 'o', 'f'], help='whether disallowing gbuf bypass for i (input), o (output), or f (filter)')

    # Loop blocking and data movement options.
    ap.add_argument('--solve-loopblocking', action='store_true', help='Use analytical solver to choose loop blocking. Otherwise use exhaustive search.')
    ap.add_argument('--enable-access-forwarding', action='store_true', help='Each node fetches a subset of data and forwards to other nodes.')
    ap.add_argument('--enable-gbuf-sharing', action='store_true', help='Share gbuf capacity across nodes through NoC.')
    ap.add_argument('--enable-save-writeback', action='store_true', help='Allow to save the writeback to memory for the intermediate data between layers if able to store the entire data set in on-chip buffers.')

    # Partitioning and inter-layer pipelining.
    ap.add_argument('--disable-interlayer-opt', '--basic-interlayer-partition', action='store_true', help='Disable optimizations and only allow basic inter-layer pipeline.')
    ap.add_argument('--hybrid-partition', '--hybrid-partition2d', action='store_true', help='Use hybrid partition for layer for node mapping. Otherwise use naive method based on layer type.')
    ap.add_argument('--batch-partition', action='store_true', help='Allow partitioning batch, i.e., consider data parallelism.')
    ap.add_argument('--ifmaps-partition', '--ifmap-partition', action='store_true', help='Allow partitioning ifmap channel dimension, which requires extra data synchronization.')
    ap.add_argument('--interlayer-partition', '--inter-layer-partition', action='store_true', help='Allow partitioning resources across multiple layers and process them simultaneously as an inter-layer pipeline.')
    ap.add_argument('--layer-pipeline-time-overhead', type=float, default=float('inf'), help='maximum allowed execution time overhead due to layer pipelining.')
    ap.add_argument('--layer-pipeline-max-degree', type=float, default=float('inf'), help='maximum allowed layer pipelining degree, i.e., number of vertices in a pipeline segment.')

    # Search options.
    ap.add_argument('-g', '--goal', default='e', choices=['e', 'd', 'ed', 'E', 'D', 'ED'], help='Goal of optimization: E(nergy), D(elay), or ED.')
    ap.add_argument('-t', '--top', type=int, default=1, help='Number of top schedules to keep during search.')
    ap.add_argument('-p', '--processes', type=int, default=(multiprocessing.cpu_count() // 2), help='Number of parallel processes to use for search.')
    ap.add_argument('-v', '--verbose', action='store_true', help='Show progress and details.')

    return ap
def load_dataset():
    """Load MNIST, scale pixels into [0, 1], add a channel axis, one-hot labels.

    Returns ((x_train, y_train), (x_test, y_test)) with images shaped
    (N, 28, 28, 1) as float32 and labels one-hot encoded.
    """
    from keras.datasets import mnist

    def prep_images(images):
        # NHWC layout with a single channel, normalized to [0, 1].
        return images.reshape(-1, 28, 28, 1).astype('float32') / 255.0

    def prep_labels(labels):
        return to_categorical(labels.astype('float32'))

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    return (prep_images(x_train), prep_labels(y_train)), (prep_images(x_test), prep_labels(y_test))
class DicomSeries(object):
    """A single DICOM series: an ordered collection of slice entries.

    Entries are appended via ``_append``, ordered via ``_sort`` (by
    InstanceNumber) and consolidated via ``_finish``, which validates slice
    dimensions and derives the volume shape and sampling. ``get_numpy_array``
    stacks all slices into one volume.

    Fix: ``suid``/``shape``/``sampling``/``info``/``description`` are now
    ``@property`` (the class' own code reads them as attributes, e.g.
    ``np.zeros(self.shape, ...)`` and ``info = self.info``; as plain methods
    those reads returned bound methods and crashed).
    """

    def __init__(self, suid, progressIndicator):
        self._entries = []  # slice datasets, ordered after _sort()
        self._suid = suid
        self._info = {}  # filled by _finish(); empty until then
        self._progressIndicator = progressIndicator

    def __len__(self):
        return len(self._entries)

    def __iter__(self):
        return iter(self._entries)

    def __getitem__(self, index):
        return self._entries[index]

    @property
    def suid(self):
        """The series instance UID this series was grouped under."""
        return self._suid

    @property
    def shape(self):
        """Volume shape (nslices, rows, cols); available after _finish()."""
        return self._info['shape']

    @property
    def sampling(self):
        """Voxel sampling (slice spacing, row spacing, col spacing)."""
        return self._info['sampling']

    @property
    def info(self):
        """The consolidated info dict (empty until _finish() ran)."""
        return self._info

    @property
    def description(self):
        """Human-readable one-line summary of the series."""
        info = self.info
        if (not info):
            return ('DicomSeries containing %i images' % len(self))
        fields = []
        if ('PatientName' in info):
            fields.append(('' + info['PatientName']))
        if self.shape:
            tmp = [str(d) for d in self.shape]
            fields.append('x'.join(tmp))
        if ('SeriesDescription' in info):
            fields.append((("'" + info['SeriesDescription']) + "'"))
        if ('ImageComments' in info):
            fields.append((("'" + info['ImageComments']) + "'"))
        return ' '.join(fields)

    def __repr__(self):
        adr = hex(id(self)).upper()
        return ('<DicomSeries with %i images at %s>' % (len(self), adr))

    def get_numpy_array(self):
        """Stack all slices into one numpy volume (first axis = slice index).

        Raises:
            ValueError: if the series contains no files.
            RuntimeError: if the series was not finished (defensive; _info
                starts as {} so this check is effectively never hit).
        """
        if (len(self) == 0):
            raise ValueError('Serie does not contain any files.')
        elif (len(self) == 1):
            return self[0].get_numpy_array()
        if (self.info is None):
            raise RuntimeError('Cannot return volume if series not finished.')
        # Allocate the full volume with the dtype of the first slice.
        first_slice = self[0].get_numpy_array()
        vol = np.zeros(self.shape, dtype=first_slice.dtype)
        vol[0] = first_slice
        self._progressIndicator.start('loading data', '', len(self))
        for z in range(1, len(self)):
            vol[z] = self[z].get_numpy_array()
            self._progressIndicator.set_progress(z + 1)
        self._progressIndicator.finish()
        # Per-slice arrays can be large; nudge the collector.
        import gc
        gc.collect()
        return vol

    def _append(self, dcm):
        """Add one slice dataset to the series."""
        self._entries.append(dcm)

    def _sort(self):
        """Order slices by their DICOM InstanceNumber."""
        self._entries.sort(key=(lambda k: k.InstanceNumber))

    def _finish(self):
        """Validate slices and derive volume-level shape/sampling info.

        Checks that all slices share the same (Rows, Columns); warns (via the
        progress indicator) when in-plane sampling differs. The slice spacing
        is the mean distance between consecutive ImagePositionPatient z
        coordinates.
        """
        L = self._entries
        if (len(L) == 0):
            return
        elif (len(L) == 1):
            self._info = L[0].info
            return
        ds1 = L[0]
        distance_sum = 0.0
        dimensions = (ds1.Rows, ds1.Columns)
        sampling = ds1.info['sampling'][:2]
        for index in range(len(L)):
            ds2 = L[index]
            # First iteration compares ds1 with itself (distance 0), so the
            # sum accumulates exactly len(L)-1 inter-slice gaps.
            pos1 = float(ds1.ImagePositionPatient[2])
            pos2 = float(ds2.ImagePositionPatient[2])
            distance_sum += abs(pos1 - pos2)
            dimensions2 = (ds2.Rows, ds2.Columns)
            sampling2 = ds2.info['sampling'][:2]
            if (dimensions != dimensions2):
                raise ValueError('Dimensions of slices does not match.')
            if (sampling != sampling2):
                self._progressIndicator.write('Warn: sampling does not match.')
            ds1 = ds2
        distance_mean = (distance_sum / (len(L) - 1))
        self._info = L[0].info.copy()
        self._info['shape'] = ((len(L),) + ds2.info['shape'])
        self._info['sampling'] = ((distance_mean,) + ds2.info['sampling'])
def write_file(
    file_handle: click.utils.LazyFile,
    api_name: (str | None),
    location: str,
    base_url: (str | None),
    started_at: str,
    in_queue: Queue,
    out_queue: Queue,
    usage_data: (dict[(str, Any)] | None),
) -> None:
    """Stream report events into a gzipped tar file.

    Opens ``file_handle`` as a ``w:gz`` tar archive, writes run metadata,
    then consumes events from ``in_queue`` until done. On interrupt the
    partial file is removed (best effort); otherwise a Metadata event with
    the final file size is pushed onto ``out_queue``.
    """
    with file_handle.open() as raw_file, tarfile.open(mode='w:gz', fileobj=raw_file) as archive:
        report = ReportWriter(archive)
        env = ci.environment()
        report.add_metadata(
            api_name=api_name,
            location=location,
            base_url=base_url,
            started_at=started_at,
            metadata=Metadata(),
            ci_environment=env,
            usage_data=usage_data,
        )
        outcome = consume_events(report, in_queue)
        if (outcome == ConsumeResult.INTERRUPT):
            # Best-effort cleanup of the partially written report.
            with suppress(OSError):
                os.remove(file_handle.name)
        else:
            out_queue.put(events.Metadata(size=os.path.getsize(file_handle.name), ci_environment=env))
def np_quantile_version_above_122(a: ArrayLike, q: ArrayLike, method: str='linear', **kwargs: Any) -> NDArray:
    """Quantile wrapper for NumPy >= 1.22, where the keyword is ``method``.

    Forwards directly to ``np.quantile``; extra keyword arguments are passed
    through unchanged.
    """
    result = np.quantile(a, q, method=method, **kwargs)
    return result
def return_iterator_by_type(data_type):
    """Return a uniform (key, value) iterator over a container.

    Dicts yield their items; any other iterable yields (index, element)
    pairs via enumerate.
    """
    if isinstance(data_type, dict):
        return data_type.items()
    return enumerate(data_type)
class Token(tuple):
    """An immutable lexer token: a (lineno, type, value) triple.

    Subclasses tuple so tokens stay lightweight and hashable; the three
    fields are exposed as read-only properties over the tuple slots.
    """
    __slots__ = ()

    lineno = property(itemgetter(0))
    type = property(itemgetter(1))
    value = property(itemgetter(2))

    def __new__(cls, lineno, type, value):
        # Intern the type string so later comparisons hit the fast path.
        return tuple.__new__(cls, (lineno, intern(str(type)), value))

    def __str__(self):
        # Operators render as their symbol, names as their value, everything
        # else as the bare type.
        if (self.type in reverse_operators):
            return reverse_operators[self.type]
        if (self.type == 'name'):
            return self.value
        return self.type

    def test(self, expr):
        """Match against a token expression: a type, or 'type:value'."""
        if (self.type == expr):
            return True
        if (':' in expr):
            return (expr.split(':', 1) == [self.type, self.value])
        return False

    def test_any(self, *iterable):
        """True if any of the given token expressions matches."""
        return any(self.test(expr) for expr in iterable)

    def __repr__(self):
        return ('Token(%r, %r, %r)' % (self.lineno, self.type, self.value))
class NodeDispatch(object):
    """Mixin that maps CamelCase AST node kinds to handler methods.

    ``get_handler('IfStmt', 'visit')`` looks up ``self.visit_if_stmt``.
    """

    def get_handler_name(self, node_kind):
        """Convert a CamelCase node kind into its snake_case handler suffix.

        Fix: the original definition omitted ``self`` even though the method
        is called as ``self.get_handler_name(...)``, which raised a TypeError
        at runtime.

        Short kinds (<= 4 chars, e.g. 'If', 'Name') are simply lowercased;
        longer ones get underscores inserted at CamelCase boundaries.
        """
        if (len(node_kind) <= 4):
            return node_kind.lower()
        # Two passes: split before capitalized words, then between a
        # lowercase/digit and an uppercase letter (handles acronym runs).
        name = re.sub('(.)([A-Z][a-z]+)', '\\1_\\2', node_kind)
        return re.sub('([a-z0-9])([A-Z])', '\\1_\\2', name).lower()

    def get_handler(self, node_kind, prefix):
        """Return the bound handler ``<prefix>_<snake_kind>``.

        Raises:
            NodeDispatchError: if no such attribute exists on ``self``.
        """
        name = self.get_handler_name(node_kind)
        name = '_'.join((prefix, name))
        try:
            return getattr(self, name)
        except AttributeError:
            raise NodeDispatchError(('No handler found for node kind: %s (expected: %s)' % (node_kind, name)))
class Module(object):
    """Base class for neural network modules (legacy torch.nn.Module).

    Parameters, buffers, submodules and hooks are kept in OrderedDicts and
    managed recursively by _apply / state_dict / load_state_dict. Attribute
    assignment is intercepted (__setattr__) so Parameters and Modules are
    routed into the right registries automatically.
    """
    # When True, allow loading modules whose source changed since pickling.
    dump_patches = False
    # state_dict layout version, recorded in destination._metadata.
    _version = 1

    def __init__(self):
        self._backend = thnn_backend
        self._parameters = OrderedDict()
        self._buffers = OrderedDict()
        self._backward_hooks = OrderedDict()
        self._forward_hooks = OrderedDict()
        self._forward_pre_hooks = OrderedDict()
        self._modules = OrderedDict()
        self.training = True

    def forward(self, *input):
        """Compute the module's output; must be overridden by subclasses."""
        raise NotImplementedError

    def register_buffer(self, name, tensor):
        """Register a persistent non-parameter tensor (e.g. a running mean).

        Raises TypeError/KeyError for invalid names or non-tensor values.
        """
        if (not isinstance(name, torch._six.string_classes)):
            raise TypeError('buffer name should be a string. Got {}'.format(torch.typename(name)))
        elif ('.' in name):
            raise KeyError('buffer name can\'t contain "."')
        elif (name == ''):
            raise KeyError('buffer name can\'t be empty string ""')
        elif (hasattr(self, name) and (name not in self._buffers)):
            raise KeyError("attribute '{}' already exists".format(name))
        elif ((tensor is not None) and (not isinstance(tensor, torch.Tensor))):
            raise TypeError("cannot assign '{}' object to buffer '{}' (torch Tensor or None required)".format(torch.typename(tensor), name))
        else:
            self._buffers[name] = tensor

    def register_parameter(self, name, param):
        """Register a learnable Parameter under ``name`` (None allowed)."""
        if ('_parameters' not in self.__dict__):
            raise AttributeError('cannot assign parameter before Module.__init__() call')
        elif (not isinstance(name, torch._six.string_classes)):
            raise TypeError('parameter name should be a string. Got {}'.format(torch.typename(name)))
        elif ('.' in name):
            raise KeyError('parameter name can\'t contain "."')
        elif (name == ''):
            raise KeyError('parameter name can\'t be empty string ""')
        elif (hasattr(self, name) and (name not in self._parameters)):
            raise KeyError("attribute '{}' already exists".format(name))
        if (param is None):
            self._parameters[name] = None
        elif (not isinstance(param, Parameter)):
            raise TypeError("cannot assign '{}' object to parameter '{}' (torch.nn.Parameter or None required)".format(torch.typename(param), name))
        elif param.grad_fn:
            # Parameters must be graph leaves; a grad_fn means this tensor
            # was computed from others.
            raise ValueError("Cannot assign non-leaf Tensor to parameter '{0}'. Model parameters must be created explicitly. To express '{0}' as a function of another Tensor, compute the value in the forward() method.".format(name))
        else:
            self._parameters[name] = param

    def add_module(self, name, module):
        """Register a child module under ``name`` (None allowed)."""
        if ((not isinstance(module, Module)) and (module is not None)):
            raise TypeError('{} is not a Module subclass'.format(torch.typename(module)))
        elif (not isinstance(name, torch._six.string_classes)):
            raise TypeError('module name should be a string. Got {}'.format(torch.typename(name)))
        elif (hasattr(self, name) and (name not in self._modules)):
            raise KeyError("attribute '{}' already exists".format(name))
        elif ('.' in name):
            raise KeyError('module name can\'t contain "."')
        elif (name == ''):
            raise KeyError('module name can\'t be empty string ""')
        self._modules[name] = module

    def _apply(self, fn):
        """Apply ``fn`` to every parameter, gradient and buffer, recursively.

        Used by cuda()/cpu()/type()/to() etc.; mutates tensors in place.
        """
        for module in self.children():
            module._apply(fn)
        for param in self._parameters.values():
            if (param is not None):
                # Tensors stored in modules are graph leaves; applying to
                # .data keeps the Parameter wrapper intact.
                param.data = fn(param.data)
                if (param._grad is not None):
                    param._grad.data = fn(param._grad.data)
        for (key, buf) in self._buffers.items():
            if (buf is not None):
                self._buffers[key] = fn(buf)
        return self

    def apply(self, fn):
        """Apply ``fn`` to every submodule (children first) and then self."""
        for module in self.children():
            module.apply(fn)
        fn(self)
        return self

    def cuda(self, device=None):
        """Move all parameters and buffers to the GPU."""
        return self._apply((lambda t: t.cuda(device)))

    def cpu(self):
        """Move all parameters and buffers to the CPU."""
        return self._apply((lambda t: t.cpu()))

    def type(self, dst_type):
        """Cast all parameters and buffers to ``dst_type``."""
        return self._apply((lambda t: t.type(dst_type)))

    def float(self):
        """Cast floating point parameters and buffers to float32."""
        return self._apply((lambda t: (t.float() if t.is_floating_point() else t)))

    def double(self):
        """Cast floating point parameters and buffers to float64."""
        return self._apply((lambda t: (t.double() if t.is_floating_point() else t)))

    def half(self):
        """Cast floating point parameters and buffers to float16."""
        return self._apply((lambda t: (t.half() if t.is_floating_point() else t)))

    def to(self, *args, **kwargs):
        """Move/cast parameters and buffers per torch.Tensor.to semantics.

        Only floating point target dtypes are accepted; integer buffers keep
        their dtype (convert passes dtype=None for non-float tensors).
        """
        (device, dtype, non_blocking) = torch._C._nn._parse_to(*args, **kwargs)
        if (dtype is not None):
            if (not dtype.is_floating_point):
                raise TypeError('nn.Module.to only accepts floating point dtypes, but got desired dtype={}'.format(dtype))

        def convert(t):
            return t.to(device, (dtype if t.is_floating_point() else None), non_blocking)
        return self._apply(convert)

    def register_backward_hook(self, hook):
        """Register a backward hook; returns a removable handle."""
        handle = hooks.RemovableHandle(self._backward_hooks)
        self._backward_hooks[handle.id] = hook
        return handle

    def register_forward_pre_hook(self, hook):
        """Register a hook run before every forward(); returns a handle."""
        handle = hooks.RemovableHandle(self._forward_pre_hooks)
        self._forward_pre_hooks[handle.id] = hook
        return handle

    def register_forward_hook(self, hook):
        """Register a hook run after every forward(); returns a handle."""
        handle = hooks.RemovableHandle(self._forward_hooks)
        self._forward_hooks[handle.id] = hook
        return handle

    def _tracing_name(self, tracing_state):
        """Return this module's attribute name in its traced parent, if any."""
        if (not tracing_state._traced_module_stack):
            return None
        module = tracing_state._traced_module_stack[(- 1)]
        for (name, child) in module.named_children():
            if (child is self):
                return name
        return None

    def _slow_forward(self, *input, **kwargs):
        """forward() wrapped with JIT tracing scope bookkeeping."""
        input_vars = tuple(torch.autograd.function._iter_tensors(input))
        tracing_state = torch.jit.get_tracing_state(input_vars)
        if (not tracing_state):
            return self.forward(*input, **kwargs)
        if (not hasattr(tracing_state, '_traced_module_stack')):
            tracing_state._traced_module_stack = []
        name = self._tracing_name(tracing_state)
        if name:
            tracing_state.push_scope(('%s[%s]' % (self.__class__.__name__, name)))
        else:
            tracing_state.push_scope(self.__class__.__name__)
        tracing_state._traced_module_stack.append(self)
        try:
            result = self.forward(*input, **kwargs)
        finally:
            tracing_state.pop_scope()
            tracing_state._traced_module_stack.pop()
        return result

    def __call__(self, *input, **kwargs):
        """Run pre-hooks, forward (or traced forward), post-hooks, and wire
        backward hooks onto the output's grad_fn."""
        for hook in self._forward_pre_hooks.values():
            hook(self, input)
        if torch.jit._tracing:
            result = self._slow_forward(*input, **kwargs)
        else:
            result = self.forward(*input, **kwargs)
        for hook in self._forward_hooks.values():
            hook_result = hook(self, input, result)
            if (hook_result is not None):
                raise RuntimeError("forward hooks should never return any values, but '{}'didn't return None".format(hook))
        if (len(self._backward_hooks) > 0):
            # Dig into nested containers until an actual Tensor is found so
            # backward hooks can attach to its grad_fn.
            var = result
            while (not isinstance(var, torch.Tensor)):
                if isinstance(var, dict):
                    var = next((v for v in var.values() if isinstance(v, torch.Tensor)))
                else:
                    var = var[0]
            grad_fn = var.grad_fn
            if (grad_fn is not None):
                for hook in self._backward_hooks.values():
                    wrapper = functools.partial(hook, self)
                    functools.update_wrapper(wrapper, hook)
                    grad_fn.register_hook(wrapper)
        return result

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Older pickles predate forward pre-hooks; backfill the dict.
        if ('_forward_pre_hooks' not in self.__dict__):
            self._forward_pre_hooks = OrderedDict()

    def __getattr__(self, name):
        # Only reached for names missing from __dict__: look the name up in
        # the parameter, buffer and module registries, in that order.
        if ('_parameters' in self.__dict__):
            _parameters = self.__dict__['_parameters']
            if (name in _parameters):
                return _parameters[name]
        if ('_buffers' in self.__dict__):
            _buffers = self.__dict__['_buffers']
            if (name in _buffers):
                return _buffers[name]
        if ('_modules' in self.__dict__):
            modules = self.__dict__['_modules']
            if (name in modules):
                return modules[name]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, name))

    def __setattr__(self, name, value):
        """Route Parameters/Modules into their registries on assignment."""
        def remove_from(*dicts):
            # Ensure the name lives in exactly one registry at a time.
            for d in dicts:
                if (name in d):
                    del d[name]
        params = self.__dict__.get('_parameters')
        if isinstance(value, Parameter):
            if (params is None):
                raise AttributeError('cannot assign parameters before Module.__init__() call')
            remove_from(self.__dict__, self._buffers, self._modules)
            self.register_parameter(name, value)
        elif ((params is not None) and (name in params)):
            if (value is not None):
                raise TypeError("cannot assign '{}' as parameter '{}' (torch.nn.Parameter or None expected)".format(torch.typename(value), name))
            self.register_parameter(name, value)
        else:
            modules = self.__dict__.get('_modules')
            if isinstance(value, Module):
                if (modules is None):
                    raise AttributeError('cannot assign module before Module.__init__() call')
                remove_from(self.__dict__, self._parameters, self._buffers)
                modules[name] = value
            elif ((modules is not None) and (name in modules)):
                if (value is not None):
                    raise TypeError("cannot assign '{}' as child module '{}' (torch.nn.Module or None expected)".format(torch.typename(value), name))
                modules[name] = value
            else:
                buffers = self.__dict__.get('_buffers')
                if ((buffers is not None) and (name in buffers)):
                    if ((value is not None) and (not isinstance(value, torch.Tensor))):
                        raise TypeError("cannot assign '{}' as buffer '{}' (torch.Tensor or None expected)".format(torch.typename(value), name))
                    buffers[name] = value
                else:
                    object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if (name in self._parameters):
            del self._parameters[name]
        elif (name in self._buffers):
            del self._buffers[name]
        elif (name in self._modules):
            del self._modules[name]
        else:
            object.__delattr__(self, name)

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        """Return an OrderedDict of all parameters and buffers, recursively.

        Keys are dotted paths under ``prefix``; with keep_vars=False the raw
        .data tensors are stored. Version info goes into _metadata.
        """
        if (destination is None):
            destination = OrderedDict()
            destination._metadata = OrderedDict()
        destination._metadata[prefix[:(- 1)]] = dict(version=self._version)
        for (name, param) in self._parameters.items():
            if (param is not None):
                destination[(prefix + name)] = (param if keep_vars else param.data)
        for (name, buf) in self._buffers.items():
            if (buf is not None):
                destination[(prefix + name)] = (buf if keep_vars else buf.data)
        for (name, module) in self._modules.items():
            if (module is not None):
                module.state_dict(destination, ((prefix + name) + '.'), keep_vars=keep_vars)
        return destination

    def _load_from_state_dict(self, state_dict, prefix, metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Copy this module's own (non-recursive) state from ``state_dict``.

        Appends any problems to the caller-owned missing_keys /
        unexpected_keys / error_msgs lists instead of raising.
        """
        local_name_params = itertools.chain(self._parameters.items(), self._buffers.items())
        local_state = {k: v.data for (k, v) in local_name_params if (v is not None)}
        for (name, param) in local_state.items():
            key = (prefix + name)
            if (key in state_dict):
                input_param = state_dict[key]
                if (input_param.shape != param.shape):
                    error_msgs.append('size mismatch for {}: copying a param of {} from checkpoint, where the shape is {} in current model.'.format(key, param.shape, input_param.shape))
                    continue
                if isinstance(input_param, Parameter):
                    # Unwrap Parameters saved with keep_vars=True.
                    input_param = input_param.data
                try:
                    param.copy_(input_param)
                except Exception:
                    error_msgs.append('While copying the parameter named "{}", whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(key, param.size(), input_param.size()))
            elif strict:
                missing_keys.append(key)
        if strict:
            # Flag keys under this prefix that match neither local state nor
            # a child module.
            for (key, input_param) in state_dict.items():
                if key.startswith(prefix):
                    input_name = key[len(prefix):]
                    input_name = input_name.split('.', 1)[0]
                    if ((input_name not in self._modules) and (input_name not in local_state)):
                        unexpected_keys.append(key)

    def load_state_dict(self, state_dict, strict=True):
        """Load parameters and buffers recursively from ``state_dict``.

        With strict=True, missing/unexpected keys and copy errors are
        collected and raised together as a single RuntimeError.
        """
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        metadata = getattr(state_dict, '_metadata', None)
        # Shallow copy so the caller's dict is never mutated.
        state_dict = state_dict.copy()
        if (metadata is not None):
            state_dict._metadata = metadata

        def load(module, prefix=''):
            local_metadata = ({} if (metadata is None) else metadata.get(prefix[:(- 1)], {}))
            module._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
            for (name, child) in module._modules.items():
                if (child is not None):
                    load(child, ((prefix + name) + '.'))
        load(self)
        if strict:
            error_msg = ''
            if (len(unexpected_keys) > 0):
                error_msgs.insert(0, 'Unexpected key(s) in state_dict: {}. '.format(', '.join(('"{}"'.format(k) for k in unexpected_keys))))
            if (len(missing_keys) > 0):
                error_msgs.insert(0, 'Missing key(s) in state_dict: {}. '.format(', '.join(('"{}"'.format(k) for k in missing_keys))))
            if (len(error_msgs) > 0):
                raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(self.__class__.__name__, '\n\t'.join(error_msgs)))

    def parameters(self):
        """Yield all parameters of this module and its descendants."""
        for (name, param) in self.named_parameters():
            (yield param)

    def named_parameters(self, memo=None, prefix=''):
        """Yield (dotted_name, parameter) pairs; ``memo`` deduplicates shared
        parameters across the tree."""
        if (memo is None):
            memo = set()
        for (name, p) in self._parameters.items():
            if ((p is not None) and (p not in memo)):
                memo.add(p)
                (yield (((prefix + ('.' if prefix else '')) + name), p))
        for (mname, module) in self.named_children():
            submodule_prefix = ((prefix + ('.' if prefix else '')) + mname)
            for (name, p) in module.named_parameters(memo, submodule_prefix):
                (yield (name, p))

    def _all_buffers(self, memo=None):
        """Yield all buffers recursively, deduplicated via ``memo``."""
        if (memo is None):
            memo = set()
        for (name, b) in self._buffers.items():
            if ((b is not None) and (b not in memo)):
                memo.add(b)
                (yield b)
        for module in self.children():
            for b in module._all_buffers(memo):
                (yield b)

    def children(self):
        """Yield immediate child modules."""
        for (name, module) in self.named_children():
            (yield module)

    def named_children(self):
        """Yield (name, module) pairs for immediate children, deduplicated."""
        memo = set()
        for (name, module) in self._modules.items():
            if ((module is not None) and (module not in memo)):
                memo.add(module)
                (yield (name, module))

    def modules(self):
        """Yield self and all descendant modules."""
        for (name, module) in self.named_modules():
            (yield module)

    def named_modules(self, memo=None, prefix=''):
        """Yield (dotted_name, module) for self and all descendants, visiting
        each module object at most once."""
        if (memo is None):
            memo = set()
        if (self not in memo):
            memo.add(self)
            (yield (prefix, self))
            for (name, module) in self._modules.items():
                if (module is None):
                    continue
                submodule_prefix = ((prefix + ('.' if prefix else '')) + name)
                for m in module.named_modules(memo, submodule_prefix):
                    (yield m)

    def train(self, mode=True):
        """Set training mode recursively on self and all children."""
        self.training = mode
        for module in self.children():
            module.train(mode)
        return self

    def eval(self):
        """Set evaluation mode (train(False))."""
        return self.train(False)

    def zero_grad(self):
        """Detach and zero the gradients of all parameters."""
        for p in self.parameters():
            if (p.grad is not None):
                p.grad.detach_()
                p.grad.zero_()

    def share_memory(self):
        """Move all tensors into shared memory (for multiprocessing)."""
        return self._apply((lambda t: t.share_memory_()))

    def _get_name(self):
        return self.__class__.__name__

    def extra_repr(self):
        """Extra, module-specific repr lines; subclasses may override."""
        return ''

    def __repr__(self):
        extra_lines = []
        extra_repr = self.extra_repr()
        if extra_repr:
            extra_lines = extra_repr.split('\n')
        child_lines = []
        for (key, module) in self._modules.items():
            mod_str = repr(module)
            mod_str = _addindent(mod_str, 2)
            child_lines.append(((('(' + key) + '): ') + mod_str))
        lines = (extra_lines + child_lines)
        main_str = (self._get_name() + '(')
        if lines:
            # A single extra line with no children stays on one line.
            if ((len(extra_lines) == 1) and (not child_lines)):
                main_str += extra_lines[0]
            else:
                main_str += (('\n ' + '\n '.join(lines)) + '\n')
        main_str += ')'
        return main_str

    def __dir__(self):
        module_attrs = dir(self.__class__)
        attrs = list(self.__dict__.keys())
        parameters = list(self._parameters.keys())
        modules = list(self._modules.keys())
        buffers = list(self._buffers.keys())
        keys = ((((module_attrs + attrs) + parameters) + modules) + buffers)
        # Drop numeric names (e.g. from ModuleList indices).
        keys = [key for key in keys if (not key[0].isdigit())]
        return sorted(keys)
# NOTE(review): `_utils.test(ti.cpu)` looks like the tail of a decorator
# (e.g. `@test_utils.test(arch=ti.cpu)`) cut off at an extraction boundary;
# as written it is a bare expression statement. Confirm against the original
# file.
_utils.test(ti.cpu)
def test_static_break():
    """Static (compile-time) `break` must stop unrolling after i == 2."""
    # A Taichi field of 5 int32 values, zero-initialized.
    x = ti.field(ti.i32, 5)

    # NOTE(review): presumably decorated with @ti.kernel in the original --
    # ti.static(...) only makes sense inside a kernel; the decorator may have
    # been lost in extraction. Verify.
    def func():
        # ti.static unrolls the loop at compile time; the static break ends
        # unrolling after the i == 2 iteration, so only x[0..2] are written.
        for i in ti.static(range(5)):
            x[i] = 1
            if ti.static((i == 2)):
                break
    func()
    # Elements 3 and 4 keep their zero initialization.
    assert np.allclose(x.to_numpy(), np.array([1, 1, 1, 0, 0]))
# NOTE(review): `.skipif(...)` is the tail of a decorator (presumably
# `@pytest.mark.skipif`) cut off at an extraction boundary -- the whole test
# class is CUDA-only. Confirm against the original file.
.skipif((not torch.cuda.is_available()), reason='No CUDA device registered.')
class TestCustomHighwayLSTM(AllenNlpTestCase):
    """Checks the custom CUDA AlternatingHighwayLSTM kernel against the
    pure-PyTorch StackedAlternatingLstm baseline: identical forward outputs,
    identical input gradients, and matching per-layer weight/bias gradients.
    """

    def test_small_model(self):
        args = self.get_models_and_inputs(5, 3, 11, 2, 5, 0.0)
        self.forward_and_backward_outputs_match(*args)

    def test_large_model(self):
        args = self.get_models_and_inputs(83, 103, 311, 8, 101, 0.0)
        self.forward_and_backward_outputs_match(*args)

    def test_validation_forward_pass_is_deterministic_in_model_with_dropout(self):
        """In eval mode, repeated forward passes must give identical outputs
        even though the model was built with dropout."""
        (_, model, _, model_input, lengths) = self.get_models_and_inputs(5, 3, 11, 2, 5, dropout_prob=0.5)
        model.eval()
        model_input = pack_padded_sequence(model_input, lengths, batch_first=True)
        (output, _) = model(model_input)
        (output, _) = pad_packed_sequence(output, batch_first=True)
        for i in range(3):
            (output_new, _) = model(model_input)
            (output_new, _) = pad_packed_sequence(output_new, batch_first=True)
            numpy.testing.assert_array_almost_equal(output.data.cpu().numpy(), output_new.data.cpu().numpy())
            output = output_new

    # NOTE(review): defined without `self` yet called via
    # `self.forward_and_backward_outputs_match(*args)` -- most likely a
    # @staticmethod decorator was lost in extraction; verify.
    def forward_and_backward_outputs_match(baseline_model, kernel_model, baseline_input, kernel_input, lengths):
        """Run both models forward and backward and compare outputs and all
        gradients (inputs, and per-layer weight/bias slices of the fused
        kernel parameter)."""
        packed_baseline_input = pack_padded_sequence(baseline_input, lengths, batch_first=True)
        (baseline_output, _) = baseline_model(packed_baseline_input)
        (baseline_output, _) = pad_packed_sequence(baseline_output, batch_first=True)
        packed_kernel_input = pack_padded_sequence(kernel_input, lengths, batch_first=True)
        (kernel_output, _) = kernel_model(packed_kernel_input)
        (kernel_output, _) = pad_packed_sequence(kernel_output, batch_first=True)
        numpy.testing.assert_array_almost_equal(baseline_output.data.cpu().numpy(), kernel_output.data.cpu().numpy())
        # Backprop the same random error signal through both models.
        random_error = torch.randn(baseline_output.size()).cuda()
        baseline_model.zero_grad()
        baseline_output.backward(random_error)
        kernel_model.zero_grad()
        kernel_output.backward(random_error)
        numpy.testing.assert_array_almost_equal(baseline_input.grad.data.cpu().numpy(), kernel_input.grad.data.cpu().numpy())
        # The kernel stores all layers' weights/biases fused in one flat
        # tensor; walk it layer by layer, mirroring get_models_and_inputs.
        weight_index = 0
        bias_index = 0
        for layer in range(baseline_model.num_layers):
            input_grad = getattr(baseline_model, ('layer_%d' % layer)).input_linearity.weight.grad
            state_grad = getattr(baseline_model, ('layer_%d' % layer)).state_linearity.weight.grad
            bias_grad = getattr(baseline_model, ('layer_%d' % layer)).state_linearity.bias.grad
            # Slices are stored transposed relative to the baseline layout.
            kernel_input_grad = kernel_model.weight.grad[weight_index:(weight_index + input_grad.nelement())].view(input_grad.size(1), input_grad.size(0)).t()
            weight_index += input_grad.nelement()
            kernel_state_grad = kernel_model.weight.grad[weight_index:(weight_index + state_grad.nelement())].view(state_grad.size(1), state_grad.size(0)).t()
            weight_index += state_grad.nelement()
            kernel_bias_grad = kernel_model.bias.grad[bias_index:(bias_index + bias_grad.nelement())]
            bias_index += bias_grad.nelement()
            numpy.testing.assert_array_almost_equal(kernel_input_grad.data.cpu().numpy(), input_grad.data.cpu().numpy(), decimal=4)
            numpy.testing.assert_array_almost_equal(kernel_state_grad.data.cpu().numpy(), state_grad.data.cpu().numpy(), decimal=4)
            numpy.testing.assert_array_almost_equal(kernel_bias_grad.data.cpu().numpy(), bias_grad.data.cpu().numpy(), decimal=4)

    # NOTE(review): also defined without `self` and called via
    # `self.get_models_and_inputs(...)` -- likely a lost @staticmethod; verify.
    def get_models_and_inputs(batch_size, input_size, output_size, num_layers, timesteps, dropout_prob):
        """Build the baseline and kernel models with IDENTICAL weights, plus
        two grad-enabled copies of the same random input and their lengths."""
        from allennlp.modules.alternating_highway_lstm import AlternatingHighwayLSTM
        baseline = StackedAlternatingLstm(input_size, output_size, num_layers, dropout_prob, use_input_projection_bias=False).cuda()
        kernel_version = AlternatingHighwayLSTM(input_size, output_size, num_layers, dropout_prob).cuda()
        # Copy each baseline layer's weights (transposed) into the kernel's
        # single fused weight/bias tensors, tracking flat offsets.
        weight_index = 0
        bias_index = 0
        for layer_index in range(num_layers):
            layer = getattr(baseline, ('layer_%d' % layer_index))
            input_weight = layer.input_linearity.weight
            state_weight = layer.state_linearity.weight
            bias = layer.state_linearity.bias
            kernel_version.weight.data[weight_index:(weight_index + input_weight.nelement())].view_as(input_weight.t()).copy_(input_weight.data.t())
            weight_index += input_weight.nelement()
            kernel_version.weight.data[weight_index:(weight_index + state_weight.nelement())].view_as(state_weight.t()).copy_(state_weight.data.t())
            weight_index += state_weight.nelement()
            kernel_version.bias.data[bias_index:(bias_index + bias.nelement())].copy_(bias.data)
            bias_index += bias.nelement()
        inputs = torch.randn(batch_size, timesteps, input_size).cuda()
        input2 = inputs.clone()
        baseline_input = Variable(inputs, requires_grad=True)
        kernel_version_input = Variable(input2, requires_grad=True)
        # Decreasing lengths (required by pack_padded_sequence).
        lengths = [(timesteps - int((i / 2))) for i in range(batch_size)]
        lengths = lengths[:batch_size]
        return (baseline, kernel_version, baseline_input, kernel_version_input, lengths)
def compile(name: str, inputs: List[Tensor], outputs: List[Tensor], cmp=True, opt=2, dyn=False, profile=False, has_custom=False, refs=None): TpuLang.graph.inputs = inputs TpuLang.graph.outputs = outputs converter = TpuLangConverter(name=name, graph=TpuLang.graph) model_transform(name, converter) model_inference(model_name=name, inputs=inputs, has_custom=has_custom) if (cmp and (refs is not None)): model_validate(model_name=name, refs=refs)
def _load_model(arch_type, backbone, num_classes, output_stride, pretrained_backbone): if (backbone == 'mobilenetv2'): model = _segm_mobilenet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) elif backbone.startswith('resnet'): model = _segm_resnet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) else: raise NotImplementedError return model
class _CudaBase(object): is_cuda = True is_sparse = False def type(self, *args, **kwargs): with device(self.get_device()): return super(_CudaBase, self).type(*args, **kwargs) __new__ = _lazy_new
class Constraint(metaclass=DummyObject): _backends = ['torch'] def __init__(self, *args, **kwargs): requires_backends(self, ['torch'])
def get_network(cfg): arch = cfg.network get_model = _network_factory[arch] network = get_model() return network
def filter_dataown(qseq, aseq): filtered_q = [] filtered_p = [] fliter_flag = 0 filtered_a = [] for i in range(len(aseq)): fliter_flag += 1 (qlen, alen) = (len(qseq[i].split(' ')), len(aseq[i].split(' '))) filtered_p.append(fliter_flag) if ((qlen >= limit['minq']) and (qlen <= limit['maxq']) and (alen >= limit['mina']) and (alen <= limit['maxa'])): filtered_q.append(qseq[i]) flag += 1 filtered_a.append(aseq[i]) fliter_flag += 1 return (filtered_q, filtered_a)
class OnnxConfigWithPastTestCaseV2(TestCase): SUPPORTED_WITH_PAST_CONFIGS = {} (OnnxConfigWithPast, __abstractmethods__=set()) def test_use_past(self): for (name, config) in OnnxConfigWithPastTestCaseV2.SUPPORTED_WITH_PAST_CONFIGS: with self.subTest(name): self.assertFalse(OnnxConfigWithPast.from_model_config(config()).use_past, 'OnnxConfigWithPast.from_model_config() should not use_past') self.assertTrue(OnnxConfigWithPast.with_past(config()).use_past, 'OnnxConfigWithPast.from_model_config() should use_past') (OnnxConfigWithPast, __abstractmethods__=set()) def test_values_override(self): for (name, config) in OnnxConfigWithPastTestCaseV2.SUPPORTED_WITH_PAST_CONFIGS: with self.subTest(name): onnx_config_default = OnnxConfigWithPast.from_model_config(config()) self.assertIsNotNone(onnx_config_default.values_override, 'values_override should not be None') self.assertIn('use_cache', onnx_config_default.values_override, 'use_cache should be present') self.assertFalse(onnx_config_default.values_override['use_cache'], 'use_cache should be False if not using past') onnx_config_default = OnnxConfigWithPast.with_past(config()) self.assertIsNotNone(onnx_config_default.values_override, 'values_override should not be None') self.assertIn('use_cache', onnx_config_default.values_override, 'use_cache should be present') self.assertTrue(onnx_config_default.values_override['use_cache'], 'use_cache should be False if not using past')
def analyze(report, templates, accuracy_ks=(1, 2), precision_ks=(1, 2), recall_ks=(1, 2)):
    """Summarize ranking quality of template suggestions in a run report.

    Each report item carries a 'history' of entries with a ranked 'choices'
    list and the subset 'valid_choices'.  Choices named 'Template<N>...' are
    attributed to template id N.

    Returns:
        (accuracy_df, pr_df): a one-row DataFrame of top-k accuracy over all
        entries, and a per-template DataFrame with match counts plus top-k
        precision/recall, computed both over all choices ('all') and with a
        template-only rank index ('templates only').
    """
    template_match_counts = collections.defaultdict(int)
    # Ranks of every Template choice / every *valid* Template choice, keyed by
    # rank flavor ('all' vs 'templates only') then template id.
    template_choice_ranks = {'all': collections.defaultdict(list), 'templates only': collections.defaultdict(list)}
    template_valid_choice_ranks = {'all': collections.defaultdict(list), 'templates only': collections.defaultdict(list)}
    min_valid_ranks = []
    for item in report:
        for entry in item['history']:
            # Skip entries whose choices are not plain strings.
            if (not isinstance(entry['choices'][0], str)):
                continue
            all_ranks = {}
            template_only_ranks = {}
            template_only_i = 0
            for (i, choice) in enumerate(entry['choices']):
                all_ranks[choice] = i
                template_only_ranks[choice] = template_only_i
                # NOTE(review): the secondary index advances only on
                # NON-template choices, so 'templates only' ranks count the
                # non-template entries ranked above each choice — this looks
                # inverted relative to the name; confirm intent.
                if (not re.match('Template(\\d+).*', choice)):
                    template_only_i += 1
            # Best (lowest) rank achieved by any valid choice in this entry.
            min_valid_rank = min((all_ranks[choice] for choice in entry['valid_choices']))
            min_valid_ranks.append(min_valid_rank)
            # Record ranks for every template-attributed choice...
            for choice in entry['choices']:
                m = re.match('Template(\\d+).*', choice)
                if (not m):
                    continue
                template_id = int(m.group(1))
                template_choice_ranks['all'][template_id].append(all_ranks[choice])
                template_choice_ranks['templates only'][template_id].append(template_only_ranks[choice])
            # ...and separately for the valid ones, which also count as matches.
            for choice in entry['valid_choices']:
                m = re.match('Template(\\d+).*', choice)
                if (not m):
                    continue
                template_id = int(m.group(1))
                template_match_counts[template_id] += 1
                template_valid_choice_ranks['all'][template_id].append(all_ranks[choice])
                template_valid_choice_ranks['templates only'][template_id].append(template_only_ranks[choice])
    min_valid_ranks = np.array(min_valid_ranks)
    # Top-k accuracy: fraction of entries whose best valid choice ranked < k.
    top_k_accuracy = {k: (np.sum((min_valid_ranks < k)) / len(min_valid_ranks)) for k in accuracy_ks}
    # Per-template precision@k: valid hits in top k / all appearances in top k.
    top_k_precision = {type_name: {k: {i: (np.sum((np.array(template_valid_choice_ranks[type_name][i]) < k)) / np.sum((np.array(template_choice_ranks[type_name][i]) < k))) for i in template_match_counts.keys()} for k in precision_ks} for type_name in template_valid_choice_ranks}
    # Per-template recall@k: fraction of valid appearances that ranked < k.
    top_k_recall = {type_name: {k: {i: (np.sum((np.array(ranks) < k)) / len(ranks)) for (i, ranks) in ranks_of_type.items()} for k in recall_ks} for (type_name, ranks_of_type) in template_valid_choice_ranks.items()}
    accuracy_df = pd.DataFrame({'Accuracy {}'.format(k): [top_k_accuracy[k]] for k in accuracy_ks})
    # One row per template: its head idiom, match count, and all P/R columns.
    pr_df = pd.DataFrame({'Head': {t['id']: t['idiom'][0] for t in templates}, 'Matches': template_match_counts, **{'Precision {} {}'.format(k, type_name): top_k_precision[type_name][k] for type_name in top_k_precision.keys() for k in precision_ks}, **{'Recall {} {}'.format(k, type_name): top_k_recall[type_name][k] for type_name in top_k_recall.keys() for k in recall_ks}})
    return (accuracy_df, pr_df)
def env_worker(make_env, make_policy, n_episodes, id_worker): env = make_env(seed=0) print('Env created.') policy = make_policy() print('Policy created.') q_env = Queue(f'env_{id_worker}') q_policy = Queue(f'policy_{id_worker}') print('Queue created.') episodes = [] total_env_step = 0 start_steps = 1000 for i_ep in range(n_episodes): if ((i_ep % 10) == 0): print(f'AGENT {id_worker} EPISODE {i_ep}') episode = [] (state, _) = env.reset() for env_step in range(1000): if (total_env_step <= start_steps): action = env.sample_action() else: action = policy.explore(state) (next_state, reward, terminated, truncated, _) = env.step(action) episode.append([state, action, reward, terminated, next_state]) if (terminated or truncated): break state = next_state total_env_step += 1 q_env.push(pickle.dumps(episode)) while True: data = q_policy.pop() if (data is None): print('Waiting for the policy..') time.sleep(2.0) continue policy.load_state_dict(pickle.loads(data)) break print('Episode by env worker is done.')
.only_pytorch .only_pytorch64 def test_pdf_calculations_pytorch(backend): tb = pyhf.tensorlib values = tb.astensor([0, 0, 1, 1]) mus = tb.astensor([0, 1, 0, 1]) sigmas = tb.astensor([0, 0, 0, 0]) for (x, mu, sigma) in zip(values, mus, sigmas): with pytest.raises(ValueError): _ = tb.normal_logpdf(x, mu, sigma) assert (tb.tolist(tb.normal_logpdf(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]), tb.astensor([1, 1, 1, 1]))) == pytest.approx([(- 0.), (- 1.), (- 1.), (- 0.)])) assert (tb.tolist(tb.poisson(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]))) == pytest.approx([1.0, 0., 0.0, 0.])) with pytest.warns(RuntimeWarning, match='divide by zero encountered in log'): assert (tb.tolist(tb.poisson_logpdf(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]))) == pytest.approx(np.log([1.0, 0., 0.0, 0.]).tolist())) assert (tb.tolist(tb.poisson(n=tb.astensor([0.5, 1.1, 1.5]), lam=tb.astensor(1.0))) == pytest.approx([0., 0., 0.]))
class GeodesicDistanceComputer(metaclass=Singleton): def __init__(self): self._pathfinders = {} def _get_pathfinder(self, scene_id) -> PathFinder: scene_name = osp.splitext(osp.basename(scene_id))[0] if (scene_name not in self._pathfinders): navmesh = osp.join(osp.dirname(__file__), 'navmeshes', (scene_name + '.navmesh')) pf = PathFinder() pf.load_nav_mesh(navmesh) if (not pf.is_loaded): raise RuntimeError((f'''Could not find navmesh to load for scene_id '{scene_id}' ''' + f"Tried to load from '{navmesh}'")) self._pathfinders[scene_name] = pf return self._pathfinders[scene_name] def compute_distance(self, scene_id, start_pt, end_pt): path = ShortestPath() self._get_pathfinder(scene_id).find_path(path) return path.geodesic_distance
.parametrize('test_case, recursive', [(test_bucket_medium_file, True), (test_bucket_large_file, False), (test_bucket_small_file, True)]) def test_azure(azure_bucket, gcp_bucket, test_case, recursive): client = SkyplaneClient() src_iface = ObjectStoreInterface.create('gcp:us-west2', test_bucket.split('://')[1]) assert isinstance(azure_bucket.bucket(), str), f'Bucket name is not a string {azure_bucket.bucket()}' assert (len(list(src_iface.list_objects(prefix=test_case.replace(f'{test_bucket}/', '')))) > 0), f'Test case {test_case} does not exist in {test_bucket}' client.copy(test_case, f'{azure_bucket.path()}/{test_case}', recursive=recursive) dst_objects = list(azure_bucket.list_objects()) assert (len(dst_objects) > 0), f'Object {test_case} not copied to {azure_bucket.bucket()}: only container {dst_objects}' client.copy(f'{azure_bucket.path()}/{test_case}', f'gs://{gcp_bucket.bucket()}/azure/', recursive=recursive)
class BrandtModuleElement(HeckeModuleElement): def __init__(self, parent, x): if isinstance(x, HeckeModuleElement): x = x.element() HeckeModuleElement.__init__(self, parent, parent.free_module()(x)) def _richcmp_(self, other, op): return richcmp(self.element(), other.element(), op) def monodromy_pairing(self, x): B = self.parent() w = B.monodromy_weights() x = B(x).element() v = self.element() return sum((((x[i] * v[i]) * w[i]) for i in range(len(v)))) def __mul__(self, right): return self.monodromy_pairing(right) def _add_(self, right): return BrandtModuleElement(self.parent(), (self.element() + right.element())) def _sub_(self, right): return BrandtModuleElement(self.parent(), (self.element() - right.element())) def _neg_(self): return BrandtModuleElement(self.parent(), (- self.element()))
def get_aggregate(cl, matrices, domain): children = [r for r in matrices if ((set(r) < set(cl)) and ((len(r) + 1) == len(cl)))] ans = [sparse.csr_matrix((0, domain.size(cl)))] for c in children: coef = (1.0 / np.sqrt(len(children))) a = tuple((set(cl) - set(c))) cl2 = (a + c) Qc = matrices[c] P = get_permutation_matrix(cl, cl2, domain) T = np.ones(domain.size(a)) Q = (sparse.kron(T, Qc) P) ans.append((coef * Q)) return sparse.vstack(ans)
def calc_one(data): relcnt = 0 score = 0.0 data = sorted(data, key=(lambda d: d[1]), reverse=True) fout = open('meshres.5.txt', 'a') for (idx, item) in enumerate(data): if (idx < 5): fout.write((((item[0][0] + '\t') + item[0][1]) + '\n'))
class VideoDiscriminator(nn.Module): def __init__(self, n_channels, n_output_neurons=1, bn_use_gamma=True, use_noise=False, noise_sigma=None, ndf=64): super(VideoDiscriminator, self).__init__() self.n_channels = n_channels self.n_output_neurons = n_output_neurons self.use_noise = use_noise self.bn_use_gamma = bn_use_gamma self.main = nn.Sequential(Noise(use_noise, sigma=noise_sigma), nn.Conv3d(n_channels, ndf, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False), nn.LeakyReLU(0.2, inplace=True), Noise(use_noise, sigma=noise_sigma), nn.Conv3d(ndf, (ndf * 2), 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False), nn.BatchNorm3d((ndf * 2)), nn.LeakyReLU(0.2, inplace=True), Noise(use_noise, sigma=noise_sigma), nn.Conv3d((ndf * 2), (ndf * 4), 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False), nn.BatchNorm3d((ndf * 4)), nn.LeakyReLU(0.2, inplace=True), Noise(use_noise, sigma=noise_sigma), nn.Conv3d((ndf * 4), (ndf * 8), 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False), nn.BatchNorm3d((ndf * 8)), nn.LeakyReLU(0.2, inplace=True), Noise(use_noise, sigma=noise_sigma), nn.Conv3d((ndf * 8), (ndf * 16), 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False), nn.BatchNorm3d((ndf * 16)), nn.LeakyReLU(0.2, inplace=True), Noise(use_noise, sigma=noise_sigma), nn.Conv3d((ndf * 16), (ndf * 32), (1, 4, 4), stride=(1, 2, 2), padding=(0, 1, 1), bias=False), nn.BatchNorm3d((ndf * 32)), nn.LeakyReLU(0.2, inplace=True), nn.Conv3d((ndf * 32), n_output_neurons, (1, 4, 4), 1, 0, bias=False)) def forward(self, input): h = self.main(input).squeeze() return (h, None)
def one_hot_encoding(labels, num_classes, scope=None): with tf.op_scope([labels], scope, 'OneHotEncoding'): batch_size = labels.get_shape()[0] indices = tf.expand_dims(tf.range(0, batch_size), 1) labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype) concated = tf.concat(1, [indices, labels]) onehot_labels = tf.sparse_to_dense(concated, tf.pack([batch_size, num_classes]), 1.0, 0.0) onehot_labels.set_shape([batch_size, num_classes]) return onehot_labels
def get_audio(filename): if str(filename).endswith('.wav'): try: a = wave_get_audio(filename) if a: return a except Exception: pass return ffmpeg_get_audio(filename)
def save_checkpoint(state, checkpoint_path, cfg): file_path = osp.join(checkpoint_path, 'checkpoint.pth.tar') torch.save(state, file_path) if ((cfg.data.train.type in ['synth']) or ((state['iter'] == 0) and ((state['epoch'] % 10) == 0))): file_name = ('checkpoint_%dep.pth.tar' % state['epoch']) file_path = osp.join(checkpoint_path, file_name) torch.save(state, file_path)
def test_process_routing_invalid_object(): class InvalidObject(): pass with pytest.raises(AttributeError, match='either implement the routing method'): process_routing(InvalidObject(), 'fit', **{})
class Token(Structure): _fields_ = [('int_data', (c_uint * 4)), ('ptr_data', c_void_p)] def spelling(self): return conf.lib.clang_getTokenSpelling(self._tu, self) def kind(self): return TokenKind.from_value(conf.lib.clang_getTokenKind(self)) def location(self): return conf.lib.clang_getTokenLocation(self._tu, self) def extent(self): return conf.lib.clang_getTokenExtent(self._tu, self) def cursor(self): cursor = Cursor() conf.lib.clang_annotateTokens(self._tu, byref(self), 1, byref(cursor)) return cursor