import torch
import torch.nn.functional as F

from fairseq.models import BaseFairseqModel, FairseqEncoder


class FairseqEncoderModel(BaseFairseqModel):
    """Base class for encoder-only models."""

    def __init__(self, encoder):
        super().__init__()
        self.encoder = encoder
        assert isinstance(self.encoder, FairseqEncoder)

    def forward(self, src_tokens, src_lengths, **kwargs):
        return self.encoder(src_tokens, src_lengths, **kwargs)

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        # Normalize the encoder logits; structured (non-tensor) encoder
        # outputs must be handled by a subclass.
        encoder_out = net_output['encoder_out']
        if torch.is_tensor(encoder_out):
            logits = encoder_out.float()
            if log_probs:
                return F.log_softmax(logits, dim=-1)
            return F.softmax(logits, dim=-1)
        raise NotImplementedError

    def max_positions(self):
        return self.encoder.max_positions()
import torch
from thop import profile  # assumed source of `profile`, matching its (model, inputs=..., verbose=...) signature


@torch.no_grad()  # decorator reconstructed; the source row was truncated to "_grad()"
def show_flops_params(model, device, input_shape=[1, 3, 1024, 2048], logger=None):
    input = torch.randn(*input_shape).to(torch.device(device))
    flops, params = profile(model, inputs=(input,), verbose=False)
    if logger is not None:
        # The 'G' suffix implies units of 1e9 flops; the divisor was lost in the source row.
        logger.info('{} flops: {:.3f}G input shape is {}, params: {:.3f}M'.format(
            model.__class__.__name__, flops / 1e9, input_shape[1:], params / 1e6))
class SubGaussianInverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning): estimator_name: str = 'sg-ipw' def __post_init__(self) -> None: self.base_ope_estimator = SubGaussianInverseProbabilityWeighting super()._check_lambdas(max_val=1.0) super()._check_init_inputs() self.lambdas.sort() def estimate_policy_value(self, reward: np.ndarray, action: np.ndarray, action_dist: np.ndarray, pscore: Optional[np.ndarray]=None, position: Optional[np.ndarray]=None, estimated_pscore: Optional[np.ndarray]=None, **kwargs) -> float: check_array(array=reward, name='reward', expected_dim=1) check_array(array=action, name='action', expected_dim=1) if self.use_estimated_pscore: check_array(array=estimated_pscore, name='estimated_pscore', expected_dim=1) pscore_ = estimated_pscore else: check_array(array=pscore, name='pscore', expected_dim=1) pscore_ = pscore check_ope_inputs(action_dist=action_dist, position=position, action=action, reward=reward, pscore=pscore_) if (position is None): position = np.zeros(action_dist.shape[0], dtype=int) return super().estimate_policy_value_with_tuning(reward=reward, action=action, position=position, pscore=pscore_, action_dist=action_dist) def estimate_interval(self, reward: np.ndarray, action: np.ndarray, action_dist: np.ndarray, pscore: Optional[np.ndarray]=None, position: Optional[np.ndarray]=None, estimated_pscore: Optional[np.ndarray]=None, alpha: float=0.05, n_bootstrap_samples: int=10000, random_state: Optional[int]=None, **kwargs) -> Dict[(str, float)]: check_array(array=reward, name='reward', expected_dim=1) check_array(array=action, name='action', expected_dim=1) if self.use_estimated_pscore: check_array(array=estimated_pscore, name='estimated_pscore', expected_dim=1) pscore_ = estimated_pscore else: check_array(array=pscore, name='pscore', expected_dim=1) pscore_ = pscore check_ope_inputs(action_dist=action_dist, position=position, action=action, reward=reward, pscore=pscore_) if (position is None): position = np.zeros(action_dist.shape[0], dtype=int) return super().estimate_interval_with_tuning(reward=reward, action=action, position=position, pscore=pscore_, action_dist=action_dist, alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state)
def _render(template, context, app): before_render_template.send(app, template=template, context=context) rv = template.render(context) template_rendered.send(app, template=template, context=context) return rv
import torch


def _expmap0(u, c):
    # Exponential map at the origin of the Poincare ball with curvature -c.
    # The source row called a bare `tanh`; torch.tanh is assumed here.
    sqrt_c = c ** 0.5
    u_norm = torch.clamp_min(u.norm(dim=-1, p=2, keepdim=True), 1e-05)
    gamma_1 = torch.tanh(sqrt_c * u_norm) * u / (sqrt_c * u_norm)
    return gamma_1
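A minimal sanity check for _expmap0, under the assumption above that tanh is torch.tanh: since ||gamma_1|| = tanh(sqrt(c)·||u||)/sqrt(c), every tangent vector at the origin must land strictly inside the ball of radius 1/sqrt(c).

import torch

c = 1.0
u = torch.randn(4, 8)                      # tangent vectors at the origin
x = _expmap0(u, c)
radii = x.norm(dim=-1, p=2)
assert torch.all(radii < 1.0 / c ** 0.5)   # all points stay inside the Poincare ball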
class Assign(FunctionAssignment): def __str__(self): return ('%s := %s' % (self.fluent, self.expression))
def test_root_get_distance(branchless_codeobject_goal, mocker): mock = mocker.patch('pynguin.ga.coveragegoals.cfd.get_root_control_flow_distance', return_value=42) distance = branchless_codeobject_goal.get_distance(MagicMock(), MagicMock()) assert (distance == 42) mock.assert_called_once()
def test_line_str_parser(): data_ret = ['sample1.jpg hello\n', 'sample2.jpg world'] keys = ['filename', 'text'] keys_idx = [0, 1] separator = ' ' with pytest.raises(AssertionError): parser = LineStrParser('filename', keys_idx, separator) with pytest.raises(AssertionError): parser = LineStrParser(keys, keys_idx, [' ']) with pytest.raises(AssertionError): parser = LineStrParser(keys, [0], separator) parser = LineStrParser(keys, keys_idx, separator) assert (parser.get_item(data_ret, 0) == {'filename': 'sample1.jpg', 'text': 'hello'}) with pytest.raises(Exception): parser = LineStrParser(['filename', 'text', 'ignore'], [0, 1, 2], separator) parser.get_item(data_ret, 0)
def main(unused_argv):
    config = configs.load_config(save_config=False)
    config.render_path = True
    dataset = datasets.load_dataset('test', config.data_dir, config)
    # jax.random.PRNGKey requires a seed; the value was lost in the source
    # row, so 0 is assumed here.
    model, init_variables = models.construct_mipnerf(
        random.PRNGKey(0), dataset.peek()['rays'], config)
    optimizer = flax.optim.Adam(config.lr_init).create(init_variables)
    state = utils.TrainState(optimizer=optimizer)
    del optimizer, init_variables

    # (lo, hi) are per-scan depth bounds; kept from the source row even though
    # they are unused below.
    if config.dataset_loader == 'llff':
        eval_dict = {
            'fern': [0., 0.], 'flower': [0., 0.], 'fortress': [0., 0.],
            'horns': [0., 0.], 'leaves': [0., 0.], 'orchids': [0., 0.],
            'room': [0., 0.], 'trex': [0., 0.],
        }
        lo, hi = eval_dict[config.llff_scan]
    elif config.dataset_loader == 'dtu':
        eval_dict = {
            'scan8': [0.9593777, 1.5342957], 'scan21': [0., 1.7484968],
            'scan30': [1.1381109, 1.6074754], 'scan31': [1.0627427, 1.6069319],
            'scan34': [1.1172018, 1.5005568], 'scan38': [1.0385504, 1.5373354],
            'scan40': [0.8312144, 1.62111], 'scan41': [0.9469194, 1.5374442],
            'scan45': [1.0098513, 1.5830635], 'scan55': [0., 1.513227],
            'scan63': [1.1894969, 1.7325872], 'scan82': [1.0984676, 1.7162027],
            'scan103': [1.0771852, 1.5858444], 'scan110': [0., 1.5147997],
            'scan114': [0., 1.548706],
        }
        lo, hi = eval_dict[config.dtu_scan]

    def render_eval_fn(variables, _, rays):
        return jax.lax.all_gather(
            model.apply(variables, None, rays,
                        resample_padding=config.resample_padding_final,
                        compute_extras=True),
            axis_name='batch')

    render_eval_pfn = jax.pmap(render_eval_fn, in_axes=(None, None, 0),
                               donate_argnums=2, axis_name='batch')
    # `out_dir` is assigned below; the lambda resolves it only at call time.
    path_fn = lambda x: path.join(out_dir, x)

    try:
        state = checkpoints.restore_checkpoint(config.checkpoint_dir, state)
    except Exception:
        print('Using pre-trained model.')
        state_dict = checkpoints.restore_checkpoint(config.checkpoint_dir, None)
        params = state_dict['optimizer']['target']['params']['MLP_0']
        for i in [9, 17]:
            del params[f'Dense_{i}']
        params['Dense_9'] = params['Dense_18']
        params['Dense_10'] = params['Dense_19']
        params['Dense_11'] = params['Dense_20']
        # Key name kept verbatim from the source row; it may be garbled there.
        del state_dict['optimizerd']
        state = flax.serialization.from_state_dict(state, state_dict)

    step = int(state.optimizer.state.step)
    print(f'Rendering checkpoint at step {step}.')
    out_name = 'path_renders' if config.render_path else 'test_preds'
    out_name = f'{out_name}_step_{step}'
    base_dir = config.render_dir
    if base_dir is None:
        base_dir = config.checkpoint_dir
    out_dir = path.join(base_dir, out_name)
    if not utils.isdir(out_dir):
        utils.makedirs(out_dir)

    for idx in range(dataset.size):
        print(f'Evaluating image {idx + 1}/{dataset.size}')
        eval_start_time = time.time()
        batch = next(dataset)
        rendering = models.render_image(
            functools.partial(render_eval_pfn, state.optimizer.target),
            batch['rays'], None, config)
        print(f'Rendered in {time.time() - eval_start_time:0.3f}s')
        if jax.host_id() != 0:
            continue
        utils.save_img_u8(rendering['rgb'], path_fn(f'color_{idx:03d}.png'))
        time.sleep(3)
        utils.save_img_u8(rendering['normals'] / 2.0 + 0.5,
                          path_fn(f'normals_{idx:03d}.png'))
        time.sleep(3)
        utils.save_img_f32(rendering['distance_mean'],
                           path_fn(f'distance_mean_{idx:03d}.tiff'))
        time.sleep(3)
        utils.save_img_f32(rendering['distance_median'],
                           path_fn(f'distance_median_{idx:03d}.tiff'))
        time.sleep(3)
        utils.save_img_f32(rendering['acc'], path_fn(f'acc_{idx:03d}.tiff'))
from abc import ABCMeta, abstractmethod


class PitchExtractionInterface(metaclass=ABCMeta):
    # The method bodies were empty in the source row; marking them abstract
    # and raising NotImplementedError makes the interface valid Python.

    @abstractmethod
    def calc_prob(self, x):
        raise NotImplementedError

    @abstractmethod
    def calc_embed(self, x):
        raise NotImplementedError

    @abstractmethod
    def calc_pitch(self, x):
        raise NotImplementedError
import numba


@numba.core.imputils.lower_constant(ArrayBuilderType)  # decorator prefix truncated in the source row; `numba.core` is assumed
def lower_const_ArrayBuilder(context, builder, arraybuildertype, arraybuilder):
    layout = arraybuilder._layout
    attrs = arraybuilder._attrs
    rawptr = context.get_constant(numba.intp, arraybuilder._layout._ptr)
    proxyout = context.make_helper(builder, arraybuildertype)
    proxyout.rawptr = builder.inttoptr(
        rawptr, context.get_value_type(numba.types.voidptr))
    proxyout.pyptr = context.add_dynamic_addr(
        builder, id(layout), info=str(type(layout)))
    proxyout.pyattrs = context.add_dynamic_addr(
        builder, id(attrs), info=str(type(attrs)))
    return proxyout._getvalue()
def _load_video_1cam(idx: int, paths: List[str], poses: torch.Tensor, out_h: int, out_w: int, load_every: int=1): filters = [('scale', f'w={out_w}:h={out_h}')] all_frames = iio.imread(paths[idx], plugin='pyav', format='rgb24', constant_framerate=True, thread_count=2, filter_sequence=filters) (imgs, timestamps) = ([], []) for (frame_idx, frame) in enumerate(all_frames): if ((frame_idx % load_every) != 0): continue if (frame_idx >= 300): break imgs.append(torch.from_numpy(frame)) timestamps.append(frame_idx) imgs = torch.stack(imgs, 0) (med_img, _) = torch.median(imgs, dim=0) return (imgs, poses[idx].expand(len(timestamps), (- 1), (- 1)), med_img, torch.tensor(timestamps, dtype=torch.int32))
import torch


class _UniformExpertAssignment(torch.autograd.Function):

    @staticmethod  # torch.autograd.Function.forward must be a staticmethod
    def forward(ctx, x, num_experts):
        # Assign items to experts round-robin, ignoring the routing scores.
        out = torch.arange(x.numel(), dtype=x.dtype, device=x.device)
        out = torch.remainder(out, num_experts)
        return out.view(x.shape)
def test_synthetic_sample_results_in_sampled_delay_with_weighted_delays_per_arm():
    n_actions = 3
    delay_function = ExponentialDelaySampler(
        max_scale=100.0, min_scale=10.0, random_state=12345
    ).exponential_delay_function_expected_reward_weighted
    dataset = BanditEnvironmentSimulator(
        n_actions=n_actions,
        reward_function=logistic_sparse_reward_function,
        delay_function=delay_function,
        random_state=12345)
    actual_bandits_dataset = dataset.next_bandit_round_batch(n_rounds=1000)
    ordered_rewards = actual_bandits_dataset.expected_rewards[0].argsort()
    mean_delays = actual_bandits_dataset.round_delays.sum(axis=0)
    # A higher expected reward should produce a shorter delay, so totals
    # should decrease with increasing reward. The source row's chain ended in
    # a self-referential `> mean_delays[ordered_rewards[2]]`; it is assumed to
    # be a typo for the lowest-reward arm.
    assert (mean_delays[ordered_rewards[2]]
            < mean_delays[ordered_rewards[1]]
            < mean_delays[ordered_rewards[0]])
@register_model  # decorator truncated to "_model" in the source row; timm's registration decorator is assumed
def densenetblur121d(pretrained=False, **kwargs):
    """DenseNet-121 with a deep stem and blur pooling (BlurPool2d)."""
    model = _densenet('densenetblur121d', growth_rate=32, block_config=(6, 12, 24, 16),
                      pretrained=pretrained, stem_type='deep', aa_layer=BlurPool2d, **kwargs)
    return model
def check_label_existence(value_label, usr_utt_tok): (in_usr, usr_pos) = get_token_pos(usr_utt_tok, value_label) if ((not in_usr) and (value_label in LABEL_MAPS)): for value_label_variant in LABEL_MAPS[value_label]: (in_usr, usr_pos) = get_token_pos(usr_utt_tok, value_label_variant) if in_usr: break return (in_usr, usr_pos)
class SympyPredictingOptimizer(ABC): def step(self): pass def prediction(self, nsteps): pass
def set_learning_rate(optim, lr):
    for param_group in optim.param_groups:
        param_group['lr'] = lr
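A short usage sketch: set_learning_rate works with any torch.optim optimizer, since they all expose param_groups.

import torch

model = torch.nn.Linear(4, 2)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
set_learning_rate(optim, 0.01)
assert all(g['lr'] == 0.01 for g in optim.param_groups)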
def get_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('l', help='Location whose data needs to be trained/tested with. '
                                  'Values can be one of [Bondville, Boulder, Desert_Rock, '
                                  'Fort_Peck, Goodwin_Creek, Penn_State, Sioux_Falls]')
    parser.add_argument('y', help='4 digit Test year. One among [2009, 2015, 2016, 2017]')
    parser.add_argument('t', help='True or False. To train using 2010-2011 data or not')
    parser.add_argument('--num-epochs', default=1000, type=int,
                        help='Number of training, testing epochs')
    args, _ = parser.parse_known_args()

    test_year = args.y
    if test_year not in ['2009', '2015', '2016', '2017']:
        print('Test year argument is not valid. Exiting...')
        parser.print_help()
        exit()

    if args.t in ['True', 'true']:
        run_train = True
    elif args.t in ['False', 'false']:
        run_train = False
    else:
        print('Train flag is invalid. It should be True or False. Exiting...')
        parser.print_help()
        exit()

    test_location = args.l
    # The source row merged several locations into one string
    # ('Fort_Peck,Goodwin_Creek, Penn_State'); they are split into separate
    # entries here to match the help text.
    if test_location not in ['Bondville', 'Boulder', 'Desert_Rock', 'Fort_Peck',
                             'Goodwin_Creek', 'Penn_State', 'Sioux_Falls']:
        print('Test location is not valid. Exiting...')
        parser.print_help()
        exit()

    num_epochs = args.num_epochs
    print('test_location=', test_location, 'test_year=', test_year,
          'run_train=', run_train, 'num_epochs=', num_epochs)
    return (test_location, test_year, run_train, num_epochs)
import os

from detectron2.engine import DefaultTrainer
from detectron2.evaluation import COCOEvaluator, PascalVOCDetectionEvaluator


class Trainer(DefaultTrainer):

    @classmethod  # build_evaluator takes `cls`, so the decorator is required
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
        if 'coco' in dataset_name:
            return COCOEvaluator(dataset_name, cfg, True, output_folder)
        assert 'voc' in dataset_name
        return PascalVOCDetectionEvaluator(dataset_name)
def make_batches(lines, args, task, max_positions, encode_fn): tokens = [task.source_dictionary.encode_line(encode_fn(src_str), add_if_not_exist=False).long() for src_str in lines] lengths = [t.numel() for t in tokens] itr = task.get_batch_iterator(dataset=task.build_dataset_for_inference(tokens, lengths), max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=max_positions, ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test).next_epoch_itr(shuffle=False) for batch in itr: (yield Batch(ids=batch['id'], src_tokens=batch['net_input']['src_tokens'], src_lengths=batch['net_input']['src_lengths']))
class BaseTestCase(unittest.TestCase): def test_base_correct(self): query = '' (corrected_sent, detail) = m.correct(query) print(corrected_sent, detail) self.assertEqual(corrected_sent, '') self.assertEqual(detail, [('', '', 0, 2), ('', '', 9, 11)]) def test_base_demos(self): sents = ['', '', '', '', '', ''] res = [] for name in sents: (s, r) = m.correct(name) print(r) res.append(r) self.assertEqual(res[0], [('', '', 4, 6), ('', '', 10, 11)]) self.assertEqual(res[1], []) self.assertEqual(res[2], [('', '', 14, 15)]) self.assertEqual(res[3], [('', '', 0, 2), ('', '', 9, 11)]) self.assertEqual(res[4], [('', '', 7, 9)]) self.assertEqual(res[5], [('', '', 5, 7)]) def test_confusion_dict_file(self): sents = ['iphonex,', '', ''] res = [] for name in sents: (s, r) = m.correct(name) print(r) res.append(r) self.assertEqual(res[0], []) self.assertEqual(res[1], [('', '', 14, 17)]) m.set_custom_confusion_path_or_dict('../examples/my_custom_confusion.txt') res = [] for name in sents: (s, r) = m.correct(name) print(r) res.append(r) self.assertEqual(res[0], [('iphonex', 'iphoneX', 1, 8)]) self.assertEqual(res[1], []) def test_confusion_dict_dict(self): sents = ['iphonex,', '', ''] res = [] for name in sents: print(name, m.detect(name)) (s, r) = m.correct(name) print(r) res.append(r) self.assertEqual(res[0], []) self.assertEqual(res[1], [('', '', 14, 17)]) print(('*' * 42)) m_dict = {'iphonex': 'iphoneX', '': '', '': ''} m.set_custom_confusion_path_or_dict(m_dict) res = [] for name in sents: print(name, m.detect(name)) (s, r) = m.correct(name) print(r) res.append(r) self.assertEqual(res[0], [('iphonex', 'iphoneX', 1, 8)]) self.assertEqual(res[1], [])
def build_argparse(): parser = argparse.ArgumentParser() parser.add_argument('--data_dir', type=str, default='data/constituency', help='Directory of constituency data.') parser.add_argument('--wordvec_dir', type=str, default='extern_data/wordvec', help='Directory of word vectors') parser.add_argument('--wordvec_file', type=str, default='', help='File that contains word vectors') parser.add_argument('--wordvec_pretrain_file', type=str, default=None, help='Exact name of the pretrain file to read') parser.add_argument('--pretrain_max_vocab', type=int, default=250000) parser.add_argument('--charlm_forward_file', type=str, default=None, help='Exact path to use for forward charlm') parser.add_argument('--charlm_backward_file', type=str, default=None, help='Exact path to use for backward charlm') parser.add_argument('--bert_model', type=str, default=None, help='Use an external bert model (requires the transformers package)') parser.add_argument('--no_bert_model', dest='bert_model', action='store_const', const=None, help="Don't use bert") parser.add_argument('--bert_hidden_layers', type=int, default=4, help='How many layers of hidden state to use from the transformer') parser.add_argument('--bert_hidden_layers_original', action='store_const', const=None, dest='bert_hidden_layers', help='Use layers 2,3,4 of the Bert embedding') parser.add_argument('--bert_finetune', default=False, action='store_true', help='Finetune the bert (or other transformer)') parser.add_argument('--no_bert_finetune', dest='bert_finetune', action='store_false', help="Don't finetune the bert (or other transformer)") parser.add_argument('--bert_finetune_layers', default=None, type=int, help='Only finetune this many layers from the transformer') parser.add_argument('--bert_finetune_begin_epoch', default=None, type=int, help='Which epoch to start finetuning the transformer') parser.add_argument('--bert_finetune_end_epoch', default=None, type=int, help='Which epoch to stop finetuning the transformer') parser.add_argument('--bert_learning_rate', default=0.009, type=float, help='Scale the learning rate for transformer finetuning by this much') parser.add_argument('--stage1_bert_learning_rate', default=None, type=float, help='Scale the learning rate for transformer finetuning by this much only during an AdaDelta warmup') parser.add_argument('--bert_weight_decay', default=0.0001, type=float, help='Scale the weight decay for transformer finetuning by this much') parser.add_argument('--stage1_bert_finetune', default=None, action='store_true', help="Finetune the bert (or other transformer) during an AdaDelta warmup, even if the second half doesn't use bert_finetune") parser.add_argument('--no_stage1_bert_finetune', dest='stage1_bert_finetune', action='store_false', help="Don't finetune the bert (or other transformer) during an AdaDelta warmup, even if the second half doesn't use bert_finetune") parser.add_argument('--tag_embedding_dim', type=int, default=20, help='Embedding size for a tag. 0 turns off the feature') parser.add_argument('--delta_embedding_dim', type=int, default=100, help='Embedding size for a delta embedding') parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.') parser.add_argument('--silver_file', type=str, default=None, help='Secondary training file.') parser.add_argument('--silver_remove_duplicates', default=False, action='store_true', help="Do/don't remove duplicates from the silver training file. 
Could be useful for intentionally reweighting some trees") parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.') parser.add_argument('--tokenized_file', type=str, default=None, help='Input file of tokenized text for parsing with parse_text.') parser.add_argument('--tokenized_dir', type=str, default=None, help='Input directory of tokenized text for parsing with parse_text.') parser.add_argument('--mode', default='train', choices=['train', 'parse_text', 'predict', 'remove_optimizer']) parser.add_argument('--num_generate', type=int, default=0, help='When running a dev set, how many sentences to generate beyond the greedy one') add_predict_output_args(parser) parser.add_argument('--lang', type=str, help='Language') parser.add_argument('--shorthand', type=str, help='Treebank shorthand') parser.add_argument('--transition_embedding_dim', type=int, default=20, help='Embedding size for a transition') parser.add_argument('--transition_hidden_size', type=int, default=20, help='Embedding size for transition stack') parser.add_argument('--transition_stack', default=StackHistory.LSTM, type=(lambda x: StackHistory[x.upper()]), help='How to track transitions over a parse. {}'.format(', '.join((x.name for x in StackHistory)))) parser.add_argument('--transition_heads', default=4, type=int, help='How many heads to use in MHA *if* the transition_stack is Attention') parser.add_argument('--constituent_stack', default=StackHistory.LSTM, type=(lambda x: StackHistory[x.upper()]), help='How to track transitions over a parse. {}'.format(', '.join((x.name for x in StackHistory)))) parser.add_argument('--constituent_heads', default=8, type=int, help='How many heads to use in MHA *if* the transition_stack is Attention') parser.add_argument('--hidden_size', type=int, default=512, help='Size of the output layers for constituency stack and word queue') parser.add_argument('--epochs', type=int, default=400) parser.add_argument('--epoch_size', type=int, default=5000, help="Runs this many trees in an 'epoch' instead of going through the training dataset exactly once. Set to 0 to do the whole training set") parser.add_argument('--silver_epoch_size', type=int, default=None, help="Runs this many trees in a silver 'epoch'. If not set, will match --epoch_size") parser.add_argument('--multistage', default=True, action='store_true', help='1/2 epochs with adadelta no pattn or lattn, 1/4 with chosen optim and no lattn, 1/4 full model') parser.add_argument('--no_multistage', dest='multistage', action='store_false', help="don't do the multistage learning") parser.add_argument('--oracle_initial_epoch', type=int, default=1, help='Epoch where we start using the dynamic oracle to let the parser keep going with wrong decisions') parser.add_argument('--oracle_frequency', type=float, default=0.8, help='How often to use the oracle vs how often to force the correct transition') parser.add_argument('--oracle_forced_errors', type=float, default=0.001, help='Occasionally have the model randomly walk through the state space to try to learn how to recover') parser.add_argument('--oracle_level', type=int, default=None, help='Restrict oracle transitions to this level or lower. 0 means off. 
None means use all oracle transitions.') parser.add_argument('--train_batch_size', type=int, default=30, help='How many trees to train before taking an optimizer step') parser.add_argument('--eval_batch_size', type=int, default=50, help='How many trees to batch when running eval') parser.add_argument('--save_dir', type=str, default='saved_models/constituency', help='Root dir for saving models.') parser.add_argument('--save_name', type=str, default='{shorthand}_{embedding}_{finetune}_constituency.pt', help='File name to save the model') parser.add_argument('--save_each_name', type=str, default=None, help='Save each model in sequence to this pattern. Mostly for testing') parser.add_argument('--seed', type=int, default=1234) utils.add_device_args(parser) parser.add_argument('--learning_rate', default=None, type=float, help='Learning rate for the optimizer. Reasonable values are 1.0 for adadelta or 0.001 for SGD. None uses a default for the given optimizer: {}'.format(DEFAULT_LEARNING_RATES)) parser.add_argument('--learning_eps', default=None, type=float, help='eps value to use in the optimizer. None uses a default for the given optimizer: {}'.format(DEFAULT_LEARNING_EPS)) parser.add_argument('--learning_momentum', default=None, type=float, help='Momentum. None uses a default for the given optimizer: {}'.format(DEFAULT_MOMENTUM)) parser.add_argument('--learning_weight_decay', default=None, type=float, help='Weight decay (eg, l2 reg) to use in the optimizer') parser.add_argument('--learning_rho', default=DEFAULT_LEARNING_RHO, type=float, help='Rho parameter in Adadelta') parser.add_argument('--learning_beta2', default=0.999, type=float, help='Beta2 argument for AdamW') parser.add_argument('--optim', default=None, help='Optimizer type: SGD, AdamW, Adadelta, AdaBelief, Madgrad') parser.add_argument('--stage1_learning_rate', default=None, type=float, help='Learning rate to use in the first stage of --multistage. None means use default: {}'.format(DEFAULT_LEARNING_RATES['adadelta'])) parser.add_argument('--learning_rate_warmup', default=0, type=int, help="Number of epochs to ramp up learning rate from 0 to full. Set to 0 to always use the chosen learning rate. Currently not functional, as it didn't do anything") parser.add_argument('--learning_rate_factor', default=0.6, type=float, help='Plateau learning rate decreate when plateaued') parser.add_argument('--learning_rate_patience', default=5, type=int, help='Plateau learning rate patience') parser.add_argument('--learning_rate_cooldown', default=10, type=int, help='Plateau learning rate cooldown') parser.add_argument('--learning_rate_min_lr', default=None, type=float, help='Plateau learning rate minimum') parser.add_argument('--stage1_learning_rate_min_lr', default=None, type=float, help='Plateau learning rate minimum (stage 1)') parser.add_argument('--grad_clipping', default=None, type=float, help='Clip abs(grad) to this amount. Use --no_grad_clipping to turn off grad clipping') parser.add_argument('--no_grad_clipping', action='store_const', const=None, dest='grad_clipping', help='Use --no_grad_clipping to turn off grad clipping') parser.add_argument('--loss', default='cross', help='cross, large_margin, or focal. 
Focal requires `pip install focal_loss_torch`') parser.add_argument('--loss_focal_gamma', default=2, type=float, help='gamma value for a focal loss') parser.add_argument('--word_dropout', default=0.2, type=float, help='Dropout on the word embedding') parser.add_argument('--predict_dropout', default=0.2, type=float, help='Dropout on the final prediction layer') parser.add_argument('--lstm_layer_dropout', default=0.0, type=float, help='Dropout in the LSTM layers') parser.add_argument('--lstm_input_dropout', default=0.2, type=float, help='Dropout on the input to an LSTM') parser.add_argument('--transition_scheme', default=TransitionScheme.IN_ORDER, type=(lambda x: TransitionScheme[x.upper()]), help='Transition scheme to use. {}'.format(', '.join((x.name for x in TransitionScheme)))) parser.add_argument('--reversed', default=False, action='store_true', help='Do the transition sequence reversed') parser.add_argument('--combined_dummy_embedding', default=True, action='store_true', help='Use the same embedding for dummy nodes and the vectors used when combining constituents') parser.add_argument('--no_combined_dummy_embedding', dest='combined_dummy_embedding', action='store_false', help="Don't use the same embedding for dummy nodes and the vectors used when combining constituents") parser.add_argument('--nonlinearity', default='relu', choices=NONLINEARITY.keys(), help='Nonlinearity to use in the model. relu is a noticeable improvement over tanh') parser.add_argument('--maxout_k', default=None, type=int, help='Use maxout layers instead of a nonlinearity for the output layers') parser.add_argument('--use_silver_words', default=True, dest='use_silver_words', action='store_true', help="Train/don't train word vectors for words only in the silver dataset") parser.add_argument('--no_use_silver_words', default=True, dest='use_silver_words', action='store_false', help="Train/don't train word vectors for words only in the silver dataset") parser.add_argument('--rare_word_unknown_frequency', default=0.02, type=float, help='How often to replace a rare word with UNK when training') parser.add_argument('--rare_word_threshold', default=0.02, type=float, help='How many words to consider as rare words as a fraction of the dataset') parser.add_argument('--tag_unknown_frequency', default=0.001, type=float, help='How often to replace a tag with UNK when training') parser.add_argument('--num_lstm_layers', default=2, type=int, help='How many layers to use in the LSTMs') parser.add_argument('--num_tree_lstm_layers', default=None, type=int, help='How many layers to use in the TREE_LSTMs, if used. This also increases the width of the word outputs to match the tree lstm inputs. Default 2 if TREE_LSTM or TREE_LSTM_CX, 1 otherwise') parser.add_argument('--num_output_layers', default=3, type=int, help='How many layers to use at the prediction level') parser.add_argument('--sentence_boundary_vectors', default=SentenceBoundary.EVERYTHING, type=(lambda x: SentenceBoundary[x.upper()]), help='Vectors to learn at the start & end of sentences. {}'.format(', '.join((x.name for x in SentenceBoundary)))) parser.add_argument('--constituency_composition', default=ConstituencyComposition.MAX, type=(lambda x: ConstituencyComposition[x.upper()]), help='How to build a new composition from its children. 
{}'.format(', '.join((x.name for x in ConstituencyComposition)))) parser.add_argument('--reduce_heads', default=8, type=int, help='Number of attn heads to use when reducing children into a parent tree (constituency_composition == attn)') parser.add_argument('--reduce_position', default=None, type=int, help="Dimension of position vector to use when reducing children. None means 1/4 hidden_size, 0 means don't use (constituency_composition == key | untied_key)") parser.add_argument('--relearn_structure', action='store_true', help='Starting from an existing checkpoint, add or remove pattn / lattn. One thing that works well is to train an initial model using adadelta with no pattn, then add pattn with adamw') parser.add_argument('--finetune', action='store_true', help='Load existing model during `train` mode from `load_name` path') parser.add_argument('--checkpoint_save_name', type=str, default=None, help='File name to save the most recent checkpoint') parser.add_argument('--no_checkpoint', dest='checkpoint', action='store_false', help="Don't save checkpoints") parser.add_argument('--load_name', type=str, default=None, help='Model to load when finetuning, evaluating, or manipulating an existing file') parser.add_argument('--load_package', type=str, default=None, help='Download an existing stanza package & use this for tests, finetuning, etc') retagging.add_retag_args(parser) parser.add_argument('--pattn_d_model', default=1024, type=int, help='Partitioned attention model dimensionality') parser.add_argument('--pattn_morpho_emb_dropout', default=0.2, type=float, help='Dropout rate for morphological features obtained from pretrained model') parser.add_argument('--pattn_encoder_max_len', default=512, type=int, help='Max length that can be put into the transformer attention layer') parser.add_argument('--pattn_num_heads', default=8, type=int, help='Partitioned attention model number of attention heads') parser.add_argument('--pattn_d_kv', default=64, type=int, help='Size of the query and key vector') parser.add_argument('--pattn_d_ff', default=2048, type=int, help='Size of the intermediate vectors in the feed-forward sublayer') parser.add_argument('--pattn_relu_dropout', default=0.1, type=float, help='ReLU dropout probability in feed-forward sublayer') parser.add_argument('--pattn_residual_dropout', default=0.2, type=float, help='Residual dropout probability for all residual connections') parser.add_argument('--pattn_attention_dropout', default=0.2, type=float, help='Attention dropout probability') parser.add_argument('--pattn_num_layers', default=0, type=int, help='Number of layers for the Partitioned Attention. 
Currently turned off') parser.add_argument('--pattn_bias', default=False, action='store_true', help='Whether or not to learn an additive bias') parser.add_argument('--pattn_timing', default='sin', choices=['learned', 'sin'], help='Use a learned embedding or a sin embedding') parser.add_argument('--lattn_d_input_proj', default=None, type=int, help='If set, project the non-positional inputs down to this size before proceeding.') parser.add_argument('--lattn_d_kv', default=64, type=int, help='Dimension of the key/query vector') parser.add_argument('--lattn_d_proj', default=64, type=int, help='Dimension of the output vector from each label attention head') parser.add_argument('--lattn_resdrop', default=True, action='store_true', help='Whether or not to use Residual Dropout') parser.add_argument('--lattn_pwff', default=True, action='store_true', help='Whether or not to use a Position-wise Feed-forward Layer') parser.add_argument('--lattn_q_as_matrix', default=False, action='store_true', help='Whether or not Label Attention uses learned query vectors. False means it does') parser.add_argument('--lattn_partitioned', default=True, action='store_true', help='Whether or not it is partitioned') parser.add_argument('--no_lattn_partitioned', default=True, action='store_false', dest='lattn_partitioned', help='Whether or not it is partitioned') parser.add_argument('--lattn_combine_as_self', default=False, action='store_true', help='Whether or not the layer uses concatenation. False means it does') parser.add_argument('--lattn_d_l', default=32, type=int, help='Number of labels') parser.add_argument('--lattn_attention_dropout', default=0.2, type=float, help='Dropout for attention layer') parser.add_argument('--lattn_d_ff', default=2048, type=int, help='Dimension of the Feed-forward layer') parser.add_argument('--lattn_relu_dropout', default=0.2, type=float, help='Relu dropout for the label attention') parser.add_argument('--lattn_residual_dropout', default=0.2, type=float, help='Residual dropout for the label attention') parser.add_argument('--lattn_combined_input', default=True, action='store_true', help='Combine all inputs for the lattn, not just the pattn') parser.add_argument('--use_lattn', default=False, action='store_true', help='Use the lattn layers - currently turned off') parser.add_argument('--no_lattn_combined_input', dest='lattn_combined_input', action='store_false', help="Don't combine all inputs for the lattn, not just the pattn") parser.add_argument('--log_norms', default=False, action='store_true', help='Log the parameters norms while training. A very noisy option') parser.add_argument('--log_shapes', default=False, action='store_true', help='Log the parameters shapes at the beginning') parser.add_argument('--watch_regex', default=None, help='regex to describe which weights and biases to output, if any') parser.add_argument('--wandb', action='store_true', help='Start a wandb session and write the results of training. Only applies to training. Use --wandb_name instead to specify a name') parser.add_argument('--wandb_name', default=None, help='Name of a wandb session to start when training. Will default to the dataset short name') parser.add_argument('--wandb_norm_regex', default=None, help='Log on wandb any tensor whose norm matches this matrix. Might get cluttered?') return parser
@task_interface.register_fl_task(model='net_model', data_loader='val_loader', device='device')  # the decorated object's name was truncated to "_interface" in the source row; an OpenFL TaskInterface instance named `task_interface` is assumed
def validate(net_model, val_loader, device):
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    net_model.eval()
    net_model.to(device)
    val_loader = tqdm.tqdm(val_loader, desc='validate')
    val_score = 0
    total_samples = 0
    with torch.no_grad():
        for data, target in val_loader:
            samples = target.shape[0]
            total_samples += samples
            data, target = torch.tensor(data).to(device), torch.tensor(target).to(device)
            output = net_model(data)
            pred = output.argmax(dim=1)
            val_score += pred.eq(target).sum().cpu().numpy()
    return {'acc': val_score / total_samples}
class SMPBlock(nn.Module): def __init__(self, in_channels, dw_channels, lk_size, drop_path, n_points=None, n_points_divide=4): super().__init__() self.pw1 = conv_bn_relu(in_channels, dw_channels, 1, 1, 0, groups=1) self.pw2 = conv_bn(dw_channels, in_channels, 1, 1, 0, groups=1) self.large_kernel = SMPCNN(in_channels=dw_channels, out_channels=dw_channels, kernel_size=lk_size, stride=1, groups=dw_channels, n_points=n_points, n_points_divide=n_points_divide) self.lk_nonlinear = nn.ReLU() self.prelkb_bn = get_bn(in_channels) self.drop_path = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity()) print('drop path:', self.drop_path) def forward(self, x): out = self.prelkb_bn(x) out = self.pw1(out) out = self.large_kernel(out) out = self.lk_nonlinear(out) out = self.pw2(out) return (x + self.drop_path(out))
def potential_neighbors(dom1, dom2): if (isinstance(dom1, DOMElementPAD) or isinstance(dom2, DOMElementPAD)): return False if (dom1.ref == dom2.ref): return False return True
class IntegerVectorsModPermutationGroup(UniqueRepresentation): def __classcall__(cls, G, sum=None, max_part=None, sgs=None): if ((sum is None) and (max_part is None)): if G.domain(): return IntegerVectorsModPermutationGroup_All(G, sgs=sgs) else: return IntegerVectorsModPermutationGroup_with_constraints(G, 0, max_part=(- 1), sgs=sgs) else: if (sum is not None): assert (sum == NN(sum)) if (max_part is not None): assert (max_part == NN(max_part)) return IntegerVectorsModPermutationGroup_with_constraints(G, sum, max_part, sgs=sgs)
from collections import UserString


class MutableString(UserString):
    # A Python-2-era class: `basestring` is replaced with `str` so the
    # isinstance checks work on Python 3. __setslice__/__delslice__ are only
    # invoked by Python 2 but are kept for fidelity to the original.

    def __init__(self, string=''):
        self.data = string

    def __hash__(self):
        raise TypeError('unhashable type (it is mutable)')

    def __setitem__(self, index, sub):
        if index < 0:
            index += len(self.data)
        if index < 0 or index >= len(self.data):
            raise IndexError
        self.data = self.data[:index] + sub + self.data[index + 1:]

    def __delitem__(self, index):
        if index < 0:
            index += len(self.data)
        if index < 0 or index >= len(self.data):
            raise IndexError
        self.data = self.data[:index] + self.data[index + 1:]

    def __setslice__(self, start, end, sub):
        start = max(start, 0)
        end = max(end, 0)
        if isinstance(sub, UserString):
            self.data = self.data[:start] + sub.data + self.data[end:]
        elif isinstance(sub, str):
            self.data = self.data[:start] + sub + self.data[end:]
        else:
            self.data = self.data[:start] + str(sub) + self.data[end:]

    def __delslice__(self, start, end):
        start = max(start, 0)
        end = max(end, 0)
        self.data = self.data[:start] + self.data[end:]

    def immutable(self):
        return UserString(self.data)

    def __iadd__(self, other):
        if isinstance(other, UserString):
            self.data += other.data
        elif isinstance(other, str):
            self.data += other
        else:
            self.data += str(other)
        return self

    def __imul__(self, n):
        self.data *= n
        return self
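A quick demo sketch of the mutable operations: item assignment goes through __setitem__, += through __iadd__, and hashing is deliberately forbidden.

s = MutableString('hello')
s[0] = 'H'
s += '!'
print(s)            # Hello!
try:
    hash(s)
except TypeError as e:
    print(e)        # unhashable type (it is mutable)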
def quantize_model_(model, p=0.2, bits=8, update_step=3000): quantized_layers = get_layers(model, '(.*?)') for layer in quantized_layers: is_master_process = ((not dist.is_initialized()) or (dist.is_initialized() and (dist.get_rank() == 0))) module = attrgetter(layer)(model) if is_master_process: logging.info(f'Quantizing layer {layer} with bits={bits} and QuantNoise={p}') q_params = {'p': p, 'update_step': update_step, 'bits': bits, 'method': 'histogram', 'counter': 0} if isinstance(module, tuple(MAPPING.keys())): QuantizedModule = MAPPING[module.__class__] quantized_module = QuantizedModule.__new__(QuantizedModule) params = module.__dict__ params.update(q_params) quantized_module.__dict__.update(params) else: if is_master_process: logging.info(f'Module {module} not yet supported for quantization') continue a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method='histogram') attrsetter(layer)(model, quantized_module) return quantized_layers
def get_dataset(dataset, batch_size=256, augment=False): mean = (0.5, 0.5, 0.5) std = (0.5, 0.5, 0.5) num_workers = 4 if (dataset in ['mnist', 'kmnist', 'fashionmnist']): if augment: transform_train = transforms.Compose([transforms.RandomCrop(28, padding=4), transforms.RandomHorizontalFlip(), transforms.RandomRotation(15), transforms.ToTensor(), transforms.Normalize(mean=(0.5,), std=(0.5,))]) else: transform_train = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=(0.5,), std=(0.5,))]) transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=(0.5,), std=(0.5,))]) elif (dataset in ['mnist_32', 'fashionmnist_32']): transform_train = transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize(mean=(0.5,), std=(0.5,))]) transform_test = transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize(mean=(0.5,), std=(0.5,))]) elif (dataset in ['gtsrb']): transform_train = transforms.Compose([transforms.Resize((32, 32)), transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) transform_test = transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) elif (dataset in ['cifar10_gray']): if augment: transform_train = transforms.Compose([transforms.Grayscale(), transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.RandomRotation(15), transforms.ToTensor(), transforms.Normalize(mean=(0.5,), std=(0.5,))]) else: transform_train = transforms.Compose([transforms.Grayscale(), transforms.ToTensor(), transforms.Normalize(mean=(0.5,), std=(0.5,))]) transform_test = transforms.Compose([transforms.Grayscale(), transforms.ToTensor(), transforms.Normalize(mean=(0.5,), std=(0.5,))]) else: if augment: transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.RandomRotation(15), transforms.ToTensor(), transforms.Normalize(mean, std)]) else: transform_train = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]) transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]) if (dataset in ['mnist', 'mnist_32']): trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform_train) testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform_test) elif (dataset in ['kmnist']): trainset = torchvision.datasets.KMNIST(root='./data', train=True, download=True, transform=transform_train) testset = torchvision.datasets.KMNIST(root='./data', train=False, download=True, transform=transform_test) elif (dataset in ['fashionmnist', 'fashionmnist_32']): trainset = torchvision.datasets.FashionMNIST(root='./data', train=True, download=True, transform=transform_train) testset = torchvision.datasets.FashionMNIST(root='./data', train=False, download=True, transform=transform_test) elif (dataset in ['cifar10', 'cifar10_gray']): trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test) elif (dataset == 'cifar100'): trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train) testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, 
transform=transform_test) elif (dataset == 'svhn'): trainset = torchvision.datasets.SVHN(root='./data', split='train', download=True, transform=transform_train) testset = torchvision.datasets.SVHN(root='./data', split='test', download=True, transform=transform_test) elif (dataset == 'gtsrb'): trainset = GTSRB(train=True, transform=transform_train) testset = GTSRB(train=False, transform=transform_test) else: sys.exit('Unknown dataset {}'.format(dataset)) trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True) testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True) return (trainloader, testloader)
def type_check(param_name, param_schema, input_params): type_check_error = [] doc_type = convert_type(param_schema.get('type', '')) if (doc_type and (not isinstance(input_params[param_name], doc_type))): type_error = True if (((doc_type in [int, float, bool]) and (type(input_params[param_name]) == str)) or ((doc_type == float) and (type(input_params[param_name]) == int))): try: input_params[param_name] = doc_type(input_params[param_name]) type_error = False except ValueError: pass if type_error: type_check_error.append((param_name, convert_type(doc_type), convert_type(type(input_params[param_name])))) if (('enum' in param_schema) and (input_params[param_name] != '') and (input_params[param_name] not in param_schema['enum'])): type_check_error.append((param_name, f"one of {param_schema['enum']}", f'"{input_params[param_name]}"')) return type_check_error
@pytest.fixture(scope='module')  # the "@pytest.fixture"/"@pytest.mark" prefixes were truncated in the source row and are assumed here
@pytest.mark.usefixtures('columns')
def simple_dataframe_pandas(columns):
    data = [(1, 2, 19842), (1, 4, 19844), (1, 3, 19843), (1, 5, 19845),
            (1, 6, 19846), (1, 7, 19847), (2, 1, 19841), (2, 2, 19842),
            (2, 3, 19843), (2, 4, 19844), (3, 10, 19844), (4, 11, 19843),
            (4, 12, 19845), (1, 1, 19841)]
    return pd.DataFrame(data, columns=columns)
def _check_likelihood(model: GPModel, classification: bool, likelihood_variance: Optional[float], empirical_variance: Optional[TensorType], trainable_likelihood: bool) -> None: if classification: assert isinstance(model.likelihood, gpflow.likelihoods.Bernoulli) else: assert isinstance(model.likelihood, gpflow.likelihoods.Gaussian) if (likelihood_variance is not None): npt.assert_allclose(tf.constant(model.likelihood.variance), likelihood_variance, rtol=1e-06) else: npt.assert_allclose(tf.constant(model.likelihood.variance), (empirical_variance / (SIGNAL_NOISE_RATIO_LIKELIHOOD ** 2)), rtol=1e-06) assert isinstance(model.likelihood.variance, gpflow.Parameter) assert (model.likelihood.variance.trainable == trainable_likelihood)
class OrderedMultisetPartitionsIntoSets_n_constraints(OrderedMultisetPartitionsIntoSets): def __init__(self, n, **constraints): self._n = n OrderedMultisetPartitionsIntoSets.__init__(self, True, size=n, **constraints) def _repr_(self): cdict = dict(self.constraints) cdict.pop('size', None) base_repr = ('Ordered Multiset Partitions into Sets of integer %s' % self._n) return (base_repr + self._constraint_repr_(cdict))
class RawExplorationStrategy(ExplorationStrategy, metaclass=abc.ABCMeta): def get_action_from_raw_action(self, action, **kwargs): pass def get_actions_from_raw_actions(self, actions, **kwargs): raise NotImplementedError() def get_action(self, t, policy, *args, **kwargs): (action, agent_info) = policy.get_action(*args, **kwargs) return (self.get_action_from_raw_action(action, t=t), agent_info) def get_actions(self, t, policy, *args, **kwargs): actions = policy.get_actions(*args, **kwargs) return self.get_actions_from_raw_actions(actions, **kwargs) def reset(self): pass
class SAGE(torch.nn.Module): def __init__(self, in_channels, hidden_channels, out_channels, num_layers): super().__init__() self.num_layers = num_layers self.convs = torch.nn.ModuleList() self.convs.append(SAGEConv(in_channels, hidden_channels)) for _ in range((num_layers - 2)): self.convs.append(SAGEConv(hidden_channels, hidden_channels)) self.convs.append(SAGEConv(hidden_channels, out_channels)) def reset_parameters(self): for conv in self.convs: conv.reset_parameters() def forward(self, x, adjs): for (i, (edge_index, _, size)) in enumerate(adjs): x_target = x[:size[1]] x = self.convs[i]((x, x_target), edge_index) if (i != (self.num_layers - 1)): x = F.relu(x) x = F.dropout(x, p=0.5, training=self.training) return x.log_softmax(dim=(- 1)) def inference(self, x_all): pbar = tqdm(total=(x_all.size(0) * self.num_layers)) pbar.set_description('Evaluating') total_edges = 0 for i in range(self.num_layers): xs = [] for (batch_size, n_id, adj) in subgraph_loader: (edge_index, _, size) = adj.to(device) total_edges += edge_index.size(1) x = x_all[n_id].to(device) x_target = x[:size[1]] x = self.convs[i]((x, x_target), edge_index) if (i != (self.num_layers - 1)): x = F.relu(x) xs.append(x.cpu()) pbar.update(batch_size) x_all = torch.cat(xs, dim=0) pbar.close() return x_all
# The last two decorators were truncated to "_has_autograd" and "_utils.test()"
# in the source row; taichi's `if_has_autograd` and `test_utils.test` helpers
# are assumed.
@pytest.mark.parametrize('tifunc,npfunc', [
    (lambda x: ti.tanh(x), lambda x: np.tanh(x)),
    (lambda x: ti.sin(x), lambda x: np.sin(x)),
    (lambda x: ti.cos(x), lambda x: np.cos(x)),
    (lambda x: ti.acos(x), lambda x: np.arccos(x)),
    (lambda x: ti.asin(x), lambda x: np.arcsin(x)),
])
@if_has_autograd
@test_utils.test()
def test_trigonometric(tifunc, npfunc):
    grad_test(tifunc, npfunc)
    grad_test_fwd(tifunc, npfunc)
# The decorators were truncated to ".pairing" and ".dependency()" in the
# source row; `@pytest.mark.dependency()` is assumed for the second, and the
# first is kept as a pytest mark of the same (otherwise unknown) name.
@pytest.mark.pairing
@pytest.mark.dependency()
def test_build_and_search():
    global _tempdir
    conf = utils.relative_file('spacegraphcats/conf/twofoo-short.yaml')
    target = 'search'
    status = run_snakemake(conf, verbose=True, outdir=_tempdir, extra_args=[target])
    assert status == 0
    output_files = ['twofoo-short_k31/bcalm.unitigs.db',
                    'twofoo-short_k31/contigs.mphf',
                    'twofoo-short_k31_r1/catlas.csv',
                    'twofoo-short_k31_r1_search_oh0/results.csv']
    for filename in output_files:
        fullpath = os.path.join(_tempdir, filename)
        assert os.path.exists(fullpath), fullpath
def handle_sampling_error(is_tmp_file, output_file_path, sampling_error): if ('Unable to sample any rows for the given conditions' in str(sampling_error)): raise sampling_error error_msg = None if is_tmp_file: error_msg = f'Error: Sampling terminated. Partial results are stored in a temporary file: {output_file_path}. This file will be overridden the next time you sample. Please rename the file if you wish to save these results.' elif (output_file_path is not None): error_msg = f'Error: Sampling terminated. Partial results are stored in {output_file_path}.' if error_msg: raise type(sampling_error)(((error_msg + '\n') + str(sampling_error))) raise sampling_error
def validate_before_compute_similarity(float_tensor: Any, fxp_tensor: Any): assert isinstance(float_tensor, np.ndarray) assert isinstance(fxp_tensor, np.ndarray) assert (float_tensor.shape == fxp_tensor.shape)
import numpy as np


def fake_colmap_normal(in_depth_path, out_normal_path):
    depth_image = read_gipuma_dmb(in_depth_path)
    image_shape = np.shape(depth_image)
    # Build a constant (1, 1, 1) normal for every pixel...
    normal_image = np.ones_like(depth_image)
    normal_image = np.reshape(normal_image, (image_shape[0], image_shape[1], 1))
    normal_image = np.tile(normal_image, [1, 1, 3])
    # ...and normalize it to unit length. The divisor was truncated to "1." in
    # the source row; sqrt(3) is assumed, since |(1, 1, 1)| = sqrt(3).
    normal_image = normal_image / np.sqrt(3)
    mask_image = np.squeeze(np.where(depth_image > 0, 1, 0))
    mask_image = np.reshape(mask_image, (image_shape[0], image_shape[1], 1))
    mask_image = np.tile(mask_image, [1, 1, 3])
    mask_image = np.float32(mask_image)
    normal_image = np.multiply(normal_image, mask_image)
    normal_image = np.float32(normal_image)
    write_gipuma_dmb(out_normal_path, normal_image)
class Embeder(nn.Module): def __init__(self, conf, fields): super(Embeder, self).__init__() self.conf = conf if ('pos' in fields.inputs): self.pos_emb = nn.Embedding(fields.get_vocab_size('pos'), conf.n_pos_embed) else: self.pos_emb = None if ('char' in fields.inputs): self.feat = CharLSTM(n_chars=fields.get_vocab_size('char'), n_embed=conf.n_char_embed, n_out=conf.n_char_out, pad_index=fields.get_pad_index('char'), input_dropout=conf.char_input_dropout) self.feat_name = 'char' if ('bert' in fields.inputs): self.feat = TransformerEmbedding(model=fields.get_bert_name(), n_layers=conf.n_bert_layers, n_out=conf.n_bert_out, pad_index=fields.get_pad_index('bert'), dropout=conf.mix_dropout, requires_grad=conf.finetune, use_projection=conf.use_projection, use_scalarmix=conf.use_scalarmix) self.feat_name = 'bert' print(fields.get_bert_name()) if (('char' not in fields.inputs) and ('bert' not in fields.inputs)): self.feat = None if ('word' in fields.inputs): ext_emb = fields.get_ext_emb() if ext_emb: self.word_emb = copy.deepcopy(ext_emb) else: self.word_emb = nn.Embedding(num_embeddings=fields.get_vocab_size('word'), embedding_dim=conf.n_embed) else: self.word_emb = None def forward(self, ctx): emb = {} if self.pos_emb: emb['pos'] = self.pos_emb(ctx['pos']) if self.word_emb: emb['word'] = self.word_emb(ctx['word']) if self.feat: emb[self.feat_name] = self.feat(ctx[self.feat_name]) ctx['embed'] = emb def get_output_dim(self): size = 0 if self.pos_emb: size += self.conf.n_pos_embed if self.word_emb: if isinstance(self.word_emb, nn.Embedding): size += self.conf.n_embed else: size += self.word_emb.get_dim() if self.feat: if (self.feat_name == 'char'): size += self.conf.n_char_out else: size += self.feat.n_out return size
import torch
import torch.nn.functional as F
from torch import nn


class FocalLoss(nn.Module):

    def __init__(self, focusing_param=2, balance_param=0.25):
        super(FocalLoss, self).__init__()
        self.focusing_param = focusing_param
        self.balance_param = balance_param

    def forward(self, output, target, reduction='mean'):
        # logpt = -CE = log p_t. The source row also computed an unused
        # `cross_entropy` / `torch.log(cross_entropy)` pair; that dead code is
        # dropped here.
        logpt = -F.cross_entropy(output, target, reduction=reduction)
        pt = torch.exp(logpt)
        focal_loss = -((1 - pt) ** self.focusing_param) * logpt
        return self.balance_param * focal_loss
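A minimal usage sketch for this FocalLoss: with focusing_param=0 and balance_param=1 the formula collapses to -logpt, i.e. plain cross-entropy, which makes a handy consistency check.

import torch
import torch.nn.functional as F

logits = torch.randn(8, 5)                  # batch of 8, 5 classes
targets = torch.randint(0, 5, (8,))
loss = FocalLoss(focusing_param=2, balance_param=0.25)(logits, targets)
# Degenerate settings recover plain cross-entropy:
plain = FocalLoss(focusing_param=0, balance_param=1.0)(logits, targets)
assert torch.allclose(plain, F.cross_entropy(logits, targets))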
def _merge_a_into_b(a, b, stack=None): assert isinstance(a, AttrDict), '`a` (cur type {}) must be an instance of {}'.format(type(a), AttrDict) assert isinstance(b, AttrDict), '`b` (cur type {}) must be an instance of {}'.format(type(b), AttrDict) for (k, v_) in a.items(): full_key = ((('.'.join(stack) + '.') + k) if (stack is not None) else k) if (k not in b): raise KeyError('Non-existent config key: {}'.format(full_key)) v = copy.deepcopy(v_) v = _decode_cfg_value(v) v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key) if isinstance(v, AttrDict): try: stack_push = ([k] if (stack is None) else (stack + [k])) _merge_a_into_b(v, b[k], stack=stack_push) except BaseException: raise else: b[k] = v
def distshift_detector_preprocess(data: List[pd.DataFrame], domain_index: Union[(List[int], np.ndarray)], domain_index_name: str='domain_index', n_states: int=2): df = pd.concat(data) domain_index = np.array(domain_index) df[domain_index_name] = domain_index data_array = df.to_numpy() var_names = df.columns.tolist() transforms = Heterogeneous2DiscreteTransform(nstates=n_states) discrete = set_discrete_variable(var_names, domain_index_name) transforms.fit(data_array, var_names=var_names, discrete=discrete) data_transformed = transforms.transform(data_array) data_obj = TabularData(data_transformed, var_names=var_names) return (data_obj, var_names)
import torch
import torch.nn.functional as F


def logits_to_probs(logits, is_binary=False):
    if is_binary:
        return torch.sigmoid(logits)
    return F.softmax(logits, dim=-1)
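A quick usage sketch: binary logits map through a sigmoid, categorical logits through a softmax over the last dimension, so each categorical row sums to 1.

import torch

binary = logits_to_probs(torch.tensor([0.0, 2.0]), is_binary=True)   # each entry in (0, 1)
categorical = logits_to_probs(torch.randn(3, 4))                     # rows sum to 1
assert torch.allclose(categorical.sum(dim=-1), torch.ones(3))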
def unregister_compiled_sdfg_call_hook(hook_id: int): if (hook_id >= len(_COMPILED_SDFG_CALL_HOOKS)): raise ValueError('Invalid hook ID') _COMPILED_SDFG_CALL_HOOKS[hook_id] = None
@deprecated('This function is now called SE3ToXYZQUAT. Please change for this new signature to delete this warning.')  # decorator name truncated in the source row; a `deprecated` warning helper is assumed
def se3ToXYZQUAT(M):
    return pin.SE3ToXYZQUAT(M)
def main(): p = argparse.ArgumentParser() p.add_argument('node_mh_pickle') p.add_argument('lca_db') args = p.parse_args() node_mhs = pickle.load(open(args.node_mh_pickle, 'rb')) lca_obj = LCA_Database() lca_obj.load(args.lca_db) databases = ((lca_obj, args.lca_db, 'LCA'),) d = {} n_pure95 = 0 total = 0 for (k, v) in node_mhs.items(): ss = sourmash.SourmashSignature(v) results = [x[0] for x in gather_databases(ss, databases, 0, True)] sum_f_uniq = sum([result.f_unique_to_query for result in results]) keep_results = [] for result in results: if (result.f_unique_to_query < 0.1): break keep_results.append(result) if (not keep_results): print('** no match for {}'.format(k)) continue idents = [result.name.split()[0].split('.')[0] for result in keep_results] idxlist = [lca_obj.ident_to_idx[ident] for ident in idents] lidlist = [lca_obj.idx_to_lid[idx] for idx in idxlist] lineages = [lca_obj.lid_to_lineage[lid] for lid in lidlist] tree = lca_utils.build_tree(lineages) (lca, reason) = lca_utils.find_lca(tree) level = '*none*' if lca: level = lca[(- 1)].rank lineage = ';'.join(lca_utils.zip_lineage(lca, truncate_empty=True)) this_f_uniq = sum([result.f_unique_to_query for result in keep_results]) print('node {} matches {} {:.1f}'.format(k, level, ((this_f_uniq / sum_f_uniq) * 100))) if ((level in ('strain', 'genus', 'species')) and ((this_f_uniq / sum_f_uniq) >= 0.95)): n_pure95 += 1 total += 1 print('XXX', n_pure95, total)
def GenCircle_PUNGraph(Nodes, NodeOutDeg=1, IsDir=True): return _snap.GenCircle_PUNGraph(Nodes, NodeOutDeg, IsDir)
def process_constraints(constraints, columns, slots): slot_values = {} skip_db_with_one_table = False for constraint in constraints: if ('P0==' == constraint): assert ('{OP0}' in slots) slot_values['{OP0}'] = '=' elif ('P1==' == constraint): assert ('{OP1}' in slots) slot_values['{OP1}'] = '=' elif ('P0=P1==' == constraint): assert (('{OP0}' in slots) and ('{OP1}' in slots)) slot_values['{OP0}'] = '=' slot_values['{OP1}'] = '=' elif ('P0=P1=P2==' == constraint): assert (('{OP0}' in slots) and ('{OP1}' in slots) and ('{OP2}' in slots)) slot_values['{OP0}'] = '=' slot_values['{OP1}'] = '=' slot_values['{OP2}'] = '=' elif ('P0=>' == constraint): assert ('{OP0}' in slots) slot_values['{OP0}'] = '>' elif ('P0=<' == constraint): assert ('{OP0}' in slots) slot_values['{OP0}'] = '<' elif ('{AGG0}=MIN' == constraint): assert ('{AGG0}' in slots) slot_values['{AGG0}'] = 'MIN' elif ('{AGG0}=MAX' == constraint): assert ('{AGG0}' in slots) slot_values['{AGG0}'] = 'MAX' elif ('C0-id' == constraint): skip_db_with_one_table = True assert (('{COLUMN0}' in slots) and ('{COLUMN0}' in columns.keys())) columns['{COLUMN0}'].append('id') elif ('C1-id' == constraint): skip_db_with_one_table = True assert (('{COLUMN1}' in slots) and ('{COLUMN1}' in columns.keys())) columns['{COLUMN1}'].append('id') elif ('C2-id' == constraint): skip_db_with_one_table = True assert (('{COLUMN2}' in slots) and ('{COLUMN2}' in columns.keys())) columns['{COLUMN2}'].append('id') elif ('C3-T1' == constraint): skip_db_with_one_table = True assert (('{COLUMN3}' in slots) and ('{COLUMN3}' in columns.keys())) columns['{COLUMN3}'].append('T1') elif (('T0-T1-JOIN' == constraint) or ('T0-T1-NO-JOIN' == constraint)): skip_db_with_one_table = True columns['{COLUMN0}'].append('T0') if ('{COLUMN1}' in columns.keys()): columns['{COLUMN1}'].append('T1') return (slot_values, columns, skip_db_with_one_table)
class Token(object): def __init__(self, start_mark, end_mark): self.start_mark = start_mark self.end_mark = end_mark def __repr__(self): attributes = [key for key in self.__dict__ if (not key.endswith('_mark'))] attributes.sort() arguments = ', '.join([('%s=%r' % (key, getattr(self, key))) for key in attributes]) return ('%s(%s)' % (self.__class__.__name__, arguments))
class CondBatchNorm3d(_CondBatchNorm): def _check_input_dim(self, input): if (input.dim() != 5): raise ValueError('expected 5D input (got {}D input)'.format(input.dim()))
class FairseqAdamConfig(FairseqDataclass): adam_betas: Any = field(default=(0.9, 0.999), metadata={'help': 'betas for Adam optimizer'}) adam_eps: float = field(default=1e-08, metadata={'help': 'epsilon for Adam optimizer'}) weight_decay: float = field(default=0.0, metadata={'help': 'weight decay'}) use_old_adam: bool = field(default=False, metadata={'help': 'Use fairseq.optim.adam.Adam'}) fp16_adam_stats: bool = field(default=False, metadata={'help': 'use FP16 stats (with automatic scaling)'}) tpu: bool = II('common.tpu') lr: List[float] = II('optimization.lr')
class RegressionLoss(object): def __call__(self, pred, gtruth): loss = self.criterion(pred, gtruth) return loss
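# The RegressionLoss record above omits its __init__, so self.criterion is never
# set. A minimal hedged completion, assuming a torch criterion such as
# nn.MSELoss is the intended default (the criterion used upstream is not shown):
import torch
import torch.nn as nn

class RegressionLossExample(object):
    def __init__(self, criterion=None):
        # assumption: fall back to mean-squared error when no criterion is given
        self.criterion = criterion if criterion is not None else nn.MSELoss()

    def __call__(self, pred, gtruth):
        return self.criterion(pred, gtruth)

# usage sketch
loss_fn = RegressionLossExample()
loss = loss_fn(torch.randn(4, 1), torch.zeros(4, 1))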
def resnet_mark_before_relu(model): if isinstance(model, DataParallel): model.module.conv1.before_relu = True else: model.conv1.before_relu = True mark_bottlenetck_before_relu(model) mark_basicblock_before_relu(model)
def test_top_selector_find_top_k_binary_values_none(): k = 5 ts = TopSelector() tst = TopSelectorTorch() with pytest.raises(TypeError): ts.find_top_k_binary(None, k) with pytest.raises(TypeError): tst.find_top_k_binary(None, k)  # k defined locally as an arbitrary positive value (assumption; undefined in source)
class MaxMarginRankingLoss(nn.Module): def __init__(self, margin=1.0, negative_weighting=False, batch_size=1, n_pair=1, hard_negative_rate=0.5): super(MaxMarginRankingLoss, self).__init__() self.margin = margin self.n_pair = n_pair self.batch_size = batch_size easy_negative_rate = (1 - hard_negative_rate) self.easy_negative_rate = easy_negative_rate self.negative_weighting = negative_weighting if ((n_pair > 1) and (batch_size > 1)): alpha = (easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate))) mm_mask = (((1 - alpha) * np.eye(self.batch_size)) + alpha) mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair))) mm_mask = (torch.tensor(mm_mask) * (batch_size * (1 - easy_negative_rate))) self.mm_mask = mm_mask.float() def forward(self, x): d = torch.diag(x) max_margin = (F.relu(((self.margin + x) - d.view((- 1), 1))) + F.relu(((self.margin + x) - d.view(1, (- 1))))) if (self.negative_weighting and (self.n_pair > 1) and (self.batch_size > 1)): max_margin = (max_margin * self.mm_mask.to(max_margin.device)) return max_margin.mean()
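# Hedged usage sketch for MaxMarginRankingLoss above: with the defaults
# (n_pair=1, batch_size=1) no mm_mask is built, so the loss reduces to a plain
# max-margin ranking over a square similarity matrix whose diagonal holds the
# positive pairs. The 8x8 similarity matrix below is synthetic.
import torch

sim = torch.randn(8, 8)            # similarity scores; diagonal = matched pairs
criterion = MaxMarginRankingLoss(margin=0.2)
loss = criterion(sim)              # scalar; penalizes off-diagonal scores within the margin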
def action_open_cache_dir(): d = misc.get_cache_home() misc.info(f'Opening cache directory: {d}') if (sys.platform == 'win32'): os.startfile(d) elif (sys.platform == 'darwin'): subprocess.Popen(['open', d]) else: subprocess.Popen(['xdg-open', d])
def load_data(args): if (args.dataset_str == 'dblp'): (adj_list, features, train_data, train_label, test_data, test_label) = load_data_dblp('dataset/DBLP4057_GAT_with_idx_tra200_val_800.mat') node_size = features.shape[0] node_embedding = features.shape[1] class_size = train_label.shape[1] train_size = len(train_data) paras = [node_size, node_embedding, class_size, train_size] if ((args.dataset_str == 'example') and (args.model != 'GAS')): if (args.model == 'GEM'): (adj_list, features, train_data, train_label, test_data, test_label) = load_example_gem() if (args.model == 'SemiGNN'): (adj_list, features, train_data, train_label, test_data, test_label) = load_example_semi() node_size = features.shape[0] node_embedding = features.shape[1] class_size = train_label.shape[1] train_size = len(train_data) paras = [node_size, node_embedding, class_size, train_size] if ((args.dataset_str == 'example') and (args.model == 'GAS')): (adj_list, features, train_data, train_label, test_data, test_label) = load_data_gas() node_embedding_r = features[0].shape[1] node_embedding_u = features[1].shape[1] node_embedding_i = features[2].shape[1] node_size = features[0].shape[0] h_u_size = (adj_list[0].shape[1] * (node_embedding_r + node_embedding_u)) h_i_size = (adj_list[2].shape[1] * (node_embedding_r + node_embedding_i)) class_size = train_label.shape[1] train_size = len(train_data) paras = [node_size, node_embedding_r, node_embedding_u, node_embedding_i, class_size, train_size, h_u_size, h_i_size] return (adj_list, features, train_data, train_label, test_data, test_label, paras)
def visualize_heatmap(image, mask): masks = norm_image(mask).astype(np.uint8) heatmap = cv2.applyColorMap(masks, cv2.COLORMAP_JET) heatmap = np.float32(heatmap) heatmap = cv2.resize(heatmap, (image.shape[1], image.shape[0])) cam = ((0.4 * heatmap) + (0.6 * np.float32(image))) return cam
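# Hedged usage sketch for visualize_heatmap above. norm_image is not defined in
# this record; a min-max scaler to [0, 255] is assumed here as a stand-in.
import cv2
import numpy as np

def norm_image(m):
    m = m.astype(np.float32)
    return 255.0 * (m - m.min()) / max(m.max() - m.min(), 1e-8)

image = np.zeros((64, 64, 3), dtype=np.uint8)
mask = np.random.rand(32, 32)
cam = visualize_heatmap(image, mask)  # float32 overlay with the same HxW as image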
@numba.extending.lower_builtin('end_tuple', ArrayBuilderType) def lower_endtuple(context, builder, sig, args): (arraybuildertype,) = sig.args (arraybuilderval,) = args proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval) call(context, builder, libawkward.ArrayBuilder_endtuple, (proxyin.rawptr,)) return context.get_dummy_value()  # decorator prefix reconstructed as numba.extending (assumption; truncated in source)
class TFRobertaModel(metaclass=DummyObject): _backends = ['tf'] def __init__(self, *args, **kwargs): requires_backends(self, ['tf'])
def create_cumprod_input(rng, shape, axis, with_mask, with_random_zero_pos, zero_pos): x = rng.randn(*shape).astype(np.float32) if with_mask: if with_random_zero_pos: mask = (rng.rand(*shape) > (1.0 / shape[axis])) x = (x * mask) else: x.swapaxes(0, axis)[zero_pos] = 0 return x
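# Hedged usage sketch for create_cumprod_input above: rng is assumed to be a
# numpy RandomState. With with_mask=True and with_random_zero_pos=False, a zero
# slice is planted at zero_pos along the chosen axis (via the swapaxes view) to
# exercise cumprod's zero handling.
import numpy as np

rng = np.random.RandomState(0)
x = create_cumprod_input(rng, shape=(2, 3, 4), axis=1, with_mask=True,
                         with_random_zero_pos=False, zero_pos=1)
assert (x.swapaxes(0, 1)[1] == 0).all()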
def load_config(path, default_path=None): with open(path, 'r') as f: cfg_special = yaml.load(f, Loader=yaml.CLoader) if (default_path is not None): with open(default_path, 'r') as f: cfg_default = yaml.load(f, Loader=yaml.CLoader) else: cfg_default = dict() cfg_import = collect_imports(cfg_special) print('IMPORTS') print(cfg_import) update_default(cfg_special, cfg_import) update_default(cfg_special, cfg_default) print('FINAL CONFIG') print(cfg_special) global CFG CFG = cfg_special return cfg_special
def get_args(): parser = argparse.ArgumentParser() parser.add_argument('--in_data', type=str) parser.add_argument('--nb_train', type=int, default=(- 1)) parser.add_argument('--nb_jobs', type=int, default=8) parser.add_argument('--nb_splits', type=int, default=5) parser.add_argument('--eval_split', type=int, default=0) parser.add_argument('--model', type=str, default='cnn') parser.add_argument('--input_len', type=int, default=10) parser.add_argument('--offset_len', type=int, default=10) parser.add_argument('--pred_len', type=int, default=10) parser.add_argument('--inter_list', type=int, nargs='*', default=[]) parser.add_argument('--last_list', type=int, nargs='*', default=[]) parser.add_argument('--channel_list', type=int, nargs='*', default=[]) parser.add_argument('--deconv_list', type=int, nargs='*', default=[]) parser.add_argument('--ksize_list', type=int, nargs='*', default=[]) parser.add_argument('--dc_ksize_list', type=int, nargs='*', default=[]) parser.add_argument('--pad_list', type=int, nargs='*', default=[]) parser.add_argument('--nb_iters', type=int, default=10000) parser.add_argument('--iter_snapshot', type=int, default=1000) parser.add_argument('--iter_display', type=int, default=100) parser.add_argument('--batch_size', type=int, default=32) parser.add_argument('--optimizer', type=str, default='adam') parser.add_argument('--lr', type=float, default=0.1) parser.add_argument('--lr_step_list', type=float, nargs='*', default=[]) parser.add_argument('--momentum', type=float, default=0.99) parser.add_argument('--resume', type=str, default='') parser.add_argument('--gpu', type=int, default=(- 1)) parser.add_argument('--debug', action='store_true') parser.add_argument('--save_model', action='store_true') parser.add_argument('--root_dir', type=str, default='outputs') parser.add_argument('--width', type=int, default=1280) parser.add_argument('--height', type=int, default=960) parser.add_argument('--nb_grids', type=int, default=6) parser.add_argument('--seed', type=int, default=1701) parser.add_argument('--ego_type', type=str, default='sfm') return parser.parse_args()
@register_spec([HookScope.GLOBAL]) def after_call(context: HookContext, case: Case, response: GenericResponse) -> None: pass  # decorator prefix and stub body reconstructed (assumption; truncated in source)
def is_square(input_matrix: Union[(sparse.csr_matrix, np.ndarray)]) -> bool: return (input_matrix.shape[0] == input_matrix.shape[1])
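# Quick usage check for is_square above; it works for both dense arrays and
# scipy CSR matrices since only .shape is touched.
import numpy as np
from scipy import sparse

assert is_square(np.zeros((3, 3)))
assert not is_square(sparse.csr_matrix((2, 5)))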
class MatFile5Reader(MatFileReader): def __init__(self, mat_stream, byte_order=None, mat_dtype=False, squeeze_me=False, chars_as_strings=True, matlab_compatible=False, struct_as_record=True, verify_compressed_data_integrity=True, uint16_codec=None): super(MatFile5Reader, self).__init__(mat_stream, byte_order, mat_dtype, squeeze_me, chars_as_strings, matlab_compatible, struct_as_record, verify_compressed_data_integrity) if (not uint16_codec): uint16_codec = sys.getdefaultencoding() self.uint16_codec = uint16_codec self._file_reader = None self._matrix_reader = None def guess_byte_order(self): self.mat_stream.seek(126) mi = self.mat_stream.read(2) self.mat_stream.seek(0) return (((mi == b'IM') and '<') or '>') def read_file_header(self): hdict = {} hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header'] hdr = read_dtype(self.mat_stream, hdr_dtype) hdict['__header__'] = hdr['description'].item().strip(b' \t\n\x00') v_major = (hdr['version'] >> 8) v_minor = (hdr['version'] & 255) hdict['__version__'] = ('%d.%d' % (v_major, v_minor)) return hdict def initialize_read(self): self._file_reader = VarReader5(self) self._matrix_reader = VarReader5(self) def read_var_header(self): (mdtype, byte_count) = self._file_reader.read_full_tag() if (not (byte_count > 0)): raise ValueError('Did not read any bytes') next_pos = (self.mat_stream.tell() + byte_count) if (mdtype == miCOMPRESSED): stream = ZlibInputStream(self.mat_stream, byte_count) self._matrix_reader.set_stream(stream) check_stream_limit = self.verify_compressed_data_integrity (mdtype, byte_count) = self._matrix_reader.read_full_tag() else: check_stream_limit = False self._matrix_reader.set_stream(self.mat_stream) if (not (mdtype == miMATRIX)): raise TypeError(('Expecting miMATRIX type here, got %d' % mdtype)) header = self._matrix_reader.read_header(check_stream_limit) return (header, next_pos) def read_var_array(self, header, process=True): return self._matrix_reader.array_from_header(header, process) def get_variables(self, variable_names=None): if isinstance(variable_names, string_types): variable_names = [variable_names] elif (variable_names is not None): variable_names = list(variable_names) self.mat_stream.seek(0) self.initialize_read() mdict = self.read_file_header() mdict['__globals__'] = [] while (not self.end_of_stream()): (hdr, next_position) = self.read_var_header() name = asstr(hdr.name) if (name in mdict): warnings.warn(('Duplicate variable name "%s" in stream - replacing previous with new\nConsider mio5.varmats_from_mat to split file into single variable files' % name), MatReadWarning, stacklevel=2) if (name == ''): name = '__function_workspace__' process = False else: process = True if ((variable_names is not None) and (name not in variable_names)): self.mat_stream.seek(next_position) continue try: res = self.read_var_array(hdr, process) except MatReadError as err: warnings.warn(('Unreadable variable "%s", because "%s"' % (name, err)), Warning, stacklevel=2) res = ('Read error: %s' % err) self.mat_stream.seek(next_position) mdict[name] = res if hdr.is_global: mdict['__globals__'].append(name) if (variable_names is not None): variable_names.remove(name) if (len(variable_names) == 0): break return mdict def list_variables(self): self.mat_stream.seek(0) self.initialize_read() self.read_file_header() vars = [] while (not self.end_of_stream()): (hdr, next_position) = self.read_var_header() name = asstr(hdr.name) if (name == ''): name = '__function_workspace__' shape = self._matrix_reader.shape_from_header(hdr) if hdr.is_logical: 
info = 'logical' else: info = mclass_info.get(hdr.mclass, 'unknown') vars.append((name, shape, info)) self.mat_stream.seek(next_position) return vars
def ismatrix(t): return ((isinstance(t, (list, tuple)) and (len(t) > 0) and issequence(t[0])) or (isinstance(t, np.ndarray) and (t.ndim == 2)))
def test_ListOffset(): builder = ListOffset('int64', Numpy('float64', ''), '') subbuilder = builder.begin_list() subbuilder.append(1.1) subbuilder.append(2.2) subbuilder.append(3.3) builder.end_list() subbuilder = builder.begin_list() builder.end_list() subbuilder = builder.begin_list() subbuilder.append(4.4) subbuilder.append(5.5) builder.end_list() error = Ref('') assert builder.is_valid(error), error.value names_nbytes = {} builder.buffer_nbytes(names_nbytes) assert (len(names_nbytes) == 2) buffers = {name: np.empty(nbytes, np.uint8) for (name, nbytes) in names_nbytes.items()} builder.to_buffers(buffers) assert (buffers['node0-offsets'].view('int64').tolist() == [0, 3, 3, 5]) assert (buffers['node1-data'].view('float64').tolist() == [1.1, 2.2, 3.3, 4.4, 5.5]) assert (builder.form() == '{"class": "ListOffsetArray", "offsets": "i64", "content": {"class": "NumpyArray", "primitive": "float64", "form_key": "node1"}, "form_key": "node0"}') array = ak.from_buffers(builder.form(), builder.length(), buffers) assert (array.tolist() == [[1.1, 2.2, 3.3], [], [4.4, 5.5]])
class ReActNetBlock(nn.Module): def __init__(self, inplanes, planes, stride=1): super(ReActNetBlock, self).__init__() norm_layer = nn.BatchNorm2d self.move11 = LearnableBias(inplanes) self.binary_activation1 = BinaryActivation() self.binary_3x3 = conv3x3(inplanes, inplanes, stride=stride) self.bn1 = norm_layer(inplanes) self.move12 = LearnableBias(inplanes) self.prelu1 = nn.PReLU(inplanes) self.move13 = LearnableBias(inplanes) self.move21 = LearnableBias(inplanes) self.binary_activation2 = BinaryActivation() self.binary_pw = conv1x1(inplanes, planes) self.bn2 = norm_layer(planes) self.move22 = LearnableBias(planes) self.prelu2 = nn.PReLU(planes) self.move23 = LearnableBias(planes) self.binary_pw_down1 = conv1x1(inplanes, inplanes) self.binary_pw_down2 = conv1x1(inplanes, inplanes) self.bn2_1 = norm_layer(inplanes) self.bn2_2 = norm_layer(inplanes) self.stride = stride self.inplanes = inplanes self.planes = planes if (self.inplanes != self.planes): self.pooling = nn.AvgPool2d(2, 2) def forward(self, x, group=1): out1 = self.move11(x) out1 = self.binary_activation1(out1) out1 = self.binary_3x3(out1) out1 = self.bn1(out1) if (self.stride == 2): x = self.pooling(x) out1 = (x + out1) out1 = self.move12(out1) out1 = self.prelu1(out1) out1 = self.move13(out1) out2 = self.move21(out1) out2 = self.binary_activation2(out2) if (self.inplanes == self.planes): out2 = self.binary_pw(out2) out2 = self.bn2(out2) out2 += out1 else: assert (self.planes == (self.inplanes * 2)) out2_1 = self.binary_pw_down1(out2) out2_2 = self.binary_pw_down2(out2) out2_1 = self.bn2_1(out2_1) out2_2 = self.bn2_2(out2_2) out2_1 += out1 out2_2 += out1 out2 = torch.cat([out2_1, out2_2], dim=1) out2 = self.move22(out2) out2 = self.prelu2(out2) out2 = self.move23(out2) return out2
def get_quantizers(cfg, test, pname, with_bias=True): if (cfg.w_quantize in ['fp', 'parametric_fp_b_xmax', 'parametric_fp_d_xmax', 'parametric_fp_d_b', 'pow2', 'parametric_pow2_b_xmax', 'parametric_pow2_b_xmin', 'parametric_pow2_xmin_xmax']): if (pname in nn.get_parameters()): delta = find_delta(nn.get_parameters()[pname], cfg.w_bitwidth) else: delta = cfg.w_stepsize xmax = (delta * ((2 ** (cfg.w_bitwidth - 1)) - 1)) if ('pow2' in cfg.w_quantize): xmax = (2.0 ** np.round(np.log2(xmax))) xmin = (xmax / (2.0 ** ((2.0 ** (cfg.w_bitwidth - 1)) - 1))) xmin = np.clip(xmin, (cfg.w_xmin_min + 1e-05), (cfg.w_xmin_max - 1e-05)) xmax = np.clip(xmax, (cfg.w_xmax_min + 1e-05), (cfg.w_xmax_max - 1e-05)) if (not test): print(f'Quantized affine/conv initialized to delta={delta}, xmax={xmax}') quantization_b = None if (cfg.w_quantize == 'fp'): quantization_w = partial(F.fixed_point_quantize, sign=True, n=cfg.w_bitwidth, delta=delta) quantization_b = partial(F.fixed_point_quantize, sign=True, n=cfg.w_bitwidth, delta=delta) elif (cfg.w_quantize == 'parametric_fp_b_xmax'): quantization_w = partial(PQ.parametric_fixed_point_quantize_b_xmax, sign=True, n_init=cfg.w_bitwidth, n_min=cfg.w_bitwidth_min, n_max=cfg.w_bitwidth_max, xmax_init=xmax, xmax_min=cfg.w_xmax_min, xmax_max=cfg.w_xmax_max, name=re.sub('quantized_[^/]*/W$', 'Wquant', pname)) if with_bias: quantization_b = partial(PQ.parametric_fixed_point_quantize_b_xmax, sign=True, n_init=cfg.w_bitwidth, n_min=cfg.w_bitwidth_min, n_max=cfg.w_bitwidth_max, xmax_init=xmax, xmax_min=cfg.w_xmax_min, xmax_max=cfg.w_xmax_max, name=re.sub('quantized_[^/]*/W$', 'bquant', pname)) elif (cfg.w_quantize == 'parametric_fp_d_xmax'): quantization_w = partial(PQ.parametric_fixed_point_quantize_d_xmax, sign=True, d_init=delta, d_min=cfg.w_stepsize_min, d_max=cfg.w_stepsize_max, xmax_init=xmax, xmax_min=cfg.w_xmax_min, xmax_max=cfg.w_xmax_max, name=re.sub('quantized_[^/]*/W$', 'Wquant', pname)) if with_bias: quantization_b = partial(PQ.parametric_fixed_point_quantize_d_xmax, sign=True, d_init=delta, d_min=cfg.w_stepsize_min, d_max=cfg.w_stepsize_max, xmax_init=xmax, xmax_min=cfg.w_xmax_min, xmax_max=cfg.w_xmax_max, name=re.sub('quantized_[^/]*/W$', 'bquant', pname)) elif (cfg.w_quantize == 'parametric_fp_d_b'): quantization_w = partial(PQ.parametric_fixed_point_quantize_d_b, sign=True, n_init=cfg.w_bitwidth, n_min=cfg.w_bitwidth_min, n_max=cfg.w_bitwidth_max, d_init=delta, d_min=cfg.w_stepsize_min, d_max=cfg.w_stepsize_max, name=re.sub('quantized_[^/]*/W$', 'Wquant', pname)) if with_bias: quantization_b = partial(PQ.parametric_fixed_point_quantize_d_b, sign=True, n_init=cfg.w_bitwidth, n_min=cfg.w_bitwidth_min, n_max=cfg.w_bitwidth_max, d_init=delta, d_min=cfg.w_stepsize_min, d_max=cfg.w_stepsize_max, name=re.sub('quantized_[^/]*/W$', 'bquant', pname)) elif (cfg.w_quantize == 'pow2'): quantization_w = partial(F.pow2_quantize, sign=True, with_zero=False, n=cfg.w_bitwidth, m=np.round(np.log2(xmax))) if with_bias: quantization_b = partial(F.pow2_quantize, sign=True, with_zero=False, n=cfg.w_bitwidth, m=np.round(np.log2(xmax))) elif (cfg.w_quantize == 'parametric_pow2_b_xmax'): quantization_w = partial(PQ.parametric_pow2_quantize_b_xmax, sign=True, with_zero=False, n_init=cfg.w_bitwidth, n_min=cfg.w_bitwidth_min, n_max=cfg.w_bitwidth_max, xmax_init=xmax, xmax_min=cfg.w_xmax_min, xmax_max=cfg.w_xmax_max, name=re.sub('quantized_[^/]*/W$', 'Wquant', pname)) if with_bias: quantization_b = partial(PQ.parametric_pow2_quantize_b_xmax, sign=True, with_zero=False, n_init=cfg.w_bitwidth, 
n_min=cfg.w_bitwidth_min, n_max=cfg.w_bitwidth_max, xmax_init=xmax, xmax_min=cfg.w_xmax_min, xmax_max=cfg.w_xmax_max, name=re.sub('quantized_[^/]*/W$', 'bquant', pname)) elif (cfg.w_quantize == 'parametric_pow2_b_xmin'): quantization_w = partial(PQ.parametric_pow2_quantize_b_xmin, sign=True, with_zero=False, n_init=cfg.w_bitwidth, n_min=cfg.w_bitwidth_min, n_max=cfg.w_bitwidth_max, xmin_init=xmin, xmin_min=cfg.w_xmin_min, xmin_max=cfg.w_xmin_max, name=re.sub('quantized_[^/]*/W$', 'Wquant', pname)) if with_bias: quantization_b = partial(PQ.parametric_pow2_quantize_b_xmin, sign=True, with_zero=False, n_init=cfg.w_bitwidth, n_min=cfg.w_bitwidth_min, n_max=cfg.w_bitwidth_max, xmin_init=xmin, xmin_min=cfg.w_xmin_min, xmin_max=cfg.w_xmin_max, name=re.sub('quantized_[^/]*/W$', 'bquant', pname)) elif (cfg.w_quantize == 'parametric_pow2_xmin_xmax'): quantization_w = partial(PQ.parametric_pow2_quantize_xmin_xmax, sign=True, with_zero=False, xmin_init=xmin, xmin_min=cfg.w_xmin_min, xmin_max=cfg.w_xmin_max, xmax_init=xmax, xmax_min=cfg.w_xmax_min, xmax_max=cfg.w_xmax_max, name=re.sub('quantized_[^/]*/W$', 'Wquant', pname)) if with_bias: quantization_b = partial(PQ.parametric_pow2_quantize_xmin_xmax, sign=True, with_zero=False, xmin_init=xmin, xmin_min=cfg.w_xmin_min, xmin_max=cfg.w_xmin_max, xmax_init=xmax, xmax_min=cfg.w_xmax_min, xmax_max=cfg.w_xmax_max, name=re.sub('quantized_[^/]*/W$', 'bquant', pname)) else: quantization_w = None quantization_b = None return (quantization_w, quantization_b)
@pytest.mark.parametrize('grid', [1, 3, 6]) @pytest.mark.parametrize('block_size, context', [(40, 0), (55, 3), (80, 10), (128, 17), (256, 80), (512, 93)]) def test_cover2D(block_size, context, grid): lbl = real_image2d()[1] lbl = lbl.astype(np.int32) max_sizes = tuple(calculate_extents(lbl, func=np.max)) min_overlap = tuple(((1 + v) for v in max_sizes)) lbl = repeat(lbl, 4) assert (max_sizes == tuple(calculate_extents(lbl, func=np.max))) reassemble(lbl, 'YX', block_size, min_overlap, context, grid)
class Normal_Loader(Dataset): def __init__(self, is_train=1, path='/workspace/DATA/UCF-Crime/'): super(Normal_Loader, self).__init__() self.is_train = is_train self.path = path if (self.is_train == 1): data_list = os.path.join(path, 'train_normal.txt') with open(data_list, 'r') as f: self.data_list = f.readlines() else: data_list = os.path.join(path, 'test_normalv2.txt') with open(data_list, 'r') as f: self.data_list = f.readlines() random.shuffle(self.data_list) self.data_list = self.data_list[:(- 10)] def __len__(self): return len(self.data_list) def __getitem__(self, idx): if (self.is_train == 1): rgb_npy = np.load(os.path.join((self.path + 'all_rgbs'), (self.data_list[idx][:(- 1)] + '.npy'))) flow_npy = np.load(os.path.join((self.path + 'all_flows'), (self.data_list[idx][:(- 1)] + '.npy'))) concat_npy = np.concatenate([rgb_npy, flow_npy], axis=1) return concat_npy else: (name, frames, gts) = (self.data_list[idx].split(' ')[0], int(self.data_list[idx].split(' ')[1]), int(self.data_list[idx].split(' ')[2][:(- 1)])) rgb_npy = np.load(os.path.join((self.path + 'all_rgbs'), (name + '.npy'))) flow_npy = np.load(os.path.join((self.path + 'all_flows'), (name + '.npy'))) concat_npy = np.concatenate([rgb_npy, flow_npy], axis=1) return (concat_npy, gts, frames)
class MBInvertedConvLayer(MyModule): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, expand_ratio=6, mid_channels=None, act_func='relu6', use_se=False): super(MBInvertedConvLayer, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.expand_ratio = expand_ratio self.mid_channels = mid_channels self.act_func = act_func self.use_se = use_se if (self.mid_channels is None): feature_dim = round((self.in_channels * self.expand_ratio)) else: feature_dim = self.mid_channels if (self.expand_ratio == 1): self.inverted_bottleneck = None else: self.inverted_bottleneck = nn.Sequential(OrderedDict([('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)), ('bn', nn.BatchNorm2d(feature_dim)), ('act', build_activation(self.act_func, inplace=True))])) pad = get_same_padding(self.kernel_size) depth_conv_modules = [('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=feature_dim, bias=False)), ('bn', nn.BatchNorm2d(feature_dim)), ('act', build_activation(self.act_func, inplace=True))] if self.use_se: depth_conv_modules.append(('se', SEModule(feature_dim))) self.depth_conv = nn.Sequential(OrderedDict(depth_conv_modules)) self.point_linear = nn.Sequential(OrderedDict([('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)), ('bn', nn.BatchNorm2d(out_channels))])) def forward(self, x): if self.inverted_bottleneck: x = self.inverted_bottleneck(x) x = self.depth_conv(x) x = self.point_linear(x) return x def module_str(self): if (self.mid_channels is None): expand_ratio = self.expand_ratio else: expand_ratio = (self.mid_channels // self.in_channels) layer_str = ('%dx%d_MBConv%d_%s' % (self.kernel_size, self.kernel_size, expand_ratio, self.act_func.upper())) if self.use_se: layer_str = ('SE_' + layer_str) layer_str += ('_O%d' % self.out_channels) return layer_str def config(self): return {'name': MBInvertedConvLayer.__name__, 'in_channels': self.in_channels, 'out_channels': self.out_channels, 'kernel_size': self.kernel_size, 'stride': self.stride, 'expand_ratio': self.expand_ratio, 'mid_channels': self.mid_channels, 'act_func': self.act_func, 'use_se': self.use_se} def build_from_config(config): return MBInvertedConvLayer(**config)
def import_cves(): cf.logger.info(('-' * 70)) if db.table_exists('cve'): cf.logger.warning('The cve table already exists, loading and continuing extraction...') else: for year in range(initYear, (currentYear + 1)): extract_target = (('nvdcve-1.1-' + str(year)) + '.json') zip_file_url = ((urlhead + str(year)) + urltail) if os.path.isfile(((Path(cf.DATA_PATH) / 'json') / extract_target)): cf.logger.warning(f'Reusing the {year} CVE json file that was downloaded earlier...') json_file = ((Path(cf.DATA_PATH) / 'json') / extract_target) else: r = requests.get(zip_file_url) z = ZipFile(BytesIO(r.content)) json_file = z.extract(extract_target, (Path(cf.DATA_PATH) / 'json')) with open(json_file) as f: yearly_data = json.load(f) if (year == initYear): df_cve = pd.DataFrame(yearly_data) else: df_cve = df_cve.append(pd.DataFrame(yearly_data)) cf.logger.info(f'The CVE json for {year} has been merged') df_cve = preprocess_jsons(df_cve) df_cve = df_cve.applymap(str) assert df_cve.cve_id.is_unique, 'Primary keys are not unique in cve records!' df_cve.to_sql(name='cve', con=db.conn, if_exists='replace', index=False) cf.logger.info('All CVEs have been merged into the cve table') cf.logger.info(('-' * 70)) assign_cwes_to_cves(df_cve=df_cve)
def dump_paths(Graph, rating_pair, maxLen, sample_size, fw_file): for pair in rating_pair: user_id = pair[0] movie_id = pair[1] user_node = ('u' + user_id) movie_node = ('i' + movie_id) if (Graph.has_node(user_node) and Graph.has_node(movie_node)): mine_paths_between_nodes(Graph, user_node, movie_node, maxLen, sample_size, fw_file)
class FrameLevel(nn.Module): def __init__(self, input_dim, output_dim, hiddens=None, activation='ReLU', **kwargs): super().__init__() latest_dim = input_dim self.hiddens = [] if (hiddens is not None): for dim in hiddens: self.hiddens += [nn.Linear(latest_dim, dim), getattr(nn, activation)()] latest_dim = dim self.hiddens = nn.Sequential(*self.hiddens) self.linear = nn.Linear(latest_dim, output_dim) def forward(self, hidden_state, features_len=None): hidden_state = self.hiddens(hidden_state) logit = self.linear(hidden_state) return (logit, features_len)
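# Hedged usage sketch for FrameLevel above: a per-frame head that maps
# (batch, time, input_dim) features to logits, with optional hidden layers.
import torch

head = FrameLevel(input_dim=256, output_dim=10, hiddens=[128], activation='ReLU')
feats = torch.randn(4, 100, 256)             # (batch, frames, feature_dim)
logits, _ = head(feats, features_len=None)   # logits: (4, 100, 10)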
def load_bootstrap_config(cfg: CN): if (not cfg.BOOTSTRAP_DATASETS): return bootstrap_datasets_cfgnodes = [] for dataset_cfg in cfg.BOOTSTRAP_DATASETS: _C = get_bootstrap_dataset_config().clone() _C.merge_from_other_cfg(CN(dataset_cfg)) bootstrap_datasets_cfgnodes.append(_C) cfg.BOOTSTRAP_DATASETS = bootstrap_datasets_cfgnodes
def get_solver(solver_, warm_start_, num_cpu_, default_action_): if (solver_ == 'random_shooting'): mpc_solver = RandomShooting(dynamical_model=dynamical_model, reward_model=reward_model, horizon=HORIZON, gamma=1.0, num_samples=NUM_SAMPLES, num_elites=NUM_ELITES, termination=termination, terminal_reward=value_function, warm_start=warm_start_, default_action=default_action_, num_cpu=num_cpu_) elif (solver_ == 'cem_shooting'): mpc_solver = CEMShooting(dynamical_model=dynamical_model, reward_model=reward_model, horizon=HORIZON, gamma=1.0, num_iter=NUM_ITER, num_samples=NUM_SAMPLES, num_elites=NUM_ELITES, termination=termination, terminal_reward=value_function, warm_start=warm_start_, default_action=default_action_, num_cpu=num_cpu_) elif (solver_ == 'mppi_shooting'): mpc_solver = MPPIShooting(dynamical_model=dynamical_model, reward_model=reward_model, horizon=HORIZON, gamma=1.0, num_iter=NUM_ITER, kappa=KAPPA, filter_coefficients=BETAS, num_samples=NUM_SAMPLES, termination=termination, terminal_reward=value_function, warm_start=warm_start_, default_action=default_action_, num_cpu=num_cpu_) else: raise NotImplementedError return mpc_solver
def compatible_system_lift(compatible_system, split_primes_list): if (len(split_primes_list) != len(compatible_system)): raise ValueError('The number of primes does not match the length of the given exponent vectors.') exponent_vector_lift = [ZZ(compatible_system[0][0][0])] complement_vector_lift = [ZZ(compatible_system[0][1][0])] moduli_list = [(q - 1) for q in split_primes_list] L = lcm(moduli_list) t = len(compatible_system[0][0]) for i in range(1, t): exp_coord_residues = [pair[0][i] for pair in compatible_system] comp_coord_residues = [pair[1][i] for pair in compatible_system] ev_lift_coordinate = CRT(exp_coord_residues, moduli_list) cv_lift_coordinate = CRT(comp_coord_residues, moduli_list) if (ev_lift_coordinate > (L / 2)): ev_lift_coordinate -= L if (cv_lift_coordinate > (L / 2)): cv_lift_coordinate -= L exponent_vector_lift.append(ev_lift_coordinate) complement_vector_lift.append(cv_lift_coordinate) return [tuple(exponent_vector_lift), tuple(complement_vector_lift)]
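# Worked check for compatible_system_lift above (a Sage session is assumed,
# since the function relies on Sage's ZZ, lcm, and CRT). Two split primes 3 and
# 5 give moduli 2 and 4, so L = lcm(2, 4) = 4; the coordinate residues
# (1 mod 2, 3 mod 4) lift to 3, which exceeds L/2 = 2 and is re-centered to -1.
system = [((0, 1), (0, 1)), ((0, 3), (0, 3))]
assert compatible_system_lift(system, [3, 5]) == [(0, -1), (0, -1)]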
class EarlyStopping(): def __init__(self, early_stopping_ns: SimpleNamespace, validation_metric: str, validation_k: int, cutoffs: t.List, simple_metrics: t.List): self.logger = logging.get_logger(self.__class__.__name__, pylog.DEBUG) self.validation_metric = validation_metric self.validation_k = validation_k self.cutoffs = cutoffs self.simple_metrics = simple_metrics self.monitor = getattr(early_stopping_ns, 'monitor', self.validation_metric) if (not len(early_stopping_ns.__dict__)): self.active = False else: if (not hasattr(early_stopping_ns, 'patience')): self.patience = 0 else: self.patience = early_stopping_ns.patience if (self.monitor == 'loss'): if (not hasattr(early_stopping_ns, 'mode')): self.mode = 'min' elif (early_stopping_ns.mode == 'auto'): self.mode = 'min' self.metric = False else: if (not hasattr(early_stopping_ns, 'mode')): self.mode = 'max' elif (early_stopping_ns.mode == 'auto'): self.mode = 'max' metric = self.monitor.split('@') if (metric[0].lower() not in [m.lower() for m in self.simple_metrics]): raise Exception('Early stopping metric must be in the list of simple metrics') self.metric_k = (int(metric[1]) if (len(metric) > 1) else self.validation_k) if (self.metric_k not in self.cutoffs): raise Exception('Validation cutoff must be in general cutoff values') self.metric = metric[0] if hasattr(early_stopping_ns, 'min_delta'): self.min_delta = early_stopping_ns.min_delta if hasattr(early_stopping_ns, 'rel_delta'): self.rel_delta = early_stopping_ns.rel_delta if hasattr(early_stopping_ns, 'baseline'): self.baseline = early_stopping_ns.baseline self.verbose = getattr(early_stopping_ns, 'verbose', False) self.active = True def stop(self, losses, results): if (not self.active): return False else: if (not self.metric): observed_quantity = losses[:] else: observed_quantity = [r[self.metric_k]['val_results'][self.metric] for r in results] if (len(observed_quantity) > self.patience): observed_quantity = observed_quantity[:(- (2 + self.patience)):(- 1)] if (self.mode == 'min'): observed_quantity = observed_quantity[::(- 1)] check = [] for p in range((len(observed_quantity) - 1)): if self.check_conditions(observed_quantity[p], observed_quantity[(p + 1)]): check.append(True) else: check.append(False) if self.verbose: self.logger.info(f'Analyzed pair: ({round(observed_quantity[p], 5)}, {round(observed_quantity[(p + 1)], 5)}): {check[(- 1)]}') if self.verbose: self.logger.info(f'Check List: {check}') if (check and all(check)): return True else: return False def check_conditions(self, obs_0: float, obs_1: float): if (hasattr(self, 'min_delta') and hasattr(self, 'rel_delta') and hasattr(self, 'baseline')): return (self.condition_base(obs_0, obs_1) or self.condition_min_delta(obs_0, obs_1) or self.condition_rel_delta(obs_0, obs_1) or self.condition_baseline(obs_0, obs_1)) elif (hasattr(self, 'min_delta') and hasattr(self, 'rel_delta')): return (self.condition_base(obs_0, obs_1) or self.condition_min_delta(obs_0, obs_1) or self.condition_rel_delta(obs_0, obs_1)) elif (hasattr(self, 'min_delta') and hasattr(self, 'baseline')): return (self.condition_base(obs_0, obs_1) or self.condition_min_delta(obs_0, obs_1) or self.condition_baseline(obs_0, obs_1)) elif (hasattr(self, 'baseline') and hasattr(self, 'rel_delta')): return (self.condition_base(obs_0, obs_1) or self.condition_baseline(obs_0, obs_1) or self.condition_rel_delta(obs_0, obs_1)) elif hasattr(self, 'min_delta'): return (self.condition_base(obs_0, obs_1) or self.condition_min_delta(obs_0, obs_1)) elif hasattr(self, 'rel_delta'): 
return (self.condition_base(obs_0, obs_1) or self.condition_rel_delta(obs_0, obs_1)) elif hasattr(self, 'baseline'): return (self.condition_base(obs_0, obs_1) or self.condition_baseline(obs_0, obs_1)) else: return self.condition_base(obs_0, obs_1) def condition_base(self, obs_0: float, obs_1: float): return (obs_1 > obs_0) def condition_min_delta(self, obs_0: float, obs_1: float): return ((obs_0 - obs_1) <= self.min_delta) def condition_rel_delta(self, obs_0: float, obs_1: float): return ((obs_0 - obs_1) <= (obs_0 * self.rel_delta)) def condition_baseline(self, obs_0: float, obs_1: float): if (self.mode == 'min'): return (obs_0 >= self.baseline) elif (self.mode == 'max'): return (obs_0 <= self.baseline) else: raise ValueError('mode option must be in the list [min, max, auto]') def __str__(self): return ', '.join([f'{str(k)}: {str(v)}' for (k, v) in self.__dict__.items()])
() def get(key: str): try: value = cloud_config.get_flag(key) console.print(f'[bold][blue]{key}[/blue] = [italic][green]{value}[/italic][/green]') except KeyError: console.print(f'[red][bold]{key}[/bold] is not a valid config key[/red]')
class JonesDatabase(): def __init__(self): self.root = None def __repr__(self): return "John Jones's table of number fields with bounded ramification and degree <= 6" def _load(self, path, filename): print(filename) i = 0 while filename[i].isalpha(): i += 1 j = (len(filename) - 1) while (filename[j].isalpha() or (filename[j] in ['.', '_'])): j -= 1 S = sorted([eval(z) for z in filename[i:(j + 1)].split('-')]) with open(((path + '/') + filename)) as f: data = f.read() data = data.replace('^', '**') x = PolynomialRing(RationalField(), 'x').gen() v = eval(data) s = tuple(S) if (s in self.root): self.root[s] += v self.root[s].sort() else: self.root[s] = v def _init(self, path): x = PolynomialRing(RationalField(), 'x').gen() self.root = {} self.root[tuple()] = [(x - 1)] if (not os.path.exists(path)): raise IOError(('Path %s does not exist.' % path)) for X in os.listdir(path): if (X[(- 4):] == 'solo'): Z = ((path + '/') + X) print(X) for Y in os.listdir(Z): if (Y[(- 3):] == '.gp'): self._load(Z, Y) os.makedirs(JONESDATA, exist_ok=True) save(self.root, (JONESDATA + '/jones.sobj')) def unramified_outside(self, S, d=None, var='a'): try: S = list(S) except TypeError: S = [S] Z = [] for X in powerset(S): Z += self.ramified_at(X, d=d, var=var) return sorted(Z, key=sortkey) def __getitem__(self, S): return self.get(S) def get(self, S, var='a'): if (self.root is None): self.root = load(DatabaseJones().absolute_filename()) try: S = list(S) except TypeError: S = [S] if (not all((p.is_prime() for p in S))): raise ValueError('S must be a list of primes') S.sort() s = tuple(S) if (s not in self.root): return [] return [NumberField(f, var, check=False) for f in self.root[s]] def ramified_at(self, S, d=None, var='a'): Z = self.get(S, var=var) if (d is not None): Z = [k for k in Z if (k.degree() == d)] return sorted(Z, key=sortkey)
class Max(Module): def __init__(self, dimension=0): super(Max, self).__init__() self.dimension = dimension self._output = None self._indices = None def _getPositiveDimension(self, input): dimension = self.dimension if (dimension < 0): dimension = (input.dim() + dimension) return dimension def _lazyInit(self): if (self._output is None): self._output = self.output.new() if (self._indices is None): self._indices = (torch.cuda.LongTensor() if self.output.is_cuda else torch.LongTensor()) def updateOutput(self, input): self._lazyInit() dimension = self._getPositiveDimension(input) torch.max(input, dimension, out=(self._output, self._indices), keepdim=True) if (input.dim() > 1): self.output.set_(self._output.select(dimension, 0)) else: self.output.set_(self._output) return self.output def updateGradInput(self, input, gradOutput): self._lazyInit() dimension = self._getPositiveDimension(input) if (input.dim() > 1): gradOutputView = addSingletondimension(gradOutput, dimension) else: gradOutputView = gradOutput self.gradInput.resize_as_(input).zero_().scatter_(dimension, self._indices, gradOutputView) return self.gradInput def type(self, type, tensorCache=None): if (type == 'torch.cuda.FloatTensor'): (indices, self._indices) = (self._indices, None) super(Max, self).type(type, tensorCache) self._indices = (indices.type('torch.cuda.LongTensor') if (indices is not None) else None) else: (indices, self._indices) = (self._indices, None) super(Max, self).type(type, tensorCache) self._indices = (indices.long() if (indices is not None) else None) return self def clearState(self): clear(self, '_indices', '_output') return super(Max, self).clearState()
def ResNet18(num_classes=10): return ResNet(BasicBlock, layers=[2, 2, 2, 2], filters=[64, 128, 256, 512], num_classes=num_classes)
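# Hedged usage sketch for the ResNet18 factory above; assumes the ResNet and
# BasicBlock definitions from the same source are in scope, and that the
# variant accepts 32x32 inputs (a CIFAR-style stem; adjust for ImageNet).
import torch

model = ResNet18(num_classes=10)
logits = model(torch.randn(1, 3, 32, 32))  # expected shape: (1, 10)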
class CPPInliner(): def __init__(self, inline_target, inline_val): self.inline_target = inline_target self.inline_val = inline_val def inline(self, code: str): return re.sub(('\\b%s\\b' % re.escape(self.inline_target)), (('(' + self.inline_val) + ')'), code)
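# Self-contained usage check for CPPInliner above: the word-boundary regex only
# replaces exact occurrences of the target, and the value is wrapped in
# parentheses so operator precedence in the surrounding C++ is preserved.
inliner = CPPInliner('N', '1024')
assert inliner.inline('int buf[N]; int NN = N2;') == 'int buf[(1024)]; int NN = N2;'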
def test_interval_real_not_int(): constraint = Interval(RealNotInt, 0, 1, closed='both') assert constraint.is_satisfied_by(1.0) assert (not constraint.is_satisfied_by(1))
class TFViTMAEPreTrainedModel(metaclass=DummyObject): _backends = ['tf'] def __init__(self, *args, **kwargs): requires_backends(self, ['tf'])
def max_pool_3d(input, ds, ignore_border=False): if (input.ndim < 3): raise NotImplementedError('max_pool_3d requires a dimension >= 3') vid_dim = input.ndim if ((ds[1] > 1) or (ds[2] > 1)): frame_shape = input.shape[(- 2):] batch_size = tensor.prod(input.shape[:(- 2)]) batch_size = tensor.shape_padright(batch_size, 1) new_shape = tensor.cast(tensor.join(0, batch_size, tensor.as_tensor([1]), frame_shape), 'int32') input_4D = tensor.reshape(input, new_shape, ndim=4) op = DownsampleFactorMax((ds[1], ds[2]), ignore_border) output = op(input_4D) outshape = tensor.join(0, input.shape[:(- 2)], output.shape[(- 2):]) out = tensor.reshape(output, outshape, ndim=input.ndim) else: out = input if (ds[0] == 1): return out shufl = (0, 2, 3, 4, 1) input_time = out.dimshuffle(shufl) vid_shape = input_time.shape[(- 2):] batch_size = tensor.prod(input_time.shape[:(- 2)]) batch_size = tensor.shape_padright(batch_size, 1) new_shape = tensor.cast(tensor.join(0, batch_size, tensor.as_tensor([1]), vid_shape), 'int32') input_4D_time = tensor.reshape(input_time, new_shape, ndim=4) op = DownsampleFactorMax((1, ds[0]), ignore_border) outtime = op(input_4D_time) outshape = tensor.join(0, input_time.shape[:(- 2)], outtime.shape[(- 2):]) shufl = (0, 4, 1, 2, 3) return tensor.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
def simplify_onnx_model(model: onnx.ModelProto, auto_merge: bool) -> onnx.ModelProto: (model, check) = onnxsim.simplify(model, skip_fuse_bn=True) if (not check): raise RuntimeError('onnx-simplifier optimizations failed') return model
def load_frames_directory_dict(directory: str, pattern: str) -> OpenPoseFrames: frames = {} with os.scandir(directory) as entry_iterator: for entry in entry_iterator: with open(entry.path, 'r') as f: frame_id = get_frame_id(entry.name, pattern=pattern) frame_dict = json.load(f) frames[frame_id] = frame_dict return frames
def _reconstitute(form, length, container, getkey, backend, byteorder, simplify): if isinstance(form, ak.forms.EmptyForm): if (length != 0): raise ValueError(f'EmptyForm node, but the expected length is {length}') return ak.contents.EmptyArray() elif isinstance(form, ak.forms.NumpyForm): dtype = ak.types.numpytype.primitive_to_dtype(form.primitive) raw_array = container[getkey(form, 'data')] real_length = (length * math.prod(form.inner_shape)) data = _from_buffer(backend.nplike, raw_array, dtype=dtype, count=real_length, byteorder=byteorder) if (form.inner_shape != ()): data = backend.nplike.reshape(data, (length, *form.inner_shape)) return ak.contents.NumpyArray(data, parameters=form._parameters, backend=backend) elif isinstance(form, ak.forms.UnmaskedForm): content = _reconstitute(form.content, length, container, getkey, backend, byteorder, simplify) if simplify: make = ak.contents.UnmaskedArray.simplified else: make = ak.contents.UnmaskedArray return make(content, parameters=form._parameters) elif isinstance(form, ak.forms.BitMaskedForm): raw_array = container[getkey(form, 'mask')] if (length is unknown_length): next_length = unknown_length else: next_length = int(math.ceil((length / 8.0))) mask = _from_buffer(backend.index_nplike, raw_array, dtype=index_to_dtype[form.mask], count=next_length, byteorder=byteorder) content = _reconstitute(form.content, length, container, getkey, backend, byteorder, simplify) if simplify: make = ak.contents.BitMaskedArray.simplified else: make = ak.contents.BitMaskedArray return make(ak.index.Index(mask), content, form.valid_when, length, form.lsb_order, parameters=form._parameters) elif isinstance(form, ak.forms.ByteMaskedForm): raw_array = container[getkey(form, 'mask')] mask = _from_buffer(backend.index_nplike, raw_array, dtype=index_to_dtype[form.mask], count=length, byteorder=byteorder) content = _reconstitute(form.content, length, container, getkey, backend, byteorder, simplify) if simplify: make = ak.contents.ByteMaskedArray.simplified else: make = ak.contents.ByteMaskedArray return make(ak.index.Index(mask), content, form.valid_when, parameters=form._parameters) elif isinstance(form, ak.forms.IndexedOptionForm): raw_array = container[getkey(form, 'index')] index = _from_buffer(backend.index_nplike, raw_array, dtype=index_to_dtype[form.index], count=length, byteorder=byteorder) if isinstance(index, PlaceholderArray): next_length = unknown_length else: next_length = (0 if (len(index) == 0) else max(0, (backend.index_nplike.max(index) + 1))) content = _reconstitute(form.content, next_length, container, getkey, backend, byteorder, simplify) if simplify: make = ak.contents.IndexedOptionArray.simplified else: make = ak.contents.IndexedOptionArray return make(ak.index.Index(index), content, parameters=form._parameters) elif isinstance(form, ak.forms.IndexedForm): raw_array = container[getkey(form, 'index')] index = _from_buffer(backend.index_nplike, raw_array, dtype=index_to_dtype[form.index], count=length, byteorder=byteorder) if isinstance(index, PlaceholderArray): next_length = unknown_length else: next_length = (0 if (len(index) == 0) else backend.index_nplike.index_as_shape_item((backend.index_nplike.max(index) + 1))) content = _reconstitute(form.content, next_length, container, getkey, backend, byteorder, simplify) if simplify: make = ak.contents.IndexedArray.simplified else: make = ak.contents.IndexedArray return make(ak.index.Index(index), content, parameters=form._parameters) elif isinstance(form, ak.forms.ListForm): raw_array1 = 
container[getkey(form, 'starts')] raw_array2 = container[getkey(form, 'stops')] starts = _from_buffer(backend.index_nplike, raw_array1, dtype=index_to_dtype[form.starts], count=length, byteorder=byteorder) stops = _from_buffer(backend.index_nplike, raw_array2, dtype=index_to_dtype[form.stops], count=length, byteorder=byteorder) if isinstance(stops, PlaceholderArray): next_length = unknown_length else: reduced_stops = stops[(starts != stops)] next_length = (0 if (len(starts) == 0) else backend.index_nplike.max(reduced_stops)) content = _reconstitute(form.content, next_length, container, getkey, backend, byteorder, simplify) return ak.contents.ListArray(ak.index.Index(starts), ak.index.Index(stops), content, parameters=form._parameters) elif isinstance(form, ak.forms.ListOffsetForm): raw_array = container[getkey(form, 'offsets')] offsets = _from_buffer(backend.index_nplike, raw_array, dtype=index_to_dtype[form.offsets], count=(length + 1), byteorder=byteorder) if isinstance(offsets, PlaceholderArray): next_length = unknown_length else: next_length = (0 if (len(offsets) == 1) else offsets[(- 1)]) content = _reconstitute(form.content, next_length, container, getkey, backend, byteorder, simplify) return ak.contents.ListOffsetArray(ak.index.Index(offsets), content, parameters=form._parameters) elif isinstance(form, ak.forms.RegularForm): next_length = (length * form.size) content = _reconstitute(form.content, next_length, container, getkey, backend, byteorder, simplify) return ak.contents.RegularArray(content, form.size, length, parameters=form._parameters) elif isinstance(form, ak.forms.RecordForm): contents = [_reconstitute(content, length, container, getkey, backend, byteorder, simplify) for content in form.contents] return ak.contents.RecordArray(contents, (None if form.is_tuple else form.fields), length, parameters=form._parameters) elif isinstance(form, ak.forms.UnionForm): raw_array1 = container[getkey(form, 'tags')] raw_array2 = container[getkey(form, 'index')] tags = _from_buffer(backend.index_nplike, raw_array1, dtype=index_to_dtype[form.tags], count=length, byteorder=byteorder) index = _from_buffer(backend.index_nplike, raw_array2, dtype=index_to_dtype[form.index], count=length, byteorder=byteorder) if (isinstance(index, PlaceholderArray) or isinstance(tags, PlaceholderArray)): lengths = ([unknown_length] * len(form.contents)) else: lengths = [] for tag in range(len(form.contents)): selected_index = index[(tags == tag)] if (len(selected_index) == 0): lengths.append(0) else: lengths.append((backend.index_nplike.max(selected_index) + 1)) contents = [_reconstitute(content, lengths[i], container, getkey, backend, byteorder, simplify) for (i, content) in enumerate(form.contents)] if simplify: make = ak.contents.UnionArray.simplified else: make = ak.contents.UnionArray return make(ak.index.Index(tags), ak.index.Index(index), contents, parameters=form._parameters) else: raise AssertionError(('unexpected form node type: ' + str(type(form))))
class Polynomial_generic_sparse_cdvf(Polynomial_generic_sparse_cdv, Polynomial_generic_cdvf): pass
def test_serialize_int_float(): obj = MyObject(1.0) assert (obj.float_prop == 1.0) json_obj = obj.to_json() json_obj['float_prop'] = int(json_obj['float_prop']) obj = MyObject.from_json(json_obj) assert (obj.float_prop == 1.0)
class TarIO(ContainerIO.ContainerIO): def __init__(self, tarfile, file): self.fh = open(tarfile, 'rb') while True: s = self.fh.read(512) if (len(s) != 512): raise OSError('unexpected end of tar file') name = s[:100].decode('utf-8') i = name.find('\x00') if (i == 0): raise OSError('cannot find subfile') if (i > 0): name = name[:i] size = int(s[124:135], 8) if (file == name): break self.fh.seek(((size + 511) & (~ 511)), io.SEEK_CUR) super().__init__(self.fh, self.fh.tell(), size) def __enter__(self): return self def __exit__(self, *args): self.close() def close(self): self.fh.close()
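# Hedged usage sketch for TarIO above (it mirrors PIL's TarIO and assumes PIL's
# ContainerIO is importable, as in the original module): build a small tar
# archive, then read one member back through the container interface.
import io
import tarfile

with tarfile.open('demo.tar', 'w') as tf:
    data = b'hello'
    info = tarfile.TarInfo('greeting.txt')
    info.size = len(data)
    tf.addfile(info, io.BytesIO(data))

with TarIO('demo.tar', 'greeting.txt') as member:
    assert member.read() == b'hello'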
class BasicBlock(nn.Sequential): def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias=True, conv3x3=default_conv, norm=default_norm, act=default_act): modules = [] modules.append(conv3x3(in_channels, out_channels, kernel_size, stride=stride, bias=bias)) if (norm is not None): modules.append(norm(out_channels)) if (act is not None): modules.append(act()) super(BasicBlock, self).__init__(*modules)
def iter_rows(rows, col_count): for row in rows: row = tuple(row) (yield (row + (('',) * (col_count - len(row)))))
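# Usage check for iter_rows above: short rows are right-padded with empty
# strings so every yielded tuple has exactly col_count entries.
rows = [('a', 'b'), ('c',)]
assert list(iter_rows(rows, 3)) == [('a', 'b', ''), ('c', '', '')]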
def _assert_override(spy, arg, original, overridden): for (key, value) in {**original, **overridden}.items(): kwargs = spy.call_args[1] assert (kwargs[arg][key] == value) assert all(((key not in kwargs) for key in overridden))