code
stringlengths
101
5.91M
class GANLoss(nn.Module):
    """GAN objective supporting the vanilla and least-squares formulations.

    Args:
        gan_type: 'gan' (BCE-with-logits) or 'lsgan' (MSE).
        real_label_val: target value used for real samples (default 1.0).
        fake_label_val: target value used for fake samples (default 0.0).

    Raises:
        NotImplementedError: if `gan_type` is not one of the supported names.
    """

    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
        super(GANLoss, self).__init__()
        self.gan_type = gan_type
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val
        if self.gan_type == 'gan':
            self.loss = nn.BCEWithLogitsLoss()
        elif self.gan_type == 'lsgan':
            self.loss = nn.MSELoss()
        else:
            raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))

    def get_target_label(self, input, target_is_real):
        """Return a tensor shaped like `input`, filled with the real/fake label value."""
        # torch.full_like says "tensor of this shape filled with v" directly,
        # replacing the zeros_like(...).fill_() two-step of the original.
        target_val = self.real_label_val if target_is_real else self.fake_label_val
        return torch.full_like(input, target_val)

    def forward(self, input, target_is_real):
        """Compute the configured GAN loss for `input` against implicit real/fake labels."""
        target_label = self.get_target_label(input, target_is_real)
        return self.loss(input, target_label)
def btn_eventhandler(obj):
    """Button-click handler: echo the current widget settings, then run sampling."""
    for area in (output, plot_output):
        area.clear_output()
    with output:
        settings = [
            f'SEED: {slider_seed.value}',
            f'Softmax Temperature: {slider_temp.value}',
            f'Top-K: {slider_topk.value}',
            f'Text prompt: {wd_text.value}',
        ]
        print('\n'.join(settings))
    with plot_output:
        sampling(
            prompt=wd_text.value,
            top_k=slider_topk.value,
            softmax_temperature=slider_temp.value,
            seed=slider_seed.value,
        )
def text_featurize(feature_set, transcript, glovemodel, w2vmodel, fastmodel, bert_model):
    """Featurize `transcript` with the featurizer selected by `feature_set`.

    Args:
        feature_set: one of 'nltk_features', 'spacy_features', 'glove_features',
            'w2v_features', 'fast_features', 'text_features', 'grammar_features',
            'bert_features', 'blabla_feature'.
        transcript: text to featurize.
        glovemodel / w2vmodel / fastmodel / bert_model: pre-loaded embedding
            models, each used only by its corresponding feature set.

    Returns:
        (features, labels) where features is a plain list with NaN/inf sanitized.

    Raises:
        ValueError: if `feature_set` is not a supported name (previously this
            fell through and crashed with an unrelated NameError).
    """
    if feature_set == 'nltk_features':
        (features, labels) = nf.nltk_featurize(transcript)
    elif feature_set == 'spacy_features':
        (features, labels) = sf.spacy_featurize(transcript)
    elif feature_set == 'glove_features':
        (features, labels) = gf.glove_featurize(transcript, glovemodel)
    elif feature_set == 'w2v_features':
        (features, labels) = w2v.w2v_featurize(transcript, w2vmodel)
    elif feature_set == 'fast_features':
        (features, labels) = ff.fast_featurize(transcript, fastmodel)
    elif feature_set == 'text_features':
        (features, labels) = textf.text_featurize(transcript)
    elif feature_set == 'grammar_features':
        (features, labels) = grammarf.grammar_featurize(transcript)
    elif feature_set == 'bert_features':
        (features, labels) = bertf.bert_featurize(transcript, bert_model)
    elif feature_set == 'blabla_feature':
        (features, labels) = bbf.blabla_featurize(transcript)
    else:
        raise ValueError('Unknown feature_set: {}'.format(feature_set))
    # Upstream featurizers can emit NaN/inf; sanitize before returning.
    features = np.nan_to_num(np.array(features)).tolist()
    return (features, labels)
class PetOwnerSchema(BaseSchema):
    """Schema for a pet owner using polymorphic nested-field resolution.

    NOTE(review): the field types (TryFrom, ByAttribute, ByType) and the
    Cat/Dog schemas are defined elsewhere; the per-field descriptions below
    are inferred from their names — confirm against their definitions.
    """
    # Presumably resolves each element by trying CatSchema then DogSchema.
    auto_pets = TryFrom([CatSchema, DogSchema], many=True)
    # Presumably picks the schema based on which discriminating attribute is present.
    by_attribute_pets = ByAttribute({'fur_density': CatSchema, 'barking_power': DogSchema}, many=True)
    # Presumably accepts either an email address or a URL for the contact value.
    by_type_contact = ByType([fields.Email(), fields.Url()])
class FunctionWrapperDouble(Repr):
    """Callable that applies a partially-applied function to the input and/or target of a sample pair."""

    def __init__(self, function: Callable, input: bool = True, target: bool = False, *args, **kwargs):
        # Freeze any extra positional/keyword arguments into the function up front.
        self.function = partial(function, *args, **kwargs)
        self.input = input
        self.target = target

    def __call__(self, inp: np.ndarray, tar: dict):
        """Apply the wrapped function to whichever of (inp, tar) was enabled at construction."""
        transformed_inp = self.function(inp) if self.input else inp
        transformed_tar = self.function(tar) if self.target else tar
        return (transformed_inp, transformed_tar)
def get_ray_xshards():
    """Build a RayXShards from a small random ndarray dict; return both for test comparison."""
    import numpy as np
    from bigdl.orca.data import XShards

    arrays = {key: np.random.randn(10, 4) for key in ('x', 'y')}
    shards_on_spark = XShards.partition(arrays)
    shards_on_ray = RayXShards.from_spark_xshards(shards_on_spark)
    return (shards_on_ray, arrays)
def relabel(lines, annotations, file_name):
    """Rewrite the BIO tags in `lines` to match standoff `annotations`.

    Args:
        lines: list where each entry is either falsy (sentence separator) or a
            [tag, start, end, token] row; start/end are character offsets.
        annotations: textbound objects exposing .start, .end, .type and .text.
        file_name: name of the file being processed (not used in messages here).

    Returns:
        The same `lines` list, mutated in place with B-/I- tags.
    """
    global options
    # Map every character offset covered by an annotation to that annotation.
    offset_label = {}
    for tb in annotations:
        for i in range(tb.start, tb.end):
            if (i in offset_label):
                # NOTE(review): this message looks truncated — it probably
                # intended to include file_name and/or the offset.
                print('Warning: overlapping annotations in ', file=sys.stderr)
            offset_label[i] = tb
    prev_label = None
    for (i, l) in enumerate(lines):
        if (not l):
            # Falsy entries separate sentences; break label continuity.
            prev_label = None
            continue
        (tag, start, end, token) = l
        label = None
        # Find the first annotated offset inside this token's span.
        for o in range(start, end):
            if (o in offset_label):
                if (o != start):
                    print(('Warning: annotation-token boundary mismatch: "%s" --- "%s"' % (token, offset_label[o].text)), file=sys.stderr)
                label = offset_label[o].type
                break
        if (label is not None):
            # Same label as previous token continues the entity (I-),
            # otherwise a new entity starts (B-).
            # NOTE(review): prev_label is not cleared for unlabeled tokens, so an
            # entity resuming after a gap with the same type gets I- rather than
            # B- — confirm this is intended.
            if (label == prev_label):
                tag = ('I-' + label)
            else:
                tag = ('B-' + label)
            prev_label = label
        lines[i] = [tag, start, end, token]
    if options.singleclass:
        # Collapse all entity types into one class, keeping the B-/I- prefix.
        for l in lines:
            if (l and (l[0] != 'O')):
                l[0] = (l[0][:2] + options.singleclass)
    return lines
class WordEmbedding(nn.Module):
    """Token-id -> vector lookup table, optionally seeded with pretrained weights."""

    def __init__(self, vocab_size, embd_size, pre_embd_w=None, is_train_embd=False):
        super(WordEmbedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embd_size)
        if pre_embd_w is not None:
            print('pre embedding weight is set')
            # Install the pretrained table; freeze it unless fine-tuning was requested.
            self.embedding.weight = nn.Parameter(pre_embd_w, requires_grad=is_train_embd)

    def forward(self, x):
        """Look up embeddings for a tensor of token indices."""
        return self.embedding(x)
def llm_convert(model, outfile, model_family, outtype='int4', model_format='pth', **kwargs):
    """Convert a checkpoint to BigDL-LLM GGML format.

    model_format 'pth' converts a native PyTorch checkpoint; 'gptq' converts a
    quantized GPTQ llama model. Returns the path of the converted artifact.
    """
    if model_format == 'pth':
        from bigdl.llm.ggml.convert_model import convert_model as ggml_convert_model
        _, filtered = _special_kwarg_check(kwargs=kwargs, check_args=['tmp_path'])
        return ggml_convert_model(input_path=model, output_path=outfile, model_family=model_family, dtype=outtype, **filtered)
    if model_format == 'gptq':
        from bigdl.llm.gptq.convert.convert_gptq_to_ggml import convert_gptq2ggml
        # GPTQ conversion is only implemented for llama/int4.
        invalidInputError((model_family == 'llama') and (outtype == 'int4'), 'Convert GPTQ models should always specify `--model-family llama --dtype int4` in the command line.')
        os.makedirs(outfile, exist_ok=True)
        invalidInputError(os.path.isdir(outfile), 'The output_path {} is not a directory'.format(outfile))
        _, filtered = _special_kwarg_check(kwargs=kwargs, check_args=['tokenizer_path'])
        ggml_name = 'bigdl_llm_{}_{}_from_gptq.bin'.format(model_family, outtype.lower())
        outfile = os.path.join(outfile, ggml_name)
        convert_gptq2ggml(model_path=model, output_path=outfile, tokenizer_path=filtered.get('tokenizer_path'))
        return outfile
    invalidInputError(False, f'Unsupported input model_type: {model_format}')
    return None
class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration for a Trajectory Transformer model.

    Stores the vocabulary/dimension sizes, per-component loss weights
    (action/reward/value), dropout rates, and initializer settings, then defers
    token-id handling to PretrainedConfig.
    """
    model_type = 'trajectory_transformer'
    # Keys excluded when comparing configs at inference time.
    keys_to_ignore_at_inference = ['past_key_values']
    # Canonical-name aliases so generic HF code can read hidden_size etc.
    attribute_map = {'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}

    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs):
        """Store every hyperparameter verbatim; remaining kwargs and token ids go to the base class."""
        self.vocab_size = vocab_size
        # Relative loss weights for the action / reward / value heads.
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        # Dimensionalities of one transition: action, observation, and full transition.
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        # Transformer geometry: layers, heads, embedding width.
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        # Dropout rates: embeddings, attention weights, residual branches.
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
def batch_norm(inputs, training, data_format):
    """Apply batch normalization with the module-level momentum/epsilon constants.

    Args:
        inputs: input tensor.
        training: whether batch statistics should be updated (training mode).
        data_format: 'channels_first' normalizes over axis 1, otherwise axis 3 (NHWC).

    Returns:
        The batch-normalized tensor.
    """
    # Removed a leftover debug print of _BATCH_NORM_DECAY.
    return tf.compat.v1.layers.batch_normalization(
        inputs=inputs,
        axis=1 if data_format == 'channels_first' else 3,
        momentum=_BATCH_NORM_DECAY,
        epsilon=_BATCH_NORM_EPSILON,
        center=True,
        scale=True,
        training=training,
        fused=True)
class TestReproducibility(unittest.TestCase):
    """Check that training from scratch and resuming from a checkpoint yield matching metrics."""

    def _test_reproducibility(self, name, extra_flags=None, delta=0.0001, resume_checkpoint='checkpoint1.pt', max_epoch=3):
        def get_last_log_stats_containing_string(log_records, search_string):
            # Walk backwards so the most recent matching stats line wins.
            # Fix: iterate the log_records argument instead of the closed-over
            # `logs` capture (the original ignored its own parameter).
            for log_record in log_records[::-1]:
                if isinstance(log_record.msg, str) and (search_string in log_record.msg):
                    return json.loads(log_record.msg)
        if extra_flags is None:
            extra_flags = []
        with tempfile.TemporaryDirectory(name) as data_dir:
            with self.assertLogs() as logs:
                test_binaries.create_dummy_data(data_dir)
                test_binaries.preprocess_translation_data(data_dir)
            # First run: train from scratch and record the final train/valid stats.
            with self.assertLogs() as logs:
                test_binaries.train_translation_model(data_dir, 'fconv_iwslt_de_en', (['--dropout', '0.0', '--log-format', 'json', '--log-interval', '1', '--max-epoch', str(max_epoch)] + extra_flags))
            train_log = get_last_log_stats_containing_string(logs.records, 'train_loss')
            valid_log = get_last_log_stats_containing_string(logs.records, 'valid_loss')
            # Second run: resume from an intermediate checkpoint and train to the same point.
            os.rename(os.path.join(data_dir, resume_checkpoint), os.path.join(data_dir, 'checkpoint_last.pt'))
            with self.assertLogs() as logs:
                test_binaries.train_translation_model(data_dir, 'fconv_iwslt_de_en', (['--dropout', '0.0', '--log-format', 'json', '--log-interval', '1', '--max-epoch', str(max_epoch)] + extra_flags))
            train_res_log = get_last_log_stats_containing_string(logs.records, 'train_loss')
            valid_res_log = get_last_log_stats_containing_string(logs.records, 'valid_loss')
            for k in ['train_loss', 'train_ppl', 'train_num_updates', 'train_gnorm']:
                self.assertAlmostEqual(float(train_log[k]), float(train_res_log[k]), delta=delta)
            for k in ['valid_loss', 'valid_ppl', 'valid_num_updates', 'valid_best_loss']:
                self.assertAlmostEqual(float(valid_log[k]), float(valid_res_log[k]), delta=delta)

    def test_reproducibility(self):
        self._test_reproducibility('test_reproducibility')

    # Restored decorator: the original contained the bare no-op tuple
    # ((not torch.cuda.is_available()), 'test requires a GPU'), which is almost
    # certainly a mangled @unittest.skipIf.
    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_reproducibility_fp16(self):
        self._test_reproducibility('test_reproducibility_fp16', ['--fp16', '--fp16-init-scale', '4096'], delta=0.011)

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_reproducibility_memory_efficient_fp16(self):
        self._test_reproducibility('test_reproducibility_memory_efficient_fp16', ['--memory-efficient-fp16', '--fp16-init-scale', '4096'])

    def test_mid_epoch_reproducibility(self):
        self._test_reproducibility('test_mid_epoch_reproducibility', ['--save-interval-updates', '3'], resume_checkpoint='checkpoint_1_3.pt', max_epoch=1)
class QuadkeyTest(TestCase):
    """Unit tests for the quadkey library (parsing, geometry, ancestry, neighbors).

    NOTE(review): several string literals in this file appear to have been
    scrubbed to '' (testInit's three from_str('') calls and testDifference's
    keys/expected set) — the original fixtures must be restored from history
    before these tests can pass; they are preserved as-is here.
    """

    def testInit(self):
        qk = quadkey.from_str('')
        with self.assertRaises(AssertionError):
            qk = quadkey.from_str('')
        with self.assertRaises(AssertionError):
            qk = quadkey.from_str('')

    def testFromGeo(self):
        geo = (40, -105)
        level = 7
        key = quadkey.from_str('0231010')
        self.assertEqual(key, quadkey.from_geo(geo, level))

    def testEquality(self):
        one = quadkey.from_str('00')
        two = quadkey.from_str('00')
        self.assertEqual(one, two)
        three = quadkey.from_str('0')
        self.assertNotEqual(one, three)

    def testChildren(self):
        qk = quadkey.from_str('0')
        self.assertEqual([c.key for c in qk.children()], ['00', '01', '02', '03'])
        # Fix: xrange is Python 2 only; use range.
        qk = quadkey.from_str(''.join(['0' for x in range(23)]))
        self.assertEqual(qk.children(), [])

    def testAncestry(self):
        one = quadkey.from_str('0')
        two = quadkey.from_str('0101')
        self.assertEqual(3, one.is_descendent(two))
        self.assertIsNone(two.is_descendent(one))
        self.assertEqual(3, two.is_ancestor(one))
        three = quadkey.from_str('1')
        self.assertIsNone(three.is_ancestor(one))

    def testNearby(self):
        qk = quadkey.from_str('0')
        self.assertEqual(set(['1', '2', '3']), set(qk.nearby()))

    def testUnwind(self):
        qk = quadkey.from_str('0123')
        self.assertEqual(['0123', '012', '01', '0'], [qk.key for qk in qk.unwind()])

    def testDifference(self):
        _from = quadkey.from_str('')
        _to = quadkey.from_str('')
        diff = set(['', '', '', '', '', ''])
        self.assertEqual(diff, set([qk.key for qk in _to.difference(_from)]))
        self.assertEqual(diff, set([qk.key for qk in _from.difference(_to)]))
def setup_logger(name, save_dir, distributed_rank, filename='log.txt', mode='w'):
    """Configure the root logger; only rank 0 of a distributed job logs.

    When `save_dir` is given, log to `save_dir/filename` (creating the
    directory if needed); otherwise log to stdout.
    """
    if distributed_rank > 0:
        # Non-primary ranks stay silent.
        return
    logging.root.name = name
    logging.root.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
    if save_dir:
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        handler = logging.FileHandler(os.path.join(save_dir, filename), mode=mode)
    else:
        handler = logging.StreamHandler(stream=sys.stdout)
    # The handler configuration is identical either way, so it is shared.
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(formatter)
    logging.root.addHandler(handler)
def test_digits_sigmoid_naive_init():
    """Regression test: seeding FeatureBasedSelection with the first five
    precomputed picks should reproduce the rest of the known ranking/gains.

    NOTE(review): relies on module-level fixtures (X_digits,
    digits_sigmoid_ranking, digits_sigmoid_gains) defined elsewhere in the
    test module.
    """
    model = FeatureBasedSelection(100, 'sigmoid', optimizer='naive', initial_subset=digits_sigmoid_ranking[:5])
    model.fit(X_digits)
    # Presumably the trailing slice excludes positions consumed by the forced
    # initial subset, so only the freely-selected tail is compared — confirm
    # against FeatureBasedSelection's ranking semantics.
    assert_array_equal(model.ranking[:(- 5)], digits_sigmoid_ranking[5:])
    assert_array_almost_equal(model.gains[:(- 5)], digits_sigmoid_gains[5:], 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
class ExperimentRunner(tune.Trainable):
    """Ray Tune Trainable wrapping an RL algorithm, with checkpoint/restore support.

    Heavy objects (environments, policy, replay pool, algorithm) are built
    lazily on the first _train() call so that Tune can construct the Trainable
    cheaply.
    """

    def _setup(self, variant):
        set_seed(variant['run_params']['seed'])
        self._variant = variant
        gpu_options = tf.GPUOptions(allow_growth=True)
        session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        tf.keras.backend.set_session(session)
        self._session = tf.keras.backend.get_session()
        self.train_generator = None
        # Deferred: _build() runs on the first _train() call.
        self._built = False

    def _stop(self):
        tf.reset_default_graph()
        tf.keras.backend.clear_session()

    def _build(self):
        """Construct environments, replay pool, sampler, Q functions, policy and algorithm."""
        variant = copy.deepcopy(self._variant)
        environment_params = variant['environment_params']
        training_environment = self.training_environment = get_environment_from_params(environment_params['training'])
        # Fall back to the training environment when no evaluation env is configured.
        evaluation_environment = self.evaluation_environment = (get_environment_from_params(environment_params['evaluation']) if ('evaluation' in environment_params) else training_environment)
        replay_pool = self.replay_pool = get_replay_pool_from_variant(variant, training_environment)
        sampler = self.sampler = get_sampler_from_variant(variant)
        Qs = self.Qs = get_Q_function_from_variant(variant, training_environment)
        policy = self.policy = get_policy_from_variant(variant, training_environment, Qs)
        initial_exploration_policy = self.initial_exploration_policy = get_policy('UniformPolicy', training_environment)
        # Domain-specific termination/static functions for the model.
        domain = environment_params['training']['domain']
        static_fns = meee.static[domain.lower()]
        self.algorithm = get_algorithm_from_variant(variant=self._variant, training_environment=training_environment, evaluation_environment=evaluation_environment, policy=policy, initial_exploration_policy=initial_exploration_policy, Qs=Qs, pool=replay_pool, static_fns=static_fns, sampler=sampler, session=self._session)
        initialize_tf_variables(self._session, only_uninitialized=True)
        self._built = True

    def _train(self):
        """Advance the training generator one step and return its diagnostics."""
        if not self._built:
            self._build()
        if self.train_generator is None:
            self.train_generator = self.algorithm.train()
        diagnostics = next(self.train_generator)
        return diagnostics

    def _pickle_path(self, checkpoint_dir):
        return os.path.join(checkpoint_dir, 'checkpoint.pkl')

    def _replay_pool_pickle_path(self, checkpoint_dir):
        return os.path.join(checkpoint_dir, 'replay_pool.pkl')

    def _tf_checkpoint_prefix(self, checkpoint_dir):
        return os.path.join(checkpoint_dir, 'checkpoint')

    def _get_tf_checkpoint(self):
        tf_checkpoint = tf.train.Checkpoint(**self.algorithm.tf_saveables)
        return tf_checkpoint

    def picklables(self):
        """Return everything (besides TF variables) needed to reconstruct the trial."""
        return {'variant': self._variant, 'training_environment': self.training_environment, 'evaluation_environment': self.evaluation_environment, 'sampler': self.sampler, 'algorithm': self.algorithm, 'Qs': self.Qs, 'policy_weights': self.policy.get_weights()}

    def _save(self, checkpoint_dir):
        """Checkpoint pickled state, optionally the replay pool, and TF variables."""
        pickle_path = self._pickle_path(checkpoint_dir)
        with open(pickle_path, 'wb') as f:
            # Fix: dump the dict returned by picklables(), not the bound method
            # itself — _restore() indexes the unpickled object as a dict.
            pickle.dump(self.picklables(), f)
        if self._variant['run_params'].get('checkpoint_replay_pool', False):
            self._save_replay_pool(checkpoint_dir)
        tf_checkpoint = self._get_tf_checkpoint()
        tf_checkpoint.save(file_prefix=self._tf_checkpoint_prefix(checkpoint_dir), session=self._session)
        return os.path.join(checkpoint_dir, '')

    def _save_replay_pool(self, checkpoint_dir):
        replay_pool_pickle_path = self._replay_pool_pickle_path(checkpoint_dir)
        self.replay_pool.save_latest_experience(replay_pool_pickle_path)

    def _restore_replay_pool(self, current_checkpoint_dir):
        # Replay experience is saved incrementally, so replay every prior
        # checkpoint's experience in order to rebuild the pool.
        experiment_root = os.path.dirname(current_checkpoint_dir)
        experience_paths = [self._replay_pool_pickle_path(checkpoint_dir) for checkpoint_dir in sorted(glob.iglob(os.path.join(experiment_root, 'checkpoint_*')))]
        for experience_path in experience_paths:
            self.replay_pool.load_experience(experience_path)

    def _restore(self, checkpoint_dir):
        """Rebuild the trial from a checkpoint directory produced by _save()."""
        assert isinstance(checkpoint_dir, str), checkpoint_dir
        checkpoint_dir = checkpoint_dir.rstrip('/')
        with self._session.as_default():
            pickle_path = self._pickle_path(checkpoint_dir)
            with open(pickle_path, 'rb') as f:
                picklable = pickle.load(f)
        training_environment = self.training_environment = picklable['training_environment']
        evaluation_environment = self.evaluation_environment = picklable['evaluation_environment']
        replay_pool = self.replay_pool = get_replay_pool_from_variant(self._variant, training_environment)
        if self._variant['run_params'].get('checkpoint_replay_pool', False):
            self._restore_replay_pool(checkpoint_dir)
        sampler = self.sampler = picklable['sampler']
        Qs = self.Qs = picklable['Qs']
        # The policy graph is rebuilt fresh; only its weights are restored.
        policy = self.policy = get_policy_from_variant(self._variant, training_environment, Qs)
        self.policy.set_weights(picklable['policy_weights'])
        initial_exploration_policy = self.initial_exploration_policy = get_policy('UniformPolicy', training_environment)
        self.algorithm = get_algorithm_from_variant(variant=self._variant, training_environment=training_environment, evaluation_environment=evaluation_environment, policy=policy, initial_exploration_policy=initial_exploration_policy, Qs=Qs, pool=replay_pool, sampler=sampler, session=self._session)
        self.algorithm.__setstate__(picklable['algorithm'].__getstate__())
        tf_checkpoint = self._get_tf_checkpoint()
        status = tf_checkpoint.restore(tf.train.latest_checkpoint(os.path.split(self._tf_checkpoint_prefix(checkpoint_dir))[0]))
        status.assert_consumed().run_restore_ops(self._session)
        initialize_tf_variables(self._session, only_uninitialized=True)
        # Re-sync target networks with the restored Q networks.
        for (Q, Q_target) in zip(self.algorithm._Qs, self.algorithm._Q_targets):
            Q_target.set_weights(Q.get_weights())
        self._built = True
def read_nb_content(cells, mod_name):
    """Map each show_doc'd name found in code cells to the index of the cell containing it.

    Later cells win when the same name appears more than once (same as the
    original's repeated dict assignment).
    """
    return {
        match: idx
        for idx, cell in enumerate(cells)
        if cell['cell_type'] == 'code'
        for match in SHOW_DOC_RE.findall(cell['source'])
    }
def rgb_as_png_binary_bytes(rgb_np_image):
    """Encode an RGB numpy image as PNG and return the raw bytes."""
    buffer = io.BytesIO()
    PIL.Image.fromarray(rgb_np_image, mode='RGB').save(buffer, format='PNG')
    return buffer.getvalue()
def get_wrn(blocks, width_factor, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Create a Wide ResNet of the requested depth, optionally loading pretrained weights.

    Args:
        blocks: network depth; one of 50, 101, 152, 200.
        width_factor: channel-widening multiplier passed to WRN.
        model_name: registry name required when `pretrained` is True.
        pretrained: whether to download and load pretrained weights.
        root: local directory for the pretrained-model store.

    Raises:
        ValueError: on an unsupported depth, or when `pretrained` is requested
            without a `model_name`.
    """
    # Depth -> per-stage block counts, replacing the original if/elif ladder.
    depth_to_layers = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks not in depth_to_layers:
        raise ValueError('Unsupported WRN with number of blocks: {}'.format(blocks))
    layers = depth_to_layers[blocks]
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[width] * count for width, count in zip(channels_per_layers, layers)]
    net = WRN(channels=channels, init_block_channels=init_block_channels, width_factor=width_factor, **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
class ContactReward(abstract_task.AbstractTask):
    """Task emitting a reward whenever sprites from two layer groups overlap.

    `reward_fn` may be a constant or a callable(sprite_0, sprite_1); `condition`
    optionally filters which sprite pairs count; the episode can be scheduled to
    reset a fixed number of steps after the first contact.
    """

    def __init__(self, reward_fn, layers_0, layers_1, condition=None, reset_steps_after_contact=np.inf):
        # Promote a constant reward to a callable for a uniform interface.
        if callable(reward_fn):
            self._reward_fn = reward_fn
        else:
            self._reward_fn = lambda sprite_0, sprite_1: reward_fn
        self._layers_0 = layers_0 if isinstance(layers_0, (list, tuple)) else [layers_0]
        self._layers_1 = layers_1 if isinstance(layers_1, (list, tuple)) else [layers_1]
        if condition is None:
            self._condition = lambda s_agent, s_target, meta_state: True
        elif len(inspect.signature(condition).parameters.values()) == 2:
            # Two-argument conditions ignore meta_state; adapt to the 3-arg form.
            self._condition = lambda s_a, s_t, meta_state: condition(s_a, s_t)
        else:
            self._condition = condition
        self._reset_steps_after_contact = reset_steps_after_contact

    def reset(self, state, meta_state):
        self._steps_until_reset = np.inf

    def reward(self, state, meta_state, step_count):
        """Return (reward, should_reset) for the current state."""
        reward = 0
        group_0 = [sprite for layer in self._layers_0 for sprite in state[layer]]
        group_1 = [sprite for layer in self._layers_1 for sprite in state[layer]]
        for s_0 in group_0:
            for s_1 in group_1:
                if not self._condition(s_0, s_1, meta_state):
                    continue
                if s_0.overlaps_sprite(s_1):
                    reward = self._reward_fn(s_0, s_1)
                    # Start the reset countdown only on the first contact.
                    if self._steps_until_reset == np.inf:
                        self._steps_until_reset = self._reset_steps_after_contact
        self._steps_until_reset -= 1
        return (reward, self._steps_until_reset < 0)
class ModelArguments():
    """Arguments controlling which model/config/tokenizer to load or initialize.

    NOTE(review): every attribute uses dataclasses.field(...) metadata, which
    strongly suggests this class originally carried a @dataclass decorator
    (as in the Hugging Face example scripts) that was lost — confirm and
    restore it; without the decorator the field(...) objects are plain class
    attributes and HfArgumentParser-style parsing will not work.
    """
    model_name_or_path: Optional[str] = field(default=None, metadata={'help': "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."})
    model_type: Optional[str] = field(default=None, metadata={'help': ('If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES))})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    dtype: Optional[str] = field(default='float32', metadata={'help': 'Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`.'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'})
def trades_loss(model, x_natural, y, logits_natural, optimizer, epsilon=0.031, step_size=0.003, perturb_steps=10, clip_min=0.0, clip_max=1.0, beta=1.0, distance='linf'):
    """TRADES adversarial training loss: cross-entropy on clean inputs plus a
    beta-weighted KL term between adversarial and clean predictions.

    Args:
        model: classifier returning logits.
        x_natural: clean input batch (values expected in [clip_min, clip_max]).
        y: ground-truth labels.
        logits_natural: precomputed clean logits used as the fixed KL anchor
            during the inner maximization.
        optimizer: the model optimizer (its gradients are zeroed before the
            final backward pass happens at the caller).
        epsilon / step_size / perturb_steps: inner-attack budget, step and count.
        distance: 'linf' (PGD with sign steps) or 'l2' (normalized-gradient
            ascent on delta); any other value skips the attack entirely.

    Returns:
        (logits_adv, loss): adversarial logits and the combined TRADES loss.
    """
    # NOTE(review): size_average=False is the deprecated spelling of
    # reduction='sum' — confirm and migrate when the torch pin allows.
    criterion_kl = torch.nn.KLDivLoss(size_average=False)
    # Eval mode during the attack so BatchNorm statistics are not polluted.
    model.eval()
    batch_size = len(x_natural)
    # Small random start around the clean input.
    x_adv = (x_natural.detach() + (0.001 * torch.randn(x_natural.shape).cuda().detach()))
    if (distance == 'linf'):
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1), F.softmax(logits_natural.detach(), dim=1))
            grad = torch.autograd.grad(loss_kl, [x_adv])[0]
            # Signed gradient ascent, then project back into the epsilon-ball
            # and the valid pixel range.
            x_adv = (x_adv.detach() + (step_size * torch.sign(grad.detach())))
            x_adv = torch.min(torch.max(x_adv, (x_natural - epsilon)), (x_natural + epsilon))
            x_adv = torch.clamp(x_adv, clip_min, clip_max)
    elif (distance == 'l2'):
        delta = (0.001 * torch.randn(x_natural.shape).cuda().detach())
        delta = Variable(delta.data, requires_grad=True)
        # SGD maximizes the KL by descending its negation.
        optimizer_delta = torch.optim.SGD([delta], lr=((epsilon / perturb_steps) * 2))
        for _ in range(perturb_steps):
            adv = (x_natural + delta)
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                loss = ((- 1) * criterion_kl(F.log_softmax(model(adv), dim=1), F.softmax(logits_natural.detach(), dim=1)))
            loss.backward()
            # Normalize per-sample gradients; re-randomize any zero gradients
            # so the attack does not stall.
            grad_norms = delta.grad.view(batch_size, (- 1)).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view((- 1), 1, 1, 1))
            if (grad_norms == 0).any():
                delta.grad[(grad_norms == 0)] = torch.randn_like(delta.grad[(grad_norms == 0)])
            optimizer_delta.step()
            # Clamp x+delta into [0, 1], then project delta onto the L2 ball.
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable((x_natural + delta), requires_grad=False)
    else:
        # Unknown distance: no attack, just the randomly-perturbed start, clamped.
        x_adv = torch.clamp(x_adv, clip_min, clip_max)
    model.train()
    x_adv = Variable(torch.clamp(x_adv, clip_min, clip_max), requires_grad=False)
    optimizer.zero_grad()
    logits = model(x_natural)
    logits_adv = model(x_adv)
    loss_natural = F.cross_entropy(logits, y)
    # KL summed over the batch, then averaged manually via 1/batch_size.
    loss_robust = ((1.0 / batch_size) * criterion_kl(F.log_softmax(logits_adv, dim=1), F.softmax(logits, dim=1)))
    loss = (loss_natural + (beta * loss_robust))
    return (logits_adv, loss)
def main():
    """Synthesize samples from a registered GAN generator and save them as
    raw images and/or an HTML grid.

    Reads CLI arguments (model name, sample count, truncation, seed, batch
    size, output options), downloads the checkpoint on first use, and writes
    results under `save_dir` (or work_dirs/synthesis).
    """
    args = parse_args()
    if (args.num <= 0):
        return
    # Nothing to produce if neither output form was requested.
    if ((not args.save_raw_synthesis) and (not args.generate_html)):
        return
    if (args.model_name not in MODEL_ZOO):
        raise SystemExit(f'Model `{args.model_name}` is not registered in `models/model_zoo.py`!')
    # Copy so popping 'url' does not mutate the shared registry entry.
    model_config = MODEL_ZOO[args.model_name].copy()
    url = model_config.pop('url')
    if args.save_dir:
        work_dir = args.save_dir
    else:
        work_dir = os.path.join('work_dirs', 'synthesis')
    os.makedirs(work_dir, exist_ok=True)
    job_name = f'{args.model_name}_{args.num}'
    if args.save_raw_synthesis:
        os.makedirs(os.path.join(work_dir, job_name), exist_ok=True)
    print(f'Building generator for model `{args.model_name}` ...')
    generator = build_generator(**model_config)
    synthesis_kwargs = dict(trunc_psi=args.trunc_psi, trunc_layers=args.trunc_layers, randomize_noise=args.randomize_noise)
    print(f'Finish building generator.')
    # NOTE(review): site-specific hardcoded checkpoint cache directory —
    # consider making this configurable.
    os.makedirs('/import/nobackup_mmv_ioannisp/jo001/genforce_models', exist_ok=True)
    checkpoint_path = os.path.join('/import/nobackup_mmv_ioannisp/jo001/genforce_models', (args.model_name + '.pth'))
    print(f'Loading checkpoint from `{checkpoint_path}` ...')
    if (not os.path.exists(checkpoint_path)):
        print(f'  Downloading checkpoint from `{url}` ...')
        subprocess.call(['wget', '--quiet', '-O', checkpoint_path, url])
        print(f'  Finish downloading checkpoint.')
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    # Prefer the EMA-smoothed generator weights when present.
    if ('generator_smooth' in checkpoint):
        generator.load_state_dict(checkpoint['generator_smooth'])
    else:
        generator.load_state_dict(checkpoint['generator'])
    generator = generator.cuda()
    generator.eval()
    print(f'Finish loading checkpoint.')
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    print(f'Synthesizing {args.num} samples ...')
    indices = list(range(args.num))
    if args.generate_html:
        html = HtmlPageVisualizer(grid_size=args.num)
    for batch_idx in tqdm(range(0, args.num, args.batch_size)):
        sub_indices = indices[batch_idx:(batch_idx + args.batch_size)]
        # Sample latent codes for this batch.
        code = torch.randn(len(sub_indices), generator.z_space_dim).cuda()
        with torch.no_grad():
            images = generator(code, **synthesis_kwargs)['image']
            images = postprocess_image(images.detach().cpu().numpy())
        for (sub_idx, image) in zip(sub_indices, images):
            if args.save_raw_synthesis:
                save_path = os.path.join(work_dir, job_name, f'{sub_idx:06d}.jpg')
                save_image(save_path, image)
            if args.generate_html:
                (row_idx, col_idx) = divmod(sub_idx, html.num_cols)
                html.set_cell(row_idx, col_idx, image=image, text=f'Sample {sub_idx:06d}')
    if args.generate_html:
        html.save(os.path.join(work_dir, f'{job_name}.html'))
    print(f'Finish synthesizing {args.num} samples.')
def train(train_loader, model, criterion, optimizer, epoch, args, logger):
    """Run one mixed-precision training epoch of the student/teacher model.

    Returns the epoch's average loss. The teacher sub-module is verified to be
    frozen at the start of the epoch; a sanity warning is logged otherwise.
    """
    batch_time = AverageMeter('Batch Time', ':5.3f')
    data_time = AverageMeter('Data Time', ':5.3f')
    losses = AverageMeter('Loss', ':5.3f')
    lr = ValueMeter('LR', ':5.3f')
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, lr, losses], prefix='Epoch: [{}]'.format(epoch))

    def get_learning_rate(optimizer):
        # All param groups share one LR here; report the first.
        for param_group in optimizer.param_groups:
            return param_group['lr']
    lr.update(get_learning_rate(optimizer))
    model.train()
    # Teacher must stay in eval mode with frozen parameters; warn if any
    # parameter is still trainable. DistributedDataParallel wraps the model,
    # hence the .module indirection in the distributed branch.
    if args.distributed:
        model.module.teacher.eval()
        for (name, param) in model.module.teacher.named_parameters():
            if param.requires_grad:
                logger.info('> Key-encoder Sanity Failed, parameters are not frozen.')
    else:
        model.teacher.eval()
        for (name, param) in model.teacher.named_parameters():
            if param.requires_grad:
                logger.info('> Key-encoder Sanity Failed, parameters are not frozen.')
    end = time.time()
    scaler = torch.cuda.amp.GradScaler(enabled=True)
    for (i, (images, small_patches)) in enumerate(train_loader):
        if (not args.distributed):
            images = images.cuda()
            small_patches = small_patches.cuda()
        data_time.update((time.time() - end))
        with torch.cuda.amp.autocast(enabled=True):
            (logit, label, s_logit, s_label) = model(image=images, small_image=small_patches)
            # Combined loss over full images and small patches.
            loss = (criterion(logit, label) + criterion(s_logit, s_label))
        # NOTE(review): images[0].size(0) is the first sample's leading dim,
        # not the batch size — images.size(0) looks intended; confirm the
        # actual shape of `images` from the loader.
        losses.update(loss.item(), images[0].size(0))
        optimizer.zero_grad()
        # Standard AMP update: scaled backward, unscale+step, scale refresh.
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            progress.display(i, logger)
    return losses.avg
class AnomalyDetector(ABC):
    """Base interface for anomaly detectors.

    Subclasses fit on a series, produce scores, and report anomalous indexes.
    All base implementations are no-ops.
    """

    def fit(self, y):
        """Fit the detector to the series `y`. No-op in the base class."""

    def score(self):
        """Return anomaly scores. No-op in the base class."""

    def anomaly_indexes(self):
        """Return the indexes flagged as anomalous. No-op in the base class."""
def linear_eval_epoch(encoder, classifier, val_loader, val_transform, criterion):
    """Evaluate a linear classifier on top of a frozen encoder for one epoch.

    Returns:
        (epoch_loss, se, sp, icbhi_score, acc) — average loss, sensitivity,
        specificity, their mean (ICBHI score), and overall accuracy.
    """
    epoch_loss = 0.0
    # Per-class true-positive and ground-truth counters sized by dataset.
    if args.dataset == 'ICBHI':
        num_classes = 4
    elif args.dataset == 'SPRS':
        num_classes = 7
    TP = [0] * num_classes
    GT = [0] * num_classes
    classifier.eval()
    encoder.eval()
    with torch.no_grad():
        for data, target, _ in val_loader:
            data, target = data.to(args.device), target.to(args.device)
            output = classifier(encoder(val_transform(data)))
            epoch_loss += criterion(output, target).item()
            _, predicted = torch.max(output, dim=1)
            for cls in range(len(TP)):
                TP[cls] += torch.logical_and(predicted == cls, target == cls).sum().item()
                GT[cls] += (target == cls).sum().item()
    epoch_loss = epoch_loss / len(val_loader)
    # Class 0 is presumably the "normal" class: specificity comes from it,
    # sensitivity from all remaining classes.
    se = sum(TP[1:]) / sum(GT[1:])
    sp = TP[0] / GT[0]
    icbhi_score = (se + sp) / 2
    acc = sum(TP) / sum(GT)
    return (epoch_loss, se, sp, icbhi_score, acc)
def get_num_default_workers():
    """Return the worker count from $NUM_DEFAULT_WORKERS, defaulting to 1 when unset."""
    return int(os.environ.get('NUM_DEFAULT_WORKERS', 1))
def get_model(args):
    """Instantiate a Localizer and move it to the GPU when requested and available."""
    model = Localizer(args)
    use_gpu = torch.cuda.is_available() and args.gpu
    return model.to('cuda') if use_gpu else model
def main():
    """Parse DGP arguments, set up the (optionally distributed) environment, and launch training."""
    parser = utils.add_dgp_parser(utils.prepare_parser())
    config = vars(parser.parse_args())
    utils.dgp_update_config(config)
    print(config)
    rank = 0
    # CUDA-safe multiprocessing requires the 'spawn' start method.
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn', force=True)
    if config['dist']:
        rank, world_size = dist_init(config['port'])
    # Offset the seed by rank so distributed workers draw different streams.
    utils.seed_rng(rank + config['seed'])
    torch.backends.cudnn.benchmark = True
    Trainer(config).run()
def train(args):
    """Main optimization loop for the warp model.

    Builds the model/optimizer/scheduler, streams (source, target) image
    batches either from two independent loaders or from a single paired
    loader, and minimizes the weighted sum of warp, TV-regularization, and
    semantic losses. The main process (local_rank in {-1, 0}) also logs to
    TensorBoard, runs periodic visualizations, and writes checkpoints;
    Ctrl-C saves a final checkpoint instead of aborting.
    """
    # Distributed setup: local_rank == -1 means single-process training.
    if (args.local_rank != (- 1)):
        torch.distributed.init_process_group(backend='nccl')
        torch.cuda.set_device(args.local_rank)
        logger.info(f'process_{args.local_rank} starts training ...')
    device = torch.device(('cuda' if (not args.cpu) else 'cpu'))
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if (not args.cpu):
        torch.cuda.manual_seed(args.seed)
    log_dir = (args.saved_dir / 'tensorboard')
    log_dir.mkdir(exist_ok=True, parents=True)
    writer = SummaryWriter(log_dir=str(log_dir))
    model = _make_model(args, device)
    optimizer = _make_optimizer(args, model)
    scheduler = _make_scheduler(args, optimizer)
    # Two data regimes: independent source/target loaders, or one paired loader.
    if (args.pair_dir is None):
        source_loader_ = _make_data(args, type='source')
        source_loader = iter(source_loader_)
        target_loader_ = _make_data(args, type='target')
        target_loader = iter(target_loader_)
    else:
        assert (args.pair_dir is not None)
        logger.info(f'Using Pair Dataset ...')
        data_loader_ = _make_data(args, type='pair')
        data_loader = iter(data_loader_)
    iteration = (- 1)
    if (args.resume is not None):
        logger.info(f'Resuming from {args.resume}')
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['modelstate'])
        optimizer.load_state_dict(checkpoint['optimstate'])
        iteration = checkpoint['iteration']
    ell_warp_TV_list = []
    ell_warp_list = []
    ell_warp_sem_list = []
    ell_list = []
    model.train()
    try:
        progress_bar = tqdm(range((iteration + 1), args.num_iter))
        for batch_id in progress_bar:
            (source_2, target_2) = (None, None)
            if (args.pair_dir is None):
                # BUG FIX: loader.next() relied on the Python-2-style .next()
                # alias, which modern DataLoader iterators no longer expose;
                # the builtin next() works on both old and new versions.
                try:
                    source = next(source_loader)
                except StopIteration:
                    # Loader exhausted: restart it to iterate indefinitely.
                    source_loader = iter(source_loader_)
                    source = next(source_loader)
                except:
                    # Deliberate best-effort: skip batches that fail to load.
                    continue
                try:
                    target = next(target_loader)
                except StopIteration:
                    target_loader = iter(target_loader_)
                    target = next(target_loader)
                except:
                    continue
                # A list-valued batch carries an extra mask alongside the image.
                if isinstance(source, list):
                    (source, source_2) = source
                    source_2 = source_2.to(device)
                if isinstance(target, list):
                    (target, target_2) = target
                    target_2 = target_2.to(device)
            else:
                try:
                    (source, target) = next(data_loader)
                except StopIteration:
                    data_loader = iter(data_loader_)
                    (source, target) = next(data_loader)
                except:
                    continue
            (_, _, ell_warp, ell_warp_TV, ell_warp_sem) = model(source.to(device), target.to(device), refine_time=args.train_refine_time, image1_mask=source_2, image2_mask=target_2)
            # Total loss: weighted warp + TV regularization + semantic terms.
            ell = (((args.warp_weight * ell_warp) + (args.reg_weight * ell_warp_TV)) + (args.sem_weight * ell_warp_sem))
            # Logging / visualization / checkpointing on the main process only.
            if (args.local_rank in [(- 1), 0]):
                progress_bar.set_description('Iteration: {}/{} warp_loss: {:.5f} reg_loss: {:.5f} sem_loss: {:.5f} Loss: {:.5f} lr: {:.4f}'.format(batch_id, args.num_iter, ell_warp.item(), ell_warp_TV.item(), ell_warp_sem.item(), ell.item(), optimizer.state_dict()['param_groups'][0]['lr']))
                ell_warp_TV_list.append(ell_warp_TV.item())
                ell_warp_list.append(ell_warp.item())
                ell_warp_sem_list.append(ell_warp_sem.item())
                ell_list.append(ell.item())
                writer.add_scalars('Loss', {'Total Loss': ell.item(), 'Warp Loss': ell_warp.item(), 'Warp_TV Loss': ell_warp_TV.item(), 'Warp_Sem Loss': ell_warp_sem.item()}, batch_id)
                if ((not args.no_visual) and ((batch_id % args.visual_interval) == 0)):
                    logger.info('Running Time Visualization ...')
                    model.eval()
                    intermediate_visual(model, args, device, batch_id)
                    model.train()
                if (((batch_id % args.save_checkpoint_interval) == 0) and (batch_id != 0)):
                    checkpoint_path = (args.saved_dir / f'{batch_id}.pth')
                    save_checkpoint(checkpoint_path, model, optimizer, batch_id)
                    logger.info(f'Intermediate save, Model saved at {str(checkpoint_path)}')
                if ((batch_id % args.write_loss_interval) == 0):
                    save_loss(str(args.saved_dir), ell_warp_list, ell_warp_TV_list, ell_warp_sem_list, ell_list, args.warp_weight, args.reg_weight, args.sem_weight)
            optimizer.zero_grad()
            ell.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
            scheduler.step()
    except KeyboardInterrupt:
        logger.info('Catch a KeyboardInterupt')
    # Final save on the main process. NOTE(review): batch_id is unbound if the
    # loop never ran (num_iter <= iteration + 1) — confirm callers avoid that.
    if (args.local_rank in [(- 1), 0]):
        checkpoint_path = (args.saved_dir / f'{batch_id}.pth')
        save_checkpoint(checkpoint_path, model, optimizer, batch_id)
        logger.info(f'Training Done, Model saved at {str(checkpoint_path)}')
class EvalAIAnswerProcessor():
    """Normalize free-form answers the way the EvalAI / VQA evaluation server
    does: tokenization, punctuation stripping, number-word-to-digit mapping,
    article removal, and contraction restoration.
    """

    # Maps apostrophe-less (and otherwise mangled) spellings to their
    # canonical contracted form.
    CONTRACTIONS = {
        'aint': "ain't", 'arent': "aren't", 'cant': "can't", 'couldve': "could've",
        'couldnt': "couldn't", "couldn'tve": "couldn't've", "couldnt've": "couldn't've",
        'didnt': "didn't", 'doesnt': "doesn't", 'dont': "don't", 'hadnt': "hadn't",
        "hadnt've": "hadn't've", "hadn'tve": "hadn't've", 'hasnt': "hasn't",
        'havent': "haven't", 'hed': "he'd", "hed've": "he'd've", "he'dve": "he'd've",
        'hes': "he's", 'howd': "how'd", 'howll': "how'll", 'hows': "how's",
        "Id've": "I'd've", "I'dve": "I'd've", 'Im': "I'm", 'Ive': "I've",
        'isnt': "isn't", 'itd': "it'd", "itd've": "it'd've", "it'dve": "it'd've",
        'itll': "it'll", "let's": "let's", 'maam': "ma'am", 'mightnt': "mightn't",
        "mightnt've": "mightn't've", "mightn'tve": "mightn't've", 'mightve': "might've",
        'mustnt': "mustn't", 'mustve': "must've", 'neednt': "needn't",
        'notve': "not've", 'oclock': "o'clock", 'oughtnt': "oughtn't",
        "ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat": "'ow's'at",
        'shant': "shan't", "shed've": "she'd've", "she'dve": "she'd've",
        "she's": "she's", 'shouldve': "should've", 'shouldnt': "shouldn't",
        "shouldnt've": "shouldn't've", "shouldn'tve": "shouldn't've",
        # BUG FIX: this entry had key and value swapped ("somebody'd": 'somebodyd'),
        # so the apostrophe-less form was never restored.
        'somebodyd': "somebody'd",
        "somebodyd've": "somebody'd've", "somebody'dve": "somebody'd've",
        'somebodyll': "somebody'll", 'somebodys': "somebody's",
        'someoned': "someone'd", "someoned've": "someone'd've",
        "someone'dve": "someone'd've", 'someonell': "someone'll",
        'someones': "someone's", 'somethingd': "something'd",
        "somethingd've": "something'd've", "something'dve": "something'd've",
        'somethingll': "something'll", 'thats': "that's", 'thered': "there'd",
        "thered've": "there'd've", "there'dve": "there'd've", 'therere': "there're",
        'theres': "there's", 'theyd': "they'd", "theyd've": "they'd've",
        "they'dve": "they'd've", 'theyll': "they'll", 'theyre': "they're",
        'theyve': "they've", 'twas': "'twas", 'wasnt': "wasn't",
        "wed've": "we'd've", "we'dve": "we'd've", 'weve': "we've",
        'werent': "weren't", 'whatll': "what'll", 'whatre': "what're",
        'whats': "what's", 'whatve': "what've", 'whens': "when's",
        'whered': "where'd", 'wheres': "where's", 'whereve': "where've",
        'whod': "who'd", "whod've": "who'd've", "who'dve": "who'd've",
        'wholl': "who'll", 'whos': "who's", 'whove': "who've", 'whyll': "why'll",
        'whyre': "why're", 'whys': "why's", 'wont': "won't", 'wouldve': "would've",
        'wouldnt': "wouldn't", "wouldnt've": "wouldn't've",
        "wouldn'tve": "wouldn't've", 'yall': "y'all", "yall'll": "y'all'll",
        "y'allll": "y'all'll", "yall'd've": "y'all'd've", "y'alld've": "y'all'd've",
        "y'all'dve": "y'all'd've", 'youd': "you'd", "youd've": "you'd've",
        "you'dve": "you'd've", 'youll': "you'll", 'youre': "you're",
        'youve': "you've",
    }

    # Spelled-out numbers to digit strings.
    NUMBER_MAP = {
        'none': '0', 'zero': '0', 'one': '1', 'two': '2', 'three': '3',
        'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8',
        'nine': '9', 'ten': '10',
    }

    ARTICLES = ['a', 'an', 'the']

    # NOTE(review): '(?!<=\d)' looks like a typo for the negative lookbehind
    # '(?<!\d)', but it is kept verbatim for score parity with the official
    # VQA evaluation code, which contains the identical pattern.
    PERIOD_STRIP = re.compile('(?!<=\\d)(\\.)(?!\\d)')
    COMMA_STRIP = re.compile('(?<=\\d)(\\,)+(?=\\d)')

    # BUG FIX: the list previously contained '' (empty string) where the
    # canonical table has '@'; str.replace('', ' ') space-separates every
    # character, mangling any answer that contains no space.
    PUNCTUATIONS = [';', '/', '[', ']', '"', '{', '}', '(', ')', '=', '+',
                    '\\', '_', '-', '>', '<', '@', '`', ',', '?', '!']

    def __init__(self, *args, **kwargs):
        pass

    def word_tokenize(self, word):
        """Lower-case, drop commas/question marks, split possessive 's."""
        word = word.lower()
        word = word.replace(',', '').replace('?', '').replace("'s", " 's")
        return word.strip()

    def process_punctuation(self, in_text):
        """Remove or space-replace punctuation, mirroring the VQA eval rules."""
        out_text = in_text
        for p in self.PUNCTUATIONS:
            if ((((p + ' ') in in_text) or ((' ' + p) in in_text))
                    or (re.search(self.COMMA_STRIP, in_text) is not None)):
                out_text = out_text.replace(p, '')
            else:
                out_text = out_text.replace(p, ' ')
        out_text = self.PERIOD_STRIP.sub('', out_text, re.UNICODE)
        return out_text

    def process_digit_article(self, in_text):
        """Map number words to digits, drop articles, restore contractions."""
        out_text = []
        temp_text = in_text.lower().split()
        for word in temp_text:
            # BUG FIX: was NUMBER_MAP.setdefault(word, word), which silently
            # grew the shared class-level dict with every unseen word.
            word = self.NUMBER_MAP.get(word, word)
            if (word not in self.ARTICLES):
                out_text.append(word)
            else:
                pass
        for (word_id, word) in enumerate(out_text):
            if (word in self.CONTRACTIONS):
                out_text[word_id] = self.CONTRACTIONS[word]
        out_text = ' '.join(out_text)
        return out_text

    def __call__(self, item):
        """Fully normalize a raw answer string."""
        item = self.word_tokenize(item)
        item = item.replace('\n', ' ').replace('\t', ' ').strip()
        item = self.process_punctuation(item)
        item = self.process_digit_article(item)
        return item
class DefaultFlowCallback(TrainerCallback):
    """Callback implementing the default control flow: decides when to log,
    evaluate, save, and stop, based on the configured IntervalStrategy."""

    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        # First-step logging, when requested.
        if args.logging_first_step and state.global_step == 1:
            control.should_log = True
        # Step-interval logging.
        if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % args.logging_steps == 0:
            control.should_log = True
        # Step-interval evaluation, honoring the warm-up delay.
        if (args.evaluation_strategy == IntervalStrategy.STEPS
                and state.global_step % args.eval_steps == 0
                and args.eval_delay <= state.global_step):
            control.should_evaluate = True
        # Step-interval checkpointing (save_steps <= 0 disables it).
        if (args.save_strategy == IntervalStrategy.STEPS
                and args.save_steps > 0
                and state.global_step % args.save_steps == 0):
            control.should_save = True
        # Stop once the step budget is exhausted.
        if state.global_step >= state.max_steps:
            control.should_training_stop = True
        return control

    def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        # Epoch-interval logging / evaluation / checkpointing.
        if args.logging_strategy == IntervalStrategy.EPOCH:
            control.should_log = True
        if args.evaluation_strategy == IntervalStrategy.EPOCH and args.eval_delay <= state.epoch:
            control.should_evaluate = True
        if args.save_strategy == IntervalStrategy.EPOCH:
            control.should_save = True
        return control
def Shampoo(model_param, lr=0.1, momentum=0.0, weight_decay=0.0, epsilon=0.0001, update_freq=1):
    """Factory wrapper: build an optim.Shampoo optimizer over model_param
    with the given hyperparameters."""
    hyperparams = {
        'lr': lr,
        'momentum': momentum,
        'weight_decay': weight_decay,
        'epsilon': epsilon,
        'update_freq': update_freq,
    }
    return optim.Shampoo(model_param, **hyperparams)
def mask_inside(mask_a, mask_b):
    """Compute the pairwise IoU matrix between two batches of boolean masks.

    Args:
        mask_a: array of shape (N, H, W) — boolean/integer masks.
        mask_b: array of shape (K, H, W) with the same spatial shape.

    Returns:
        float32 array of shape (N, K) with iou[n, k] = |a_n & b_k| / |a_n | b_k|.

    Raises:
        IndexError: if the spatial shapes of the two mask batches differ.
    """
    if (mask_a.shape[1:] != mask_b.shape[1:]):
        raise IndexError
    # Works on either numpy or cupy arrays, depending on where mask_a lives.
    xp = cuda.get_array_module(mask_a)
    n_mask_a = len(mask_a)
    n_mask_b = len(mask_b)
    iou = xp.empty((n_mask_a, n_mask_b), dtype=xp.float32)
    for (n, m_a) in enumerate(mask_a):
        for (k, m_b) in enumerate(mask_b):
            intersect = xp.bitwise_and(m_a, m_b).sum()
            # BUG FIX: union was computed as bitwise_or(m_b, m_b) — i.e. just
            # the area of m_b — which inflates the IoU; use the true union.
            union = xp.bitwise_or(m_a, m_b).sum()
            iou[(n, k)] = (intersect / union)
    return iou
def original_match(flat_preds, flat_targets, preds_k, targets_k):
    """Greedily map each predicted cluster id to the ground-truth class it
    overlaps most (many-to-one), and relabel the predictions accordingly.

    Args:
        flat_preds: 1-D CUDA tensor of predicted cluster ids.
        flat_targets: 1-D CUDA tensor of ground-truth class ids.
        preds_k: number of predicted clusters.
        targets_k: number of ground-truth classes.

    Returns:
        (relabelled predictions on the same device, {cluster_id: class_id}).
    """
    assert (isinstance(flat_preds, torch.Tensor) and isinstance(flat_targets, torch.Tensor) and flat_preds.is_cuda and flat_targets.is_cuda)
    out_to_gts = {}
    out_to_gts_scores = {}
    for out_c in range(preds_k):
        for gt_c in range(targets_k):
            # True positives if cluster out_c were relabelled as class gt_c.
            tp_score = int(((flat_preds == out_c) * (flat_targets == gt_c)).sum())
            if ((out_c not in out_to_gts) or (tp_score > out_to_gts_scores[out_c])):
                out_to_gts[out_c] = gt_c
                out_to_gts_scores[out_c] = tp_score
    flat_preds_reorder = torch.zeros_like(flat_preds)
    for (k, v) in out_to_gts.items():
        # BUG FIX: previously assigned torch.Tensor([v]) — a float32 CPU
        # tensor — into an integer CUDA tensor; assign the plain int instead.
        flat_preds_reorder[(flat_preds == k)] = v
    return (flat_preds_reorder.to(flat_preds.device), out_to_gts)
class CrashingAlgo():
    """Minimal stub algorithm: collects samples every epoch and never learns.

    Useful as a fixture where only the runner interaction matters.
    """

    def train(self, runner):
        """Drive the runner's epoch loop, requesting samples each epoch."""
        for current_epoch in runner.step_epochs():
            runner.obtain_samples(current_epoch)
class Lga3dFunction(Function):
    """Autograd Function wrapping the GANet 3-D local guided aggregation
    (LGA) CUDA kernels.

    NOTE(review): this is a legacy-style autograd Function (no @staticmethod
    on forward/backward), which requires an older PyTorch calling convention
    — confirm the supported torch version.
    """

    def forward(ctx, input, filters, radius=1):
        # Keep the aggregation radius for the backward pass.
        ctx.radius = radius
        ctx.save_for_backward(input, filters)
        # The CUDA kernels require contiguous memory.
        assert ((input.is_contiguous() == True) and (filters.is_contiguous() == True))
        with torch.cuda.device_of(input):
            (num, channels, depth, height, width) = input.size()
            # Allocate output with the input's dtype/device, zero-initialized.
            output = input.new().resize_(num, channels, depth, height, width).zero_()
            GANet.lga3d_cuda_forward(input, filters, output, radius)
            output = output.contiguous()
        return output

    def backward(ctx, gradOutput):
        (input, filters) = ctx.saved_tensors
        assert (gradOutput.is_contiguous() == True)
        with torch.cuda.device_of(gradOutput):
            (num, channels, depth, height, width) = input.size()
            # fsize: filter-channel dimension of the guidance weights.
            (_, _, fsize, _, _) = filters.size()
            gradInput = gradOutput.new().resize_(num, channels, depth, height, width).zero_()
            gradFilters = gradOutput.new().resize_(num, channels, fsize, height, width).zero_()
            GANet.lga3d_cuda_backward(input, filters, gradOutput, gradInput, gradFilters, ctx.radius)
            gradInput = gradInput.contiguous()
            gradFilters = gradFilters.contiguous()
        # Third None matches the non-tensor `radius` argument of forward.
        return (gradInput, gradFilters, None)
def test_function_with_string_and_vector_string_arg():
    """Overload resolution: list-like string arguments hit the vector
    overload (returns 2); a plain str hits the string overload (returns 3)."""
    cases = ((('A', 'B'), 2), (['A', 'B'], 2), ('A', 3))
    for argument, expected in cases:
        assert m.func_with_string_or_vector_string_arg_overload(argument) == expected
def main(lm_root_dir, dataset_path, **args):
    """Train a KenLM language model and report where it was written.

    `args` must provide 'n_gram' and 'dataset_name'.
    """
    output_path = train(
        lm_dir=lm_root_dir,
        dataset_path=dataset_path,
        n_gram=args['n_gram'],
        dataset_name=args['dataset_name'],
    )
    print(f'done doing training of KenLM check the output folder: {output_path}')
class Res16UNetSN50(Res16UNet50):
    """Res16UNet50 variant using sparse switchable normalization and the
    matching bottleneck block."""
    NORM_TYPE = NormType.SPARSE_SWITCH_NORM
    BLOCK = BottleneckSN
class ResNet(nn.Module):
    """ResNet backbone with a spatial-pyramid-pooling head and lateral
    upsampling path, producing a single dense feature map.

    NOTE(review): forward() unpacks each layer's output as (x, skip), so the
    `block` passed to __init__ must return such a pair — confirm against the
    block implementation.
    """

    def __init__(self, block, layers, feature_channels=128, norm_layer=None):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        # Standard ResNet stem: 7x7 stride-2 conv + BN + ReLU + max pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.spp_channels = feature_channels
        self.spp_level_num = 3
        self.spp_level_channels = (self.spp_channels // self.spp_level_num)
        # One lateral upsample module per stage whose skip is consumed.
        upsample_layers = []
        self.layer1 = self._make_layer(block, 64, layers[0])
        upsample_layers += [_Upsample(self.spp_channels, 64, self.spp_channels, kernel_size=3)]
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        upsample_layers += [_Upsample(self.spp_channels, 128, self.spp_channels, kernel_size=3)]
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        upsample_layers += [_Upsample(self.spp_channels, 256, self.spp_channels, kernel_size=3)]
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.spp = MySpatialPyramidPooling(channels_in=self.inplanes, channels_out=self.spp_channels, level_num=self.spp_level_num, spp_channels=self.spp_channels, level_channels=self.spp_level_channels, grid=(8, 4, 2, 1), bn_momentum=(0.01 / 2))
        # Reversed so upsampling runs deepest-first (layer3's skip first).
        self.upsample_layers = nn.ModuleList(list(reversed(upsample_layers)))
        self.out_feature_channels = feature_channels
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Parameter groups: pretrained backbone vs. freshly-initialized head.
        self.fine_tune = [self.conv1, self.maxpool, self.layer1, self.layer2, self.layer3, self.layer4, self.bn1]
        self.random_init = [self.spp, self.upsample_layers]

    def random_init_params(self):
        # Parameters of the randomly initialized head (SPP + upsampling).
        return chain(*[f.parameters() for f in self.random_init])

    def fine_tune_params(self):
        # Parameters of the (presumably pretrained) backbone — TODO confirm.
        return chain(*[f.parameters() for f in self.fine_tune])

    def _make_layer(self, block, planes, blocks, stride=1):
        norm_layer = self._norm_layer
        downsample = None
        # Projection shortcut when the resolution or channel count changes.
        if ((stride != 1) or (self.inplanes != planes)):
            downsample = nn.Sequential(conv1x1(self.inplanes, planes, stride), norm_layer(planes))
        layers = [block(self.inplanes, planes, stride, downsample, norm_layer)]
        self.inplanes = planes
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # Each stage yields (features, skip) — skips feed the upsample path.
        (x, skip1) = self.layer1(x)
        (x, skip2) = self.layer2(x)
        (x, skip3) = self.layer3(x)
        (x, skip) = self.layer4(x)
        feature = self.spp(skip)
        # Progressively upsample, fusing skips from deep to shallow.
        for (skip, up_module) in zip([skip3, skip2, skip1], self.upsample_layers):
            feature = up_module(feature, skip)
        return feature
class ConvBlock(nn.Module):
    """3x3 convolution optionally followed by BatchNorm and an ELU nonlinearity."""

    def __init__(self, in_channels, out_channels, bn=False, nonlin=True):
        super().__init__()
        self.conv = Conv3x3(in_channels, out_channels)
        # Optional stages stay None when disabled, matching the original layout.
        self.bn = nn.BatchNorm2d(out_channels) if bn else None
        self.nonlin = nn.ELU(inplace=True) if nonlin else None

    def forward(self, x):
        out = self.conv(x)
        for stage in (self.bn, self.nonlin):
            if stage is not None:
                out = stage(out)
        return out
class DistributionalHeadModel(torch.nn.Module):
    """MLP head producing, for each of `output_size` outputs, a vector over
    `n_atoms` distribution atoms."""

    def __init__(self, input_size, layer_sizes, output_size, n_atoms):
        super().__init__()
        self.mlp = MlpModel(input_size, layer_sizes, output_size * n_atoms)
        self._output_size = output_size
        self._n_atoms = n_atoms

    def forward(self, input):
        flat = self.mlp(input)
        # Reshape the flat (output_size * n_atoms) logits into per-output atoms.
        return flat.view(-1, self._output_size, self._n_atoms)
def latest_checkpoint_path(dir_path, regex='G_*.pth'):
    """Return the checkpoint in dir_path matching `regex` with the highest
    numeric content, printing it as a side effect.

    The key concatenates every digit in the full path, matching the original
    behavior. Raises IndexError when no file matches.
    """
    def _numeric_key(path):
        return int(''.join(ch for ch in path if ch.isdigit()))

    candidates = sorted(glob.glob(os.path.join(dir_path, regex)), key=_numeric_key)
    latest = candidates[-1]
    print(latest)
    return latest
def register_lvis_instances(name, metadata, json_file, image_root):
    """Register an LVIS dataset split under `name` in the dataset/metadata
    catalogs, with lazy annotation loading."""
    def _load():
        # Deferred so the (potentially large) JSON is parsed only on first use.
        return load_lvis_json(json_file, image_root, name)

    DatasetCatalog.register(name, _load)
    MetadataCatalog.get(name).set(json_file=json_file, image_root=image_root, evaluator_type='lvis', **metadata)
class Conv2d(fa_constructor.Conv2d):
    """2-D convolution whose quantization layer_config is pinned to type 'usf'."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        # NOTE(review): when the caller passes a dict it is mutated in place
        # (its 'type' key is overwritten) — preserved from the original.
        config = layer_config if layer_config is not None else {}
        config['type'] = 'usf'
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, config)
def import_jax_weights_(model, npz_path, version='model_1'):
    """Load AlphaFold JAX parameters from an .npz archive into `model`, in place.

    Builds a nested "translation" dict mirroring the JAX parameter tree; each
    leaf is a Param wrapping the corresponding PyTorch tensor, tagged with a
    ParamType (presumably describing the layout transform applied by assign()
    — confirm in the Param/assign implementations).

    Args:
        model: the PyTorch AlphaFold model to populate.
        npz_path: path to the JAX parameter archive.
        version: model preset name; controls template and pTM head handling.
    """
    data = np.load(npz_path)
    # --- Leaf constructors: tag each tensor with its parameter type. ---
    LinearWeight = (lambda l: Param(l, param_type=ParamType.LinearWeight))
    LinearBias = (lambda l: Param(l))
    LinearWeightMHA = (lambda l: Param(l, param_type=ParamType.LinearWeightMHA))
    LinearBiasMHA = (lambda b: Param(b, param_type=ParamType.LinearBiasMHA))
    LinearWeightOPM = (lambda l: Param(l, param_type=ParamType.LinearWeightOPM))
    # --- Composite constructors: map a PyTorch module to a JAX-side dict. ---
    LinearParams = (lambda l: {
        'weights': LinearWeight(l.weight),
        'bias': LinearBias(l.bias),
    })
    LayerNormParams = (lambda l: {
        'scale': Param(l.weight),
        'offset': Param(l.bias),
    })
    AttentionParams = (lambda att: {
        'query_w': LinearWeightMHA(att.linear_q.weight),
        'key_w': LinearWeightMHA(att.linear_k.weight),
        'value_w': LinearWeightMHA(att.linear_v.weight),
        'output_w': Param(att.linear_o.weight, param_type=ParamType.LinearMHAOutputWeight),
        'output_b': LinearBias(att.linear_o.bias),
    })
    AttentionGatedParams = (lambda att: dict(**AttentionParams(att), **{
        'gating_w': LinearWeightMHA(att.linear_g.weight),
        'gating_b': LinearBiasMHA(att.linear_g.bias),
    }))
    # Global attention overrides key/value with non-MHA (unsplit) weights.
    GlobalAttentionParams = (lambda att: dict(AttentionGatedParams(att), key_w=LinearWeight(att.linear_k.weight), value_w=LinearWeight(att.linear_v.weight)))
    TriAttParams = (lambda tri_att: {
        'query_norm': LayerNormParams(tri_att.layer_norm),
        'feat_2d_weights': LinearWeight(tri_att.linear.weight),
        'attention': AttentionGatedParams(tri_att.mha),
    })
    TriMulOutParams = (lambda tri_mul: {
        'layer_norm_input': LayerNormParams(tri_mul.layer_norm_in),
        'left_projection': LinearParams(tri_mul.linear_a_p),
        'right_projection': LinearParams(tri_mul.linear_b_p),
        'left_gate': LinearParams(tri_mul.linear_a_g),
        'right_gate': LinearParams(tri_mul.linear_b_g),
        'center_layer_norm': LayerNormParams(tri_mul.layer_norm_out),
        'output_projection': LinearParams(tri_mul.linear_z),
        'gating_linear': LinearParams(tri_mul.linear_g),
    })
    # "Incoming" multiplication swaps the a/b projections vs. "outgoing".
    TriMulInParams = (lambda tri_mul: {
        'layer_norm_input': LayerNormParams(tri_mul.layer_norm_in),
        'left_projection': LinearParams(tri_mul.linear_b_p),
        'right_projection': LinearParams(tri_mul.linear_a_p),
        'left_gate': LinearParams(tri_mul.linear_b_g),
        'right_gate': LinearParams(tri_mul.linear_a_g),
        'center_layer_norm': LayerNormParams(tri_mul.layer_norm_out),
        'output_projection': LinearParams(tri_mul.linear_z),
        'gating_linear': LinearParams(tri_mul.linear_g),
    })
    PairTransitionParams = (lambda pt: {
        'input_layer_norm': LayerNormParams(pt.layer_norm),
        'transition1': LinearParams(pt.linear_1),
        'transition2': LinearParams(pt.linear_2),
    })
    MSAAttParams = (lambda matt: {
        'query_norm': LayerNormParams(matt.layer_norm_m),
        'attention': AttentionGatedParams(matt.mha),
    })
    MSAColAttParams = (lambda matt: {
        'query_norm': LayerNormParams(matt._msa_att.layer_norm_m),
        'attention': AttentionGatedParams(matt._msa_att.mha),
    })
    MSAGlobalAttParams = (lambda matt: {
        'query_norm': LayerNormParams(matt.layer_norm_m),
        'attention': GlobalAttentionParams(matt.global_attention),
    })
    MSAAttPairBiasParams = (lambda matt: dict(**MSAAttParams(matt), **{
        'feat_2d_norm': LayerNormParams(matt.layer_norm_z),
        'feat_2d_weights': LinearWeight(matt.linear_z.weight),
    }))
    IPAParams = (lambda ipa: {
        'q_scalar': LinearParams(ipa.linear_q),
        'kv_scalar': LinearParams(ipa.linear_kv),
        'q_point_local': LinearParams(ipa.linear_q_points),
        'kv_point_local': LinearParams(ipa.linear_kv_points),
        'trainable_point_weights': Param(param=ipa.head_weights, param_type=ParamType.Other),
        'attention_2d': LinearParams(ipa.linear_b),
        'output_projection': LinearParams(ipa.linear_out),
    })
    TemplatePairBlockParams = (lambda b: {
        'triangle_attention_starting_node': TriAttParams(b.tri_att_start),
        'triangle_attention_ending_node': TriAttParams(b.tri_att_end),
        'triangle_multiplication_outgoing': TriMulOutParams(b.tri_mul_out),
        'triangle_multiplication_incoming': TriMulInParams(b.tri_mul_in),
        'pair_transition': PairTransitionParams(b.pair_transition),
    })
    MSATransitionParams = (lambda m: {
        'input_layer_norm': LayerNormParams(m.layer_norm),
        'transition1': LinearParams(m.linear_1),
        'transition2': LinearParams(m.linear_2),
    })
    OuterProductMeanParams = (lambda o: {
        'layer_norm_input': LayerNormParams(o.layer_norm),
        'left_projection': LinearParams(o.linear_1),
        'right_projection': LinearParams(o.linear_2),
        'output_w': LinearWeightOPM(o.linear_out.weight),
        'output_b': LinearBias(o.linear_out.bias),
    })

    def EvoformerBlockParams(b, is_extra_msa=False):
        # Extra-MSA blocks use global column attention; main blocks do not.
        if is_extra_msa:
            col_att_name = 'msa_column_global_attention'
            msa_col_att_params = MSAGlobalAttParams(b.msa_att_col)
        else:
            col_att_name = 'msa_column_attention'
            msa_col_att_params = MSAColAttParams(b.msa_att_col)
        d = {
            'msa_row_attention_with_pair_bias': MSAAttPairBiasParams(b.msa_att_row),
            col_att_name: msa_col_att_params,
            'msa_transition': MSATransitionParams(b.core.msa_transition),
            'outer_product_mean': OuterProductMeanParams(b.core.outer_product_mean),
            'triangle_multiplication_outgoing': TriMulOutParams(b.core.tri_mul_out),
            'triangle_multiplication_incoming': TriMulInParams(b.core.tri_mul_in),
            'triangle_attention_starting_node': TriAttParams(b.core.tri_att_start),
            'triangle_attention_ending_node': TriAttParams(b.core.tri_att_end),
            'pair_transition': PairTransitionParams(b.core.pair_transition),
        }
        return d
    ExtraMSABlockParams = partial(EvoformerBlockParams, is_extra_msa=True)
    FoldIterationParams = (lambda sm: {
        'invariant_point_attention': IPAParams(sm.ipa),
        'attention_layer_norm': LayerNormParams(sm.layer_norm_ipa),
        'transition': LinearParams(sm.transition.layers[0].linear_1),
        'transition_1': LinearParams(sm.transition.layers[0].linear_2),
        'transition_2': LinearParams(sm.transition.layers[0].linear_3),
        'transition_layer_norm': LayerNormParams(sm.transition.layer_norm),
        'affine_update': LinearParams(sm.bb_update.linear),
        'rigid_sidechain': {
            'input_projection': LinearParams(sm.angle_resnet.linear_in),
            'input_projection_1': LinearParams(sm.angle_resnet.linear_initial),
            'resblock1': LinearParams(sm.angle_resnet.layers[0].linear_1),
            'resblock2': LinearParams(sm.angle_resnet.layers[0].linear_2),
            'resblock1_1': LinearParams(sm.angle_resnet.layers[1].linear_1),
            'resblock2_1': LinearParams(sm.angle_resnet.layers[1].linear_2),
            'unnormalized_angles': LinearParams(sm.angle_resnet.linear_out),
        },
    })
    # Stack per-block dicts so they line up with the JAX layer-stack layout.
    tps_blocks = model.template_pair_stack.blocks
    tps_blocks_params = stacked([TemplatePairBlockParams(b) for b in tps_blocks])
    ems_blocks = model.extra_msa_stack.blocks
    ems_blocks_params = stacked([ExtraMSABlockParams(b) for b in ems_blocks])
    evo_blocks = model.evoformer.blocks
    evo_blocks_params = stacked([EvoformerBlockParams(b) for b in evo_blocks])
    # Full translation tree: JAX parameter names -> PyTorch Param leaves.
    translations = {
        'evoformer': {
            'preprocess_1d': LinearParams(model.input_embedder.linear_tf_m),
            'preprocess_msa': LinearParams(model.input_embedder.linear_msa_m),
            'left_single': LinearParams(model.input_embedder.linear_tf_z_i),
            'right_single': LinearParams(model.input_embedder.linear_tf_z_j),
            'prev_pos_linear': LinearParams(model.recycling_embedder.linear),
            'prev_msa_first_row_norm': LayerNormParams(model.recycling_embedder.layer_norm_m),
            'prev_pair_norm': LayerNormParams(model.recycling_embedder.layer_norm_z),
            'pair_activiations': LinearParams(model.input_embedder.linear_relpos),
            'template_embedding': {
                'single_template_embedding': {
                    'embedding2d': LinearParams(model.template_pair_embedder.linear),
                    'template_pair_stack': {'__layer_stack_no_state': tps_blocks_params},
                    'output_layer_norm': LayerNormParams(model.template_pair_stack.layer_norm),
                },
                'attention': AttentionParams(model.template_pointwise_att.mha),
            },
            'extra_msa_activations': LinearParams(model.extra_msa_embedder.linear),
            'extra_msa_stack': ems_blocks_params,
            'template_single_embedding': LinearParams(model.template_angle_embedder.linear_1),
            'template_projection': LinearParams(model.template_angle_embedder.linear_2),
            'evoformer_iteration': evo_blocks_params,
            'single_activations': LinearParams(model.evoformer.linear),
        },
        'structure_module': {
            'single_layer_norm': LayerNormParams(model.structure_module.layer_norm_s),
            'initial_projection': LinearParams(model.structure_module.linear_in),
            'pair_layer_norm': LayerNormParams(model.structure_module.layer_norm_z),
            'fold_iteration': FoldIterationParams(model.structure_module),
        },
        'predicted_lddt_head': {
            'input_layer_norm': LayerNormParams(model.aux_heads.plddt.layer_norm),
            'act_0': LinearParams(model.aux_heads.plddt.linear_1),
            'act_1': LinearParams(model.aux_heads.plddt.linear_2),
            'logits': LinearParams(model.aux_heads.plddt.linear_3),
        },
        'distogram_head': {'half_logits': LinearParams(model.aux_heads.distogram.linear)},
        'experimentally_resolved_head': {'logits': LinearParams(model.aux_heads.experimentally_resolved.linear)},
        'masked_msa_head': {'logits': LinearParams(model.aux_heads.masked_msa.linear)},
    }
    # Template-free presets: drop all template-related translation entries.
    no_templ = ['model_3', 'model_4', 'model_5', 'model_3_ptm', 'model_4_ptm', 'model_5_ptm']
    if (version in no_templ):
        evo_dict = translations['evoformer']
        keys = list(evo_dict.keys())
        for k in keys:
            if ('template_' in k):
                evo_dict.pop(k)
    # pTM presets carry an extra predicted-aligned-error head.
    if ('_ptm' in version):
        translations['predicted_aligned_error_head'] = {'logits': LinearParams(model.aux_heads.tm.linear)}
    flat = _process_translations_dict(translations)
    keys = list(data.keys())
    flat_keys = list(flat.keys())
    # `incorrect`: translation entries with no counterpart in the archive.
    incorrect = [k for k in flat_keys if (k not in keys)]
    # `missing` is computed for debugging only; it is never asserted on.
    missing = [k for k in keys if (k not in flat_keys)]
    assert (len(incorrect) == 0)
    assign(flat, data)
def block_required_error(hf_parser: HfArgumentParser) -> Tuple[(HfArgumentParser, List)]:
    """Make every required parser action optional, recording which were required.

    Returns the (mutated in place) parser together with the list of dest
    names that had been required.
    """
    required = []
    for action in hf_parser._actions:
        if not action.required:
            continue
        required.append(action.dest)
        action.required = False
        # Suppress the default so absent args don't appear in the namespace.
        action.default = SUPPRESS
    return (hf_parser, required)
def test_compute_closest_points():
    """Closest-point queries on a one-triangle scene report the right
    geometry id, primitive id, and projected points."""
    vertices = o3d.core.Tensor([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=o3d.core.float32)
    triangles = o3d.core.Tensor([[0, 1, 2]], dtype=o3d.core.uint32)
    scene = o3d.t.geometry.RaycastingScene()
    geom_id = scene.add_triangles(vertices, triangles)
    queries = o3d.core.Tensor([[0.2, 0.1, 1], [10, 10, 10]], dtype=o3d.core.float32)
    result = scene.compute_closest_points(queries)
    assert (result['geometry_ids'] == geom_id).all()
    assert (result['primitive_ids'] == 0).all()
    expected = np.array([[0.2, 0.1, 0.0], [1, 1, 0]])
    np.testing.assert_allclose(result['points'].numpy(), expected, rtol=1e-06, atol=1e-06)
def check_preference(query):
    """Strip any known preference prompts and chat-role markers from `query`.

    Returns the cleaned query and the indexes (into PREF_PROMPTS) of the
    prompts that were found and removed.
    """
    rm_index = []
    for prompt_idx, prompt in enumerate(PREF_PROMPTS):
        if prompt in query:
            rm_index.append(prompt_idx)
            query = query.replace(prompt, '')
    for marker in ('<|user|>\n', '\n<|assistant|>\n'):
        query = query.replace(marker, '')
    return (query, rm_index)
class MaskedLSTMCellCheckpoint(MaskMixin, nn.LSTMCell):
    """LSTMCell whose weight matrices are gated by learnable masks, with the
    cell computation run under torch.utils.checkpoint to trade compute for
    activation memory."""

    def __init__(self, input_size: int, hidden_size: int, mask_type: str, mask_init_value: float, bypass_sigmoid_grad: bool=False, **kwargs) -> None:
        super().__init__(input_size, hidden_size, **kwargs)
        # Masks cover the input-hidden and hidden-hidden weight matrices only;
        # the biases are left unmasked.
        self.setup_masks(('weight_ih', 'weight_hh'), mask_type, mask_init_value, bypass_sigmoid_grad)

    def _lstm(self, input, hx0, hx1):
        # Raw cell step with the masked weights, split out so the checkpoint
        # machinery can re-run it during the backward pass.
        return torch._VF.lstm_cell(input, (hx0, hx1), self.get_masked_weight('weight_ih'), self.get_masked_weight('weight_hh'), self.bias_ih, self.bias_hh)

    def forward(self, input: Tensor, hx: Optional[Tuple[(Tensor, Tensor)]]=None) -> Tuple[(Tensor, Tensor)]:
        """Run one cell step; `hx` defaults to zero hidden/cell states."""
        self.check_forward_input(input)
        if (hx is None):
            zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
            hx = (zeros, zeros)
        self.check_forward_hidden(input, hx[0], '[0]')
        self.check_forward_hidden(input, hx[1], '[1]')
        # Checkpointing: activations inside _lstm are recomputed on backward.
        return torch.utils.checkpoint.checkpoint(self._lstm, input, *hx)
class JdtLspAnalyzer(Process):
    """Runs an Eclipse JDT language server in a child process and proxies
    requests to it over a multiprocessing Connection.

    The parent sends ``Message`` objects naming a method of this class; the
    analyzer invokes it and, when requested, sends the result back. It is
    used to check whether candidate tokens produced by a generation model
    are "feasible" continuations according to the server's completions.
    """

    def __init__(self, conn: Connection, server_cmd: list[str], proj_path: PathLike, model: ModelType, java8_home: str, verbose: bool=False) -> None:
        super().__init__()
        self.conn = conn                # IPC channel to the parent process
        self.server_cmd = server_cmd    # command line that launches the JDT LS
        self.proj_path = proj_path
        self.java8_home = java8_home
        self.verbose = verbose
        # Monotone document-version counter shared by didOpen/didChange.
        self.counter = itertools.count(0)
        self.model = model
        # Cache slot; not populated anywhere in this class as shown.
        self.mem: dict[(bytes, Optional[list[dict]])] = {}

    def init_lsp(self):
        """Spawn the language-server process and start the LSP client thread."""
        self.process = subprocess.Popen(self.server_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
        assert ((self.process.stdin is not None) and (self.process.stdout is not None))
        self.client = LSPClient(self.process.stdin, self.process.stdout, self.verbose, TIMEOUT_THRESHOULD)
        self.client.start()

    def stop_lsp(self):
        """Shut the server down cleanly and verify it exited with code 0."""
        self.client.shutdown(None)
        self.client.exit(None)
        self.client.join()
        return_code = self.process.wait()
        assert (return_code == 0)

    def run(self) -> None:
        """Process entry point: serve messages until a None sentinel arrives."""
        self.init_lsp()
        while True:
            message: Optional[Message] = self.conn.recv()
            if (message is None):
                break
            assert isinstance(message, Message)
            # Dispatch by method name onto this object.
            result = getattr(self, message.method)(*message.args, **message.kwargs)
            if message.return_result:
                self.conn.send(result)
        self.stop_lsp()
        print('Analyzer terminated')

    def get_diagnostics(self) -> list[spec.ResponseMessage]:
        return self.client.current_diagnostics

    def clear_diagnostics(self):
        self.client.current_diagnostics.clear()

    def init(self) -> spec.ResponseMessage:
        """Perform the LSP `initialize`/`initialized` handshake.

        The payload mirrors what a VSCode-style Java client sends to the JDT
        server; completion capabilities are advertised in full because
        completions are what this analyzer consumes. Returns the server's
        initialize response.
        """
        path = Path(self.proj_path)
        msg = self.client.initialize({
            'processId': self.process.pid,
            'clientInfo': {'name': path.name, 'version': '0.0.0'},
            'locale': 'en',
            'rootPath': str(path.absolute()),
            'rootUri': path.as_uri(),
            # Advertise rich completion-item support.
            'capabilities': spec.ClientCapabilities({'textDocument': {'synchronization': {'dynamicRegistration': True, 'willSave': True, 'willSaveWaitUntil': True, 'didSave': True}, 'completion': {'dynamicRegistration': True, 'contextSupport': True, 'completionItem': {'snippetSupport': True, 'commitCharactersSupport': True, 'documentationFormat': ['markdown', 'plaintext'], 'deprecatedSupport': True, 'preselectSupport': True, 'tagSupport': {'valueSet': [1]}, 'insertReplaceSupport': True, 'resolveSupport': {'properties': ['documentation', 'detail', 'additionalTextEdits']}, 'insertTextModeSupport': {'valueSet': [1, 2]}, 'labelDetailsSupport': True}, 'insertTextMode': 2, 'completionItemKind': {'valueSet': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]}}}}),
            'initializationOptions': {
                'bundles': [],
                'workspaceFolders': [path.as_uri()],
                'settings': {'java': {
                    'jdt': {'ls': {'java': {'home': None}, 'vmargs': '-XX:+UseParallelGC -XX:GCTimeRatio=4 -XX:AdaptiveSizePolicyWeight=90 -Dsun.zip.disableMemoryMapping=true -Xmx1G -Xms100m', 'lombokSupport': {'enabled': True}}},
                    'errors': {'incompleteClasspath': {'severity': 'warning'}},
                    # Pins a JavaSE-1.8 runtime at the configured java8_home.
                    'configuration': {'checkProjectSettingsExclusions': False, 'updateBuildConfiguration': 'interactive', 'maven': {'userSettings': None, 'globalSettings': None, 'notCoveredPluginExecutionSeverity': 'warning'}, 'workspaceCacheLimit': 90, 'runtimes': [{'name': 'JavaSE-1.8', 'path': self.java8_home, 'default': True}]},
                    'trace': {'server': 'off'},
                    'import': {'maven': {'enabled': True}, 'gradle': {'enabled': True, 'wrapper': {'enabled': True}, 'version': None, 'home': None, 'java': {'home': None}, 'offline': {'enabled': False}, 'arguments': None, 'jvmArguments': None, 'user': {'home': None}}, 'exclusions': ['**/node_modules/**', '**/.metadata/**', '**/archetype-resources/**', '**/META-INF/maven/**'], 'generatesMetadataFilesAtProjectRoot': False},
                    'maven': {'downloadSources': False, 'updateSnapshots': False},
                    'eclipse': {'downloadSources': False},
                    'referencesCodeLens': {'enabled': False},
                    'signatureHelp': {'enabled': False, 'description': {'enabled': False}},
                    'implementationsCodeLens': {'enabled': False},
                    'format': {'enabled': True, 'settings': {'url': None, 'profile': None}, 'comments': {'enabled': True}, 'onType': {'enabled': True}, 'insertSpaces': True, 'tabSize': 4},
                    'saveActions': {'organizeImports': False},
                    'project': {'referencedLibraries': ['lib/**/*.jar'], 'importOnFirstTimeStartup': 'automatic', 'importHint': True, 'resourceFilters': ['node_modules', '.git'], 'encoding': 'ignore'},
                    'contentProvider': {'preferred': None},
                    'autobuild': {'enabled': True},
                    'maxConcurrentBuilds': 1,
                    'recommendations': {'dependency': {'analytics': {'show': True}}},
                    # maxResults=0 means "no limit" for completion results.
                    'completion': {'maxResults': 0, 'enabled': True, 'guessMethodArguments': False, 'favoriteStaticMembers': ['org.junit.Assert.*', 'org.junit.Assume.*', 'org.junit.jupiter.api.Assertions.*', 'org.junit.jupiter.api.Assumptions.*', 'org.junit.jupiter.api.DynamicContainer.*', 'org.junit.jupiter.api.DynamicTest.*', 'org.mockito.Mockito.*', 'org.mockito.ArgumentMatchers.*', 'org.mockito.Answers.*'], 'filteredTypes': ['java.awt.*', 'com.sun.*', 'sun.*', 'jdk.*', 'org.graalvm.*', 'io.micrometer.shaded.*'], 'importOrder': ['java', 'javax', 'org', 'com']},
                    'foldingRange': {'enabled': False},
                    'progressReports': {'enabled': False},
                    'codeGeneration': {'hashCodeEquals': {'useJava7Objects': False, 'useInstanceof': False}, 'useBlocks': False, 'generateComments': False, 'toString': {'template': '${object.className} [${member.name()}=${member.value}, ${otherMembers}]', 'codeStyle': 'STRING_CONCATENATION', 'SkipNullValues': False, 'listArrayContents': True, 'limitElements': 0}, 'insertionLocation': 'afterCursor'},
                    'selectionRange': {'enabled': True},
                    'showBuildStatusOnStart': {'enabled': 'off'},
                    'server': {'launchMode': 'Hybrid'},
                    'sources': {'organizeImports': {'starThreshold': 99, 'staticStarThreshold': 99}},
                    'imports': {'gradle': {'wrapper': {'checksums': []}}},
                    'templates': {'fileHeader': [], 'typeComment': []},
                    'references': {'includeAccessors': True, 'includeDecompiledSources': True},
                    'typeHierarchy': {'lazyLoad': False},
                    'settings': {'url': None},
                    'symbols': {'includeSourceMethodDeclarations': False},
                    'quickfix': {'showAt': 'line'},
                    'inlayHints': {'parameterNames': {'enabled': 'literals', 'exclusions': []}}}},
                'extendedClientCapabilities': {'progressReportProvider': True, 'classFileContentsSupport': True, 'overrideMethodsPromptSupport': True, 'hashCodeEqualsPromptSupport': True, 'advancedOrganizeImportsSupport': True, 'generateToStringPromptSupport': True, 'advancedGenerateAccessorsSupport': True, 'generateConstructorsPromptSupport': True, 'generateDelegateMethodsPromptSupport': True, 'advancedExtractRefactoringSupport': True, 'inferSelectionSupport': ['extractMethod', 'extractVariable', 'extractField'], 'moveRefactoringSupport': True, 'clientHoverProvider': True, 'clientDocumentSymbolProvider': True, 'gradleChecksumWrapperPromptSupport': True, 'resolveAdditionalTextEditsSupport': True, 'advancedIntroduceParameterRefactoringSupport': True, 'actionableRuntimeNotificationSupport': True, 'shouldLanguageServerExitOnShutdown': True, 'onCompletionItemSelectedCommand': 'editor.action.triggerParameterHints'},
                'triggerFiles': []},
            'workspaceFolders': [{'uri': path.as_uri(), 'name': path.name}]})
        self.client.initialized(cast(spec.InitializedParams, {}))
        return msg

    def sync(self, text: TextFile):
        # Remember the most recently seen document (used by save()).
        self.active_text = text

    def open(self, text: TextFile):
        """Notify the server that a Java document was opened."""
        self.sync(text)
        self.client.textDocument_didOpen(cast(spec.DidOpenTextDocumentParams, {'textDocument': {'uri': text.path.as_uri(), 'languageId': 'java', 'version': next(self.counter), 'text': text.content}}))

    def change(self, text: TextFile):
        """Push a full-content change of the document to the server."""
        self.sync(text)
        self.client.textDocument_didChange({'textDocument': {'uri': text.path.as_uri(), 'version': next(self.counter)}, 'contentChanges': [{'text': text.content}]})

    def completion(self, params: spec.CompletionParams) -> spec.ResponseMessage:
        return self.client.newCompletion(params)

    def save(self):
        """Send didSave for the document most recently passed to sync()."""
        self.client.textDocument_didSave({'textDocument': {'uri': self.active_text.path.as_uri()}, 'text': self.active_text.content})

    def is_free(self, timeout) -> bool:
        return self.client.is_free(timeout)

    def pruned_decode(self, text_file: TextFile, gen_context: GenerationContext, trying_token_id: int, trying_token: str) -> (bool | str):
        """Tentatively append *trying_token* and ask the server if it is viable.

        Returns True/False for accept/reject, or — when the server proposes
        concrete continuations — the longest common prefix of those
        continuations as a forced next string.
        """
        text_file.add(trying_token)
        self.change(text_file)
        pos = text_file.get_cursor_position()
        result = self.feasible(gen_context.generated_ids, gen_context.generated_tokens, text_file.path.as_uri(), trying_token_id, trying_token, pos)
        if (result == True):
            return True
        elif (result == False):
            return False
        else:
            assert isinstance(result, list)
            assert (len(result) > 0)
            return utils.longest_common_prefix(result)

    def feasible(self, generated_ids: list[int], generated_tokens: list[str], uri: str, token_id: int, token: str, pos: spec.Position) -> (bool | list[str]):
        """Decide whether the token at *pos* matches any server completion.

        Returns True when the server has no opinion (no completions), a list
        of continuation suffixes when at least one completion's target starts
        with its source, and False otherwise.
        """
        completions = self.get_completions(uri, pos)
        context = {'ids': [id for id in generated_ids], 'text': ''.join(generated_tokens), 'new_token': token}
        print(context)
        if (completions is None):
            # Server returned nothing usable; accept by default.
            print('UNKNOWN:', token)
            return True
        # Suffix of each completion target beyond the already-typed source.
        continuations = [target[len(source):] for c in completions if (target := c['target']).startswith((source := c['source']))]
        if (len(continuations) > 0):
            print('Accepted:', token, f'Completions[0]: {continuations[0]}')
            return continuations
        else:
            print('Denied', token)
            return False

    def get_completions(self, uri: str, pos: spec.Position) -> Optional[list[dict]]:
        """Fetch raw completion results for a document position."""
        new_completion_result = self.completion({'textDocument': {'uri': uri}, 'position': pos})
        return new_completion_result['result']
def extract_features(model: Module, loader: DataLoader) -> Tuple[(Tensor, Tensor)]:
    """Run every batch of *loader* through *model* and concatenate the results.

    Returns a pair ``(features, labels)`` where both tensors are the
    per-batch outputs stacked along dimension 0 via ``torch.cat``.
    """
    feature_batches = []
    label_batches = []
    for batch_x, batch_y in loader:
        feature_batches.append(model(batch_x))
        label_batches.append(batch_y)
    return (torch.cat(feature_batches), torch.cat(label_batches))
def test_enums_vs_fangraphs_column_list() -> None:
    """Check the FangraphsPitchingStats enum against the live leaderboard page.

    NOTE(review): the URL literal below was stripped by sanitization — the
    opening quote survives but the address and closing quote are gone — and
    the xpath predicates lost their attribute names (likely ``@class=...``).
    This function is not syntactically valid as rendered; restore the
    Fangraphs custom-leaderboard URL and xpath attributes before use.
    """
    sample_pitching_url = ' sample_pitching_result = requests.get(sample_pitching_url)
    parsed_result = lxml.etree.HTML(sample_pitching_result.content.decode('utf-8'))
    # Collect the unique custom-leaderboard item labels, dropping separators.
    custom_leaderboards_items = sorted(list({x for x in parsed_result.xpath('//ul[="rlbList"]/li[="rlbItem"]/text()') if (x != 'Line Break')}))
    custom_leaderboards_items = sorted([transform_leaderboard_item(x) for x in custom_leaderboards_items])
    # Enum member names, minus the two pseudo-entries.
    current_leaderboard_items = sorted([str(x).split('.')[1] for x in FangraphsPitchingStats.ALL() if (x not in [FangraphsPitchingStats.COMMON, FangraphsPitchingStats.LINE_BREAK])])
    assert (custom_leaderboards_items == current_leaderboard_items)
def make_placement_plot(nodes, pl, scl, dest):
    """Render a bookshelf placement as gnuplot commands.

    Parses the .scl/.nodes/.pl files and writes `<dest>.plt`, which plots
    movable nodes, register-like nodes, and fixed nodes in three styles and
    targets `<dest>.png` as the gnuplot output image.
    """
    parse_bookshelf_scl(scl)
    node_dict = dict()
    parse_bookshelf_nodes(nodes, node_dict)
    parse_bookshelf_pl(pl, node_dict)
    png_name = dest + '.png'
    # Fix: 'with' guarantees the plot file is closed (and flushed) even when
    # a parse/draw step raises; the original leaked the handle on error.
    with open(dest + '.plt', 'w') as f_dest:
        print_gnuplot_header(f_dest, png_name)
        node_list = list(node_dict.values())
        fixed_nodes = [n for n in node_list if n.is_fixed]
        # Nodes whose names start with 'l' are treated as registers —
        # presumably a naming convention of the benchmark; TODO confirm.
        regs = [n for n in node_list if n.name.startswith('l')]
        movable = [n for n in node_list if not n.is_fixed]
        draw_nodes(f_dest, movable, 1, 0.5)
        draw_nodes(f_dest, regs, 2, 0.33)
        draw_nodes(f_dest, fixed_nodes, 3, 0.9)
class BatchExpand(Layer):
    """Broadcast the first input to the shape of the second.

    Given ``inputs = (x, y)``, returns ``x * ones_like(y)`` so that ``x``
    is expanded (via broadcasting) to ``y``'s shape, in ``x``'s dtype.
    """

    def __init__(self, **kwargs):
        super(__class__, self).__init__(**kwargs)

    def call(self, inputs, mask=None):
        x, y = inputs
        # Multiplying by a ones tensor shaped like y broadcasts x up to y.
        return x * K.ones_like(y, dtype=x.dtype)
class CuQuantumContractor():
    """Contract a tensor network with NVIDIA cuQuantum.

    The first call builds a ``cuquantum.Network`` (optionally autotuned);
    subsequent calls merely swap operand data in place and re-contract.
    When ``handle_slicing`` is true, cuquantum performs the slicing itself,
    so the unsliced equation plus an explicit slicing spec are used.
    """

    def __init__(self, tree, handle_slicing=False, autotune=False, **kwargs):
        if handle_slicing:
            # cuquantum does the slicing -> keep the full (unsliced) spec.
            self.eq, self.shapes = tree.get_eq(), tree.get_shapes()
        else:
            self.eq, self.shapes = tree.get_eq_sliced(), tree.get_shapes_sliced()
        if tree.is_complete():
            opt = kwargs.setdefault('optimize', {})
            opt.setdefault('path', tree.get_path())
            if handle_slicing and tree.sliced_inds:
                opt.setdefault('slicing', [(ix, tree.size_dict[ix] - 1) for ix in tree.sliced_inds])
        self.kwargs = kwargs
        # autotune=True means "3 iterations"; an int is used verbatim.
        self.autotune = 3 if autotune is True else autotune
        self.handle = None
        self.network = None

    def setup(self, *arrays):
        """Lazily create the cutensornet handle, Network, and contraction path."""
        from cuquantum import cutensornet as cutn
        from cuquantum import Network, NetworkOptions
        self.handle = cutn.create()
        self.network = Network(self.eq, *arrays, options=NetworkOptions(handle=self.handle))
        self.network.contract_path(**self.kwargs)
        if self.autotune:
            self.network.autotune(iterations=self.autotune)

    def __call__(self, *arrays, check_zero=False, backend=None, progbar=False):
        # These opt_einsum-style options are not supported by this backend.
        assert not check_zero
        assert not progbar
        assert backend is None
        if self.network is None:
            self.setup(*arrays)                    # first call: build everything
        else:
            self.network.reset_operands(*arrays)   # later calls: reuse the plan
        return self.network.contract()

    def __del__(self):
        # Release GPU-side resources in reverse order of acquisition.
        from cuquantum import cutensornet as cutn
        if self.network is not None:
            self.network.free()
        if self.handle is not None:
            cutn.destroy(self.handle)
def get_remote_file_to_local(remote_path, local_path, over_write=False):
    """Copy a remote file to the local filesystem via the JVM backend.

    Thin wrapper around the Scala-side ``getRemoteFileToLocal`` call;
    ``over_write`` controls whether an existing local file is replaced.
    Returns nothing.
    """
    callZooFunc('float', 'getRemoteFileToLocal', remote_path, local_path, over_write)
def save_h5_data_label_normal(h5_filename, data, label, normal, data_dtype='float32', label_dtype='uint8', normal_dtype='float32'):
    """Write point data, normals, and labels as gzip-compressed HDF5 datasets.

    Bug fix: the dtype parameter was spelled ``noral_dtype`` while the body
    referenced ``normal_dtype``, so every call raised NameError when writing
    the 'normal' dataset; the parameter is now named ``normal_dtype``.
    """
    # NOTE(review): no file mode is given; h5py >= 3 requires an explicit
    # mode ('w' to create/truncate) — confirm against the h5py version in use.
    h5_fout = h5py.File(h5_filename)
    h5_fout.create_dataset('data', data=data, compression='gzip', compression_opts=4, dtype=data_dtype)
    h5_fout.create_dataset('normal', data=normal, compression='gzip', compression_opts=4, dtype=normal_dtype)
    # Labels are small; a lighter compression level suffices.
    h5_fout.create_dataset('label', data=label, compression='gzip', compression_opts=1, dtype=label_dtype)
    h5_fout.close()
class CIFAR10MSDInitLayer(nn.Module):
    """Initial multi-scale stage for CIFAR-10 MSDNet.

    One 3x3 conv block per scale: the first keeps resolution (stride 1),
    every later scale downsamples by 2. Forward returns all scale outputs
    (via MultiOutputSequential).
    """

    def __init__(self, in_channels, out_channels):
        super(CIFAR10MSDInitLayer, self).__init__()
        self.scale_blocks = MultiOutputSequential()
        channels = in_channels
        for idx, scale_channels in enumerate(out_channels):
            block = conv3x3_block(in_channels=channels, out_channels=scale_channels, stride=(1 if idx == 0 else 2))
            self.scale_blocks.add_module('scale_block{}'.format(idx + 1), block)
            # Each block feeds the next scale.
            channels = scale_channels

    def forward(self, x):
        return self.scale_blocks(x)
def channel_drop(image):
    """Zero out one randomly chosen RGB channel of *image* (HWC layout)."""
    dtype = image.dtype
    r, g, b = tf.split(image, 3, axis=2)
    blank = tf.zeros_like(r, dtype=dtype)
    # Three candidates, each with a different channel blanked.
    dropped_r = tf.concat([blank, g, b], axis=2)
    dropped_g = tf.concat([r, blank, b], axis=2)
    dropped_b = tf.concat([r, g, blank], axis=2)
    return random_choice([dropped_r, dropped_g, dropped_b], 1)[0]
class QSPCircuit(cirq.Circuit):
    """Quantum Signal Processing circuit on a single qubit.

    Builds the phased Rz/Rx sequence for the given QSP phase angles and
    exposes helpers to evaluate the polynomial responses P(x)/Q(x) from the
    circuit's unitary.
    """

    def __init__(self, phis):
        super(QSPCircuit, self).__init__()
        # Phases are pre-multiplied by -2 so that cirq.rz(phi) realizes the
        # intended QSP phase convention.
        self.phis = (np.array(phis).flatten() * (- 2))
        self.theta = sympy.Symbol('theta')
        self.q = cirq.GridQubit(0, 0)
        self._build_qsp_sequence(self.q)

    def _build_qsp_sequence(self, q):
        # Leading Rz, then alternating Rx(theta)/Rz(phi) for each later phase.
        self.append(cirq.Circuit(cirq.rz(self.phis[0])(q)))
        for phi in self.phis[1:]:
            c = cirq.Circuit(cirq.rx(self.theta)(q), cirq.rz(phi)(q))
            self.append(c)

    def svg(self):
        """Render the circuit as an SVG diagram."""
        return SVGCircuit(self)

    def qsp_response(self, thetas):
        """Combined response Re(P) + i * Re(Q) * sin(theta) for each theta."""
        return (np.real(self.eval_px(thetas)) + ((1j * np.real(self.eval_qx(thetas))) * np.sin(thetas)))

    def eval_px(self, thetas):
        """P(x): the (0, 0) entry of the resolved circuit's unitary per theta."""
        pxs = []
        for theta in np.array(thetas).flatten():
            # The same -2 convention used for the phases applies to theta.
            resolver = cirq.ParamResolver({'theta': (theta * (- 2))})
            u = cirq.resolve_parameters(self, resolver).unitary()
            pxs.append(u[(0, 0)])
        return np.array(pxs)

    def eval_real_px(self, thetas):
        return np.real(self.eval_px(thetas))

    def eval_imag_px(self, thetas):
        return np.imag(self.eval_px(thetas))

    def eval_qx(self, thetas):
        """Q(x): the (0, 1) entry divided by i*sin(theta) per theta."""
        qxs = []
        for theta in np.array(thetas).flatten():
            resolver = cirq.ParamResolver({'theta': (theta * (- 2))})
            u = cirq.resolve_parameters(self, resolver).unitary()
            denom = np.sin(theta)
            if (denom == 0):
                # Guard against division by zero at theta = k*pi.
                denom = 1e-08
            qxs.append((u[(0, 1)] / (1j * denom)))
        return np.array(qxs)
def batch_assign_targets(target_assigner, anchors_batch, gt_box_batch, gt_class_targets_batch):
    """Assign classification/regression targets for every image in a batch.

    A single BoxList of anchors may be passed and is then shared across the
    whole batch. Returns stacked cls/reg targets and weights plus the
    per-image match results (as a plain list).
    """
    if not isinstance(anchors_batch, list):
        # Share one anchor set across all images.
        anchors_batch = len(gt_box_batch) * [anchors_batch]
    if not all(isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
        raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
    if not (len(anchors_batch) == len(gt_box_batch) == len(gt_class_targets_batch)):
        raise ValueError('batch size incompatible with lengths of anchors_batch, gt_box_batch and gt_class_targets_batch.')
    cls_targets_list = []
    cls_weights_list = []
    reg_targets_list = []
    reg_weights_list = []
    match_list = []
    for anchors, gt_boxes, gt_classes in zip(anchors_batch, gt_box_batch, gt_class_targets_batch):
        cls_t, cls_w, reg_t, reg_w, match = target_assigner.assign(anchors, gt_boxes, gt_classes)
        cls_targets_list.append(cls_t)
        cls_weights_list.append(cls_w)
        reg_targets_list.append(reg_t)
        reg_weights_list.append(reg_w)
        match_list.append(match)
    return (tf.stack(cls_targets_list), tf.stack(cls_weights_list), tf.stack(reg_targets_list), tf.stack(reg_weights_list), match_list)
def padded_accuracy(logits, labels):
    """Per-token accuracy with padding positions masked out.

    Returns (correctness, weights): a float tensor marking argmax matches
    and a weight tensor that is 0 wherever the label is 0 (padding).
    """
    with tf.variable_scope('padded_accuracy', values=[logits, labels]):
        logits, labels = _pad_tensors_to_same_length(logits, labels)
        # Label 0 is padding; give those positions zero weight.
        weights = tf.to_float(tf.not_equal(labels, 0))
        predictions = tf.to_int32(tf.argmax(logits, axis=(- 1)))
        targets = tf.to_int32(labels)
        return (tf.to_float(tf.equal(predictions, targets)), weights)
# NOTE(review): '_module()' looks like the remnant of a truncated decorator
# (most likely '@DATASETS.register_module()') — confirm against the original.
_module()
class CityscapesDataset(CocoDataset):
    """COCO-style dataset for Cityscapes instance segmentation, with
    Cityscapes-native txt/png result export and official-script evaluation."""

    CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')

    def _filter_imgs(self, min_size=32):
        """Drop images that are too small or (optionally) have no usable GT."""
        valid_inds = []
        # Images that have at least one annotation.
        ids_with_ann = set((_['image_id'] for _ in self.coco.anns.values()))
        ids_in_cat = set()
        for (i, class_id) in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # Keep only images that carry annotations of the wanted categories.
        ids_in_cat &= ids_with_ann
        valid_img_ids = []
        for (i, img_info) in enumerate(self.data_infos):
            img_id = img_info['id']
            ann_ids = self.coco.getAnnIds(imgIds=[img_id])
            ann_info = self.coco.loadAnns(ann_ids)
            all_iscrowd = all([_['iscrowd'] for _ in ann_info])
            # Filter images with no in-category GT or only crowd regions.
            if (self.filter_empty_gt and ((self.img_ids[i] not in ids_in_cat) or all_iscrowd)):
                continue
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Convert raw COCO annotations into bbox/label/mask arrays.

        Crowd boxes go into bboxes_ignore; degenerate boxes are skipped.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for (i, ann) in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            (x1, y1, w, h) = ann['bbox']
            # Skip empty-area or sub-pixel boxes.
            if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
                continue
            if (ann['category_id'] not in self.cat_ids):
                continue
            bbox = [x1, y1, (x1 + w), (y1 + h)]
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann['segmentation'])
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=img_info['segm_file'])
        return ann

    def results2txt(self, results, outfile_prefix):
        """Dump detections in the Cityscapes txt+png format.

        For each image, writes one '<basename>_pred.txt' listing mask png
        filename, Cityscapes class id, and score per instance.
        """
        try:
            import cityscapesscripts.helpers.labels as CSLabels
        except ImportError:
            # NOTE(review): 'citscapesscripts' typo is preserved from the original message.
            raise ImportError('Please run "pip install citscapesscripts" to install cityscapesscripts first.')
        result_files = []
        os.makedirs(outfile_prefix, exist_ok=True)
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            filename = self.data_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            pred_txt = osp.join(outfile_prefix, (basename + '_pred.txt'))
            (bbox_result, segm_result) = result
            bboxes = np.vstack(bbox_result)
            if isinstance(segm_result, tuple):
                # (segms, mask scores) pair.
                segms = mmcv.concat_list(segm_result[0])
                mask_score = segm_result[1]
            else:
                # Fall back to the bbox score as the mask score.
                segms = mmcv.concat_list(segm_result)
                mask_score = [bbox[(- 1)] for bbox in bboxes]
            labels = [np.full(bbox.shape[0], i, dtype=np.int32) for (i, bbox) in enumerate(bbox_result)]
            labels = np.concatenate(labels)
            assert (len(bboxes) == len(segms) == len(labels))
            num_instances = len(bboxes)
            prog_bar.update()
            with open(pred_txt, 'w') as fout:
                for i in range(num_instances):
                    pred_class = labels[i]
                    classes = self.CLASSES[pred_class]
                    class_id = CSLabels.name2label[classes].id
                    score = mask_score[i]
                    mask = maskUtils.decode(segms[i]).astype(np.uint8)
                    png_filename = osp.join(outfile_prefix, (basename + f'_{i}_{classes}.png'))
                    mmcv.imwrite(mask, png_filename)
                    fout.write(f'''{osp.basename(png_filename)} {class_id} {score}
''')
            result_files.append(pred_txt)
        return result_files

    def format_results(self, results, txtfile_prefix=None):
        """Write results to txt files, creating a temp dir when no prefix given.

        Returns (result_files, tmp_dir); tmp_dir is None when the caller
        supplied txtfile_prefix and is responsible for cleanup otherwise.
        """
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self))
        # NOTE(review): the two assertions below duplicate the ones above verbatim.
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self))
        if (txtfile_prefix is None):
            tmp_dir = tempfile.TemporaryDirectory()
            txtfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2txt(results, txtfile_prefix)
        return (result_files, tmp_dir)

    def evaluate(self, results, metric='bbox', logger=None, outfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluate with the official Cityscapes script and/or COCO metrics."""
        eval_results = dict()
        metrics = (metric.copy() if isinstance(metric, list) else [metric])
        if ('cityscapes' in metrics):
            eval_results.update(self._evaluate_cityscapes(results, outfile_prefix, logger))
            metrics.remove('cityscapes')
        if (len(metrics) > 0):
            # Remaining metrics are handled by a plain CocoDataset view of
            # the same annotation file (class list kept in sync).
            self_coco = CocoDataset(self.ann_file, self.pipeline.transforms, None, self.data_root, self.img_prefix, self.seg_prefix, self.proposal_file, self.test_mode, self.filter_empty_gt)
            self_coco.CLASSES = self.CLASSES
            self_coco.data_infos = self_coco.load_annotations(self.ann_file)
            eval_results.update(self_coco.evaluate(results, metrics, logger, outfile_prefix, classwise, proposal_nums, iou_thrs))
        return eval_results

    def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
        """Run cityscapesscripts instance-level evaluation on dumped results."""
        try:
            import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval
        except ImportError:
            raise ImportError('Please run "pip install citscapesscripts" to install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if (logger is None):
            msg = ('\n' + msg)
        print_log(msg, logger=logger)
        (result_files, tmp_dir) = self.format_results(results, txtfile_prefix)
        if (tmp_dir is None):
            result_dir = osp.join(txtfile_prefix, 'results')
        else:
            result_dir = osp.join(tmp_dir.name, 'results')
        eval_results = {}
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)
        # Configure the official evaluator's global args in place.
        CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
        CSEval.args.predictionPath = os.path.abspath(result_dir)
        CSEval.args.predictionWalk = None
        CSEval.args.JSONOutput = False
        CSEval.args.colorized = False
        CSEval.args.gtInstancesFile = os.path.join(result_dir, 'gtInstances.json')
        CSEval.args.groundTruthSearch = os.path.join(self.img_prefix.replace('leftImg8bit', 'gtFine'), '*/*_gtFine_instanceIds.png')
        groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
        assert len(groundTruthImgList), f'Cannot find ground truth images in {CSEval.args.groundTruthSearch}.'
        predictionImgList = []
        for gt in groundTruthImgList:
            predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
        CSEval_results = CSEval.evaluateImgLists(predictionImgList, groundTruthImgList, CSEval.args)['averages']
        eval_results['mAP'] = CSEval_results['allAp']
        # NOTE(review): the key below was stripped by sanitization (it is the
        # empty string as rendered); presumably an AP@50-style key — confirm.
        eval_results[''] = CSEval_results['allAp50%']
        if (tmp_dir is not None):
            tmp_dir.cleanup()
        return eval_results
def main():
    """CLI entry point: parse args, build config/data/model, then train or test.

    The pipeline is order-dependent: config merging must precede seeding and
    logging setup, and the datamanager must exist before the model (its
    num_train_pids sizes the classifier head).
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='', help='path to config file')
    parser.add_argument('-s', '--sources', type=str, nargs='+', help='source datasets (delimited by space)')
    parser.add_argument('-t', '--targets', type=str, nargs='+', help='target datasets (delimited by space)')
    parser.add_argument('--transforms', type=str, nargs='+', help='data augmentation')
    parser.add_argument('--root', type=str, default='', help='path to data root')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER, help='Modify config options using the command-line')
    args = parser.parse_args()
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    # Command-line opts override both defaults and the config file.
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)
    log_name = ('test.log' if cfg.test.evaluate else 'train.log')
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    # Tee stdout into a timestamped log file.
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))
    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True
    datamanager = build_datamanager(cfg)
    # Registry mapping config model names to their constructor functions.
    model_factory = {
        'resnet50_fc512': resnet50_fc512, 'osnet_x1_0': osnet_x1_0,
        'resnet50_fc512_ms12_a0d1': resnet50_fc512_ms12_a0d1, 'resnet50_fc512_ms12_a0d2': resnet50_fc512_ms12_a0d2, 'resnet50_fc512_ms12_a0d3': resnet50_fc512_ms12_a0d3,
        'resnet50_fc512_ms12_a0d1_domprior': resnet50_fc512_ms12_a0d1_domprior, 'resnet50_fc512_efdmix12_a0d1_domprior': resnet50_fc512_efdmix12_a0d1_domprior,
        'osnet_x1_0_ms23_a0d1': osnet_x1_0_ms23_a0d1, 'osnet_x1_0_ms2_a0d1': osnet_x1_0_ms2_a0d1, 'osnet_x1_0_ms234_a0d1': osnet_x1_0_ms234_a0d1,
        'osnet_x1_0_ms2345_a0d1': osnet_x1_0_ms2345_a0d1, 'osnet_x1_0_ms25_a0d1': osnet_x1_0_ms25_a0d1, 'osnet_x1_0_ms34_a0d1': osnet_x1_0_ms34_a0d1,
        'osnet_x1_0_ms23_a0d2': osnet_x1_0_ms23_a0d2, 'osnet_x1_0_ms23_a0d3': osnet_x1_0_ms23_a0d3,
        'osnet_x1_0_ms23_a0d1_domprior': osnet_x1_0_ms23_a0d1_domprior, 'osnet_x1_0_efdmix23_a0d1_domprior': osnet_x1_0_efdmix23_a0d1_domprior,
        'resnet50_fc512_ms1_a0d1': resnet50_fc512_ms1_a0d1, 'resnet50_fc512_ms123_a0d1': resnet50_fc512_ms123_a0d1,
        'resnet50_fc512_ms1234_a0d1': resnet50_fc512_ms1234_a0d1, 'resnet50_fc512_ms14_a0d1': resnet50_fc512_ms14_a0d1, 'resnet50_fc512_ms23_a0d1': resnet50_fc512_ms23_a0d1,
        'resnet50_fc512_db12': resnet50_fc512_db12, 'osnet_x1_0_db23': osnet_x1_0_db23,
        'resnet50_fc512_efdmix12_a0d1': resnet50_fc512_efdmix12_a0d1, 'resnet50_fc512_efdmix12_a0d2': resnet50_fc512_efdmix12_a0d2, 'resnet50_fc512_efdmix12_a0d3': resnet50_fc512_efdmix12_a0d3,
        'osnet_x1_0_efdmix23_a0d1': osnet_x1_0_efdmix23_a0d1, 'osnet_x1_0_efdmix2_a0d1': osnet_x1_0_efdmix2_a0d1, 'osnet_x1_0_efdmix234_a0d1': osnet_x1_0_efdmix234_a0d1,
        'osnet_x1_0_efdmix2345_a0d1': osnet_x1_0_efdmix2345_a0d1, 'osnet_x1_0_efdmix25_a0d1': osnet_x1_0_efdmix25_a0d1, 'osnet_x1_0_efdmix34_a0d1': osnet_x1_0_efdmix34_a0d1,
        'osnet_x1_0_efdmix23_a0d2': osnet_x1_0_efdmix23_a0d2, 'osnet_x1_0_efdmix23_a0d3': osnet_x1_0_efdmix23_a0d3,
        'resnet50_fc512_efdmix1_a0d1': resnet50_fc512_efdmix1_a0d1, 'resnet50_fc512_efdmix123_a0d1': resnet50_fc512_efdmix123_a0d1,
        'resnet50_fc512_efdmix1234_a0d1': resnet50_fc512_efdmix1234_a0d1, 'resnet50_fc512_efdmix14_a0d1': resnet50_fc512_efdmix14_a0d1,
        'resnet50_fc512_efdmix23_a0d1': resnet50_fc512_efdmix23_a0d1}
    print('Building model: {}'.format(cfg.model.name))
    model = model_factory[cfg.model.name](num_classes=datamanager.num_train_pids, loss=cfg.loss.name, pretrained=cfg.model.pretrained, use_gpu=cfg.use_gpu)
    (num_params, flops) = compute_model_complexity(model, (1, 3, cfg.data.height, cfg.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))
    if (cfg.model.load_weights and check_isfile(cfg.model.load_weights)):
        load_pretrained_weights(model, cfg.model.load_weights)
    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()
    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(optimizer, **lr_scheduler_kwargs(cfg))
    if (cfg.model.resume and check_isfile(cfg.model.resume)):
        # Resuming restores the epoch counter along with model/optim state.
        cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume, model, optimizer=optimizer, scheduler=scheduler)
    print('Building {}-engine for {}-reid'.format(cfg.loss.name, cfg.data.type))
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
    engine.run(**engine_run_kwargs(cfg))
def test_odd_even_two_agents():
    """Step a two-agent FSM env through ODD -> EVEN -> ODD and verify the
    per-agent observation/reward routing and the encode/decode call counts."""
    env = MockFSMEnv()
    # Reset: only the odd agent observes; nothing decoded or rewarded yet.
    assert (env.reset() == ({'odd_agent': np.array([0])}, {}))
    assert (env.current_stage == 'ODD')
    assert (env.agents['odd_agent'].compute_reward_count == 0)
    assert (env.agents['odd_agent'].encode_obs_count == 1)
    assert (env.agents['odd_agent'].decode_action_count == 0)
    assert (env.agents['even_agent'].compute_reward_count == 0)
    assert (env.agents['even_agent'].encode_obs_count == 0)
    assert (env.agents['even_agent'].decode_action_count == 0)
    # First step (odd agent acts): stage flips to EVEN, even agent observes,
    # its reward is None because it has not acted yet.
    step = env.step({'odd_agent': np.array([1])})
    assert (env.current_stage == 'EVEN')
    assert (step.observations == {'even_agent': np.array([(1.0 / 3.0)])})
    assert (step.rewards == {'even_agent': None})
    assert (step.terminations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.truncations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.infos == {'even_agent': {}})
    assert (env.agents['odd_agent'].compute_reward_count == 1)
    assert (env.agents['odd_agent'].encode_obs_count == 1)
    assert (env.agents['odd_agent'].decode_action_count == 1)
    assert (env.agents['even_agent'].compute_reward_count == 0)
    assert (env.agents['even_agent'].encode_obs_count == 1)
    assert (env.agents['even_agent'].decode_action_count == 0)
    # Second step (even agent acts): back to ODD; the odd agent now receives
    # a concrete (0.0) reward for its earlier action.
    step = env.step({'even_agent': np.array([0])})
    assert (env.current_stage == 'ODD')
    assert (step.observations == {'odd_agent': np.array([(2.0 / 3.0)])})
    assert (step.rewards == {'odd_agent': 0.0})
    assert (step.terminations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.truncations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.infos == {'odd_agent': {}})
    assert (env.agents['odd_agent'].compute_reward_count == 1)
    assert (env.agents['odd_agent'].encode_obs_count == 2)
    assert (env.agents['odd_agent'].decode_action_count == 1)
    assert (env.agents['even_agent'].compute_reward_count == 1)
    assert (env.agents['even_agent'].encode_obs_count == 1)
    assert (env.agents['even_agent'].decode_action_count == 1)
def download_url(url: str, dest: str, overwrite: bool=False, pbar: ProgressBar=None, show_progress=True, chunk_size=(1024 * 1024), timeout=4, retries=5) -> None:
    """Stream *url* to *dest* with retries, a timeout, and an optional progress bar.

    Skips the download when *dest* already exists and overwrite is False.
    On a connection error, prints manual-download instructions and exits.
    """
    if (os.path.exists(dest) and (not overwrite)):
        return
    s = requests.Session()
    # NOTE(review): the URL-scheme prefix literal was stripped by sanitization;
    # this line is not valid as rendered. It was presumably
    # s.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries)) — confirm.
    s.mount(' requests.adapters.HTTPAdapter(max_retries=retries))
    u = s.get(url, stream=True, timeout=timeout)
    try:
        file_size = int(u.headers['Content-Length'])
    except:
        # Missing/invalid Content-Length: fall back to downloading without a
        # progress bar (bare except kept from the original; best-effort only).
        show_progress = False
    with open(dest, 'wb') as f:
        nbytes = 0
        if show_progress:
            pbar = progress_bar(range(file_size), auto_update=False, leave=False, parent=pbar)
        try:
            for chunk in u.iter_content(chunk_size=chunk_size):
                nbytes += len(chunk)
                if show_progress:
                    pbar.update(nbytes)
                f.write(chunk)
        except requests.exceptions.ConnectionError as e:
            fname = url.split('/')[(- 1)]
            from fastai.datasets import Config
            data_dir = Config().data_path()
            timeout_txt = f''' Download of {url} has failed after {retries} retries
 Fix the download manually:
 $ mkdir -p {data_dir}
 $ cd {data_dir}
 $ wget -c {url}
 $ tar -zxvf {fname}
 And re-run your code once the download is successful
 '''
            print(timeout_txt)
            import sys
            # Abort the process: the partial file at *dest* is not usable.
            sys.exit(1)
def pad(t: TensorType, paddings: Tuple[(Tuple[(int, int)], ...)], mode: str='constant', value: float=0) -> TensorType:
    """Pad tensor *t*; *paddings* gives a (before, after) pair per dimension.

    Thin delegation to the tensor's own ``pad`` method; *mode* and *value*
    follow that method's semantics ('constant' fills with *value*).
    """
    return t.pad(paddings, mode=mode, value=value)
def image_transform(image_size: int, is_train: bool, mean: Optional[Tuple[(float, ...)]]=None, std: Optional[Tuple[(float, ...)]]=None, resize_longest_max: bool=False, fill_color: int=0):
    """Build the torchvision preprocessing pipeline for CLIP-style models.

    Training uses a RandomResizedCrop; evaluation resizes (optionally capping
    the longest side with padding) and center-crops. Scalar mean/std values
    are broadcast to three channels; defaults come from the OpenAI dataset
    statistics.
    """
    mean = mean or OPENAI_DATASET_MEAN
    std = std or OPENAI_DATASET_STD
    if not isinstance(mean, (list, tuple)):
        mean = (mean,) * 3
    if not isinstance(std, (list, tuple)):
        std = (std,) * 3
    # Collapse a square (H, W) pair to a single int.
    if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
        image_size = image_size[0]
    normalize = Normalize(mean=mean, std=std)
    if is_train:
        return Compose([
            RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
            _convert_to_rgb,
            ToTensor(),
            normalize,
        ])
    if resize_longest_max:
        transforms = [ResizeMaxSize(image_size, fill=fill_color)]
    else:
        transforms = [Resize(image_size, interpolation=InterpolationMode.BICUBIC), CenterCrop(image_size)]
    transforms += [_convert_to_rgb, ToTensor(), normalize]
    return Compose(transforms)
class LabelArray(object):
    """A NaN-initialized ndarray whose axes are indexed by string labels.

    Assigning to a new label grows the corresponding axis on demand while
    preserving previously stored values.
    """

    def __init__(self, dim, labels=None):
        """Create a `dim`-dimensional labeled array.

        `labels`, if given, must be a list of `dim` label lists.
        Raises ValueError when its length does not match `dim`.
        """
        if labels is not None:
            # BUG FIX: original tested `len(dim) != dim`, which raises
            # TypeError for an int `dim` and never validated `labels`.
            if len(labels) != dim:
                # BUG FIX: raising a bare string is illegal in Python 3.
                raise ValueError('The length of labels has to be equal to dim if defined')
            self.labels = deepcopy(labels)
        else:
            self.labels = [[] for _ in range(dim)]
        self.arr = None
        self.update_arr()

    def update_arr(self):
        """Resize `self.arr` to match the label lists, keeping existing data."""
        new_shape = [len(dim_labels) for dim_labels in self.labels]
        if self.arr is not None:
            oldarr = self.arr
            self.arr = np.full(new_shape, np.nan)
            # Copy the old values into the lower-left corner of the new array.
            self.arr[tuple(slice(0, sh_dim, 1) for sh_dim in oldarr.shape)] = oldarr
        else:
            self.arr = np.full(new_shape, np.nan)
        self.shape = self.arr.shape

    def __getitem__(self, label_list):
        """Index with a tuple mixing string labels, ints and slices."""
        index_list = []
        for (dim, label) in enumerate(label_list):
            if isinstance(label, str):
                index_list.append(self.labels[dim].index(label))
            elif isinstance(label, (slice, int)):
                index_list.append(label)
            else:
                raise NotImplementedError
        return self.arr[tuple(index_list)]

    def __setitem__(self, label_list, val):
        """Assign `val`; unseen string labels grow the array automatically."""
        index_list = []
        for (dim, label) in enumerate(label_list):
            if isinstance(label, str):
                if label not in self.labels[dim]:
                    self.labels[dim].append(label)
                    self.update_arr()
                index_list.append(self.labels[dim].index(label))
            elif isinstance(label, (slice, int)):
                index_list.append(label)
            else:
                raise NotImplementedError
        self.arr[tuple(index_list)] = val

    def __str__(self):
        return ((str(self.arr) + '\n\n') + str(self.labels))

    def to_np(self):
        """Return the underlying ndarray."""
        return self.arr

    def from_np(self, array):
        """Replace the underlying ndarray; shape must match current labels.

        BUG FIX: the original subscripted the builtin (`len[label_list]`) and
        compared a list to a shape tuple (always unequal); it also compared
        against `self.arr.shape` instead of the incoming array's shape.
        """
        assert tuple(len(label_list) for label_list in self.labels) == array.shape
        self.arr = array
def read_bart_coref(filename, gold_text):
    """Parse BART SGML-style coreference output, aligning its tokens against
    `gold_text` (a list of gold sentences, each a list of tokens).

    Returns a dict with:
      - 'clusters': cluster id -> list of (sentence, start, end) mention spans
      - 'mentions': (sentence, start, end) span -> cluster id
      - 'text':     the re-tokenized document
    """
    # Alternates between capturing an SGML tag (<...>) and a run of raw text.
    regex = '(<[^>]*>)|([^<]* *)'
    text = [[]]
    mentions = {}
    clusters = defaultdict((lambda : []))
    # Stack of currently open <coref> spans awaiting their closing tag.
    unmatched_mentions = []
    sentence = 0
    word = 0
    # Accumulates fragments when BART splits one gold token into pieces.
    prev = []
    for line in open(filename):
        for (tag, token) in re.findall(regex, line.strip()):
            if (token != ''):
                if ('&amp;' in token):
                    token = '&'.join(token.split('&amp;'))
                if ('&middot;' in token):
                    # Middle-dot entities are dropped entirely.
                    pass
                elif (token != gold_text[sentence][word]):
                    # Token doesn't match gold: start or extend a fragment run
                    # until the concatenation equals the gold token.
                    if (len(prev) == 0):
                        prev.append(token)
                        token = None
                    elif (''.join((prev + [token])) == gold_text[sentence][word]):
                        token = ''.join((prev + [token]))
                        prev = []
                    else:
                        prev.append(token)
                        token = None
                if (token is not None):
                    text[(- 1)].append(token)
                    word += 1
                    if (word == len(gold_text[sentence])):
                        # Finished the current gold sentence; start a new one.
                        word = 0
                        sentence += 1
                        text.append([])
            elif ('<coref' in tag):
                # Tag looks like <coref ... set_N"...>: open mention of cluster N.
                cluster = int(tag.split('set_')[1].split('"')[0])
                unmatched_mentions.append((cluster, sentence, word))
            elif (tag == '</coref>'):
                (cluster, msentence, start) = unmatched_mentions.pop()
                end = word
                if (sentence != msentence):
                    # Mention crossed a sentence boundary: clip to its sentence.
                    end = len(text[msentence])
                if (len(prev) > 0):
                    # A partially-assembled token counts toward the span end.
                    end += 1
                mentions[(msentence, start, end)] = cluster
                clusters[cluster].append((msentence, start, end))
    if (len(text[(- 1)]) == 0):
        text.pop()
    return {'clusters': clusters, 'mentions': mentions, 'text': text}
class TestRayTuneSearchEngine(ZooTestCase):
    """Integration tests for RayTuneSearchEngine on an Orca/Ray cluster."""

    def setup_method(self, method):
        # Fresh local Ray-on-Spark cluster for every test.
        init_orca_context(init_ray_on_spark=True)

    def teardown_method(self, method):
        stop_orca_context()

    def test_numpy_input(self):
        """A search over raw numpy arrays should yield at least one trial."""
        (train_x, train_y, val_x, val_y) = get_np_input()
        data = (train_x, train_y)
        val_data = (val_x, val_y)
        searcher = prepare_searcher(data=data, validation_data=val_data, name='test_ray_numpy_with_val', search_space=create_simple_search_space(), stop=create_stop())
        searcher.run()
        best_trials = searcher.get_best_trials(k=1)
        assert (best_trials is not None)

    def test_searcher_metric(self):
        """Exercise metric plumbing for mse (min), r2 (max) and mae (min):
        sorting order, best-result lookup, running best_* tracking, and the
        iteration count reached before the stop condition fires."""
        (train_x, train_y, val_x, val_y) = get_np_input()
        data = (train_x, train_y)
        val_data = (val_x, val_y)
        # --- mse, minimized ---
        searcher = prepare_searcher(data=data, validation_data=val_data, name='test_searcher_metric_name', metric='mse', metric_mode='min', search_space=create_simple_search_space(), stop=create_stop(float('inf')))
        analysis = searcher.run()
        sorted_results = list(map((lambda x: x.last_result['mse']), RayTuneSearchEngine._get_sorted_trials(analysis.trials, metric='mse', mode='min')))
        assert ('mse' in analysis.trials[0].last_result.keys())
        # Minimized metric: sorted ascending.
        assert all(((sorted_results[i] <= sorted_results[(i + 1)]) for i in range((len(sorted_results) - 1))))
        assert (RayTuneSearchEngine._get_best_result(analysis.trials, metric='mse', mode='min')['mse'] == sorted_results[0])
        # best_mse must track the running minimum of mse.
        assert all(((analysis.trials[i].last_result['mse'] >= analysis.trials[i].last_result['best_mse']) for i in range(len(sorted_results))))
        assert (analysis.trials[0].last_result['iterations_since_restore'] == 1)
        # --- r2, maximized ---
        searcher = prepare_searcher(data=data, validation_data=val_data, name='test_searcher_metric_name', metric='r2', metric_mode='max', search_space=create_simple_search_space(), stop=create_stop(0))
        analysis = searcher.run()
        sorted_results = list(map((lambda x: x.last_result['r2']), RayTuneSearchEngine._get_sorted_trials(analysis.trials, metric='r2', mode='max')))
        assert ('r2' in analysis.trials[0].last_result.keys())
        # Maximized metric: sorted descending.
        assert all(((sorted_results[i] >= sorted_results[(i + 1)]) for i in range((len(sorted_results) - 1))))
        assert (RayTuneSearchEngine._get_best_result(analysis.trials, metric='r2', mode='max')['r2'] == sorted_results[0])
        assert all(((analysis.trials[i].last_result['r2'] <= analysis.trials[i].last_result['best_r2']) for i in range(len(sorted_results))))
        assert (analysis.trials[0].last_result['iterations_since_restore'] == 1)
        # --- mae, minimized; expected to run the full 20 iterations ---
        searcher = prepare_searcher(data=data, validation_data=val_data, name='test_searcher_metric_name', metric='mae', metric_mode='min', search_space=create_simple_search_space(), stop=create_stop(0))
        analysis = searcher.run()
        sorted_results = list(map((lambda x: x.last_result['mae']), RayTuneSearchEngine._get_sorted_trials(analysis.trials, metric='mae', mode='min')))
        assert ('mae' in analysis.trials[0].last_result.keys())
        assert all(((sorted_results[i] <= sorted_results[(i + 1)]) for i in range((len(sorted_results) - 1))))
        assert (RayTuneSearchEngine._get_best_result(analysis.trials, metric='mae', mode='min')['mae'] == sorted_results[0])
        assert all(((analysis.trials[i].last_result['mae'] >= analysis.trials[i].last_result['best_mae']) for i in range(len(sorted_results))))
        assert (analysis.trials[0].last_result['iterations_since_restore'] == 20)
class BalMask(Mask):
    """Mask broad-absorption-line (BAL) regions of quasar forests, using a BAL
    catalog loaded from a FITS file."""

    def __init__(self, config):
        """Initialize from a config section.

        Required options: 'filename', 'los_id name' ('THING_ID' or
        'TARGETID') and 'bal index type' ('ai' or 'bi').
        Raises MaskError on any missing or invalid option, or when the
        catalog file lacks the expected extension or columns.
        """
        self.logger = logging.getLogger(__name__)
        super().__init__(config)
        filename = config.get('filename')
        if filename is None:
            raise MaskError("Missing argument 'filename' required by BalMask")
        los_id_name = config.get('los_id name')
        if los_id_name is None:
            raise MaskError("Missing argument 'los_id name' required by BalMask")
        if los_id_name == 'THING_ID':
            ext_name = 'BALCAT'
        elif los_id_name == 'TARGETID':
            extnames = [ext.get_extname() for ext in fitsio.FITS(filename)]
            if 'QSO_CAT' in extnames:
                ext_name = 'QSO_CAT'
            elif 'ZCATALOG' in extnames:
                ext_name = 'ZCATALOG'
            else:
                # NOTE(review): the source had a '(unknown)' placeholder here;
                # reconstructed as the catalog filename — confirm.
                raise MaskError(f'Could not find valid quasar catalog extension in fits file: {filename}')
        else:
            raise MaskError(f"Unrecognized los_id name. Expected one of 'THING_ID' or 'TARGETID'. Found {los_id_name}")
        self.bal_index_type = config.get('bal index type')
        if self.bal_index_type is None:
            raise MaskError("Missing argument 'bal index type' required by BalMask")
        if self.bal_index_type == 'ai':
            columns_list = [los_id_name, 'VMIN_CIV_450', 'VMAX_CIV_450']
        elif self.bal_index_type == 'bi':
            columns_list = [los_id_name, 'VMIN_CIV_2000', 'VMAX_CIV_2000']
        else:
            # BUG FIX: the original *assigned* the MaskError to
            # self.bal_index_type instead of raising it, after which
            # columns_list[1:] crashed with a TypeError on None.
            raise MaskError(f"In BalMask, unrecognized value for 'bal_index_type'. Expected one of 'ai' or 'bi'. Found {self.bal_index_type}")
        # Names of the velocity-range columns for the chosen index type.
        self.velocity_list = columns_list[1:]
        self.logger.progress(f'Reading BAL catalog from: {filename}')
        hdul = None
        try:
            hdul = fitsio.FITS(filename)
            self.cat = {col: (hdul[ext_name][col][:].astype(np.int64) if (col == los_id_name) else hdul[ext_name][col][:].astype(np.float64)) for col in columns_list}
        except OSError as error:
            raise MaskError(f"Error loading BalMask. File {filename} does not have extension '{ext_name}'") from error
        except ValueError as error:
            aux = "', '".join(columns_list)
            raise MaskError(f"Error loading BalMask. File {filename} does not have fields '{aux}' in HDU '{ext_name}'") from error
        finally:
            # BUG FIX: guard against fitsio.FITS itself failing, in which case
            # `hdul` was never bound and the original raised NameError.
            if hdul is not None:
                hdul.close()
        self.los_ids = {los_id: (los_id, self.cat[los_id_name], self.cat[self.velocity_list[0]], self.cat[self.velocity_list[1]]) for los_id in np.unique(self.cat[los_id_name])}
        num_bals = len(self.los_ids)
        self.logger.progress(f'In catalog: {num_bals} BAL quasars')

    def apply_mask(self, forest):
        """Zero out BAL-affected pixels of `forest` (in place, via _masker).

        Forests whose los_id has no catalog entry are left untouched.
        """
        if self.los_ids.get(forest.los_id) is None:
            return
        (log_lambda_min, log_lambda_max) = add_bal_rest_frame(*self.los_ids.get(forest.los_id))
        if len(log_lambda_min) == 0:
            return
        w = np.ones(forest.log_lambda.size, dtype=bool)
        # Shift observed-frame wavelengths to the quasar rest frame.
        rest_frame_log_lambda = forest.log_lambda - np.log10(1.0 + forest.z)
        mask_idx_ranges = np.searchsorted(rest_frame_log_lambda, [log_lambda_min, log_lambda_max]).T
        mask_idx_ranges.sort(axis=1)
        for (idx1, idx2) in mask_idx_ranges:
            w[idx1:idx2] = 0
        for param in Forest.mask_fields:
            self._masker(forest, param, w)
class PatchDiscriminator(nn.ModelBase):
    """PatchGAN-style discriminator whose conv stack is looked up from
    `patch_discriminator_kernels` by the requested patch (receptive-field)
    size."""

    def on_build(self, patch_size, in_ch, base_ch=None, conv_kernel_initializer=None):
        (suggested_base_ch, kernels_strides) = patch_discriminator_kernels[patch_size]
        if base_ch is None:
            base_ch = suggested_base_ch
        self.convs = []
        ch_in = in_ch
        for layer_idx, (kernel_size, strides) in enumerate(kernels_strides):
            # Channel width doubles per layer, capped at 8x the base width.
            ch_out = base_ch * min(2 ** layer_idx, 8)
            self.convs.append(nn.Conv2D(ch_in, ch_out, kernel_size=kernel_size, strides=strides, padding='SAME', kernel_initializer=conv_kernel_initializer))
            ch_in = ch_out
        # 1x1 conv producing the per-patch realness logit map.
        self.out_conv = nn.Conv2D(ch_in, 1, kernel_size=1, padding='VALID', kernel_initializer=conv_kernel_initializer)

    def forward(self, x):
        for conv in self.convs:
            x = tf.nn.leaky_relu(conv(x), 0.1)
        return self.out_conv(x)
def prepare_data_gluon(df):
    """Convert a dataframe with a list-valued 'data' column and a scalar
    'label' column into the {'x', 'y'} dict layout expected by the Gluon
    trainer."""
    features = np.array(df['data'].values.tolist())
    targets = df['label'].values
    return {'x': features, 'y': targets}
def debounce(wait: float) -> Callable[[Callable[..., None]], Callable[..., bool]]:
    """Decorator factory: postpone each call to the wrapped function by
    `wait` seconds, cancelling any still-pending earlier call.

    The wrapper returns True when the call opened a fresh debounce window
    (no call was pending), False when it merely reset a pending one.
    """
    def decorator(fn: Callable[..., None]) -> Callable[..., bool]:
        def debounced(*args, **kwargs) -> bool:
            def fire():
                fn(*args, **kwargs)
            try:
                pending = debounced.t
            except AttributeError:
                # Very first call: nothing scheduled yet.
                started_fresh = True
            else:
                # A timer exists: it either already fired (finished is set)
                # or is still pending and must be cancelled before we
                # reschedule.
                started_fresh = pending.finished.is_set()
                pending.cancel()
            debounced.t = Timer(wait, fire)
            debounced.t.start()
            return started_fresh
        return debounced
    return decorator
class MaskLayer1d(nn.Module):
    """Mask features of a 1d input.

    Given a tuple (x, S) with S a {0,1} mask shaped like x, computes
    x * S + value * (1 - S); when `append` is True, S is concatenated to
    the result along dim 1.
    """

    def __init__(self, append=True, value=0):
        super().__init__()
        self.append = append   # whether to concatenate the mask to the output
        self.value = value     # fill value for masked-out positions

    def forward(self, input_tuple):
        x, S = input_tuple
        masked = x * S + self.value * (1 - S)
        if not self.append:
            return masked
        return torch.cat((masked, S), dim=1)
def list_materials():
    """Print the available phantom materials found under the mu data folder."""
    header = "\nAVAILABLE MATERIALS (for fc.Phantom.phan_map = ['MATERIAL']:\nIntegers are atomic numbers\n"
    print(header)
    print_files(os.path.join(data_path, 'mu'))
.slow .parametrize('alg', algos_cont) def test_continuous_identity(alg): kwargs = learn_kwargs[alg] kwargs.update(common_kwargs) learn_fn = (lambda e: get_learn_function(alg)(env=e, **kwargs)) env_fn = (lambda : BoxIdentityEnv((1,), episode_len=100)) simple_test(env_fn, learn_fn, (- 0.1))
class DensePoseResult(object):
    """Packs per-instance DensePose outputs (S, I, U, V tensors) into
    PNG-encoded results, one per bounding box."""

    def __init__(self, boxes_xywh, S, I, U, V):
        # boxes_xywh: (N, 4) tensor of xywh boxes; S/I/U/V hold the raw
        # per-instance network outputs, indexed along dim 0.
        self.results = []
        self.boxes_xywh = boxes_xywh.cpu().tolist()
        assert (len(boxes_xywh.size()) == 2)
        assert (boxes_xywh.size(1) == 4)
        for (i, box_xywh) in enumerate(boxes_xywh):
            # S[[i]] etc. keep the leading batch dim (shape (1, C, H, W)).
            result_i = self._output_to_result(box_xywh, S[[i]], I[[i]], U[[i]], V[[i]])
            result_numpy_i = result_i.cpu().numpy()
            result_encoded_i = DensePoseResult.encode_png_data(result_numpy_i)
            # Store (shape, png_string) so decode_png_data can restore shape.
            result_encoded_with_shape_i = (result_numpy_i.shape, result_encoded_i)
            self.results.append(result_encoded_with_shape_i)

    def __str__(self):
        s = 'DensePoseResult: N={} [{}]'.format(len(self.results), ', '.join([str(list(r[0])) for r in self.results]))
        return s

    def _output_to_result(self, box_xywh, S, I, U, V):
        """Resample the coarse S/I/U/V maps to the box size and pack them into
        a (3, h, w) uint8 image: channel 0 = part index, 1 = U*255, 2 = V*255."""
        (x, y, w, h) = box_xywh
        # Clamp to at least 1 pixel so interpolate() gets a valid target size.
        w = max(int(w), 1)
        h = max(int(h), 1)
        result = torch.zeros([3, h, w], dtype=torch.uint8, device=U.device)
        assert (len(S.size()) == 4), 'AnnIndex tensor size should have {} dimensions but has {}'.format(4, len(S.size()))
        s_bbox = F.interpolate(S, (h, w), mode='bilinear', align_corners=False).argmax(dim=1)
        # NOTE(review): the message below reports len(S.size()) where
        # len(I.size()) was presumably intended (message-only issue).
        assert (len(I.size()) == 4), 'IndexUV tensor size should have {} dimensions but has {}'.format(4, len(S.size()))
        # Part indices are zeroed wherever the segmentation is background.
        i_bbox = (F.interpolate(I, (h, w), mode='bilinear', align_corners=False).argmax(dim=1) * (s_bbox > 0).long()).squeeze(0)
        assert (len(U.size()) == 4), 'U tensor size should have {} dimensions but has {}'.format(4, len(U.size()))
        u_bbox = F.interpolate(U, (h, w), mode='bilinear', align_corners=False)
        assert (len(V.size()) == 4), 'V tensor size should have {} dimensions but has {}'.format(4, len(V.size()))
        v_bbox = F.interpolate(V, (h, w), mode='bilinear', align_corners=False)
        result[0] = i_bbox
        # Fill U/V channels per body part from that part's own UV plane.
        for part_id in range(1, u_bbox.size(1)):
            result[1][(i_bbox == part_id)] = (u_bbox[(0, part_id)][(i_bbox == part_id)] * 255).clamp(0, 255).to(torch.uint8)
            result[2][(i_bbox == part_id)] = (v_bbox[(0, part_id)][(i_bbox == part_id)] * 255).clamp(0, 255).to(torch.uint8)
        assert (result.size(1) == h), 'Results height {} should be equalto bounding box height {}'.format(result.size(1), h)
        assert (result.size(2) == w), 'Results width {} should be equalto bounding box width {}'.format(result.size(2), w)
        return result

    # Used as a static helper (always called via the class, never bound).
    def encode_png_data(arr):
        """PNG-compress a (3, H, W) uint8 array into a base64 string."""
        assert (len(arr.shape) == 3), 'Expected a 3D array as an input, got a {0}D array'.format(len(arr.shape))
        assert (arr.shape[0] == 3), 'Expected first array dimension of size 3, got {0}'.format(arr.shape[0])
        assert (arr.dtype == np.uint8), 'Expected an array of type np.uint8, got {0}'.format(arr.dtype)
        # CHW -> HWC for PIL.
        data = np.moveaxis(arr, 0, (- 1))
        im = Image.fromarray(data)
        fstream = BytesIO()
        im.save(fstream, format='png', optimize=True)
        s = base64.encodebytes(fstream.getvalue()).decode()
        return s

    # Used as a static helper (always called via the class, never bound).
    def decode_png_data(shape, s):
        """Inverse of encode_png_data: base64 PNG string -> array of `shape`."""
        fstream = BytesIO(base64.decodebytes(s.encode()))
        im = Image.open(fstream)
        data = np.moveaxis(np.array(im.getdata(), dtype=np.uint8), (- 1), 0)
        return data.reshape(shape)

    def __len__(self):
        return len(self.results)

    def __getitem__(self, item):
        # Returns ((shape, png_string), box_xywh) for instance `item`.
        result_encoded = self.results[item]
        bbox_xywh = self.boxes_xywh[item]
        return (result_encoded, bbox_xywh)
class TeacherForcingScheduler(_Scheduler):
    """Scheduler producing a teacher-forcing ratio that moves from `high`
    toward `low` according to the schedule function `f`."""

    def __init__(self, high, low, f=scheduled_sampling, step=0):
        super(TeacherForcingScheduler, self).__init__(step)
        self.high = high          # starting ratio
        self.low = low            # final ratio
        self._step = step         # current schedule position
        self.schedule_f = f       # maps (step, high, low) -> ratio

    def get_tfr(self):
        """Teacher-forcing ratio for the current step."""
        return self.schedule_f(self._step, self.high, self.low)

    def step(self):
        """Return the current ratio, then advance the step counter."""
        current = self.get_tfr()
        self._update_step()
        return current
def stack(data, stack_from_deltas=False):
    """Co-add deltas from all forests onto the common log-lambda grid.

    `data` maps healpix id -> list of forests.  With `stack_from_deltas`
    the precomputed forest.delta/weights are used; otherwise delta and its
    weights are re-derived from flux, continuum and the fitted variance
    model (eta, var_lss, fudge).

    Returns (stack_log_lambda, stack_delta, stack_weight).
    """
    num_bins = int((Forest.log_lambda_max - Forest.log_lambda_min) / Forest.delta_log_lambda) + 1
    stack_log_lambda = Forest.log_lambda_min + np.arange(num_bins) * Forest.delta_log_lambda
    stack_delta = np.zeros(num_bins)
    stack_weight = np.zeros(num_bins)
    for healpix in sorted(data):
        for forest in data[healpix]:
            if stack_from_deltas:
                delta = forest.delta
                weights = forest.weights
            else:
                delta = forest.flux / forest.cont
                var_lss = Forest.get_var_lss(forest.log_lambda)
                eta = Forest.get_eta(forest.log_lambda)
                fudge = Forest.get_fudge(forest.log_lambda)
                var = 1.0 / forest.ivar / forest.cont ** 2
                variance = eta * var + var_lss + fudge / var
                weights = 1.0 / variance
            # Nearest-bin assignment onto the common grid.
            bins = (((forest.log_lambda - Forest.log_lambda_min) / Forest.delta_log_lambda) + 0.5).astype(int)
            weighted_sum = np.bincount(bins, weights=delta * weights)
            stack_delta[:len(weighted_sum)] += weighted_sum
            weight_sum = np.bincount(bins, weights=weights)
            stack_weight[:len(weight_sum)] += weight_sum
    populated = stack_weight > 0
    stack_delta[populated] /= stack_weight[populated]
    return (stack_log_lambda, stack_delta, stack_weight)
def mlp_mixer_l16(num_classes: int, image_size: int = 224, channels: int = 3):
    """MLP-Mixer L/16: 24 layers, hidden dim 1024, patch size 16."""
    return MLPMixer(
        num_classes,
        image_size,
        channels,
        patch_size=16,
        num_layers=24,
        hidden_dim=1024,
        tokens_hidden_dim=512,
        channels_hidden_dim=4096,
    )
def main():
    """Train PAMNet on an RNA dataset, checkpointing the model with the best
    validation loss under ./save/<dataset>/best_model.h5."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0, help='GPU number.')
    parser.add_argument('--seed', type=int, default=40, help='Random seed.')
    parser.add_argument('--dataset', type=str, default='RNA-Puzzles', help='Dataset to be used')
    parser.add_argument('--epochs', type=int, default=150, help='Number of epochs to train.')
    parser.add_argument('--lr', type=float, default=0.0005, help='Initial learning rate.')
    parser.add_argument('--wd', type=float, default=0, help='Weight decay (L2 loss).')
    parser.add_argument('--n_layer', type=int, default=2, help='Number of hidden layers.')
    parser.add_argument('--dim', type=int, default=64, help='Size of input hidden units.')
    parser.add_argument('--batch_size', type=int, default=8, help='batch_size')
    parser.add_argument('--cutoff_l', type=float, default=2.6, help='cutoff in local layer')
    parser.add_argument('--cutoff_g', type=float, default=20.0, help='cutoff in global layer')
    args = parser.parse_args()
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpu)
    set_seed(args.seed)
    # Datasets live in TUDataset-format folders under ./data/<dataset>.
    path = osp.join('.', 'data', args.dataset)
    train_dataset = TUDataset(path, name='train', use_node_attr=True).shuffle()
    val_dataset = TUDataset(path, name='val', use_node_attr=True)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
    print('Data loaded!')
    config = Config(dataset=args.dataset, dim=args.dim, n_layer=args.n_layer, cutoff_l=args.cutoff_l, cutoff_g=args.cutoff_g)
    model = PAMNet(config).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd, amsgrad=False)
    print('Start training!')
    best_val_loss = None
    for epoch in range(args.epochs):
        model.train()
        for data in train_loader:
            data = data.to(device)
            optimizer.zero_grad()
            output = model(data)
            # Smooth L1 regression against the per-graph target data.y.
            loss = F.smooth_l1_loss(output, data.y)
            loss.backward()
            optimizer.step()
        # Evaluate on both splits after every epoch.
        (train_loss, _) = test(model, train_loader, device)
        (val_loss, _) = test(model, val_loader, device)
        print('Epoch: {:03d}, Train Loss: {:.7f}, Val Loss: {:.7f}'.format((epoch + 1), train_loss, val_loss))
        save_folder = os.path.join('.', 'save', args.dataset)
        if (not os.path.exists(save_folder)):
            os.makedirs(save_folder)
        # Keep only the checkpoint with the lowest validation loss so far.
        if ((best_val_loss is None) or (val_loss < best_val_loss)):
            best_val_loss = val_loss
            torch.save(model.state_dict(), os.path.join(save_folder, 'best_model.h5'))
class ChainTensorDataset(Dataset):
    """Zip several datasets index-wise: item i is a list with one entry per
    wrapped dataset; 0/1-element outputs are unwrapped to their single value."""

    def __init__(self, *datasets):
        self.datasets = datasets

    def __getitem__(self, item):
        collected = []
        for dataset in self.datasets:
            sample = dataset[item]
            collected.append(sample[0] if len(sample) < 2 else sample)
        return collected

    def __len__(self):
        # Length follows the first dataset; the rest are assumed aligned.
        return len(self.datasets[0])
class ImageFilelist(data.Dataset):
    """Inpainting dataset: loads images from a file list, applies a regular
    (random bbox) or irregular (external mask image) mask, and returns the
    masked input alongside the ground-truth target."""

    def __init__(self, opt, flist_reader=default_flist_reader, loader=default_loader):
        self.imlist = flist_reader(opt['image_list'])
        self.loader = loader
        self.opt = opt
        transform_list = [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
        self.transform = transforms.Compose(transform_list)
        # Only irregular masking reads an external list of mask images.
        if ((opt['mask_list'] is not None) and (opt['mask_type'] == 'irregular')):
            self.mask_data = flist_reader(opt['mask_list'])

    def __getitem__(self, index):
        impath = self.imlist[index]
        img = self.loader(impath)
        # NOTE(review): resize() is called with img_shape[2] as `height` and
        # img_shape[1] as `width` — verify the intended (C, H, W) ordering.
        img = self.resize(img, self.opt['img_shape'][2], self.opt['img_shape'][1])
        img_tensor = self.transform(img)
        if (self.opt['mask_type'] == 'regular'):
            (bbox_tensor, mask_tensor) = self.load_mask(index)
        else:
            mask_tensor = self.load_mask(index)
            # Irregular masks still ship a random bbox for downstream use.
            bbox = util.bbox(self.opt)
            bbox_tensor = torch.from_numpy(np.array(bbox))
        # Zero out the masked region of the input; the target stays intact.
        input_tensor = (img_tensor * (1.0 - mask_tensor))
        return {'input': input_tensor, 'bbox': bbox_tensor, 'mask': mask_tensor, 'target': img_tensor, 'paths': impath}

    def __len__(self):
        return len(self.imlist)

    def load_mask(self, index):
        """Return (bbox, mask) for 'regular' masks or just the mask tensor for
        'irregular' ones; raises NotImplementedError otherwise."""
        if (self.opt['mask_type'] == 'regular'):
            bbox = util.bbox(self.opt)
            mask = util.bbox2mask(bbox, self.opt)
            bbox_t = torch.from_numpy(np.array(bbox))
            return (bbox_t, mask)
        elif (self.opt['mask_type'] == 'irregular'):
            # With shuffling, pick a random mask instead of the aligned one.
            if self.opt['use_shuffle']:
                mask_index = random.randint(0, (len(self.mask_data) - 1))
            else:
                mask_index = index
            mask = self.loader(self.mask_data[mask_index]).convert('L')
            mask = np.asarray(mask.resize((self.opt['img_shape'][2], self.opt['img_shape'][1]), Image.BICUBIC))
            # Binarize: any nonzero grayscale value counts as masked.
            mask = (mask > 0).astype(np.float32)
            mask = torch.from_numpy(np.expand_dims(mask, 0))
            return mask
        else:
            raise NotImplementedError('Unsupported mask type: {}'.format(self.opt['mask_type']))

    def resize(self, img, height, width, centerCrop=False):
        """Square-crop (center or random) a non-square image, then resize to
        (width, height)."""
        (imgw, imgh) = (img.size[0], img.size[1])
        if (imgh != imgw):
            if centerCrop:
                side = np.minimum(imgh, imgw)
                j = ((imgh - side) // 2)
                i = ((imgw - side) // 2)
                # NOTE(review): PIL crop takes (left, upper, right, lower);
                # (i, j, side, side) looks like it should be
                # (i, j, i + side, j + side) — confirm intended behavior.
                img = img.crop((i, j, side, side))
            else:
                side = np.minimum(imgh, imgw)
                ix = random.randrange(0, ((imgw - side) + 1))
                iy = random.randrange(0, ((imgh - side) + 1))
                # NOTE(review): same crop-coordinate concern as above.
                img = img.crop((ix, iy, side, side))
        img = img.resize((width, height), Image.BICUBIC)
        return img
class QDQBertForMultipleChoice(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises an informative error
    unless the torch backend is available."""
    # Backends required by the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with a helpful message when torch is missing.
        requires_backends(self, ['torch'])
class _TestIterableDataset(IterableDataset): def __init__(self, data_size=32, sleep_time=0): self.size = data_size self.sleep_time = sleep_time def __iter__(self): worker_info = torch.utils.data.get_worker_info() if (worker_info is None): worker_id = 0 else: worker_id = worker_info.id if (worker_id == 1): time.sleep(self.sleep_time) for x in range(self.size): (yield (x + (worker_id * 100)))
class YCbCr2RGB():
    """Callable transform converting a YCbCr image tensor to RGB."""

    def __call__(self, ycbcr):
        return F_transforms.ycbcr2rgb(ycbcr)

    def __repr__(self):
        name = self.__class__.__name__
        return f'{name}()'
def test_get_loading_pipeline():
    """get_loading_pipeline should keep only the Load* steps of a pipeline."""
    pipelines = [
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations', with_bbox=True),
        dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        dict(type='RandomFlip', flip_ratio=0.5),
        dict(type='Pad', size_divisor=32),
        dict(type='DefaultFormatBundle'),
        dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
    ]
    expected_pipelines = [
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations', with_bbox=True),
    ]
    assert get_loading_pipeline(pipelines) == expected_pipelines
class EfficientNetB3(nn.Module):
    """Pose-regression head on a pretrained EfficientNet-B3 backbone.

    `feature_block` selects which reduction stage is exposed as the
    intermediate feature map (6 means the final feature output)."""

    def __init__(self, feat_dim=12, feature_block=6):
        super(EfficientNetB3, self).__init__()
        self.backbone_net = EfficientNet.from_pretrained('efficientnet-b3')
        self.feature_block = feature_block
        if self.feature_block == 6:
            self.feature_extractor = self.backbone_net.extract_features
        else:
            self.feature_extractor = self.backbone_net.extract_endpoints
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # 1536 is EfficientNet-B3's final feature width.
        self.fc_pose = nn.Linear(1536, feat_dim)

    def _aggregate_feature2(self, x):
        # Split a doubled batch into its two halves and stack them.
        half = x.shape[0] // 2
        return torch.stack([x[:half], x[half:]])

    def forward(self, x, return_feature=False, isSingleStream=False):
        feat_out = []
        if self.feature_block == 6:
            x = self.feature_extractor(x)
            fe = x.clone()
        else:
            endpoints = self.feature_extractor(x)
            fe = endpoints['reduction_' + str(self.feature_block)]
            x = endpoints['reduction_6']
        if return_feature:
            feature = torch.stack([fe]) if isSingleStream else self._aggregate_feature2(fe)
            feat_out.append(feature)
        x = self.avgpool(x)
        x = x.reshape(x.size(0), -1)
        predict = self.fc_pose(x)
        return (feat_out, predict)
class CIFAR(Dataset):
    """CIFAR-10/100 wrapper converting PIL images via `default_converter`
    and carrying a top-k GeneralEvaluator."""

    def __init__(self, root, train=True, transform=None, target_transform=None, top_k=(1, 5), is_cifar100=True, keep_rgb=False):
        dataset_cls = CIFAR100 if is_cifar100 else CIFAR10
        self.data_set = dataset_cls(root, train=train, download=True)
        self.classes = self.data_set.classes
        self.root = root
        self.transform = transform
        self.target_transform = target_transform
        self.keep_rgb = keep_rgb
        self._update_evaluator(top_k)

    def __getitem__(self, index: int):
        image, target = self.data_set[index]
        image = default_converter(image, rgb=self.keep_rgb)
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (image, target)

    def __len__(self) -> int:
        return len(self.data_set)

    def _update_evaluator(self, top_k):
        # Rebuild the evaluator whenever the top-k setting changes.
        self.evaluator = GeneralEvaluator(self.classes, top_k=top_k)

    def __repr__(self):
        return f'{self.__class__.__name__} ({self.root})'
def parse_args():
    """Build and parse the command-line arguments for the mmediting tester."""
    parser = argparse.ArgumentParser(description='mmediting tester')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('model', help='input model file')
    parser.add_argument(
        'backend',
        help='backend of the model.',
        choices=['onnxruntime', 'tensorrt'])
    parser.add_argument('--out', help='output result pickle file')
    parser.add_argument(
        '--save-path',
        default=None,
        type=str,
        help='path to store images and if not given, will not save image')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    return parser.parse_args()
def _ms_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id):
    """Worker-process loop for a multi-scale DataLoader: pulls batch index
    lists from `index_queue`, collates the samples and pushes
    (idx, samples) results onto `data_queue`.

    A `None` sentinel on the index queue shuts the worker down.
    NOTE(review): `init_fn` is accepted for API parity but never invoked
    here — confirm whether worker init was intentionally dropped.
    """
    global _use_shared_memory
    _use_shared_memory = True
    # Restore default signal handling and keep each worker single-threaded,
    # with a deterministic per-worker seed.
    _set_worker_signal_handlers()
    torch.set_num_threads(1)
    torch.manual_seed(seed)
    while True:
        r = index_queue.get()
        if (r is None):
            break
        (idx, batch_indices) = r
        try:
            idx_scale = 0
            samples = collate_fn([dataset[i] for i in batch_indices])
            # The scale index is appended so the consumer knows which
            # resolution this batch was produced at.
            samples.append(idx_scale)
        except Exception:
            # Ship the exception (with traceback) back to the main process
            # instead of dying silently.
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
def create_eos_event():
    """Return a copy of the compound-event template re-typed as an EOS event."""
    event = compound_event.copy()
    event['type'] = 'EOS'
    return event
# NOTE(review): the decorator was corrupted in the source ('_model');
# reconstructed as timm's standard `@register_model` — confirm the file
# imports it.
@register_model
def xception71(pretrained=False, **kwargs):
    """Xception-71: the aligned Xception backbone variant (as used in
    DeepLabV3+), built from an explicit per-block configuration."""
    block_cfg = [
        # Entry flow
        dict(in_chs=64, out_chs=128, stride=2),
        dict(in_chs=128, out_chs=256, stride=1),
        dict(in_chs=256, out_chs=256, stride=2),
        dict(in_chs=256, out_chs=728, stride=1),
        dict(in_chs=728, out_chs=728, stride=2),
        # Middle flow: 16 identical residual blocks
        *([dict(in_chs=728, out_chs=728, stride=1)] * 16),
        # Exit flow
        dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2),
        dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False),
    ]
    model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), **kwargs)
    return _xception('xception71', pretrained=pretrained, **model_args)
class DCShadowNet(object):
    """Trainer for DCShadowNet: an unpaired two-domain GAN.

    Domain A and domain B are the two image folders (``trainA``/``trainB``);
    per the model name this is presumably shadow vs. shadow-free — confirm
    against the dataset layout. Holds the generators (A2B, B2A), global/local
    discriminators, optimizers, and the full training loop.
    """

    def __init__(self, args):
        """Copy every hyper-parameter off the parsed CLI ``args`` namespace."""
        self.model_name = 'DCShadowNet'
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.datasetpath = args.datasetpath
        self.iteration = args.iteration      # total number of training steps
        self.decay_flag = args.decay_flag    # linearly decay lr over the 2nd half of training
        self.batch_size = args.batch_size
        self.print_freq = args.print_freq    # steps between sample-image dumps
        self.save_freq = args.save_freq      # steps between numbered checkpoints
        self.lr = args.lr
        self.weight_decay = args.weight_decay
        self.ch = args.ch                    # base channel width for G and D
        # Loss-term weights.
        self.adv_weight = args.adv_weight
        self.cycle_weight = args.cycle_weight
        self.identity_weight = args.identity_weight
        self.dom_weight = args.dom_weight
        # Optional auxiliary losses (chromaticity / perceptual / smoothness).
        self.use_ch_loss = args.use_ch_loss
        self.use_pecp_loss = args.use_pecp_loss
        self.use_smooth_loss = args.use_smooth_loss
        # NOTE(review): the aux-loss weights below are stored but never read in
        # this class's visible code — the losses are added unweighted in train().
        if (args.use_ch_loss == True):
            self.ch_weight = args.ch_weight
        if (args.use_pecp_loss == True):
            self.pecp_weight = args.pecp_weight
        if (args.use_smooth_loss == True):
            self.smooth_weight = args.smooth_weight
        ' Generator '  # stray string literal kept from the original; acts as a section marker
        self.n_res = args.n_res              # residual blocks in each generator
        self.n_dis = args.n_dis              # stored but unused in the visible code
        self.img_size = args.img_size
        self.img_ch = args.img_ch
        self.device = args.device
        self.benchmark_flag = args.benchmark_flag
        self.resume = args.resume
        # cuDNN autotuner: worthwhile when input sizes are fixed.
        if (torch.backends.cudnn.enabled and self.benchmark_flag):
            print('set benchmark !')
            torch.backends.cudnn.benchmark = True
        print()
        print('##### Information #####')
        print('# dataset : ', self.dataset)
        print('# datasetpath : ', self.datasetpath)

    def build_model(self):
        """Create datasets, dataloaders, networks, losses, and optimizers.

        Must be called before :meth:`train`. All state is attached to ``self``.
        """
        # Train-time augmentation: flip, pad-resize by 30px, random crop back
        # to img_size; images normalized to [-1, 1].
        train_transform = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.Resize(((self.img_size + 30), (self.img_size + 30))), transforms.RandomCrop(self.img_size), transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
        test_transform = transforms.Compose([transforms.Resize((self.img_size, self.img_size)), transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
        self.trainA = ImageFolder(os.path.join('dataset', self.datasetpath, 'trainA'), train_transform)
        self.trainB = ImageFolder(os.path.join('dataset', self.datasetpath, 'trainB'), train_transform)
        if self.use_ch_loss:
            # Extra folder with per-image chromaticity targets for domain A.
            self.trainC = ImageFolder(os.path.join('dataset', self.datasetpath, 'train_A_intr2d_light'), train_transform)
        self.testA = ImageFolder(os.path.join('dataset', self.datasetpath, 'testA'), test_transform)
        self.testB = ImageFolder(os.path.join('dataset', self.datasetpath, 'testB'), test_transform)
        if self.use_ch_loss:
            self.testC = ImageFolder(os.path.join('dataset', self.datasetpath, 'test_A_intr2d_light'), test_transform)
        self.trainA_loader = DataLoader(self.trainA, batch_size=self.batch_size, shuffle=True)
        self.trainB_loader = DataLoader(self.trainB, batch_size=self.batch_size, shuffle=True)
        if self.use_ch_loss:
            # NOTE(review): shuffle=False here while trainA is shuffled — the
            # chromaticity targets will NOT stay paired with their trainA
            # images; verify this is intentional.
            self.trainC_loader = DataLoader(self.trainC, batch_size=self.batch_size, shuffle=False)
        self.testA_loader = DataLoader(self.testA, batch_size=1, shuffle=False)
        self.testB_loader = DataLoader(self.testB, batch_size=1, shuffle=False)
        if self.use_ch_loss:
            self.testC_loader = DataLoader(self.testC, batch_size=1, shuffle=False)
        ' Define Generator, Discriminator '  # stray section-marker literal kept from the original
        self.genA2B = ResnetGenerator(input_nc=3, output_nc=3, ngf=self.ch, n_blocks=self.n_res, img_size=self.img_size, light=True).to(self.device)
        self.genB2A = ResnetGenerator(input_nc=3, output_nc=3, ngf=self.ch, n_blocks=self.n_res, img_size=self.img_size, light=True).to(self.device)
        # Global (7-layer) and local (5-layer) discriminators per domain.
        self.disGA = Discriminator(input_nc=3, ndf=self.ch, n_layers=7).to(self.device)
        self.disGB = Discriminator(input_nc=3, ndf=self.ch, n_layers=7).to(self.device)
        self.disLA = Discriminator(input_nc=3, ndf=self.ch, n_layers=5).to(self.device)
        self.disLB = Discriminator(input_nc=3, ndf=self.ch, n_layers=5).to(self.device)
        self.L1_loss = nn.L1Loss().to(self.device)
        self.MSE_loss = nn.MSELoss().to(self.device)
        self.BCE_loss = nn.BCEWithLogitsLoss().to(self.device)
        # One optimizer over both generators, one over all four discriminators.
        self.G_optim = torch.optim.Adam(itertools.chain(self.genA2B.parameters(), self.genB2A.parameters()), lr=self.lr, betas=(0.5, 0.999), weight_decay=self.weight_decay)
        self.D_optim = torch.optim.Adam(itertools.chain(self.disGA.parameters(), self.disGB.parameters(), self.disLA.parameters(), self.disLB.parameters()), lr=self.lr, betas=(0.5, 0.999), weight_decay=self.weight_decay)
        # Clamps rho parameters in the generators to [0, 1] after each G step.
        self.Rho_clipper = RhoClipper(0, 1)

    def train(self):
        """Run the full adversarial training loop.

        Alternates one discriminator step and one generator step per
        iteration; periodically dumps sample grids, saves numbered
        checkpoints, and refreshes a ``*_params_latest.pt`` checkpoint.
        """
        (self.genA2B.train(), self.genB2A.train(), self.disGA.train(), self.disGB.train(), self.disLA.train(), self.disLB.train())
        start_iter = 1
        if self.resume:
            # Resume from the newest checkpoint found in the model dir.
            model_list = glob(os.path.join(self.result_dir, self.dataset, 'model', '*.pt'))
            if (not (len(model_list) == 0)):
                model_list.sort()
                # Checkpoint files end in `..._<step>.pt`; recover the step.
                start_iter = int(model_list[(- 1)].split('_')[(- 1)].split('.')[0])
                self.load(os.path.join(self.result_dir, self.dataset, 'model'), start_iter)
                print(' [*] Load SUCCESS')
                # Re-apply the lr decay that would have accumulated by start_iter.
                if (self.decay_flag and (start_iter > (self.iteration // 2))):
                    self.G_optim.param_groups[0]['lr'] -= ((self.lr / (self.iteration // 2)) * (start_iter - (self.iteration // 2)))
                    self.D_optim.param_groups[0]['lr'] -= ((self.lr / (self.iteration // 2)) * (start_iter - (self.iteration // 2)))
        print('training start !')
        start_time = time.time()
        for step in range(start_iter, (self.iteration + 1)):
            # Linear lr decay over the second half of training.
            if (self.decay_flag and (step > (self.iteration // 2))):
                self.G_optim.param_groups[0]['lr'] -= (self.lr / (self.iteration // 2))
                self.D_optim.param_groups[0]['lr'] -= (self.lr / (self.iteration // 2))
            # Fetch the next batch from each domain, recycling the iterator on
            # exhaustion. NOTE(review): `.next()` is the Python-2 iterator API;
            # on Python 3 this raises AttributeError — the bare `except`
            # catches it once, but the retry inside the handler would raise
            # again. Works only on Python 2 / old torch; should be `next(it)`.
            try:
                (real_A, _) = trainA_iter.next()
            except:
                trainA_iter = iter(self.trainA_loader)
                (real_A, _) = trainA_iter.next()
            try:
                (real_B, _) = trainB_iter.next()
            except:
                trainB_iter = iter(self.trainB_loader)
                (real_B, _) = trainB_iter.next()
            if self.use_ch_loss:
                try:
                    (real_C, _) = trainC_iter.next()
                except:
                    trainC_iter = iter(self.trainC_loader)
                    (real_C, _) = trainC_iter.next()
            (real_A, real_B) = (real_A.to(self.device), real_B.to(self.device))
            if self.use_ch_loss:
                real_C = real_C.to(self.device)

            # ---- Discriminator update ----
            self.D_optim.zero_grad()
            (fake_A2B, _, _) = self.genA2B(real_A)
            (fake_B2A, _, _) = self.genB2A(real_B)
            # Each discriminator returns (patch logits, domain logits, _).
            (real_GA_logit, real_GA_Dom_logit, _) = self.disGA(real_A)
            (real_LA_logit, real_LA_Dom_logit, _) = self.disLA(real_A)
            (real_GB_logit, real_GB_Dom_logit, _) = self.disGB(real_B)
            (real_LB_logit, real_LB_Dom_logit, _) = self.disLB(real_B)
            (fake_GA_logit, fake_GA_Dom_logit, _) = self.disGA(fake_B2A)
            (fake_LA_logit, fake_LA_Dom_logit, _) = self.disLA(fake_B2A)
            (fake_GB_logit, fake_GB_Dom_logit, _) = self.disGB(fake_A2B)
            (fake_LB_logit, fake_LB_Dom_logit, _) = self.disLB(fake_A2B)
            # LSGAN objective: real -> 1, fake -> 0, for both patch and domain heads.
            D_ad_loss_GA = (self.MSE_loss(real_GA_logit, torch.ones_like(real_GA_logit).to(self.device)) + self.MSE_loss(fake_GA_logit, torch.zeros_like(fake_GA_logit).to(self.device)))
            D_ad_Dom_loss_GA = (self.MSE_loss(real_GA_Dom_logit, torch.ones_like(real_GA_Dom_logit).to(self.device)) + self.MSE_loss(fake_GA_Dom_logit, torch.zeros_like(fake_GA_Dom_logit).to(self.device)))
            D_ad_loss_LA = (self.MSE_loss(real_LA_logit, torch.ones_like(real_LA_logit).to(self.device)) + self.MSE_loss(fake_LA_logit, torch.zeros_like(fake_LA_logit).to(self.device)))
            D_ad_Dom_loss_LA = (self.MSE_loss(real_LA_Dom_logit, torch.ones_like(real_LA_Dom_logit).to(self.device)) + self.MSE_loss(fake_LA_Dom_logit, torch.zeros_like(fake_LA_Dom_logit).to(self.device)))
            D_ad_loss_GB = (self.MSE_loss(real_GB_logit, torch.ones_like(real_GB_logit).to(self.device)) + self.MSE_loss(fake_GB_logit, torch.zeros_like(fake_GB_logit).to(self.device)))
            D_ad_Dom_loss_GB = (self.MSE_loss(real_GB_Dom_logit, torch.ones_like(real_GB_Dom_logit).to(self.device)) + self.MSE_loss(fake_GB_Dom_logit, torch.zeros_like(fake_GB_Dom_logit).to(self.device)))
            D_ad_loss_LB = (self.MSE_loss(real_LB_logit, torch.ones_like(real_LB_logit).to(self.device)) + self.MSE_loss(fake_LB_logit, torch.zeros_like(fake_LB_logit).to(self.device)))
            D_ad_Dom_loss_LB = (self.MSE_loss(real_LB_Dom_logit, torch.ones_like(real_LB_Dom_logit).to(self.device)) + self.MSE_loss(fake_LB_Dom_logit, torch.zeros_like(fake_LB_Dom_logit).to(self.device)))
            D_loss_A = (self.adv_weight * (((D_ad_loss_GA + D_ad_Dom_loss_GA) + D_ad_loss_LA) + D_ad_Dom_loss_LA))
            D_loss_B = (self.adv_weight * (((D_ad_loss_GB + D_ad_Dom_loss_GB) + D_ad_loss_LB) + D_ad_Dom_loss_LB))
            Discriminator_loss = (D_loss_A + D_loss_B)
            Discriminator_loss.backward()
            self.D_optim.step()

            # ---- Generator update (discriminators frozen implicitly by only
            # stepping G_optim) ----
            self.G_optim.zero_grad()
            (fake_A2B, fake_A2B_Dom_logit, _) = self.genA2B(real_A)
            (fake_B2A, fake_B2A_Dom_logit, _) = self.genB2A(real_B)
            # Cycle reconstructions and same-domain identity mappings.
            (fake_A2B2A, _, _) = self.genB2A(fake_A2B)
            (fake_B2A2B, _, _) = self.genA2B(fake_B2A)
            (fake_A2A, fake_A2A_Dom_logit, _) = self.genB2A(real_A)
            (fake_B2B, fake_B2B_Dom_logit, _) = self.genA2B(real_B)
            (fake_GA_logit, fake_GA_Dom_logit, _) = self.disGA(fake_B2A)
            (fake_LA_logit, fake_LA_Dom_logit, _) = self.disLA(fake_B2A)
            (fake_GB_logit, fake_GB_Dom_logit, _) = self.disGB(fake_A2B)
            (fake_LB_logit, fake_LB_Dom_logit, _) = self.disLB(fake_A2B)
            # Adversarial terms: generator tries to push fakes toward 1.
            G_ad_loss_GA = self.MSE_loss(fake_GA_logit, torch.ones_like(fake_GA_logit).to(self.device))
            G_ad_Dom_loss_GA = self.MSE_loss(fake_GA_Dom_logit, torch.ones_like(fake_GA_Dom_logit).to(self.device))
            G_ad_loss_LA = self.MSE_loss(fake_LA_logit, torch.ones_like(fake_LA_logit).to(self.device))
            G_ad_Dom_loss_LA = self.MSE_loss(fake_LA_Dom_logit, torch.ones_like(fake_LA_Dom_logit).to(self.device))
            G_ad_loss_GB = self.MSE_loss(fake_GB_logit, torch.ones_like(fake_GB_logit).to(self.device))
            G_ad_Dom_loss_GB = self.MSE_loss(fake_GB_Dom_logit, torch.ones_like(fake_GB_Dom_logit).to(self.device))
            G_ad_loss_LB = self.MSE_loss(fake_LB_logit, torch.ones_like(fake_LB_logit).to(self.device))
            G_ad_Dom_loss_LB = self.MSE_loss(fake_LB_Dom_logit, torch.ones_like(fake_LB_Dom_logit).to(self.device))
            # Cycle-consistency and identity L1 terms.
            G_recon_loss_A = self.L1_loss(fake_A2B2A, real_A)
            G_recon_loss_B = self.L1_loss(fake_B2A2B, real_B)
            G_identity_loss_A = self.L1_loss(fake_A2A, real_A)
            G_identity_loss_B = self.L1_loss(fake_B2B, real_B)
            # Domain-classification terms on the generators' own domain logits.
            G_dom_loss_A = (self.BCE_loss(fake_B2A_Dom_logit, torch.ones_like(fake_B2A_Dom_logit).to(self.device)) + self.BCE_loss(fake_A2A_Dom_logit, torch.zeros_like(fake_A2A_Dom_logit).to(self.device)))
            G_dom_loss_B = (self.BCE_loss(fake_A2B_Dom_logit, torch.ones_like(fake_A2B_Dom_logit).to(self.device)) + self.BCE_loss(fake_B2B_Dom_logit, torch.zeros_like(fake_B2B_Dom_logit).to(self.device)))
            if self.use_pecp_loss:
                # NOTE(review): the VGG perceptual loss module is re-built every
                # iteration here — likely expensive; consider hoisting to build_model.
                selfpecpvgg_loss = PerceptualLossVgg16(None, [0], weights=[1.0], indices=[22])
                loss_selfpecp = selfpecpvgg_loss(fake_A2B, real_A)
            if self.use_smooth_loss:
                # Soft shadow mask from input/output pair, used to weight smoothness.
                gen_mask = softmask_generator(real_A, fake_A2B)
                loss_smooth = smooth_loss_masked(fake_A2B, gen_mask)
            if self.use_ch_loss:
                # Chromaticity of the generated image: map [-1,1] -> [0,1],
                # normalize channels to sum to 1, then back to [-1,1].
                fake_A2B_ = ((fake_A2B + 1.0) / 2.0)
                ch_z = (fake_A2B_ / fake_A2B_.sum(dim=1, keepdim=True).clamp(min=1e-08))
                ch_z = ((2 * ch_z) - 1)
                ch_norm = real_C
                loss_ch = self.L1_loss(ch_z, ch_norm)
            G_loss_A = ((((self.adv_weight * (((G_ad_loss_GA + G_ad_Dom_loss_GA) + G_ad_loss_LA) + G_ad_Dom_loss_LA)) + (self.cycle_weight * G_recon_loss_A)) + (self.identity_weight * G_identity_loss_A)) + (self.dom_weight * G_dom_loss_A))
            G_loss_B = ((((self.adv_weight * (((G_ad_loss_GB + G_ad_Dom_loss_GB) + G_ad_loss_LB) + G_ad_Dom_loss_LB)) + (self.cycle_weight * G_recon_loss_B)) + (self.identity_weight * G_identity_loss_B)) + (self.dom_weight * G_dom_loss_B))
            Generator_loss = (G_loss_A + G_loss_B)
            # Auxiliary losses are added unweighted (see NOTE in __init__).
            if (self.use_ch_loss == True):
                Generator_loss = (Generator_loss + loss_ch)
            if (self.use_pecp_loss == True):
                Generator_loss = (Generator_loss + loss_selfpecp)
            if (self.use_smooth_loss == True):
                Generator_loss = (Generator_loss + loss_smooth)
            Generator_loss.backward()
            self.G_optim.step()
            # Keep generator rho parameters inside [0, 1].
            self.genA2B.apply(self.Rho_clipper)
            self.genB2A.apply(self.Rho_clipper)
            print(('[%5d/%5d] time: %4.4f d_loss: %.8f, g_loss: %.8f' % (step, self.iteration, (time.time() - start_time), Discriminator_loss, Generator_loss)))
            with torch.no_grad():
                if ((step % self.print_freq) == 0):
                    train_sample_num = 5
                    test_sample_num = 5
                    # Sample grid: each column is 4 stacked images; columns are
                    # appended horizontally (starts with width 0).
                    A2B = np.zeros(((self.img_size * 4), 0, 3))
                    (self.genA2B.eval(), self.genB2A.eval(), self.disGA.eval(), self.disGB.eval(), self.disLA.eval(), self.disLB.eval())
                    for _ in range(train_sample_num):
                        try:
                            (real_A, _) = trainA_iter.next()
                        except:
                            trainA_iter = iter(self.trainA_loader)
                            (real_A, _) = trainA_iter.next()
                        try:
                            (real_B, _) = trainB_iter.next()
                        except:
                            trainB_iter = iter(self.trainB_loader)
                            (real_B, _) = trainB_iter.next()
                        (real_A, real_B) = (real_A.to(self.device), real_B.to(self.device))
                        (fake_A2B, _, _) = self.genA2B(real_A)
                        (fake_B2A, _, _) = self.genB2A(real_B)
                        (fake_A2B2A, _, _) = self.genB2A(fake_A2B)
                        (fake_B2A2B, _, _) = self.genA2B(fake_B2A)
                        (fake_A2A, _, _) = self.genB2A(real_A)
                        (fake_B2B, _, _) = self.genA2B(real_B)
                        # Column: input A | A->B | A->A identity | A->B->A cycle.
                        A2B = np.concatenate((A2B, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_A[0]))), RGB2BGR(tensor2numpy(denorm(fake_A2B[0]))), RGB2BGR(tensor2numpy(denorm(fake_A2A[0]))), RGB2BGR(tensor2numpy(denorm(fake_A2B2A[0])))), 0)), 1)
                    for _ in range(test_sample_num):
                        try:
                            (real_A, _) = testA_iter.next()
                        except:
                            testA_iter = iter(self.testA_loader)
                            (real_A, _) = testA_iter.next()
                        try:
                            (real_B, _) = testB_iter.next()
                        except:
                            testB_iter = iter(self.testB_loader)
                            (real_B, _) = testB_iter.next()
                        if self.use_ch_loss:
                            try:
                                (real_C_test, _) = testC_iter.next()
                            except:
                                testC_iter = iter(self.testC_loader)
                                (real_C_test, _) = testC_iter.next()
                        (real_A, real_B) = (real_A.to(self.device), real_B.to(self.device))
                        if self.use_ch_loss:
                            real_C_test = real_C_test.to(self.device)
                        (fake_A2B, _, _) = self.genA2B(real_A)
                        (fake_B2A, _, _) = self.genB2A(real_B)
                        (fake_A2B2A, _, _) = self.genB2A(fake_A2B)
                        (fake_B2A2B, _, _) = self.genA2B(fake_B2A)
                        (fake_A2A, _, _) = self.genB2A(real_A)
                        (fake_B2B, _, _) = self.genA2B(real_B)
                        # NOTE(review): test columns are appended only under the
                        # smooth/ch branches — with both aux losses disabled, no
                        # test samples end up in the dumped grid.
                        if (self.use_smooth_loss == True):
                            gen_mask = softmask_generator(real_A, fake_A2B)
                            # Column: input A | A->B | soft mask | A->B->A cycle.
                            A2B = np.concatenate((A2B, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_A[0]))), RGB2BGR(tensor2numpy(denorm(fake_A2B[0]))), RGB2BGR(tensor2numpy(denorm(gen_mask[0]))), RGB2BGR(tensor2numpy(denorm(fake_A2B2A[0])))), 0)), 1)
                        if (self.use_ch_loss == True):
                            # Same chromaticity computation as in the training loss.
                            fake_A2B_ = ((fake_A2B + 1.0) / 2.0)
                            ch_z = (fake_A2B_ / fake_A2B_.sum(dim=1, keepdim=True).clamp(min=1e-08))
                            ch_z_test = ((2 * ch_z) - 1)
                            ch_norm_test = real_C_test
                            # Column: input A | A->B | chromaticity target | predicted chromaticity.
                            A2B = np.concatenate((A2B, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_A[0]))), RGB2BGR(tensor2numpy(denorm(fake_A2B[0]))), RGB2BGR(tensor2numpy(denorm(ch_norm_test[0]))), RGB2BGR(tensor2numpy(denorm(ch_z_test[0])))), 0)), 1)
                    cv2.imwrite(os.path.join(self.result_dir, self.dataset, 'train_img', ('A2B_%07d.png' % step)), (A2B * 255.0))
                    # Back to training mode after the eval-mode sampling pass.
                    (self.genA2B.train(), self.genB2A.train(), self.disGA.train(), self.disGB.train(), self.disLA.train(), self.disLB.train())
                if ((step % self.save_freq) == 0):
                    self.save(os.path.join(self.result_dir, self.dataset, 'model'), step)
                if ((step % 1000) == 0):
                    # Rolling "latest" checkpoint alongside the numbered ones.
                    params = {}
                    params['genA2B'] = self.genA2B.state_dict()
                    params['genB2A'] = self.genB2A.state_dict()
                    params['disGA'] = self.disGA.state_dict()
                    params['disGB'] = self.disGB.state_dict()
                    params['disLA'] = self.disLA.state_dict()
                    params['disLB'] = self.disLB.state_dict()
                    torch.save(params, os.path.join(self.result_dir, (self.dataset + '_params_latest.pt')))

    def save(self, dir, step):
        """Save all six networks' state dicts to ``dir`` as a single
        ``<dataset>_params_<step>.pt`` file."""
        params = {}
        params['genA2B'] = self.genA2B.state_dict()
        params['genB2A'] = self.genB2A.state_dict()
        params['disGA'] = self.disGA.state_dict()
        params['disGB'] = self.disGB.state_dict()
        params['disLA'] = self.disLA.state_dict()
        params['disLB'] = self.disLB.state_dict()
        torch.save(params, os.path.join(dir, (self.dataset + ('_params_%07d.pt' % step))))

    def load(self, dir, step):
        """Load all six networks' state dicts from the checkpoint written by
        :meth:`save` for the given ``step``."""
        params = torch.load(os.path.join(dir, (self.dataset + ('_params_%07d.pt' % step))))
        self.genA2B.load_state_dict(params['genA2B'])
        self.genB2A.load_state_dict(params['genB2A'])
        self.disGA.load_state_dict(params['disGA'])
        self.disGB.load_state_dict(params['disGB'])
        self.disLA.load_state_dict(params['disLA'])
        self.disLB.load_state_dict(params['disLB'])
def build_negative_set(plt_set1, plt_set2, car_set1, car_set2, ptrn_set, ptst_set, amount, multiply):
    """Assemble shuffled negative train/test splits.

    Collects ``len(ptrn_set) + multiply * len(ptst_set)`` negative samples,
    shuffles them in place, and splits them so the training portion matches
    the positive training set size; the remainder becomes the test portion.

    Returns:
        Tuple ``(negative_train, negative_test)``.
    """
    total = len(ptrn_set) + multiply * len(ptst_set)
    samples = collecting_negative_samples(plt_set1, plt_set2, car_set1, car_set2, total, amount)
    np.random.shuffle(samples)
    split = len(ptrn_set)
    return (samples[:split], samples[split:])
def test_log_linear_equals_log_linear_exp_log():
    """Verify log_linear_exp in slog space matches a dense matmul reference.

    Builds a random (9, 5) input and (5, 7) kernel, computes the slog
    (sign, log-magnitude) representation of ``x @ kernel`` directly, and
    checks that ``log_linear_exp`` reproduces both components.
    """
    key = jax.random.PRNGKey(0)
    key, subkey = jax.random.split(key)
    x = jax.random.normal(subkey, (9, 5))
    sign_x, log_x = slog_helpers.array_to_slog(x)
    key, subkey = jax.random.split(key)
    kernel = jax.random.normal(subkey, (5, 7))
    # Reference: dense product converted to slog form.
    expected_sign, expected_log = slog_helpers.array_to_slog(jnp.dot(x, kernel))
    actual_sign, actual_log = log_linear_exp(sign_x, log_x, kernel, axis=-1)
    np.testing.assert_allclose(actual_sign, expected_sign)
    np.testing.assert_allclose(actual_log, expected_log, rtol=1e-05)