code
stringlengths
101
5.91M
class MultiSingingSpeechMixValidation(Dataset):
    """Validation dataset mixing a lead singing source with singing/speech accompaniment.

    Each entry of `data_dir` is a 4-tuple:
      [0] root of source-1 audio, [1] root of source-2 audio,
      [2] path to a JSON of precomputed valid regions/metadata,
      [3] dataset kind tag ('singing_speech' selects the dual-root layout).
    """
    dataset_name = 'multi_singing_with_speech_valid'

    def __init__(self, data_dir, sample_rate=24000, n_src=2, segment=6, augment=True):
        self.source_1_paths = []
        self.source_2_data_root = []
        self.metadata_list = []
        for data_dir_set in data_dir:
            # Metadata JSON maps source-1 basenames -> mixing metadata.
            with open(data_dir_set[2], 'r') as json_file:
                self.valid_regions_dict = json.load(json_file)
            for (key, value) in self.valid_regions_dict.items():
                self.source_1_paths.append(f'{data_dir_set[0]}/{key}')
                if (data_dir_set[3] == 'singing_speech'):
                    # Dual roots: even src_idx -> speech root [1], odd -> singing root [0].
                    self.source_2_data_root.append([data_dir_set[0], data_dir_set[1]])
                else:
                    self.source_2_data_root.append(data_dir_set[1])
                self.metadata_list.append(value)
        self.segment = segment            # segment length in seconds
        self.sample_rate = sample_rate
        self.n_src = n_src
        self.augment = augment
        self.meter = pyln.Meter(self.sample_rate)  # loudness meter for normalization

    def __len__(self):
        return len(self.source_1_paths)

    def __getitem__(self, idx):
        # Returns (mixture, sources) where sources stacks [source_1, source_2].
        data_path_1 = self.source_1_paths[idx]
        metadata = self.metadata_list[idx]
        sources_list = []
        source_1 = load_wav_specific_position_mono(data_path_1, self.sample_rate, self.segment, 0.0)
        source_2 = 0.0
        for (src_idx, src_basename) in enumerate(metadata['corresponding_data']):
            if (metadata['type'] == 'singing+speech'):
                # Alternate between the two roots depending on source index parity.
                if ((src_idx % 2) == 0):
                    source_2_temp_file_path = f'{self.source_2_data_root[idx][1]}/{src_basename}'
                else:
                    source_2_temp_file_path = f'{self.source_2_data_root[idx][0]}/{src_basename}'
            else:
                source_2_temp_file_path = f'{self.source_2_data_root[idx]}/{src_basename}'
            source_2_temp = load_wav_specific_position_mono(source_2_temp_file_path, self.sample_rate, self.segment, metadata['position(sec)'][src_idx])
            if metadata['unison_aug']:
                # Unison augmentation with precomputed pitch/formant parameters.
                source_2_temp = change_pitch_and_formant(source_2_temp, self.sample_rate, metadata['unison_params'][src_idx][0], metadata['unison_params'][src_idx][1], 1, metadata['unison_params'][src_idx][3])
            source_2_temp = (source_2_temp * metadata['gain_adjustment'][src_idx])
            # NOTE(review): each iteration replaces source_2 rather than summing into it,
            # so only the last corresponding source survives despite the 0.0 init —
            # looks like it may have been meant to accumulate; confirm against the training set.
            source_2 = source_2_temp
        if self.augment:
            (source_1, source_2) = loudness_match_and_norm(source_1, source_2, self.meter)
        mixture = (source_1 + source_2)
        # Normalize the mixture to -24 LUFS and apply the same gain to the stems.
        (mixture, adjusted_gain) = loudnorm(mixture, (- 24.0), self.meter)
        source_1 = (source_1 * db2linear(adjusted_gain))
        source_2 = (source_2 * db2linear(adjusted_gain))
        sources_list.append(source_1)
        sources_list.append(source_2)
        mixture = torch.as_tensor(mixture, dtype=torch.float32)
        sources = np.vstack(sources_list)
        sources = torch.as_tensor(sources, dtype=torch.float32)
        return (mixture, sources)
def create_env(env_id, args, rank=(- 1)):
    """Build the pose environment matching *env_id*.

    Ids containing 'v0' use the base 2D pose environment; everything else
    uses the full one. `rank` is accepted but unused here.
    """
    wants_base_env = 'v0' in env_id
    if wants_base_env:
        import ENV.DigitalPose2DBase as pose_module
    else:
        import ENV.DigitalPose2D as pose_module
    return pose_module.gym.make(env_id, args.render_save)
def splitlines(lines: 'list[str]', sep: 'str | None' = None) -> 'list[list[str]]':
    """Split each string in *lines* into tokens.

    sep=None uses whitespace splitting (str.split default).
    Fix: the original annotation `ty.N[str]` is not a valid typing construct;
    string annotations are used so no import is required.
    """
    return [line.split(sep) for line in lines]
class Apollo(Optimizer):
    """Apollo optimizer: quasi-Newton SGD with a diagonal Hessian approximation.

    Args:
        params: iterable of parameters or param groups.
        lr: learning rate (> 0).
        beta: EMA coefficient for the gradient average, in [0, 1).
        eps: term added for numerical stability.
        rebound: 'constant' (fixed 0.01 lower bound) or 'belief'
            (bound from the gradient-change infinity norm).
        warmup: number of linear LR warmup steps from init_lr to lr.
        init_lr: warmup starting LR; defaults to lr / 1000.
        weight_decay / weight_decay_type: decay strength and style
            ('L2' adds to the gradient, 'decoupled'/'stable' add to the update).
    """

    def __init__(self, params, lr, beta=0.9, eps=0.0001, rebound='constant', warmup=500, init_lr=None, weight_decay=0, weight_decay_type=None):
        if (not (0.0 < lr)):
            raise ValueError('Invalid learning rate value: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= beta < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(beta))
        if (rebound not in ['constant', 'belief']):
            raise ValueError('Invalid recitifed bound: {}'.format(rebound))
        if (not (0.0 <= warmup)):
            raise ValueError('Invalid warmup updates: {}'.format(warmup))
        if (init_lr is None):
            init_lr = (lr / 1000)
        if (not (0.0 <= init_lr <= lr)):
            raise ValueError('Invalid initial learning rate: {}'.format(init_lr))
        if (not (0.0 <= weight_decay)):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        if (weight_decay_type is None):
            # Default decay style depends on the rebound mode.
            weight_decay_type = ('L2' if (rebound == 'constant') else 'decoupled')
        if (weight_decay_type not in ['L2', 'decoupled', 'stable']):
            raise ValueError('Invalid weight decay type: {}'.format(weight_decay_type))
        defaults = dict(lr=lr, beta=beta, eps=eps, rebound=rebound, warmup=warmup, init_lr=init_lr, base_lr=lr, weight_decay=weight_decay, weight_decay_type=weight_decay_type)
        super(Apollo, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Apollo, self).__setstate__(state)
        # NOTE(review): `_grad` is not defined anywhere visible — this call will
        # raise NameError on unpickling. Likely corruption of the original source
        # (upstream Apollo's __setstate__ only calls super); confirm and remove.
        _grad()

    def step(self, closure=None):
        """Perform a single optimization step; returns closure's loss if given."""
        loss = None
        if (closure is not None):
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                state = self.state[p]
                # Lazy state initialization on first step for this parameter.
                if (len(state) == 0):
                    state['step'] = 0
                    state['exp_avg_grad'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['approx_hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['update'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                # Linear LR warmup from init_lr to base_lr over `warmup` steps.
                if (state['step'] < group['warmup']):
                    curr_lr = ((((group['base_lr'] - group['init_lr']) * state['step']) / group['warmup']) + group['init_lr'])
                else:
                    curr_lr = group['lr']
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Atom does not support sparse gradients.')
                # L2-style decay folds directly into the gradient.
                if ((group['weight_decay'] != 0) and (group['weight_decay_type'] == 'L2')):
                    grad = grad.add(p, alpha=group['weight_decay'])
                beta = group['beta']
                eps = group['eps']
                exp_avg_grad = state['exp_avg_grad']
                B = state['approx_hessian']   # diagonal Hessian approximation
                d_p = state['update']         # previous parameter update direction
                state['step'] += 1
                bias_correction = (1 - (beta ** state['step']))
                alpha = ((1 - beta) / bias_correction)
                delta_grad = (grad - exp_avg_grad)
                if (group['rebound'] == 'belief'):
                    # Bound from the gradient change magnitude.
                    rebound = delta_grad.norm(p=np.inf)
                else:
                    rebound = 0.01
                    eps = (eps / rebound)
                exp_avg_grad.add_(delta_grad, alpha=alpha)
                # Normalize the previous update by its 4-norm before the BFGS-like update.
                denom = d_p.norm(p=4).add(eps)
                d_p.div_(denom)
                v_sq = d_p.mul(d_p)
                delta = (delta_grad.div_(denom).mul_(d_p).sum().mul((- alpha)) - B.mul(v_sq).sum())
                # Rank-one diagonal update of the Hessian approximation.
                B.addcmul_(v_sq, delta)
                if (group['rebound'] == 'belief'):
                    denom = torch.max(B.abs(), rebound).add_((eps / alpha))
                else:
                    denom = B.abs().clamp_(min=rebound)
                d_p.copy_(exp_avg_grad.div(denom))
                # Decoupled / stable weight decay applies to the update, not the gradient.
                if ((group['weight_decay'] != 0) and (group['weight_decay_type'] != 'L2')):
                    if (group['weight_decay_type'] == 'stable'):
                        weight_decay = (group['weight_decay'] / denom.mean().item())
                    else:
                        weight_decay = group['weight_decay']
                    d_p.add_(p, alpha=weight_decay)
                p.add_(d_p, alpha=(- curr_lr))
        return loss
def test_default_args():
    """Reassigning a variable captured as a default argument should be flagged stale.

    `foo`'s default y=x binds x at definition time, so calls after `x = 10`
    silently use the stale value 7 — the tracker must detect this on the
    second call only.
    """
    run_cell('\n x = 7\n def foo(y=x):\n return y + 5\n ')
    run_cell('a = foo()')
    assert_not_detected()
    run_cell('x = 10')
    # Reassignment alone is not a staleness event until foo is called again.
    assert_not_detected()
    run_cell('b = foo()')
    assert_detected('Should have detected stale dependency of fn foo() on x')
class BatchWhiten(Module):
    """Batch whitening layer: decorrelates features via an inverse-sqrt covariance.

    Mirrors the structure of torch BatchNorm but tracks a running covariance
    matrix instead of mean/var. No affine parameters.
    """

    def __init__(self, num_features: int, momentum: float=0.1, track_running_stats: bool=True, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(BatchWhiten, self).__init__()
        self.num_features = num_features
        self.momentum = momentum
        self.track_running_stats = track_running_stats
        if self.track_running_stats:
            # Running covariance starts as identity (already whitened).
            self.register_buffer('running_covar', torch.eye(num_features, **factory_kwargs))
            self.running_covar: Optional[Tensor]
            # Counter buffer stays integer — drop dtype from factory kwargs.
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long, **{k: v for (k, v) in factory_kwargs.items() if (k != 'dtype')}))
            self.num_batches_tracked: Optional[Tensor]
        else:
            self.register_buffer('running_covar', None)
            self.register_buffer('num_batches_tracked', None)
        self.reset_parameters()

    def reset_running_stats(self):
        # Reset covariance to identity in place (preserves device/dtype).
        if self.track_running_stats:
            self.running_covar.fill_diagonal_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        self.reset_running_stats()

    def forward(self, input: Tensor) -> Tensor:
        # Follows the BatchNorm momentum convention: None => cumulative average.
        if (self.momentum is None):
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum
        if (self.training and self.track_running_stats):
            if (self.num_batches_tracked is not None):
                self.num_batches_tracked.add_(1)
                if (self.momentum is None):
                    exponential_average_factor = (1.0 / float(self.num_batches_tracked))
                else:
                    exponential_average_factor = self.momentum
        '\n        Decide whether the mini-batch stats should be used for normalization rather than the buffers.\n        Mini-batch stats are used in training mode, and in eval mode when buffers are None.\n        '
        if self.training:
            bn_training = True
        else:
            bn_training = (self.running_covar is None)
        running_covar = (self.running_covar if ((not self.training) or self.track_running_stats) else None)
        # Batch covariance (assumes input is 2D (batch, features) — input.T @ input / N).
        covar = (torch.matmul(input.T, input) / input.shape[0])
        if bn_training:
            with torch.no_grad():
                if (running_covar is not None):
                    # EMA update of the running covariance.
                    running_covar.mul_(exponential_average_factor).add_(covar, alpha=(1 - exponential_average_factor))
                if (running_covar is not None):
                    covar = running_covar
                # Whitening matrix; computed under no_grad, so no gradient flows through B.
                B = inv_sqrtm(covar)
            input = torch.matmul(input, B)
            return input
        else:
            # NOTE(review): eval mode with tracked stats returns the input
            # unwhitened — verify this is intended (whitening only in training).
            return input
def continue_training(logdir):
    """Resume a TensorVision training run from the artifacts stored in *logdir*.

    Loads hyperparameters/modules from the log directory, rebuilds the training
    and validation graphs, restores the latest checkpoint, and continues the
    training loop from the restored global step.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)
    with tf.Session() as sess:
        with tf.name_scope('Queues'):
            queue = modules['input'].create_queues(hypes, 'train')
        tv_graph = core.build_training_graph(hypes, queue, modules)
        tv_sess = core.start_tv_session(hypes)
        # NOTE(review): rebinds `sess` to the session created by start_tv_session;
        # the outer `with tf.Session()` session is effectively unused.
        sess = tv_sess['sess']
        saver = tv_sess['saver']
        # Append to the existing output log rather than truncating it.
        logging_file = os.path.join(logdir, 'output.log')
        utils.create_filewrite_handler(logging_file, mode='a')
        logging.info('Continue training.')
        cur_step = core.load_weights(logdir, sess, saver)
        if (cur_step is None):
            logging.warning('Loaded global_step is None.')
            logging.warning('This could mean, that no weights have been loaded.')
            logging.warning('Starting Training with step 0.')
            cur_step = 0
        with tf.name_scope('Validation'):
            # Reuse training variables for the inference (validation) graph.
            tf.get_variable_scope().reuse_variables()
            image_pl = tf.placeholder(tf.float32)
            image = tf.expand_dims(image_pl, 0)
            image.set_shape([1, None, None, 3])
            inf_out = core.build_inference_graph(hypes, modules, image=image)
            tv_graph['image_pl'] = image_pl
            tv_graph['inf_out'] = inf_out
        modules['input'].start_enqueuing_threads(hypes, queue, 'train', sess)
        run_training(hypes, modules, tv_graph, tv_sess, cur_step)
        # Shut down the input queue threads cleanly.
        tv_sess['coord'].request_stop()
        tv_sess['coord'].join(tv_sess['threads'])
class ImageTransformer(object):
    """Resize images to a square, scale values to [-1, 1], and emit NCHW batches.

    Expects sample['images'] to be an iterable of HWC arrays; replaces it with
    a single stacked float32 array of shape (N, C, size, size).
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        processed = []
        for img in sample['images']:
            h, w = img.shape[0:2]
            # Only invoke the resizer when the spatial size actually differs.
            if h != self.output_size or w != self.output_size:
                img = cv2.resize(img, (self.output_size, self.output_size))
            scaled = img.astype(np.float32) / 255.0
            scaled = scaled * 2 - 1          # map [0, 1] -> [-1, 1]
            processed.append(np.transpose(scaled, (2, 0, 1)))  # HWC -> CHW
        sample['images'] = np.stack(processed, axis=0)
        return sample
class SimpleModel(Model):
    """Two-headed MLP model producing 'state' and 'action' outputs from observations."""

    def __init__(self, output_dim=2, hidden_sizes=(4, 4), name=None):
        super().__init__(name)
        self._output_dim = output_dim
        self._hidden_sizes = hidden_sizes

    def network_output_spec(self):
        """Names of the network outputs, in emission order."""
        return ['state', 'action']

    def _build(self, obs_input, name=None):
        del name  # unused; required by the Model._build interface
        state_head = mlp(obs_input, self._output_dim, self._hidden_sizes, 'state')
        action_head = mlp(obs_input, self._output_dim, self._hidden_sizes, 'action')
        return (state_head, action_head)
def test_chunk_text_preprocessor():
    """Chunked fitting (partial_fit over chunksize rows) must reproduce one-shot fitting.

    Verifies equivalence by round-tripping through inverse_transform and
    comparing the reconstructed DataFrames.
    """
    df = pd.read_csv(os.path.join(data_folder, fname))
    # Reference: fit the whole DataFrame at once.
    text_processor = TextPreprocessor(text_col=text_col, n_cpus=1, maxlen=10, max_vocab=50)
    X_text = text_processor.fit_transform(df)
    # Candidate: fit incrementally over CSV chunks.
    chunk_text_processor = ChunkTextPreprocessor(text_col=text_col, n_chunks=n_chunks, n_cpus=1, maxlen=10, max_vocab=50)
    for chunk in pd.read_csv(os.path.join(data_folder, fname), chunksize=chunksize):
        chunk_text_processor.partial_fit(chunk)
    X_text_chunk = chunk_text_processor.transform(df)
    reconstruced_df = text_processor.inverse_transform(X_text)
    reconstruced_df_chunk = chunk_text_processor.inverse_transform(X_text_chunk)
    assert reconstruced_df.equals(reconstruced_df_chunk)
def evaluate(args, model, tokenizer, prefix=''):
    """Evaluate *model* on the task's dev set(s) and return a metrics dict.

    MNLI evaluates both matched and mismatched splits; results are also
    written to eval_results.txt under each output directory.
    """
    eval_task_names = (('mnli', 'mnli-mm') if (args.task_name == 'mnli') else (args.task_name,))
    eval_outputs_dirs = ((args.output_dir, (args.output_dir + '-MM')) if (args.task_name == 'mnli') else (args.output_dir,))
    results = {}
    for (eval_task, eval_output_dir) in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
        # Only the main process creates the output directory.
        if ((not os.path.exists(eval_output_dir)) and (args.local_rank in [(- 1), 0])):
            os.makedirs(eval_output_dir)
        args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # Wrap in DataParallel for multi-GPU eval (idempotent check).
        if ((args.n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
            model = torch.nn.DataParallel(model)
        logger.info('***** Running evaluation {} *****'.format(prefix))
        logger.info(' Num examples = %d', len(eval_dataset))
        logger.info(' Batch size = %d', args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            model.eval()
            batch = tuple((t.to(args.device) for t in batch))
            with torch.no_grad():
                inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
                if (args.model_type != 'distilbert'):
                    # segment ids only for models that use them; None otherwise
                    inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet', 'albert']) else None)
                outputs = model(**inputs)
                (tmp_eval_loss, logits) = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate logits and labels across batches as numpy arrays.
            if (preds is None):
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
        eval_loss = (eval_loss / nb_eval_steps)
        if (args.output_mode == 'classification'):
            preds = np.argmax(preds, axis=1)
        elif (args.output_mode == 'regression'):
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        output_eval_file = os.path.join(eval_output_dir, prefix, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results {} *****'.format(prefix))
            for key in sorted(result.keys()):
                logger.info(' %s = %s', key, str(result[key]))
                writer.write(('%s = %s\n' % (key, str(result[key]))))
    return results
def check_has_diff_elements(given_set: 'list | set', universal_set: 'list | set', msg: str = ''):
    """Raise ValueError if *given_set* has elements not present in *universal_set*.

    Args:
        given_set: elements to validate (any iterable of hashables).
        universal_set: the allowed elements.
        msg: error-message template; may reference the offending elements
            with the mapping key '%(diff_set)s'.

    Raises:
        ValueError: when at least one element of given_set is missing from
            universal_set. (The original annotation `(list or set)` evaluated
            to just `list`; replaced with an accurate string annotation.)
    """
    diff_set = set(given_set) - set(universal_set)
    if diff_set:  # truthiness instead of len(...) > 0
        raise ValueError(msg % {'diff_set': diff_set})
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False, pre_eval=False):
    """Multi-GPU test entry point.

    NOTE(review): this currently just delegates to single_gpu_test with
    pre_eval hard-coded to True; the tmpdir, gpu_collect, and caller-supplied
    pre_eval arguments are all ignored. Confirm whether this is a deliberate
    simplification (e.g. distributed collection handled elsewhere) or an
    unfinished implementation.
    """
    results = single_gpu_test(model, data_loader, pre_eval=True)
    return results
def guassian_rev_tozero_tolinear(x, prec=tf.float64):
    """Elementwise map: 0.5 * x for x > 0, and 0 elsewhere (i.e. 0.5 * relu(x)).

    Fix/simplification: the original built an inner reversed-Gaussian branch
    `1 - exp(-x*x)` selected for x > 0, but the outer tf.where replaced it
    with the linear branch for every x > 0, so it was dead code; for x <= 0
    both formulations yield 0. Behavior is unchanged.

    Args:
        x: input tensor.
        prec: accepted for API compatibility but unused (also unused originally).
    """
    a = 0.5     # linear slope for the positive side
    b = (- 0.)  # intercept (kept from the original parameterization)
    return tf.where(tf.greater(x, 0.0), ((a * x) + b), tf.zeros_like(x))
class DataIterator():
    """Bucketing minibatch iterator over a parallel (src, tgt) text corpus.

    Supports token- or sentence-based batch sizing, length filtering for
    training, data sharding across replicas, and (for dev/test) preservation
    of the original target strings in iteration order.
    """

    def __init__(self, dataset, batch_size, batch_by_tokens, max_src_length, max_tgt_length, buffer_multiple_size, device, model_path, len_diff=(- 1), len_ratio=(- 1), multi_scale=1, corpus='train', bucket_data=True, rank=(- 1), num_replicas=0):
        self.train = False
        self.device = device
        # Select file paths by corpus split; only dev/test carry original targets.
        if (corpus == 'train'):
            self.src_path = dataset.train_src_path
            self.tgt_path = dataset.train_tgt_path
            self.tgt_path_ori = None
            self.train = True
        elif (corpus == 'dev'):
            self.src_path = dataset.dev_src_path
            self.tgt_path = dataset.dev_tgt_path
            self.tgt_path_ori = dataset.dev_tgt_path_ori
        elif (corpus == 'test'):
            self.src_path = dataset.test_src_path
            self.tgt_path = dataset.test_tgt_path
            self.tgt_path_ori = dataset.test_tgt_path_ori
        else:
            raise ValueError
        self.corpus = corpus
        self.batch_size = batch_size
        # Token-based batching uses max_tok_len as the size function.
        self.batch_size_fn = (max_tok_len if batch_by_tokens else None)
        self.max_src_length = max_src_length
        self.max_tgt_length = max_tgt_length
        self.len_diff = len_diff      # max |len(src) - len(tgt)|; <= 0 disables
        self.len_ratio = len_ratio    # max len ratio either way; <= 0 disables
        self.multi_scale = multi_scale  # targets padded to a multiple of this
        self.src_word2id = dataset.src_word2id
        self.tgt_word2id = dataset.tgt_word2id
        # Sharding is only valid for training.
        if (rank < 0):
            assert (num_replicas == 0)
        else:
            assert (corpus == 'train')
            assert (rank < num_replicas)
            assert (self.tgt_path_ori is None)
        self.rank = rank
        self.num_replicas = num_replicas
        (self.data_size, self.data) = self.get_dataset()
        self.batches = None
        if self.train:
            # Shuffle buffer: several batches' worth of examples.
            self.buffer_size = (buffer_multiple_size * self.batch_size)
            assert (buffer_multiple_size > 0)
        else:
            self.buffer_size = (- 1)
        self.src_pad_idx = self.src_word2id[SRC_PAD]
        self.tgt_pad_idx = self.tgt_word2id[TGT_PAD]
        self.bucket = bucket_data
        self.sents_num = 0  # sentences served so far (for epoch())
        self.tgt_sort_origin_path = os.path.join(model_path, (os.path.basename(self.tgt_path) + '.sort'))

    def filter_sents(self, s_tokens, t_tokens):
        """Return True when a sentence pair violates length constraints."""
        if ((self.max_tgt_length > 0) and (self.max_src_length > 0)):
            if (((len(s_tokens) + 1) > self.max_src_length) or ((len(t_tokens) + 1) > self.max_tgt_length)):
                return True
        if (self.len_diff > 0):
            if (abs((len(s_tokens) - len(t_tokens))) > self.len_diff):
                return True
        if (self.len_ratio > 0):
            ratio = (len(t_tokens) / len(s_tokens))
            if ((ratio > self.len_ratio) or (ratio < (1.0 / self.len_ratio))):
                return True
        return False

    def pad_tgt(self, tgt):
        """Pad the target id list with EOS up to the next multiple of multi_scale."""
        scale = self.multi_scale
        tgt_len = len(tgt)
        # res == remainder, or `scale` when already divisible (adds 0 below).
        res = ((tgt_len % scale) if ((tgt_len % scale) > 0) else scale)
        tgt_len = ((scale - res) + tgt_len)
        tgt = (tgt + ([self.tgt_word2id[EOS]] * (tgt_len - len(tgt))))
        return tgt

    def get_dataset(self):
        """Read and numericalize the corpus; returns (count, examples)."""
        count = 0
        data = []
        outliers = 0
        (src_path, tgt_path) = (self.src_path, self.tgt_path)
        tgt_ori_path = self.tgt_path_ori
        ftgt_ori = (None if (tgt_ori_path is None) else codecs.open(tgt_ori_path, 'r', encoding='utf-8'))
        with codecs.open(src_path, 'r', encoding='utf-8') as fsrc, codecs.open(tgt_path, 'r', encoding='utf-8') as ftgt:
            for (id, (s, t)) in enumerate(zip(fsrc, ftgt)):
                # Shard lines round-robin across replicas.
                if ((self.num_replicas > 0) and ((id % self.num_replicas) != self.rank)):
                    continue
                s_tokens = s.strip().split()
                t_tokens = t.strip().split()
                t_ori = (ftgt_ori.readline().strip() if (ftgt_ori is not None) else None)
                # Map tokens to ids (UNK fallback); source gets a trailing EOS.
                src = ([(self.src_word2id[word] if (word in self.src_word2id) else self.src_word2id[UNK]) for word in s_tokens] + [self.src_word2id[EOS]])
                tgt = [(self.tgt_word2id[word] if (word in self.tgt_word2id) else self.tgt_word2id[UNK]) for word in t_tokens]
                tgt = self.pad_tgt(tgt)
                if (self.train and self.filter_sents(src, tgt)):
                    # Training drops over-length pairs entirely.
                    outliers += 1
                    continue
                else:
                    if (not self.train):
                        # Eval keeps everything, but still counts outliers for the log.
                        data.append((src, tgt, t_ori))
                        if self.filter_sents(src, tgt):
                            outliers += 1
                    else:
                        data.append((src, tgt))
                    count += 1
        print(f'Load total {count} sentences pairs, {outliers} are out of maximum sentence length!')
        return (count, data)

    def batch(self, batch_size):
        """Yield lists of examples whose accumulated size reaches batch_size."""
        batch_size_fn = self.batch_size_fn
        if (batch_size_fn is None):
            # Sentence-count batching: size == number of examples so far.
            def batch_size_fn(new, count):
                return count
        (minibatch, size_so_far) = ([], 0)
        for ex in self.data:
            minibatch.append(ex)
            size_so_far = batch_size_fn(ex, len(minibatch))
            if (size_so_far == batch_size):
                (yield minibatch)
                (minibatch, size_so_far) = ([], 0)
            elif (size_so_far > batch_size):
                # Overshoot: emit all but the last example, carry it over.
                (yield minibatch[:(- 1)])
                (minibatch, size_so_far) = (minibatch[(- 1):], batch_size_fn(ex, 1))
        if minibatch:
            (yield minibatch)

    def process_batch(self, minibatch):
        """Pad a minibatch and return (src, tgt, src_mask, tgt_mask) tensors on device."""
        src_max_len = max([len(d[0]) for d in minibatch])
        tgt_max_len = max([len(d[1]) for d in minibatch])
        (padded_src, padded_tgt) = ([], [])
        src_mask = []
        tgt_mask = []
        for d in minibatch:
            (s, t) = (d[0], d[1])
            padded_src.append((s + ([self.src_pad_idx] * (src_max_len - len(s)))))
            padded_tgt.append((t + ([self.tgt_pad_idx] * (tgt_max_len - len(t)))))
            src_mask.append((([1.0] * len(s)) + ([0.0] * (src_max_len - len(s)))))
            tgt_mask.append((([1.0] * len(t)) + ([0.0] * (tgt_max_len - len(t)))))
        padded_src = torch.from_numpy(np.array(padded_src)).long().to(self.device)
        padded_tgt = torch.from_numpy(np.array(padded_tgt)).long().to(self.device)
        src_mask = torch.from_numpy(np.array(src_mask)).float().to(self.device)
        tgt_mask = torch.from_numpy(np.array(tgt_mask)).float().to(self.device)
        return (padded_src, padded_tgt, src_mask, tgt_mask)

    def init_epoch(self):
        """(Re)build self.batches; lazily for training, eagerly for eval."""
        if self.train:
            def _pool():
                # Shuffle within buffers of buffer_size examples, then batch.
                for p in self.batch(self.buffer_size):
                    if self.bucket:
                        p_batch = bucket_batch_iter(p, self.batch_size, batch_size_fn=self.batch_size_fn, shuffle=True)
                    else:
                        p_batch = batch_iter(random.sample(p, len(p)), self.batch_size, batch_size_fn=self.batch_size_fn)
                    p_batch = list(p_batch)
                    for b in p_batch:
                        (yield b)
            self.batches = _pool()
        else:
            if (self.batches is None):
                self.batches = []
            else:
                self.batches.clear()
            iter_func = (bucket_batch_iter if self.bucket else batch_iter)
            for b in iter_func(self.data, self.batch_size, batch_size_fn=self.batch_size_fn):
                self.batches.append(b)

    def __iter__(self):
        # Training iterates forever (epoch after epoch); eval yields one pass
        # and writes the original targets in served order, then stops.
        while True:
            self.init_epoch()
            tgt_ori_sents = []
            for (idx, minibatch) in enumerate(self.batches):
                self.sents_num += len(minibatch)
                if (not self.train):
                    tgt_ori_sents.append([d[2] for d in minibatch])
                (src_batch, tgt_batch, src_mask, tgt_mask) = self.process_batch(minibatch)
                (yield (src_batch, tgt_batch, src_mask, tgt_mask))
            if (not self.train):
                with codecs.open(self.tgt_sort_origin_path, 'w', encoding='utf-8') as fout:
                    for b in tgt_ori_sents:
                        for sent in b:
                            fout.write((sent + '\n'))
                return

    def get_batch(self, batch_size):
        """Return one random processed batch (sampling without replacement)."""
        batch = random.sample(self.data, batch_size)
        return self.process_batch(batch)

    def epoch(self):
        """Fraction of an epoch served so far (can exceed 1.0 across epochs)."""
        return ((self.sents_num * 1.0) / self.data_size)

    def __len__(self):
        # Batch count is only well-defined for sentence-count batching.
        if (self.batch_size_fn is not None):
            raise NotImplementedError
        return math.ceil((self.data_size / self.batch_size))
class SpatialMaxPooling(Layer):
    """2D max-pooling layer wrapper (BigDL-style JVM binding).

    Args:
        kw, kh: pooling kernel width/height.
        dw, dh: horizontal/vertical strides.
        pad_w, pad_h: zero padding on each side.
        to_ceil: use ceil (True) or floor (False) for output size rounding.
        format: data layout, 'NCHW' or 'NHWC'.
        bigdl_type: numeric precision tag for the backend.
    """

    def __init__(self, kw, kh, dw, dh, pad_w=0, pad_h=0, to_ceil=False, format='NCHW', bigdl_type='float'):
        # All construction happens in the backend; arguments are forwarded verbatim.
        super(SpatialMaxPooling, self).__init__(None, bigdl_type, kw, kh, dw, dh, pad_w, pad_h, to_ceil, format)
class World():
    """Physics-world wrapper around a pybullet client.

    Owns gravity/timestep/frame-skip settings and applies them to the client.
    """

    def __init__(self, bullet_client, gravity, timestep, frame_skip):
        self._p = bullet_client
        self.gravity = gravity          # magnitude; applied as -z below
        self.timestep = timestep
        self.frame_skip = frame_skip
        self.numSolverIterations = 5
        self.clean_everything()

    def clean_everything(self):
        # (Re)apply gravity and solver parameters to the physics engine.
        self._p.setGravity(0, 0, (- self.gravity))
        self._p.setDefaultContactERP(0.9)
        # One engine step covers frame_skip substeps of `timestep` each.
        self._p.setPhysicsEngineParameter(fixedTimeStep=(self.timestep * self.frame_skip), numSolverIterations=self.numSolverIterations, numSubSteps=self.frame_skip)

    def step(self, frame_skip):
        # `frame_skip` argument is unused; sub-stepping is configured in
        # clean_everything() via numSubSteps.
        self._p.stepSimulation()
class ShaResUnit(nn.Module):
    """Residual unit whose body can share a convolution across units.

    Chooses a bottleneck or plain block body, adds a 1x1 projection on the
    skip path whenever channels or stride change, and finishes with ReLU.
    """

    def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride, shared_conv=None):
        super(ShaResUnit, self).__init__()
        # Skip path needs a projection when the shape changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        if bottleneck:
            self.body = ShaResBottleneck(in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride, shared_conv=shared_conv)
        else:
            self.body = ShaResBlock(in_channels=in_channels, out_channels=out_channels, stride=stride, shared_conv=shared_conv)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        shortcut = self.identity_conv(x) if self.resize_identity else x
        out = self.body(x)
        out = out + shortcut
        return self.activ(out)
def main(not_parsed_args):
    """Convert every dataset image to its Y (luma) channel and save as BMP.

    Reads images from FLAGS.data_dir/FLAGS.dataset/ and writes the converted
    copies to a sibling directory suffixed with '_y'.
    """
    if (len(not_parsed_args) > 1):
        print(('Unknown args:%s' % not_parsed_args))
        exit()
    print('Building Y channel data...')
    training_filenames = util.get_files_in_directory((((FLAGS.data_dir + '/') + FLAGS.dataset) + '/'))
    target_dir = (((FLAGS.data_dir + '/') + FLAGS.dataset) + '_y/')
    util.make_dir(target_dir)
    for file_path in training_filenames:
        org_image = util.load_image(file_path)
        # Only 3-channel (RGB) images need the Y conversion; others pass through.
        if (org_image.shape[2] == 3):
            org_image = util.convert_rgb_to_y(org_image)
        filename = os.path.basename(file_path)
        (filename, extension) = os.path.splitext(filename)
        new_filename = (target_dir + filename)
        # Always write BMP regardless of the source extension.
        util.save_image((new_filename + '.bmp'), org_image)
def main():
    """Training entry point: parse config, build loaders/model, optionally resume, then train/validate."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='', help='Config path.')
    args = parser.parse_args()
    with open(args.config) as f:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted files; prefer yaml.safe_load if the config is external.
        opt = yaml.load(f)
    opt = EasyDict(opt['common'])
    # Linear LR scaling rule relative to a reference batch size of 128.
    opt.learning_rate = (opt.learning_rate * (128.0 / opt.batch_size))
    print(opt)
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    tb_logger.configure(opt.logger_name, flush_secs=5)
    vocab = pickle.load(open(os.path.join(opt.vocab_path, ('%s_vocab.pkl' % opt.data_name)), 'rb'))
    opt.vocab_size = len(vocab)
    opt.distributed = False
    (train_loader, val_loader) = data.get_loaders(opt.data_name, vocab, opt.crop_size, opt.batch_size, opt.workers, opt)
    print(len(train_loader), len(val_loader), opt.batch_size)
    model = CAMP(opt)
    best_rsum = 0
    # Optionally resume model weights, best score, and iteration counter.
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            start_epoch = checkpoint['epoch']
            best_rsum = checkpoint['best_rsum']
            model.load_state_dict(checkpoint['model'])
            model.Eiters = checkpoint['Eiters']
            print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})".format(opt.resume, start_epoch, best_rsum))
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))
    for epoch in range(opt.num_epochs):
        adjust_learning_rate(opt, model.optimizer, epoch)
        train(opt, train_loader, model, epoch, val_loader, tb_logger)
        # Validate (and checkpoint) every opt.val_epoc epochs.
        if ((epoch % opt.val_epoc) == 0):
            rsum = validate(opt, val_loader, model, tb_logger)
            is_best = (rsum > best_rsum)
            best_rsum = max(rsum, best_rsum)
            save_checkpoint({'epoch': (epoch + 1), 'model': model.state_dict(), 'best_rsum': best_rsum, 'opt': opt, 'Eiters': model.Eiters}, is_best, filename=(('checkpoint_' + str(epoch)) + '.pth.tar'), prefix=(opt.logger_name + '/'))
class WatermarkLogitsProcessor(WatermarkBase, LogitsProcessor):
    """LogitsProcessor that embeds a watermark by biasing 'greenlist' token logits.

    For each batch row, a context-dependent greenlist is drawn (via the base
    class) and a constant bias `self.delta` is added to those tokens' logits.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _calc_greenlist_mask(self, scores: torch.FloatTensor, greenlist_token_ids) -> torch.BoolTensor:
        # Build a boolean mask, same shape as scores, True at each row's greenlisted ids.
        green_tokens_mask = torch.zeros_like(scores)
        for b_idx in range(len(greenlist_token_ids)):
            green_tokens_mask[b_idx][greenlist_token_ids[b_idx]] = 1
        final_mask = green_tokens_mask.bool()
        return final_mask

    def _bias_greenlist_logits(self, scores: torch.Tensor, greenlist_mask: torch.Tensor, greenlist_bias: float) -> torch.Tensor:
        # Add a constant bias to every masked logit (mutates scores in place).
        scores[greenlist_mask] = (scores[greenlist_mask] + greenlist_bias)
        return scores

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        if (self.rng is None):
            # Lazily create the RNG on the same device as the incoming ids.
            self.rng = torch.Generator(device=input_ids.device)
        batched_greenlist_ids = [None for _ in range(input_ids.shape[0])]
        for b_idx in range(input_ids.shape[0]):
            # Greenlist depends on the row's preceding token context.
            greenlist_ids = self._get_greenlist_ids(input_ids[b_idx])
            batched_greenlist_ids[b_idx] = greenlist_ids
        green_tokens_mask = self._calc_greenlist_mask(scores=scores, greenlist_token_ids=batched_greenlist_ids)
        scores = self._bias_greenlist_logits(scores=scores, greenlist_mask=green_tokens_mask, greenlist_bias=self.delta)
        return scores
class FastGeLUFunction(torch.autograd.Function):
    """Autograd wrapper pairing the fused gelu_fwd / gelu_bwd kernels.

    Fix: forward/backward of a torch.autograd.Function must be @staticmethod;
    the originals were plain methods (their first parameter is already `ctx`),
    which modern PyTorch rejects when invoked via .apply().
    """

    @staticmethod
    def forward(ctx, input):
        # Stash the input for the backward pass.
        ctx.save_for_backward(input)
        return gelu_fwd(input)

    @staticmethod
    def backward(ctx, grad_output):
        (input,) = ctx.saved_tensors
        tmp = gelu_bwd(grad_output, input)
        return tmp
def load_CIFAR10(data_root):
    """Return (train, val, test) CIFAR-10 datasets.

    Train uses AutoAugment + normalization; val/test use normalization only.
    `valset` wraps the same underlying train split with eval transforms, and
    train_val_split carves the actual train/val partition out of the pair.
    """
    transform_train = transforms.Compose([transforms.AutoAugment(transforms.AutoAugmentPolicy.CIFAR10), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    trainset = datasets.CIFAR10(root=data_root, train=True, download=True, transform=transform_train)
    valset = datasets.CIFAR10(root=data_root, train=True, download=True, transform=transform_test)
    testset = datasets.CIFAR10(root=data_root, train=False, download=True, transform=transform_test)
    (train, val) = train_val_split(trainset, valset)
    return (train, val, testset)
def load_torch_model_from_checkpoint(checkpoint: Union[(str, Path)], model: torch.nn.Module, map_location: str=None) -> torch.nn.Module:
    """Load weights from *checkpoint* into *model*, unwrapping common containers.

    Handles three layouts: a pickled DataParallel module (keys prefixed
    'module.'), a dict with a 'model' entry, and a dict with a 'state_dict'
    entry (keys prefixed 'model.'). Returns the model with weights loaded.
    """
    if not torch.cuda.is_available():
        map_location = 'cpu'  # never map tensors onto an absent GPU

    loaded = torch.load(checkpoint, map_location=map_location)

    def _strip_prefix(sd, prefix):
        # Rebuild the dict with `prefix` removed from matching keys, order preserved.
        cleaned = OrderedDict()
        for key, tensor in sd.items():
            cleaned[key[len(prefix):] if key.startswith(prefix) else key] = tensor
        return cleaned

    if isinstance(loaded, DataParallel):
        logging.debug(' [torch-DataParallel]:')
        state_dict = _strip_prefix(loaded.state_dict(), 'module.')
    elif isinstance(loaded, dict) and ('model' in loaded):
        logging.debug(' [torch-model-attr]:')
        state_dict = loaded['model']
    elif isinstance(loaded, dict) and ('state_dict' in loaded):
        logging.debug(' [torch-state_dict-attr]:')
        state_dict = _strip_prefix(loaded['state_dict'], 'model.')
    else:
        state_dict = loaded

    model.load_state_dict(state_dict)
    return model
class SystemResponse(Event):
    """Timed system-response event: latency measured between tick() and tock().

    Fix: __str__ previously interpolated the bound method `self.description`
    (rendering '<bound method ...>'); it now calls `self.description()`.
    """

    def __init__(self, session_token: str=None):
        super().__init__(session_token)
        self.type = 'SYSTEM_RESPONSE'
        self.latency = None  # seconds; populated by tock()

    def tick(self):
        """Record the start time; returns self for chaining."""
        self._start_time = time.time()
        return self

    def tock(self):
        """Record the end time and compute latency; returns self for chaining."""
        assert hasattr(self, '_start_time'), 'Did you forget to run .tick() ?'
        self._end_time = time.time()
        self.latency = (self._end_time - self._start_time)
        return self

    def description(self) -> str:
        """Human-readable summary; subclasses are expected to override."""
        return None

    def __str__(self) -> str:
        # Bug fix: call description() rather than formatting the bound method.
        return f'{str(self.timestamp)} [latency {np.round(self.latency, 2)} s]: [{self.type}] -- {self.description()}'
def set_all_env_var(conf, overwrite_existing=False):
    """Export CPU-affinity settings from *conf* as environment variables.

    Falls back to one instance using all physical cores when conf is empty,
    validates instance/core counts against the physical core count, then
    exports every key as an upper-cased environment variable via set_env_var.

    NOTE(review): the original source carried a stray `(version='2.0')`
    fragment before the def — presumably the argument list of a stripped
    decorator (e.g. a deprecation marker); it was invalid syntax and removed.
    """
    cpu_counts = psutil.cpu_count(logical=False)  # physical cores only
    if (not conf):
        conf = {}
        conf['num_of_instance'] = 1
        conf['cores_per_instance'] = cpu_counts
    if ('cores_per_instance' in conf):
        assert ((conf['cores_per_instance'] * conf['num_of_instance']) <= cpu_counts), 'num_of_instance * cores_per_instance should <= cpu physical cores'
    else:
        assert (conf['num_of_instance'] <= cpu_counts), 'num_of_instance should <= cpu counts'
        # Split the physical cores evenly across instances.
        conf['cores_per_instance'] = int((cpu_counts / conf['num_of_instance']))
    for (var, value) in conf.items():
        set_env_var(var.upper(), value, overwrite_existing)
def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
    """2D convolution with optional up/downsampling and FIR filtering.

    Args:
        x: input tensor of shape [N, C, H, W].
        w: weight tensor of shape [OC, IC/groups, kh, kw], same dtype as x.
        f: optional float32 FIR filter (1D separable or 2D).
        up, down: integer up/downsampling factors (>= 1).
        padding: int or per-edge padding spec (parsed by _parse_padding).
        groups: grouped-convolution group count.
        flip_weight: True = convolution, False = cross-correlation.
        flip_filter: flip the FIR filter spatially.

    NOTE(review): a stray `_function` token preceded this def in the original —
    the tail of a stripped decorator (e.g. `@misc.profiled_function`); removed.
    """
    # Validate arguments.
    assert (isinstance(x, torch.Tensor) and (x.ndim == 4))
    assert (isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype))
    assert ((f is None) or (isinstance(f, torch.Tensor) and (f.ndim in [1, 2]) and (f.dtype == torch.float32)))
    assert (isinstance(up, int) and (up >= 1))
    assert (isinstance(down, int) and (down >= 1))
    assert (isinstance(groups, int) and (groups >= 1))
    (out_channels, in_channels_per_group, kh, kw) = _get_weight_shape(w)
    (fw, fh) = _get_filter_size(f)
    (px0, px1, py0, py1) = _parse_padding(padding)
    # Adjust padding to account for up/downsampling.
    if (up > 1):
        px0 += (((fw + up) - 1) // 2)
        px1 += ((fw - up) // 2)
        py0 += (((fh + up) - 1) // 2)
        py1 += ((fh - up) // 2)
    if (down > 1):
        px0 += (((fw - down) + 1) // 2)
        px1 += ((fw - down) // 2)
        py0 += (((fh - down) + 1) // 2)
        py1 += ((fh - down) // 2)
    # Fast path: 1x1 kernel + downsampling only => filter first, then conv.
    if ((kw == 1) and (kh == 1) and ((down > 1) and (up == 1))):
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0, px1, py0, py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        return x
    # Fast path: 1x1 kernel + upsampling only => conv first, then filter.
    if ((kw == 1) and (kh == 1) and ((up > 1) and (down == 1))):
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0, px1, py0, py1], gain=(up ** 2), flip_filter=flip_filter)
        return x
    # Fast path: downsampling only => strided convolution.
    if ((down > 1) and (up == 1)):
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0, px1, py0, py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
        return x
    # Fast path: upsampling (with optional downsampling) => transposed convolution.
    if (up > 1):
        # Rearrange weights for the transposed convolution.
        if (groups == 1):
            w = w.transpose(0, 1)
        else:
            w = w.reshape(groups, (out_channels // groups), in_channels_per_group, kh, kw)
            w = w.transpose(1, 2)
            w = w.reshape((groups * in_channels_per_group), (out_channels // groups), kh, kw)
        px0 -= (kw - 1)
        px1 -= (kw - up)
        py0 -= (kh - 1)
        py1 -= (kh - up)
        # Negative padding is handled by the conv; pxt/pyt is the non-negative part.
        pxt = max(min((- px0), (- px1)), 0)
        pyt = max(min((- py0), (- py1)), 0)
        x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt, pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[(px0 + pxt), (px1 + pxt), (py0 + pyt), (py1 + pyt)], gain=(up ** 2), flip_filter=flip_filter)
        if (down > 1):
            x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
        return x
    # Fast path: no resampling, symmetric non-negative padding => plain conv.
    if ((up == 1) and (down == 1)):
        if ((px0 == px1) and (py0 == py1) and (px0 >= 0) and (py0 >= 0)):
            return _conv2d_wrapper(x=x, w=w, padding=[py0, px0], groups=groups, flip_weight=flip_weight)
    # Fallback: generic path (filter, conv, optional downsample).
    x = upfirdn2d.upfirdn2d(x=x, f=(f if (up > 1) else None), up=up, padding=[px0, px1, py0, py1], gain=(up ** 2), flip_filter=flip_filter)
    x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
    if (down > 1):
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
    return x
def get_nx_graph(file_path, full_node_list, sep='\t'):
    """Build an undirected weighted graph from an edge-list file.

    The file must provide 'from_id' and 'to_id' columns; when no third column
    exists every edge gets a default weight of 1.0.  All nodes in
    ``full_node_list`` are added (keeping isolated nodes) and self-loops are
    removed.
    """
    edges = pd.read_csv(file_path, sep=sep)
    # Two columns means the file is unweighted -- supply a constant weight.
    unweighted = edges.shape[1] == 2
    if unweighted:
        edges['weight'] = 1.0
    g = nx.from_pandas_edgelist(edges, 'from_id', 'to_id', edge_attr='weight', create_using=nx.Graph)
    g.add_nodes_from(full_node_list)
    loops = nx.selfloop_edges(g)
    g.remove_edges_from(loops)
    return g
def load(config):
    """Instantiate the trainer class named by ``config.trainer.name``.

    The class is looked up in this module's global namespace and called with
    ``config`` as its only argument.

    Raises:
        Exception: if no trainer class with that name exists in this module.
    """
    cls_name = config.trainer.name
    # Fix: keep the `try` body to the lookup only.  Previously the constructor
    # call was inside the `try`, so a KeyError raised *inside* the trainer's
    # __init__ was misreported as "No such trainer".
    try:
        cls = globals()[cls_name]
    except KeyError:
        raise Exception('No such trainer: {}'.format(cls_name))
    return cls(config)
def extract_raw_features(ds, path_data, layer, max_dim, mode='keyframes'):
    """Compute convolutional features for a dataset's keyframes.

    Selects ``ds.keyframes`` when ``mode == 'keyframes'`` and ``ds.q_keyframes``
    (query keyframes) otherwise, then delegates to ``compute_features``.

    Returns the path reported by ``compute_features``.
    """
    target_dir = os.path.join(path_data, ds.dataset, 'conv_features', mode, layer, str(max_dim))
    images = ds.keyframes if mode == 'keyframes' else ds.q_keyframes
    return compute_features(images, target_dir, layer, max_dim)
def run_main():
    """Scan the SCT testing dataset and log the set of image contrasts found.

    Walks every ``<subject>/anat`` folder under ``PATH_SCTTESTING``, extracts
    the contrast suffix of each ``*.nii.gz`` filename (everything after the
    first underscore) and logs the de-duplicated contrast list.
    """
    if (not os.path.isdir(PATH_SCTTESTING)):
        logger.warning(f'''
This folder does not exist: {PATH_SCTTESTING}''')
        logger.warning('Please change the path at the top of this file')
        # Fix: bail out instead of falling through to os.listdir(), which
        # would raise FileNotFoundError on the missing folder.
        return
    subj_lst = [os.path.join(PATH_SCTTESTING, s, 'anat') for s in os.listdir(PATH_SCTTESTING) if os.path.isdir(os.path.join(PATH_SCTTESTING, s, 'anat'))]
    logger.info(f'''
{len(subj_lst)} subjects found.
''')
    contrast_lst_lst = []
    for subj_fold in tqdm(subj_lst, desc='Scanning dataset'):
        img_lst = [i for i in os.listdir(subj_fold) if i.endswith('.nii.gz')]
        # '<subj>_<contrast>.nii.gz' -> keep everything after the first '_'.
        contrast_cur_lst = ['_'.join(c.split('.nii.gz')[0].split('_')[1:]) for c in img_lst]
        contrast_lst_lst.append(contrast_cur_lst)
    # Flatten the per-subject lists into one list of contrasts.
    contrast_lst = [sublst for lst in contrast_lst_lst for sublst in lst]
    contrast_lst_noDuplicate = list(set(contrast_lst))
    logger.info(f'''
{len(contrast_lst_noDuplicate)} contrasts found.
''')
    logger.info(f"['{', '.join(contrast_lst_noDuplicate)}']")
class FlaxEncoderDecoderModel(metaclass=DummyObject):
    # Auto-generated "dummy object" placeholder (standard Hugging Face pattern):
    # it stands in for the real FlaxEncoderDecoderModel when the optional `flax`
    # backend is not installed.
    # Backends that `requires_backends` checks for.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # NOTE(review): presumably raises an informative error when 'flax' is
        # unavailable -- the behavior lives in `requires_backends`/`DummyObject`,
        # both defined elsewhere; confirm there.
        requires_backends(self, ['flax'])
class VanEncoder(nn.Module):
    """VAN encoder: a stack of `VanStage` modules, each downsampling via a
    patch embedding and applying `depth` blocks with stochastic depth."""

    def __init__(self, config: VanConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        patch_sizes = config.patch_sizes
        strides = config.strides
        hidden_sizes = config.hidden_sizes
        depths = config.depths
        mlp_ratios = config.mlp_ratios
        # Linearly increasing drop-path schedule with one entry per *block*
        # (sum(depths) entries).
        # NOTE(review): the zip below pairs it with the per-*stage* lists, so
        # only the first len(depths) rates are ever used and each stage gets a
        # single scalar rate -- confirm this truncation is intended.
        drop_path_rates = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        for (num_stage, (patch_size, stride, hidden_size, depth, mlp_expantion, drop_path_rate)) in enumerate(zip(patch_sizes, strides, hidden_sizes, depths, mlp_ratios, drop_path_rates)):
            is_first_stage = (num_stage == 0)
            # Each stage consumes the previous stage's hidden size, except the
            # first, which consumes the raw input channels.
            in_channels = hidden_sizes[(num_stage - 1)]
            if is_first_stage:
                in_channels = config.num_channels
            self.stages.append(VanStage(config, in_channels, hidden_size, patch_size=patch_size, stride=stride, depth=depth, mlp_ratio=mlp_expantion, drop_path_rate=drop_path_rate))

    def forward(self, hidden_state: torch.Tensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[(Tuple, BaseModelOutputWithNoAttention)]:
        """Run all stages; optionally collect each stage's output.

        Returns a `BaseModelOutputWithNoAttention` (or a plain tuple when
        `return_dict` is False) with the last hidden state and, if requested,
        the per-stage hidden states.
        """
        all_hidden_states = (() if output_hidden_states else None)
        for (_, stage_module) in enumerate(self.stages):
            hidden_state = stage_module(hidden_state)
            if output_hidden_states:
                all_hidden_states = (all_hidden_states + (hidden_state,))
        if (not return_dict):
            # Drop None entries so the tuple only contains produced values.
            return tuple((v for v in [hidden_state, all_hidden_states] if (v is not None)))
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=all_hidden_states)
class LSegmentationModule(pl.LightningModule):
    """PyTorch Lightning module for semantic segmentation (LSeg-style).

    Subclasses are expected to provide ``self.net`` (backbone with a
    ``pretrained`` submodule and optional ``scratch``/``auxlayer``/``scale*``
    heads), ``self.criterion``, and the train/val transforms -- none of these
    are created here.
    """

    def __init__(self, data_path, dataset, batch_size, base_lr, max_epochs, **kwargs):
        super().__init__()
        self.data_path = data_path
        self.batch_size = batch_size
        # Linear LR scaling rule: base_lr is defined for batch size 16.
        self.base_lr = ((base_lr / 16) * batch_size)
        self.lr = self.base_lr
        self.epochs = max_epochs
        self.other_kwargs = kwargs
        # AMP is wired in but disabled; autocast/GradScaler become no-ops.
        self.enabled = False
        self.scaler = amp.GradScaler(enabled=self.enabled)

    def forward(self, x):
        return self.net(x)

    def evaluate(self, x, target=None):
        """Predict; with a target also return pixel-accuracy / IoU counts."""
        pred = self.net.forward(x)
        # Multi-output nets return (main, aux, ...); keep the main prediction.
        if isinstance(pred, (tuple, list)):
            pred = pred[0]
        if (target is None):
            return pred
        (correct, labeled) = batch_pix_accuracy(pred.data, target.data)
        (inter, union) = batch_intersection_union(pred.data, target.data, self.nclass)
        return (correct, labeled, inter, union)

    def evaluate_random(self, x, labelset, target=None):
        """Same as `evaluate` but forwards an explicit label set to the net."""
        pred = self.net.forward(x, labelset)
        if isinstance(pred, (tuple, list)):
            pred = pred[0]
        if (target is None):
            return pred
        (correct, labeled) = batch_pix_accuracy(pred.data, target.data)
        (inter, union) = batch_intersection_union(pred.data, target.data, self.nclass)
        return (correct, labeled, inter, union)

    def training_step(self, batch, batch_nb):
        (img, target) = batch
        with amp.autocast(enabled=self.enabled):
            out = self(img)
            multi_loss = isinstance(out, tuple)
            if multi_loss:
                # Criterion consumes (main, aux, ..., target) when the net
                # produces multiple outputs.
                loss = self.criterion(*out, target)
            else:
                loss = self.criterion(out, target)
            loss = self.scaler.scale(loss)
        final_output = (out[0] if multi_loss else out)
        (train_pred, train_gt) = self._filter_invalid(final_output, target)
        # Only update accuracy when at least one valid (non-ignored) pixel exists.
        if (train_gt.nelement() != 0):
            self.train_accuracy(train_pred, train_gt)
        self.log('train_loss', loss)
        return loss

    def training_epoch_end(self, outs):
        self.log('train_acc_epoch', self.train_accuracy.compute())

    def validation_step(self, batch, batch_nb):
        (img, target) = batch
        out = self(img)
        multi_loss = isinstance(out, tuple)
        if multi_loss:
            val_loss = self.criterion(*out, target)
        else:
            val_loss = self.criterion(out, target)
        final_output = (out[0] if multi_loss else out)
        (valid_pred, valid_gt) = self._filter_invalid(final_output, target)
        self.val_iou.update(target, final_output)
        # get() returns running (pixel accuracy, mIoU) over the epoch so far.
        (pixAcc, iou) = self.val_iou.get()
        self.log('val_loss_step', val_loss)
        self.log('pix_acc_step', pixAcc)
        self.log('val_acc_step', self.val_accuracy(valid_pred, valid_gt))
        self.log('val_iou', iou)

    def validation_epoch_end(self, outs):
        (pixAcc, iou) = self.val_iou.get()
        self.log('val_acc_epoch', self.val_accuracy.compute())
        self.log('val_iou_epoch', iou)
        self.log('pix_acc_epoch', pixAcc)
        # Reset the running IoU metric for the next validation epoch.
        self.val_iou.reset()

    def _filter_invalid(self, pred, target):
        # Drop pixels labelled with the ignore index; reduce logits to class ids.
        valid = (target != self.other_kwargs['ignore_index'])
        (_, mx) = torch.max(pred, dim=1)
        return (mx[valid], target[valid])

    def configure_optimizers(self):
        """Build the optimizer (Adam for the midas protocol, else SGD) with
        10x LR on the non-pretrained heads, plus a polynomial-decay scheduler."""
        params_list = [{'params': self.net.pretrained.parameters(), 'lr': self.base_lr}]
        if hasattr(self.net, 'scratch'):
            print('Found output scratch')
            params_list.append({'params': self.net.scratch.parameters(), 'lr': (self.base_lr * 10)})
        if hasattr(self.net, 'auxlayer'):
            print('Found auxlayer')
            params_list.append({'params': self.net.auxlayer.parameters(), 'lr': (self.base_lr * 10)})
        if hasattr(self.net, 'scale_inv_conv'):
            print(self.net.scale_inv_conv)
            print('Found scaleinv layers')
            params_list.append({'params': self.net.scale_inv_conv.parameters(), 'lr': (self.base_lr * 10)})
            params_list.append({'params': self.net.scale2_conv.parameters(), 'lr': (self.base_lr * 10)})
            params_list.append({'params': self.net.scale3_conv.parameters(), 'lr': (self.base_lr * 10)})
            params_list.append({'params': self.net.scale4_conv.parameters(), 'lr': (self.base_lr * 10)})
        if self.other_kwargs['midasproto']:
            print('Using midas optimization protocol')
            opt = torch.optim.Adam(params_list, lr=self.base_lr, betas=(0.9, 0.999), weight_decay=self.other_kwargs['weight_decay'])
            # Polynomial decay: (1 - epoch/epochs)^0.9.
            sch = torch.optim.lr_scheduler.LambdaLR(opt, (lambda x: pow((1.0 - (x / self.epochs)), 0.9)))
        else:
            opt = torch.optim.SGD(params_list, lr=self.base_lr, momentum=0.9, weight_decay=self.other_kwargs['weight_decay'])
            sch = torch.optim.lr_scheduler.LambdaLR(opt, (lambda x: pow((1.0 - (x / self.epochs)), 0.9)))
        return ([opt], [sch])

    def train_dataloader(self):
        # Re-seed each worker from wall-clock time so augmentations differ
        # across workers/epochs.
        return torch.utils.data.DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True, num_workers=16, worker_init_fn=(lambda x: random.seed((time.time() + x))))

    def val_dataloader(self):
        return torch.utils.data.DataLoader(self.valset, batch_size=self.batch_size, shuffle=False, num_workers=16)

    def get_trainset(self, dset, augment=False, **kwargs):
        """Create the training dataset; also initialises num_classes and the
        training accuracy metric as side effects."""
        print(kwargs)
        if (augment == True):
            mode = 'train_x'
        else:
            mode = 'train'
        print(mode)
        dset = get_dataset(dset, root=self.data_path, split='train', mode=mode, transform=self.train_transform, **kwargs)
        self.num_classes = dset.num_class
        self.train_accuracy = pl.metrics.Accuracy()
        return dset

    def get_valset(self, dset, augment=False, **kwargs):
        """Create the validation dataset and its metrics.

        NOTE(review): uses ``self.num_classes`` set by ``get_trainset`` -- call
        order matters."""
        self.val_accuracy = pl.metrics.Accuracy()
        self.val_iou = SegmentationMetric(self.num_classes)
        if (augment == True):
            mode = 'val_x'
        else:
            mode = 'val'
        print(mode)
        return get_dataset(dset, root=self.data_path, split='val', mode=mode, transform=self.val_transform, **kwargs)

    def get_criterion(self, **kwargs):
        return SegmentationLosses(se_loss=kwargs['se_loss'], aux=kwargs['aux'], nclass=self.num_classes, se_weight=kwargs['se_weight'], aux_weight=kwargs['aux_weight'], ignore_index=kwargs['ignore_index'])

    # NOTE(review): defined without @staticmethod even though it takes no self;
    # callers must invoke it on the class, not an instance -- confirm intent.
    def add_model_specific_args(parent_parser):
        """Extend a parent ArgumentParser with the module's CLI options."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--data_path', type=str, help='path where dataset is stored')
        parser.add_argument('--dataset', choices=get_available_datasets(), default='ade20k', help='dataset to train on')
        parser.add_argument('--batch_size', type=int, default=16, help='size of the batches')
        parser.add_argument('--base_lr', type=float, default=0.004, help='learning rate')
        parser.add_argument('--momentum', type=float, default=0.9, help='SGD momentum')
        parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight_decay')
        parser.add_argument('--aux', action='store_true', default=False, help='Auxilary Loss')
        parser.add_argument('--aux-weight', type=float, default=0.2, help='Auxilary loss weight (default: 0.2)')
        parser.add_argument('--se-loss', action='store_true', default=False, help='Semantic Encoding Loss SE-loss')
        parser.add_argument('--se-weight', type=float, default=0.2, help='SE-loss weight (default: 0.2)')
        parser.add_argument('--midasproto', action='store_true', default=False, help='midasprotocol')
        parser.add_argument('--ignore_index', type=int, default=(- 1), help='numeric value of ignore label in gt')
        parser.add_argument('--augment', action='store_true', default=False, help='Use extended augmentations')
        return parser
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Construct a ResNet and optionally initialise it from pretrained weights.

    ``strict=False`` is deliberate: checkpoints whose head differs from the
    constructed model still load all matching layers.
    """
    net = ResNet(block, layers, **kwargs)
    if not pretrained:
        return net
    weights = load_state_dict_from_url(model_urls[arch], progress=progress)
    net.load_state_dict(weights, strict=False)
    return net
def heatmap(s, fax=None, fill_value=None, nxticks=8, nyticks=6, cbar_label=None, scaling=1.0, vmin=None, vmax=None, with_cbar=True, transpose=True, cmap='viridis', cbar_pad=0.05, cbar_ax=None, remove_day_label=True):
    """Plot a day-by-time-of-day heatmap of a datetime-indexed series.

    Args:
        s: pandas Series with a DatetimeIndex, or a single-column DataFrame.
        fax: optional (figure, axes) pair; a new figure is created when None.
        fill_value: value for missing (date, time) cells; defaults to s.min().
        nxticks/nyticks: approximate number of ticks per axis.
        cbar_label: colorbar label; defaults to s.name, suppressed if it
            starts with '_'.
        scaling: multiplicative factor applied to the values.
        vmin/vmax: color limits; default to the data range.
        with_cbar: draw a colorbar on the figure.
        transpose: True puts time-of-day on the y axis and days on the x axis.
        remove_day_label: suppress the 'day' axis label.

    Returns:
        (figure, axes).
    """
    # Fix: use isinstance instead of `type(s) == pd.DataFrame` so DataFrame
    # subclasses are also unwrapped to their single column.
    if isinstance(s, pd.DataFrame):
        if (len(s.columns) != 1):
            raise ValueError(f'Expecting 1 column but got: {len(s.columns)}')
        s = s[s.columns[0]]
    if (fill_value is None):
        fill_value = s.min()
    if (cbar_label is None):
        cbar_label = s.name
    elif cbar_label.startswith('_'):
        # Leading underscore is the "no label" convention.
        cbar_label = None
    if (vmin is None):
        vmin = s.min()
    if (vmax is None):
        vmax = s.max()
    # Axis labels/formatters depend on the orientation.
    if transpose:
        xlabel = ('' if remove_day_label else 'day')

        def xtickformatter(el):
            return el.strftime('%b-%y')
        ylabel = 'Hour of day'

        def ytickformatter(el):
            return el.hour
        cbar_orientation = 'horizontal'
    else:
        xlabel = 'Hour of day'

        def xtickformatter(el):
            return el.hour
        ylabel = ('' if remove_day_label else 'day')

        def ytickformatter(el):
            return el.strftime('%m-%y')
        cbar_orientation = 'vertical'
    # Pivot to a (date x time-of-day) grid.
    df_heatmap = pd.DataFrame({'date': s.index.date, 'time': s.index.time, 'value_col': (s.values * scaling)})
    df_heatmap = df_heatmap.pivot(index='date', columns='time', values='value_col')
    df_heatmap.fillna(value=fill_value, inplace=True)
    if (fax is None):
        (f, ax) = plt.subplots()
    else:
        (f, ax) = fax
    if transpose:
        df_heatmap = df_heatmap.transpose()
    mappable = ax.pcolor(df_heatmap, cmap=cmap, vmin=vmin, vmax=vmax)
    ax.invert_yaxis()
    # Clamp tick counts to the available rows/columns.
    nyticks = min(len(df_heatmap.index), nyticks)
    nxticks = min(len(df_heatmap.columns), nxticks)
    yticks = range(0, len(df_heatmap.index), int((len(df_heatmap.index) / nyticks)))
    xticks = range(0, len(df_heatmap.columns), int((len(df_heatmap.columns) / nxticks)))
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
    ax.set_xticklabels([xtickformatter(el) for el in df_heatmap.columns[xticks]])
    ax.set_yticklabels([ytickformatter(el) for el in df_heatmap.index[yticks]])
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if with_cbar:
        f.colorbar(mappable, ax=cbar_ax, label=cbar_label, orientation=cbar_orientation, pad=cbar_pad)
    ax.xaxis.tick_top()
    ax.xaxis.set_label_position('top')
    return (f, ax)
class RoIAlignFunction(Function):
    """Autograd Function for RoI Align backed by the `roi_align_cuda` extension.

    NOTE(review): `forward`/`backward` are written old-style (no @staticmethod);
    this relies on legacy autograd.Function invocation -- confirm call sites
    before porting to modern PyTorch.
    """

    def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0):
        """Pool each RoI into a fixed (out_h, out_w) grid.

        Args:
            features: feature map `[batch, channels, H, W]` (must be CUDA).
            rois: RoI tensor, first dimension = number of RoIs.
            out_size: int or (out_h, out_w) tuple.
            spatial_scale: feature-map/image scale factor.
            sample_num: sampling points per bin (0 = adaptive, per extension).
        """
        if isinstance(out_size, int):
            out_h = out_size
            out_w = out_size
        elif isinstance(out_size, tuple):
            assert (len(out_size) == 2)
            assert isinstance(out_size[0], int)
            assert isinstance(out_size[1], int)
            (out_h, out_w) = out_size
        else:
            raise TypeError('"out_size" must be an integer or tuple of integers')
        # Stash everything backward() needs.
        ctx.spatial_scale = spatial_scale
        ctx.sample_num = sample_num
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()
        (batch_size, num_channels, data_height, data_width) = features.size()
        num_rois = rois.size(0)
        output = features.new_zeros(num_rois, num_channels, out_h, out_w)
        if features.is_cuda:
            # CUDA extension writes the pooled result into `output` in place.
            roi_align_cuda.forward(features, rois, out_h, out_w, spatial_scale, sample_num, output)
        else:
            # No CPU implementation is provided.
            raise NotImplementedError
        return output

    def backward(ctx, grad_output):
        """Propagate gradients to the input features (RoIs get no gradient)."""
        feature_size = ctx.feature_size
        spatial_scale = ctx.spatial_scale
        sample_num = ctx.sample_num
        rois = ctx.saved_tensors[0]
        assert ((feature_size is not None) and grad_output.is_cuda)
        (batch_size, num_channels, data_height, data_width) = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = rois.new_zeros(batch_size, num_channels, data_height, data_width)
            roi_align_cuda.backward(grad_output.contiguous(), rois, out_h, out_w, spatial_scale, sample_num, grad_input)
        # One gradient per forward() argument; non-tensor args get None.
        return (grad_input, grad_rois, None, None, None)
class EncoderImageFull(nn.Module):
    """Image encoder: a (pretrained) CNN backbone with its classifier head
    replaced by a linear projection into a joint embedding space."""

    def __init__(self, embed_size, finetune=False, cnn_type='vgg19', use_abs=False, no_imgnorm=False):
        """Args:
            embed_size: dimensionality of the joint embedding.
            finetune: whether backbone weights receive gradients.
            cnn_type: torchvision model name ('vgg*' or 'resnet*').
            use_abs: take the absolute value of the final embedding.
            no_imgnorm: skip the final L2 normalisation.
        """
        super(EncoderImageFull, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.use_abs = use_abs
        self.cnn = self.get_cnn(cnn_type, True)
        # Freeze (or unfreeze) the whole backbone.
        for param in self.cnn.parameters():
            param.requires_grad = finetune
        if cnn_type.startswith('vgg'):
            # Replace the last classifier layer with a projection to embed_size.
            self.fc = nn.Linear(self.cnn.classifier._modules['6'].in_features, embed_size)
            self.cnn.classifier = nn.Sequential(*list(self.cnn.classifier.children())[:(- 1)])
        elif cnn_type.startswith('resnet'):
            # ResNet backbone is DataParallel-wrapped, hence `.module`.
            self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)
            self.cnn.module.fc = nn.Sequential()
        self.init_weights()

    def get_cnn(self, arch, pretrained):
        """Instantiate a torchvision model and move it to GPU.

        NOTE(review): always wraps in DataParallel and calls .cuda() -- this
        class cannot run CPU-only as written.
        """
        if pretrained:
            print("=> using pre-trained model '{}'".format(arch))
            model = models.__dict__[arch](pretrained=True)
        else:
            print("=> creating model '{}'".format(arch))
            model = models.__dict__[arch]()
        if (arch.startswith('alexnet') or arch.startswith('vgg')):
            # For alexnet/vgg only the conv features are parallelised.
            model.features = nn.DataParallel(model.features)
            model.cuda()
        else:
            model = nn.DataParallel(model).cuda()
        return model

    def load_state_dict(self, state_dict):
        """Load weights, remapping classifier indices from checkpoints saved
        before Dropout layers were removed from `cnn.classifier`."""
        if ('cnn.classifier.1.weight' in state_dict):
            state_dict['cnn.classifier.0.weight'] = state_dict['cnn.classifier.1.weight']
            del state_dict['cnn.classifier.1.weight']
            state_dict['cnn.classifier.0.bias'] = state_dict['cnn.classifier.1.bias']
            del state_dict['cnn.classifier.1.bias']
            state_dict['cnn.classifier.3.weight'] = state_dict['cnn.classifier.4.weight']
            del state_dict['cnn.classifier.4.weight']
            state_dict['cnn.classifier.3.bias'] = state_dict['cnn.classifier.4.bias']
            del state_dict['cnn.classifier.4.bias']
        super(EncoderImageFull, self).load_state_dict(state_dict)

    def init_weights(self):
        """Xavier-style uniform init for the projection layer; zero bias."""
        r = (np.sqrt(6.0) / np.sqrt((self.fc.in_features + self.fc.out_features)))
        self.fc.weight.data.uniform_((- r), r)
        self.fc.bias.data.fill_(0)

    def forward(self, images):
        """Encode a batch of images into (optionally L2-normalised) embeddings."""
        features = self.cnn(images)
        # Normalise in the CNN feature space before projecting.
        features = l2norm(features)
        features = self.fc(features)
        if (not self.no_imgnorm):
            features = l2norm(features)
        if self.use_abs:
            features = torch.abs(features)
        return features
def evaluate(model, device, params, silent=True):
    """Evaluate the model on every (database, query) pickle pair.

    Filenames are paired positionally; the location name is the prefix before
    the first underscore and must match between database and query.

    Returns:
        dict mapping location name -> statistics from ``evaluate_dataset``.
    """
    assert len(params.eval_database_files) == len(params.eval_query_files)

    def _read_pickle(filename):
        # Pickled set descriptions live directly inside the dataset folder.
        full_path = os.path.join(params.dataset_folder, filename)
        with open(full_path, 'rb') as handle:
            return pickle.load(handle)

    stats = {}
    for database_file, query_file in zip(params.eval_database_files, params.eval_query_files):
        location_name = database_file.split('_')[0]
        temp = query_file.split('_')[0]
        assert location_name == temp, 'Database location: {} does not match query location: {}'.format(database_file, query_file)
        database_sets = _read_pickle(database_file)
        query_sets = _read_pickle(query_file)
        stats[location_name] = evaluate_dataset(model, device, params, database_sets, query_sets, silent=silent)
    return stats
def build_model(frames=172, shingles=8, bands=40, channels=1, codebook=2000):
    """Build a single-layer convolutional "codebook" model.

    One Conv2D layer whose kernel spans the full frequency axis (``bands``)
    and ``shingles`` consecutive frames, hopping ``shingles`` frames at a time
    along the time axis (no activation).
    """
    conv = Convolution2D(
        codebook,
        (bands, shingles),
        strides=(1, shingles),
        padding='same',
        activation=None,
        input_shape=(bands, frames, channels),
    )
    return Sequential([conv])
def load_h5_data_label_seg(h5_filename):
    """Load point data, class labels and per-point segmentation ids from an
    HDF5 file containing 'data', 'label' and 'pid' datasets.

    Returns:
        (data, label, seg) as in-memory arrays.
    """
    # Fix: open read-only and close the handle deterministically.  The
    # original `h5py.File(h5_filename)` relied on the default mode and leaked
    # the file handle; `[:]` materialises each dataset, so closing is safe.
    with h5py.File(h5_filename, 'r') as f:
        data = f['data'][:]
        label = f['label'][:]
        seg = f['pid'][:]
    return (data, label, seg)
def test_isotropic_eddington_dehnencore_in_nfw_beta_directint(): pot = potential.NFWPotential(amp=2.3, a=1.3) denspot = potential.DehnenCoreSphericalPotential(amp=2.5, a=1.15) dfp = eddingtondf(pot=pot, denspot=denspot) tol = 1e-08 check_beta_directint(dfp, tol, rmin=(pot._scale / 10.0), rmax=(pot._scale * 10.0), bins=31) return None
class Feature(object):
    """Feature extractor combining pretrained word embeddings and Probase
    statistics for (word1, word2, attribute, label) examples."""

    def __init__(self, config, probase, nlp=None):
        """Args:
            config: must provide `pretrain_embeddings` (list of dicts with
                'name'/'path'/'is_bin') and `data_type` (dataset split names).
            probase: Probase wrapper exposing `statistical_features`.
            nlp: optional lemmatizer with a `lemma(word)` method.
        """
        self.pretrain_embeddings = config.pretrain_embeddings
        self.data_type = config.data_type
        # X[split][embedding_name] -> feature matrix, y[split] -> labels.
        self.X = {d: {} for d in self.data_type}
        self.y = {}
        self.vectors = []
        self.load_vectors_from_path(self.pretrain_embeddings, clear=True)
        self.nlp = nlp
        self.pb = probase

    def extract_vector_features(self, dataset):
        """Populate X/y with embedding-based features for every split."""
        for (embed, vector) in tqdm(zip(self.pretrain_embeddings, self.vectors)):
            for dt in self.data_type:
                (_X, _y) = self.vector_features(getattr(dataset, dt), vector)
                (self.X[dt][embed['name']], self.y[dt]) = (_X, _y)

    def extract_statistical_features(self, dataset):
        """Populate X/y with Probase statistical features for every split."""
        for dt in tqdm(self.data_type):
            (_X, _y) = self.statistical_features(getattr(dataset, dt))
            (self.X[dt]['Probase'], self.y[dt]) = (_X, _y)

    def load_vectors_from_path(self, pretrain_embeddings, clear=True):
        """Load each pretrained embedding (word2vec text or binary format)."""
        if clear:
            self.vectors = []
        for pe in pretrain_embeddings:
            (embed_name, embed_path, is_binary) = (pe['name'], pe['path'], pe['is_bin'])
            print('Loading pretrain embeddings:', embed_name)
            if is_binary:
                self.vectors.append(KeyedVectors.load_word2vec_format(embed_path, binary=True))
            else:
                self.vectors.append(KeyedVectors.load_word2vec_format(embed_path))

    def wr(self, vector, w):
        """Word -> vector lookup, falling back to the lemma, then to ones.

        NOTE(review): the all-ones OOV fallback hard-codes dimension 300 --
        it assumes 300-d embeddings; confirm or use vector.vector_size.
        """
        if (w in vector.wv):
            return vector.wv[w]
        elif (self.nlp and (self.nlp.lemma(w) in vector.wv)):
            return vector.wv[self.nlp.lemma(w)]
        else:
            return np.ones(300)

    def vector_features(self, dataset, vector):
        """Build per-example features: concatenated word vectors plus pairwise
        L1 distances and cosine distances among (w1, w2, attribute)."""
        X = []
        y = []
        for d in dataset:
            # Last element is the label; the rest are words.
            y.append(d[(- 1)])
            original_vectors = np.concatenate([self.wr(vector, w) for w in d[:(- 1)]])
            w1w2_norm = self._wrap_np([self.vec_diff_norm(d[0], d[1], vector)])
            w1attr_norm = self._wrap_np([self.vec_diff_norm(d[0], d[2], vector)])
            w2attr_norm = self._wrap_np([self.vec_diff_norm(d[1], d[2], vector)])
            w1w2_cos = self._wrap_np([self.vec_cosine(d[0], d[1], vector)])
            w1attr_cos = self._wrap_np([self.vec_cosine(d[0], d[2], vector)])
            w2attr_cos = self._wrap_np([self.vec_cosine(d[1], d[2], vector)])
            X.append(np.concatenate([original_vectors, w1w2_norm, w1attr_norm, w2attr_norm, w1w2_cos, w1attr_cos, w2attr_cos]))
        return (X, y)

    def _wrap_np(self, values):
        # Fix: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `float` is the documented replacement (same float64 dtype).
        return np.array(values, dtype=float)

    def vec_cosine(self, w1, w2, vector):
        """Cosine distance between the two words' vectors."""
        return cosine(self.wr(vector, w1), self.wr(vector, w2))

    def vec_diff_norm(self, w1, w2, vector, n=1):
        """n-norm (default L1) of the difference of the two words' vectors."""
        return LA.norm((self.wr(vector, w1) - self.wr(vector, w2)), n)

    def statistical_features(self, dataset):
        """Log-scaled Probase statistics of (attribute, w1) and (attribute, w2)."""

        def safe_log(x):
            # log of non-positive values is mapped to 0 instead of raising.
            return (math.log(x) if (x > 0) else 0)
        X = []
        y = []
        for d in dataset:
            y.append(d[(- 1)])
            w1attr_stat = [x for x in map(safe_log, self.pb.statistical_features(d[2], d[0]))]
            w2attr_stat = [x for x in map(safe_log, self.pb.statistical_features(d[2], d[1]))]
            X.append(self._wrap_np((w1attr_stat + w2attr_stat)))
        return (X, y)
def parse_args():
    """Parse command-line arguments for StyleGAN2 DataParallel training.

    Positional arguments select the gin config and architecture; the options
    cover training mode, regularization, augmentation, EMA, logging cadence
    and checkpoint/resume paths.

    Returns:
        argparse.Namespace with the parsed arguments.
    """
    parser = ArgumentParser(description='Training script: StyleGAN2 with DataParallel.')
    parser.add_argument('gin_config', type=str, help='Path to the gin configuration file')
    parser.add_argument('architecture', type=str, help='Architecture')
    parser.add_argument('--mode', default='std', type=str, help='Training mode (default: std)')
    parser.add_argument('--penalty', default='none', type=str, help='Penalty (default: none)')
    # Fix: help text said "(default: hfrt)" but the actual default is 'none'.
    parser.add_argument('--aug', default='none', type=str, help='Augmentation (default: none)')
    parser.add_argument('--use_warmup', action='store_true', help='Use warmup strategy on LR')
    # Fix: help text said "(default: 0)" but the actual default is 8.
    parser.add_argument('--workers', default=8, type=int, metavar='N', help='number of data loading workers (default: 8)')
    parser.add_argument('--temp', default=0.1, type=float, help='Temperature hyperparameter for contrastive losses')
    parser.add_argument('--lbd_a', default=1.0, type=float, help='Relative strength of the fake loss of ContraD')
    parser.add_argument('--no_lazy', action='store_true', help='Do not use lazy regularization')
    parser.add_argument('--d_reg_every', type=int, default=16, help='Interval of applying R1 when lazy regularization is used')
    parser.add_argument('--lbd_r1', type=float, default=10, help='R1 regularization')
    parser.add_argument('--style_mix', default=0.9, type=float, help='Style mixing regularization')
    parser.add_argument('--halflife_k', default=20, type=int, help='Half-life of exponential moving average in thousands of images')
    parser.add_argument('--ema_start_k', default=None, type=int, help='When to start the exponential moving average of G (default: halflife_k)')
    parser.add_argument('--halflife_lr', default=0, type=int, help='Apply LR decay when > 0')
    parser.add_argument('--no_fid', action='store_true', help='Do not track FIDs during training')
    parser.add_argument('--no_gif', action='store_true', help='Do not save GIF of sample generations from a fixed latent periodically during training')
    parser.add_argument('--n_eval_avg', default=3, type=int, help='How many times to average FID and IS')
    parser.add_argument('--print_every', help='', default=50, type=int)
    parser.add_argument('--evaluate_every', help='', default=2000, type=int)
    parser.add_argument('--save_every', help='', default=100000, type=int)
    parser.add_argument('--comment', help='Comment', default='', type=str)
    parser.add_argument('--resume', default=None, type=str, help='Path to logdir to resume the training')
    parser.add_argument('--finetune', default=None, type=str, help='Path to logdir that contains a pre-trained checkpoint of D')
    return parser.parse_args()
def train_network(config: MuZeroConfig, storage: SharedStorage, replay_buffer: ReplayBuffer):
    """MuZero training loop: repeatedly train for `config.training_steps`
    steps on batches sampled from the replay buffer, checkpointing and
    evaluating periodically.

    NOTE(review): the outer `while True` never terminates, and the optimizer
    is re-created (momentum state reset) on every outer iteration -- confirm
    both are intended.
    """
    network = Network(config.action_space_size).to(device)
    while True:
        # NOTE(review): lr_decay_rate is passed as weight_decay -- verify the
        # naming matches the config's intent.
        optimizer = optim.SGD(network.parameters(), lr=0.01, weight_decay=config.lr_decay_rate, momentum=config.momentum)
        # Busy-wait (100% CPU) until self-play has produced at least one game.
        while (not (len(replay_buffer.buffer) > 0)):
            pass
        for i in range(config.training_steps):
            # Periodic checkpoint + evaluation against a random player and an
            # older snapshot (skipped at i == 0).
            if (((i % config.checkpoint_interval) == 0) and (i > 0)):
                storage.save_network(i, network)
                vs_random_once = vs_random(network)
                print('network_vs_random = ', sorted(vs_random_once.items()), end='\n')
                vs_older = latest_vs_older(storage.latest_network(), storage.old_network())
                print('lastnet_vs_older = ', sorted(vs_older.items()), end='\n')
            batch = replay_buffer.sample_batch(config.num_unroll_steps, config.td_steps)
            update_weights(batch, network, optimizer)
        # Final snapshot for this training round.
        storage.save_network(config.training_steps, network)
def main():
    """Reconstruct the musdb-XL-train wave files by applying the downloaded
    sample-wise gain ratios to mixtures built from musdb18hq stems.

    Two variants are produced: 'ozone_train_fixed' (full songs, original
    stems) and 'ozone_train_random' (4-second segments with per-stem song /
    offset / gain / channel-swap parameters read from CSV).
    """
    parser = argparse.ArgumentParser(description='Save musdb-XL-train wave files from the downloaded sample-wise gain parameters')
    parser.add_argument('--root', type=str, default='/path/to/musdb18hq', help='Root directory')
    parser.add_argument('--musdb_XL_train_npy_root', type=str, default='/path/to/musdb-XL-train', help="Directory of numpy arrays of musdb-XL-train's sample-wise ratio ")
    parser.add_argument('--output', type=str, default='/path/to/musdb-XL-train', help='Directory to save musdb-XL-train wave data')
    args = parser.parse_args()
    sources = ['vocals', 'bass', 'drums', 'other']
    path_csv_fixed = f'{args.musdb_XL_train_npy_root}/ozone_train_fixed.csv'
    list_path_csv_random = sorted(glob.glob(f'{args.musdb_XL_train_npy_root}/ozone_train_random_*.csv'))
    # --- Fixed set: one row per song; row[0] is the song folder name. ---
    fixed_list = []
    os.makedirs(f'{args.output}/ozone_train_fixed', exist_ok=True)
    with open(path_csv_fixed, 'r', encoding='utf-8') as f:
        rdr = csv.reader(f)
        for (k, line) in enumerate(rdr):
            if (k == 0):
                # Skip the CSV header row.
                pass
            else:
                fixed_list.append(line)
    for fixed_song in tqdm.tqdm(fixed_list):
        audio_sources = []
        for source in sources:
            # Stereo load at 44.1 kHz -> shape (2, n_samples).
            (audio, sr) = librosa.load(f'{args.root}/train/{fixed_song[0]}/{source}.wav', sr=44100, mono=False)
            audio_sources.append(audio)
        stems = np.stack(audio_sources, axis=0)
        mixture = stems.sum(0)
        # Sample-wise gain ratio precomputed for this song.
        ratio = np.load(f'{args.musdb_XL_train_npy_root}/np_ratio/ozone_train_fixed/{fixed_song[0]}.npy')
        output = (mixture * ratio)
        sf.write(f'{args.output}/ozone_train_fixed/{fixed_song[0]}.wav', output.T, 44100, subtype='PCM_16')
    # --- Random set: 4-second segments with per-stem parameters. ---
    random_list = []
    os.makedirs(f'{args.output}/ozone_train_random', exist_ok=True)
    for path_csv_random in list_path_csv_random:
        with open(path_csv_random, 'r', encoding='utf-8') as f:
            rdr = csv.reader(f)
            for (k, line) in enumerate(rdr):
                if (k == 0):
                    # Skip the CSV header row.
                    pass
                else:
                    random_list.append(line)
    for random_song in tqdm.tqdm(random_list):
        audio_sources = []
        for (k, source) in enumerate(sources):
            # Columns come in groups of 4 per stem k (presumably:
            # 3+4k = source song, 4+4k = start offset [s], 5+4k = gain,
            # 6+4k = channel-swap flag) -- confirm against the CSV spec.
            (audio, sr) = librosa.load(f'{args.root}/train/{random_song[(3 + (k * 4))]}/{source}.wav', sr=44100, mono=False, offset=float(random_song[(4 + (k * 4))]), duration=4.0)
            audio = (audio * float(random_song[(5 + (k * 4))]))
            if (random_song[(6 + (k * 4))].lower() == 'true'):
                # Swap left/right channels (flip along the channel axis).
                audio = np.flip(audio, axis=0)
            audio_sources.append(audio)
        stems = np.stack(audio_sources, axis=0)
        mixture = stems.sum(0)
        ratio = np.load(f'{args.musdb_XL_train_npy_root}/np_ratio/ozone_train_random/{random_song[0]}.npy')
        output = (mixture * ratio)
        sf.write(f'{args.output}/ozone_train_random/{random_song[0]}.wav', output.T, 44100, subtype='PCM_16')
def who_collided(sim_context: SimContext) -> FrozenSet[PlayerName]:
    """Return every player that appears in any collision report."""
    involved = set()
    for report in sim_context.collision_reports:
        involved.update(report.players.keys())
    return frozenset(involved)
def calc_vocab_num(predicts):
    """Collect the vocabulary of the given sentences.

    Tokenizes each lower-cased sentence with ``word_tokenize`` and returns the
    unique tokens in first-seen order.

    Returns:
        list of unique tokens.
    """
    vocab = []
    # Fix: membership tests on a list made this O(n^2); track seen tokens in
    # a set for O(1) lookups while keeping the ordered list for the result.
    seen = set()
    for sentence in predicts:
        for word in word_tokenize(sentence.lower()):
            if word not in seen:
                seen.add(word)
                vocab.append(word)
    return vocab
class BaselineRunner():
    """Runner for a score-based generative model baseline (NCSN-style):
    trains a RefineNetDilated score network with denoising score matching and
    samples from it with Langevin dynamics."""

    def __init__(self, args, config):
        # args: CLI/run options (paths, resume flags); config: model/data/optim.
        self.args = args
        self.config = config

    def get_optimizer(self, parameters):
        """Build the optimizer selected by ``config.optim.optimizer``."""
        if (self.config.optim.optimizer == 'Adam'):
            return optim.Adam(parameters, lr=self.config.optim.lr, weight_decay=self.config.optim.weight_decay, betas=(self.config.optim.beta1, 0.999), amsgrad=self.config.optim.amsgrad)
        elif (self.config.optim.optimizer == 'RMSProp'):
            return optim.RMSprop(parameters, lr=self.config.optim.lr, weight_decay=self.config.optim.weight_decay)
        elif (self.config.optim.optimizer == 'SGD'):
            return optim.SGD(parameters, lr=self.config.optim.lr, momentum=0.9)
        else:
            raise NotImplementedError('Optimizer {} not understood.'.format(self.config.optim.optimizer))

    def logit_transform(self, image, lam=1e-06):
        """Map [0, 1] pixels to logit space, clamped away from 0/1 by `lam`."""
        image = (lam + ((1 - (2 * lam)) * image))
        return (torch.log(image) - torch.log1p((- image)))

    def train(self):
        """Train the score network with denoising score matching (sigma=0.01),
        logging train/test losses to TensorBoard and snapshotting periodically."""
        # Build transforms; random flip only applies to the training split.
        if (self.config.data.random_flip is False):
            tran_transform = test_transform = transforms.Compose([transforms.Resize(self.config.data.image_size), transforms.ToTensor()])
        else:
            tran_transform = transforms.Compose([transforms.Resize(self.config.data.image_size), transforms.RandomHorizontalFlip(p=0.5), transforms.ToTensor()])
            test_transform = transforms.Compose([transforms.Resize(self.config.data.image_size), transforms.ToTensor()])
        # Dataset selection (CIFAR10 / MNIST / CELEBA).
        if (self.config.data.dataset == 'CIFAR10'):
            dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10'), train=True, download=True, transform=tran_transform)
            test_dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10_test'), train=False, download=True, transform=test_transform)
        elif (self.config.data.dataset == 'MNIST'):
            dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist'), train=True, download=True, transform=tran_transform)
            test_dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist_test'), train=False, download=True, transform=test_transform)
        elif (self.config.data.dataset == 'CELEBA'):
            if self.config.data.random_flip:
                dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='train', transform=transforms.Compose([transforms.CenterCrop(140), transforms.Resize(self.config.data.image_size), transforms.RandomHorizontalFlip(), transforms.ToTensor()]), download=True)
            else:
                dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='train', transform=transforms.Compose([transforms.CenterCrop(140), transforms.Resize(self.config.data.image_size), transforms.ToTensor()]), download=True)
            test_dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba_test'), split='test', transform=transforms.Compose([transforms.CenterCrop(140), transforms.Resize(self.config.data.image_size), transforms.ToTensor()]), download=True)
        dataloader = DataLoader(dataset, batch_size=self.config.training.batch_size, shuffle=True, num_workers=4)
        test_loader = DataLoader(test_dataset, batch_size=self.config.training.batch_size, shuffle=True, num_workers=4, drop_last=True)
        test_iter = iter(test_loader)
        self.config.input_dim = ((self.config.data.image_size ** 2) * self.config.data.channels)
        # Fresh TensorBoard run: wipe any previous logs at this path.
        tb_path = os.path.join(self.args.run, 'tensorboard', self.args.doc)
        if os.path.exists(tb_path):
            shutil.rmtree(tb_path)
        tb_logger = tensorboardX.SummaryWriter(log_dir=tb_path)
        score = RefineNetDilated(self.config).to(self.config.device)
        score = torch.nn.DataParallel(score)
        optimizer = self.get_optimizer(score.parameters())
        if self.args.resume_training:
            # states = [model_state_dict, optimizer_state_dict].
            states = torch.load(os.path.join(self.args.log, 'checkpoint.pth'))
            score.load_state_dict(states[0])
            optimizer.load_state_dict(states[1])
        step = 0
        for epoch in range(self.config.training.n_epochs):
            for (i, (X, y)) in enumerate(dataloader):
                step += 1
                score.train()
                X = X.to(self.config.device)
                # Uniform dequantization: add noise in [0, 1/256).
                X = (((X / 256.0) * 255.0) + (torch.rand_like(X) / 256.0))
                if self.config.data.logit_transform:
                    X = self.logit_transform(X)
                loss = dsm_score_estimation(score, X, sigma=0.01)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                tb_logger.add_scalar('loss', loss, global_step=step)
                logging.info('step: {}, loss: {}'.format(step, loss.item()))
                if (step >= self.config.training.n_iters):
                    return 0
                # Every 100 steps: evaluate the DSM loss on one held-out batch.
                if ((step % 100) == 0):
                    score.eval()
                    try:
                        (test_X, test_y) = next(test_iter)
                    except StopIteration:
                        # Restart the test iterator when exhausted.
                        test_iter = iter(test_loader)
                        (test_X, test_y) = next(test_iter)
                    test_X = test_X.to(self.config.device)
                    test_X = (((test_X / 256.0) * 255.0) + (torch.rand_like(test_X) / 256.0))
                    if self.config.data.logit_transform:
                        test_X = self.logit_transform(test_X)
                    with torch.no_grad():
                        test_dsm_loss = dsm_score_estimation(score, test_X, sigma=0.01)
                    tb_logger.add_scalar('test_dsm_loss', test_dsm_loss, global_step=step)
                if ((step % self.config.training.snapshot_freq) == 0):
                    states = [score.state_dict(), optimizer.state_dict()]
                    torch.save(states, os.path.join(self.args.log, 'checkpoint_{}.pth'.format(step)))
                    torch.save(states, os.path.join(self.args.log, 'checkpoint.pth'))

    def Langevin_dynamics(self, x_mod, scorenet, n_steps=1000, step_lr=2e-05):
        """Unadjusted Langevin sampling: x <- x + step_lr * score + noise,
        recording a clamped CPU snapshot of every intermediate state."""
        images = []
        with torch.no_grad():
            for _ in range(n_steps):
                images.append(torch.clamp(x_mod, 0.0, 1.0).to('cpu'))
                # Noise scale sqrt(2 * step_lr) per Langevin dynamics.
                noise = (torch.randn_like(x_mod) * np.sqrt((step_lr * 2)))
                grad = scorenet(x_mod)
                x_mod = ((x_mod + (step_lr * grad)) + noise)
                print('modulus of grad components: mean {}, max {}'.format(grad.abs().mean(), grad.abs().max()))
            return images

    def test(self):
        """Load the latest checkpoint and run Langevin sampling from uniform
        noise, saving every intermediate sample batch to image_folder."""
        states = torch.load(os.path.join(self.args.log, 'checkpoint.pth'), map_location=self.config.device)
        score = RefineNetDilated(self.config).to(self.config.device)
        score = torch.nn.DataParallel(score)
        score.load_state_dict(states[0])
        if (not os.path.exists(self.args.image_folder)):
            os.makedirs(self.args.image_folder)
        score.eval()
        if ((self.config.data.dataset == 'MNIST') or (self.config.data.dataset == 'FashionMNIST')):
            transform = transforms.Compose([transforms.Resize(self.config.data.image_size), transforms.ToTensor()])
            if (self.config.data.dataset == 'MNIST'):
                dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist'), train=True, download=True, transform=transform)
            else:
                dataset = FashionMNIST(os.path.join(self.args.run, 'datasets', 'fmnist'), train=True, download=True, transform=transform)
            dataloader = DataLoader(dataset, batch_size=100, shuffle=True, num_workers=4)
            data_iter = iter(dataloader)
            (samples, _) = next(data_iter)
            samples = samples.cuda()
            # NOTE(review): the loaded batch is only used for its shape --
            # sampling starts from uniform noise of the same shape.
            samples = torch.rand_like(samples)
            all_samples = self.Langevin_dynamics(samples, score, 1000, 2e-05)
            for (i, sample) in enumerate(tqdm.tqdm(all_samples)):
                sample = sample.view(100, self.config.data.channels, self.config.data.image_size, self.config.data.image_size)
                if self.config.data.logit_transform:
                    # Map logit-space samples back to pixel space.
                    sample = torch.sigmoid(sample)
                torch.save(sample, os.path.join(self.args.image_folder, 'samples_{}.pth'.format(i)))
        elif (self.config.data.dataset == 'CELEBA'):
            dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='test', transform=transforms.Compose([transforms.CenterCrop(140), transforms.Resize(self.config.data.image_size), transforms.ToTensor()]), download=True)
            dataloader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4)
            # NOTE(review): this loaded batch is immediately discarded below.
            (samples, _) = next(iter(dataloader))
            samples = torch.rand(100, 3, self.config.data.image_size, self.config.data.image_size, device=self.config.device)
            all_samples = self.Langevin_dynamics(samples, score, 1000, 2e-05)
            for (i, sample) in enumerate(tqdm.tqdm(all_samples)):
                sample = sample.view(100, self.config.data.channels, self.config.data.image_size, self.config.data.image_size)
                if self.config.data.logit_transform:
                    sample = torch.sigmoid(sample)
                torch.save(sample, os.path.join(self.args.image_folder, 'samples_{}.pth'.format(i)))
        else:
            # Default branch (CIFAR10).
            transform = transforms.Compose([transforms.Resize(self.config.data.image_size), transforms.ToTensor()])
            if (self.config.data.dataset == 'CIFAR10'):
                dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10'), train=True, download=True, transform=transform)
            dataloader = DataLoader(dataset, batch_size=100, shuffle=True, num_workers=4)
            data_iter = iter(dataloader)
            (samples, _) = next(data_iter)
            samples = samples.cuda()
            samples = torch.rand_like(samples)
            all_samples = self.Langevin_dynamics(samples, score, 1000, 2e-05)
            for (i, sample) in enumerate(tqdm.tqdm(all_samples)):
                sample = sample.view(100, self.config.data.channels, self.config.data.image_size, self.config.data.image_size)
                if self.config.data.logit_transform:
                    sample = torch.sigmoid(sample)
                torch.save(sample, os.path.join(self.args.image_folder, 'samples_{}.pth'.format(i)))
class MRCNERDataLoader(object):
    """Builds PyTorch ``DataLoader``s for MRC-formulated cross-lingual NER.

    Mixes examples from several corpora (English source QA, machine-translated
    QA, CoNLL-as-MRC, pseudo-labelled target data) according to
    ``config.data_mode`` and ``config.lang_type``, caches tokenized features on
    disk, and wraps them in ``TensorDataset``-backed loaders.
    """

    def __init__(self, config, data_processor, label_list, tokenizer, mode='train', allow_impossible=True, entity_scheme='bes'):
        self.data_dir = config.data_dir
        self.data_mode = config.data_mode
        self.lang_type = config.lang_type
        # Per-(language, data_mode) cache directory for tokenized features.
        self.save_cache_path = os.path.join(self.data_dir, ((self.lang_type + '_') + self.data_mode))
        if (not os.path.exists(self.save_cache_path)):
            os.mkdir(self.save_cache_path)
        self.max_seq_length = config.max_seq_length
        self.entity_scheme = entity_scheme
        # Use a DistributedSampler only for multi-GPU DDP training.
        self.distributed_data_sampler = ((config.n_gpu > 1) and (config.data_parallel == 'ddp'))
        if (mode == 'train'):
            self.train_batch_size = config.train_batch_size
            self.dev_batch_size = config.dev_batch_size
            self.test_batch_size = config.test_batch_size
            self.num_train_epochs = config.num_train_epochs
        elif (mode == 'test'):
            self.test_batch_size = config.test_batch_size
        elif (mode == 'transform_binary_files'):
            # Offline mode: only announce the cache-transformation settings.
            print(('=*=' * 15))
            print('Transform pre-processed MRC-NER datasets into binary files. \n')
            print('max_sequence_length is : ', config.max_seq_length)
            print('data_dir is : ', config.data_dir)
            print(('=*=' * 15))
        else:
            raise ValueError('[mode] for MRCNERDataLoader does not exist.')
        self.data_processor = data_processor
        self.label_list = label_list
        self.allow_impossible = allow_impossible
        self.tokenizer = tokenizer
        self.max_seq_len = config.max_seq_length
        self.data_cache = config.data_cache
        # Instance counters filled in by convert_examples_to_features().
        self.num_train_instances = 0
        self.num_dev_instances = 0
        self.num_test_instances = 0

    def examples_for_diff_data_mode(self, data_sign):
        """Load and mix raw examples for ``data_sign`` (train/dev/test)
        according to ``self.data_mode``.

        Returns a list of examples, or None when the requested mode needs a
        target-language QA set that does not exist for this language.
        """
        # English source corpora shared by every language.
        src_qa = 'squad_en'
        src_ner_qa = 'conll_mrc_en'
        # Per-language dataset folder names.
        if (self.lang_type == 'esp'):
            tgt_qa = 'esp_qa'
            src_trans_qa = 'squad_es'
            src_trans_ner_qa = 'conll_mrc_es'
            pseudo_qa = 'esp_pseudo'
        elif (self.lang_type == 'deu1'):
            tgt_qa = 'deu_qa_1'
            src_trans_qa = 'squad_de'
            src_trans_ner_qa = 'conll_mrc_de'
            pseudo_qa = 'deu_pseudo'
        elif (self.lang_type == 'deu2'):
            tgt_qa = 'deu_qa_2'
            src_trans_qa = 'squad_de'
            src_trans_ner_qa = 'conll_mrc_de'
            pseudo_qa = 'deu_pseudo'
        elif (self.lang_type == 'ned'):
            # No target-language QA set exists for Dutch.
            tgt_qa = None
            src_trans_qa = 'squad_nl'
            src_trans_ner_qa = 'conll_mrc_nl'
            pseudo_qa = 'ned_pseudo'
        elif (self.lang_type == 'no'):
            tgt_qa = None
            src_trans_qa = 'squad_no'
            src_trans_ner_qa = 'conll_mrc_no'
            # NOTE(review): no pseudo_qa is defined for 'no'; any pseudo-*
            # data_mode would raise NameError for this language — confirm.
        else:
            print('Language type is not valid!')
        if (self.data_mode == 'tgt'):
            if (tgt_qa is not None):
                examples = self.data_processor.get_examples(os.path.join(self.data_dir, tgt_qa), data_sign)
            else:
                print('No qa dataset for {}'.format(self.lang_type))
                return
        if (self.data_mode == 'src'):
            examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
            update_data_path = os.path.join(self.data_dir, src_qa)
        if (self.data_mode == 'src+trans'):
            examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
            src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
            # Cap each auxiliary corpus at 10k randomly-sampled examples.
            limit_len = 10000
            sp_examples = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
            examples.extend(sp_examples)
        if (self.data_mode == 'tgt+src'):
            if (tgt_qa is not None):
                examples = self.data_processor.get_examples(os.path.join(self.data_dir, tgt_qa), data_sign)
                src_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
                limit_len = 10000
                sp_examples = random.sample(src_examples, min(limit_len, len(src_examples)))
                examples.extend(sp_examples)
            else:
                print('No qa dataset for {}'.format(self.lang_type))
                return
        if (self.data_mode == 'tgt+src+trans'):
            if (tgt_qa is not None):
                examples = self.data_processor.get_examples(os.path.join(self.data_dir, tgt_qa), data_sign)
                src_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
                src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
                limit_len = 10000
                sp_examples_1 = random.sample(src_examples, min(limit_len, len(src_examples)))
                sp_examples_2 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
                examples.extend(sp_examples_1)
                examples.extend(sp_examples_2)
            else:
                print('No qa dataset for {}'.format(self.lang_type))
                return
        if (self.data_mode == 'src+trans+conll+pseudo'):
            examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
            src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
            src_ner_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_ner_qa), data_sign)
            pseudo_examples = self.data_processor.get_examples(os.path.join(self.data_dir, pseudo_qa), data_sign)
            limit_len = 10000
            sp_examples_1 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
            sp_examples_2 = random.sample(src_ner_examples, min(limit_len, len(src_ner_examples)))
            sp_examples_3 = random.sample(pseudo_examples, min(limit_len, len(pseudo_examples)))
            examples.extend(sp_examples_1)
            examples.extend(sp_examples_2)
            examples.extend(sp_examples_3)
        if (self.data_mode == 'trans+pseudo+conll_trans'):
            examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
            pseudo_examples = self.data_processor.get_examples(os.path.join(self.data_dir, pseudo_qa), data_sign)
            src_ner_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_ner_qa), data_sign)
            limit_len = 10000
            sp_examples_1 = random.sample(pseudo_examples, min(limit_len, len(pseudo_examples)))
            sp_examples_2 = random.sample(src_ner_trans_examples, min(limit_len, len(src_ner_trans_examples)))
            examples.extend(sp_examples_1)
            examples.extend(sp_examples_2)
        if (self.data_mode == 'src+trans+conll+conll_trans'):
            examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
            src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
            src_ner_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_ner_qa), data_sign)
            src_ner_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_ner_qa), data_sign)
            limit_len = 10000
            sp_examples_1 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
            sp_examples_2 = random.sample(src_ner_examples, min(limit_len, len(src_ner_examples)))
            sp_examples_3 = random.sample(src_ner_trans_examples, min(limit_len, len(src_ner_trans_examples)))
            examples.extend(sp_examples_1)
            examples.extend(sp_examples_2)
            examples.extend(sp_examples_3)
        if (self.data_mode == 'tgt+src+trans+conll'):
            if (tgt_qa is not None):
                examples = self.data_processor.get_examples(os.path.join(self.data_dir, tgt_qa), data_sign)
                src_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
                src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
                src_ner_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_ner_qa), data_sign)
                limit_len = 10000
                sp_examples_1 = random.sample(src_examples, min(limit_len, len(src_examples)))
                sp_examples_2 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
                sp_examples_3 = random.sample(src_ner_examples, min(limit_len, len(src_ner_examples)))
                examples.extend(sp_examples_1)
                examples.extend(sp_examples_2)
                examples.extend(sp_examples_3)
            else:
                print('No qa dataset for {}'.format(self.lang_type))
                return
        if (self.data_mode == 'tgt+src+trans+conll+conll_trans+pseudo'):
            if (tgt_qa is not None):
                examples = self.data_processor.get_examples(os.path.join(self.data_dir, tgt_qa), data_sign)
                src_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
                src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
                src_ner_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_ner_qa), data_sign)
                src_ner_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_ner_qa), data_sign)
                pseudo_examples = self.data_processor.get_examples(os.path.join(self.data_dir, pseudo_qa), data_sign)
                limit_len = 10000
                sp_examples_1 = random.sample(src_examples, min(limit_len, len(src_examples)))
                sp_examples_2 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
                sp_examples_3 = random.sample(src_ner_examples, min(limit_len, len(src_ner_examples)))
                sp_examples_4 = random.sample(src_ner_trans_examples, min(limit_len, len(src_ner_trans_examples)))
                sp_examples_5 = random.sample(pseudo_examples, min(limit_len, len(pseudo_examples)))
                examples.extend(sp_examples_1)
                examples.extend(sp_examples_2)
                examples.extend(sp_examples_3)
                examples.extend(sp_examples_4)
                examples.extend(sp_examples_5)
            else:
                print('No qa dataset for {}'.format(self.lang_type))
                return
        return examples

    def convert_examples_to_features(self, data_sign='train', num_data_processor=1, logger=None):
        """Tokenize examples into features with on-disk caching.

        With ``num_data_processor == 1`` a single monolithic cache file is
        used; otherwise the work/cache is sharded and read back via a
        multiprocessing pool.

        NOTE(review): this method shadows a module-level function of the same
        name; the bare ``convert_examples_to_features(...)`` calls inside the
        body resolve to that module-level function, not to this method.
        """
        print(f'loading {data_sign} data ... \n...')
        examples = self.examples_for_diff_data_mode(data_sign)
        if (data_sign == 'train'):
            self.num_train_instances = len(examples)
        elif (data_sign == 'dev'):
            self.num_dev_instances = len(examples)
        elif (data_sign == 'test'):
            self.num_test_instances = len(examples)
        else:
            raise ValueError('please notice that the data_sign can only be train/dev/test !!')
        if (num_data_processor == 1):
            # Single-process path: one cache file keyed by split + max length.
            cache_path = os.path.join(self.save_cache_path, 'mrc-ner.{}.cache.{}'.format(data_sign, str(self.max_seq_len)))
            if os.path.exists(cache_path):
                features = torch.load(cache_path)
            else:
                features = convert_examples_to_features(examples, self.tokenizer, self.label_list, self.max_seq_length, allow_impossible=self.allow_impossible, entity_scheme=self.entity_scheme)
                torch.save(features, cache_path)
            return features

        def export_features_to_cache_file(idx, sliced_features, num_data_processor):
            # Persist one shard of features under an idx-suffixed cache name.
            cache_path = os.path.join(self.save_cache_path, 'mrc-ner.{}.cache.{}.{}-{}'.format(data_sign, str(self.max_seq_len), str(num_data_processor), str(idx)))
            torch.save(sliced_features, cache_path)
        features_lst = []
        total_examples = len(examples)
        size_of_one_process = math.ceil((total_examples / num_data_processor))
        path_to_preprocessed_cache = os.path.join(self.save_cache_path, 'mrc-ner.{}.cache.{}.{}-*'.format(data_sign, str(self.max_seq_len), str(num_data_processor)))
        collection_of_preprocessed_cache = glob(path_to_preprocessed_cache)
        if (len(collection_of_preprocessed_cache) == num_data_processor):
            # Complete shard set already on disk — reuse it as-is.
            print(f'%%%% %%%% Load Saved Cache files in {self.save_cache_path} %%% %%% ')
        elif (len(collection_of_preprocessed_cache) != 0):
            # Partial/stale shard set: delete it and re-tokenize every shard.
            for item_of_preprocessed_cache in collection_of_preprocessed_cache:
                os.remove(item_of_preprocessed_cache)
            for idx in range(num_data_processor):
                start = (size_of_one_process * idx)
                # Clamp the last shard to the number of examples.
                end = (((idx + 1) * size_of_one_process) if (((idx + 1) * size_of_one_process) < total_examples) else total_examples)
                sliced_examples = examples[start:end]
                sliced_features = convert_examples_to_features(sliced_examples, self.tokenizer, self.label_list, self.max_seq_length, allow_impossible=self.allow_impossible, entity_scheme=self.entity_scheme)
                export_features_to_cache_file(idx, sliced_features, num_data_processor)
            del examples
        else:
            # No cache yet: tokenize and export every shard.
            for idx in range(num_data_processor):
                start = (size_of_one_process * idx)
                end = (((idx + 1) * size_of_one_process) if (((idx + 1) * size_of_one_process) < total_examples) else total_examples)
                sliced_examples = examples[start:end]
                sliced_features = convert_examples_to_features(sliced_examples, self.tokenizer, self.label_list, self.max_seq_length, allow_impossible=self.allow_impossible, entity_scheme=self.entity_scheme)
                export_features_to_cache_file(idx, sliced_features, num_data_processor)
            del examples
        # Read the shards back concurrently in worker processes.
        multi_process_for_data = Pool(num_data_processor)
        for idx in range(num_data_processor):
            features_lst.append(multi_process_for_data.apply_async(MRCNERDataLoader.read_features_from_cache_file, args=(idx, self.save_cache_path, data_sign, self.max_seq_len, num_data_processor, logger)))
        multi_process_for_data.close()
        multi_process_for_data.join()
        features = []
        for feature_slice in features_lst:
            features.extend(feature_slice.get())
        return features

    def get_dataloader(self, data_sign='train', num_data_processor=1, logger=None):
        """Tensorize cached features and wrap them in a ``DataLoader`` with
        split-appropriate sampling (random/distributed for train,
        sequential for dev/test)."""
        features = self.convert_examples_to_features(data_sign=data_sign, num_data_processor=num_data_processor, logger=logger)
        input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
        start_pos = torch.tensor([f.start_position for f in features], dtype=torch.long)
        end_pos = torch.tensor([f.end_position for f in features], dtype=torch.long)
        span_pos = torch.tensor([f.span_position for f in features], dtype=torch.long)
        ner_cate = torch.tensor([f.ner_cate for f in features], dtype=torch.long)
        span_label_mask = torch.tensor([f.span_label_mask for f in features], dtype=torch.long)
        dataset = TensorDataset(input_ids, input_mask, segment_ids, start_pos, end_pos, span_pos, span_label_mask, ner_cate)
        if (data_sign == 'train'):
            if self.distributed_data_sampler:
                datasampler = DistributedSampler(dataset)
            else:
                datasampler = RandomSampler(dataset)
            dataloader = DataLoader(dataset, sampler=datasampler, batch_size=self.train_batch_size)
        elif (data_sign == 'dev'):
            datasampler = SequentialSampler(dataset)
            dataloader = DataLoader(dataset, sampler=datasampler, batch_size=self.dev_batch_size)
        elif (data_sign == 'test'):
            datasampler = SequentialSampler(dataset)
            dataloader = DataLoader(dataset, sampler=datasampler, batch_size=self.test_batch_size)
        return dataloader

    def read_features_from_cache_file(idx, data_dir, data_sign, max_seq_len, num_data_processor, logger):
        # Load one feature shard written by export_features_to_cache_file.
        # NOTE(review): invoked as an unbound function
        # (MRCNERDataLoader.read_features_from_cache_file) from apply_async;
        # it takes no self and could be marked @staticmethod.
        cache_path = os.path.join(data_dir, 'mrc-ner.{}.cache.{}.{}-{}'.format(data_sign, str(max_seq_len), str(num_data_processor), str(idx)))
        sliced_features = torch.load(cache_path)
        return sliced_features

    def get_train_instance(self):
        # Number of raw training examples seen by the last train-split load.
        return self.num_train_instances
class InitWeights_XavierUniform(object):
    """Callable weight initializer for use with ``nn.Module.apply``.

    Applies Xavier-uniform initialization to the weights (and zeros the
    biases) of 2-D/3-D convolution and transposed-convolution layers; all
    other module types are left untouched.
    """

    def __init__(self, gain=1):
        # Scaling factor forwarded to nn.init.xavier_uniform_.
        self.gain = gain

    def __call__(self, module):
        # isinstance accepts a tuple — clearer than a chain of `or`s.
        if isinstance(module, (nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d)):
            # nn.init functions mutate the tensor in place and return it;
            # re-assigning the attribute (as the old code did) is redundant
            # and risks tripping nn.Module's Parameter type-checking.
            nn.init.xavier_uniform_(module.weight, self.gain)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
def annotation_contains(a1: Annotation, a2: Annotation):
    """Return True when a2's evidence span lies inside a1's evidence span."""
    starts_inside = a1.evidence_start <= a2.evidence_start <= a1.evidence_end
    ends_inside = a1.evidence_end >= a2.evidence_end
    return starts_inside and ends_inside
def ToOneHot2D(f, dim):
    """Convert an integer label array to a one-hot encoding.

    Args:
        f: 1-D or 2-D array of integer labels; 1-D input is promoted to a
           (n, 1) column so the result is always 3-D.
        dim: number of one-hot classes.

    Returns:
        Float array of shape ``f.shape + (dim,)`` with a 1.0 at each label
        index; rows whose label is negative stay all-zero.

    Raises:
        IndexError: if any non-negative label is >= ``dim`` (same as the
        original per-element implementation).
    """
    if len(f.shape) == 1:
        f = np.expand_dims(f, -1)
    assert len(f.shape) == 2
    oh = np.zeros(f.shape + (dim,))
    # Vectorised scatter replaces the original O(rows*cols) Python loop.
    # The `f >= 0` mask reproduces the old `if idx >= 0` skip exactly.
    rows, cols = np.nonzero(f >= 0)
    oh[rows, cols, f[rows, cols]] = 1.0
    return oh
class TestCircuitDrawer(QiskitTestCase):
    """Tests for ``circuit_drawer``'s output-backend selection logic."""

    def test_default_output(self):
        # With no user config, the text backend is the default.
        with unittest.mock.patch('qiskit.user_config.get_config', return_value={}):
            circuit = QuantumCircuit()
            out = visualization.circuit_drawer(circuit)
            self.assertIsInstance(out, text.TextDrawing)

    # BUG FIX: these guards were bare tuple expressions (no-op statements);
    # they are clearly meant to be skipUnless decorators on the tests that
    # need matplotlib, so they are restored as such.
    @unittest.skipUnless(visualization.HAS_MATPLOTLIB, 'Skipped because matplotib is not available')
    def test_user_config_default_output(self):
        # The user-config 'circuit_drawer' option selects the backend.
        with unittest.mock.patch('qiskit.user_config.get_config', return_value={'circuit_drawer': 'mpl'}):
            circuit = QuantumCircuit()
            out = visualization.circuit_drawer(circuit)
            self.assertIsInstance(out, figure.Figure)

    def test_default_output_with_user_config_not_set(self):
        # Unrelated config keys must not change the default text backend.
        with unittest.mock.patch('qiskit.user_config.get_config', return_value={'other_option': True}):
            circuit = QuantumCircuit()
            out = visualization.circuit_drawer(circuit)
            self.assertIsInstance(out, text.TextDrawing)

    @unittest.skipUnless(visualization.HAS_MATPLOTLIB, 'Skipped because matplotib is not available')
    def test_kwarg_priority_over_user_config_default_output(self):
        # An explicit output= kwarg overrides the user-config default.
        with unittest.mock.patch('qiskit.user_config.get_config', return_value={'circuit_drawer': 'latex'}):
            circuit = QuantumCircuit()
            out = visualization.circuit_drawer(circuit, output='mpl')
            self.assertIsInstance(out, figure.Figure)

    @unittest.skipUnless(visualization.HAS_MATPLOTLIB, 'Skipped because matplotib is not available')
    def test_default_backend_auto_output_with_mpl(self):
        # 'auto' resolves to matplotlib when it is importable.
        with unittest.mock.patch('qiskit.user_config.get_config', return_value={'circuit_drawer': 'auto'}):
            circuit = QuantumCircuit()
            out = visualization.circuit_drawer(circuit)
            self.assertIsInstance(out, figure.Figure)

    def test_default_backend_auto_output_without_mpl(self):
        # 'auto' falls back to the text backend when matplotlib is missing.
        with unittest.mock.patch('qiskit.user_config.get_config', return_value={'circuit_drawer': 'auto'}):
            with unittest.mock.patch.object(visualization.circuit_visualization, '_matplotlib', autospec=True) as mpl_mock:
                mpl_mock.HAS_MATPLOTLIB = False
                circuit = QuantumCircuit()
                out = visualization.circuit_drawer(circuit)
                self.assertIsInstance(out, text.TextDrawing)
class DeadlockPunishmentConfig(RewardConfig):
    """Reward-shaping configuration that applies a fixed deadlock penalty."""

    def __init__(self, value):
        # Penalty magnitude, forwarded verbatim to DeadlockPunishment.
        self.value = value

    def create_reward_shaper(self):
        """Instantiate the DeadlockPunishment shaper configured here."""
        return DeadlockPunishment(self.value)
def _check_value(name, src, supported_type, supported_value=[]): if (isinstance(src, list) and any([(not isinstance(i, supported_type)) for i in src])): assert False, 'Type of {} items should be {} but not {}'.format(name, str(supported_type), [type(i) for i in src]) elif ((not isinstance(src, list)) and (not isinstance(src, supported_type))): assert False, 'Type of {} should be {} but not {}'.format(name, str(supported_type), type(src)) if (len(supported_value) > 0): if (isinstance(src, str) and (src not in supported_value)): assert False, '{} is not in supported {}: {}. Skip setting it.'.format(src, name, str(supported_value)) elif (isinstance(src, list) and all([isinstance(i, str) for i in src]) and any([(i not in supported_value) for i in src])): assert False, '{} is not in supported {}: {}. Skip setting it.'.format(src, name, str(supported_value)) return True
def _update_optimizer_with_manual_step_learning_rate(optimizer, initial_learning_rate, learning_rate_scaling):
    """Configure the optimizer proto's manual-step learning-rate schedule.

    Sets the initial rate, then appends three schedule entries whose rates
    decay geometrically: initial * scaling**0, **1 and **2.
    """
    manual_lr = optimizer.learning_rate.manual_step_learning_rate
    manual_lr.initial_learning_rate = initial_learning_rate
    for step_idx in range(3):
        entry = manual_lr.schedule.add()
        entry.learning_rate = initial_learning_rate * learning_rate_scaling ** step_idx
class ClearMLCallback(TrainerCallback):
    """``TrainerCallback`` that mirrors Trainer logs and artifacts to ClearML.

    Attaches to (or creates) a ClearML Task, reports scalar metrics on every
    log event, and optionally uploads checkpoints when the CLEARML_LOG_MODEL
    environment variable is truthy.
    """

    def __init__(self):
        if is_clearml_available():
            import clearml
            self._clearml = clearml
        else:
            raise RuntimeError("ClearMLCallback requires 'clearml' to be installed. Run `pip install clearml`.")
        self._initialized = False
        self._clearml_task = None
        # Checkpoint upload is opt-in via the CLEARML_LOG_MODEL env var.
        self._log_model = (os.getenv('CLEARML_LOG_MODEL', 'FALSE').upper() in ENV_VARS_TRUE_VALUES.union({'TRUE'}))

    def setup(self, args, state, model, tokenizer, **kwargs):
        """Attach to an existing ClearML task or initialise a new one (on the
        main process only) and connect training args / model config to it."""
        if (self._clearml is None):
            return
        if self._initialized:
            return
        if state.is_world_process_zero:
            logger.info('Automatic ClearML logging enabled.')
            if (self._clearml_task is None):
                if self._clearml.Task.current_task():
                    # Reuse a task that was created outside the Trainer.
                    self._clearml_task = self._clearml.Task.current_task()
                    self._initialized = True
                    logger.info('External ClearML Task has been connected.')
                else:
                    # Framework auto-logging is disabled here because this
                    # callback reports metrics explicitly below.
                    self._clearml_task = self._clearml.Task.init(project_name=os.getenv('CLEARML_PROJECT', 'HuggingFace Transformers'), task_name=os.getenv('CLEARML_TASK', 'Trainer'), auto_connect_frameworks={'tensorboard': False, 'pytorch': False}, output_uri=True)
                    self._initialized = True
                    logger.info('ClearML Task has been initialized.')
            self._clearml_task.connect(args, 'Args')
            if (hasattr(model, 'config') and (model.config is not None)):
                self._clearml_task.connect(model.config, 'Model Configuration')

    def on_train_begin(self, args, state, control, model=None, tokenizer=None, **kwargs):
        """(Re-)run setup at the start of training; a hyper-parameter search
        forces re-initialisation for the new trial."""
        if (self._clearml is None):
            return
        if state.is_hyper_param_search:
            self._initialized = False
        if (not self._initialized):
            self.setup(args, state, model, tokenizer, **kwargs)

    def on_train_end(self, args, state, control, model=None, tokenizer=None, metrics=None, logs=None, **kwargs):
        """Close the ClearML task when training finishes (main process only)."""
        if (self._clearml is None):
            return
        if (self._clearml_task and state.is_world_process_zero):
            self._clearml_task.close()

    def on_log(self, args, state, control, model=None, tokenizer=None, logs=None, **kwargs):
        """Route logged values to ClearML: known summary keys become single
        values, eval_/test_-prefixed keys become eval/test series, everything
        else a train series; non-numeric values are dropped with a warning."""
        if (self._clearml is None):
            return
        if (not self._initialized):
            self.setup(args, state, model, tokenizer, **kwargs)
        if state.is_world_process_zero:
            eval_prefix = 'eval_'
            eval_prefix_len = len(eval_prefix)
            test_prefix = 'test_'
            test_prefix_len = len(test_prefix)
            # End-of-run summary keys, reported once rather than per step.
            single_value_scalars = ['train_runtime', 'train_samples_per_second', 'train_steps_per_second', 'train_loss', 'total_flos', 'epoch']
            for (k, v) in logs.items():
                if isinstance(v, (int, float)):
                    if (k in single_value_scalars):
                        self._clearml_task.get_logger().report_single_value(name=k, value=v)
                    elif k.startswith(eval_prefix):
                        self._clearml_task.get_logger().report_scalar(title=k[eval_prefix_len:], series='eval', value=v, iteration=state.global_step)
                    elif k.startswith(test_prefix):
                        self._clearml_task.get_logger().report_scalar(title=k[test_prefix_len:], series='test', value=v, iteration=state.global_step)
                    else:
                        self._clearml_task.get_logger().report_scalar(title=k, series='train', value=v, iteration=state.global_step)
                else:
                    logger.warning(f"""Trainer is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a scalar. This invocation of ClearML logger's report_scalar() is incorrect so we dropped this attribute.""")

    def on_save(self, args, state, control, **kwargs):
        """Upload the just-saved checkpoint directory as the task's output
        model (only when CLEARML_LOG_MODEL is enabled, main process only)."""
        if (self._log_model and self._clearml_task and state.is_world_process_zero):
            ckpt_dir = f'checkpoint-{state.global_step}'
            artifact_path = os.path.join(args.output_dir, ckpt_dir)
            logger.info(f'Logging checkpoint artifacts in {ckpt_dir}. This may take time.')
            self._clearml_task.update_output_model(artifact_path, iteration=state.global_step, auto_delete_file=False)
def process_joint(args):
    """Build a joint (source + target text) SentencePiece vocabulary and a
    raw-audio config for one en-{tgt_lang} language pair.

    Reads `<data_root>/en-<lang>/train_raw.tsv`, trains a vocab over both
    `src_text` and `tgt_text` columns with language-tag special symbols, and
    writes `config_raw.yaml` next to the data.
    """
    split = 'train_raw'
    root = Path(args.data_root).absolute()
    lang = args.tgt_lang
    cur_root = root / f'en-{lang}'
    if not cur_root.is_dir():
        print(f'{cur_root.as_posix()} does not exist. Skipped.')
        # BUG FIX: the original only printed and fell through, then crashed
        # trying to load the TSV from the missing directory.
        return
    df = load_df_from_tsv(cur_root / f'{split}.tsv')
    # Train on source and target sentences jointly.
    train_text = []
    for _, row in df.iterrows():
        train_text.append(row['src_text'])
        train_text.append(row['tgt_text'])
    v_size_str = '' if args.vocab_type == 'char' else str(args.vocab_size)
    spm_filename_prefix = f'spm_{args.vocab_type}{v_size_str}_raw'
    with NamedTemporaryFile(mode='w') as f:
        for t in train_text:
            f.write(t + '\n')
        # Ensure buffered lines reach disk before the trainer reads f.name.
        f.flush()
        special_symbols = [f'<lang:{lang}>', '<lang:en>']
        gen_vocab(Path(f.name), cur_root / spm_filename_prefix, args.vocab_type, args.vocab_size, special_symbols=special_symbols)
    gen_config_yaml_raw(cur_root, spm_filename_prefix + '.model', yaml_filename=f'config_raw.yaml', prepend_tgt_lang_tag=True, use_audio_input=True)
class TestQuantization(unittest.TestCase):
    """Smoke tests for the MSE tuning strategy of post-training quantization."""

    # BUG FIX: unittest invokes setUpClass/tearDownClass on the CLASS; the
    # originals were plain instance methods taking `self`, so unittest's
    # `cls.setUpClass()` call would fail with a missing-argument TypeError.
    @classmethod
    def setUpClass(cls):
        # Shared fixture graphs, built once for the whole test class.
        cls.constant_graph = build_fake_model()
        cls.test_graph = create_test_graph()

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree('saved', ignore_errors=True)

    def test_run_mse_one_trial(self):
        """MSE strategy with a single trial should still yield a model."""
        from neural_compressor.config import AccuracyCriterion, PostTrainingQuantConfig, TuningCriterion
        from neural_compressor.data import DATALOADERS, Datasets
        from neural_compressor.quantization import fit
        dataset = Datasets('tensorflow')['dummy']((100, 3, 3, 1), label=True)
        dataloader = DATALOADERS['tensorflow'](dataset)
        tune_cri = TuningCriterion(strategy='mse', max_trials=1)
        acc_cri = AccuracyCriterion(tolerable_loss=0.01)
        conf = PostTrainingQuantConfig(quant_level=1, tuning_criterion=tune_cri, accuracy_criterion=acc_cri)

        def fake_eval(model):
            # Constant accuracy: the first trial always meets the criterion.
            return 1
        q_model = fit(model=self.constant_graph, conf=conf, calib_dataloader=dataloader, eval_func=fake_eval)
        self.assertNotEqual(q_model, None)

    def test_run_mse_max_trials(self):
        """MSE strategy should recover within max_trials when early trials
        fail the accuracy criterion."""
        from neural_compressor.config import AccuracyCriterion, PostTrainingQuantConfig, TuningCriterion
        from neural_compressor.data import DATALOADERS, Datasets
        from neural_compressor.quantization import fit
        dataset = Datasets('tensorflow')['dummy']((100, 3, 3, 1), label=True)
        dataloader = DATALOADERS['tensorflow'](dataset)
        tune_cri = TuningCriterion(strategy='mse', max_trials=3)
        acc_cri = AccuracyCriterion(tolerable_loss=0.01)
        op_name_dict = {'conv1': {'activation': {'dtype': ['fp32']}}}
        # Scripted accuracy sequence consumed one value per evaluation.
        acc = [0, 1, 0.9, 1]

        def fake_eval(model):
            acc.pop(0)
            return acc[0]
        conf = PostTrainingQuantConfig(quant_level=1, op_name_dict=op_name_dict, tuning_criterion=tune_cri, accuracy_criterion=acc_cri)
        q_model = fit(model=self.constant_graph, conf=conf, calib_dataloader=dataloader, eval_func=fake_eval)
        self.assertNotEqual(q_model, None)
def blackify(code):
    """Format a code snippet with black (py37 target, line length 119).

    Indented snippets are temporarily wrapped in a dummy ``class Bla:`` so
    black accepts them, then unwrapped before returning; docstring styling
    is applied to the formatted result.
    """
    prefix = 'class Bla:\n'
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
    formatted = black.format_str(code, mode=mode)
    formatted, _ = style_docstrings_in_code(formatted)
    if has_indent:
        return formatted[len(prefix):]
    return formatted
def main():
    """CLI entry point: parse tracker arguments and launch a VOT run."""
    arg_parser = argparse.ArgumentParser(description='Run VOT.')
    arg_parser.add_argument('tracker_name', type=str)
    arg_parser.add_argument('tracker_param', type=str)
    arg_parser.add_argument('--run_id', type=int, default=None)
    parsed = arg_parser.parse_args()
    run_vot(parsed.tracker_name, parsed.tracker_param, parsed.run_id)
def plot_metrics(data, labels, colors, wpgen, planner, maps, param_list, quantity, metrics, legendsoff, show, classic, withclassic, byplanner, nosubtitle):
    """Render grouped bar charts of navigation benchmark metrics as PNGs.

    For every map — and every planner (``byplanner=True``) or waypoint
    generator (``byplanner=False``) — one figure per metric is produced.
    Bars are grouped along ``quantity`` ('obs' or 'vel'); the remaining key
    of ``param_list`` is encoded via bar transparency (alpha).

    NOTE(review): ``map``, ``iter`` and ``l`` shadow Python builtins, and the
    title strings contain the typo 'Lenght'; both are left untouched here
    because the strings are part of the saved figure output.
    """
    barwidth = 0.1
    # The param_list key that is NOT plotted on the x-axis.
    other_quantity = np.array([*param_list.keys()])[[(x != quantity) for x in [*param_list.keys()]]][0]
    # Re-index hierarchically so the .loc chains below can drill
    # map -> wpgen/planner -> planner/wpgen -> other_quantity -> quantity.
    new_index = pd.MultiIndex.from_arrays([data['map'], data['wpgen'], data['planner'], data[other_quantity], data[quantity]], names=('map', 'wpgen', 'planner', other_quantity, quantity))
    if byplanner:
        new_index = pd.MultiIndex.from_arrays([data['map'], data['planner'], data['wpgen'], data[other_quantity], data[quantity]], names=('map', 'planner', 'wpgen', other_quantity, quantity))
    data.index = new_index
    data = data.sort_index()
    r = {}  # x positions of each bar series, keyed by wpgen/planner name
    for map in maps:
        map_data = data.loc[map]
        xticks = len(param_list[quantity])
        if byplanner:
            # One figure per (map, planner, metric); bar series = wpgens.
            for plan in planner:
                planner_data = map_data.loc[plan]
                if (plan in classic):
                    continue
                for metric in metrics:
                    (fig, ax) = plt.subplots()
                    iterate = wpgen
                    if withclassic:
                        iterate = (wpgen + classic)
                    for (i, iter) in enumerate(iterate):
                        # Shift each series one bar-width right of the
                        # previous one; the literal 'classic' entry is a
                        # placeholder and gets no bars of its own.
                        if (iter == 'classic'):
                            continue
                        elif (i == 0):
                            r[iter] = np.arange(xticks)
                        elif (iterate[(i - 1)] == 'classic'):
                            r[iter] = [(x + barwidth) for x in r[iterate[(i - 2)]]]
                        else:
                            r[iter] = [(x + barwidth) for x in r[iterate[(i - 1)]]]
                        if (iter in classic):
                            # Classic planners live under the 'classic' wpgen key.
                            wp_data = data.loc[map].loc[iter].loc['classic']
                        else:
                            wp_data = planner_data.loc[iter]
                        for (a, oq) in enumerate(param_list[other_quantity]):
                            oq_data = wp_data.loc[oq]
                            l = len(param_list[other_quantity])
                            # 'success' fades in with a, others fade out; only
                            # one alpha level per series carries the legend label.
                            if (metric == 'success'):
                                if (a == (l - 1)):
                                    ax.bar(r[iter], oq_data[metric], width=barwidth, label=labels[iter], color=colors[iter], alpha=((a + 1) / l))
                                else:
                                    ax.bar(r[iter], oq_data[metric], width=barwidth, color=colors[iter], alpha=((a + 1) / l))
                            elif (a == 0):
                                ax.bar(r[iter], oq_data[metric], width=barwidth, label=labels[iter], color=colors[iter], alpha=(1 - (a / l)))
                            else:
                                ax.bar(r[iter], oq_data[metric], width=barwidth, color=colors[iter], alpha=(1 - (a / l)))
                    # Axis caption / figure title per metric.
                    caption = ''
                    if (metric == 'path'):
                        caption = 'avg. Path Lenght [m]'
                        title = 'Path Lenght'
                    if (metric == 'time'):
                        caption = 'avg. Time t.g. [s]'
                        title = 'Time t.g.'
                    if (metric == 'success'):
                        caption = 'Success'
                        title = caption
                    if (metric == 'collision'):
                        caption = 'Collisions'
                        title = caption
                    if (quantity == 'obs'):
                        plt.xlabel('No. Obstacles', fontsize=15)
                        # Center the tick under the bar group.
                        plt.xticks([(r + ((((len(iterate) - 1) / 2) - 0.5) * barwidth)) for r in range(xticks)], [int(obs.replace('obs', '')) for obs in param_list['obs']])
                        plt.ylabel('{}'.format(caption), fontsize=15)
                        plt.suptitle('{} over No. Obstacles'.format(title), fontweight='bold', fontsize=16)
                        if nosubtitle:
                            plt.title('Local Planner: {0}, Map: {1}'.format(labels[plan], labels[map]))
                        else:
                            plt.title('Map: {1}'.format(labels[map]))
                        ax.grid('on')
                        if legendsoff:
                            plt.legend(loc='upper left')
                        if withclassic:
                            plt.savefig('{0}_obs_{1}_byplanner_{2}_withclassic.png'.format(metric, map, plan))
                        else:
                            plt.savefig('{0}_obs_{1}_byplanner_{2}.png'.format(metric, map, plan))
                        if show:
                            plt.show()
                        else:
                            plt.close()
                    if (quantity == 'vel'):
                        plt.xlabel('Obstacle Velocity', fontsize=15)
                        plt.xticks([(r + ((((len(iterate) - 1) / 2) - 0.5) * barwidth)) for r in range(xticks)], ['0.{0}'.format(int(vel.replace('vel', ''))) for vel in param_list['vel']])
                        plt.ylabel('{}'.format(caption), fontsize=15)
                        plt.suptitle('{} over Obstacle Velocity'.format(title), fontweight='bold', fontsize=16)
                        if nosubtitle:
                            plt.title('Local Planner: {0}, Map: {1}'.format(labels[plan], labels[map]))
                        else:
                            plt.title('Map: {1}'.format(labels[map]))
                        ax.grid('on')
                        if legendsoff:
                            plt.legend(loc='upper left')
                        if withclassic:
                            plt.savefig('{0}_vel_{1}_byplanner_{2}_withclassic.png'.format(metric, map, plan))
                        else:
                            plt.savefig('{0}_vel_{1}_byplanner_{2}.png'.format(metric, map, plan))
                        if show:
                            plt.show()
                        else:
                            plt.close()
        else:
            # One figure per (map, wpgen, metric); bar series = planners.
            for wp in wpgen:
                wp_data = map_data.loc[wp]
                for metric in metrics:
                    (fig, ax) = plt.subplots()
                    for (i, plan) in enumerate(planner):
                        # Bar x offsets; under wp == 'classic' only classic
                        # planners get positions.
                        if (wp == 'classic'):
                            if (plan in classic):
                                if (plan == classic[0]):
                                    r[plan] = np.arange(xticks)
                                else:
                                    r[plan] = [(x + barwidth) for x in r[planner[(i - 1)]]]
                        elif (i == 0):
                            r[plan] = np.arange(xticks)
                        else:
                            r[plan] = [(x + barwidth) for x in r[planner[(i - 1)]]]
                        if ((plan in classic) and (wp != 'classic')):
                            # Classic planners may be mixed into learned-wpgen
                            # figures only when requested via withclassic.
                            if withclassic:
                                planner_data = data.loc[map].loc['classic'].loc[plan]
                            else:
                                continue
                        elif ((plan not in classic) and (wp == 'classic')):
                            continue
                        else:
                            planner_data = wp_data.loc[plan]
                        for (a, oq) in enumerate(param_list[other_quantity]):
                            oq_data = planner_data.loc[oq]
                            l = len(param_list[other_quantity])
                            if (metric == 'success'):
                                if (a == (l - 1)):
                                    ax.bar(r[plan], oq_data[metric], width=barwidth, label=labels[plan], color=colors[plan], alpha=((a + 1) / l))
                                else:
                                    ax.bar(r[plan], oq_data[metric], width=barwidth, color=colors[plan], alpha=((a + 1) / l))
                            elif (a == 0):
                                ax.bar(r[plan], oq_data[metric], width=barwidth, label=labels[plan], color=colors[plan], alpha=(1 - (a / l)))
                            else:
                                ax.bar(r[plan], oq_data[metric], width=barwidth, color=colors[plan], alpha=(1 - (a / l)))
                    caption = ''
                    if (metric == 'path'):
                        caption = 'avg. Path Lenght [m]'
                        title = 'Path Lenght'
                    if (metric == 'time'):
                        caption = 'avg. Time t.g. [s]'
                        title = 'Time t.g.'
                    if (metric == 'success'):
                        caption = 'Success'
                        title = caption
                    if (metric == 'collision'):
                        caption = 'Collisions'
                        title = caption
                    if (quantity == 'obs'):
                        plt.xlabel('No. Obstacles', fontsize=15)
                        # Number of bar series actually drawn, for tick centering.
                        planners_plotted = len(planner)
                        if (wp == 'classic'):
                            planners_plotted = len(classic)
                        if ((wp != 'classic') and (not withclassic)):
                            planners_plotted = (len(planner) - len(classic))
                        plt.xticks([(r + (((planners_plotted / 2) - 0.5) * barwidth)) for r in range(xticks)], [int(obs.replace('obs', '')) for obs in param_list['obs']])
                        plt.ylabel('{}'.format(caption), fontsize=15)
                        plt.suptitle('{} over No. Obstacles'.format(title), fontweight='bold', fontsize=16)
                        if nosubtitle:
                            if (wp == 'classic'):
                                plt.title('Classic Navigation Systems, Map: {0}'.format(labels[map]))
                            else:
                                plt.title('Waypoint generator: {0}, Map: {1}'.format(labels[wp], labels[map]))
                        else:
                            plt.title('Map: {1}'.format(labels[map]))
                        ax.grid('on')
                        if legendsoff:
                            plt.legend(loc='upper left')
                        if withclassic:
                            plt.savefig('{0}_obs_{1}_{2}_withclassic.png'.format(metric, map, wp))
                        else:
                            plt.savefig('{0}_obs_{1}_{2}.png'.format(metric, map, wp))
                        if show:
                            plt.show()
                        else:
                            plt.close()
                    if (quantity == 'vel'):
                        plt.xlabel('Obstacle Velocity', fontsize=15)
                        planners_plotted = len(planner)
                        if (wp == 'classic'):
                            planners_plotted = len(classic)
                        if ((wp != 'classic') and (not withclassic)):
                            planners_plotted = (len(planner) - len(classic))
                        plt.xticks([(r + (((planners_plotted / 2) - 0.5) * barwidth)) for r in range(xticks)], ['0.{0}'.format(int(vel.replace('vel', ''))) for vel in param_list['vel']])
                        plt.ylabel('{}'.format(caption), fontsize=15)
                        plt.suptitle('{} over Obstacle Velocity'.format(title), fontweight='bold', fontsize=16)
                        if nosubtitle:
                            if (wp == 'classic'):
                                plt.title('Classic Navigation Systems, Map: {0}'.format(labels[map]))
                            else:
                                plt.title('Waypoint generator: {0}, Map: {1}'.format(labels[wp], labels[map]))
                        else:
                            plt.title('Map: {1}'.format(labels[map]))
                        ax.grid('on')
                        if legendsoff:
                            plt.legend(loc='upper left')
                        if withclassic:
                            plt.savefig('{0}_vel_{1}_{2}_withclassic.png'.format(metric, map, wp))
                        else:
                            plt.savefig('{0}_vel_{1}_{2}.png'.format(metric, map, wp))
                        if show:
                            plt.show()
                        else:
                            plt.close()
class DGRecLayer(nn.Module):
    """Graph convolution layer that sums neighbor messages after a greedy,
    diversity-promoting selection of at most ``k`` neighbors per node."""

    def __init__(self, args):
        super().__init__()
        # k: max neighbors kept per node; sigma/gamma: similarity-kernel
        # hyper-parameters (gamma is accepted but unused in similarity_matrix).
        self.k = args.k
        self.sigma = args.sigma
        self.gamma = args.gamma

    def similarity_matrix(self, X, sigma=1.0, gamma=2.0):
        # Pairwise similarity exp(-dist / (sigma * mean_dist)), normalized per
        # batch element. X is assumed (batch, neighbors, feature) — the greedy
        # selection below indexes it that way.
        dists = th.cdist(X, X)
        sims = th.exp(((- dists) / (sigma * dists.mean(dim=(- 1)).mean(dim=(- 1)).reshape((- 1), 1, 1))))
        return sims

    def submodular_selection_feature(self, nodes):
        # Greedy marginal-gain selection: k times, pick the neighbor whose
        # similarities add the largest coverage over the running maximum.
        device = nodes.mailbox['m'].device
        feature = nodes.mailbox['m']
        sims = self.similarity_matrix(feature, self.sigma, self.gamma)
        (batch_num, neighbor_num, feature_size) = feature.shape
        nodes_selected = []
        # cache holds, per batch element, the element-wise max similarity
        # already covered by the selected set.
        cache = th.zeros((batch_num, 1, neighbor_num), device=device)
        for i in range(self.k):
            # Marginal gain of each candidate over the current coverage.
            gain = th.sum((th.maximum(sims, cache) - cache), dim=(- 1))
            selected = th.argmax(gain, dim=1)
            cache = th.maximum(sims[(th.arange(batch_num, device=device), selected)].unsqueeze(1), cache)
            nodes_selected.append(selected)
        # (batch, k) indices of the chosen neighbors.
        return th.stack(nodes_selected).t()

    def sub_reduction(self, nodes):
        # DGL reduce function: sum messages, restricted to the k selected
        # neighbors when selection applies.
        mail = nodes.mailbox['m']
        (batch_size, neighbor_size, feature_size) = mail.shape
        # Skip selection when a sentinel category (-1) is present or there
        # are no more than k neighbors to begin with.
        if (((- 1) in nodes.mailbox['c']) or (nodes.mailbox['m'].shape[1] <= self.k)):
            mail = mail.sum(dim=1)
        else:
            neighbors = self.submodular_selection_feature(nodes)
            mail = mail[(th.arange(batch_size, dtype=th.long, device=mail.device).unsqueeze((- 1)), neighbors)]
            mail = mail.sum(dim=1)
        return {'h': mail}

    def category_aggregation(self, edges):
        # DGL message function: forward the source node's category and state.
        return {'c': edges.src['category'], 'm': edges.src['h']}

    def forward(self, graph, h, etype):
        """Propagate features along ``etype`` with symmetric D^{-1/2}
        normalization on both endpoints; returns destination features."""
        with graph.local_scope():
            (src, _, dst) = etype
            feat_src = h[src]
            feat_dst = h[dst]
            # Source-side normalization by out-degree (clamped to avoid /0).
            degs = graph.out_degrees(etype=etype).float().clamp(min=1)
            norm = th.pow(degs, (- 0.5))
            shp = (norm.shape + ((1,) * (feat_src.dim() - 1)))
            norm = th.reshape(norm, shp)
            feat_src = (feat_src * norm)
            graph.nodes[src].data['h'] = feat_src
            graph.update_all(self.category_aggregation, self.sub_reduction, etype=etype)
            rst = graph.nodes[dst].data['h']
            # Destination-side normalization by in-degree.
            degs = graph.in_degrees(etype=etype).float().clamp(min=1)
            norm = th.pow(degs, (- 0.5))
            shp = (norm.shape + ((1,) * (feat_dst.dim() - 1)))
            norm = th.reshape(norm, shp)
            rst = (rst * norm)
            return rst
def save_to_cache(path, obj):
    """Pickle *obj* to *path*, creating any missing parent directories.

    Parameters
    ----------
    path : str or Path
        Destination cache file.
    obj : object
        Any picklable object.
    """
    path = Path(path)
    logger = logging.getLogger(__name__)
    logger.info('Saving to cache at %s', str(path))
    # parents=True: the original only created the immediate parent, so a cache
    # path nested more than one missing level deep raised FileNotFoundError.
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, 'wb') as f:
        pickle.dump(obj, f)
def compute_doc_freq(crefs):
    """Count, for each n-gram, how many reference *groups* contain it.

    Parameters
    ----------
    crefs : iterable
        Each element is a group of references; each reference is a dict
        mapping n-gram -> count.

    Returns
    -------
    defaultdict(float)
        n-gram -> number of groups in which it appears at least once.
    """
    document_frequency = defaultdict(float)
    for refs in tqdm(crefs, ncols=100, desc='compute_doc_freq'):
        # Union of n-grams across this group's references, so each group
        # contributes at most 1 per n-gram. The original built a throwaway
        # list inside set() and unpacked .items() only to discard the counts.
        for ngram in {ngram for ref in refs for ngram in ref}:
            document_frequency[ngram] += 1
    return document_frequency
def main():
    """Train a ROWL-style open-set classifier (num_classes + 1 outputs) with
    targeted PGD adversarial examples, on CIFAR-10/100 or SVHN.

    Reads configuration from the module-level ``args`` namespace and relies on
    project helpers (dn/wn models, LinfPGDAttack, train_rowl, validate, ...).
    """
    if args.tensorboard:
        configure(('runs/%s' % args.name))
    # Standard CIFAR augmentation: pad-crop + horizontal flip.
    if args.augment:
        transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
    else:
        transform_train = transforms.Compose([transforms.ToTensor()])
    transform_test = transforms.Compose([transforms.ToTensor()])
    kwargs = {'num_workers': 1, 'pin_memory': True}
    if (args.in_dataset == 'CIFAR-10'):
        # Per-channel CIFAR statistics, rescaled from [0,255] to [0,1].
        normalizer = transforms.Normalize(mean=[(x / 255.0) for x in [125.3, 123.0, 113.9]], std=[(x / 255.0) for x in [63.0, 62.1, 66.7]])
        train_loader = torch.utils.data.DataLoader(torchvision.datasets.ImageFolder('./datasets/row_train_data/CIFAR-10', transform=transform_train), batch_size=args.batch_size, shuffle=True, **kwargs)
        # NOTE(review): the validation loader shuffles — intentional? Usually
        # shuffle=False for evaluation; confirm before changing.
        val_loader = torch.utils.data.DataLoader(datasets.CIFAR10('./datasets/cifar10', train=False, transform=transform_test), batch_size=args.batch_size, shuffle=True, **kwargs)
        num_classes = 10
        lr_schedule = [50, 75, 90]
    elif (args.in_dataset == 'CIFAR-100'):
        normalizer = transforms.Normalize(mean=[(x / 255.0) for x in [125.3, 123.0, 113.9]], std=[(x / 255.0) for x in [63.0, 62.1, 66.7]])
        train_loader = torch.utils.data.DataLoader(torchvision.datasets.ImageFolder('./datasets/row_train_data/CIFAR-100', transform=transform_train), batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = torch.utils.data.DataLoader(datasets.CIFAR100('./datasets/cifar100', train=False, transform=transform_test), batch_size=args.batch_size, shuffle=True, **kwargs)
        num_classes = 100
        lr_schedule = [50, 75, 90]
    elif (args.in_dataset == 'SVHN'):
        # SVHN branch: no normalization, shorter schedule; mutates args in
        # place to override the epoch/save settings.
        normalizer = None
        transform = transforms.Compose([transforms.ToTensor()])
        train_loader = torch.utils.data.DataLoader(torchvision.datasets.ImageFolder('./datasets/row_train_data/SVHN', transform=transform), batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = torch.utils.data.DataLoader(svhn.SVHN('datasets/svhn/', split='test', transform=transforms.ToTensor(), download=False), batch_size=args.batch_size, shuffle=False, **kwargs)
        args.epochs = 20
        args.save_epoch = 2
        lr_schedule = [10, 15, 18]
        num_classes = 10
    # NOTE(review): an unrecognized args.in_dataset leaves normalizer /
    # loaders undefined and fails later with NameError — confirm upstream
    # argparse restricts the choices.
    # Models output num_classes + 1 logits: the extra class absorbs
    # adversarial / unknown inputs.
    if (args.model_arch == 'densenet'):
        model = dn.DenseNet3(args.layers, (num_classes + 1), args.growth, reduction=args.reduce, bottleneck=args.bottleneck, dropRate=args.droprate, normalizer=normalizer)
    elif (args.model_arch == 'wideresnet'):
        model = wn.WideResNet(args.depth, (num_classes + 1), widen_factor=args.width, dropRate=args.droprate, normalizer=normalizer)
    else:
        assert False, 'Not supported model arch: {}'.format(args.model_arch)
    # Targeted PGD attack used during training to generate adversarial inputs.
    attack = LinfPGDAttack(model=model, eps=args.epsilon, nb_iter=args.iters, eps_iter=args.iter_size, rand_init=True, targeted=True, num_classes=(num_classes + 1), loss_func='CE', elementwise_best=True)
    print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
    model = model.cuda()
    cudnn.benchmark = True
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, nesterov=True, weight_decay=args.weight_decay)
    # Optionally resume model weights and start epoch from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # Main loop: step LR schedule, adversarial training pass, validation,
    # periodic checkpointing.
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, lr_schedule)
        train_rowl(train_loader, model, criterion, optimizer, epoch, num_classes, attack)
        prec1 = validate(val_loader, model, criterion, num_classes, epoch)
        if (((epoch + 1) % args.save_epoch) == 0):
            save_checkpoint({'epoch': (epoch + 1), 'state_dict': model.state_dict()}, (epoch + 1))
def is_existed_rec(name, obj):
    """Recursively verify that every path in *obj* exists, printing each
    file's size in MiB; dict values are checked under a 'name-key' label."""
    def _check_one(path):
        # Fail loudly on a missing path, otherwise report its size.
        assert os.path.exists(path), f'{name}:{path} does not exist'
        megabytes = os.path.getsize(path) / float(1024 * 1024)
        print(path, round(megabytes, 2))

    if isinstance(obj, list):
        for entry in obj:
            _check_one(entry)
    elif isinstance(obj, str):
        _check_one(obj)
    elif isinstance(obj, dict):
        for key, value in obj.items():
            is_existed_rec(name + '-' + key, value)
    else:
        print(f'Unknown object:{name}:{obj}')
    return
class RewardSwitchSTDP(Learner):
    """STDP learning rule whose pre/post amplitudes switch sign with the
    reward signal (reward > 0 uses the *_pos amplitudes, otherwise *_neg)."""

    def __init__(self, trainable=None, **kwargs):
        super(RewardSwitchSTDP, self).__init__(trainable=trainable, **kwargs)
        self.trainable = trainable
        self.prefered_backend = ['pytorch']
        self.name = 'Reward_Switch_STDP'
        self._constant_variables = dict()
        # Amplitudes used when reward > 0 (…_pos) vs reward <= 0 (…_neg).
        self._constant_variables['Apost_pos'] = kwargs.get('Apost_pos', 0.005)
        self._constant_variables['Apre_pos'] = kwargs.get('Apre_pos', (- 0.004))
        self._constant_variables['Apost_neg'] = kwargs.get('Apost_neg', (- 0.005))
        self._constant_variables['Apre_neg'] = kwargs.get('Apre_neg', 0.004)
        # Exponential trace decay factors (default tau = 20 time steps).
        self._constant_variables['pre_decay'] = kwargs.get('pre_decay', np.exp(((- 1) / 20.0)))
        post_decay = kwargs.get('post_decay', np.exp(((- 1) / 20.0)))
        self._constant_variables['post_decay'] = post_decay
        self._constant_variables['homoestatic'] = kwargs.get('homoestatic', 1e-10)
        m_rate = kwargs.get('m_rate', 20.0)
        # Target rate (Hz) converted into trace units; rescaled again by dt
        # in build().
        self._constant_variables['m_rate'] = ((m_rate * (1 - post_decay)) / (1000.0 * post_decay))
        self.w_min = kwargs.get('w_min', 0.0)
        self.w_max = kwargs.get('w_max', 0.5)
        self.w_norm = 1.2
        self.w_mean = None
        self.lr = kwargs.get('lr', 0.1)
        self.param_run_update = True
        self.reward_name = kwargs.get('reward_name', 'Output_Reward[updated]')

    def update(self, input, output, reward, input_trace, output_trace, pre_decay, post_decay, Apost_pos, Apost_neg, Apre_pos, Apre_neg, m_rate, homoestatic, weight):
        """One STDP step; returns updated (input_trace, output_trace, weight).

        Traces decay only where no spike occurred (the ``.le(0.0)`` mask) and
        are reset to the spike value otherwise; the reward sign selects which
        amplitude set multiplies them.
        """
        if (self.w_mean is None):
            # Lazily capture the initial per-row mean weight (detached).
            self.w_mean = torch.mean(weight, dim=1, keepdim=True).detach()
            self.aw_mean = torch.mean(self.w_mean)
        if self.training:
            input_trace = (((pre_decay * input_trace) * input.le(0.0)) + input)
            output_trace = (((post_decay * output_trace) * output.le(0.0)) + output)
            # Reward-gated amplitude switch.
            Apost = ((reward.gt(0) * Apost_pos) + (reward.le(0) * Apost_neg))
            Apre = ((reward.gt(0) * Apre_pos) + (reward.le(0) * Apre_neg))
            # Pre-before-post and post-before-pre contributions, averaged
            # over the batch dimension.
            pre_post = torch.matmul(output.permute(1, 0), (Apost * input_trace))
            post_pre = torch.matmul((Apre * output_trace).permute(1, 0), input)
            dw = ((pre_post + post_pre) / (1.0 * input.shape[0]))
            weight = (weight + dw)
            # Hard clamp; NOTE(review): uses the fixed bound 0.1, not
            # self.w_min/self.w_max — confirm this is intended.
            weight = torch.clamp(weight, 0, 0.1)
        return (input_trace, output_trace, weight)

    def build(self, backend):
        """Register trace variables and the update op with the backend for
        every trainable connection."""
        # Scale rate constants by the simulation time step.
        self._constant_variables['m_rate'] = (self._constant_variables['m_rate'] * backend.dt)
        self._constant_variables['homoestatic'] = (self._constant_variables['homoestatic'] / self._constant_variables['m_rate'])
        super(RewardSwitchSTDP, self).build(backend)
        self.dt = backend.dt
        self.run_time = backend.runtime
        for conn in self.trainable_connections.values():
            preg = conn.pre
            postg = conn.post
            pre_name = conn.get_input_name(preg, postg)
            post_name = conn.get_group_name(postg, 'O')
            weight_name = conn.get_link_name(preg, postg, 'weight')
            input_trace_name = (conn.id + '_{input_trace}')
            output_trace_name = (conn.id + '_{output_trace[stay]}')
            self.variable_to_backend(input_trace_name, backend._variables[pre_name].shape, value=0.0)
            self.variable_to_backend(output_trace_name, backend._variables[post_name].shape, value=0.0)
            self.op_to_backend([input_trace_name, output_trace_name, weight_name], self.update, [pre_name, post_name, self.reward_name, input_trace_name, output_trace_name, 'pre_decay', 'post_decay', 'Apost_pos', 'Apost_neg', 'Apre_pos', 'Apre_neg', 'm_rate', 'homoestatic', weight_name])
def compute_cumulative(df: pd.DataFrame, feature_max_dict: Dict[(str, float)]) -> pd.DataFrame:
    """Turn per-(feature, cutoff) points into right-to-left cumulative
    interval scores.

    For each feature, cutoffs are sorted ascending and points are summed from
    the highest cutoff downward, so each interval carries the total points of
    itself and all higher intervals. A trailing zero-point (upper, inf)
    interval is appended unless the feature's top cutoff already equals
    feature_max_dict[feature], in which case its last upper bound is widened
    to +inf instead.

    Returns a DataFrame with columns
    ['feature', 'interval_lower', 'interval_upper', 'interval_points'].
    """
    df_sorted = df.sort_values(by=['feature', 'cutoff'], ascending=True)
    # Reverse cumulative sum within each feature: reverse, cumsum, reverse.
    df_sorted['running_sum'] = df_sorted.groupby('feature')['points'].transform((lambda x: x[::(- 1)].cumsum()[::(- 1)]))
    df_interval = df_sorted[['feature', 'cutoff', 'running_sum']]
    df_interval.columns = ['feature', 'interval_upper', 'interval_points']
    # Each interval's lower bound is the previous cutoff; the first one is -inf.
    df_interval['interval_lower'] = df_interval.groupby('feature')['interval_upper'].shift(1)
    df_interval['interval_lower'].fillna(float('-inf'), inplace=True)
    (new_rows, max_features) = ([], [])
    for (feature, group) in df_interval.groupby('feature'):
        max_interval_upper = group['interval_upper'].max()
        if (max_interval_upper != feature_max_dict[feature]):
            # Cover (top cutoff, +inf) with zero points.
            new_rows.append({'feature': feature, 'interval_lower': max_interval_upper, 'interval_upper': np.inf, 'interval_points': 0})
        else:
            # The feature already reaches its max: widen its last interval later.
            max_features.append(feature)
    new_df = pd.DataFrame(new_rows)
    result = pd.concat([df_interval, new_df], axis=0, ignore_index=True).sort_values(['feature', 'interval_lower'])
    result = result[['feature', 'interval_lower', 'interval_upper', 'interval_points']]
    result.reset_index(inplace=True, drop=True)
    for feature in max_features:
        # Widen the final interval of maxed-out features to +inf.
        change_idx = result[(result['feature'] == feature)].tail(1).index[0]
        result.loc[(result.index[change_idx], 'interval_upper')] = float('inf')
    return result
class MAE(PytorchMetric):
    """Mean absolute error accumulated incrementally over batches."""

    def __init__(self):
        # Running element count and running sum of |pred - target|.
        self.total = torch.tensor(0)
        self.sum_abs_error = torch.tensor(0.0)

    def __call__(self, preds, targets):
        # Fold one batch into the running totals.
        _check_same_shape(preds, targets)
        batch_error = (preds - targets).abs().sum()
        self.sum_abs_error = self.sum_abs_error + batch_error
        self.total = self.total + targets.numel()

    def compute(self):
        # Final metric: total absolute error over total element count.
        return self.sum_abs_error / self.total
def parse_requirements(fname='requirements/runtime.txt', with_version=True):
    """Parse a pip requirements file into a list of requirement strings.

    `-r other.txt` includes are followed recursively and `-e` lines yield
    their `#egg=` name. With *with_version* False the specifiers are dropped.
    Environment markers after ';' are kept except on Python 3.4.
    """
    import sys
    from os.path import exists
    require_fpath = fname

    def parse_line(line):
        # One cleaned requirements line -> zero or more info dicts.
        if line.startswith('-r '):
            # Follow the include into the referenced file.
            target = line.split(' ')[1]
            yield from parse_require_file(target)
        else:
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            else:
                # Split package from its first version specifier.
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = [p.strip() for p in re.split(pat, line, maxsplit=1)]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Version followed by an environment marker.
                        version, platform_deps = map(str.strip, rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        # Yield parsed entries, skipping blanks and comment lines.
        with open(fpath, 'r') as f:
            for raw in f.readlines():
                stripped = raw.strip()
                if stripped and not stripped.startswith('#'):
                    yield from parse_line(stripped)

    def gen_packages_items():
        if not exists(require_fpath):
            return
        for info in parse_require_file(require_fpath):
            parts = [info['package']]
            if with_version and ('version' in info):
                parts.extend(info['version'])
            if not sys.version.startswith('3.4'):
                platform_deps = info.get('platform_deps')
                if platform_deps is not None:
                    parts.append(';' + platform_deps)
            yield ''.join(parts)

    return list(gen_packages_items())
def process_event(events: dict[str, Any], labels_dictionary: dict[str, int]):
    """Normalize one raw event dict.

    'label' is mapped through *labels_dictionary*, 'frame' is coerced to int,
    and every other field (including 'comment') is copied through unchanged.
    The original had a separate 'comment' branch identical to the fallback;
    it is folded into the else.
    """
    new_annotation = {}
    for element, value in events.items():
        if element == 'label':
            new_annotation[element] = labels_dictionary[value]
        elif element == 'frame':
            new_annotation[element] = int(value)
        else:
            new_annotation[element] = value
    return new_annotation
def get_device_map(n_layers, devices):
    """Partition layer indices 0..n_layers-1 into contiguous chunks, one per
    device, and return a {device: [layer indices]} mapping."""
    chunk = int(ceil(n_layers / len(devices)))
    all_layers = list(range(n_layers))
    chunks = [all_layers[start:(start + chunk)] for start in range(0, n_layers, chunk)]
    return dict(zip(devices, chunks))
def fid_inception_v3():
    """Build the InceptionV3 variant used for FID computation.

    Constructs a torchvision InceptionV3 (1008 classes, no aux logits,
    untrained), swaps selected mixed blocks for the FIDInception* versions
    that match the original TF implementation, then loads the FID weights
    from FID_WEIGHTS_URL (network download on first use).
    """
    inception = models.inception_v3(num_classes=1008, aux_logits=False, pretrained=False, init_weights=False)
    # Replace blocks whose pooling differs from torchvision's stock model.
    inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
    inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
    inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
    inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
    inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
    inception.Mixed_7b = FIDInceptionE_1(1280)
    inception.Mixed_7c = FIDInceptionE_2(2048)
    state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
    inception.load_state_dict(state_dict)
    return inception
class VATLoss(nn.Module):
    """Virtual-adversarial-training loss: perturb the input along the
    direction that most changes the model's prediction and penalize the
    resulting divergence."""

    def __init__(self, xi=10.0, eps=1.0, prop_eps=0.25, ip=1, distance_func=KL_div(reduce=True)):
        # xi: scale of the probe perturbation during power iteration.
        # eps: adversarial radius (scalar or per-sample tensor).
        # prop_eps: extra multiplicative factor applied to eps.
        # ip: number of power-iteration steps.
        # NOTE(review): distance_func default is evaluated once at class
        # definition, so all default-constructed VATLoss instances share one
        # KL_div object — confirm KL_div is stateless.
        super(VATLoss, self).__init__()
        self.xi = xi
        self.eps = eps
        self.ip = ip
        self.prop_eps = prop_eps
        self.distance_func = distance_func

    def forward(self, model, x: torch.Tensor):
        """Return (lds, x_adv, r_adv): the VAT divergence, the perturbed
        input (detached) and the perturbation itself (detached)."""
        # Reference prediction, no gradient through it.
        with torch.no_grad():
            pred = model(x)[0]
        assert simplex(pred)
        # Power iteration: refine a random unit direction toward the most
        # divergence-increasing direction.
        d = torch.randn_like(x, device=x.device)
        d = _l2_normalize(d)
        with _disable_tracking_bn_stats(model):
            for _ in range(self.ip):
                d.requires_grad_()
                pred_hat = model((x + (self.xi * d)))[0]
                adv_distance = self.distance_func(pred_hat, pred)
                adv_distance.backward()
                d = _l2_normalize(d.grad)
            # Scale the direction by eps (per-sample tensor broadcast over
            # the remaining dims, or plain scalar).
            if isinstance(self.eps, torch.Tensor):
                (bn, *shape) = x.shape
                basic_view_shape: Tuple[(int, ...)] = (bn, *([1] * len(shape)))
                r_adv = ((d * self.eps.view(basic_view_shape).expand_as(d)) * self.prop_eps)
            elif isinstance(self.eps, (float, int)):
                r_adv = ((d * self.eps) * self.prop_eps)
            else:
                raise NotImplementedError(f'eps should be tensor or float, given {self.eps}.')
            # Final divergence at the adversarial point.
            pred_hat = model((x + r_adv))[0]
            lds = self.distance_func(pred_hat, pred)
        return (lds, (x + r_adv).detach(), r_adv.detach())
def ggml_convert_fp32(tensor: torch.Tensor, weight_shape: tuple, k: int, qtype: int):
    """Dequantize a ggml-quantized uint8 buffer back to a float32 tensor.

    tensor: raw quantized bytes (must be uint8); weight_shape: shape of the
    dequantized result; k: number of elements to dequantize; qtype: ggml
    quantization type id understood by ggml_dequantize.
    """
    invalidInputError((tensor.dtype == torch.uint8), 'Input tensor must be uint8')
    # Raw pointers into the source bytes and the freshly allocated output;
    # ggml writes the dequantized floats in place through dst_ptr.
    src_ptr = ctypes.c_void_p(tensor.data.data_ptr())
    dst_size = k  # element count; kept for readability, mirrors the k argument
    dst_tensor = torch.empty(weight_shape, dtype=torch.float)
    dst_ptr = ctypes.c_void_p(dst_tensor.data.data_ptr())
    ggml.ggml_dequantize(src_ptr, dst_ptr, k, qtype)
    return dst_tensor
def make_diff(file, original, reformatted):
    """Return a unified diff (list of lines, 3 lines of context) between
    *original* and *reformatted*, labelling both sides with the file name."""
    from_label = '{}\t(original)'.format(file)
    to_label = '{}\t(reformatted)'.format(file)
    diff_lines = difflib.unified_diff(original, reformatted, fromfile=from_label, tofile=to_label, n=3)
    return list(diff_lines)
class BYTETracker(object):
    """BYTE-style multi-object tracker: associate high-score detections
    first, then rescue remaining tracks with low-score detections."""

    def __init__(self, args, frame_rate=30):
        self.tracked_stracks = []
        self.lost_stracks = []
        self.removed_stracks = []
        self.frame_id = 0
        self.args = args
        # New tracks are only started above this stricter threshold.
        self.det_thresh = (args.track_thresh + 0.1)
        # Frames a lost track is kept before removal, scaled by frame rate.
        self.buffer_size = int(((frame_rate / 30.0) * 30))
        self.max_time_lost = self.buffer_size
        self.max_per_image = args.num_queries
        self.kalman_filter = KalmanFilter()

    def update(self, output_results):
        """Advance one frame with the detector output (dict with 'scores',
        'labels', 'boxes' tensors) and return the active tracks as a deep
        copy of [{'tracking_id', 'active', 'bbox', 'score'}, ...]."""
        self.frame_id += 1
        activated_starcks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []
        scores = output_results['scores'].cpu().numpy()
        classes = output_results['labels'].cpu().numpy()
        bboxes = output_results['boxes'].cpu().numpy()
        # Split detections: high score for the first association, scores in
        # (0.2, track_thresh) for the second, rescue association.
        remain_inds = (scores > self.args.track_thresh)
        inds_low = (scores > 0.2)
        inds_high = (scores < self.args.track_thresh)
        inds_second = np.logical_and(inds_low, inds_high)
        dets_second = bboxes[inds_second]
        dets = bboxes[remain_inds]
        scores_keep = scores[remain_inds]
        scores_second = scores[inds_second]
        if (len(dets) > 0):
            detections = [STrack(STrack.tlbr_to_tlwh(tlbr), s, 30) for (tlbr, s) in zip(dets, scores_keep)]
        else:
            detections = []
        ' Add newly detected tracklets to tracked_stracks'
        # Separate not-yet-confirmed tracks from confirmed ones.
        unconfirmed = []
        tracked_stracks = []
        for track in self.tracked_stracks:
            if (not track.is_activated):
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)
        ' Step 2: First association, with Kalman and IOU'
        strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
        # Predict all tracks forward one step before matching.
        STrack.multi_predict(strack_pool)
        dists = matching.iou_distance(strack_pool, detections)
        (matches, u_track, u_detection) = matching.linear_assignment(dists, thresh=0.8)
        for (itracked, idet) in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if (track.state == TrackState.Tracked):
                track.update(detections[idet], self.frame_id)
                activated_starcks.append(track)
            else:
                # A lost track matched again: re-activate, keeping its id.
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)
        ' Step 3: Second association, with IOU'
        if (len(dets_second) > 0):
            detections_second = [STrack(STrack.tlbr_to_tlwh(tlbr), s, 30) for (tlbr, s) in zip(dets_second, scores_second)]
        else:
            detections_second = []
        # Only still-tracked leftovers get a chance at low-score detections.
        r_tracked_stracks = [strack_pool[i] for i in u_track if (strack_pool[i].state == TrackState.Tracked)]
        dists = matching.iou_distance(r_tracked_stracks, detections_second)
        (matches, u_track, u_detection_second) = matching.linear_assignment(dists, thresh=0.4)
        for (itracked, idet) in matches:
            track = r_tracked_stracks[itracked]
            det = detections_second[idet]
            if (track.state == TrackState.Tracked):
                track.update(det, self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)
        # Anything still unmatched after both passes is marked lost.
        for it in u_track:
            track = r_tracked_stracks[it]
            if (not (track.state == TrackState.Lost)):
                track.mark_lost()
                lost_stracks.append(track)
        'Deal with unconfirmed tracks, usually tracks with only one beginning frame'
        detections = [detections[i] for i in u_detection]
        dists = matching.iou_distance(unconfirmed, detections)
        (matches, u_unconfirmed, u_detection) = matching.linear_assignment(dists, thresh=0.7)
        for (itracked, idet) in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_id)
            activated_starcks.append(unconfirmed[itracked])
        # Unconfirmed tracks that found no detection are dropped outright.
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)
        ' Step 4: Init new stracks'
        for inew in u_detection:
            track = detections[inew]
            if (track.score < self.det_thresh):
                continue
            track.activate(self.kalman_filter, self.frame_id)
            activated_starcks.append(track)
        ' Step 5: Update state'
        # Remove tracks lost for longer than the time budget.
        for track in self.lost_stracks:
            if ((self.frame_id - track.end_frame) > self.max_time_lost):
                track.mark_removed()
                removed_stracks.append(track)
        # Rebuild the bookkeeping lists without duplicates.
        self.tracked_stracks = [t for t in self.tracked_stracks if (t.state == TrackState.Tracked)]
        self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)
        self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
        self.removed_stracks.extend(removed_stracks)
        (self.tracked_stracks, self.lost_stracks) = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
        # Emit the currently active tracks.
        ret = list()
        for track in self.tracked_stracks:
            if track.is_activated:
                track_dict = {}
                track_dict['tracking_id'] = track.track_id
                track_dict['active'] = 1
                track_dict['bbox'] = track.tlbr
                track_dict['score'] = track.score
                ret.append(track_dict)
        return copy.deepcopy(ret)
def process(device, model, model_type, image, input_size, target_size, optimize, use_camera):
    """Run one depth-prediction forward pass and resize the result to
    *target_size*; dispatches between an OpenVINO model and a PyTorch one.

    In camera mode the resize notice is only printed on the first call
    (tracked via the module-level ``first_execution`` flag).
    """
    global first_execution
    if ('openvino' in model_type):
        if (first_execution or (not use_camera)):
            print(f'    Input resized to {input_size[0]}x{input_size[1]} before entering the encoder')
            first_execution = False
        # OpenVINO path: NCHW numpy input, indexed by the model's output port.
        sample = [np.reshape(image, (1, 3, *input_size))]
        prediction = model(sample)[model.output(0)][0]
        prediction = cv2.resize(prediction, dsize=target_size, interpolation=cv2.INTER_CUBIC)
    else:
        # PyTorch path: add batch dim and move to the target device.
        sample = torch.from_numpy(image).to(device).unsqueeze(0)
        if (optimize and (device == torch.device('cuda'))):
            if first_execution:
                print('  Optimization to half-floats activated. Use with caution, because models like Swin require\n  float precision to work properly and may yield non-finite depth values to some extent for\n  half-floats.')
            sample = sample.to(memory_format=torch.channels_last)
            sample = sample.half()
        if (first_execution or (not use_camera)):
            (height, width) = sample.shape[2:]
            print(f'    Input resized to {width}x{height} before entering the encoder')
            first_execution = False
        prediction = model.forward(sample)
        # Bicubic upsample back to the original resolution (note the
        # width/height swap via target_size[::-1]).
        prediction = torch.nn.functional.interpolate(prediction.unsqueeze(1), size=target_size[::(- 1)], mode='bicubic', align_corners=False).squeeze().cpu().numpy()
    return prediction
def remove_ignore_keys_(state_dict):
    """Strip legacy bookkeeping entries from *state_dict* in place; missing
    keys are ignored. The trailing underscore marks in-place mutation."""
    ignore_keys = ('encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'encoder.embed_positions._float_tensor', 'decoder.embed_positions._float_tensor')
    for key in ignore_keys:
        state_dict.pop(key, None)
def cupy_kernel(strFunction, objectVariables):
    """Specialize the CUDA kernel template stored in the module-level global
    named *strFunction*, substituting SIZE_/VALUE_ macros with the concrete
    sizes and strided accesses of the tensors in *objectVariables*."""
    strKernel = globals()[strFunction]
    # Expand every SIZE_<dim>(<tensor>) into that tensor's dimension size.
    while True:
        objectMatch = re.search('(SIZE_)([0-4])(\\()([^\\)]*)(\\))', strKernel)
        if (objectMatch is None):
            break
        intArg = int(objectMatch.group(2))
        strTensor = objectMatch.group(4)
        intSizes = objectVariables[strTensor].size()
        strKernel = strKernel.replace(objectMatch.group(), str(intSizes[intArg]))
    # Expand every VALUE_<rank>(<tensor>, i0, ..., i{rank-1}) into a strided
    # element access: tensor[(i0)*s0 + (i1)*s1 + ...]; '{'/'}' in index
    # expressions are rewritten to parentheses first.
    while True:
        objectMatch = re.search('(VALUE_)([0-4])(\\()([^\\)]+)(\\))', strKernel)
        if (objectMatch is None):
            break
        intArgs = int(objectMatch.group(2))
        strArgs = objectMatch.group(4).split(',')
        strTensor = strArgs[0]
        intStrides = objectVariables[strTensor].stride()
        strIndex = [(((('((' + strArgs[(intArg + 1)].replace('{', '(').replace('}', ')').strip()) + ')*') + str(intStrides[intArg])) + ')') for intArg in range(intArgs)]
        strKernel = strKernel.replace(objectMatch.group(0), (((strTensor + '[') + str.join('+', strIndex)) + ']'))
    return strKernel
def symmetric_cross_entropy(alpha, beta):
    """Build a Symmetric Cross Entropy loss: alpha * CE + beta * reverse-CE.

    CE uses the raw targets against clipped predictions; the reverse term
    uses the raw predictions against clipped targets, so neither log sees 0.
    Uses the TF1 `tf.log` API, matching the rest of this file.
    """
    def loss(y_true, y_pred):
        # The original kept four alias variables (y_true_1/y_pred_1/...) that
        # added nothing; the clipped copies below are the only ones needed.
        y_pred_clipped = tf.clip_by_value(y_pred, 1e-07, 1.0)
        y_true_clipped = tf.clip_by_value(y_true, 0.0001, 1.0)
        ce = tf.reduce_mean((- tf.reduce_sum((y_true * tf.log(y_pred_clipped)), axis=(- 1))))
        rce = tf.reduce_mean((- tf.reduce_sum((y_pred * tf.log(y_true_clipped)), axis=(- 1))))
        return ((alpha * ce) + (beta * rce))
    return loss
def compute_input_streams(elec_pos: Array, ion_pos: Optional[Array]=None, include_2e_stream: bool=True, include_ei_norm: bool=True, ei_norm_softening: chex.Scalar=0.0, include_ee_norm: bool=True, ee_norm_softening: chex.Scalar=0.0) -> InputStreams:
    """Build the one-electron and (optionally) two-electron input streams.

    Returns (input_1e, input_2e, r_ei, r_ee); input_2e and r_ee are None when
    include_2e_stream is False. The *_norm flags and softening scalars are
    forwarded to the electron-ion / electron-electron helpers.
    """
    (input_1e, r_ei) = compute_electron_ion(elec_pos, ion_pos, include_ei_norm, ei_norm_softening)
    input_2e = None
    r_ee = None
    if include_2e_stream:
        (input_2e, r_ee) = compute_electron_electron(elec_pos, include_ee_norm, ee_norm_softening)
    return (input_1e, input_2e, r_ei, r_ee)
def flatten_str_dict(hierarchical_dict):
    """Flatten a nested dict into an OrderedDict, concatenating the string
    keys along each path with no separator (so {'a': {'b': 1}} -> {'ab': 1})."""
    flattened = OrderedDict()
    for outer_key, value in hierarchical_dict.items():
        if not isinstance(value, dict):
            flattened[outer_key] = value
            continue
        # Recurse first, then prefix every flattened inner key.
        for inner_key, inner_value in flatten_str_dict(value).items():
            flattened[outer_key + inner_key] = inner_value
    return flattened
class MobilenetV2(Model):
    """Download metadata for the TF-Slim MobileNetV2 1.0/224 package."""

    def model_url(self) -> str:
        # NOTE(review): the URL literal in the original source was corrupted
        # (the string had swallowed the following method's text). Restored
        # from the official TF-Slim MobileNetV2 checkpoint location — verify
        # against the project's mirror before shipping.
        return 'https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_224.tgz'

    def package_name(self) -> str:
        # Archive file name expected by the downloader.
        return 'mobilenet_v2_1.0_224.tgz'
class IOUEntropyDataset(BaseDataset):
    """Dataset pairing source/reconstructed images with per-image IoU labels,
    entropy scores, segmentation predictions and optional confidence maps."""

    def initialize(self, opt):
        """Collect, naturally sort and store all file lists from *opt*."""
        self.with_conf_map = False
        (image_src_paths, image_rec_paths, label_paths, pred_paths, entropy_paths, conf_map_paths) = self.get_paths(opt)
        util.natural_sort(image_src_paths)
        util.natural_sort(image_rec_paths)
        util.natural_sort(label_paths)
        util.natural_sort(pred_paths)
        util.natural_sort(entropy_paths)
        if opt.with_conf_map:
            self.with_conf_map = opt.with_conf_map
            util.natural_sort(conf_map_paths)
            self.conf_map_paths = conf_map_paths
        self.image_src_paths = image_src_paths
        self.image_rec_paths = image_rec_paths
        self.label_paths = label_paths
        self.entropy_paths = entropy_paths
        self.pred_paths = pred_paths
        print(len(label_paths))
        # ImageNet mean/std normalization after ToTensor.
        self.transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor(), torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    def __getitem__(self, index):
        """Load one sample: both images (normalized tensors), IoU + validity,
        entropy scalar, prediction prob/label maps and optional conf map."""
        image_src_path = self.image_src_paths[index]
        image_rec_path = self.image_rec_paths[index]
        label_path = self.label_paths[index]
        entropy_path = self.entropy_paths[index]
        if self.with_conf_map:
            conf_map_path = self.conf_map_paths[index]
            conf_map = np.load(conf_map_path)['confidence_map']
        else:
            conf_map = 0
        pred_path = (self.pred_paths[index] + '.npz')
        assert self.paths_match(label_path, image_src_path, image_rec_path), ("The label_path %s, image_src_path %s and image_rec_path %s don't match." % (label_path, image_src_path, image_rec_path))
        image_src = Image.open(image_src_path).convert('RGB')
        image_rec = Image.open(image_rec_path).convert('RGB')
        iou_label = json.load(open(label_path, 'r'))
        entropy = json.load(open(entropy_path, 'r'))
        (prob_map, label_map) = (np.load(pred_path)['prob'], np.load(pred_path)['label'])
        prob_map = torch.from_numpy(prob_map)
        label_map = torch.from_numpy(label_map)
        image_src_tensor = self.transform(image_src)
        image_rec_tensor = self.transform(image_rec)
        data = {'image_src': image_src_tensor, 'image_rec': image_rec_tensor, 'iou': torch.tensor(iou_label[0]), 'valid': (torch.tensor(iou_label[1]) != 0), 'entropy': torch.tensor(entropy[5]), 'image_src_path': image_src_path, 'prob': prob_map, 'label_map': label_map, 'conf_map': conf_map}
        return data

    def __len__(self):
        return len(self.image_src_paths)

    def get_paths(self, opt):
        """Gather the raw (unsorted) path lists for every modality."""
        image_src_paths = make_dataset(opt.image_src_dir, recursive=True)
        image_rec_paths = make_dataset(opt.image_rec_dir, recursive=True)
        label_paths = make_iou_dataset(opt.iou_dir, recursive=True)
        entropy_paths = make_iou_dataset(opt.entropy_dir, recursive=True)
        pred_paths = make_dataset(opt.pred_dir, recursive=True)
        if opt.with_conf_map:
            conf_map_paths = make_dataset(opt.conf_map_dir, recursive=True, is_target_file=is_npz_file)
        else:
            conf_map_paths = None
        return (image_src_paths, image_rec_paths, label_paths, pred_paths, entropy_paths, conf_map_paths)

    def paths_match(self, path1, path2, path3):
        """True iff all three basenames share the same key built from the
        first three underscore-separated fields."""
        key1 = '_'.join(os.path.basename(path1).split('_')[:3])
        key2 = '_'.join(os.path.basename(path2).split('_')[:3])
        key3 = '_'.join(os.path.basename(path3).split('_')[:3])
        # Bug fix: the original ended with `and '_'.join(name3...)`, which
        # merely truth-tested the third key instead of comparing it, so a
        # mismatched reconstructed-image path was never detected.
        return key1 == key2 == key3
class Multinomial(object):
    """Multinomial distribution over `n_variables` categories, parameterized
    by unnormalized logits stored in `mean` (log-softmax taken over dim 2)."""

    def __init__(self, n_variables, mean=None):
        self.n_variables = n_variables
        self.mean = mean
        self._sample = None
        self._cuda_device = None

    def sample(self, n_samples=1, resample=False):
        # Sampling is intentionally not implemented for this distribution.
        pass

    def log_prob(self, sample):
        # Numerically stable log-softmax over dim 2 (with a 1e-6 floor inside
        # the log), masked by `sample`.
        maxval = torch.max(self.mean, dim=2, keepdim=True)[0]
        normalizer = maxval + torch.log(torch.sum(torch.exp(self.mean - maxval), dim=2, keepdim=True) + 1e-06)
        return (self.mean - normalizer) * sample

    def reset_mean(self, value=None):
        """Replace `mean` with *value* (or zeros of the same shape),
        preserving device placement and the requires_grad flag."""
        assert (self.mean is not None) or (value is not None), 'Mean is None.'
        new_mean = torch.zeros(self.mean.size()) if value is None else value
        if self._cuda_device is not None:
            new_mean = new_mean.cuda(self._cuda_device)
        self.mean = Variable(new_mean, requires_grad=self.mean.requires_grad)
        self._sample = None

    def mean_trainable(self):
        assert self.mean is not None, 'Mean is None.'
        self.mean = Variable(self.mean.data, requires_grad=True)

    def mean_not_trainable(self):
        self.mean.requires_grad = False

    def state_parameters(self):
        return self.mean

    def cuda(self, device_id):
        if self.mean is not None:
            self.mean = Variable(self.mean.data.cuda(device_id), requires_grad=self.mean.requires_grad)
        self._cuda_device = device_id

    def cpu(self):
        if self.mean is not None:
            self.mean = self.mean.cpu()
        self._cuda_device = None
def multi_resolution_spectrogram_mse(gt, est, n_fft=[2048, 1024, 512], n_hop=[512, 256, 128]):
    """Sum of magnitude-spectrogram MSEs between *gt* and *est*, computed at
    several STFT resolutions (paired n_fft / n_hop settings)."""
    assert gt.shape == est.shape
    assert len(n_fft) == len(n_hop)
    total = 0.0
    for fft_size, hop_size in zip(n_fft, n_hop):
        gt_mag = librosa.magphase(librosa.stft(gt, n_fft=fft_size, hop_length=hop_size))[0]
        est_mag = librosa.magphase(librosa.stft(est, n_fft=fft_size, hop_length=hop_size))[0]
        total = total + np.mean((gt_mag - est_mag) ** 2)
    return total
class syncbatchnorm_(Function):
    """Cross-GPU synchronized BatchNorm autograd function.

    The master process gathers E[x] / E[x^2] (and their gradients in
    backward) from worker queues, averages them, and broadcasts the result
    back, so every replica normalizes with identical statistics.

    Fixes to the original: the decorators were garbled out of the source
    (`def forward(cls, ctx, ...)` without @classmethod, and a stray
    `_differentiable` before backward — the tail of @once_differentiable),
    and `raise NotImplemented` raised a TypeError instead of the intended
    NotImplementedError.
    """
    # Local import so this block is self-contained; removed from the class
    # namespace again right after the decorated methods are defined.
    from torch.autograd.function import once_differentiable

    @classmethod
    def forward(cls, ctx, x, gamma, beta, running_mean, running_var, extra, sync=True, training=True, momentum=0.1, eps=1e-05, activation='none', slope=0.01):
        """Normalize *x*; in training mode also update the running stats
        (in place — marked dirty) and optionally sync stats across workers."""
        cls._parse_extra(ctx, extra)
        ctx.sync = sync
        ctx.training = training
        ctx.momentum = momentum
        ctx.eps = eps
        ctx.activation = activation
        ctx.slope = slope
        # Fused activations are not supported by this implementation.
        assert (activation == 'none')
        x = x.contiguous()
        gamma = gamma.contiguous()
        beta = beta.contiguous()
        if ctx.training:
            # Per-device first and second moments.
            if x.is_cuda:
                (_ex, _exs) = src.gpu.expectation_forward(x)
            else:
                raise NotImplementedError
            if ctx.sync:
                if ctx.is_master:
                    # Master collects every worker's moments, averages, and
                    # broadcasts the consensus back.
                    (_ex, _exs) = ([_ex.unsqueeze(0)], [_exs.unsqueeze(0)])
                    for _ in range(ctx.master_queue.maxsize):
                        (_ex_w, _exs_w) = ctx.master_queue.get()
                        ctx.master_queue.task_done()
                        _ex.append(_ex_w.unsqueeze(0))
                        _exs.append(_exs_w.unsqueeze(0))
                    _ex = comm.gather(_ex).mean(0)
                    _exs = comm.gather(_exs).mean(0)
                    tensors = comm.broadcast_coalesced((_ex, _exs), ([_ex.get_device()] + ctx.worker_ids))
                    for (ts, queue) in zip(tensors[1:], ctx.worker_queues):
                        queue.put(ts)
                else:
                    # Workers send their moments and wait for the consensus.
                    ctx.master_queue.put((_ex, _exs))
                    (_ex, _exs) = ctx.worker_queue.get()
                    ctx.worker_queue.task_done()
            _var = (_exs - (_ex ** 2))
            # In-place running-stat update; declared to autograd via mark_dirty.
            running_mean.mul_((1 - ctx.momentum)).add_((ctx.momentum * _ex))
            running_var.mul_((1 - ctx.momentum)).add_((ctx.momentum * _var))
            ctx.mark_dirty(running_mean, running_var)
        else:
            # Eval mode: normalize with the stored running statistics.
            (_ex, _var) = (running_mean.contiguous(), running_var.contiguous())
            _exs = (_var + (_ex ** 2))
        if x.is_cuda:
            y = src.gpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps)
        else:
            y = src.cpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps)
        ctx.save_for_backward(x, _ex, _exs, gamma, beta)
        return y

    @staticmethod
    @once_differentiable
    def backward(ctx, dz):
        """Backward pass; mirrors forward's cross-worker moment averaging for
        the moment gradients. Returns grads for the 12 forward inputs."""
        (x, _ex, _exs, gamma, beta) = ctx.saved_tensors
        dz = dz.contiguous()
        if dz.is_cuda:
            (dx, _dex, _dexs, dgamma, dbeta) = src.gpu.batchnorm_backward(dz, x, _ex, _exs, gamma, beta, ctx.eps)
        else:
            raise NotImplementedError
        if ctx.training:
            if ctx.sync:
                if ctx.is_master:
                    (_dex, _dexs) = ([_dex.unsqueeze(0)], [_dexs.unsqueeze(0)])
                    for _ in range(ctx.master_queue.maxsize):
                        (_dex_w, _dexs_w) = ctx.master_queue.get()
                        ctx.master_queue.task_done()
                        _dex.append(_dex_w.unsqueeze(0))
                        _dexs.append(_dexs_w.unsqueeze(0))
                    _dex = comm.gather(_dex).mean(0)
                    _dexs = comm.gather(_dexs).mean(0)
                    tensors = comm.broadcast_coalesced((_dex, _dexs), ([_dex.get_device()] + ctx.worker_ids))
                    for (ts, queue) in zip(tensors[1:], ctx.worker_queues):
                        queue.put(ts)
                else:
                    ctx.master_queue.put((_dex, _dexs))
                    (_dex, _dexs) = ctx.worker_queue.get()
                    ctx.worker_queue.task_done()
            # Contribution of the moment gradients back to the input.
            if x.is_cuda:
                dx_ = src.gpu.expectation_backward(x, _dex, _dexs)
            else:
                raise NotImplementedError
            dx = (dx + dx_)
        return (dx, dgamma, dbeta, None, None, None, None, None, None, None, None, None)

    @staticmethod
    def _parse_extra(ctx, extra):
        # Stash the master/worker queue wiring from the `extra` dict on ctx.
        ctx.is_master = extra['is_master']
        if ctx.is_master:
            ctx.master_queue = extra['master_queue']
            ctx.worker_queues = extra['worker_queues']
            ctx.worker_ids = extra['worker_ids']
        else:
            ctx.master_queue = extra['master_queue']
            ctx.worker_queue = extra['worker_queue']

    # Keep the class namespace clean: the decorator helper is not an API.
    del once_differentiable
def run_webcam(tracker_name, tracker_param, debug=None, visdom_info=None): visdom_info = ({} if (visdom_info is None) else visdom_info) tracker = Tracker(tracker_name, tracker_param) tracker.run_webcam(debug, visdom_info)
def lua_recursive_source(module):
    """Recursively translate a loaded Lua/torch7 module tree into PyTorch
    source-code lines.

    Args:
        module: object with a ``.modules`` sequence of legacy torch layers
            (possibly wrapped in ``TorchObject``).

    Returns:
        list[str]: one tab-indented line of generated PyTorch source per
        layer (previously a ``map`` object; a list is a drop-in replacement
        for every iterating caller and can additionally be re-iterated).
    """
    s = []
    for m in module.modules:
        name = type(m).__name__
        # cudnn layers arrive wrapped; unwrap and normalize the type name.
        if name == 'TorchObject':
            name = m._typename.replace('cudnn.', '')
            m = m._obj
        if name == 'SpatialConvolution':
            if not hasattr(m, 'groups'):
                m.groups = 1
            s += ['nn.Conv2d({},{},{},{},{},{},{},bias={}),#Conv2d'.format(
                m.nInputPlane, m.nOutputPlane, (m.kW, m.kH), (m.dW, m.dH),
                (m.padW, m.padH), 1, m.groups, (m.bias is not None))]
        elif name == 'SpatialBatchNormalization':
            s += ['nn.BatchNorm2d({},{},{},{}),#BatchNorm2d'.format(
                m.running_mean.size(0), m.eps, m.momentum, m.affine)]
        elif name == 'ReLU':
            s += ['nn.ReLU()']
        elif name == 'SpatialMaxPooling':
            s += ['nn.MaxPool2d({},{},{},ceil_mode={}),#MaxPool2d'.format(
                (m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH), m.ceil_mode)]
        elif name == 'SpatialAveragePooling':
            s += ['nn.AvgPool2d({},{},{},ceil_mode={}),#AvgPool2d'.format(
                (m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH), m.ceil_mode)]
        elif name == 'SpatialUpSamplingNearest':
            s += ['nn.UpsamplingNearest2d(scale_factor={})'.format(m.scale_factor)]
        elif name == 'View':
            s += ['Lambda(lambda x: x.view(x.size(0),-1)), # View']
        elif name == 'Linear':
            # Legacy Linear accepts 1-D input; emulate with a reshaping Lambda.
            s1 = 'Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x )'
            s2 = 'nn.Linear({},{},bias={})'.format(
                m.weight.size(1), m.weight.size(0), (m.bias is not None))
            s += ['nn.Sequential({},{}),#Linear'.format(s1, s2)]
        elif name == 'Dropout':
            s += ['nn.Dropout({})'.format(m.p)]
        elif name == 'SoftMax':
            s += ['nn.Softmax()']
        elif name == 'Identity':
            s += ['Lambda(lambda x: x), # Identity']
        elif name == 'SpatialFullConvolution':
            s += ['nn.ConvTranspose2d({},{},{},{},{})'.format(
                m.nInputPlane, m.nOutputPlane, (m.kW, m.kH), (m.dW, m.dH),
                (m.padW, m.padH))]
        elif name == 'SpatialReplicationPadding':
            s += ['nn.ReplicationPad2d({})'.format((m.pad_l, m.pad_r, m.pad_t, m.pad_b))]
        elif name == 'SpatialReflectionPadding':
            s += ['nn.ReflectionPad2d({})'.format((m.pad_l, m.pad_r, m.pad_t, m.pad_b))]
        elif name == 'Copy':
            s += ['Lambda(lambda x: x), # Copy']
        elif name == 'Narrow':
            s += ['Lambda(lambda x,a={}: x.narrow(*a))'.format(
                (m.dimension, m.index, m.length))]
        elif name == 'SpatialCrossMapLRN':
            lrn = 'torch.legacy.nn.SpatialCrossMapLRN(*{})'.format(
                (m.size, m.alpha, m.beta, m.k))
            s += ['Lambda(lambda x,lrn={}: Variable(lrn.forward(x)))'.format(lrn)]
        elif name == 'Sequential':
            s += ['nn.Sequential( # Sequential']
            s += lua_recursive_source(m)
            s += [')']
        elif name == 'ConcatTable':
            s += ['LambdaMap(lambda x: x, # ConcatTable']
            s += lua_recursive_source(m)
            s += [')']
        elif name == 'CAddTable':
            s += ['LambdaReduce(lambda x,y: x+y), # CAddTable']
        elif name == 'Concat':
            s += ['LambdaReduce(lambda x,y,dim={}: torch.cat((x,y),dim), # Concat'.format(m.dimension)]
            s += lua_recursive_source(m)
            s += [')']
        else:
            # BUG FIX: `s += <str>` extended the list one CHARACTER at a time
            # (strings are iterable); wrap the marker line in a list instead.
            s += ['# ' + name + ' Not Implement,\n']
    # Indent every generated line one tab (nesting compounds via recursion).
    return ['\t{}'.format(line) for line in s]
class LeadTimeEval():
    """Accumulate per-sample prediction errors keyed by (day, sequence start,
    lead time) and expose them as a lead-time-indexed error table / plot.

    ``self.errors`` is a nested dict:
    ``day -> start_t -> lead_t -> per-channel error sequence``.
    NOTE(review): ``update_errors`` reads the metadata tensors at index
    ``[0]``, i.e. it assumes a single batch entry — confirm against caller.
    """

    def __init__(self, len_seq_in=4, bins_to_predict=32, n_channels=3):
        self.len_seq = len_seq_in       # length of the input sequence
        self.n_bins = bins_to_predict   # number of lead-time bins (columns)
        self.n_channels = n_channels    # channels per sample
        self.errors = {}
        self.index = ['day_in_year', 'in_start_id', 'channel']
        self.cols = self.index + list(range(self.n_bins))

    def get_numpy(self, x):
        """Detach a tensor from the graph and move it to a CPU numpy array."""
        return x.detach().cpu().numpy()

    def update_errors(self, err, metadata):
        """Record one batch of per-channel errors.

        Args:
            err: iterable of per-channel error sequences, aligned with the
                batch dimension of the metadata tensors.
            metadata: dict with ``metadata['out'][k][0]`` tensors for
                ``day_in_year``, ``lead_time`` and ``time_bins``.
        """
        days = self.get_numpy(metadata['out']['day_in_year'][0])
        lead_times = self.get_numpy(metadata['out']['lead_time'][0])
        target_times = self.get_numpy(metadata['out']['time_bins'][0])
        for d, lead_t, tgt_t, e in zip(days, lead_times, target_times, err):
            # Start of the input window = target bin - input length - lead.
            start_t = tgt_t - self.len_seq - lead_t
            start_slot = self.errors.setdefault(d, {}).setdefault(start_t, {})
            if lead_t not in start_slot:
                start_slot[lead_t] = e
            else:
                # Duplicate (day, start, lead) entries indicate an upstream bug.
                print(f'Error, this lead_time={lead_t} was already updated in day={d} start_t={start_t}')

    def __update_channel_errors(self, errors, row_channels):
        # Append each channel's error value to that channel's row.
        for id_chn in range(self.n_channels):
            row_channels[id_chn].append(errors[id_chn])
        return row_channels

    def __get_lead_time_array(self, data, n_bins):
        """Flatten the nested error dict into flat rows: one row per
        (day, start, channel) with one column per lead-time bin."""
        rows = []
        for id_date in data.keys():
            for id_start in data[id_date].keys():
                row_channels = [[id_date, id_start, id_chn]
                                for id_chn in range(self.n_channels)]
                for j in range(n_bins):
                    if j in data[id_date][id_start].keys():
                        errors = data[id_date][id_start][j]
                        row_channels = self.__update_channel_errors(errors, row_channels)
                    else:
                        # FIX: np.NaN was removed in NumPy 2.0; np.nan is the
                        # portable spelling (identical value on all versions).
                        row_channels = self.__update_channel_errors(
                            [np.nan] * self.n_channels, row_channels)
                rows.extend(row_channels)
        return rows

    def get_lead_time_errors_df(self):
        """Return the accumulated errors as a DataFrame indexed by
        (day_in_year, in_start_id, channel), one column per lead-time bin."""
        import pandas as pd
        rows = self.__get_lead_time_array(self.errors, self.n_bins)
        df = pd.DataFrame(rows, columns=self.cols)
        return df.set_index(self.index).sort_index()

    def get_lead_time_metrics(self, root, title, region='',
                              y_label='mse', x_label='lead times'):
        """Persist the error table as CSV, plot mean +/- std per lead time,
        and return ``(means, stds)`` as plain lists."""
        import matplotlib.pyplot as plt
        fname = f'{root}/lead_times_mse_{region}.csv'
        fname_fig = f'{root}/lead_times_mse_fig_{region}'
        df = self.get_lead_time_errors_df()
        df.to_csv(fname, encoding='utf-8')
        print('saved errors to disk:', fname)
        errs, std = df.mean(), df.std()
        fig = plt.figure(figsize=(20, 10))
        plt.errorbar(np.arange(len(errs)), errs, std, fmt='ok', lw=3)
        plt.ylabel(y_label)
        plt.xlabel(x_label)
        plt.xticks(np.arange(self.n_bins), np.arange(self.n_bins))
        plt.title(title)
        fig.savefig(fname_fig)
        plt.show()
        plt.close(fig)
        return (list(errs), list(std))
def test_objective_sum(): (_, _, add_info) = compute_objective_sum(indices=np.array([0, 10, 6]), new_data={'objective': np.array([1.0, 5.0, 3.0])}, add_info={}, extra_args={'objective_sum': 10.0}, occupied=np.array([True, False, True]), cur_data={'objective': np.array([0.0, 2.0, 4.0])}) assert ('objective_sum' in add_info) assert np.isclose(add_info['objective_sum'], (((10.0 + (1.0 - 0.0)) + (5.0 - 0.0)) + (3.0 - 4.0)))
def repeat_generator(x_gen: 'DataGenerator', y_gen: 'DataGenerator', x_repeats: int=0, y_repeats: int=0) -> 'DataGenerator': def repeat_outputs(_x, _y): if (x_repeats > 0): _x = ([_x] * (x_repeats + 1)) if (y_repeats > 0): _y = ([_y] * (y_repeats + 1)) return (_x, _y) return map(repeat_outputs, x_gen, y_gen)
def infer(valid_queue, model, criterion): objs = utils.AverageMeter() top1 = utils.AverageMeter() top5 = utils.AverageMeter() model.eval() with torch.no_grad(): for (step, (input, target)) in enumerate(valid_queue): input = Variable(input.cuda()) target = Variable(target.cuda(non_blocking=True)) (logits, _) = model(input) loss = criterion(logits, target) (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5)) n = input.size(0) objs.update(loss.data.item(), n) top1.update(prec1.data.item(), n) top5.update(prec5.data.item(), n) if ((step % args.report_freq) == 0): logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg) return (top1.avg, objs.avg)
def sample(preds, temperature=1.0): preds = np.asarray(preds).astype('float64') preds = (np.log(preds) / temperature) exp_preds = np.exp(preds) preds = (exp_preds / np.sum(exp_preds)) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas)
def show_img_by_class(data_loader, classes): class_images = {} class_labels = {} while (len(class_images) < len(classes)): (images, labels) = next(data_loader) for (i, label) in enumerate(labels): if ((label.item() not in class_images) and (len(class_images) < len(classes))): class_images[label.item()] = images[i] class_labels[label.item()] = label plt.figure(figsize=(20, 20)) for (i, (label, image)) in enumerate(class_images.items()): ax = plt.subplot(1, len(classes), (i + 1)) img = ((image / 2) + 0.5) npimg = img.numpy() plt.imshow(npimg.squeeze(), cmap='gray') ax.set_title(f'{classes[class_labels[label].item()]}') ax.axis('off') plt.show() print(f'Image size: {images[0].size()}')
class BasicBlock(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, fist_dilation=1, multi_grid=1): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=(dilation * multi_grid), dilation=(dilation * multi_grid), bias=False) self.bn2 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=False) self.relu_inplace = nn.ReLU(inplace=True) self.downsample = downsample self.dilation = dilation self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) if (self.downsample is not None): residual = self.downsample(x) out = (out + residual) out = self.relu_inplace(out) return out
def regularization_loss(scope_name): collection_regularization = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) loss = [] for item in collection_regularization: if (scope_name in item.name): loss.append(item) return tf.reduce_sum(loss)