def parse_pkg_info(fn):
    res = {}
    for ln in open(fn).read().splitlines():
        if not ln or not ln[:1].strip():
            continue
        key, value = ln.split(': ', 1)
        res[key] = value
    return res
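# Usage sketch for parse_pkg_info above, assuming a metadata file in the
# "Key: value" PKG-INFO format (the file name here is hypothetical):
info = parse_pkg_info('PKG-INFO')
print(info.get('Name'), info.get('Version'))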
from collections import defaultdict
from typing import List, Union

import numpy as np

# `logger`, `singlelabel` and `multilabel` are helpers from the surrounding project.
class Evaluator:
    def __init__(self) -> None:
        self.results = defaultdict(dict)
        self.iteration = -1
        self.threshold_end = 0.5

    def update_iteration(self, iteration: int) -> None:
        self.iteration = iteration

    def update_result(self, metric: str, value: Union[float, dict]) -> None:
        if self.iteration > -1:
            key_name = 'epoch_' + str(self.iteration)
        else:
            key_name = 'final'
        if isinstance(value, float):
            self.results[key_name].update({metric: value})
        elif metric in self.results[key_name]:
            self.results[key_name][metric].update(value)
        else:
            self.results[key_name].update({metric: value})

    def classify(self, probs, targets, test_data, multilabel=False):
        if not targets:
            raise ValueError('targets must be provided when evaluating classification')
        if multilabel:
            self._eval_multilabel(probs, targets, test_data)
        else:
            self._eval_singlelabel(probs, targets, test_data)

    def _eval_singlelabel(self, scores: np.ndarray, targets: List[int], eval_type: str) -> None:
        acc_dict = singlelabel.compute_acc_auc(scores, targets)
        log_results = {k: np.around(v * 100, decimals=2) for k, v in acc_dict.items()}
        save_results = acc_dict
        self.log_and_update(log_results, save_results, eval_type)

    def _eval_multilabel(self, scores: np.ndarray, targets: np.ndarray, eval_type: str) -> None:
        num_labels = scores.shape[-1]
        targets = multilabel.multihot(targets, num_labels)
        log_results = {}
        ap, ar, mAP, mAR = multilabel.compute_map(scores, targets)
        f1_dict = multilabel.get_best_f1_scores(targets, scores, self.threshold_end)
        log_results['mAP'] = np.around(mAP * 100, decimals=2)
        log_results['mAR'] = np.around(mAR * 100, decimals=2)
        log_results.update({k: np.around(v * 100, decimals=2) for k, v in f1_dict.items()})
        save_results = {'ap': ap, 'ar': ar, 'mAP': mAP, 'mAR': mAR, 'f1': f1_dict}
        self.log_and_update(log_results, save_results, eval_type)

    def log_and_update(self, log_results, save_results, eval_type):
        log_str = ''
        for k, result in log_results.items():
            if not isinstance(result, np.ndarray):
                log_str += f'{k}: {result:.2f} '
            else:
                log_str += f'{k}: {list(result)} '
        logger.info(f'Classification results with {eval_type}: {log_str}')
        self.update_result('classification', {eval_type: save_results})
def _url_encode_impl(obj, charset, encode_keys, sort, key):
    from .datastructures import iter_multi_items
    iterable = iter_multi_items(obj)
    if sort:
        iterable = sorted(iterable, key=key)
    for key, value in iterable:
        if value is None:
            continue
        if not isinstance(key, bytes):
            key = text_type(key).encode(charset)
        if not isinstance(value, bytes):
            value = text_type(value).encode(charset)
        yield _fast_url_quote_plus(key) + '=' + _fast_url_quote_plus(value)
@_dispatch
def dstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
         workers=None, orthogonalize=None):
    return (Dispatchable(x, np.ndarray),)
def make_command(*args):
    command_args = []
    for arg in args:
        if isinstance(arg, list):
            command_args.extend(arg)
        else:
            command_args.append(arg)
    return command_args
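# Usage sketch for make_command above (arguments are made up): nested lists
# are flattened one level, scalars pass through.
assert make_command('git', ['commit', '-m'], 'msg') == ['git', 'commit', '-m', 'msg']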
class MGridClass(nd_grid):
    def __init__(self):
        super(MGridClass, self).__init__(sparse=False)
def test_output_size_check_dict():
    r = model1_dict.forward(x1_dict.float())
    assert len(r[0][0]) == model1_dict.output_size
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', required=True)
    parser.add_argument('--config-args')
    parser.add_argument('--section', required=True)
    parser.add_argument('--inferred', required=True)
    parser.add_argument('--output')
    parser.add_argument('--logdir')
    parser.add_argument('--evaluate-beams-individually', action='store_true')
    args = parser.parse_args()

    real_logdir, metrics = evaluation.compute_metrics(
        args.config, args.config_args, args.section, args.inferred, args.logdir,
        evaluate_beams_individually=args.evaluate_beams_individually)

    if args.output:
        if real_logdir:
            output_path = args.output.replace('__LOGDIR__', real_logdir)
        else:
            output_path = args.output
        with open(output_path, 'w') as f:
            json.dump(metrics, f)
        print('Wrote eval results to {}'.format(output_path))
    else:
        print(metrics)
class AdaFactorWClonedWeightPredictionForAggregation(WeightPredictor):
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        from optimizers.adafactor import Adafactor
        self.optimizer: Adafactor
        adafactor_init(self.optimizer)

    def forward(self):
        if not self.n_steps:
            return
        self.true_weights_storage.create_cloned_if_needed()
        self.true_weights_storage.record_change_mode('pred')
        pgs = self.optimizer.param_groups
        if self.scheduler is not None:
            step_lrs = self.scheduler.get_next(self.n_steps)
            pg_step_lrs = [[slr[i] for slr in step_lrs] for i in range(len(pgs))]
        else:
            pg_step_lrs = [[pg['lr']] * self.n_steps for pg in pgs]
        with torch.no_grad():
            for group, step_lrs in zip(pgs, pg_step_lrs):
                group = CowDict(group)
                for p in group['params']:
                    if p.grad is None:
                        grad = None
                    else:
                        # the dtype upcast only applies when a gradient exists
                        grad = p.grad.data
                        if grad.dtype in {torch.float16, torch.bfloat16}:
                            grad = grad.float()
                    state = self.optimizer.state[p]
                    state = CowDict(state)
                    grad_shape = grad.shape if grad is not None else p.shape
                    factored, use_first_moment = self.optimizer._get_options(group, grad_shape)
                    assert len(state) > 0
                    if factored:
                        exp_avg_sq_row = state['exp_avg_sq_row']
                        exp_avg_sq_col = state['exp_avg_sq_col']
                    else:
                        exp_avg_sq = state['exp_avg_sq']
                    if use_first_moment:
                        exp_avg = state['exp_avg']
                    for staleness, lr in zip(range(1, self.n_steps + 1), step_lrs):
                        p_data_fp32 = p.data
                        if p.data.dtype in {torch.float16, torch.bfloat16}:
                            p_data_fp32 = p_data_fp32.float()
                        state['step'] += 1
                        state['RMS'] = self.optimizer._rms(p_data_fp32)
                        group['lr'] = self.optimizer._get_lr(group, state)
                        beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
                        if grad is None:
                            update = torch.full_like(p.data, fill_value=group['eps'][0],
                                                     memory_format=torch.preserve_format)
                        else:
                            update = grad ** 2 + group['eps'][0]
                        if factored:
                            exp_avg_sq_row = exp_avg_sq_row.mul(beta2t).add_(
                                update.mean(dim=-1), alpha=1.0 - beta2t)
                            exp_avg_sq_col = exp_avg_sq_col.mul(beta2t).add_(
                                update.mean(dim=-2), alpha=1.0 - beta2t)
                            update = self.optimizer._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                            update.mul_(grad)
                        else:
                            exp_avg_sq = exp_avg_sq.mul(beta2t).add_(update, alpha=1.0 - beta2t)
                            update = exp_avg_sq.rsqrt().mul_(grad)
                        update.div_((self.optimizer._rms(update) / group['clip_threshold']).clamp_(min=1.0))
                        update.mul_(group['lr'])
                        if use_first_moment:
                            exp_avg = exp_avg.mul(group['beta1']).add_(update, alpha=1 - group['beta1'])
                            update = exp_avg
                        if group['weight_decay'] != 0:
                            p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                        p_data_fp32.add_(-update)
                        if p.data.dtype in {torch.float16, torch.bfloat16}:
                            p.data.copy_(p_data_fp32)

    def revert(self):
        if not self.n_steps:
            return
        self.true_weights_storage.restore_if_needed()
class CrossEntropy(nn.Module):
    def __init__(self):
        super().__init__()
        self.loss = nn.CrossEntropyLoss(ignore_index=-1)

    def forward(self, output, target):
        return self.loss(output, target)
def get_root_document_iterator(file_path: str) -> Iterator[str]:
    import pyarrow.parquet as pq
    parquet_file = pq.ParquetFile(file_path)
    for batch in parquet_file.iter_batches():
        df = batch.to_pandas()
        for row in df.iterrows():
            yield row[1].tolist()[0]
def process_corpus(bliss_corpus, char_vocab, silence_duration):
    from recipe.text.bliss import ProcessBlissText
    ljs = ProcessBlissText(bliss_corpus, [('end_token', {'token': '~'})],
                           vocabulary=char_vocab)

    from recipe.corpus.ffmpeg import BlissFFMPEGJob, BlissRecoverDuration
    filter_string = ('-af "silenceremove=stop_periods=-1:stop_duration=%f:stop_threshold=-40dB"'
                     % silence_duration)
    ljs_nosilence = BlissFFMPEGJob(ljs.out, filter_string,
                                   ffmpeg_binary=FFMPEG_BINARY, output_format='wav')
    ljs_nosilence.rqmt['time'] = 24
    ljs_nosilence_recover = BlissRecoverDuration(ljs_nosilence.out)
    return ljs_nosilence_recover.out
def logistic_nll(x: Tensor, mean: Tensor, log_scale: Tensor):
    bin_size = 1 / 256
    scale = log_scale.exp()
    x_centered = x - mean
    cdf1 = x_centered / scale
    cdf2 = (x_centered + bin_size) / scale
    # probability mass of the bin under a logistic CDF, floored for stability
    p = torch.sigmoid(cdf2) - torch.sigmoid(cdf1) + 1e-12
    return -p.log()
class GanLoader(object):
    def __init__(self, G, N=10 ** 10, bs=64):
        self.G, self.N, self.bs = G, N, bs

    def __len__(self):
        return self.N

    def __iter__(self):
        with torch.no_grad(), Eval(self.G):
            for i in range(self.N // self.bs):
                yield self.G.sample(self.bs)
            if self.N % self.bs != 0:
                yield self.G.sample(self.N % self.bs)

    def write_imgs(self, path):
        np_images = np.concatenate([img.cpu().numpy() for img in self], axis=0)
        for i, img in enumerate(np_images):
            # note: scipy.misc.imsave was removed in SciPy 1.2;
            # imageio.imwrite is the usual replacement
            scipy.misc.imsave(path + 'img{}.jpg'.format(i), img)
def filter_words(sentences):
    filters = [lambda x: x.lower(), strip_numeric, strip_punctuation,
               remove_stopwords, stem_sentence]
    apply_filters_to_token = lambda token: apply_filters(token, filters)
    return list(map(apply_filters_to_token, sentences))
class pAdicRingRelaxed(pAdicRelaxedGeneric, pAdicRingBaseGeneric):
    def __init__(self, p, prec, print_mode, names):
        from sage.rings.padics import padic_relaxed_element
        self._default_prec, self._halting_prec, self._secure = prec
        pAdicRingBaseGeneric.__init__(self, p, self._default_prec, print_mode,
                                      names, padic_relaxed_element.pAdicRelaxedElement)
        self._element_class_module = padic_relaxed_element
        self._element_class_prefix = 'pAdicRelaxedElement_'
def register_Ns3FdBetFfMacScheduler_methods(root_module, cls):
    cls.add_constructor([param('ns3::FdBetFfMacScheduler const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('GetFfMacCschedSapProvider', 'ns3::FfMacCschedSapProvider *', [], is_virtual=True)
    cls.add_method('GetFfMacSchedSapProvider', 'ns3::FfMacSchedSapProvider *', [], is_virtual=True)
    cls.add_method('GetLteFfrSapUser', 'ns3::LteFfrSapUser *', [], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetFfMacCschedSapUser', 'void', [param('ns3::FfMacCschedSapUser *', 's')], is_virtual=True)
    cls.add_method('SetFfMacSchedSapUser', 'void', [param('ns3::FfMacSchedSapUser *', 's')], is_virtual=True)
    cls.add_method('SetLteFfrSapProvider', 'void', [param('ns3::LteFfrSapProvider *', 's')], is_virtual=True)
    cls.add_method('TransmissionModeConfigurationUpdate', 'void',
                   [param('uint16_t', 'rnti'), param('uint8_t', 'txMode')])
    return
def test_var_args_empty():
    # arg_aot presumably carried an AOT/JIT decorator that was lost in
    # extraction; the .compile() call below implies one
    def arg_aot(*args):
        return np.zeros([20])

    arg_aot.compile()
def compute_model_dim(cfg: DictConfig) -> int:
    if cfg.name == 'pose_gen' or cfg.name == 'motion_gen':
        return get_smplx_dimension_from_keys(cfg.dataset.modeling_keys)
    elif cfg.name == 'path_planning':
        return 2
    elif cfg.name == 'grasp_gen':
        return 3 + 6 + 24
    else:
        raise Exception('Unsupported task.')
# decorator reconstructed; extraction stripped the leading "@test" fragment
@test_utils.test()
def test_reduce_merged():
    a = ti.field(ti.f32, shape=16)
    b = ti.field(ti.f32, shape=4)
    c = ti.field(ti.f32, shape=())
    ti.root.lazy_grad()

    # the kernel decorator was lost in extraction; reduce.grad() below
    # implies this is a ti.kernel
    @ti.kernel
    def reduce():
        for i in range(16):
            b[i // 4] += a[i]
        for i in range(4):
            c[None] += b[i]

    c.grad[None] = 1
    reduce.grad()
    for i in range(4):
        assert b.grad[i] == 1
    for i in range(16):
        assert a.grad[i] == 1
def nvmlDeviceGetName(handle):
    c_name = ctypes.create_string_buffer(NVML_DEVICE_NAME_BUFFER_SIZE)
    fn = _get_nvml_function('nvmlDeviceGetName')
    ret = fn(handle, c_name, ctypes.c_uint(NVML_DEVICE_NAME_BUFFER_SIZE))
    _check_return(ret)
    return c_name.value
def figure1():
    wb = Whitebox(WhiteboxSTResnet(stresnet101('../models/resnet101v4_28NOV17_train.pth')))
    if not os.path.exists('_vggface2_topk_frontal_nonmates.pkl'):
        _vggface2_topk_frontal_nonmates(wb, topk=32)
    n_subjects = 16
    (matelist, nonmatelist, probelist) = _triplet_mate_frontalpose_nonmate_top1_probe_mixedpose(n_subjects)
    matelist = [f_detection(im).rgb() for im in matelist]
    nonmatelist = [f_detection(im).rgb() for im in nonmatelist]
    probelist = [[f_detection(im).rgb() for im in iml] for iml in probelist]
    probelist_clean = copy.deepcopy(probelist)

    probelist = copy.deepcopy(probelist_clean)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist,
                                 'figure1a_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1a = copy.deepcopy(probelist)

    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_ebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist,
                                 'figure1b_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1b = copy.deepcopy(probelist)

    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_cebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist,
                                 'figure1c_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1c = copy.deepcopy(probelist)

    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_tcebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist,
                                 'figure1d_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1d = copy.deepcopy(probelist)

    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_weighted_subtree(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist,
                                 'figure1e_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1e = copy.deepcopy(probelist)

    probelist = copy.deepcopy(probelist_clean)
    matelist = [matelist[0]] * n_subjects
    probelist = ([probelist_1a[0]] + [probelist_1b[0]] + [probelist_1c[0]]
                 + [probelist_1d[0]] + [probelist_1e[0]])
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist,
                                 'figure1f_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
class TrueWeightsStorage:
    def __init__(self, optimizer):
        self.true_weights = None
        self.true_weights_exist = False
        self.optimizer = optimizer
        self.change_mode = None
        self.restored_true_weights_to_the_model = False

    def record_change_mode(self, mode):
        if self.change_mode is None:
            self.change_mode = mode
        elif mode:
            self.change_mode += f' -> {mode}'

    def get_true_weights(self):
        true_weights = self.true_weights
        if true_weights is None:
            true_weights = self._return_current_weights()
        return true_weights

    def create_cloned_if_needed(self):
        if not self.true_weights_exist or self.restored_true_weights_to_the_model:
            self.true_weights = self._create_current_cloned_buff()
            self.true_weights_exist = True
            if self.restored_true_weights_to_the_model:
                self.restored_true_weights_to_the_model = False

    def restore_if_needed(self):
        if self.true_weights_exist and self.change_mode:
            self._restore_from_buff(self.true_weights)
            self.change_mode = None
            self.restored_true_weights_to_the_model = True

    def check_restore_if_needed(self, check=True):
        if check:
            if self.true_weights_exist and not self.change_mode:
                print('-W- will not restore true weights. no change is recorded. '
                      'Consider removing for efficiency')
        self.restore_if_needed()

    def reset_on_step(self):
        self.true_weights = None
        self.true_weights_exist = False
        self.change_mode = None
        self.restored_true_weights_to_the_model = False

    def _restore_from_buff(self, buff):
        with torch.no_grad():
            for pg, cloned in zip(self.optimizer.param_groups, buff):
                for p, bp in zip(pg['params'], cloned):
                    p.data = bp.detach()

    def _create_current_cloned_buff(self):
        return [[p.detach().clone() for p in pg['params']]
                for pg in self.optimizer.param_groups]

    def _return_current_weights(self):
        return [[p for p in pg['params']] for pg in self.optimizer.param_groups]
# The three decorator names below were truncated during extraction (the "@"
# and a leading name fragment are missing); reproduced here as comments:
#   @..._file_in_work_dir(['file_name'])
#   @..._file_read_only(['file_name'])
#   @..._low_level_step
def write_file(file_name, content, work_dir='.', **kwargs):
    try:
        with open(os.path.join(work_dir, file_name), 'w') as f:
            f.write(content)
        observation = f'File {file_name} written successfully.'
        return observation
    except Exception:
        raise EnvException(f'cannot write file {file_name}')
def coco_eval_with_return(result_files, result_types, coco, max_dets=(100, 300, 1000)):
    for res_type in result_types:
        assert res_type in ['proposal', 'bbox', 'segm', 'keypoints']
    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)

    eval_results = {}
    for res_type in result_types:
        result_file = result_files[res_type]
        assert result_file.endswith('.json')
        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        iou_type = 'bbox' if res_type == 'proposal' else res_type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if res_type == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        if res_type == 'segm' or res_type == 'bbox':
            metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl',
                            'AR1', 'AR10', 'AR100', 'ARs', 'ARm', 'ARl']
            eval_results[res_type] = {metric_names[i]: cocoEval.stats[i]
                                      for i in range(len(metric_names))}
        else:
            eval_results[res_type] = cocoEval.stats
    return eval_results
class DistributedGroupSampler(Sampler):
    def __init__(self, dataset, samples_per_gpu=1, num_replicas=None, rank=None, seed=0):
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.seed = seed if seed is not None else 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch + self.seed)

        indices = []
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                indice = indice[list(torch.randperm(int(size), generator=g).numpy())].tolist()
                extra = int(
                    math.ceil(size * 1.0 / self.samples_per_gpu / self.num_replicas)
                ) * self.samples_per_gpu * self.num_replicas - len(indice)
                # pad the group so it divides evenly across replicas
                tmp = indice.copy()
                for _ in range(extra // size):
                    indice.extend(tmp)
                indice.extend(tmp[:extra % size])
                indices.extend(indice)

        assert len(indices) == self.total_size

        indices = [
            indices[j] for i in list(
                torch.randperm(len(indices) // self.samples_per_gpu, generator=g))
            for j in range(i * self.samples_per_gpu, (i + 1) * self.samples_per_gpu)
        ]

        # subsample this rank's slice
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
def get_experiments_from_kwargs(**kwargs):
    kwargs_coerced = {key: as_list(val) for key, val in kwargs.items()}
    experiments = [
        {key: value for key, value in zip(kwargs_coerced.keys(), record_values)}
        for record_values in itertools.product(*kwargs_coerced.values())
    ]
    return experiments
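# Usage sketch for get_experiments_from_kwargs above (assumes the function and
# its as_list helper are in scope; values are made up): keyword values expand
# into the cross product of all combinations.
experiments = get_experiments_from_kwargs(lr=[0.1, 0.01], seed=[0])
assert experiments == [{'lr': 0.1, 'seed': 0}, {'lr': 0.01, 'seed': 0}]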
class AverageMeterSet(object):
    def __init__(self, meters=None):
        self.meters = meters if meters else {}

    def __getitem__(self, key):
        if key not in self.meters:
            meter = AverageMeter()
            meter.update(0)
            return meter
        return self.meters[key]

    def update(self, name, value, n=1):
        if name not in self.meters:
            self.meters[name] = AverageMeter()
        self.meters[name].update(value, n)

    def reset(self):
        for meter in self.meters.values():
            meter.reset()

    def values(self, format_string='{}'):
        return {format_string.format(name): meter.val for name, meter in self.meters.items()}

    def averages(self, format_string='{}'):
        return {format_string.format(name): meter.avg for name, meter in self.meters.items()}

    def sums(self, format_string='{}'):
        return {format_string.format(name): meter.sum for name, meter in self.meters.items()}

    def counts(self, format_string='{}'):
        return {format_string.format(name): meter.count for name, meter in self.meters.items()}
class ASR(sb.Brain):
    def compute_forward(self, batch, stage):
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens, _ = batch.tokens
        if self.hparams.gradient_checkpointing:
            wavs.requires_grad_()
            logits = torch.utils.checkpoint.checkpoint(self.modules.wavlm, wavs, wav_lens)
        else:
            logits = self.modules.wavlm(wavs, wav_lens)
        hyps = None
        if stage != sb.Stage.TRAIN:
            hyps = sb.decoders.ctc_greedy_decode(logits, wav_lens,
                                                 blank_id=self.hparams.blank_index)
        return logits, hyps

    def compute_objectives(self, predictions, batch, stage):
        _, wav_lens = batch.sig
        logits, hyps = predictions
        ids = batch.id
        tokens, tokens_lens = batch.tokens
        logits = logits.float()
        log_probs = logits.log_softmax(dim=-1)
        loss = self.hparams.ctc_loss(log_probs, tokens, wav_lens, tokens_lens)
        if stage != sb.Stage.TRAIN:
            target_words = batch.target_wrd
            predicted_words = self.tokenizer.decode(hyps)
            predicted_words = [text.split(' ') for text in predicted_words]
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss

    def on_stage_start(self, stage, epoch=None):
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.wer_computer()

    def on_stage_end(self, stage, stage_loss, epoch=None):
        stage_stats = {'loss': stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats['CER'] = self.cer_metric.summarize('error_rate')
            stage_stats['WER'] = self.wer_metric.summarize('error_rate')
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(stage_stats['loss'])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            stats_meta_data = {'epoch': epoch, 'lr': old_lr}
            self.hparams.train_logger.log_stats(stats_meta=stats_meta_data,
                                                train_stats=self.train_stats,
                                                valid_stats=stage_stats)
            self.checkpointer.save_and_keep_only(meta={'CER': stage_stats['CER']},
                                                 min_keys=['CER'])
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={'Epoch loaded': self.hparams.epoch_counter.current},
                test_stats=stage_stats)
            with open(self.hparams.wer_file, 'w', encoding='utf-8') as w:
                self.wer_metric.write_stats(w)
# decorator reconstructed; extraction stripped the leading "@pytest.mark" part
@pytest.mark.parametrize('metric', METRICS)
def test_kd_tree_numerical_consistency(global_random_seed, metric):
    X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(
        random_seed=global_random_seed, features=50)
    metric_params = METRICS.get(metric, {})
    kd_64 = KDTree64(X_64, leaf_size=2, metric=metric, **metric_params)
    kd_32 = KDTree32(X_32, leaf_size=2, metric=metric, **metric_params)

    # consistency of the `query` method between 64- and 32-bit trees
    k = 4
    dist_64, ind_64 = kd_64.query(Y_64, k=k)
    dist_32, ind_32 = kd_32.query(Y_32, k=k)
    assert_allclose(dist_64, dist_32, rtol=1e-05)
    assert_equal(ind_64, ind_32)
    assert dist_64.dtype == np.float64
    assert dist_32.dtype == np.float32

    # consistency of the `query_radius` method
    r = 2.38
    ind_64 = kd_64.query_radius(Y_64, r=r)
    ind_32 = kd_32.query_radius(Y_32, r=r)
    for _ind64, _ind32 in zip(ind_64, ind_32):
        assert_equal(_ind64, _ind32)

    # ... also when returning distances
    ind_64, dist_64 = kd_64.query_radius(Y_64, r=r, return_distance=True)
    ind_32, dist_32 = kd_32.query_radius(Y_32, r=r, return_distance=True)
    for _ind64, _ind32, _dist_64, _dist_32 in zip(ind_64, ind_32, dist_64, dist_32):
        assert_equal(_ind64, _ind32)
        assert_allclose(_dist_64, _dist_32, rtol=1e-05)
        assert _dist_64.dtype == np.float64
        assert _dist_32.dtype == np.float32
def match_allen_srl_structures(dataset, srl_data, is_gold):
    matched_events_count = 0
    matched_args_count = 0
    for topic_id, topic in dataset.topics.items():
        for doc_id, doc in topic.docs.items():
            for sent_id, sent in doc.get_sentences().items():
                if not config_dict['use_dep']:
                    sent_str = sent.get_raw_sentence()
                    parsed_sent = nlp(sent_str)
                    find_nominalizations_args(parsed_sent, sent, is_gold)

                sent_srl_info = None
                if doc_id in srl_data:
                    doc_srl = srl_data[doc_id]
                    if int(sent_id) in doc_srl:
                        sent_srl_info = doc_srl[int(sent_id)]

                if sent_srl_info is not None:
                    for event_srl in sent_srl_info.srl:
                        event_text = event_srl.verb.text
                        event_ecb_tok_ids = event_srl.verb.ecb_tok_ids
                        if is_gold:
                            sent_events = sent.gold_event_mentions
                            sent_entities = sent.gold_entity_mentions
                        else:
                            sent_events = sent.pred_event_mentions
                            sent_entities = sent.pred_entity_mentions

                        event_found = False
                        matched_event = None
                        for event_mention in sent_events:
                            if (event_ecb_tok_ids == event_mention.tokens_numbers
                                    or event_text == event_mention.mention_str
                                    or event_text in event_mention.mention_str
                                    or event_mention.mention_str in event_text):
                                event_found = True
                                matched_event = event_mention
                                if is_gold:
                                    matched_events_count += 1
                                elif matched_event.gold_mention_id is not None:
                                    matched_events_count += 1
                            if event_found:
                                break

                        if event_found:
                            if event_srl.arg0 is not None:
                                if match_entity_with_srl_argument(
                                        sent_entities, matched_event, event_srl.arg0, 'A0', is_gold):
                                    matched_args_count += 1
                            if event_srl.arg1 is not None:
                                if match_entity_with_srl_argument(
                                        sent_entities, matched_event, event_srl.arg1, 'A1', is_gold):
                                    matched_args_count += 1
                            if event_srl.arg_tmp is not None:
                                if match_entity_with_srl_argument(
                                        sent_entities, matched_event, event_srl.arg_tmp, 'AM-TMP', is_gold):
                                    matched_args_count += 1
                            if event_srl.arg_loc is not None:
                                if match_entity_with_srl_argument(
                                        sent_entities, matched_event, event_srl.arg_loc, 'AM-LOC', is_gold):
                                    matched_args_count += 1
    logger.info('SRL matched events - ' + str(matched_events_count))
    logger.info('SRL matched args - ' + str(matched_args_count))
class ConvNorm(torch.nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=None, dilation=1, bias=True, w_init_gain='linear'):
        super(ConvNorm, self).__init__()
        if padding is None:
            assert kernel_size % 2 == 1
            padding = int(dilation * (kernel_size - 1) / 2)
        self.conv = torch.nn.Conv1d(in_channels, out_channels,
                                    kernel_size=kernel_size, stride=stride,
                                    padding=padding, dilation=dilation, bias=bias)

    def forward(self, signal):
        conv_signal = self.conv(signal)
        return conv_signal
def is_union(ann):
    if ann is Union:
        raise_error_container_parameter_missing('Union')
    return (hasattr(ann, '__module__') and ann.__module__ == 'typing'
            and getattr(ann, '__origin__', None) is Union)
def normalize_tensor(x):
    map_size = x.size()
    aggregated = x.view(map_size[0], map_size[1], -1)
    minimum, _ = torch.min(aggregated, dim=-1, keepdim=True)
    maximum, _ = torch.max(aggregated, dim=-1, keepdim=True)
    normalized = torch.div(aggregated - minimum, maximum - minimum)
    normalized = normalized.view(map_size)
    return normalized
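# Shape/behaviour sketch for normalize_tensor above (assumes torch and the
# function are in scope): each (batch, channel) map is min-max scaled to [0, 1]
# independently, and the input shape is preserved.
x = torch.randn(2, 3, 8, 8)
y = normalize_tensor(x)
assert y.shape == x.shape and float(y.min()) >= 0.0 and float(y.max()) <= 1.0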
# decorator reconstructed; extraction stripped the leading "@python" fragment
@python_scope
def ndarray(dtype, shape, needs_grad=False):
    prog = get_runtime().prog
    if prog is None:
        raise TaichiRuntimeError(
            'Cannot create ndarray, maybe you forgot to call `ti.init()` first?')
    if isinstance(shape, numbers.Number):
        shape = (shape,)
    if not all((isinstance(x, int) or isinstance(x, np.integer)) and x > 0
               and x <= 2 ** 31 - 1 for x in shape):
        raise TaichiRuntimeError(f'{shape} is not a valid shape for ndarray')
    if dtype in all_types:
        dt = cook_dtype(dtype)
        x = ScalarNdarray(dt, shape)
    elif isinstance(dtype, MatrixType):
        if dtype.ndim == 1:
            x = VectorNdarray(dtype.n, dtype.dtype, shape)
        else:
            x = MatrixNdarray(dtype.n, dtype.m, dtype.dtype, shape)
        dt = dtype.dtype
    else:
        raise TaichiRuntimeError(f'{dtype} is not supported as ndarray element type')
    if needs_grad:
        if not _ti_core.is_real(dt):
            raise TaichiRuntimeError(
                f'{dt} is not supported for ndarray with `needs_grad=True` or `needs_dual=True`.')
        x_grad = ndarray(dtype, shape, needs_grad=False)
        x._set_grad(x_grad)
    return x
class FormalPolyhedraModule(CombinatorialFreeModule):
    # @staticmethod restored: __classcall__ receives cls explicitly
    @staticmethod
    def __classcall__(cls, base_ring, dimension, basis, category=None):
        if isinstance(basis, list):
            basis = tuple(basis)
        if isinstance(basis, tuple):
            from sage.geometry.polyhedron.base import Polyhedron_base
            for P in basis:
                if not isinstance(P, Polyhedron_base):
                    raise TypeError(f'{P} is not a polyhedron')
                if P.ambient_space().dimension() != dimension:
                    raise TypeError(f'{P} does not belong to the ambient space')
        if category is None:
            category = GradedModulesWithBasis(base_ring)
        return super().__classcall__(cls, base_ring=base_ring, dimension=dimension,
                                     basis=basis, category=category)

    def __init__(self, base_ring, dimension, basis, category):
        super().__init__(base_ring, basis, prefix='', category=category)

    def degree_on_basis(self, m):
        return m.dimension()
def feat_extraction(dataroot_dir, mode):
    DB = read_voxceleb_structure(dataroot_dir, data_type='wavs')
    if mode != 'train' and mode != 'test':
        raise mode_error
    count = 0
    for i in range(len(DB)):
        extract_MFB(DB['filename'][i], mode=mode)
        count = count + 1
        filename = DB['filename'][i]
        print('feature extraction (%s DB). step : %d, file : "%s"'
              % (mode, count, '/'.join(filename.split('/')[-3:])))
    print('-' * 20 + ' Feature extraction done ' + '-' * 20)
class BatchAE(Data):
    def __init__(self, batch=None, **kwargs):
        super(BatchAE, self).__init__(**kwargs)
        self.batch = batch

    # @staticmethod restored: the method takes no self and is called on the class
    @staticmethod
    def from_data_list(data_list):
        keys = [set(data.keys) for data in data_list]
        keys = list(set.union(*keys))
        assert 'batch' not in keys

        batch = BatchAE()
        for key in keys:
            batch[key] = []
        batch.batch = []

        cumsum_node = 0
        for i, data in enumerate(data_list):
            num_nodes = data.num_nodes
            batch.batch.append(torch.full((num_nodes,), i, dtype=torch.long))
            for key in data.keys:
                item = data[key]
                if key in ['edge_index', 'negative_edge_index']:
                    # offset node indices by the running node count
                    item = item + cumsum_node
                batch[key].append(item)
            cumsum_node += num_nodes

        for key in keys:
            batch[key] = torch.cat(batch[key], dim=batch.cat_dim(key))
        batch.batch = torch.cat(batch.batch, dim=-1)
        return batch.contiguous()

    # @property restored: likely a property in the original, as it takes no arguments
    @property
    def num_graphs(self):
        return self.batch[-1].item() + 1

    def cat_dim(self, key):
        return -1 if key in ['edge_index', 'negative_edge_index'] else 0
def register_Ns3LteRrcSapMeasGapConfig_methods(root_module, cls):
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LteRrcSap::MeasGapConfig const &', 'arg0')])
    cls.add_instance_attribute('gapOffsetValue', 'uint8_t', is_const=False)
    return
def OA_15_896():
    from sage.rings.finite_rings.finite_field_constructor import FiniteField
    A = [
        [(0, None), (0, None), (0, None), (0, None), (0, None), (0, None), (0, None), (0, None), (1, None), (4, None), (2, None), (2, None), (4, None), (1, None)],
        [(0, None), (1, None), (2, 17), (3, 20), (4, 49), (5, 4), (6, 59), (1, 15), (0, 114), (1, 76), (4, 106), (2, 87), (2, 118), (4, 49)],
        [(0, None), (2, None), (4, 2), (6, 98), (1, 53), (3, 97), (5, 123), (4, 3), (1, 32), (0, 10), (1, 45), (4, 3), (2, 1), (2, 14)],
        [(0, None), (3, None), (6, 16), (2, 86), (5, 102), (1, 64), (4, 69), (2, 11), (4, 55), (1, 90), (0, 115), (1, 15), (4, 7), (2, 0)],
        [(0, None), (4, None), (1, 4), (5, 110), (2, 51), (6, 118), (3, 8), (2, 81), (2, 79), (4, 98), (1, 2), (0, 3), (1, 7), (4, None)],
        [(0, None), (5, None), (3, 66), (1, 70), (6, 102), (4, 119), (2, 20), (4, 86), (2, 59), (2, 15), (4, 63), (1, 126), (0, 1), (1, 0)],
        [(0, None), (6, None), (5, 94), (4, 48), (3, 90), (2, 2), (1, 13), (1, 53), (4, 117), (2, 21), (2, 2), (4, 1), (1, 0), (0, 0)],
        [(0, None), (4, 6), (2, 21), (1, 112), (1, 36), (2, 14), (4, 60), (0, 1), (6, 64), (3, 0), (5, 31), (5, 3), (3, 3), (6, 14)],
        [(0, None), (5, 6), (4, 61), (4, None), (5, 108), (0, 91), (3, 10), (6, 15), (0, None), (6, 15), (3, 7), (5, 0), (5, 1), (3, 0)],
        [(0, None), (6, 6), (6, 107), (0, 88), (2, 12), (5, 44), (2, 31), (3, 64), (6, 0), (0, None), (6, 2), (3, 3), (5, None), (5, 0)],
        [(0, None), (0, 6), (1, 52), (3, 115), (6, 30), (3, 78), (1, 64), (5, 63), (3, 5), (6, None), (0, None), (6, 3), (3, 1), (5, None)],
        [(0, None), (1, 6), (3, 117), (6, 19), (3, 9), (1, 31), (0, 56), (5, 0), (5, 63), (3, None), (6, None), (0, None), (6, 7), (3, None)],
        [(0, None), (2, 6), (5, 116), (2, 3), (0, 0), (6, None), (6, 1), (3, 0), (5, 0), (5, 2), (3, None), (6, None), (0, None), (6, 0)],
        [(0, None), (3, 6), (0, 0), (5, 0), (4, 1), (4, None), (5, None), (6, 0), (3, 2), (5, 0), (5, None), (3, None), (6, None), (0, None)],
    ]
    Y = [None, 0, 1, 2, 121, 66, 77, 78, 41, 100, 74, 118, 108, 43]
    return OA_n_times_2_pow_c_from_matrix(15, 7, FiniteField(7), list(zip(*A)), Y, check=False)
class DatasetMapperTTA:
    def __init__(self, cfg):
        self.min_sizes = cfg.TEST.AUG.MIN_SIZES
        self.max_size = cfg.TEST.AUG.MAX_SIZE
        self.flip = cfg.TEST.AUG.FLIP
        self.image_format = cfg.INPUT.FORMAT

    def __call__(self, dataset_dict):
        numpy_image = dataset_dict['image'].permute(1, 2, 0).numpy()
        shape = numpy_image.shape
        orig_shape = (dataset_dict['height'], dataset_dict['width'])
        if shape[:2] != orig_shape:
            # the image was already resized; record a transform back to the original size
            pre_tfm = ResizeTransform(orig_shape[0], orig_shape[1], shape[0], shape[1])
        else:
            pre_tfm = NoOpTransform()

        aug_candidates = []
        for min_size in self.min_sizes:
            resize = ResizeShortestEdge(min_size, self.max_size)
            aug_candidates.append([resize])
            if self.flip:
                flip = RandomFlip(prob=1.0)
                aug_candidates.append([resize, flip])

        ret = []
        for aug in aug_candidates:
            new_image, tfms = apply_augmentations(aug, np.copy(numpy_image))
            torch_image = torch.from_numpy(np.ascontiguousarray(new_image.transpose(2, 0, 1)))
            dic = copy.deepcopy(dataset_dict)
            dic['transforms'] = pre_tfm + tfms
            dic['image'] = torch_image
            ret.append(dic)
        return ret
def test_gen_cylinder_mesh(output_dir):
    from sfepy.mesh.mesh_generators import gen_cylinder_mesh
    mesh = gen_cylinder_mesh([0.5, 1, 2, 1.5, 3], [5, 4, 3], [0, 2, 1],
                             axis='z', non_uniform=True, verbose=False)
    filename = op.join(output_dir, 'gen_cylinder.mesh')
    mesh.write(filename)
    tst.report('cylinder mesh generated')
    csum = nm.sum(mesh.coors - nm.min(mesh.coors, axis=0), axis=0)
    assert nm.linalg.norm(csum - nm.array([120, 90, 90])) < tolerance
import statistics as stat  # assumed import for the mean/stdev helpers below

def acc_stat(accuracy):
    length = 10
    # take the last `length` values; the flattened original indexed a single
    # element (accuracy[-length]), which would break stat.mean/stdev below
    a = accuracy[-length:]
    a_stat = [(temp - stat.mean(a)) / stat.stdev(a) for temp in a]
    return sum(1 for temp in a_stat if abs(temp) > 1) / length < 0.5
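# Behaviour sketch for acc_stat above (made-up values; assumes the function is
# in scope): returns True when fewer than half of the last 10 accuracies lie
# more than one standard deviation from their mean, i.e. training has settled.
history = [0.50, 0.70, 0.80, 0.88, 0.90, 0.90, 0.91, 0.90, 0.92, 0.91, 0.90, 0.91]
print(acc_stat(history))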
class Saver(object):
    def __init__(self, savedir='.', savetitle=''):
        self.savedir = savedir
        self.savefile = os.path.join(savedir, savetitle)
        self.saver = None

    def save(self, sess, itr):
        if self.saver is None:
            self.saver = tf.train.Saver(max_to_keep=10)
        self.saver.save(sess, self.savefile + '_' + PREFIX + str(itr))
        print('Saved model at iteration', itr)
class ImageConv(nn.Module):
    def __init__(self, base_channels, in_channels=3):
        super(ImageConv, self).__init__()
        self.base_channels = base_channels
        self.out_channels = 8 * base_channels
        self.conv0 = nn.Sequential(
            Conv2d(in_channels, base_channels, 3, 1, padding=1),
            Conv2d(base_channels, base_channels, 3, 1, padding=1))
        self.conv1 = nn.Sequential(
            Conv2d(base_channels, base_channels * 2, 5, stride=2, padding=2),
            Conv2d(base_channels * 2, base_channels * 2, 3, 1, padding=1),
            Conv2d(base_channels * 2, base_channels * 2, 3, 1, padding=1))
        self.conv2 = nn.Sequential(
            Conv2d(base_channels * 2, base_channels * 4, 5, stride=2, padding=2),
            Conv2d(base_channels * 4, base_channels * 4, 3, 1, padding=1),
            nn.Conv2d(base_channels * 4, base_channels * 4, 3, padding=1, bias=False))

    def forward(self, imgs):
        out_dict = {}
        conv0 = self.conv0(imgs)
        out_dict['conv0'] = conv0
        conv1 = self.conv1(conv0)
        out_dict['conv1'] = conv1
        conv2 = self.conv2(conv1)
        out_dict['conv2'] = conv2
        return out_dict
def test_case37():
    url = brokerIp + '/ngsi-ld/v1/subscriptions/urn:ngsi-ld:Subscription:7'
    r = requests.delete(url)
    print(r.status_code)
    assert r.status_code == 204
class SimplicialComplexHomset(sage.categories.homset.Homset):
    def __call__(self, f):
        return SimplicialComplexMorphism(f, self.domain(), self.codomain())

    def diagonal_morphism(self, rename_vertices=True):
        mutable = self._codomain.is_mutable()
        X = self._domain.product(self._domain, rename_vertices=rename_vertices,
                                 is_mutable=mutable)
        if self._codomain != X:
            raise TypeError('diagonal morphism is only defined for Hom(X,XxX)')
        f = {}
        if rename_vertices:
            f = {i: 'L{0}R{0}'.format(i) for i in self._domain.vertices()}
        else:
            f = {i: (i, i) for i in self._domain.vertices()}
        return SimplicialComplexMorphism(f, self._domain, X)

    def identity(self):
        if not self.is_endomorphism_set():
            raise TypeError('identity map is only defined for endomorphism sets')
        f = {i: i for i in self._domain.vertices()}
        return SimplicialComplexMorphism(f, self._domain, self._codomain)

    def an_element(self):
        X_vertices = self._domain.vertices()
        try:
            i = next(iter(self._codomain.vertices()))
        except StopIteration:
            if not X_vertices:
                return {}
            else:
                raise TypeError('there are no morphisms from a non-empty '
                                'simplicial complex to an empty simplicial complex')
        f = {x: i for x in X_vertices}
        return SimplicialComplexMorphism(f, self._domain, self._codomain)
class ExpRNNCell(RNNCell):
    name = 'exprnn'

    def __init__(self, d_input, d_model, orthogonal=True,
                 hidden_activation='modrelu', **kwargs):
        super().__init__(d_input, d_model, orthogonal=orthogonal,
                         hidden_activation=hidden_activation, **kwargs)
def consolidate_edges_scope(state: SDFGState,
                            scope_node: Union[nd.EntryNode, nd.ExitNode]) -> int:
    if scope_node is None:
        return 0
    data_to_conn = {}
    consolidated = 0
    if isinstance(scope_node, nd.EntryNode):
        outer_edges = state.in_edges
        inner_edges = state.out_edges
        inner_conn = lambda e: e.src_conn
        remove_outer_connector = scope_node.remove_in_connector
        remove_inner_connector = scope_node.remove_out_connector
        prefix, oprefix = 'IN_', 'OUT_'
    else:
        outer_edges = state.out_edges
        inner_edges = state.in_edges
        inner_conn = lambda e: e.dst_conn
        remove_outer_connector = scope_node.remove_out_connector
        remove_inner_connector = scope_node.remove_in_connector
        prefix, oprefix = 'OUT_', 'IN_'

    edges_by_connector = collections.defaultdict(list)
    connectors_to_remove = set()
    for e in inner_edges(scope_node):
        if e.data.is_empty():
            continue
        conn = inner_conn(e)
        edges_by_connector[conn].append(e)
        if e.data.data not in data_to_conn:
            data_to_conn[e.data.data] = conn
        elif data_to_conn[e.data.data] != conn:
            # a second connector carries the same data container: consolidate it
            connectors_to_remove.add(conn)

    for conn in connectors_to_remove:
        e = edges_by_connector[conn][0]
        offset = 3 if conn.startswith('IN_') else (4 if conn.startswith('OUT_') else len(oprefix))
        target_conn = prefix + data_to_conn[e.data.data][offset:]
        conn_to_remove = prefix + conn[offset:]
        remove_outer_connector(conn_to_remove)
        if isinstance(scope_node, nd.EntryNode):
            out_edge = next(ed for ed in outer_edges(scope_node) if ed.dst_conn == target_conn)
            edge_to_remove = next(ed for ed in outer_edges(scope_node) if ed.dst_conn == conn_to_remove)
        else:
            out_edge = next(ed for ed in outer_edges(scope_node) if ed.src_conn == target_conn)
            edge_to_remove = next(ed for ed in outer_edges(scope_node) if ed.src_conn == conn_to_remove)
        out_edge.data.subset = sbs.union(out_edge.data.subset, edge_to_remove.data.subset)
        remove_edge_and_dangling_path(state, edge_to_remove)
        consolidated += 1
        # reconnect the inner edges to the surviving connector
        if isinstance(scope_node, nd.EntryNode):
            remove_inner_connector(e.src_conn)
            for e in edges_by_connector[conn]:
                e._src_conn = data_to_conn[e.data.data]
        else:
            remove_inner_connector(e.dst_conn)
            for e in edges_by_connector[conn]:
                e._dst_conn = data_to_conn[e.data.data]
    return consolidated
class VocabBuilder:
    def __init__(self, min_freq=None, max_count=None):
        self.word_freq = collections.Counter()
        self.min_freq = min_freq
        self.max_count = max_count

    def add_word(self, word, count=1):
        self.word_freq[word] += count

    def finish(self, *args, **kwargs):
        eligible_words_and_freqs = self.word_freq.most_common(self.max_count)
        if self.min_freq is not None:
            for i, (word, freq) in enumerate(eligible_words_and_freqs):
                if freq < self.min_freq:
                    eligible_words_and_freqs = eligible_words_and_freqs[:i]
                    break
        return Vocab((word for word, freq in sorted(eligible_words_and_freqs)),
                     *args, **kwargs)
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer

    def setUp(self):
        super().setUp()
        # NOTE: the Japanese string literals in this sample were lost during
        # extraction; the empty strings below stand in for the original
        # characters and are kept as-is.
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '', '', '', '', '', '', '', '', '', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(
            self.tmpdirname, subword_tokenizer_type='character', **kwargs)

    def get_input_output_texts(self):
        input_text = ' \n'
        output_text = ' '
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type='character')
        tokens = tokenizer.tokenize(' \n')
        self.assertListEqual(tokens, ['', '', '', '', '', '', '', '', '',
                                      '', '', '', '', '', '', '', '', ''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens),
                             [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])

    def test_character_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '', '', '', '', '', '', '', '', '']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize(''), ['', '', '', '', ''])
        self.assertListEqual(tokenizer.tokenize(''), ['', '', '', '', '[UNK]'])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-japanese-char')
        text = tokenizer.encode('', add_special_tokens=False)
        text_2 = tokenizer.encode('', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
class SNLIEval(object):
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : SNLI Entailment*****\n\n')
        self.seed = seed
        train1 = self.loadFile(os.path.join(taskpath, 's1.train'))
        train2 = self.loadFile(os.path.join(taskpath, 's2.train'))
        trainlabels = io.open(os.path.join(taskpath, 'labels.train'),
                              encoding='utf-8').read().splitlines()

        valid1 = self.loadFile(os.path.join(taskpath, 's1.dev'))
        valid2 = self.loadFile(os.path.join(taskpath, 's2.dev'))
        validlabels = io.open(os.path.join(taskpath, 'labels.dev'),
                              encoding='utf-8').read().splitlines()

        test1 = self.loadFile(os.path.join(taskpath, 's1.test'))
        test2 = self.loadFile(os.path.join(taskpath, 's2.test'))
        testlabels = io.open(os.path.join(taskpath, 'labels.test'),
                             encoding='utf-8').read().splitlines()

        # sort by sentence length to reduce padding
        sorted_train = sorted(zip(train2, train1, trainlabels),
                              key=lambda z: (len(z[0]), len(z[1]), z[2]))
        train2, train1, trainlabels = map(list, zip(*sorted_train))

        sorted_valid = sorted(zip(valid2, valid1, validlabels),
                              key=lambda z: (len(z[0]), len(z[1]), z[2]))
        valid2, valid1, validlabels = map(list, zip(*sorted_valid))

        sorted_test = sorted(zip(test2, test1, testlabels),
                             key=lambda z: (len(z[0]), len(z[1]), z[2]))
        test2, test1, testlabels = map(list, zip(*sorted_test))

        self.samples = train1 + train2 + valid1 + valid2 + test1 + test2
        self.data = {'train': (train1, train2, trainlabels),
                     'valid': (valid1, valid2, validlabels),
                     'test': (test1, test2, testlabels)}

    def do_prepare(self, params, prepare):
        return prepare(params, self.samples)

    def loadFile(self, fpath):
        with codecs.open(fpath, 'rb', 'latin-1') as f:
            return [line.split() for line in f.read().splitlines()]

    def run(self, params, batcher):
        self.X, self.y = {}, {}
        dico_label = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        for key in self.data:
            if key not in self.X:
                self.X[key] = []
            if key not in self.y:
                self.y[key] = []

            input1, input2, mylabels = self.data[key]
            enc_input = []
            n_labels = len(mylabels)
            for ii in range(0, n_labels, params.batch_size):
                batch1 = input1[ii:ii + params.batch_size]
                batch2 = input2[ii:ii + params.batch_size]
                if len(batch1) == len(batch2) and len(batch1) > 0:
                    enc1 = batcher(params, batch1)
                    enc2 = batcher(params, batch2)
                    enc_input.append(np.hstack((enc1, enc2, enc1 * enc2,
                                                np.abs(enc1 - enc2))))
                if (ii * params.batch_size) % (20000 * params.batch_size) == 0:
                    logging.info('PROGRESS (encoding): %.2f%%' % (100 * ii / n_labels))
            self.X[key] = np.vstack(enc_input)
            self.y[key] = [dico_label[y] for y in mylabels]

        config = {'nclasses': 3, 'seed': self.seed,
                  'usepytorch': params.usepytorch, 'cudaEfficient': True,
                  'nhid': params.nhid, 'noreg': True}
        config_classifier = copy.deepcopy(params.classifier)
        config_classifier['max_epoch'] = 15
        config_classifier['epoch_size'] = 1
        config['classifier'] = config_classifier

        clf = SplitClassifier(self.X, self.y, config)
        devacc, testacc = clf.run()
        logging.debug('Dev acc : {0} Test acc : {1} for SNLI\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(self.data['valid'][0]),
                'ntest': len(self.data['test'][0])}
def _seg_18():
    # the mapped characters in this table lost their diacritics during
    # extraction (e.g. Latin letters with dot/hook accents); values kept as-is
    return [
        (7813, 'V'), (7814, 'M', u'w'), (7815, 'V'), (7816, 'M', u'w'), (7817, 'V'), (7818, 'M', u'x'),
        (7819, 'V'), (7820, 'M', u'x'), (7821, 'V'), (7822, 'M', u'y'), (7823, 'V'), (7824, 'M', u'z'),
        (7825, 'V'), (7826, 'M', u'z'), (7827, 'V'), (7828, 'M', u'z'), (7829, 'V'), (7834, 'M', u'a'),
        (7835, 'M', u's'), (7836, 'V'), (7838, 'M', u'ss'), (7839, 'V'), (7840, 'M', u'a'), (7841, 'V'),
        (7842, 'M', u'a'), (7843, 'V'), (7844, 'M', u'a'), (7845, 'V'), (7846, 'M', u'a'), (7847, 'V'),
        (7848, 'M', u'a'), (7849, 'V'), (7850, 'M', u'a'), (7851, 'V'), (7852, 'M', u'a'), (7853, 'V'),
        (7854, 'M', u'a'), (7855, 'V'), (7856, 'M', u'a'), (7857, 'V'), (7858, 'M', u'a'), (7859, 'V'),
        (7860, 'M', u'a'), (7861, 'V'), (7862, 'M', u'a'), (7863, 'V'), (7864, 'M', u'e'), (7865, 'V'),
        (7866, 'M', u'e'), (7867, 'V'), (7868, 'M', u'e'), (7869, 'V'), (7870, 'M', u'e'), (7871, 'V'),
        (7872, 'M', u'e'), (7873, 'V'), (7874, 'M', u'e'), (7875, 'V'), (7876, 'M', u'e'), (7877, 'V'),
        (7878, 'M', u'e'), (7879, 'V'), (7880, 'M', u'i'), (7881, 'V'), (7882, 'M', u'i'), (7883, 'V'),
        (7884, 'M', u'o'), (7885, 'V'), (7886, 'M', u'o'), (7887, 'V'), (7888, 'M', u'o'), (7889, 'V'),
        (7890, 'M', u'o'), (7891, 'V'), (7892, 'M', u'o'), (7893, 'V'), (7894, 'M', u'o'), (7895, 'V'),
        (7896, 'M', u'o'), (7897, 'V'), (7898, 'M', u'o'), (7899, 'V'), (7900, 'M', u'o'), (7901, 'V'),
        (7902, 'M', u'o'), (7903, 'V'), (7904, 'M', u'o'), (7905, 'V'), (7906, 'M', u'o'), (7907, 'V'),
        (7908, 'M', u'u'), (7909, 'V'), (7910, 'M', u'u'), (7911, 'V'), (7912, 'M', u'u'), (7913, 'V'),
        (7914, 'M', u'u'), (7915, 'V'), (7916, 'M', u'u'), (7917, 'V'),
    ]
def import_request_result(request: CritiqueRequest) -> Optional[CritiqueRequestResult]:
    template: CritiqueTaskTemplate = request.template
    with _importers_lock:
        if template.name not in _importer:
            _importer[template.name] = _MechanicalTurkRequestImporter(template)
            _importer[template.name].initialize()
    encoded_fields = {field_name: replace_emoji_characters(field_value)
                      for field_name, field_value in request.fields.items()}
    return _importer[template.name].import_request_result(encoded_fields)
class Generator(torch.nn.Module):
    def __init__(self, input_size, vocab_size, pad_idx):
        super(Generator, self).__init__()
        self._generator = torch.nn.Sequential(
            torch.nn.Linear(input_size, vocab_size),
            torch.nn.LogSoftmax(dim=-1))
        self.criterion = torch.nn.NLLLoss(ignore_index=pad_idx, reduction='sum')
        self.metrics = Seq2SeqMetrics()
        self.pad_idx = pad_idx

    def forward(self, inputs):
        batch_size, seq_length, _ = inputs.size()
        inputs = inputs.view(batch_size * seq_length, -1)
        scores = self._generator(inputs)
        scores = scores.view(batch_size, seq_length, -1)
        _, predictions = scores.max(2)
        return dict(scores=scores, predictions=predictions)

    def compute_loss(self, inputs, targets):
        batch_size, seq_length, _ = inputs.size()
        output = self(inputs)
        scores = output['scores'].view(batch_size * seq_length, -1)
        predictions = output['predictions'].view(-1)
        targets = targets.view(-1)
        loss = self.criterion(scores, targets)
        non_pad = targets.ne(self.pad_idx)
        num_correct = predictions.eq(targets).masked_select(non_pad).sum().item()
        num_non_pad = non_pad.sum().item()
        self.metrics(loss.item(), num_non_pad, num_correct)
        return dict(loss=loss.div(float(num_non_pad)),
                    predictions=output['predictions'])

    # @classmethod restored: the method takes cls as its first argument
    @classmethod
    def from_params(cls, params):
        return cls(input_size=params['input_size'],
                   vocab_size=params['vocab_size'],
                   pad_idx=params['pad_idx'])
class DanishStemmer(_ScandinavianStemmer):
    # The Danish letters æ, å, ø appear to have been mangled to plain ASCII
    # during extraction; restored here per the NLTK snowball definitions.
    __vowels = 'aeiouyæåø'
    __consonants = 'bcdfghjklmnpqrstvwxz'
    __double_consonants = ('bb', 'cc', 'dd', 'ff', 'gg', 'hh', 'jj', 'kk',
                           'll', 'mm', 'nn', 'pp', 'qq', 'rr', 'ss', 'tt',
                           'vv', 'ww', 'xx', 'zz')
    __s_ending = 'abcdfghjklmnoprtvyzå'
    __step1_suffixes = ('erendes', 'erende', 'hedens', 'ethed', 'erede',
                        'heden', 'heder', 'endes', 'ernes', 'erens', 'erets',
                        'ered', 'ende', 'erne', 'eren', 'erer', 'heds',
                        'enes', 'eres', 'eret', 'hed', 'ene', 'ere', 'ens',
                        'ers', 'ets', 'en', 'er', 'es', 'et', 'e', 's')
    __step2_suffixes = ('gd', 'dt', 'gt', 'kt')
    __step3_suffixes = ('elig', 'lst', 'lig', 'els', 'ig')

    def stem(self, word):
        word = word.lower()
        r1 = self._r1_scandinavian(word, self.__vowels)

        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == 's':
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break

        # STEP 2
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word = word[:-1]
                r1 = r1[:-1]
                break

        # STEP 3
        if r1.endswith('igst'):
            word = word[:-2]
            r1 = r1[:-2]
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix == 'lst':
                    word = word[:-1]
                    r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    if r1.endswith(self.__step2_suffixes):
                        word = word[:-1]
                        r1 = r1[:-1]
                break

        # STEP 4: undouble consonants
        for double_cons in self.__double_consonants:
            if word.endswith(double_cons) and len(word) > 3:
                word = word[:-1]
                break
        return word
def get_model_params(model_name, override_params):
    if model_name.startswith('efficientnet'):
        w, d, s, p = efficientnet_params(model_name)
        blocks_args, global_params = efficientnet(
            width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)
    else:
        raise NotImplementedError('model name is not pre-defined: {}'.format(model_name))
    if override_params:
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params
class distill:
    def __init__(self, args, model, teacher):
        self.args = args
        self.student = model
        self.teacher = teacher
        self.student_layers = self.sampled_layer(args.arch, self.student)
        self.teacher_layers = self.sampled_layer(args.teacher_arch, self.teacher)

        def kwargs(**kwargs):
            return kwargs

        setattr(tcl.Conv2d, 'pre_defined',
                kwargs(kernel_initializer=tf.keras.initializers.he_normal(),
                       use_biases=False, activation_fn=None, trainable=True))
        setattr(tcl.BatchNorm, 'pre_defined', kwargs(trainable=True))

        self.aux_layers = []
        for s, t in zip(self.student_layers, self.teacher_layers):
            layers = []
            Ds = s.gamma.shape[-1]
            for i in range(3):
                layers.append(tcl.Conv2d([1, 1], Ds if i == 2 else Ds * 2))
                layers.append(tcl.BatchNorm(activation_fn=None if i == 2 else tf.nn.relu))
            self.aux_layers.append(tf.keras.Sequential(layers))
            self.aux_layers[-1].alpha = self.aux_layers[-1].add_weight(
                name='alpha', shape=[1, 1, 1, Ds],
                initializer=tf.constant_initializer(5.0), trainable=True)

    def sampled_layer(self, arch, model):
        if 'WResNet' in arch:
            for i in range(1, 3):
                model.Layers['BasicBlock%d.0/bn' % i].keep_feat = 'pre_act'
            model.Layers['bn_last'].keep_feat = 'pre_act'
            return ([model.Layers['BasicBlock%d.0/bn' % i] for i in range(1, 3)]
                    + [model.Layers['bn_last']])

    def loss(self, sl, tl, aux):
        s = aux(sl.feat)
        t = tf.stop_gradient(tl.feat)
        var = tf.math.softplus(aux.alpha) + 1
        return tf.reduce_mean(tf.math.log(var) + tf.square(t - s) / var) / 2

    def forward(self, input, labels, target_loss):
        self.teacher(input, training=False)
        return target_loss + tf.add_n([
            self.loss(*data) for i, data in
            enumerate(zip(self.student_layers, self.teacher_layers, self.aux_layers))
        ])
# @dataclass restored: ModelOutput subclasses in transformers are dataclasses,
# which the annotated fields with defaults require
@dataclass
class TFBaseModelOutput(ModelOutput):
    last_hidden_state: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None
class LogWriter(object):
    def __init__(self, save_path, log_types=['tensorboard', 'pkl']):
        self.save_path = save_path
        if len(log_types) == 0:
            raise ValueError('Please specify at least one log_type file to write to in the LogWriter!')
        self.writers = []
        for log_type in log_types:
            if 'tensorboard' == log_type:
                self.writers.append(SummaryWriter(save_path))
            elif 'pkl' == log_type:
                self.writers.append(PklWriter(save_path))
            else:
                # fixed: the original referenced an undefined name `log_writer`
                raise TypeError('Unrecognized log_writer type: ', log_type)

    def add_scalar(self, tag, scalar_value, global_step=None):
        for writer in self.writers:
            writer.add_scalar(tag, scalar_value=scalar_value, global_step=global_step)

    def add_histogram(self, tag, values, global_step=None, bins='sturges'):
        for writer in self.writers:
            writer.add_histogram(tag, values=values, global_step=global_step, bins=bins)
class ComplicatedSubArray(SubArray):
    def __str__(self):
        return 'myprefix {0} mypostfix'.format(self.view(SubArray))

    def __repr__(self):
        return '<{0} {1}>'.format(self.__class__.__name__, self)

    def _validate_input(self, value):
        if not isinstance(value, ComplicatedSubArray):
            raise ValueError('Can only set to MySubArray values')
        return value

    def __setitem__(self, item, value):
        super(ComplicatedSubArray, self).__setitem__(item, self._validate_input(value))

    def __getitem__(self, item):
        value = super(ComplicatedSubArray, self).__getitem__(item)
        if not isinstance(value, np.ndarray):
            value = value.__array__().view(ComplicatedSubArray)
        return value

    # property decorators restored: the two same-named `flat` definitions only
    # make sense as a getter/setter pair
    @property
    def flat(self):
        return CSAIterator(self)

    @flat.setter
    def flat(self, value):
        y = self.ravel()
        y[:] = value

    def __array_wrap__(self, obj, context=None):
        obj = super(ComplicatedSubArray, self).__array_wrap__(obj, context)
        if context is not None and context[0] is np.multiply:
            obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1
        return obj
def add_start_docstrings_to_model_forward(*docstr):
    def docstring_decorator(fn):
        class_name = ':class:`~transformers.{}`'.format(fn.__qualname__.split('.')[0])
        intro = ' The {} forward method, overrides the :func:`__call__` special method.'.format(class_name)
        note = ('\n\n .. note::\n Although the recipe for forward pass needs to be defined within this function, '
                'one should call the\n :class:`Module` instance afterwards instead of this since the former takes '
                'care of running the pre and post\n processing steps while the latter silently ignores them.\n ')
        fn.__doc__ = intro + note + ''.join(docstr) + (fn.__doc__ if fn.__doc__ is not None else '')
        return fn
    return docstring_decorator
class HeaderContent(object):
    def __init__(self, header, content):
        self.header = header
        self.content = content

    def add_header(self, header):
        self.header.append(header)

    def add_paragraph(self, paragraph):
        self.content.append(paragraph)

    def get_num_headers(self):
        return len(self.header)

    def get_num_paras(self):
        return len(self.content)

    def get_num_words(self):
        return len(' '.join([x.strip() for x in self.content]).split())

    def print_headers(self):
        for elem in self.header:
            print(elem.strip())

    def print_joined_headers(self):
        print(' '.join([elem.strip().replace('\n', ' ') for elem in self.header]))

    def print_short_content(self):
        for elem in self.content:
            print(elem.strip()[:20])
def find_missing_eduspan(node, misplaced_children, verbose=False):
    if verbose:
        print('\nMISSING CHILDREN\n', node.eduspan, [m.eduspan for m in node.nodelist])
    eduCovered = sorted(list(set([m.eduspan[0] for m in node.nodelist])))
    eduCovered.extend(list(set([m.eduspan[1] for m in node.nodelist])))
    eduCovered = sorted(list(set(eduCovered)))
    if len(eduCovered) != 0 and tuple([min(eduCovered), max(eduCovered)]) != node.eduspan:
        if eduCovered[0] != node.eduspan[0]:
            if verbose:
                print('\tMissing', node.eduspan[0], eduCovered[0] - 1)
            child = findChild(node.eduspan[0], eduCovered[0] - 1, misplaced_children)
            if child is not None:
                node.nodelist.append(child)
                misplaced_children.remove(child)
        elif len(eduCovered) == 1:
            if verbose:
                print('\tMissing, ', eduCovered[0] + 1, node.eduspan[1])
            child = findChild(eduCovered[0] + 1, node.eduspan[1], misplaced_children)
            if child is not None:
                node.nodelist.append(child)
                misplaced_children.remove(child)
        elif eduCovered[1] != node.eduspan[1]:
            if verbose:
                print('\tMissing, ', eduCovered[1] + 1, node.eduspan[1])
            child = findChild(eduCovered[1] + 1, node.eduspan[1], misplaced_children)
            if child is not None:
                node.nodelist.append(child)
                misplaced_children.remove(child)
class ConfigCache(object): def __init__(self): self._configs = {} self._default_config = {} def set_default_config(self, config): self._default_config = dict(config) def set_config(self, cls_or_env_id, config): config_key = self._get_config_key(cls_or_env_id) self._configs[config_key] = dict(config) def get_config(self, cls_or_env_id): config_key = self._get_config_key(cls_or_env_id) config = dict(self._default_config) config.update(self._configs.get(config_key, {})) return config def clear_config(self, cls_or_env_id): config_key = self._get_config_key(cls_or_env_id) if (config_key in self._configs): del self._configs[config_key] def _get_config_key(self, cls_or_env_id): if inspect.isclass(cls_or_env_id): return cls_or_env_id env_id = cls_or_env_id assert isinstance(env_id, str) if (env_id not in gym_registry.env_specs): raise ValueError('Unregistered environment name {}.'.format(env_id)) entry_point = gym_registry.env_specs[env_id]._entry_point if callable(entry_point): return entry_point else: return import_class_from_path(entry_point)
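A small sketch of how ConfigCache layers per-class overrides on top of the defaults. MyEnv is a hypothetical class used only as a config key, so only the class-keyed path (not the gym env-id lookup) is exercised.

class MyEnv:  # hypothetical environment class used only as a config key
    pass

cache = ConfigCache()
cache.set_default_config({'frame_skip': 4, 'seed': 0})
cache.set_config(MyEnv, {'seed': 123})
assert cache.get_config(MyEnv) == {'frame_skip': 4, 'seed': 123}
cache.clear_config(MyEnv)
assert cache.get_config(MyEnv) == {'frame_skip': 4, 'seed': 0}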
def test_signature_setup(): mG = BilinearGroupPair() keypair = BBSPlusKeypair.generate(mG, 9) messages = [Bn(30), Bn(31), Bn(32), Bn(12)] (pk, sk) = (keypair.pk, keypair.sk) (generators, h0) = (keypair.generators, keypair.h0) creator = BBSPlusSignatureCreator(pk) com = creator.commit(messages, zkp=True) presignature = sk.sign(com.com_message) signature = creator.obtain_signature(presignature) assert (com.verify_blinding(pk) and signature.verify_signature(pk, messages))
def concepts_to_adj_matrices_2step_relax_all_pair(data): (qc_ids, ac_ids) = data qa_nodes = (set(qc_ids) | set(ac_ids)) extra_nodes = set() for qid in qc_ids: for aid in ac_ids: if ((qid != aid) and (qid in cpnet_simple.nodes) and (aid in cpnet_simple.nodes)): extra_nodes |= (set(cpnet_simple[qid]) & set(cpnet_simple[aid])) intermediate_ids = (extra_nodes - qa_nodes) for qid in intermediate_ids: for aid in ac_ids: if ((qid != aid) and (qid in cpnet_simple.nodes) and (aid in cpnet_simple.nodes)): extra_nodes |= (set(cpnet_simple[qid]) & set(cpnet_simple[aid])) for qid in qc_ids: for aid in intermediate_ids: if ((qid != aid) and (qid in cpnet_simple.nodes) and (aid in cpnet_simple.nodes)): extra_nodes |= (set(cpnet_simple[qid]) & set(cpnet_simple[aid])) extra_nodes = (extra_nodes - qa_nodes) schema_graph = ((sorted(qc_ids) + sorted(ac_ids)) + sorted(extra_nodes)) arange = np.arange(len(schema_graph)) qmask = (arange < len(qc_ids)) amask = ((arange >= len(qc_ids)) & (arange < (len(qc_ids) + len(ac_ids)))) (adj, concepts) = concepts2adj(schema_graph) return (adj, concepts, qmask, amask)
def untokenize(raw: str, tokens: List[str], return_mask: bool=False, token_sym: Any=True, untoken_sym: Any=False) -> T_untokenized: mask = [] untokenized = [] pos = raw.find(tokens[0]) if (pos != 0): untokenized.append(raw[:pos]) mask.append(untoken_sym) raw = raw[pos:] prev_token = tokens[0] for token in tokens[1:]: raw = raw[len(prev_token):] pos = raw.find(token) untokenized.append(prev_token) mask.append(token_sym) if pos: mask.append(untoken_sym) untokenized.append(raw[:pos]) prev_token = token raw = raw[pos:] untokenized.append(prev_token) mask.append(token_sym) cur = len(prev_token) if (cur != len(raw)): untokenized.append(raw[cur:]) mask.append(untoken_sym) if return_mask: return (untokenized, mask) return untokenized
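A worked example of untokenize: given the raw string and its tokens, the gaps between tokens come back as unmarked spans. The expected values below were traced by hand from the code above.

pieces, mask = untokenize('Hello, world!', ['Hello', 'world'], return_mask=True)
assert pieces == ['Hello', ', ', 'world', '!']
assert mask == [True, False, True, False]  # default token_sym / untoken_sym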
@pytest.mark.operations('create_user', 'get_user', 'update_user')
@pytest.mark.openapi_version('3.0')
def test_explicit_headers_reproduction(testdir, openapi3_base_url, app_schema):
    testdir.make_test(
        f'''
schema.base_url = "{openapi3_base_url}"

class APIWorkflow(schema.as_state_machine()):
    def get_call_kwargs(self, case):
        return {{"headers": {{"X-Token": "FOOBAR"}}}}

    def validate_response(self, response, case):
        assert 0, "Explicit failure"

TestCase = APIWorkflow.TestCase
''',
        schema=app_schema,
    )
    result = testdir.runpytest()
    result.assert_outcomes(failed=1)
    example = find_reproduction_code(result.outlines)
    assert "headers={'X-Token': 'FOOBAR'}" in example.splitlines()[1]
def get_abi_tag(): soabi = get_config_var('SOABI') impl = get_abbr_impl() if ((not soabi) and (impl in {'cp', 'pp'}) and hasattr(sys, 'maxunicode')): d = '' m = '' u = '' if get_flag('Py_DEBUG', (lambda : hasattr(sys, 'gettotalrefcount')), warn=(impl == 'cp')): d = 'd' if get_flag('WITH_PYMALLOC', (lambda : (impl == 'cp')), warn=(impl == 'cp')): m = 'm' if (get_flag('Py_UNICODE_SIZE', (lambda : (sys.maxunicode == 1114111)), expected=4, warn=((impl == 'cp') and six.PY2)) and six.PY2): u = 'u' abi = ('%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)) elif (soabi and soabi.startswith('cpython-')): abi = ('cp' + soabi.split('-')[1]) elif soabi: abi = soabi.replace('.', '_').replace('-', '_') else: abi = None return abi
class TestTimeSimulation(unittest.TestCase): def setUp(self): mesh = discretize.TensorMesh([10, 10]) self.sim = simulation.BaseTimeSimulation(mesh=mesh) def test_time_simulation_time_steps(self): self.sim.time_steps = [(1e-06, 3), 1e-05, (0.0001, 2)] true_time_steps = np.r_[(1e-06, 1e-06, 1e-06, 1e-05, 0.0001, 0.0001)] self.assertTrue(np.all((true_time_steps == self.sim.time_steps))) true_time_steps = np.r_[(1e-07, 1e-06, 1e-06, 1e-05, 0.0001, 0.0001)] self.sim.time_steps = true_time_steps self.assertTrue(np.all((true_time_steps == self.sim.time_steps))) self.assertTrue((self.sim.nT == 6)) self.assertTrue(np.all((self.sim.times == np.r_[(0, true_time_steps)].cumsum()))) self.sim.t0 = 1 self.assertTrue(np.all((self.sim.times == np.r_[(1, true_time_steps)].cumsum())))
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Packet__gt___Ns3Ipv6Header_Unsigned_short_Ns3Ptr__lt__ns3Ipv6Interface__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls): cls.add_constructor([]) cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Packet >, ns3::Ipv6Header, unsigned short, ns3::Ptr< ns3::Ipv6Interface >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')]) cls.add_method('DoGetTypeid', 'std::string', [], is_static=True) cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True) cls.add_method('operator()', 'void', [param('ns3::Ptr< ns3::Packet >', 'arg0'), param('ns3::Ipv6Header', 'arg1'), param('short unsigned int', 'arg2'), param('ns3::Ptr< ns3::Ipv6Interface >', 'arg3')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__') return
class TestCNNModel(TfGraphTestCase):

    def setup_method(self):
        super().setup_method()
        self.batch_size = 5
        self.input_width = 10
        self.input_height = 10
        self.obs_input = np.ones(
            (self.batch_size, self.input_width, self.input_height, 3))
        input_shape = self.obs_input.shape[1:]
        self._input_ph = tf.compat.v1.placeholder(
            tf.float32, shape=(None, ) + input_shape, name='input')

    @pytest.mark.parametrize('filters, in_channels, strides', [
        (((32, (1, 1)), ), (3, ), (1, )),
        (((32, (3, 3)), ), (3, ), (1, )),
        (((32, (3, 3)), ), (3, ), (2, )),
        (((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2)),
    ])
    def test_output_value(self, filters, in_channels, strides):
        model = CNNModel(filters=filters,
                         strides=strides,
                         name='cnn_model',
                         padding='VALID',
                         hidden_w_init=tf.constant_initializer(1),
                         hidden_nonlinearity=None)
        outputs = model.build(self._input_ph).outputs
        output = self.sess.run(outputs,
                               feed_dict={self._input_ph: self.obs_input})

        filter_sum = 1
        for filter_iter, in_channel in zip(filters, in_channels):
            filter_sum *= filter_iter[1][0] * filter_iter[1][1] * in_channel

        height_size = self.input_height
        width_size = self.input_width
        for filter_iter, stride in zip(filters, strides):
            height_size = int((height_size - filter_iter[1][0]) / stride) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
        flatten_shape = height_size * width_size * filters[-1][0]
        expected_output = np.full((self.batch_size, flatten_shape),
                                  filter_sum,
                                  dtype=np.float32)
        assert np.array_equal(output, expected_output)

    @pytest.mark.parametrize(
        'filters, in_channels, strides, pool_strides, pool_shapes', [
            (((32, (1, 1)), ), (3, ), (1, ), (1, 1), (1, 1)),
            (((32, (3, 3)), ), (3, ), (1, ), (2, 2), (1, 1)),
            (((32, (3, 3)), ), (3, ), (1, ), (1, 1), (2, 2)),
            (((32, (3, 3)), ), (3, ), (1, ), (2, 2), (2, 2)),
            (((32, (3, 3)), ), (3, ), (2, ), (1, 1), (2, 2)),
            (((32, (3, 3)), ), (3, ), (2, ), (2, 2), (2, 2)),
            (((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1), (1, 1), (1, 1)),
            (((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1), (1, 1), (1, 1)),
            (((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2), (1, 1), (1, 1)),
        ])
    def test_output_value_max_pooling(self, filters, in_channels, strides,
                                      pool_strides, pool_shapes):
        model = CNNModelWithMaxPooling(
            filters=filters,
            strides=strides,
            name='cnn_model',
            padding='VALID',
            pool_strides=pool_strides,
            pool_shapes=pool_shapes,
            hidden_w_init=tf.constant_initializer(1),
            hidden_nonlinearity=None)
        outputs = model.build(self._input_ph).outputs
        output = self.sess.run(outputs,
                               feed_dict={self._input_ph: self.obs_input})

        filter_sum = 1
        for filter_iter, in_channel in zip(filters, in_channels):
            filter_sum *= filter_iter[1][0] * filter_iter[1][1] * in_channel

        height_size = self.input_height
        width_size = self.input_width
        for filter_iter, stride in zip(filters, strides):
            height_size = int((height_size - filter_iter[1][0]) / stride) + 1
            height_size = int((height_size - pool_shapes[0]) / pool_strides[0]) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
            width_size = int((width_size - pool_shapes[1]) / pool_strides[1]) + 1
        flatten_shape = height_size * width_size * filters[-1][0]
        expected_output = np.full((self.batch_size, flatten_shape),
                                  filter_sum,
                                  dtype=np.float32)
        assert np.array_equal(output, expected_output)

    @pytest.mark.parametrize('filters, strides', [
        (((32, (1, 1)), ), (1, )),
        (((32, (3, 3)), ), (1, )),
        (((32, (3, 3)), ), (2, )),
        (((32, (1, 1)), (64, (1, 1))), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (2, 2)),
    ])
    def test_is_pickleable(self, filters, strides):
        model = CNNModel(filters=filters,
                         strides=strides,
                         name='cnn_model',
                         padding='VALID',
                         hidden_w_init=tf.constant_initializer(1),
                         hidden_nonlinearity=None)
        outputs = model.build(self._input_ph).outputs
        with tf.compat.v1.variable_scope('cnn_model/cnn/h0', reuse=True):
            bias = tf.compat.v1.get_variable('bias')
            bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run(outputs,
                                feed_dict={self._input_ph: self.obs_input})

        h = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            model_pickled = pickle.loads(h)
            input_shape = self.obs_input.shape[1:]
            input_ph = tf.compat.v1.placeholder(tf.float32,
                                                shape=(None, ) + input_shape,
                                                name='input')
            outputs = model_pickled.build(input_ph).outputs
            output2 = sess.run(outputs, feed_dict={input_ph: self.obs_input})
            assert np.array_equal(output1, output2)
def time_to_minutes(time): if (not isinstance(time, str)): time = str(time) d = {'days': 0, 'hours': 0, 'minutes': 0, 'seconds': 0} regex = list(filter((lambda regex: (regex.match(time) is not None)), timeformats)) if (len(regex) == 0): return assert (len(regex) == 1), 'multiple time formats match' m = regex[0].match(time) d.update(m.groupdict()) minutes = (((((int(d['days']) * 24) * 60) + (int(d['hours']) * 60)) + int(d['minutes'])) + math.ceil((int(d['seconds']) / 60))) assert (minutes > 0), 'minutes has to be greater than 0' return minutes
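time_to_minutes relies on a module-level timeformats list of compiled regexes with named groups (days, hours, minutes, seconds). The table below is a hypothetical reconstruction; each pattern uses only required groups so groupdict() never yields None values into the int() conversions.

import math
import re

# Hypothetical timeformats assumed by time_to_minutes above.
timeformats = [
    re.compile(r'^(?P<days>\d+)-(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)$'),
    re.compile(r'^(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)$'),
    re.compile(r'^(?P<minutes>\d+)$'),
]

assert time_to_minutes('1-02:30:00') == 1590  # 1 day + 2 h + 30 min
assert time_to_minutes('90') == 90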
def create_linear_transform(param_dim): return transforms.CompositeTransform([transforms.RandomPermutation(features=param_dim), transforms.LULinear(param_dim, identity_init=True)])
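Assuming transforms comes from the nflows library, the returned composite (random permutation followed by an LU-decomposed linear layer initialized near identity) is an invertible module; a quick round-trip sketch:

import torch

transform = create_linear_transform(param_dim=4)
x = torch.randn(8, 4)
y, logabsdet = transform(x)        # forward pass returns (outputs, log|det J|)
x_back, _ = transform.inverse(y)
assert torch.allclose(x, x_back, atol=1e-5)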
def matches_dict(criteria_dict, test_dict): for (k, v) in criteria_dict.items(): if (k not in test_dict): return False elif (test_dict[k] != v): return False return True
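matches_dict checks that every criteria key is present in the test dict with an equal value, i.e. the criteria are a sub-dict of the record. For example:

record = {'name': 'eth0', 'state': 'up', 'mtu': 1500}
assert matches_dict({'state': 'up'}, record)
assert not matches_dict({'state': 'up', 'speed': 1000}, record)  # 'speed' missing
assert not matches_dict({'mtu': 9000}, record)                   # value differs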
def main(config, stdout_dir, args_str):
    args_list = ['train.py']
    args_list += args_str.split(' ') if len(args_str) > 0 else []
    args_list.append('--config={}'.format(config))
    num_gpus = torch.cuda.device_count()
    args_list.append('--num_gpus={}'.format(num_gpus))
    # Placeholder rank argument, overwritten per worker below; without it the
    # args_list[-2] assignment would clobber --num_gpus instead.
    args_list.append('--rank={}'.format(0))
    args_list.append('--group_name=group_{}'.format(time.strftime('%Y_%m_%d-%H%M%S')))

    if not os.path.isdir(stdout_dir):
        os.makedirs(stdout_dir)
        os.chmod(stdout_dir, 0o775)  # 509 in decimal in the original

    workers = []
    for i in range(num_gpus):
        args_list[-2] = '--rank={}'.format(i)
        stdout = None if i == 0 else open(
            os.path.join(stdout_dir, 'GPU_{}.log'.format(i)), 'w')
        print(args_list)
        p = subprocess.Popen([str(sys.executable)] + args_list, stdout=stdout)
        workers.append(p)

    for p in workers:
        p.wait()
def validate_il_idnr(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]: if isinstance(df, (pd.Series, dd.Series)): return df.apply(idnr.is_valid) elif isinstance(df, (pd.DataFrame, dd.DataFrame)): if (column != ''): return df[column].apply(idnr.is_valid) else: return df.applymap(idnr.is_valid) return idnr.is_valid(df)
class DepthWiseConv2dImplicitGEMM(nn.Conv2d): def __init__(self, channels, kernel, bias=False): super().__init__(channels, channels, kernel, groups=channels, bias=bias) def forward(self, x): if (x.dtype == torch.float32): x = _DepthWiseConv2dImplicitGEMMFP32.apply(x, self.weight) elif (x.dtype == torch.float16): x = _DepthWiseConv2dImplicitGEMMFP16.apply(x, self.weight) else: raise TypeError('Only support fp32 and fp16, get {}'.format(x.dtype)) if (self.bias is not None): x = (x + self.bias.to(x).view(1, (- 1), 1, 1)) return x
def test_fortran_frontend_merge_comparison_arrays(): test_string = '\n PROGRAM merge_test\n implicit none\n double precision, dimension(7) :: input1\n double precision, dimension(7) :: input2\n double precision, dimension(7) :: res\n CALL merge_test_function(input1, input2, res)\n end\n\n SUBROUTINE merge_test_function(input1, input2, res)\n double precision, dimension(7) :: input1\n double precision, dimension(7) :: input2\n double precision, dimension(7) :: res\n\n res = MERGE(input1, input2, input1 .lt. input2)\n\n END SUBROUTINE merge_test_function\n ' sdfg = fortran_parser.create_sdfg_from_string(test_string, 'merge_test', True) sdfg.simplify(verbose=True) sdfg.compile() size = 7 first = np.full([size], 13, order='F', dtype=np.float64) second = np.full([size], 42, order='F', dtype=np.float64) res = np.full([size], 40, order='F', dtype=np.float64) sdfg(input1=first, input2=second, res=res) for val in res: assert (val == 13) for i in range(int((size / 2))): first[i] = 45 sdfg(input1=first, input2=second, res=res) for i in range(int((size / 2))): assert (res[i] == 42) for i in range(int((size / 2)), size): assert (res[i] == 13) first[:] = 13 for i in range(size): if ((i % 2) == 1): first[i] = 45 sdfg(input1=first, input2=second, res=res) for i in range(size): if ((i % 2) == 1): assert (res[i] == 42) else: assert (res[i] == 13)
class BlobAlgebra(CombinatorialFreeModule):

    @staticmethod
    def __classcall_private__(cls, k, q1, q2, q3, base_ring=None, prefix='B'):
        if base_ring is None:
            base_ring = get_coercion_model().common_parent(q1, q2, q3)
        q1 = base_ring(q1)
        q2 = base_ring(q2)
        q3 = base_ring(q3)
        return super().__classcall__(cls, k, q1, q2, q3, base_ring, prefix)

    def __init__(self, k, q1, q2, q3, base_ring, prefix):
        self._q1 = q1
        self._q2 = q2
        self._q3 = q3
        diagrams = BlobDiagrams(k)
        cat = Algebras(base_ring.category()).FiniteDimensional().WithBasis()
        CombinatorialFreeModule.__init__(self, base_ring, diagrams,
                                         category=cat, prefix=prefix,
                                         bracket=False)

    def _ascii_art_term(self, diagram):
        return TL_diagram_ascii_art(diagram.marked + diagram.unmarked,
                                    use_unicode=False, blobs=diagram.marked)

    def _unicode_art_term(self, diagram):
        return TL_diagram_ascii_art(diagram.marked + diagram.unmarked,
                                    use_unicode=True, blobs=diagram.marked)

    def _latex_term(self, diagram):
        def edge_options(P):
            if P[1] < P[0]:
                P = [P[1], P[0]]
            if tuple(P) in diagram.marked:
                return 'blue,very thick'
            return ''

        def edge_additions(P):
            if P[1] < P[0]:
                P = [P[1], P[0]]
            if tuple(P) in diagram.marked:
                return 'node[midway,circle,fill,scale=0.6] {} '
            return ''

        return diagram_latex(diagram.marked + diagram.unmarked,
                             edge_options=edge_options,
                             edge_additions=edge_additions)

    def order(self):
        return self._indices.order()

    @cached_method
    def one_basis(self):
        B = self._indices
        return B.element_class(B, [], [[i, -i] for i in range(1, self.order() + 1)])

    def product_on_basis(self, top, bot):
        ret_lists = [[], []]  # [marked pairings, unmarked pairings]
        coeff = self.base_ring().one()
        top_marked = set(top.marked)
        top_unmarked = set(top.unmarked)
        bot_marked = set(bot.marked)
        bot_unmarked = set(bot.unmarked)
        for top_set, is_unmarked in [(top_marked, 0), (top_unmarked, 1)]:
            while top_set:
                # Follow the path from an anchor until we hit an endpoint
                # or close a loop.
                cur, stop = top_set.pop()
                unmarked = is_unmarked
                if cur > 0:  # both endpoints already lie in the top diagram
                    ret_lists[unmarked].append((cur, stop))
                    continue
                anchored = bool(stop > 0)
                while anchored or cur != stop:
                    cur = -cur  # move to the bottom diagram
                    for X in bot_marked:
                        if cur in X:
                            if unmarked:
                                unmarked = 0
                            else:
                                coeff *= self._q2
                            prev = cur
                            cur = X[1 - X.index(prev)]
                            bot_marked.remove(X)
                            break
                    for X in bot_unmarked:
                        if cur in X:
                            prev = cur
                            cur = X[1 - X.index(prev)]
                            bot_unmarked.remove(X)
                            break
                    if cur < 0:  # found an endpoint in the bottom diagram
                        if anchored:
                            ret_lists[unmarked].append((stop, cur))
                            break
                        else:
                            anchored = True
                            stop, cur = cur, stop
                        continue
                    cur = -cur  # move back to the top diagram
                    for X in top_marked:
                        if cur in X:
                            if unmarked:
                                unmarked = 0
                            else:
                                coeff *= self._q2
                            prev = cur
                            cur = X[1 - X.index(prev)]
                            top_marked.remove(X)
                            break
                    for X in top_unmarked:
                        if cur in X:
                            prev = cur
                            cur = X[1 - X.index(prev)]
                            top_unmarked.remove(X)
                            break
                    if cur > 0:  # found an endpoint in the top diagram
                        if anchored:
                            ret_lists[unmarked].append((stop, cur))
                            break
                        else:
                            anchored = True
                            stop, cur = cur, stop
                if cur == stop:  # the path closed into a loop
                    if unmarked:
                        coeff *= self._q1
                    else:
                        coeff *= self._q3
        ret_lists[0].extend(bot_marked)
        ret_lists[1].extend(bot_unmarked)
        if coeff == 0:
            return self.zero()
        diagram = self._indices.element_class(self._indices, ret_lists[0], ret_lists[1])
        return self._from_dict({diagram: coeff}, remove_zeros=False)
def build(log_file, session_file): cluster_file = (log_file + '.cacb-clst.pkl') if os.path.isfile(cluster_file): logger.info('Cluster file already detected skipping') else: build_cluster(log_file, cluster_file) build_cacb(cluster_file, session_file)
def threshold_func(item, class_index, classes, threshold): class_name = classes[class_index] if (item[class_index] >= threshold): return class_name _classes = classes[:] _classes.remove(class_name) return _classes[0]
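threshold_func implements a thresholded binary decision: return the class at class_index when its score clears the threshold, otherwise the remaining class (it implicitly assumes exactly two classes, since only _classes[0] is returned). Hypothetical scores:

classes = ['positive', 'negative']
scores = [0.8, 0.2]  # per-class scores aligned with `classes`
assert threshold_func(scores, 0, classes, threshold=0.5) == 'positive'
assert threshold_func(scores, 0, classes, threshold=0.9) == 'negative'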
def register_Ns3CallbackImplBase_methods(root_module, cls): cls.add_constructor([]) cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::ObjectBase*']) cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'void']) cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned int']) cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::NetDevice> ']) cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Packet const> ']) cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned short']) cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Address const&']) cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::NetDevice::PacketType']) cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::QueueDiscItem const> ']) return
def labeled_unlabeled_split(labels, num_labeled, sample_mode, incl_labeled_in_unlabeled): labels = np.array(labels) (classes, class_counts) = np.unique(labels, return_counts=True) num_classes = len(classes) class_dist = (class_counts / class_counts.sum()) if (sample_mode == 'equal'): if ((num_labeled % num_classes) != 0): raise ValueError('Number of labels must be divisible by number of classes for equal label allocation') labels_per_class = np.full(num_classes, (num_labeled // num_classes)) elif (sample_mode == 'multinomial'): labels_per_class = np.random.multinomial(num_labeled, class_dist) elif (sample_mode == 'multinomial_min1'): if (num_labeled < num_classes): raise ValueError('Number of labels must be at least the number of classes') labels_per_class = (np.random.multinomial((num_labeled - num_classes), class_dist) + 1) elif (sample_mode == 'label_dist'): labels_per_class = (class_dist * num_labeled).astype(int) for _ in range((num_labeled - labels_per_class.sum())): i = np.argmax((class_dist - (labels_per_class / labels_per_class.sum()))) labels_per_class[i] += 1 elif (sample_mode == 'label_dist_min1'): if (num_labeled < num_classes): raise ValueError('Number of labels must be at least the number of classes') labels_per_class = (class_dist * num_labeled).astype(int) for i in range(num_classes): if (labels_per_class[i] == 0): labels_per_class[i] += 1 for _ in range((num_labeled - labels_per_class.sum())): i = np.argmax((class_dist - (labels_per_class / labels_per_class.sum()))) labels_per_class[i] += 1 else: raise ValueError('Invalid sampling mode {}'.format(sample_mode)) labeled_idxs = [] unlabeled_idxs = [] for i in range(num_classes): idxs = np.where((labels == i))[0] np.random.shuffle(idxs) labeled_idxs.extend(idxs[:labels_per_class[i]]) if incl_labeled_in_unlabeled: unlabeled_idxs.extend(idxs) else: unlabeled_idxs.extend(idxs[labels_per_class[i]:]) return (np.array(labeled_idxs), np.array(unlabeled_idxs))
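A small sketch of the split on a toy label vector. Note the function indexes classes as 0..num_classes-1 (it uses np.where(labels == i)), so labels must already be encoded that way.

import numpy as np

np.random.seed(0)
labels = [0] * 50 + [1] * 30 + [2] * 20
lab_idx, unlab_idx = labeled_unlabeled_split(
    labels, num_labeled=9, sample_mode='equal', incl_labeled_in_unlabeled=False)
assert len(lab_idx) == 9                      # 3 per class under 'equal'
assert len(unlab_idx) == len(labels) - 9
assert not set(lab_idx) & set(unlab_idx)      # disjoint when not included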
class EnergyPower(BaseDataset): def __init__(self, rootdir=None): super().__init__() if (rootdir is None): fdir = os.path.dirname(os.path.abspath(__file__)) merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..')) rootdir = os.path.join(merlion_root, 'data', 'multivariate', 'energy_power') assert ('energy_power' in rootdir.split('/')[(- 1)]), 'energy_power should be found as the last level of the directory for this dataset' dsetdirs = [rootdir] extension = 'csv.gz' fnames = sum([sorted(glob.glob(f'{d}/*.{extension}')) for d in dsetdirs], []) assert (len(fnames) == 1), f'rootdir {rootdir} does not contain dataset file.' start_timestamp = '2014-01-01 00:00:00' for (i, fn) in enumerate(sorted(fnames)): df = pd.read_csv(fn, index_col='Datetime', parse_dates=True) df = df[(df.index >= start_timestamp)] df.drop(['NI', 'PJM_Load'], axis=1, inplace=True) df.index.rename('timestamp', inplace=True) assert isinstance(df.index, pd.DatetimeIndex) df.sort_index(inplace=True) self.time_series.append(df) self.metadata.append({'trainval': pd.Series((df.index <= '2018-01-01 00:00:00'), index=df.index), 'start_timestamp': start_timestamp})
def extract_celeb(data_dir, data_type): state_type_file_path = os.path.join(data_dir, (data_type + '_state_type.txt')) context_text_file_path = os.path.join(data_dir, (data_type + '_context_text.txt')) celeb_out_file_path = os.path.join(data_dir, (data_type + '_raw_celebs.txt')) state_file = open(state_type_file_path, 'r') text_file = open(context_text_file_path, 'r') cel_list = [] for (state_line, text_line) in zip(state_file, text_file): state = state_line.split(',')[2] context_line = text_line.split('|')[(- 1)] cel_line = '' if (state == 'celebrity'): local_celeb_list = [] pattern = 'cel_' context_words = context_line.strip().strip('?').split(' ') for word in context_words: if (pattern in word): local_celeb_list.append(word) cel_line = ' '.join(local_celeb_list) cel_list.append(cel_line) with open(celeb_out_file_path, 'w') as fp: for instance in cel_list: fp.write((str(instance) + '\n'))
@wrap_experiment
def vpg_garage_pytorch(ctxt, env_id, seed):
    deterministic.set_seed(seed)
    runner = LocalRunner(ctxt)
    env = GarageEnv(normalize(gym.make(env_id)))
    policy = PyTorch_GMP(env.spec,
                         hidden_sizes=hyper_parameters['hidden_sizes'],
                         hidden_nonlinearity=torch.tanh,
                         output_nonlinearity=None)
    value_function = GaussianMLPValueFunction(env_spec=env.spec,
                                              hidden_sizes=(32, 32),
                                              hidden_nonlinearity=torch.tanh,
                                              output_nonlinearity=None)
    policy_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=0.00025)),
                                        policy,
                                        max_optimization_epochs=10,
                                        minibatch_size=64)
    vf_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=0.00025)),
                                    value_function,
                                    max_optimization_epochs=10,
                                    minibatch_size=64)
    algo = PyTorch_VPG(env_spec=env.spec,
                       policy=policy,
                       value_function=value_function,
                       policy_optimizer=policy_optimizer,
                       vf_optimizer=vf_optimizer,
                       max_path_length=hyper_parameters['max_path_length'],
                       discount=hyper_parameters['discount'],
                       center_adv=hyper_parameters['center_adv'])
    runner.setup(algo, env)
    runner.train(n_epochs=hyper_parameters['n_epochs'],
                 batch_size=hyper_parameters['batch_size'])
def run_chatgpt_prediction(test_file): print('Running ChatGPT on test file: {}'.format(test_file)) output_file = test_file.replace('.json', '.json.chatgpt') if os.path.exists(output_file): passed_cases = open(output_file, 'r').readlines() if (not passed_cases[(- 1)].endswith('\n')): passed_cases = passed_cases[:(- 1)] open(output_file, 'w').writelines(passed_cases) start_idx = len(passed_cases) else: start_idx = 0 output_f = open(output_file, 'a') (predictions, ground_truths) = ([], []) print('Start from {}'.format(start_idx)) with open(test_file, 'r') as f: for (idx, line) in tqdm(enumerate(f.readlines()[start_idx:])): data = json.loads(line) model_input = data['input'] metadata = data['metadata'] model_output = None while (model_output is None): try: model_output = run_chatgpt_api(model_input) except Exception as e: print(e) finally: signal.alarm(0) predictions.append(model_output.strip()) ground_truths.append(data['output'].strip()) if ((idx % 10) == 0): print(model_output) output_f.write((json.dumps({'prediction': model_output.strip(), 'ground_truth': data['output'].strip(), 'input': model_input, 'metadata': metadata}) + '\n')) output_f.flush() output_f.close()
class TestScipyOptimizer(unittest.TestCase): def setUp(self): self.methods = ['Nelder-Mead', 'Powell', 'CG', 'L-BFGS-B', 'TNC', 'SLSQP'] def test_single_variable_quadratic(self): for method in self.methods: (obj, param, optimum) = problems.build_single_variable_quadratic() opt = ScipyOptimizer(obj, param, method) opt.optimize() np.testing.assert_almost_equal(opt.param.to_vector(), optimum, decimal=4) def test_two_variable_quadratic(self): for method in self.methods: (obj, param, optimum) = problems.build_two_variable_quadratic() opt = ScipyOptimizer(obj, param, method) opt.optimize() np.testing.assert_almost_equal(opt.param.to_vector(), optimum, decimal=4) def test_rosenbrock_function(self): for method in self.methods: (obj, param, optimum) = problems.build_rosenbrock_function() opt = ScipyOptimizer(obj, param, method) opt.optimize() np.testing.assert_almost_equal(opt.param.to_vector(), optimum, decimal=2) def test_constrained_optimization(self): var = Variable(2) x_var = var[0] y_var = var[1] obj = Sum([Product([x_var, x_var]), Product([Constant(2), y_var, y_var]), Product([Constant((- 5)), y_var]), Product([Constant((- 2)), x_var, y_var])]) param = DirectParam(np.array([0, 0]), bounds=[(- 10), 10]) constraints = [{'type': 'ineq', 'fun': (lambda z: ((z[0] - z[1]) - 1)), 'jac': (lambda z: np.array([1, (- 1)]))}] opt = ScipyOptimizer(obj, param, 'SLSQP', constraints=constraints) opt.optimize() np.testing.assert_almost_equal(opt.param.to_vector(), [(7 / 2), (5 / 2)]) def test_constrained_optimization2(self): optimizer = ScipyOptimizer(method='SLSQP') for (opt, param, ans) in problems.build_constrained_problem_list(): out_param = optimizer(opt, param) np.testing.assert_array_almost_equal(out_param.to_vector(), ans, decimal=4)
def infer_data_type(feature): if isinstance(feature, np.ndarray): if ((feature.dtype == np.float32) or (feature.dtype == np.float64)): return 'float32' elif ((feature.dtype == np.int32) or (feature.dtype == np.int64)): return 'int64' else: raise ValueError('Not supported data type {}'.format(feature.dtype)) elif isinstance(feature, (np.float32, np.float64, float)): return 'float32' elif isinstance(feature, (np.int32, np.int64, six.integer_types)): return 'int64' else: raise ValueError('Not supported data type {}'.format(type(feature)))
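infer_data_type collapses every supported input to one of two feature dtypes ('float32' or 'int64') regardless of input precision; it depends on six.integer_types for Python 2/3 compatibility. A few hand-checked cases:

import numpy as np

assert infer_data_type(np.zeros(3, dtype=np.float64)) == 'float32'
assert infer_data_type(np.array([1, 2], dtype=np.int32)) == 'int64'
assert infer_data_type(7) == 'int64'
assert infer_data_type(0.5) == 'float32'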
class DummyImpl:
    def __init__(self) -> None:
        self.fc1 = torch.nn.Linear(100, 100)
        self.fc2 = torch.nn.Linear(100, 100)
        self.optim = torch.optim.Adam(self.fc1.parameters())
        self.modules = DummyModules(self.fc1, self.optim)
        self.device = 'cpu:0'

    @train_api
    def train_api_func(self) -> None:
        assert self.fc1.training
        assert not self.fc2.training

    @eval_api
    def eval_api_func(self) -> None:
        assert not self.fc1.training
        assert self.fc2.training
@deprecate_class(removal_version='0.19.0', future_warn=True)
class Simple(WeightedLeastSquares):
    def __init__(self, mesh=None, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0, **kwargs):
        super().__init__(mesh=mesh,
                         length_scale_x=alpha_x,
                         length_scale_y=alpha_y,
                         length_scale_z=alpha_z,
                         **kwargs)
@schema_utils.polymorphic_model()
class GdsMeshEps(EpsilonSpec):
    type = schema_utils.polymorphic_model_type('gds_mesh')
    gds = types.StringType()
    background = types.ModelType(Material)
    mesh_list = types.ListType(types.PolyModelType(Mesh))
    stack_normal = optplan.vec3d()
class TestFixedKeyConfigDictionary(unittest.TestCase): def setUp(self): self.dictionary = {'zero': 0, 'zeroStr': 'zero', '1': 'one', '2': '', 'None': None} def test_config_correct_attributes(self): class SomeTestConfigClass(FixedKeyConfigDictionary): _REQUIRED_ATTRIBUTES = {'zero': int, 'zeroStr': str} _OPTIONAL_ATTRIBUTES = {'1': 'one', '2': 'two', 'None': 'StrVerOfNone', 'missingKey': 0} expectedDictionary = {'zero': 0, 'zeroStr': 'zero', '1': 'one', '2': '', 'None': 'StrVerOfNone', 'missingKey': 0} config = SomeTestConfigClass(self.dictionary) for (key, value) in config.items(): self.assertEqual(value, expectedDictionary[key]) def test_config_missing_required_attribute(self): class SomeTestConfigClass(FixedKeyConfigDictionary): _REQUIRED_ATTRIBUTES = {'zero': int, 'zeroStr': str, 'requiredKey': int} _OPTIONAL_ATTRIBUTES = {'1': 'one', '2': 'two', 'None': 'StrVerOfNone', 'missingKey': 0} with self.assertRaises(ValueError): SomeTestConfigClass(self.dictionary) def test_config_unexpected_attribute1(self): class SomeTestConfigClass(FixedKeyConfigDictionary): _REQUIRED_ATTRIBUTES = {'zero': int, 'zeroStr': str} _OPTIONAL_ATTRIBUTES = {'2': 'two', 'None': 'StrVerOfNone', 'missingKey': 0} with self.assertRaises(ValueError): SomeTestConfigClass(self.dictionary) def test_config_unexpected_attribute2(self): class SomeTestConfigClass(FixedKeyConfigDictionary): _REQUIRED_ATTRIBUTES = {'zero': int} _OPTIONAL_ATTRIBUTES = {'1': 'one', '2': 'two', 'None': 'StrVerOfNone', 'missingKey': 0} with self.assertRaises(ValueError): SomeTestConfigClass(self.dictionary) def test_config_required_type_mismatch(self): class SomeTestConfigClass(FixedKeyConfigDictionary): _REQUIRED_ATTRIBUTES = {'zero': int, 'zeroStr': int} _OPTIONAL_ATTRIBUTES = {'1': 'one', '2': 'two', 'None': 'StrVerOfNone', 'missingKey': 0} with self.assertRaises(ValueError): SomeTestConfigClass(self.dictionary) def test_config_optional_type_mismatch(self): class SomeTestConfigClass(FixedKeyConfigDictionary): _REQUIRED_ATTRIBUTES = {'zero': int, 'zeroStr': str} _OPTIONAL_ATTRIBUTES = {'1': 1, '2': 'two', 'None': 'StrVerOfNone', 'missingKey': 0} with self.assertRaises(ValueError): SomeTestConfigClass(self.dictionary)
class SyncTestCase(TorchTestCase): def _syncParameters(self, bn1, bn2): bn1.reset_parameters() bn2.reset_parameters() if (bn1.affine and bn2.affine): bn2.weight.data.copy_(bn1.weight.data) bn2.bias.data.copy_(bn1.bias.data) def _checkBatchNormResult(self, bn1, bn2, input, is_train, cuda=False): bn1.train(mode=is_train) bn2.train(mode=is_train) if cuda: input = input.cuda() self._syncParameters(_find_bn(bn1), _find_bn(bn2)) input1 = Variable(input, requires_grad=True) output1 = bn1(input1) output1.sum().backward() input2 = Variable(input, requires_grad=True) output2 = bn2(input2) output2.sum().backward() self.assertTensorClose(input1.data, input2.data) self.assertTensorClose(output1.data, output2.data) self.assertTensorClose(input1.grad, input2.grad) self.assertTensorClose(_find_bn(bn1).running_mean, _find_bn(bn2).running_mean) self.assertTensorClose(_find_bn(bn1).running_var, _find_bn(bn2).running_var) def testSyncBatchNormNormalTrain(self): bn = nn.BatchNorm1d(10) sync_bn = SynchronizedBatchNorm1d(10) self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True) def testSyncBatchNormNormalEval(self): bn = nn.BatchNorm1d(10) sync_bn = SynchronizedBatchNorm1d(10) self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False) def testSyncBatchNormSyncTrain(self): bn = nn.BatchNorm1d(10, eps=1e-05, affine=False) sync_bn = SynchronizedBatchNorm1d(10, eps=1e-05, affine=False) sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) bn.cuda() sync_bn.cuda() self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True, cuda=True) def testSyncBatchNormSyncEval(self): bn = nn.BatchNorm1d(10, eps=1e-05, affine=False) sync_bn = SynchronizedBatchNorm1d(10, eps=1e-05, affine=False) sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) bn.cuda() sync_bn.cuda() self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False, cuda=True) def testSyncBatchNorm2DSyncTrain(self): bn = nn.BatchNorm2d(10) sync_bn = SynchronizedBatchNorm2d(10) sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) bn.cuda() sync_bn.cuda() self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16), True, cuda=True)
def train(model, device, train_loader, optimizer): loss_func = torch.nn.CrossEntropyLoss() all_loss = [] prog_iter = tqdm(train_loader, desc='Training', leave=False) for (batch_idx, batch) in enumerate(prog_iter): (input_x, input_y) = tuple((t.to(device) for t in batch)) pred = model(input_x) loss = loss_func(pred, input_y) optimizer.zero_grad() loss.backward() optimizer.step() all_loss.append(loss.item())
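A minimal harness for train() with a toy classifier and random data; all names below are hypothetical, and the snippet also assumes torch and tqdm are imported at module level. Note the function accumulates all_loss but does not return it.

import torch
from torch.utils.data import DataLoader, TensorDataset

device = torch.device('cpu')
model = torch.nn.Linear(16, 4).to(device)           # toy 4-class classifier
dataset = TensorDataset(torch.randn(64, 16), torch.randint(0, 4, (64,)))
loader = DataLoader(dataset, batch_size=8)
optimizer = torch.optim.Adam(model.parameters())
train(model, device, loader, optimizer)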
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> 'tf.Tensor':
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for _ in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
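For example, with tensorflow imported as tf under TF2 eager execution:

ids = random_input_ids(batch_size=2, sequence_length=8, vocab_size=100)
assert ids.shape == (2, 8) and ids.dtype == tf.int32
assert int(tf.reduce_max(ids)) < 100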
def _linear_to_mel(spectrogram):
    global _mel_basis
    if _mel_basis is None:
        _mel_basis = _build_mel_basis()
    return np.dot(_mel_basis, spectrogram)
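The lazily built _mel_basis typically comes from librosa's mel filterbank. A hypothetical _build_mel_basis consistent with the lazy global above; the sample rate, FFT size, and mel-band count are assumptions.

import librosa

# Hypothetical helper assumed by _linear_to_mel; hyperparameters are guesses.
def _build_mel_basis(sample_rate=22050, n_fft=1024, num_mels=80):
    # Shape (num_mels, 1 + n_fft // 2), matching a linear STFT magnitude.
    return librosa.filters.mel(sr=sample_rate, n_fft=n_fft, n_mels=num_mels)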
def square_dist(X, X2): Xs = tf.reduce_sum(tf.square(X), 1) X2s = tf.reduce_sum(tf.square(X2), 1) return ((((- 2) * tf.matmul(X, X2, transpose_b=True)) + tf.reshape(Xs, ((- 1), 1))) + tf.reshape(X2s, (1, (- 1))))
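square_dist expands the identity ||x - y||^2 = ||x||^2 - 2 x^T y + ||y||^2 row-wise over the two input matrices. A quick NumPy cross-check (assuming TF2 eager execution):

import numpy as np
import tensorflow as tf

X = np.random.randn(5, 3).astype(np.float32)
X2 = np.random.randn(4, 3).astype(np.float32)
d = square_dist(tf.constant(X), tf.constant(X2)).numpy()     # shape (5, 4)
ref = ((X[:, None, :] - X2[None, :, :]) ** 2).sum(-1)
assert np.allclose(d, ref, atol=1e-5)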