class SRResNet(nn.Module): def __init__(self, scale=4, depth=16, n_colors=3, n_feats=64, conv=common.default_conv): super(SRResNet, self).__init__() self.conv = common.BasicBlock(n_colors, n_feats, 9, norm=None, act='prelu') resblock = (lambda : common.ResBlock(n_feats, 3, norm='batch', act='prelu', conv=conv)) m = [resblock() for _ in range(depth)] m.append(conv(n_feats, n_feats, 3)) m.append(nn.BatchNorm2d(n_feats)) self.resblocks = nn.Sequential(*m) self.recon = nn.Sequential(common.Upsampler(scale, n_feats, act='prelu', conv=conv), conv(n_feats, n_colors, 3)) self.url = None def get_kwargs(cfg, conv=common.default_conv): return {'scale': cfg.scale, 'depth': cfg.depth, 'n_colors': cfg.n_colors, 'n_feats': cfg.n_feats, 'conv': conv} def forward(self, x): x = self.conv(x) x = (x + self.resblocks(x)) x = self.recon(x) return x
def has_file_allowed_extension(filename: str, extensions: Union[(str, Tuple[(str, ...)])]) -> bool: return filename.lower().endswith((extensions if isinstance(extensions, str) else tuple(extensions)))
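A minimal usage sketch for the extension check above; the filenames are made up, and the function accepts either a single extension string or any iterable of extensions:
assert has_file_allowed_extension('photo.JPG', '.jpg')               # case-insensitive match
assert has_file_allowed_extension('clip.mp4', ('.avi', '.mp4'))      # tuple of extensions
assert not has_file_allowed_extension('notes.txt', ('.png', '.jpg'))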
def test_list_option_list_offset(): content = ak.contents.ListOffsetArray(ak.index.Index64(np.array([1, 3], dtype=np.int64)), ak.contents.ListOffsetArray(ak.index.Index64(np.array([0, 2, 2, 3], dtype=np.int64)), ak.contents.NumpyArray(np.array([2, 2, 2], dtype=np.int64)))) index = ak.contents.ListOffsetArray(ak.index.Index64(np.array([0, 2], dtype=np.int64)), ak.contents.IndexedOptionArray(ak.index.Index64(np.array([0, 1], dtype=np.int64)), ak.contents.ListOffsetArray(ak.index.Index64(np.array([0, 0, 1], dtype=np.int64)), ak.contents.NumpyArray(np.array([0], dtype=np.int64))))) assert (content[index].to_list() == [[[], [2]]])
class CMUDict(): 'Thin wrapper around CMUDict data.' def __init__(self, file_or_path, keep_ambiguous=True): if isinstance(file_or_path, str): with open(file_or_path, encoding='latin-1') as f: entries = _parse_cmudict(f) else: entries = _parse_cmudict(file_or_path) if (not keep_ambiguous): entries = {word: pron for (word, pron) in entries.items() if (len(pron) == 1)} self._entries = entries def __len__(self): return len(self._entries) def lookup(self, word): return self._entries.get(word.upper())
class TestClog(object): def test_simple(self): x = np.array([(1 + 0j), (1 + 2j)]) y_r = (np.log(np.abs(x)) + (1j * np.angle(x))) y = np.log(x) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) @pytest.mark.skipif((platform.machine() == 'armv5tel'), reason='See gh-413.') def test_special_values(self): xl = [] yl = [] with np.errstate(divide='raise'): x = np.array([np.NZERO], dtype=complex) y = complex((- np.inf), np.pi) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) with np.errstate(divide='raise'): x = np.array([0], dtype=complex) y = complex((- np.inf), 0) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(1, np.inf)], dtype=complex) y = complex(np.inf, (0.5 * np.pi)) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex((- 1), np.inf)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) with np.errstate(invalid='raise'): x = np.array([complex(1.0, np.nan)], dtype=complex) y = complex(np.nan, np.nan) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) with np.errstate(invalid='raise'): x = np.array([(np.inf + (1j * np.nan))], dtype=complex) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([((- np.inf) + 1j)], dtype=complex) y = complex(np.inf, np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([(np.inf + 1j)], dtype=complex) y = complex(np.inf, 0) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex((- np.inf), np.inf)], dtype=complex) y = complex(np.inf, (0.75 * np.pi)) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(np.inf, np.inf)], dtype=complex) y = complex(np.inf, (0.25 * np.pi)) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(np.inf, np.nan)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex((- np.inf), np.nan)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(np.nan, 1)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(np.nan, np.inf)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(np.nan, np.nan)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) xa = np.array(xl, dtype=complex) ya = np.array(yl, dtype=complex) with np.errstate(divide='ignore'): for i in range(len(xa)): assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
class PosteriorPredictiveCheck(): def __init__(self, adata: AnnData, models_dict: dict[(str, BaseModelClass)], count_layer_key: Optional[str]=None, n_samples: int=10): self.adata = adata self.count_layer_key = count_layer_key raw_counts = (adata.layers[count_layer_key] if (count_layer_key is not None) else adata.X) if isinstance(raw_counts, np.ndarray): self.raw_counts = GCXS.from_numpy(raw_counts, compressed_axes=(0,)) elif issparse(raw_counts): self.raw_counts = GCXS.from_scipy_sparse(raw_counts).change_compressed_axes((0,)) else: raise ValueError('raw_counts must be a numpy array or scipy sparse matrix') self.samples_dataset = None self.n_samples = n_samples self.models = models_dict self.metrics = {} self._store_posterior_predictive_samples() def __repr__(self) -> str: return f'''--- Posterior Predictive Checks --- n_samples = {self.n_samples} raw_counts shape = {self.raw_counts.shape} models: {list(self.models.keys())} metrics: {self._metrics_repr()}''' def _metrics_repr(self) -> str: def custom_handle_unserializable(o): if isinstance(o, AnnData): return f'AnnData object with n_obs={o.n_obs}, n_vars={o.n_vars}' elif isinstance(o, pd.DataFrame): s = f'Pandas DataFrame with shape={o.shape}, ' n_cols = 5 if (len(o.columns) > n_cols): return (s + f'first {n_cols} columns={o.columns[:n_cols].to_list()}') return (s + f'columns={o.columns.to_list()}') elif isinstance(o, pd.Series): return f'Pandas Series with n_rows={len(o)}' return f'ERROR unserializable type: {type(o)}' return json.dumps(self.metrics, indent=4, default=custom_handle_unserializable) def _store_posterior_predictive_samples(self, batch_size: int=32, indices: list[int]=None): self.batch_size = batch_size samples_dict = {} for (m, model) in self.models.items(): pp_counts = model.posterior_predictive_sample(model.adata, n_samples=self.n_samples, batch_size=self.batch_size, indices=indices) samples_dict[m] = DataArray(data=pp_counts, coords={'cells': self.adata.obs_names, 'features': model.adata.var_names, 'samples': np.arange(self.n_samples)}) samples_dict[DATA_VAR_RAW] = DataArray(data=self.raw_counts, coords={'cells': self.adata.obs_names, 'features': self.adata.var_names}) self.samples_dataset = Dataset(samples_dict) def coefficient_of_variation(self, dim: Dims='cells') -> None: identifier = (METRIC_CV_CELL if (dim == 'features') else METRIC_CV_GENE) mean = self.samples_dataset.mean(dim=dim, skipna=False) self.samples_dataset = np.square(self.samples_dataset) std = np.sqrt((self.samples_dataset.mean(dim=dim, skipna=False) - np.square(mean))) self.samples_dataset = np.sqrt(self.samples_dataset) cv = (std / mean) cv = _make_dataset_dense(cv) cv_mean = cv.mean(dim='samples', skipna=True) cv_mean[DATA_VAR_RAW].data = np.nan_to_num(cv_mean[DATA_VAR_RAW].data) self.metrics[identifier] = cv_mean.to_dataframe() def zero_fraction(self) -> None: pp_samples = self.samples_dataset mean = (pp_samples != 0).mean(dim='cells', skipna=False).mean(dim='samples', skipna=False) mean = _make_dataset_dense(mean) self.metrics[METRIC_ZERO_FRACTION] = mean.to_dataframe() def calibration_error(self, confidence_intervals: Optional[list[float]]=None) -> None: if (confidence_intervals is None): ps = [2.5, 5, 7.5, 10, 12.5, 15, 17.5, 82.5, 85, 87.5, 90, 92.5, 95, 97.5] ps = [(p / 100) for p in ps] else: if ((len(confidence_intervals) % 2) != 0): raise ValueError('Confidence intervals must be even') ps = confidence_intervals pp_samples = self.samples_dataset pp_samples = _make_dataset_dense(pp_samples) quants = pp_samples.quantile(q=ps, dim='samples', 
skipna=False) credible_interval_indices = [(i, (len(ps) - (i + 1))) for i in range((len(ps) // 2))] model_cal = {} for model in pp_samples.data_vars: if (model == DATA_VAR_RAW): continue cal_error_features = 0 for interval in credible_interval_indices: start = interval[0] end = interval[1] true_width = (ps[end] - ps[start]) greater_than = (quants[DATA_VAR_RAW] >= quants[model].isel(quantile=start)).data less_than = (quants[DATA_VAR_RAW] <= quants[model].isel(quantile=end)).data ci = (greater_than * less_than) pci_features = ci.mean() cal_error_features += ((pci_features - true_width) ** 2) model_cal[model] = {'features': cal_error_features} self.metrics[METRIC_CALIBRATION] = pd.DataFrame.from_dict(model_cal) @dependencies('scanpy') def differential_expression(self, de_groupby: str, de_method: str='t-test', n_samples: int=1, cell_scale_factor: float=10000.0, p_val_thresh: float=DEFAULT_DE_P_VAL_THRESHOLD, n_top_genes_fallback: int=DEFAULT_DE_N_TOP_GENES_OVERLAP): import scanpy as sc if (n_samples > self.n_samples): raise ValueError(f'n_samples={n_samples} is greater than the number of samples already recorded ({self.n_samples})') adata_de = AnnData(X=self.raw_counts.to_scipy_sparse().tocsr().copy(), obs=self.adata.obs, var=self.adata.var) sc.pp.normalize_total(adata_de, target_sum=cell_scale_factor) sc.pp.log1p(adata_de) with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning) sc.tl.rank_genes_groups(adata_de, de_groupby, use_raw=False, method=de_method, key_added=UNS_NAME_RGG_RAW) pp_samples = self.samples_dataset adata_approx = AnnData(X=adata_de.X, obs=adata_de.obs, var=adata_de.var) de_keys = {} models = [model for model in pp_samples.data_vars if (model != DATA_VAR_RAW)] for model in models: if (model not in de_keys): de_keys[model] = [] for k in range(n_samples): one_sample = pp_samples[model].isel(samples=k) one_sample_data = (one_sample.data.to_scipy_sparse().tocsr() if isinstance(one_sample.data, SparseArray) else one_sample) adata_approx.X = one_sample_data.copy() sc.pp.normalize_total(adata_approx, target_sum=cell_scale_factor) sc.pp.log1p(adata_approx) with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning) key_added = f'{UNS_NAME_RGG_PPC}_{model}_sample_{k}' de_keys[model].append(key_added) sc.tl.rank_genes_groups(adata_approx, de_groupby, use_raw=False, method=de_method, key_added=key_added) groups = self.adata.obs[de_groupby].astype('category').cat.categories df = pd.DataFrame(index=np.arange((len(groups) * len(models))), columns=['gene_overlap_f1', 'lfc_mae', 'lfc_pearson', 'lfc_spearman', 'roc_auc', 'pr_auc', 'group', 'model']) i = 0 self.metrics[METRIC_DIFF_EXP] = {} self.metrics[METRIC_DIFF_EXP]['lfc_per_model_per_group'] = {} for g in groups: raw_group_data = sc.get.rank_genes_groups_df(adata_de, group=g, key=UNS_NAME_RGG_RAW) raw_group_data.set_index('names', inplace=True) for model in de_keys.keys(): gene_overlap_f1s = [] rgds = [] sgds = [] lfc_maes = [] lfc_pearsons = [] lfc_spearmans = [] roc_aucs = [] pr_aucs = [] for de_key in de_keys[model]: sample_group_data = sc.get.rank_genes_groups_df(adata_approx, group=g, key=de_key) sample_group_data.set_index('names', inplace=True) all_genes = raw_group_data.index top_genes_raw = raw_group_data[:n_top_genes_fallback].index top_genes_sample = sample_group_data[:n_top_genes_fallback].index true_genes = np.array([(0 if (g not in top_genes_raw) else 1) for g in all_genes]) pred_genes = np.array([(0 if (g not in top_genes_sample) else
1) for g in all_genes]) gene_overlap_f1s.append(_get_precision_recall_f1(true_genes, pred_genes)[2]) sample_group_data = sample_group_data.loc[raw_group_data.index] (rgd, sgd) = (raw_group_data['logfoldchanges'], sample_group_data['logfoldchanges']) rgds.append(rgd) sgds.append(sgd) lfc_maes.append(np.mean(np.abs((rgd - sgd)))) lfc_pearsons.append(pearsonr(rgd, sgd)[0]) lfc_spearmans.append(spearmanr(rgd, sgd)[0]) raw_adj_p_vals = raw_group_data['pvals_adj'] true = (raw_adj_p_vals < p_val_thresh) pred = sample_group_data['scores'] if (true.sum() == 0): true = np.zeros_like(pred) true[np.argsort(raw_adj_p_vals)[:n_top_genes_fallback]] = 1 roc_aucs.append(roc_auc_score(true, pred)) pr_aucs.append(average_precision_score(true, pred)) df.loc[(i, 'model')] = model df.loc[(i, 'group')] = g df.loc[(i, 'gene_overlap_f1')] = np.mean(gene_overlap_f1s) df.loc[(i, 'lfc_mae')] = np.mean(lfc_maes) df.loc[(i, 'lfc_pearson')] = np.mean(lfc_pearsons) df.loc[(i, 'lfc_spearman')] = np.mean(lfc_spearmans) df.loc[(i, 'roc_auc')] = np.mean(roc_aucs) df.loc[(i, 'pr_auc')] = np.mean(pr_aucs) (rgd, sgd) = (pd.DataFrame(rgds).mean(axis=0), pd.DataFrame(sgds).mean(axis=0)) if (model not in self.metrics[METRIC_DIFF_EXP]['lfc_per_model_per_group'].keys()): self.metrics[METRIC_DIFF_EXP]['lfc_per_model_per_group'][model] = {} self.metrics[METRIC_DIFF_EXP]['lfc_per_model_per_group'][model][g] = pd.DataFrame([rgd, sgd], index=['raw', 'approx']).T i += 1 self.metrics[METRIC_DIFF_EXP]['summary'] = df
def train(run_dir: str='./run', datasets_dir: str='./data', dataset: str='imagenet', augmentation: bool=True, validation: int=0, shuffle: bool=True, arch: str='resnet18', optimizer: str='sgd', epochs: Tuple[(int, ...)]=(1, 1, 1, 1, 1, 25, 30, 20, 20), learning_rates: Tuple[(float, ...)]=(0.0167, 0.0333, 0.05, 0.0667, 0.0833, 0.1, 0.01, 0.001, 0.0001), scale_learning_rates: bool=True, momentum: float=0.9, weight_decay: float=0.0001, batch_size: int=256, eval_batch_size: int=256, fp16: bool=False, label_smoothing: float=0.1, loss_scale: float=256.0, cuda: bool=True, device_ids: Tuple[(int, ...)]=tuple(range(cuda.device_count())), num_workers: int=0, eval_num_workers: int=0, seed: Optional[int]=None, checkpoint: str='best', track_test_acc: bool=True): seed = utils.set_random_seed(seed) config = utils.capture_config(**locals()) if scale_learning_rates: learning_rates = tuple((np.array(learning_rates) * (batch_size / 256))) config['scaled_learning_rates'] = learning_rates run_dir = utils.create_run_dir(run_dir, timestamp=config['timestamp']) utils.save_config(config, run_dir) (use_cuda, device, device_ids, num_workers) = utils.config_run_env(cuda=cuda, device_ids=device_ids, num_workers=num_workers) train_dataset = create_dataset(dataset, datasets_dir, train=True, augmentation=augmentation) test_dataset = None if track_test_acc: test_dataset = create_dataset(dataset, datasets_dir, train=False, augmentation=False) (train_loader, dev_loader, test_loader) = create_loaders(train_dataset, batch_size=batch_size, eval_batch_size=eval_batch_size, validation=validation, run_dir=run_dir, test_dataset=test_dataset, use_cuda=use_cuda, shuffle=shuffle, num_workers=num_workers, eval_num_workers=eval_num_workers) num_classes = 1000 (model, _optimizer) = create_model_and_optimizer(run_dir=run_dir, arch=arch, num_classes=num_classes, optimizer=optimizer, learning_rate=learning_rates[0], momentum=momentum, weight_decay=weight_decay) criterion = _LabelSmoothing(label_smoothing) model = model.to(device) criterion = criterion.to(device) if fp16: from apex import amp (model, _optimizer) = amp.initialize(model, _optimizer, loss_scale=loss_scale) if use_cuda: model = nn.DataParallel(model, device_ids=device_ids) return run_training(model=model, optimizer=_optimizer, criterion=criterion, device=device, train_loader=train_loader, epochs=epochs, learning_rates=learning_rates, dev_loader=dev_loader, test_loader=test_loader, fp16=fp16, run_dir=run_dir, checkpoint=checkpoint)
def print_small_float(x): s = ('%.1E' % x) idx = s.find('E') if (s[(idx + 2)] == '0'): s = (s[:(idx + 2)] + s[(idx + 3):]) return s
def register_Ns3PbbTlvBlock_methods(root_module, cls): cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('!=') cls.add_constructor([param('ns3::PbbTlvBlock const &', 'arg0')]) cls.add_constructor([]) cls.add_method('Back', 'ns3::Ptr< ns3::PbbTlv >', [], is_const=True) cls.add_method('Begin', 'ns3::PbbTlvBlock::Iterator', []) cls.add_method('Begin', 'ns3::PbbTlvBlock::ConstIterator', [], is_const=True) cls.add_method('Clear', 'void', []) cls.add_method('Deserialize', 'void', [param('ns3::Buffer::Iterator &', 'start')]) cls.add_method('Empty', 'bool', [], is_const=True) cls.add_method('End', 'ns3::PbbTlvBlock::Iterator', []) cls.add_method('End', 'ns3::PbbTlvBlock::ConstIterator', [], is_const=True) cls.add_method('Erase', 'ns3::PbbTlvBlock::Iterator', [param('std::list< ns3::Ptr< ns3::PbbTlv > > iterator', 'position')]) cls.add_method('Erase', 'ns3::PbbTlvBlock::Iterator', [param('std::list< ns3::Ptr< ns3::PbbTlv > > iterator', 'first'), param('std::list< ns3::Ptr< ns3::PbbTlv > > iterator', 'last')]) cls.add_method('Front', 'ns3::Ptr< ns3::PbbTlv >', [], is_const=True) cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) cls.add_method('Insert', 'ns3::PbbTlvBlock::Iterator', [param('std::list< ns3::Ptr< ns3::PbbTlv > > iterator', 'position'), param('ns3::Ptr< ns3::PbbTlv > const', 'tlv')]) cls.add_method('PopBack', 'void', []) cls.add_method('PopFront', 'void', []) cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('int', 'level')], is_const=True) cls.add_method('PushBack', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')]) cls.add_method('PushFront', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')]) cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True) cls.add_method('Size', 'int', [], is_const=True) return
def make_divisible(x: float, widen_factor: float=1.0, divisor: int=8) -> int: return (math.ceil(((x * widen_factor) / divisor)) * divisor)
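A few worked values for make_divisible, assuming math has been imported; the function rounds a (possibly widened) channel count up to the nearest multiple of divisor:
import math  # make_divisible relies on math.ceil
assert make_divisible(37) == 40                     # ceil(37 / 8) * 8
assert make_divisible(64) == 64                     # already a multiple of 8
assert make_divisible(30, widen_factor=0.5) == 16   # ceil(15 / 8) * 8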
class BaseModel(): def __init__(self, opt): self.opt = opt self.device = torch.device(('cuda' if (opt['gpu_ids'] is not None) else 'cpu')) self.is_train = opt['is_train'] self.schedulers = [] self.optimizers = [] def feed_data(self, data): pass def optimize_parameters(self): pass def get_current_visuals(self): pass def get_current_losses(self): pass def print_network(self): pass def save(self, label): pass def load(self): pass def _set_lr(self, lr_groups_l): for (optimizer, lr_groups) in zip(self.optimizers, lr_groups_l): for (param_group, lr) in zip(optimizer.param_groups, lr_groups): param_group['lr'] = lr def _get_init_lr(self): init_lr_groups_l = [] for optimizer in self.optimizers: init_lr_groups_l.append([v['initial_lr'] for v in optimizer.param_groups]) return init_lr_groups_l def update_learning_rate(self, cur_iter, warmup_iter=(- 1)): for scheduler in self.schedulers: scheduler.step() if (cur_iter < warmup_iter): init_lr_g_l = self._get_init_lr() warm_up_lr_l = [] for init_lr_g in init_lr_g_l: warm_up_lr_l.append([((v / warmup_iter) * cur_iter) for v in init_lr_g]) self._set_lr(warm_up_lr_l) def get_current_learning_rate(self): lr_l = [] for param_group in self.optimizers[0].param_groups: lr_l.append(param_group['lr']) return lr_l def get_network_description(self, network): if (isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel)): network = network.module s = str(network) n = sum(map((lambda x: x.numel()), network.parameters())) return (s, n) def save_network(self, network, network_label, iter_label): save_filename = '{}_{}.pth'.format(iter_label, network_label) save_path = os.path.join(self.opt['path']['models'], save_filename) if (isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel)): network = network.module state_dict = network.state_dict() for (key, param) in state_dict.items(): state_dict[key] = param.cpu() torch.save(state_dict, save_path) def load_network(self, load_path, network, strict=True): if (isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel)): network = network.module load_net = torch.load(load_path) if ('params' in load_net.keys()): load_net = load_net['params'] load_net_clean = OrderedDict() for (k, v) in load_net.items(): if k.startswith('module.'): load_net_clean[k[7:]] = v else: load_net_clean[k] = v network.load_state_dict(load_net_clean, strict=strict) def save_training_state(self, epoch, iter_step): state = {'epoch': epoch, 'iter': iter_step, 'schedulers': [], 'optimizers': []} for s in self.schedulers: state['schedulers'].append(s.state_dict()) for o in self.optimizers: state['optimizers'].append(o.state_dict()) save_filename = '{}.state'.format(iter_step) save_path = os.path.join(self.opt['path']['training_state'], save_filename) torch.save(state, save_path) def resume_training(self, resume_state): resume_optimizers = resume_state['optimizers'] resume_schedulers = resume_state['schedulers'] assert (len(resume_optimizers) == len(self.optimizers)), 'Wrong lengths of optimizers' assert (len(resume_schedulers) == len(self.schedulers)), 'Wrong lengths of schedulers' for (i, o) in enumerate(resume_optimizers): self.optimizers[i].load_state_dict(o) for (i, s) in enumerate(resume_schedulers): self.schedulers[i].load_state_dict(s)
def chord_function_symbol(index): chord_function_lookup = [0, 0, 1, 1, 1, 0, 0, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2] return chord_function_lookup[index]
@parameter(name='mediation_year', default=1980, values=[1980, 2000, 2030, 2030], description='Year to choose the mediation states.') @parameter(name='num_mediators', default=3, values=[0, 1, 2, 3, 4], description='How many mediating states to choose.') def mediation_covariates(run, intervention, mediation_year, num_mediators): confounders = run[intervention.time].values() mediators = run[mediation_year].values() return np.concatenate([confounders, mediators[:num_mediators]])
def cross1(c, a, b): (a0, a1, a2) = (a[0], a[1], a[2]) (b0, b1, b2) = (b[0], b[1], b[2]) c[0] = numexpr.evaluate('a1*b2-a2*b1') c[1] = numexpr.evaluate('a2*b0-a0*b2') c[2] = numexpr.evaluate('a0*b1-a1*b0') return c
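A quick check of cross1 against NumPy's reference cross product; this assumes numpy and numexpr are importable and uses arrays shaped (3, N), so each component row is a full vector of values:
import numpy as np
import numexpr  # used inside cross1
a = np.random.rand(3, 1000)
b = np.random.rand(3, 1000)
c = cross1(np.empty_like(a), a, b)
assert np.allclose(c, np.cross(a, b, axis=0))  # component-wise cross product agrees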
class EventWriter(): def __init__(self, path: str): self.file = tempfile.NamedTemporaryFile(dir=path, suffix='.csv.zst', delete=False) compressor = zstandard.ZstdCompressor(level=1) self.o = io.TextIOWrapper(compressor.stream_writer(self.file)) self.rows_written = 0 self.writer = csv.DictWriter(self.o, fieldnames=['patient_id', 'start', 'concept_id', 'value', 'metadata']) self.writer.writeheader() def add_event(self, patient_id: int, event: RawEvent) -> None: self.rows_written += 1 data: Dict[(str, Any)] = {} data['patient_id'] = patient_id data['start'] = event.start.isoformat() data['concept_id'] = str(event.concept_id) data['value'] = _encode_value(event.value) data['metadata'] = base64.b64encode(pickle.dumps({a: b for (a, b) in event.__dict__.items() if (a not in ('start', 'concept_id', 'value'))})).decode('utf8') self.writer.writerow(data) def close(self) -> None: if (self.rows_written == 0): warnings.warn(f'Zero rows were written by the `EventWriter` for file: {self.file.name}') self.o.close()
class ClassLieGroupOps(ClassGroupOps): def tangent_dim(a: T.ElementOrType) -> int: return a.tangent_dim() def from_tangent(a: T.ElementOrType, vec: T.List[T.Scalar], epsilon: T.Scalar) -> T.Element: return a.from_tangent(vec, epsilon) def to_tangent(a: T.Element, epsilon: T.Scalar) -> T.List[T.Scalar]: return a.to_tangent(epsilon) def storage_D_tangent(a: T.Element) -> geo.Matrix: return a.storage_D_tangent() def tangent_D_storage(a: T.Element) -> geo.Matrix: return a.tangent_D_storage() def retract(a: T.Element, vec: T.Sequence[T.Scalar], epsilon: T.Scalar) -> T.Element: return a.retract(vec, epsilon) def local_coordinates(a: T.Element, b: T.Element, epsilon: T.Scalar=sf.epsilon()) -> T.List[T.Scalar]: return a.local_coordinates(b, epsilon)
def get_experiment_name(config_d): model_type = get_model_type(config_d) timestamp = get_timestamp() hp_name = get_hp_value_name(config_d) return ('%s_%s_%s' % (model_type, hp_name, timestamp))
def parse_args(): parser = argparse.ArgumentParser(description='TRAIN SKLARGE') parser.add_argument('--root', default='./SKLARGE', type=str) parser.add_argument('--files', default='./SKLARGE/aug_data/train_pair.lst', type=str) parser.add_argument('--network', default='hed', type=str) parser.add_argument('--pretrained_model', default='pretrained_model/vgg16_caffe.pth', type=str) parser.add_argument('--gpu_id', default=0, type=int) parser.add_argument('--lr', default=1e-06, type=float) parser.add_argument('--lr_step', default=20000, type=int) parser.add_argument('--lr_gamma', default=0.1, type=float) parser.add_argument('--momentum', default=0.9, type=float) parser.add_argument('--weight_decay', default=0.0002, type=float) parser.add_argument('--iter_size', default=10, type=int) parser.add_argument('--max_step', default=25000, type=int) parser.add_argument('--save_interval', default=5000, type=int) parser.add_argument('--disp_interval', default=50, type=int) parser.add_argument('--resume_iter', default=0, type=int) parser.add_argument('--resume_path', default=None, type=str) args = parser.parse_args() return args
def residual_block(incoming, scope, nonlinearity=tf.nn.elu, weights_initializer=tf.truncated_normal_initializer(1000.0), bias_initializer=tf.zeros_initializer(), regularizer=None, increase_dim=False, is_first=False, summarize_activations=True): def network_builder(x, s): return create_inner_block(x, s, nonlinearity, weights_initializer, bias_initializer, regularizer, increase_dim, summarize_activations) return create_link(incoming, network_builder, scope, nonlinearity, weights_initializer, regularizer, is_first, summarize_activations)
def test_insert_random_call_on_object_retry(variable_reference_mock, default_test_case): test_cluster = MagicMock(ModuleTestCluster) test_cluster.num_accessible_objects_under_test.return_value = 1 test_factory = tf.TestFactory(test_cluster) with mock.patch.object(test_factory, '_select_random_variable_for_call') as select_mock: select_mock.return_value = variable_reference_mock with mock.patch.object(test_factory, 'insert_random_call_on_object_at') as insert_random_at_mock: insert_random_at_mock.return_value = False with mock.patch.object(test_factory, 'insert_random_call') as insert_random_mock: insert_random_mock.return_value = False assert (not test_factory.insert_random_call_on_object(default_test_case, 0)) select_mock.assert_called_with(default_test_case, 0) insert_random_at_mock.assert_called_with(default_test_case, variable_reference_mock, 0) insert_random_mock.assert_called_with(default_test_case, 0)
def _initialized_tensor(*sizes): weight = nn.Parameter(torch.Tensor(*sizes)) nn.init.kaiming_uniform_(weight) return weight
class GradedCoalgebrasWithBasis(GradedModulesCategory): class SignedTensorProducts(SignedTensorProductsCategory): @cached_method def extra_super_categories(self): return [self.base_category()]
def _process_date_pattern(dp): (pattern, mask) = dp regex = pattern regex = regex.replace('.', re.escape('.')) regex = regex.replace('-', re.escape('-')) regex = regex.replace(' ', '\\s+') for (field, field_regex) in _FIELD_TO_REGEX: regex = regex.replace(field, field_regex) assert ('%' not in regex), regex return (pattern, mask, re.compile((('^' + regex) + '$')))
class GPTNeoXPreTrainedModel(metaclass=DummyObject): _backends = ['torch'] def __init__(self, *args, **kwargs): requires_backends(self, ['torch'])
class EnumeratedSetFromIterator_function_decorator(Decorator): def __init__(self, f=None, name=None, **options): if (f is not None): self.f = f self.__name__ = f.__name__ self.__module__ = f.__module__ self.af = ArgumentFixer(f) if (name is not None): self.name = name self.options = options def __call__(self, *args, **kwds): if hasattr(self, 'f'): if hasattr(self, 'name'): if isinstance(self.name, str): if (args or kwds): (_, kk) = self.af.fix_to_named(*args, **kwds) name = (self.name % dict(kk)) else: name = self.name else: name = self.name(*args, **kwds) return EnumeratedSetFromIterator(self.f, args, kwds, name=name, **self.options) return EnumeratedSetFromIterator(self.f, args, kwds, **self.options) else: if (args == ()): (f,) = kwds.values() else: assert (len(args) == 1) f = args[0] return EnumeratedSetFromIterator_function_decorator(f, name=getattr(self, 'name', None), **self.options)
def mask_fill(t, mask, num): t_dtype = t.dtype mask = tf.cast(mask, dtype=t_dtype) neg_mask = (1 - mask) filled_t = ((t * mask) + (neg_mask * num)) return filled_t
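A small illustration of mask_fill: positions where the mask is 0 are overwritten with num (here a large negative value, as is common before a softmax); the tensor values are illustrative:
import tensorflow as tf
t = tf.constant([[1.0, 2.0], [3.0, 4.0]])
mask = tf.constant([[1, 0], [0, 1]])
filled = mask_fill(t, mask, -1e9)
# [[ 1.0e+00 -1.0e+09]
#  [-1.0e+09  4.0e+00]]
print(filled.numpy())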
class POPEDataSet(Dataset): def __init__(self, pope_path, data_path, trans): self.pope_path = pope_path self.data_path = data_path self.trans = trans (image_list, query_list, label_list) = ([], [], []) for q in open(pope_path, 'r'): line = json.loads(q) image_list.append(line['image']) query_list.append(line['text']) label_list.append(line['label']) for i in range(len(label_list)): if (label_list[i] == 'no'): label_list[i] = 0 else: label_list[i] = 1 assert (len(image_list) == len(query_list)) assert (len(image_list) == len(label_list)) self.image_list = image_list self.query_list = query_list self.label_list = label_list def __len__(self): return len(self.label_list) def __getitem__(self, index): image_path = os.path.join(self.data_path, self.image_list[index]) raw_image = Image.open(image_path).convert('RGB') image = self.trans(raw_image) query = self.query_list[index] label = self.label_list[index] return {'image': image, 'query': query, 'label': label}
class CIFAR100JustY(CIFAR10JustY): base_folder = 'cifar-100-python' url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' filename = 'cifar-100-python.tar.gz' tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' train_list = [['train', '16019d7e3df5f24257cddd939b257f8d']] test_list = [['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc']] meta = {'filename': 'meta', 'key': 'fine_label_names', 'md5': '7973b15100ade9c7d40fb424638fde48'}
def test_copysign(): assert_((np.copysign(1, (- 1)) == (- 1))) with np.errstate(divide='ignore'): assert_(((1 / np.copysign(0, (- 1))) < 0)) assert_(((1 / np.copysign(0, 1)) > 0)) assert_(np.signbit(np.copysign(np.nan, (- 1)))) assert_((not np.signbit(np.copysign(np.nan, 1))))
def get_args(): parser = argparse.ArgumentParser(prog='ordinary_demo', usage='Demo with synthetic data.', description='description', epilog='end', add_help=True) parser.add_argument('-ds', '--dataset', help='dataset name', default='gaussian', type=str) parser.add_argument('-lr', '--learning_rate', help="optimizer's learning rate", default=0.001, type=float) parser.add_argument('-bstr', '--batch_size_tr', help='mini batch size for training', default=200, type=int) parser.add_argument('-bste', '--batch_size_te', help='mini batch size for validation and test', default=500, type=int) parser.add_argument('-ts', '--training_samples', help='training samples per class', default=500, type=int) parser.add_argument('-vs', '--validation_samples', help='validation samples per class', default=50, type=int) parser.add_argument('-ln', '--label_noise', help='label noise', default=0.1, type=float) parser.add_argument('-nl', '--noise_level', help='noise level (for spiral)', default=1, type=float) parser.add_argument('-gn', '--gradient_norm', help='norm for gradient clipping (disabled if value is less than zero)', default=(- 1), type=float) parser.add_argument('-d', '--dimension', help='number of dimensions', default=10, type=int) parser.add_argument('-m', '--model', help='model name in torchvision', type=str, default='mlp_model') parser.add_argument('-md', '--middle_dim', help='middle dim of model', type=int, default=100) parser.add_argument('-e', '--epochs', help='number of epochs', type=int, default=500) parser.add_argument('-wd', '--weight_decay', help='weight decay', type=float, default=0) parser.add_argument('-mm', '--momentum', help='momentum', type=float, default=0) parser.add_argument('-fl', '--flood_level', help='loss threshold used for flooding', type=float, default=0.0) parser.add_argument('-ngm', '--negative_gaussian_mean', help='negative gaussian mean', default=1, type=float) parser.add_argument('-rs', '--random_seed', help='set random seed', default=0, type=int) parser.add_argument('-tg', '--tags', help='experiment tags. Format: "key1:value1,key2:value2[,...]"', type=str) parser.add_argument('-lb', '--labels', help='comma-separated experiment labels that will be the keys of experiment tags with value=true.', type=str) parser.add_argument('-opt', '--optimizer', help='optimizer name', type=str, default='sgd') parser.add_argument('-sm', '--save_model', help='save models at the beginning of flooding, with best validation accuracy, and after the final epoch.', type=str, default='False') parser.add_argument('-gpu', '--gpu_id', help='gpu id', type=int, default=0) args = parser.parse_args() return args
class TestEvaluation(TestCase): def test__proc_data_continous_column(self): df = pd.DataFrame({0: [1, 2, 3], 1: ['label_1', 'label_2', 'label_3']}) continuous_cols = [0] expected_features = np.array([[1], [2], [3]]) expected_labels = np.array(['label_1', 'label_2', 'label_3']) result = evaluation._proc_data(df, continuous_cols) (features, labels) = result assert_equal(features, expected_features) assert_equal(labels, expected_labels) def test__proc_data_category_column(self): df = pd.DataFrame({0: ['A', 'B', 'A'], 1: ['label_1', 'label_2', 'label_3']}) continuous_cols = [] expected_features = np.array([[1, 0], [0, 1], [1, 0]]) expected_labels = np.array(['label_1', 'label_2', 'label_3']) result = evaluation._proc_data(df, continuous_cols) (features, labels) = result assert_equal(features, expected_features) assert_equal(labels, expected_labels) @patch('tgan.research.evaluation._proc_data', autospec=True) def test_evaluate_classification(self, proc_mock): train_csv = pd.DataFrame(['train_data']) test_csv = pd.DataFrame(['test_data']) continuous_cols = [] classifier_spec = {'predict.return_value': 'array of predictions'} classifier = MagicMock(**classifier_spec) metric = MagicMock(return_value='score for model') proc_mock.return_value = (['feature_1', 'feature_2'], ['label_1', 'label_2']) expected_result = 'score for model' result = evaluation.evaluate_classification(train_csv, test_csv, continuous_cols, classifier, metric) assert (result == expected_result) classifier.fit.assert_called_once_with(['feature_1'], ['label_1']) classifier.predict.assert_called_once_with(['feature_2']) metric.assert_called_once_with(['label_2'], 'array of predictions')
def get_dataloaders(args, config, second_config=None): if (args.dataset == 'mnist'): (train_loader, test_loader) = get_dataloader(args) (retrain_loader, _) = get_dataloader(args, no_randomness=args.no_random_trainloaders) elif (args.dataset.lower()[0:7] == 'cifar10'): assert (config is not None) args.cifar_init_lr = config['optimizer_learning_rate'] if (args.second_model_name is not None): assert (second_config is not None) assert (args.cifar_init_lr == second_config['optimizer_learning_rate']) print('loading {} dataloaders'.format(args.dataset.lower())) (train_loader, test_loader) = cifar_train.get_dataset(config) (retrain_loader, _) = cifar_train.get_dataset(config, no_randomness=args.no_random_trainloaders) return (train_loader, test_loader, retrain_loader)
def receive_permutation(item, permutation, rank_start=0, rank_end=100): response = clean_response(permutation) response = [(int(x) - 1) for x in response.split()] response = remove_duplicate(response) cut_range = copy.deepcopy(item['hits'][rank_start:rank_end]) original_rank = [tt for tt in range(len(cut_range))] response = [ss for ss in response if (ss in original_rank)] response = (response + [tt for tt in original_rank if (tt not in response)]) for (j, x) in enumerate(response): item['hits'][(j + rank_start)] = copy.deepcopy(cut_range[x]) if ('rank' in item['hits'][(j + rank_start)]): item['hits'][(j + rank_start)]['rank'] = cut_range[j]['rank'] if ('score' in item['hits'][(j + rank_start)]): item['hits'][(j + rank_start)]['score'] = cut_range[j]['score'] return item
def update_db(collection, payload): try: for item in payload: collection.update_one({'$and': [{'outlet': item['outlet']}, {'publishedAt': item['publishedAt']}]}, {'$set': {'totalArticles': item['totalArticles']}}, upsert=True) except Exception as e: print(f'Error: {e}')
def test_check_array_min_samples_and_features_messages(): msg = '0 feature\\(s\\) \\(shape=\\(1, 0\\)\\) while a minimum of 1 is required.' with pytest.raises(ValueError, match=msg): check_array([[]]) msg = '0 sample\\(s\\) \\(shape=\\(0,\\)\\) while a minimum of 1 is required.' with pytest.raises(ValueError, match=msg): check_array([], ensure_2d=False) msg = 'Singleton array array\\(42\\) cannot be considered a valid collection.' with pytest.raises(TypeError, match=msg): check_array(42, ensure_2d=False) X = np.ones((1, 10)) y = np.ones(1) msg = '1 sample\\(s\\) \\(shape=\\(1, 10\\)\\) while a minimum of 2 is required.' with pytest.raises(ValueError, match=msg): check_X_y(X, y, ensure_min_samples=2) with pytest.raises(ValueError, match=msg): check_X_y(X, y, ensure_min_samples=2, ensure_2d=False) X = np.ones((10, 2)) y = np.ones(2) msg = '2 feature\\(s\\) \\(shape=\\(10, 2\\)\\) while a minimum of 3 is required.' with pytest.raises(ValueError, match=msg): check_X_y(X, y, ensure_min_features=3) with pytest.raises(ValueError, match=msg): check_X_y(X, y, ensure_min_features=3, allow_nd=True) X = np.empty(0).reshape(10, 0) y = np.ones(10) msg = '0 feature\\(s\\) \\(shape=\\(10, 0\\)\\) while a minimum of 1 is required.' with pytest.raises(ValueError, match=msg): check_X_y(X, y) X = np.ones((10, 0, 28, 28)) y = np.ones(10) (X_checked, y_checked) = check_X_y(X, y, allow_nd=True) assert_array_equal(X, X_checked) assert_array_equal(y, y_checked)
def process_line_pair(line_zh, line_en, setname): if (setname != 'test'): line_en = remove_control_chars(line_en) line_zh = remove_control_chars(line_zh) line_en = replace_nonbreaking_whitespace(line_en) line_zh = replace_nonbreaking_whitespace(line_zh) if ((not contain_chinese(line_zh)) or contain_chinese(line_en)): return None (line_zh, l1) = tokenize(line_zh, True) (line_en, l2) = tokenize(line_en, False) else: if ((len(line_en) == 0) or (len(line_zh) == 0)): return None (line_zh, l1) = tokenize(line_zh, True) (line_en, l2) = tokenize(line_en, False) return (line_zh, line_en)
def get_weights_for_loss(fxp_model: Model) -> Tuple[(List[list], List[list])]: flp_weights_list = [] fxp_weights_list = [] for layer in fxp_model.layers: if isinstance(layer, KerasTrainableQuantizationWrapper): (_layer_flp_weights, _layer_fxp_weights) = ([], []) for (weight, quantizer_vars, quantizer) in layer.get_weights_vars(): _layer_flp_weights.append(quantizer_vars) _layer_fxp_weights.append(quantizer(training=False, inputs=quantizer_vars)) flp_weights_list.append(_layer_flp_weights) fxp_weights_list.append(_layer_fxp_weights) return (flp_weights_list, fxp_weights_list)
class ColorizationDataset(BaseDataset): def modify_commandline_options(parser, is_train): parser.set_defaults(input_nc=1, output_nc=2, direction='AtoB') return parser def __init__(self, opt): BaseDataset.__init__(self, opt) self.dir = os.path.join(opt.dataroot) self.AB_paths = sorted(make_dataset(self.dir, opt.max_dataset_size)) assert ((opt.input_nc == 1) and (opt.output_nc == 2) and (opt.direction == 'AtoB')) self.transform = get_transform(self.opt, convert=False) def __getitem__(self, index): path = self.AB_paths[index] im = Image.open(path).convert('RGB') im = self.transform(im) im = np.array(im) lab = color.rgb2lab(im).astype(np.float32) lab_t = transforms.ToTensor()(lab) A = ((lab_t[([0], ...)] / 50.0) - 1.0) B = (lab_t[([1, 2], ...)] / 110.0) return {'A': A, 'B': B, 'A_paths': path, 'B_paths': path} def __len__(self): return len(self.AB_paths)
class Individual(object): def __init__(self, net_info): self.net_info = net_info self.gene = np.zeros(((self.net_info.node_num + self.net_info.out_num), (self.net_info.max_in_num + 1))).astype(int) self.is_active = np.empty((self.net_info.node_num + self.net_info.out_num)).astype(bool) self.eval = None self.init_gene() def init_gene(self): for n in range((self.net_info.node_num + self.net_info.out_num)): type_num = (self.net_info.func_type_num if (n < self.net_info.node_num) else self.net_info.out_type_num) self.gene[n][0] = np.random.randint(type_num) col = np.min((int((n / self.net_info.rows)), self.net_info.cols)) max_connect_id = ((col * self.net_info.rows) + self.net_info.input_num) min_connect_id = ((((col - self.net_info.level_back) * self.net_info.rows) + self.net_info.input_num) if ((col - self.net_info.level_back) >= 0) else 0) for i in range(self.net_info.max_in_num): self.gene[n][(i + 1)] = (min_connect_id + np.random.randint((max_connect_id - min_connect_id))) self.check_active() def __check_course_to_out(self, n): if (not self.is_active[n]): self.is_active[n] = True t = self.gene[n][0] if (n >= self.net_info.node_num): in_num = self.net_info.out_in_num[t] else: in_num = self.net_info.func_in_num[t] for i in range(in_num): if (self.gene[n][(i + 1)] >= self.net_info.input_num): self.__check_course_to_out((self.gene[n][(i + 1)] - self.net_info.input_num)) def check_active(self): self.is_active[:] = False for n in range(self.net_info.out_num): self.__check_course_to_out((self.net_info.node_num + n)) def __mutate(self, current, min_int, max_int): mutated_gene = current while (current == mutated_gene): mutated_gene = (min_int + np.random.randint((max_int - min_int))) return mutated_gene def mutation(self, mutation_rate=0.01): active_check = False for n in range((self.net_info.node_num + self.net_info.out_num)): t = self.gene[n][0] type_num = (self.net_info.func_type_num if (n < self.net_info.node_num) else self.net_info.out_type_num) if ((np.random.rand() < mutation_rate) and (type_num > 1)): self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num) if self.is_active[n]: active_check = True col = np.min((int((n / self.net_info.rows)), self.net_info.cols)) max_connect_id = ((col * self.net_info.rows) + self.net_info.input_num) min_connect_id = ((((col - self.net_info.level_back) * self.net_info.rows) + self.net_info.input_num) if ((col - self.net_info.level_back) >= 0) else 0) in_num = (self.net_info.func_in_num[t] if (n < self.net_info.node_num) else self.net_info.out_in_num[t]) for i in range(self.net_info.max_in_num): if ((np.random.rand() < mutation_rate) and ((max_connect_id - min_connect_id) > 1)): self.gene[n][(i + 1)] = self.__mutate(self.gene[n][(i + 1)], min_connect_id, max_connect_id) if (self.is_active[n] and (i < in_num)): active_check = True self.check_active() return active_check def neutral_mutation(self, mutation_rate=0.01): for n in range((self.net_info.node_num + self.net_info.out_num)): t = self.gene[n][0] type_num = (self.net_info.func_type_num if (n < self.net_info.node_num) else self.net_info.out_type_num) if ((not self.is_active[n]) and (np.random.rand() < mutation_rate) and (type_num > 1)): self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num) col = np.min((int((n / self.net_info.rows)), self.net_info.cols)) max_connect_id = ((col * self.net_info.rows) + self.net_info.input_num) min_connect_id = ((((col - self.net_info.level_back) * self.net_info.rows) + self.net_info.input_num) if ((col - self.net_info.level_back) >= 0) else 0) in_num = 
(self.net_info.func_in_num[t] if (n < self.net_info.node_num) else self.net_info.out_in_num[t]) for i in range(self.net_info.max_in_num): if (((not self.is_active[n]) or (i >= in_num)) and (np.random.rand() < mutation_rate) and ((max_connect_id - min_connect_id) > 1)): self.gene[n][(i + 1)] = self.__mutate(self.gene[n][(i + 1)], min_connect_id, max_connect_id) self.check_active() return False def count_active_node(self): return self.is_active.sum() def copy(self, source): self.net_info = source.net_info self.gene = source.gene.copy() self.is_active = source.is_active.copy() self.eval = source.eval def active_net_list(self): net_list = [['input', 0, 0]] active_cnt = np.arange(((self.net_info.input_num + self.net_info.node_num) + self.net_info.out_num)) active_cnt[self.net_info.input_num:] = np.cumsum(self.is_active) for (n, is_a) in enumerate(self.is_active): if is_a: t = self.gene[n][0] if (n < self.net_info.node_num): type_str = self.net_info.func_type[t] else: type_str = self.net_info.out_type[t] connections = [active_cnt[self.gene[n][(i + 1)]] for i in range(self.net_info.max_in_num)] net_list.append(([type_str] + connections)) return net_list
@pytest.mark.parametrize('search_fn', HEURISTIC_SEARCH_FUNCS) @pytest.mark.parametrize('target_node', OPTIMAL_COSTS_FROM_ARAD.keys()) @pytest.mark.parametrize('hash_fn', HASH_FUNCS) def test_optimal_search_target_node(search_fn, target_node, hash_fn): start_node = 'Arad' goal_fn = (lambda x: (x == target_node)) (goal, cost) = search_fn(start_node=start_node, expand_fn=expand_fn, goal_fn=goal_fn, hash_fn=hash_fn, return_path=False) assert (goal == target_node) assert (cost == OPTIMAL_COSTS_FROM_ARAD[goal])
def read_multi_config(): config = configparser.ConfigParser() config.read('./config/multi.ini') configs = [] for section in config.sections(): single_config = SimpleNamespace() single_config.name = section for (field, value) in config.items(section): setattr(single_config, field, value) configs.append(single_config) return configs
def CalculateSmemUsagePerStage(operation): (m, n, k) = operation.tile_description.threadblock_shape if (operation.operation_kind == OperationKind.Gemm): stage_barrier_bytes = 32 return (((((DataTypeSize[operation.A.element] * m) * k) // 8) + (((DataTypeSize[operation.B.element] * k) * n) // 8)) + stage_barrier_bytes) else: raise Exception('Unsupported operation kind {}.'.format(operation.operation_kind))
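Worked arithmetic for one plausible case, assuming DataTypeSize maps an element type to its width in bits: a 128x128x32 Gemm tile with fp16 A and B operands stages 8192 bytes per operand plus the 32-byte stage barrier:
m, n, k = 128, 128, 32
bits_a = bits_b = 16  # fp16; stands in for DataTypeSize[operation.A.element] etc.
smem_per_stage = (bits_a * m * k) // 8 + (bits_b * k * n) // 8 + 32
assert smem_per_stage == 16416  # 8192 + 8192 + 32 bytes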
def enable_cpu_fuser(fn): def wrapper(*args, **kwargs): torch._C._jit_override_can_fuse_on_cpu(True) try: fn(*args, **kwargs) finally: torch._C._jit_override_can_fuse_on_cpu(False) return wrapper
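A hypothetical use of the decorator above: the wrapped test body runs with the CPU fuser turned on, and the finally block turns it off again on exit (note it is unconditionally disabled afterwards rather than restored to its prior state):
@enable_cpu_fuser
def test_fused_add_relu():
    # body runs with torch._C._jit_override_can_fuse_on_cpu(True) in effect
    ...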
def remove_user(username): with connection.cursor() as cursor: cursor.execute('DELETE FROM users WHERE username = %s', [username]) return
def tf_efficientnet_b8_ap(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model
def register_Ns3Ipv6RoutingHelper_methods(root_module, cls): cls.add_constructor([]) cls.add_constructor([param('ns3::Ipv6RoutingHelper const &', 'arg0')]) cls.add_method('Copy', 'ns3::Ipv6RoutingHelper *', [], is_pure_virtual=True, is_const=True, is_virtual=True) cls.add_method('Create', 'ns3::Ptr< ns3::Ipv6RoutingProtocol >', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_const=True, is_virtual=True) cls.add_method('PrintNeighborCacheAllAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True) cls.add_method('PrintNeighborCacheAllEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True) cls.add_method('PrintNeighborCacheAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True) cls.add_method('PrintNeighborCacheEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True) cls.add_method('PrintRoutingTableAllAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')], is_static=True) cls.add_method('PrintRoutingTableAllEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')], is_static=True) cls.add_method('PrintRoutingTableAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')], is_static=True) cls.add_method('PrintRoutingTableEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')], is_static=True) return
def main(): with open(os.devnull, 'w') as devnull: ret = subprocess.call(['which', 'lspci'], stdout=devnull, stderr=devnull) if (ret != 0): sys.exit("'lspci' not found - please install 'pciutils'") parse_args() check_modules() clear_data() get_device_details(network_devices) do_arg_actions()
def get_cpath_embeddings(sentence): phrpaths = {} for phrpath in set(sentence.cpaths.values()): shp = [ct_x[node] for node in phrpath] if USE_DROPOUT: cpathfwdlstm.set_dropout(DROPOUT_RATE) cpathrevlstm.set_dropout(DROPOUT_RATE) cpfinit = cpathfwdlstm.initial_state() cpathfwd = cpfinit.transduce(shp) cprinit = cpathrevlstm.initial_state() cpathrev = cprinit.transduce(reversed(shp)) cpathlstm = dy.rectify(((w_cp * dy.concatenate([cpathfwd[(- 1)], cpathrev[(- 1)]])) + b_cp)) phrpaths[phrpath] = cpathlstm return phrpaths
def _get_worker_info(): machine_id = int(os.getenv(PARALLAX_MACHINE_ID, (- 1))) if (machine_id == (- 1)): raise RuntimeError('Need to set environment variable PARALLAX_MACHINE_ID') hostname = os.getenv(PARALLAX_HOSTNAME) if (hostname is None): raise RuntimeError('Need to set environment variable PARALLAX_HOSTNAME') return (machine_id, hostname)
class MultiprocessingSolver(LocalMatrixSolver): def __init__(self, wrapped_solver, num_processes=0): if (num_processes == 0): num_processes = multiprocessing.cpu_count() import signal original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN) self.pool = multiprocessing.Pool(num_processes) signal.signal(signal.SIGINT, original_sigint_handler) self.solver = wrapped_solver def solve_matrix_equation(self, A: scipy.sparse.csr_matrix, b: np.ndarray): try: return self.pool.apply(_worker_simulate, (A, b, self.solver)) except KeyboardInterrupt: self.pool.terminate() self.pool.join()
def audio_albert(refresh=False, *args, **kwargs): return audio_albert_960hr(*args, refresh=refresh, **kwargs)
@pytest.mark.parametrize('join', ['none']) def test_combine_workspace_incompatible_parameter_configs(workspace_factory, join): ws = workspace_factory() new_ws = ws.rename(channels={channel: f'renamed_{channel}' for channel in ws.channels}) new_ws.get_measurement(measurement_name='GaussExample')['config']['parameters'][0]['bounds'] = [[0.0, 1.0]] with pytest.raises(pyhf.exceptions.InvalidWorkspaceOperation, match='GaussExample'): pyhf.Workspace.combine(ws, new_ws, join=join)
def test_curl_cc(): (theta, phi) = sp.symbols('x,y', real=True, positive=True) psi = (theta, phi) r = 1 rv = (((r * sp.sin(theta)) * sp.cos(phi)), ((r * sp.sin(theta)) * sp.sin(phi)), (r * sp.cos(theta))) sph = sp.functions.special.spherical_harmonics.Ynm ue = sph(6, 3, theta, phi) (N, M) = (16, 12) L0 = FunctionSpace(N, 'C', domain=(0, np.pi)) F1 = FunctionSpace(M, 'F', dtype='D') T = TensorProductSpace(comm, (L0, F1), coordinates=(psi, rv)) u_hat = Function(T, buffer=ue) du = curl(grad(u_hat)) assert (du.terms() == [[]]) (r, theta, z) = psi = sp.symbols('x,y,z', real=True, positive=True) rv = ((r * sp.cos(theta)), (r * sp.sin(theta)), z) ue = ((((r * (1 - r)) * sp.cos((4 * theta))) - (1 * (r - 1))) * sp.cos((4 * z))) N = 12 F0 = FunctionSpace(N, 'F', dtype='D') F1 = FunctionSpace(N, 'F', dtype='d') L = FunctionSpace(N, 'L', bc=(0, 0), domain=(0, 1)) T.destroy() T = TensorProductSpace(comm, (L, F0, F1), coordinates=(psi, rv)) T1 = T.get_orthogonal() V = VectorSpace(T1) u_hat = Function(T, buffer=ue) d = curl(grad(u_hat)) assert (d.terms() == [[], [], []]) T.destroy()
class AlignVisionModel(metaclass=DummyObject): _backends = ['torch'] def __init__(self, *args, **kwargs): requires_backends(self, ['torch'])
class Draw(): def __init__(self, image, size=None, color=None): if (not hasattr(image, 'im')): image = Image.new(image, size, color) self.draw = ImageDraw.Draw(image) self.image = image self.transform = None def flush(self): return self.image def render(self, op, xy, pen, brush=None): outline = fill = None width = 1 if isinstance(pen, Pen): outline = pen.color width = pen.width elif isinstance(brush, Pen): outline = brush.color width = brush.width if isinstance(brush, Brush): fill = brush.color elif isinstance(pen, Brush): fill = pen.color if self.transform: xy = ImagePath.Path(xy) xy.transform(self.transform) if (op == 'line'): self.draw.line(xy, fill=outline, width=width) else: getattr(self.draw, op)(xy, fill=fill, outline=outline) def settransform(self, offset): (xoffset, yoffset) = offset self.transform = (1, 0, xoffset, 0, 1, yoffset) def arc(self, xy, start, end, *options): self.render('arc', xy, start, end, *options) def chord(self, xy, start, end, *options): self.render('chord', xy, start, end, *options) def ellipse(self, xy, *options): self.render('ellipse', xy, *options) def line(self, xy, *options): self.render('line', xy, *options) def pieslice(self, xy, start, end, *options): self.render('pieslice', xy, start, end, *options) def polygon(self, xy, *options): self.render('polygon', xy, *options) def rectangle(self, xy, *options): self.render('rectangle', xy, *options) def text(self, xy, text, font): if self.transform: xy = ImagePath.Path(xy) xy.transform(self.transform) self.draw.text(xy, text, font=font.font, fill=font.color) def textsize(self, text, font): return self.draw.textsize(text, font=font.font)
def render_demos(env, data, filename='demo_rendering.mp4', render=None): FPS = 30 render_skip = max(1, round((1.0 / ((FPS * env.sim.model.opt.timestep) * env.frame_skip)))) t0 = timer.time() viewer(env, mode='initialize', render=render) for i_frame in range(data['ctrl'].shape[0]): env.sim.data.qpos[:] = data['qpos'][i_frame].copy() env.sim.data.qvel[:] = data['qvel'][i_frame].copy() env.sim.forward() if ((i_frame % render_skip) == 0): viewer(env, mode='render', render=render) print(i_frame, end=', ', flush=True) viewer(env, mode='save', filename=filename, render=render) print(('time taken = %f' % (timer.time() - t0)))
def test_issue_7406(): np.random.seed(0) assert_equal(binom.ppf(np.random.rand(10), 0, 0.5), 0) assert_equal(binom.ppf(0, 0, 0.5), (- 1)) assert_equal(binom.ppf(1, 0, 0.5), 0)
class CNNLayerNorm(nn.Module): def __init__(self, n_feats): super(CNNLayerNorm, self).__init__() self.layer_norm = nn.LayerNorm(n_feats) def forward(self, x): x = x.transpose(2, 3).contiguous() x = self.layer_norm(x) return x.transpose(2, 3).contiguous()
class TestVAE(unittest.TestCase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) set_random_seeds() self.model = VAE(config=VAE.config_class(num_epochs=5)) self.dataset = MSL(rootdir=join(rootdir, 'data', 'smap')) (df, metadata) = self.dataset[0] (self.train_df, self.test_df, self.test_labels) = get_train_test_splits(df, metadata, 5000) logger.info('Training model...\n') train_ts = TimeSeries.from_pd(self.train_df) self.model.train(train_ts) def test_score(self): print(('-' * 80)) logger.info((('test_score\n' + ('-' * 80)) + '\n')) test_ts = TimeSeries.from_pd(self.test_df) set_random_seeds() score_ts = self.model.get_anomaly_score(test_ts) scores = score_ts.to_pd().values.flatten() (min_score, max_score, sum_score) = (min(scores), max(scores), sum(scores)) logger.info(f'scores look like: {scores[:10]}') logger.info(f'min score = {min_score}') logger.info(f'max score = {max_score}') logger.info(f'sum score = {sum_score}') def test_save_load(self): print(('-' * 80)) logger.info((('test_save_load\n' + ('-' * 80)) + '\n')) self.model.save(dirname=join(rootdir, 'tmp', 'vae')) loaded_model = VAE.load(dirname=join(rootdir, 'tmp', 'vae')) test_ts = TimeSeries.from_pd(self.test_df) set_random_seeds() scores = self.model.get_anomaly_score(test_ts) set_random_seeds() loaded_model_scores = loaded_model.get_anomaly_score(test_ts) self.assertSequenceEqual(list(scores), list(loaded_model_scores)) set_random_seeds() alarms = self.model.get_anomaly_label(test_ts) set_random_seeds() loaded_model_alarms = loaded_model.get_anomaly_label(test_ts) self.assertSequenceEqual(list(alarms), list(loaded_model_alarms))
def load_oxts_packets_and_poses(oxts_files): scale = None origin = None oxts = [] for filename in oxts_files: with open(filename, 'r') as f: for line in f.readlines(): line = line.split() line[:(- 5)] = [float(x) for x in line[:(- 5)]] line[(- 5):] = [int(float(x)) for x in line[(- 5):]] packet = OxtsPacket(*line) if (scale is None): scale = np.cos(((packet.lat * np.pi) / 180.0)) (R, t) = pose_from_oxts_packet(packet, scale) if (origin is None): origin = t T_w_imu = transform_from_rot_trans(R, (t - origin)) oxts.append(OxtsData(packet, T_w_imu)) return oxts
class ResNeXt(nn.Module):

    def __init__(self, subtype='resnext50_32x4d', out_stages=[2, 3, 4], output_stride=32,
                 classifier=False, num_classes=1000, backbone_path=None, pretrained=True):
        super(ResNeXt, self).__init__()
        self.subtype = subtype
        self.out_stages = out_stages
        self.output_stride = output_stride
        self.classifier = classifier
        self.num_classes = num_classes
        self.backbone_path = backbone_path
        self.pretrained = pretrained
        if self.subtype == 'resnext50_32x4d':
            resnext = resnext50_32x4d(self.pretrained)
            self.out_channels = [64, 256, 512, 1024, 2048]
        elif self.subtype == 'resnext101_32x8d':
            resnext = resnext101_32x8d(self.pretrained)
            self.out_channels = [64, 256, 512, 1024, 2048]
        else:
            raise NotImplementedError
        self.out_channels = self.out_channels[self.out_stages[0]:(self.out_stages[-1] + 1)]
        self.stem = nn.Sequential(*list(resnext.children())[:4])
        self.layer1 = resnext.layer1
        self.layer2 = resnext.layer2
        self.layer3 = resnext.layer3
        self.layer4 = resnext.layer4
        if self.classifier:
            self.avgpool = resnext.avgpool
            self.fc = nn.Linear(resnext.fc.in_features, self.num_classes)
            self.out_channels = self.num_classes
        if self.pretrained:
            self.load_pretrained_weights()
        else:
            self.init_weights()

    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0.0001)

    def forward(self, x):
        x = self.stem(x)
        output = []
        for i in range(1, 5):
            res_layer = getattr(self, 'layer{}'.format(i))
            x = res_layer(x)
            if (i in self.out_stages) and (not self.classifier):
                output.append(x)
        if self.classifier:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)  # collapse (N, C, 1, 1) before the linear head
            x = self.fc(x)
            return x
        return output if len(self.out_stages) > 1 else output[0]

    def freeze_bn(self):
        for layer in self.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.eval()

    def freeze_stages(self, stage):
        if stage >= 0:
            # conv1/bn1 live inside the stem Sequential, so freeze it as a whole
            self.stem.eval()
            for param in self.stem.parameters():
                param.requires_grad = False
        for i in range(1, stage + 1):
            layer = getattr(self, 'layer{}'.format(i))
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False

    def load_pretrained_weights(self):
        url = model_urls[self.subtype]
        if url is not None:
            pretrained_state_dict = model_zoo.load_url(url)
            print('=> loading pretrained model {}'.format(url))
            self.load_state_dict(pretrained_state_dict, strict=False)
        elif self.backbone_path is not None:
            print('=> loading pretrained model {}'.format(self.backbone_path))
            self.load_state_dict(torch.load(self.backbone_path))
@pytest.mark.parametrize('alpha', [0.5, [0.5], [0.5, 0.9]])
def test_p_values_different_alpha(alpha: Union[float, NDArray]) -> None:
    result = compute_hoeffdding_bentkus_p_value(r_hat, n, alpha)
    assert isinstance(result, np.ndarray)
class InhomogeneousPoisson(flows.TransformedExponential):

    def __init__(self, t_max, lambda_init=1.0, **kwargs):
        transforms = [
            flows.Cumsum(),
            flows.Affine(scale_init=(1.0 / lambda_init), use_shift=False),
            flows.Spline(**kwargs),
            flows.Affine(scale_init=float(t_max), use_shift=False, trainable=False),
        ]
        super().__init__(transforms, t_max)
def modelMultiScaleDiscriminator(input_shape, name=None):
    x1 = Input(input_shape)
    x2 = AveragePooling2D(pool_size=(2, 2))(x1)
    out_x1 = modelDiscriminator('D1')(x1)
    out_x2 = modelDiscriminator('D2')(x2)
    return Model(inputs=x1, outputs=[out_x1, out_x2], name=name)
def _relative_degree(z, p):
    degree = len(p) - len(z)
    if degree < 0:
        raise ValueError('Improper transfer function. Must have at least as many poles as zeros.')
    return degree
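# Example (illustrative): one zero against three poles gives relative degree 2;
# an improper system (more zeros than poles) raises ValueError.
assert _relative_degree([1.0], [0.5, 0.5, 0.5]) == 2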
def rendezvous(url, rank=-1, world_size=-1, **kwargs):
    if not isinstance(url, six.string_classes):
        raise RuntimeError('`url` must be a string. {}: {}'.format(type(url), url))
    if not isinstance(rank, numbers.Integral):
        raise RuntimeError('`rank` must be an integer. {}'.format(rank))
    if not isinstance(world_size, numbers.Integral):
        raise RuntimeError('`world_size` must be an integer. {}'.format(world_size))
    result = urlparse(url)
    if (rank != -1) or (world_size != -1):
        query_dict = dict(pair.split('=') for pair in filter(None, result.query.split('&')))
        assert ('rank' not in query_dict) and ('world_size' not in query_dict), \
            'The url: {url} has node-specific arguments(rank, world_size) already.'.format(url=url)
        if rank != -1:
            query_dict['rank'] = rank
        if world_size != -1:
            query_dict['world_size'] = world_size
        result = result._replace(query='{}'.format(
            '&'.join(['{}={}'.format(k, v) for (k, v) in query_dict.items()])))
        url = urlunparse(result)
    if result.scheme not in _rendezvous_handlers:
        raise RuntimeError('No rendezvous handler for {}://'.format(result.scheme))
    return _rendezvous_handlers[result.scheme](url, **kwargs)
@contextmanager
def temp_seed(seed):
    # reseed NumPy's global RNG for the duration of the block, then restore it
    state = np.random.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(state)
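# Usage sketch (assumes the contextmanager form above): draws inside the block
# are reproducible and the surrounding RNG state is left untouched.
import numpy as np

with temp_seed(0):
    a = np.random.rand(3)
with temp_seed(0):
    b = np.random.rand(3)
assert np.allclose(a, b)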
def test_larger_than_one_attribute(config_sop, F, bcs, J, y, p, geometry):
    config_sop.set('MeshQuality', 'volume_change', '0.5')
    with pytest.raises(ConfigError) as e_info:
        cashocs.ShapeOptimizationProblem(F, bcs, J, y, p, geometry.boundaries, config=config_sop)
    assert ('Key volume_change in section MeshQuality is smaller than one, but it must be larger.'
            in str(e_info.value))
def test_tag():
    default_clipid = '17'
    dataset = fsdnoisy18k.Dataset(TEST_DATA_HOME)
    clip = dataset.clip(default_clipid)
    tag = clip.tags
    assert tag.labels == ['Walk_or_footsteps']
    assert tag.confidence == [1.0]
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    cls.add_method('operator()', 'void', [param('ns3::Ptr< ns3::Socket >', 'arg0')],
                   is_pure_virtual=True, is_virtual=True, custom_name='__call__')
    return
def _exp_i_theta_operator(ncut) -> csc_matrix:
    dim_theta = (2 * ncut) + 1
    # ones on the first subdiagonal: e^{i*theta} lowers the charge index by one
    matrix = sparse.dia_matrix((np.ones(dim_theta), [-1]), shape=(dim_theta, dim_theta)).tocsc()
    return matrix
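# Small check (illustrative): for ncut=1 the operator is a 3x3 shift matrix
# with ones just below the main diagonal.
m = _exp_i_theta_operator(1).toarray()
assert m.shape == (3, 3)
assert m[1, 0] == 1.0 and m[0, 1] == 0.0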
def evaluate_eval(args, net, optimizer, scheduler, val_loss, hist, dump_images, writer,
                  epoch=0, dataset_name=None, dataset=None, curr_iter=0,
                  optimizer_at=None, scheduler_at=None, save_pth=True):
    if (val_loss is not None) and (hist is not None):
        acc = np.diag(hist).sum() / hist.sum()
        acc_cls = np.diag(hist) / hist.sum(axis=1)
        acc_cls = np.nanmean(acc_cls)
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        print_evaluate_results(hist, iu, dataset_name=dataset_name, dataset=dataset)
        freq = hist.sum(axis=1) / hist.sum()
        mean_iu = np.nanmean(iu)
        logging.info('mean {}'.format(mean_iu))
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    else:
        mean_iu = 0
    if dataset_name not in args.last_record.keys():
        args.last_record[dataset_name] = {}
    if save_pth:
        # remove the previous "last" snapshot before writing the new one
        if 'mean_iu' in args.last_record[dataset_name]:
            last_snapshot = 'last_{}_epoch_{}_mean-iu_{:.5f}.pth'.format(
                dataset_name, args.last_record[dataset_name]['epoch'],
                args.last_record[dataset_name]['mean_iu'])
            last_snapshot = os.path.join(args.exp_path, last_snapshot)
            try:
                os.remove(last_snapshot)
            except OSError:
                pass
        last_snapshot = 'last_{}_epoch_{}_mean-iu_{:.5f}.pth'.format(dataset_name, epoch, mean_iu)
        last_snapshot = os.path.join(args.exp_path, last_snapshot)
        args.last_record[dataset_name]['mean_iu'] = mean_iu
        args.last_record[dataset_name]['epoch'] = epoch
        torch.cuda.synchronize()
        if optimizer_at is not None:
            torch.save({'state_dict': net.state_dict(), 'optimizer': optimizer.state_dict(),
                        'optimizer_at': optimizer_at.state_dict(), 'scheduler': scheduler.state_dict(),
                        'scheduler_at': scheduler_at.state_dict(), 'epoch': epoch,
                        'mean_iu': mean_iu, 'command': ' '.join(sys.argv[1:])}, last_snapshot)
        else:
            torch.save({'state_dict': net.state_dict(), 'optimizer': optimizer.state_dict(),
                        'scheduler': scheduler.state_dict(), 'epoch': epoch,
                        'mean_iu': mean_iu, 'command': ' '.join(sys.argv[1:])}, last_snapshot)
    if (val_loss is not None) and (hist is not None):
        if dataset_name not in args.best_record.keys():
            args.best_record[dataset_name] = {'epoch': -1, 'iter': 0, 'val_loss': 0.0,
                                              'acc': 0, 'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}
        if mean_iu > args.best_record[dataset_name]['mean_iu']:
            # delete the old best snapshot, then promote the new one
            if args.best_record[dataset_name]['epoch'] != -1:
                best_snapshot = 'best_{}_epoch_{}_mean-iu_{:.5f}.pth'.format(
                    dataset_name, args.best_record[dataset_name]['epoch'],
                    args.best_record[dataset_name]['mean_iu'])
                best_snapshot = os.path.join(args.exp_path, best_snapshot)
                assert os.path.exists(best_snapshot), 'cant find old snapshot {}'.format(best_snapshot)
                os.remove(best_snapshot)
            args.best_record[dataset_name]['val_loss'] = val_loss.avg
            args.best_record[dataset_name]['epoch'] = epoch
            args.best_record[dataset_name]['acc'] = acc
            args.best_record[dataset_name]['acc_cls'] = acc_cls
            args.best_record[dataset_name]['mean_iu'] = mean_iu
            args.best_record[dataset_name]['fwavacc'] = fwavacc
            best_snapshot = 'best_{}_epoch_{}_mean-iu_{:.5f}.pth'.format(
                dataset_name, args.best_record[dataset_name]['epoch'],
                args.best_record[dataset_name]['mean_iu'])
            best_snapshot = os.path.join(args.exp_path, best_snapshot)
            shutil.copyfile(last_snapshot, best_snapshot)
        else:
            logging.info('Saved file to {}'.format(last_snapshot))
    if (val_loss is not None) and (hist is not None):
        logging.info('-' * 107)
        fmt_str = ('[epoch %d], [dataset name %s], [val loss %.5f], [acc %.5f], [acc_cls %.5f], '
                   + '[mean_iu %.5f], [fwavacc %.5f]')
        logging.info(fmt_str % (epoch, dataset_name, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))
        if save_pth:
            fmt_str = ('best record: [dataset name %s], [val loss %.5f], [acc %.5f], [acc_cls %.5f], '
                       + '[mean_iu %.5f], [fwavacc %.5f], [epoch %d], ')
            logging.info(fmt_str % (dataset_name,
                                    args.best_record[dataset_name]['val_loss'],
                                    args.best_record[dataset_name]['acc'],
                                    args.best_record[dataset_name]['acc_cls'],
                                    args.best_record[dataset_name]['mean_iu'],
                                    args.best_record[dataset_name]['fwavacc'],
                                    args.best_record[dataset_name]['epoch']))
        logging.info('-' * 107)
        writer.add_scalar('{}/acc'.format(dataset_name), acc, curr_iter)
        writer.add_scalar('{}/acc_cls'.format(dataset_name), acc_cls, curr_iter)
        writer.add_scalar('{}/mean_iu'.format(dataset_name), mean_iu, curr_iter)
        writer.add_scalar('{}/val_loss'.format(dataset_name), val_loss.avg, curr_iter)
def get_dataset_confs(path):
    module_path = datasets.load.dataset_module_factory(path).module_path
    builder_cls = datasets.load.import_main_class(module_path, dataset=True)
    confs = builder_cls.BUILDER_CONFIGS
    if confs and (len(confs) > 1):
        return confs
    return []
@register_metric
def is50k(opts):
    opts.dataset_kwargs.update(max_size=None)
    opts.dataset_kwargs.cfg.update(mirror=False)
    (mean, std) = inception_score.compute_is(opts, num_gen=50000, num_splits=10)
    return dict(is50k_mean=mean, is50k_std=std)
def CutoutAbs(img, v):
    if v < 0:
        return img
    (w, h) = img.size
    x0 = np.random.uniform(w)
    y0 = np.random.uniform(h)
    x0 = int(max(0, x0 - (v / 2.0)))
    y0 = int(max(0, y0 - (v / 2.0)))
    x1 = min(w, x0 + v)
    y1 = min(h, y0 + v)
    xy = (x0, y0, x1, y1)
    color = (125, 123, 114)
    img = img.copy()
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img
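# Usage sketch (illustrative): blank out a 16-pixel square at a random position;
# the input image is copied, not modified in place.
import PIL.Image

img = PIL.Image.new('RGB', (32, 32), 'white')
out = CutoutAbs(img, 16)
assert out is not img and out.size == img.size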
def count_depth_matmul(node, symbols, state):
    # depth of a parallel matmul reduction is logarithmic in the shared dimension
    A_memlet = next(e for e in state.in_edges(node) if e.dst_conn == '_a')
    size_shared_dimension = symeval(A_memlet.data.subset.size()[-1], symbols)
    return bigo(sp.log(size_shared_dimension))
class Channel(Factor):
    n_next = 1
    n_prev = 1

    def compute_forward_message(self, az, bz, ax, bx):
        (rx, vx) = self.compute_forward_posterior(az, bz, ax, bx)
        (ax_new, bx_new) = self.compute_ab_new(rx, vx, ax, bx)
        return (ax_new, bx_new)

    def compute_backward_message(self, az, bz, ax, bx):
        (rz, vz) = self.compute_backward_posterior(az, bz, ax, bx)
        (az_new, bz_new) = self.compute_ab_new(rz, vz, az, bz)
        return (az_new, bz_new)

    def compute_forward_state_evolution(self, az, ax, tau_z):
        vx = self.compute_forward_error(az, ax, tau_z)
        ax_new = self.compute_a_new(vx, ax)
        return ax_new

    def compute_backward_state_evolution(self, az, ax, tau_z):
        vz = self.compute_backward_error(az, ax, tau_z)
        az_new = self.compute_a_new(vz, az)
        return az_new

    def compute_forward_error(self, az, ax, tau_z):
        def variance(bz, bx):
            (rx, vx) = self.compute_forward_posterior(az, bz, ax, bx)
            return vx
        error = self.beliefs_measure(az, ax, tau_z, f=variance)
        return error

    def compute_backward_error(self, az, ax, tau_z):
        def variance(bz, bx):
            (rz, vz) = self.compute_backward_posterior(az, bz, ax, bx)
            return vz
        error = self.beliefs_measure(az, ax, tau_z, f=variance)
        return error

    def compute_forward_overlap(self, az, ax, tau_z):
        vx = self.compute_forward_error(az, ax, tau_z)
        tau_x = self.second_moment(tau_z)
        mx = tau_x - vx
        return mx

    def compute_backward_overlap(self, az, ax, tau_z):
        vz = self.compute_backward_error(az, ax, tau_z)
        mz = tau_z - vz
        return mz

    def compute_free_energy(self, az, ax, tau_z):
        def log_partition(bz, bx):
            return self.compute_log_partition(az, bz, ax, bx)
        A = self.beliefs_measure(az, ax, tau_z, f=log_partition)
        return A

    def get_alpha(self):
        return getattr(self, 'alpha', 1)

    def compute_mutual_information(self, az, ax, tau_z):
        alpha = self.get_alpha()
        tau_x = self.second_moment(tau_z)
        A = self.compute_free_energy(az, ax, tau_z)
        I = ((0.5 * ((az * tau_z) + (alpha * ax * tau_x))) - A
             + (0.5 * np.log((2 * np.pi * tau_z) / np.e)))
        return I

    def compute_precision(self, vz, vx, tau_z):
        # solve for the precisions (az, ax) that reproduce the target variances
        def f(a):
            (az, ax) = a
            fz = self.compute_backward_error(az, ax, tau_z) - vz
            fx = self.compute_forward_error(az, ax, tau_z) - vx
            return (fz, fx)
        x0 = (1 / vz, 1 / vx)
        sol = root(f, x0=x0, method='hybr')
        (az, ax) = sol.x
        return (az, ax)

    def compute_dual_mutual_information(self, vz, vx, tau_z):
        alpha = self.get_alpha()
        (az, ax) = self.compute_precision(vz, vx, tau_z)
        I = self.compute_mutual_information(az, ax, tau_z)
        I_dual = I - (0.5 * ((az * vz) + (alpha * ax * vx)))
        return I_dual

    def compute_dual_free_energy(self, mz, mx, tau_z):
        alpha = self.get_alpha()
        tau_x = self.second_moment(tau_z)
        vz = tau_z - mz
        vx = tau_x - mx
        (az, ax) = self.compute_precision(vz, vx, tau_z)
        A = self.compute_free_energy(az, ax, tau_z)
        A_dual = (0.5 * ((az * mz) + (alpha * ax * mx))) - A
        return A_dual
def loadPassages(filename):
    passages = {}
    with open(filename, 'r') as f:
        for l in f:
            l = l.strip().split('\t')
            i = int(l[0])
            # lower-case, strip unwanted characters, and collapse whitespace
            passages[i] = regex_multi_space.sub(
                ' ', regex_drop_char.sub(' ', ' '.join(l[1:]).lower())).strip()
    return passages
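# The module-level regexes are not shown in this snippet; plausible definitions
# (an assumption, matching the cleanup the function performs) would be:
import re

regex_drop_char = re.compile(r'[^a-z0-9\s]+')   # drop non-alphanumeric characters
regex_multi_space = re.compile(r'\s+')          # collapse runs of whitespace
# Expected input: one passage per line, formatted as '<int id>\t<passage text>'.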
def SRem(a, b):
    _check_bv_args(a, b)
    (a, b) = _coerce_exprs(a, b)
    return BitVecRef(Z3_mk_bvsrem(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
class MLP(nn.Module):

    def __init__(self, cfg, per_seq=False):
        super(MLP, self).__init__()
        self.drop = nn.Dropout(cfg.dropout)
        self.relu = nn.ReLU()
        self.per_seq = per_seq
        if self.per_seq:
            self.attention = nn.Linear(cfg.input_dim, 1)
        self.hidden = nn.Linear(cfg.input_dim, cfg.hidden_dim)
        self.output = nn.Linear(cfg.hidden_dim, cfg.num_classes)

    def forward(self, X):
        logits = []
        for x in X:
            if self.per_seq:
                # attention-pool the sequence into a single feature vector
                att = self.attention(x)
                x = torch.sum(x * F.softmax(att, 1).expand_as(x), 0)
            x = self.drop(self.relu(self.hidden(x)))
            x = self.output(x)
            if self.per_seq:
                logits.append(x)
            else:
                logits.append(x.unsqueeze(0))
        return logits

    def load_weights(self, pretrained_model):
        state_dict = {}
        for (key, value) in torch.load(pretrained_model, map_location=torch.device('cpu')).items():
            if key.startswith('module'):
                state_dict[key[7:]] = value  # strip the 'module.' DataParallel prefix
            else:
                state_dict[key] = value
        self.load_state_dict(state_dict)
def _str_to_unicode(text, encoding=None, errors='strict'):
    if encoding is None:
        encoding = 'utf-8'
    if isinstance(text, bytes):
        return text.decode(encoding, errors)
    return text
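# Round-trip example: bytes are decoded (UTF-8 by default), str passes through.
assert _str_to_unicode(b'caf\xc3\xa9') == 'café'
assert _str_to_unicode('already text') == 'already text'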
def test_float32_passthrough():
    x = np.array([-1, 1], dtype=np.float32)
    y = img_as_float(x)
    assert_equal(y.dtype, x.dtype)
@skip_if_no_torch
def test_llama_config():
    hf_config = transformers.LlamaConfig.from_pretrained('meta-llama/Llama-2-7b-hf')
    llama_config = LlamaConfig.from_hf_config(hf_config)
    config_overrides = {'_name_or_path': hf_config._name_or_path,
                        'architectures': hf_config.architectures,
                        'torch_dtype': hf_config.torch_dtype}
    new_hf_config = llama_config.to_hf_config(vocab_size=hf_config.vocab_size,
                                              config_overrides=config_overrides)
    for k in new_hf_config.__dict__.keys():
        if k in ['_commit_hash', 'transformers_version']:
            continue
        assert getattr(new_hf_config, k) == getattr(hf_config, k), \
            f'{k} {getattr(new_hf_config, k)} != {getattr(hf_config, k)}'
class ZipArchiveReaderIterDataPipe(IterDataPipe[Tuple[str, BufferedIOBase]]):

    def __init__(self, datapipe: Iterable[Tuple[str, BufferedIOBase]], length: int = -1):
        super().__init__()
        self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe
        self.length: int = length
        deprecation_warning_torchdata(type(self).__name__)

    def __iter__(self) -> Iterator[Tuple[str, BufferedIOBase]]:
        for data in self.datapipe:
            validate_pathname_binary_tuple(data)
            (pathname, data_stream) = data
            folder_name = os.path.dirname(pathname)
            try:
                zips = zipfile.ZipFile(cast(IO[bytes], data_stream))
                for zipinfo in zips.infolist():
                    # skip directory entries; ZipInfo.is_dir() exists from Python 3.6
                    if sys.version_info[1] >= 6:
                        if zipinfo.is_dir():
                            continue
                    elif zipinfo.filename.endswith('/'):
                        continue
                    extracted_fobj = zips.open(zipinfo)
                    inner_pathname = os.path.normpath(os.path.join(folder_name, zipinfo.filename))
                    yield (inner_pathname, extracted_fobj)
            except Exception as e:
                warnings.warn(f'Unable to extract files from corrupted zipfile stream {pathname} due to: {e}, abort!')
                raise e

    def __len__(self):
        if self.length == -1:
            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
        return self.length
def process_en_conllpp(paths, short_name):
    base_input_path = os.path.join(paths['NERBASE'], 'acl2023_conllpp', 'dataset', 'conllpp.txt')
    base_output_path = paths['NER_DATA_DIR']
    sentences = read_tsv(base_input_path, 0, 3, separator=None)
    # drop single-token -DOCSTART- separator "sentences"
    sentences = [sent for sent in sentences if (len(sent) > 1) or (sent[0][0] != '-DOCSTART-')]
    write_dataset([sentences], base_output_path, short_name, shard_names=['test'], shards=['test'])
def test_search_no_result(tensor_db):
    ret = _search(tensor_db.tensor_db, 'tensor_name', 'agg', 1, False, ('col3',))
    assert_frame_equal(ret, tensor_db.tensor_db)
def register_Ns3CallbackImpl__Bool_Ns3Ptr__lt__ns3Packet__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< bool, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    cls.add_method('operator()', 'bool', [param('ns3::Ptr< ns3::Packet >', 'arg0')],
                   is_pure_virtual=True, is_virtual=True, custom_name='__call__')
    return
def check_commonvoice_folders(data_folder):
    files_str = '/clips'
    if not os.path.exists(data_folder + files_str):
        err_msg = ('the folder %s does not exist (it is expected in the Common Voice dataset)'
                   % (data_folder + files_str))
        raise FileNotFoundError(err_msg)
def to_bf16(t):
    # cast every float32 leaf of the pytree to bfloat16, leaving other dtypes untouched
    return jax.tree_map(lambda x: x.astype(jnp.bfloat16) if x.dtype == jnp.float32 else x, t)
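# Example on a mixed pytree (illustrative): only the float32 leaf is cast.
import jax.numpy as jnp

params = {'w': jnp.zeros((2, 2), jnp.float32), 'step': jnp.array(0, jnp.int32)}
params = to_bf16(params)
assert params['w'].dtype == jnp.bfloat16 and params['step'].dtype == jnp.int32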
class MPNetForQuestionAnswering(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_supported_platform():
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    if (m is not None) and (sys.platform == 'darwin'):
        try:
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            # not macOS
            pass
    return plat
class CompositeReader(Reader):

    def __init__(self, names, readers):
        assert len(names) == len(readers)
        super(CompositeReader, self).__init__(schema=Struct(
            *[(name, reader.schema()) for (name, reader) in zip(names, readers)]))
        self._names = names
        self._readers = readers

    def setup_ex(self, init_net, finish_net):
        for reader in self._readers:
            reader.setup_ex(init_net, finish_net)

    def read_ex(self, local_init_net, local_finish_net):
        fields = []
        stop_blobs = []
        all_sub_read_nets = []
        for (name, reader) in zip(self._names, self._readers):
            (sub_read_nets, should_stop, record) = reader.read_record_ex(local_init_net, local_finish_net)
            stop_blobs.append(should_stop)
            all_sub_read_nets.append(sub_read_nets)
            fields.extend(record.field_blobs())
        read_nets = []
        # stop as soon as any sub-reader signals a stop
        local_should_stop = stop_blobs[-1]
        for (name, sub_read_nets, stop_blob) in zip(self._names, all_sub_read_nets, stop_blobs):
            read_nets.extend(sub_read_nets)
            if stop_blob == local_should_stop:
                continue
            stop_net = core.Net('{}_stop'.format(name))
            stop_net.Or([local_should_stop, stop_blob], local_should_stop)
            read_nets.append(stop_net)
        return (read_nets, local_should_stop, fields)

    def reset(self, net):
        for reader in self._readers:
            reader.reset(net)
def register_Ns3EpcTftClassifier_methods(root_module, cls):
    cls.add_constructor([param('ns3::EpcTftClassifier const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::EpcTft >', 'tft'), param('uint32_t', 'id')])
    cls.add_method('Classify', 'uint32_t', [param('ns3::Ptr< ns3::Packet >', 'p'),
                                            param('ns3::EpcTft::Direction', 'direction'),
                                            param('uint16_t', 'protocolNumber')])
    cls.add_method('Delete', 'void', [param('uint32_t', 'id')])
    return
class Spherical(_Coordinates):

    def transform(self, radius=None, azimuth=None, inclination=None):
        return (
            radius * sin(inclination) * cos(azimuth),
            radius * sin(inclination) * sin(azimuth),
            radius * cos(inclination),
        )
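# Sanity check (illustrative; assumes _Coordinates allows no-arg construction
# and that sin/cos come from the math module): inclination is measured from the
# +z axis, so zero inclination lands on the pole (0, 0, r).
(x, y, z) = Spherical().transform(radius=1.0, azimuth=0.0, inclination=0.0)
assert (round(x, 12), round(y, 12), round(z, 12)) == (0.0, 0.0, 1.0)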
class MobileNetV2ForImageClassification(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def circuit_training(X_train, Y_train, U, U_params, embedding_type, circuit, cost_fn):
    if circuit == 'QCNN':
        if U in ('U_SU4_no_pooling', 'U_SU4_1D', 'U_9_1D'):
            total_params = U_params * 3
        else:
            total_params = (U_params * 3) + (2 * 3)
    elif circuit == 'Hierarchical':
        total_params = U_params * 7
    params = np.random.randn(total_params, requires_grad=True)
    # learning_rate, steps, and batch_size are module-level hyperparameters
    opt = qml.NesterovMomentumOptimizer(stepsize=learning_rate)
    loss_history = []
    for it in range(steps):
        batch_index = np.random.randint(0, len(X_train), (batch_size,))
        X_batch = [X_train[i] for i in batch_index]
        Y_batch = [Y_train[i] for i in batch_index]
        (params, cost_new) = opt.step_and_cost(
            lambda v: cost(v, X_batch, Y_batch, U, U_params, embedding_type, circuit, cost_fn),
            params)
        loss_history.append(cost_new)
        if (it % 10) == 0:
            print('iteration: ', it, ' cost: ', cost_new)
    return (loss_history, params)
def test_record_tuple_3():
    text = '(int64, int64[parameters={"wonky": ["bla", 1, 2]}])'
    parsedtype = deduce_type(text)
    assert isinstance(parsedtype, ak.types.RecordType)
    assert str(parsedtype) == text
class InputExample():

    def __init__(self, guid: str, texts: List[str], label: Union[int, float]):
        self.guid = guid
        self.texts = [text.strip() for text in texts]
        self.label = label
def _multi_variate(base_ring, num_gens=None, names=None, order='negdeglex',
                   default_prec=None, sparse=False):
    if names is None:
        raise TypeError('you must specify a variable name or names')
    if num_gens is None:
        if isinstance(names, str):
            num_gens = len(names.split(','))
        elif isinstance(names, (list, tuple)):
            num_gens = len(names)
        else:
            raise TypeError('variable names must be a string, tuple or list')
    names = normalize_names(num_gens, names)
    num_gens = len(names)
    if default_prec is None:
        default_prec = 12
    if base_ring not in commutative_rings.CommutativeRings():
        raise TypeError('base_ring must be a commutative ring')
    from sage.rings.multi_power_series_ring import MPowerSeriesRing_generic
    R = MPowerSeriesRing_generic(base_ring, num_gens, names, order=order,
                                 default_prec=default_prec, sparse=sparse)
    return R
class TestPersistence(object):
    __test__ = False  # not a pytest test class, despite the name

    def __init__(self):
        self._data_points = []

    def use_on(self, runs):
        for run in runs:
            run.add_persistence(self)

    def get_data_points(self, run_id=None):
        if run_id:
            return [dp for dp in self._data_points if dp.run_id is run_id]
        return self._data_points

    def persist_data_point(self, data_point):
        self._data_points.append(data_point)

    def close(self):
        pass

    def run_completed(self):
        pass