code
stringlengths
101
5.91M
def calibration_time_to_event(Forecast, T, E):
    """Compute a calibration curve for a time-to-event (survival) forecast.

    Evaluates the forecast CDF at the observed times, fits a Kaplan-Meier
    estimator to those probabilities (censoring indicator ``E``), samples the
    KM curve at 11 evenly spaced points, and fits a line through
    (predicted probability, observed event fraction).

    Parameters
    ----------
    Forecast : distribution-like object exposing ``cdf``
    T : array-like of observed times
    E : array-like of event indicators (1 = event, 0 = censored)

    Returns
    -------
    (preds, obs, slope, intercept) : predicted vs. observed points and the
    coefficients of the calibration line (perfect calibration: slope 1,
    intercept 0).
    """
    cdfs = Forecast.cdf(T)
    kmf = KaplanMeierFitter()
    kmf.fit(cdfs, E)
    # BUG FIX: np.round(np.linspace(...)) returns floats, but .iloc requires
    # integer positions (raises IndexError on modern pandas) — cast to int.
    idxs = np.round(np.linspace(0, (len(kmf.survival_function_) - 1), 11)).astype(int)
    preds = np.array(kmf.survival_function_.iloc[idxs].index)
    # observed cumulative event probability = 1 - KM survival estimate
    obs = (1 - np.array(kmf.survival_function_.iloc[idxs].KM_estimate))
    (slope, intercept) = np.polyfit(preds, obs, deg=1)
    return (preds, obs, slope, intercept)
def get_prober_name():
    """Return the name of an available media prober executable.

    Prefers 'avprobe' over 'ffprobe'; if neither is on PATH, warns and
    falls back to 'ffprobe' anyway.
    """
    for candidate in ('avprobe', 'ffprobe'):
        if which(candidate):
            return candidate
    warn("Couldn't find ffprobe or avprobe - defaulting to ffprobe, but may not work", RuntimeWarning)
    return 'ffprobe'
def main_worker(args):
    """Transfer-learning entry point: fine-tune a pretrained BART caption
    model on the MC dataset and write a rolling checkpoint every epoch.
    """
    train_dataset = MC_Dataset(data_path=args.data_dir, split='train', caption_type='gt')
    # shuffle=False / drop_last=False: iterate the split deterministically and completely
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, drop_last=False)
    model = BartCaptionModel(max_length=args.max_length, label_smoothing=args.label_smoothing)
    pretrain_dir = f'exp/pretrain/{args.caption_type}/'
    config = OmegaConf.load(os.path.join(pretrain_dir, 'hparams.yaml'))
    # resume weights from the final ('last') pretraining checkpoint
    (model, save_epoch) = load_pretrained(args, pretrain_dir, model, model_types='last', mdp=config.multiprocessing_distributed)
    print_model_params(model)
    # single-GPU training on args.gpu (no DDP here)
    torch.cuda.set_device(args.gpu)
    model = model.cuda(args.gpu)
    optimizer = torch.optim.AdamW(model.parameters(), args.lr)
    save_dir = f'exp/transfer/{args.caption_type}'
    logger = Logger(save_dir)
    save_hparams(args, save_dir)
    for epoch in range(args.start_epoch, args.epochs):
        train(train_loader, model, optimizer, epoch, logger, args)
        # overwrite one rolling checkpoint each epoch (no per-epoch files)
        torch.save({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, f'{save_dir}/last.pth')
def init_tparams(params):
    """Wrap each parameter array in a named Theano shared variable.

    Parameters
    ----------
    params : OrderedDict mapping name -> numpy array

    Returns
    -------
    OrderedDict mapping the same names to ``theano.shared`` variables,
    preserving insertion order.
    """
    tparams = OrderedDict()
    # BUG FIX: dict.iteritems() is Python 2 only; items() works on both 2 and 3.
    for (kk, pp) in params.items():
        tparams[kk] = theano.shared(params[kk], name=kk)
    return tparams
class FreeGradedModuleElement(IndexedFreeModuleElement):
    """An element of a finitely generated free graded module over a graded
    algebra, stored as a sparse coefficient mapping over the generators."""

    def dense_coefficient_list(self, order=None):
        """Return the coefficient of every generator as a list.

        ``order`` defaults to the parent's generator index order; missing
        coefficients come back as zero via ``self[i]``.
        """
        if (order is None):
            order = self.parent()._indices
        return [self[i] for i in order]

    def degree(self):
        """Return the degree of this homogeneous element.

        Raises ``ValueError`` for the zero element (no well-defined degree)
        and for nonhomogeneous elements (terms of differing total degree).
        """
        if self.is_zero():
            raise ValueError('the zero element does not have a well-defined degree')
        degrees = []
        try:
            # total degree of each term = generator degree + coefficient degree
            for (g, c) in zip(self.parent().generator_degrees(), self.dense_coefficient_list()):
                if c:
                    degrees.append((g + c.degree()))
        except ValueError:
            # a coefficient that is itself nonhomogeneous has no degree
            raise ValueError('this is a nonhomogeneous element, no well-defined degree')
        m = min(degrees)
        M = max(degrees)
        if (m == M):
            return m
        raise ValueError('this is a nonhomogeneous element, no well-defined degree')

    def lift_to_free(self):
        """Free module elements are already free; return self."""
        return self

    def _lmul_(self, a):
        # scalar action distributes over the dense coefficient list
        return self.parent()(((a * c) for c in self.dense_coefficient_list()))

    # NOTE(review): the bare '_method' below looks like a mangled decorator
    # (probably '@cached_method' with the '@' lost in extraction) — confirm upstream.
    _method

    def vector_presentation(self):
        """Return this element's coordinate vector in the vector-space
        presentation of its degree; ``None`` for the zero element."""
        if self.is_zero():
            return None
        P = self.parent()
        deg = self.degree()
        m = len(P._generator_degrees)
        V = P.vector_presentation(deg)
        ret = V.zero_vector()
        j = 0
        I = P._indices
        for i in range(m):
            # skip the basis span of generators with no coefficient here
            if (I[i] not in self._monomial_coefficients):
                j += len(P._basis_coeffs(deg, i))
                continue
            coeff = self._monomial_coefficients[I[i]]
            mc = coeff.monomial_coefficients(copy=False)
            for mono in P._basis_coeffs(deg, i):
                supp = mono.leading_support()
                if (supp in mc):
                    ret[j] = mc[supp]
                j += 1
        return ret
def get_task_type(values: np.ndarray) -> TaskType: n_unique_values = np.unique(values).shape[0] task: str if (n_unique_values == 1): raise RuntimeError('Only unique value in target') elif (n_unique_values == 2): task = TaskType.BIN else: task = TaskType.REG return task
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
    """PageRank-style centrality via power iteration on a sparse adjacency
    matrix ``X`` (CSR). Rows are out-normalized in place on a copy; dangling
    nodes redistribute their mass uniformly. Returns the score vector."""
    n = X.shape[0]
    X = X.copy()
    incoming_counts = np.asarray(X.sum(axis=1)).ravel()
    print('Normalizing the graph')
    # scale each nonzero row of the CSR matrix to sum to 1
    for row in incoming_counts.nonzero()[0]:
        X.data[X.indptr[row]:X.indptr[(row + 1)]] *= (1.0 / incoming_counts[row])
    # dangling rows (no outgoing mass) get uniform teleport weight
    dangle = np.asarray(np.where(np.isclose(X.sum(axis=1), 0), (1.0 / n), 0)).ravel()
    scores = np.full(n, (1.0 / n), dtype=np.float32)
    for iteration in range(max_iter):
        print(('power iteration #%d' % iteration))
        prev_scores = scores
        scores = ((alpha * ((scores * X) + np.dot(dangle, prev_scores))) + (((1 - alpha) * prev_scores.sum()) / n))
        # guard against an all-zero vector when normalizing the error
        scores_max = np.abs(scores).max() or 1.0
        err = (np.abs((scores - prev_scores)).max() / scores_max)
        print(('error: %0.6f' % err))
        if (err < (n * tol)):
            break
    return scores
_types('array')  # NOTE(review): looks like a mangled decorator (leading '@' lost in extraction) — confirm upstream
def change_items(context: MutationContext, draw: Draw, schema: Schema) -> MutationResult:
    """Mutate the 'items' keyword of an array schema.

    Dispatches on the shape of 'items': dict -> single-schema mutation,
    list -> positional-schema mutation; anything else (or absent/empty)
    cannot be mutated and reports failure.
    """
    items = schema.get('items', {})
    if (not items):
        return MutationResult.FAILURE
    if isinstance(items, dict):
        return _change_items_object(context, draw, schema, items)
    if isinstance(items, list):
        return _change_items_array(context, draw, schema, items)
    return MutationResult.FAILURE
def trivial_task(solvable):
    """Build a minimal one-variable SAS+ task.

    The single binary variable starts at value 0; the goal requires value 0
    when ``solvable`` is True (already satisfied) and value 1 otherwise
    (unreachable, since there are no operators or axioms).
    """
    variables = sas_tasks.SASVariables([2], [(- 1)], [['Atom dummy(val1)', 'Atom dummy(val2)']])
    init = sas_tasks.SASInit([0])
    goal = sas_tasks.SASGoal([(0, 0) if solvable else (0, 1)])
    # no mutexes, operators or axioms; metric enabled
    return sas_tasks.SASTask(variables, [], init, goal, [], [], True)
class Residual(nn.Module):
    """Residual block: a convolutional branch and a skip branch evaluated in
    parallel (ConcatTable) and summed (CaddTable)."""

    def __init__(self, numIn, numOut, inputResH, inputResW, stride=1, net_type='preact', useConv=False, baseWidth=9, cardinality=4):
        super(Residual, self).__init__()
        # main conv branch + (possibly projecting, if useConv) skip branch
        self.con = ConcatTable([convBlock(numIn, numOut, inputResH, inputResW, net_type, baseWidth, cardinality, stride), skipLayer(numIn, numOut, stride, useConv)])
        # True — presumably an in-place-add flag on CaddTable; confirm against its definition
        self.cadd = CaddTable(True)

    def forward(self, x):
        # run both branches, then element-wise add their outputs
        out = self.con(x)
        out = self.cadd(out)
        return out
class DistributedDataParallelCommHookTest(MultiProcessTestCase):
    """Multi-process tests checking that each DDP communication hook yields
    the same gradients as the default allreduce (within the hook's precision)."""

    def setUp(self):
        super(DistributedDataParallelCommHookTest, self).setUp()
        self._fork_processes()

    def tearDown(self):
        # the FileStore backing file may already be gone; ignore that
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    def world_size(self):
        # NOTE(review): conventionally a @property in MultiProcessTestCase
        # subclasses; the decorator is not visible here — confirm upstream.
        return 2

    def _local_model(self):
        local_model = TestDdpCommHook().cpu()
        return local_model

    def _get_grads(self, process_group, hook_type=None):
        """Run one forward/backward on a DDP model (optionally with a comm
        hook installed) and return its gradients."""
        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        gpu_model = DistributedDataParallel(TestDdpCommHook().to(device_id), device_ids=[device_id], process_group=process_group)
        if (hook_type is not None):
            register_ddp_comm_hook(comm_hook_type=hook_type, model=gpu_model, state=process_group)
        return self._run_and_get_grads(gpu_model)

    def _run_and_get_grads(self, model):
        # fixed seed so every rank feeds identical input
        torch.manual_seed(2020)
        input = torch.randn(40, 20)
        output = model(input, self.rank)
        output.mean().backward()
        return [p.grad.data.cpu().numpy() for p in model.parameters()]

    # NOTE(review): the bare '_nccl()', '_if_lt_x_gpu(2)' and '_if_rocm' lines
    # below look like mangled decorators (probably '@requires_nccl()',
    # '@skip_if_lt_x_gpu(2)', '@skip_if_rocm' with '@' lost) — confirm upstream.
    _nccl()
    _if_lt_x_gpu(2)
    _if_rocm

    def test_ddp_comm_hook_allreduce_hook(self):
        # hooked allreduce must match the default bit-for-bit (atol=0)
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        reference_grads = self._get_grads(process_group, None)
        hook_grads = self._get_grads(process_group, DDPCommHookType.ALLREDUCE)
        np.testing.assert_allclose(hook_grads, reference_grads, rtol=1e-05, atol=0)

    _nccl()
    _if_lt_x_gpu(2)
    _if_rocm

    def test_ddp_comm_hook_fp16compress_hook(self):
        # fp16 compression loses precision, so a small atol is allowed
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        reference_grads = self._get_grads(process_group, None)
        hook_grads = self._get_grads(process_group, DDPCommHookType.FP16_COMPRESS)
        np.testing.assert_allclose(hook_grads, reference_grads, rtol=1e-05, atol=0.0001)

    _nccl()
    _if_lt_x_gpu(2)
    _if_rocm

    def test_ddp_comm_hook_quantize_per_tensor_hook(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        reference_grads = self._get_grads(process_group, None)
        hook_grads = self._get_grads(process_group, DDPCommHookType.QUANTIZE_PER_TENSOR)
        np.testing.assert_allclose(hook_grads, reference_grads, rtol=1e-05, atol=0.0001)

    _nccl()
    _if_lt_x_gpu(2)
    _if_rocm

    def test_ddp_comm_hook_quantize_per_channel_hook(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        reference_grads = self._get_grads(process_group, None)
        hook_grads = self._get_grads(process_group, DDPCommHookType.QUANTIZE_PER_CHANNEL)
        np.testing.assert_allclose(hook_grads, reference_grads, rtol=1e-05, atol=0.0001)
def load_phrases_dict(phrases_dict, style='default'):
    """Merge user-supplied phrase pinyin data into the module-level PHRASES_DICT.

    For style 'tone2' every pinyin string is first converted back to the
    default tone style; otherwise the mapping is merged as-is. Finally the
    segmentation model is retrained so the new phrases can be matched.
    """
    if (style == 'tone2'):
        for (k, value) in phrases_dict.items():
            v = [list(map(_replace_tone2_style_dict_to_default, pys)) for pys in value]
            PHRASES_DICT[k] = v
    else:
        PHRASES_DICT.update(phrases_dict)
    # rebuild the mmseg segmentation trie to include the new phrases
    mmseg.retrain(mmseg.seg)
_task('masked_lm')  # NOTE(review): looks like a mangled '@register_task('masked_lm')' decorator ('@' lost in extraction) — confirm upstream
class MaskedLMTask(LegacyFairseqTask):
    """Fairseq task for masked language modeling (BERT-style pretraining)."""

    def add_args(parser):
        # NOTE(review): conventionally a @staticmethod in fairseq tasks; decorator not visible here.
        parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner')
        parser.add_argument('--sample-break-mode', default='complete', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.')
        parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset')
        parser.add_argument('--mask-prob', default=0.15, type=float, help='probability of replacing a token with mask')
        parser.add_argument('--leave-unmasked-prob', default=0.1, type=float, help='probability that a masked token is unmasked')
        parser.add_argument('--random-token-prob', default=0.1, type=float, help='probability of replacing a token with a random token')
        parser.add_argument('--freq-weighted-replacement', default=False, action='store_true', help='sample random replacement words based on word frequencies')
        parser.add_argument('--mask-whole-words', default=False, action='store_true', help='mask whole words; you may also want to set --bpe')
        parser.add_argument('--shorten-method', default='none', choices=['none', 'truncate', 'random_crop'], help='if not none, shorten sequences that exceed --tokens-per-sample')
        parser.add_argument('--shorten-data-split-list', default='', help='comma-separated list of dataset splits to apply shortening to, e.g., "train,valid" (default: all dataset splits)')

    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed
        # dedicated <mask> symbol appended to the vocabulary
        self.mask_idx = dictionary.add_symbol('<mask>')

    def setup_task(cls, args, **kwargs):
        # NOTE(review): first parameter is 'cls' — conventionally a
        # @classmethod; the decorator is not visible here. Confirm upstream.
        paths = utils.split_paths(args.data)
        assert (len(paths) > 0)
        # the dictionary is shared across shards; load it from the first path
        dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
        logger.info('dictionary: {} types'.format(len(dictionary)))
        return cls(args, dictionary)

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a split and build the full masked-LM data pipeline for it."""
        paths = utils.split_paths(self.args.data)
        assert (len(paths) > 0)
        # round-robin over data shards by epoch
        data_path = paths[((epoch - 1) % len(paths))]
        split_path = os.path.join(data_path, split)
        dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine)
        if (dataset is None):
            raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
        dataset = maybe_shorten_dataset(dataset, split, self.args.shorten_data_split_list, self.args.shorten_method, self.args.tokens_per_sample, self.args.seed)
        # tokens_per_sample - 1 leaves room for the BOS token prepended below
        dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.args.sample_break_mode)
        logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
        dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
        mask_whole_words = (get_whole_word_mask(self.args, self.source_dictionary) if self.args.mask_whole_words else None)
        # src = masked input, tgt = original tokens at masked positions
        (src_dataset, tgt_dataset) = MaskTokensDataset.apply_mask(dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.args.seed, mask_prob=self.args.mask_prob, leave_unmasked_prob=self.args.leave_unmasked_prob, random_token_prob=self.args.random_token_prob, freq_weighted_replacement=self.args.freq_weighted_replacement, mask_whole_words=mask_whole_words)
        # epoch-dependent seed so each epoch gets a fresh shuffle
        with data_utils.numpy_seed((self.args.seed + epoch)):
            shuffle = np.random.permutation(len(src_dataset))
        self.datasets[split] = SortDataset(NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': RightPadDataset(src_dataset, pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_dataset, reduce=False)}, 'target': RightPadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad()), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True)}, sizes=[src_dataset.sizes]), sort_order=[shuffle, src_dataset.sizes])

    def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
        """Wrap raw token batches into the same nested structure used in
        training (without masking) for inference-time scoring."""
        src_dataset = RightPadDataset(TokenBlockDataset(src_tokens, src_lengths, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos'), pad_idx=self.source_dictionary.pad())
        src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
        src_dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': src_dataset, 'src_lengths': NumelDataset(src_dataset, reduce=False)}}, sizes=src_lengths)
        if sort:
            src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
        return src_dataset

    def source_dictionary(self):
        # NOTE(review): conventionally a @property in fairseq tasks (it is
        # accessed as an attribute above); decorator not visible here.
        return self.dictionary

    def target_dictionary(self):
        # NOTE(review): conventionally a @property as well — confirm upstream.
        return self.dictionary
class _DummyChromosomeOutputVariableFactory(ChromosomeOutputVariableFactory):
    """Test double: an output-variable factory that ignores the chromosome
    and always reports a constant."""

    def get_data(self, individual: tsc.TestSuiteChromosome) -> int:
        # fixed sentinel value so callers/tests can assert on it
        return 42
def calculate_matches(all_docs: Dict[(object, Tuple[(str, str)])], answers: List[List[str]], closest_docs: List[Tuple[(List[object], List[float])]], workers_num: int, match_type: str) -> QAMatchStats:
    """Score retrieved passages against gold answers in parallel.

    For every question, workers check whether each of its top-k retrieved
    documents contains one of the gold answers; per-rank hit counts are
    accumulated into cumulative top-k totals.

    Returns
    -------
    QAMatchStats with ``top_k_hits`` (cumulative hits per rank) and the raw
    per-question hit vectors.
    """
    global dpr_all_documents
    # workers read the corpus through this module-level global
    # (shared with forked processes instead of being pickled per task)
    dpr_all_documents = all_docs
    tok_opts = {}
    tokenizer = SimpleTokenizer(**tok_opts)
    processes = ProcessPool(processes=workers_num)
    logger.info('Matching answers in top docs...')
    get_score_partial = partial(check_answer, match_type=match_type, tokenizer=tokenizer)
    questions_answers_docs = zip(answers, closest_docs)
    scores = processes.map(get_score_partial, questions_answers_docs)
    logger.info('Per question validation results len=%d', len(scores))
    n_docs = len(closest_docs[0][0])
    top_k_hits = ([0] * n_docs)
    for question_hits in scores:
        # rank of the first hit for this question, if any
        best_hit = next((i for (i, x) in enumerate(question_hits) if x), None)
        if (best_hit is not None):
            # a hit at rank r counts as a hit for every k >= r
            top_k_hits[best_hit:] = [(v + 1) for v in top_k_hits[best_hit:]]
    return QAMatchStats(top_k_hits, scores)
def get_eth_consensus():
    """Return the consensus-mechanism label of the first running Docker
    container tagged as a SEED-emulator EthereumService, or None implicitly
    if no such container exists."""
    client = docker.from_env()
    all_containers = client.containers.list()
    for container in all_containers:
        labels = container.attrs['Config']['Labels']
        # NOTE(review): the label value is matched with 'in', which is a
        # substring test if the value is a string — confirm intended semantics.
        if ('EthereumService' in labels.get('org.seedsecuritylabs.seedemu.meta.class', [])):
            return labels.get('org.seedsecuritylabs.seedemu.meta.ethereum.consensus')
def GetKCoreNodes_PNGraph(Graph, CoreIdSzV):
    """SWIG wrapper: delegate k-core node counting for a PNGraph to the
    native _snap implementation (fills CoreIdSzV with per-core sizes)."""
    return _snap.GetKCoreNodes_PNGraph(Graph, CoreIdSzV)
def pca_feature(feature, dim=None):
    """Project a feature matrix onto its top principal components.

    Parameters
    ----------
    feature : torch.Tensor or np.ndarray, shape (n_samples, n_features)
        Torch tensors are converted to numpy first (must be on CPU).
    dim : int or None
        Number of components to keep; None keeps the PCA default.

    Returns
    -------
    np.ndarray of shape (n_samples, dim_or_default)
    """
    if torch.is_tensor(feature):
        feature = feature.numpy()
    # FIX: corrected the garbled assertion message ("is nor a tensor or ...")
    assert isinstance(feature, np.ndarray), 'feature is neither a torch tensor nor a numpy ndarray'
    pca = decomposition.PCA(n_components=dim)
    feature_pca = pca.fit_transform(feature)
    return feature_pca
def test_label_combination_hoeffding_tree_coverage():
    """Coverage test: train a LabelCombinationHoeffdingTreeClassifier under a
    tight memory cap and check the model stays within the cap, exercising its
    memory-management code paths."""
    max_samples = 10000
    max_size_kb = 50
    stream = MultilabelGenerator(n_samples=10000, n_features=15, n_targets=3, n_labels=4, random_state=112)
    # small memory_estimate_period + byte cap force the size-management logic to run
    learner = LabelCombinationHoeffdingTreeClassifier(n_labels=3, leaf_prediction='mc', memory_estimate_period=200, max_byte_size=(max_size_kb * (2 ** 10)))
    (X, y) = stream.next_sample(max_samples)
    learner.partial_fit(X, y)
    assert (calculate_object_size(learner, 'kB') <= max_size_kb)
class EvalModel(collections.namedtuple('EvalModel', ('graph', 'model', 'src_file_placeholder', 'tgt_file_placeholder', 'iterator'))):
    """Immutable bundle of everything needed to run evaluation: the graph,
    the model built in it, the source/target file placeholders, and the
    dataset iterator."""
    pass
class CodeGenConfig(PretrainedConfig):
    """Configuration class for the CodeGen model family.

    Stores architecture hyperparameters (layer count, hidden size, rotary
    dimension, dropout rates, ...) and forwards special-token ids to the
    PretrainedConfig base class.
    """
    model_type = 'codegen'
    # canonical HF attribute names -> CodeGen-specific config field names
    attribute_map = {'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}

    def __init__(self, vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function='gelu_new', resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-05, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner  # feed-forward inner width; None -> model default
        self.rotary_dim = rotary_dim  # number of head dims receiving rotary embeddings
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # token ids also go to the base class so generation utilities see them
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
def get_set_encoding(source_set, onehot=True):
    """Build lookup tables for the elements of a set, in sorted order.

    Returns (thing2vec, idx2thing) with one-hot vectors when ``onehot`` is
    True, otherwise (thing2idx, idx2thing) with integer indices.
    """
    ordered = sorted(source_set)
    idx2thing = list(ordered)
    if onehot:
        thing2vec = {s: idx_to_onehot(i, len(ordered)) for (i, s) in enumerate(ordered)}
        return (thing2vec, idx2thing)
    thing2idx = {s: i for (i, s) in enumerate(ordered)}
    return (thing2idx, idx2thing)
def analyze_datars(times1, times2, values1, values2, colors=('red', 'navy')):
    """Annotate the current matplotlib axes with percentage-progress labels
    for two time series, then let adjustText reposition them to avoid
    overlaps with existing annotations/legends.

    For each series, labels the first points where the value reaches 40% and
    100% of the rise above its starting value.
    """
    from adjustText import adjust_text
    all_ts = []
    all_times = [*times1, *times2]
    all_vals = [*values1, *values2]
    for (times, values, color) in zip([times1, times2], [values1, values2], colors):
        # FIX: renamed 'max'/'min' locals — they shadowed the builtins.
        v_max = np.max(values)
        # baseline is the first value, not the series minimum (intentional:
        # progress is measured relative to the starting point)
        v_start = values[0]
        percs = [0.4, 1]
        percs_nice = [(str(int((a * 100))) + '%') for a in percs]
        values = np.asarray(values)
        times = np.asarray(times)
        # first index where the value crosses each percentage threshold
        ids = [np.argmax((values >= ((x * (v_max - v_start)) + v_start))) for x in percs]
        points = [(times[i], values[i], pn) for (i, pn) in zip(ids, percs_nice)]
        ts = [plt.text(*a, color=color) for a in points]
        all_ts.extend(ts)
        ax = plt.gca()
        # existing annotations/legends are obstacles for label placement
        annotations = [child for child in ax.get_children() if isinstance(child, (matplotlib.text.Annotation, matplotlib.legend.Legend))]
        adjust_text(ts, x=all_times, y=all_vals, add_objects=annotations, arrowprops=dict(arrowstyle='->', fill=True, color=color))
def parse_argv(parser):
    """Register all prediction-time command-line arguments on ``parser``."""
    parser.add_argument('--pred_file', required=True, type=str, help='Name of dataset to run prediction for; will be ignored if --evaluate is test')
    parser.add_argument('--tasks', dest='task_names', nargs='+', required=True, help='task names for prediction')
    parser.add_argument('--seed', default=123, type=int, help='Random seed.')
    parser.add_argument('--overwrite', action='store_true', help='whether to overwrite previously written predictions')
    # FIX: the original line was syntactically broken ('default=,').
    # TODO(review): the intended default was lost in extraction; None is used
    # here to mean "no subsampling" — confirm against upstream.
    parser.add_argument('--subsample', default=None, type=int, help='subsample the prediction file')
    parser.add_argument('--eval_dir', type=str, required=False, help='use this directory to store eval results')
    parser.add_argument('--pred_languages', type=str, nargs='+', dest='pred_src_languages', default=['en'], help='Specify dataset source languages used during prediction for multilingual tasks')
    parser.add_argument('--pred_tgt_languages', type=str, nargs='+', default=['en'], help='Specify dataset target languages used during prediction for multilingual tasks')
    parser.add_argument('--main_metric_only', action='store_true', help='If True, we only calculate the deca score metric for each task.')
    parser.add_argument('--reduce_metrics', type=str, default='max', choices=['max', 'top_k'], help='How to calculate the metric when there are multiple outputs per input.`max` chooses the best set of generation hyperparameters and reports the metric for that.`top_k` chooses the best generation output per input, and uses that to output the metric. For example, combining this with the exact match metric gives what is commonly known as the top-k accuracy. Note that the output is meaningless if used with corpus-level metrics.')
    parser.add_argument('--extra_metrics', nargs='+', default=[], help='include these additional metrics in reported results')
    parser.add_argument('--e2e_dialogue_valid_subtasks', nargs='+', type=str, default=['dst', 'api', 'da', 'rg'], help='Evaluate only on these subtasks when calculating e2e_dialogue_score; rg is not included by default')
    parser.add_argument('--e2e_dialogue_valid_submetrics', nargs='+', type=str, default=['em', 'em', 'em', 'casedbleu'], help='Specify metrics to use for each of subtasks in e2e_dialogue_valid_subtasks.')
    parser.add_argument('--e2e_dialogue_valid_subweights', nargs='+', type=float, default=[1.0, 1.0, 1.0, 1.0], help='Specify weights to use for each of subtasks in e2e_dialogue_valid_subtasks.')
def main(args):
    """Dispatch to the plotting routine selected by ``args.func``.

    Raises ValueError for an unrecognized function name.
    """
    func = args.func
    if func == 'plot_pert':
        plot_perturtation(args.plot_data_path)
        return
    if func == 'plot_bpf':
        plot_band_pass_filter(args.plot_data_path)
        return
    if func == 'plot_freq_ana':
        plot_freq_analysis(args.plot_data_path)
        return
    raise ValueError
def LF_icd_complication(c):
    """Labeling function: return 1 when the candidate's complication span
    mentions ICD code prefix '996', else 0."""
    span_text = c.complication.get_span().lower()
    return int('996' in span_text)
class CorpusReader():
    """Streams a (parallel) corpus and serves length-homogeneous batches.

    Sentences are cached in memory; batches are assembled from cached
    sentences whose (src_len, trg_len) pairs are as close as possible, to
    minimize padding. Files are rewound and re-read when exhausted
    (``epoch`` counts the wrap-arounds).
    """

    def __init__(self, src_file, trg_file=None, max_sentence_length=80, cache_size=1000):
        self.src_file = src_file
        # monolingual mode when trg_file is None (target mirrors source)
        self.trg_file = trg_file
        self.epoch = 1
        # indices into self.cache that have not been served yet
        self.pending = set()
        # (src_len, trg_len) -> set of pending cache indices with that length
        self.length2pending = collections.defaultdict(set)
        self.next = 0
        self.cache = []
        self.cache_size = cache_size
        self.max_sentence_length = max_sentence_length

    def _fill_cache(self):
        """Refill the cache up to cache_size, keeping still-pending entries."""
        self.next = 0
        # NOTE(review): pending entries are compacted to the front, but their
        # indices change; the full re-registration loop at the bottom rebuilds
        # pending/length2pending from scratch, so consistency is restored there.
        self.cache = [self.cache[i] for i in self.pending]
        self.pending = set()
        self.length2pending = collections.defaultdict(set)
        while (len(self.cache) < self.cache_size):
            src = self.src_file.readline()
            trg = (self.trg_file.readline() if (self.trg_file is not None) else src)
            src_length = len(tokenize(src))
            trg_length = len(tokenize(trg))
            if ((src == '') and (trg == '')):
                # EOF on both files: rewind and start a new epoch
                print('this should not happen consecutively')
                self.epoch += 1
                self.src_file.seek(0)
                if (self.trg_file is not None):
                    self.trg_file.seek(0)
            elif ((0 < src_length <= self.max_sentence_length) and (0 < trg_length <= self.max_sentence_length)):
                # keep only non-empty sentence pairs within the length limit
                self.cache.append(((src_length, trg_length), src.strip(), trg.strip()))
        # everything in the (re)filled cache is pending again
        for i in range(self.cache_size):
            self.pending.add(i)
            self.length2pending[self.cache[i][0]].add(i)

    def _remove(self, index):
        """Mark a cached sentence as consumed."""
        length = self.cache[index][0]
        self.pending.remove(index)
        self.length2pending[length].remove(index)

    def _score_length(self, src, trg, src_min, src_max, trg_min, trg_max):
        """Distance of a length pair from the batch's current length window
        (Chebyshev-style: the worst single-axis deviation)."""
        return max(abs((src - src_min)), abs((src - src_max)), abs((trg - trg_min)), abs((trg - trg_max)))

    def next_batch(self, size, noop=False):
        """Return (src_sentences, trg_sentences) for a batch of ``size``
        length-similar sentences, longest first."""
        if (size > self.cache_size):
            raise ValueError('Cache size smaller than twice the batch size')
        # refill when less than half the cache is still unserved
        if (len(self.pending) < (self.cache_size / 2)):
            self._fill_cache()
        indices = [self.next]
        length = self.cache[self.next][0]
        target_length = length
        # the batch's length window, grown greedily as we pull in neighbors
        src_min = src_max = length[0]
        trg_min = trg_max = length[1]
        self._remove(self.next)
        while (len(indices) < size):
            try:
                # take another pending sentence with the current target length
                index = self.length2pending[target_length].pop()
                self.pending.remove(index)
                indices.append(index)
            except KeyError:
                # none left at this length: pick the closest available length
                candidates = [(self._score_length(k[0], k[1], src_min, src_max, trg_min, trg_max), k) for (k, v) in self.length2pending.items() if (len(v) > 0)]
                target_length = min(candidates)[1]
                src_min = min(src_min, target_length[0])
                src_max = max(src_max, target_length[0])
                trg_min = min(trg_min, target_length[1])
                trg_max = max(trg_max, target_length[1])
        # longest first (typical requirement for packed RNN batches)
        indices = sorted(indices, key=(lambda i: self.cache[i][0]), reverse=True)
        # advance self.next to the first still-pending cache slot
        for i in range(self.next, self.cache_size):
            if (i in self.pending):
                self.next = i
                break
        return ([self.cache[i][1] for i in indices], [self.cache[i][2] for i in indices])
class BaseBatchNormalizationFolding(BaseKerasFeatureNetworkTest, ABC):
    """Base test verifying that a BatchNormalization layer is folded into the
    preceding linear (conv-type) layer during MCT graph preparation."""

    def __init__(self, unit_test, linear_layer):
        self.linear_layer = linear_layer
        super(BaseBatchNormalizationFolding, self).__init__(unit_test=unit_test, experimental_exporter=True)

    def get_tpc(self):
        # 16 bits with quantization disabled: the test isolates folding itself
        tp = generate_test_tp_model({'weights_n_bits': 16, 'activation_n_bits': 16, 'enable_weights_quantization': False, 'enable_activation_quantization': False})
        return generate_keras_tpc(name='bn_folding_test', tp_model=tp)

    def get_quantization_config(self):
        # NOCLIPPING error method for both weights and activations
        return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING, mct.core.QuantizationErrorMethod.NOCLIPPING, False, False, True)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        """Check the folded kernel/bias against values recomputed from the
        float model's conv + BN parameters, and that BN is gone."""
        float_conv = float_model.layers[1]
        # depthwise conv with depth_multiplier > 1 is reshaped during folding
        is_dw_high_mult = (isinstance(float_conv, layers.DepthwiseConv2D) and (float_conv.depth_multiplier != 1))
        if (float_conv.__class__ == layers.SeparableConv2D):
            # SeparableConv2D weight order puts the folded (pointwise) kernel at [1] — confirm against Keras layer docs
            float_kernel = float_conv.weights[1]
            float_bias = float_conv.weights[2]
            quant_conv = get_layers_from_model_by_type(quantized_model, layers.Conv2D)[0]
        elif is_dw_high_mult:
            float_kernel = float_conv.weights[0]
            float_bias = float_conv.weights[1]
            # folding converts this case into a plain Conv2D
            quant_conv = get_layers_from_model_by_type(quantized_model, layers.Conv2D)[0]
        else:
            float_kernel = float_conv.weights[0]
            float_bias = float_conv.weights[1]
            quant_conv = get_layers_from_model_by_type(quantized_model, self.linear_layer)[0]
        attr = ('depthwise_kernel' if isinstance(quant_conv, layers.DepthwiseConv2D) else 'kernel')
        quant_kernel = getattr(quant_conv, attr)
        quant_bias = quant_conv.bias
        float_bn = float_model.layers[2]
        float_gamma = float_bn.weights[0]
        float_beta = float_bn.weights[1]
        float_moving_mean = float_bn.weights[2]
        float_moving_variance = float_bn.weights[3]
        float_epsilon = float_bn.epsilon
        # expected folded params: w' = w * gamma/sqrt(var+eps); b' = beta + (b - mean) * scale
        weights_scale = (float_gamma / np.sqrt((float_moving_variance + float_epsilon)))
        bias = (float_beta + ((float_bias - float_moving_mean) * weights_scale))
        kernel = update_kernel_for_bn_folding_fn(conv_layer=float_conv, kernel=float_kernel.numpy(), weights_scale=weights_scale.numpy())
        if is_dw_high_mult:
            # collapse the depth-multiplier axis into the channel axis
            ks = kernel.shape
            kernel = np.reshape(kernel, [ks[0], ks[1], 1, (ks[2] * ks[3])])
        self.unit_test.assertTrue(np.all((quant_kernel.numpy() == kernel)))
        self.unit_test.assertTrue(np.all((quant_bias.numpy() == bias)))
        # the BatchNormalization layer itself must be gone from the quantized graph
        self.unit_test.assertFalse((layers.BatchNormalization in [layer.__class__ for layer in quantized_model.layers]))
        # end-to-end sanity: outputs of both models must still agree
        y = float_model.predict(input_x)
        y_hat = quantized_model.predict(input_x)
        cs = cosine_similarity(y, y_hat)
        self.unit_test.assertTrue(np.isclose(cs, 1), msg=f'fail cosine similarity check:{cs}')
def requeue_job():
    """Ask SLURM to requeue the current job, once, from rank 0 only.

    No-op unless running under SLURM and the REQUEUE event is set. All
    distributed workers synchronize at a barrier first so none of them
    keeps running while the job is requeued.
    """
    if SLURM_JOBID is None or not REQUEUE.is_set():
        return
    if distrib.is_initialized():
        distrib.barrier()
    is_rank_zero = (not distrib.is_initialized()) or (distrib.get_rank() == 0)
    if is_rank_zero:
        logger.info(f'Requeueing job {SLURM_JOBID}')
        subprocess.check_call(shlex.split(f'scontrol requeue {SLURM_JOBID}'))
def numberfiltering(sents):
    """Replace every token containing a digit with the placeholder 'BlahBlah'.

    Each sentence is stripped, tokenized on whitespace, filtered, and
    re-joined with single spaces.
    """
    filtered = []
    for sent in sents:
        tokens = ['BlahBlah' if hasNumbers(tok) else tok for tok in sent.strip().split()]
        filtered.append(' '.join(tokens))
    return filtered
def quickumls(doc_list):
    """Run QuickUMLS entity linking over a list of documents and pickle the
    annotated results to ./results/<data>/<model>_<split>.pkl.

    Each doc dict gains a 'result' key mapping (start, end) character spans
    to lists of (umls_cui, similarity_score) candidates.
    """
    from quickumls import QuickUMLS
    assert (not (args.quickumls_path is None)), 'Provide path where QuickUMLS is installed'

    def process_data(pid, doc_list):
        # worker: annotate one chunk of documents with its own matcher instance
        data = []
        matcher = QuickUMLS(args.quickumls_path, 'score', threshold=0.6)
        for (i, doc) in enumerate(doc_list):
            qumls_res = matcher.match(doc['text'])
            res_list = ddict(list)
            for men in qumls_res:
                for cand in men:
                    (start, end) = (cand['start'], cand['end'])
                    umls_cui = cand['cui']
                    score = cand['similarity']
                    res_list[(start, end)].append((umls_cui, score))
            doc['result'] = dict(res_list)
            data.append(doc)
            if ((i % 10) == 0):
                # coarse progress log with a timestamp
                print('Completed [{}] {}, {}'.format(pid, i, ((time.strftime('%d_%m_%Y') + '_') + time.strftime('%H:%M:%S'))))
        return data
    # NOTE(review): parallelism is effectively disabled (num_procs = 1)
    num_procs = 1
    chunks = partition(doc_list, num_procs)
    data_list = mergeList(Parallel(n_jobs=num_procs)((delayed(process_data)(i, chunk) for (i, chunk) in enumerate(chunks))))
    base_dir = './results/{}'.format(args.data)
    make_dir(base_dir)
    dump_pickle(data_list, '{}/{}_{}.pkl'.format(base_dir, args.model, args.split))
class deltaEColorLoss(nn.Module):
    """CIEDE2000 colour-difference loss between generated and ground-truth
    images.

    Each image (CHW RGB tensor) is converted to Lab; the loss is the mean
    Delta-E 2000 over all pixels, averaged over the batch.
    """

    def __init__(self, normalize=None):
        super(deltaEColorLoss, self).__init__()
        # kept for backward compatibility: mirrors the last batch's per-image losses
        self.loss = []
        self.normalize = normalize
        self.device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))

    def torchTensorToNumpy(self, image):
        """Convert a CHW torch tensor to an HWC numpy array."""
        # BUG FIX: CHW -> HWC requires a transpose; the original used
        # reshape(H, W, C), which keeps the flat element order and scrambles
        # the channel/pixel layout fed to rgb2lab.
        return image.cpu().detach().numpy().transpose(1, 2, 0)

    def __call__(self, genImage, gtImage):
        # BUG FIX: use a fresh list each call — the original appended to
        # self.loss forever, so later calls averaged over all past batches.
        per_image_losses = []
        for pair in range(len(genImage)):
            imageGTNP = self.torchTensorToNumpy(gtImage[pair])
            imageGenNP = self.torchTensorToNumpy(genImage[pair])
            deltaE = np.absolute(color.deltaE_ciede2000(color.rgb2lab(imageGTNP), color.rgb2lab(imageGenNP)))
            if self.normalize:
                # NOTE(review): Delta-E 2000 values are roughly 0-100, not
                # 0-255 — confirm this scale factor is intended.
                deltaE /= 255.0
            per_image_losses.append(np.mean(deltaE))
        self.loss = per_image_losses
        deltaELoss = torch.mean(torch.tensor(per_image_losses, requires_grad=True)).to(self.device)
        return deltaELoss
def sxs_handler(format_string):
    """Resolve a format string to the matching sxs format handler.

    Prefixed strings ('catalog...', 'metadata...', 'horizons...',
    'waveforms...') route to that group's registry (empty remainder falls
    back to the group default). Unprefixed strings are searched across all
    four registries and must appear in exactly one of them.

    Raises
    ------
    ValueError
        For an empty string, an ambiguous string, or an unknown format.
    """
    import itertools
    import re
    from . import catalog, metadata, horizons, waveforms
    if (not format_string):
        raise ValueError('Empty string cannot be associated with a handler')
    elif format_string.lower().startswith('catalog'):
        # strip the group prefix (and optional dot); '' keys the group default
        format_string = re.sub('^catalog\\.?', '', format_string, count=1, flags=re.IGNORECASE)
        return catalog.formats.get(format_string, catalog.formats[None])
    elif format_string.lower().startswith('metadata'):
        format_string = re.sub('^metadata\\.?', '', format_string, count=1, flags=re.IGNORECASE)
        return metadata.formats.get(format_string, metadata.formats[None])
    elif format_string.lower().startswith('horizons'):
        format_string = re.sub('^horizons\\.?', '', format_string, count=1, flags=re.IGNORECASE)
        return horizons.formats.get(format_string, horizons.formats[None])
    elif format_string.lower().startswith('waveforms'):
        format_string = re.sub('^waveforms\\.?', '', format_string, count=1, flags=re.IGNORECASE)
        return waveforms.formats.get(format_string, waveforms.formats[None])
    else:
        format_list = [catalog.formats, metadata.formats, horizons.formats, waveforms.formats]
        format_cycler = itertools.cycle(format_list)
        for _ in range(len(format_list)):
            format_dict = next(format_cycler)
            if (format_string in format_dict):
                # peek at the other three registries via the cycle to detect ambiguity
                if any(((format_string in next(format_cycler)) for _ in range((len(format_list) - 1)))):
                    raise ValueError(f"Format string '{format_string}' found in multiple sxs format groups")
                return format_dict[format_string]
        raise ValueError(f"Format '{format_string}' is unknown to the `sxs` package; maybe you need to update `sxs`")
def batchnorm_reconstruction_node_matchers() -> NodeOperationMatcher:
    """Build the graph matcher selecting convolution-type nodes
    (DepthwiseConv2D / Conv2D / Conv2DTranspose) whose activation attribute
    is 'linear' — the candidates for batch-norm reconstruction."""
    conv_node = ((NodeOperationMatcher(DepthwiseConv2D) | NodeOperationMatcher(Conv2D)) | NodeOperationMatcher(Conv2DTranspose))
    activation_linear = NodeFrameworkAttrMatcher(ACTIVATION, LINEAR)
    # both conditions must hold on the same node
    source_node = (conv_node & activation_linear)
    return source_node
def parse_args(parser):
    """Parse command-line arguments and regroup them by argument group.

    Positional and standard optional arguments stay flat on the returned
    Namespace; every additional argument group becomes a nested Namespace
    stored under the group's title.

    NOTE(review): relies on argparse private internals (_action_groups,
    _group_actions, _get_kwargs) — brittle across Python versions.
    """
    assert isinstance(parser, ArgumentParser)
    args = parser.parse_args()
    # groups 0 and 1 are argparse's built-in positional/optional groups
    (pos_group, optional_group) = (parser._action_groups[0], parser._action_groups[1])
    args_dict = args._get_kwargs()
    pos_optional_arg_names = ([arg.dest for arg in pos_group._group_actions] + [arg.dest for arg in optional_group._group_actions])
    pos_optional_args = {name: value for (name, value) in args_dict if (name in pos_optional_arg_names)}
    other_group_args = dict()
    if (len(parser._action_groups) > 2):
        # one nested Namespace per user-defined group, keyed by group title
        for group in parser._action_groups[2:]:
            group_arg_names = [arg.dest for arg in group._group_actions]
            other_group_args[group.title] = Namespace(**{name: value for (name, value) in args_dict if (name in group_arg_names)})
    combined_args = pos_optional_args
    combined_args.update(other_group_args)
    return Namespace(**combined_args)
def add_upload_command(subparsers):
    """Register the 'upload' subcommand on a CLI subparsers collection.

    Adds the optional endpoint flag plus the required positional token and
    filename, and wires the subcommand to ``upload_command``.
    """
    from nnabla.utils.cli.uploader import upload_command
    upload_parser = subparsers.add_parser('upload', help='Upload dataset to Neural Network Console.')
    upload_parser.add_argument('-e', '--endpoint', help='set endpoint uri', type=str)
    # positional order matters: token first, then filename
    upload_parser.add_argument('token', help='token for upload')
    upload_parser.add_argument('filename', help='filename to upload')
    upload_parser.set_defaults(func=upload_command)
def _get_name(x): if isinstance(x.userData, dict): return x.userData.get('name') return None
def proc_one(path_midi, path_outfile):
    """Quantize one MIDI file onto a tick grid and pickle the result.

    Reads notes (per mapped instrument), chord/boundary markers, tempo changes
    and a 'global_bpm' marker; quantizes everything to TICK_RESOL, shifts times
    so the first quantized note starts at bar 0, and dumps a dict with keys
    'notes', 'chords', 'tempos', 'labels', 'metadata' to ``path_outfile``.

    Relies on module-level constants: INSTR_NAME_MAP, NOTE_SORTING, TICK_RESOL,
    BAR_RESOL, DEFAULT_VELOCITY_BINS, MIN_VELOCITY, DEFAULT_SHIFT_BINS,
    DEFAULT_BPM_BINS.
    """
    midi_obj = miditoolkit.midi.parser.MidiFile(path_midi)
    # Collect notes per mapped instrument, tagging each note with its index.
    instr_notes = collections.defaultdict(list)
    for instr in midi_obj.instruments:
        if (instr.name not in INSTR_NAME_MAP.keys()):
            continue
        instr_idx = INSTR_NAME_MAP[instr.name]
        for note in instr.notes:
            note.instr_idx = instr_idx
            instr_notes[instr_idx].append(note)
        # Sort by onset, then pitch ascending (0) or descending (1).
        if (NOTE_SORTING == 0):
            instr_notes[instr_idx].sort(key=(lambda x: (x.start, x.pitch)))
        elif (NOTE_SORTING == 1):
            instr_notes[instr_idx].sort(key=(lambda x: (x.start, (- x.pitch))))
        else:
            raise ValueError(' [x] Unknown type of sorting.')
    # Chord markers: everything that is neither 'global_*' nor a boundary.
    chords = []
    for marker in midi_obj.markers:
        if ((marker.text.split('_')[0] != 'global') and ('Boundary' not in marker.text.split('_')[0])):
            chords.append(marker)
    chords.sort(key=(lambda x: x.time))
    tempos = midi_obj.tempo_changes
    tempos.sort(key=(lambda x: x.time))
    # Structural boundary markers become labels.
    labels = []
    for marker in midi_obj.markers:
        if ('Boundary' in marker.text.split('_')[0]):
            labels.append(marker)
    labels.sort(key=(lambda x: x.time))
    # Global BPM from a 'global_bpm_<value>' marker; default 120.
    gobal_bpm = 120
    for marker in midi_obj.markers:
        if ((marker.text.split('_')[0] == 'global') and (marker.text.split('_')[1] == 'bpm')):
            gobal_bpm = int(marker.text.split('_')[2])
    # Shift everything so the first (quantized) note falls in bar 0.
    first_note_time = min([instr_notes[k][0].start for k in instr_notes.keys()])
    last_note_time = max([instr_notes[k][(- 1)].start for k in instr_notes.keys()])
    quant_time_first = int((np.round((first_note_time / TICK_RESOL)) * TICK_RESOL))
    offset = (quant_time_first // BAR_RESOL)
    last_bar = (int(np.ceil((last_note_time / BAR_RESOL))) - offset)
    print(' > offset:', offset)
    print(' > last_bar:', last_bar)
    # Per-instrument grid: quantized onset -> list of processed notes.
    intsr_gird = dict()
    for key in instr_notes.keys():
        notes = instr_notes[key]
        note_grid = collections.defaultdict(list)
        for note in notes:
            note.start = (note.start - (offset * BAR_RESOL))
            note.end = (note.end - (offset * BAR_RESOL))
            quant_time = int((np.round((note.start / TICK_RESOL)) * TICK_RESOL))
            # Snap velocity to the nearest bin, then clamp from below.
            note.velocity = DEFAULT_VELOCITY_BINS[np.argmin(abs((DEFAULT_VELOCITY_BINS - note.velocity)))]
            note.velocity = max(MIN_VELOCITY, note.velocity)
            # Micro-timing shift relative to the grid, snapped to shift bins.
            note.shift = (note.start - quant_time)
            note.shift = DEFAULT_SHIFT_BINS[np.argmin(abs((DEFAULT_SHIFT_BINS - note.shift)))]
            # Duration capped at one bar, then quantized.
            note_duration = (note.end - note.start)
            if (note_duration > BAR_RESOL):
                note_duration = BAR_RESOL
            ntick_duration = int((np.round((note_duration / TICK_RESOL)) * TICK_RESOL))
            note.duration = ntick_duration
            note_grid[quant_time].append(note)
        intsr_gird[key] = note_grid.copy()
    # Chords on the quantized grid (times clamped at 0 after the offset shift).
    chord_grid = collections.defaultdict(list)
    for chord in chords:
        chord.time = (chord.time - (offset * BAR_RESOL))
        chord.time = (0 if (chord.time < 0) else chord.time)
        quant_time = int((np.round((chord.time / TICK_RESOL)) * TICK_RESOL))
        chord_grid[quant_time].append(chord)
    # Tempi on the grid, snapped to BPM bins.
    tempo_grid = collections.defaultdict(list)
    for tempo in tempos:
        tempo.time = (tempo.time - (offset * BAR_RESOL))
        tempo.time = (0 if (tempo.time < 0) else tempo.time)
        quant_time = int((np.round((tempo.time / TICK_RESOL)) * TICK_RESOL))
        tempo.tempo = DEFAULT_BPM_BINS[np.argmin(abs((DEFAULT_BPM_BINS - tempo.tempo)))]
        tempo_grid[quant_time].append(tempo)
    # Labels: only one kept per grid slot (assignment, not append).
    label_grid = collections.defaultdict(list)
    for label in labels:
        label.time = (label.time - (offset * BAR_RESOL))
        label.time = (0 if (label.time < 0) else label.time)
        quant_time = int((np.round((label.time / TICK_RESOL)) * TICK_RESOL))
        label_grid[quant_time] = [label]
    gobal_bpm = DEFAULT_BPM_BINS[np.argmin(abs((DEFAULT_BPM_BINS - gobal_bpm)))]
    song_data = {'notes': intsr_gird, 'chords': chord_grid, 'tempos': tempo_grid, 'labels': label_grid, 'metadata': {'global_bpm': gobal_bpm, 'last_bar': last_bar}}
    fn = os.path.basename(path_outfile)
    os.makedirs(path_outfile[:(- len(fn))], exist_ok=True)
    pickle.dump(song_data, open(path_outfile, 'wb'))
def test_synthetic_sample_results_with_exponential_delay_function_has_same_delays_each_dataset():
    """Two simulators built with the same seed must produce identical,
    expected per-round delays (tiled across actions)."""
    n_actions = 3
    expected_delays = np.tile([2654.0, 381.0, 204.0, 229.0, 839.0], (n_actions, 1)).T

    def sample_delays():
        # Fresh sampler + environment per call, both seeded identically.
        sampler = ExponentialDelaySampler(max_scale=1000.0, random_state=12345)
        env = BanditEnvironmentSimulator(n_actions=n_actions,
                                         reward_function=logistic_sparse_reward_function,
                                         delay_function=sampler.exponential_delay_function,
                                         random_state=12345)
        return env.next_bandit_round_batch(n_rounds=5).round_delays

    assert (sample_delays() == expected_delays).all()
    assert (sample_delays() == expected_delays).all()
def strictly_upper_triangular_matrices(R, n):
    """Return the Lie algebra of strictly upper triangular ``n x n`` matrices
    over ``R``, generated by the superdiagonal matrix units E_{i,i+1}."""
    from sage.matrix.matrix_space import MatrixSpace
    from sage.algebras.lie_algebras.lie_algebra import LieAlgebraFromAssociative
    MS = MatrixSpace(R, n, sparse=True)
    one = R.one()
    # One generator n_i per superdiagonal position (i, i+1).
    gen_names = tuple('n{}'.format(i) for i in range(n - 1))
    superdiag_units = tuple(MS({(i, i + 1): one}) for i in range(n - 1))
    L = LieAlgebraFromAssociative(MS, superdiag_units, names=gen_names)
    L.rename('Lie algebra of {}-dimensional strictly upper triangular matrices over {}'.format(n, L.base_ring()))
    return L
def test_full_scores_chars_length():
    """With BOS/EOS enabled the model scores one extra (EOS) token compared to
    scoring without sentence boundaries; without boundaries the count equals
    the number of characters (whitespace-split tokens)."""
    print(('Loaded language model: %s' % language_model_path))
    with_bounds = list(model.full_scores(sentence_char_split))
    without_bounds = list(model.full_scores(sentence_char_split, bos=False, eos=False))
    print(with_bounds)
    print(without_bounds)
    assert len(with_bounds) == len(without_bounds) + 1
    print(len(without_bounds), len(sentence_char_split.split()))
    assert len(without_bounds) == len(sentence_char_split.split())
    eos_only = list(model.full_scores(sentence_char_split, bos=False, eos=True))
    print(eos_only, len(eos_only))
class TomlArraySeparatorEncoder(TomlEncoder):
    """TOML encoder variant that joins array items with a custom separator
    (a comma optionally padded with whitespace)."""

    def __init__(self, _dict=dict, preserve=False, separator=','):
        super(TomlArraySeparatorEncoder, self).__init__(_dict, preserve)
        if (separator.strip() == ''):
            # Pure-whitespace separator: a comma is still required by TOML,
            # so prepend one and keep the whitespace as padding.
            separator = (',' + separator)
        elif separator.strip(' \t\n\r,'):
            # Any character other than whitespace/comma is invalid.
            raise ValueError('Invalid separator for arrays')
        self.separator = separator

    def dump_list(self, v):
        """Serialize list ``v`` as a TOML array using the configured separator.

        Items are first rendered with ``dump_value``; the while loop then makes
        repeated passes, emitting string results and flattening any list
        results into the next pass.
        """
        t = []
        retval = '['
        for u in v:
            t.append(self.dump_value(u))
        while (t != []):
            s = []
            for u in t:
                if isinstance(u, list):
                    # Defer nested-list contents to the next pass.
                    for r in u:
                        s.append(r)
                else:
                    retval += ((' ' + unicode(u)) + self.separator)
            t = s
        retval += ']'
        return retval
class SoftmaxShift(common.BaseSubstitution):
    """Graph substitution that recenters a node's output statistics by folding
    a constant shift into its bias, so the activation range is symmetric."""

    def __init__(self, nodes: List[BaseNode], bias_str: str):
        super().__init__(matcher_instance=nodes)
        self.bias_str = bias_str

    def substitute(self, graph: Graph, nodes: List[BaseNode]) -> Graph:
        """Shift the first matched node's bias by -(max+min)/2 of its collected
        output statistics and update the stats collector accordingly."""
        target_node = nodes[0]
        if not target_node.is_activation_quantization_enabled():
            return graph
        stats = graph.get_out_stats_collector(target_node)
        if not isinstance(stats, common.StatsCollector):
            return graph
        # Center the observed [min, max] range around zero.
        shift_value = ((- 1) * (stats.mpcc.max + stats.mpcc.min)) / 2
        bias = target_node.get_weights_by_keys(self.bias_str)
        if bias is None:
            bias = 0.0
        target_node.set_weights_by_keys(self.bias_str, bias + shift_value)
        graph.shift_stats_collector(target_node, shift_value)
        return graph
class AST_Comment(AST_Node):
    """Leaf AST node holding a source comment; generates no code."""

    def __init__(self, context, text):
        AST_Node.__init__(self, context)
        self.text = text

    def get_children(self):
        # Comments are leaves.
        return []

    def replace_child(self, old, new):
        raise ValueError('AST_Comment has no children')

    def generate_code(self, sdfg, state):
        # Comments do not contribute to generated code.
        pass

    def __repr__(self):
        escaped = self.text.encode('unicode_escape').decode('utf-8')
        return ('AST_Comment("' + escaped) + '")'
def mse_r(s_hat, log_r_hat, t_hat, y, log_r, t):
    """Total ratio MSE: sum of the y=0 and y=1 component losses."""
    loss_r0 = mse_r0(s_hat, log_r_hat, t_hat, y, log_r, t)
    loss_r1 = mse_r1(s_hat, log_r_hat, t_hat, y, log_r, t)
    return loss_r0 + loss_r1
def down_sample_avg(x, scale_factor=2):
    """Spatially downsample ``x`` by ``scale_factor`` using 3x3 average
    pooling with SAME padding (TensorFlow v1 layers API)."""
    return tf.layers.average_pooling2d(x, pool_size=3, strides=scale_factor, padding='SAME')
def main(args):
    """Entry point: validate the input file, set up logging/tokenizer, then run
    either the full 'pipeline' flow or the single-step 'data_prep' flow and
    log the resulting metrics.

    Raises
    ------
    ValueError
        If the input file extension is not one of the supported types.
    """
    # Only jsonl/txt (per FileExtension) inputs are supported.
    if (os.path.splitext(args.input_file_path)[1] not in FileExtension.as_list()):
        err_msg = f'The input file is not a jsonl or txt file {args.input_file_path}'
        raise ValueError(err_msg)
    verify_input_file(args.input_file_path)
    output_dir = get_output_dir(args.cmd, args.output_path, args.overwrite_output_path)
    # Logging setup + provenance (commit hash, timestamp, CLI args).
    add_file_handler(args.log_file_path, output_dir)
    log_git_commit_hash()
    log_current_datetime()
    log_input_args(args)
    (tokenizer, model_config) = get_tokenizer(args.pretrained_tokenizer, args.tokenizer_class, args.vocab_file, args.merges_file, args.special_tokens_dict)
    category_to_id = get_categories(args.categories_path)
    # Dispatch on subcommand; both branches return a metrics object.
    if (args.cmd == 'pipeline'):
        metrics = pipeline_main(args.input_file_path, tokenizer, model_config, output_dir, args.disable_space_separator, args.keep_prompt_only_sequences, args.prompt_keyword, args.completion_keyword, args.shuffle, args.overwrite_output_path, args.num_workers, args.do_not_balance_hdf5, args.keep_split_jsonls, args.max_seq_length, args.input_packing_config, args.packing_boundary, args.attention_boundary, args.num_training_splits, args.num_dev_splits, args.num_test_splits, args.dev_ratio, args.test_ratio, category_to_id, args.prompt_prefix, args.prompt_postfix)
    elif (args.cmd == 'data_prep'):
        metrics = data_prep_main(args.silent, tokenizer, args.input_file_path, args.output_path, args.max_seq_length, args.input_packing_config, args.packing_boundary, args.attention_boundary, args.disable_space_separator, args.keep_prompt_only_sequences, args.prompt_keyword, args.completion_keyword, category_to_id, args.prompt_prefix, args.prompt_postfix)
    log_metrics(metrics)
    log_elapsed_time()
class RandomUnkFeature(VectorFeature):
    """Vector feature that substitutes a cached random unit vector for words
    whose embedding vector is missing (one stable vector per word)."""

    def __init__(self, parent: EmbeddingBase):
        super().__init__(parent)
        # Cache of word -> random unit vector, so repeats are consistent.
        self.words: Dict[(str, np.ndarray)] = {}

    def apply(self, pos: int, word: str, weight: float, vector) -> Tuple[(float, Optional[np.ndarray])]:
        """Pass a known vector through; otherwise supply the word's random one."""
        if vector is None:
            return (weight, self._vocab_vector(word))
        return (weight, vector)

    def _vocab_vector(self, word: str) -> np.ndarray:
        """Get (or lazily create, L2-normalize and cache) the vector for word."""
        cached = self.words.get(word)
        if cached is None:
            cached = np.random.rand(self.parent.dim())
            cached = cached / np.linalg.norm(cached, ord=2)
            self.words[word] = cached
        return cached
class GraphCL(torch.nn.Module):
    """GraphCL-style contrastive wrapper: a GNN encoder followed by a two-layer
    MLP projection head, trained with an NT-Xent-like loss."""

    def __init__(self, gnn, hid_dim=16):
        super(GraphCL, self).__init__()
        self.gnn = gnn
        self.projection_head = torch.nn.Sequential(
            torch.nn.Linear(hid_dim, hid_dim),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(hid_dim, hid_dim))

    def forward_cl(self, x, edge_index, batch):
        """Encode the (batched) graph and project for contrastive learning."""
        embedding = self.gnn(x, edge_index, batch)
        return self.projection_head(embedding)

    def loss_cl(self, x1, x2):
        """NT-Xent-style loss between two views; note the constant +10 offset,
        reproduced exactly from the original implementation."""
        temperature = 0.1
        batch_size = x1.size(0)
        # Cosine similarity matrix via row-norm outer product.
        norms1 = x1.norm(dim=1)
        norms2 = x2.norm(dim=1)
        cosine = torch.einsum('ik,jk->ij', x1, x2) / torch.einsum('i,j->ij', norms1, norms2)
        scores = torch.exp(cosine / temperature)
        # Diagonal entries are the positive pairs.
        positives = scores[(range(batch_size), range(batch_size))]
        ratio = positives / (scores.sum(dim=1) - positives + 0.0001)
        return (- torch.log(ratio).mean()) + 10
class PersonMaskRCNNDetector(object):
    """Detects the largest person in an image with a pretrained Mask R-CNN and
    returns that person's bounding box and (optionally dilated) binary mask."""

    # Standard torchvision COCO category list; index 1 is 'person'.
    COCO_INSTANCE_CATEGORY_NAMES = ['__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', 'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
    PERSON_IDS = 1

    def __init__(self, ks=3, threshold=0.5, to_gpu=True):
        """ks: dilation kernel size (<=0 disables dilation); threshold: mask
        binarization threshold; to_gpu: move model and kernel to CUDA."""
        super(PersonMaskRCNNDetector, self).__init__()
        self.model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
        self.model.eval()
        self.threshold = threshold
        self.ks = ks
        self.kernel = torch.ones(1, 1, ks, ks, dtype=torch.float32)
        if to_gpu:
            self.model = self.model.cuda()
            self.kernel = self.kernel.cuda()

    def forward(self, images):
        """Run the detector on a list of image tensors."""
        predictions = self.model(images)
        return predictions

    def get_bbox_max_ids(self, labels, bboxs):
        """Return the index of the person detection with the largest box area
        (-1 if no person was detected)."""
        cur_pid = (- 1)
        cur_bbox_area = (- 1)
        for (i, label) in enumerate(labels):
            if (label == self.PERSON_IDS):
                (x0, y0, x1, y1) = bboxs[i]
                cur_area = torch.abs(((x1 - x0) * (y1 - y0)))
                if (cur_area > cur_bbox_area):
                    cur_bbox_area = cur_area
                    cur_pid = i
        return cur_pid

    def inference(self, img):
        """Detect the largest person in ``img`` (expected in [-1, 1]; rescaled
        to [0, 1] before detection) and return (bbox, binary_mask)."""
        img_list = [((img + 1) / 2.0)]
        with torch.no_grad():
            predictions = self.forward(img_list)[0]
        labels = predictions['labels']
        bboxs = predictions['boxes']
        masks = predictions['masks']
        pid = self.get_bbox_max_ids(labels, bboxs)
        # NOTE(review): pid can be -1 when no person is found, which would
        # silently select the last detection here — confirm callers guarantee
        # a person is present.
        pid_bboxs = bboxs[pid]
        pid_masks = masks[pid]
        final_masks = (pid_masks > self.threshold).float()
        if (self.ks > 0):
            # Dilate the binary mask with the ks x ks kernel.
            final_masks = morph(final_masks[None], self.ks, mode='dilate', kernel=self.kernel)
        return (pid_bboxs, final_masks)
def spectrogram(*args, **kwargs):
    """Run the local baseline with the spectrogram model configuration that
    ships next to this module."""
    config_path = os.path.join(os.path.dirname(__file__), 'spectrogram.yaml')
    kwargs['model_config'] = config_path
    return baseline_local(*args, **kwargs)
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
    """cpplint-style check for whitespace issues around parentheses in
    function calls on one source line; reports via ``error``.

    Args:
      filename: name of the file being checked (for error reports).
      clean_lines: CleansedLines-like object; ``elided`` holds comment-free lines.
      linenum: index of the line to inspect.
      error: callback(filename, linenum, category, confidence, message).
    """
    line = clean_lines.elided[linenum]
    # If the line is a control statement, only inspect the text inside its
    # parentheses so the keyword's own spacing is not flagged.
    fncall = line
    for pattern in ('\\bif\\s*\\((.*)\\)\\s*{', '\\bfor\\s*\\((.*)\\)\\s*{', '\\bwhile\\s*\\((.*)\\)\\s*[{;]', '\\bswitch\\s*\\((.*)\\)\\s*{'):
        match = Search(pattern, line)
        if match:
            fncall = match.group(1)
            break
    # Skip keywords, function-pointer casts, and array-of-function-pointer
    # expressions, which legitimately contain space-paren sequences.
    if ((not Search('\\b(if|for|while|switch|return|new|delete|catch|sizeof)\\b', fncall)) and (not Search(' \\([^)]+\\)\\([^)]*(\\)|,$)', fncall)) and (not Search(' \\([^)]+\\)\\[[^\\]]+\\]', fncall))):
        # Space immediately after '(' — unless it is a line continuation.
        if Search('\\w\\s*\\(\\s(?!\\s*\\\\$)', fncall):
            error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call')
        elif Search('\\(\\s+(?!(\\s*\\\\)|\\()', fncall):
            error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (')
        # Space before '(' — allowed in #define/typedef/using and for
        # function-pointer declarations; 'operator' gets confidence 0 since
        # 'operator()' is easily confused.
        if (Search('\\w\\s+\\(', fncall) and (not Search('#\\s*define|typedef|using\\s+\\w+\\s*=', fncall)) and (not Search('\\w\\s+\\((\\w+::)*\\*\\w+\\)\\(', fncall))):
            if Search('\\boperator_*\\b', line):
                error(filename, linenum, 'whitespace/parens', 0, 'Extra space before ( in function call')
            else:
                error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call')
        # Space before ')' (or a ')' that starts a line).
        if Search('[^)]\\s+\\)\\s*[^{\\s]', fncall):
            if Search('^\\s+\\)', fncall):
                error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line')
            else:
                error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )')
def simReadVisionSensor(sensorHandle):
    """Read a vision sensor through the CoppeliaSim C API.

    Returns (state, auxValues2); when ``state == 0`` the auxiliary values are
    grouped into one list per packet, using the counts array to slice them.
    """
    # Out-parameters: the C call allocates and fills these buffers.
    auxValues = ffi.new('float **')
    auxValuesCount = ffi.new('int **')
    state = lib.simReadVisionSensor(sensorHandle, auxValues, auxValuesCount)
    auxValues2 = []
    if (state == 0):
        s = 0
        # NOTE(review): auxValues/auxValuesCount are double pointers, so one
        # would normally expect auxValues[0][...] and auxValuesCount[0][...]
        # dereferences here (range over an int* also looks wrong) — confirm
        # against the original binding before relying on this path.
        for i in range(auxValuesCount[0]):
            auxValues2.append(auxValues[s:(s + auxValuesCount[(i + 1)])])
            s += auxValuesCount[(i + 1)]
        # Buffers were allocated by the C side; release them explicitly.
        simReleaseBuffer(auxValues)
        simReleaseBuffer(auxValuesCount)
    return (state, auxValues2)
def get_word_overlap(text1, text2):
    """Jaccard similarity between the lemmatized token sets of two texts.

    Fix: returns 0.0 when both texts produce no tokens (the original divided
    by ``len(union)`` and raised ZeroDivisionError on empty inputs).

    Returns
    -------
    float
        |intersection| / |union| of the two token sets, in [0, 1].
    """
    tokens1 = tokenize(text1, lemmas=True)
    tokens2 = tokenize(text2, lemmas=True)
    union = set(tokens1) | set(tokens2)
    if not union:
        # No tokens on either side: define overlap as 0 instead of crashing.
        return 0.0
    intersection = set(tokens1) & set(tokens2)
    return len(intersection) / len(union)
def GetQAofImage(id=61512):
    """Fetch every QA pair attached to an image, following API pagination and
    caching image metadata across pages."""
    page = 1
    qas = []
    image_map = {}
    while True:
        request_path = (('/api/v0/image/' + str(id)) + '/qa?page=') + str(page)
        data = utils.RetrieveData(request_path)
        # Resolve image metadata once per distinct image id.
        for entry in data['results']:
            if entry['image'] not in image_map:
                image_map[entry['image']] = GetImageData(id=entry['image'])
        qas.extend(utils.ParseQA(data['results'], image_map))
        if data['next'] is None:
            break
        page += 1
    return qas
def autogen_all():
    """Regenerate the auto-generated interpreter sources under the Sage source
    tree and return the list of affected modules."""
    from sage.env import SAGE_SRC
    target_dir = os.path.join(SAGE_SRC, 'sage', 'ext', 'interpreters')
    interpreters.rebuild(target_dir)
    return ['sage.ext.interpreters']
def LinkFileLock(*args, **kwds):
    """Factory for a hard-link based file lock.

    Thin backward-compatibility wrapper delegating to
    ``linklockfile.LinkLockFile`` via ``_fl_helper``.
    """
    from . import linklockfile
    return _fl_helper(linklockfile.LinkLockFile, 'lockfile.linklockfile', *args, **kwds)
def mobilenet_load_pretrained_imagenet_weights(model):
    """Load ImageNet-pretrained MobileNet weights into ``model``'s conv body
    (and box head, if present).

    A ``.pkl`` file is treated as a Caffe2-style blob dict; anything else is a
    PyTorch checkpoint converted via ``mobilenet_convert_state_dict``.
    """
    weights_path = cfg.TRAIN.IMAGENET_PRETRAINED_WEIGHTS
    _, ext = os.path.splitext(weights_path)
    if ext == '.pkl':
        with open(weights_path, 'rb') as fp:
            src_blobs = pickle.load(fp, encoding='latin1')
        # Some pickles nest the weights under a 'blobs' key.
        if 'blobs' in src_blobs:
            src_blobs = src_blobs['blobs']
        state_dict = src_blobs
    else:
        weights_file = os.path.join(cfg.ROOT_DIR, cfg.TRAIN.IMAGENET_PRETRAINED_WEIGHTS)
        state_dict = mobilenet_convert_state_dict(torch.load(weights_file))
    # strict=False: only matching keys are loaded.
    model.Conv_Body.conv.load_state_dict(state_dict, strict=False)
    if hasattr(model, 'Box_Head'):
        model.Box_Head.conv.load_state_dict(state_dict, strict=False)
class ActuatedTrajectoryDataset(dataset.TensorDataset):
    """Dataset of actuated trajectories.

    Inputs arrive time-major ``(T, B, ...)`` and are stored batch-major
    ``(B, T, ...)``; each item is one trajectory's ``(q, v, u)`` tensors.
    """

    def __init__(self, traj_q_T_B, traj_v_T_B, traj_u_T_B):
        # Transpose time-major inputs to batch-major storage.
        self.q_B_T = traj_q_T_B.transpose(1, 0)
        self.v_B_T = traj_v_T_B.transpose(1, 0)
        self.u_B_T = traj_u_T_B.transpose(1, 0)
        assert (self.q_B_T.size(0) == self.v_B_T.size(0) == self.u_B_T.size(0))

    def __len__(self):
        return self.q_B_T.size(0)

    def __getitem__(self, index):
        return (self.q_B_T[index], self.v_B_T[index], self.u_B_T[index])

    @classmethod
    def FromSystem(cls, system, q_B, v_B, u_T_B, t_points, method='rk4'):
        """Alternate constructor: roll out ``system`` under controls ``u_T_B``
        from initial state ``(q_B, v_B)`` at ``t_points`` and wrap the result.

        Fix: restored the ``@classmethod`` decorator — the method receives
        ``cls`` and ends with ``cls(...)``, so it is clearly an alternate
        constructor and would otherwise receive an instance as ``cls``.
        """
        assert (q_B.size(0) == v_B.size(0) == u_T_B.size(1))
        assert (len(t_points) == u_T_B.size(0))
        if (not isinstance(system, ActuatedODEWrapper)):
            system = ActuatedODEWrapper(system)
        with torch.no_grad():
            (q_T_B, v_T_B) = odeint(system, (q_B, v_B), t_points, u=u_T_B, method=method,
                                    transforms=((lambda x: utils.wrap_to_pi(x, system.thetamask)), (lambda x: x)))
        # Wrap angle coordinates to a canonical range, preserving (T, B, qdim).
        q_T_B = utils.wrap_to_pi(q_T_B.view((- 1), system._qdim), system.thetamask).view(len(t_points), (- 1), system._qdim)
        return cls(q_T_B, v_T_B, u_T_B)
def compute_measures_for_binary_segmentation_summed(predictions, targets):
    """Sum per-image binary segmentation measures over a batch of
    prediction/target pairs and return the accumulated dict."""
    per_image = [compute_measures_for_binary_segmentation_single_image(pred, target)
                 for pred, target in zip(predictions, targets)]
    totals = per_image[0]
    for measures in per_image[1:]:
        for key, value in measures.items():
            totals[key] += value
    return totals
def test_case42():
    """Integration test: create an NGSI-LD subscription on the broker, verify
    registration at the discovery service, PATCH the subscription, and check
    the discovery service again (expects HTTP 200 on the final GET)."""
    url = (brokerIp + '/ngsi-ld/v1/subscriptions/')
    # NOTE(review): this Link header value looks mangled (unbalanced quotes
    # around rel=) — likely extraction damage; confirm against the original
    # which typically reads rel="http://...context..."; type="application/ld+json".
    headers = {'Content-Type': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    r = requests.post(url, data=json.dumps(ld_data.subdata30), headers=headers)
    print(r.content)
    print(r.status_code)
    # Check the subscription surfaced at the discovery service.
    url = (discoveryIp + '/ngsi9/subscription')
    r = requests.get(url)
    print(r.content)
    print(r.status_code)
    # Update the subscription in place.
    url = (brokerIp + '/ngsi-ld/v1/subscriptions/urn:ngsi-ld:Subscription:10')
    headers = {'Content-Type': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    r = requests.patch(url, data=json.dumps(ld_data.subdata31), headers=headers)
    print(r.content)
    print(r.status_code)
    # Final verification: only this response's status is asserted.
    url = (discoveryIp + '/ngsi9/subscription')
    r = requests.get(url)
    print(r.content)
    print(r.status_code)
    assert (r.status_code == 200)
def block1_deactivate_all():
    # Deactivate the sparse grid levels, deepest level first (block3, then
    # block2, then block1) — presumably so children are cleared before their
    # parents; confirm against the SNode tree definition.
    # NOTE(review): Taichi kernels are normally decorated with @ti.kernel; the
    # decorator may have been lost in extraction — confirm at the call site.
    for I in ti.grouped(block3):
        ti.deactivate(block3, I)
    for I in ti.grouped(block2):
        ti.deactivate(block2, I)
    for I in ti.grouped(block1):
        ti.deactivate(block1, I)
class HighResolutionModule(nn.Module):
    """HRNet high-resolution module: parallel branches at different
    resolutions built from ``blocks``, with cross-resolution fuse layers.

    NOTE(review): ``forward`` below only applies the per-branch stacks and
    returns — the constructed ``fuse_layers``/``relu`` are never used there.
    The upstream HRNet forward fuses branches; this copy may be truncated or
    intentionally simplified — confirm against the original.
    """

    def __init__(self, num_branches, blocks, num_blocks, num_inchannels, num_channels, fuse_method, multi_scale_output=True):
        super(HighResolutionModule, self).__init__()
        self._check_branches(num_branches, blocks, num_blocks, num_inchannels, num_channels)
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=True)

    def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels):
        """Validate that the per-branch config lists all have num_branches entries."""
        if (num_branches != len(num_blocks)):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if (num_branches != len(num_channels)):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if (num_branches != len(num_inchannels)):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
        """Build one branch: a stack of ``block`` units, with a 1x1-conv
        downsample on the first unit when channels or stride change."""
        downsample = None
        if ((stride != 1) or (self.num_inchannels[branch_index] != (num_channels[branch_index] * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.num_inchannels[branch_index], (num_channels[branch_index] * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm2d((num_channels[branch_index] * block.expansion), momentum=BN_MOMENTUM))
        layers = []
        layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample))
        # Track the branch's channel count after expansion for later units.
        self.num_inchannels[branch_index] = (num_channels[branch_index] * block.expansion)
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index]))
        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build all parallel branches as a ModuleList."""
        branches = []
        for i in range(num_branches):
            branches.append(self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        """Build the cross-resolution fuse layers.

        For output branch i and input branch j: j > i upsample path uses a 1x1
        conv + BN; j == i is identity (None); j < i downsamples with a chain
        of stride-2 3x3 convs (ReLU on all but the last).
        """
        if (self.num_branches == 1):
            return None
        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range((num_branches if self.multi_scale_output else 1)):
            fuse_layer = []
            for j in range(num_branches):
                if (j > i):
                    fuse_layer.append(nn.Sequential(nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False), BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM)))
                elif (j == i):
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range((i - j)):
                        if (k == ((i - j) - 1)):
                            # Last step maps to the target branch's channels.
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM), nn.ReLU(inplace=True)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)

    def get_num_inchannels(self):
        """Return the (post-expansion) channel counts per branch."""
        return self.num_inchannels

    def forward(self, x):
        """Apply each branch to its input; single-branch case short-circuits.

        See class NOTE(review): no fusion is performed here.
        """
        if (self.num_branches == 1):
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        return x
def _ntuple(n): def parse(x): if isinstance(x, collections.Iterable): return x return tuple(repeat(x, n)) return parse
class TestKind(util.F2PyTest):
    """f2py regression test: Fortran selected(int|real)kind must agree with
    NumPy's selected_int_kind/selected_real_kind (or return -1)."""
    sources = [_path('src', 'kind', 'foo.f90')]
    # NOTE(review): the bare `.slow` below looks like extraction residue of a
    # decorator (e.g. `@pytest.mark.slow`); as written it is not valid Python
    # — confirm against the original file before use.
    .slow
    def test_all(self):
        selectedrealkind = self.module.selectedrealkind
        selectedintkind = self.module.selectedintkind
        # Compare the Fortran results against NumPy over a range of precisions;
        # -1 (unsupported kind) is always acceptable.
        for i in range(40):
            assert_((selectedintkind(i) in [selected_int_kind(i), (- 1)]), ('selectedintkind(%s): expected %r but got %r' % (i, selected_int_kind(i), selectedintkind(i))))
        for i in range(20):
            assert_((selectedrealkind(i) in [selected_real_kind(i), (- 1)]), ('selectedrealkind(%s): expected %r but got %r' % (i, selected_real_kind(i), selectedrealkind(i))))
def build_head(cfg):
    """Instantiate a model head from a config dict.

    ``cfg`` must contain a 'name' key selecting the head class; all remaining
    entries are forwarded as keyword arguments to its constructor.

    Raises
    ------
    NotImplementedError
        If 'name' does not match any known head.
    """
    head_cfg = deepcopy(cfg)
    name = head_cfg.pop('name')
    # Dispatch table replacing the original if/elif chain; same classes.
    registry = {
        'YOLOv5Head': YOLOv5Head,
        'YOLOXHead': YOLOXHead,
        'YOLOv6Effidehead': YOLOv6Effidehead,
        'YOLOv7Head': YOLOv7Head,
        'FCOSHead': FCOSHead,
        'NanoDetHead': NanoDetHead,
        'NanoDetPlusHead': NanoDetPlusHead,
        'NanoDetPlusAuxHead': NanoDetPlusAuxHead,
        'GFocalHeadV2': GFocalHeadV2,
        'YOLOPHead': YOLOPHead,
        'EfficientdetHead': EfficientdetHead,
        'FastestDetHead': FastestDetHead,
        'Deeplabv3Head': Deeplabv3Head,
        'Deeplabv3PlusHead': Deeplabv3PlusHead,
        'STDCHead': STDCHead,
        'FCNHead': FCNHead,
        'LightHamHead': LightHamHead,
        'LSPNetHead': LSPNetHead,
        'PPLiteSegHead': PPLiteSegHead,
        'RegSegHead': RegSegHead,
        'SGCPNetHead': SGCPNetHead,
        'TopFormerHead': TopFormerHead,
        'PSPHead': PSPHead,
        'UPerHead': UPerHead,
        'SegFormerHead': SegFormerHead,
        'UperNetAlignHead': UperNetAlignHead,
        'UpConcatHead': UpConcatHead,
        'OpenPoseHead': OpenPoseHead,
    }
    head_cls = registry.get(name)
    if head_cls is None:
        raise NotImplementedError(name)
    return head_cls(**head_cfg)
class Function_Bessel_J(BuiltinFunction):
    """Symbolic Bessel function of the first kind J_n(x), with exact special
    values, numerical evaluation via mpmath, and order-recurrence derivative."""

    def __init__(self):
        BuiltinFunction.__init__(self, 'bessel_J', nargs=2, conversions=dict(maple='BesselJ', mathematica='BesselJ', maxima='bessel_j', sympy='besselj', fricas='besselJ', giac='BesselJ'))

    def _eval_(self, n, x):
        """Exact special values: J_0(0)=1, J_n(0)=0 for n>0 or integer n,
        unsigned infinity for Re(n)<0 at 0, and the half-integer closed forms."""
        if ((not isinstance(x, Expression)) and (x == 0)):
            if (n == 0):
                return ZZ.one()
            elif ((n.real() > 0) or (n in ZZ)):
                return ZZ.zero()
            elif (n.real() < 0):
                return unsigned_infinity
        if (n == QQ((1, 2))):
            return (sqrt(((2 / pi) / x)) * sin(x))
        elif (n == QQ(((- 1), 2))):
            return (sqrt(((2 / pi) / x)) * cos(x))

    def _evalf_(self, n, x, parent=None, algorithm=None):
        """Numerical evaluation; tries the element's own jn method (integer
        order) before falling back to mpmath's besselj."""
        if (parent is not None):
            x = parent(x)
        try:
            return x.jn(Integer(n))
        except Exception:
            pass
        (n, x) = get_coercion_model().canonical_coercion(n, x)
        return _mpmath_utils_call(_mpmath_besselj, n, x, parent=parent)

    def _derivative_(self, n, x, diff_param):
        """d/dx J_n(x) = (J_{n-1}(x) - J_{n+1}(x)) / 2; derivative in the
        order n is not implemented."""
        if (diff_param == 1):
            return ((bessel_J((n - 1), x) - bessel_J((n + 1), x)) / Integer(2))
        else:
            raise NotImplementedError('derivative with respect to order')

    def _print_latex_(self, n, z):
        """LaTeX form J_{n}(z)."""
        return ('J_{%s}(%s)' % (latex(n), latex(z)))
def record_result(results_dir, result):
    """Append ``result`` as one JSON line to results.json inside ``results_dir``."""
    results_path = os.path.join(results_dir, 'results.json')
    with open(results_path, 'a') as results_file:
        results_file.write(json.dumps(result) + '\n')
# NOTE(review): `_metaclass(ABCMeta)` below is likely residue of a six-style
# `@add_metaclass(ABCMeta)` decorator mangled during extraction — confirm
# against the original file.
_metaclass(ABCMeta)
class CostFunction(object):
    """Abstract interface for a trajectory cost function over a state space of
    dimension ``ds`` and action space of dimension ``da``; picklable via the
    (ds, da) pair."""

    def __init__(self, ds, da, *args, **kwargs):
        (self.ds, self.da) = (ds, da)

    def get_parameters(self):
        """Return learnable parameters (abstract; subclasses override)."""
        pass

    def log_likelihood(self, states, costs):
        """Log-likelihood of observed costs for given states (abstract)."""
        pass

    def evaluate(self, states):
        """Evaluate the cost for given states (abstract)."""
        pass

    def __getstate__(self):
        # Only the dimensions are pickled; parameters are reconstructed.
        return {'ds': self.ds, 'da': self.da}

    def __setstate__(self, state):
        self.__init__(state['ds'], state['da'])

    def is_cost_function(self):
        """Marker method used for duck-typed identification."""
        pass
def report_upload(setup_server, next_url, upload_message, correlation_id):
    """Register a fake POST /reports/upload/ endpoint on the test server that
    responds 202 with the given message, next URL and correlation id."""
    def handler(h):
        payload = {'message': upload_message, 'next': next_url, 'correlation_id': correlation_id}
        return h.respond_with_json(payload, status=202)
    return setup_server(handler, 'POST', '/reports/upload/')
class LampOff(Task):
    """RLBench task: press the button to turn a lamp off; the bulb's visual
    color tracks the lamp state."""

    def init_task(self) -> None:
        # Bulb starts lit (white); success is the button joint moving.
        self.bulb_glass_visual = Shape('bulb')
        self.bulb_glass_visual.set_color([1, 1, 1])
        self.joint = Joint('target_button_joint')
        self.condition = JointCondition(self.joint, 0.003)

    def init_episode(self, index: int) -> List[str]:
        self.bulb_glass_visual.set_color([1, 1, 1])
        self.register_success_conditions([self.condition])
        instructions = ['turn off the light', 'press the button to turn off the lamp', 'press the light switch', 'turn the lamp off', 'close the gripper and press on the button until the light turns off']
        return instructions

    def variation_count(self) -> int:
        # Single variation.
        return 1

    def step(self) -> None:
        # Once the press is detected, darken the bulb.
        if self.condition.condition_met() == (True, True):
            self.bulb_glass_visual.set_color([0, 0, 0])
def _worker_init(G, id):
    """Per-worker initializer: when running parallel workers, force Theano to
    CPU and hide CUDA devices, then record the worker id on the shared
    globals object ``G``."""
    if (singleton_pool.n_parallel > 1):
        import os
        # Prevent parallel workers from competing for the GPU.
        os.environ['THEANO_FLAGS'] = 'device=cpu'
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    G.worker_id = id
class OptGapC3Test(AbstractTest):
    """Feasibility/objective-bound test for the OptGapC3 problem solved with a
    hard-coded 5-partition NCF flow."""

    def __init__(self):
        super().__init__()
        self.problem = OptGapC3()

    def name(self):
        return 'optgapc3'

    def run(self):
        flow = NcfEpi.new_total_flow(4)
        partitioning = HardCodedPartitioning(partition_vector=[0, 0, 1, 2, 2, 3, 4])
        flow.solve(self.problem, partitioning)
        # Solution must be feasible with objective in [1, 8].
        self.assert_feasibility(flow)
        self.assert_geq_epsilon(flow.obj_val, 1.0)
        self.assert_leq_epsilon(flow.obj_val, 8.0)
class TFRobertaForTokenClassification(metaclass=DummyObject):
    """Placeholder class emitted when TensorFlow is not installed; any attempt
    to instantiate it raises via ``requires_backends`` with an instructive
    error message."""
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
# NOTE(review): the leading `.parametrize(...)` is likely residue of a
# `@pytest.mark.parametrize(...)` decorator mangled during extraction —
# confirm against the original file.
.parametrize('tree,dataset', [(DecisionTreeClassifier(max_depth=2, random_state=0), datasets.make_classification(random_state=0)), (DecisionTreeRegressor(max_depth=2, random_state=0), datasets.make_regression(random_state=0))])
def test_score_sample_weight(tree, dataset):
    """Scoring with non-uniform sample weights must differ from unweighted
    scoring, for both classifier and regressor trees."""
    rng = np.random.RandomState(0)
    (X, y) = dataset
    tree.fit(X, y)
    # Random positive integer weights make the weighted score diverge.
    sample_weight = rng.randint(1, 10, size=len(y))
    score_unweighted = tree.score(X, y)
    score_weighted = tree.score(X, y, sample_weight=sample_weight)
    msg = 'Unweighted and weighted scores are unexpectedly equal'
    assert (score_unweighted != score_weighted), msg
def anisotropic_primes(self):
    """Return the places where this form is anisotropic, drawn from the prime
    divisors of 2*det plus -1 (the real place)."""
    candidates = prime_divisors(2 * self.det()) + [-1]
    return [p for p in candidates if self.is_anisotropic(p)]
def read_vasp(filename):
    """Read a VASP structure file and return the cell parsed by ``_get_cell``.

    Fix: removed the unreachable ``return None`` that followed the return
    statement in the original.
    """
    with open(filename) as f:
        lines = f.readlines()
    return _get_cell(lines)
def convert_dynamic_fx(graph_module, inplace=False, debug=False):
    """Convert an FX graph module using dynamic quantization.

    Thin wrapper over ``_convert_fx`` with ``is_dynamic_quant=True``.
    """
    return _convert_fx(graph_module, inplace, debug, is_dynamic_quant=True)
def get_fngrad_norm(loader, model, device, fngrads=None, grads=None):
    """L2 norm of all function-space gradients flattened into one vector.

    When ``fngrads`` is not supplied, it is computed via ``get_fngrads``.
    """
    if fngrads is None:
        fngrads = get_fngrads(loader, model, device, grads=grads)
    flat_pieces = [fngrad.view(-1) for fngrad in fngrads]
    return torch.cat(flat_pieces).norm()
def find_bpe_position_by_offset(bpe_offsets, target_offset):
    """Collect BPE token indices whose (start, end) spans lie inside
    ``target_offset``.

    Note: this mirrors the original behavior exactly — *all* sentences are
    scanned (matches accumulate across them) and the returned sentence index
    is the last one enumerated.
    """
    target_start, target_end = target_offset
    bpe_nums = []
    for sent_num, sent in enumerate(bpe_offsets):
        # Skip sentences whose last token starts before the target span.
        if sent[-1][0] < target_start:
            continue
        for bpe_num, bpe in enumerate(sent):
            if target_start <= bpe[0] and bpe[1] <= target_end:
                bpe_nums.append(bpe_num)
    return (sent_num, bpe_nums)
class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        # Attribute names kept as-is for state_dict compatibility.
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
class Test_sctype2char(object):
    """Unit tests for ``np.sctype2char`` across scalar types, Python builtins,
    third-party scalars, array instances, abstract types, and non-types.

    NOTE(review): ``np.unicode_``/``np.bytes_`` aliases and ``sctype2char``
    itself were deprecated/removed in newer NumPy releases; these tests assume
    an older NumPy.
    """

    def test_scalar_type(self):
        assert_equal(np.sctype2char(np.double), 'd')
        # 'l' (C long) is the char for the default integer on this platform.
        assert_equal(np.sctype2char(np.int_), 'l')
        assert_equal(np.sctype2char(np.unicode_), 'U')
        assert_equal(np.sctype2char(np.bytes_), 'S')

    def test_other_type(self):
        # Python builtins map to their NumPy equivalents; unknowns to object.
        assert_equal(np.sctype2char(float), 'd')
        assert_equal(np.sctype2char(list), 'O')
        assert_equal(np.sctype2char(np.ndarray), 'O')

    def test_third_party_scalar_type(self):
        # Non-NumPy scalar types (and their instances) are rejected.
        from numpy.core._rational_tests import rational
        assert_raises(KeyError, np.sctype2char, rational)
        assert_raises(KeyError, np.sctype2char, rational(1))

    def test_array_instance(self):
        # An array instance resolves to its dtype's char.
        assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd')

    def test_abstract_type(self):
        assert_raises(KeyError, np.sctype2char, np.floating)

    def test_non_type(self):
        assert_raises(ValueError, np.sctype2char, 1)
def test_wilson_efficient_sample(kernel, inducing_variable, whiten):
    """A Wilson-style efficient sample must be a deterministic function: two
    evaluations of the same drawn sample at the same inputs must agree."""
    num_features = 100
    eigenfunctions = RandomFourierFeaturesCosine(kernel, num_features, dtype=default_float())
    eigenvalues = np.ones((num_features, 1), dtype=default_float())
    decomposed_kernel = KernelWithFeatureDecomposition(kernel, eigenfunctions, eigenvalues)
    q_mu, q_sqrt = _get_qmu_qsqrt(kernel, inducing_variable)
    # Shrink q_sqrt so the sample is dominated by the mean path.
    scaled_q_sqrt = 0.001 * tf.convert_to_tensor(q_sqrt[np.newaxis])
    sample_func = efficient_sample(inducing_variable, decomposed_kernel, q_mu,
                                   q_sqrt=scaled_q_sqrt, whiten=whiten)
    X = np.linspace(-1, 0, 100).reshape(-1, 1)
    np.testing.assert_array_almost_equal(sample_func(X), sample_func(X))
class BaseConverter(metaclass=ABCMeta): ACCEPTED_MODES = None def __init__(self, modes=[]): self.modes = modes for mode in self.modes: if (mode not in self.ACCEPTED_MODES): raise ValueError(f'Input mode not in {self.ACCEPTED_MODES}') def convert(self): pass def _bbox_expand(bbox_xyxy: List[float], scale_factor: float) -> List[float]: center = [((bbox_xyxy[0] + bbox_xyxy[2]) / 2), ((bbox_xyxy[1] + bbox_xyxy[3]) / 2)] x1 = ((scale_factor * (bbox_xyxy[0] - center[0])) + center[0]) y1 = ((scale_factor * (bbox_xyxy[1] - center[1])) + center[1]) x2 = ((scale_factor * (bbox_xyxy[2] - center[0])) + center[0]) y2 = ((scale_factor * (bbox_xyxy[3] - center[1])) + center[1]) return [x1, y1, x2, y2] def _xyxy2xywh(bbox_xyxy: List[float]) -> List[float]: (x1, y1, x2, y2) = bbox_xyxy return [x1, y1, (x2 - x1), (y2 - y1)]
class hypergeom_gen(rv_discrete):
    """Hypergeometric discrete distribution (``rv_discrete`` subclass).

    Shape parameters: M = total population size, n = number of "good"
    objects in the population, N = number of objects drawn without
    replacement. k counts the good objects in the sample.
    """

    def _rvs(self, M, n, N):
        # numpy's signature is (ngood, nbad, nsample).
        return self._random_state.hypergeometric(n, (M - n), N, size=self._size)

    def _get_support(self, M, n, N):
        # k ranges over [max(0, N - (M - n)), min(n, N)].
        return (np.maximum((N - (M - n)), 0), np.minimum(n, N))

    def _argcheck(self, M, n, N):
        # All counts non-negative and neither n nor N may exceed M.
        cond = (((M > 0) & (n >= 0)) & (N >= 0))
        cond &= ((n <= M) & (N <= M))
        return cond

    def _logpmf(self, k, M, n, N):
        # log[ C(good, k) * C(bad, N-k) / C(tot, N) ] expressed through
        # betaln for numerical stability at large arguments.
        (tot, good) = (M, n)
        bad = (tot - good)
        result = (((((betaln((good + 1), 1) + betaln((bad + 1), 1)) + betaln(((tot - N) + 1), (N + 1))) - betaln((k + 1), ((good - k) + 1))) - betaln(((N - k) + 1), (((bad - N) + k) + 1))) - betaln((tot + 1), 1))
        return result

    def _pmf(self, k, M, n, N):
        return exp(self._logpmf(k, M, n, N))

    def _stats(self, M, n, N):
        # Closed-form mean, variance, skewness (g1) and kurtosis (g2).
        (M, n, N) = ((1.0 * M), (1.0 * n), (1.0 * N))
        m = (M - n)
        p = (n / M)
        mu = (N * p)
        var = (((((m * n) * N) * (M - N)) * 1.0) / ((M * M) * (M - 1)))
        g1 = ((((m - n) * (M - (2 * N))) / (M - 2.0)) * sqrt(((M - 1.0) / (((m * n) * N) * (M - N)))))
        g2 = (((M * (M + 1)) - ((6.0 * N) * (M - N))) - ((6.0 * n) * m))
        g2 *= (((M - 1) * M) * M)
        g2 += (((((6.0 * n) * N) * (M - N)) * m) * ((5.0 * M) - 6))
        g2 /= (((((n * N) * (M - N)) * m) * (M - 2.0)) * (M - 3.0))
        return (mu, var, g1, g2)

    def _entropy(self, M, n, N):
        # Sum entr(pmf) over the support. The lower bound may be negative
        # here; those terms have pmf ~ 0 so entr() contributes 0.
        k = np.r_[(N - (M - n)):(min(n, N) + 1)]
        vals = self.pmf(k, M, n, N)
        return np.sum(entr(vals), axis=0)

    def _sf(self, k, M, n, N):
        # Survival function by direct summation of the upper tail.
        res = []
        for (quant, tot, good, draw) in zip(k, M, n, N):
            k2 = np.arange((quant + 1), (draw + 1))
            res.append(np.sum(self._pmf(k2, tot, good, draw)))
        return np.asarray(res)

    def _logsf(self, k, M, n, N):
        res = []
        for (quant, tot, good, draw) in zip(k, M, n, N):
            # Heuristic: sum the smaller tail directly; otherwise use
            # log1p(-exp(logcdf)) to keep precision.
            if (((quant + 0.5) * (tot + 0.5)) < ((good - 0.5) * (draw - 0.5))):
                res.append(log1p((- exp(self.logcdf(quant, tot, good, draw)))))
            else:
                k2 = np.arange((quant + 1), (draw + 1))
                res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)

    def _logcdf(self, k, M, n, N):
        res = []
        for (quant, tot, good, draw) in zip(k, M, n, N):
            # Mirror of _logsf with the inequality reversed.
            if (((quant + 0.5) * (tot + 0.5)) > ((good - 0.5) * (draw - 0.5))):
                res.append(log1p((- exp(self.logsf(quant, tot, good, draw)))))
            else:
                k2 = np.arange(0, (quant + 1))
                res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)
class ATTACK(object):
    """Registry of adversarial-attack identifiers and hyper-parameter grids.

    Grids for several attacks depend on the active dataset
    (``DATA.CUR_DATASET_NAME``).

    FIX: every accessor declares ``cls`` as its first parameter but the
    ``@classmethod`` decorators were missing (the file shows other stripped
    ``@`` tokens), so calls like ``ATTACK.get_AETypes()`` raised TypeError.
    The decorators are restored here.
    """

    FGSM = 'fgsm'
    BIM = 'bim'
    BIM_L2 = 'bim_l2'
    BIM_Li = 'bim_li'
    DEEPFOOL = 'deepfool'
    CW_L0 = 'cw_l0'
    CW_L2 = 'cw_l2'
    CW_Linf = 'cw_linf'
    JSMA = 'jsma'
    ONE_PIXEL = 'onepixel'
    MIM = 'mim'
    PGD = 'pgd'

    @classmethod
    def get_supported_attacks(cls):
        """All attack identifiers this registry knows about."""
        return [cls.FGSM, cls.BIM_L2, cls.BIM_Li, cls.DEEPFOOL, cls.JSMA,
                cls.CW_L0, cls.CW_L2, cls.CW_Linf, cls.ONE_PIXEL, cls.PGD, cls.MIM]

    @classmethod
    def get_AETypes(cls):
        """Concatenate the adversarial-example type names of every attack."""
        AETypes = []
        AETypes.extend(cls.get_fgsm_AETypes())
        AETypes.extend(cls.get_bim_AETypes())
        AETypes.extend(cls.get_df_AETypes())
        AETypes.extend(cls.get_cwl2_AETypes())
        AETypes.extend(cls.get_jsma_AETypes())
        AETypes.extend(cls.get_op_AETypes())
        AETypes.extend(cls.get_mim_AETypes())
        AETypes.extend(cls.get_pgd_AETypes())
        return AETypes

    # --- FGSM ---
    @classmethod
    def get_fgsm_eps(cls):
        return [0.1, 0.25, 0.3]

    @classmethod
    def get_fgsm_AETypes(cls):
        if DATA.CUR_DATASET_NAME == DATA.cifar_10:
            return ['fgsm_eps10', 'fgsm_eps50', 'fgsm_eps100']
        elif DATA.CUR_DATASET_NAME == DATA.mnist:
            return ['fgsm_eps100', 'fgsm_eps250', 'fgsm_eps300']

    # --- BIM ---
    @classmethod
    def get_bim_nbIter(cls):
        return [100]

    @classmethod
    def get_bim_norm(cls):
        return [np.inf, 2]

    @classmethod
    def get_bim_eps(cls, order):
        # epsilon grid depends on the perturbation norm
        if order == 2:
            return [0.75, 1.0, 1.2]
        elif order == np.inf:
            return [0.075, 0.09, 0.12]

    @classmethod
    def get_bim_AETypes(cls):
        if DATA.CUR_DATASET_NAME == DATA.cifar_10:
            return ['bim_ord2_nbIter100_eps500', 'bim_ord2_nbIter100_eps1000',
                    'bim_ordinf_nbIter100_eps50', 'bim_ordinf_nbIter100_eps100']
        elif DATA.CUR_DATASET_NAME == DATA.mnist:
            return ['bim_ord2_nbIter100_eps750', 'bim_ord2_nbIter100_eps1000',
                    'bim_ord2_nbIter100_eps1200', 'bim_ordinf_nbIter100_eps75',
                    'bim_ordinf_nbIter100_eps90', 'bim_ordinf_nbIter100_eps120']

    # --- DeepFool ---
    @classmethod
    def get_df_maxIter(cls):
        return [500]

    @classmethod
    def get_df_norm(cls):
        return [2]

    @classmethod
    def get_df_overshoots(cls, order):
        if order == 2:
            return [3, 8, 20]
        elif order == np.inf:
            return [0.2, 0.5, 0.9]

    @classmethod
    def get_df_AETypes(cls):
        if DATA.CUR_DATASET_NAME == DATA.cifar_10:
            return ['deepfool_maxIter100', 'deepfool_maxIter10000']
        elif DATA.CUR_DATASET_NAME == DATA.mnist:
            return ['deepfool_l2_overshoot3', 'deepfool_l2_overshoot8',
                    'deepfool_l2_overshoot20']

    # --- Carlini-Wagner ---
    @classmethod
    def get_cwl2_maxIter(cls):
        return [100]

    @classmethod
    def get_cwl2_lr(cls):
        return [0.01, 0.012, 0.015]

    @classmethod
    def get_cwl0_AETypes(cls):
        if DATA.CUR_DATASET_NAME == DATA.cifar_10:
            return []
        elif DATA.CUR_DATASET_NAME == DATA.mnist:
            return []

    @classmethod
    def get_cwl2_AETypes(cls):
        if DATA.CUR_DATASET_NAME == DATA.cifar_10:
            return []
        elif DATA.CUR_DATASET_NAME == DATA.mnist:
            return ['cw_l2_lr10_maxIter100', 'cw_l2_lr12_maxIter100',
                    'cw_l2_lr15_maxIter100']

    @classmethod
    def get_cwlinf_AETypes(cls):
        if DATA.CUR_DATASET_NAME == DATA.cifar_10:
            return []
        elif DATA.CUR_DATASET_NAME == DATA.mnist:
            return []

    # --- JSMA ---
    @classmethod
    def get_jsma_theta(cls):
        if DATA.CUR_DATASET_NAME == DATA.cifar_10:
            return [(- 1.0), (- 0.5), (- 0.3), 0.3, 0.5, 1.0]
        else:
            return [0.15, 0.18, 0.21]

    @classmethod
    def get_jsma_gamma(cls):
        return [0.5]

    @classmethod
    def get_jsma_AETypes(cls):
        if DATA.CUR_DATASET_NAME == DATA.cifar_10:
            return ['jsma_theta30_gamma50', 'jsma_theta50_gamma70']
        elif DATA.CUR_DATASET_NAME == DATA.mnist:
            return ['jsma_theta15_gamma50', 'jsma_theta18_gamma50',
                    'jsma_theta21_gamma50']

    # --- One-pixel ---
    @classmethod
    def get_op_pxCnt(cls):
        return [5, 30, 75]

    @classmethod
    def get_op_maxIter(cls):
        return [30]

    @classmethod
    def get_op_popsize(cls):
        return [100]

    @classmethod
    def get_op_AETypes(cls):
        return ['onepixel_pxCount5_maxIter30_popsize100',
                'onepixel_pxCount30_maxIter30_popsize100',
                'onepixel_pxCount75_maxIter30_popsize100']

    # --- MIM ---
    @classmethod
    def get_mim_eps(cls):
        return [0.05, 0.075, 0.1]

    @classmethod
    def get_mim_nbIter(cls):
        return [1000]

    @classmethod
    def get_mim_decayFactor(cls):
        return [0.75]

    @classmethod
    def get_mim_AETypes(cls):
        return ['mim_eps50_nbIter1000', 'mim_eps75_nbIter1000',
                'mim_eps100_nbIter1000']

    # --- PGD ---
    @classmethod
    def get_pgd_eps(cls):
        return [0.075, 0.09, 0.1]

    @classmethod
    def get_pgd_AETypes(cls):
        if DATA.CUR_DATASET_NAME == DATA.cifar_10:
            return ['pgd_eps500', 'pgd_eps100']
        elif DATA.CUR_DATASET_NAME == DATA.mnist:
            return ['pgd_eps75_nbIter1000_epsIter50', 'pgd_eps90_nbIter1000_epsIter50',
                    'pgd_eps100_nbIter1000_epsIter50']
def train(epoch):
    """Run one training epoch of `pred_net` on features from a frozen `feat_net`.

    Uses module-level globals (trainloader, optimizer, args, feat_net,
    pred_net, criterion, use_cuda). Returns (mean_loss, accuracy_percent).
    """
    global trainloader, optimizer, args, feat_net, pred_net
    pred_net.train()
    feat_net.eval()  # feature extractor stays frozen
    correct = 0
    total = 0
    total_loss = 0
    optimizer.zero_grad()
    tot_iters = len(trainloader)
    # BUG FIX: the original called next(iter(trainloader)) inside the loop,
    # which rebuilds the iterator on every step and therefore keeps fetching
    # the first batch instead of walking the dataset. Build it once.
    data_iter = iter(trainloader)
    for batch_idx in tqdm.tqdm(range(tot_iters), total=tot_iters):
        (inputs, targets) = next(data_iter)
        if use_cuda:
            (inputs, targets) = (inputs.cuda(), targets.cuda())
        outputs = pred_net(feat_net(inputs, state=args.state))
        loss = criterion(outputs, targets)
        loss.backward()
        total_loss += loss.data.cpu()
        (_, predicted) = torch.max(nn.Softmax(dim=1)(outputs).data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        optimizer.step()
        optimizer.zero_grad()
    acc = (100.0 * correct) / total
    return (total_loss / (batch_idx + 1), acc)
class SequenceTaggingDecoderMixin(DecoderMixinBase):
    """Decoder mixin for sequence tagging (BIO-style chunk extraction).

    FIX: the source contained a stray ``.setter`` token and paired
    getter/setter definitions, showing the ``@property`` decorators had been
    stripped (other ``@`` tokens are missing elsewhere in this file). They
    are restored here; ``voc_dim``/``pad_idx`` are also exposed as
    properties — confirm against upstream.
    """

    @property
    def scheme(self):
        """Tagging scheme name (e.g. BIO2)."""
        return self._scheme

    @scheme.setter
    def scheme(self, scheme: str):
        self._scheme = scheme
        # Keep the chunk<->tag translator in sync with the scheme.
        self.translator = ChunksTagsTranslator(scheme=scheme)

    @property
    def idx2tag(self):
        """Index -> tag vocabulary list."""
        return self._idx2tag

    @idx2tag.setter
    def idx2tag(self, idx2tag: List[str]):
        self._idx2tag = idx2tag
        # Rebuild the reverse mapping whenever the vocabulary changes.
        self.tag2idx = ({t: i for (i, t) in enumerate(self.idx2tag)}
                        if (idx2tag is not None) else None)

    @property
    def voc_dim(self):
        """Size of the tag vocabulary."""
        return len(self.tag2idx)

    @property
    def pad_idx(self):
        """Index of the '<pad>' tag."""
        return self.tag2idx['<pad>']

    def exemplify(self, data_entry: dict, training: bool=True):
        """Wrap one data entry as a Tags object for batching."""
        return {'tags_obj': Tags(data_entry, self, training=training)}

    def batchify(self, batch_examples: List[dict]):
        """Collect the per-example Tags objects into one batch dict."""
        return {'tags_objs': [ex['tags_obj'] for ex in batch_examples]}

    def retrieve(self, batch: Batch):
        """Extract gold chunks from a batch."""
        return [tags_obj.chunks for tags_obj in batch.tags_objs]

    def evaluate(self, y_gold: List[List[tuple]], y_pred: List[List[tuple]]):
        """Micro-averaged F1 over predicted vs. gold chunks."""
        (scores, ave_scores) = precision_recall_f1_report(y_gold, y_pred)
        return ave_scores['micro']['f1']
def check_sampler_get_feature_names_out_pandas(name, sampler_orig):
    """Check that a sampler's get_feature_names_out agrees with the pandas
    column names it was fitted on, and rejects mismatched input_features."""
    try:
        import pandas as pd
    except ImportError:
        raise SkipTest('pandas is not installed: not checking column name consistency for pandas')

    tags = sampler_orig._get_tags()
    # Only samplers that validate 2-D array input are checked.
    if '2darray' not in tags['X_types'] or tags['no_validation']:
        return

    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    sampler = clone(sampler_orig)
    X = _enforce_estimator_tags_x(sampler, X)
    n_features = X.shape[1]
    set_random_state(sampler)

    feature_names_in = [f'col{i}' for i in range(n_features)]
    df = pd.DataFrame(X, columns=feature_names_in)
    X_res, y_res = sampler.fit_resample(df, y=y)

    # Wrong names must be rejected ...
    bad_names = [f'bad{i}' for i in range(n_features)]
    with raises(ValueError, match='input_features is not equal to feature_names_in_'):
        sampler.get_feature_names_out(bad_names)

    # ... while the default and the explicit fitted names must agree.
    names_default = sampler.get_feature_names_out()
    names_explicit = sampler.get_feature_names_out(feature_names_in)
    assert_array_equal(names_default, names_explicit)

    n_features_out = X_res.shape[1]
    assert len(names_default) == n_features_out, (
        f'Expected {n_features_out} feature names, got {len(names_default)}')
class TestEmptyField(object):
    """Regression test: structured dtypes may contain zero-sized fields."""

    def test_assign(self):
        # View 10 float32 values (40 bytes) as 5 records of
        # (empty int32 subarray, pair of float32) -> itemsize 8.
        data = np.arange(10, dtype=np.float32)
        data.dtype = [('int', '<0i4'), ('float', '<2f4')]
        assert_(data['int'].shape == (5, 0))
        assert_(data['float'].shape == (5, 2))
class ResBlock(BaseModule):
    """Darknet-style residual block.

    A 1x1 conv squeezes to half the channels, a 3x3 conv expands back, and
    the input is added as an identity shortcut, so in/out shapes match.
    """

    def __init__(self,
                 in_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 init_cfg=None):
        super(ResBlock, self).__init__(init_cfg)
        # The squeeze layer halves the channel count, so it must be even.
        assert in_channels % 2 == 0
        hidden_channels = in_channels // 2
        common = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.conv1 = ConvModule(in_channels, hidden_channels, 1, **common)
        self.conv2 = ConvModule(hidden_channels, in_channels, 3, padding=1, **common)

    def forward(self, x):
        return x + self.conv2(self.conv1(x))
def test_instruction_equal():
    """Two ExecutedInstruction records built from identical fields compare equal."""
    # (module, code_object_id, node_id, opcode, arg, lineno, offset)
    fields = ('foo', 1, 1, 1, None, 42, 42)
    first = ExecutedInstruction(*fields)
    second = ExecutedInstruction(*fields)
    assert first == second
class TestPPOPendulumGRU(TfGraphTestCase):
    """Integration test: PPO with a GaussianGRU policy must learn
    InvertedDoublePendulum-v2 to an average return above 80 in 10 epochs."""

    # FIX: the source contained a stray bare `.mujoco_long` token — a
    # decorator whose `@`-prefixed part was stripped. Restored as the
    # pytest long-running-MuJoCo mark; confirm against upstream history.
    @pytest.mark.mujoco_long
    def test_ppo_pendulum_gru(self):
        with LocalTFRunner(snapshot_config) as runner:
            env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
            gru_policy = GaussianGRUPolicy(env_spec=env.spec)
            baseline = GaussianMLPBaseline(
                env_spec=env.spec,
                regressor_args=dict(hidden_sizes=(32, 32)))
            algo = PPO(env_spec=env.spec,
                       policy=gru_policy,
                       baseline=baseline,
                       max_path_length=100,
                       discount=0.99,
                       gae_lambda=0.95,
                       lr_clip_range=0.2,
                       optimizer_args=dict(batch_size=32, max_epochs=10),
                       stop_entropy_gradient=True,
                       entropy_method='max',
                       policy_ent_coeff=0.02,
                       center_adv=False)
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > 80
class Partition4(nn.Module):
    """Pipeline-parallel partition of a VisionTransformer (blocks 5..7 span).

    Auto-generated stage: holds layers by scope name and replays the traced
    forward. FIX: four matrix products were rendered as `(t_1 t_6)` etc. —
    the `@` matmul operators had been stripped from the source; they are
    restored below (q @ k^T, softmax, attn @ v — the standard attention
    pattern with 12 heads and scale 0.125).
    """

    LAYER_SCOPES = [
        'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Dropout[drop]',
        'VisionTransformer/ModuleList[blocks]/Block[5]/Identity[drop_path]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/LayerNorm[norm1]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Linear[qkv]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Dropout[attn_drop]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Linear[proj]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Dropout[proj_drop]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/Identity[drop_path]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/LayerNorm[norm2]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Linear[fc1]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/GELU[act]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Dropout[drop]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Linear[fc2]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Dropout[drop]',
        'VisionTransformer/ModuleList[blocks]/Block[6]/Identity[drop_path]',
        'VisionTransformer/ModuleList[blocks]/Block[7]/LayerNorm[norm1]',
        'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Linear[qkv]',
        'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Dropout[attn_drop]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:4'):
        super().__init__()
        # Register each traced layer under a stable l_<idx> name.
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register free parameters/buffers (none for this partition).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1]
        # Map stage-local names back to original model attribute paths.
        self.lookup = {'l_0': 'blocks.5.mlp.drop', 'l_1': 'blocks.5.drop_path',
                       'l_2': 'blocks.6.norm1', 'l_3': 'blocks.6.attn.qkv',
                       'l_4': 'blocks.6.attn.attn_drop', 'l_5': 'blocks.6.attn.proj',
                       'l_6': 'blocks.6.attn.proj_drop', 'l_7': 'blocks.6.drop_path',
                       'l_8': 'blocks.6.norm2', 'l_9': 'blocks.6.mlp.fc1',
                       'l_10': 'blocks.6.mlp.act', 'l_11': 'blocks.6.mlp.drop',
                       'l_12': 'blocks.6.mlp.fc2', 'l_13': 'blocks.6.mlp.drop',
                       'l_14': 'blocks.6.drop_path', 'l_15': 'blocks.7.norm1',
                       'l_16': 'blocks.7.attn.qkv', 'l_17': 'blocks.7.attn.attn_drop'}
        self.to(self.device)

    def forward(self, *args):
        (x0, x1) = unflatten(args, self.input_structure)
        # Finish block 5: MLP dropout + drop_path + residual add.
        t_0 = self.l_0(x1)
        t_0 = self.l_1(t_0)
        t_0 = x0 + t_0
        # Block 6 attention.
        t_1 = self.l_2(t_0)
        t_2 = t_1.shape
        t_3 = t_2[0]
        t_4 = t_2[1]
        t_2 = t_2[2]
        t_1 = self.l_3(t_1)
        t_5 = t_2 // 12
        t_5 = t_1.reshape(t_3, t_4, 3, 12, t_5)
        t_5 = t_5.permute(2, 0, 3, 1, 4)
        t_1 = t_5[0]
        t_6 = t_5[1]
        t_5 = t_5[2]
        t_6 = t_6.transpose(-2, -1)
        t_6 = t_1 @ t_6          # q @ k^T (restored matmul)
        t_6 = t_6 * 0.125
        t_6 = t_6.softmax(dim=-1)
        t_6 = self.l_4(t_6)
        t_5 = t_6 @ t_5          # attn @ v (restored matmul)
        t_5 = t_5.transpose(1, 2)
        t_2 = t_5.reshape(t_3, t_4, t_2)
        t_2 = self.l_5(t_2)
        t_2 = self.l_6(t_2)
        t_2 = self.l_7(t_2)
        t_2 = t_0 + t_2
        # Block 6 MLP.
        t_0 = self.l_8(t_2)
        t_0 = self.l_9(t_0)
        t_0 = self.l_10(t_0)
        t_0 = self.l_11(t_0)
        t_0 = self.l_12(t_0)
        t_0 = self.l_13(t_0)
        t_0 = self.l_14(t_0)
        t_0 = t_2 + t_0
        # Block 7 attention (up to attn_drop; rest lives in the next stage).
        t_2 = self.l_15(t_0)
        t_4 = t_2.shape
        t_3 = t_4[0]
        t_5 = t_4[1]
        t_4 = t_4[2]
        t_2 = self.l_16(t_2)
        t_6 = t_4 // 12
        t_6 = t_2.reshape(t_3, t_5, 3, 12, t_6)
        t_6 = t_6.permute(2, 0, 3, 1, 4)
        t_2 = t_6[0]
        t_1 = t_6[1]
        t_6 = t_6[2]
        t_1 = t_1.transpose(-2, -1)
        t_1 = t_2 @ t_1          # q @ k^T (restored matmul)
        t_1 = t_1 * 0.125
        t_1 = t_1.softmax(dim=-1)
        t_1 = self.l_17(t_1)
        t_6 = t_1 @ t_6          # attn @ v (restored matmul)
        t_6 = t_6.transpose(1, 2)
        t_4 = t_6.reshape(t_3, t_5, t_4)
        return list(flatten((t_0, t_4)))

    # Delegate module plumbing to the partition-aware helper functions.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def get_missing_parameters_message(keys: List[str]) -> str:
    """Build a human-readable report of model keys absent from a checkpoint.

    Keys are grouped by common prefix and each group is rendered on its own
    indented, blue-colored line.
    """
    grouped = _group_checkpoint_keys(keys)
    lines = ['  ' + colored(prefix + _group_to_str(members), 'blue')
             for prefix, members in grouped.items()]
    return 'Some model parameters or buffers are not found in the checkpoint:\n' + '\n'.join(lines)
class TestMultivariate(unittest.TestCase):
    """End-to-end DefaultForecaster test on the multivariate SeattleTrail data."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.max_forecast_steps = 10
        self.i = 0  # index of the target sequence to forecast
        dataset = 'seattle_trail'
        df, metadata = SeattleTrail(rootdir=join(rootdir, 'data', 'multivariate', dataset))[0]
        # Train/test boundary = last timestamp of the train+val portion.
        split_time = int(df[metadata['trainval']].index[-1].to_pydatetime().timestamp())
        series = TimeSeries.from_pd(df)
        # Resample with forward-fill and clip outliers, fitted on all data.
        cleanup = TransformSequence([TemporalResample(missing_value_policy='FFill'),
                                     LowerUpperClip(upper=300)])
        cleanup.train(series)
        series = cleanup(series)
        train_data, test_data = series.bisect(split_time)
        # Min-max scaling is fitted on the training split only.
        scaler = MinMaxNormalize()
        scaler.train(train_data)
        self.train_data_norm = scaler(train_data)
        self.test_data_norm = scaler(test_data)
        self.model = DefaultForecaster(DefaultForecasterConfig(target_seq_index=self.i))

    def test_forecast(self):
        logger.info('Training model...')
        self.model.train(self.train_data_norm)
        maxlags = self.model.model.config.maxlags
        dataset = RollingWindowDataset(self.test_data_norm, self.i, maxlags,
                                       self.max_forecast_steps, ts_index=True)
        window, label = next(iter(dataset))
        pred, _ = self.model.forecast(label.time_stamps, window)
        self.assertEqual(len(pred), self.max_forecast_steps)
        smape = ForecastMetric.sMAPE.value(predict=pred, ground_truth=label)
        logger.info(f'SMAPE = {smape}')
        # Round-tripping through save/load must reproduce the forecast.
        savedir = join(rootdir, 'tmp', 'default', 'forecast', 'multi')
        self.model.save(dirname=savedir)
        reloaded = DefaultForecaster.load(dirname=savedir)
        new_pred, _ = reloaded.forecast(label.time_stamps, window)
        new_smape = ForecastMetric.sMAPE.value(predict=new_pred, ground_truth=label)
        self.assertAlmostEqual(smape, new_smape, places=4)
def do_flop(cfg):
    """Measure model FLOPs over `args.num_inputs` samples and log a summary.

    Accepts either a yacs CfgNode or a LazyConfig-style config.
    """
    if isinstance(cfg, CfgNode):
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    else:
        data_loader = instantiate(cfg.dataloader.test)
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    model.eval()

    counts = Counter()
    total_flops = []
    for idx, data in zip(tqdm.trange(args.num_inputs), data_loader):
        flops = FlopCountAnalysis(model, data)
        # Only warn about unsupported/uncalled ops for the first sample.
        if idx > 0:
            flops.unsupported_ops_warnings(False).uncalled_modules_warnings(False)
        counts += flops.by_operator()
        total_flops.append(flops.total())
    logger.info('Flops table computed from only one input sample:\n' + flop_count_table(flops))
    # BUG FIX: the original divided by the literal `.0` (i.e. 0.0), which
    # raises ZeroDivisionError. GFlops = flops / 1e9. The format string was
    # also missing its mean/std separator (restored as '±' — confirm).
    logger.info('Average GFlops for each type of operators:\n' +
                str([(k, v / (idx + 1) / 1e9) for k, v in counts.items()]))
    logger.info('Total GFlops: {:.1f}±{:.1f}'.format(
        np.mean(total_flops) / 1e9, np.std(total_flops) / 1e9))
class StoDepth_BasicBlock(nn.Module):
    """ResNet BasicBlock with stochastic depth: during training the residual
    branch survives with probability ``prob`` (Bernoulli-sampled per forward
    pass); at eval time the branch always runs and, if ``multFlag`` is set,
    is scaled by ``prob`` (its expected survival)."""

    expansion = 1

    def __init__(self, prob, multFlag, inplanes, planes, stride=1, downsample=None):
        super(StoDepth_BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        # Survival probability of the residual branch.
        self.prob = prob
        self.m = torch.distributions.bernoulli.Bernoulli(torch.Tensor([self.prob]))
        # Whether to rescale the branch output by `prob` at eval time.
        self.multFlag = multFlag

    def forward(self, x):
        identity = x.clone()
        if self.training:
            if torch.equal(self.m.sample(), torch.ones(1)):
                # Branch survives this pass: run the convs with grads enabled.
                self.conv1.weight.requires_grad = True
                self.conv2.weight.requires_grad = True
                out = self.conv1(x)
                out = self.bn1(out)
                out = self.relu(out)
                out = self.conv2(out)
                out = self.bn2(out)
                if (self.downsample is not None):
                    identity = self.downsample(x)
                out += identity
            else:
                # Branch dropped: freeze conv weights, forward identity only.
                self.conv1.weight.requires_grad = False
                self.conv2.weight.requires_grad = False
                if (self.downsample is not None):
                    identity = self.downsample(x)
                out = identity
        else:
            # Eval: always run the branch; optionally scale by survival prob.
            out = self.conv1(x)
            out = self.bn1(out)
            out = self.relu(out)
            out = self.conv2(out)
            out = self.bn2(out)
            if (self.downsample is not None):
                identity = self.downsample(x)
            if self.multFlag:
                out = ((self.prob * out) + identity)
            else:
                out = (out + identity)
        out = self.relu(out)
        return out
def create_logger(filepath, rank):
    """Configure and return the root logger.

    DEBUG and above go to a file (non-master ranks write to
    "<filepath>-<rank>"); INFO and above go to the console. The logger also
    gains a `reset_time()` method that restarts the formatter's clock.
    """
    formatter = LogFormatter()

    # File handler only when a path is given; each worker gets its own file.
    if filepath is not None:
        if rank > 0:
            filepath = '%s-%i' % (filepath, rank)
        file_handler = logging.FileHandler(filepath, 'a')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)

    logger = logging.getLogger()
    logger.handlers = []  # drop any previously-installed handlers
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    if filepath is not None:
        logger.addHandler(file_handler)
    logger.addHandler(console_handler)

    def reset_time():
        formatter.start_time = time.time()

    logger.reset_time = reset_time
    return logger
def is_tuple(ann) -> bool:
    """Return True iff `ann` is a parameterized typing.Tuple annotation.

    A bare, unparameterized `Tuple` is rejected via
    raise_error_container_parameter_missing.
    """
    if ann is Tuple:
        raise_error_container_parameter_missing('Tuple')
    if not hasattr(ann, '__module__'):
        return False
    if ann.__module__ != 'typing':
        return False
    origin = getattr(ann, '__origin__', None)
    # __origin__ is typing.Tuple on older Pythons, builtin tuple on 3.7+.
    return origin is Tuple or origin is tuple