def main(argv=sys.argv[1:]):
    p = argparse.ArgumentParser()
    p.add_argument('--contigs-db', help='contigs sqlite database')
    p.add_argument('node_list_file', help='a cdbg_ids.txt.gz file')
    p.add_argument('-o', '--output')
    p.add_argument('-v', '--verbose', action='store_true')
    args = p.parse_args(argv)

    if not args.output:
        outname = args.node_list_file + '.contigs.fa.gz'
    else:
        outname = args.output

    if outname.endswith('.gz'):
        outfp = gzip.open(outname, 'wt')
    else:
        outfp = open(outname, 'wt')

    with gzip.open(args.node_list_file, 'rt') as fp:
        cdbg_shadow = {int(x.strip()) for x in fp if x.strip()}

    if not len(cdbg_shadow):
        print('no contigs to extract; exiting.')
        return 0

    total_bp = 0
    total_seqs = 0

    print(f'reading contigs from sqlite DB {args.contigs_db}')
    print(f'extracting contigs to {outname}.')
    sqlite_db = sqlite3.connect(args.contigs_db)
    for n, record in enumerate(search_utils.contigs_iter_sqlite(sqlite_db)):
        if n % 10000 == 0:
            offset_f = total_seqs / len(cdbg_shadow)
            print('...at n {} ({:.1f}% of shadow)'.format(total_seqs, offset_f * 100), end='\r')

        contig_id = int(record.name)
        if contig_id not in cdbg_shadow:
            continue

        outfp.write('>{}\n{}\n'.format(record.name, record.sequence))
        total_bp += len(record.sequence)
        total_seqs += 1

    print('')
    print('fetched {} contigs, {} bp matching node list.'.format(total_seqs, total_bp))
    return 0

class FixedpointObj(ctypes.c_void_p):

    def __init__(self, fixedpoint):
        self._as_parameter_ = fixedpoint

    def from_param(obj):
        return obj

def getCoordPoints(handRight):
    # The input is a flat list of triples: x values sit at indices 0, 3, 6, ...
    # and y values at indices 1, 4, 7, ...; every third slot is skipped.
    handRightPoints = []
    handRightX = []
    handRightY = []
    for x in range(0, len(handRight), 3):
        handRightX.append(handRight[x])
    for x in range(1, len(handRight), 3):
        handRightY.append(handRight[x])
    for x in range(len(handRightX)):
        handRightPoints.append((int(handRightX[x]), int(handRightY[x])))
    return handRightPoints

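# Usage sketch for getCoordPoints (the sample coordinates below are made up):
flat = [12.0, 34.0, 0.9, 56.0, 78.0, 0.8]
print(getCoordPoints(flat))  # -> [(12, 34), (56, 78)]
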
def have_prerequisites(debug=True):
    try:
        from notebook.notebookapp import NotebookApp
        return True
    except ImportError:
        if debug:
            import traceback
            traceback.print_exc()
        return False

class Program():

    def __init__(self, name, version_cmd, version_regex, environment_var=None, debug=False):
        self.name = name
        self.debug = debug
        if environment_var is not None and environment_var in os.environ:
            if self.debug:
                print(self.name, '- getting path from environment variable',
                      environment_var, '=', os.environ[environment_var], flush=True)
            self.path = os.environ[environment_var]
        else:
            if self.debug:
                print(self.name, '- not using environment variable', flush=True)
            self.path = name
        self.version_cmd = version_cmd
        self.version_regex = version_regex
        if self.debug:
            print(self.name, '- checking which(' + self.path + ')', flush=True)
        self.from_which = shutil.which(self.path)
        if self.debug:
            print(' ... got: "', self.from_which, '"', sep='', flush=True)
        self._set_version()

    def in_path(self):
        return self.from_which is not None

    def _set_version(self):
        self.version = None
        if self.debug:
            print(self.name, '- checking version ...')
        if not self.in_path():
            if self.debug:
                print(' ... not in path so cannot get version', flush=True)
            return
        cmd = self.exe() + ' ' + self.version_cmd
        if self.debug:
            print('Running this command to get version:', cmd)
        cmd_output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        cmd_output = common.decode(cmd_output[0]).split('\n')[:-1] + common.decode(cmd_output[1]).split('\n')[:-1]
        if self.debug:
            print('__________ (begin output from ', cmd, ')___________', sep='')
            print(*cmd_output, sep='\n')
            print('__________ (end of output from ', cmd, ')___________', sep='')
            print('Looking for a match to the regex "', self.version_regex.pattern, '" in the above output', sep='', flush=True)
        for line in cmd_output:
            hits = self.version_regex.search(line)
            if hits:
                if self.debug:
                    print('Match to this line:', line)
                    print('Got version:', hits.group(1), flush=True)
                self.version = hits.group(1)
                break
        else:
            if self.debug:
                print('No match found to the regex', flush=True)

    def version_at_least(self, min_version):
        v = self.version
        if v is None:
            return None
        return LooseVersion(v) >= LooseVersion(min_version)

    def version_at_most(self, max_version):
        v = self.version
        if v is None:
            return None
        return LooseVersion(v) <= LooseVersion(max_version)

    def exe(self):
        return self.path

def run(data_ids: List[int], methods: List[Callable[[], operations.GraphOfOperations]], budget: float, lm_name: str) -> float:
    orig_budget = budget
    data_path = os.path.join(os.path.dirname(__file__), 'sorting_032.csv')
    data = []
    with open(data_path, 'r') as f:
        reader = csv.reader(f)
        next(reader)
        for row in reader:
            data.append([int(row[0]), row[1], row[2]])

    if data_ids is None or len(data_ids) == 0:
        data_ids = list(range(len(data)))
    selected_data = [data[i] for i in data_ids]

    results_dir = os.path.join(os.path.dirname(__file__), 'results')
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    extra_info = f"{lm_name}_{'-'.join([method.__name__ for method in methods])}"
    folder_name = f'{extra_info}_{timestamp}'
    results_folder = os.path.join(results_dir, folder_name)
    os.makedirs(results_folder)

    config = {'data': selected_data, 'methods': [method.__name__ for method in methods],
              'lm': lm_name, 'budget': budget}
    with open(os.path.join(results_folder, 'config.json'), 'w') as f:
        json.dump(config, f)

    logging.basicConfig(filename=os.path.join(results_folder, 'log.log'), filemode='w',
                        format='%(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)

    for method in methods:
        os.makedirs(os.path.join(results_folder, method.__name__))

    for data in selected_data:
        logging.info(f'Running data {data[0]}: {data[1]}')
        if budget <= 0.0:
            logging.error(f'Budget has been depleted, stopping. Data {data[0]} has not been run.')
            break
        for method in methods:
            logging.info(f'Running method {method.__name__}')
            logging.info(f'Budget left: {budget}')
            if budget <= 0.0:
                logging.error(f'Budget has been depleted, stopping. Method {method.__name__} has not been run.')
                break
            lm = language_models.ChatGPT(
                os.path.join(os.path.dirname(__file__), '../../graph_of_thoughts/language_models/config.json'),
                model_name=lm_name, cache=True)
            operations_graph = method()
            executor = controller.Controller(
                lm, operations_graph, SortingPrompter(), SortingParser(),
                {'original': data[1], 'current': '', 'phase': 0, 'method': method.__name__})
            try:
                executor.run()
            except Exception as e:
                logging.error(f'Exception: {e}')
            path = os.path.join(results_folder, method.__name__, f'{data[0]}.json')
            executor.output_graph(path)
            budget -= lm.cost
    return orig_budget - budget

def conv1x1_bn_relu(in_planes, out_planes, stride=1):
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0),
        nn.BatchNorm2d(out_planes),
        nn.ReLU(inplace=True))

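# A minimal usage sketch for conv1x1_bn_relu; assumes the usual
# `import torch` and `import torch.nn as nn` at module level.
block = conv1x1_bn_relu(64, 128)
out = block(torch.randn(1, 64, 32, 32))
print(out.shape)  # torch.Size([1, 128, 32, 32]) -- a 1x1 conv only changes channels
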
def _get_item_settings(item, marker=None):
    timeout = func_only = None
    if not marker:
        marker = item.get_closest_marker('timeout')
    if marker is not None:
        settings = _parse_marker(item.get_closest_marker(name='timeout'))
        timeout = settings.timeout
        func_only = bool(settings.func_only)
    if timeout is None:
        timeout = item.config._env_timeout
    if func_only is None:
        func_only = item.config._env_timeout_func_only
    if func_only is None:
        func_only = False
    return Settings(timeout, func_only)

def test_sine_positional_encoding(num_feats=16, batch_size=2):
    # A non-float scale together with normalize=True should raise.
    with pytest.raises(AssertionError):
        module = SinePositionalEncoding(num_feats, scale=(3.0,), normalize=True)

    module = SinePositionalEncoding(num_feats)
    h, w = 10, 6
    mask = (torch.rand(batch_size, h, w) > 0.5).to(torch.int)
    assert not module.normalize
    out = module(mask)
    assert out.shape == (batch_size, num_feats * 2, h, w)

    module = SinePositionalEncoding(num_feats, normalize=True)
    assert module.normalize
    out = module(mask)
    assert out.shape == (batch_size, num_feats * 2, h, w)

def longest_gap_duration(df: DataFrame, obj_frequencies: DataFrame) -> float:
    if len(obj_frequencies.index) == 0:
        return np.nan

    lgd = 0
    missed_tracks = 0
    for gt_tracking_id in obj_frequencies.index:
        # All frames belonging to this ground-truth track.
        dfo = df.noraw[df.noraw.OId == gt_tracking_id]
        matched = set(dfo[dfo.Type != 'MISS'].index.get_level_values(0).values)
        if len(matched) == 0:
            gap = 0
            missed_tracks += 1
        else:
            gap = 0
            cur_gap = 0
            first_index = dfo.index[0][0]
            last_index = dfo.index[-1][0]
            for i in range(first_index, last_index + 1):
                if i in matched:
                    gap = np.maximum(gap, cur_gap)
                    cur_gap = 0
                else:
                    cur_gap += 1
            gap = np.maximum(gap, cur_gap)
        assert gap >= 0, 'Time difference should be larger than or equal to zero: %.2f' % gap
        lgd += gap * 0.5

    matched_tracks = len(obj_frequencies) - missed_tracks
    if matched_tracks == 0:
        lgd = np.nan
    else:
        lgd = lgd / matched_tracks
    return lgd

def load(model, model_path):
    # Keep only weights whose names also exist in the current model.
    state_dict = torch.load(model_path)
    model.load_state_dict({k: v for k, v in state_dict.items() if k in model.state_dict()})

def test_arraytype_categorical_1():
    pytest.importorskip('pyarrow')

    text = str(ak.str.to_categorical(ak.Array(['one', 'one', 'two', 'three', 'one', 'three'])).type)
    parsedtype = ak.types.from_datashape(text, highlevel=True)
    assert isinstance(parsedtype, ak.types.ArrayType)
    assert str(parsedtype) == text

def test_compare_op_int(dynamic_instr, dummy_module):
    dynamic, instr = dynamic_instr
    dummy_module.compare_op_dummy.__code__ = instr.instrument_module(dummy_module.compare_op_dummy.__code__)
    res = dummy_module.compare_op_dummy(10, 11)
    assert res == 1
    assert dynamic.get_all_constants_for(int) == OrderedSet([11, 10])

def color_jitter_rand(image, brightness=0, contrast=0, saturation=0, hue=0, impl='simclrv2'):
    with tf.name_scope('distort_color'):

        def apply_transform(i, x):

            def brightness_foo():
                if brightness == 0:
                    return x
                else:
                    return random_brightness(x, max_delta=brightness, impl=impl)

            def contrast_foo():
                if contrast == 0:
                    return x
                else:
                    return tf.image.random_contrast(x, lower=1 - contrast, upper=1 + contrast)

            def saturation_foo():
                if saturation == 0:
                    return x
                else:
                    return tf.image.random_saturation(x, lower=1 - saturation, upper=1 + saturation)

            def hue_foo():
                if hue == 0:
                    return x
                else:
                    return tf.image.random_hue(x, max_delta=hue)

            # Dispatch on the transform index i: 0 -> brightness, 1 -> contrast,
            # 2 -> saturation, 3 -> hue.
            x = tf.cond(tf.less(i, 2),
                        lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),
                        lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))
            return x

        # Apply the four jitter ops in a random order, clipping after each one.
        perm = tf.random_shuffle(tf.range(4))
        for i in range(4):
            image = apply_transform(perm[i], image)
            image = tf.clip_by_value(image, 0.0, 1.0)
        return image

class SubsetExtractor():

    def __init__(self, config):
        super().__init__()
        self.data_dir = config.data_dir
        self.save_data_dir = config.save_data_dir

    def extract_text(self, type: str):
        data_path = self._get_json_path(type)
        print(f'Reading json {data_path}')
        data_json = json.load(open(data_path))
        questions = data_json['data']['questions']
        answers = data_json['data']['answers']
        dialogs = data_json['data']['dialogs']
        captions = []
        image_ids = []
        for dialog in dialogs:
            captions.append(dialog['caption'])
            image_ids.append(dialog['image_id'])
        print('Number of dialogs: {}'.format(len(dialogs)))
        print('Number of questions: {}'.format(len(questions)))
        print('Number of answers: {}'.format(len(answers)))
        print('Number of captions: {}'.format(len(captions)))

        save_file_path = self._save_file_path(type)
        text_data = {'questions': questions, 'answers': answers, 'captions': captions}
        self.print_analysis(captions)
        self.print_analysis(questions, 'questions')
        self.print_analysis(answers, 'answers')
        with open(save_file_path, 'wb') as outfile:
            pickle.dump(text_data, outfile)

        captions_file_path = self._save_txt_file_path(dataset_type=type, list_type='captions')
        with open(captions_file_path, 'w') as outfile:
            for item in captions:
                # Write the text itself; formatting the encoded bytes would
                # emit b'...' reprs into the file.
                outfile.write('{}\n'.format(item))
        print('Captions written')

        questions_file_path = self._save_txt_file_path(dataset_type=type, list_type='questions')
        with open(questions_file_path, 'w') as outfile:
            for item in questions:
                outfile.write('{}\n'.format(item))

        answers_file_path = self._save_txt_file_path(dataset_type=type, list_type='answers')
        with open(answers_file_path, 'w') as outfile:
            for item in answers:
                outfile.write('{}\n'.format(item))

        image_ids_file_path = self._save_txt_file_path(dataset_type=type, list_type='image_ids')
        with open(image_ids_file_path, 'w') as outfile:
            for item in image_ids:
                outfile.write('{}\n'.format(item))

    def print_analysis(self, _list, _type='caption'):
        df = pd.DataFrame(_list)
        df.columns = ['text']
        df['words'] = df['text'].str.split().str.len()
        print('\n')
        print('Total unique {} responses'.format(_type))
        print(count_unique(df, 'text'))
        print('\n')
        print('Stats for {} responses'.format(_type))
        print(get_column_stats(df, 'text'))
        print('\n')
        print('Number of words in text for {} responses'.format(_type))
        print(df['words'].describe())
        return

    def get_stats_list(self, _list, _type='caption'):
        # Materialize the map object; otherwise the iterator would be exhausted
        # after its first use below.
        lengths = list(map(len, _list))
        print(lengths)
        average = float(sum(lengths)) / float(len(_list))
        print('Max length of {} list {}'.format(_type, max(lengths)))
        print('Avg length of {} list {}'.format(_type, average))
        p = np.percentile(lengths, 50)
        print('Median of {} list {}'.format(_type, p))
        return

    def _get_json_path(self, type: str, split: str = '1.0') -> str:
        json_path = '{}/visdial_{}_{}.json'.format(self.data_dir, split, type)
        return json_path

    def _save_file_path(self, type: str, split: str = '1.0') -> str:
        file_path = '{}/visdial_{}_{}_raw_text.pkl'.format(self.save_data_dir, split, type)
        return file_path

    def _save_txt_file_path(self, dataset_type: str = 'train', split: str = '1.0', list_type: str = 'captions') -> str:
        file_path = f'{self.save_data_dir}/visdial_{split}_{dataset_type}_{list_type}.txt'
        return file_path

def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_sample
    with pytest.raises(ValueError):
        sample_without_replacement(0, 1)
    with pytest.raises(ValueError):
        sample_without_replacement(1, 2)

    # n_population == n_sample
    assert sample_without_replacement(0, 0).shape == (0,)
    assert sample_without_replacement(1, 1).shape == (1,)

    # n_population > n_sample
    assert sample_without_replacement(5, 0).shape == (0,)
    assert sample_without_replacement(5, 1).shape == (1,)

    # Negative inputs
    with pytest.raises(ValueError):
        sample_without_replacement(-1, 5)
    with pytest.raises(ValueError):
        sample_without_replacement(5, -1)

def softmax(logits, dim=-1, name=None):
    # TF 1.x used `dim=`; newer versions renamed the argument to `axis=`.
    try:
        return tf.nn.softmax(logits, dim=dim, name=name)
    except TypeError:
        return tf.nn.softmax(logits, axis=dim, name=name)

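# Usage sketch for the compatibility shim above: the same call works whether
# the installed TensorFlow expects `dim=` (1.x) or `axis=` (newer releases).
probs = softmax(tf.constant([[1.0, 2.0, 3.0]]))
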
def test(hparams, run_opts, locales, wer_file='wer_test.txt'):
    for locale in locales:
        run_on_main(prepare_common_voice,
                    kwargs={'locales': [locale],
                            'data_folder': hparams['data_folder'],
                            'max_durations': hparams['max_durations']})

        # Chinese and Japanese are scored at the character level.
        if locale in ['zh-CN', 'ja']:
            hparams['wer_computer'] = lambda *args, **kwargs: sb.utils.metric_stats.ErrorRateStats(split_tokens=True)
        else:
            hparams['wer_computer'] = sb.utils.metric_stats.ErrorRateStats

        tokenizer = hparams['wavlm'].tokenizer
        _, _, test_data = dataio_prepare(hparams, tokenizer)

        asr_brain = ASR(modules=hparams['modules'], hparams=hparams, run_opts=run_opts)
        asr_brain.tokenizer = tokenizer

        locale_folder = os.path.join(hparams['output_folder'], locale)
        os.makedirs(locale_folder, exist_ok=True)
        asr_brain.hparams.wer_file = os.path.join(locale_folder, wer_file)

        if hparams['skip_test']:
            # Evaluate a single sample into a temporary file, then discard it.
            train_log_backup = asr_brain.hparams.train_logger.save_file
            asr_brain.hparams.train_logger.save_file = asr_brain.hparams.wer_file = os.path.join(locale_folder, 'tmp.txt')
            test_data.data_ids = list(test_data.data.keys())[:1]
            test_data.data = {k: test_data.data[k] for k in test_data.data_ids}
            asr_brain.evaluate(test_data, min_key='CER', test_loader_kwargs=hparams['valid_dataloader_kwargs'])
            os.remove(asr_brain.hparams.wer_file)
            asr_brain.hparams.train_logger.save_file = train_log_backup
            asr_brain.hparams.wer_file = os.path.join(locale_folder, wer_file)
        else:
            asr_brain.evaluate(test_data, min_key='CER', test_loader_kwargs=hparams['valid_dataloader_kwargs'])

    if not hparams['skip_test']:
        try:
            profile(hparams, run_opts)
        except Exception:
            logging.warning('Install ptflops and torchinfo to profile the model (e.g. `pip install ptflops torchinfo`)')

class SemistandardTableaux_all(SemistandardTableaux, DisjointUnionEnumeratedSets):

    def __init__(self, max_entry=None):
        if max_entry is not PlusInfinity():
            self.max_entry = max_entry

            def SST_n(n):
                return SemistandardTableaux_size(n, max_entry)

            DisjointUnionEnumeratedSets.__init__(self, Family(NonNegativeIntegers(), SST_n),
                                                 facade=True, keepkey=False)
        else:
            self.max_entry = None

    def _repr_(self):
        if self.max_entry is not None:
            return 'Semistandard tableaux with maximum entry %s' % str(self.max_entry)
        return 'Semistandard tableaux'

    def list(self):
        raise NotImplementedError

@dataclass
class CoefficientDrifter:
    drift_interval: int
    transition_period: int = 0
    transition_type: str = 'linear'
    seasonal: bool = False
    base_coefficient_weight: float = 0.0
    effective_dim_action_context: Optional[int] = None
    effective_dim_context: Optional[int] = None
    random_state: int = 12345
    played_rounds: int = 0
    context_coefs: Optional[deque] = None
    action_coefs: Optional[deque] = None
    context_action_coefs: Optional[deque] = None
    base_context_coef: Optional[np.ndarray] = None
    base_action_coef: Optional[np.ndarray] = None
    base_context_action_coef: Optional[np.ndarray] = None

    def __post_init__(self) -> None:
        if self.random_state is None:
            raise ValueError('`random_state` must be given')
        self.random_ = check_random_state(self.random_state)
        self.available_rounds = self.drift_interval
        self.context_coefs = deque(maxlen=2)
        self.action_coefs = deque(maxlen=2)
        self.context_action_coefs = deque(maxlen=2)
        if self.effective_dim_action_context and self.effective_dim_context:
            self.update_coef()

    def update_coef(self) -> None:
        if self.base_context_coef is None:
            (self.base_context_coef, self.base_action_coef,
             self.base_context_action_coef) = sample_random_uniform_coefficients(
                self.effective_dim_action_context, self.effective_dim_context, self.random_)
        if len(self.context_coefs) == 0:
            self.context_coefs.append(self.base_context_coef)
            self.action_coefs.append(self.base_action_coef)
            self.context_action_coefs.append(self.base_context_action_coef)
        if self.seasonal and len(self.context_coefs) == 2:
            # In seasonal mode, alternate between the two stored coefficient sets.
            self.context_coefs.rotate()
            self.action_coefs.rotate()
            self.context_action_coefs.rotate()
        else:
            tmp_context_coef, tmp_action_coef, tmp_action_context_coef = sample_random_uniform_coefficients(
                self.effective_dim_action_context, self.effective_dim_context, self.random_)
            self.context_coefs.append(tmp_context_coef)
            self.action_coefs.append(tmp_action_coef)
            self.context_action_coefs.append(tmp_action_context_coef)

    def get_coefficients(self, n_rounds: int, effective_dim_context: int = None,
                         effective_dim_action_context: int = None,
                         **kwargs) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        if effective_dim_action_context and effective_dim_context:
            eff_dim_not_set = not self.effective_dim_action_context and not self.effective_dim_context
            eff_dim_equal = (self.effective_dim_action_context == effective_dim_action_context
                             and self.effective_dim_context == effective_dim_context)
            if eff_dim_not_set or eff_dim_equal:
                self.effective_dim_action_context = effective_dim_action_context
                self.effective_dim_context = effective_dim_context
            else:
                raise RuntimeError('Trying to change the effective dimensions')

        if len(self.context_coefs) == 0:
            self.update_coef()

        required_rounds = n_rounds
        context_coefs = []
        action_coefs = []
        context_action_coefs = []
        while required_rounds > 0:
            if required_rounds >= self.available_rounds:
                self.append_current_coefs(context_coefs, action_coefs, context_action_coefs,
                                          rounds=self.available_rounds)
                required_rounds -= self.available_rounds
                self.update_coef()
                self.available_rounds = self.drift_interval
            else:
                self.append_current_coefs(context_coefs, action_coefs, context_action_coefs,
                                          rounds=required_rounds)
                self.available_rounds -= required_rounds
                required_rounds = 0
        return (np.vstack(context_coefs), np.vstack(action_coefs), np.vstack(context_action_coefs))

    def append_current_coefs(self, context_coefs: List[np.ndarray], action_coefs: List[np.ndarray],
                             context_action_coefs: List[np.ndarray], rounds: int) -> None:
        shift_start = self.available_rounds - self.transition_period
        transition_steps = np.arange(start=1, stop=self.transition_period + 1)
        if shift_start >= 0:
            transition_steps = np.pad(transition_steps, pad_width=[(shift_start, 0)])
        if shift_start < 0:
            transition_steps = transition_steps[-shift_start:]

        shift_remainder = self.available_rounds - rounds
        if shift_remainder > 0:
            transition_steps = transition_steps[shift_remainder:]

        weights = transition_steps / (self.transition_period + 1)
        if self.transition_type == 'weighted_sampled':
            weights = self.random_.binomial(n=1, p=weights)

        context_coefs.append(self.compute_weighted_coefs(self.context_coefs, self.base_context_coef, rounds, weights))
        action_coefs.append(self.compute_weighted_coefs(self.action_coefs, self.base_action_coef, rounds, weights))
        context_action_coefs.append(self.compute_weighted_coefs(self.context_action_coefs, self.base_context_action_coef, rounds, weights))

    def compute_weighted_coefs(self, coefs, base_coef, rounds, weights):
        base_coef = self.base_coefficient_weight * base_coef
        A = np.tile(coefs[0], [rounds] + [1 for _ in coefs[0].shape])
        B = np.tile(coefs[1], [rounds] + [1 for _ in coefs[1].shape])
        coefs = (base_coef
                 + A * np.expand_dims((1 - self.base_coefficient_weight) * (1 - weights), list(range(1, len(A.shape))))
                 + B * np.expand_dims((1 - self.base_coefficient_weight) * weights, list(range(1, len(B.shape)))))
        return coefs

def create_go_mask(adata, go2gene):
    genes = adata.var_names
    gene2index = {g: i for i, g in enumerate(genes)}
    GO_IDs = sorted(go2gene.keys())
    go_mask = []
    for go in GO_IDs:
        go_genes = go2gene[go]
        go_mask.append([gene2index[gene] for gene in go_genes])
    return go_mask

def generate(max_time, n_sequences, filename='stationary_renewal'):
    times, nll = [], []
    for _ in range(n_sequences):
        # Log-normal inter-event times with unit mean (scale exp(mu) with
        # mu = -s^2/2 gives mean exp(mu + s^2/2) = 1).
        s = np.sqrt(np.log(6 * 6 + 1))
        mu = -s * s / 2
        tau = lognorm.rvs(s=s, scale=np.exp(mu), size=1000)
        lpdf = lognorm.logpdf(tau, s=s, scale=np.exp(mu))

        T = tau.cumsum()
        T = T[T < max_time]
        lpdf = lpdf[:len(T)]
        score = -np.sum(lpdf)
        times.append(T)
        nll.append(score)

    if filename is not None:
        mean_number_items = sum(len(t) for t in times) / len(times)
        nll = [n / mean_number_items for n in nll]
        np.savez(f'{dataset_dir}/{filename}.npz', arrival_times=times, nll=nll,
                 t_max=max_time, mean_number_items=mean_number_items)
    else:
        return times

class LabelingFunction():

    def __init__(self, name, label):
        self.name = name
        self.label = label

class iCIFAR100(iData):
    use_path = False
    train_trsf = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=63 / 255),
        transforms.ToTensor(),
    ]
    test_trsf = [transforms.ToTensor()]
    common_trsf = [transforms.Normalize(mean=(0.5071, 0.4867, 0.4408), std=(0.2675, 0.2565, 0.2761))]
    class_order = np.arange(100).tolist()

    def download_data(self):
        train_dataset = datasets.cifar.CIFAR100('./data', train=True, download=True)
        test_dataset = datasets.cifar.CIFAR100('./data', train=False, download=True)
        self.train_data, self.train_targets = train_dataset.data, np.array(train_dataset.targets)
        self.test_data, self.test_targets = test_dataset.data, np.array(test_dataset.targets)

@pytest.mark.parametrize('create_solver', [f for name, f in ss.solvers.items() if name != 'yices2'])
def test_identity_visit_basic(create_solver):
    solver = create_solver(False)
    bv32 = solver.make_sort(ss.sortkinds.BV, 32)
    x = solver.make_symbol('x', bv32)
    y = solver.make_symbol('y', bv32)
    a = solver.make_symbol('a', bv32)
    b = solver.make_symbol('b', bv32)
    y_assignment = solver.make_term(Ite,
                                    solver.make_term(BVUlt, x, y),
                                    solver.make_term(BVOr, x, a),
                                    solver.make_term(BVOr, x, b))
    idvisitor = ss.IdentityVisitor(solver)
    rebuilt_y_assignment = idvisitor.walk_dag(y_assignment)
    assert y_assignment == rebuilt_y_assignment

def bs(F, K, V, o='call'):
    # Black formula on forward F and strike K, with V the total variance
    # (sigma^2 * T); prices are undiscounted.
    w = 1
    if o == 'put':
        w = -1
    elif o == 'otm':
        # Out-of-the-money: price a call for strikes above 1.0, a put below.
        w = 2 * (K > 1.0) - 1
    sv = np.sqrt(V)
    d1 = np.log(F / K) / sv + 0.5 * sv
    d2 = d1 - sv
    P = w * F * norm.cdf(w * d1) - w * K * norm.cdf(w * d2)
    return P

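# Sanity sketch for bs (illustrative numbers): at the money (F == K) the
# undiscounted call and put prices coincide by put-call symmetry.
print(bs(100.0, 100.0, 0.04, 'call'))  # ~7.97
print(bs(100.0, 100.0, 0.04, 'put'))   # same value
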
class MemnetTest(absltest.TestCase):

    def test_simple_run_and_check_shapes(self):
        batch_size = 64
        vocab_size = 177
        embedding_size = 64
        sentence_size = 11
        memory_size = 320
        linear_output_size = 128
        num_hops = 2
        use_ln = True

        def forward_fn(queries, stories):
            model = processors.MemNetFull(
                vocab_size=vocab_size,
                embedding_size=embedding_size,
                sentence_size=sentence_size,
                memory_size=memory_size,
                linear_output_size=linear_output_size,
                num_hops=num_hops,
                use_ln=use_ln)
            return model._apply(queries, stories)

        forward = hk.transform(forward_fn)
        queries = jnp.ones([batch_size, sentence_size], dtype=jnp.int32)
        stories = jnp.ones([batch_size, memory_size, sentence_size], dtype=jnp.int32)
        key = hk.PRNGSequence(42)
        params = forward.init(next(key), queries, stories)
        model_output = forward.apply(params, None, queries, stories)
        chex.assert_shape(model_output, [batch_size, vocab_size])
        chex.assert_type(model_output, jnp.float32)

class Parser():

    def __init__(self, name, batch_size=64, language_code=None):
        self._parser = load_trained_model(name)
        if torch.cuda.is_available():
            self._parser.cuda()
        if language_code is not None:
            self._language_code = language_code
        else:
            self._language_code = guess_language(self._parser.config['label_vocab'])
        self._tokenizer_lang = TOKENIZER_LOOKUP.get(self._language_code, None)
        self.batch_size = batch_size

    def parse(self, sentence):
        return list(self.parse_sents([sentence]))[0]

    def parse_sents(self, sents):
        if isinstance(sents, str):
            if self._tokenizer_lang is None:
                raise ValueError('No tokenizer available for this language. Please split into individual sentences and tokens before calling the parser.')
            sents = nltk.sent_tokenize(sents, self._tokenizer_lang)

        end_sentinel = object()
        for batch_sents in itertools.zip_longest(*([iter(sents)] * self.batch_size), fillvalue=end_sentinel):
            batch_inputs = []
            for sent in batch_sents:
                if sent is end_sentinel:
                    break
                elif isinstance(sent, str):
                    if self._tokenizer_lang is None:
                        raise ValueError('No word tokenizer available for this language. Please tokenize before calling the parser.')
                    escaped_words = nltk.word_tokenize(sent, self._tokenizer_lang)
                    sent = InputSentence(escaped_words=escaped_words)
                elif isinstance(sent, (list, tuple)):
                    sent = InputSentence(words=sent)
                elif not isinstance(sent, InputSentence):
                    raise ValueError('Sentences must be one of: InputSentence, list, tuple, or str')
                batch_inputs.append(self._with_missing_fields_filled(sent))

            for inp, output in zip(batch_inputs, self._parser.parse(batch_inputs, return_compressed=True)):
                if inp.tags is not None:
                    output = output.without_predicted_tags()
                yield output.to_tree(inp.pos(), self._parser.decoder.label_from_index, self._parser.tag_from_index)

    def _with_missing_fields_filled(self, sent):
        if not isinstance(sent, InputSentence):
            raise ValueError('Input is not an instance of InputSentence')
        if sent.words is None and sent.escaped_words is None:
            raise ValueError('At least one of words or escaped_words is required')
        elif sent.words is None:
            sent = dataclasses.replace(sent, words=ptb_unescape(sent.escaped_words))
        elif sent.escaped_words is None:
            escaped_words = [
                word.replace('(', '-LRB-').replace(')', '-RRB-')
                    .replace('{', '-LCB-').replace('}', '-RCB-')
                    .replace('[', '-LSB-').replace(']', '-RSB-')
                for word in sent.words
            ]
            sent = dataclasses.replace(sent, escaped_words=escaped_words)
        elif len(sent.words) != len(sent.escaped_words):
            raise ValueError(f'Length of words ({len(sent.words)}) does not match escaped_words ({len(sent.escaped_words)})')

        if sent.space_after is None:
            if self._language_code == 'zh':
                space_after = [False for _ in sent.words]
            elif self._language_code in ('ar', 'he'):
                space_after = [True for _ in sent.words]
            else:
                space_after = guess_space_after(sent.words)
            sent = dataclasses.replace(sent, space_after=space_after)
        elif len(sent.words) != len(sent.space_after):
            raise ValueError(f'Length of words ({len(sent.words)}) does not match space_after ({len(sent.space_after)})')

        assert len(sent.words) == len(sent.escaped_words) == len(sent.space_after)
        return sent

def get_imdb(name):
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()

def _data_kwargs_from_dataset_key(dataset, key):
    # Targets are not available at inference time.
    available_for_inference = key not in dataset.get_target_list()
    dim = dataset.get_data_dim(key)
    shape = [None] + list(dataset.get_data_shape(key))
    sparse = dataset.is_data_sparse(key)
    dtype = dataset.get_data_dtype(key)
    if not sparse and shape[-1] is None:
        dim = None
    return dict(batch_dim_axis=0, time_dim_axis=1, shape=shape, dim=dim, sparse=sparse,
                dtype=dtype, available_for_inference=available_for_inference)

class ABCFolderDataset(FolderDataset):
    _extension = 'abc'

    def read(self, filename: Tuple[str, Tuple[int, int]]) -> Music:
        filename_, (start, end) = filename
        data = []
        with open(filename_, encoding='utf-8') as f:
            for idx, line in enumerate(f):
                if start <= idx < end and not line.startswith('%'):
                    data.append(line)
        return read_abc_string(''.join(data))

    def on_the_fly(self: FolderDatasetT) -> FolderDatasetT:
        if not self.raw_filenames:
            filenames = sorted(
                filename for filename in self.root.rglob('*.' + self._extension)
                if not str(filename.relative_to(self.root)).startswith('_converted/'))
            self.raw_filenames = []
            for filename in filenames:
                idx = 0
                start = 0
                with open(filename, errors='ignore', encoding='utf-8') as f:
                    # Each 'X:' header starts a new tune; record its (start, end) line range.
                    for idx, line in enumerate(f):
                        if line.startswith('X:'):
                            if start:
                                self.raw_filenames.append((filename, (start, idx)))
                            start = idx
                if start:
                    self.raw_filenames.append((filename, (start, idx)))
        self._filenames = self.raw_filenames
        self._use_converted = False
        self._factory = self.read
        return self

def main_train():
    kwargs = {'num_workers': 1, 'pin_memory': True}
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', train=False,
                       transform=transforms.Compose([transforms.ToTensor(),
                                                     transforms.Normalize((0.1307,), (0.3081,))])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    model = Net(args).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=5e-05)
    scheduler = StepLR(optimizer, step_size=45, gamma=args.gamma)

    test_acc = 0
    pruned_dim = 0
    saved_model = {}
    for epoch in range(1, args.epochs + 1):
        if epoch % 10 == 1:
            train_loader = torch.utils.data.DataLoader(
                datasets.MNIST('./data', train=True, download=True,
                               transform=transforms.Compose([transforms.ToTensor(),
                                                             transforms.Normalize((0.1307,), (0.3081,))])),
                batch_size=args.batch_size, shuffle=True, **kwargs)
        print('\nepoch:', epoch)
        train(args, model, device, train_loader, optimizer, epoch)
        scheduler.step()

        # Average the (stochastic) test accuracy over t evaluation runs.
        accuracy = 0
        t = 5
        for i in range(t):
            acc, pruned_number = test(args, model, device, test_loader, args.channel_noise)
            accuracy += acc
        print('Test Accuracy:', accuracy / t, 'Pruned dim', pruned_number,
              'Activated dim:', args.intermediate_dim - pruned_number)
        accuracy = accuracy / t

        if epoch > 300:
            if (accuracy > test_acc and pruned_number == pruned_dim) or pruned_number > pruned_dim:
                test_acc = accuracy
                pruned_dim = pruned_number
                saved_model = copy.deepcopy(model.state_dict())
        print('Best Accuracy:', test_acc, 'pruned_number:', pruned_dim,
              'activated_dim:', args.intermediate_dim - pruned_dim)

    torch.save({'model': saved_model},
               './MNIST_model_dim:{}_beta:{}_accuracy:{:.4f}_model.pth'.format(
                   args.intermediate_dim - pruned_dim, args.beta, test_acc))

class ResBlock(chainer.Chain):

    def __init__(self, ch, norm='instance', activation='relu', equalised=False, separable=False, skip_conv=False):
        super(ResBlock, self).__init__()
        self.activation = activation_func[activation]
        nobias = False
        with self.init_scope():
            self.c0 = EqualizedConv2d(ch, ch, 3, 1, 1, pad_type='zero', equalised=equalised,
                                      nobias=nobias, separable=separable)
            self.c1 = EqualizedConv2d(ch, ch, 3, 1, 1, pad_type='zero', equalised=equalised,
                                      nobias=nobias, separable=separable)
            if skip_conv:
                self.cs = EqualizedConv2d(ch, ch, 1, 1, 0)
            else:
                self.cs = F.identity
            self.norm0 = norm_layer[norm](ch)
            self.norm1 = norm_layer[norm](ch)

    def __call__(self, x):
        h = self.c0(x)
        h = self.norm0(h)
        h = self.activation(h)
        h = self.c1(h)
        h = self.norm1(h)
        return h + self.cs(x)

def _dist_matrix(x, y, c):
    # Pairwise distance in the Poincare ball of curvature c:
    # d_c(x, y) = (2 / sqrt(c)) * artanh(sqrt(c) * ||(-x) (+)_c y||),
    # where (+)_c denotes Mobius addition.
    sqrt_c = c ** 0.5
    return (2 / sqrt_c) * artanh(sqrt_c * torch.norm(_mobius_addition_batch(-x, y, c=c), dim=-1))

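# Self-contained sketch of the same Poincare-ball distance for a single pair
# of points; `mobius_add` here is a hypothetical stand-in for the module's
# `_mobius_addition_batch`.
import torch

def mobius_add(x, y, c):
    # Mobius addition in the Poincare ball with curvature c.
    xy = (x * y).sum(-1, keepdim=True)
    x2 = (x * x).sum(-1, keepdim=True)
    y2 = (y * y).sum(-1, keepdim=True)
    num = (1 + 2 * c * xy + c * y2) * x + (1 - c * x2) * y
    den = 1 + 2 * c * xy + (c ** 2) * x2 * y2
    return num / den

def poincare_dist(x, y, c=1.0):
    sqrt_c = c ** 0.5
    return (2 / sqrt_c) * torch.atanh(sqrt_c * mobius_add(-x, y, c).norm(dim=-1))

print(poincare_dist(torch.tensor([0.1, 0.2]), torch.tensor([-0.3, 0.05])))
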
def print_correlation(topk_systems, metric_pairs):
    headers = ['metric_pair', 'pearson', 'spearman', 'kendalltau']
    print_list = []
    for pair in metric_pairs:
        if 'bart_en_sim' in pair[1] or 'bart_sim' in pair[1]:
            continue
        m1_scores = []
        m2_scores = []
        for scores in topk_systems.values():
            m1_scores.append(scores[pair[0]])
            m2_scores.append(scores[pair[1]])
        pearson, _ = pearsonr(m1_scores, m2_scores)
        spearman, _ = spearmanr(m1_scores, m2_scores)
        ktau, _ = kendalltau(m1_scores, m2_scores)
        print_list.append([f'{pair[1]}', pearson, spearman, ktau])
    # Sort rows by Spearman correlation, descending.
    print_list = sorted(print_list, key=lambda x: x[2], reverse=True)
    print(tabulate(print_list, headers=headers, tablefmt='simple'))

class LM(sb.core.Brain):

    def compute_forward(self, batch, stage):
        batch = batch.to(self.device)
        tokens_bos, _ = batch.tokens_bos
        logits = self.hparams.model(tokens_bos)
        pred = self.hparams.log_softmax(logits)
        return pred

    def compute_objectives(self, predictions, batch, stage):
        batch = batch.to(self.device)
        tokens_eos, tokens_len = batch.tokens_eos
        loss = self.hparams.compute_cost(predictions, tokens_eos, length=tokens_len)
        return loss

    def fit_batch(self, batch):
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        # Gradient accumulation: scale the loss and only step every accu_steps.
        (loss / self.hparams.accu_steps).backward()
        if self.step % self.hparams.accu_steps == 0:
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()
            if (isinstance(self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler)
                    or isinstance(self.hparams.lr_annealing, sb.nnet.schedulers.CyclicCosineScheduler)):
                self.hparams.lr_annealing(self.optimizer)
        return loss

    def on_stage_end(self, stage, stage_loss, epoch):
        stage_stats = {'loss': stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        if stage == sb.Stage.VALID:
            if not (isinstance(self.hparams.lr_annealing, sb.nnet.schedulers.NoamScheduler)
                    or isinstance(self.hparams.lr_annealing, sb.nnet.schedulers.CyclicCosineScheduler)):
                old_lr, new_lr = self.hparams.lr_annealing(stage_loss)
                sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            else:
                old_lr = self.hparams.lr_annealing.current_lr
            self.hparams.train_logger.log_stats(stats_meta={'epoch': epoch, 'lr': old_lr},
                                                train_stats=self.train_stats, valid_stats=stage_stats)
            self.checkpointer.save_and_keep_only(meta=stage_stats, min_keys=['loss'])

def get_plot_label(xm, ym):
    template = '%(xlabel)s-%(ylabel)s tradeoff - %(updown)s and to the %(leftright)s is better'
    return template % {'xlabel': xm['description'], 'ylabel': ym['description'],
                       'updown': get_up_down(ym), 'leftright': get_left_right(xm)}

def louvain(G, resolution=1, eps=0.001, unit_weights=True, copy_graph=False):
    if copy_graph:
        F = G.copy()
    else:
        F = G
    if unit_weights:
        for u, v in F.edges():
            F[u][v]['weight'] = 1
    cluster = maximize(F, resolution, eps)
    n = len(cluster)
    k = len(set(cluster.values()))
    while k < n:
        # Collapse each cluster into a single node and re-run local
        # maximization on the aggregated graph.
        H = aggregate(F, cluster)
        new_cluster = maximize(H, resolution, eps)
        cluster = {u: new_cluster[cluster[u]] for u in F.nodes()}
        n = k
        k = len(set(cluster.values()))
    return get_clustering(cluster)

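# Usage sketch for louvain, assuming the helpers it calls (maximize, aggregate,
# get_clustering) are in scope:
import networkx as nx

G = nx.karate_club_graph()
communities = louvain(G, resolution=1, copy_graph=True)  # copy so G's weights stay untouched
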
def train_fn():
    global epoch, args
    epoch = 0
    while epoch < args.epochs:
        epoch += 1
        loss, jac = train(epoch)
        valid_loss = test(epoch, testloader)
        status = str(os.getpid()) + ' Epoch {}/{} | Loss {:3.4f} | jac {:3.4f}'.format(epoch, args.epochs, loss, jac)
        print(status)
        with open(args.save_dir + '/log.txt', 'a') as f:
            f.write(status + '\n')
        print('-' * 89)

def read_config(config_path):
    with open(config_path, 'r') as conf:
        config_dict = convert_values(yaml.load(conf, Loader=YamlUniqueLoader))

    if 'seml' not in config_dict:
        raise ConfigError("Please specify a 'seml' dictionary.")

    seml_dict = config_dict['seml']
    del config_dict['seml']

    for k in seml_dict.keys():
        if k not in VALID_CONFIG_VALUES:
            raise ConfigError(f'{k} is not a valid value in the `seml` config block.')

    set_executable_and_working_dir(config_path, seml_dict)

    if 'output_dir' in seml_dict:
        seml_dict['output_dir'] = str(Path(seml_dict['output_dir']).expanduser().resolve())

    if 'slurm' in config_dict:
        slurm_dict = config_dict['slurm']
        del config_dict['slurm']
        for k in slurm_dict.keys():
            if k not in VALID_SLURM_CONFIG_VALUES:
                raise ConfigError(f'{k} is not a valid value in the `slurm` config block.')
        return seml_dict, slurm_dict, config_dict
    else:
        return seml_dict, None, config_dict

@dataclass
class ModelArgs:
    attention_window: int = field(default=512, metadata={'help': 'Size of attention window'})
    max_pos: int = field(default=4096, metadata={'help': 'Maximum position'})

class MemoryReportBuilder(ReportBuilderBase):
    Version = 1

    def __init__(self, file=None):
        super().__init__(file)

    def add_weight_entry(self, weight_name, size_bytes, grad_size_bytes, stack_context):
        cursor = self._connection.cursor()
        cursor.execute(queries.add_weight_entry, (weight_name, size_bytes, grad_size_bytes))
        self._add_stack_frames(cursor=cursor, entry_id=cursor.lastrowid,
                               entry_type=queries.EntryType.Weight, stack_context=stack_context)
        return self

    def add_activation_entry(self, operation_name, size_bytes, stack_context):
        cursor = self._connection.cursor()
        cursor.execute(queries.add_activation_entry, (operation_name, size_bytes))
        self._add_stack_frames(cursor=cursor, entry_id=cursor.lastrowid,
                               entry_type=queries.EntryType.Activation, stack_context=stack_context)
        return self

    def add_misc_entry(self, size_type: MiscSizeType, size_bytes):
        cursor = self._connection.cursor()
        cursor.execute(queries.add_misc_entry, (size_type.value, size_bytes))
        return self

    def build(self):
        self._connection.commit()
        return MemoryReport(self._connection)

    def _create_report_tables(self):
        cursor = self._connection.cursor()
        cursor.execute(queries.set_report_format_version.format(version=MemoryReportBuilder.Version))
        for creation_query in queries.create_report_tables.values():
            cursor.execute(creation_query)
        cursor.executemany(queries.add_entry_type,
                           map(lambda entry: (entry.value, entry.name), queries.EntryType))
        self._connection.commit()

    def _add_stack_frames(self, cursor, entry_id, entry_type: queries.EntryType, stack_context):
        cursor.execute(queries.add_correlation_entry, (entry_id, entry_type.value))
        correlation_id = cursor.lastrowid

        def stack_frame_generator():
            for idx, frame in enumerate(stack_context.frames):
                yield (correlation_id, idx, frame.file_path, frame.line_number)

        cursor.executemany(queries.add_stack_frame, stack_frame_generator())

def register_Ns3EpcSgwPgwApplication_methods(root_module, cls):
    cls.add_constructor([param('ns3::EpcSgwPgwApplication const &', 'arg0')])
    cls.add_constructor([param('ns3::Ptr< ns3::VirtualNetDevice > const', 'tunDevice'),
                         param('ns3::Ptr< ns3::Socket > const', 's1uSocket')])
    cls.add_method('AddEnb', 'void',
                   [param('uint16_t', 'cellId'), param('ns3::Ipv4Address', 'enbAddr'),
                    param('ns3::Ipv4Address', 'sgwAddr')])
    cls.add_method('AddUe', 'void', [param('uint64_t', 'imsi')])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('GetS11SapSgw', 'ns3::EpcS11SapSgw *', [])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('RecvFromS1uSocket', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket')])
    cls.add_method('RecvFromTunDevice', 'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'),
                    param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')])
    cls.add_method('SendToS1uSocket', 'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'enbS1uAddress'),
                    param('uint32_t', 'teid')])
    cls.add_method('SendToTunDevice', 'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('uint32_t', 'teid')])
    cls.add_method('SetS11SapMme', 'void', [param('ns3::EpcS11SapMme *', 's')])
    cls.add_method('SetUeAddress', 'void',
                   [param('uint64_t', 'imsi'), param('ns3::Ipv4Address', 'ueAddr')])
    cls.add_method('SetUeAddress6', 'void',
                   [param('uint64_t', 'imsi'), param('ns3::Ipv6Address', 'ueAddr')])
    return

def launch_process_helper(args, proc_env=None, cwd=None):
    if proc_env is None:
        proc_env = os.environ.copy()

    with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                          env=proc_env, cwd=cwd) as proc:
        cmd_out, cmd_err = proc.communicate()

    if cmd_out is not None:
        cmd_out = cmd_out.decode('utf-8')
        sys.stdout.write(cmd_out)
    if cmd_err is not None:
        cmd_err = cmd_err.decode('utf-8')
        sys.stderr.write(cmd_err)
    return cmd_out, cmd_err

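# Usage sketch for launch_process_helper (assumes the os/subprocess/sys imports
# used above; output is also echoed to stdout/stderr):
out, err = launch_process_helper(['echo', 'hello'])
assert out.strip() == 'hello'
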
class QueryBertLikeLayer(torch.nn.Module):

    def __init__(self, origin: transformers.models.bert.modeling_bert.BertLayer, share_weights: bool = False):
        super().__init__()
        self.attention = QueryBertLikeAttention(origin.attention, share_weights=share_weights)
        if share_weights:
            self.intermediate = origin.intermediate
            self.output = origin.output
        else:
            self.intermediate = copy.deepcopy(origin.intermediate)
            self.output = copy.deepcopy(origin.output)

    def forward(self, query_states, hidden_states, **kwargs):
        self_attention_outputs = self.attention(query_states, hidden_states, **kwargs)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        outputs = (layer_output,) + outputs
        return outputs

def run(n, stmt, fuzzer_cls):
    float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
    int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n)
    raw_results = []
    for i, (float_values, int_values) in enumerate(zip(float_iter, int_iter)):
        float_tensors, float_tensor_params, float_params = float_values
        int_tensors, int_tensor_params, int_params = int_values

        # The same seed should yield identical shapes/params for both dtypes.
        assert_dicts_equal(float_params, int_params)
        assert_dicts_equal(float_tensor_params['x'], int_tensor_params['x'])

        float_measurement, int_measurement = [
            Timer(stmt, globals=tensors).blocked_autorange(min_run_time=_MEASURE_TIME)
            for tensors in (float_tensors, int_tensors)
        ]

        descriptions = []
        for name in float_tensors:
            shape_str = '(' + ', '.join([
                f'2 ** {int(np.log2(i))}'
                if 2 ** int(np.log2(i)) == i and i > 1
                else str(i)
                for i in float_tensors[name].shape]) + ')'
            order = float_tensor_params[name]['order']
            order_str = '' if all(order == np.arange(len(order))) else str(tuple(order))
            steps = float_tensor_params[name]['steps']
            steps_str = str(steps) if sum(steps) > len(steps) else ''
            descriptions.append((name, shape_str, order_str, steps_str))
        raw_results.append((float_measurement, int_measurement, descriptions))

        print(f'\r{i + 1} / {n}', end='')
    print()

    parsed_results, name_len, shape_len, order_len, steps_len = [], 0, 0, 0, 0
    for float_measurement, int_measurement, descriptions in raw_results:
        t_float = float_measurement.median * 1e6
        t_int = int_measurement.median * 1e6
        rel_diff = abs(t_float - t_int) / (t_float + t_int) * 2
        parsed_results.append((t_float, t_int, rel_diff, descriptions))
        for name, shape, order, steps in descriptions:
            name_len = max(name_len, len(name))
            shape_len = max(shape_len, len(shape))
            order_len = max(order_len, len(order))
            steps_len = max(steps_len, len(steps))

    parsed_results.sort(key=lambda x: x[2])

    print(f'stmt: {stmt}')
    print(f" diff faster{'':>17}{' ' * name_len} ", end='')
    print(f"{'shape'.ljust(shape_len)}{'':>16}{'order'.ljust(order_len)}", end='')
    print(f" steps\n{'-' * 100}")
    for results, spacer in [(parsed_results[:10], '...'), (parsed_results[-10:], '')]:
        for t_float, t_int, rel_diff, descriptions in results:
            time_str = [f"{rel_diff * 100:>4.1f}% {'int' if t_int < t_float else 'float':<20}"]
            time_str.extend([''.ljust(len(time_str[0])) for _ in descriptions[:-1]])
            for t_str, (name, shape, order, steps) in zip(time_str, descriptions):
                name = f'{name}:'.ljust(name_len + 1)
                shape = shape.ljust(shape_len + 10)
                order = order.ljust(order_len)
                print(f'{t_str} {name} {shape}| {order} | {steps}')
        print(spacer)

def _build_demo_runner():

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 1)

        def forward(self, x):
            return self.linear(x)

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    model = Model()
    tmp_dir = tempfile.mkdtemp()
    runner = EpochBasedRunner(model=model, work_dir=tmp_dir, logger=logging.getLogger())
    return runner

def Replace(s, src, dst):
    ctx = _get_ctx2(dst, s)
    if ctx is None and is_expr(src):
        ctx = src.ctx
    src = _coerce_seq(src, ctx)
    dst = _coerce_seq(dst, ctx)
    s = _coerce_seq(s, ctx)
    return SeqRef(Z3_mk_seq_replace(src.ctx_ref(), s.as_ast(), src.as_ast(), dst.as_ast()), s.ctx)

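# Usage sketch: in z3py, Replace substitutes the first occurrence of src in s.
from z3 import StringVal, simplify

print(simplify(Replace(StringVal('hello world'), StringVal('world'), StringVal('z3'))))  # "hello z3"
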
class Multinomial(Distribution):
    arg_constraints = {'probs': constraints.simplex, 'logits': constraints.real}

    @property
    def mean(self):
        return self.probs * self.total_count

    @property
    def variance(self):
        return self.total_count * self.probs * (1 - self.probs)

    def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
        if not isinstance(total_count, Number):
            raise NotImplementedError('inhomogeneous total_count is not supported')
        self.total_count = total_count
        self._categorical = Categorical(probs=probs, logits=logits)
        batch_shape = self._categorical.batch_shape
        event_shape = self._categorical.param_shape[-1:]
        super(Multinomial, self).__init__(batch_shape, event_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Multinomial, _instance)
        batch_shape = torch.Size(batch_shape)
        new.total_count = self.total_count
        new._categorical = self._categorical.expand(batch_shape)
        super(Multinomial, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @constraints.dependent_property
    def support(self):
        return constraints.integer_interval(0, self.total_count)

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def probs(self):
        return self._categorical.probs

    @property
    def param_shape(self):
        return self._categorical.param_shape

    def sample(self, sample_shape=torch.Size()):
        sample_shape = torch.Size(sample_shape)
        samples = self._categorical.sample(torch.Size((self.total_count,)) + sample_shape)
        # total_count is the leading dim; move it to the rightmost position so
        # per-event counts can be accumulated with scatter_add_.
        shifted_idx = list(range(samples.dim()))
        shifted_idx.append(shifted_idx.pop(0))
        samples = samples.permute(*shifted_idx)
        counts = samples.new(self._extended_shape(sample_shape)).zero_()
        counts.scatter_add_(-1, samples, torch.ones_like(samples))
        return counts.type_as(self.probs)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        logits = logits.clone(memory_format=torch.contiguous_format)
        log_factorial_n = torch.lgamma(value.sum(-1) + 1)
        log_factorial_xs = torch.lgamma(value + 1).sum(-1)
        logits[(value == 0) & (logits == -inf)] = 0
        log_powers = (logits * value).sum(-1)
        return log_factorial_n - log_factorial_xs + log_powers

class MulConstant(Module):

    def __init__(self, constant_scalar, inplace=False):
        super(MulConstant, self).__init__()
        self.constant_scalar = constant_scalar
        self.inplace = inplace

    def updateOutput(self, input):
        if self.inplace:
            input.mul_(self.constant_scalar)
            self.output.set_(input)
        else:
            self.output.resize_as_(input)
            self.output.copy_(input)
            self.output.mul_(self.constant_scalar)
        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return
        if self.inplace:
            gradOutput.mul_(self.constant_scalar)
            self.gradInput.set_(gradOutput)
            # Undo the in-place scaling of the input done in the forward pass.
            input.div_(self.constant_scalar)
        else:
            self.gradInput.resize_as_(gradOutput)
            self.gradInput.copy_(gradOutput)
            self.gradInput.mul_(self.constant_scalar)
        return self.gradInput

def get_declr(module: Module, x: EntryBase, with_docs=False):
    out = []
    if with_docs:
        out += get_api_ref(module, x)

    ty = type(x)
    if ty is BuiltInType:
        out += ['']

    elif ty is Alias:
        out += [f'typedef {get_type_name(x.alias_of)} {get_type_name(x)};']

    elif ty is Definition:
        out += [f'#define {x.name.screaming_snake_case} {x.value}']

    elif ty is Handle:
        out += [f'typedef struct {get_type_name(x)}_t* {get_type_name(x)};']

    elif ty is Enumeration:
        out += ['typedef enum ' + get_type_name(x) + ' {']
        for name, value in x.cases.items():
            if with_docs:
                out += get_api_field_ref(module, x, name)
            name = x.name.extend(name).screaming_snake_case
            out += [f'  {name} = {value},']
        out += [f"  {x.name.extend('max_enum').screaming_snake_case} = 0xffffffff,"]
        out += ['} ' + get_type_name(x) + ';']

    elif ty is BitField:
        bit_type_name = x.name.extend('flag_bits').upper_camel_case
        out += ['typedef enum ' + bit_type_name + ' {']
        for name, value in x.bits.items():
            if with_docs:
                out += get_api_field_ref(module, x, name)
            name = x.name.extend(name).extend('bit').screaming_snake_case
            out += [f'  {name} = 1 << {value},']
        out += ['} ' + bit_type_name + ';']
        out += [f'typedef TiFlags {get_type_name(x)};']

    elif ty is Structure:
        out += ['typedef struct ' + get_type_name(x) + ' {']
        for field in x.fields:
            if with_docs:
                out += get_api_field_ref(module, x, field.name)
            out += [f'  {get_field(field)};']
        out += ['} ' + get_type_name(x) + ';']

    elif ty is Union:
        out += ['typedef union ' + get_type_name(x) + ' {']
        for variant in x.variants:
            if with_docs:
                out += get_api_field_ref(module, x, variant.name)
            out += [f'  {get_field(variant)};']
        out += ['} ' + get_type_name(x) + ';']

    elif ty is Callback:
        return_value_type = 'void' if x.return_value_type is None else get_type_name(x.return_value_type)
        out += [f'typedef {return_value_type} (TI_API_CALL *{get_type_name(x)})(']
        if x.params:
            for i, param in enumerate(x.params):
                if i != 0:
                    out[-1] += ','
                if with_docs:
                    out += get_api_field_ref(module, x, param.name)
                out += [f'  {get_field(param)}']
        out += [');']

    elif ty is Function:
        return_value_type = 'void' if x.return_value_type is None else get_type_name(x.return_value_type)
        out += ['TI_DLL_EXPORT ' + return_value_type + ' TI_API_CALL ' + x.name.snake_case + '(']
        if x.params:
            for i, param in enumerate(x.params):
                if i != 0:
                    out[-1] += ','
                if with_docs:
                    out += get_api_field_ref(module, x, param.name)
                out += [f'  {get_field(param)}']
        out += [');']

    else:
        raise RuntimeError(f"'{x.id}' doesn't need declaration")

    return '\n'.join(out)

class Obstacle(PhysicalObject):

    def __init__(self, *args, **kwargs):
        kwargs['color'] = (80, 80, 80)
        super(Obstacle, self).__init__('obstacle.png', *args, **kwargs)

    def create_physical_entity(self):
        body = self._engine.CreateStaticBody(position=self.physical_position)
        body.CreatePolygonFixture(
            box=((self.width / 2.0) / self._world.physical_scale,
                 (self.height / 2.0) / self._world.physical_scale),
            density=10.0, friction=0.0, restitution=0.0)
        return body

class OverlapPatchEmbed(nn.Module):

    def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
        self.num_patches = self.H * self.W
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
                              padding=(patch_size[0] // 2, patch_size[1] // 2))
        self.norm = nn.LayerNorm(embed_dim)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        x = self.proj(x)
        _, _, H, W = x.shape
        # Flatten spatial dims into a token sequence: (B, C, H, W) -> (B, H*W, C).
        x = x.flatten(2).transpose(1, 2)
        x = self.norm(x)
        return x, H, W

def run(dataset, model, str_optimizer, str_preconditioner, runs, epochs, lr, weight_decay,
        early_stopping, logger, momentum, eps, update_freq, gamma, alpha, hyperparam):
    if logger is not None:
        if hyperparam:
            logger += f'-{hyperparam}{eval(hyperparam)}'
        path_logger = os.path.join(path_runs, logger)
        print(f'path logger: {path_logger}')
        ut.empty_dir(path_logger)
    logger = SummaryWriter(log_dir=os.path.join(path_runs, logger)) if logger is not None else None

    val_losses, accs, durations = [], [], []
    torch.manual_seed(42)
    for i_run in range(runs):
        data = dataset[0]
        data = data.to(device)

        model.to(device).reset_parameters()
        if str_preconditioner == 'KFAC':
            preconditioner = psgd.KFAC(model, eps, sua=False, pi=False, update_freq=update_freq,
                                       alpha=alpha if alpha is not None else 1.0, constraint_norm=False)
        else:
            preconditioner = None

        if str_optimizer == 'Adam':
            optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
        elif str_optimizer == 'SGD':
            optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)

        if torch.cuda.is_available():
            torch.cuda.synchronize()

        t_start = time.perf_counter()
        best_val_loss = float('inf')
        test_acc = 0
        val_loss_history = []
        for epoch in range(1, epochs + 1):
            lam = (float(epoch) / float(epochs)) ** gamma if gamma is not None else 0.0
            train(model, optimizer, data, preconditioner, lam)
            eval_info = evaluate(model, data)
            eval_info['epoch'] = int(epoch)
            eval_info['run'] = int(i_run + 1)
            eval_info['time'] = time.perf_counter() - t_start
            eval_info['eps'] = eps
            eval_info['update-freq'] = update_freq
            if gamma is not None:
                eval_info['gamma'] = gamma
            if alpha is not None:
                eval_info['alpha'] = alpha
            if logger is not None:
                for k, v in eval_info.items():
                    logger.add_scalar(k, v, global_step=epoch)
            if eval_info['val loss'] < best_val_loss:
                best_val_loss = eval_info['val loss']
                test_acc = eval_info['test acc']
            val_loss_history.append(eval_info['val loss'])
            if early_stopping > 0 and epoch > epochs // 2:
                tmp = tensor(val_loss_history[-(early_stopping + 1):-1])
                if eval_info['val loss'] > tmp.mean().item():
                    break
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        t_end = time.perf_counter()

        val_losses.append(best_val_loss)
        accs.append(test_acc)
        durations.append(t_end - t_start)

    if logger is not None:
        logger.close()
    loss, acc, duration = tensor(val_losses), tensor(accs), tensor(durations)
    print('Val Loss: {:.4f}, Test Accuracy: {:.2f} {:.2f}, Duration: {:.3f} \n'.format(
        loss.mean().item(), 100 * acc.mean().item(), 100 * acc.std().item(), duration.mean().item()))

def _coco_box_to_bbox(box):
    # Convert a COCO box [x, y, w, h] to corner form [x1, y1, x2, y2].
    bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.int32)
    return bbox

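# Quick check of the conversion above:
print(_coco_box_to_bbox([10, 20, 30, 40]))  # -> [10 20 40 60]
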
class NormalizingFlowDensity(nn.Module):

    def __init__(self, dim, flow_length, flow_type='planar_flow'):
        super(NormalizingFlowDensity, self).__init__()
        self.dim = dim
        self.flow_length = flow_length
        self.flow_type = flow_type

        # Standard-normal base distribution parameters (not trained).
        self.mean = nn.Parameter(torch.zeros(self.dim), requires_grad=False)
        self.cov = nn.Parameter(torch.eye(self.dim), requires_grad=False)

        if self.flow_type == 'radial_flow':
            self.transforms = nn.Sequential(*(Radial(dim) for _ in range(flow_length)))
        elif self.flow_type == 'iaf_flow':
            self.transforms = nn.Sequential(*(affine_autoregressive(dim, hidden_dims=[128, 128])
                                              for _ in range(flow_length)))
        else:
            raise NotImplementedError

    def forward(self, z):
        sum_log_jacobians = 0
        for transform in self.transforms:
            z_next = transform(z)
            sum_log_jacobians = sum_log_jacobians + transform.log_abs_det_jacobian(z, z_next)
            z = z_next
        return z, sum_log_jacobians

    def log_prob(self, x):
        # Change of variables: log p(x) = log p(z) + sum of log |det J|.
        z, sum_log_jacobians = self.forward(x)
        log_prob_z = tdist.MultivariateNormal(self.mean, self.cov).log_prob(z)
        log_prob_x = log_prob_z + sum_log_jacobians
        return log_prob_x

def _os_system(cmd: list, save_log: bool = False):
    cmd_str = ''
    for s in cmd:
        cmd_str += str(s) + ' '
    if not save_log:
        print('[Running]: {}'.format(cmd_str))
        ret = os.system(cmd_str)
        if ret == 0:
            print('[Success]: {}'.format(cmd_str))
        else:
            raise RuntimeError('[!Error]: {}'.format(cmd_str))
    else:
        _os_system_log(cmd_str)

def test_num_6():
    content = ak.contents.numpyarray.NumpyArray(
        np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
    offsets = ak.index.Index64(np.array([0, 3, 3, 5, 6, 9]))
    array = ak.Array(ak.contents.listoffsetarray.ListOffsetArray(offsets, content))
    cuda_array = ak.to_backend(array, 'cuda')
    assert ak.num(cuda_array, 0) == ak.num(array, 0)
    assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()

def GenerateSM80_TensorOp_16816(manifest, cuda_version): if (not CudaToolkitVersionSatisfies(cuda_version, 11, 0)): return layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor)] math_instructions = [MathInstruction([16, 8, 16], DataType.f16, DataType.f16, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add), MathInstruction([16, 8, 16], DataType.f16, DataType.f16, DataType.f16, OpcodeClass.TensorOp, MathOperation.multiply_add), MathInstruction([16, 8, 16], DataType.bf16, DataType.bf16, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add)] min_cc = 80 max_cc = 1024 alignment_constraints = [8, 4, 2] for math_inst in math_instructions: tile_descriptions = [TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([256, 64, 32], 3, [4, 1, 1], math_inst, min_cc, max_cc), TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc), TileDescription([64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 32], 10, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc), TileDescription([64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc), TileDescription([64, 256, 64], 3, [1, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc)] data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, math_inst.element_accumulator] CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints) CreateGemmGroupedOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints) conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [4, 8]) CreateConv3dOperator(manifest, LayoutType.TensorNDHWC, tile_descriptions, data_type, 8) if (math_inst.element_a != math_inst.element_accumulator): data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, math_inst.element_accumulator] CreateGemmOperator(manifest, layouts, tile_descriptions, data_type_mixed, alignment_constraints) CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints) CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [4, 8]) CreateConv3dOperator(manifest, LayoutType.TensorNDHWC, tile_descriptions, data_type_mixed, 8)
def match_regex(rgx, span): m = (re.search(rgx, span.text, re.I) if (type(rgx) is str) else rgx.search(span.text)) if (not m): return None (i, j) = m.span() if (type(span) is Span): i += span.char_start j += span.char_start return Span(i, (j - 1), span.sentence) return Span(i, (j - 1), span)
def prepare_common_voice(data_folder, save_folder, train_tsv_file=None, dev_tsv_file=None, test_tsv_file=None, accented_letters=False, language='en', skip_prep=False): if skip_prep: return if (train_tsv_file is None): train_tsv_file = (data_folder + '/train.tsv') if (dev_tsv_file is None): dev_tsv_file = (data_folder + '/dev.tsv') if (test_tsv_file is None): test_tsv_file = (data_folder + '/test.tsv') if (not os.path.exists(save_folder)): os.makedirs(save_folder) save_csv_train = (save_folder + '/train.csv') save_csv_dev = (save_folder + '/dev.csv') save_csv_test = (save_folder + '/test.csv') if skip(save_csv_train, save_csv_dev, save_csv_test): msg = ('%s already exists, skipping data preparation!' % save_csv_train) logger.info(msg) msg = ('%s already exists, skipping data preparation!' % save_csv_dev) logger.info(msg) msg = ('%s already exists, skipping data preparation!' % save_csv_test) logger.info(msg) return check_commonvoice_folders(data_folder) file_pairs = zip([train_tsv_file, dev_tsv_file, test_tsv_file], [save_csv_train, save_csv_dev, save_csv_test]) for (tsv_file, save_csv) in file_pairs: create_csv(tsv_file, save_csv, data_folder, accented_letters, language)
def _is_checked_function(item): if (not inspect.isfunction(item)): return False if item.__name__.startswith('_'): return False mod = item.__module__ if ((not mod.startswith('skglm.')) or mod.endswith('estimator_checks')): return False return True
def test_slate_ope_performance_using_independent_log(): n_unique_action = 10 len_list = 3 dim_context = 2 reward_type = 'binary' random_state = 12345 n_rounds = 1000 reward_structure = 'independent' click_model = None behavior_policy_function = linear_behavior_policy_logit reward_function = logistic_reward_function dataset = SyntheticSlateBanditDataset(n_unique_action=n_unique_action, len_list=len_list, dim_context=dim_context, reward_type=reward_type, reward_structure=reward_structure, click_model=click_model, random_state=random_state, behavior_policy_function=behavior_policy_function, base_reward_function=reward_function) random_behavior_dataset = SyntheticSlateBanditDataset(n_unique_action=n_unique_action, len_list=len_list, dim_context=dim_context, reward_type=reward_type, reward_structure=reward_structure, click_model=click_model, random_state=random_state, behavior_policy_function=None, base_reward_function=reward_function) bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds) slate_id = bandit_feedback['slate_id'] reward = bandit_feedback['reward'] pscore = bandit_feedback['pscore'] pscore_item_position = bandit_feedback['pscore_item_position'] pscore_cascade = bandit_feedback['pscore_cascade'] position = bandit_feedback['position'] random_behavior_feedback = random_behavior_dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds) sips_estimated_policy_value = sips.estimate_policy_value(slate_id=slate_id, reward=reward, pscore=pscore, position=position, evaluation_policy_pscore=random_behavior_feedback['pscore']) iips_estimated_policy_value = iips.estimate_policy_value(slate_id=slate_id, reward=reward, pscore_item_position=pscore_item_position, position=position, evaluation_policy_pscore_item_position=random_behavior_feedback['pscore_item_position']) rips_estimated_policy_value = rips.estimate_policy_value(slate_id=slate_id, reward=reward, pscore_cascade=pscore_cascade, position=position, evaluation_policy_pscore_cascade=random_behavior_feedback['pscore_cascade']) snsips_estimated_policy_value = snsips.estimate_policy_value(slate_id=slate_id, reward=reward, pscore=pscore, position=position, evaluation_policy_pscore=random_behavior_feedback['pscore']) sniips_estimated_policy_value = sniips.estimate_policy_value(slate_id=slate_id, reward=reward, pscore_item_position=pscore_item_position, position=position, evaluation_policy_pscore_item_position=random_behavior_feedback['pscore_item_position']) snrips_estimated_policy_value = snrips.estimate_policy_value(slate_id=slate_id, reward=reward, pscore_cascade=pscore_cascade, position=position, evaluation_policy_pscore_cascade=random_behavior_feedback['pscore_cascade']) q_pi_e = random_behavior_feedback['reward'].reshape((n_rounds, dataset.len_list)).sum(axis=1) gt_mean = q_pi_e.mean() gt_std = q_pi_e.std(ddof=1) print('Independent') ci_bound = ((gt_std * 3) / np.sqrt(q_pi_e.shape[0])) print(f'gt_mean: {gt_mean}, 3 * gt_std / sqrt(n): {ci_bound}') estimated_policy_value = {'sips': sips_estimated_policy_value, 'iips': iips_estimated_policy_value, 'rips': rips_estimated_policy_value, 'snsips': snsips_estimated_policy_value, 'sniips': sniips_estimated_policy_value, 'snrips': snrips_estimated_policy_value} for key in estimated_policy_value: print(f'estimated_value: {estimated_policy_value[key]} ------ estimator: {key}, ') assert (np.abs((gt_mean - estimated_policy_value[key])) <= ci_bound), f'OPE of {key} did not work well (absolute error is greater than 3*sigma)'
def save_results(path, name, img, gt_depth, pred_depth, validmask, cv_mask, costvolume): savepath = os.path.join(path, name) device = img.device (bs, _, h, w) = img.shape img = (img[(0, ...)].permute(1, 2, 0).detach().cpu().numpy() + 0.5) gt_depth = gt_depth[(0, ...)].permute(1, 2, 0).detach().cpu().numpy() gt_depth[(gt_depth == 80)] = 0 pred_depth = pred_depth[(0, ...)].permute(1, 2, 0).detach().cpu().numpy() validmask = validmask[(0, 0, ...)].detach().cpu().numpy() cv_mask = cv_mask[(0, 0, ...)].detach().cpu().numpy() (error_map, _) = get_error_map_value(pred_depth, gt_depth, grag_crop=False, median_scaling=False) errorpil = numpy_intensitymap_to_pcolor(error_map, vmin=0, vmax=0.5, colormap='jet') pred_pil = numpy_intensitymap_to_pcolor(pred_depth) gt_pil = numpy_intensitymap_to_pcolor(gt_depth) img_pil = numpy_rgb_to_pil(img) validmask_pil = Image.fromarray((validmask * 255.0).astype(np.uint8)) cv_mask_pil = Image.fromarray((cv_mask * 255.0).astype(np.uint8)) print(bs, h, w) depths = (1 / torch.linspace(0.0025, 0.33, 32, device=device)).cuda().view(1, (- 1), 1, 1).expand(bs, (- 1), h, w) cost_volume_depth = find_mincost_depth(costvolume, depths) cost_volume_depth = cost_volume_depth[(0, ...)].permute(1, 2, 0).detach().cpu().numpy() cv_depth_pil = numpy_intensitymap_to_pcolor(cost_volume_depth) (h, w, _) = gt_depth.shape dst = Image.new('RGB', (w, (h * 3))) dst.paste(img_pil, (0, 0)) dst.paste(pred_pil, (0, h)) dst.paste(gt_pil, (0, (2 * h))) dst.save(savepath) print(f'saved to {savepath}')
def copy_encoder(hf_encoder, pt_model): hf_encoder.embeddings.token_embedding.weight = pt_model.token_embedding.weight hf_encoder.embeddings.position_embedding.weight.data = pt_model.positional_embedding copy_linear(hf_encoder.final_layer_norm, pt_model.ln_final) copy_layers(hf_encoder.encoder.layers, pt_model.transformer.resblocks)
def visualize_size_wise_sampling_scores(filename, tp=False): key = 'micro' methods = ['kd_kldiv_wa1', 'cn_lfc_mr', 'kd_kldiv_ilos', 'ce_online_ewc'] lr = {'pamap': '0.01', 'dsads': '0.01', 'twor': '0.1', 'milan': '0.01'} samplings = ['random', 'icarl', 'kmeans', 'boundary', 'fwsr'] sizes = ([2, 4, 6, 8, 10, 15] if (not tp) else [0.4, 0.6, 0.8, 1.0]) for method in methods: size_to_all_accs = {} path_base = f'output_reports/{filename}/{method}' for sampling in samplings: lr_ = (str(0.001) if ((filename == 'twor') and (method in ['ce_online_ewc'])) else str(lr[filename])) path_name = path_base path_name += (f'_tp_{lr_}_random_{1.0}_6' if tp else f'_{sampling}_1.0_15') analyser = ResultAnalysis(path_name, 30) acc = analyser.compute_avg_report_by_sizes_wo_stddev() for size_ in sizes: if (key == 'micro'): if (sampling not in size_to_all_accs): size_to_all_accs[sampling] = {size_: acc[size_]['accuracy']} else: size_to_all_accs[sampling][size_] = acc[size_]['accuracy'] elif (key == 'macro'): if (sampling not in size_to_all_accs): size_to_all_accs[sampling] = {size_: acc[size_]['macro avg']['f1-score']} else: size_to_all_accs[sampling][size_] = acc[size_]['macro avg']['f1-score'] stability_visualizer.draw_accs_by_size(size_to_all_accs, filename, key=key, method=method)
def results2json(dataset, results, out_file): if isinstance(results[0], list): json_results = det2json(dataset, results) elif isinstance(results[0], tuple): json_results = segm2json(dataset, results) elif isinstance(results[0], np.ndarray): json_results = proposal2json(dataset, results) else: raise TypeError('invalid type of results') mmcv.dump(json_results, out_file)
def combine_parsed_consensus_results(results): relays = {} network_stats = {} (min_unix_time, max_unix_time) = (None, None) (counts_t, counts_eg, counts_e, counts_g, counts_m) = ([], [], [], [], []) (weights_t, weights_eg, weights_e, weights_g, weights_m) = ([], [], [], [], []) for result in results: if (result is None): continue if (result['type'] != 'consensus'): continue if (result['pub_dt'] is not None): unix_time = result['pub_dt'].replace(tzinfo=timezone.utc).timestamp() if ((min_unix_time is None) or (unix_time < min_unix_time)): min_unix_time = unix_time if ((max_unix_time is None) or (unix_time > max_unix_time)): max_unix_time = unix_time weights_t.append(result['weights']['total']) weights_eg.append(result['weights']['exitguard']) weights_g.append(result['weights']['guard']) weights_e.append(result['weights']['exit']) weights_m.append(result['weights']['middle']) counts_t.append(result['counts']['total']) counts_eg.append(result['counts']['exitguard']) counts_g.append(result['counts']['guard']) counts_e.append(result['counts']['exit']) counts_m.append(result['counts']['middle']) for fingerprint in result['relays']: relays.setdefault(fingerprint, Relay(fingerprint, result['relays'][fingerprint]['address'])) r = relays[fingerprint] r.weights.append(result['relays'][fingerprint]['weight']) if result['relays'][fingerprint]['is_exit']: r.num_exit += 1 if result['relays'][fingerprint]['is_guard']: r.num_guard += 1 network_stats = {'med_count_exitguard': int(round(median(counts_eg))), 'med_count_guard': int(round(median(counts_g))), 'med_count_exit': int(round(median(counts_e))), 'med_count_middle': int(round(median(counts_m))), 'med_count_total': int(round(median(counts_t))), 'med_weight_exitguard': float(median(weights_eg)), 'med_weight_guard': float(median(weights_g)), 'med_weight_exit': float(median(weights_e)), 'med_weight_middle': float(median(weights_m)), 'med_weight_total': 1.0} timestr = get_time_suffix(min_unix_time, max_unix_time) logging.info('Found {} total unique relays during {} with a median network size of {} relays'.format(len(relays), timestr, network_stats['med_count_total'])) return (relays, min_unix_time, max_unix_time, network_stats)
def draw_keypoints(img, corners, color=(0, 255, 0), radius=3, s=3): img = np.repeat(cv2.resize(img, None, fx=s, fy=s)[(..., np.newaxis)], 3, (- 1)) for c in np.stack(corners).T: cv2.circle(img, tuple((s * np.flip(c, 0))), radius, color, thickness=(- 1)) return img
class ExpLeakSqueeze(ExpLeak, SqueezeMixin): def __init__(self, batch_size=None, num_timesteps=None, **kwargs): super().__init__(**kwargs) self.squeeze_init(batch_size, num_timesteps) def forward(self, input_data: torch.Tensor) -> torch.Tensor: return self.squeeze_forward(input_data, super().forward) def _param_dict(self) -> dict: return self.squeeze_param_dict(super()._param_dict)
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)): channel_axis = (1 if (K.image_data_format() == 'channels_first') else (- 1)) filters = int((filters * alpha)) x = Conv2D(filters, kernel, padding='same', use_bias=False, strides=strides, name='conv1')(inputs) x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x) return Activation(relu6, name='conv1_relu')(x)
def eval_step(eval_len=args.seq_len, ood=False, n_evals=10): model.eval() total_loss = 0.0 total_acc = 0.0 for _ in range(n_evals): (data, label, op) = rules(args.batch_size, eval_len, args.gt_rules, args.order, args.d_dim, args.data_seed, ood) data = torch.Tensor(data).to(device) label = torch.Tensor(label).to(device) op = torch.Tensor(op).to(device) (out, score) = model(data, op) loss = criterion(out, label) acc = torch.eq((out >= 0.0), label).double().mean() total_acc += acc.item() total_loss += loss.item() return ((total_loss / float(n_evals)), ((total_acc * 100.0) / float(n_evals)))
class TripletLoss(object): def __init__(self, margin=None): self.margin = margin if (margin is not None): self.ranking_loss = nn.MarginRankingLoss(margin=margin) else: self.ranking_loss = nn.SoftMarginLoss() def __call__(self, global_feat, labels, normalize_feature=False): if normalize_feature: global_feat = normalize(global_feat, axis=(- 1)) dist_mat = euclidean_dist(global_feat, global_feat) (dist_ap, dist_an) = hard_example_mining(dist_mat, labels) y = dist_an.new().resize_as_(dist_an).fill_(1) if (self.margin is not None): loss = self.ranking_loss(dist_an, dist_ap, y) else: loss = self.ranking_loss((dist_an - dist_ap), y) return (loss, dist_ap, dist_an)
def test_maximum_bipartite_matching_explicit_zeros_count_as_edges(): data = [0, 0] indices = [1, 0] indptr = [0, 1, 2] graph = csr_matrix((data, indices, indptr), shape=(2, 2)) x = maximum_bipartite_matching(graph, perm_type='row') y = maximum_bipartite_matching(graph, perm_type='column') expected_matching = np.array([1, 0]) assert_array_equal(expected_matching, x) assert_array_equal(expected_matching, y)
class SymforcePyTorchTest(TestCase): @unittest.skipIf((importlib.util.find_spec('torch') is None), 'Requires PyTorch') def test_backend_test_function(self) -> None: import torch backend_test_function = codegen_util.load_generated_function('backend_test_function', TEST_DATA_DIR) backend_test_function(torch.tensor(1.0), torch.tensor(2.0)) x = torch.tensor(1.0).tile((1, 2, 3)) y = torch.tensor(2.0).tile((5, 1, 3)) results = backend_test_function(x, y) self.assertEqual(results[0].shape, ()) self.assertEqual(results[(- 1)].shape, (5, 2, 3)) results = backend_test_function(torch.tensor(1.0, dtype=torch.float64), torch.tensor(2.0, dtype=torch.float64)) self.assertEqual(results[0].dtype, torch.float64) self.assertEqual(results[(- 1)].dtype, torch.float64) results = backend_test_function(torch.tensor(1.0, dtype=torch.float64), torch.tensor(2.0, dtype=torch.float64), tensor_kwargs={'dtype': torch.float64}) self.assertEqual(results[0].dtype, torch.float64) self.assertEqual(results[(- 1)].dtype, torch.float64) @unittest.skipIf((importlib.util.find_spec('torch') is None), 'Requires PyTorch') def test_vector_matrix_args(self) -> None: import torch pytorch_func = codegen_util.load_generated_function('pytorch_func', TEST_DATA_DIR) (a_out, b_out, c_out, d_out, e_out, f_out) = pytorch_func(a=torch.tensor(1.0).tile((1, 2)), b=torch.tensor([1.0]).tile((1, 2, 1)), c=torch.tensor([1.0, 2.0, 3.0]).tile(3, 2, 1), d=torch.tensor([[1.0, 2.0], [3.0, 4.0]]).tile(1, 1, 1, 1), e=torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0]).tile(1, 1, 1), f=torch.tensor(np.eye(6)).tile(1, 1, 1, 1)) self.assertEqual(a_out.shape, (1, 2)) self.assertEqual(b_out.shape, (1, 2, 1)) self.assertEqual(c_out.shape, (3, 2, 3)) self.assertEqual(d_out.shape, (1, 1, 2, 2)) self.assertEqual(e_out.shape, (1, 1, 5)) self.assertEqual(f_out.shape, (1, 1, 6, 6))
def plot_entropy(X, attn): (unif_H, attn_H) = ([], []) for i in range(len(X)): L = len(X[i]) h = attn[i][1:(L - 1)] a = (h * np.log(np.clip(h, a_min=1e-08, a_max=None))) a = (- a.sum()) unif_H.append(np.log((L - 2))) attn_H.append(a) plt.scatter(unif_H, attn_H, s=1)
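# Worked check of the entropy computed in plot_entropy (a standalone sketch,
# not taken from the source): uniform attention over the L - 2 inner tokens
# attains the maximum possible entropy log(L - 2), which is exactly the
# reference value the function plots on the x axis.
import numpy as np
L = 6
h = np.full(L - 2, 1.0 / (L - 2))
attn_H = -(h * np.log(np.clip(h, a_min=1e-08, a_max=None))).sum()
assert np.isclose(attn_H, np.log(L - 2))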
def got() -> operations.GraphOfOperations: operations_graph = operations.GraphOfOperations() plans = operations.Generate(1, 1) operations_graph.append_operation(plans) for i in range(1, 3): list_id = f'List {i}' sub_list = operations.Selector((lambda thoughts, list_id=list_id: [thought for thought in thoughts if (thought.state['part'] == list_id)])) sub_list.add_predecessor(plans) operations_graph.add_operation(sub_list) intersected_subset = operations.Generate(1, 5) intersected_subset.add_predecessor(sub_list) operations_graph.add_operation(intersected_subset) score_sub_list = operations.Score(1, False, utils.num_errors) score_sub_list.add_predecessor(intersected_subset) operations_graph.add_operation(score_sub_list) keep_best_sub_list = operations.KeepBestN(1, False) keep_best_sub_list.add_predecessor(score_sub_list) operations_graph.add_operation(keep_best_sub_list) final_aggregate = operations.Aggregate(10) operations_graph.append_operation(final_aggregate) operations_graph.append_operation(operations.Score(1, False, utils.num_errors)) keep_best_aggregate_final = operations.KeepBestN(1, False) operations_graph.append_operation(keep_best_aggregate_final) operations_graph.append_operation(operations.GroundTruth(utils.test_set_intersection)) return operations_graph
class RandomJournal(): def __init__(self): self._entries: List[RandomJournalEntry] = [] self._cur_entry_idx = 0 self._graph_reader_nodes: List[Tuple[(Tensor, rf.RunCtx)]] = [] def append(self, *, distribution: str, mean: Optional[Union[(int, float, Tensor)]]=None, stddev: Optional[Union[(int, float, Tensor)]]=None, bound: Optional[Union[(int, float, Tensor)]]=None, minval: Optional[Union[(int, float, Tensor)]]=None, maxval: Optional[Union[(int, float, Tensor)]]=None, seed: Optional[Union[(int, Sequence[int], numpy.ndarray)]]=None, static: Optional[bool]=None, out: Optional[Tensor[numpy.ndarray]]): self._entries.append(RandomJournalEntry(out=out, control_flow_ctx=rf.get_current_control_flow_ctx(), run_ctx=rf.get_run_ctx(), distribution=distribution, mean=mean, stddev=stddev, bound=bound, minval=minval, maxval=maxval, seed=seed, static=static)) def get_next(self, *, new_out_template: Optional[Tensor]=None) -> RandomJournalEntry: assert (self._cur_entry_idx < len(self._entries)) entry = self._entries[self._cur_entry_idx] if new_out_template: assert (new_out_template.dtype == entry.out.dtype), f'random journal entry dtype mismatch, expected {new_out_template}, got {entry.out} at index {self._cur_entry_idx}' assert (len(new_out_template.dims) == len(entry.out.dims)), f'random journal entry dims mismatch, expected {new_out_template}, got {entry.out} at index {self._cur_entry_idx}' for (new_dim, old_dim) in zip(new_out_template.dims, entry.out.dims): new_dim: Dim old_dim: Dim assert (new_dim.dimension == old_dim.dimension), f'random journal entry dim mismatch, expected {new_out_template}, got {entry.out} at index {self._cur_entry_idx}' self._cur_entry_idx += 1 return entry def reached_end(self) -> bool: return (self._cur_entry_idx >= len(self._entries)) def add_graph_reader_node(self, out): self._graph_reader_nodes.append((out, rf.get_run_ctx())) def get_graph_reader_idx(self) -> int: return len(self._graph_reader_nodes) def get_recent_graph_reader_node_in_accessible_ctx(self) -> Optional[Tensor]: cur_control_flow_ctx = rf.get_current_control_flow_ctx() cur_run_ctx = rf.get_run_ctx() for (prev_out, prev_run_ctx) in reversed(self._graph_reader_nodes): if (prev_run_ctx != cur_run_ctx): return None if ControlFlowContext.is_parent_or_same(prev_out.control_flow_ctx, cur_control_flow_ctx): return prev_out consumers = rf.walk_tensor_consumers(prev_out, filter_outputs=(lambda x: ControlFlowContext.is_parent_or_same(x.control_flow_ctx, cur_control_flow_ctx)), ending_condition=(lambda x: ControlFlowContext.is_parent_or_same(x.control_flow_ctx, cur_control_flow_ctx))) if (not consumers): raise Exception(f'cannot handle {prev_out} in current {cur_control_flow_ctx}') return consumers[0] return None
def get_topk_classes(explainer, image, k=5): class_masks = explainer(image)[0].sigmoid() class_mask_means = class_masks.mean(dim=(1, 2)) (values, topk_classes) = class_mask_means.topk(k) return (values.cpu().numpy(), topk_classes.cpu().numpy())
def define_flags_with_default(**kwargs): for (key, val) in kwargs.items(): if isinstance(val, ConfigDict): config_flags.DEFINE_config_dict(key, val) elif isinstance(val, bool): absl.flags.DEFINE_bool(key, val, 'automatically defined flag') elif isinstance(val, int): absl.flags.DEFINE_integer(key, val, 'automatically defined flag') elif isinstance(val, float): absl.flags.DEFINE_float(key, val, 'automatically defined flag') elif isinstance(val, str): absl.flags.DEFINE_string(key, val, 'automatically defined flag') else: raise ValueError('Incorrect value type') return kwargs
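# Hypothetical usage of define_flags_with_default (flag names are my own;
# assumes absl-py is installed). Each keyword becomes an absl flag of the
# matching type on the global FLAGS registry, and the dict of defaults is
# returned for later inspection.
defaults = define_flags_with_default(seed=42, learning_rate=0.001, use_gpu=True, run_name='debug')
assert defaults['seed'] == 42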
@pytest.mark.parametrize('func', [ak.covar, ak.corr, ak.linear_fit]) def test_covar(func): assert isinstance(func([[1, 2, 3, 4], [5], [10]], [[4, 4, 0, 2], [1], [10]], axis=(- 1), highlevel=True), ak.Array) assert isinstance(func([[1, 2, 3, 4], [5], [10]], [[4, 4, 0, 2], [1], [10]], axis=(- 1), highlevel=False), ak.contents.Content) assert (func(ak.Array([[1, 2, 3, 4], [5], [10]], behavior=behavior_1), [[4, 4, 0, 2], [1], [10]], axis=(- 1), highlevel=True, behavior=behavior_2).behavior == behavior_2) assert (func([[1, 2, 3, 4], [5], [10]], ak.Array([[4, 4, 0, 2], [1], [10]], behavior=behavior_1), axis=(- 1), highlevel=True, behavior=behavior_2).behavior == behavior_2) assert (func(ak.Array([[1, 2, 3, 4], [5], [10]], behavior=behavior_1), [[4, 4, 0, 2], [1], [10]], axis=(- 1), highlevel=True).behavior == behavior_1) assert (func([[1, 2, 3, 4], [5], [10]], ak.Array([[4, 4, 0, 2], [1], [10]], behavior=behavior_1), axis=(- 1), highlevel=True).behavior == behavior_1) assert (func([[1, 2, 3, 4], [5], [10]], [[4, 4, 0, 2], [1], [10]], weight=ak.Array([[1, 2, 3, 2], [1], [1]], behavior=behavior_1), axis=(- 1), highlevel=True, behavior=behavior_2).behavior == behavior_2) assert (func([[1, 2, 3, 4], [5], [10]], [[4, 4, 0, 2], [1], [10]], weight=ak.Array([[1, 2, 3, 2], [1], [1]], behavior=behavior_1), axis=(- 1), highlevel=True).behavior == behavior_1)
def SuperNNova_stats_and_plots_thread(df, settings, plots=True, debug=False): pd.set_option('max_colwidth', 1000) print(lu.str_to_greenstr('STATISTICS USED IN SUPERNNOVA')) baseline(df, settings, plots, debug) (df_delta, df_delta_ood) = sm.get_delta_metrics(df, settings) bayesian(df, df_delta, df_delta_ood, settings, plots, debug) towards_cosmo(df, df_delta, df_delta_ood, settings, plots, debug)
def pdf(x, mu, sigma): x = ((x - mu) / sigma) return (numpy.exp(((- (x ** 2)) / 2)) / (numpy.sqrt((2 * numpy.pi)) * sigma))
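# Sanity check of pdf (my own example): at x == mu the Gaussian density
# equals 1 / (sigma * sqrt(2 * pi)), about 0.3989 for the standard normal.
import numpy
assert numpy.isclose(pdf(0.0, mu=0.0, sigma=1.0), 1.0 / numpy.sqrt(2.0 * numpy.pi))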
def main(): cfg = get_cfg() cfg.merge_from_file(args.cfg_file) cfg.merge_from_list(args.opts) cfg = infer_cfg(cfg) cfg.freeze() if cfg.MODEL_ANALYSE: model = Generalized_CNN(cfg) model.eval() analyser = Analyser(cfg, model, param_details=False) n_params = analyser.get_params()[1] (conv_flops, model_flops) = analyser.get_flops_activs(args.size[0], args.size[1], mode='flops') (conv_activs, model_activs) = analyser.get_flops_activs(args.size[0], args.size[1], mode='activations') logging_rank('') logging_rank('Params: {}'.format(n_params)) logging_rank('FLOPs: {:.4f} M / Conv_FLOPs: {:.4f} M'.format(model_flops, conv_flops)) logging_rank('ACTIVATIONs: {:.4f} M / Conv_ACTIVATIONs: {:.4f} M'.format(model_activs, conv_activs)) logging_rank('') del model
def test_UnionArray_FIXME(): content0 = ak.operations.from_iter([[1.1, 2.2, 3.3], [], [4.4, 5.5]], highlevel=False) content1 = ak.operations.from_iter(['one', 'two', 'three', 'four', 'five'], highlevel=False) tags = ak.index.Index8([]) index = ak.index.Index32([]) array = ak.contents.UnionArray(tags, index, [content0, content1]) assert (to_list(array) == []) assert (to_list(ak.operations.sort(array)) == []) assert (to_list(ak.operations.argsort(array)) == [])
class Function_Hankel1(BuiltinFunction): def __init__(self): BuiltinFunction.__init__(self, 'hankel1', nargs=2, conversions=dict(maple='HankelH1', mathematica='HankelH1', maxima='hankel1', sympy='hankel1', fricas='hankelH1')) def _evalf_(self, nu, z, parent, algorithm=None): return _mpmath_utils_call(_mpmath_hankel1, nu, z, parent=parent) def _latex_(self): return 'H_{\\nu}^{(1)}' def _print_latex_(self, nu, z): return 'H_{{{}}}^{{(1)}}\\left({}\\right)'.format(latex(nu), latex(z)) def _derivative_(self, nu, z, diff_param): if (diff_param == 1): return (((nu * hankel1(nu, z)) / z) - hankel1((nu + 1), z)) else: raise NotImplementedError('derivative with respect to order')
def pt_project(train_queue, valid_queue, model, architect, criterion, optimizer, epoch, args, infer, query): def project(model, args): (num_edge, num_op) = (model.num_edge, model.num_op) remain_eids = torch.nonzero(model.candidate_flags).cpu().numpy().T[0] if (args.edge_decision == 'random'): selected_eid = np.random.choice(remain_eids, size=1)[0] if (args.proj_crit == 'loss'): crit_idx = 1 compare = (lambda x, y: (x > y)) if (args.proj_crit == 'acc'): crit_idx = 0 compare = (lambda x, y: (x < y)) best_opid = 0 crit_extrema = None for opid in range(num_op): weights = model.get_projected_weights() proj_mask = torch.ones_like(weights[selected_eid]) proj_mask[opid] = 0 weights[selected_eid] = (weights[selected_eid] * proj_mask) valid_stats = infer(valid_queue, model, criterion, log=False, eval=False, weights=weights) crit = valid_stats[crit_idx] if ((crit_extrema is None) or compare(crit, crit_extrema)): crit_extrema = crit best_opid = opid logging.info('valid_acc %f', valid_stats[0]) logging.info('valid_loss %f', valid_stats[1]) logging.info('best opid %d', best_opid) return (selected_eid, best_opid) if (not args.fast): api = API('../data/NAS-Bench-201-v1_0-e61699.pth') model.train() model.printing(logging) (train_acc, train_obj) = infer(train_queue, model, criterion, log=False) logging.info('train_acc %f', train_acc) logging.info('train_loss %f', train_obj) (valid_acc, valid_obj) = infer(valid_queue, model, criterion, log=False) logging.info('valid_acc %f', valid_acc) logging.info('valid_loss %f', valid_obj) objs = ig_utils.AvgrageMeter() top1 = ig_utils.AvgrageMeter() top5 = ig_utils.AvgrageMeter() num_edges = model.arch_parameters()[0].shape[0] proj_intv = args.proj_intv tune_epochs = (proj_intv * (num_edges - 1)) model.reset_optimizer((args.learning_rate / 10), args.momentum, args.weight_decay) for epoch in range(tune_epochs): logging.info('epoch %d', epoch) if (((epoch % proj_intv) == 0) or (epoch == (tune_epochs - 1))): logging.info('project') (selected_eid, best_opid) = project(model, args) model.project_op(selected_eid, best_opid) model.printing(logging) for (step, (input, target)) in enumerate(train_queue): model.train() n = input.size(0) input = input.cuda() target = target.cuda(non_blocking=True) (input_search, target_search) = next(iter(valid_queue)) input_search = input_search.cuda() target_search = target_search.cuda(non_blocking=True) optimizer.zero_grad() architect.optimizer.zero_grad() shared = architect.step(input, target, input_search, target_search, return_logits=True) optimizer.zero_grad() architect.optimizer.zero_grad() (logits, loss) = model.step(input, target, args, shared=shared) (prec1, prec5) = ig_utils.accuracy(logits, target, topk=(1, 5)) objs.update(loss.data, n) top1.update(prec1.data, n) top5.update(prec5.data, n) if ((step % args.report_freq) == 0): logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg) if args.fast: break model.printing(logging) (train_acc, train_obj) = infer(train_queue, model, criterion, log=False) logging.info('train_acc %f', train_acc) logging.info('train_loss %f', train_obj) (valid_acc, valid_obj) = infer(valid_queue, model, criterion, log=False) logging.info('valid_acc %f', valid_acc) logging.info('valid_loss %f', valid_obj) if (not args.fast): query(api, model.genotype(), logging) return
def compatible_tags(python_version=None, interpreter=None, platforms=None): if (not python_version): python_version = sys.version_info[:2] platforms = list((platforms or _platform_tags())) for version in _py_interpreter_range(python_version): for platform_ in platforms: (yield Tag(version, 'none', platform_)) if interpreter: (yield Tag(interpreter, 'none', 'any')) for version in _py_interpreter_range(python_version): (yield Tag(version, 'none', 'any'))
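# Illustration of the ordering compatible_tags yields (a sketch based on the
# behaviour of pip's vendored packaging code, from which this helper appears
# to be taken; not output captured from this source). For
# python_version=(3, 9), interpreter='cp39', platforms=['linux_x86_64']:
#   py39-none-linux_x86_64, py3-none-linux_x86_64, py38-none-linux_x86_64, ...
#   cp39-none-any,
#   py39-none-any, py3-none-any, py38-none-any, ...
# i.e. platform-specific tags first, then the interpreter wildcard, then the
# generic pyXY-none-any fallbacks.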
def test_invalid_json_minor(): json_str = '{"name": "John", "age": 30, "city": "New York",}' assert (fix_and_parse_json(json_str, try_to_fix_with_gpt=False) == {'name': 'John', 'age': 30, 'city': 'New York'})
def _get_codegen_gemm_opts(node, state, sdfg, adesc, bdesc, cdesc, alpha, beta, cdtype, func) -> Dict[(str, Any)]: from dace.codegen.common import sym2cpp from dace.libraries.blas.blas_helpers import get_gemm_opts ((_, _, ashape, astride), (_, _, bshape, bstride), (_, _, cshape, cstride)) = _get_matmul_operands(node, state, sdfg) if getattr(node, 'transA', False): ashape = list(reversed(ashape)) astride = list(reversed(astride)) if getattr(node, 'transB', False): bshape = list(reversed(bshape)) bstride = list(reversed(bstride)) opt = get_gemm_opts(astride, bstride, cstride) bopt = _get_batchmm_opts(ashape, astride, bshape, bstride, cshape, cstride) opt['x'] = '_a' opt['y'] = '_b' opt['xdtype'] = adesc.dtype opt['ydtype'] = bdesc.dtype opt['cdtype'] = cdesc.dtype opt['M'] = sym2cpp(ashape[(- 2)]) opt['N'] = sym2cpp(bshape[(- 1)]) opt['K'] = sym2cpp(ashape[(- 1)]) opt['lda'] = sym2cpp(opt['lda']) opt['ldb'] = sym2cpp(opt['ldb']) opt['ldc'] = sym2cpp(opt['ldc']) if opt['swap']: if bopt: (bopt['sa'], bopt['sb']) = (bopt['sb'], bopt['sa']) (opt['lda'], opt['ldb']) = (opt['ldb'], opt['lda']) (opt['x'], opt['y']) = (opt['y'], opt['x']) (opt['xdtype'], opt['ydtype']) = (opt['ydtype'], opt['xdtype']) (opt['ta'], opt['tb']) = (opt['tb'], opt['ta']) (opt['M'], opt['N']) = (opt['N'], opt['M']) opt['alpha'] = alpha opt['beta'] = beta opt['dtype'] = cdtype opt['func'] = func if bopt: opt['stride_a'] = sym2cpp(bopt['sa']) opt['stride_b'] = sym2cpp(bopt['sb']) opt['stride_c'] = sym2cpp(bopt['sc']) opt['BATCH'] = sym2cpp(bopt['b']) else: opt['BATCH'] = None return opt
class Dict(object): def __init__(self, data=None, lower=False, seq_len=50): self.idxToLabel = {} self.labelToIdx = {} self.frequencies = {} self.lower = lower self.seq_length = seq_len self.special = [] if (data is not None): if (type(data) == str): self.loadFile(data) else: self.addSpecials(data) def size(self): return len(self.idxToLabel) def loadFile(self, filename): for line in codecs.open(filename, 'rb', 'utf-8'): fields = line.split() if (len(fields) > 2): label = ' '.join(fields[:(- 1)]) idx = int(fields[(- 1)]) else: label = fields[0] idx = int(fields[1]) self.add(label, idx) def writeFile(self, filename): with codecs.open(filename, 'w', 'utf-8') as file: for i in range(self.size()): label = self.idxToLabel[i] file.write(('%s %d\n' % (label, i))) file.close() def lookup(self, key, default=None): key = (key.lower() if self.lower else key) try: return self.labelToIdx[key] except KeyError: return default def getLabel(self, idx, default=None): try: return self.idxToLabel[idx] except KeyError: return default def addSpecial(self, label, idx=None): idx = self.add(label, idx) self.special += [idx] def addSpecials(self, labels): for label in labels: self.addSpecial(label) def add(self, label, idx=None): label = (label.lower() if self.lower else label) if (idx is not None): self.idxToLabel[idx] = label self.labelToIdx[label] = idx elif (label in self.labelToIdx): idx = self.labelToIdx[label] else: idx = len(self.idxToLabel) self.idxToLabel[idx] = label self.labelToIdx[label] = idx if (idx not in self.frequencies): self.frequencies[idx] = 1 else: self.frequencies[idx] += 1 return idx def prune(self, size): if (size >= self.size()): return self freq = torch.Tensor([self.frequencies[i] for i in range(len(self.frequencies))]) (_, idx) = torch.sort(freq, 0, True) newDict = Dict() newDict.lower = self.lower newDict.seq_length = self.seq_length for i in self.special: newDict.addSpecial(self.idxToLabel[i]) for i in idx[:size]: newDict.add(self.idxToLabel[i.item()]) return newDict def convertToIdx(self, labels, unkWord, padding=False, bosWord=None, eosWord=None): vec = [] if (bosWord is not None): vec += [self.lookup(bosWord)] unk = self.lookup(unkWord) vec += [self.lookup(label, default=unk) for label in labels] if (padding == True): vec += ([onmt.Constants.PAD] * (self.seq_length - len(labels))) if (eosWord is not None): vec += [self.lookup(eosWord)] return torch.LongTensor(vec) def convertToLabels(self, idx, stop): labels = [] for i in idx: labels += [self.getLabel(i)] if (i == stop): break return labels
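# Minimal usage sketch for the Dict vocabulary class (my own example): build
# a small vocabulary with special tokens, then look words up with a fallback
# for out-of-vocabulary tokens.
d = Dict(['<pad>', '<unk>'], lower=True)    # specials get the first indices
d.add('Hello')
d.add('world')
assert d.lookup('hello') == 2               # lowercased before lookup
assert d.lookup('missing', default=d.lookup('<unk>')) == d.lookup('<unk>')
assert d.size() == 4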
def np_func_to_list(func): if (not func.is_numpy_attribute): return [] return (np_func_to_list(func.obj) + [func.attribute])
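# Hypothetical check of np_func_to_list with a minimal stand-in for the
# attribute-node type it expects (objects exposing is_numpy_attribute, obj,
# and attribute): a chain representing np.linalg.norm unrolls to
# ['linalg', 'norm'].
from collections import namedtuple
_Attr = namedtuple('_Attr', ['is_numpy_attribute', 'obj', 'attribute'])
_np_root = _Attr(False, None, None)         # the bare `np` module node
_linalg = _Attr(True, _np_root, 'linalg')
_norm = _Attr(True, _linalg, 'norm')
assert np_func_to_list(_norm) == ['linalg', 'norm']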
class ReflectionPad1d(_ReflectionPadNd): padding: Tuple[(int, int)] def __init__(self, padding: _size_2_t) -> None: super(ReflectionPad1d, self).__init__() self.padding = _pair(padding)
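# Behaviour sketch for ReflectionPad1d (standard PyTorch semantics): each side
# of the last dimension is padded by reflecting the signal around its edge
# values, without repeating the edges themselves.
import torch
pad = torch.nn.ReflectionPad1d(2)
x = torch.arange(4.0).reshape(1, 1, 4)      # [[[0, 1, 2, 3]]]
assert pad(x).tolist() == [[[2.0, 1.0, 0.0, 1.0, 2.0, 3.0, 2.0, 1.0]]]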
def is_non_empty_query(query: dict[(str, Any)]) -> bool: result = [] for (key, values) in query.items(): if (isinstance(values, str) or (not hasattr(values, '__iter__'))): values = [values] for value in values: if (value is not None): result.append(((key.encode('utf-8') if isinstance(key, str) else key), (value.encode('utf-8') if isinstance(value, str) else value))) return (urlencode(result, doseq=True) != '')
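# Quick behaviour check of is_non_empty_query (my own examples): None values
# are dropped before urlencoding, so a query whose only values are None counts
# as empty, while an empty string is still a real value.
assert is_non_empty_query({'q': 'cats', 'page': None})
assert is_non_empty_query({'q': ''})
assert not is_non_empty_query({'q': None})
assert not is_non_empty_query({})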
class LevitModel(metaclass=DummyObject): _backends = ['torch'] def __init__(self, *args, **kwargs): requires_backends(self, ['torch'])
class InterfaceFunctionElement(SageObject): def __init__(self, obj, name): self._obj = obj self._name = name def _repr_(self): return ('%s' % self._name) def __call__(self, *args, **kwds): return self._obj.parent().function_call(self._name, ([self._obj] + list(args)), kwds) def help(self): print(self.__doc__) def _instancedoc_(self): M = self._obj.parent() return M.help(self._name)
class QATConfig(): def __init__(self, weight_training_method: TrainingMethod=TrainingMethod.STE, activation_training_method: TrainingMethod=TrainingMethod.STE, weight_quantizer_params_override: Dict=None, activation_quantizer_params_override: Dict=None): self.weight_training_method = weight_training_method self.activation_training_method = activation_training_method self.weight_quantizer_params_override = ({} if (weight_quantizer_params_override is None) else weight_quantizer_params_override) self.activation_quantizer_params_override = ({} if (activation_quantizer_params_override is None) else activation_quantizer_params_override)
def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels): num_imgs = 1 feat = torch.rand(1, 1, 3, 3) assign_config = dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, ignore_iof_thr=(- 1)) sampler_config = dict(type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=(- 1), add_gt_as_proposals=True) bbox_assigner = build_assigner(assign_config) bbox_sampler = build_sampler(sampler_config) gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample(assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=feat) sampling_results.append(sampling_result) return sampling_results
def to_eval_kwargs(args): kwargs_dict = {'eval_log_dir': args.eval_log_dir, 'log_dir': args.log_dir, 'loss_mode': args.loss_mode, 'run_id': args.run_id} return kwargs_dict
@test_utils.test(arch=supported_archs_taichi_ndarray) def test_gaussian_kernel(): M_PI = 3.14159265358979 @ti.func def gaussian(x, sigma): return (ti.exp(((- 0.5) * ti.pow((x / sigma), 2))) / (sigma * ti.sqrt((2.0 * M_PI)))) @ti.kernel def fill_gaussian_kernel(ker: ti.types.ndarray(ti.f32, ndim=1), N: ti.i32): sum = 0.0 for i in range(((2 * N) + 1)): ker[i] = gaussian((i - N), ti.sqrt(N)) sum += ker[i] for i in range(((2 * N) + 1)): ker[i] = (ker[i] / sum) N = 4 arr = ti.ndarray(dtype=ti.f32, shape=20) fill_gaussian_kernel(arr, N) res = arr.to_numpy() np_arr = np.zeros(20, dtype=np.float32) fill_gaussian_kernel(np_arr, N) assert test_utils.allclose(res, np_arr)
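# Plain-NumPy reference for the same kernel construction (my own sketch,
# independent of Taichi): a discrete Gaussian with sigma = sqrt(N) over
# 2N + 1 taps, normalized so the weights sum to 1.
import numpy as np
N = 4
xs = np.arange(2 * N + 1) - N
sigma = np.sqrt(N)
ref = np.exp(-0.5 * (xs / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))
ref /= ref.sum()
assert np.isclose(ref.sum(), 1.0)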