code
stringlengths
101
5.91M
def save_results(results_path: str, random_seed: int, model_path: str, custom_objects: dict, final_hist: dict, init_hist: dict):
    """Reload the best checkpoint, re-save it as the final model, and export
    the training-history dictionaries as JSON.

    Args:
        results_path: Path prefix under which outputs are written (the code
            concatenates filenames directly, so it should end with a separator).
        random_seed: Seed recorded into the final history for reproducibility.
        model_path: Path of the best checkpoint to reload.
        custom_objects: Custom objects required by ``tf.keras.models.load_model``.
        final_hist: Final training history (mutated: gains a 'random_seed' key).
        init_hist: History of the initial pool phase.
    """
    model = tf.keras.models.load_model(model_path, custom_objects=custom_objects)
    model.save(results_path + 'final_model')
    print('Best model saved to:', model_path)

    # Use context managers so the JSON file handles are closed deterministically
    # (the original passed bare open() handles to json.dump and leaked them).
    with open(results_path + 'initial_pool_results.json', 'w') as hist_file:
        json.dump(init_hist, hist_file)

    final_hist['random_seed'] = random_seed
    with open(results_path + 'final_model_history.json', 'w') as hist_file:
        json.dump(final_hist, hist_file)
    print('Exported training dictionaries to: ', results_path)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1.0, last_epoch=(- 1)):
    """Create a LambdaLR schedule: linear warmup to the base LR over
    *num_warmup_steps*, then cosine decay with *num_cycles* hard restarts
    over the remaining steps, reaching 0 at *num_training_steps*.
    """
    def lr_lambda(current_step):
        # Warmup phase: scale linearly from 0 up to 1.
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Fraction of the post-warmup schedule completed, in [0, 1).
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # Position within the current cosine cycle (hard restart at each
        # integer multiple of 1/num_cycles).
        cycle_position = (float(num_cycles) * progress) % 1.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * cycle_position)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def inner_dtype_of_form(form):
    """Recursively resolve the innermost numba dtype of an Awkward Form.

    Returns None for forms with no single leaf dtype (records, option
    types, or a missing form); unions are unified via numba's typing
    context.
    """
    if form is None:
        return None
    if isinstance(form, ak.forms.NumpyForm):
        return numba.from_dtype(ak.types.numpytype.primitive_to_dtype(form.primitive))
    if isinstance(form, ak.forms.EmptyForm):
        # Empty arrays are treated as float64 downstream.
        return numba.types.float64
    nested_forms = (ak.forms.RegularForm, ak.forms.ListForm, ak.forms.ListOffsetForm, ak.forms.IndexedForm)
    if isinstance(form, nested_forms):
        # List-like wrappers: recurse into the content.
        return inner_dtype_of_form(form.content)
    opaque_forms = (ak.forms.RecordForm, ak.forms.IndexedOptionForm, ak.forms.ByteMaskedForm, ak.forms.BitMaskedForm, ak.forms.UnmaskedForm)
    if isinstance(form, opaque_forms):
        # Records and option types have no single inner dtype.
        return None
    if isinstance(form, ak.forms.UnionForm):
        context = numba.core.typing.Context()
        return context.unify_types(*[inner_dtype_of_form(member) for member in form.contents])
    raise AssertionError(f'unrecognized Form type: {type(form)}')
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0, bounds=None, epsilon=1e-08, scale=None, offset=None, messages=MSG_ALL, maxCGit=(- 1), maxfun=None, eta=(- 1), stepmx=0, accuracy=0, fmin=0, ftol=(- 1), xtol=(- 1), pgtol=(- 1), rescale=(- 1), disp=None, callback=None):
    """Minimize a function with variables bounded via the truncated Newton
    (TNC) algorithm.

    Legacy interface: translates the classic ``fmin_tnc`` arguments onto
    ``_minimize_tnc`` and returns the tuple ``(x, nfev, status)``.
    """
    # Select how the gradient is obtained: finite differences (approx_grad),
    # a memoized numerical derivative of func, or the user-supplied fprime.
    if approx_grad:
        fun = func
        jac = None
    elif (fprime is None):
        fun = MemoizeJac(func)
        jac = fun.derivative
    else:
        fun = func
        jac = fprime
    # 'disp' (if given) overrides the legacy bit-flag 'messages' verbosity.
    if (disp is not None):
        mesg_num = disp
    else:
        mesg_num = {0: MSG_NONE, 1: MSG_ITER, 2: MSG_INFO, 3: MSG_VERS, 4: MSG_EXIT, 5: MSG_ALL}.get(messages, MSG_ALL)
    # Repack the legacy keyword names into the options dict _minimize_tnc
    # expects (note the renames: epsilon->eps, fmin->minfev, pgtol->gtol).
    opts = {'eps': epsilon, 'scale': scale, 'offset': offset, 'mesg_num': mesg_num, 'maxCGit': maxCGit, 'maxfun': maxfun, 'eta': eta, 'stepmx': stepmx, 'accuracy': accuracy, 'minfev': fmin, 'ftol': ftol, 'xtol': xtol, 'gtol': pgtol, 'rescale': rescale, 'disp': False}
    res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)
    return (res['x'], res['nfev'], res['status'])
def de_escape(string: str):
    """Convert the XML/HTML entities ``&lt;``, ``&gt;`` and ``&amp;`` back to
    their literal characters.

    ``&amp;`` must be replaced LAST: the original code replaced it first,
    which double-unescaped inputs like ``&amp;lt;`` (literally ``&lt;``)
    all the way down to ``<``.
    """
    string = string.replace('&lt;', '<')
    string = string.replace('&gt;', '>')
    string = string.replace('&amp;', '&')
    return string
def get_net_vectors(subject_list, kind, atlas_name='aal'):
    """Load each subject's connectivity network, Fisher z-transform it, and
    return the stacked upper-triangle vectors as a (subjects, edges) matrix.
    """
    networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
    # Fisher z-transform (arctanh) each correlation matrix.
    transformed = [np.arctanh(net) for net in networks]
    # Connectivity matrices are square; keep the strict upper triangle only.
    upper_idx = np.triu_indices_from(transformed[0], 1)
    return np.vstack([net[upper_idx] for net in transformed])
()
# NOTE(review): the bare "()" above looks like the remnant of a decorator
# (probably @pytest.fixture(...)) that was garbled when this file was
# flattened -- confirm against the original repository before use.
def blacklist_cl():
    """Yield a Client connected to a local master (port 5055) and four local
    workers (ports 5060-5063), with two ingested test videos; clean up the
    database and temporary files on teardown."""
    (cfg_path, cfg) = make_config(master_port='5055', worker_port='5060')
    master = 'localhost:5055'
    workers = ['localhost:{:04d}'.format((5060 + d)) for d in range(4)]
    with Client(config_path=cfg_path, no_workers_timeout=120, master=master, workers=workers, enable_watchdog=False) as cl:
        # Prepare fixture data: download and register the two test videos.
        (vid1_path, vid2_path) = download_videos()
        cl.ingest_videos([('test1', vid1_path), ('test2', vid2_path)])
        (yield cl)
    # Teardown: remove the database and all temporary files.
    run(['rm', '-rf', cfg['storage']['db_path'], cfg_path, vid1_path, vid2_path])
class OpenAIGPTTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for the original OpenAI GPT model.

    Pre-tokenizes with SpaCy + ftfy when those optional dependencies are
    installed, otherwise falls back to a lower-casing BERT BasicTokenizer,
    then applies byte-pair encoding with a '</w>' end-of-word marker.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs):
        """Load the vocabulary (JSON) and merges (text) files.

        Args:
            vocab_file: JSON mapping token string -> id.
            merges_file: merge rules, one pair per line (header on line 1).
            unk_token: token used for out-of-vocabulary strings.
        """
        super().__init__(unk_token=unk_token, **kwargs)
        # Prefer SpaCy + ftfy; both are optional, so fall back to the BERT
        # BasicTokenizer when either import fails.
        try:
            import ftfy
            from spacy.lang.en import English
            _nlp = English()
            self.nlp = _nlp.Defaults.create_tokenizer(_nlp)
            self.fix_text = ftfy.fix_text
        except ImportError:
            logger.warning('ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.')
            self.nlp = BasicTokenizer(do_lower_case=True)
            self.fix_text = None  # None marks the fallback path in _tokenize
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # Merges file: line 0 is a header and the final entry is empty, hence [1:-1].
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:(- 1)]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}  # memoizes token -> BPE string in bpe()

    # NOTE(review): upstream transformers declares do_lower_case and
    # vocab_size as @property methods; the decorators may have been lost
    # when this file was flattened -- confirm against the original source.
    def do_lower_case(self):
        return True

    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base encoder plus added tokens)."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to one token.

        Returns the merged symbols joined by single spaces, with '</w>'
        appended to the final symbol to mark the word boundary.
        """
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        if (token in self.cache):
            return self.cache[token]
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Greedily merge the lowest-ranked known bigram each round.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if (word == '\n </w>'):
            word = '\n</w>'  # special case: keep the newline glued to its marker
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string: pre-tokenize, then BPE-split each token."""
        split_tokens = []
        if (self.fix_text is None):
            # Fallback path: BasicTokenizer (already lower-cases).
            text = self.nlp.tokenize(text)
            for token in text:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        else:
            # SpaCy path: fix mojibake with ftfy, standardize, then tokenize.
            text = self.nlp(text_standardize(self.fix_text(text)))
            for token in text:
                split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its vocabulary id (falls back to unk)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map a vocabulary id back to its token string (falls back to unk)."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens back into text, turning '</w>' markers into spaces."""
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocab JSON and merges file into *save_directory*.

        Returns (vocab_file, merge_file) paths, or None (after logging an
        error) when *save_directory* is not a directory.
        """
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # Write merges in rank order, warning if ranks are not consecutive.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merge_file))
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)
class SumSegmentTree(SegmentTree):
    """Segment tree specialised to addition, supporting range sums and
    prefix-sum index search (as used by prioritized replay buffers)."""

    def __init__(self, capacity):
        super(SumSegmentTree, self).__init__(capacity=capacity, operation=operator.add, neutral_element=0.0)

    def sum(self, start=0, end=None):
        """Return the sum of elements in the range [start, end)."""
        return super(SumSegmentTree, self).reduce(start, end)

    def find_prefixsum_idx(self, prefixsum):
        """Find the highest leaf index such that the cumulative sum of all
        preceding leaves is <= *prefixsum*."""
        assert 0 <= prefixsum <= self.sum() + 1e-05
        node = 1  # start at the root of the implicit binary tree
        while node < self._capacity:  # descend until a leaf is reached
            left_child = 2 * node
            if self._value[left_child] > prefixsum:
                node = left_child
            else:
                # Skip the left subtree; continue in the right one.
                prefixsum -= self._value[left_child]
                node = left_child + 1
        return node - self._capacity
def create_model_and_diffusion_2d(learn_sigma, num_channels, num_res_blocks, dropout, diffusion_steps, noise_schedule, timestep_respacing, use_kl, predict_xstart, rescale_timesteps, rescale_learned_sigmas, use_checkpoint, use_fp16, in_channels=2):
    """Construct a SyntheticModel and its Gaussian diffusion process from the
    given hyperparameters; returns the (model, diffusion) pair."""
    model_kwargs = dict(
        in_channels=in_channels,
        model_channels=num_channels,
        out_channels=in_channels,  # model reconstructs the same channel count
        num_res_blocks=num_res_blocks,
        dropout=dropout,
        use_checkpoint=use_checkpoint,
        use_fp16=use_fp16,
    )
    diffusion_kwargs = dict(
        steps=diffusion_steps,
        learn_sigma=learn_sigma,
        noise_schedule=noise_schedule,
        use_kl=use_kl,
        predict_xstart=predict_xstart,
        rescale_timesteps=rescale_timesteps,
        rescale_learned_sigmas=rescale_learned_sigmas,
        timestep_respacing=timestep_respacing,
    )
    return (SyntheticModel(**model_kwargs), create_gaussian_diffusion(**diffusion_kwargs))
def xDeepFM(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 128, 64), cin_layer_size=(128, 128), cin_split_half=True, cin_activation='relu', l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, l2_reg_cin=0, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary'):
    """Build an xDeepFM model: a linear (wide) tower, a plain DNN tower, and
    an optional Compressed Interaction Network (CIN) tower, summed into one
    logit and passed through the task-specific prediction layer."""
    features = build_input_features(linear_feature_columns + dnn_feature_columns)
    inputs_list = list(features.values())

    # Wide/linear part.
    linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear', l2_reg=l2_reg_linear)

    # Shared embeddings feeding both deep towers.
    sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, seed)
    fm_input = concat_func(sparse_embedding_list, axis=1)

    # Deep (DNN) part.
    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input)
    dnn_logit = Dense(1, use_bias=False)(dnn_output)
    final_logit = add_func([linear_logit, dnn_logit])

    # CIN part (skipped entirely when no CIN layers are configured).
    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, cin_activation, cin_split_half, l2_reg_cin, seed)(fm_input)
        exFM_logit = Dense(1, use_bias=False)(exFM_out)
        final_logit = add_func([final_logit, exFM_logit])

    output = PredictionLayer(task)(final_logit)
    return Model(inputs=inputs_list, outputs=output)
class TFBertPreTrainedModel():
    """Placeholder class used when TensorFlow is not installed.

    Every entry point simply delegates to ``requires_tf`` (which presumably
    raises an informative error about the missing dependency -- its body is
    defined elsewhere; confirm there)."""

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def save_json(path: Union[(str, Path, TextIO)], music: 'Music', skip_missing: bool=True, ensure_ascii: bool=False, compressed: bool=None, **kwargs):
    """Serialize *music* to JSON at *path*.

    *path* may be a filename (str/Path) or a writable text object. For
    filenames, gzip compression is applied when *compressed* is True, or
    inferred from a '.gz' suffix when *compressed* is None. Extra keyword
    arguments are forwarded to ``json.dumps``.
    """
    data = json.dumps(music.to_ordered_dict(skip_missing=skip_missing, deepcopy=False), ensure_ascii=ensure_ascii, **kwargs)

    if isinstance(path, (str, Path)):
        if compressed is None:
            # Infer compression from the file extension.
            compressed = str(path).lower().endswith('.gz')
        if compressed:
            with gzip.open(path, 'wt', encoding='utf-8') as f:
                f.write(data)
        else:
            with open(path, 'w', encoding='utf-8') as f:
                f.write(data)
        return

    # Writable text object: hand the serialized payload straight over.
    path.write(data)
def load_model(model_fn, model_name, args=None):
    """Load a model class via load_module and instantiate it, expanding
    *args* as keyword arguments when provided (non-empty)."""
    model_cls = load_module(model_fn, model_name)
    if not args:
        return model_cls()
    return model_cls(**args)
def download_models(folder=CACHE_DIR_4DHUMANS):
    """Download and unpack the HMR2 model/data archive into *folder*.

    Skips the download when the archive already exists at the destination;
    '.tar.gz' archives are extracted in place after downloading.
    """
    import os
    os.makedirs(folder, exist_ok=True)
    # Map of archive name -> [source URL, destination directory].
    # NOTE(review): the URL string was garbled in the original source (it
    # read "[' folder]"); the value below is reconstructed from the upstream
    # 4D-Humans repository -- verify before shipping.
    download_files = {
        'hmr2_data.tar.gz': [
            'https://people.eecs.berkeley.edu/~jathushan/projects/4dhumans/hmr2_data.tar.gz',
            folder,
        ],
    }
    for file_name, url in download_files.items():
        output_path = os.path.join(url[1], file_name)
        if not os.path.exists(output_path):
            print('Downloading file: ' + file_name)
            output = cache_url(url[0], output_path)
            assert os.path.exists(output_path), f'{output} does not exist'
            if file_name.endswith('.tar.gz'):
                print('Extracting file: ' + file_name)
                # NOTE(review): building a shell command by concatenation is
                # fragile (spaces/metacharacters in paths); consider
                # subprocess.run(['tar', '-xvf', output_path, '-C', url[1]]).
                os.system('tar -xvf ' + output_path + ' -C ' + url[1])
class GNMTAttentionMultiCell(tf.nn.rnn_cell.MultiRNNCell):
    """GNMT-style multi-layer RNN cell: layer 0 is an attention cell, and
    each later layer receives the attention vector concatenated onto its
    hidden state before being stepped."""

    def __init__(self, attention_cell, cells, use_new_attention=False):
        """Prepend *attention_cell* to *cells*. When *use_new_attention* is
        True, upper layers use the attention computed at the current step;
        otherwise they use the previous step's attention (from `state[0]`)."""
        cells = ([attention_cell] + cells)
        self.use_new_attention = use_new_attention
        super(GNMTAttentionMultiCell, self).__init__(cells, state_is_tuple=True)

    def __call__(self, inputs, state, scope=None):
        """Run one time step through all layers."""
        if (not nest.is_sequence(state)):
            raise ValueError(('Expected state to be a tuple of length %d, but received: %s' % (len(self.state_size), state)))
        with tf.variable_scope((scope or 'multi_rnn_cell')):
            new_states = []
            # Layer 0: the attention-wrapped cell produces both the layer
            # output and the attention state used by the layers above.
            with tf.variable_scope('cell_0_attention'):
                attention_cell = self._cells[0]
                attention_state = state[0]
                (cur_inp, new_attention_state) = attention_cell(inputs, attention_state)
                new_states.append(new_attention_state)
            # Remaining layers: plain LSTM cells; the attention vector is
            # concatenated onto each layer's hidden state h.
            for i in range(1, len(self._cells)):
                with tf.variable_scope(('cell_%d' % i)):
                    cell = self._cells[i]
                    cur_state = state[i]
                    if (not isinstance(cur_state, tf.contrib.rnn.LSTMStateTuple)):
                        raise TypeError('`state[{}]` must be a LSTMStateTuple'.format(i))
                    if self.use_new_attention:
                        cur_state = cur_state._replace(h=tf.concat([cur_state.h, new_attention_state.attention], 1))
                    else:
                        # Use the attention carried over from the previous step.
                        cur_state = cur_state._replace(h=tf.concat([cur_state.h, attention_state.attention], 1))
                    (cur_inp, new_state) = cell(cur_inp, cur_state)
                    new_states.append(new_state)
        return (cur_inp, tuple(new_states))
class NormalizedMatcher(Matcher):
    """PEP 440 version matcher over NormalizedVersion.

    Each constraint operator maps (via ``_operators``) to a ``_match_*``
    method taking (version, constraint, prefix), where *prefix* indicates a
    trailing ``.*`` prefix-match constraint."""
    version_class = NormalizedVersion
    # operator string -> handler method name
    _operators = {'~=': '_match_compatible', '<': '_match_lt', '>': '_match_gt', '<=': '_match_le', '>=': '_match_ge', '==': '_match_eq', '===': '_match_arbitrary', '!=': '_match_ne'}

    def _adjust_local(self, version, constraint, prefix):
        """Strip the '+local' segment from *version* when the constraint does
        not itself carry a local-version part, so comparisons ignore it."""
        if prefix:
            strip_local = (('+' not in constraint) and version._parts[(- 1)])
        else:
            strip_local = ((not constraint._parts[(- 1)]) and version._parts[(- 1)])
        if strip_local:
            # Rebuild the version from its string without the '+local' suffix.
            s = version._string.split('+', 1)[0]
            version = self.version_class(s)
        return (version, constraint)

    def _match_lt(self, version, constraint, prefix):
        (version, constraint) = self._adjust_local(version, constraint, prefix)
        if (version >= constraint):
            return False
        # Also exclude versions whose release matches the constraint's
        # release clause as a prefix (e.g. pre-releases of the same release).
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return (not _match_prefix(version, pfx))

    def _match_gt(self, version, constraint, prefix):
        (version, constraint) = self._adjust_local(version, constraint, prefix)
        if (version <= constraint):
            return False
        # Symmetric to _match_lt: exclude prefix-matching versions.
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return (not _match_prefix(version, pfx))

    def _match_le(self, version, constraint, prefix):
        (version, constraint) = self._adjust_local(version, constraint, prefix)
        return (version <= constraint)

    def _match_ge(self, version, constraint, prefix):
        (version, constraint) = self._adjust_local(version, constraint, prefix)
        return (version >= constraint)

    def _match_eq(self, version, constraint, prefix):
        """'==': exact equality, or prefix match for '== X.Y.*' constraints."""
        (version, constraint) = self._adjust_local(version, constraint, prefix)
        if (not prefix):
            result = (version == constraint)
        else:
            result = _match_prefix(version, constraint)
        return result

    def _match_arbitrary(self, version, constraint, prefix):
        """'===': verbatim string comparison (PEP 440 arbitrary equality)."""
        return (str(version) == str(constraint))

    def _match_ne(self, version, constraint, prefix):
        """'!=': negation of the corresponding '==' semantics."""
        (version, constraint) = self._adjust_local(version, constraint, prefix)
        if (not prefix):
            result = (version != constraint)
        else:
            result = (not _match_prefix(version, constraint))
        return result

    def _match_compatible(self, version, constraint, prefix):
        """'~=': compatible release -- at least the constraint, and sharing
        all but the last release component as a prefix."""
        (version, constraint) = self._adjust_local(version, constraint, prefix)
        if (version == constraint):
            return True
        if (version < constraint):
            return False
        release_clause = constraint._release_clause
        if (len(release_clause) > 1):
            release_clause = release_clause[:(- 1)]
        pfx = '.'.join([str(i) for i in release_clause])
        return _match_prefix(version, pfx)
def learning_rate(hparams, global_step):
    """Compute the learning rate tensor for *global_step*.

    With warmup_steps > 0 a Noam-style schedule is used (linear warmup
    followed by inverse-sqrt decay); otherwise exponential decay over
    decay_steps, or a constant rate when no decay is configured.
    """
    base_lr = hparams.learning_rate
    warmup_steps = hparams.warmup_steps
    decay_rate = hparams.decay_rate
    if warmup_steps > 0:
        # Noam schedule: min(step^-0.5, step * warmup^-decay_rate).
        step = tf.cast(global_step, tf.float32) + 1.0
        return base_lr * tf.minimum(tf.rsqrt(step), tf.multiply(step, warmup_steps ** (-decay_rate)))
    decay_steps = hparams.decay_steps
    if decay_steps > 0:
        # Exponential decay.
        return base_lr * (decay_rate ** (global_step / decay_steps))
    return base_lr
def read_glove_vectors(glove_vector_path):
    """Parse a GloVe text file (one "word v1 v2 ..." entry per line) into a
    dict mapping each word to its float32 embedding vector."""
    embeddings_index = {}
    with open(glove_vector_path, 'r') as handle:
        for raw_line in handle:
            parts = raw_line.strip().split()
            vector = np.asarray(parts[1:], dtype='float32')
            embeddings_index[parts[0]] = vector[:]
    return embeddings_index
def eg_rule_condition(memory_info, manager, args):
    """Rule condition: match only memories still in the 'RAW' state.

    Returns a one-element list containing *memory_info* when it matches,
    otherwise an empty list (no match). *manager* and *args* are part of
    the rule-callback signature and are unused here.
    """
    return [memory_info] if memory_info.state == 'RAW' else []
def load_data(task, path, train=True):
    """Dispatch to the dataset-specific loader for *task*.

    Raises NotImplementedError for unknown task names.
    """
    if task == 'ECG':
        return load_ECG_data(path, train)
    if task == 'satellite':
        return load_satellite_data(path, train)
    if task == 'deepsea':
        return load_deepsea_data(path, train)
    raise NotImplementedError
class VehicleConfig():
    """Static vehicle parameters for the simulation/planning stack.

    NOTE(review): the bare annotations with defaults and the ``MISSING``
    sentinel strongly suggest this was declared as a dataclass / structured
    config whose decorator was lost when the file was flattened -- confirm
    against the original source.
    """
    dynamics_model: DynamicsModel = MISSING  # must be supplied by the config
    car_model: CarModel = CarModel.BMW320i
    v_min: float = 0.0       # velocity bounds -- presumably m/s; confirm units
    v_max: float = 13.889
    accel_max: float = 3.5   # max acceleration / deceleration magnitudes
    decel_max: float = (- 5.0)
    length: float = 4.2      # vehicle footprint -- presumably meters; confirm
    width: float = 1.6
    caster_effect: bool = False
    course_resolution: float = 0.1
def BM_all_minimal(vp, return_transformation=False, D=None):
    """Return representatives of all minimal models of the dynamical system
    *vp*, following a Bruin-Molnar style search over conjugations by
    upper-triangular matrices [[p^e, t], [0, 1]] at each relevant prime.

    Args:
        vp: a projective dynamical system over QQ (Sage object).
        return_transformation: when True, return [map, matrix] pairs instead
            of just the maps.
        D: optional list of primes to check; defaults to the prime divisors
            of the resultant.
    """
    mp = copy(vp)
    mp.normalize_coordinates()
    BR = mp.domain().base_ring()
    MS = MatrixSpace(QQ, 2)
    M_Id = MS.one()
    (F, G) = list(mp)
    # Dehomogenize to an affine map f/g in one variable z.
    aff_map = mp.dehomogenize(1)
    (f, g) = (aff_map[0].numerator(), aff_map[0].denominator())
    z = aff_map.domain().gen(0)
    Res = mp.resultant()
    # Auxiliary resultant of the fixed-point-style system h = f - z*g over g,
    # passed to Min() below.
    from sage.dynamics.arithmetic_dynamics.affine_ds import DynamicalSystem_affine
    h = (f - (z * g))
    A = AffineSpace(BR, 1, h.parent().variable_name())
    res = DynamicalSystem_affine([(h / g)], domain=A).homogenize(1).resultant()
    # Primes to examine: divisors of the resultant unless D was supplied.
    if (D is None):
        D = ZZ(Res).prime_divisors()
    # Collect candidate [p, exponent, translation] triples per prime.
    all_pM = []
    for p in D:
        all_pM.append(Min(mp, p, res, M_Id, all_orbits=True))
        # Always include the identity candidate [p, 0, 0].
        if ([p, 0, 0] not in all_pM[(- 1)]):
            all_pM[(- 1)].append([p, 0, 0])
    # Combine per-prime candidates multiplicatively into conjugation matrices,
    # keeping only those that preserve the resultant valuation at p (or are
    # the identity).
    all_M = [M_Id]
    for prime_data in all_pM:
        new_M = []
        if prime_data:
            p = prime_data[0][0]
            for m in prime_data:
                # m = [p, e, t] encodes the matrix [[p^e, t], [0, 1]].
                mat = MS([(m[0] ** m[1]), m[2], 0, 1])
                new_map = mp.conjugate(mat)
                new_map.normalize_coordinates()
                if ((mat == M_Id) or ((new_map.resultant().valuation(p) == Res.valuation(p)) and (mat.det() not in [1, (- 1)]))):
                    new_M.append(m)
        if new_M:
            all_M = [(m1 * MS([(m[0] ** m[1]), m[2], 0, 1])) for m1 in all_M for m in new_M]
    # Build the (deduplicated) list of conjugated maps with their matrices.
    all_maps = []
    for M in all_M:
        new_map = mp.conjugate(M)
        new_map.normalize_coordinates()
        if (not ([new_map, M] in all_maps)):
            all_maps.append([new_map, M])
    # Group into equivalence classes: two entries are equivalent when the
    # connecting matrix has determinant +-1 (i.e. same GL2(ZZ)-class).
    classes = []
    for (funct, mat) in all_maps:
        if (not classes):
            classes.append([funct, mat])
        else:
            found = False
            for (Func, Mat) in classes:
                M = (mat.inverse() * Mat)
                assert (funct.conjugate(M) == Func)
                if (M.det() in [1, (- 1)]):
                    found = True
                    break
            if (found is False):
                classes.append([funct, mat])
    if return_transformation:
        return classes
    else:
        return [funct for (funct, matr) in classes]
def add_signal_handlers():
    """Install process signal handlers: clean exit on INT/TERM/USR2, and the
    requeue handler on USR1 (note: SIGUSR1/SIGUSR2 are POSIX-only)."""
    for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGUSR2):
        signal.signal(sig, _clean_exit_handler)
    signal.signal(signal.SIGUSR1, _requeue_handler)
def adjust_learning_rate(optimizer, epoch, args):
    """Step-decay schedule: multiply the base ``args.lr`` by 0.1 at each of
    epochs 30, 60 and 80 (cumulatively), then write the result into the
    optimizer's FIRST param group (matching the original behavior)."""
    lr = args.lr
    for milestone in (30, 60, 80):
        if epoch >= milestone:
            lr = 0.1 * lr
    optimizer.param_groups[0]['lr'] = lr
_tf
# NOTE(review): the bare "_tf" above is almost certainly the tail of a
# stripped decorator (e.g. @require_tf) lost when this file was flattened --
# restore the decorator before use.
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common-test suite for the base (encoder-only) TF Funnel models."""
    all_model_classes = ((TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ())
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # base=True exercises the TFFunnelBaseModel variant of the tester.
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_saved_model_creation(self):
        # Intentionally disabled for this model family.
        pass
def add_model_arguments(parser):
    """Register all model-architecture command-line options on *parser*
    under a 'Model' argument group (embeddings, encoder/decoder types,
    layer sizes, attention, and dialogue-context options)."""
    from onmt.modules.SRU import CheckSRU
    group = parser.add_argument_group('Model')
    # Embedding options
    group.add_argument('--word-vec-size', type=int, default=300, help='Word embedding size for src and tgt.')
    group.add_argument('--share-decoder-embeddings', action='store_true', help='Use a shared weight matrix for the input and\n output word embeddings in the decoder.')
    # Encoder / decoder architecture
    group.add_argument('--encoder-type', type=str, default='rnn', choices=['rnn', 'brnn', 'transformer', 'cnn'], help='Type of encoder layer to use. Non-RNN layers\n are experimental. Options are\n [rnn|brnn|mean|transformer|cnn].')
    group.add_argument('--context-embedder-type', type=str, default='mean', choices=['rnn', 'mean', 'brnn'], help='Encoder to use for embedding prev turns context')
    group.add_argument('--decoder-type', type=str, default='rnn', choices=['rnn', 'transformer', 'cnn'], help='Type of decoder layer to use. Non-RNN layers\n are experimental. Options are [rnn|transformer|cnn].')
    # NOTE: single-dash spelling kept as in the original.
    group.add_argument('-copy_attn', action='store_true', help='Train copy attention layer.')
    # Depth and hidden sizes
    group.add_argument('--layers', type=int, default=(- 1), help='Number of layers in enc/dec.')
    group.add_argument('--enc-layers', type=int, default=1, help='Number of layers in the encoder')
    group.add_argument('--dec-layers', type=int, default=1, help='Number of layers in the decoder')
    group.add_argument('--rnn-size', type=int, default=500, help='Size of rnn hidden states')
    # CheckSRU validates that SRU is usable when selected.
    group.add_argument('--rnn-type', type=str, default='LSTM', choices=['LSTM', 'GRU', 'SRU'], action=CheckSRU, help='The gate type to use in the RNNs')
    group.add_argument('--input-feed', action='store_true', help='Feed the context vector at each time step as\n additional input (via concatenation with the word\n embeddings) to the decoder.')
    group.add_argument('--global-attention', type=str, default='multibank_general', choices=['dot', 'general', 'mlp', 'multibank_dot', 'multibank_general', 'multibank_mlp'], help='The attention type to use: dotprod or general (Luong)\n or MLP (Bahdanau), prepend multibank to add context')
    # Dialogue / task-level options
    group.add_argument('--model', type=str, default='seq2seq', choices=['seq2seq', 'lf2lf'], help='Model type')
    group.add_argument('--num-context', type=int, default=2, help='Number of sentences to consider as dialogue context (in addition to the encoder input)')
    group.add_argument('--stateful', action='store_true', help='Whether to pass on the hidden state throughout the dialogue encoding/decoding process')
    group.add_argument('--share-embeddings', action='store_true', help='Share source and target vocab embeddings')
def get_robobin_benchmark_goals():
    """Return the 8 robobin benchmark goal vectors.

    Each goal is a 9-vector: hand position (3), object-1 position (3),
    object-2 position (3), built from two base object poses shifted by a
    fixed delta and a home hand pose.
    """
    obj_left = np.array([(- 0.1), 0.7, 0.04])
    obj_right = np.array([0.1, 0.7, 0.04])
    shift = np.array([0, 0.15, 0])
    lift = np.array([0, 0, 0.06])  # present in the original but unused
    hand_home = np.array([0, 0.65, 0.2])
    goal_dicts = [
        {'obj1': obj_left, 'obj2': obj_right, 'hand': hand_home + np.array([0.12, 0.1, (- 0.1)])},
        {'obj1': obj_left, 'obj2': obj_right, 'hand': hand_home + np.array([(- 0.1), 0.2, (- 0.1)])},
        {'obj1': obj_left, 'obj2': obj_right + shift, 'hand': hand_home},
        {'obj1': obj_left - shift, 'obj2': obj_right, 'hand': hand_home},
        {'obj1': obj_left + shift, 'obj2': obj_right + shift, 'hand': hand_home},
        {'obj1': obj_left - shift, 'obj2': obj_right - shift, 'hand': hand_home},
        {'obj1': obj_right + shift, 'obj2': obj_right, 'hand': hand_home},
        {'obj1': obj_right + shift, 'obj2': obj_left + shift, 'hand': hand_home},
    ]
    return [np.concatenate([goal['hand'], goal['obj1'], goal['obj2']]) for goal in goal_dicts]
class ShardDescriptor():
    """Abstract interface describing one shard of a federated dataset.

    Subclasses must implement length, item access and the shape accessors.
    NOTE(review): ``sample_shape``/``target_shape`` are plain methods here,
    but may have been @property in the original (decorators can be lost in
    flattening) -- confirm upstream.
    """

    def __len__(self):
        """Number of samples in the shard (abstract)."""
        raise NotImplementedError

    def get_item(self, index: int):
        """Return the sample at *index* (abstract)."""
        raise NotImplementedError

    def sample_shape(self):
        """Shape of one input sample (abstract)."""
        raise NotImplementedError

    def target_shape(self):
        """Shape of one target (abstract)."""
        raise NotImplementedError

    def dataset_description(self) -> str:
        """Free-form description of the dataset; empty by default."""
        return ''
def main(weights, targets, sets, batchsize, num_workers, sampling_rate=None):
    """Evaluate a trained seismic-picking checkpoint on benchmark targets.

    Loads the latest version under *weights*, runs task 1 (detection) and
    task 23 (phase/onset) target CSVs from *targets* for each split in the
    comma-separated *sets*, and writes per-split prediction CSVs.
    """
    weights = Path(weights)
    targets = Path(targets)
    sets = sets.split(',')
    # Pick the most recent version directory of the checkpoint.
    version = sorted(weights.iterdir())[(- 1)]
    config_path = (version / 'hparams.yaml')
    with open(config_path, 'r') as f:
        config = yaml.full_load(f)
    model_cls = models.__getattribute__((config['model'] + 'Lit'))
    model = load_best_model(model_cls, weights, version.name)
    data_name = data_aliases[targets.name]
    if (data_name != config['data']):
        # Model was trained on a different dataset than it is evaluated on.
        logging.warning('Detected cross-domain evaluation')
        pred_root = 'pred_cross'
        parts = weights.name.split()
        weight_path_name = '_'.join(((parts[:2] + [targets.name]) + parts[2:]))
    else:
        pred_root = 'pred'
        weight_path_name = weights.name
    dataset = data.get_dataset_by_name(data_name)(sampling_rate=100, component_order='ZNE', dimension_order='NCW', cache='full')
    if (sampling_rate is not None):
        # Evaluate at a non-native sampling rate; outputs go to a separate dir.
        dataset.sampling_rate = sampling_rate
        pred_root = (pred_root + '_resampled')
        weight_path_name = (weight_path_name + f'_{sampling_rate}')
    for eval_set in sets:
        split = dataset.get_split(eval_set)
        if (targets.name == 'instance'):
            # INSTANCE noise traces need a 'noise_' prefix to be identified
            # unambiguously by trace name.
            logging.warning('Overwriting noise trace_names to allow correct identification')
            split._metadata['trace_name'].values[(- len(split.datasets[(- 1)])):] = split._metadata['trace_name'][(- len(split.datasets[(- 1)])):].apply((lambda x: ('noise_' + x)))
            split._build_trace_name_to_idx_dict()
        logging.warning(f'Starting set {eval_set}')
        split.preload_waveforms(pbar=True)
        for task in ['1', '23']:
            task_csv = (targets / f'task{task}.csv')
            if (not task_csv.is_file()):
                continue
            logging.warning(f'Starting task {task}')
            task_targets = pd.read_csv(task_csv)
            # Keep only rows belonging to the current split.
            task_targets = task_targets[(task_targets['trace_split'] == eval_set)]
            if ((task == '1') and (targets.name == 'instance')):
                # Apply the same 'noise_' renaming to the target rows.
                border = _identify_instance_dataset_border(task_targets)
                task_targets['trace_name'].values[border:] = task_targets['trace_name'][border:].apply((lambda x: ('noise_' + x)))
            if (sampling_rate is not None):
                # Rescale sample-index columns from each trace's native rate
                # to the requested sampling rate.
                for key in ['start_sample', 'end_sample', 'phase_onset']:
                    if (key not in task_targets.columns):
                        continue
                    task_targets[key] = ((task_targets[key] * sampling_rate) / task_targets['sampling_rate'])
                # NOTE(review): this uses the numeric value of sampling_rate
                # as a COLUMN NAME; possibly intended to be the string
                # 'sampling_rate' -- confirm upstream.
                task_targets[sampling_rate] = sampling_rate
            restrict_to_phase = config.get('restrict_to_phase', None)
            if ((restrict_to_phase is not None) and ('phase_label' in task_targets.columns)):
                mask = task_targets['phase_label'].isin(list(restrict_to_phase))
                task_targets = task_targets[mask]
            if ((restrict_to_phase is not None) and (task == '1')):
                logging.warning('Skipping task 1 as restrict_to_phase is set.')
                continue
            generator = sbg.SteeredGenerator(split, task_targets)
            generator.add_augmentations(model.get_eval_augmentations())
            loader = DataLoader(generator, batch_size=batchsize, shuffle=False, num_workers=num_workers)
            trainer = pl.Trainer(accelerator='gpu', devices=1)
            predictions = trainer.predict(model, loader)
            # Concatenate the per-batch outputs for each prediction head.
            merged_predictions = []
            for (i, _) in enumerate(predictions[0]):
                merged_predictions.append(torch.cat([x[i] for x in predictions]))
            merged_predictions = [x.cpu().numpy() for x in merged_predictions]
            task_targets['score_detection'] = merged_predictions[0]
            task_targets['score_p_or_s'] = merged_predictions[1]
            # Onset predictions are relative to the window start.
            task_targets['p_sample_pred'] = (merged_predictions[2] + task_targets['start_sample'])
            task_targets['s_sample_pred'] = (merged_predictions[3] + task_targets['start_sample'])
            pred_path = ((((weights.parent.parent / pred_root) / weight_path_name) / version.name) / f'{eval_set}_task{task}.csv')
            pred_path.parent.mkdir(exist_ok=True, parents=True)
            task_targets.to_csv(pred_path, index=False)
def fix_tokenization(text):
    """Detokenize PTB-style whitespace-tokenized text using heuristics:
    quote pairing, contractions, ellipses, numbers, acronyms and hyphens.
    Returns the repaired tokens re-joined with single spaces."""
    input_tokens = text.split()
    output_tokens = []
    has_left_quote = False         # double-quote open/close toggle
    has_left_single_quote = False  # single-quote open/close toggle
    i = 0
    prev_dash = False  # True when the previous token ended with a joining '-'
    while (i < len(input_tokens)):
        tok = input_tokens[i]
        flag_prev_dash = False
        if (tok in _tok_dict.keys()):
            # Direct substitution from the token table.
            output_tokens.append(_tok_dict[tok])
            i += 1
        elif (tok == '"'):
            # Alternate opening `` and closing '' quotes.
            if has_left_quote:
                output_tokens.append("''")
            else:
                output_tokens.append('``')
            has_left_quote = (not has_left_quote)
            i += 1
        elif ((tok == "'") and (len(output_tokens) > 0) and output_tokens[(- 1)].endswith('n') and (i < (len(input_tokens) - 1)) and (input_tokens[(i + 1)] == 't')):
            # "...n ' t" -> "...n't" contraction.
            output_tokens[(- 1)] = output_tokens[(- 1)][:(- 1)]
            output_tokens.append("n't")
            i += 2
        elif ((tok == "'") and (i < (len(input_tokens) - 1)) and (input_tokens[(i + 1)] in ('s', 'd', 'll'))):
            # Possessive/contraction: 's, 'd, 'll.
            output_tokens.append(("'" + input_tokens[(i + 1)]))
            i += 2
        elif (tok == "'"):
            # Bare single quote: alternate ` and '.
            if has_left_single_quote:
                output_tokens.append("'")
            else:
                output_tokens.append('`')
            has_left_single_quote = (not has_left_single_quote)
            i += 1
        elif ((tok == '.') and (i < (len(input_tokens) - 2)) and (input_tokens[(i + 1)] == '.') and (input_tokens[(i + 2)] == '.')):
            # ". . ." -> "..."
            output_tokens.append('...')
            i += 3
        elif ((tok == ',') and (len(output_tokens) > 0) and _is_digit(output_tokens[(- 1)]) and (i < (len(input_tokens) - 1)) and _is_digit(input_tokens[(i + 1)])):
            # Thousands separator: "1 , 000" -> "1,000".
            output_tokens[(- 1)] += (',' + input_tokens[(i + 1)])
            i += 2
        elif ((tok == '.') and (len(output_tokens) > 0) and output_tokens[(- 1)].isdigit() and (i < (len(input_tokens) - 1)) and input_tokens[(i + 1)].isdigit()):
            # Decimal point: "3 . 14" -> "3.14".
            output_tokens[(- 1)] += ('.' + input_tokens[(i + 1)])
            i += 2
        elif ((tok == '.') and (len(output_tokens) > 0) and (len(output_tokens[(- 1)]) == 1) and output_tokens[(- 1)].isupper() and (i < (len(input_tokens) - 2)) and (len(input_tokens[(i + 1)]) == 1) and input_tokens[(i + 1)].isupper() and (input_tokens[(i + 2)] == '.')):
            # Acronym: "U . S ." -> "U.S." -- extend k over further
            # single-uppercase-letter + '.' pairs.
            k = (i + 3)
            while ((k + 2) < len(input_tokens)):
                if ((len(input_tokens[(k + 1)]) == 1) and input_tokens[(k + 1)].isupper() and (input_tokens[(k + 2)] == '.')):
                    k += 2
                else:
                    break
            output_tokens[(- 1)] += ''.join(input_tokens[i:k])
            i += 2
        elif (tok == '-'):
            if ((i < (len(input_tokens) - 1)) and (input_tokens[(i + 1)] == '-')):
                # "- -" -> "--" (dash).
                output_tokens.append('--')
                i += 2
            elif ((i == (len(input_tokens) - 1)) or (i == 0)):
                # Leading/trailing hyphen stays standalone.
                output_tokens.append('-')
                i += 1
            elif ((output_tokens[(- 1)] not in string.punctuation) and (input_tokens[(i + 1)][0] not in string.punctuation)):
                # Hyphen joining two words: glue to the previous token and
                # remember to also glue the following token.
                output_tokens[(- 1)] += '-'
                i += 1
                flag_prev_dash = True
            else:
                output_tokens.append('-')
                i += 1
        elif (prev_dash and (len(output_tokens) > 0) and (tok[0] not in string.punctuation)):
            # Second half of a hyphenated word.
            output_tokens[(- 1)] += tok
            i += 1
        else:
            output_tokens.append(tok)
            i += 1
        prev_dash = flag_prev_dash
    return ' '.join(output_tokens)
class CocoDetection(datasets.coco.CocoDetection):
    # MS-COCO wrapper for multi-label classification with per-size-bucket labels
    # (small/medium/large objects) and optional partial-label masking.

    def __init__(self, root, data_split, img_size=224, p=1, annFile='', label_mask=None, partial=(1 + 1e-06)):
        # root: dataset root dir; data_split: 'train2014' or 'val2014';
        # p: fraction of training images kept; partial: fraction of labels kept
        # (default slightly > 1 so the masking branch below is skipped).
        self.classnames = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
        self.root = root
        if (annFile == ''):
            # Default annotations: all 80 classes.
            annFile = os.path.join(self.root, 'annotations', ('instances_%s.json' % data_split))
            cls_id = list(range(len(self.classnames)))
        else:
            # Custom annFile: class ids come from a pickled train/test split.
            cls_id = pickle.load(open(os.path.join(self.root, 'annotations', 'cls_ids.pickle'), 'rb'))
            if ('train' in annFile):
                cls_id = cls_id['train']
            elif ('val' in annFile):
                if ('unseen' in annFile):
                    cls_id = cls_id['test']
                else:
                    # Seen + unseen classes for full validation.
                    cls_id = (cls_id['train'] | cls_id['test'])
            else:
                raise ValueError('unknown annFile')
        # Deterministic class ordering (no-op when cls_id is already sorted).
        cls_id = list(cls_id)
        cls_id.sort()
        self.coco = COCO(annFile)
        self.data_split = data_split
        ids = list(self.coco.imgToAnns.keys())
        if (data_split == 'train2014'):
            # Keep only the first p-fraction of training images.
            num_examples = len(ids)
            pick_example = int((num_examples * p))
            self.ids = ids[:pick_example]
        else:
            self.ids = ids
        # NOTE(review): Normalize with std (0., 0., 0.) divides by zero — these
        # literals look truncated in this copy; restore the original mean/std
        # triples before use.
        train_transform = transforms.Compose([transforms.Resize((img_size, img_size)), CutoutPIL(cutout_factor=0.5), RandAugment(), transforms.ToTensor(), transforms.Normalize((0., 0.4578275, 0.), (0., 0., 0.))])
        test_transform = transforms.Compose([transforms.Resize((img_size, img_size)), transforms.ToTensor(), transforms.Normalize((0., 0.4578275, 0.), (0., 0., 0.))])
        if (self.data_split == 'train2014'):
            self.transform = train_transform
        elif (self.data_split == 'val2014'):
            self.transform = test_transform
        else:
            raise ValueError(('data split = %s is not supported in mscoco' % self.data_split))
        # Map COCO category ids (sparse) to contiguous class indices.
        self.cat2cat = dict()
        cats_keys = [*self.coco.cats.keys()]
        cats_keys.sort()
        for (cat, cat2) in zip(cats_keys, cls_id):
            self.cat2cat[cat] = cat2
        self.cls_id = cls_id
        self.mask = None
        self.partial = partial
        if ((data_split == 'train2014') and (partial < 1.0)):
            if (label_mask is None):
                # Sample a fresh (N, 3, C) keep-mask: 1 = label observed.
                rand_tensor = torch.rand(len(self.ids), len(self.classnames))
                mask = (rand_tensor < partial).long()
                mask = torch.stack([mask, mask, mask], dim=1)
                torch.save(mask, os.path.join(self.root, 'annotations', ('partial_label_%.2f.pt' % partial)))
            else:
                mask = torch.load(os.path.join(self.root, 'annotations', label_mask))
            self.mask = mask.long()

    def __getitem__(self, index):
        # Returns (image_tensor, target) where target is (3, C): one binary
        # label row per object-size bucket (small < 32², medium < 96², large).
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        target = coco.loadAnns(ann_ids)
        output = torch.zeros((3, len(self.classnames)), dtype=torch.long)
        for obj in target:
            if (obj['area'] < (32 * 32)):
                output[0][self.cat2cat[obj['category_id']]] = 1
            elif (obj['area'] < (96 * 96)):
                output[1][self.cat2cat[obj['category_id']]] = 1
            else:
                output[2][self.cat2cat[obj['category_id']]] = 1
        target = output
        if (self.mask is not None):
            # Masked-out labels become -1 (unknown) for partial-label training.
            masked = (- torch.ones((3, len(self.classnames)), dtype=torch.long))
            target = ((self.mask[index] * target) + ((1 - self.mask[index]) * masked))
        path = coco.loadImgs(img_id)[0]['file_name']
        img = Image.open(os.path.join(self.root, self.data_split, path)).convert('RGB')
        if (self.transform is not None):
            img = self.transform(img)
        return (img, target)

    def name(self):
        # Short dataset identifier used by callers for logging/paths.
        return 'coco'
class TestPutOps(TestCase):
    # Tests for the Caffe2 stat "Put" operators (AveragePut / IncrementPut /
    # StdDevPut) via StatRegistryExport round-trips.
    # NOTE(review): np.float is a deprecated alias (removed in NumPy 1.24);
    # these tests need np.float -> float when NumPy is upgraded.

    def test_default_value(self):
        # NOTE(review): int(.0) == 0, which makes the expected sum 0 below —
        # likely a mangled literal; confirm the intended expansion factor.
        magnitude_expand = int(.0)
        stat_name = 'stat'.encode('ascii')
        sum_postfix = '/stat_value/sum'.encode('ascii')
        count_postfix = '/stat_value/count'.encode('ascii')
        default_value = 16.0
        # Empty input: the op should fall back to default_value.
        workspace.FeedBlob('value', np.array([], dtype=np.float))
        workspace.RunOperatorOnce(core.CreateOperator('AveragePut', 'value', [], stat_name=stat_name, magnitude_expand=magnitude_expand, bound=True, default_value=default_value))
        workspace.RunOperatorOnce(core.CreateOperator('StatRegistryExport', [], ['k', 'v', 't']))
        k = workspace.FetchBlob('k')
        v = workspace.FetchBlob('v')
        stat_dict = dict(zip(k, v))
        self.assertIn((stat_name + sum_postfix), stat_dict)
        self.assertIn((stat_name + count_postfix), stat_dict)
        self.assertEquals(stat_dict[(stat_name + sum_postfix)], (default_value * magnitude_expand))
        self.assertEquals(stat_dict[(stat_name + count_postfix)], 1)

    def test_clamp(self):
        # With bound=True and a huge expansion factor the stored sum must be
        # clamped rather than overflow.
        put_value = 10
        magnitude_expand = int(1e+18)
        stat_name = 'stat'.encode('ascii')
        sum_postfix = '/stat_value/sum'.encode('ascii')
        count_postfix = '/stat_value/count'.encode('ascii')
        workspace.FeedBlob('value', np.array([put_value], dtype=np.float))
        workspace.RunOperatorOnce(core.CreateOperator('AveragePut', 'value', [], stat_name=stat_name, magnitude_expand=magnitude_expand, bound=True))
        workspace.RunOperatorOnce(core.CreateOperator('StatRegistryExport', [], ['k', 'v', 't']))
        k = workspace.FetchBlob('k')
        v = workspace.FetchBlob('v')
        stat_dict = dict(zip(k, v))
        self.assertIn((stat_name + sum_postfix), stat_dict)
        self.assertIn((stat_name + count_postfix), stat_dict)
        # NOTE(review): the expected value was lost from this call (trailing
        # comma, only one argument) — restore the clamped sum (presumably the
        # int64 max) from upstream before relying on this test.
        self.assertEquals(stat_dict[(stat_name + sum_postfix)], )
        self.assertEquals(stat_dict[(stat_name + count_postfix)], 1)

    def test_clamp_with_out_of_bounds(self):
        put_value = float(1e+20)
        # NOTE(review): mangled chained assignment — magnitude_expand ends up
        # being the bytes b'stat', not a number; the original numeric value for
        # magnitude_expand was lost here.
        magnitude_expand = stat_name = 'stat'.encode('ascii')
        sum_postfix = '/stat_value/sum'.encode('ascii')
        count_postfix = '/stat_value/count'.encode('ascii')
        workspace.FeedBlob('value', np.array([put_value], dtype=np.float))
        workspace.RunOperatorOnce(core.CreateOperator('AveragePut', 'value', [], stat_name=stat_name, magnitude_expand=magnitude_expand, bound=True))
        workspace.RunOperatorOnce(core.CreateOperator('StatRegistryExport', [], ['k', 'v', 't']))
        k = workspace.FetchBlob('k')
        v = workspace.FetchBlob('v')
        stat_dict = dict(zip(k, v))
        self.assertIn((stat_name + sum_postfix), stat_dict)
        self.assertIn((stat_name + count_postfix), stat_dict)
        # NOTE(review): expected value lost here as well (see test_clamp).
        self.assertEquals(stat_dict[(stat_name + sum_postfix)], )
        self.assertEquals(stat_dict[(stat_name + count_postfix)], 1)

    def test_avg_put_ops(self):
        # Happy path: sum == put_value * magnitude_expand, count == 1.
        put_value = 15.1111
        magnitude_expand = 10000
        stat_name = 'a1'.encode('ascii')
        sum_postfix = '/stat_value/sum'.encode('ascii')
        count_postfix = '/stat_value/count'.encode('ascii')
        workspace.FeedBlob('value', np.array([put_value], dtype=np.float))
        workspace.RunOperatorOnce(core.CreateOperator('AveragePut', 'value', [], stat_name=stat_name, magnitude_expand=magnitude_expand))
        workspace.RunOperatorOnce(core.CreateOperator('StatRegistryExport', [], ['k', 'v', 't']))
        k = workspace.FetchBlob('k')
        v = workspace.FetchBlob('v')
        stat_dict = dict(zip(k, v))
        self.assertIn((stat_name + sum_postfix), stat_dict)
        self.assertIn((stat_name + count_postfix), stat_dict)
        self.assertEquals(stat_dict[(stat_name + sum_postfix)], (put_value * magnitude_expand))
        self.assertEquals(stat_dict[(stat_name + count_postfix)], 1)

    def test_increment_put_ops(self):
        # IncrementPut exports a single scalar member, no sum/count pair.
        put_value = 15.1111
        magnitude_expand = 10000
        stat_name = 'i1'.encode('ascii')
        member_postfix = '/stat_value'.encode('ascii')
        workspace.FeedBlob('value', np.array([put_value], dtype=np.float))
        workspace.RunOperatorOnce(core.CreateOperator('IncrementPut', 'value', [], stat_name=stat_name, magnitude_expand=magnitude_expand))
        workspace.RunOperatorOnce(core.CreateOperator('StatRegistryExport', [], ['k', 'v', 't']))
        k = workspace.FetchBlob('k')
        v = workspace.FetchBlob('v')
        stat_dict = dict(zip(k, v))
        self.assertIn((stat_name + member_postfix), stat_dict)
        self.assertEquals(stat_dict[(stat_name + member_postfix)], (put_value * magnitude_expand))

    def test_stddev_put_ops(self):
        # StdDevPut exports four members; only sum and count values are checked.
        put_value = 15.1111
        magnitude_expand = 10000
        stat_name = 's1'.encode('ascii')
        sum_postfix = '/stat_value/sum'.encode('ascii')
        count_postfix = '/stat_value/count'.encode('ascii')
        sumoffset_postfix = '/stat_value/sumoffset'.encode('ascii')
        sumsqoffset_postfix = '/stat_value/sumsqoffset'.encode('ascii')
        workspace.FeedBlob('value', np.array([put_value], dtype=np.float))
        workspace.RunOperatorOnce(core.CreateOperator('StdDevPut', 'value', [], stat_name=stat_name, magnitude_expand=magnitude_expand))
        workspace.RunOperatorOnce(core.CreateOperator('StatRegistryExport', [], ['k', 'v', 't']))
        k = workspace.FetchBlob('k')
        v = workspace.FetchBlob('v')
        stat_dict = dict(zip(k, v))
        self.assertIn((stat_name + sum_postfix), stat_dict)
        self.assertIn((stat_name + count_postfix), stat_dict)
        self.assertIn((stat_name + sumoffset_postfix), stat_dict)
        self.assertIn((stat_name + sumsqoffset_postfix), stat_dict)
        self.assertEquals(stat_dict[(stat_name + sum_postfix)], (put_value * magnitude_expand))
        self.assertEquals(stat_dict[(stat_name + count_postfix)], 1)
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Register constructors and instance attributes for ns3::PacketTagList::TagData.

    Part of the generated PyBindGen API-scanning glue; `cls` is the wrapper
    being populated and `root_module` is unused here but kept for the common
    registration-function signature.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    # All attributes are plain mutable instance members.
    attribute_table = (
        ('count', 'uint32_t'),
        ('data', 'uint8_t [ 1 ]'),
        ('next', 'ns3::PacketTagList::TagData *'),
        ('size', 'uint32_t'),
        ('tid', 'ns3::TypeId'),
    )
    for attr_name, attr_type in attribute_table:
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def display_execution_result(context: ExecutionContext, event: events.AfterExecution) -> None:
    """Print a one-character, colored progress marker for a finished test case.

    Also advances the running line-length counter so the caller can wrap output.
    """
    outcome_markers = {
        Status.success: ('.', 'green'),
        Status.failure: ('F', 'red'),
        Status.error: ('E', 'red'),
        Status.skip: ('S', 'yellow'),
    }
    symbol, color = outcome_markers[event.status]
    context.current_line_length += len(symbol)
    click.secho(symbol, nl=False, fg=color)
class EntityDB(object):
    # Entity->mention candidate database backed by marisa tries, loaded from a
    # joblib file. Picklable by path (see __reduce__) so it can cross process
    # boundaries cheaply.

    def __init__(self, entity_db_file: str):
        self.entity_db_file = entity_db_file
        data = joblib.load(entity_db_file)
        self._title_trie = data['title_trie']
        self._mention_trie = data['mention_trie']
        self._data_trie = data['data_trie']

    def __reduce__(self):
        # Pickle as "re-open the file", not the trie contents.
        return (self.__class__, (self.entity_db_file,))

    def query(self, title: str):
        # Return [(title, mention_text, count), ...] for a page title; the data
        # trie stores (mention_id, count) records and the mention trie maps
        # ids back to surface strings. Missing titles yield [].
        try:
            return [(title, self._mention_trie.restore_key(args[0]), *args[1:]) for args in self._data_trie[title]]
        except KeyError:
            return []

    def save(self, out_file: str):
        # Persist the three tries in the layout __init__ expects.
        joblib.dump(dict(title_trie=self._title_trie, mention_trie=self._mention_trie, data_trie=self._data_trie), out_file)

    # NOTE(review): the three functions below take no `self` and are called as
    # EntityDB._initialize_worker / _extract_name_entity_pairs — they look like
    # they lost @staticmethod decorators; confirm against upstream.
    def build_from_wikipedia(dump_db: DumpDB, out_file, max_candidate_size, min_mention_count, pool_size, chunk_size):
        # Build the mention/data tries from a Wikipedia DumpDB by counting
        # (anchor text, resolved link target) pairs over all pages in parallel.
        logger.info('Extracting all entity names...')
        title_dict = defaultdict(Counter)
        with tqdm(total=dump_db.page_size(), mininterval=0.5) as pbar:
            with closing(Pool(pool_size, initializer=EntityDB._initialize_worker, initargs=(dump_db,))) as pool:
                for ret in pool.imap_unordered(EntityDB._extract_name_entity_pairs, dump_db.titles(), chunksize=chunk_size):
                    for (name, title) in ret:
                        title_dict[title][name] += 1
                    pbar.update()
        logger.info('Building DB...')
        mentions = frozenset([mention for mention_counter in title_dict.values() for mention in mention_counter.keys()])
        # NOTE(review): despite its key name, this is a frozenset of titles,
        # not a marisa trie — verify downstream consumers only need membership.
        title_trie = frozenset(title_dict.keys())
        mention_trie = marisa_trie.Trie(mentions)

        def item_generator():
            # Keep at most max_candidate_size most-common mentions per title,
            # dropping mentions seen fewer than min_mention_count times.
            for (title, mention_counter) in title_dict.items():
                for (mention, mention_count) in mention_counter.most_common()[:max_candidate_size]:
                    if (mention_count < min_mention_count):
                        continue
                    (yield (title, (mention_trie[mention], mention_count)))
        # '<II': little-endian (mention_id, count) uint32 pairs per title.
        data_trie = marisa_trie.RecordTrie('<II', item_generator())
        joblib.dump(dict(title_trie=title_trie, mention_trie=mention_trie, data_trie=data_trie), out_file)

    def _initialize_worker(dump_db, name_trie=None):
        # Pool initializer: stash shared read-only state in worker globals.
        global _dump_db, _name_trie
        _dump_db = dump_db
        _name_trie = name_trie

    def _extract_name_entity_pairs(article_title: str):
        # Worker task: (anchor text, redirect-resolved target) pairs for one
        # article, skipping Category: links.
        ret = []
        for paragraph in _dump_db.get_paragraphs(article_title):
            for wiki_link in paragraph.wiki_links:
                link_title = _dump_db.resolve_redirect(wiki_link.title)
                if link_title.startswith('Category:'):
                    continue
                mention_text = wiki_link.text
                ret.append((mention_text, link_title))
        return ret
class FaultyAgentDistAutogradTest(RpcAgentTestFixture):
    """Dist-autograd context-cleanup tests run against the faulty RPC agent."""

    def context_cleanup_test_helper(self, rpc_args, func):
        """Run `func` on every peer inside an autograd context, then verify the
        context is destroyed everywhere once the `with` block exits."""
        initialize_pg(self.init_method, self.rank, self.world_size)
        # Every rank except ourselves.
        dst_ranks = {rank for rank in range(self.world_size) if (rank != self.rank)}
        with dist_autograd.context() as context_id:
            for dst_rank in dst_ranks:
                rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
                # Tell the peer which context to expect cleanup for.
                rpc.rpc_sync(worker_name(dst_rank), _set_rpc_done, args=(context_id, 1))
        # The local context must be gone after exiting the scope.
        with self.assertRaises(RuntimeError):
            dist_autograd._retrieve_context(context_id)
        dist.barrier()
        # ... and so must every remote context.
        success = _all_contexts_cleaned_up()
        self.assertTrue(success)

    # NOTE(review): the bare `_init` tokens in the original were a mangled
    # `@dist_init` decorator (torch.testing._internal.dist_utils) — restored
    # here; confirm against upstream.
    @dist_init
    def test_context_cleanup_tensor_with_grad(self):
        t1 = torch.ones(3, 3, requires_grad=True)
        t2 = torch.zeros(3, 3, requires_grad=True)
        self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)

    @dist_init
    def test_verify_backend_options(self):
        """Sanity-check the fixture is wired to the faulty backend with the
        expected fault-injection knobs."""
        self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_PROCESS_GROUP)
        self.assertEqual(self.rpc_backend_options.num_send_recv_threads, 8)
        self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
        self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
def test_unused_retval_2():
    # Simplify must remove an unused nested-SDFG return value ('__return' ->
    # transient 'tmp') while keeping the used output ('used' -> 'output').
    sdfg = dace.SDFG('tester')
    sdfg.add_transient('tmp', [2], dace.float64)
    sdfg.add_array('output', [2], dace.float64)
    state = sdfg.add_state()
    # Nested SDFG with two outputs: 'used' and the conventional '__return'.
    nsdfg = dace.SDFG('nester')
    nsdfg.add_array('used', [1], dace.float64)
    nsdfg.add_array('__return', [1], dace.float64)
    nstate = nsdfg.add_state()
    a = nstate.add_access('used')
    nstate.add_edge(nstate.add_tasklet('do', {}, {'out'}, 'out = 1'), 'out', a, None, dace.Memlet('used[0]'))
    # '__return' is just a copy of 'used' — a candidate for elimination.
    nstate.add_nedge(a, nstate.add_write('__return'), dace.Memlet('__return[0]'))
    nsnode = state.add_nested_sdfg(nsdfg, None, {}, {'used', '__return'})
    (me, mx) = state.add_map('doit', dict(i='0:2'))
    state.add_nedge(me, nsnode, dace.Memlet())
    state.add_memlet_path(nsnode, mx, state.add_write('output'), memlet=dace.Memlet('output[i]'), src_conn='used')
    # The '__return' path only feeds a transient that nobody reads.
    state.add_memlet_path(nsnode, mx, state.add_write('tmp'), memlet=dace.Memlet('tmp[i]'), src_conn='__return')
    # Prevent inlining so simplification has to reason about the nested SDFG.
    nsnode.no_inline = True
    sdfg.simplify()
    # Only 'output' (outer) and 'used' (inner) should survive.
    assert (len(sdfg.arrays) == 1)
    assert (len(nsdfg.arrays) == 1)
    a = np.random.rand(2)
    sdfg(output=a)
    assert np.allclose(a, 1)
class Block(nn.Module):
    """Pre-norm transformer block: attention and MLP sublayers, each wrapped in
    a residual connection and optionally scaled by a learned LayerScale."""

    def __init__(self, embed_dim: int, n_heads: int, mlp_ratio: float=4.0, dropout: float=0.0, do_rms_norm: bool=False, do_swish_glu: bool=False, do_layer_scale: bool=False) -> None:
        super().__init__()
        self.embed_dim = embed_dim
        self.n_heads = n_heads
        self.do_layer_scale = do_layer_scale

        def make_norm():
            # RMSNorm or standard LayerNorm, per configuration.
            if do_rms_norm:
                return RMSNorm(embed_dim)
            return nn.LayerNorm(embed_dim, eps=1e-06)

        hidden_dim = int(mlp_ratio * embed_dim)

        # Attention sublayer.
        self.pre_norm_attn = make_norm()
        self.attn = Attention(self.embed_dim, n_heads=n_heads, dropout=dropout)
        if do_layer_scale:
            self.layer_scale_attn = LayerScale(self.embed_dim)

        # MLP sublayer: SwishGLU fuses projection+gate, otherwise Linear+GELU.
        self.pre_norm_mlp = make_norm()
        if do_swish_glu:
            projection = SwishGLU(embed_dim, hidden_dim)
        else:
            projection = nn.Sequential(nn.Linear(embed_dim, hidden_dim), nn.GELU())
        self.mlp = nn.Sequential(projection, nn.Dropout(dropout), nn.Linear(hidden_dim, embed_dim))
        if self.do_layer_scale:
            self.layer_scale_mlp = LayerScale(self.embed_dim)

    def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor]=None) -> torch.Tensor:
        """Apply attention then MLP, each as `x + (scale?)(sublayer(norm(x)))`."""
        residual = self.attn(self.pre_norm_attn(x), mask)
        if self.do_layer_scale:
            residual = self.layer_scale_attn(residual)
        x = x + residual

        residual = self.mlp(self.pre_norm_mlp(x))
        if self.do_layer_scale:
            residual = self.layer_scale_mlp(residual)
        return x + residual
def get_bucket_accuracy(bucket_values, y_true, y_pred):
    """Compute prediction accuracy within each bucket of example indices.

    :param bucket_values: list of buckets, each a list of indices into y_true/y_pred.
    :param y_true: gold labels, indexable by the bucket indices.
    :param y_pred: predicted labels, indexable by the bucket indices.
    :return: list with one accuracy per bucket; empty buckets yield -1.0.
    """
    accuracies = []
    for bucket in bucket_values:
        if len(bucket) == 0:
            # Sentinel for "no examples in this bucket".
            accuracies.append(-1.0)
            continue
        hits = [int(y_true[idx] == y_pred[idx]) for idx in bucket]
        accuracies.append(np.mean(hits))
    return accuracies
class _IntermediateStruct(Struct):
    # Struct subclass built directly from an already-materialized entry dict,
    # bypassing the normal Struct construction path.

    def __init__(self, entries):
        # entries: dict of member entries; the optional '__struct_methods' key
        # carries the method table and is removed before member registration.
        assert isinstance(entries, dict)
        # Deliberately writes Struct's name-mangled private attributes
        # (_Struct__methods / _Struct__entries) so Struct's own registration
        # helpers see them; note super().__init__() is intentionally not called.
        self._Struct__methods = entries.pop('__struct_methods', {})
        self._register_methods()
        self._Struct__entries = entries
        self._register_members()
class BayesFeatureSet(FeatureSet):
    """Feature set that concatenates per-column histogram features with
    pairwise correlation features."""

    def __init__(self, datatype, metadata, nbins=10, quids=None):
        # Only pandas DataFrames and numpy ndarrays are supported inputs.
        assert (datatype in [DataFrame, ndarray]), 'Unknown data type {}'.format(datatype)
        self.datatype = datatype
        self.histograms = HistogramFeatureSet(datatype, metadata, nbins, quids)
        self.correlations = CorrelationsFeatureSet(datatype, metadata, quids)

    def extract(self, data):
        """Return histogram features followed by correlation features."""
        hist_features = self.histograms.extract(data)
        corr_features = self.correlations.extract(data)
        return concatenate([hist_features, corr_features])
class LinearNoiseLayer(nn.Module):
    """Conditional-BN-style layer: a noise vector is mapped through two linear
    layers to per-channel gain and bias, which modulate a batch norm of x."""

    def __init__(self, opt, noise_sz=20, output_sz=32):
        super().__init__()
        self.noise_sz = noise_sz
        self.opt = opt
        # Shared linear-layer factory from the project config (no bias).
        linear_layer = get_linear_layer(opt, bias=False)
        self.gain = linear_layer(noise_sz, output_sz)
        self.bias = linear_layer(noise_sz, output_sz)
        self.bn = bn(output_sz)
        # (fixed) self.noise_sz was previously assigned twice; the duplicate
        # assignment has been removed.

    def forward(self, x):
        # 'bn_noise_misc' disables stochasticity: zero noise means gain == 1
        # and bias == 0 after the affine maps below (up to the learned weights).
        if (('bn_noise_misc' in self.opt) and self.opt.bn_noise_misc):
            noise = torch.zeros(x.size(0), self.noise_sz).to(x.device)
        else:
            noise = torch.randn(x.size(0), self.noise_sz).to(x.device)
        # Gain is centered at 1 so untrained weights start near identity.
        gain = (1 + self.gain(noise)).view(noise.size(0), -1, 1, 1)
        bias = self.bias(noise).view(noise.size(0), -1, 1, 1)
        xp = self.bn(x, gain=gain, bias=bias)
        return xp
def build_graphs(train_inputs, valid_inputs):
    """Build training and validation graphs that share model variables.

    The training graph is built first (creating the variables); the current
    variable scope is then switched to reuse mode so the validation and debug
    graphs bind to the same variables.

    :return: (train_op, train_graph, valid_graph, debug_graph)
    """
    scope = tf.get_variable_scope()
    with tf.name_scope('train'):
        train_graph = build_graph(train_inputs['features'], train_inputs['groups'])
        opt, train_op = set_up_optimizer(train_graph['loss'])
    # Everything below reuses the variables created by the training graph.
    scope.reuse_variables()
    with tf.name_scope('valid'):
        valid_graph = build_graph(valid_inputs['features'], valid_inputs['groups'])
        debug_graph = build_debug_graph(valid_inputs)
    return (train_op, train_graph, valid_graph, debug_graph)
class CloudRunnerPreprocessor(InsertTaggedCellsPreprocessor):
    """Insert Binder/Colab launch badges and a Colab pip-install cell into a
    demo notebook, tagging the inserted cells so they can be stripped again.

    NOTE(review): the URL literals below were truncated to bare `f'` in this
    copy of the file (the https:// strings were stripped). They have been
    reconstructed from the standard mybinder.org / Colab GitHub URL schemes —
    confirm repository owner/name and badge paths against upstream before use.
    """

    metadata_tag = 'CloudRunner'
    git_branch = 'master'
    demos_path_prefix = 'demos/'
    # Cell injected before the first code cell so Colab installs dependencies.
    # NOTE(review): line breaks/indent of this literal were also lost and have
    # been reconstructed.
    colab_import_code = f'''# install StellarGraph if running on Google Colab
import sys
if 'google.colab' in sys.modules:
    %pip install -q stellargraph[demos]=={SG_VERSION}'''

    def _binder_url(self, notebook_path):
        # Binder launch URL opening the notebook in JupyterLab.
        return f'https://mybinder.org/v2/gh/stellargraph/stellargraph/{self.git_branch}?urlpath=lab/tree/{notebook_path}'

    def _colab_url(self, notebook_path):
        # Colab "open from GitHub" URL.
        return f'https://colab.research.google.com/github/stellargraph/stellargraph/blob/{self.git_branch}/{notebook_path}'

    def _binder_badge(self, notebook_path):
        return f'<a href="{self._binder_url(notebook_path)}" alt="Open In Binder" target="_parent"><img src="https://mybinder.org/badge_logo.svg"/></a>'

    def _colab_badge(self, notebook_path):
        return f'<a href="{self._colab_url(notebook_path)}" alt="Open In Colab" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg"/></a>'

    def _badge_markdown(self, notebook_path):
        # Single-row table holding both launch badges.
        return f'<table><tr><td>Run the latest release of this notebook:</td><td>{self._binder_badge(notebook_path)}</td><td>{self._colab_badge(notebook_path)}</td></tr></table>'

    def preprocess(self, nb, resources):
        """Strip previously-inserted cells, then (re-)insert badge cells at the
        top and bottom and the Colab install cell before the first code cell."""
        notebook_path = resources[PATH_RESOURCE_NAME]
        if (not notebook_path.startswith(self.demos_path_prefix)):
            print(f"WARNING: Notebook file path of {notebook_path} didn't start with {self.demos_path_prefix}, and may result in bad links to cloud runners.")
        # Remove any cells inserted by a previous run (idempotency).
        self.remove_tagged_cells_from_notebook(nb)
        badge_cell = nbformat.v4.new_markdown_cell(self._badge_markdown(notebook_path))
        self.tag_cell(badge_cell)
        hide_cell_from_docs(badge_cell)
        # Put the badge before a leading code cell, otherwise after the title.
        if (nb.cells[0].cell_type == 'code'):
            nb.cells.insert(0, badge_cell)
        else:
            nb.cells.insert(1, badge_cell)
        first_code_cell_id = next((index for (index, cell) in enumerate(nb.cells) if (cell.cell_type == 'code')))
        import_cell = nbformat.v4.new_code_cell(self.colab_import_code)
        self.tag_cell(import_cell)
        hide_cell_from_docs(import_cell)
        nb.cells.insert(first_code_cell_id, import_cell)
        # Repeat the badges at the end of the notebook.
        nb.cells.append(badge_cell)
        return (nb, resources)
class SimCLR(object):
    # SimCLR contrastive pre-training driver: InfoNCE loss over augmented
    # views, mixed-precision training, TensorBoard logging.

    def __init__(self, *args, **kwargs):
        # kwargs must provide: 'args' (namespace with device, batch_size,
        # n_views, temperature, epochs, fp16_precision, log_every_n_steps,
        # arch, disable_cuda), 'model', 'optimizer', 'scheduler'.
        self.args = kwargs['args']
        self.model = kwargs['model'].to(self.args.device)
        self.optimizer = kwargs['optimizer']
        self.scheduler = kwargs['scheduler']
        self.writer = SummaryWriter()
        logging.basicConfig(filename=os.path.join(self.writer.log_dir, 'training.log'), level=logging.DEBUG)
        self.criterion = torch.nn.CrossEntropyLoss().to(self.args.device)

    def info_nce_loss(self, features):
        # Build InfoNCE logits/labels from a (n_views*batch, dim) feature batch
        # where rows i and i+batch_size are views of the same image.
        labels = torch.cat([torch.arange(self.args.batch_size) for i in range(self.args.n_views)], dim=0)
        # labels[i][j] == 1 iff i and j come from the same source image.
        labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
        labels = labels.to(self.args.device)
        features = F.normalize(features, dim=1)
        # Cosine similarity matrix (features are L2-normalized).
        similarity_matrix = torch.matmul(features, features.T)
        # Drop the diagonal (self-similarity) from both labels and similarities.
        mask = torch.eye(labels.shape[0], dtype=torch.bool).to(self.args.device)
        labels = labels[(~ mask)].view(labels.shape[0], (- 1))
        similarity_matrix = similarity_matrix[(~ mask)].view(similarity_matrix.shape[0], (- 1))
        # Positives: other views of the same image; negatives: everything else.
        positives = similarity_matrix[labels.bool()].view(labels.shape[0], (- 1))
        negatives = similarity_matrix[(~ labels.bool())].view(similarity_matrix.shape[0], (- 1))
        # Positive logit goes in column 0, so the CE target is class 0.
        logits = torch.cat([positives, negatives], dim=1)
        labels = torch.zeros(logits.shape[0], dtype=torch.long).to(self.args.device)
        logits = (logits / self.args.temperature)
        return (logits, labels)

    def train(self, train_loader):
        # AMP gradient scaler (no-op when fp16_precision is False).
        scaler = GradScaler(enabled=self.args.fp16_precision)
        save_config_file(self.writer.log_dir, self.args)
        n_iter = 0
        logging.info(f'Start SimCLR training for {self.args.epochs} epochs.')
        # NOTE(review): this logs the disable_cuda flag under the label "gpu" —
        # misleading message, inherited from upstream.
        logging.info(f'Training with gpu: {self.args.disable_cuda}.')
        for epoch_counter in range(self.args.epochs):
            for (images, _) in tqdm(train_loader):
                # images is a list of n_views augmented batches; stack them.
                images = torch.cat(images, dim=0)
                images = images.to(self.args.device)
                with autocast(enabled=self.args.fp16_precision):
                    features = self.model(images)
                    (logits, labels) = self.info_nce_loss(features)
                    loss = self.criterion(logits, labels)
                self.optimizer.zero_grad()
                scaler.scale(loss).backward()
                scaler.step(self.optimizer)
                scaler.update()
                if ((n_iter % self.args.log_every_n_steps) == 0):
                    (top1, top5) = accuracy(logits, labels, topk=(1, 5))
                    self.writer.add_scalar('loss', loss, global_step=n_iter)
                    self.writer.add_scalar('acc/top1', top1[0], global_step=n_iter)
                    self.writer.add_scalar('acc/top5', top5[0], global_step=n_iter)
                    # NOTE(review): scheduler.get_lr() is deprecated in newer
                    # PyTorch — get_last_lr() is the supported accessor.
                    self.writer.add_scalar('learning_rate', self.scheduler.get_lr()[0], global_step=n_iter)
                n_iter += 1
            # Warmup: hold the LR fixed for the first 10 epochs.
            if (epoch_counter >= 10):
                self.scheduler.step()
            logging.debug(f'Epoch: {epoch_counter} Loss: {loss} Top1 accuracy: {top1[0]}')
        logging.info('Training has finished.')
        # Save the final model checkpoint alongside the TensorBoard logs.
        checkpoint_name = 'checkpoint_{:04d}.pth.tar'.format(self.args.epochs)
        save_checkpoint({'epoch': self.args.epochs, 'arch': self.args.arch, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict()}, is_best=False, filename=os.path.join(self.writer.log_dir, checkpoint_name))
        logging.info(f'Model checkpoint and metadata has been saved at {self.writer.log_dir}.')
def test_mobilenet_v3():
    # Constructor validation: bad arch name, reduction factor, out_indices,
    # frozen_stages, and non-string pretrained argument must all raise.
    with pytest.raises(AssertionError):
        MobileNetV3('big')
    with pytest.raises(AssertionError):
        MobileNetV3(reduction_factor=0)
    with pytest.raises(ValueError):
        MobileNetV3(out_indices=(0, 1, 15))
    with pytest.raises(ValueError):
        MobileNetV3(frozen_stages=15)
    with pytest.raises(TypeError):
        model = MobileNetV3()
        model.init_weights(pretrained=8)
    # Default (small) arch: three feature maps with the expected shapes.
    model = MobileNetV3()
    model.init_weights()
    model.train()
    imgs = torch.randn(2, 3, 56, 56)
    feat = model(imgs)
    assert (len(feat) == 3)
    assert (feat[0].shape == (2, 16, 28, 28))
    assert (feat[1].shape == (2, 16, 14, 14))
    assert (feat[2].shape == (2, 576, 7, 7))
    # Large arch with custom out_indices.
    model = MobileNetV3(arch='large', out_indices=(1, 3, 16))
    model.init_weights()
    model.train()
    imgs = torch.randn(2, 3, 56, 56)
    feat = model(imgs)
    assert (len(feat) == 3)
    assert (feat[0].shape == (2, 16, 28, 28))
    assert (feat[1].shape == (2, 24, 14, 14))
    assert (feat[2].shape == (2, 960, 7, 7))
    # norm_eval + checkpointing + frozen stages: same shapes as default arch.
    model = MobileNetV3(norm_eval=True, with_cp=True, frozen_stages=5)
    with pytest.raises(TypeError):
        model.init_weights(pretrained=8)
    model.init_weights()
    model.train()
    imgs = torch.randn(2, 3, 56, 56)
    feat = model(imgs)
    assert (len(feat) == 3)
    assert (feat[0].shape == (2, 16, 28, 28))
    assert (feat[1].shape == (2, 16, 14, 14))
    assert (feat[2].shape == (2, 576, 7, 7))
class TestMachAr(object):
    """Regression test: constructing MachAr at extended precision must not
    trip a floating-point underflow when warnings are raised as errors."""

    def _run_machar_highprec(self):
        # float96 only exists on some platforms; silently skip when absent.
        # (fixed) the `except AttributeError:` clause had lost its body in this
        # copy, which made the following `def` its suite — restored `pass`.
        try:
            hiprec = ntypes.float96
            MachAr((lambda v: array([v], hiprec)))
        except AttributeError:
            pass

    def test_underlow(self):
        # Turn every FP warning into an exception; MachAr setup must stay clean.
        with errstate(all='raise'):
            try:
                self._run_machar_highprec()
            except FloatingPointError as e:
                msg = ('Caught %s exception, should not have been raised.' % e)
                raise AssertionError(msg)
def Zq(q, prec=None, type='capped-rel', modulus=None, names=None, print_mode=None, ram_name=None, res_name=None, print_pos=None, print_sep=None, print_max_ram_terms=None, print_max_unram_terms=None, print_max_terse_terms=None, show_prec=None, check=True, implementation='FLINT'):
    # Factory for the unramified extension Z_q of the p-adic integers, where
    # q = p^k. Accepts q as an integer, a Factorization, or [(p, k)].
    # With check=False, q must already be a one-factor Factorization-like pair.
    if check:
        if (isinstance(q, Factorization) or isinstance(q, (list, tuple))):
            # Normalize list/tuple inputs to a single (p, k) factor pair.
            if ((not isinstance(q, Factorization)) and (len(q) == 2)):
                F = [(Integer(q[0]), Integer(q[1]))]
            else:
                if (len(q) != 1):
                    raise ValueError('q must be a prime power')
                if (len(q[0]) != 2):
                    raise ValueError('q must have shape [(p, k)]')
                F = [(Integer(q[0][0]), Integer(q[0][1]))]
            if ((not F[0][0].is_prime()) or (F[0][1] <= 0)):
                raise ValueError('q must be a prime power')
            q = (F[0][0] ** F[0][1])
        else:
            q = Integer(q)
            F = q.factor()
            if (len(F) != 1):
                raise ValueError('q must be a prime power')
        if ((prec is not None) and (not isinstance(prec, Integer))):
            prec = Integer(prec)
        if isinstance(names, (list, tuple)):
            names = names[0]
        from sage.structure.element import Expression
        if (not ((modulus is None) or isinstance(modulus, Polynomial) or isinstance(modulus, Expression))):
            raise TypeError('modulus must be a polynomial')
        if ((names is not None) and (not isinstance(names, str))):
            names = str(names)
        # Re-derive the canonical factorization of the (now normalized) q.
        q = Integer(q)
        F = q.factor()
        if (len(F) != 1):
            raise ValueError('q must be a prime power')
    else:
        # Unchecked path: q is already a factorization-like object.
        F = q
        q = (F[0][0] ** F[0][1])
    # Base ring Z_p for p = F[0][0].
    base = Zp(p=F[0][0], prec=prec, type=type, print_mode=print_mode, names=ram_name, print_pos=print_pos, print_sep=print_sep, print_max_terms=print_max_ram_terms, show_prec=show_prec, check=False)
    if (F[0][1] == 1):
        # q == p: no extension needed.
        return base
    elif (names is None):
        raise TypeError('You must specify the name of the generator.')
    if (res_name is None):
        # Default residue-field variable name.
        res_name = (names + '0')
    if (modulus is None):
        # Use the Conway/default modulus of GF(q), lifted to ZZ.
        from sage.rings.finite_rings.finite_field_constructor import FiniteField as GF
        if (ram_name is None):
            ram_name = str(F[0][0])
        modulus = GF(q, res_name).modulus().change_ring(ZZ)
    return ExtensionFactory(base=base, modulus=modulus, prec=prec, print_mode=print_mode, names=names, res_name=res_name, ram_name=ram_name, print_pos=print_pos, print_sep=print_sep, print_max_ram_terms=print_max_ram_terms, print_max_unram_terms=print_max_unram_terms, print_max_terse_terms=print_max_terse_terms, show_prec=show_prec, check=check, unram=True, implementation=implementation)
def svm_test(clf: sklearn.base.BaseEstimator, test_input: np.ndarray, test_output: np.ndarray) -> Tuple[(Mapping[(str, Any)], str)]:
    """Evaluate a fitted classifier on a held-out set.

    Prints the confusion matrix and a classification report, then returns the
    report both as a dict and as the printed string.

    :param clf: fitted estimator exposing predict().
    :param test_input: feature matrix.
    :param test_output: one-hot labels (argmax taken along axis 1).
    """
    y_pred = clf.predict(test_input)
    y_true = np.argmax(test_output, axis=1)
    report_text = classification_report(y_true, y_pred, digits=3)
    print(confusion_matrix(y_true, y_pred))
    print(report_text)
    report_dict = classification_report(y_true, y_pred, output_dict=True, digits=3)
    return (report_dict, report_text)
def to_str(item):
    """Format *item* as a string, rendering floats with 4 significant digits."""
    return ('%.4g' % item) if isinstance(item, float) else str(item)
def train(shared_model, task, batch_size, train_steps, gpu_id, start, restore, counter, barrier=None, save_interval=None, eval_interval=None, log=True):
    # Hogwild-style worker loop for one task ('caption' | 'vqa' | 'hmdb' |
    # 'penn'). Each worker keeps a local model copy on its GPU (gpu_id > 0),
    # syncs weights from shared_model every step, and pushes gradients back.
    # counter is a shared step counter; barrier synchronizes workers per step.
    log_dir = ('logs/%s' % task)
    if (not os.path.exists(log_dir)):
        os.makedirs(log_dir)
    if (log == True):
        summary_writer = SummaryWriter(log_dir)
    torch.manual_seed(int((random.random() * 1000)))
    if (gpu_id > 0):
        # Local replica on this worker's GPU; gpu_id <= 0 trains the shared
        # model directly (note: gpu 0 therefore uses shared_model).
        model = omninet.OmniNet(gpu_id=gpu_id)
        model = model.cuda(gpu_id)
    else:
        model = shared_model
    # Per-task data loaders and warmup-scheduled Adam on the SHARED parameters.
    if (task == 'caption'):
        (DL, val_dl) = dl.coco_cap_batchgen(caption_dir=caption_dir, image_dir=coco_images, num_workers=8, batch_size=batch_size)
        optimizer = ScheduledOptim(Adam(filter((lambda x: x.requires_grad), shared_model.parameters()), betas=(0.9, 0.98), eps=1e-09), 512, 16000, restore, init_lr=0.02)
    elif (task == 'vqa'):
        (DL, val_dl) = dl.vqa_batchgen(vqa_dir, coco_images, num_workers=8, batch_size=batch_size)
        optimizer = ScheduledOptim(Adam(filter((lambda x: x.requires_grad), shared_model.parameters()), betas=(0.9, 0.98), eps=1e-09), 512, 16000, restore, max_lr=0.0001, init_lr=0.02)
    elif (task == 'hmdb'):
        (DL, val_dl) = dl.hmdb_batchgen(hmdb_data_dir, hmdb_process_dir, num_workers=8, batch_size=batch_size, test_batch_size=int((batch_size / 4)), clip_len=16)
        optimizer = ScheduledOptim(Adam(filter((lambda x: x.requires_grad), shared_model.parameters()), betas=(0.9, 0.98), eps=1e-09), 512, 16000, restore, max_lr=0.0001, init_lr=0.02)
    elif (task == 'penn'):
        (DL, val_dl, test_dl) = dl.penn_dataloader(penn_data_dir, batch_size=batch_size, test_batch_size=int((batch_size / 2)), num_workers=4, vocab_file='conf/penn_vocab.json')
        optimizer = ScheduledOptim(Adam(filter((lambda x: x.requires_grad), shared_model.parameters()), betas=(0.9, 0.98), eps=1e-09), 512, 16000, restore, init_lr=0.02)
    model = model.train()
    for i in range(start, train_steps):
        model.zero_grad()
        if (barrier is not None):
            barrier.wait()
        if (gpu_id > 0):
            # Pull the latest shared weights into the local replica.
            with torch.cuda.device(gpu_id):
                model.load_state_dict(shared_model.state_dict())
        step = counter.increment()
        if (task == 'caption'):
            # Periodic validation pass on the COCO caption val set.
            # NOTE(review): unlike the vqa/hmdb branches, this branch does NOT
            # `continue` after evaluation — it also trains on eval steps.
            # Confirm whether that asymmetry is intentional.
            if (log and (eval_interval is not None) and ((i % eval_interval) == 0)):
                model = model.eval()
                val_loss = 0
                val_acc = 0
                print(('-' * 100))
                print('Evaluation step')
                for b in tqdm(val_dl):
                    imgs = b['img']
                    if (gpu_id >= 0):
                        imgs = imgs.cuda(device=gpu_id)
                    captions = b['cap']
                    (_, loss, acc) = r.image_caption(model, imgs, targets=captions, mode='val', return_str_preds=True)
                    val_loss += float(loss.detach().cpu().numpy())
                    val_acc += acc
                val_loss /= len(val_dl)
                val_acc = (val_acc / len(val_dl))
                summary_writer.add_scalar('Val_loss', val_loss, step)
                print(('Step %d, COCO validation loss: %f, Accuracy %f %%' % (step, val_loss, val_acc)))
                print(('-' * 100))
                model = model.train()
            batch = next(DL)
            if (gpu_id >= 0):
                imgs = batch['img'].cuda(device=gpu_id)
            else:
                imgs = batch['img']
            captions = batch['cap']
            (_, loss, acc) = r.image_caption(model, imgs, targets=captions)
            loss.backward()
            loss = loss.detach()
            if log:
                summary_writer.add_scalar('Loss', loss, step)
                print(('Step %d, Caption Loss: %f, Accuracy: %f %%' % (step, loss, acc)))
        elif (task == 'vqa'):
            if (log and (eval_interval is not None) and ((i % eval_interval) == 0)):
                model = model.eval()
                val_loss = 0
                val_acc = 0
                print(('-' * 100))
                print('Evaluation step')
                for b in tqdm(val_dl):
                    imgs = b['img']
                    answers = b['ans']
                    if (gpu_id >= 0):
                        imgs = imgs.cuda(device=gpu_id)
                        answers = answers.cuda(device=gpu_id)
                    questions = b['ques']
                    (pred, loss, acc) = r.vqa(model, imgs, questions, targets=answers, mode='val', return_str_preds=True)
                    val_loss += float(loss.detach().cpu().numpy())
                    val_acc += acc
                val_loss /= len(val_dl)
                val_acc = (val_acc / len(val_dl))
                summary_writer.add_scalar('Val_loss', val_loss, step)
                print(('Step %d, VQA validation loss: %f, Accuracy %f %%' % (step, val_loss, val_acc)))
                print(('-' * 100))
                model = model.train()
                # Skip training on evaluation steps for this task.
                continue
            batch = next(DL)
            if (gpu_id >= 0):
                imgs = batch['img'].cuda(device=gpu_id)
                answers = batch['ans'].cuda(device=gpu_id)
            else:
                imgs = batch['img']
                answers = batch['ans']
            questions = batch['ques']
            (_, loss, acc) = r.vqa(model, imgs, questions, targets=answers)
            loss.backward()
            loss = loss.detach()
            if log:
                summary_writer.add_scalar('Loss', loss, step)
                print(('Step %d, VQA Loss: %f, Accuracy: %f %%' % (step, loss, acc)))
        elif (task == 'hmdb'):
            if (log and (eval_interval is not None) and ((i % eval_interval) == 0)):
                model = model.eval()
                val_loss = 0
                val_acc = 0
                print(('-' * 100))
                print('Evaluation step')
                for b in tqdm(val_dl):
                    (vid, labels) = b
                    if (gpu_id >= 0):
                        vid = vid.cuda(device=gpu_id)
                        labels = labels.cuda(device=gpu_id)
                    (_, loss, acc) = r.hmdb(model, vid, targets=labels, mode='val')
                    val_loss += float(loss.detach().cpu().numpy())
                    val_acc += acc
                val_loss /= len(val_dl)
                val_acc = (val_acc / len(val_dl))
                summary_writer.add_scalar('Val_loss', val_loss, step)
                print(('Step %d, HMDB validation loss: %f, Accuracy %f %%' % (step, val_loss, val_acc)))
                print(('-' * 100))
                model = model.train()
                continue
            (vid, labels) = next(DL)
            if (gpu_id >= 0):
                vid = vid.cuda(device=gpu_id)
                labels = labels.cuda(device=gpu_id)
            (_, loss, acc) = r.hmdb(model, vid, targets=labels, return_str_preds=True)
            loss.backward()
            loss = loss.detach()
            if log:
                summary_writer.add_scalar('Loss', loss, step)
                print(('Step %d, HMDB Loss: %f, Accuracy: %f %%' % (step, loss, acc)))
        elif (task == 'penn'):
            if (log and (eval_interval is not None) and ((i % eval_interval) == 0)):
                model = model.eval()
                val_loss = 0
                val_acc = 0
                print(('-' * 100))
                print('Evaluation step')
                # NOTE(review): this iterates test_dl but normalizes by
                # len(val_dl) — averages are wrong unless both loaders have the
                # same length; also no `continue` after eval (cf. vqa/hmdb).
                for b in tqdm(test_dl):
                    en = b['text']
                    targets = b['tokens']
                    pad_id = b['pad_id']
                    pad_mask = b['pad_mask']
                    if (gpu_id >= 0):
                        targets = targets.to(gpu_id)
                        pad_mask = pad_mask.to(gpu_id)
                    (_, loss, acc) = r.penn(model, en, target_pad_mask=pad_mask, pad_id=pad_id, targets=targets, mode='val', return_str_preds=True)
                    loss = loss.detach()
                    val_loss += float(loss.cpu().numpy())
                    val_acc += acc
                val_loss /= len(val_dl)
                val_acc = (val_acc / len(val_dl))
                summary_writer.add_scalar('Val_loss', val_loss, step)
                print(('Step %d, PENN validation loss: %f, Accuracy %f %%' % (step, val_loss, val_acc)))
                print(('-' * 100))
                model = model.train()
            batch = next(DL)
            en = batch['text']
            targets = batch['tokens']
            pad_id = batch['pad_id']
            pad_mask = batch['pad_mask']
            if (gpu_id >= 0):
                targets = targets.to(gpu_id)
                pad_mask = pad_mask.to(gpu_id)
            (_, loss, acc) = r.penn(model, en, pad_id=pad_id, targets=targets, target_pad_mask=pad_mask)
            loss.backward()
            loss = loss.detach()
            if log:
                summary_writer.add_scalar('Loss', loss, step)
                print(('Step %d, PENN Loss: %f, Accuracy: %f %%' % (step, loss, acc)))
        if (gpu_id > 0):
            # Copy local gradients onto the shared model before stepping.
            ensure_shared_grads(model, shared_model, gpu_id)
        optimizer.step()
        if ((save_interval != None) and (((i + 1) % save_interval) == 0)):
            shared_model.save(model_save_path, step)
        sys.stdout.flush()
def griffin_lim_ogg(linear_hdf, name, iterations=1):
    """Convert linear spectrograms to audio with Griffin-Lim and register the result.

    :param linear_hdf: HDF dataset containing linear spectrograms.
    :param name: alias / output name used for job registration.
    :param iterations: number of Griffin-Lim reconstruction iterations.
    :return: tuple of (output corpus, GriffinLim job).
    """
    from recipe.tts.toolchain import GriffinLim
    # Fixed 16 kHz analysis setup: 12.5 ms shift, 50 ms window, 0.97 preemphasis.
    job = GriffinLim(linear_hdf, iterations=iterations, sample_rate=16000,
                     window_shift=0.0125, window_size=0.05, preemphasis=0.97)
    job.add_alias('gl_conversion/' + name)
    tk.register_output('generated_audio/' + name + '_audio', job.out_folder)
    return (job.out_corpus, job)
class ValueSecondMomentTest(BaseSecondMomentTest):
    """Checks second-moment correction of BatchNorm2d layers in PyTorch.

    After correction, gamma/beta must be unchanged while the moving mean and
    moving variance must differ from the pre-correction graph.
    """

    def create_feature_network(self, input_shape):
        # Small conv network containing the BatchNorm layers under test.
        return ConvSecondMomentNet()

    def run_test(self, seed=0):
        # Seed all RNG sources so graph preparation is reproducible.
        np.random.seed(seed)
        random.seed(a=seed)
        torch.random.manual_seed(seed)
        fw_info = DEFAULT_PYTORCH_INFO  # NOTE(review): assigned but unused below
        pytorch_impl = PytorchImplementation()  # NOTE(review): assigned but unused below
        input_shapes = self.create_inputs_shape()
        x = self.generate_inputs(input_shapes)

        def representative_data_gen():
            # Single fixed batch reused for calibration.
            (yield x)
        model_float = self.create_feature_network(input_shapes)
        core_config_dict = self.get_core_configs()
        tpc_dict = self.get_tpc()
        for (model_name, core_config) in core_config_dict.items():
            tpc = tpc_dict[model_name]
            (tg, graph_after_second_moment_correction) = self.prepare_graph(model_float, representative_data_gen, core_config=core_config, fw_info=DEFAULT_PYTORCH_INFO, target_platform_capabilities=tpc)
            for node in graph_after_second_moment_correction.nodes:
                if (node.layer_class == torch.nn.BatchNorm2d):
                    # Fetch the matching node from the pre-correction graph.
                    bf_second_moment_node = tg.find_node_by_name(node.name)[0]
                    gamma0 = bf_second_moment_node.get_weights_by_keys(GAMMA)
                    beta0 = bf_second_moment_node.get_weights_by_keys(BETA)
                    moving_mean0 = bf_second_moment_node.get_weights_by_keys(MOVING_MEAN)
                    moving_variance0 = bf_second_moment_node.get_weights_by_keys(MOVING_VARIANCE)
                    gamma1 = node.get_weights_by_keys(GAMMA)
                    beta1 = node.get_weights_by_keys(BETA)
                    moving_mean1 = node.get_weights_by_keys(MOVING_MEAN)
                    moving_variance1 = node.get_weights_by_keys(MOVING_VARIANCE)
                    # NOTE(review): if these weights are numpy arrays, '==' yields
                    # an element-wise array and assertTrue/assertFalse may be
                    # ambiguous -- confirm they are scalars or 1-element arrays.
                    self.unit_test.assertTrue((gamma0 == gamma1))
                    self.unit_test.assertTrue((beta0 == beta1))
                    self.unit_test.assertFalse((moving_mean0 == moving_mean1))
                    self.unit_test.assertFalse((moving_variance0 == moving_variance1))

    def prepare_graph(self, in_model: Module, representative_data_gen: Callable, core_config: CoreConfig=CoreConfig(), fw_info: FrameworkInfo=DEFAULT_PYTORCH_INFO, target_platform_capabilities: TargetPlatformCapabilities=DEFAULT_PYTORCH_INFO) -> Tuple[(Graph, Graph)]:
        """Run quantization preparation and return (graph before, graph after correction).

        NOTE(review): the default for target_platform_capabilities is
        DEFAULT_PYTORCH_INFO (a FrameworkInfo), which looks like a copy-paste
        slip; callers here always pass an explicit TPC. Also note CoreConfig()
        as a default argument is evaluated once at definition time.
        """
        tb_w = init_tensorboard_writer(fw_info)
        fw_impl = PytorchImplementation()
        (tg, bit_widths_config, _) = core_runner(in_model=in_model, representative_data_gen=representative_data_gen, core_config=core_config, fw_info=fw_info, fw_impl=fw_impl, tpc=target_platform_capabilities, tb_w=tb_w)
        # Apply the correction on a deep copy so 'tg' stays pristine for comparison.
        graph_to_apply_second_moment = copy.deepcopy(tg)
        semi_quantized_model = quantized_model_builder_for_second_moment_correction(graph_to_apply_second_moment, fw_info, fw_impl)
        pytorch_apply_second_moment_correction(semi_quantized_model, core_config, representative_data_gen, graph_to_apply_second_moment)
        return (tg, graph_to_apply_second_moment)
class FullTensorProductOfCrystals(TensorProductOfCrystals):
    """Full tensor product of a list of crystals.

    Elements are the tuples of the Cartesian product of the factors; the
    Cartan type is taken from the first factor unless passed explicitly.
    """

    def __init__(self, crystals, **options):
        # Category: meet of the factors' categories, closed under tensor
        # products, and marked Infinite if any factor is infinite.
        category = Category.meet([crystal.category() for crystal in crystals])
        category = category.TensorProducts()
        if any(((c in Sets().Infinite()) for c in crystals)):
            category = category.Infinite()
        Parent.__init__(self, category=category)
        self.crystals = crystals
        if ('cartan_type' in options):
            self._cartan_type = CartanType(options['cartan_type'])
        elif (not crystals):
            # With no factors there is nothing to infer the Cartan type from.
            raise ValueError('you need to specify the Cartan type if the tensor product list is empty')
        else:
            self._cartan_type = crystals[0].cartan_type()
        self.cartesian_product = cartesian_product(self.crystals)
        self.module_generators = self

    def _repr_(self):
        # The Kashiwara convention displays the factors in reversed order.
        if (self.options.convention == 'Kashiwara'):
            st = repr(list(reversed(self.crystals)))
        else:
            st = repr(list(self.crystals))
        return 'Full tensor product of the crystals {}'.format(st)

    def __iter__(self):
        # Wrap each Cartesian-product tuple as an element of this parent.
        for x in self.cartesian_product:
            (yield self(*x))

    def cardinality(self):
        # Delegated to the Cartesian-product parent.
        return self.cartesian_product.cardinality()

    # NOTE(review): the bare '_method' below looks like a truncated decorator
    # (e.g. '@cached_method') lost in extraction -- confirm against upstream.
    _method
    def weight_lattice_realization(self):
        """Common parent of the factors' weight lattice realizations."""
        cm = get_coercion_model()
        return cm.common_parent(*[crystal.weight_lattice_realization() for crystal in self.crystals])
def get_args_and_hdf5_file(activation, config: Tuple[(str, str, str)]):
    """Assemble the training command line for one (config, activation) pair.

    :param activation: activation spec; ':' becomes '-' in the run name.
    :param config: (config_name, config_settings, human_name) triple.
    :return: (argv list, expected hdf5 output path, run name).
    """
    (config_name, config_settings, human_name) = config
    run_name = ('run_%s_%s' % (config_name, activation.replace(':', '-')))
    # Network layout derived from the globally selected best network.
    fourier_count = str((BEST_NETWORK[0] - 4) // 2)
    layer_spec = ':'.join([str(BEST_NETWORK[0])] * (BEST_NETWORK[1] - 1))
    argv = [
        sys.executable, 'volnet/train_volnet.py', config_settings,
        '--train:mode', 'world',
        '--train:samples', '256**3',
        '--train:batchsize', '64*64*128',
        '--train:sampler_importance', '0.01',
        '--val:copy_and_split',
        '--outputmode', 'density:direct',
        '--lossmode', 'density',
        '-l1', '1',
        '--lr_step', '50',
        '-i', '200',
        '--fouriercount', fourier_count,
        '--fourierstd', '1.0',
        '--activation', activation,
        '--layers', layer_spec,
        '--volumetric_features_resolution', str(GRID_RESOLUTION),
        '--volumetric_features_channels', str(GRID_CHANNELS),
        '--logdir', BASE_PATH + '/log',
        '--modeldir', BASE_PATH + '/model',
        '--hdf5dir', BASE_PATH + '/hdf5',
        '--name', run_name,
        '--save_frequency', '50',
    ]
    hdf5_path = BASE_PATH + '/hdf5/' + run_name + '.hdf5'
    return (argv, hdf5_path, run_name)
def get_metadata_only():
    """Collect run metadata: hostname, git state (best effort), time and argv.

    Fix: the original constructed ``git.Repo(search_parent_directories=True)``
    three times; a single repo object is now reused. The unused exception
    variable is also dropped.

    :return: dotdict with hostname, git_branch/git_hash/git_commit_datetime
             (only when inside a git repository), current_datetime and cmd.
    """
    args = dotdict()
    args.hostname = socket.gethostname()
    try:
        repo = git.Repo(search_parent_directories=True)
        # NOTE(review): active_branch raises on a detached HEAD; only the
        # not-a-repo case is handled here, as in the original -- confirm intent.
        args.git_branch = repo.active_branch.name
        args.git_hash = repo.head.object.hexsha
        args.git_commit_datetime = str(repo.head.object.committed_datetime)
    except git.exc.InvalidGitRepositoryError:
        # Not inside a git checkout: git fields are simply omitted.
        pass
    args.current_datetime = time.strftime('%b %d, %Y ; %l:%M%p %Z (%z)')
    args.cmd = ' '.join(sys.argv)
    return args
# NOTE(review): the bare '_model' below looks like a truncated registration
# decorator (e.g. '@register_model') lost in extraction -- confirm upstream.
_model
def caformer_s18_in21k(pretrained=False, **kwargs):
    """CAFormer-S18 (MetaFormer) configured for ImageNet-21k weights.

    Two SepConv stages followed by two Attention stages.

    :param pretrained: when True, download and load the published checkpoint.
    """
    model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512],
                       token_mixers=[SepConv, SepConv, Attention, Attention],
                       head_fn=MlpHead, **kwargs)
    model.default_cfg = default_cfgs['caformer_s18_in21k']
    if not pretrained:
        return model
    checkpoint = torch.hub.load_state_dict_from_url(url=model.default_cfg['url'],
                                                    map_location='cpu', check_hash=True)
    model.load_state_dict(checkpoint)
    return model
# NOTE(review): the leading '.parametrize(...)' looks like a truncated
# '@pytest.mark.parametrize' decorator lost in extraction -- confirm upstream.
.parametrize('cont,disc,min_feature', [([0, 0, 0.4, 1, 1, 0.8, 0, 0], [2.6, 5.8], 2), ([0, 0, 0.4, 0.8, 0.8, 0.8, 0, 0], [2.6, 5.8], 2), ([0, 0, 0.4, 0.2, 1, 0.8, 0, 0], [3.8, 5.8], 2), ([1, 1, 0.6, 0, 0, 0.2, 1, 1], [0, 2.6, 5.8, 8], 2)])
def test_grating_edge_discretization(cont, disc, min_feature):
    """Discretizing a continuous grating should produce the expected edge positions."""
    param = parametrization.DirectParam(cont)
    # min_feature is scaled by 40 -- presumably pixels per feature unit; confirm.
    trans = grating.GratingEdgeDiscretization(param, (40 * min_feature), 40)
    param_disc = parametrization.GratingParam([], num_pixels=len(param.to_vector()))
    # The transform writes the discretized edges into param_disc in place.
    trans(param_disc, None)
    np.testing.assert_almost_equal(param_disc.to_vector(), disc)
# NOTE(review): the leading '.parametrize(...)' looks like a truncated
# '@pytest.mark.parametrize' decorator lost in extraction -- confirm upstream.
.parametrize('func', UFUNCS, ids=UFUNC_NAMES)
def test_nan_inputs(func):
    """Each ufunc must propagate NaN when fed all-NaN inputs (or reject floats)."""
    args = ((np.nan,) * func.nin)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, 'floating point number truncated to an integer')
        try:
            # NOTE(review): the inner context rebinds 'sup', shadowing the outer
            # one; both suppressions remain active while func runs.
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning)
                res = func(*args)
        except TypeError:
            # Functions that reject float input are exempt from the NaN check.
            return
    if (func in POSTPROCESSING):
        res = POSTPROCESSING[func](*res)
    msg = f'got {res} instead of nan'
    assert_array_equal(np.isnan(res), True, err_msg=msg)
def get_stats():
    """Return the parameter count of a freshly built 1-input/1-output model."""
    return params_count(create_model(to_device=False, dim_in=1, dim_out=1))
class TimeRecurrentCell(nn.Module):
    """Applies a single-step recurrent ``cell`` over the time axis of a sequence.

    Fix: ``Tensor.split`` returns a tuple, so the original
    ``inputs_time.reverse()`` raised AttributeError whenever ``reverse=True``;
    the time slices are now materialized as a list before reversing.

    :param cell: step module with ``hidden_size``; called as ``cell(x_t, hidden)``.
    :param batch_first: if True, inputs are (batch, time, ...); else (time, batch, ...).
    :param lstm: cell state is an (h, c) tuple.
    :param with_attention: cell takes (input, context) and exposes ``cell.attention``.
    :param reverse: process the sequence back-to-front (outputs re-ordered).
    """

    def __init__(self, cell, batch_first=False, lstm=True, with_attention=False, reverse=False):
        super(TimeRecurrentCell, self).__init__()
        self.cell = cell
        self.lstm = lstm
        self.reverse = reverse
        self.batch_first = batch_first
        self.with_attention = with_attention

    def forward(self, inputs, hidden=None, context=None, mask_attention=None, get_attention=False):
        if (self.with_attention and (mask_attention is not None)):
            self.cell.attention.set_mask(mask_attention)
        hidden_size = self.cell.hidden_size
        batch_dim = (0 if self.batch_first else 1)
        time_dim = (1 if self.batch_first else 0)
        batch_size = inputs.size(batch_dim)
        if (hidden is None):
            # Zero-initialize hidden state (and cell state for LSTM cells).
            num_layers = getattr(self.cell, 'num_layers', 1)
            zero = inputs.data.new(1).zero_()
            h0 = zero.view(1, 1, 1).expand(num_layers, batch_size, hidden_size)
            hidden = h0
            if self.lstm:
                hidden = (hidden, h0)
        if (self.with_attention and ((not isinstance(hidden, tuple)) or (self.lstm and (not isinstance(hidden[0], tuple))))):
            # Attach an initial zero attention output to the hidden state.
            zero = inputs.data.new(1).zero_()
            attn_size = self.cell.attention.output_size
            a0 = zero.view(1, 1).expand(batch_size, attn_size)
            hidden = (hidden, a0)
        outputs = []
        attentions = []
        # BUGFIX: split() returns a tuple; make it a list so it can be reversed.
        inputs_time = list(inputs.split(1, time_dim))
        if self.reverse:
            inputs_time.reverse()
        for input_t in inputs_time:
            input_t = input_t.squeeze(time_dim)
            if self.with_attention:
                input_t = (input_t, context)
            if (self.with_attention and get_attention):
                (output_t, hidden, attn) = self.cell(input_t, hidden, get_attention=True)
                attentions += [attn]
            else:
                (output_t, hidden) = self.cell(input_t, hidden)
            outputs += [output_t]
        if self.reverse:
            # Restore chronological order of the collected outputs.
            outputs.reverse()
        outputs = torch.stack(outputs, time_dim)
        if get_attention:
            attentions = torch.stack(attentions, time_dim)
            return (outputs, hidden, attentions)
        else:
            return (outputs, hidden)
def test_logsumexp_b_zero():
    """Terms with weight b == 0 must be ignored, even when they would overflow."""
    values = [1, 10000]
    weights = [1, 0]
    # log(1 * e^1 + 0 * e^10000) == 1; the huge term is masked by its zero weight.
    assert_almost_equal(logsumexp(values, b=weights), 1)
def build_wheel_pep517(name, backend, metadata_directory, build_options, tempd):
    """Build one wheel via a PEP 517 backend; return its path, or None on failure.

    :param name: project name, used only for logging.
    :param backend: PEP 517 build backend wrapper.
    :param metadata_directory: prepared metadata directory (must not be None).
    :param build_options: legacy --build-option flags, unsupported under PEP 517.
    :param tempd: destination directory for the built wheel.
    """
    assert (metadata_directory is not None)
    if build_options:
        # PEP 517 backends have no hook for setup.py-style build options.
        logger.error('Cannot build wheel for %s using PEP 517 when --build-option is present', name)
        return None
    try:
        logger.debug('Destination directory: %s', tempd)
        spin_message = 'Building wheel for {} (PEP 517)'.format(name)
        with backend.subprocess_runner(runner_with_spinner_message(spin_message)):
            built_name = backend.build_wheel(tempd, metadata_directory=metadata_directory)
    except Exception:
        logger.error('Failed building wheel for %s', name)
        return None
    return os.path.join(tempd, built_name)
def dictionary_from_generator(gen):
    """Aggregate (key, value, ...) pairs from *gen*, summing values per key.

    Only item[0] (key) and item[1] (value) of each yielded item are used,
    matching the original behavior for longer tuples.

    Fix: the original materialized the generator and re-scanned it once per
    distinct key (O(n*k)); this is a single O(n) pass with identical results.

    :param gen: iterable of indexable items (key at [0], numeric value at [1]).
    :return: dict mapping each key to the sum of its values.
    """
    totals = {}
    for item in gen:
        # Start from 0, exactly like the original's sum() over a generator.
        totals[item[0]] = totals.get(item[0], 0) + item[1]
    return totals
# NOTE(review): '_function_api(...)' looks like a truncated decorator for the
# framework's function-API registration; the 'faeture' typo is in the original
# metadata string -- confirm against upstream.
_function_api('triplane_feature', [('F', 'Grid faeture', '[3, G, G, D]', True)])
def _query_on_triplane(x, G, feature_size, min_=[(- 1), (- 1), (- 1)], max_=[1, 1, 1], use_ste=False, f_init=None, fix_parameters=False, rng=None):
    """Query features for points ``x`` from a learnable tri-plane grid.

    NOTE(review): ``min_``/``max_`` are mutable default arguments; they are not
    mutated here, but tuples would be safer -- confirm the query op accepts them.
    ``rng`` is accepted but unused in this body.

    :param x: query points.
    :param G: per-plane grid resolution.
    :param feature_size: feature channel count D of the grid.
    :param use_ste: pass straight-through-estimator flag to the query op.
    :param fix_parameters: when True, the grid parameter gets no gradients.
    """
    # Default initializer for the grid when none is supplied.
    f_init = (f_init if (f_init is not None) else I.NormalInitializer(0.001))
    shape = [3, G, G, feature_size]
    # Shared parameter 'F' of shape [3, G, G, D]; need_grad = not fix_parameters.
    feature = nn.parameter.get_parameter_or_create('F', shape, f_init, True, (not fix_parameters))
    h = F.lanczos_query_on_triplane(x, feature, min_, max_, use_ste)
    return h
def display_info(epoch, step, watch_list):
    """Print one progress line: '[epoch][step]' plus each watched metric.

    Floats are shown with 3 decimals, ints/bools as integers; other value
    types are silently skipped (original behavior).

    Fix: ``np.bool`` was removed in NumPy 1.24 and raised AttributeError here;
    replaced with ``np.bool_`` (builtin ``bool`` was already in the list).

    :param epoch: zero-based epoch index (displayed 1-based).
    :param step: zero-based step index (displayed 1-based).
    :param watch_list: mapping of metric name -> value.
    """
    sys.stdout.write(('[%d][%d]' % ((epoch + 1), (step + 1))))
    for (name, value) in watch_list.items():
        if (type(value) in [float, np.float32, np.float64]):
            sys.stdout.write((' %s: %2.3f' % (name, value)))
        elif (type(value) in [int, bool, np.int32, np.int64, np.bool_]):
            sys.stdout.write((' %s: %d' % (name, value)))
    sys.stdout.write('\n')
class BadPayload(BadData):
    """Raised when a payload is invalid.

    Keeps a reference to the lower-level exception that caused the failure,
    when there is one.
    """

    def __init__(self, message, original_error=None):
        BadData.__init__(self, message)
        # The underlying exception that triggered this error, or None.
        self.original_error = original_error
class ElectraForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed.

    Instantiation delegates to requires_backends, which raises with a message
    pointing the user at the missing 'torch' backend.
    """
    # Backends that must be installed for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_cache_path(rel_path):
    """Resolve *rel_path* against the registry's cache root, expanding '~'."""
    cache_root = registry.get_path('cache_root')
    full_path = os.path.join(cache_root, rel_path)
    return os.path.expanduser(full_path)
def plot_likelihood_grad_BO_BN(likelihood):
    """Plot m_z and v_z against twice the corresponding gradients of A and I.

    Left panel: m_z vs 2*d(A)/d(a_z+); right panel: v_z vs 2*d(I)/d(a_z+),
    both as functions of a_z+. Figure title is the likelihood name.
    """
    df = check_likelihood_grad_BO_BN(likelihood)
    (fig, axes) = plt.subplots(1, 2, figsize=(8, 4))
    panels = [
        ('mz', '$m_z$', 'grad_az_A', '$2\\partial_{a_z^+} A$'),
        ('vz', '$v_z$', 'grad_az_I', '$2\\partial_{a_z^+} I$'),
    ]
    for (ax, (value_col, value_label, grad_col, grad_label)) in zip(axes, panels):
        ax.plot(df['az'], df[value_col], '-', label=value_label)
        ax.plot(df['az'], (2 * df[grad_col]), '--', label=grad_label)
        ax.set(xlabel='$a_z^+$')
        ax.legend()
    fig.suptitle(likelihood)
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
def rand_cutout(x, ratio=0.5):
    """Zero out one random rectangle per sample (DiffAugment-style cutout).

    The rectangle side is ratio * spatial size (rounded); offsets are drawn
    uniformly per sample and the patch is clamped to the image bounds. The
    RNG call order matches the original exactly.

    :param x: tensor of shape (N, C, H, W).
    :param ratio: cutout side as a fraction of each spatial dimension.
    :return: x with the cutout region zeroed (all channels).
    """
    n = x.size(0)
    height, width = x.size(2), x.size(3)
    cut_h = int(height * ratio + 0.5)
    cut_w = int(width * ratio + 0.5)
    # Per-sample top-left jitter; the "+ (1 - size % 2)" keeps parity handling
    # identical to the original implementation.
    offset_h = torch.randint(0, height + (1 - cut_h % 2), size=[n, 1, 1], device=x.device)
    offset_w = torch.randint(0, width + (1 - cut_w % 2), size=[n, 1, 1], device=x.device)
    (grid_batch, grid_h, grid_w) = torch.meshgrid(
        torch.arange(n, dtype=torch.long, device=x.device),
        torch.arange(cut_h, dtype=torch.long, device=x.device),
        torch.arange(cut_w, dtype=torch.long, device=x.device))
    grid_h = torch.clamp(grid_h + offset_h - cut_h // 2, min=0, max=height - 1)
    grid_w = torch.clamp(grid_w + offset_w - cut_w // 2, min=0, max=width - 1)
    mask = torch.ones(n, height, width, dtype=x.dtype, device=x.device)
    mask[(grid_batch, grid_h, grid_w)] = 0
    return x * mask.unsqueeze(1)
def test_ll_step():
    """One high-level option step must report 6 low-level transitions (5 + terminal)."""
    options = [(0, 5), (5, 5), (10, 5)]
    env = OptionsEnv(IntersimpleLidarFlat(n_rays=5), options)
    env.reset()
    (_, _, _, info) = env.step(0)
    ll = info['ll']
    expected_shapes = {
        'observations': (6, 36),
        'actions': (6, 1),
        'rewards': (6,),
        'env_done': (6,),
        'plan_done': (6,),
    }
    for (key, shape) in expected_shapes.items():
        assert (ll[key].shape == shape)
    assert (ll['plan_done'][5] == True)
    assert (ll['steps'] == 5)
    assert (len(ll['infos']) == 5)
def triframes_call(watset):
    """Run the triframes Makefile target inside ./base/triframes.

    Fix: the working directory is now restored via try/finally (and saved as
    an absolute path), so an exception while running make no longer leaves the
    process stranded in the subdirectory. Also dropped the pointless f-prefix
    on placeholder-free strings.

    :param watset: when True build triw2v-watset.txt, else triw2v.txt.
    """
    previous_dir = os.getcwd()
    os.chdir('./base/triframes')
    try:
        target = 'triw2v-watset.txt' if watset else 'triw2v.txt'
        os.system('WEIGHT=0 W2V=w2v.bin VSO=triplets.tsv make ' + target)
    finally:
        os.chdir(previous_dir)
def test_keyword_ifelse():
    """Run the keyword_ifelse kernel on random input and compare A against B.

    NOTE(review): asserting np.allclose(A, B) after the call implies the
    kernel is expected to copy/transform A into B such that they match --
    confirm the kernel's contract; it is not visible from here.
    """
    N.set(128)
    A = np.random.rand(N.get()).astype(np.float32)
    B = np.zeros((N.get(),), dtype=np.float32)
    C = np.int32(2)
    try:
        keyword_ifelse(A, B, C)
    except Exception as e:
        # NOTE(review): broad except that prints and returns False hides
        # failures -- pytest treats any non-raising return as a pass. Confirm
        # this best-effort behavior is intentional.
        print(e)
        return False
    assert np.allclose(A, B)
def get_rnn_cell(cell_class, cell_params, num_layers=1, dropout_input_keep_prob=1.0, dropout_output_keep_prob=1.0, residual_connections=False, residual_combiner='add', residual_dense=False):
    """Create an RNN cell, optionally dropout-wrapped and stacked into layers.

    :param cell_class: cell specification resolved by cell_from_spec.
    :param cell_params: parameters forwarded to the cell constructor.
    :param num_layers: number of stacked layers; 1 returns a bare cell.
    :param dropout_input_keep_prob: input keep probability (< 1.0 enables dropout).
    :param dropout_output_keep_prob: output keep probability (< 1.0 enables dropout).
    :param residual_connections: enable residual connections in the stack.
    :param residual_combiner: how residual branches are combined ('add', ...).
    :param residual_dense: densely connect residual branches.
    :return: a single cell or an ExtendedMultiRNNCell wrapping the stack.
    """
    needs_dropout = (dropout_input_keep_prob < 1.0) or (dropout_output_keep_prob < 1.0)
    cells = []
    for _ in range(num_layers):
        layer_cell = cell_from_spec(cell_class, cell_params)
        if needs_dropout:
            layer_cell = tf.contrib.rnn.DropoutWrapper(
                cell=layer_cell,
                input_keep_prob=dropout_input_keep_prob,
                output_keep_prob=dropout_output_keep_prob)
        cells.append(layer_cell)
    if len(cells) == 1:
        return cells[0]
    return rnn_cell.ExtendedMultiRNNCell(
        cells=cells,
        residual_connections=residual_connections,
        residual_combiner=residual_combiner,
        residual_dense=residual_dense)
def pre_callback():
    """Homotopy pre-step: solve Navier-Stokes at a reduced Reynolds number (Re/10).

    NOTE(review): relies on module-level FEniCS/cashocs objects (V, u, p, c,
    Re, up, bcs, dx and the UFL operators) -- confirm they are in scope before
    this callback runs.
    """
    print('Solving low Re Navier-Stokes equations for homotopy.')
    (v, q) = TestFunctions(V)
    # Weak form: viscous term + convection at Re/10 - pressure coupling
    # - incompressibility constraint - control/source term.
    e = (((((inner(grad(u), grad(v)) * dx) + ((Constant((Re / 10.0)) * dot((grad(u) * u), v)) * dx)) - ((p * div(v)) * dx)) - ((q * div(u)) * dx)) - (inner(c, v) * dx))
    cashocs.newton_solve(e, up, bcs, verbose=False)
def get_g77_abi_wrappers(info):
    """Return the Fortran ABI wrapper source files appropriate for *info*.

    When a g77 ABI wrapper is required, the real Fortran + C wrappers are
    returned; otherwise a dummy Fortran shim is used.
    """
    src_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'src')
    if needs_g77_abi_wrapper(info):
        filenames = ['wrap_g77_abi_f.f', 'wrap_g77_abi_c.c']
    else:
        filenames = ['wrap_dummy_g77_abi.f']
    return [os.path.join(src_dir, filename) for filename in filenames]
def case_sampling_SQLNet(K=100):
    """Interactively sample K distinct test questions for a user study.

    Shows a random (question, headers) pair; typing 'y' accepts it. Accepted
    ids are re-written to user_study_ids.json after every acceptance so a
    partial session is never lost.

    Fixes: Python-2 ``raw_input`` -> ``input`` (raw_input is a NameError on
    Python 3); ``id`` no longer shadows the builtin; the output file is closed
    via ``with``; the bare ``except:`` is narrowed to ``Exception`` so
    KeyboardInterrupt can end the session.

    :param K: number of samples to collect.
    """
    from SQLNet_model.sqlnet.utils import load_data
    data_dir = 'SQLNet_model/data/'
    (sql_data, table_data) = load_data((data_dir + 'test_tok.jsonl'), (data_dir + 'test_tok.tables.jsonl'))
    size = len(sql_data)
    print(size)
    sampled_ids = []
    while (len(sampled_ids) < K):
        sample_id = random.choice(range(size))
        if (sample_id in sampled_ids):
            continue
        question = sql_data[sample_id]['question']
        table_id = sql_data[sample_id]['table_id']
        headers = table_data[table_id]['header']
        try:
            print('question: {}\nheaders: {}'.format(question, headers))
            action = input('Take or not?')
            if (action == 'y'):
                sampled_ids.append(sample_id)
                with open((data_dir + 'user_study_ids.json'), 'w') as out_file:
                    json.dump(sampled_ids, out_file)
        except Exception:
            # Best effort: skip samples whose text cannot be displayed/read.
            pass
def overlaps(a, b):
    """Return True when the character spans of *a* and *b* overlap (inclusive ends).

    A span is [start, start + len(text)] in absolute character offsets.

    Fix: the original tested only whether one of a's endpoints fell inside b's
    span, which missed the case where a strictly contains b. The standard
    interval-intersection test covers all overlap cases and keeps every
    previously-True case True.

    :param a: span-like object with get_attrib_tokens('abs_char_offsets') and .text.
    :param b: span-like object with the same interface.
    """
    a_start = a.get_attrib_tokens('abs_char_offsets')[0]
    b_start = b.get_attrib_tokens('abs_char_offsets')[0]
    a_end = (a_start + len(a.text))
    b_end = (b_start + len(b.text))
    # Two closed intervals intersect iff each starts before the other ends.
    return (a_start <= b_end) and (b_start <= a_end)
def test_forward_key_position(pretrain_file):
    """Forward pass must work for key/untied_key compositions at reduce positions 0 and 32."""
    for composition in ('untied_key', 'key'):
        for position in ('0', '32'):
            model = build_model(pretrain_file, '--constituency_composition', composition,
                                '--reduce_position', position)
            run_forward_checks(model, num_states=2)
class TorchLoaderIter(BatchIter):
    """BatchIter backed by a torch DataLoader over a user-provided dataset.

    A ``collate_fn`` is mandatory: it must assemble/pad raw samples into
    (batch_x, batch_y) pairs.
    """

    def __init__(self, dataset, collate_fn, batch_size=1, sampler=None, num_workers=0, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None, batch_sampler=None):
        assert (len(dataset) > 0)
        assert (collate_fn is not None), 'You must pass collate_fn to pad the batch.'
        if (batch_sampler is not None):
            # DataLoader forbids combining batch_sampler with batch_size,
            # sampler or drop_last, so neutralize them here.
            batch_size = 1
            sampler = None
            drop_last = False
        super().__init__(dataset=dataset, batch_size=batch_size, sampler=sampler, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, collate_fn=collate_fn, batch_sampler=batch_sampler)

    def __iter__(self):
        # Yield (batch_x, batch_y) pairs; per-batch indices are not tracked.
        self.init_iter()
        for (batch_x, batch_y) in self.dataiter:
            self.cur_batch_indices = None
            (yield (batch_x, batch_y))
def test_concept_drift_stream_with_alpha(test_path):
    """get_info() must reflect the width derived from alpha; alpha=0 must warn.

    The expected string below is matched exactly, including the width=5729
    value computed from alpha=0.01.
    """
    stream = ConceptDriftStream(alpha=0.01, random_state=1, position=20)
    expected_info = 'ConceptDriftStream(alpha=0.01,\n drift_stream=AGRAWALGenerator(balance_classes=False,\n classification_function=2,\n perturbation=0.0,\n random_state=112),\n position=20, random_state=1,\n stream=AGRAWALGenerator(balance_classes=False,\n classification_function=0,\n perturbation=0.0, random_state=112),\n width=5729)'
    assert (stream.get_info() == expected_info)
    # alpha=0 is deprecated and must emit the exact FutureWarning below.
    with pytest.warns(FutureWarning) as actual_warning:
        ConceptDriftStream(alpha=0, random_state=1, position=20)
    assert (actual_warning[0].message.args[0] == "Default value for 'alpha' has changed from 0 to None. 'alpha=0' will throw an error from v0.7.0")
def value_indices(arr, *, ignore_value=None):
    """Find the indices of each distinct value in *arr* (ndimage C backend).

    :param arr: input array.
    :param ignore_value: value excluded from the result, or None to keep all.
    :return: mapping of value -> indices, as produced by the C implementation.
    """
    # The C routine expects the ignore value boxed in a 1-element array of the
    # input's dtype, plus a flag saying whether to use it at all.
    ignore_value_arr = numpy.zeros((1,), dtype=arr.dtype)
    ignore_is_none = ignore_value is None
    if not ignore_is_none:
        ignore_value_arr[0] = ignore_value_arr.dtype.type(ignore_value)
    return _nd_image.value_indices(arr, ignore_is_none, ignore_value_arr)
class CenterCrop(object):
    """Crop a PIL image at its center to a fixed (height, width) size."""

    def __init__(self, size):
        # size: (crop_height, crop_width)
        self.size = size

    def __call__(self, img):
        (image_width, image_height) = img.size
        (crop_height, crop_width) = self.size
        # Center the crop window; rounding matches the original behavior.
        top = int(round((image_height - crop_height) / 2.0))
        left = int(round((image_width - crop_width) / 2.0))
        return crop(img, (top, left, crop_height, crop_width))
def parse_runlog(path: str):
    """Parse a JSONL run log, group results by model_dir, and print one table per model.

    :param path: path to the JSON-lines log file.
    """
    results = defaultdict(lambda: defaultdict(list))
    with open(path, 'r', encoding='utf-8') as input_file:
        for raw_line in input_file:
            record = json.loads(raw_line.strip())
            # Accumulate this record into its model's bucket.
            parse_runlog_line(record, results[record['params']['model_dir']])
    for (model_dir, model_results) in results.items():
        print(model_dir)
        print_table(model_results)
        print('\n')
def tally_directory(directory, size=10000, seed=1):
    """Tally per-image object-class area fractions over an image directory or .npz dump.

    Fix: ``numpy.float`` was removed in NumPy 1.24 and raised AttributeError
    here; replaced with the builtin ``float`` (its historical alias target).

    :param directory: image folder, or a .npz file with a 'fake' uint8 HWC array.
    :param size: number of images to sample.
    :param seed: sampling seed for the fixed random subset.
    :return: (size, NUM_OBJECTS) float array; entry [i, c] is the fraction of
             pixels of image i segmented as class c.
    """
    if directory.endswith('.npz'):
        # Pre-generated fakes: convert HWC uint8 [0,255] -> NCHW [-1,1] at 256x256
        # to match the folder pipeline below.
        with np.load(directory) as f:
            images = torch.from_numpy(f['fake'])
        images = images.permute(0, 3, 1, 2)
        images = ((images / 127.5) - 1)
        images = torch.nn.functional.interpolate(images, size=(256, 256))
        print(images.shape, images.max(), images.min())
        dataset = TensorDataset(images)
    else:
        dataset = parallelfolder.ParallelImageFolders([directory], transform=transforms.Compose([transforms.Resize(256), transforms.CenterCrop(256), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
    loader = DataLoader(dataset, sampler=FixedRandomSubsetSampler(dataset, end=size, seed=seed), batch_size=10, pin_memory=True)
    upp = segmenter.UnifiedParsingSegmenter()
    (labelnames, catnames) = upp.get_label_and_category_names()
    # BUGFIX: numpy.float (removed in NumPy 1.24) replaced by builtin float.
    result = numpy.zeros((size, NUM_OBJECTS), dtype=float)
    batch_result = torch.zeros(loader.batch_size, NUM_OBJECTS, dtype=torch.float).cuda()
    with torch.no_grad():
        batch_index = 0
        for [batch] in pbar(loader):
            seg_result = upp.segment_batch(batch.cuda())
            for i in range(len(batch)):
                # Per-class pixel counts normalized by the segmented image area.
                batch_result[i] = (seg_result[(i, 0)].view((- 1)).bincount(minlength=NUM_OBJECTS).float() / (seg_result.shape[2] * seg_result.shape[3]))
            result[batch_index:(batch_index + len(batch))] = batch_result.cpu().numpy()
            batch_index += len(batch)
    return result
class TaskConfig(ConfigDictionaryOfType):
    """Dictionary of Task objects loaded from a task YAML file."""
    _type = Task

    def __init__(self, taskYamlFile: str):
        super().__init__(load_tasks(taskYamlFile))

    def get_descriptions(self) -> List[str]:
        """Descriptions of every sampled task, excluding 'positive'/'negative'."""
        excluded = {'positive', 'negative'}
        return [task.description
                for (name, task) in self.items()
                if (name not in excluded) and task.samples]
def flatten_modules(module_list: Union[(Iterable, Module)]):
    """Recursively flatten arbitrarily nested iterables of Modules into a flat list.

    :param module_list: a single Module or a (possibly nested) iterable of them.
    :return: list of Modules in depth-first order.
    """
    if isinstance(module_list, Module):
        # Base case: a lone module becomes a one-element list.
        return [module_list]
    flattened = []
    for entry in module_list:
        flattened.extend(flatten_modules(entry))
    return flattened
def get_confusion_set(char):
    """Union of same-pinyin and same-stroke confusions for *char*, plus the char itself."""
    pinyin_matches = c.same_pinyin.get(char, set())
    stroke_matches = c.same_stroke.get(char, set())
    # union() returns a fresh set, so adding char never mutates the stored sets.
    result = pinyin_matches.union(stroke_matches)
    result.add(char)
    return result
class DRMM(nn.Module):
    """Histogram-based query/document relevance model (DRMM-style architecture).

    Scores a pair from histograms of query-term vs. document-term cosine
    similarities, combined through a term-gating softmax over query embeddings.
    """

    def __init__(self, word_embeddings: TextFieldEmbedder, bin_count: int):
        super(DRMM, self).__init__()
        self.word_embeddings = word_embeddings
        # Pairwise cosine similarity between query and document term embeddings.
        self.cosine_module = CosineMatrixAttention()
        self.bin_count = bin_count
        # Maps each per-term similarity histogram to a scalar matching score.
        self.matching_classifier = FeedForward(input_dim=bin_count, num_layers=2, hidden_dims=[bin_count, 1], activations=[Activation.by_name('tanh')(), Activation.by_name('tanh')()])
        # Term gating: one importance logit per query term from its embedding.
        self.query_gate = FeedForward(input_dim=self.word_embeddings.get_output_dim(), num_layers=2, hidden_dims=[self.word_embeddings.get_output_dim(), 1], activations=[Activation.by_name('tanh')(), Activation.by_name('tanh')()])
        self.query_softmax = MaskedSoftmax()

    def forward(self, query: Dict[(str, torch.Tensor)], document: Dict[(str, torch.Tensor)]) -> torch.Tensor:
        """Return one relevance score per batch entry."""
        # Padding/OOV masks: 2-D token-id tensors treat ids > 1 as real tokens
        # (presumably 0 = pad, 1 = OOV -- confirm against the vocabulary);
        # 3-D inputs use a nonzero-sum test instead.
        if (len(query['tokens'].shape) == 2):
            query_pad_oov_mask = (query['tokens'] > 1).float()
            document_pad_oov_mask = (document['tokens'] > 1).float()
        else:
            query_pad_oov_mask = (torch.sum(query['tokens'], 2) > 0).float()
            document_pad_oov_mask = (torch.sum(document['tokens'], 2) > 0).float()
        # Zero out embeddings of padded/OOV positions.
        query_embeddings = (self.word_embeddings(query) * query_pad_oov_mask.unsqueeze((- 1)))
        document_embeddings = (self.word_embeddings(document) * document_pad_oov_mask.unsqueeze((- 1)))
        # torch.histc has no batch support, hence the CPU round-trip and the
        # explicit per-(batch, query-term) loops below.
        cosine_matrix = self.cosine_module.forward(query_embeddings, document_embeddings).cpu()
        histogram_tensor = torch.empty((cosine_matrix.shape[0], cosine_matrix.shape[1], self.bin_count))
        for b in range(cosine_matrix.shape[0]):
            for q in range(cosine_matrix.shape[1]):
                histogram_tensor[(b, q)] = torch.histc(cosine_matrix[(b, q)], bins=self.bin_count, min=(- 1), max=1)
        histogram_tensor = histogram_tensor.to(device=query_embeddings.device)
        # log1p compresses the raw histogram counts before classification.
        classified_matches_per_query = self.matching_classifier(torch.log1p(histogram_tensor))
        query_gates_raw = self.query_gate(query_embeddings)
        # Masked softmax over query terms -> normalized gate weights.
        query_gates = self.query_softmax(query_gates_raw.squeeze((- 1)), query_pad_oov_mask).unsqueeze((- 1))
        # Gate-weighted sum of per-term match scores -> final relevance score.
        scores = torch.sum((classified_matches_per_query * query_gates), dim=1)
        return scores

    def get_param_stats(self):
        # No extra tracked statistics for this model.
        return 'DRMM: -'
def prune_model(model: nn.Module, test_set: data.DataLoader, watermark_set: List, number_of_classes: int) -> Dict[(float, Dict[(str, Any)])]:
    """Measure test and watermark accuracy after L1 pruning at several sparsity levels.

    The input model is never modified: each level prunes a deep copy.

    Fix: the Conv2d and Linear branches performed the identical call; merged
    into a single isinstance check over both layer types.

    :param model: trained model to evaluate.
    :param test_set: loader for the clean test data.
    :param watermark_set: watermark trigger samples.
    :param number_of_classes: class count forwarded to test_model.
    :return: {level: {'test': (score, per_class_dict), 'watermark': score}}.
    """
    pruning_levels = [0.01, 0.05, 0.1, 0.25, 0.4, 0.5, 0.75, 0.9]
    pruning_results = {}
    # Baseline accuracies before any pruning (logged only).
    log.info('Accuracy before pruning:')
    _ = test_model(model, test_set, number_of_classes)
    _ = test_watermark(model, watermark_set)
    for level in pruning_levels:
        model_local = copy.deepcopy(model)
        for (_, module) in model_local.named_modules():
            # L1 unstructured pruning of the weight tensor of every conv/linear layer.
            if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
                prune.l1_unstructured(module, name='weight', amount=level)
        log.info('Testing with pruning level {}.'.format(level))
        (test_float_score, test_dict_score) = test_model(model_local, test_set, number_of_classes)
        watermark_float_score = test_watermark(model_local, watermark_set)
        pruning_results[level] = {'test': (test_float_score, test_dict_score), 'watermark': watermark_float_score}
    return pruning_results
def contrastive_loss(sd, l, margin=1.0, eps=0.0001):
    """Contrastive loss computed from squared pair distances.

    :param sd: squared distance between the two embeddings of a pair.
    :param l: pair label (1 = similar, 0 = dissimilar).
    :param margin: hinge margin applied to the eps-stabilized distance.
    :param eps: added before the square root for numerical stability.
    """
    distance = (sd + eps) ** 0.5
    hinge = F.maximum_scalar(margin - distance, 0)
    similar_cost = l * sd
    dissimilar_cost = (1 - l) * (hinge ** 2)
    return similar_cost + dissimilar_cost
class BlueEmojiBar(IncrementalBar):
    """Incremental progress bar with a percentage suffix and custom fill phases."""
    # Shown after the bar, e.g. '42%'.
    suffix = '%(percent)d%%'
    bar_prefix = ' '
    bar_suffix = ' '
    # NOTE(review): the phase glyphs render as empty strings here -- the
    # original emoji characters appear lost to an encoding issue; confirm
    # against upstream before relying on these values.
    phases = (u'', u'', u'')
def get_fused_adam_class():
    """Return the best available FusedAdam implementation, or None.

    Preference order: the standalone fused_adam_cuda kernel (v1 wrapper),
    then apex's FusedAdam (v2 wrapper); None when neither is installed.
    """
    global fused_adam_cuda
    import importlib
    try:
        # Cache the kernel module at module scope for later use by the wrapper.
        fused_adam_cuda = importlib.import_module('fused_adam_cuda')
    except ImportError:
        pass
    else:
        return FusedAdamV1
    try:
        from apex.optimizers import FusedAdam as _FusedAdam
    except ImportError:
        return None
    return FusedAdamV2
def run_on_one_sequence(sess, model, batch_img, args):
    """Run prediction plus guided-backprop/conv-layer gradients for one batch, then visualize.

    NOTE(review): depends on module-level graph tensors and values (images,
    labels, batch_label, gb_grad, target_conv_layer, target_conv_layer_grad)
    -- confirm they are defined in the enclosing module before calling.
    """
    with sess.as_default():
        # Forward pass only (learning_phase 0 = inference).
        prob = sess.run(model.preds, feed_dict={images: batch_img, K.learning_phase(): 0})
        print(prob)
        # Fetch guided-backprop gradient, target conv activations and their gradient.
        (gb_grad_value, target_conv_layer_value, target_conv_layer_grad_value) = sess.run([gb_grad, target_conv_layer, target_conv_layer_grad], feed_dict={images: batch_img, labels: batch_label, K.learning_phase(): 0})
        utils.visualize_overlays(batch_img, target_conv_layer_value, target_conv_layer_grad_value, args)
def ucs4_to_utf8_aux():
    """Yield (byte shuffle, UTF-8 bit masks, output byte count) for each control byte.

    For every x in 0..255, four 2-bit fields of x (bit pairs selected below)
    each encode how many UTF-8 bytes (1..4) one of four UCS-4 code units
    needs; the generator emits the 16-entry byte-shuffle table and OR-mask
    table for that combination. NOTE(review): table semantics inferred from
    structure (looks like data for a SIMD converter) -- confirm against the
    consuming kernel.
    """
    for x in range(256):
        shuffle = []
        utf8bits = []
        output_bytes = 0

        def bit(k):
            # k-th bit of the current control byte x.
            return int(bool(((1 << k) & x)))

        def code(bit1, bit0):
            # Combine two bits into a 0..3 length code.
            return ((2 * bit1) + bit0)
        ab = code(bit(1), bit(0))
        cd = code(bit(5), bit(4))
        ef = code(bit(3), bit(2))
        gh = code(bit(7), bit(6))
        for (i, count) in enumerate([ab, cd, ef, gh]):
            if (count == 0):
                # 1-byte sequence (ASCII): low byte passes through, no mask bits.
                shuffle.append(((4 * i) + 0))
                utf8bits.append(0)
                utf8bits.append(0)
                utf8bits.append(0)
                utf8bits.append(0)
                output_bytes += 1
            elif (count == 1):
                # 2-byte sequence: continuation (0x80) then 2-byte lead (0xC0).
                shuffle.append(((4 * i) + 1))
                shuffle.append(((4 * i) + 0))
                utf8bits.append(128)
                utf8bits.append(192)
                utf8bits.append(0)
                utf8bits.append(0)
                output_bytes += 2
            elif (count == 2):
                # 3-byte sequence: two continuations then 3-byte lead (0xE0).
                shuffle.append(((4 * i) + 2))
                shuffle.append(((4 * i) + 1))
                shuffle.append(((4 * i) + 0))
                utf8bits.append(128)
                utf8bits.append(128)
                utf8bits.append(224)
                utf8bits.append(0)
                output_bytes += 3
            elif (count == 3):
                # 4-byte sequence: three continuations then 4-byte lead (0xF0).
                shuffle.append(((4 * i) + 3))
                shuffle.append(((4 * i) + 2))
                shuffle.append(((4 * i) + 1))
                shuffle.append(((4 * i) + 0))
                utf8bits.append(128)
                utf8bits.append(128)
                utf8bits.append(128)
                utf8bits.append(240)
                output_bytes += 4
            else:
                assert False
        # Pad the shuffle table to 16 entries -- presumably with 128 as the
        # "produce zero" selector; confirm assure_array_length's contract.
        assure_array_length(shuffle, 16, 128)
        assert (len(utf8bits) == 16)
        assert (len(shuffle) == 16)
        (yield (shuffle, utf8bits, output_bytes))
class DeployTest(tf.test.TestCase):
    """Tests for model_deploy.deploy: clone training and CPU placement of summaries."""

    def setUp(self):
        # Tiny linearly separable dataset: the hot input index is
        # 2 * label + a noise bit, so a batch-norm classifier can fit it fast.
        np.random.seed(0)
        self._inputs = np.zeros((16, 4))
        self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
        self._logdir = self.get_temp_dir()
        for i in range(16):
            j = int(((2 * self._labels[i]) + np.random.randint(0, 2)))
            self._inputs[(i, j)] = 1

    def testLocalTrainOp(self):
        """Two CPU clones: check op names, update-op count and that training converges."""
        g = tf.Graph()
        with g.as_default():
            tf.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            model_args = (tf_inputs, tf_labels)
            deploy_config = model_deploy.DeploymentConfig(num_clones=2, clone_on_cpu=True)
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
            # Graph must be empty of slim variables before deployment.
            self.assertEqual(slim.get_variables(), [])
            model = model_deploy.deploy(deploy_config, model_fn, model_args, optimizer=optimizer)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            # 4 update ops with 2 clones -- presumably two BN updates per clone.
            self.assertEqual(len(update_ops), 4)
            self.assertEqual(len(model.clones), 2)
            self.assertEqual(model.total_loss.op.name, 'total_loss')
            self.assertEqual(model.summary_op.op.name, 'summary_op/summary_op')
            self.assertEqual(model.train_op.op.name, 'train_op')
            with tf.Session() as sess:
                sess.run(tf.initialize_all_variables())
                moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]
                moving_variance = tf.contrib.framework.get_variables_by_name('moving_variance')[0]
                initial_loss = sess.run(model.total_loss)
                (initial_mean, initial_variance) = sess.run([moving_mean, moving_variance])
                # Batch-norm statistics must start at their init values.
                self.assertAllClose(initial_mean, [0.0, 0.0, 0.0, 0.0])
                self.assertAllClose(initial_variance, [1.0, 1.0, 1.0, 1.0])
                for _ in range(10):
                    sess.run(model.train_op)
                final_loss = sess.run(model.total_loss)
                # Training should shrink the loss at least 10x on this toy task.
                self.assertLess(final_loss, (initial_loss / 10.0))
                (final_mean, final_variance) = sess.run([moving_mean, moving_variance])
                # Moving statistics must have been updated by the train op.
                self.assertAllClose(final_mean, [0.125, 0.25, 0.375, 0.25])
                self.assertAllClose(final_variance, [0.109375, 0.1875, 0.234375, 0.1875])

    def testNoSummariesOnGPU(self):
        """With GPU clones and an optimizer, all summary inputs must live on CPU."""
        with tf.Graph().as_default():
            deploy_config = model_deploy.DeploymentConfig(num_clones=2)

            def ModelFn():
                inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
                reg = tf.contrib.layers.l2_regularizer(0.001)
                tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)
            model = model_deploy.deploy(deploy_config, ModelFn, optimizer=tf.train.GradientDescentOptimizer(1.0))
            self.assertTrue(model.summary_op.op.inputs)
            for inp in model.summary_op.op.inputs:
                self.assertEqual('/device:CPU:0', inp.device)

    def testNoSummariesOnGPUForEvals(self):
        """Same CPU-placement guarantee for eval-style deployment (no optimizer)."""
        with tf.Graph().as_default():
            deploy_config = model_deploy.DeploymentConfig(num_clones=2)

            def ModelFn():
                inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
                reg = tf.contrib.layers.l2_regularizer(0.001)
                tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)
            model = model_deploy.deploy(deploy_config, ModelFn)
            self.assertTrue(model.summary_op.op.inputs)
            for inp in model.summary_op.op.inputs:
                self.assertEqual('/device:CPU:0', inp.device)
def _checkpointed_forward(original_forward, offload_to_cpu, *args, **kwargs):
    """Run original_forward under activation checkpointing.

    Kwargs are flattened for the autograd Function and non-tensor outputs,
    when present, are restored from the packed representation afterwards.

    :param original_forward: the wrapped forward callable.
    :param offload_to_cpu: whether checkpointed activations are offloaded to CPU.
    """
    (kwarg_keys, flat_args) = pack_kwargs(*args, **kwargs)
    parent_ctx_dict = {'offload': offload_to_cpu}
    output = CheckpointFunction.apply(original_forward, parent_ctx_dict, kwarg_keys, *flat_args)
    if isinstance(output, torch.Tensor):
        return output
    # Non-tensor outputs were packed by CheckpointFunction; restore them.
    packed = parent_ctx_dict['packed_non_tensor_outputs']
    if packed:
        output = unpack_non_tensors(output, packed)
    return output
class Partition3(nn.Module):
    """Auto-generated pipeline partition 3: Linear[h1_layer] -> BatchNorm1d[bn2].

    Holds the scoped layers/tensors assigned to this stage and runs them on
    its device (default 'cuda:3').
    """
    # Fully-qualified scopes of the layers owned by this partition, in order.
    LAYER_SCOPES = ['Net/Linear[h1_layer]', 'Net/BatchNorm1d[bn2]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:3'):
        super().__init__()
        # Register the partition's layers as l_0, l_1, ... in scope order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Register standalone tensors as parameters (p_i) or buffers (b_i).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Expected flattened-input structure consumed by unflatten().
        self.input_structure = [1]
        # Maps generated names back to the original model's attribute names.
        self.lookup = {'l_0': 'h1_layer', 'l_1': 'bn2'}
        self.to(self.device)

    def forward(self, *args):
        # Rebuild the single input tensor, then apply the two layers in order.
        x0 = unflatten(args, self.input_structure)[0]
        t_0 = self.l_0(x0)
        t_0 = self.l_1(t_0)
        return (t_0,)

    # The following delegate to module-level helpers so all generated
    # partitions share one state-dict / naming / device-move implementation.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)