code
stringlengths
101
5.91M
class Function(with_metaclass(FunctionMeta, _C._FunctionBase, _ContextMethodMixin, _HookMixin)):
    """Base class for custom autograd operations.

    Subclasses override :meth:`forward` and :meth:`backward`.  Calling an
    instance dispatches straight into the C-level ``_do_forward`` machinery.
    """

    # Calling the object runs the C-implemented forward pass.
    __call__ = _C._FunctionBase._do_forward
    # Subclasses that are safe for the tracer flip this to True.
    is_traceable = False

    def forward(ctx, *args, **kwargs):
        """Compute the forward pass; must be overridden by subclasses."""
        raise NotImplementedError

    def backward(ctx, *grad_outputs):
        """Compute gradients w.r.t. the inputs; must be overridden by subclasses."""
        raise NotImplementedError
def simulate_calibration_ref(n=1000, fixm=False, fixz=False, fixalign=False):
    """Draw ``n`` calibration images from the prior.

    The ``fix*`` flags freeze host mass / host redshift / alignment instead
    of sampling them.  Returns a dict with keys ``theta``, ``x`` and ``z``.
    """
    logger.info('Generating calibration data with %s images from prior', n)
    f_sub, beta = draw_params_from_prior(n)
    theta, x, _, _, _, z = augmented_data(
        f_sub=f_sub,
        beta=beta,
        n_images=n,
        mine_gold=False,
        draw_host_mass=not fixm,
        draw_host_redshift=not fixz,
        draw_alignment=not fixalign,
    )
    return {'theta': theta, 'x': x, 'z': z}
def parse_batch(batch, keys=None):
    """Extract the requested ``keys`` from a batch dict (or a dict of dicts).

    Non-dict batches are returned untouched.  When exactly one key is
    requested, the bare value is returned instead of a one-element list.
    """
    keys = keys or ['image', 'target']
    assert isinstance(keys, list)
    if not isinstance(batch, dict):
        return batch

    def _pick(mapping):
        # Collapse a singleton list down to its sole element.
        picked = [mapping.get(key) for key in keys]
        return picked[0] if len(picked) == 1 else picked

    if all(isinstance(value, dict) for value in batch.values()):
        # Nested batch: resolve the keys inside every sub-dict.
        return {name: _pick(sub) for name, sub in batch.items()}
    return _pick(batch)
class TestULP(object):
    """Tests for ``assert_array_max_ulp`` across dtypes and special values."""

    def test_equal(self):
        # Identical arrays are exactly 0 ULPs apart.
        x = np.random.randn(10)
        assert_array_max_ulp(x, x, maxulp=0)

    def test_single(self):
        # One float32 eps of separation must fit in a 20-ULP budget.
        x = np.ones(10).astype(np.float32)
        x += (0.01 * np.random.randn(10).astype(np.float32))
        eps = np.finfo(np.float32).eps
        assert_array_max_ulp(x, (x + eps), maxulp=20)

    def test_double(self):
        # One float64 eps of separation must fit in a 200-ULP budget.
        x = np.ones(10).astype(np.float64)
        x += (0.01 * np.random.randn(10).astype(np.float64))
        eps = np.finfo(np.float64).eps
        assert_array_max_ulp(x, (x + eps), maxulp=200)

    def test_inf(self):
        # inf and the largest finite value are close in ULP terms.
        for dt in [np.float32, np.float64]:
            inf = np.array([np.inf]).astype(dt)
            big = np.array([np.finfo(dt).max])
            assert_array_max_ulp(inf, big, maxulp=200)

    def test_nan(self):
        # NaN must never compare within any ULP budget of finite/inf values.
        for dt in [np.float32, np.float64]:
            if (dt == np.float32):
                maxulp = 1000000.0
            else:
                # NOTE(review): 0.0 looks like a garbled constant (upstream
                # versions use a large budget here); the assertions still hold
                # because NaN comparisons fail at any budget — confirm intent.
                maxulp = .0
            inf = np.array([np.inf]).astype(dt)
            nan = np.array([np.nan]).astype(dt)
            big = np.array([np.finfo(dt).max])
            tiny = np.array([np.finfo(dt).tiny])
            zero = np.array([np.PZERO]).astype(dt)
            nzero = np.array([np.NZERO]).astype(dt)
            assert_raises(AssertionError, (lambda : assert_array_max_ulp(nan, inf, maxulp=maxulp)))
            assert_raises(AssertionError, (lambda : assert_array_max_ulp(nan, big, maxulp=maxulp)))
            assert_raises(AssertionError, (lambda : assert_array_max_ulp(nan, tiny, maxulp=maxulp)))
            assert_raises(AssertionError, (lambda : assert_array_max_ulp(nan, zero, maxulp=maxulp)))
            assert_raises(AssertionError, (lambda : assert_array_max_ulp(nan, nzero, maxulp=maxulp)))
def get_learner_data(stage1_result_dir, pred_datatrack, use_cv_result, use_upper_lower, column_tag, k_cv=K_CV):
    """Load per-fold stage-1 predictions and re-tag their columns.

    With ``use_cv_result`` the fold validation frames are concatenated;
    otherwise fold predictions are averaged.  Returns ``(train, test)``
    frames holding only the renamed prediction columns.
    """
    df_vals, df_tests = [], []
    for i_cv in range(k_cv):
        fold_dir = stage1_result_dir / str(i_cv)
        if use_cv_result:
            df_vals.append(pd.read_csv(fold_dir / 'val.csv', index_col=0))
            df_tests.append(pd.read_csv(fold_dir / 'test.csv', index_col=0))
        else:
            pred_dir = fold_dir / f'pred-{pred_datatrack}'
            df_vals.append(pd.read_csv(pred_dir / 'train.csv', index_col=0))
            df_tests.append(pd.read_csv(pred_dir / 'test.csv', index_col=0))
    # CV validation folds are disjoint, so concatenate; predictions are averaged.
    if use_cv_result:
        df_train = pd.concat(df_vals)
    else:
        df_train = sum(df_vals) / len(df_vals)
    df_test = sum(df_tests) / len(df_tests)
    # Start from empty frames that keep only the index.
    df_train_new = df_train[[]].copy()
    df_test_new = df_test[[]].copy()
    if use_upper_lower:
        for prefix, source in (('mean', 'pred_mos'), ('lower', 'lower_mos'), ('upper', 'upper_mos')):
            col_name = prefix + '-' + column_tag
            df_train_new[col_name] = df_train[source].copy()
            df_test_new[col_name] = df_test[source].copy()
    else:
        col_name = 'pred-' + column_tag
        df_train_new[col_name] = df_train['pred_mos'].copy()
        df_test_new[col_name] = df_test['pred_mos'].copy()
    return df_train_new, df_test_new
def run_sanity_check(cmd_args: Namespace, partitioner: PartitioningTask, analysis_config: AnalysisPipelineConfig, device='cpu', training=False, check_grads=True, ref_model=None, check_init=False):
    """Compare the partitioned pipeline against a reference model.

    Runs both on the same seeded input and checks outputs (and, optionally,
    per-parameter gradients and initialization determinism).  Returns True
    iff everything matched; mismatches are printed and dot graphs saved.

    NOTE(review): this body was reconstructed from a collapsed one-line
    source; the extent of the ``torch.no_grad()`` block is assumed to cover
    only the tensor moves (the reference forward must be outside it, or the
    later ``backward()`` would fail) — confirm against the original file.
    """
    # Best-effort determinism toggles; ignored on backends that lack them.
    try:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        try:
            torch.use_deterministic_algorithms(True)
        except:
            pass
    except:
        pass
    is_ok = True
    (args, kwargs) = get_input_args_kwargs(partitioner.get_input(cmd_args, analysis=False))
    assert (len(args) == 0), 'only kwargs are supported for sanity checks'
    if check_init:
        # Build the model twice under the same seed; outputs must agree.
        torch.cuda.synchronize()
        torch.manual_seed(0)
        model = partitioner.get_model(cmd_args)
        model.train(training)
        a1 = model(*args, **kwargs)
        del model
        torch.cuda.synchronize()
        torch.manual_seed(0)
        model = partitioner.get_model(cmd_args)
        model.train(training)
        a2 = model(*args, **kwargs)
        torch.cuda.synchronize()
        assert torch.allclose(a1, a2), ('intialization check failed' + str((a1, a2)))
        del a1, a2, model
    # Forward through the partitioned pipeline.
    torch.manual_seed(0)
    torch.cuda.synchronize()
    (output, (activations, req_grad)) = run_partitions_fwd(kwargs, analysis_config, device=device, return_info_for_bwd=True)
    torch.cuda.synchronize()
    assert ((len(output) == 1) and isinstance(output[0], torch.Tensor))
    output = output[0]
    if (device == 'cpu'):
        assert (not output.is_cuda)
    else:
        assert output.is_cuda
    # Forward through the (optionally supplied) reference model.
    torch.manual_seed(0)
    torch.cuda.synchronize()
    if (ref_model is None):
        ref_model = partitioner.get_model(cmd_args)
    ref_model.to(device).train(training)
    torch.cuda.synchronize()
    with torch.no_grad():
        kwargs_to_ref_model = move_tensors(kwargs, device)
        args_to_ref_model = move_tensors(args, device)
        torch.cuda.synchronize()
    torch.manual_seed(0)
    ref_output = ref_model(*args_to_ref_model, **kwargs_to_ref_model)
    torch.cuda.synchronize()
    del kwargs_to_ref_model, args_to_ref_model
    ref_model = ref_model.cpu()
    assert isinstance(ref_output, torch.Tensor)
    if (device == 'cpu'):
        assert (not ref_output.is_cuda)
    else:
        assert ref_output.is_cuda
    assert (ref_output.device == output.device)
    if torch.allclose(output, ref_output):
        print(f''' outputs are the same in {('training' if training else 'evaluation')} ''')
        print(output, ref_output)
    else:
        # Mismatch: dump both autograd graphs for manual inspection.
        print(f''' outputs are not the same in {('training' if training else 'evaluation')} ''')
        print(output, ref_output)
        is_ok = False
        g1 = make_dot(output)
        g2 = make_dot(ref_output)
        g1.save('p_output')
        g2.save('ref_output')
        print('saved dot files: p_output ref_output')
    if check_grads:
        # Reference backward; collect grads (moved to CPU) and flag params
        # whose .grad tensor object is shared with another parameter.
        ref_output.backward()
        torch.cuda.synchronize()
        del ref_output
        ref_grads = dict()
        shared = dict()
        for (name, p) in ref_model.named_parameters():
            ref_grads[name] = p.grad = p.grad.cpu()
            if (p.grad in shared):
                print(f'{name} is {shared[p.grad]}')
            shared[p.grad] = name
        torch.cuda.synchronize()
        print()
        # Partitioned backward; collect grads per pipeline stage.
        output.backward()
        torch.cuda.synchronize()
        del output
        partitioned_grads = dict()
        shared = dict()
        for idx in range(cmd_args.n_partitions):
            for (name, p) in analysis_config.stage_to_model[idx].named_parameters():
                partitioned_grads[name] = p.grad = p.grad.cpu()
                if (p.grad in shared):
                    print(f'{name} is {shared[p.grad]}')
                shared[p.grad] = name
        torch.cuda.synchronize()
        for (name, g) in partitioned_grads.items():
            assert isinstance(g, torch.Tensor)
            if (not (name in ref_grads)):
                # Only the untied lm_head is allowed to be absent; compare it
                # against the reference's shared embedding gradient instead.
                msg = f'{name} is missing in ref_grads'
                assert (name == 'lm_head.weight')
                is_same = torch.allclose(ref_grads['shared_embed_weight'], partitioned_grads['lm_head.weight'])
                msg += (' but grad is the same' if is_same else ' and grad is different')
                print(msg)
            elif (not torch.allclose(g, ref_grads[name])):
                abs_error = torch.abs((g - ref_grads[name]))
                max_abs = abs_error.max()
                abs_error = (abs_error.sum() / abs_error.numel())
                print(f'{name} grad is different avg_abs {abs_error} N {g.numel()} max_abs {max_abs}')
                is_ok = False
            else:
                pass
    return is_ok
class DLRep(ComposableProofStmt):
    """Discrete-logarithm representation statement: lhs == sum_i x_i * g_i.

    ``expr`` supplies the bases g_i and secret variables x_i; ``lhs`` is the
    claimed group element.
    """

    verifier_cls = DLRepVerifier

    def __init__(self, lhs, expr, simulated=False):
        """Build the statement from an Expression.

        Raises TypeError when ``expr`` is not an Expression, and
        InvalidExpression when the bases live in different groups.
        """
        if isinstance(expr, Expression):
            self.bases = list(expr.bases)
            self.secret_vars = list(expr.secrets)
        else:
            raise TypeError('Expected an Expression. Got: {}'.format(expr))
        # All bases must share one group for the linear combination to exist.
        test_group = self.bases[0].group
        for g in self.bases:
            if (g.group != test_group):
                raise InvalidExpression('All bases should come from the same group', g.group)
        # Pre-collect the secrets that already carry a value.
        self.secret_values = {}
        for sec in self.secret_vars:
            if (sec.value is not None):
                self.secret_values[sec] = sec.value
        self.lhs = lhs
        self.set_simulated(simulated)

    def get_prover(self, secrets_dict=None):
        """Return a DLRepProver, or None when simulating or secrets are missing."""
        if (secrets_dict is None):
            secrets_dict = {}
        self.secret_values.update(secrets_dict)
        secrets_dict = self.secret_values
        # NOTE(review): no-arg set_simulated() appears to act as a getter
        # here — confirm against the ComposableProofStmt base class.
        if (self.set_simulated() or (secrets_dict == {}) or any(((sec not in secrets_dict.keys()) for sec in set(self.secret_vars)))):
            return None
        # Normalize plain ints to Bn big numbers.
        for (name, sec) in secrets_dict.items():
            if (not isinstance(sec, Bn)):
                secrets_dict[name] = Bn(sec)
        return DLRepProver(self, secrets_dict)

    def get_proof_id(self, secret_id_map=None):
        """Extend the base proof identifier with this statement's lhs."""
        proof_id = super().get_proof_id(secret_id_map)
        return (proof_id + [self.lhs])

    def get_randomizers(self):
        """Draw one random group-order scalar per distinct secret variable."""
        output = {}
        order = self.bases[0].group.order()
        for sec in set(self.secret_vars):
            output.update({sec: order.random()})
        return output

    def recompute_commitment(self, challenge, responses):
        """Reconstruct the commitment: sum_i r_i * g_i - challenge * lhs."""
        commitment = (self.lhs.group.wsum(responses, self.bases) + ((- challenge) * self.lhs))
        return commitment

    def simulate_proof(self, responses_dict=None, challenge=None):
        """Produce a zero-knowledge transcript without knowing the secrets."""
        responses_dict = self.update_randomizers(responses_dict)
        if (challenge is None):
            challenge = get_random_num(CHALLENGE_LENGTH)
        responses = [responses_dict[m] for m in self.secret_vars]
        commitment = self.recompute_commitment(challenge, responses)
        return SimulationTranscript(commitment=commitment, challenge=challenge, responses=responses)
def style_doc_files(*files, max_len=119, check_only=False):
    """Restyle doc files and docstrings; return the list of changed files.

    Directories are recursed into; ``.mdx`` and ``.py`` files are restyled,
    anything else is skipped with a warning.  Raises ValueError when black
    cannot parse an embedded code example.
    """
    changed = []
    black_errors = []
    for file in files:
        if os.path.isdir(file):
            # Recurse into the directory, keeping sub-dirs and py/mdx files.
            # (Renamed from `files` — the original rebound its own parameter.)
            dir_files = [os.path.join(file, f) for f in os.listdir(file)]
            dir_files = [f for f in dir_files if (os.path.isdir(f) or f.endswith('.mdx') or f.endswith('.py'))]
            changed += style_doc_files(*dir_files, max_len=max_len, check_only=check_only)
        elif file.endswith('.mdx'):
            try:
                diff, black_error = style_mdx_file(file, max_len=max_len, check_only=check_only)
                if diff:
                    changed.append(file)
                if len(black_error) > 0:
                    # Fixed: the original used a literal "\m" escape here.
                    black_errors.append(f'There was a problem while formatting an example in {file} with black:\n{black_error}')
            except Exception:
                print(f'There is a problem in {file}.')
                raise
        elif file.endswith('.py'):
            try:
                diff, black_error = style_file_docstrings(file, max_len=max_len, check_only=check_only)
                if diff:
                    changed.append(file)
                if len(black_error) > 0:
                    # Fixed: the original used a literal "\m" escape here.
                    black_errors.append(f'There was a problem while formatting an example in {file} with black:\n{black_error}')
            except Exception:
                print(f'There is a problem in {file}.')
                raise
        else:
            warnings.warn(f"Ignoring {file} because it's not a py or an mdx file or a folder.")
    if len(black_errors) > 0:
        black_message = '\n\n'.join(black_errors)
        raise ValueError(((("Some code examples can't be interpreted by black, which means they aren't regular python:\n\n" + black_message) + '\n\nMake sure to fix the corresponding docstring or doc file, or remove the py/python after ``` if it ') + 'was not supposed to be a Python code sample.'))
    return changed
def check_core_pattern():
    """Return False (and warn) when core dumps are piped to an external handler.

    afl-fuzz requires /proc/sys/kernel/core_pattern to name a plain file
    ('core'), not a '|program' pipe, so crashes are detected reliably.
    """
    rv = True
    core_pattern_file = '/proc/sys/kernel/core_pattern'
    if os.path.exists(core_pattern_file):
        with open(core_pattern_file, 'r') as f:
            # startswith handles an empty file, where the original `[0]`
            # indexing raised IndexError.
            if f.readline().rstrip().startswith('|'):
                print(("[*] afl-fuzz requires 'echo core >%s'" % core_pattern_file))
                rv = False
    return rv
def test_ccprmod_one_support():
    """ccprmod must report competence ~1 when all mass sits on the true label."""
    one_hot_supports = [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]]
    true_labels = [2, 1]
    competences = ccprmod(one_hot_supports, true_labels)
    assert np.isclose(competences, 1, atol=0.01).all()
class RectTupleData():
    """Tuples of random line-segment images, cached as binary dumps on disk.

    Each sample is ``len_tuple`` binary images of IM_SIZE x IM_SIZE, each
    drawn with one straight line at a random angle theta in [0, pi).  The
    per-image label Y_sup is +/-1 by whether theta < pi/2; the tuple label Y
    is +/-1 by the parity of the count of such angles.
    """

    def __init__(self, len_tuple, DATA_PATH, n=N):
        self._len_tuple = len_tuple
        self._cur_ind = 0
        self._N = n
        try:
            # Fast path: reuse previously generated binary dumps.
            self._fid_X = open((DATA_PATH + ('/X_%d_%d.bin' % (n, len_tuple))), 'rb')
            self._fid_Y = open((DATA_PATH + ('/Y_%d_%d.bin' % (n, len_tuple))), 'rb')
            self._fid_Y_sup = open((DATA_PATH + ('/Y_sup_%d_%d.bin' % (n, len_tuple))), 'rb')
        except:
            # Dumps missing (or unreadable): generate the dataset and persist it.
            fid_X = open((DATA_PATH + ('/X_%d_%d.bin' % (n, len_tuple))), 'wb')
            fid_Y = open((DATA_PATH + ('/Y_%d_%d.bin' % (n, len_tuple))), 'wb')
            fid_Y_sup = open((DATA_PATH + ('/Y_sup_%d_%d.bin' % (n, len_tuple))), 'wb')
            self._X = np.zeros((len_tuple, IM_SIZE, IM_SIZE), dtype=np.float32)
            self._Y = 0
            self._Y_sup = np.zeros(len_tuple, dtype=np.int32)
            for i in range(n):
                # Reset the per-sample buffers in place.
                self._X *= 0
                self._Y = 0
                self._Y_sup *= 0
                sys.stderr.write(('\r%d/%d' % (i, n)))
                for k in range(len_tuple):
                    theta = np.random.uniform(0, np.pi)
                    unit_vec = np.array([np.cos(theta), np.sin(theta)])
                    self._Y_sup[k] = (1.0 if (theta < (np.pi / 2)) else (- 1.0))
                    self._Y += (1.0 if (theta < (np.pi / 2)) else 0.0)
                    center = (IM_SIZE * np.random.uniform(0, 1, size=2))
                    length = np.random.uniform(5, (IM_SIZE - 5))
                    # Rasterize the segment by stepping both ways from center.
                    for l in range(int((length // 2))):
                        for sgn in [(- 1), 1]:
                            pt = (center + ((sgn * unit_vec) * l)).astype(np.int32)
                            # product([0], repeat=2) yields only (0, 0) — the
                            # thickening offsets are effectively disabled.
                            for (dx, dy) in product([0], repeat=2):
                                ptpt = (pt + np.array([dx, dy]))
                                if all(((0 <= ptpt) * (ptpt < IM_SIZE))):
                                    self._X[(k, ptpt[0], ptpt[1])] = 1
                # Parity of the sub-pi/2 count, mapped to {-1, +1}.
                self._Y = ((- 1) + (2 * np.mod(self._Y, 2))).astype(np.float32)
                self._X.tofile(fid_X)
                fid_X.flush()
                self._Y.tofile(fid_Y)
                fid_Y.flush()
                self._Y_sup.tofile(fid_Y_sup)
                fid_Y_sup.flush()
            fid_X.close()
            fid_Y.close()
            fid_Y_sup.close()
            # Reopen the freshly written dumps for reading.
            self._fid_X = open((DATA_PATH + ('/X_%d_%d.bin' % (n, len_tuple))), 'rb')
            self._fid_Y = open((DATA_PATH + ('/Y_%d_%d.bin' % (n, len_tuple))), 'rb')
            self._fid_Y_sup = open((DATA_PATH + ('/Y_sup_%d_%d.bin' % (n, len_tuple))), 'rb')

    def next_batch(self, BATCH_SIZE):
        """Read the next batch from the dumps, rewinding to the start on EOF.

        The 4x multipliers are the byte sizes of float32/int32 elements.
        """
        self._X = np.frombuffer(self._fid_X.read(((((4 * BATCH_SIZE) * self._len_tuple) * IM_SIZE) * IM_SIZE)), dtype=np.float32)
        self._Y = np.frombuffer(self._fid_Y.read((4 * BATCH_SIZE)), dtype=np.float32)
        self._Y_sup = np.frombuffer(self._fid_Y_sup.read(((4 * BATCH_SIZE) * self._len_tuple)), dtype=np.int32)
        if (self._X.size < (((BATCH_SIZE * self._len_tuple) * IM_SIZE) * IM_SIZE)):
            # Short read: wrap around and re-read a full batch from the start.
            self._fid_X.seek(0)
            self._fid_Y.seek(0)
            self._fid_Y_sup.seek(0)
            self._X = np.frombuffer(self._fid_X.read(((((4 * BATCH_SIZE) * self._len_tuple) * IM_SIZE) * IM_SIZE)), dtype=np.float32)
            self._Y = np.frombuffer(self._fid_Y.read((4 * BATCH_SIZE)), dtype=np.float32)
            self._Y_sup = np.frombuffer(self._fid_Y_sup.read(((4 * BATCH_SIZE) * self._len_tuple)), dtype=np.int32)
        return (self._X.reshape([(- 1), IM_SIZE, IM_SIZE]), self._Y.reshape([(- 1)]), self._Y_sup.reshape([(- 1)]))
class RandomSized_new(object):
    """Randomly rescale an image/mask pair, then random-crop back to ``size``.

    Width and height are scaled independently by factors drawn from
    [scale1, scale2]; the mask uses nearest-neighbour resampling.
    """

    def __init__(self, size, scale1=0.5, scale2=2):
        self.size = size
        self.crop = RandomCrop_new(self.size)
        self.small_scale = scale1
        self.big_scale = scale2

    def __call__(self, sample):
        image = sample['image']
        label = sample['label']
        assert (image.size == label.size)
        # Draw width scale first, then height scale (order matters for RNG).
        new_w = int(random.uniform(self.small_scale, self.big_scale) * image.size[0])
        new_h = int(random.uniform(self.small_scale, self.big_scale) * image.size[1])
        resized_img = image.resize((new_w, new_h), Image.BILINEAR)
        resized_mask = label.resize((new_w, new_h), Image.NEAREST)
        return self.crop({'image': resized_img, 'label': resized_mask})
def matmul_delegation_test2(matrix0: dace.float32[(N, K)], matrix1: dace.float32[(K, M)], vector0: dace.float32[M], vector1: dace.float32[N], result: dace.float32[1]):
    """Chained matmul delegation: result = vector1 @ (matrix0 @ matrix1) @ vector0.

    Restores the '@' operators that were garbled away in the original, which
    was a syntax error (`(vector1 (matrix0 matrix1)) vector0`).
    """
    result[0] = ((vector1 @ (matrix0 @ matrix1)) @ vector0)
def calculate_cm(cm_dump_filepath: str, gt_filepath: str, n: int) -> np.ndarray:
    """Build an n x n confusion matrix (rows: ground truth, cols: prediction).

    Both inputs are ';'-separated CSV files whose rows are
    ``identifier;index``.  Raises ValueError when the number of predictions
    and truths differ.
    """
    cm = np.zeros((n, n), dtype=int)
    print(cm_dump_filepath, gt_filepath)  # debug trace of the input paths
    with open(cm_dump_filepath, 'r') as fp:
        reader = csv.reader(fp, delimiter=';', quotechar='"')
        predictions = list(reader)
    with open(gt_filepath) as fp:
        reader = csv.reader(fp, delimiter=';', quotechar='"')
        truths = list(reader)
    ident2truth_index = {identifier: int(truth_index) for identifier, truth_index in truths}
    if len(predictions) != len(truths):
        # Fixed: the original format string carried a stray trailing '"'.
        msg = 'len(predictions) = {} != {} = len(truths)'.format(len(predictions), len(truths))
        raise ValueError(msg)
    for ident, pred_index in predictions:
        cm[ident2truth_index[ident]][int(pred_index)] += 1
    return cm
class LowInkRandomLines(LowInkLine):
    """Augmentation inserting a random number of low-ink transparency lines."""

    def __init__(self, count_range=(5, 10), use_consistent_lines=True, noise_probability=0.1, p=1):
        super().__init__(use_consistent_lines=use_consistent_lines, noise_probability=noise_probability, p=p)
        self.count_range = count_range

    def __repr__(self):
        return f'LowInkRandomLines(count_range={self.count_range}, use_consistent_lines={self.use_consistent_lines}, p={self.p})'

    def __call__(self, image, layer=None, mask=None, keypoints=None, bounding_boxes=None, force=False):
        # Guard clause: when the augmentation is skipped, nothing is returned.
        if not (force or self.should_run()):
            return None
        image = image.copy()
        count = random.randint(self.count_range[0], self.count_range[1])
        for _ in range(count):
            # Need at least two rows to place a line strictly inside.
            if (image.shape[0] - 1) >= 1:
                image = self.add_transparency_line(image, random.randint(1, image.shape[0] - 1))
        extras = [mask, keypoints, bounding_boxes]
        # The original returns the 3-slot extras list whenever any slot is set.
        if any(item is not None for item in extras):
            return [image] + extras
        return image
class GaussianConnector(nn.Module):
    """Reparameterization-trick sampler: z = mu + sigma * eps with eps ~ N(0, I)."""

    def __init__(self, use_gpu):
        super(GaussianConnector, self).__init__()
        self.use_gpu = use_gpu

    def forward(self, mu, logvar):
        # Draw standard-normal noise shaped like logvar, cast to the right device.
        noise = th.randn(logvar.size())
        noise = cast_type(Variable(noise), FLOAT, self.use_gpu)
        sigma = th.exp(0.5 * logvar)
        return mu + sigma * noise
class WordEmbeddings():
    """Vocabulary + embedding table with <UNK> and <PADDING> sentinels.

    NOTE(review): ``load_based_word2cnt`` and ``load_from_file`` are invoked
    but not defined in this view — presumably defined elsewhere; they must
    populate ``id2word``/``word2id``/``embeddings`` before the sentinels are
    appended.
    """

    def __init__(self, file_name, word2cnt=None):
        # id <-> word maps plus a parallel list of embedding vectors.
        self.id2word = {}
        self.word2id = {}
        self.embeddings = []
        if word2cnt:
            self.load_based_word2cnt(file_name, word2cnt)
        else:
            self.load_from_file(file_name)
        # <UNK> gets a random unit-norm vector of the same dimensionality.
        self.word2id['<UNK>'] = len(self.word2id)
        self.id2word[self.word2id['<UNK>']] = '<UNK>'
        self.embeddings.append(self.get_init_vector(len(self.embeddings[0])))
        # <PADDING> gets the all-zero vector.
        self.word2id['<PADDING>'] = len(self.word2id)
        self.id2word[self.word2id['<PADDING>']] = '<PADDING>'
        self.embeddings.append(self.get_zero_vector(len(self.embeddings[0])))

    def get_unk_id(self):
        """Return the id of the unknown-word sentinel."""
        return self.word2id['<UNK>']

    def get_padding_id(self):
        """Return the id of the padding sentinel."""
        return self.word2id['<PADDING>']

    def words_to_ids(self, words, maxlen=50, padding=True):
        """Map words to ids; unknown words map to <UNK>.

        With ``padding`` the id list is padded/truncated to ``maxlen`` and
        the returned length is the pre-padding length capped at ``maxlen``.
        Returns ``(ids, length)``.
        """
        ids = []
        for w in words:
            if (not (w in self.word2id)):
                ids.append(self.word2id['<UNK>'])
                continue
            ids.append(self.word2id[w])
        length = len(ids)
        if padding:
            while (len(ids) < maxlen):
                ids.append(self.word2id['<PADDING>'])
            ids = ids[:maxlen]
            # Cap the reported length at the (possibly truncated) id count.
            length = min(length, len(ids))
        return (ids, length)

    def ids_to_words(self, ids):
        """Map ids back to their words (KeyError on unknown ids)."""
        words = []
        for i in ids:
            words.append(self.id2word[i])
        return words

    def get_init_vector(self, dim):
        """Return a random vector of dimension ``dim`` normalized to unit length."""
        scale = 0.1
        vec = np.random.uniform(low=(- scale), high=scale, size=[dim])
        vec = (vec / np.sqrt(np.sum((vec * vec))))
        # Sanity check: the norm should be ~1 after normalization.
        assert (abs((np.sum((vec * vec)) - 1.0)) < 0.1)
        return list(vec)

    def get_embeddings(self):
        """Return the embedding table as a (vocab, dim) numpy array."""
        return np.array(self.embeddings)

    def get_zero_vector(self, dim):
        """Return the all-zero vector of dimension ``dim``."""
        return ([0.0] * dim)
def _remote_method(method, rref, *args, **kwargs):
    """Asynchronously invoke ``method`` on the owner of ``rref``.

    The method and rref are prepended to the positional args so the remote
    ``_call_method`` trampoline can dispatch them.  Returns a Future.
    """
    call_args = [method, rref, *args]
    return rpc.rpc_async(rref.owner(), _call_method, args=call_args, kwargs=kwargs)
def process_fa_arman(paths, short_name):
    """Prepare the Persian Arman NER corpus.

    Splits fold-1 training data into train/dev, copies the test fold, and
    converts everything to the JSON format.  Raises FileNotFoundError when
    the corpus is absent or still zipped.
    """
    assert short_name == 'fa_arman'
    language = 'fa'
    corpus_dir = os.path.join(paths['NERBASE'], 'PersianNER')
    train_input = os.path.join(corpus_dir, 'train_fold1.txt')
    test_input = os.path.join(corpus_dir, 'test_fold1.txt')
    if not (os.path.exists(train_input) and os.path.exists(test_input)):
        zip_path = os.path.join(corpus_dir, 'ArmanPersoNERCorpus.zip')
        if os.path.exists(zip_path):
            raise FileNotFoundError('Please unzip the file {}'.format(zip_path))
        raise FileNotFoundError('Cannot find the arman corpus in the expected directory: {}'.format(corpus_dir))
    out_dir = paths['NER_DATA_DIR']
    test_output = os.path.join(out_dir, ('%s.test.bio' % short_name))
    split_wikiner(out_dir, train_input, prefix=short_name, train_fraction=0.8, test_section=False)
    shutil.copy2(test_input, test_output)
    convert_bio_to_json(out_dir, out_dir, short_name)
def Xception(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000):
    """Instantiate the Xception architecture.

    TensorFlow backend only; requires the "channels_last" data format (it is
    temporarily forced on and restored at the end if needed).

    Args:
        include_top: include the final Dense classifier.
        weights: None or 'imagenet'.
        input_tensor: optional Keras tensor to use as input.
        input_shape: optional shape tuple (validated against 299x299 default).
        pooling: 'avg' / 'max' / None global pooling when include_top is False.
        classes: number of output classes (must be 1000 for imagenet top).

    Returns:
        A Keras ``Model``.

    Raises:
        ValueError for invalid ``weights``/``classes`` combinations,
        RuntimeError on non-TensorFlow backends.
    """
    # --- argument validation -------------------------------------------------
    if (weights not in {'imagenet', None}):
        raise ValueError('The `weights` argument should be either `None` (random initialization) or `imagenet` (pre-training on ImageNet).')
    if ((weights == 'imagenet') and include_top and (classes != 1000)):
        raise ValueError('If using `weights` as imagenet with `include_top` as true, `classes` should be 1000')
    if (K.backend() != 'tensorflow'):
        raise RuntimeError('The Xception model is only available with the TensorFlow backend.')
    # Force channels_last, remembering the previous format so it can be restored.
    if (K.image_data_format() != 'channels_last'):
        warnings.warn('The Xception model is only available for the input data format "channels_last" (width, height, channels). However your settings specify the default data format "channels_first" (channels, width, height). You should set `image_data_format="channels_last"` in your Keras config located at ~/.keras/keras.json. The model being returned right now will expect inputs to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None
    input_shape = _obtain_input_shape(input_shape, default_size=299, min_size=71, data_format=K.image_data_format(), require_flatten=False, weights=weights)
    if (input_tensor is None):
        img_input = Input(shape=input_shape)
    elif (not K.is_keras_tensor(input_tensor)):
        img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor
    # --- entry flow ----------------------------------------------------------
    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)
    # Each block uses a strided 1x1 conv as the residual shortcut.
    residual = Conv2D(128, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
    x = layers.add([x, residual])
    residual = Conv2D(256, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
    x = layers.add([x, residual])
    residual = Conv2D(728, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
    x = layers.add([x, residual])
    # --- middle flow: 8 identical residual blocks (blocks 5-12) --------------
    for i in range(8):
        residual = x
        prefix = ('block' + str((i + 5)))
        x = Activation('relu', name=(prefix + '_sepconv1_act'))(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=(prefix + '_sepconv1'))(x)
        x = BatchNormalization(name=(prefix + '_sepconv1_bn'))(x)
        x = Activation('relu', name=(prefix + '_sepconv2_act'))(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=(prefix + '_sepconv2'))(x)
        x = BatchNormalization(name=(prefix + '_sepconv2_bn'))(x)
        x = Activation('relu', name=(prefix + '_sepconv3_act'))(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=(prefix + '_sepconv3'))(x)
        x = BatchNormalization(name=(prefix + '_sepconv3_bn'))(x)
        x = layers.add([x, residual])
    # --- exit flow -----------------------------------------------------------
    residual = Conv2D(1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
    x = layers.add([x, residual])
    x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)
    x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)
    # --- classifier / pooling head ------------------------------------------
    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    elif (pooling == 'avg'):
        x = GlobalAveragePooling2D()(x)
    elif (pooling == 'max'):
        x = GlobalMaxPooling2D()(x)
    # Anchor the model at the original input tensor when one was supplied.
    if (input_tensor is not None):
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = Model(inputs, x, name='xception')
    # --- pretrained weights and data-format restoration ----------------------
    if (weights == 'imagenet'):
        if include_top:
            weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels.h5', TF_WEIGHTS_PATH, cache_subdir='models', file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
        else:
            weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models', file_hash='b0042744bf5b25fce3cb969f33bebb97')
        model.load_weights(weights_path)
    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
def getILD(category_id, recList, reverse_item):
    """Intra-List Diversity: fraction of ordered item pairs in ``recList``
    whose categories differ.

    Args:
        category_id: mapping item-key -> category.
        recList: recommended item ids.
        reverse_item: mapping item id -> item-key for ``category_id``.

    Returns 0.0 for lists with fewer than two items (the original raised
    ZeroDivisionError there).
    """
    n = len(recList)
    if n < 2:
        return 0.0
    # Hoist the double dict lookup out of the O(n^2) pair loop.
    categories = [category_id[reverse_item[item]] for item in recList]
    score = sum(
        1
        for i in range(n)
        for j in range(n)
        if j != i and categories[i] != categories[j]
    )
    return score / (n * (n - 1))
class TestImbalance(unittest.TestCase):
    """Round-trip test: CorrelationAnalyzer explanations must survive JSON."""

    def test(self):
        feature_names = ['Age', 'Workclass', 'fnlwgt', 'Education', 'Education-Num', 'Marital Status', 'Occupation', 'Relationship', 'Race', 'Sex', 'Capital Gain', 'Capital Loss', 'Hours per week', 'Country', 'label']
        data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../datasets')
        data = np.genfromtxt(os.path.join(data_dir, 'adult.data'), delimiter=', ', dtype=str)
        categorical = [feature_names[i] for i in (1, 3, 5, 6, 7, 8, 9, 13)]
        tabular_data = Tabular(data, feature_columns=feature_names, categorical_columns=categorical, target_column='label')
        explanations = CorrelationAnalyzer(tabular_data).explain()
        explanations.plot()
        serialized = explanations.to_json()
        restored = ExplanationBase.from_json(serialized)
        # Serializing the restored object must reproduce the original JSON.
        self.assertEqual(serialized, restored.to_json())
def parse_detail_file(dict_exp, file_path) -> defaultdict:
    """Parse a detail file into cumulative (x, value) series per experiment pair.

    Two-token lines switch the current experiment: a '<name> 1' line looks up
    the next combo containing <name> and pairs it with its background
    experiment.  Longer lines append a data point whose x is the running sum
    of column 4 and whose y is column 2.  Exits the process on format errors.

    NOTE(review): nesting reconstructed from a collapsed one-line source —
    the bounds check is assumed to live inside the `while`, and the `elif`
    to attach to the `values[1] == '1'` test.
    """
    combos = generate_combos()
    i = 0
    with open(os.path.join(os.curdir, file_path)) as f:
        lines = f.readlines()
    curr_exp = None
    for line in lines:
        values = line.split()
        if (len(values) == 2):
            if (values[1] == '1'):
                curr_exp = values[0]
                # Advance to the next combo mentioning this experiment.
                while (curr_exp not in combos[i]):
                    i += 1
                    if (i == len(combos)):
                        print(f'detail file {file_path} badly formatted')
                        print(f'{curr_exp} not in combos {combos}')
                        sys.exit(1)
                # The combo's other member is the background experiment.
                background_exp = set(combos[i])
                background_exp.remove(curr_exp)
                background_exp = background_exp.pop()
                curr_exp = (curr_exp, background_exp)
                i += 1
            elif (values[0] not in curr_exp):
                print(f'detail file {file_path} badly formatted')
                sys.exit(1)
        else:
            # Data line: needs a current experiment to attach to.
            if (curr_exp == None):
                print(f'detail file {file_path} badly formatted')
                sys.exit(1)
            # x is cumulative: previous x plus this line's column-4 delta.
            x = (0 if (len(dict_exp[curr_exp]) == 0) else (dict_exp[curr_exp][(- 1)][0] + float(values[4])))
            dict_exp[curr_exp].append([x, float(values[2])])
    return dict_exp
class RandomSideObstacleSpaceInvadersWorld(SpaceInvadersWorld):
    """Space Invaders world that spawns a side obstacle on a random edge each reset."""

    def reset_world(self):
        super(RandomSideObstacleSpaceInvadersWorld, self).reset_world()
        self.reset_obstacle()

    def reset_obstacle(self):
        # Remove any previous obstacle before spawning the next one.
        if hasattr(self, 'obstacle'):
            self.obstacle.kill()
        side = self.np_random.choice(['left', 'right'])
        width = int(self.np_random.uniform(-8, 2))
        x = width if side == 'left' else self._width - width
        self.obstacle = SideObstacle(world=self, position=(x, self._height / 2))
        self._batch.add(self.obstacle, z=1)
class CARHead(torch.nn.Module):
    """Classification/regression head: 1x1 feature fusion + twin conv towers."""

    def __init__(self, in_channels, out_channels, cls_out_num_classes):
        super(CARHead, self).__init__()
        # 1x1 fusion conv: concatenated features (2*in_channels) -> out_channels.
        self.fi = nn.Sequential(nn.Conv2d((in_channels * 2), out_channels, 1, 1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
        cls_tower = []
        reg_tower = []
        # Three conv+GroupNorm+ReLU stages per tower.
        # NOTE(review): the towers map in_channels -> in_channels while `fi`
        # outputs out_channels and the logits/bbox heads consume out_channels;
        # this only lines up when in_channels == out_channels.  The towers
        # also use padding=0 (shrinking spatial dims) while the heads use
        # padding=1 — confirm these are intended.
        for i in range(3):
            cls_tower.append(nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=0))
            cls_tower.append(nn.GroupNorm(32, in_channels))
            cls_tower.append(nn.ReLU())
            reg_tower.append(nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=0))
            reg_tower.append(nn.GroupNorm(32, in_channels))
            reg_tower.append(nn.ReLU())
        self.add_module('cls_tower', nn.Sequential(*cls_tower))
        self.add_module('reg_tower', nn.Sequential(*reg_tower))
        self.cls_logits = nn.Conv2d(out_channels, cls_out_num_classes, kernel_size=3, stride=1, padding=1)
        self.bbox_pred = nn.Conv2d(out_channels, 4, kernel_size=3, stride=1, padding=1)
        # Normal(0, 0.01) init for all convs, zero biases.
        for modules in [self.cls_tower, self.reg_tower, self.cls_logits, self.bbox_pred]:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    torch.nn.init.constant_(l.bias, 0)
        # Focal-loss style prior: bias so initial foreground prob ~= 0.01.
        prior_prob = 0.01
        bias_value = (- math.log(((1 - prior_prob) / prior_prob)))
        torch.nn.init.constant_(self.cls_logits.bias, bias_value)

    def forward(self, x):
        # Fuse, then run the two independent towers and their heads.
        x = self.fi(x)
        cls_tower = self.cls_tower(x)
        logits = self.cls_logits(cls_tower)
        reg_tower = self.reg_tower(x)
        bbox_reg = self.bbox_pred(reg_tower)
        return (logits, bbox_reg)
class WeightedSSLModel(torch.nn.Module):
    """SSL encoder whose hidden layers are combined by learned softmax weights."""

    def __init__(self, hub, num_layers, layernorm=False):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(hub, output_hidden_states=True)
        self.num_layers = num_layers
        # Zero logits -> uniform layer weighting after softmax.
        zero_init = torch.cat([torch.zeros(self.num_layers)])
        self.weights = torch.nn.Parameter(zero_init, requires_grad=True)
        self.layernorm = layernorm

    def forward(self, wav, wav_lens=None):
        feats = self.encoder(wav)
        # Hidden states are detached: only the mixing weights are trained here.
        hidden_states = torch.stack(feats.hidden_states, dim=0).detach()
        assert self.num_layers == hidden_states.shape[0], 'Num layers not equal to num hidden states'
        norm_weights = torch.nn.functional.softmax(self.weights, dim=-1)
        if self.layernorm:
            hidden_states = [F.layer_norm(t, (t.shape[-1],)) for t in hidden_states]
        combined = hidden_states[0] * norm_weights[0]
        for idx in range(1, len(hidden_states)):
            combined = combined + hidden_states[idx] * norm_weights[idx]
        return combined
def test_regulartype_numpytype_categorical_parameter():
    """A parameterized RegularType must round-trip through its datashape string."""
    original = RegularType(NumpyType('int32'), 5, parameters={'__categorical__': True, '__array__': 'Something'})
    reparsed = ak.types.from_datashape(str(original), highlevel=False)
    assert str(reparsed) == str(original)
@dataclasses.dataclass(frozen=True)
class PLASWithPerturbationModules(PLASModules):
    """Module bundle for PLAS-with-perturbation.

    Extends the PLAS bundle with the residual perturbation policy and its
    target copy.  Restores the decorator line that had been garbled to a
    bare ``(frozen=True)`` (a syntax error).
    """
    perturbation: DeterministicResidualPolicy
    targ_perturbation: DeterministicResidualPolicy
def get_workflow_jobs():
    """Expand every build config into its per-phase workflow jobs.

    CUDA 10 configs are excluded from test phases; each config's dependents
    contribute an extra 'test' job.
    """
    jobs = []
    for conf_options in instantiate_configs():
        phases = conf_options.restrict_phases or dimensions.PHASES
        for phase in phases:
            # Skip test phases for CUDA 10 configurations.
            if Conf.is_test_phase(phase) and conf_options.cuda_version == '10':
                continue
            jobs.append(conf_options.gen_workflow_job(phase))
        for dependent in conf_options.get_dependents():
            jobs.append(dependent.gen_workflow_job('test'))
    return jobs
def test_setup_no_batch_size():
    """runner.train() must raise ValueError mentioning 'batch_size' when
    setup() was called without one."""
    deterministic.set_seed(0)  # reproducible run
    runner = LocalRunner(snapshot_config)
    algo = CrashingAlgo()
    algo.max_path_length = 100
    algo.policy = None
    # batch_size deliberately omitted from setup().
    runner.setup(algo, None, sampler_cls=LocalSampler)
    with pytest.raises(ValueError, match='batch_size'):
        runner.train(n_epochs=5)
def arg_casts(arg):
    """Return a Cython cast prefix ('<type*>') for argument types that need an
    explicit pointer cast in the generated wrappers, or '' otherwise."""
    needs_cast = {
        'npy_complex64', 'npy_complex128',
        '_cselect1', '_cselect2',
        '_dselect2', '_dselect3',
        '_sselect2', '_sselect3',
        '_zselect1', '_zselect2',
    }
    return '<{0}*>'.format(arg) if arg in needs_cast else ''
def register_Ns3ChannelAccessManager_methods(root_module, cls):
    """Register ns3::ChannelAccessManager constructors and methods on the
    PyBindGen class wrapper `cls` (auto-generated binding registration)."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::ChannelAccessManager const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Txop >', 'dcf')])
    cls.add_method('GetEifsNoDifs', 'ns3::Time', [], is_const=True)
    cls.add_method('IsBusy', 'bool', [], is_const=True)
    # PHY/MAC event notifications forwarded to the access manager.
    cls.add_method('NotifyAckTimeoutResetNow', 'void', [])
    cls.add_method('NotifyAckTimeoutStartNow', 'void', [param('ns3::Time', 'duration')])
    cls.add_method('NotifyCtsTimeoutResetNow', 'void', [])
    cls.add_method('NotifyCtsTimeoutStartNow', 'void', [param('ns3::Time', 'duration')])
    cls.add_method('NotifyMaybeCcaBusyStartNow', 'void', [param('ns3::Time', 'duration')])
    cls.add_method('NotifyNavResetNow', 'void', [param('ns3::Time', 'duration')])
    cls.add_method('NotifyNavStartNow', 'void', [param('ns3::Time', 'duration')])
    cls.add_method('NotifyOffNow', 'void', [])
    cls.add_method('NotifyOnNow', 'void', [])
    cls.add_method('NotifyRxEndErrorNow', 'void', [])
    cls.add_method('NotifyRxEndOkNow', 'void', [])
    cls.add_method('NotifyRxStartNow', 'void', [param('ns3::Time', 'duration')])
    cls.add_method('NotifySleepNow', 'void', [])
    cls.add_method('NotifySwitchingStartNow', 'void', [param('ns3::Time', 'duration')])
    cls.add_method('NotifyTxStartNow', 'void', [param('ns3::Time', 'duration')])
    cls.add_method('NotifyWakeupNow', 'void', [])
    cls.add_method('RemovePhyListener', 'void', [param('ns3::Ptr< ns3::WifiPhy >', 'phy')])
    cls.add_method('RequestAccess', 'void', [param('ns3::Ptr< ns3::Txop >', 'state'), param('bool', 'isCfPeriod', default_value='false')])
    # Timing configuration setters.
    cls.add_method('SetEifsNoDifs', 'void', [param('ns3::Time', 'eifsNoDifs')])
    cls.add_method('SetSifs', 'void', [param('ns3::Time', 'sifs')])
    cls.add_method('SetSlot', 'void', [param('ns3::Time', 'slotTime')])
    cls.add_method('SetupLow', 'void', [param('ns3::Ptr< ns3::MacLow >', 'low')])
    cls.add_method('SetupPhyListener', 'void', [param('ns3::Ptr< ns3::WifiPhy >', 'phy')])
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def test_MemoryTimeCard_add():
    """Reservations conflicting with existing bookings must be rejected."""
    timecard = MemoryTimeCard(0)
    # [10, 20) on an empty card -> accepted.
    r1 = Reservation('', '', 10, 20, 5, 0.9)
    assert (timecard.add(r1) is True)
    # [5, 7) does not overlap r1 -> accepted.
    r2 = Reservation('', '', 5, 7, 5, 0.9)
    assert (timecard.add(r2) is True)
    # NOTE(review): [20, 25) only touches r1's end point yet is rejected —
    # presumably intervals are treated as closed or capacity is exhausted;
    # confirm against MemoryTimeCard's semantics.
    r3 = Reservation('', '', 20, 25, 5, 0.9)
    assert (timecard.add(r3) is False)
    # [15, 25) overlaps r1 -> rejected.
    r4 = Reservation('', '', 15, 25, 5, 0.9)
    assert (timecard.add(r4) is False)
def test_load_metadata():
    """Metadata fields parse correctly for a known eigenscape_raw test clip."""
    default_clipid = 'Beach-01-Raw'
    dataset = eigenscape_raw.Dataset(TEST_DATA_HOME)
    clip = dataset.clip(default_clipid)
    assert (clip.location == 'Bridlington Beach')
    assert (clip.time == '10:42')
    assert (clip.date == '09/05/2017')
    # No extra notes are recorded for this clip.
    assert (clip.additional_information == '')
def CreateDataset(dataroots, dataset_mode='2afc', load_size=64):
    """Instantiate and initialize a perceptual-similarity dataset.

    Parameters
    ----------
    dataroots : list of str
        Root directories forwarded to the dataset's initialize().
    dataset_mode : str
        '2afc' (two-alternative forced choice) or 'jnd' (just-noticeable
        difference).
    load_size : int
        Image load size forwarded to the dataset.

    Raises
    ------
    ValueError
        If `dataset_mode` is not recognized.
    """
    if (dataset_mode == '2afc'):
        from dataset.twoafc_dataset import TwoAFCDataset
        dataset = TwoAFCDataset()
    elif (dataset_mode == 'jnd'):
        from dataset.jnd_dataset import JNDDataset
        dataset = JNDDataset()
    else:
        # BUG FIX: the original referenced `self.dataset_mode` inside a free
        # function, raising NameError instead of the intended ValueError.
        raise ValueError(('Dataset Mode [%s] not recognized.' % dataset_mode))
    dataset.initialize(dataroots, load_size=load_size)
    return dataset
class flickr30k_train(Dataset):
    """Flickr30k training split: yields (image, prompted caption, image-index)
    triples from a downloaded JSON annotation file."""

    def __init__(self, transform, image_root, ann_root, max_words=30, prompt=''):
        # NOTE(review): the annotation URL was garbled/redacted in the source
        # (it had collapsed into an implicit string concatenation with the
        # filename). Restored to the BLIP release location — confirm.
        url = 'https://storage.googleapis.com/sfr-vision-language-research/datasets/flickr30k_train.json'
        filename = 'flickr30k_train.json'
        download_url(url, ann_root)
        self.annotation = json.load(open(os.path.join(ann_root, filename), 'r'))
        self.transform = transform
        self.image_root = image_root
        self.max_words = max_words
        self.prompt = prompt
        # Map each distinct image_id to a dense integer index.
        self.img_ids = {}
        n = 0
        for ann in self.annotation:
            img_id = ann['image_id']
            if (img_id not in self.img_ids.keys()):
                self.img_ids[img_id] = n
                n += 1

    def __len__(self):
        return len(self.annotation)

    def __getitem__(self, index):
        ann = self.annotation[index]
        image_path = os.path.join(self.image_root, ann['image'])
        image = Image.open(image_path).convert('RGB')
        image = self.transform(image)
        # Prepend the (possibly empty) prompt to the truncated caption.
        caption = (self.prompt + pre_caption(ann['caption'], self.max_words))
        return (image, caption, self.img_ids[ann['image_id']])
def subprocess_call(cmd, logger='bar', errorprint=True):
    """Run `cmd` (a list of strings) in a subprocess, logging via proglog.

    Raises IOError carrying the captured stderr if the command exits non-zero.
    """
    logger = proglog.default_bar_logger(logger)
    # BUG FIX: the original logged the literal text '"+ " ".join(cmd)' instead
    # of the actual command line; build the message with a real join.
    logger(message='Moviepy - Running:\n>>> ' + ' '.join(cmd))
    popen_params = {'stdout': DEVNULL, 'stderr': sp.PIPE, 'stdin': DEVNULL}
    if (os.name == 'nt'):
        # NOTE(review): the value was lost in the source; moviepy uses
        # CREATE_NO_WINDOW (0x08000000) here to avoid spawning a console
        # window on Windows — confirm against the original file.
        popen_params['creationflags'] = 0x08000000
    proc = sp.Popen(cmd, **popen_params)
    (out, err) = proc.communicate()
    proc.stderr.close()
    if proc.returncode:
        if errorprint:
            logger(message='Moviepy - Command returned an error')
        raise IOError(err.decode('utf8'))
    else:
        logger(message='Moviepy - Command successful')
    del proc
class Model(nn.Module):
    """U-Net-style spatio-temporal GCN over a skeleton graph.

    Encoder (`down_stage`) halves the temporal dimension at each level while
    widening channels; decoder (`up_stage`) upsamples back with skip
    connections; `merge_stage` fuses multi-scale features before a conv head
    producing 3 output channels per joint.

    NOTE(review): st_gcn / cond_st_gcn appear to take and return an (x, e)
    pair of node and edge features — confirm against their definitions.
    """

    def __init__(self, args):
        super().__init__()
        self.graph = Graph()
        # Edge endpoints used to derive edge features in forward().
        self.source_nodes = self.graph.source_nodes
        self.target_nodes = self.graph.target_nodes
        # Adjacency tensor registered as a non-trainable buffer.
        A = torch.tensor(self.graph.A, dtype=torch.float32, requires_grad=False)
        self.register_buffer('A', A)
        dropout = args.dropout
        # Spatial kernel size = number of adjacency partitions.
        spatial_kernel_size = A.size(0)
        temporal_kernel_size = 5
        kernel_size = (temporal_kernel_size, spatial_kernel_size)
        self.in_channels = args.in_channels
        self.out_channels = args.out_channels
        self.n_joints = args.n_joints
        self.out_joints = args.out_joints
        # BatchNorm over (channels * joints) after flattening persons into batch.
        self.data_bn = nn.BatchNorm1d((self.in_channels * A.size(1)))
        # Encoder: stem + four down levels (stride-2 st_gcn then stride-1 st_gcn).
        self.down_stage = nn.ModuleList((st_gcn(self.in_channels, 16, kernel_size, 1, dropout, graph=self.graph, residual=False), nn.ModuleList((st_gcn(16, 32, kernel_size, 2, dropout, graph=self.graph), st_gcn(32, 32, kernel_size, 1, dropout, graph=self.graph))), nn.ModuleList((st_gcn(32, 64, kernel_size, 2, dropout, graph=self.graph), st_gcn(64, 64, kernel_size, 1, dropout, graph=self.graph))), nn.ModuleList((st_gcn(64, 128, kernel_size, 2, dropout, graph=self.graph), st_gcn(128, 128, kernel_size, 1, dropout, graph=self.graph))), nn.ModuleList((st_gcn(128, 256, kernel_size, 2, dropout, graph=self.graph), st_gcn(256, 256, kernel_size, 1, dropout, graph=self.graph)))))
        # Decoder: channel-reducing st_gcn followed by 2x temporal upsampling.
        self.up_stage = nn.ModuleList((nn.Identity(), nn.ModuleList((st_gcn(32, 16, kernel_size, 1, dropout, graph=self.graph), nn.Upsample(scale_factor=(2, 1), mode='bilinear', align_corners=True))), nn.ModuleList((st_gcn(64, 32, kernel_size, 1, dropout, graph=self.graph), nn.Upsample(scale_factor=(2, 1), mode='bilinear', align_corners=True))), nn.ModuleList((st_gcn(128, 64, kernel_size, 1, dropout, graph=self.graph), nn.Upsample(scale_factor=(2, 1), mode='bilinear', align_corners=True))), nn.ModuleList((st_gcn(256, 128, kernel_size, 1, dropout, graph=self.graph), nn.Upsample(scale_factor=(2, 1), mode='bilinear', align_corners=True)))))
        # Multi-scale merge branches: project each decoder level to 16 channels
        # and upsample it back to full temporal resolution (x4/x8/x16).
        self.merge_stage = nn.ModuleList((cond_st_gcn(16, 16, kernel_size, 1, dropout, graph=self.graph), nn.Identity(), nn.ModuleList((cond_st_gcn(32, 16, kernel_size, 1, dropout, graph=self.graph), nn.Upsample(scale_factor=(4, 1), mode='bilinear', align_corners=True))), nn.ModuleList((cond_st_gcn(64, 16, kernel_size, 1, dropout, graph=self.graph), nn.Upsample(scale_factor=(8, 1), mode='bilinear', align_corners=True))), nn.ModuleList((cond_st_gcn(128, 16, kernel_size, 1, dropout, graph=self.graph), nn.Upsample(scale_factor=(16, 1), mode='bilinear', align_corners=True)))))
        # Output head: BN then 1x1 conv to 3 channels.
        self.head = nn.Sequential(nn.BatchNorm2d(16, momentum=1), nn.Conv2d(16, 3, kernel_size=1))

    def forward(self, x):
        # x: (N, C, T, V, M) = (batch, channels, time, joints, persons).
        (N, C, T, V, M) = x.size()
        # Fold persons into the batch dim and joints into channels for data_bn.
        x = x.permute(0, 4, 3, 1, 2).contiguous()
        x = x.view((N * M), (V * C), T)
        x = self.data_bn(x)
        # Back to (N*M, C, T, V) for the graph convolutions.
        x = x.view(N, M, V, C, T)
        x = x.permute(0, 1, 3, 4, 2).contiguous()
        x = x.view((N * M), C, T, V)
        # Edge features: differences between connected joints.
        e = (x[(..., self.source_nodes)] - x[(..., self.target_nodes)])
        # --- encoder ---
        (x_d0, e_d0) = self.down_stage[0](x, e)
        (x_d1, e_d1) = self.down_stage[1][0](x_d0, e_d0)
        (x_d1, e_d1) = self.down_stage[1][1](x_d1, e_d1)
        (x_d2, e_d2) = self.down_stage[2][0](x_d1, e_d1)
        (x_d2, e_d2) = self.down_stage[2][1](x_d2, e_d2)
        (x_d3, e_d3) = self.down_stage[3][0](x_d2, e_d2)
        (x_d3, e_d3) = self.down_stage[3][1](x_d3, e_d3)
        (x_d4, e_d4) = self.down_stage[4][0](x_d3, e_d3)
        (x_d4, e_d4) = self.down_stage[4][1](x_d4, e_d4)
        # --- decoder with additive skip connections; the Upsample module is
        # applied separately to node and edge features ---
        (x_u4, e_u4) = self.up_stage[4][0](x_d4, e_d4)
        (x_u3, e_u3) = (self.up_stage[4][1](x_u4), self.up_stage[4][1](e_u4))
        (x_u3, e_u3) = self.up_stage[3][0]((x_d3 + x_u3), (e_d3 + e_u3))
        (x_u2, e_u2) = (self.up_stage[3][1](x_u3), self.up_stage[3][1](e_u3))
        (x_u2, e_u2) = self.up_stage[2][0]((x_d2 + x_u2), (e_d2 + e_u2))
        (x_u1, e_u1) = (self.up_stage[2][1](x_u2), self.up_stage[2][1](e_u2))
        (x_u1, e_u1) = self.up_stage[1][0]((x_d1 + x_u1), (e_d1 + e_u1))
        (x_u0, e_u0) = (self.up_stage[1][1](x_u1), self.up_stage[1][1](e_u1))
        # --- multi-scale merge: bring each decoder level to 16 channels at
        # full temporal resolution ---
        (x_m4, e_m4) = self.merge_stage[4][0](x_u4, e_u4)
        (x_m4, e_m4) = (self.merge_stage[4][1](x_m4), self.merge_stage[4][1](e_m4))
        (x_m3, e_m3) = self.merge_stage[3][0](x_u3, e_u3)
        (x_m3, e_m3) = (self.merge_stage[3][1](x_m3), self.merge_stage[3][1](e_m3))
        (x_m2, e_m2) = self.merge_stage[2][0](x_u2, e_u2)
        (x_m2, e_m2) = (self.merge_stage[2][1](x_m2), self.merge_stage[2][1](e_m2))
        # Fuse stem, decoder output, and all merge branches.
        (x, e) = self.merge_stage[0](((((x_u0 + x_d0) + x_m2) + x_m3) + x_m4), ((((e_u0 + e_d0) + e_m2) + e_m3) + e_m4))
        x = self.head(x)
        # Restore a trailing singleton (person) dimension.
        return x.unsqueeze(dim=(- 1))
class MultipleOutputsNet(torch.nn.Module):
    """Toy network returning many tensors, used to exercise multi-output
    tracing/export paths. conv2/conv3 are defined but never used; both inputs
    are pushed through conv1 and split channel-wise."""

    def __init__(self):
        super(MultipleOutputsNet, self).__init__()
        self.conv1 = torch.nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv2 = torch.nn.Conv2d(1, 3, kernel_size=1, stride=1)
        self.conv3 = torch.nn.Conv2d(1, 3, kernel_size=1, stride=1)

    def forward(self, x, y):
        a = self.conv1(x)
        b = self.conv1(y)
        # Split each 3-channel map into three 1-channel maps.
        a1, a2, a3 = torch.split(a, split_size_or_sections=1, dim=1)
        b1, b2, b3 = torch.split(b, split_size_or_sections=1, dim=1)
        return (a1, a2, a3, b1, b2, b3, a1 - b3, b2 - a3)
def global_average_pooling_data_grad_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
    """Backward of the data-grad of global average pooling: pooling the
    incoming gradient yields the gradient w.r.t. the pooled output."""
    incoming_grad = grad_inputs[0]
    return F.global_average_pooling(incoming_grad)
def _find_pow_of_frobenius(p, n, x, y):
    """Find i in [0, n) such that applying Frobenius (raising to the p-th
    power) i times to `y` yields `x`; return i as an element of Z/nZ.

    Raises RuntimeError if no such power exists within n steps.
    """
    from .integer_mod import mod
    current = y
    for exponent in range(n):
        if (x == current):
            return mod(exponent, n)
        current = (current ** p)
    raise RuntimeError('No appropriate power of Frobenius found')
class TFDebertaV2ForQuestionAnswering(metaclass=DummyObject):
    """Auto-generated placeholder for the TF DeBERTa-v2 QA model, used when
    the TensorFlow backend is not installed."""

    # Backends that must be importable for the real class to be available.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Raises a helpful error explaining that TensorFlow is required.
        requires_backends(self, ['tf'])
class DCProblemTestsN_Nuemann_storeJ(DCProblem_2DTests):
    """2D nodal DC resistivity adjoint tests with Neumann boundary conditions
    and a stored sensitivity matrix.

    NOTE(review): 'Nuemann' in the class name is a typo for 'Neumann'; kept
    because tests may be selected by name.
    """

    # Simulation class under test.
    formulation = 'Simulation2DNodal'
    # Store the Jacobian explicitly instead of matrix-free products.
    storeJ = True
    # Tolerance for the adjoint test.
    adjoint_tol = 1e-08
    bc_type = 'Neumann'
class NextWordShardDataset(ShardDataset):
    """Minimal shard dataset of (input sequence, next-word target) pairs,
    exposed through the standard len/getitem protocol."""

    def __init__(self, X, y):
        self.X = X  # input sequences
        self.y = y  # next-word targets, aligned with X

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index: int):
        return self.X[index], self.y[index]
class Test_createMagConversionDict(TestCase):
    """Spot-checks for the spectral-type -> magnitude conversion table."""

    def test_works(self):
        magTable = eq._createMagConversionDict()
        # Entries are stored as strings, including 'nan' placeholders.
        self.assertEqual(magTable['A6'][10], '0.44')
        self.assertEqual(magTable['B0'][0], '30000')
        self.assertEqual(magTable['M6'][14], 'nan')
class TestInference(unittest.TestCase):
    """Tests for FactoredInference estimation routines on a small domain."""

    def setUp(self):
        # 5-attribute domain; one identity measurement per attribute (first 4),
        # each a normalized random marginal.
        attrs = ['a', 'b', 'c', 'd', 'e']
        shape = [2, 3, 4, 5, 6]
        self.domain = Domain(attrs, shape)
        self.measurements = []
        for i in range(4):
            I = np.eye(shape[i])
            y = np.random.rand(shape[i])
            y /= y.sum()  # normalize to a probability vector
            self.measurements.append((I, y, 1.0, attrs[i]))
        # warm_start=True so repeated estimate() calls reuse the model.
        self.engine = FactoredInference(self.domain, backend='numpy', log=True, iters=100, warm_start=True)

    def test_estimate(self):
        # Total mass of the fitted model must match the requested total.
        self.engine.estimate(self.measurements, 1.0)
        self.assertEqual(self.engine.model.total, 1.0)

    def test_mirror_descent(self):
        # Mirror descent should converge to (near-)zero loss on these
        # consistent measurements.
        loss = self.engine.mirror_descent(self.measurements, 1.0)
        self.assertEqual(self.engine.model.total, 1.0)
        self.assertTrue((loss <= 0.0001))

    def test_dual_averaging(self):
        loss = self.engine.dual_averaging(self.measurements, 1.0)
        self.assertEqual(self.engine.model.total, 1.0)

    def test_interior_gradient(self):
        loss = self.engine.interior_gradient(self.measurements, 1.0)
        self.assertEqual(self.engine.model.total, 1.0)

    def test_warm_start(self):
        # Adding a 2-way measurement after a fit must not break warm start.
        self.engine.estimate(self.measurements, 1.0)
        new = (np.eye((2 * 3)), np.random.rand(6), 1.0, ('a', 'b'))
        self.engine.estimate((self.measurements + [new]), 1.0)

    def test_lipschitz(self):
        # The estimated Lipschitz constant must upper-bound the empirical
        # gradient-difference / point-difference ratio on random pairs.
        self.engine._setup(self.measurements, None)
        lip = self.engine._lipschitz(self.measurements)

        def rand():
            # Random CliqueVector over the model's cliques.
            ans = {}
            for cl in self.engine.model.cliques:
                ans[cl] = self.engine.Factor.random(self.engine.domain.project(cl))
            return CliqueVector(ans)
        for _ in range(100):
            x = rand()
            y = rand()
            (_, gx) = self.engine._marginal_loss(x)
            (_, gy) = self.engine._marginal_loss(y)
            A = (gx - gy).dot((gx - gy))
            B = (x - y).dot((x - y))
            ratio = np.sqrt((A / B))
            self.assertTrue((ratio <= lip))
def test_fortran_eof_ok(tmpdir):
    """Reading past the last complete record raises FortranEOFError."""
    filename = path.join(str(tmpdir), 'scratch')
    np.random.seed(1)
    # Write two well-formed records.
    with FortranFile(filename, 'w') as f:
        f.write_record(np.random.randn(5))
        f.write_record(np.random.randn(3))
    # Read them back, then expect a clean EOF error on the third read.
    with FortranFile(filename, 'r') as f:
        for expected_len in (5, 3):
            assert len(f.read_reals()) == expected_len
        with pytest.raises(FortranEOFError):
            f.read_reals()
class HidingRes(nn.Module):
    """Residual hiding/reveal network.

    Conv encoder (one stride-2 downsample), nine dilated residual blocks,
    then a transposed-conv decoder producing `out_c` channels. The output is
    tanh-squashed unless `only_residual` is set. `forward` takes an image `x`
    and a conditioning vector `c`, which is broadcast over the spatial dims
    and concatenated as extra input channels (so `in_c` must equal
    x-channels + c-channels).
    """

    def __init__(self, in_c=4, out_c=3, only_residual=False, requires_grad=True):
        super(HidingRes, self).__init__()
        # Encoder: two stride-1 convs, then a stride-2 downsample.
        self.conv1 = nn.Conv2d(in_c, 128, 3, 1, 1, bias=False)
        self.norm1 = nn.InstanceNorm2d(128, affine=True)
        self.conv2 = nn.Conv2d(128, 128, 3, 1, 1, bias=False)
        self.norm2 = nn.InstanceNorm2d(128, affine=True)
        self.conv3 = nn.Conv2d(128, 128, 3, 2, 1, bias=False)
        self.norm3 = nn.InstanceNorm2d(128, affine=True)
        # Dilated residual trunk (dilations 2, 4, then 1).
        self.res1 = ResidualBlock(128, dilation=2)
        self.res2 = ResidualBlock(128, dilation=2)
        self.res3 = ResidualBlock(128, dilation=2)
        self.res4 = ResidualBlock(128, dilation=2)
        self.res5 = ResidualBlock(128, dilation=4)
        self.res6 = ResidualBlock(128, dilation=4)
        self.res7 = ResidualBlock(128, dilation=4)
        self.res8 = ResidualBlock(128, dilation=4)
        self.res9 = ResidualBlock(128, dilation=1)
        # Decoder: upsample back to input resolution, then project to out_c.
        self.deconv3 = nn.ConvTranspose2d(128, 128, 4, 2, 1)
        self.norm4 = nn.InstanceNorm2d(128, affine=True)
        self.deconv2 = nn.Conv2d(128, 128, 3, 1, 1)
        self.norm5 = nn.InstanceNorm2d(128, affine=True)
        self.deconv1 = nn.Conv2d(128, out_c, 1)
        self.only_residual = only_residual
        if (not requires_grad):
            # Freeze the whole network (e.g. when used as a fixed component).
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, x, c):
        # Broadcast the conditioning vector over the spatial dims and
        # concatenate it as extra channels.
        c = c.view(c.size(0), c.size(1), 1, 1)
        c = c.repeat(1, 1, x.size(2), x.size(3))
        x = torch.cat([x, c], dim=1)
        y = F.relu(self.norm1(self.conv1(x)))
        y = F.relu(self.norm2(self.conv2(y)))
        y = F.relu(self.norm3(self.conv3(y)))
        y = self.res1(y)
        y = self.res2(y)
        y = self.res3(y)
        y = self.res4(y)
        y = self.res5(y)
        y = self.res6(y)
        y = self.res7(y)
        y = self.res8(y)
        y = self.res9(y)
        y = F.relu(self.norm4(self.deconv3(y)))
        y = F.relu(self.norm5(self.deconv2(y)))
        if self.only_residual:
            y = self.deconv1(y)
        else:
            # FIX: F.tanh is deprecated (removed in newer torch versions);
            # torch.tanh is the numerically identical replacement.
            y = torch.tanh(self.deconv1(y))
        return y
class LoopScopeGuard():
    """Context manager that pushes a LoopScopeAttribute onto `scopes` on entry
    and pops it on exit, delegating to an optional nested non-static guard."""

    def __init__(self, scopes, non_static_guard=None):
        self.scopes = scopes
        self.non_static_guard = non_static_guard

    def __enter__(self):
        # The attribute records whether this loop scope is static, i.e. has
        # no inner non-static guard.
        is_static = self.non_static_guard is None
        self.scopes.append(LoopScopeAttribute(is_static))
        if self.non_static_guard:
            self.non_static_guard.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.scopes.pop()
        if self.non_static_guard:
            self.non_static_guard.__exit__(exc_type, exc_val, exc_tb)
def test_api():
    """BridgeBidding must satisfy the generic pgx environment API, both with
    and without explicit PRNG keys."""
    import pgx
    env = pgx.bridge_bidding.BridgeBidding(DDS_HASH_TABLE_PATH)
    pgx.api_test(env, 3, use_key=False)
    pgx.api_test(env, 3, use_key=True)
def write_version_py():
    """Generate the package version file from VERSION plus the git SHA.

    Relies on the module-level `version_file` (output path) and `get_hash()`.
    """
    template = "# GENERATED VERSION FILE\n# TIME: {}\n__version__ = '{}'\n__gitsha__ = '{}'\nversion_info = ({})\n"
    sha = get_hash()
    with open('VERSION', 'r') as f:
        short_version = f.read().strip()
    # Numeric components stay bare; anything else (e.g. 'rc1') is quoted.
    parts = [(x if x.isdigit() else f'"{x}"') for x in short_version.split('.')]
    rendered = template.format(time.asctime(), short_version, sha, ', '.join(parts))
    with open(version_file, 'w') as f:
        f.write(rendered)
def test(image_file, fc, feat_dir): (index_list, label_list) = ([], []) with open(image_file) as fp: for line in fp: (index, l) = line.split() index_list.append(index.split('.')[0]) label_list.append(int(l)) top_retrv = [1, 5] hit_count = np.zeros(len(top_retrv)) cnt_valid = len(index_list) for (i, index) in enumerate(index_list): feat_path = os.path.join(feat_dir, (index + '.npz')) feat = np.load(feat_path)['feat'] scores = np.matmul(feat, fc) if ((i % 10000) == 0): print(i, len(index_list)) ids = np.argsort((- scores)) for k2 in range(len(top_retrv)): current_len = top_retrv[k2] for sort_id in range(current_len): lbl = ids[sort_id] if (lbl == label_list[i]): hit_count[k2] = (hit_count[k2] + 1) break hit_count = (hit_count / cnt_valid) outstr = '' for k in range(len(top_retrv)): outstr = ((outstr + ' ') + str(hit_count[k])) print(outstr) print('total: %d', cnt_valid)
def stacked_core_full_gauss_readout(dataloaders, seed, hidden_channels=32, input_kern=13, hidden_kern=3, layers=3, gamma_input=15.5, skip=0, final_nonlinearity=True, momentum=0.9, pad_input=False, batch_norm=True, hidden_dilation=1, laplace_padding=None, input_regularizer='LaplaceL2norm', use_avg_reg=False, init_mu_range=0.2, init_sigma=1.0, readout_bias=True, gamma_readout=4, elu_offset=0, stack=None, depth_separable=False, linear=False, gauss_type='full', grid_mean_predictor=None, attention_conv=False, shifter=None, shifter_type='MLP', input_channels_shifter=2, hidden_channels_shifter=5, shift_layers=3, gamma_shifter=0, shifter_bias=True, hidden_padding=None, core_bias=False):
    """Build a FiringRateEncoder: one shared Stacked2dCore, a FullGaussian2d
    readout per data session, and an optional shifter network.

    `dataloaders` maps session keys to loaders (or contains such a mapping
    under the 'train' key). The first two fields of a batch are taken as
    (input, response). Returns the assembled, untrained model.
    """
    # Unwrap a {'train': ..., 'test': ...} style container.
    if ('train' in dataloaders.keys()):
        dataloaders = dataloaders['train']
    # Peek at one batch to discover the input/output field names.
    batch = next(iter(list(dataloaders.values())[0]))
    (in_name, out_name) = (list(batch.keys())[:2] if isinstance(batch, dict) else batch._fields[:2])
    session_shape_dict = get_dims_for_loader_dict(dataloaders)
    # Per-session neuron counts and input channel counts.
    n_neurons_dict = {k: v[out_name][1] for (k, v) in session_shape_dict.items()}
    input_channels = [v[in_name][1] for v in session_shape_dict.values()]
    # NOTE(review): input_channels is always a list here, so the dict branch
    # is dead code — presumably kept for parity with sibling model builders.
    core_input_channels = (list(input_channels.values())[0] if isinstance(input_channels, dict) else input_channels[0])
    set_random_seed(seed)
    (grid_mean_predictor, grid_mean_predictor_type, source_grids) = prepare_grid(grid_mean_predictor, dataloaders)
    core = Stacked2dCore(input_channels=core_input_channels, hidden_channels=hidden_channels, input_kern=input_kern, hidden_kern=hidden_kern, layers=layers, gamma_input=gamma_input, skip=skip, final_nonlinearity=final_nonlinearity, bias=core_bias, momentum=momentum, pad_input=pad_input, batch_norm=batch_norm, hidden_dilation=hidden_dilation, laplace_padding=laplace_padding, input_regularizer=input_regularizer, stack=stack, depth_separable=depth_separable, linear=linear, attention_conv=attention_conv, hidden_padding=hidden_padding, use_avg_reg=use_avg_reg)
    # Core output shapes per session (dropping the batch dim) feed the readout.
    in_shapes_dict = {k: get_module_output(core, v[in_name])[1:] for (k, v) in session_shape_dict.items()}
    readout = MultipleFullGaussian2d(in_shape_dict=in_shapes_dict, loader=dataloaders, n_neurons_dict=n_neurons_dict, init_mu_range=init_mu_range, bias=readout_bias, init_sigma=init_sigma, gamma_readout=gamma_readout, gauss_type=gauss_type, grid_mean_predictor=grid_mean_predictor, grid_mean_predictor_type=grid_mean_predictor_type, source_grids=source_grids)
    # Optional shifter: `shifter is True` requests construction here; a
    # pre-built shifter object would be passed through unchanged.
    if (shifter is True):
        data_keys = [i for i in dataloaders.keys()]
        if (shifter_type == 'MLP'):
            shifter = MLPShifter(data_keys=data_keys, input_channels=input_channels_shifter, hidden_channels_shifter=hidden_channels_shifter, shift_layers=shift_layers, gamma_shifter=gamma_shifter)
        elif (shifter_type == 'StaticAffine'):
            shifter = StaticAffine2dShifter(data_keys=data_keys, input_channels=input_channels_shifter, bias=shifter_bias, gamma_shifter=gamma_shifter)
    model = FiringRateEncoder(core=core, readout=readout, shifter=shifter, elu_offset=elu_offset)
    return model
class MLP(eqx.Module):
    """A standard multi-layer perceptron (equinox module).

    `depth` hidden layers of width `width_size`, with `activation` between
    layers and `final_activation` on the output. depth == 0 collapses to a
    single linear map from in_size to out_size.
    """

    layers: List[nn.Linear]
    activation: Callable = eqx.static_field()
    final_activation: Callable = eqx.static_field()
    in_size: int = static_field()
    out_size: int = static_field()
    width_size: int = static_field()
    depth: int = static_field()

    def __init__(self, in_size: int, out_size: int, width_size: int, depth: int, activation: Callable=jax.nn.relu, final_activation: Callable=(lambda x: x), *, key: 'jax.random.PRNGKey', **kwargs):
        super().__init__(**kwargs)
        keys = jax.random.split(key, (depth + 1))
        # Layer sizes: in -> width (x depth) -> out; depth 0 gives [in, out].
        sizes = [in_size] + [width_size] * depth + [out_size]
        self.layers = [
            nn.Linear(fan_in, fan_out, key=layer_key)
            for fan_in, fan_out, layer_key in zip(sizes[:-1], sizes[1:], keys)
        ]
        self.in_size = in_size
        self.out_size = out_size
        self.width_size = width_size
        self.depth = depth
        self.activation = activation
        self.final_activation = final_activation

    def __call__(self, x, *, key: Optional['jax.random.PRNGKey']=None):
        *hidden, last = self.layers
        for layer in hidden:
            x = self.activation(layer(x))
        return self.final_activation(last(x))
def encode(ob, extensions=None, **options):
    """Serialize `ob` to BSDF bytes using a fresh serializer configured with
    the given extensions and options."""
    serializer = BsdfSerializer(extensions, **options)
    return serializer.encode(ob)
class AlexNet(nn.Module):
    """AlexNet-style feature extractor (no classifier head).

    A width multiplier scales every layer's channel count except the
    3-channel RGB input; `feature_size` records the final channel count.
    """

    configs = [3, 96, 256, 384, 384, 256]

    def __init__(self, width_mult=1):
        # Scale channel counts, keeping the RGB input at 3.
        widths = [c if c == 3 else int(c * width_mult) for c in AlexNet.configs]
        super(AlexNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(widths[0], widths[1], kernel_size=11, stride=2),
            nn.BatchNorm2d(widths[1]),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(widths[1], widths[2], kernel_size=5),
            nn.BatchNorm2d(widths[2]),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(widths[2], widths[3], kernel_size=3),
            nn.BatchNorm2d(widths[3]),
            nn.ReLU(inplace=True),
        )
        self.layer4 = nn.Sequential(
            nn.Conv2d(widths[3], widths[4], kernel_size=3),
            nn.BatchNorm2d(widths[4]),
            nn.ReLU(inplace=True),
        )
        # Final stage has no activation (features are taken raw post-BN).
        self.layer5 = nn.Sequential(
            nn.Conv2d(widths[4], widths[5], kernel_size=3),
            nn.BatchNorm2d(widths[5]),
        )
        self.feature_size = widths[5]

    def forward(self, x):
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4, self.layer5):
            x = stage(x)
        return x
class Tagger(object):
    """Pairs each sentence's words with tags read line-by-line from a tags
    file, printing 'word|||tag' tokens; sentences whose tag count mismatches
    the word count are skipped and counted in `self.ignored`."""

    def __init__(self, tagsfile):
        self.tagsfile = tagsfile  # open file handle of tag lines
        self.prevline = None      # last successfully consumed tags line
        self.ignored = 0          # number of skipped (mismatched) sentences

    def __call__(self, words):
        # Skip blank lines in the tags stream.
        tagsline = '\n'
        while (tagsline == '\n'):
            # BUG FIX: the original read from a global `tagsfile`, ignoring
            # the handle stored on the instance in __init__.
            tagsline = self.tagsfile.readline()
        tags = get_tags(tagsline)
        if (len(tags) != len(words)):
            self.ignored += 1
            return
        uprint(' '.join(('|||'.join(pair) for pair in zip(words, tags))))
        self.prevline = tagsline
def test_autodetect_function_in_for():
    """A free function called inside a loop must be auto-detected and mutate
    the array in place on every iteration."""
    def adff(A):
        for _ in range(5):
            freefunction2(A)
    A = np.random.rand(20)
    ref = np.copy(A)
    adff(A)
    # 5 calls; freefunction2 presumably adds 2 elementwise (see its def).
    assert np.allclose(A, (ref + (2 * 5)))
def make_dataset(dataset_name, data_dir, batch_size=128, sample_size=None, SOTA=False):
    """Build train/test DataLoaders for CIFAR10, MNIST, or random-label CIFAR10.

    Args:
        dataset_name: 'cifar10', 'mnist', or 'cifar10_random'.
        data_dir: download/cache root for torchvision datasets.
        batch_size: loader batch size.
        sample_size: if given, cap the training set at `sample_size` examples
            per class (balanced subsampling via SubsetRandomSampler).
        SOTA: for CIFAR10 only, enable crop/flip training augmentation.

    Returns:
        (trainloader, testloader, num_classes).

    Raises:
        ValueError: for an unrecognized dataset_name.
    """
    if (dataset_name == 'cifar10'):
        print('Dataset: CIFAR10.')
        if SOTA:
            # Standard augmentation recipe: random crop with padding + h-flip.
            trainset = CIFAR10(root=data_dir, train=True, download=True, transform=transforms.Compose([transforms.RandomCrop(size=32, padding=4), transforms.RandomHorizontalFlip(p=0.5), transforms.ToTensor(), transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201])]))
        else:
            trainset = CIFAR10(root=data_dir, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201])]))
        testset = CIFAR10(root=data_dir, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201])]))
        num_classes = 10
    elif (dataset_name == 'mnist'):
        print('Dataset: MNIST.')
        # Grayscale(3) + Resize(32) make MNIST shape-compatible with CIFAR nets.
        trainset = MNIST(root=data_dir, train=True, download=True, transform=transforms.Compose([transforms.Grayscale(3), transforms.Resize(32), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        testset = MNIST(root=data_dir, train=False, download=True, transform=transforms.Compose([transforms.Grayscale(3), transforms.Resize(32), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        num_classes = 10
    elif (dataset_name == 'cifar10_random'):
        print('Dataset: CIFAR10 with random label.')
        # NOTE(review): this branch uses ImageNet normalization constants,
        # unlike the CIFAR branch above — confirm this is intentional.
        trainset = CIFAR10RandomLabels(root=data_dir, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]))
        testset = CIFAR10RandomLabels(root=data_dir, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]))
        num_classes = 10
    else:
        raise ValueError
    if (sample_size is not None):
        # Balanced subsample: take at most `sample_size` examples per class,
        # scanning the dataset in order until the quota is filled.
        total_sample_size = (num_classes * sample_size)
        cnt_dict = dict()
        total_cnt = 0
        indices = []
        for i in range(len(trainset)):
            if (total_cnt == total_sample_size):
                break
            label = trainset[i][1]
            if (label not in cnt_dict):
                cnt_dict[label] = 1
                total_cnt += 1
                indices.append(i)
            elif (cnt_dict[label] == sample_size):
                # Class quota already reached; skip this example.
                continue
            else:
                cnt_dict[label] += 1
                total_cnt += 1
                indices.append(i)
        train_indices = torch.tensor(indices)
        trainloader = DataLoader(trainset, batch_size=batch_size, sampler=SubsetRandomSampler(train_indices), num_workers=1)
    else:
        trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=4)
    return (trainloader, testloader, num_classes)
class ScalarInequality(Constraint):
    """Constraint enforcing `column <relation> value` for a single column
    against a scalar (number or datetime string).

    Transform replaces the column with log(|column - value| + 1); reverse
    transform reconstructs the column on the correct side of `value`.

    NOTE(review): several methods below take `cls`/no `self` but show no
    @classmethod/@staticmethod decorator — the decorators appear to have been
    lost in extraction; confirm against the original file.
    """

    def _validate_inputs(cls, **kwargs):
        # Collect errors from the base validation plus the relation check,
        # and raise them together.
        errors = []
        try:
            super()._validate_inputs(**kwargs)
        except Exception as e:
            errors.append(e)
        if (('relation' in kwargs) and (kwargs['relation'] not in {'>', '>=', '<', '<='})):
            # NOTE: this wraps the value in a set, so the message renders as
            # e.g. {'=='} — kept as-is.
            wrong_relation = {kwargs['relation']}
            errors.append(ConstraintMetadataError(f"Invalid relation value {wrong_relation} in a ScalarInequality constraint. The relation must be one of: '>', '>=', '<' or '<='."))
        if errors:
            raise AggregateConstraintsError(errors)

    def _validate_metadata_specific_to_constraint(metadata, **kwargs):
        # `value` must match the column's sdtype: numeric for numerical
        # columns, a correctly formatted datetime string for datetime columns.
        column_name = kwargs.get('column_name')
        sdtype = metadata.columns.get(column_name, {}).get('sdtype')
        value = kwargs.get('value')
        if (sdtype == 'numerical'):
            if (not isinstance(value, (int, float))):
                raise ConstraintMetadataError("'value' must be an int or float.")
        elif (sdtype == 'datetime'):
            datetime_format = metadata.columns.get(column_name).get('datetime_format')
            matches_format = matches_datetime_format(value, datetime_format)
            if (not matches_format):
                raise ConstraintMetadataError("'value' must be a datetime string of the right format.")
        else:
            raise ConstraintMetadataError('A ScalarInequality constraint is being applied to columns with mismatched sdtypes. Numerical columns must be compared to integer or float values. Datetimes column must be compared to datetime strings.')

    def _validate_init_inputs(column_name, value, relation):
        # Constructor-time validation (independent of any metadata).
        value_is_datetime = is_datetime_type(value)
        if (not isinstance(column_name, str)):
            raise ValueError('`column_name` must be a string.')
        if (relation not in ['>', '>=', '<', '<=']):
            raise ValueError('`relation` must be one of the following: `>`, `>=`, `<`, `<=`')
        if (not (isinstance(value, (int, float)) or value_is_datetime)):
            raise ValueError('`value` must be a number or a string that represents a datetime.')
        if (value_is_datetime and (not isinstance(value, str))):
            raise ValueError('Datetime must be represented as a string.')

    def __init__(self, column_name, relation, value):
        self._validate_init_inputs(column_name, value, relation)
        # Normalize datetime strings to numpy datetime64 up front.
        self._value = (cast_to_datetime64(value) if is_datetime_type(value) else value)
        self._column_name = column_name
        self._diff_column_name = f'{self._column_name}#diff'
        self.constraint_columns = (column_name,)
        # Filled in during _fit().
        self._is_datetime = None
        self._dtype = None
        # numpy comparison op corresponding to `relation`.
        self._operator = INEQUALITY_TO_OPERATION[relation]

    def _get_is_datetime(self, table_data):
        # Column and value must agree on datetime-ness.
        column = table_data[self._column_name].to_numpy()
        is_column_datetime = is_datetime_type(column)
        is_value_datetime = is_datetime_type(self._value)
        is_datetime = (is_column_datetime and is_value_datetime)
        if ((not is_datetime) and any([is_value_datetime, is_column_datetime])):
            raise ValueError('Both column and value must be datetime.')
        return is_datetime

    def _validate_columns_exist(self, table_data):
        if (self._column_name not in table_data.columns):
            raise KeyError(f'The column {self._column_name} was not found in table_data.')

    def _fit(self, table_data):
        # Record datetime-ness and the original dtype for reverse transform.
        self._validate_columns_exist(table_data)
        self._is_datetime = self._get_is_datetime(table_data)
        self._dtype = table_data[self._column_name].dtypes

    def is_valid(self, table_data):
        # NaNs are considered valid; everything else must satisfy the relation.
        column = table_data[self._column_name].to_numpy()
        if (self._is_datetime and (self._dtype == 'O')):
            column = cast_to_datetime64(column)
        valid = (pd.isna(column) | self._operator(column, self._value))
        return valid

    def _transform(self, table_data):
        # Replace the column with log(|column - value| + 1) under a fresh
        # unique '#diff' column name.
        column = table_data[self._column_name].to_numpy()
        if self._is_datetime:
            column = cast_to_datetime64(column)
            diff_column = abs((column - self._value))
            # timedelta -> float so np.log works.
            diff_column = diff_column.astype(np.float64)
        else:
            diff_column = abs((column - self._value))
        self._diff_column_name = create_unique_name(self._diff_column_name, table_data.columns)
        table_data[self._diff_column_name] = np.log((diff_column + 1))
        return table_data.drop(self._column_name, axis=1)

    def _reverse_transform(self, table_data):
        # Invert the log transform and place the value on the correct side of
        # self._value according to the relation's direction.
        diff_column = (np.exp(table_data[self._diff_column_name]) - 1)
        if (self._dtype != np.dtype('float')):
            diff_column = diff_column.round()
        if self._is_datetime:
            diff_column = convert_to_timedelta(diff_column)
        if (self._operator in [np.greater, np.greater_equal]):
            original_column = (self._value + diff_column)
        else:
            original_column = (self._value - diff_column)
        table_data[self._column_name] = pd.Series(original_column).astype(self._dtype)
        return table_data.drop(self._diff_column_name, axis=1)
def construct_beta_hats(opt_beta, opt_beta_sens, eps_list, max_norm):
    """Generate a privatized beta estimate per epsilon and project each onto
    the L2 ball of radius `max_norm`."""
    candidates = gen_list(opt_beta, opt_beta_sens, eps_list)
    return [project.two_norm_project(beta, max_norm) for beta in candidates]
def create_val_img_folder(root):
    """Reorganize TinyImageNet's flat val/images directory into per-class
    subfolders, using val_annotations.txt (image name -> class folder).

    Safe to re-run: already-moved images are skipped.
    """
    dataset_dir = os.path.join(root)
    val_dir = os.path.join(dataset_dir, 'val')
    img_dir = os.path.join(val_dir, 'images')
    # Map each image filename to its class folder. Use a context manager so
    # the annotations file is closed even on error (the original leaked the
    # handle if parsing raised).
    val_img_dict = {}
    with open(os.path.join(val_dir, 'val_annotations.txt'), 'r') as fp:
        for line in fp:
            words = line.split('\t')
            val_img_dict[words[0]] = words[1]
    for (img, folder) in val_img_dict.items():
        newpath = os.path.join(img_dir, folder)
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(newpath, exist_ok=True)
        src = os.path.join(img_dir, img)
        if os.path.exists(src):
            os.rename(src, os.path.join(newpath, img))
class Context():
    """Named bookkeeping container: constants, symbols, containers, and the
    variables read/written within this context."""

    def __init__(self, name):
        self.name = name     # context identifier
        self.constants = {}  # constant name -> value
        # Each instance gets fresh, independent collections.
        self.symbols, self.containers = [], []
        self.read_vars, self.written_vars = [], []
def norm(input, is_train, reuse=True, norm=None):
    """Apply instance norm, batch norm, or no normalization to `input` (TF1).

    Args:
        input: 4-D activation tensor; the [1, 2] moment axes imply NHWC
            layout — TODO confirm.
        is_train: bool (or tensor) controlling batch-norm training mode.
        reuse: whether to reuse variables in the created scope.
        norm: one of 'instance', 'batch', or None.
    """
    assert (norm in ['instance', 'batch', None])
    if (norm == 'instance'):
        with tf.variable_scope('instance_norm', reuse=reuse):
            eps = 1e-05
            # Per-sample, per-channel moments over the spatial axes.
            (mean, sigma) = tf.nn.moments(input, [1, 2], keep_dims=True)
            # NOTE(review): epsilon is added to sqrt(sigma) rather than inside
            # the sqrt (the usual sqrt(sigma + eps) form); kept as-is.
            normalized = ((input - mean) / (tf.sqrt(sigma) + eps))
            out = normalized
    elif (norm == 'batch'):
        with tf.variable_scope('batch_norm', reuse=reuse):
            out = tf.layers.batch_normalization(inputs=input, training=is_train, reuse=reuse)
    else:
        # norm is None: identity.
        out = input
    return out
def set_window_size_callback(window, cbfun):
    """Install a GLFW window-size callback, returning the previously installed
    Python callback (if any).

    A reference to the ctypes-wrapped callback is kept in the module-level
    repository so it is not garbage-collected while GLFW still holds the
    corresponding C function pointer.
    """
    # Use the window struct's memory address as a stable dict key.
    window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value
    if (window_addr in _window_size_callback_repository):
        previous_callback = _window_size_callback_repository[window_addr]
    else:
        previous_callback = None
    if (cbfun is None):
        # 0 (NULL) tells GLFW to remove any installed callback.
        cbfun = 0
    # Wrap the Python callable (or 0) into the C callback type.
    c_cbfun = _GLFWwindowsizefun(cbfun)
    # Store (python_callable, ctypes_wrapper) to keep both alive.
    _window_size_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetWindowSizeCallback(window, cbfun)
    # Return the previous Python callback unless it was the NULL sentinel.
    if ((previous_callback is not None) and (previous_callback[0] != 0)):
        return previous_callback[0]
def scatter(inputs, target_gpus, dim=0):
    """Scatter `inputs` across `target_gpus`, recursing through tuples, lists
    and dicts. DataContainer payloads use the custom Scatter; plain tensors
    use torch's autograd-aware scatter, or stay on CPU when
    target_gpus == [-1]."""

    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            if (target_gpus != [(- 1)]):
                # Normal multi-GPU scatter along `dim`.
                return OrigScatter.apply(target_gpus, None, dim, obj)
            else:
                # CPU-only mode.
                return Scatter.forward(target_gpus, obj)
        if isinstance(obj, DataContainer):
            if obj.cpu_only:
                return obj.data
            else:
                return Scatter.forward(target_gpus, obj.data)
        # Containers: scatter each element, then regroup per target device.
        if (isinstance(obj, tuple) and (len(obj) > 0)):
            return list(zip(*map(scatter_map, obj)))
        if (isinstance(obj, list) and (len(obj) > 0)):
            out = list(map(list, zip(*map(scatter_map, obj))))
            return out
        if (isinstance(obj, dict) and (len(obj) > 0)):
            out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
            return out
        # Non-container leaf: replicate the same object for every target.
        return [obj for targets in target_gpus]
    try:
        return scatter_map(inputs)
    finally:
        # Break the closure's reference cycle so scattered tensors are not
        # kept alive via the recursive function object.
        scatter_map = None
def mask_and(clip, other_clip):
    """Return `clip` with its frames replaced by the pixel-wise minimum
    ("and" of masks) against `other_clip`.

    `other_clip` may be an ImageClip (its image array is used), a raw
    numpy array, or any clip providing get_frame(t).
    """
    if isinstance(other_clip, ImageClip):
        other_clip = other_clip.img
    if isinstance(other_clip, np.ndarray):
        # Static image: the same array is combined with every frame.
        return clip.fl_image(lambda frame: np.minimum(frame, other_clip))
    # Moving clip: combine frame-by-frame at matching times.
    return clip.fl(lambda gf, t: np.minimum(gf(t), other_clip.get_frame(t)))
# NOTE(review): the decorator prefixes appear truncated — these are
# presumably `@pytest.mark.parametrize(...)`; confirm against the original file.
.parametrize('tau', [0.05])
.parametrize('input_size', [32])
.parametrize('output_size', [32])
def test_soft_sync(tau: float, input_size: int, output_size: int) -> None:
    """soft_sync must blend target params as tau * new + (1 - tau) * old."""
    module = torch.nn.Linear(input_size, output_size)
    targ_module = torch.nn.Linear(input_size, output_size)
    # Snapshot the target before syncing so the blend can be checked.
    original = copy.deepcopy(targ_module)
    soft_sync(targ_module, module, tau)
    module_params = module.parameters()
    targ_params = targ_module.parameters()
    original_params = original.parameters()
    for (p, targ_p, orig_p) in zip(module_params, targ_params, original_params):
        assert torch.allclose(((p * tau) + (orig_p * (1.0 - tau))), targ_p)
class lPCA(GlobalEstimator):
    """Intrinsic-dimension estimation from the PCA explained-variance
    spectrum, with several thresholding rules selected by `ver`:
    'FO' (Fukunaga-Olsen), 'Fan', 'maxgap', 'ratio', 'participation_ratio',
    'Kaiser', 'broken_stick'.

    Fix: `_maxgap` and `_brokenstick_distribution` were defined without
    `self` but invoked as bound methods, so `self` shifted into the data
    parameter and ver='maxgap' / 'broken_stick' crashed. They are now
    proper @staticmethods.
    """

    def __init__(self, ver='FO', alphaRatio=0.05, alphaFO=0.05, alphaFan=10, betaFan=0.8, PFan=0.95, verbose=True, fit_explained_variance=False):
        self.ver = ver
        self.alphaRatio = alphaRatio
        self.alphaFO = alphaFO
        self.alphaFan = alphaFan
        self.betaFan = betaFan
        self.PFan = PFan
        self.verbose = verbose
        # When True, `X` passed to fit() is already an explained-variance
        # spectrum rather than a data matrix.
        self.fit_explained_variance = fit_explained_variance

    def fit(self, X, y=None):
        """Estimate the dimension; sets dimension_, gap_ and is_fitted_."""
        if self.fit_explained_variance:
            X = check_array(X, ensure_2d=False, ensure_min_samples=2)
        else:
            X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
        (self.dimension_, self.gap_) = self._pcaLocalDimEst(X)
        self.is_fitted_ = True
        return self

    def _fit_once(self, X, y=None):
        """Single-shot variant of fit() with identical behavior."""
        if self.fit_explained_variance:
            X = check_array(X, ensure_2d=False, ensure_min_samples=2)
        else:
            X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
        (self.dimension_, self.gap_) = self._pcaLocalDimEst(X)
        self.is_fitted_ = True
        return self

    def _pcaLocalDimEst(self, X):
        """Dispatch to the estimator selected by self.ver."""
        if self.fit_explained_variance:
            explained_var = X
        else:
            pca = PCA().fit(X)
            self.explained_var_ = explained_var = pca.explained_variance_
        if (self.ver == 'FO'):
            return self._FO(explained_var)
        elif (self.ver == 'Fan'):
            return self._fan(explained_var)
        elif (self.ver == 'maxgap'):
            return self._maxgap(explained_var)
        elif (self.ver == 'ratio'):
            return self._ratio(explained_var)
        elif (self.ver == 'participation_ratio'):
            return self._participation_ratio(explained_var)
        elif (self.ver == 'Kaiser'):
            return self._Kaiser(explained_var)
        elif (self.ver == 'broken_stick'):
            return self._broken_stick(explained_var)

    def _FO(self, explained_var):
        """Count components above alphaFO * largest eigenvalue."""
        de = sum((explained_var > (self.alphaFO * explained_var[0])))
        gaps = (explained_var[:(- 1)] / explained_var[1:])
        return (de, gaps)

    @staticmethod
    def _maxgap(explained_var):
        """Dimension at the largest ratio between consecutive eigenvalues."""
        gaps = (explained_var[:(- 1)] / explained_var[1:])
        de = (np.nanargmax(gaps) + 1)
        return (de, gaps)

    def _ratio(self, explained_var):
        """Smallest number of components whose cumulative share exceeds
        1 - alphaRatio of the total variance."""
        sumexp = np.cumsum(explained_var)
        sumexp_norm = (sumexp / np.max(sumexp))
        de = (sum((sumexp_norm < self.alphaRatio)) + 1)
        gaps = (explained_var[:(- 1)] / explained_var[1:])
        return (de, gaps)

    def _participation_ratio(self, explained_var):
        """Continuous estimate (sum λ)^2 / sum λ^2."""
        PR = ((sum(explained_var) ** 2) / sum((explained_var ** 2)))
        de = PR
        gaps = (explained_var[:(- 1)] / explained_var[1:])
        return (de, gaps)

    def _fan(self, explained_var):
        """Fan et al. rule based on noise-corrected eigenvalue gaps."""
        r = np.where(((np.cumsum(explained_var) / sum(explained_var)) > self.PFan))[0][0]
        sigma = np.mean(explained_var[r:])
        # NOTE(review): in-place subtraction mutates the caller's array
        # (and hence self.explained_var_); preserved from the original.
        explained_var -= sigma
        gaps = (explained_var[:(- 1)] / explained_var[1:])
        de = (1 + np.min(np.concatenate((np.where((gaps > self.alphaFan))[0], np.where(((np.cumsum(explained_var) / sum(explained_var)) > self.betaFan))[0]))))
        return (de, gaps)

    def _Kaiser(self, explained_var):
        """Count components above the mean eigenvalue."""
        de = sum((explained_var > np.mean(explained_var)))
        gaps = (explained_var[:(- 1)] / explained_var[1:])
        return (de, gaps)

    @staticmethod
    def _brokenstick_distribution(dim):
        """Expected broken-stick proportions for `dim` pieces."""
        distr = np.zeros(dim)
        for i in range(dim):
            for j in range(i, dim):
                distr[i] = (distr[i] + (1 / (j + 1)))
            distr[i] = (distr[i] / dim)
        return distr

    def _broken_stick(self, explained_var):
        """First component whose normalized variance drops below the
        broken-stick expectation."""
        bs = self._brokenstick_distribution(dim=len(explained_var))
        gaps = (explained_var[:(- 1)] / explained_var[1:])
        de = 0
        explained_var_norm = (explained_var / np.sum(explained_var))
        for i in range(len(explained_var)):
            if (bs[i] > explained_var_norm[i]):
                de = (i + 1)
                break
        return (de, gaps)
# NOTE(review): the decorator prefix appears truncated — presumably
# `@pytest.mark.parametrize(...)`; confirm against the original file.
.parametrize('name', ['foo', '_foo'])
def test_valid_identifier_names(name):
    """A bare valid identifier must parse as an Identifier token."""
    t = sqlparse.parse(name)[0].tokens
    assert isinstance(t[0], sqlparse.sql.Identifier)
class Trainer(object):
    """fastNLP training driver: wires a model, loss, optimizer, data
    iterator, callbacks and an optional dev-set Tester into one loop.

    Construction validates all arguments, builds the data iterator,
    moves the model to the requested device and (optionally) runs a
    fast sanity check of the forward/loss/metric pipeline.
    """

    def __init__(self, train_data, model, optimizer=None, loss=None, batch_size=32, sampler=None, drop_last=False, update_every=1, num_workers=0, n_epochs=10, print_every=5, dev_data=None, metrics=None, metric_key=None, validate_every=(- 1), save_path=None, use_tqdm=True, device=None, callbacks=None, check_code_level=0, **kwargs):
        super(Trainer, self).__init__()
        if (not isinstance(model, nn.Module)):
            raise TypeError(f'The type of model must be torch.nn.Module, got {type(model)}.')
        # metrics and dev_data must be provided together.
        if ((not metrics) and (dev_data is not None)):
            raise ValueError('No metric for dev_data evaluation.')
        if (metrics and (dev_data is None)):
            raise ValueError('No dev_data for evaluations, pass dev_data or set metrics to None. ')
        assert (update_every >= 1), 'update_every must be no less than 1.'
        self.update_every = int(update_every)
        if (not ((save_path is None) or isinstance(save_path, str))):
            raise ValueError('save_path can only be None or `str`.')
        metrics = _prepare_metrics(metrics)
        # A leading '+'/'-' on metric_key selects maximize/minimize.
        self.increase_better = True
        if (metric_key is not None):
            self.increase_better = (False if (metric_key[0] == '-') else True)
            self.metric_key = (metric_key[1:] if ((metric_key[0] == '+') or (metric_key[0] == '-')) else metric_key)
        else:
            self.metric_key = None
        losser = _prepare_losser(loss)
        # BatchIter already controls batching; these options are meaningless.
        if isinstance(train_data, BatchIter):
            if (sampler is not None):
                warnings.warn('sampler is ignored when train_data is a BatchIter.')
            if (num_workers > 0):
                warnings.warn('num_workers is ignored when train_data is BatchIter.')
            if drop_last:
                warnings.warn('drop_last is ignored when train_data is BatchIter.')
        # Distributed models need a DistributedSampler and disallow saving.
        if isinstance(model, nn.parallel.DistributedDataParallel):
            if (device is not None):
                warnings.warn('device is ignored when model is nn.parallel.DistributedDataParallel.')
                device = None
            if (sampler is None):
                sampler = torch.utils.data.DistributedSampler(train_data)
            elif (not isinstance(sampler, torch.utils.data.DistributedSampler)):
                raise TypeError('When using nn.parallel.DistributedDataParallel, sampler must be None or torch.utils.data.DistributedSampler.')
            if save_path:
                raise RuntimeError('Saving model in Distributed situation is not allowed right now.')
        else:
            if ((sampler is not None) and (not isinstance(sampler, (Sampler, torch.utils.data.Sampler)))):
                raise ValueError(f"The type of sampler should be fastNLP.BaseSampler or pytorch's Sampler, got {type(sampler)}")
            if (sampler is None):
                sampler = RandomSampler()
            elif hasattr(sampler, 'set_batch_size'):
                sampler.set_batch_size(batch_size)
        if isinstance(train_data, DataSet):
            self.data_iterator = DataSetIter(dataset=train_data, batch_size=batch_size, sampler=sampler, num_workers=num_workers, drop_last=drop_last)
        elif isinstance(train_data, BatchIter):
            self.data_iterator = train_data
            train_data = train_data.dataset
            # Code checking needs random access to the dataset; disable it.
            check_code_level = (- 1)
        else:
            raise TypeError('train_data type {} not support'.format(type(train_data)))
        model.train()
        self.model = _move_model_to_device(model, device=device)
        # DataParallel/DDP wrap the real module; forward lives on .module.
        if _model_contains_inner_module(self.model):
            self._forward_func = self.model.module.forward
        else:
            self._forward_func = self.model.forward
        if (check_code_level > (- 1)):
            dev_dataset = dev_data
            if isinstance(dev_data, BatchIter):
                dev_dataset = None
                warnings.warn('dev_data is of BatchIter type, ignore validation checking.')
            # Keep the sanity-check batch small, but large enough that each
            # DataParallel replica receives at least one sample.
            check_batch_size = min(batch_size, DEFAULT_CHECK_BATCH_SIZE)
            if isinstance(self.model, nn.DataParallel):
                _num_devices = len(self.model.device_ids)
                if ((batch_size // _num_devices) > 1):
                    check_batch_size = max((len(self.model.device_ids) * 2), check_batch_size)
                else:
                    check_batch_size = max(len(self.model.device_ids), check_batch_size)
            _check_code(dataset=train_data, model=self.model, losser=losser, forward_func=self._forward_func, metrics=metrics, dev_data=dev_dataset, metric_key=self.metric_key, check_level=check_code_level, batch_size=check_batch_size)
        self.train_data = train_data
        self.dev_data = dev_data
        self.losser = losser
        self.metrics = metrics
        self.n_epochs = int(n_epochs)
        self.batch_size = int(batch_size)
        self.save_path = save_path
        self.print_every = int(print_every)
        # validate_every == 0 is normalized to -1 (validate once per epoch).
        self.validate_every = (int(validate_every) if (validate_every != 0) else (- 1))
        self.best_metric_indicator = None
        self.best_dev_epoch = None
        self.best_dev_step = None
        self.best_dev_perf = None
        self.n_steps = (len(self.data_iterator) * self.n_epochs)
        if isinstance(optimizer, torch.optim.Optimizer):
            self.optimizer = optimizer
        elif isinstance(optimizer, Optimizer):
            # fastNLP Optimizer wrapper: materialize a torch optimizer.
            self.optimizer = optimizer.construct_from_pytorch(self.model.parameters())
        elif (optimizer is None):
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.004)
        else:
            raise TypeError('optimizer can only be torch.optim.Optimizer type, not {}.'.format(type(optimizer)))
        self.logger = logger
        self.use_tqdm = use_tqdm
        if ('test_use_tqdm' in kwargs):
            self.test_use_tqdm = kwargs.get('test_use_tqdm')
        else:
            self.test_use_tqdm = self.use_tqdm
        self.pbar = None
        self.print_every = abs(self.print_every)
        self.kwargs = kwargs
        if (self.dev_data is not None):
            self.tester = Tester(model=self.model, data=self.dev_data, metrics=self.metrics, batch_size=kwargs.get('dev_batch_size', self.batch_size), device=None, verbose=0, use_tqdm=self.test_use_tqdm, sampler=kwargs.get('test_sampler', None))
        self.start_time = None
        if isinstance(callbacks, Callback):
            callbacks = [callbacks]
        self.callback_manager = CallbackManager(env={'trainer': self}, callbacks=callbacks)

    def train(self, load_best_model=True, on_exception='auto'):
        """Run the full training loop.

        load_best_model: reload the best dev checkpoint after training.
        on_exception: 'auto' re-raises everything except CallbackException
        and KeyboardInterrupt; 'raise' re-raises everything.
        Returns a dict with 'seconds' and, when dev data exists,
        'best_eval'/'best_epoch'/'best_step'.
        """
        results = {}
        if (self.n_epochs <= 0):
            self.logger.info(f'training epoch is {self.n_epochs}, nothing was done.')
            results['seconds'] = 0.0
            return results
        try:
            self._model_device = _get_model_device(self.model)
            self._mode(self.model, is_test=False)
            self._load_best_model = load_best_model
            self.start_time = str(datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f'))
            start_time = time.time()
            self.logger.info(('training epochs started ' + self.start_time))
            self.step = 0
            self.epoch = 1
            try:
                self.callback_manager.on_train_begin()
                self._train()
                self.callback_manager.on_train_end()
            except BaseException as e:
                self.callback_manager.on_exception(e)
                if (on_exception == 'auto'):
                    if (not isinstance(e, (CallbackException, KeyboardInterrupt))):
                        raise e
                elif (on_exception == 'raise'):
                    raise e
            # Restore the best checkpoint seen on dev, if requested.
            if ((self.dev_data is not None) and (self.best_dev_perf is not None) and load_best_model):
                model_name = ('best_' + '_'.join([self.model.__class__.__name__, self.metric_key, self.start_time]))
                load_succeed = self._load_model(self.model, model_name)
                if load_succeed:
                    self.logger.info('Reloaded the best model.')
                else:
                    self.logger.info('Fail to reload best model.')
            # Without dev data there is no "best" model: save the final one.
            if ((self.dev_data is None) and (self.save_path is not None)):
                model_name = '_'.join([self.model.__class__.__name__, self.start_time])
                self._save_model(self.model, model_name)
        finally:
            if ((self.dev_data is not None) and (self.best_dev_perf is not None)):
                self.logger.info('\nIn Epoch:{}/Step:{}, got best dev performance:'.format(self.best_dev_epoch, self.best_dev_step))
                self.logger.info(self.tester._format_eval_results(self.best_dev_perf))
                results['best_eval'] = self.best_dev_perf
                results['best_epoch'] = self.best_dev_epoch
                results['best_step'] = self.best_dev_step
        results['seconds'] = round((time.time() - start_time), 2)
        return results

    def _train(self):
        """Inner epoch/batch loop with progress reporting and validation."""
        if (not self.use_tqdm):
            from .utils import _pseudo_tqdm as inner_tqdm
        else:
            inner_tqdm = tqdm
        start = time.time()
        with inner_tqdm(total=self.n_steps, postfix='loss:{0:<6.5f}', leave=False, dynamic_ncols=True, initial=self.step) as pbar:
            self.pbar = pbar
            avg_loss = 0
            self.batch_per_epoch = self.data_iterator.num_batches
            for epoch in range(self.epoch, (self.n_epochs + 1)):
                self.epoch = epoch
                pbar.set_description_str(desc='Epoch {}/{}'.format(epoch, self.n_epochs))
                self.callback_manager.on_epoch_begin()
                for (batch_x, batch_y) in self.data_iterator:
                    self.step += 1
                    _move_dict_value_to_device(batch_x, batch_y, device=self._model_device)
                    indices = self.data_iterator.get_batch_indices()
                    self.callback_manager.on_batch_begin(batch_x, batch_y, indices)
                    prediction = self._data_forward(self.model, batch_x)
                    self.callback_manager.on_loss_begin(batch_y, prediction)
                    # Scale by update_every so accumulated gradients average out.
                    loss = self._compute_loss(prediction, batch_y).mean()
                    loss = (loss / self.update_every)
                    avg_loss += loss.item()
                    self.callback_manager.on_backward_begin(loss)
                    self._grad_backward(loss)
                    self.callback_manager.on_backward_end()
                    self._update()
                    self.callback_manager.on_step_end()
                    if ((self.step % self.print_every) == 0):
                        avg_loss = (float(avg_loss) / self.print_every)
                        if self.use_tqdm:
                            print_output = 'loss:{:<6.5f}'.format(avg_loss)
                            pbar.update(self.print_every)
                        else:
                            end = time.time()
                            diff = timedelta(seconds=round((end - start)))
                            print_output = '[epoch: {:>3} step: {:>4}] train loss: {:>4.6} time: {}'.format(epoch, self.step, avg_loss, diff)
                        pbar.set_postfix_str(print_output)
                        avg_loss = 0
                    self.callback_manager.on_batch_end()
                    # validate_every > 0: validate on a fixed step interval.
                    if (((self.validate_every > 0) and ((self.step % self.validate_every) == 0)) and (self.dev_data is not None)):
                        eval_res = self._do_validation(epoch=epoch, step=self.step)
                        eval_str = 'Evaluation on dev at Epoch {}/{}. Step:{}/{}: '.format(epoch, self.n_epochs, self.step, self.n_steps)
                        self.logger.info(eval_str)
                        self.logger.info((self.tester._format_eval_results(eval_res) + '\n'))
                # validate_every < 0: validate once at the end of each epoch.
                if ((self.validate_every < 0) and (self.dev_data is not None)):
                    eval_res = self._do_validation(epoch=epoch, step=self.step)
                    eval_str = 'Evaluation on dev at Epoch {}/{}. Step:{}/{}: '.format(epoch, self.n_epochs, self.step, self.n_steps)
                    self.logger.info(eval_str)
                    self.logger.info((self.tester._format_eval_results(eval_res) + '\n'))
                self.callback_manager.on_epoch_end()
            pbar.close()
            self.pbar = None

    def _do_validation(self, epoch, step):
        """Evaluate on dev data; checkpoint/remember the model when improved."""
        self.callback_manager.on_valid_begin()
        res = self.tester.test()
        is_better_eval = False
        if self._better_eval_result(res):
            if (self.save_path is not None):
                self._save_model(self.model, ('best_' + '_'.join([self.model.__class__.__name__, self.metric_key, self.start_time])))
            elif self._load_best_model:
                # No save path: keep an in-memory CPU copy of the weights.
                self._best_model_states = {name: param.cpu().clone() for (name, param) in self.model.state_dict().items()}
            self.best_dev_perf = res
            self.best_dev_epoch = epoch
            self.best_dev_step = step
            is_better_eval = True
        self.callback_manager.on_valid_end(res, self.metric_key, self.optimizer, is_better_eval)
        return res

    def _mode(self, model, is_test=False):
        """Switch the model between eval (is_test=True) and train mode."""
        if is_test:
            model.eval()
        else:
            model.train()

    def _update(self):
        """Step the optimizer every `update_every` accumulated batches."""
        if ((self.step % self.update_every) == 0):
            self.optimizer.step()

    def _data_forward(self, network, x):
        """Call the network with only the arguments its forward() accepts."""
        x = _build_args(self._forward_func, **x)
        y = network(**x)
        if (not isinstance(y, dict)):
            raise TypeError(f'The return value of {_get_func_signature(self._forward_func)} should be dict, got {type(y)}.')
        return y

    def _grad_backward(self, loss):
        """Backpropagate; gradients are zeroed at the start of each
        accumulation window, not after every batch."""
        if (((self.step - 1) % self.update_every) == 0):
            self.model.zero_grad()
        loss.backward()

    def _compute_loss(self, predict, truth):
        """Delegate loss computation to the prepared losser."""
        return self.losser(predict, truth)

    def _save_model(self, model, model_name, only_param=False):
        """Save state_dict (only_param=True) or the whole model to save_path."""
        if (self.save_path is not None):
            model_path = os.path.join(self.save_path, model_name)
            if (not os.path.exists(self.save_path)):
                os.makedirs(self.save_path, exist_ok=True)
            if _model_contains_inner_module(model):
                model = model.module
            if only_param:
                state_dict = model.state_dict()
                for key in state_dict:
                    state_dict[key] = state_dict[key].cpu()
                torch.save(state_dict, model_path)
            else:
                # Move to CPU for a device-independent checkpoint, then back.
                model.cpu()
                torch.save(model, model_path)
                model.to(self._model_device)

    def _load_model(self, model, model_name, only_param=False):
        """Load weights from disk or from the in-memory best snapshot.
        Returns False when nothing was available to load."""
        if (self.save_path is not None):
            model_path = os.path.join(self.save_path, model_name)
            if only_param:
                states = torch.load(model_path)
            else:
                states = torch.load(model_path).state_dict()
            if _model_contains_inner_module(model):
                model.module.load_state_dict(states)
            else:
                model.load_state_dict(states)
        elif hasattr(self, '_best_model_states'):
            model.load_state_dict(self._best_model_states)
        else:
            return False
        return True

    def _better_eval_result(self, metrics):
        """Compare the new dev result against the best so far, respecting
        the maximize/minimize direction chosen at construction."""
        (indicator, indicator_val) = _check_eval_results(metrics, self.metric_key, self.metrics)
        if (self.metric_key is None):
            self.metric_key = indicator
        is_better = True
        if (self.best_metric_indicator is None):
            # First validation result is the baseline.
            self.best_metric_indicator = indicator_val
        elif (self.increase_better is True):
            if (indicator_val > self.best_metric_indicator):
                self.best_metric_indicator = indicator_val
            else:
                is_better = False
        elif (indicator_val < self.best_metric_indicator):
            self.best_metric_indicator = indicator_val
        else:
            is_better = False
        return is_better

    def is_master(self):
        """Single-process trainer is always the master."""
        return True
def test_config_namespace_copy(config_ns):
    """A deep copy must detach nested values from the source namespace."""
    clone = config_ns.deepcopy()
    clone.a.b.param1 = 2
    # Mutating the clone must not leak into the original.
    assert clone.a.b.param1 != config_ns.a.b.param1
def register(*args, **kwargs):
    """Run the registration worker in a daemon subprocess and return its
    result, re-raising any exception the worker sent back.

    The child is terminated if anything goes wrong while waiting.
    """
    result_queue = ctx.Queue()
    proc = ctx.Process(target=_registration_worker, args=[result_queue, args, kwargs], daemon=True)
    proc.start()
    try:
        outcome = result_queue.get()
        # Workers report failures by sending the exception object itself.
        if isinstance(outcome, BaseException):
            raise outcome
        proc.join()
    except BaseException as e:
        proc.terminate()
        proc.join()
        raise e
    return outcome
# NOTE(review): the decorator appears truncated — likely a registration
# decorator such as `@register_toolkit()`; confirm against the original file.
_toolkit()
class Shopify(FunctionToolkit):
    """Function toolkit bundling the Shopify store-management tools
    (products, orders, customers, analytics) under one registry entry."""
    name_for_human = 'Shopify'
    description_for_human = 'Toolkit for managing Shopify stores.'
    name_for_model = 'Shopify'
    description_for_model = 'A comprehensive toolkit for managing Shopify stores, including product, order, and customer management, as well as store analytics.'
    # Tool classes exposed by this toolkit, in registration order.
    tool_classes = [ShopifySearchProducts, ShopifyGetProductDetails, ShopifyCreateProduct, ShopifyUpdateProduct, ShopifyDeleteProduct, ShopifySearchOrders, ShopifyGetOrderDetails, ShopifyDraftOrder, ShopifyManageOrder, ShopifySearchCustomers, ShopifyGetStoreAnalytics]
class decoder4(nn.Module):
    """Decoder for relu4_1-level features: maps (N, 512, H, W) activations
    back to an RGB image of size (N, 3, 8H, 8W) through three nearest-
    neighbour x2 upsamplings.

    Attribute names match the original checkpoint layout, so pretrained
    state dicts load unchanged.
    """

    def __init__(self):
        super(decoder4, self).__init__()
        # Stage 1: 512 -> 256, then upsample x2.
        self.reflecPad11 = nn.ReflectionPad2d(1)
        self.conv11 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=0)
        self.relu11 = nn.ReLU(inplace=True)
        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        # Stage 2: three 256 -> 256 refinements, then 256 -> 128 and upsample.
        self.reflecPad12 = nn.ReflectionPad2d(1)
        self.conv12 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=0)
        self.relu12 = nn.ReLU(inplace=True)
        self.reflecPad13 = nn.ReflectionPad2d(1)
        self.conv13 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=0)
        self.relu13 = nn.ReLU(inplace=True)
        self.reflecPad14 = nn.ReflectionPad2d(1)
        self.conv14 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=0)
        self.relu14 = nn.ReLU(inplace=True)
        self.reflecPad15 = nn.ReflectionPad2d(1)
        self.conv15 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=0)
        self.relu15 = nn.ReLU(inplace=True)
        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        # Stage 3: 128 -> 128 -> 64, then upsample.
        self.reflecPad16 = nn.ReflectionPad2d(1)
        self.conv16 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=0)
        self.relu16 = nn.ReLU(inplace=True)
        self.reflecPad17 = nn.ReflectionPad2d(1)
        self.conv17 = nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=0)
        self.relu17 = nn.ReLU(inplace=True)
        self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2)
        # Output head: 64 -> 64 -> 3 (no activation on the final conv).
        self.reflecPad18 = nn.ReflectionPad2d(1)
        self.conv18 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)
        self.relu18 = nn.ReLU(inplace=True)
        self.reflecPad19 = nn.ReflectionPad2d(1)
        self.conv19 = nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        """Apply the decoding layers in checkpoint order."""
        out = x
        for layer_name in ('reflecPad11', 'conv11', 'relu11', 'unpool',
                           'reflecPad12', 'conv12', 'relu12',
                           'reflecPad13', 'conv13', 'relu13',
                           'reflecPad14', 'conv14', 'relu14',
                           'reflecPad15', 'conv15', 'relu15', 'unpool2',
                           'reflecPad16', 'conv16', 'relu16',
                           'reflecPad17', 'conv17', 'relu17', 'unpool3',
                           'reflecPad18', 'conv18', 'relu18',
                           'reflecPad19', 'conv19'):
            out = getattr(self, layer_name)(out)
        return out
class EigenQuaternionPrinter():
    """gdb pretty-printer for Eigen::Quaternion<T>.

    Fix: the child iterator only defined `next`, which is the Python 2
    iterator protocol; under a Python 3 gdb it would raise TypeError.
    It now defines `__next__` and keeps `next` as a Python 2 alias.
    """

    def __init__(self, val):
        # Unwrap references and typedefs to reach the concrete quaternion type.
        type = val.type
        if (type.code == gdb.TYPE_CODE_REF):
            type = type.target()
        self.type = type.unqualified().strip_typedefs()
        self.innerType = self.type.template_argument(0)
        self.val = val
        # Pointer to the coefficient storage, cast to the scalar type.
        self.data = self.val['m_coeffs']['m_storage']['m_data']['array']
        self.data = self.data.cast(self.innerType.pointer())

    class _iterator():
        """Iterate the four coefficients in Eigen storage order (x, y, z, w)."""

        def __init__(self, dataPtr):
            self.dataPtr = dataPtr
            self.currentElement = 0
            self.elementNames = ['x', 'y', 'z', 'w']

        def __iter__(self):
            return self

        def __next__(self):
            element = self.currentElement
            if (self.currentElement >= 4):
                raise StopIteration
            self.currentElement = (self.currentElement + 1)
            item = self.dataPtr.dereference()
            self.dataPtr = (self.dataPtr + 1)
            return (('[%s]' % (self.elementNames[element],)), item)

        # Python 2 compatibility for older gdb builds.
        next = __next__

    def children(self):
        """gdb protocol: yield (name, value) pairs for the coefficients."""
        return self._iterator(self.data)

    def to_string(self):
        """gdb protocol: one-line summary of the quaternion."""
        return ('Eigen::Quaternion<%s> (data ptr: %s)' % (self.innerType, self.data))
def get_rgb_data(rgb_dir):
    """Build a noisy-data loader over the RGB images in `rgb_dir`.

    Raises AssertionError when the directory does not exist.
    """
    assert os.path.exists(rgb_dir)
    loader = DataLoader_NoisyData(rgb_dir)
    return loader
class Conv2dSame(nn.Conv2d):
    """Conv2d with TensorFlow-style 'SAME' padding: the output spatial size
    is ceil(input / stride), with asymmetric zero padding applied on the fly.

    The `padding` argument is accepted for signature compatibility but the
    layer always pads dynamically (the stored padding is 0).
    Weights are Xavier-uniform initialized.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(Conv2dSame, self).__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        nn.init.xavier_uniform_(self.weight)

    def forward(self, x):
        """Pad `x` so the convolution produces ceil(size / stride) outputs."""
        in_h, in_w = x.size()[-2:]
        k_h, k_w = self.weight.size()[-2:]
        s_h, s_w = self.stride
        d_h, d_w = self.dilation
        # Ceil division without math.ceil.
        out_h = -(-in_h // s_h)
        out_w = -(-in_w // s_w)
        pad_h = max((out_h - 1) * s_h + (k_h - 1) * d_h + 1 - in_h, 0)
        pad_w = max((out_w - 1) * s_w + (k_w - 1) * d_w + 1 - in_w, 0)
        if pad_h > 0 or pad_w > 0:
            left = pad_w // 2
            top = pad_h // 2
            # Extra pixel (odd padding) goes to the right/bottom.
            x = F.pad(x, [left, pad_w - left, top, pad_h - top])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
def load_toy_cancer():
    """Return (train, test) Databases for the classic 'smokers' toy task:
    predict cancer(X) from smoking and friendship facts."""
    # Both splits share the same mode declarations.
    shared_modes = ['friends(+Person,-Person).', 'friends(-Person,+Person).', 'smokes(+Person).', 'cancer(+Person).']

    train = Database()
    train.modes = list(shared_modes)
    train.pos = ['cancer(alice).', 'cancer(bob).', 'cancer(chuck).', 'cancer(fred).']
    train.neg = ['cancer(dan).', 'cancer(earl).']
    train.facts = ['friends(alice,bob).', 'friends(alice,fred).', 'friends(chuck,bob).', 'friends(chuck,fred).', 'friends(dan,bob).', 'friends(earl,bob).', 'friends(bob,alice).', 'friends(fred,alice).', 'friends(bob,chuck).', 'friends(fred,chuck).', 'friends(bob,dan).', 'friends(bob,earl).', 'smokes(alice).', 'smokes(chuck).', 'smokes(bob).']

    test = Database()
    test.modes = list(shared_modes)
    test.pos = ['cancer(zod).', 'cancer(xena).', 'cancer(yoda).']
    test.neg = ['cancer(voldemort).', 'cancer(watson).']
    test.facts = ['friends(zod,xena).', 'friends(xena,watson).', 'friends(watson,voldemort).', 'friends(voldemort,yoda).', 'friends(yoda,zod).', 'friends(xena,zod).', 'friends(watson,xena).', 'friends(voldemort,watson).', 'friends(yoda,voldemort).', 'friends(zod,yoda).', 'smokes(zod).', 'smokes(xena).', 'smokes(yoda).']

    return (train, test)
def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
    """Export a TensorFlow transformers pipeline to ONNX via keras2onnx.

    nlp: pipeline whose Keras model will be exported.
    opset: target ONNX opset version.
    output: destination path for the .onnx file.
    Raises Exception when TensorFlow or keras2onnx is unavailable.
    """
    if (not is_tf_available()):
        raise Exception('Cannot convert because TF is not installed. Please install tensorflow first.')
    print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")
    try:
        import tensorflow as tf
        from keras2onnx import __version__ as k2ov
        from keras2onnx import convert_keras, save_model
        print(f'Using framework TensorFlow: {tf.version.VERSION}, keras2onnx: {k2ov}')
        (input_names, output_names, dynamic_axes, tokens) = infer_shapes(nlp, 'tf')
        # Forward once so Keras builds/traces the model before conversion.
        nlp.model.predict(tokens.data)
        onnx_model = convert_keras(nlp.model, nlp.model.name, target_opset=opset)
        save_model(onnx_model, output.as_posix())
    except ImportError as e:
        raise Exception(f'Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first.')
def logit(x, is_training=True, update_batch_stats=True, stochastic=True, seed=1234):
    """Thin wrapper delegating to cnn.logit with identical arguments."""
    return cnn.logit(
        x,
        is_training=is_training,
        update_batch_stats=update_batch_stats,
        stochastic=stochastic,
        seed=seed,
    )
# NOTE(review): upstream this class carries a `@dataclass` decorator (the
# bare annotated fields rely on it for __init__/__post_init__); the decorator
# appears to have been lost in extraction — confirm against the original file.
class TrainingSummary():
    """Collects everything needed to render a model card: provenance
    (finetuned_from, datasets, tasks), evaluation results and
    hyperparameters, plus helpers to emit Hub metadata and markdown."""
    model_name: str
    language: Optional[Union[(str, List[str])]] = None
    license: Optional[str] = None
    tags: Optional[Union[(str, List[str])]] = None
    finetuned_from: Optional[str] = None
    tasks: Optional[Union[(str, List[str])]] = None
    dataset: Optional[Union[(str, List[str])]] = None
    dataset_tags: Optional[Union[(str, List[str])]] = None
    dataset_args: Optional[Union[(str, List[str])]] = None
    dataset_metadata: Optional[Dict[(str, Any)]] = None
    eval_results: Optional[Dict[(str, float)]] = None
    eval_lines: Optional[List[str]] = None
    hyperparameters: Optional[Dict[(str, Any)]] = None
    source: Optional[str] = 'trainer'

    def __post_init__(self):
        """Best-effort: infer the license from the Hub tags of the base model."""
        if ((self.license is None) and (not is_offline_mode()) and (self.finetuned_from is not None) and (len(self.finetuned_from) > 0)):
            try:
                info = model_info(self.finetuned_from)
                for tag in info.tags:
                    if tag.startswith('license:'):
                        self.license = tag[8:]
            except (requests.exceptions.HTTPError, HFValidationError):
                # Offline/invalid repo id: leave the license unset.
                pass

    def create_model_index(self, metric_mapping):
        """Build the Hub `model-index` structure pairing every task with
        every dataset and attaching the evaluation metrics."""
        model_index = {'name': self.model_name}
        dataset_names = _listify(self.dataset)
        dataset_tags = _listify(self.dataset_tags)
        dataset_args = _listify(self.dataset_args)
        dataset_metadata = _listify(self.dataset_metadata)
        # Pad args so zip() below does not silently drop datasets.
        if (len(dataset_args) < len(dataset_tags)):
            dataset_args = (dataset_args + ([None] * (len(dataset_tags) - len(dataset_args))))
        dataset_mapping = dict(zip(dataset_tags, dataset_names))
        dataset_arg_mapping = dict(zip(dataset_tags, dataset_args))
        dataset_metadata_mapping = dict(zip(dataset_tags, dataset_metadata))
        task_mapping = {task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if (task in TASK_TAG_TO_NAME_MAPPING)}
        model_index['results'] = []
        if ((len(task_mapping) == 0) and (len(dataset_mapping) == 0)):
            return [model_index]
        # Use a single None entry so the cartesian product below still runs.
        if (len(task_mapping) == 0):
            task_mapping = {None: None}
        if (len(dataset_mapping) == 0):
            dataset_mapping = {None: None}
        all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping]
        for (task_tag, ds_tag) in all_possibilities:
            result = {}
            if (task_tag is not None):
                result['task'] = {'name': task_mapping[task_tag], 'type': task_tag}
            if (ds_tag is not None):
                metadata = dataset_metadata_mapping.get(ds_tag, {})
                result['dataset'] = {'name': dataset_mapping[ds_tag], 'type': ds_tag, **metadata}
                if (dataset_arg_mapping[ds_tag] is not None):
                    result['dataset']['args'] = dataset_arg_mapping[ds_tag]
            if (len(metric_mapping) > 0):
                result['metrics'] = []
                for (metric_tag, metric_name) in metric_mapping.items():
                    result['metrics'].append({'name': metric_name, 'type': metric_tag, 'value': self.eval_results[metric_name]})
            # The Hub validates model-index entries; incomplete ones are dropped.
            if (('task' in result) and ('dataset' in result) and ('metrics' in result)):
                model_index['results'].append(result)
            else:
                logger.info(f'''Dropping the following result as it does not have all the necessary fields:
{result}''')
        return [model_index]

    def create_metadata(self):
        """Assemble the YAML front-matter dict for the model card."""
        metric_mapping = infer_metric_tags_from_eval_results(self.eval_results)
        metadata = {}
        metadata = _insert_values_as_list(metadata, 'language', self.language)
        metadata = _insert_value(metadata, 'license', self.license)
        metadata = _insert_values_as_list(metadata, 'tags', self.tags)
        metadata = _insert_values_as_list(metadata, 'datasets', self.dataset_tags)
        metadata = _insert_values_as_list(metadata, 'metrics', list(metric_mapping.keys()))
        metadata['model-index'] = self.create_model_index(metric_mapping)
        return metadata

    def to_model_card(self):
        """Render the full markdown model card (front matter + sections)."""
        model_card = ''
        metadata = yaml.dump(self.create_metadata(), sort_keys=False)
        if (len(metadata) > 0):
            model_card = f'''---
{metadata}---
'''
        if (self.source == 'trainer'):
            model_card += AUTOGENERATED_TRAINER_COMMENT
        else:
            model_card += AUTOGENERATED_KERAS_COMMENT
        model_card += f'''
# {self.model_name}

'''
        if (self.finetuned_from is None):
            model_card += 'This model was trained from scratch on '
        else:
            # NOTE(review): the markdown link target looks truncated here
            # (upstream points at the Hub URL of the base model); confirm
            # against the original file before relying on this output.
            model_card += f'This model is a fine-tuned version of [{self.finetuned_from}]( on '
        if (self.dataset is None):
            model_card += 'an unknown dataset.'
        elif isinstance(self.dataset, str):
            model_card += f'the {self.dataset} dataset.'
        elif (isinstance(self.dataset, (tuple, list)) and (len(self.dataset) == 1)):
            model_card += f'the {self.dataset[0]} dataset.'
        else:
            model_card += (', '.join([f'the {ds}' for ds in self.dataset[:(- 1)]]) + f' and the {self.dataset[(- 1)]} datasets.')
        if (self.eval_results is not None):
            model_card += '\nIt achieves the following results on the evaluation set:\n'
            model_card += '\n'.join([f'- {name}: {_maybe_round(value)}' for (name, value) in self.eval_results.items()])
            model_card += '\n'
        model_card += '\n## Model description\n\nMore information needed\n'
        model_card += '\n## Intended uses & limitations\n\nMore information needed\n'
        model_card += '\n## Training and evaluation data\n\nMore information needed\n'
        model_card += '\n## Training procedure\n'
        model_card += '\n### Training hyperparameters\n'
        if (self.hyperparameters is not None):
            model_card += '\nThe following hyperparameters were used during training:\n'
            model_card += '\n'.join([f'- {name}: {value}' for (name, value) in self.hyperparameters.items()])
            model_card += '\n'
        else:
            model_card += '\nMore information needed\n'
        if (self.eval_lines is not None):
            model_card += '\n### Training results\n\n'
            model_card += make_markdown_table(self.eval_lines)
            model_card += '\n'
        model_card += '\n### Framework versions\n\n'
        model_card += f'''- Transformers {__version__}
'''
        if ((self.source == 'trainer') and is_torch_available()):
            import torch
            model_card += f'''- Pytorch {torch.__version__}
'''
        elif ((self.source == 'keras') and is_tf_available()):
            import tensorflow as tf
            model_card += f'''- TensorFlow {tf.__version__}
'''
        if is_datasets_available():
            import datasets
            model_card += f'''- Datasets {datasets.__version__}
'''
        if is_tokenizers_available():
            import tokenizers
            model_card += f'''- Tokenizers {tokenizers.__version__}
'''
        return model_card

    # NOTE(review): called as a `cls(...)` factory — upstream this is a
    # @classmethod; the decorator appears lost in extraction. Confirm.
    def from_trainer(cls, trainer, language=None, license=None, tags=None, model_name=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset_metadata=None, dataset=None, dataset_args=None):
        """Build a TrainingSummary from a Trainer, inferring dataset tags,
        base checkpoint, tasks and hyperparameters where not given."""
        one_dataset = (trainer.eval_dataset if (trainer.eval_dataset is not None) else trainer.train_dataset)
        if (is_hf_dataset(one_dataset) and ((dataset_tags is None) or (dataset_args is None) or (dataset_metadata is None))):
            default_tag = one_dataset.builder_name
            # Generic loaders (csv/json/...) say nothing about the dataset.
            if (default_tag not in ['csv', 'json', 'pandas', 'parquet', 'text']):
                if (dataset_metadata is None):
                    dataset_metadata = [{'config': one_dataset.config_name, 'split': str(one_dataset.split)}]
                if (dataset_tags is None):
                    dataset_tags = [default_tag]
                if (dataset_args is None):
                    dataset_args = [one_dataset.config_name]
        if ((dataset is None) and (dataset_tags is not None)):
            dataset = dataset_tags
        # A local path is not a reusable base-checkpoint reference.
        if ((finetuned_from is None) and hasattr(trainer.model.config, '_name_or_path') and (not os.path.isdir(trainer.model.config._name_or_path))):
            finetuned_from = trainer.model.config._name_or_path
        if (tasks is None):
            model_class_name = trainer.model.__class__.__name__
            for (task, mapping) in TASK_MAPPING.items():
                if (model_class_name in _get_mapping_values(mapping)):
                    tasks = task
        if (model_name is None):
            model_name = Path(trainer.args.output_dir).name
            if (len(model_name) == 0):
                model_name = finetuned_from
        # Always carry the generated_from_trainer tag exactly once.
        if (tags is None):
            tags = ['generated_from_trainer']
        elif (isinstance(tags, str) and (tags != 'generated_from_trainer')):
            tags = [tags, 'generated_from_trainer']
        elif ('generated_from_trainer' not in tags):
            tags.append('generated_from_trainer')
        (_, eval_lines, eval_results) = parse_log_history(trainer.state.log_history)
        hyperparameters = extract_hyperparameters_from_trainer(trainer)
        return cls(language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset=dataset, dataset_tags=dataset_tags, dataset_args=dataset_args, dataset_metadata=dataset_metadata, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters)

    # NOTE(review): called as a `cls(...)` factory — upstream this is a
    # @classmethod; the decorator appears lost in extraction. Confirm.
    def from_keras(cls, model, model_name, keras_history=None, language=None, license=None, tags=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset=None, dataset_args=None):
        """Build a TrainingSummary from a Keras model plus optional History."""
        if (dataset is not None):
            if (is_hf_dataset(dataset) and ((dataset_tags is None) or (dataset_args is None))):
                default_tag = dataset.builder_name
                # Generic loaders (csv/json/...) say nothing about the dataset.
                if (default_tag not in ['csv', 'json', 'pandas', 'parquet', 'text']):
                    if (dataset_tags is None):
                        dataset_tags = [default_tag]
                    if (dataset_args is None):
                        dataset_args = [dataset.config_name]
        if ((dataset is None) and (dataset_tags is not None)):
            dataset = dataset_tags
        # A local path is not a reusable base-checkpoint reference.
        if ((finetuned_from is None) and hasattr(model.config, '_name_or_path') and (not os.path.isdir(model.config._name_or_path))):
            finetuned_from = model.config._name_or_path
        if (tasks is None):
            model_class_name = model.__class__.__name__
            for (task, mapping) in TASK_MAPPING.items():
                if (model_class_name in _get_mapping_values(mapping)):
                    tasks = task
        # Always carry the generated_from_keras_callback tag exactly once.
        if (tags is None):
            tags = ['generated_from_keras_callback']
        elif (isinstance(tags, str) and (tags != 'generated_from_keras_callback')):
            tags = [tags, 'generated_from_keras_callback']
        elif ('generated_from_keras_callback' not in tags):
            tags.append('generated_from_keras_callback')
        if (keras_history is not None):
            (_, eval_lines, eval_results) = parse_keras_history(keras_history)
        else:
            eval_lines = []
            eval_results = {}
        hyperparameters = extract_hyperparameters_from_keras(model)
        return cls(language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters, source='keras')
def test_multiple_or_proof_infix_operator(group, params):
    """A nested OR composition built with `|` should verify against the secrets.

    Extends the fixture's two statements with a third DLRep clause whose
    secret is added to the secret dictionary, then checks the whole
    disjunction verifies.
    """
    (stmt1, stmt2, secrets) = params
    gen = group.generator()
    extra_secret = Secret()
    secrets.update({extra_secret: 13})
    # ((stmt1 | stmt2) | DLRep): two applications of the infix OR operator.
    disjunction = (stmt1 | stmt2) | DLRep(13 * gen, extra_secret * gen)
    assert verify_proof(disjunction, secrets)
def random_quadraticform_with_conditions(R, n, condition_list=None, rand_arg_list=None):
    """Return a random quadratic form over ``R`` in ``n`` variables satisfying
    every predicate in ``condition_list``.

    Repeatedly draws forms via ``random_quadraticform(R, n, rand_arg_list)``
    until one passes all conditions.

    Parameters:
        R: base ring for the quadratic form.
        n: number of variables.
        condition_list: callables taking the form and returning a truthy/falsy
            value (e.g. unbound QuadraticForm methods). Defaults to no conditions.
        rand_arg_list: extra arguments forwarded to ``random_quadraticform``.

    Fixes over the previous version:
      * mutable default arguments replaced with ``None`` sentinels;
      * the dead ``Q.c()`` call removed — it looked up a literal attribute
        named ``'c'`` (not the loop variable), always raised, and fell through
        to ``c(Q)``; we now call ``c(Q)`` directly.
    """
    if condition_list is None:
        condition_list = []
    if rand_arg_list is None:
        rand_arg_list = []
    Q = random_quadraticform(R, n, rand_arg_list)
    done_flag = True
    while done_flag:
        done_flag = False
        for c in condition_list:
            # Redraw and restart the condition scan as soon as one fails.
            if not c(Q):
                Q = random_quadraticform(R, n, rand_arg_list)
                done_flag = True
                break
    return Q
# NOTE(review): the decorator on this function lost its leading name during
# extraction — the surviving `.function` is presumably the tail of
# `@tf.function`; confirm against the original file before relying on this block.
.function
def run_inception_jit(inputs, inception_model, num_batches=1):
    # Rescale pixels from [0, 255] to [-1, 1] as float32 — assumes callers pass
    # [0, 255]-scaled image tensors; TODO confirm at the call sites.
    inputs = ((tf2.cast(inputs, tf2.float32) - 127.5) / 127.5)
    # Run the TF-GAN classifier wrapper with an Inception model from TF-Hub.
    return tfgan.eval.run_classifier_fn(inputs, num_batches=num_batches, classifier_fn=classifier_fn_from_tfhub(INCEPTION_TFHUB, None, inception_model), dtypes=_DEFAULT_DTYPES)
# NOTE(review): truncated decorator — `.parametrize(...)` is presumably the tail
# of `@pytest.mark.parametrize`; confirm against the original file.
.parametrize('create_solver', ss.solvers.values())
def test_cons_rts(create_solver):
    # The boolean argument's meaning is defined by the ss.solvers factories —
    # TODO confirm (likely an incremental/option flag).
    solver = create_solver(False)
    # build_simple_ts returns the (possibly re-wrapped) solver plus the
    # constructed relational transition system.
    (solver, ts) = build_simple_ts(solver, pono.RelationalTransitionSystem)
def convert_id_to_speaker(ids_to_speakers, index, unk_token=''):
    """Look up the speaker label for ``index``; fall back to ``unk_token``."""
    if index in ids_to_speakers:
        return ids_to_speakers[index]
    return unk_token
def _iter_vectors(n, lower, upper, step=None):
    """Yield every length-``n`` integer vector whose entries lie in
    ``[lower, upper)``.

    ``step`` is the recursion depth (which coordinate is currently being
    filled); callers leave it ``None`` and it is initialized to ``n`` after
    validating ``lower < upper`` and ``n > 0``.
    """
    if step is None:
        if ZZ(lower) >= ZZ(upper):
            raise ValueError('Expected lower < upper, but got %d >= %d' % (lower, upper))
        if ZZ(n) <= 0:
            raise ValueError('Expected n>0 but got %d <= 0' % n)
        step = n
    assert step > 0
    if step == 1:
        # Base case: vary only the first coordinate.
        for entry in range(lower, upper):
            vec = vector(ZZ, n)
            vec[0] = entry
            yield vec
    else:
        # Fix coordinate step-1 and recurse on the remaining coordinates.
        for entry in range(lower, upper):
            for vec in _iter_vectors(n, lower, upper, step - 1):
                vec[step - 1] = entry
                yield vec
class LAUC(BaseMetric):
    """Limited AUC: AUC computed over only the top-``cutoff`` recommendations,
    normalized by the number of non-training negatives per user."""

    def __init__(self, recommendations, config, params, eval_objects):
        super().__init__(recommendations, config, params, eval_objects)
        self.logger = logging.get_logger('Evaluator', (pylog.CRITICAL if config.config_test else pylog.DEBUG))
        self._cutoff = self._evaluation_objects.cutoff
        self._relevance = self._evaluation_objects.relevance.binary_relevance
        self._num_items = self._evaluation_objects.num_items

    @staticmethod
    def name():
        # FIX: was `def name():` with no `self` and no decorator, which made
        # instance calls (`metric.name()`) raise TypeError. @staticmethod keeps
        # the existing class-level call working and fixes the instance path.
        return 'LAUC'

    @staticmethod
    def __user_auc_at_k(user_recommendations, cutoff, user_relevant_items, num_items, train_size):
        """Per-user limited AUC over the top-``cutoff`` list.

        ``neg_num`` counts the candidate negatives (items not in training and
        not relevant, plus one); each relevant hit at rank ``r_r`` contributes
        the fraction of negatives ranked below it.
        """
        # FIX: marked @staticmethod to match how it is invoked (via the class).
        neg_num = num_items - train_size - len(user_relevant_items) + 1
        pos_ranks = [r for r, (i, _) in enumerate(user_recommendations[:cutoff]) if i in user_relevant_items]
        return sum(((neg_num - r_r) + p_r) / neg_num for p_r, r_r in enumerate(pos_ranks)) / min(cutoff, len(user_relevant_items))

    def eval_user_metric(self):
        """Return {user: limited-AUC} for every user with at least one relevant item."""
        return {u: LAUC.__user_auc_at_k(u_r, self._cutoff, self._relevance.get_user_rel(u), self._num_items, len(self._evaluation_objects.data.train_dict[u]))
                for u, u_r in self._recommendations.items() if len(self._relevance.get_user_rel(u))}
def test_vectorizer_max_df():
    """CountVectorizer's max_df should prune high-document-frequency terms
    into ``stop_words_`` (float = fraction of docs, int = absolute count)."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    vect.fit(test_data)
    # Idiom fix: membership/len directly on the dict instead of `.keys()`.
    assert 'a' in vect.vocabulary_
    assert len(vect.vocabulary_) == 6
    assert len(vect.stop_words_) == 0

    # 'a' occurs in all 3 documents (df = 1.0 > 0.5), so it must be pruned.
    vect.max_df = 0.5
    vect.fit(test_data)
    assert 'a' not in vect.vocabulary_
    assert len(vect.vocabulary_) == 4
    assert 'a' in vect.stop_words_
    assert len(vect.stop_words_) == 2

    # Integer max_df is an absolute document count: keep terms with df <= 1.
    vect.max_df = 1
    vect.fit(test_data)
    assert 'a' not in vect.vocabulary_
    assert len(vect.vocabulary_) == 4
    assert 'a' in vect.stop_words_
    assert len(vect.stop_words_) == 2
class ELU_VGG(nn.Module):
    """VGG-style CNN with ELU activations and a 10-way linear classifier.

    ``vgg_name`` selects a layer spec from the module-level ``cfg`` dict
    (channel counts interleaved with 'M' for max-pooling).
    """

    def __init__(self, vgg_name):
        super(ELU_VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)  # flatten per-sample feature maps
        out = self.classifier(out)
        return out

    def _make_layers(self, cfg):
        """Build the conv stack: each int is Conv-BN-ELU, 'M' is a 2x2 max-pool."""
        layers = []
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                # BUG FIX: the original wrote `nn.ELU(x)`, passing the channel
                # count as ELU's first positional parameter, which is `alpha`
                # (the saturation coefficient) — yielding alpha values of
                # 64-512. Use the default alpha=1.0.
                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                           nn.BatchNorm2d(x),
                           nn.ELU()]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)
def test_lambertw():
    """Spot-check LambertW at points with exactly-known symbolic values."""
    cases = [
        (0, 0),                        # W(0) = 0
        (E, 1),                        # W(e) = 1
        ((-1) / E, -1),                # W(-1/e) = -1 (branch point)
        ((-log(2)) / 2, -log(2)),      # W(-ln2/2) = -ln2
    ]
    for arg, expected in cases:
        assert LambertW(arg) == expected
class SPADENorm(nn.Module):
    """SPADE-style conditional normalization layer.

    Applies a parameter-free normalization (batch/instance/mask, chosen by the
    ``norm_type`` suffix after the required ``'alias'`` prefix) to the input
    plus learned per-channel noise, then modulates the result with gamma/beta
    maps predicted from the segmentation input ``seg``.
    """

    def __init__(self, opt, norm_type, norm_nc, label_nc):
        super(SPADENorm, self).__init__()
        self.param_opt = opt
        # Learned per-channel scale for the injected Gaussian noise.
        self.noise_scale = nn.Parameter(torch.zeros(norm_nc))

        assert norm_type.startswith('alias')
        base_norm = norm_type[len('alias'):]
        if base_norm == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        elif base_norm == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif base_norm == 'mask':
            self.param_free_norm = MaskNorm(norm_nc)
        else:
            raise ValueError("'{}' is not a recognized parameter-free normalization type in SPADENorm".format(base_norm))

        hidden_nc = 128
        kernel = 3
        pad = kernel // 2
        self.conv_shared = nn.Sequential(nn.Conv2d(label_nc, hidden_nc, kernel_size=kernel, padding=pad), nn.ReLU())
        self.conv_gamma = nn.Conv2d(hidden_nc, norm_nc, kernel_size=kernel, padding=pad)
        self.conv_beta = nn.Conv2d(hidden_nc, norm_nc, kernel_size=kernel, padding=pad)

    def forward(self, x, seg, misalign_mask=None):
        batch, _, height, width = x.size()

        # Noise is sampled as (B, W, H, 1), broadcast against the per-channel
        # scale, then transposed to (B, C, H, W). The CPU/CUDA duplication of
        # the original is folded into a single sample-then-move sequence.
        noise = torch.randn(batch, width, height, 1)
        if self.param_opt.cuda:
            noise = noise.cuda()
        noise = (noise * self.noise_scale).transpose(1, 3)

        if misalign_mask is None:
            normalized = self.param_free_norm(x + noise)
        else:
            normalized = self.param_free_norm(x + noise, misalign_mask)

        shared = self.conv_shared(seg)
        gamma = self.conv_gamma(shared)
        beta = self.conv_beta(shared)
        return (normalized * (1 + gamma)) + beta
def run_attack(exp_meta, exp_meta_lock, adrAttack):
    """Run ``adrAttack.attack()``, recording why the experiment stopped.

    On any exception (including KeyboardInterrupt/SystemExit) the reason is
    written into ``exp_meta`` under the lock, the shared ``running`` flag is
    cleared, and the exception is re-raised. On a clean return, the stop
    reason defaults to 'attack-stopped' if nothing else set one.

    Fix: the bare ``except:`` + ``sys.exc_info()[1]`` idiom is replaced with
    ``except BaseException as exc`` — same catch-everything-then-reraise
    semantics, modern spelling.
    """
    global running
    try:
        print('ATK: Starting attack')
        adrAttack.attack()
    except BaseException as exc:  # broad on purpose: record the reason, then re-raise
        with exp_meta_lock:
            exp_meta['reason_stop'] = 'Attack threw an exception: ' + str(exc)
        running = False
        raise
    if running:
        with exp_meta_lock:
            # Only record the default reason if nobody set one already.
            if 'reason_stop' not in exp_meta:
                exp_meta['reason_stop'] = 'attack-stopped'
        running = False
def chunks(l, n):
    """Split sequence ``l`` into ``n`` contiguous pieces whose sizes differ by
    at most one; the first ``len(l) % n`` pieces get the extra element."""
    base, remainder = divmod(len(l), n)
    offset = 0
    for piece in range(n):
        size = base + (1 if piece < remainder else 0)
        yield l[offset:offset + size]
        offset += size
def main(_):
    """Export the train/test splits as CSV files under ``FLAGS.output_dir``.

    Keeps the original side-effect order: load train rows, create the output
    directory (fails if it already exists), write train.csv, then load and
    write test.csv.
    """
    header = ['content', 'label', 'id']
    train_rows = load_data_by_id('train', FLAGS.train_id_path)
    os.mkdir(FLAGS.output_dir)
    dump_raw_data([header] + train_rows, os.path.join(FLAGS.output_dir, 'train.csv'))
    test_rows = load_all_data('test')
    dump_raw_data([header] + test_rows, os.path.join(FLAGS.output_dir, 'test.csv'))
def enable_dropout(model):
    """Switch every Dropout-family submodule of ``model`` into training mode.

    Useful for MC-dropout style inference where dropout must stay active at
    eval time. Each affected module is printed (kept from the original).
    """
    for module in model.modules():
        if type(module).__name__.startswith('Dropout'):
            module.train()
            print(module)
class MetaTransformer(MetaEstimatorMixin, TransformerMixin, BaseEstimator):
    """Meta-estimator that delegates fit/transform to a wrapped transformer,
    forwarding routed metadata one-to-one to the inner estimator."""

    def __init__(self, transformer):
        self.transformer = transformer

    def fit(self, X, y=None, **fit_params):
        # Resolve which fit_params are routed to the inner transformer's fit.
        routed = process_routing(self, 'fit', **fit_params)
        self.transformer_ = clone(self.transformer).fit(X, y, **routed.transformer.fit)
        return self

    def transform(self, X, y=None, **transform_params):
        routed = process_routing(self, 'transform', **transform_params)
        return self.transformer_.transform(X, **routed.transformer.transform)

    def get_metadata_routing(self):
        router = MetadataRouter(owner=self.__class__.__name__)
        return router.add(transformer=self.transformer, method_mapping='one-to-one')
class GNN_node_Virtualnode(torch.nn.Module):
    """GNN node-embedding stack augmented with a single virtual node.

    The virtual node is a learned embedding, added to every node's features
    before each GNN layer and updated between layers via a per-layer MLP over
    the sum-pooled graph representation (OGB-style architecture).
    """

    def __init__(self, num_layer, emb_dim, node_encoder, drop_ratio=0.5, JK='last', residual=False, gnn_type='gin'):
        """
        Args:
            num_layer: number of message-passing layers (must be >= 2).
            emb_dim: embedding dimensionality used throughout.
            node_encoder: module mapping raw node features (+ depth) to emb_dim.
            drop_ratio: dropout probability applied after each layer.
            JK: jumping-knowledge mode, 'last' or 'sum'.
            residual: if True, add skip connections for nodes and virtual node.
            gnn_type: 'gin' or 'gcn'.
        """
        super(GNN_node_Virtualnode, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.residual = residual
        if (self.num_layer < 2):
            raise ValueError('Number of GNN layers must be greater than 1.')
        self.node_encoder = node_encoder
        # Single virtual-node embedding, initialized to zero.
        self.virtualnode_embedding = torch.nn.Embedding(1, emb_dim)
        torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)
        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        # One virtual-node update MLP per layer *transition* (num_layer - 1 of them).
        self.mlp_virtualnode_list = torch.nn.ModuleList()
        for layer in range(num_layer):
            if (gnn_type == 'gin'):
                self.convs.append(GINConv(emb_dim))
            elif (gnn_type == 'gcn'):
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
        for layer in range((num_layer - 1)):
            self.mlp_virtualnode_list.append(torch.nn.Sequential(torch.nn.Linear(emb_dim, (2 * emb_dim)), torch.nn.BatchNorm1d((2 * emb_dim)), torch.nn.ReLU(), torch.nn.Linear((2 * emb_dim), emb_dim), torch.nn.BatchNorm1d(emb_dim), torch.nn.ReLU()))

    def forward(self, batched_data):
        """Return per-node representations for a PyG-style batched graph.

        Assumes batched_data carries x, edge_index, edge_attr, node_depth and
        the batch assignment vector — TODO confirm against the dataset class.
        """
        (x, edge_index, edge_attr, node_depth, batch) = (batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.node_depth, batched_data.batch)
        # One virtual-node vector per graph in the batch (batch[-1] + 1 graphs);
        # index 0 of the embedding table is shared, so all start identical.
        virtualnode_embedding = self.virtualnode_embedding(torch.zeros((batch[(- 1)].item() + 1)).to(edge_index.dtype).to(edge_index.device))
        h_list = [self.node_encoder(x, node_depth.view((- 1)))]
        for layer in range(self.num_layer):
            # Broadcast each graph's virtual-node vector onto its nodes.
            h_list[layer] = (h_list[layer] + virtualnode_embedding[batch])
            h = self.convs[layer](h_list[layer], edge_index, edge_attr)
            h = self.batch_norms[layer](h)
            if (layer == (self.num_layer - 1)):
                # No ReLU on the final layer's output.
                h = F.dropout(h, self.drop_ratio, training=self.training)
            else:
                h = F.dropout(F.relu(h), self.drop_ratio, training=self.training)
            if self.residual:
                h = (h + h_list[layer])
            h_list.append(h)
            if (layer < (self.num_layer - 1)):
                # Update the virtual node from the sum-pooled node states
                # (pre-conv states of this layer) plus its current value.
                virtualnode_embedding_temp = (global_add_pool(h_list[layer], batch) + virtualnode_embedding)
                if self.residual:
                    virtualnode_embedding = (virtualnode_embedding + F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training=self.training))
                else:
                    virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training=self.training)
        # Jumping knowledge: take the last layer's output or the sum of all.
        if (self.JK == 'last'):
            node_representation = h_list[(- 1)]
        elif (self.JK == 'sum'):
            node_representation = 0
            for layer in range((self.num_layer + 1)):
                node_representation += h_list[layer]
        return node_representation
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
    """Run ``target_func`` in a spawned subprocess and report failures on
    ``test_case``.

    The target receives ``(input_queue, output_queue, timeout)``; it must read
    ``inputs`` from the input queue and put a dict with an ``'error'`` key on
    the output queue.

    Args:
        test_case: unittest.TestCase used to report failures.
        target_func: picklable callable executed in the child process.
        inputs: arbitrary picklable payload handed to the child.
        timeout: seconds for every queue/join operation; defaults to the
            PYTEST_TIMEOUT env var or 600.

    Fixes: corrected the ``start_methohd`` typo and pass a string (not the
    exception object) to ``test_case.fail``.
    """
    if timeout is None:
        timeout = int(os.environ.get('PYTEST_TIMEOUT', 600))
    # 'spawn' gives the child a fresh interpreter, avoiding state inherited
    # from the test process via fork.
    start_method = 'spawn'
    ctx = multiprocessing.get_context(start_method)
    input_queue = ctx.Queue(1)
    output_queue = ctx.JoinableQueue(1)
    input_queue.put(inputs, timeout=timeout)
    process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
    process.start()
    try:
        results = output_queue.get(timeout=timeout)
        output_queue.task_done()
    except Exception as e:
        process.terminate()
        test_case.fail(str(e))  # fail() raises, so execution stops here
    process.join(timeout=timeout)
    if results['error'] is not None:
        test_case.fail(f"{results['error']}")