code
stringlengths
101
5.91M
def main():
    # Plot the density of bounding-box centre coordinates (normalized to
    # [0, 1] x [0, 1]) over every mask in a saliency dataset.
    DATASETS = ['Augmented MSRA10K Experiment VIII']
    DATASETS_NAME = ['Augmented MSRA10K Experiment VIII']
    j = 0  # index into DATASETS_NAME, advanced once per dataset
    for dataset in DATASETS:
        # NOTE(review): the mask folder is hard-coded and does not vary with
        # `dataset` — confirm this is intended if more datasets are added.
        FOLDER_MASK = '/home/dvruiz/scriptPosProcessObjects/29_05_2019_FullMix/multipleBG/masks/'
        fileList = os.listdir(FOLDER_MASK)
        xs = np.empty(len(fileList), np.float32)
        ys = np.empty(len(fileList), np.float32)
        index = 0
        for i in fileList:
            maskName = i
            maskFile = Image.open((FOLDER_MASK + maskName))
            mask = np.array(maskFile)
            shape = mask.shape
            h = shape[0]
            w = shape[1]
            maskFile.close()
            # bbox() is expected to return the tight (ymin, ymax, xmin, xmax)
            # box of the non-zero mask region — defined elsewhere in the file.
            (ymin, ymax, xmin, xmax) = bbox(mask)
            centerx = (((xmax - xmin) / 2) + xmin)
            centery = (((ymax - ymin) / 2) + ymin)
            # Normalize the box centre by the image dimensions.
            newx = (centerx / w)
            newy = (centery / h)
            xs[index] = newx
            ys[index] = newy
            index += 1
        plt.clf()
        plt.title((DATASETS_NAME[j] + '\n Distribution of Bounding Boxes Center Coordinates'), fontsize='xx-large')
        plt.xlabel('Normalized Position X', fontsize='xx-large')
        plt.xlim(0, 1)
        plt.xticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
        plt.ylabel('Normalized Position Y', fontsize='xx-large')
        plt.ylim(0, 1)
        plt.yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
        # Colour each sample by a Gaussian KDE estimate of the point density.
        xy = np.vstack([xs, ys])
        z = gaussian_kde(xy)(xy)
        z = (z / 100)
        plt.scatter(xs, ys, c=z, s=10, edgecolor='', vmin=0, vmax=0.5, cmap=plt.get_cmap('hot'))
        cb = plt.colorbar()
        cb.set_label('Sample Density', fontsize='xx-large')
        plt.tight_layout()
        # `savePath` is presumably a module-level output directory — confirm.
        plt.savefig(((savePath + dataset) + 'pos.png'))
        plt.savefig(((savePath + dataset) + 'pos.pdf'))
        plt.savefig(((savePath + dataset) + 'pos.svg'))
        plt.savefig(((savePath + dataset) + 'pos.eps'))
        j += 1
def get_head_wh(x_coords, y_coords):
    """Return (width, height) of the box spanning the visible head parts.

    Parts listed in PARTS_SEL whose x or y coordinate equals MISSING_VALUE
    are ignored. If fewer than two parts are visible, (-1, -1) is returned.
    """
    visible = [
        (x_coords[part], y_coords[part])
        for part in PARTS_SEL
        if x_coords[part] != MISSING_VALUE and y_coords[part] != MISSING_VALUE
    ]
    final_w, final_h = -1, -1
    if len(visible) >= 2:
        xs = [p[0] for p in visible]
        ys = [p[1] for p in visible]
        final_w = max(xs) - min(xs)
        final_h = max(ys) - min(ys)
    return (final_w, final_h)
def Optimizer_w_Initializer(class_loss, LR, epoch, init_epoch, global_step):
    # Build two Momentum (Nesterov) training ops: one for the classification
    # objective (class_loss + weight decay) and one for the distillation loss
    # whose gradients are merged with the weight-decay gradients.
    # NOTE(review): `epoch` and `init_epoch` are unused here — confirm whether
    # they are kept only for signature parity with sibling optimizer builders.
    with tf.variable_scope('Optimizer_w_Distillation'):
        # Train every trainable variable except those tagged as the teacher's.
        variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        teacher_variables = tf.get_collection('Teacher')
        variables = list((set(variables) - set(teacher_variables)))
        optimize = tf.train.MomentumOptimizer(LR, 0.9, use_nesterov=True)
        reg_loss = tf.add_n(tf.losses.get_regularization_losses())
        # The distillation loss is expected to be registered in the 'dist'
        # collection by the model-building code.
        distillation_loss = tf.get_collection('dist')[0]
        total_loss = (class_loss + reg_loss)
        tf.summary.scalar('loss/total_loss', total_loss)
        gradients = optimize.compute_gradients(total_loss, var_list=variables)
        gradient_dist = optimize.compute_gradients(distillation_loss, var_list=variables)
        gradient_wdecay = optimize.compute_gradients(reg_loss, var_list=variables)
        with tf.variable_scope('clip_grad'):
            # Fold the weight-decay gradient into the distillation gradient
            # wherever the distillation loss actually produced one.
            for (i, (gw, gd)) in enumerate(zip(gradient_wdecay, gradient_dist)):
                if (gd[0] is not None):
                    gradient_dist[i] = ((gw[0] + gd[0]), gd[1])
        update_ops.append(optimize.apply_gradients(gradients, global_step=global_step))
        update_op = tf.group(*update_ops)
        train_op = control_flow_ops.with_dependencies([update_op], total_loss, name='train_op')
        # Second op: apply the merged distillation gradients. UPDATE_OPS is
        # re-fetched so batch-norm style updates run for this op as well.
        update_ops_dist = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        update_ops_dist.append(optimize.apply_gradients(gradient_dist, global_step=global_step))
        update_op_dist = tf.group(*update_ops_dist)
        train_op_dist = control_flow_ops.with_dependencies([update_op_dist], distillation_loss, name='train_op_dist')
        return (train_op, train_op_dist)
class RuntimeVariable(str, enum.Enum):
    """Names of runtime statistics a run can record and export.

    Mixing in ``str`` lets each member be used directly wherever its string
    value is expected (e.g. as a CSV column name).
    """

    # Run identification
    TargetModule = 'TargetModule'
    ConfigurationId = 'ConfigurationId'
    RunId = 'RunId'
    ProjectName = 'ProjectName'
    TotalTime = 'TotalTime'
    SearchTime = 'SearchTime'
    AlgorithmIterations = 'AlgorithmIterations'
    ExecutionResults = 'ExecutionResults'
    RandomSeed = 'RandomSeed'
    # Subject-under-test properties
    CodeObjects = 'CodeObjects'
    Predicates = 'Predicates'
    Lines = 'Lines'
    AccessibleObjectsUnderTest = 'AccessibleObjectsUnderTest'
    GeneratableTypes = 'GeneratableTypes'
    ImportBranchCoverage = 'ImportBranchCoverage'
    ImportLineCoverage = 'ImportLineCoverage'
    Goals = 'Goals'
    CollectedTestCases = 'CollectedTestCases'
    FoundTestCases = 'FoundTestCases'
    SuitableTestModule = 'SuitableTestModule'
    McCabeAST = 'McCabeAST'
    McCabeCodeObject = 'McCabeCodeObject'
    LineNos = 'LineNos'
    # Mutation analysis
    NumberOfCreatedMutants = 'NumberOfCreatedMutants'
    NumberOfKilledMutants = 'NumberOfKilledMutants'
    NumberOfTimedOutMutants = 'NumberOfTimedOutMutants'
    MutationScore = 'MutationScore'
    SignatureInfos = 'SignatureInfos'
    NumberOfConstructors = 'NumberOfConstructors'
    # Time-series statistics
    CoverageTimeline = 'CoverageTimeline'
    SizeTimeline = 'SizeTimeline'
    LengthTimeline = 'LengthTimeline'
    FitnessTimeline = 'FitnessTimeline'
    TotalExceptionsTimeline = 'TotalExceptionsTimeline'
    # Result statistics
    Length = 'Length'
    Size = 'Size'
    Fitness = 'Fitness'
    Coverage = 'Coverage'
    BranchCoverage = 'BranchCoverage'
    LineCoverage = 'LineCoverage'
    StatementCheckedCoverage = 'StatementCheckedCoverage'
    AssertionCheckedCoverage = 'AssertionCheckedCoverage'
    FinalLength = 'FinalLength'
    FinalSize = 'FinalSize'
    FinalBranchCoverage = 'FinalBranchCoverage'
    FinalLineCoverage = 'FinalLineCoverage'
    Assertions = 'Assertions'
    DeletedAssertions = 'DeletedAssertions'

    def __repr__(self) -> str:
        # Render as the bare member name, without the class prefix.
        return self.name
class miniImagenetOneshotDataset(data.Dataset):
    # Episodic miniImagenet dataset for few-shot learning: each item is a
    # (support set, query set) episode drawn from `ways` random classes, plus
    # a fixed unlabeled "gallery" sampled once from the training split.

    def __init__(self, dataroot=(('/home/' + userName) + '/data/miniImagenet'), type='train', ways=5, shots=1, test_num=1, epoch=100, galleryNum=10):
        self.ways = ways
        self.shots = shots
        self.test_num = test_num
        self.__size = epoch  # number of episodes reported by __len__
        self.transform = transforms.Compose([filenameToPILImage, transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        self.galleryTransform = transforms.Compose([filenameToPILImage, transforms.RandomHorizontalFlip(p=0.5), transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

        def loadSplit(splitFile):
            # Parse a split CSV of (filename, label) rows into
            # {label: [filenames]}; the first row is a header and is skipped.
            dictLabels = {}
            with open(splitFile) as csvfile:
                csvreader = csv.reader(csvfile, delimiter=',')
                next(csvreader, None)
                for (i, row) in enumerate(csvreader):
                    filename = row[0]
                    label = row[1]
                    if (label in dictLabels.keys()):
                        dictLabels[label].append(filename)
                    else:
                        dictLabels[label] = [filename]
            return dictLabels
        self.miniImagenetImagesDir = os.path.join(dataroot, 'images')
        self.unData = loadSplit(splitFile=os.path.join(dataroot, ('train' + '.csv')))
        self.data = loadSplit(splitFile=os.path.join(dataroot, (type + '.csv')))
        self.type = type
        # Sort by label so class indices are deterministic across runs.
        self.data = collections.OrderedDict(sorted(self.data.items()))
        self.unData = collections.OrderedDict(sorted(self.unData.items()))
        self.galleryNum = galleryNum
        self.Gallery = []
        # Fixed seed so the gallery is identical across runs; re-seeded from
        # entropy afterwards so episode sampling stays random.
        numpy.random.seed(2019)
        for classes in range(len(self.unData.keys())):
            # NOTE(review): `dict.keys()[index]` is Python 2 style — under
            # Python 3 dict_keys is not subscriptable and this raises
            # TypeError. Confirm target interpreter or wrap with list().
            Files = np.random.choice(self.unData[self.unData.keys()[classes]], self.galleryNum, False)
            for file in Files:
                self.Gallery.append(file)
        numpy.random.seed()
        # Map every class label (labeled and unlabeled splits) to an int id.
        self.keyTobh = {}
        for c in range(len(self.data.keys())):
            self.keyTobh[self.data.keys()[c]] = c
        for c in range(len(self.unData.keys())):
            self.keyTobh[self.unData.keys()[c]] = c

    def batchModel(model, AInputs, requireGrad):
        # Run `model` over AInputs in chunks of args.batchSize on the GPU and
        # concatenate the per-chunk outputs.
        # NOTE(review): defined without `self`; presumably invoked as a plain
        # function rather than a bound method — confirm call sites.
        Batch = (((AInputs.size(0) + args.batchSize) - 1) // args.batchSize)
        First = True
        Cfeatures = 1  # placeholder until the first chunk's output arrives
        for b in range(Batch):
            if (b < (Batch - 1)):
                midFeature = model(Variable(AInputs[(b * args.batchSize):((b + 1) * args.batchSize)].cuda(), requires_grad=requireGrad))
            else:
                midFeature = model(Variable(AInputs[(b * args.batchSize):AInputs.size(0)].cuda(), requires_grad=requireGrad))
            if First:
                First = False
                Cfeatures = midFeature
            else:
                Cfeatures = torch.cat((Cfeatures, midFeature), dim=0)
        return Cfeatures

    def acquireFeature(self, model, batchSize=128):
        # Extract (no-grad) features for every gallery image, batch by batch.
        Batch = (((len(self.Gallery) + batchSize) - 1) // batchSize)
        First = True
        Cfeatures = 1
        Images = 1
        for b in range(Batch):
            jFirst = True
            Images = 1
            # Assemble one batch of transformed gallery images.
            for j in range((b * batchSize), min(((b + 1) * batchSize), len(self.Gallery))):
                image = self.transform(os.path.join(pathImages, str(self.Gallery[j])))
                image = image.unsqueeze(0)
                if jFirst:
                    jFirst = False
                    Images = image
                else:
                    Images = torch.cat((Images, image), 0)
            with torch.no_grad():
                midFeature = model(Variable(Images.cuda(), requires_grad=False)).cpu()
            if First:
                First = False
                Cfeatures = midFeature
            else:
                Cfeatures = torch.cat((Cfeatures, midFeature), dim=0)
        return Cfeatures

    def get_image(self, file):
        # Gallery images get the augmenting (random horizontal flip) transform.
        image = self.galleryTransform(os.path.join(pathImages, str(file)))
        return image

    def __getitem__(self, index):
        # Sample one episode: `ways` classes with `shots` support images and
        # `test_num` query images each. "Belongs" labels are episode-local
        # class indices; "Real" labels are the global ids from keyTobh.
        supportFirst = True
        supportImages = 1
        supportBelongs = torch.LongTensor((self.ways * self.shots), 1)
        supportReal = torch.LongTensor((self.ways * self.shots), 1)
        testFirst = True
        testImages = 1
        testBelongs = torch.LongTensor((self.ways * self.test_num), 1)
        testReal = torch.LongTensor((self.ways * self.test_num), 1)
        # NOTE(review): np.random.choice over dict.keys() needs a list under
        # Python 3 — same Python 2 idiom as in __init__; confirm.
        selected_classes = np.random.choice(self.data.keys(), self.ways, False)
        for i in range(self.ways):
            files = np.random.choice(self.data[selected_classes[i]], self.shots, False)
            for j in range(self.shots):
                image = self.transform(os.path.join(pathImages, str(files[j])))
                image = image.unsqueeze(0)
                if supportFirst:
                    supportFirst = False
                    supportImages = image
                else:
                    supportImages = torch.cat((supportImages, image), 0)
                supportBelongs[(((i * self.shots) + j), 0)] = i
                supportReal[(((i * self.shots) + j), 0)] = self.keyTobh[selected_classes[i]]
            files = np.random.choice(self.data[selected_classes[i]], self.test_num, False)
            for j in range(self.test_num):
                image = self.transform(os.path.join(pathImages, str(files[j])))
                image = image.unsqueeze(0)
                if testFirst:
                    testFirst = False
                    testImages = image
                else:
                    testImages = torch.cat((testImages, image), 0)
                testBelongs[(((i * self.test_num) + j), 0)] = i
                testReal[(((i * self.test_num) + j), 0)] = self.keyTobh[selected_classes[i]]
        return (supportImages, supportBelongs, supportReal, testImages, testBelongs, testReal)

    def __len__(self):
        return self.__size
@pytest.mark.parametrize('dtype', [ti.u8, ti.f32])
@test_utils.test(arch=get_host_arch_list())
def test_save_image_without_window(dtype):
    """Fill a headless GUI canvas with uniform values and check the saved PNG round-trips exactly.

    NOTE(review): the decorator lines were mangled in the source
    (``.parametrize`` / ``_utils.test`` with no ``@``), and ``paint`` iterated
    a field without a kernel decorator; reconstructed here as the
    conventional pytest + taichi test decorators — confirm against upstream.
    """
    n = 255
    pixels = ti.field(dtype=dtype, shape=(n, n, 3))

    @ti.kernel
    def paint(c: dtype):
        for i, j, k in pixels:
            pixels[i, j, k] = c

    gui = ti.GUI('Test', res=(n, n), show_gui=False)
    for i in [0, 32, 64, 128, 255]:
        if dtype is ti.u8:
            paint(i)
        else:
            # Float images are stored normalized, so scale into [0, 1].
            paint((i * 1.0) / n)
        gui.set_image(pixels)
        image_path = test_utils.make_temp_file(suffix='.png')
        gui.show(image_path)  # saves to image_path instead of displaying
        image = ti.tools.imread(image_path)
        delta = (image - i).sum()
        assert (delta == 0), 'Expected image difference to be 0 but got {} instead.'.format(delta)
def get_scare_snippets(nlp, csv_dir_path, text_id_map, filename_pattern='*.csv'):
    """Load SCARE annotation CSVs and return a list of SentimentDatum snippets.

    Each tab-separated row carries (annotation id, begin, end, sentiment);
    the snippet text is sliced out of `text_id_map` and tokenized with `nlp`.
    Rows labeled 'unknown' are skipped; any other unexpected label raises.
    """
    sentiment_codes = {'positive': 2, 'neutral': 1, 'negative': 0}
    num_short_items = 0
    snippets = []
    for csv_filename in glob.glob(os.path.join(csv_dir_path, filename_pattern)):
        with open(csv_filename, newline='') as fin:
            rows = list(csv.reader(fin, delimiter='\t', quotechar='"'))
        for row in rows:
            ann_id, begin, end, sentiment = (row[i] for i in [1, 2, 3, 6])
            key = sentiment.lower()
            if key == 'unknown':
                continue
            if key not in sentiment_codes:
                raise ValueError("Tell John he screwed up and this is why he can't have Mox Opal: {}".format(sentiment))
            label = sentiment_codes[key]
            if ann_id not in text_id_map:
                print("Found snippet which can't be found: {}-{}".format(csv_filename, ann_id))
                continue
            snippet = text_id_map[ann_id][int(begin):int(end)]
            doc = nlp(snippet)
            text = [token.text for sentence in doc.sentences for token in sentence.tokens]
            if sum(len(sentence.tokens) for sentence in doc.sentences) < 4:
                num_short_items = num_short_items + 1
            snippets.append(SentimentDatum(label, text))
    print('Number of short items: {}'.format(num_short_items))
    return snippets
class SNLinear(Linear):
    """Linear link with spectral normalization of the weight matrix.

    W is divided by its largest singular value, estimated with `Ip` power
    iterations; optionally rescaled by a learned `gamma` and/or divided by a
    constant `factor`.
    """

    def __init__(self, in_size, out_size, use_gamma=False, nobias=False, initialW=None, initial_bias=None, Ip=1, factor=None):
        self.Ip = Ip
        self.use_gamma = use_gamma
        self.factor = factor
        super(SNLinear, self).__init__(in_size, out_size, nobias, initialW, initial_bias)
        # Persistent power-iteration vector for the singular-value estimate.
        self.u = np.random.normal(size=(1, out_size)).astype(dtype='f')
        self.register_persistent('u')

    @property
    def W_bar(self):
        """Spectrally normalized weight.

        Bug fix: this was a plain method, but __call__ accessed it without
        calling (``self.W_bar``), passing the bound method to linear();
        it must be a property for that access to yield the weight.
        """
        (sigma, _u, _) = max_singular_value(self.W, self.u, self.Ip)
        if self.factor:
            sigma = (sigma / self.factor)
        sigma = broadcast_to(sigma.reshape((1, 1)), self.W.shape)
        # Persist the refined power-iteration vector for the next call.
        self.u[:] = _u
        if hasattr(self, 'gamma'):
            return ((broadcast_to(self.gamma, self.W.shape) * self.W) / sigma)
        else:
            return (self.W / sigma)

    def _initialize_params(self, in_size):
        super(SNLinear, self)._initialize_params(in_size)
        if self.use_gamma:
            # Initialize gamma with the largest singular value of W so the
            # normalized layer starts with the original scale.
            (_, s, _) = np.linalg.svd(self.W.data)
            with self.init_scope():
                self.gamma = chainer.Parameter(s[0], (1, 1))

    def __call__(self, x):
        if (self.W.data is None):
            self._initialize_params((x.size // x.shape[0]))
        return linear.linear(x, self.W_bar, self.b)
class TestGaussianMLPRegressor(TfGraphTestCase):
    # Regression tests for GaussianMLPRegressor: each test fits sin(x) on
    # [-pi, pi] and checks predictions and/or the normalization variables.

    def test_fit_normalized(self):
        gmr = GaussianMLPRegressor(input_shape=(1,), output_dim=1)
        data = np.linspace((- np.pi), np.pi, 1000)
        obs = [{'observations': [[x]], 'returns': [np.sin(x)]} for x in data]
        observations = np.concatenate([p['observations'] for p in obs])
        returns = np.concatenate([p['returns'] for p in obs])
        returns = returns.reshape(((- 1), 1))
        for _ in range(150):
            gmr.fit(observations, returns)
        paths = {'observations': [[(- np.pi)], [((- np.pi) / 2)], [((- np.pi) / 4)], [0], [(np.pi / 4)], [(np.pi / 2)], [np.pi]]}
        prediction = gmr.predict(paths['observations'])
        # sin() at the probe points, to within a loose tolerance.
        expected = [[0], [(- 1)], [(- 0.707)], [0], [0.707], [1], [0]]
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)
        # With normalization enabled, the model's stored statistics must
        # match the empirical mean/std of the training data.
        x_mean = self.sess.run(gmr.model._networks['default'].x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(gmr.model._networks['default'].x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)
        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)
        y_mean = self.sess.run(gmr.model._networks['default'].y_mean)
        y_mean_expected = np.mean(returns, axis=0, keepdims=True)
        y_std = self.sess.run(gmr.model._networks['default'].y_std)
        y_std_expected = np.std(returns, axis=0, keepdims=True)
        assert np.allclose(y_mean, y_mean_expected)
        assert np.allclose(y_std, y_std_expected)

    def test_fit_unnormalized(self):
        gmr = GaussianMLPRegressor(input_shape=(1,), output_dim=1, subsample_factor=0.9, normalize_inputs=False, normalize_outputs=False)
        data = np.linspace((- np.pi), np.pi, 1000)
        obs = [{'observations': [[x]], 'returns': [np.sin(x)]} for x in data]
        observations = np.concatenate([p['observations'] for p in obs])
        returns = np.concatenate([p['returns'] for p in obs])
        for _ in range(150):
            gmr.fit(observations, returns.reshape(((- 1), 1)))
        paths = {'observations': [[(- np.pi)], [((- np.pi) / 2)], [((- np.pi) / 4)], [0], [(np.pi / 4)], [(np.pi / 2)], [np.pi]]}
        prediction = gmr.predict(paths['observations'])
        expected = [[0], [(- 1)], [(- 0.707)], [0], [0.707], [1], [0]]
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)
        # Normalization disabled: stats must stay at their identity defaults
        # (zero mean, unit std).
        x_mean = self.sess.run(gmr.model._networks['default'].x_mean)
        x_mean_expected = np.zeros_like(x_mean)
        x_std = self.sess.run(gmr.model._networks['default'].x_std)
        x_std_expected = np.ones_like(x_std)
        assert np.array_equal(x_mean, x_mean_expected)
        assert np.array_equal(x_std, x_std_expected)
        y_mean = self.sess.run(gmr.model._networks['default'].y_mean)
        y_mean_expected = np.zeros_like(y_mean)
        y_std = self.sess.run(gmr.model._networks['default'].y_std)
        y_std_expected = np.ones_like(y_std)
        assert np.allclose(y_mean, y_mean_expected)
        assert np.allclose(y_std, y_std_expected)

    def test_fit_smaller_subsample_factor(self):
        # Training on a 90% subsample must still fit the target function.
        gmr = GaussianMLPRegressor(input_shape=(1,), output_dim=1, subsample_factor=0.9)
        data = np.linspace((- np.pi), np.pi, 1000)
        obs = [{'observations': [[x]], 'returns': [np.sin(x)]} for x in data]
        observations = np.concatenate([p['observations'] for p in obs])
        returns = np.concatenate([p['returns'] for p in obs])
        for _ in range(150):
            gmr.fit(observations, returns.reshape(((- 1), 1)))
        paths = {'observations': [[(- np.pi)], [((- np.pi) / 2)], [((- np.pi) / 4)], [0], [(np.pi / 4)], [(np.pi / 2)], [np.pi]]}
        prediction = gmr.predict(paths['observations'])
        expected = [[0], [(- 1)], [(- 0.707)], [0], [0.707], [1], [0]]
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)

    def test_fit_without_trusted_region(self):
        # Optimization without the KL trust region must also converge.
        gmr = GaussianMLPRegressor(input_shape=(1,), output_dim=1, use_trust_region=False)
        data = np.linspace((- np.pi), np.pi, 1000)
        obs = [{'observations': [[x]], 'returns': [np.sin(x)]} for x in data]
        observations = np.concatenate([p['observations'] for p in obs])
        returns = np.concatenate([p['returns'] for p in obs])
        for _ in range(150):
            gmr.fit(observations, returns.reshape(((- 1), 1)))
        paths = {'observations': [[(- np.pi)], [((- np.pi) / 2)], [((- np.pi) / 4)], [0], [(np.pi / 4)], [(np.pi / 2)], [np.pi]]}
        prediction = gmr.predict(paths['observations'])
        expected = [[0], [(- 1)], [(- 0.707)], [0], [0.707], [1], [0]]
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)

    def test_is_pickleable(self):
        gmr = GaussianMLPRegressor(input_shape=(1,), output_dim=1)
        with tf.compat.v1.variable_scope('GaussianMLPRegressor/GaussianMLPRegressorModel', reuse=True):
            bias = tf.compat.v1.get_variable('dist_params/mean_network/hidden_0/bias')
        # Perturb a variable so the test isn't trivially passing on defaults.
        bias.load(tf.ones_like(bias).eval())
        result1 = gmr.predict(np.ones((1, 1)))
        h = pickle.dumps(gmr)
        with tf.compat.v1.Session(graph=tf.Graph()):
            gmr_pickled = pickle.loads(h)
            result2 = gmr_pickled.predict(np.ones((1, 1)))
            # Predictions must survive a pickle round-trip unchanged.
            assert np.array_equal(result1, result2)

    def test_is_pickleable2(self):
        gmr = GaussianMLPRegressor(input_shape=(1,), output_dim=1)
        with tf.compat.v1.variable_scope('GaussianMLPRegressor/GaussianMLPRegressorModel', reuse=True):
            x_mean = tf.compat.v1.get_variable('normalized_vars/x_mean')
        x_mean.load(tf.ones_like(x_mean).eval())
        x1 = gmr.model._networks['default'].x_mean.eval()
        h = pickle.dumps(gmr)
        with tf.compat.v1.Session(graph=tf.Graph()):
            gmr_pickled = pickle.loads(h)
            x2 = gmr_pickled.model._networks['default'].x_mean.eval()
            # Normalization variables must survive pickling too.
            assert np.array_equal(x1, x2)

    def test_auxiliary(self):
        gmr = GaussianMLPRegressor(input_shape=(1,), output_dim=5)
        assert gmr.vectorized
        assert (gmr.distribution.event_shape.as_list() == [5])
def eval(model, device, loader, evaluator):
    """Run `model` over `loader` in eval mode and score with `evaluator`.

    Single-output predictions (shape [N, 1]) are flattened before scoring.
    Returns whatever evaluator.eval(y_pred, y_true) returns.
    """
    model.eval()
    loss_accum = 0  # retained from the original; never updated in this loop
    truths = []
    preds = []
    for step, batch in enumerate(tqdm(loader, desc='Iteration')):
        x_mini, y_true_mini = batch
        with torch.no_grad():
            out = model(x_mini.to(device)).cpu()
        if out.shape[1] == 1:
            out = out.flatten()
        truths.append(y_true_mini)
        preds.append(out)
    return evaluator.eval(torch.cat(preds), torch.cat(truths))
def create_aspect_ratio_groups(dataset, k=0):
    """Quantize each sample's aspect ratio into one of 2k+1 log-spaced bins.

    With k == 0, a single bin boundary at 1.0 is used (portrait vs landscape).
    Returns the per-sample group index array from _quantize.
    """
    if k > 0:
        bins = (2 ** np.linspace(-1, 1, 2 * k + 1)).tolist()
    else:
        bins = [1.0]
    groups = _quantize(dataset._aspect_ratios, bins)
    counts = np.unique(groups, return_counts=True)[1]
    fbins = [0] + bins + [np.inf]
    print(f'Using {fbins} as bins for aspect ratio quantization')
    print(f'Count of instances per bin: {counts}')
    return groups
class IntelItaniumCCompiler(IntelCCompiler):
    # Intel compiler variant targeting the Itanium (IA-64) architecture.
    compiler_type = 'intele'

    # Runs at class-definition time: probe PATH for the Intel compiler
    # executables, preferring 'icc' over the older 'ecc' name, and stop at
    # the first one found.
    # NOTE(review): only the loop variable `cc_exe` is bound here — it is
    # presumably consumed by the IntelCCompiler base class; confirm.
    for cc_exe in map(find_executable, ['icc', 'ecc']):
        if cc_exe:
            break
class FairseqEncoder(nn.Module):
    """Base class for fairseq encoders.

    Subclasses implement forward() and reorder_encoder_out(); this base
    provides the TorchScript-compatible dispatch and bookkeeping hooks.
    """

    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        """Encode a batch of source tokens; must be overridden."""
        raise NotImplementedError

    def forward_torchscript(self, net_input: Dict[str, Tensor]):
        """TorchScript-friendly entry point: scripting cannot forward kwargs,
        so the scripted path calls forward() with explicit arguments."""
        if torch.jit.is_scripting():
            return self.forward(src_tokens=net_input['src_tokens'], src_lengths=net_input['src_lengths'])
        else:
            return self.forward_non_torchscript(net_input)

    # Bug fix: the decorator was mangled to a bare `.unused` in the source;
    # restored to @torch.jit.unused so scripting skips this eager-only path.
    @torch.jit.unused
    def forward_non_torchscript(self, net_input: Dict[str, Tensor]):
        encoder_input = {k: v for (k, v) in net_input.items() if (k != 'prev_output_tokens')}
        return self.forward(**encoder_input)

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder encoder output according to `new_order` (for beam search)."""
        raise NotImplementedError

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return 1000000.0

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade old state dicts to the current format (no-op by default)."""
        return state_dict

    def set_num_updates(self, num_updates):
        """Propagate the update counter to every submodule that tracks it."""
        def _apply(m):
            if (hasattr(m, 'set_num_updates') and (m != self)):
                m.set_num_updates(num_updates)
        self.apply(_apply)
def main():
    # End-to-end driver for coreference resolution: parse args, prepare the
    # output directory, load model + tokenizer, build datasets, optionally
    # train, and finally evaluate.
    args = parse_args()
    if (args.experiment_name is not None):
        wandb.init(project=args.experiment_name, config=args)
    if (args.output_dir is not None):
        if os.path.exists(args.output_dir):
            if args.overwrite_output_dir:
                shutil.rmtree(args.output_dir)
                logger.info(f'--overwrite_output_dir used. directory {args.output_dir} deleted!')
            else:
                raise ValueError(f'Output directory ({args.output_dir}) already exists. Use --overwrite_output_dir to overcome.')
        os.mkdir(args.output_dir)
    elif args.do_train:
        raise ValueError(f'Output directory is required while do_train=True.')
    elif (args.output_file is None):
        raise ValueError(f'Output directory or output file is required.')
    # Fall back to CPU when CUDA is unavailable, regardless of --device.
    device = torch.device((args.device if torch.cuda.is_available() else 'cpu'))
    args.device = device
    args.n_gpu = 1
    set_seed(args)
    config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True, add_prefix_space=True, cache_dir=args.cache_dir)
    (model, loading_info) = COREF_CLASS.from_pretrained(args.model_name_or_path, output_loading_info=True, config=config, cache_dir=args.cache_dir, args=args)
    if (model.base_model_prefix not in SUPPORTED_MODELS):
        raise NotImplementedError(f'Model not supporting {args.model_type}, choose one of {SUPPORTED_MODELS}')
    args.base_model = model.base_model_prefix
    model.to(args.device)
    for (key, val) in loading_info.items():
        logger.info(f'{key}: {val}')
    # Parameter counts in millions: transformer body vs. coref head.
    (t_params, h_params) = [(p / 1000000) for p in model.num_parameters()]
    logger.info(f'Parameters: {(t_params + h_params):.1f}M, Transformer: {t_params:.1f}M, Head: {h_params:.1f}M')
    (dataset, dataset_files) = coref_dataset.create(tokenizer=tokenizer, train_file=args.train_file, dev_file=args.dev_file, test_file=args.test_file, cache_dir=args.cache_dir)
    args.dataset_files = dataset_files
    collator = LeftOversCollator(tokenizer=tokenizer, device=args.device, max_segment_len=args.max_segment_len)
    eval_dataloader = DynamicBatchSampler(dataset[args.eval_split], collator=collator, max_tokens=args.max_tokens_in_batch, max_segment_len=args.max_segment_len)
    evaluator = Evaluator(args=args, eval_dataloader=eval_dataloader)
    if args.do_train:
        train_sampler = DynamicBatchSampler(dataset['train'], collator=collator, max_tokens=args.max_tokens_in_batch, max_segment_len=args.max_segment_len)
        # Batches are precomputed once, then shuffled deterministically.
        train_batches = coref_dataset.create_batches(sampler=train_sampler, dataset_files=args.dataset_files, cache_dir=args.cache_dir).shuffle(seed=args.seed)
        logger.info(train_batches)
        (global_step, tr_loss) = train(args, train_batches, model, tokenizer, evaluator)
        logger.info(f'global_step = {global_step}, average loss = {tr_loss}')
    # Evaluation runs in both the train and eval-only paths.
    results = evaluator.evaluate(model)
    return results
def get_dataset():
    """Dispatch on FLAGS.dataset: ImageNet-1k, any CIFAR variant, else fail."""
    name = FLAGS.dataset
    if name == 'imagenet1k':
        return get_imagenet()
    if 'cifar' in name:
        return get_cifar()
    raise NotImplementedError('dataset not implemented.')
def add_label(img, label, bbox, draw_bg=True, text_bg_color=(255, 255, 255), text_color=(0, 0, 0), top=True):
    """Draw `label` at the top-left corner of `bbox`, above or below its top edge.

    When draw_bg is True a filled strip sized to the text width is drawn
    behind the label. Returns the (mutated) image.
    """
    text_width = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0][0]
    # The background strip extends 30px up (top=True) or down from bbox's top.
    strip_offset = -30 if top else 30
    label_bg = [bbox[0], bbox[1], bbox[0] + text_width, bbox[1] + strip_offset]
    if draw_bg:
        cv2.rectangle(img, (label_bg[0], label_bg[1]), (label_bg[2] + 5, label_bg[3]), text_bg_color, -1)
    # Baseline sits 5px inside the strip on either side of the edge.
    text_y = (bbox[1] - 5) if top else (bbox[1] - 5 + 30)
    cv2.putText(img, label, (bbox[0] + 5, text_y), cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2)
    return img
class MaxPool3d(nn.MaxPool3d):
    """nn.MaxPool3d that tolerates zero-element inputs on old torch versions.

    Torch < 1.9 cannot pool an empty tensor, so the expected output shape is
    computed from the pooling arithmetic and an empty tensor of that shape is
    returned instead.
    """

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            out_shape = list(x.shape[:2])  # keep batch and channel dims
            pool_args = zip(
                x.shape[-3:],
                _triple(self.kernel_size),
                _triple(self.padding),
                _triple(self.stride),
                _triple(self.dilation),
            )
            for i, k, p, s, d in pool_args:
                # Standard pooling output-size formula per spatial dim.
                o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
                out_shape.append(math.ceil(o) if self.ceil_mode else math.floor(o))
            return NewEmptyTensorOp.apply(x, out_shape)
        return super().forward(x)
class StaticErrorRateChannel(Channel):
    """Channel corrupting a fixed (or uniformly drawn) number of positions.

    `number_errors` may be a single integer or an (min, max) interval; each
    transmission picks the error count uniformly from that interval and
    replaces that many coordinates with different random field elements.
    """

    def __init__(self, space, number_errors):
        if isinstance(number_errors, (Integer, int)):
            # Normalize a single count into a degenerate interval.
            number_errors = (number_errors, number_errors)
        if not isinstance(number_errors, (tuple, list)):
            raise ValueError('number_errors must be a tuple, a list, an Integer or a Python int')
        super().__init__(space, space)
        if number_errors[1] > space.dimension():
            raise ValueError('There might be more errors than the dimension of the input space')
        self._number_errors = number_errors

    def _repr_(self):
        return ('Static error rate channel creating %s errors, of input and output space %s'
                % (format_interval(self.number_errors()), self.input_space()))

    def _latex_(self):
        return ('\\textnormal{Static error rate channel creating %s errors, of input and output space %s}'
                % (format_interval(self.number_errors()), self.input_space()))

    def transmit_unsafe(self, message):
        """Return a copy of `message` with a random set of positions corrupted."""
        corrupted = copy(message)
        n_err = randint(*self.number_errors())
        space = self.input_space()
        ring = space.base_ring()
        for pos in sample(range(space.dimension()), n_err):
            # Redraw until the replacement actually differs from the original.
            new_val = ring.random_element()
            while corrupted[pos] == new_val:
                new_val = ring.random_element()
            corrupted[pos] = new_val
        return corrupted

    def number_errors(self):
        """The (min, max) interval of error counts."""
        return self._number_errors
def test_two_columns():
    """Regular x becomes an RVec of int64; ragged y becomes an awkward ListArray."""
    array = ak.Array([[{'x': 1, 'y': [1.1]}, {'x': 2, 'y': [2.2, 0.2]}], [], [{'x': 3, 'y': [3.0, 0.3, 3.3]}]])
    frame = ak.to_rdataframe({'x': array['x'], 'y': array['y']}, flatlist_as_rvec=True)
    assert set(frame.GetColumnNames()) == {'x', 'y'}
    assert frame.GetColumnType('x') == 'ROOT::VecOps::RVec<int64_t>'
    assert frame.GetColumnType('y').startswith('awkward::ListArray_')
def train(config: ConfigParser):
    # Train the model described by `config`: build data loaders and the
    # architecture, optionally warm-start from a checkpoint, wire up the
    # optimizer/scheduler, then delegate the loop to Trainer.
    logger = config.get_logger('train')
    config['data_loader']['args']['training'] = True
    data_loader = config.init_obj('data_loader', module_data)
    valid_data_loader = data_loader.split_validation()
    # The scheduler configuration may depend on steps-per-epoch.
    config = update_lr_scheduler(config, len(data_loader))
    model = config.init_obj('arch', module_arch)
    logger.info(model)
    if config['checkpoint']:
        logger.info(f"Loading checkpoint: {config['checkpoint']} ...")
        checkpoint = torch.load(config['checkpoint'])
        state_dict = checkpoint['state_dict']
        # Checkpoints saved from DataParallel models carry 'module.'-prefixed
        # keys, so the wrapper must be applied before loading.
        if (config['n_gpu'] > 1):
            model = torch.nn.DataParallel(model)
        model.load_state_dict(state_dict)
    (device, device_ids) = prepare_device(config['n_gpu'])
    model = model.to(device)
    if (len(device_ids) > 1):
        model = torch.nn.DataParallel(model, device_ids=device_ids)
    criterion = getattr(module_loss, config['loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]
    # Only optimize parameters that still require gradients.
    trainable_params = filter((lambda p: p.requires_grad), model.parameters())
    optimizer = config.init_obj('optimizer', torch.optim, trainable_params)
    lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler, optimizer)
    trainer = Trainer(model, criterion, metrics, optimizer, config=config, device=device, data_loader=data_loader, valid_data_loader=valid_data_loader, lr_scheduler=lr_scheduler, amp=config['amp'])
    trainer.train()
def path_map(src_path, obj_path):
    """Return a function that rewrites occurrences of `src_path` to `obj_path`.

    Bug fix: the original defined `inner_map` but never returned it, so
    path_map() always returned None.
    """
    def inner_map(full_path):
        return full_path.replace(src_path, obj_path)
    return inner_map
def plane_grid_2d(xbound, ybound):
    # Build a flattened 2D grid of coordinates on the GPU.
    # Each bound is (min, max, step); sample count is (max - min) / step.
    (xmin, xmax) = (xbound[0], xbound[1])
    num_x = int(((xbound[1] - xbound[0]) / xbound[2]))
    (ymin, ymax) = (ybound[0], ybound[1])
    num_y = int(((ybound[1] - ybound[0]) / ybound[2]))
    # NOTE(review): the names appear swapped — `y` spans the x-bound and `x`
    # spans the y-bound before meshgrid crosses them. Downstream code may
    # depend on this orientation; confirm before "fixing".
    y = torch.linspace(xmin, xmax, num_x).cuda()
    x = torch.linspace(ymin, ymax, num_y).cuda()
    (y, x) = torch.meshgrid(x, y)
    x = x.flatten()
    y = y.flatten()
    # Stack into a (2, num_x * num_y) coordinate tensor.
    coords = torch.stack([x, y], axis=0)
    return coords
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Encode (title, text) document pairs with the DPR context encoder.

    Returns a dict with an 'embeddings' key holding the pooled outputs as a
    numpy array (detached, moved to CPU).
    """
    tokenized = ctx_tokenizer(documents['title'], documents['text'], truncation=True, padding='longest', return_tensors='pt')
    pooled = ctx_encoder(tokenized['input_ids'].to(device=device), return_dict=True).pooler_output
    return {'embeddings': pooled.detach().cpu().numpy()}
def evaluate(sess, model, minibatch_iter, size=None):
    """One validation pass; returns (loss, micro-F1, macro-F1, elapsed seconds)."""
    start = time.time()
    feed_dict_val, labels = minibatch_iter.node_val_feed_dict(size)
    preds, loss = sess.run([model.preds, model.loss], feed_dict=feed_dict_val)
    mic, mac = calc_f1(labels, preds)
    return (loss, mic, mac, time.time() - start)
def dropout_flop_jit(inputs, outputs):
    """FLOP counter for dropout: one op per element of the first input tensor."""
    first_shape = get_shape(inputs[0])
    return Counter({'dropout': prod(first_shape)})
class Project():
    """Discovers Java source-root directories under a base path.

    A directory is a source root when a .java file's package declaration
    matches the directory layout above it (com/example/... for package
    com.example....). Roots containing 'test' in their relative path are
    skipped.
    """

    def __init__(self, base_path):
        self.base_path = base_path

    def get_sources_paths(self):
        """Return unique source-root paths relative to base_path."""
        source_paths = {}  # dict keyed by path: preserves insertion order, dedupes
        for (root, dirs, files) in os.walk(self.base_path):
            for file in files:
                if file.endswith('.java'):
                    source_root = self.__get_source_root(file, root)
                    if source_root:
                        source_root = os.path.relpath(source_root, self.base_path)
                        if ('test' not in source_root):
                            source_paths[source_root] = 1
        return list(source_paths.keys())

    def __get_source_root(self, file, root):
        # Walk up one directory per package component (innermost first);
        # a mismatch means the layout does not match the declaration.
        source_path = root
        for package_name in reversed(self.__get_package_names(os.path.join(root, file))):
            if (os.path.basename(source_path) == package_name):
                source_path = os.path.dirname(source_path)
            else:
                source_path = None
                break
        return source_path

    def __get_package_names(self, java_file_path):
        """Return the package components declared in a .java file, or [].

        Bug fix: this method previously lacked the `self` parameter, so every
        call through `self.__get_package_names(...)` raised TypeError.
        """
        try:
            with open(java_file_path, 'r', encoding='utf-8', errors='ignore') as f:
                try:
                    package_declaration = next((line for line in f.readlines() if ('package ' in line)))
                    package_name_start = (package_declaration.find('package ') + 8)
                    package_name_end = package_declaration.find(';', package_name_start)
                    return package_declaration[package_name_start:package_name_end].split('.')
                except StopIteration:
                    return []
        except FileNotFoundError:
            return []
def _save_tags(tag_list, output_labels): with open(output_labels, 'w') as f: f.write('\n'.join(tag_list))
def add_numeric_values_to_question(question):
    """Normalize the question text, extract numeric spans, and wrap it all in a Question."""
    normalized = normalize_for_match(question)
    return Question(
        original_text=question,
        text=normalized,
        numeric_spans=parse_text(normalized),
    )
def main():
    """CLI entry point: download and unpack every MNIST resource.

    A KeyboardInterrupt aborts cleanly with a message instead of a traceback.
    """
    parser = argparse.ArgumentParser(description='Download the MNIST dataset from the internet')
    parser.add_argument('-d', '--destination', default='.', help='Destination directory')
    parser.add_argument('-q', '--quiet', action='store_true', help="Don't report about progress")
    options = parser.parse_args()
    if not os.path.exists(options.destination):
        os.makedirs(options.destination)
    try:
        for resource in RESOURCES:
            target = os.path.join(options.destination, resource)
            download(target, resource, options.quiet)
            unzip(target, options.quiet)
    except KeyboardInterrupt:
        print('Interrupted')
class softmax(nn.Module):
    """Linear classification head over L2-normalized features with CE loss."""

    def __init__(self, input_size: int, output_size: int):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.fc = nn.Linear(input_size, output_size)
        # (sic) attribute name kept for checkpoint/state-dict compatibility.
        self.criertion = nn.CrossEntropyLoss()

    def input_size(self) -> int:
        """Expected feature dimension of the input."""
        return self._indim

    def output_size(self) -> int:
        """Number of output classes."""
        return self._outdim

    def forward(self, x: torch.Tensor, label: torch.LongTensor):
        """Return (loss, logits) for a batch of features and integer labels."""
        assert (x.size()[0] == label.size()[0])
        # Bug fix: the original compared against the bound method
        # `self.input_size` (int == method is always False, so the assert
        # always failed); it must compare against the method's value.
        assert (x.size()[1] == self.input_size())
        x = F.normalize(x, dim=1)
        x = self.fc(x)
        loss = self.criertion(x, label)
        return (loss, x)
def evaluate_policy(policy, env_name, seed, eval_episodes=10, render=False):
    """Average undiscounted return of `policy` over `eval_episodes` episodes.

    A fresh environment is seeded with seed + 100 so evaluation rollouts do
    not share randomness with training.
    """
    eval_env = gym.make(env_name)
    eval_env.seed(seed + 100)
    total_reward = 0.0
    for _ in range(eval_episodes):
        state, done = eval_env.reset(), False
        while not done:
            action = policy.select_action(np.array(state))
            if render:
                eval_env.render()
            state, reward, done, _ = eval_env.step(action)
            total_reward += reward
    return total_reward / eval_episodes
def count_params(model, verbose=False):
    """Return the total parameter count of `model`; optionally print it in
    millions."""
    total_params = 0
    for p in model.parameters():
        total_params += p.numel()
    if verbose:
        print(f'{model.__class__.__name__} has {(total_params * 1e-06):.2f} M params.')
    return total_params
def test_minimum_rule():
    """minimum_rule on the Kuncheva batch example must yield 2."""
    computed = minimum_rule(example_kuncheva_batch)
    assert np.allclose(2, computed)
class TestGPT2WindowService():
    """Tests for the GPT-2 window service: encoding round-trips,
    context-window fitting, and right-truncation."""

    def setup_method(self):
        # Each test gets a scratch dir for the tokenizer service's cache.
        self.path: str = tempfile.mkdtemp()
        service: TokenizerService = get_tokenizer_service(self.path)
        self.window_service = WindowServiceFactory.get_window_service('huggingface/gpt2', service)

    def teardown_method(self, method):
        shutil.rmtree(self.path)

    def test_max_request_length(self):
        # 1025 = the 1024-token GPT-2 context window plus one, presumably for
        # the first generated token -- TODO confirm against the service impl.
        assert (self.window_service.max_request_length == 1025)

    def test_encode(self):
        assert (self.window_service.encode(TEST_PROMPT).token_values == GPT2_TEST_TOKEN_IDS)

    def test_decode(self):
        # decode(encode(x)) must round-trip exactly.
        assert (self.window_service.decode(self.window_service.encode(TEST_PROMPT).tokens) == TEST_PROMPT)

    def test_tokenize(self):
        assert (self.window_service.tokenize(TEST_PROMPT) == GPT2_TEST_TOKENS)

    def test_fits_within_context_window(self):
        # TEST_PROMPT is 51 tokens: it fits when 1025-51 completion tokens are
        # expected, and stops fitting with one more.
        assert self.window_service.fits_within_context_window(TEST_PROMPT, (1025 - 51))
        assert (not self.window_service.fits_within_context_window(TEST_PROMPT, ((1025 - 51) + 1)))

    def test_truncate_from_right(self):
        # 41 copies of the prompt exceed the window; truncation must bring it
        # back to exactly max_request_length tokens.
        long_prompt: str = (TEST_PROMPT * 41)
        assert (not self.window_service.fits_within_context_window(long_prompt))
        truncated_long_prompt: str = self.window_service.truncate_from_right(long_prompt)
        assert (self.window_service.get_num_tokens(truncated_long_prompt) == 1025)
        assert self.window_service.fits_within_context_window(truncated_long_prompt)

    def test_truncate_from_right_edge_case(self):
        # Text whose tokenization is not byte-stable must still pass through
        # unchanged when no completion tokens are reserved.
        problematic_text: str = "their 'studio'"
        assert (self.window_service.truncate_from_right(problematic_text, expected_completion_token_length=0) == problematic_text)

    def test_tokenize_and_count(self):
        assert (self.window_service.get_num_tokens(TEST_PROMPT) == 51)
class SimpleTokenizer(object):
    """CLIP-style byte-level BPE tokenizer."""

    def __init__(self, bpe_path: str=default_bpe()):
        # NOTE(review): the \p{L}/\p{N} classes in the pattern below require
        # the third-party `regex` module imported as `re` elsewhere in this
        # file -- confirm; stdlib `re` does not support them.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        # Skip the header line and keep exactly 49152-256-2 merge rules.
        merges = merges[1:(((49152 - 256) - 2) + 1)]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        # Word-final variants of every byte symbol.
        vocab = (vocab + [(v + '</w>') for v in vocab])
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # Lower rank == earlier (more frequent) merge.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Special tokens bypass BPE entirely via the cache.
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)

    def bpe(self, token):
        """Apply the learned merges to one pre-tokenized word; return the
        space-joined BPE symbols (memoized in self.cache)."""
        if (token in self.cache):
            return self.cache[token]
        # Mark the last character as word-final.
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Merge the lowest-ranked adjacent pair first; unknown pairs rank
            # as infinity and terminate the loop.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:  # ValueError: `first` absent -- copy the tail and stop
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Clean and lowercase `text`, then map it to BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Byte-level fallback: every UTF-8 byte has a printable stand-in.
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def decode(self, tokens):
        """Inverse of encode(); '</w>' markers become spaces."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text
class MvpConfig(PretrainedConfig):
    """Configuration for the MVP encoder-decoder model.

    Mirrors the BART-style configuration; `use_prompt`, `prompt_length` and
    `prompt_mid_dim` additionally configure lightweight prompt tuning.
    """
    model_type = 'mvp'
    # Cached decoder state is internal, not a model output.
    keys_to_ignore_at_inference = ['past_key_values']
    # Canonical HF attribute names mapped onto the MVP-specific ones.
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        # `num_hidden_layers` aliases the encoder depth for HF utilities.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding
        # Prompt-tuning knobs.
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
        # Legacy escape hatch: old checkpoints forced BOS at generation time.
        if ((self.forced_bos_token_id is None) and kwargs.get('force_bos_token_to_be_generated', False)):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. The config can simply be saved and uploaded again to be fixed.')
def estimate_kikuchi_marginal(domain, total, marginals):
    """Combine overlapping region marginals into one joint estimate using the
    Kikuchi (cluster variation) approximation.

    `domain` is the target Factor domain, `total` the normalization constant
    (total record count), and `marginals` a mapping from attribute tuples
    (regions) to Factor marginals. Returns the estimated joint as a Factor
    normalized to `total`.
    """
    marginals = dict(marginals)
    regions = set(marginals.keys())
    size = 0
    # Close the region set under pairwise intersection, projecting an existing
    # marginal onto each newly created sub-region; loop until a fixpoint.
    while (len(regions) > size):
        size = len(regions)
        for (r1, r2) in itertools.combinations(regions, 2):
            z = tuple(sorted((set(r1) & set(r2))))
            if ((len(z) > 0) and (not (z in regions))):
                marginals[z] = marginals[r1].project(z)
                regions.update({z})
    # Hasse diagram of the region poset: edge r1 -> r2 iff r2 is a maximal
    # strict subset of r1 (no r3 strictly between them).
    G = nx.DiGraph()
    G.add_nodes_from(regions)
    for r1 in regions:
        for r2 in regions:
            if ((set(r2) < set(r1)) and (not any((((set(r2) < set(r3)) and (set(r3) < set(r1))) for r3 in regions)))):
                G.add_edge(r1, r2)
    # Ancestors of a region = all strict supersets, via transitive closure of
    # the reversed Hasse diagram.
    H1 = nx.transitive_closure(G.reverse())
    ancestors = {r: list(H1.neighbors(r)) for r in regions}
    moebius = {}

    def get_counting_number(r):
        # Moebius / overcounting numbers: k_r = 1 - sum over ancestors.
        if (not (r in moebius)):
            moebius[r] = (1 - sum((get_counting_number(s) for s in ancestors[r])))
        return moebius[r]
    # Kikuchi combination: log P = sum_r k_r * log mu_r, then renormalize so
    # the distribution sums to `total`.
    logP = Factor.zeros(domain)
    for r in regions:
        kr = get_counting_number(r)
        logP += (kr * marginals[r].log())
    logP += (np.log(total) - logP.logsumexp())
    return logP.exp()
def wsf_exponential(table: Table, attrs: List[str], centers: List[Any], params: Dict[(str, Any)]) -> Query:
    """Workload synthesis: build a query whose numeric predicates are ranges of
    exponentially-distributed width centered on `centers`; nulls and
    categorical attributes get equality predicates."""
    query = new_query(table, ncols=len(attrs))
    for attr, center in zip(attrs, centers):
        if pd.isnull(center) or is_categorical(table.columns[attr].dtype):
            query.predicates[attr] = ('=', center)
            continue
        col = table.columns[attr]
        # Mean width is one tenth of the column's value range.
        rate = 1 / ((col.maxval - col.minval) / 10)
        width = random.expovariate(rate)
        half = width / 2
        query.predicates[attr] = parse_range(col, center - half, center + half)
    return query
class NoisyObservationEnv(ProxyEnv, Serializable):
    """Env wrapper that injects Gaussian noise into every observation.

    NOTE: this makes the problem non-Markovian.
    BUG FIX: the original class body began with the bare expression
    ``('obs_noise', type=float, help=...)`` -- a keyword argument inside a
    tuple display, which is a SyntaxError. It was presumably a mangled
    ``@autoargs.arg('obs_noise', ...)`` decorator; it is preserved here as
    documentation only.
    """

    def __init__(self, env, obs_noise=0.1):
        # obs_noise: std-dev multiplier for the injected Gaussian noise.
        super(NoisyObservationEnv, self).__init__(env)
        Serializable.quick_init(self, locals())
        self.obs_noise = obs_noise

    def get_obs_noise_scale_factor(self, obs):
        # Uniform scaling by default; subclasses may scale per dimension.
        return np.ones_like(obs)

    def inject_obs_noise(self, obs):
        """Return obs plus element-wise N(0, (scale * obs_noise)^2) noise."""
        noise = self.get_obs_noise_scale_factor(obs) * self.obs_noise * np.random.normal(size=obs.shape)
        return obs + noise

    def get_current_obs(self):
        return self.inject_obs_noise(self._wrapped_env.get_current_obs())

    def reset(self):
        obs = self._wrapped_env.reset()
        return self.inject_obs_noise(obs)

    def step(self, action):
        next_obs, reward, done, info = self._wrapped_env.step(action)
        return Step(self.inject_obs_noise(next_obs), reward, done, **info)
class EventList(list):
    """A list of profiler FunctionEvents with pretty-printing, Chrome-trace
    export, and aggregation helpers."""

    def __init__(self, *args, **kwargs):
        super(EventList, self).__init__(*args, **kwargs)

    def __str__(self):
        return self.table()

    def table(self, sort_by=None):
        """Render the events as a formatted table, optionally sorted."""
        return build_table(self, sort_by)

    def export_chrome_trace(self, path):
        """Write the events as a chrome://tracing JSON file at `path`."""
        import json
        with open(path, 'w') as f:
            chrome_events = []
            next_id = 0
            for evt in self:
                # Complete event ('X') for the CPU-side span.
                chrome_events.append(dict(name=evt.name, ph='X', ts=evt.cpu_interval.start, dur=evt.cpu_interval.elapsed_us(), tid=evt.thread, pid='CPU functions', args={}))
                for k in evt.kernels:
                    # Flow-event pair ('s' -> 'f') ties the CPU launch to its
                    # CUDA kernel, plus an 'X' span for the kernel itself.
                    chrome_events.append(dict(name=evt.name, ph='s', ts=evt.cpu_interval.start, tid=evt.thread, pid='CPU functions', id=next_id, cat='cpu_to_cuda', args={}))
                    chrome_events.append(dict(name=k.name, ph='f', ts=k.interval.start, tid=k.device, pid='CUDA functions', id=next_id, cat='cpu_to_cuda', args={}))
                    chrome_events.append(dict(name=k.name, ph='X', ts=k.interval.start, dur=k.interval.elapsed_us(), tid=k.device, pid='CUDA functions', args={}))
                    next_id += 1
            json.dump(chrome_events, f)

    def key_averages(self):
        """Aggregate events sharing the same key into FunctionEventAvg rows."""
        stats = defaultdict(FunctionEventAvg)
        for evt in self:
            stats[evt.key] += evt
        return EventList(stats.values())

    def total_average(self):
        """Fold every event into a single 'Total' FunctionEventAvg."""
        total_stat = FunctionEventAvg()
        for evt in self:
            total_stat += evt
            # Presumably += adopts the event's key; reset each iteration so
            # the next merge succeeds -- TODO confirm against
            # FunctionEventAvg.__iadd__.
            total_stat.key = None
        total_stat.key = 'Total'
        return total_stat
def require_sudachi(test_case):
    """Decorator: skip `test_case` unless the sudachi tokenizer is installed."""
    skip_unless = unittest.skipUnless(is_sudachi_available(), 'test requires sudachi')
    return skip_unless(test_case)
class Locked(HTTPException):
    """*423 Locked* -- the WebDAV status indicating the resource is locked."""

    code = 423
    description = 'The resource that is being accessed is locked.'
def weights_init_mlp(m, gain=1.0):
    """Module-apply hook: (re)initialize Linear layers with normalized-column
    weights and zeroed biases."""
    if 'Linear' not in m.__class__.__name__:
        return
    init_normc_(m.weight.data, gain)
    if m.bias is not None:
        m.bias.data.fill_(0)
def load_tsv_to_dicts(path: Union[(str, Path)]) -> List[dict]:
    """Read a tab-separated file with a header row into a list of dicts.

    Quoting is disabled entirely (QUOTE_NONE), so tab and newline are the
    only structural characters.
    """
    with open(path, 'r') as f:
        reader = csv.DictReader(f, delimiter='\t', quotechar=None, doublequote=False, lineterminator='\n', quoting=csv.QUOTE_NONE)
        return [dict(row) for row in reader]
def test_later_default_lr():
    """Regression test: a learning rate explicitly pinned for a future epoch
    must be honored after 250 epochs of Newbob control."""
    import tempfile
    tmp_file = tempfile.mktemp()  # race-prone but acceptable inside a test
    lr = 0.0005
    # Warm-up schedule: 10 values ramping from 3e-4 up to the base lr.
    learning_rates = list(numpy.linspace(0.0003, lr, num=10))
    config = Config()
    config.update({'learning_rate_file': tmp_file, 'learning_rate_control': 'newbob_multi_epoch', 'learning_rate_control_relative_error_relative_lr': True, 'learning_rate_control_min_num_epochs_per_new_lr': 3, 'newbob_multi_num_epochs': 6, 'newbob_multi_update_interval': 1, 'learning_rate': lr, 'learning_rates': learning_rates, 'min_learning_rate': (lr / 50.0)})
    lrc = load_learning_rate_control_from_config(config)
    assert isinstance(lrc, NewbobMultiEpoch)
    num_epochs = 250
    # Feed constant scores so Newbob never decays the lr on its own.
    for epoch in range(1, (num_epochs + 1)):
        lrc.get_learning_rate_for_epoch(epoch)
        lrc.set_epoch_error(epoch, {'train_score': 0.5, 'train_error': 0.5})
        lrc.set_epoch_error(epoch, {'dev_score': 0.5, 'dev_error': 0.5})
    print('Learning rates:')
    print(lrc)
    lrc.save()
    print('Saved to:', lrc.filename)
    # Re-key the warm-up list by epoch number and pin a halved lr at 251.
    learning_rates = {(i + 1): v for (i, v) in enumerate(learning_rates)}
    later_epoch = (num_epochs + 1)
    learning_rates[later_epoch] = (lr * 0.5)
    config.update({'learning_rates': learning_rates})
    # A fresh control object must load the saved state plus the pinned lr.
    lrc = load_learning_rate_control_from_config(config)
    assert (later_epoch in lrc.epoch_data)
    lr251 = lrc.get_learning_rate_for_epoch(later_epoch)
    numpy.testing.assert_almost_equal(lr251, learning_rates[later_epoch])
def fasterrcnn_resnet8_fpn(num_classes=91, pretrained_backbone=True, weight_loss=False, detection_score_thres=0.05, use_soft_nms=False, nms_thres=0.4, anchor_sizes=[32, 64, 128, 256, 512], n_channel_backbone=5, use_context=False, use_track_branch=False, **kwargs):
    """Build a Faster R-CNN detector on a ResNet-8 FPN backbone.

    NOTE(review): `nms_thres` and `anchor_sizes` are accepted but never
    forwarded to the model below -- confirm whether they should be wired
    through. `anchor_sizes` also uses a mutable default list (harmless here
    since it is never mutated, but a tuple would be safer).
    """
    backbone = resnet_fpn_backbone('resnet8', pretrained_backbone, n_channel_backbone, first_layer_out='layer1')
    model = FasterRCNN(backbone, num_classes=num_classes, use_soft_nms=use_soft_nms, n_channel_backbone=n_channel_backbone, weight_loss=weight_loss, box_score_thresh=detection_score_thres, use_context=use_context, use_track_branch=use_track_branch, **kwargs)
    return model
def model_fn(config, batch):
    """Instantiate an EHRTransformer from `config` and apply it to `batch`."""
    transformer = femr.models.transformer.EHRTransformer(config)
    return transformer(batch)
class CharacteristicCohomologyClassRing(FiniteGCAlgebra):
    """Ring of characteristic cohomology classes of a vector bundle.

    Generators are Chern classes (complex bundles) or Pontryagin classes
    (real bundles, plus an Euler class when the bundle is oriented).

    BUG FIXES relative to the original:
    - a stray ``_method`` token before ``def _build_element`` (presumably a
      mangled decorator) was a SyntaxError and has been removed;
    - three error objects were constructed but never raised (unknown
      predefined class, unsupported field type -- which left ``pow_range``
      undefined -- and unknown ``class_type``); they are now raised;
    - the typo 'unkown' in the last error message is fixed.
    """

    Element = CharacteristicCohomologyClassRingElement

    def __init__(self, base, vbundle):
        self._vbundle = vbundle
        self._domain = vbundle._base_space
        dim = self._domain._dim
        rk = vbundle._rank
        if vbundle._field_type == 'complex':
            # Chern classes c_1 .. c_ran, each of degree 2i.
            ran = min(rk, dim // 2)
            names = [f'c_{i}' for i in range(1, ran + 1)]
            degrees = [2 * i for i in range(1, ran + 1)]
            self._algorithm = ChernAlgorithm()
        elif vbundle._field_type == 'real':
            # Pontryagin classes p_1 .. p_ran, each of degree 4i.
            ran = min(rk // 2, dim // 4)
            names = [f'p_{i}' for i in range(1, ran + 1)]
            degrees = [4 * i for i in range(1, ran + 1)]
            self._algorithm = PontryaginAlgorithm()
            if vbundle.has_orientation():
                # Oriented real bundles additionally get the Euler class `e`
                # of degree rk.
                names = ['e'] + names
                degrees = [rk] + degrees
                self._algorithm = PontryaginEulerAlgorithm()
        else:
            raise TypeError(f'Characteristic cohomology classes not supported for vector bundles with field type {vbundle._field_type}')
        if (not names) or (not degrees):
            raise ValueError('cannot find any generators')
        names = tuple(names)
        degrees = tuple(degrees)
        super().__init__(base=base, names=names, degrees=degrees, max_degree=dim, mul_symbol='', mul_latex_symbol='\\smile')

    def _element_constructor_(self, x, **kwargs):
        """Convert `x` (string/expression/polynomial, scalar, element or
        index) into an element of this ring."""
        if isinstance(x, (str, Expression)) or isinstance(x, Polynomial):
            return self._build_element(x, **kwargs)
        R = self.base_ring()
        if x in R:
            # Scalars embed as multiples of the identity.
            one_basis = self.one_basis()
            d = {one_basis: R(x)}
        elif isinstance(x, CharacteristicCohomologyClassRingElement):
            d = x._monomial_coefficients
        elif ((hasattr(self._indices, 'element_class') and isinstance(self._indices.element_class, type) and isinstance(x, self._indices.element_class)) or (self.parent()(x) == self._indices)):
            d = {x: R.one()}
        elif x in self._indices:
            d = {self._indices(x): R.one()}
        else:
            raise TypeError(f'do not know how to make x (= {x}) an element of self (={self})')
        name, latex_name = (kwargs.get('name'), kwargs.get('latex_name'))
        return self.element_class(self, d, name=name, latex_name=latex_name)

    def _build_element(self, *args, **kwargs):
        """Build a characteristic class from a predefined name, a symbolic
        expression, or a polynomial in the universal variable."""
        name, latex_name = (kwargs.get('name'), kwargs.get('latex_name'))
        base_ring = self.base_ring()
        class_type = kwargs.get('class_type')
        vbundle = self._vbundle
        val = args[0]
        dim = vbundle._base_space._dim
        if isinstance(val, str):
            # Resolve a predefined class name to its generating polynomial.
            from sage.arith.misc import factorial, bernoulli
            P = PolynomialRing(base_ring, 'x')
            x = P.gen()
            if val == 'Chern':
                if vbundle._field_type != 'complex':
                    raise ValueError(f'total Chern class not defined on {vbundle}')
                if name is None:
                    name = 'c'
                class_type = 'multiplicative'
                val = 1 + x
            elif val == 'Pontryagin':
                if vbundle._field_type != 'real':
                    raise ValueError(f'total Pontryagin class not defined on {vbundle}')
                if name is None:
                    name = 'p'
                class_type = 'multiplicative'
                val = 1 + x
            elif val == 'ChernChar':
                if vbundle._field_type != 'complex':
                    raise ValueError(f'Chern character not defined on {vbundle}')
                if name is None:
                    name = 'ch'
                if latex_name is None:
                    latex_name = '\\mathrm{ch}'
                class_type = 'additive'
                # exp(x) truncated at degree dim//2.
                coeff = [1 / factorial(k) for k in range(dim // 2 + 1)]
                val = P(coeff)
            elif val == 'Todd':
                if vbundle._field_type != 'complex':
                    raise ValueError(f'Todd class not defined on {vbundle}')
                if name is None:
                    name = 'Td'
                if latex_name is None:
                    latex_name = '\\mathrm{Td}'
                class_type = 'multiplicative'
                # x / (1 - exp(-x)) expanded via Bernoulli numbers.
                val = 1 + x / 2
                for k in range(1, dim // 2 + 1):
                    val += ((-1) ** (k + 1)) / factorial(2 * k) * bernoulli(2 * k) * x ** (2 * k)
            elif val == 'Hirzebruch':
                if vbundle._field_type != 'real':
                    raise ValueError(f'Hirzebruch class not defined on {vbundle}')
                if name is None:
                    name = 'L'
                if latex_name is None:
                    latex_name = 'L'
                class_type = 'multiplicative'
                coeff = [(2 ** (2 * k)) * bernoulli(2 * k) / factorial(2 * k) for k in range(dim // 4 + 1)]
                val = P(coeff)
            elif val == 'AHat':
                if vbundle._field_type != 'real':
                    raise ValueError(f'AHat class not defined on {vbundle}')
                if name is None:
                    name = 'A^'
                if latex_name is None:
                    latex_name = '\\hat{A}'
                class_type = 'multiplicative'
                coeff = [(-((2 ** (2 * k)) - 2)) / (2 ** (2 * k)) * bernoulli(2 * k) / factorial(2 * k) for k in range(dim // 4 + 1)]
                val = P(coeff)
            elif val == 'Euler':
                if (vbundle._field_type != 'real') or (not vbundle.has_orientation()):
                    raise ValueError(f'Euler class not defined on {vbundle}')
                if name is None:
                    name = 'e'
                class_type = 'Pfaffian'
                val = x
            else:
                # BUG FIX: this error was previously constructed but not raised.
                raise ValueError(f'predefined class "{val}" unknown')
        if isinstance(val, Expression):
            # Symbolic input: Taylor-expand to the relevant truncation order.
            x = val.default_variable()
            P = PolynomialRing(base_ring, x)
            if vbundle._field_type == 'real':
                pow_range = dim // 4
            elif vbundle._field_type == 'complex':
                pow_range = dim // 2
            else:
                # BUG FIX: previously not raised, leaving pow_range undefined.
                raise ValueError(f'field type of {vbundle} must be real or complex')
            val = P(val.taylor(x, 0, pow_range))
        if isinstance(val, Polynomial):
            if class_type is None:
                raise TypeError(f'class_type must be stated if {val} is a polynomial')
            n = self.ngens()
            s = 0
            # With an Euler class, generator 0 is `e`; shift the Pontryagin
            # generators by one.
            if self._algorithm is PontryaginEulerAlgorithm():
                s = 1
                n -= 1
            if class_type == 'additive':
                sym = additive_sequence(val, vbundle._rank, n)
            elif class_type == 'multiplicative':
                sym = multiplicative_sequence(val, n)
            elif class_type == 'Pfaffian':
                # Keep only the odd part and alternate signs.
                P = val.parent()
                x = P.gen()
                val = (val(x) - val(-x)) / 2
                val = P([((-1) ** k) * val[2 * k + 1] for k in range(n + 1)])
                sym = multiplicative_sequence(val, n)
            else:
                # BUG FIX: previously constructed but not raised; typo fixed.
                raise AttributeError('unknown class type')
            # Translate symmetric-function monomials into exponent vectors of
            # the ring generators.
            d = {}
            w_vec = self._weighted_vectors
            for p, c in sym:
                vec = [0] * self.ngens()
                if class_type == 'Pfaffian':
                    vec[0] = 1
                for i in p:
                    vec[(i - 1) + s] += 1
                key = w_vec(vec)
                d[key] = c
            res = self._from_dict(d)
            res.set_name(name=name, latex_name=latex_name)
            return res
        raise ValueError(f'cannot convert {val} into an element of {self}')

    def _repr_(self):
        vbundle = self._vbundle
        return f'Algebra of characteristic cohomology classes of the {vbundle}'
def get_data_path(name):
    """Map a dataset name to its on-disk root; None for unknown names."""
    roots = {
        'cityscapes': './data/city_dataset/',
        'pascal_context': './data/',
    }
    return roots.get(name)
def mytqdm(list_, desc='', show=True):
    """Wrap `list_` in a tqdm progress bar labelled `desc`, or pass it through
    unchanged when `show` is False."""
    if not show:
        return list_
    bar = tqdm(list_)
    bar.set_description(desc)
    return bar
class Lgmres(Benchmark):
    """asv benchmark: one restarted LGMRES iteration on a sparse system."""

    params = [[10, 50, 100, 1000, 10000], [10, 30, 60, 90, 180]]
    param_names = ['n', 'm']

    def setup(self, n, m):
        # Identity plus 1% random off-diagonal mass: sparse yet solvable.
        rng = np.random.default_rng(1234)
        noise = sparse.rand(n, n, density=0.01, random_state=rng)
        self.A = sparse.eye(n, n) + noise
        self.b = np.ones(n)

    def time_inner(self, n, m):
        # Exactly one outer iteration with an inner Krylov space of size m.
        lgmres(self.A, self.b, inner_m=m, maxiter=1)
def grapheme_pipeline(char, grapheme_encoder=None, uppercase=True):
    """Three-stage generator: grapheme list -> encoded ids -> LongTensor.

    Only graphemes present in the encoder's label map are kept; `uppercase`
    folds the input to upper case first.
    """
    text = char.upper() if uppercase else char
    grapheme_list = [g for g in text if g in grapheme_encoder.lab2ind]
    yield grapheme_list
    encoded = grapheme_encoder.encode_sequence(grapheme_list)
    yield encoded
    yield torch.LongTensor(encoded)
def _has_hash(path, expected_hash):
    """Return True iff `path` exists and its pooch file hash matches."""
    if not os.path.exists(path):
        return False
    actual = pooch.file_hash(path)
    return actual == expected_hash
def plot_all_labeling_functions(df_results: pd.DataFrame, score: str, path_to_output_dir: str, model_heads: Optional[List[Tuple[(str, str)]]]=None, is_x_scale_log: bool=True, is_std_bars: bool=True):
    """Draw one subplot per labeling function for `score`, save the 5x3 grid
    as tasks_<score>.png, and return the figure."""
    fig, axes = plt.subplots(5, 3, figsize=(20, 20))
    score_rows = df_results[df_results['score'] == score]
    labeling_functions: List[str] = score_rows['labeling_function'].unique().tolist()
    for idx, labeling_function in enumerate(labeling_functions):
        mask = (df_results['score'] == score) & (df_results['labeling_function'] == labeling_function)
        sub_tasks: List[str] = df_results[mask]['sub_task'].unique().tolist()
        # chexpert has too many sub-tasks for readable std bars.
        std_bars = False if labeling_function == 'chexpert' else is_std_bars
        plot_one_labeling_function(df_results, axes.flat[idx], labeling_function, sub_tasks, score, model_heads=model_heads, is_x_scale_log=is_x_scale_log, is_std_bars=std_bars)
    _plot_unified_legend(fig, axes)
    fig.suptitle(f'{score.upper()} by Task', fontsize=16)
    plt.tight_layout()
    plt.subplots_adjust(top=0.95, bottom=0.05)
    plt.savefig(os.path.join(path_to_output_dir, f'tasks_{score}.png'), dpi=300)
    plt.close('all')
    return fig
def rms_norm(x, weight=None, eps=1e-05):
    """Root-mean-square normalization over the last dimension, optionally
    scaled by a learned `weight`; `eps` guards against division by zero."""
    mean_sq = x.pow(2).mean(-1, keepdim=True)
    normed = x * torch.rsqrt(mean_sq + eps)
    if weight is None:
        return normed
    return normed * weight
def train_val_split(base_dataset: torchvision.datasets.CIFAR10, subset_percent=1.0):
    """Per-class split: train = first `subset_percent` of each class's
    shuffled indices, val = last 10% of each class.

    NOTE: despite the annotation, `base_dataset` is treated as a label array.
    """
    num_classes = 10
    labels = np.array(base_dataset)
    per_class_train = int(len(labels) * subset_percent * 1.0 / num_classes)
    val_start = int(len(labels) * 0.9 / num_classes)
    train_idxs = []
    val_idxs = []
    for cls in range(num_classes):
        cls_idxs = np.where(labels == cls)[0]
        np.random.shuffle(cls_idxs)
        train_idxs.extend(cls_idxs[:per_class_train])
        val_idxs.extend(cls_idxs[val_start:])
    np.random.shuffle(train_idxs)
    np.random.shuffle(val_idxs)
    return (train_idxs, val_idxs)
class TestCompositeParametrization(unittest.TestCase):
    """Tests for CompositeParam, which concatenates several parametrizations.

    BUG FIX: `test_calculate_gradient` originally read
    ``(param.calculate_gradient() vec)`` -- a SyntaxError; the missing
    matrix-multiplication operator ``@`` has been restored.
    """

    def test_get_structure(self):
        # Structure is the per-part structures concatenated.
        param_1 = parametrization.DirectParam(np.array([1, 2, 1, 4]), bounds=[0, 1])
        param_2 = parametrization.DirectParam(np.array([2, 2, 1]), bounds=[0.9, 2.1])
        param_3 = QuadraticParam(np.array([1, 2, 1, 1, 5]), 1.5)
        param = composite_parametrization.CompositeParam([param_1, param_2, param_3])
        np.testing.assert_array_almost_equal(param.get_structure(), np.array([1, 2, 1, 4, 2, 2, 1, 1.5, 6.0, 1.5, 1.5]), 7)

    def test_calculate_gradient(self):
        param_1 = parametrization.DirectParam(np.array([1, 2, 1, 4]), bounds=[0, 1])
        param_2 = parametrization.DirectParam(np.array([2, 2, 1]), bounds=[0.9, 2.1])
        param_3 = QuadraticParam(np.array([1, 2, 1, 1, 5]), 1.5)
        param = composite_parametrization.CompositeParam([param_1, param_2, param_3])
        vec = np.array([1, 2, 1, 2, 4, 1, 1, 3, 3, 4, 5, 1])
        # The composite Jacobian applied to `vec` (restored `@`).
        np.testing.assert_array_almost_equal((param.calculate_gradient() @ vec), np.array([1, 2, 1, 2, 4, 1, 1, 9, 18, 12, 15]), 7)

    def test_encode(self):
        param_1 = parametrization.DirectParam(np.array([1, 2, 1, 4]), bounds=[0, 1])
        param_2 = parametrization.DirectParam(np.array([2, 2, 1]), bounds=[0.9, 2.1])
        param_3 = QuadraticParam(np.array([1, 2, 1, 1, 5]), 1.5)
        param = composite_parametrization.CompositeParam([param_1, param_2, param_3])
        np.testing.assert_array_almost_equal(param.encode(), np.array([1, 2, 1, 4, 2, 2, 1, 1, 2, 1, 1, 5]), 7)

    def test_decode(self):
        # Decoding distributes slices of the vector to the child params.
        param_1 = parametrization.DirectParam(np.array([1, 2, 1, 4]), bounds=[0, 1])
        param_2 = parametrization.DirectParam(np.array([2, 2, 1]), bounds=[0.9, 2.1])
        param_3 = QuadraticParam(np.array([1, 2, 1, 1, 5]), 1.5)
        param = composite_parametrization.CompositeParam([param_1, param_2, param_3])
        param.decode(np.array([1, 3, 1, 1, 4, 1, 3, 1, 5, 1, 1, 6]))
        np.testing.assert_array_almost_equal(param_1.encode(), np.array([1, 3, 1, 1]), 7)
        np.testing.assert_array_almost_equal(param_2.encode(), np.array([4, 1, 3]), 7)
        np.testing.assert_array_almost_equal(param_3.encode(), np.array([1, 5, 1, 1, 6]), 7)

    def test_project(self):
        # Projection clips each child to its own bounds.
        param_1 = parametrization.DirectParam(np.array([1, 2, 1, 4]), bounds=[0, 1])
        param_2 = parametrization.DirectParam(np.array([2, 3.5, 1]), bounds=[0.9, 2.1])
        param_3 = QuadraticParam(np.array([1, 2, 1, 1, 5]), 1.5)
        param = composite_parametrization.CompositeParam([param_1, param_2, param_3])
        param.project()
        np.testing.assert_array_almost_equal(param_1.encode(), np.array([1, 1, 1, 1]), 7)
        np.testing.assert_array_almost_equal(param_2.encode(), np.array([2, 2.1, 1]), 7)
        np.testing.assert_array_almost_equal(param_3.encode(), np.array([1, 2, 1, 1, 5]), 7)

    def test_bounds(self):
        # Unbounded children contribute None entries.
        param_1 = parametrization.DirectParam(np.array([1, 2, 1, 4]), bounds=[0, 1])
        param_2 = parametrization.DirectParam(np.array([2, 3.5, 1]), bounds=[0.9, 2.1])
        param_3 = QuadraticParam(np.array([1, 2, 1, 1, 5]), 1.5)
        param = composite_parametrization.CompositeParam([param_1, param_2, param_3])
        (lower_bounds, upper_bounds) = param.get_bounds()
        np.testing.assert_array_equal(lower_bounds, np.array([0, 0, 0, 0, 0.9, 0.9, 0.9, None, None, None, None, None]))
        np.testing.assert_array_equal(upper_bounds, np.array([1, 1, 1, 1, 2.1, 2.1, 2.1, None, None, None, None, None]))

    def test_to_vector(self):
        param_1 = parametrization.DirectParam(np.array([1, 2, 1, 4]), bounds=[0, 1])
        param_2 = parametrization.DirectParam(np.array([2, 3.5, 1]), bounds=[0.9, 2.1])
        param_3 = QuadraticParam(np.array([1, 2]), 1.5)
        param = composite_parametrization.CompositeParam([param_1, param_2, param_3])
        np.testing.assert_array_almost_equal(param.to_vector(), np.array([1, 2, 1, 4, 2, 3.5, 1, 1.5, 3]))

    def test_from_vector(self):
        param_1 = parametrization.DirectParam(np.array([1, 2, 1, 4]), bounds=[0, 1])
        param_2 = parametrization.DirectParam(np.array([2, 3.5, 1]), bounds=[0.9, 2.1])
        param_3 = QuadraticParam(np.array([1, 2]), 1.5)
        param = composite_parametrization.CompositeParam([param_1, param_2, param_3])
        param.from_vector(np.array([1, 1, 2, 1, 4, 3, 1, 2, 2]))
        np.testing.assert_array_almost_equal(param_1.to_vector(), np.array([1, 1, 1, 1]), 7)
        np.testing.assert_array_almost_equal(param_2.to_vector(), np.array([2.1, 2.1, 1]), 7)
        np.testing.assert_array_almost_equal(param_3.to_vector(), np.array([4.5, 4.5]), 7)

    def test_serialize(self):
        # Serialization is keyed param_0, param_1, ... in child order.
        param_1 = parametrization.DirectParam(np.array([1, 2, 1, 4]), bounds=[0, 1])
        param_2 = parametrization.DirectParam(np.array([2, 3.5, 1]), bounds=[0.9, 2.1])
        param_3 = QuadraticParam(np.array([1, 2]), 1.5)
        param = composite_parametrization.CompositeParam([param_1, param_2, param_3])
        data = param.serialize()
        np.testing.assert_array_almost_equal(data['param_0']['vector'], np.array([1, 2, 1, 4]), 7)
        np.testing.assert_array_almost_equal(data['param_1']['vector'], np.array([2, 3.5, 1]), 7)
        np.testing.assert_array_almost_equal(data['param_2']['vector'], np.array([1.5, 3]), 7)

    def test_deserialize(self):
        param_1 = parametrization.DirectParam(np.array([1, 2, 1, 4]), bounds=[0, 1])
        param_2 = parametrization.DirectParam(np.array([2, 3.5, 1]), bounds=[0.9, 2.1])
        param_3 = QuadraticParam(np.array([1, 2]), 1.5)
        param = composite_parametrization.CompositeParam([param_1, param_2, param_3])
        param.deserialize({'param_0': {'vector': np.array([1, 2, 2, 3])}, 'param_1': {'vector': np.array([1, 2, 5])}, 'param_2': {'vector': np.array([1, 5])}})
        np.testing.assert_array_almost_equal(param_1.encode(), np.array([1, 1, 1, 1]), 7)
        np.testing.assert_array_almost_equal(param_2.encode(), np.array([1, 2, 2.1]), 7)
        np.testing.assert_array_almost_equal(param_3.encode(), np.array([1.5, 7.5]), 7)
def write_csv_data_from_pkl(pkl_inputfile, csv_outputfile, fields, windowt=8, thresh=4, flag_increase_data=True, namefilebasin=None):
    """Convert pickled storm tracks into a flat CSV of fixed-width windows.

    Each output row carries the storm id, the window delay (in timesteps),
    an intensity label (1 if the track's max category exceeds `thresh`), and
    the requested `fields` -- scalars for 'month'/'maxcategory', a basin code
    for 'basin' (when `namefilebasin` exists), otherwise `windowt` values per
    field. Tracks that are too short or already past `thresh` in the first
    window are skipped (and counted). With `flag_increase_data`, up to 6
    shifted windows per track are emitted.

    BUG FIX: the two ``field is 'basin'`` checks used identity comparison
    with a string literal (fragile, relies on interning, SyntaxWarning on
    modern Python); they now use ``==``.
    """
    # NOTE(review): pickle.load can execute arbitrary code -- only feed this
    # function trusted input files.
    with open(pkl_inputfile, 'rb') as file:
        list_tracks = pickle.load(file)
    name_cols = ['stormid', 'delay_tsteps', 'intenseStorm']
    for field in fields:
        if field in ['month', 'maxcategory']:
            name_cols.append(field)
        elif (field == 'basin') and os.path.isfile(namefilebasin):
            name_cols.append(field)
            # stormid -> basin name lookup from the two-column CSV.
            basins = dict(np.array(pd.read_csv(namefilebasin, header=None)))
        else:
            name_cols.extend([(field + str(i)) for i in range(windowt)])
    with open(csv_outputfile, 'w', newline='') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        spamwriter.writerow(name_cols)
        smalltracks = 0
        maxcat_tracks = 0
        Nintense = 0
        for track in list_tracks:
            # Need windowt instants plus two for the shifted windows.
            if track.Ninstants < (windowt + 2):
                print((((('Track ' + str(track.stormid)) + ' too small, only ') + str(track.Ninstants)) + ' instants.'))
                smalltracks = (smalltracks + 1)
                continue
            # Skip tracks already past the threshold in the first window.
            if thresh < (max(track.categories[0:windowt]) + 1):
                print((((('Track ' + str(track.stormid)) + ' threshold category (') + str(thresh)) + ') already reached.'))
                maxcat_tracks = (maxcat_tracks + 1)
                continue
            if flag_increase_data:
                # Up to 6 extra windows, shifted by 2 timesteps each.
                range_windows = range(min(int((track.Ninstants - windowt) / 2), 6))
            else:
                range_windows = [0]
            for delay in range_windows:
                if thresh < (max(track.categories[0:(windowt + (delay * 2))]) + 1):
                    continue
                tarray = [track.stormid, (delay * 2)]
                if track.maxcategory > thresh:
                    tarray.append(1)
                    Nintense = (Nintense + 1)
                else:
                    tarray.append(0)
                for field in fields:
                    if field in ['month', 'maxcategory']:
                        tarray.append(getattr(track, field))
                    elif (field == 'basin') and os.path.isfile(namefilebasin):
                        tarray.append(get_num_basin(basins[track.stormid]))
                    else:
                        tarray.extend(getattr(track, field)[(delay * 2):(windowt + (delay * 2))])
                spamwriter.writerow(tarray)
    print(('Number of too small tracks: ' + str(smalltracks)))
    print(('Number of threshold category already reached: ' + str(maxcat_tracks)))
    print(('Number of intense (positive) storms: ' + str(Nintense)))
class BilateralFilter(AbstractFilter):
    """Bilateral filter: features are (x, y) positions scaled by `alpha`
    concatenated with RGB values scaled by `beta`."""

    def __init__(self, image, alpha, beta):
        # Store the scale parameters before the base class builds features.
        self.alpha = alpha
        self.beta = beta
        super(BilateralFilter, self).__init__(image)

    def _calc_features(self, image):
        spatial = _spatial_features(image, self.alpha)
        color = (image / float(self.beta)).permute(1, 2, 0)
        return torch.cat([spatial, color], dim=2)
def run(args):
    """Generate outputs with a fine-tuned T5 checkpoint on `args.test_file`,
    write them to `res_dir/candidate.txt`, and score them with BLEU and
    sacreBLEU against the `trg_txt` references (metrics go to metric.txt)."""
    tokenizer = AutoTokenizer.from_pretrained(args.t5_model)
    dataloader = get_loader(args.test_file, args.batch_size, args.t5_model, args.max_length, args.max_decode_step, shuffle=False)
    device = torch.cuda.current_device()
    model = T5ForConditionalGeneration.from_pretrained(args.t5_model)
    # Weights come from a Lightning-style checkpoint dict.
    ckpt = torch.load(args.ckpt_file, map_location='cpu')
    state_dict = ckpt['state_dict']
    model.load_state_dict(state_dict)
    model.eval()
    model = model.to(device)
    if (not os.path.exists(args.res_dir)):
        os.makedirs(args.res_dir)
    pred_file = os.path.join(args.res_dir, 'candidate.txt')
    pred_fw = open(pred_file, 'w')
    for batch in tqdm(dataloader, total=len(dataloader)):
        (input_ids, _, _) = batch
        input_ids = input_ids.to(device)
        # Non-zero ids are real tokens; zero is padding.
        attention_mask = torch.sign(input_ids)
        with torch.no_grad():
            outputs = model.generate(input_ids=input_ids, attention_mask=attention_mask, max_length=args.max_decode_step, num_beams=args.beam_size)
        batch_size = input_ids.size(0)
        for i in range(batch_size):
            pred_abs = outputs[i].cpu()
            pred_tok = tokenizer.decode(pred_abs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            pred_fw.write((pred_tok.strip() + '\n'))
            pred_fw.flush()
    pred_fw.close()
    # Index references and predictions by line number for the BLEU helper.
    ref_dict = dict()
    pred_dict = dict()
    refs = []
    with open(args.test_file) as f:
        for (idx, line) in enumerate(f):
            json_dict = json.loads(line)
            ref_dict[idx] = json_dict['trg_txt'].strip()
            refs.append(json_dict['trg_txt'].strip())
    with open(pred_file) as f:
        for (idx, line) in enumerate(f):
            pred_dict[idx] = line.strip()
    metric_file = os.path.join(args.res_dir, 'metric.txt')
    fw = open(metric_file, 'w')
    bleus = eval_qg(ref_dict, pred_dict)
    for (idx, bleu) in enumerate(bleus):
        res = 'BLEU{}: {:.4f}'.format((idx + 1), bleu)
        print(res)
        fw.write((res + '\n'))
    preds = open(pred_file).readlines()
    bleu = sacrebleu.corpus_bleu(preds, [refs])
    print('ScareBLEU:{:.4f}'.format(bleu.score))
    fw.write('Scare BLEU:{:.4f}'.format(bleu.score))
    fw.close()
def arrange_disamb_results_in_lagacy_format(split_id, entity_predictions_file):
    """Merge entity-disambiguation predictions back into the legacy
    entity-linking result layout, keyed by question id."""
    dataset_id = 'grail'
    example_cache = join('feature_cache', f'{dataset_id}_{split_id}_disamb_examples.bin')
    entities_file = f'outputs/grail_{split_id}_entities.json'
    # Disamb instances are expensive to build; cache them with torch.save.
    if os.path.exists(example_cache):
        instances = torch.load(example_cache)
    else:
        dataset_file = join('outputs', f'grailqa_v1.0_{split_id}.json')
        instances = read_disamb_instances_from_entity_candidates(dataset_file, entities_file)
        torch.save(instances, example_cache)
    indexed_pred = load_json(entity_predictions_file)
    el_results = OrderedDict()
    for inst in instances:
        inst_result = {}
        normed_query = _process_query(inst.query)
        inst_result['question'] = normed_query
        pred_entities = OrderedDict()
        for problem in inst.disamb_problems:
            if (len(problem.candidates) == 0):
                continue
            # With a single candidate, or no model prediction for this
            # problem, fall back to the top-ranked candidate.
            if ((len(problem.candidates) == 1) or (problem.pid not in indexed_pred)):
                pred_idx = 0
            else:
                pred_idx = indexed_pred[problem.pid]
            entity = problem.candidates[pred_idx]
            # Character offset of the mention within the normalized query
            # (str.find returns -1 when absent -- kept as-is).
            start_pos = normed_query.find(problem.mention)
            pred_entities[entity.id] = {'mention': problem.mention, 'label': entity.label, 'friendly_name': entity.facc_label, 'start': start_pos}
        inst_result['entities'] = pred_entities
        el_results[inst.qid] = inst_result
    return el_results
class L1(nn.Module):
    """Mean absolute error (L1) criterion between two tensors."""

    def __init__(self):
        super(L1, self).__init__()

    def forward(self, output, target):
        # Average of the element-wise absolute differences.
        diff = output - target
        return diff.abs().mean()
def butterworth(image, cutoff_frequency_ratio=0.005, high_pass=True, order=2.0, channel_axis=None, *, squared_butterworth=True, npad=0):
    """Apply an N-dimensional Butterworth high/low-pass filter in Fourier space.

    ``npad`` edge-pads the image before the FFT to reduce boundary artifacts;
    ``channel_axis`` (if given) is excluded from filtering and the filter is
    broadcast across it.
    """
    if (npad < 0):
        raise ValueError('npad must be >= 0')
    elif (npad > 0):
        # Remember the slice that recovers the original extent after padding.
        center_slice = tuple((slice(npad, (s + npad)) for s in image.shape))
        image = np.pad(image, npad, mode='edge')
    # The filter is built over the spatial axes only.
    fft_shape = (image.shape if (channel_axis is None) else np.delete(image.shape, channel_axis))
    is_real = np.isrealobj(image)
    float_dtype = _supported_float_type(image.dtype, allow_complex=True)
    if ((cutoff_frequency_ratio < 0) or (cutoff_frequency_ratio > 0.5)):
        raise ValueError('cutoff_frequency_ratio should be in the range [0, 0.5]')
    wfilt = _get_nd_butterworth_filter(fft_shape, cutoff_frequency_ratio, order, high_pass, is_real, float_dtype, squared_butterworth)
    axes = np.arange(image.ndim)
    if (channel_axis is not None):
        axes = np.delete(axes, channel_axis)
        abs_channel = (channel_axis % image.ndim)
        post = ((image.ndim - abs_channel) - 1)
        # Insert a length-1 axis so the filter broadcasts over the channel axis.
        sl = ((((slice(None),) * abs_channel) + (np.newaxis,)) + ((slice(None),) * post))
        wfilt = wfilt[sl]
    if is_real:
        # Real input: use the real-FFT pair for speed and memory.
        butterfilt = fft.irfftn((wfilt * fft.rfftn(image, axes=axes)), s=fft_shape, axes=axes)
    else:
        butterfilt = fft.ifftn((wfilt * fft.fftn(image, axes=axes)), s=fft_shape, axes=axes)
    if (npad > 0):
        # Crop the padding back off.
        butterfilt = butterfilt[center_slice]
    return butterfilt
class A001147(SloaneSequence):
    """OEIS A001147: double factorial numbers (2n-1)!! = 1*3*5*...*(2n-1)."""

    def __init__(self):
        # The sequence is indexed starting at n = 0.
        SloaneSequence.__init__(self, offset=0)

    def _repr_(self):
        return 'Double factorial numbers: (2n-1)!! = 1.3.5....(2n-1).'

    def _eval(self, n):
        # (2n)! / (n! * 2^n) is exactly the double factorial (2n-1)!!.
        numerator = arith.factorial(2 * n)
        denominator = arith.factorial(n) * (2 ** n)
        return numerator / denominator
class ResBlock(nn.Module): def __init__(self, conv, n_feats, kernel_size, bias=True, bn=False, act=nn.ReLU(True), res_scale=1): super(ResBlock, self).__init__() m = [] for i in range(2): m.append(conv(n_feats, n_feats, kernel_size, bias=bias)) if bn: m.append(nn.BatchNorm2d(n_feats)) if (i == 0): m.append(act) self.body = nn.Sequential(*m) self.res_scale = res_scale def forward(self, x): res = self.body(x).mul(self.res_scale) res += x return res
def Ring(n, order='lp', names=None, blocks=None):
    """Construct a BooleanPolynomialRing in ``n`` variables.

    ``order`` is a PolyBoRi-style term-order name; ``blocks`` gives optional
    block boundaries; ``names`` defaults to x(0), ..., x(n-1).
    """
    if blocks is None:
        blocks = []
    if names is None:
        variable_names = ['x(' + str(i) + ')' for i in range(n)]
    else:
        variable_names = names
    term_order = TermOrder_from_pb_order(n, order, blocks)
    return BooleanPolynomialRing(n, names=variable_names, order=term_order)
def cppunparse(node, expr_semicolon=True, locals=None, defined_symbols=None):
    """Unparse a Python AST ``node`` into C++ source text and return it stripped."""
    buffer = StringIO()
    # Fall back to a fresh locals table when none (or a falsy one) is supplied.
    scope = locals if locals else CPPLocals()
    CPPUnparser(node, 0, scope, buffer, expr_semicolon=expr_semicolon, defined_symbols=defined_symbols)
    return buffer.getvalue().strip()
def _check_matching_outputs():
    """Assert that forward_step produced exactly the declared model outputs.

    Compares the raw-tensor key sets of the declared model outputs against the
    run context's collected outputs and rejects None values.
    """
    rf.get_run_ctx().check_outputs_complete()
    model_outputs_raw_keys = set(_get_model_outputs_raw_keys())
    # Scalar dynamic sizes are excluded on the run-ctx side to mirror the model side.
    outputs_raw = rf.get_run_ctx().outputs.as_raw_tensor_dict(include_scalar_dyn_sizes=False)
    outputs_raw_keys = set(outputs_raw.keys())
    assert (model_outputs_raw_keys == outputs_raw_keys), f'''Model outputs raw keys and output raw keys from forward_step don't match. Model outputs raw keys: {sorted(model_outputs_raw_keys)} Output raw keys: {sorted(outputs_raw_keys)}'''
    assert all(((v is not None) for (k, v) in outputs_raw.items())), f'''Output raw keys from forward_step contain None values. Output raw keys with None: {list((k for (k, v) in outputs_raw.items() if (v is None)))}'''
def _text_to_num_mark(text: str, return_nan_mark: bool=True): if text.endswith('%'): text4num = text[:(- 1)] is_percent = True else: text4num = text is_percent = False try: possible_value = float(text4num) except: if return_nan_mark: return '<nan>' else: return text else: if (abs(possible_value) < 1): digits = 0 else: digits = min(MAX_DIGITS, (int(numpy.log10(abs(possible_value))) + 1)) if is_percent: num_type = 'percent' elif ('.' in text4num): num_type = 'real' else: num_type = 'int' if (possible_value < 0): num_sign = '-' else: num_sign = '' return f'<{num_sign}{num_type}{digits}>'
def register_Ns3UdpEchoServerHelper_methods(root_module, cls):
    """Register the ns3::UdpEchoServerHelper API on its pybindgen class wrapper.

    Generated-style binding code: copy constructor, port constructor, three
    Install overloads (node pointer / node name / node container) and
    SetAttribute.
    """
    cls.add_constructor([param('ns3::UdpEchoServerHelper const &', 'arg0')])
    cls.add_constructor([param('uint16_t', 'port')])
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True)
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('std::string', 'nodeName')], is_const=True)
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('ns3::NodeContainer', 'c')], is_const=True)
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    return
class GroupLinearLayer(nn.Module):
    """Independent linear map per block: (batch, num_blocks, din) -> (batch, num_blocks, dout)."""

    def __init__(self, num_blocks, din, dout, bias=True):
        super(GroupLinearLayer, self).__init__()
        self.bias = bias
        # One (din x dout) weight matrix per block, plus a per-block bias row.
        self.w = nn.Parameter(torch.Tensor(num_blocks, din, dout))
        self.b = nn.Parameter(torch.Tensor(1, num_blocks, dout))
        # Xavier-style uniform range from fan-in + fan-out.
        bound = math.sqrt(6.0) / math.sqrt(din + dout)
        nn.init.uniform_(self.w, -bound, bound)
        nn.init.zeros_(self.b)

    def forward(self, x):
        # Move blocks to the batch dimension for bmm, then restore the layout.
        out = torch.bmm(x.permute(1, 0, 2), self.w).permute(1, 0, 2)
        if self.bias:
            out = out + self.b
        return out
def test_change_call_function(function_mock, default_test_case):
    """change_call must rewire a statement to the mocked function and keep its ret_val."""
    default_test_case.add_statement(stmt.FloatPrimitiveStatement(default_test_case, 3.5))
    to_replace = stmt.NoneStatement(default_test_case)
    default_test_case.add_statement(to_replace)
    test_cluster = default_test_case.test_cluster
    # Make the mock's type information known to the cluster's type system.
    feed_typesystem(test_cluster.type_system, function_mock)
    test_factory = tf.TestFactory(test_cluster)
    test_factory.change_call(default_test_case, to_replace, function_mock, {})
    assert (default_test_case.statements[1].accessible_object() == function_mock)
    # Identity check: the replacement must reuse the original ret_val object.
    assert (default_test_case.statements[1].ret_val is to_replace.ret_val)
def hierarchical_dataset(root, opt, select_data='/', data_type='label', mode='train'):
    """Concatenate LmdbDatasets found in leaf directories under ``root``.

    Only leaf directories (ones with no subdirectories) whose path contains one
    of the strings in ``select_data`` are included.  Returns
    (ConcatDataset, human-readable log text).  ``data_type`` is currently unused.
    """
    dataset_list = []
    dataset_log = f'dataset_root: {root} dataset: {select_data}'
    print(dataset_log)
    dataset_log += '\n'
    for (dirpath, dirnames, filenames) in os.walk((root + '/')):
        if (not dirnames):
            # Leaf directory: include it only if it matches a selector.
            select_flag = False
            for selected_d in select_data:
                if (selected_d in dirpath):
                    select_flag = True
                    break
            if select_flag:
                dataset = LmdbDataset(dirpath, opt, mode=mode)
                sub_dataset_log = f'sub-directory: /{os.path.relpath(dirpath, root)} num samples: {len(dataset)}'
                print(sub_dataset_log)
                dataset_log += f'''{sub_dataset_log}
'''
                dataset_list.append(dataset)
    # NOTE(review): if nothing matched, dataset_list is empty and ConcatDataset
    # will raise -- confirm callers always select at least one directory.
    concatenated_dataset = ConcatDataset(dataset_list)
    return (concatenated_dataset, dataset_log)
def rule_vs_node_stat():
    """Print average AST node and grammar-rule counts over the Hearthstone code corpus."""
    line_num = 0
    parse_trees = []
    code_file = '/Users/yinpengcheng/Research/SemanticParsing/CodeGeneration/card_datasets/hearthstone/all_hs.out'
    node_nums = rule_nums = 0.0
    for line in open(code_file):
        # NOTE(review): replacing the EMPTY string inserts '\n' between every
        # character ('abc' -> '\na\nb\nc\n').  The first argument was almost
        # certainly a special line-separator token that got lost -- confirm
        # against the all_hs.out file format before relying on this.
        code = line.replace('', '\n').strip()
        parse_tree = parse(code)
        node_nums += len(list(parse_tree.nodes))
        (rules, _) = parse_tree.get_productions()
        rule_nums += len(rules)
        parse_trees.append(parse_tree)
        line_num += 1
    print(('avg. nums of nodes: %f' % (node_nums / line_num)))
    print(('avg. nums of rules: %f' % (rule_nums / line_num)))
class ImplBase(metaclass=ABCMeta):
    """Base class for algorithm implementations.

    Holds the neural modules, target device and a checkpointer; subclasses add
    the actual learning logic.  The underscored attributes are read by
    subclasses, so their names are part of the contract.
    """

    _observation_shape: Shape
    _action_size: int
    _modules: Modules
    _checkpointer: Checkpointer
    _device: str

    def __init__(self, observation_shape: Shape, action_size: int, modules: Modules, device: str):
        self._observation_shape = observation_shape
        self._action_size = action_size
        self._modules = modules
        # The modules container knows how to build a checkpointer for the device.
        self._checkpointer = modules.create_checkpointer(device)
        self._device = device

    def save_model(self, f: BinaryIO) -> None:
        """Serialize all module states to the given binary stream."""
        self._checkpointer.save(f)

    def load_model(self, f: BinaryIO) -> None:
        """Restore all module states from the given binary stream."""
        self._checkpointer.load(f)

    def observation_shape(self) -> Shape:
        """Return the observation shape this implementation was built for."""
        return self._observation_shape

    def action_size(self) -> int:
        """Return the action dimensionality."""
        return self._action_size

    def device(self) -> str:
        """Return the device string (e.g. 'cpu' or a CUDA device)."""
        return self._device

    def modules(self) -> Modules:
        """Return the neural modules container."""
        return self._modules

    def wrap_models_by_ddp(self) -> None:
        # Swap in distributed-data-parallel-wrapped modules for multi-GPU runs.
        self._modules = self._modules.wrap_models_by_ddp()

    def unwrap_models_by_ddp(self) -> None:
        self._modules = self._modules.unwrap_models_by_ddp()
def get_specific_file(path, last_entry='tif'):
    """Return the full path of the first directory entry ending with ``last_entry``.

    The sentinel string '-1' is returned when nothing matches (kept for
    callers that compare against it).  Entry order follows os.listdir.
    """
    for entry in os.listdir(path):
        if entry.endswith(last_entry):
            return os.path.join(path, entry)
    return '-1'
def _faulty_process_group_init_backend_handler(store, name, rank, world_size, rpc_backend_options):
    """Init handler for the fault-injecting ProcessGroup RPC agent (test backend).

    Mirrors the regular process-group init path, then builds a
    FaultyProcessGroupAgent configured to fail or delay specific messages.
    """
    from . import FaultyProcessGroupAgent
    if dist.is_initialized():
        raise RuntimeError('Process group must not be initialized before init_rpc.')
    process_group_timeout = rpc_constants.DEFAULT_PROCESS_GROUP_TIMEOUT
    dist.init_process_group(backend=dist.Backend.GLOO, store=store, rank=rank, world_size=world_size, timeout=process_group_timeout)
    try:
        group = dc10d._get_default_group()
        assert (group is not None), 'Failed to initialize default ProcessGroup.'
        # -1 means "infer from the store"; explicit values must agree with the
        # process group that was actually formed.
        if ((rank != (- 1)) and (rank != group.rank())):
            raise RuntimeError("rank argument {} doesn't match pg rank {}".format(rank, group.rank()))
        if ((world_size != (- 1)) and (world_size != group.size())):
            raise RuntimeError("world_size argument {} doesn't match pg size {}".format(world_size, group.size()))
        return FaultyProcessGroupAgent(name, group, rpc_backend_options.num_send_recv_threads, timedelta(seconds=rpc_backend_options.rpc_timeout), rpc_backend_options.messages_to_fail, rpc_backend_options.messages_to_delay, rpc_backend_options.num_fail_sends)
    except Exception as ex:
        # Tear down the half-initialized process group before propagating.
        dist.destroy_process_group()
        raise ex
def register_optimizer(name, optimizer_class):
    """Register ``optimizer_class`` under ``name``.

    Raises RegistrationError when the name is already taken.
    """
    if name not in _OPTIMIZER_TYPES:
        _OPTIMIZER_TYPES[name] = optimizer_class
        return
    raise RegistrationError(f"Optimizer '{name}' is already registered")
class HMI(Device):
    """Base human-machine-interface device; subclasses should override main_loop."""

    def _start(self):
        self.main_loop()

    def _stop(self):
        # Only networked protocol modes (> 0) spawn a server subprocess to kill.
        if self.protocol['mode'] > 0:
            self._protocol._server_subprocess.kill()

    def main_loop(self, sleep=0.5):
        # Placeholder: one reminder iteration, then return.
        for _ in range(1):
            print('TODO HMI main_loop: please override me')
            time.sleep(sleep)
def device_type(device: Union[(str, int)]):
    """Normalize a device spec to 'cpu' or 'cuda:<index>'.

    Accepts the string 'cpu', a CUDA index as an int, or a numeric string.
    Raises AssertionError for any other string.
    """
    if device == 'cpu':
        return device
    if isinstance(device, int):
        # Fix: the annotation allows int, but the original called
        # device.isnumeric() and crashed with AttributeError on ints.
        return f'cuda:{device}'
    assert device.isnumeric()
    return f'cuda:{device}'
def beta_prior_d(k, n, lk, ln, a0=1, b0=1, plot=True):
    """Posterior over the intrinsic dimension d given a Beta(a0, b0) prior.

    Returns (posterior mean, posterior std, d grid, normalized posterior P).
    Assumes k and n are arrays of per-point neighbor counts at scales lk and
    ln; presumably n successes out of k trials (so n.sum() <= k.sum()) -- TODO
    confirm against the caller.
    """
    from scipy.stats import beta as beta_d
    # Beta posterior parameters from the aggregated counts.
    a = (a0 + n.sum())
    b = ((b0 + k.sum()) - n.sum())
    posterior = beta_d(a, b)

    def p_d(d):
        # Density of d induced by the posterior on p via change of variables:
        # p(d) is the discrete volume ratio, |dp/dd| the Jacobian.
        p = (compute_discrete_volume(ln, d) / compute_discrete_volume(lk, d))
        dp_dd = _compute_jacobian(lk, ln, d)
        return abs((posterior.pdf(p) * dp_dd))
    # Iteratively refine the grid around the region of non-negligible mass
    # until at least 3 grid points exceed the threshold.
    dx = 10.0
    d_left = D_MIN
    d_right = ((D_MAX + dx) + d_left)
    counter = 0
    elements = 0
    while (elements < 3):
        dx /= 10
        counter += 1
        d_range = np.arange(d_left, d_right, dx)
        P = np.array([p_d(di) for di in d_range])
        mask = (P > 1e-20)
        elements = mask.sum()
        # NOTE(review): if the mask is all-False, ind[0] raises IndexError --
        # confirm the threshold always captures some mass for valid inputs.
        ind = np.where(mask)[0]
        d_left = ((d_range[ind[0]] - (0.5 * dx)) if ((d_range[ind[0]] - dx) > 0) else D_MIN)
        d_right = (d_range[ind[(- 1)]] + (0.5 * dx))
    # Final fine grid; normalize P into a discrete distribution.
    d_range = np.linspace(d_left, d_right, 1000)
    dx = (d_range[1] - d_range[0])
    P = np.array([p_d(di) for di in d_range])
    P = P.reshape(P.shape[0])
    P /= P.sum()
    if plot:
        plt.figure()
        plt.plot(d_range, P)
        plt.xlabel('d')
        plt.ylabel('P(d)')
        plt.title('posterior of d')
    # Discrete posterior mean and standard deviation.
    E_d_emp = np.dot(d_range, P)
    S_d_emp = np.sqrt((((d_range * d_range) * P).sum() - (E_d_emp * E_d_emp)))
    if plot:
        print('empirical average:\t', E_d_emp, '\nempirical std:\t\t', S_d_emp)
    return (E_d_emp, S_d_emp, d_range, P)
class SuperbPR(SuperbASR):
    """SUPERB phoneme-recognition (PR) problem, specialized from the ASR recipe.

    Differences from SuperbASR visible here: phoneme vocab, G2P-phonemized
    transcriptions, PER as the validation metric, and sorted-slice batching
    for training.
    """

    def default_config(self) -> dict:
        # Full recipe configuration; MISSING fields must be supplied by the caller.
        return dict(start=0, stop=None, target_dir=MISSING, cache_dir=None, remove_all_cache=False, prepare_data=dict(dataset_root=MISSING, train_sets=['train-clean-100'], valid_sets=['dev-clean'], test_sets=['test-clean']), prepare_tokenizer_data=dict(), build_tokenizer=dict(vocab_type='phoneme'), build_dataset=dict(), build_batch_sampler=dict(train=dict(batch_size=16, max_length=300000), valid=dict(batch_size=1), test=dict(batch_size=1)), build_upstream=dict(name=MISSING), build_featurizer=dict(layer_selections=None, normalize=False), build_downstream=dict(hidden_size=256), build_model=dict(upstream_trainable=False), build_task=dict(log_metrics=['per']), build_optimizer=dict(name='Adam', conf=dict(lr=0.01)), build_scheduler=dict(name='ExponentialLR', gamma=0.9), save_model=dict(extra_conf=dict(build_downstream_conf='${build_downstream}')), save_task=dict(), train=dict(total_steps=100000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate=2, valid_metric='per', valid_higher_better=False, auto_resume=True, resume_ckpt_dir=None), evaluate=dict())

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
        """Run the ASR data preparation, then phonemize every transcription in place."""
        (train_csv, valid_csv, test_csvs) = super().prepare_data(prepare_data, target_dir, cache_dir, get_path_only)
        if get_path_only:
            return (train_csv, valid_csv, test_csvs)
        g2p = G2P()

        def phonemize(csv_path):
            # Replace the 'transcription' column with its G2P phoneme encoding.
            df = pd.read_csv(csv_path)
            text = df['transcription'].tolist()
            phonemized_text = [g2p.encode(t.strip()) for t in text]
            df['transcription'] = phonemized_text
            df.to_csv(csv_path, index=False)
        for csv_path in [train_csv, valid_csv, *test_csvs]:
            phonemize(csv_path)
        return (train_csv, valid_csv, test_csvs)

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str, mode: str, data_csv: str, dataset):
        """Pick a batch sampler per split: length-sorted slices for train, fixed batch size otherwise."""

        # NOTE(review): Config(**build_batch_sampler) needs an __init__ that
        # accepts train/valid/test keywords; a plain class with only annotations
        # does not provide one.  A @dataclass decorator may have been lost here
        # -- confirm against the upstream recipe.
        class Config():
            train: dict = None
            valid: dict = None
            test: dict = None
        conf = Config(**build_batch_sampler)
        if (mode == 'train'):
            # Sort by waveform length so each slice batches similar-sized utterances.
            wav_lens = get_info(dataset, ['x_len'], (Path(target_dir) / 'train_stats'))
            sampler = SortedSliceSampler(wav_lens, **(conf.train or {}))
        elif (mode == 'valid'):
            sampler = FixedBatchSizeBatchSampler(dataset, **(conf.valid or {}))
        elif (mode == 'test'):
            sampler = FixedBatchSizeBatchSampler(dataset, **(conf.test or {}))
        # NOTE(review): an unexpected mode leaves ``sampler`` unbound (NameError).
        return sampler

    def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
        """Frame-level linear classifier over the upstream features."""
        return FrameLevelLinear(downstream_input_size, downstream_output_size, **build_downstream)
def test_is_datetime_type_with_int():
    """A plain integer must not be classified as a datetime."""
    data = 2
    result = is_datetime_type(data)
    assert result is False
def multiple_tensors_mse_loss(y_list: List[tf.Tensor], x_list: List[tf.Tensor], fxp_w_list: List[List[tf.Tensor]], flp_w_list: List[List[tf.Tensor]], act_bn_mean: List, act_bn_std: List, loss_weights: List[float]=None) -> tf.Tensor:
    """Mean (or weighted sum) of per-pair MSE losses between two tensor lists.

    Args:
        y_list, x_list: paired tensors compared point-wise via mse_loss.
        fxp_w_list, flp_w_list, act_bn_mean, act_bn_std: unused here; kept so
            the signature matches the shared loss-function interface.
        loss_weights: optional per-pair weights; when given, the weighted sum
            is returned instead of the unweighted mean.
    """
    # The original loop used enumerate but never used the index; a plain
    # comprehension over the zipped pairs is equivalent and clearer.
    point_losses = tf.stack([mse_loss(y, x) for y, x in zip(y_list, x_list)])
    if loss_weights is not None:
        return tf.reduce_sum(loss_weights * point_losses)
    return tf.reduce_mean(point_losses)
def default_signature(fn, source, _n_arguments, _n_binders):
    """Build a fully-dynamic default type signature for a traced function.

    Returns (arg_types, ret_type): one DynamicType per argument and a
    TupleType of ``_n_binders`` DynamicTypes for the outputs.  The argument
    count is taken from the parsed ``source`` when available, otherwise from
    ``_n_arguments``.
    """
    if (_n_binders is None):
        raise RuntimeError('default_signature needs to know the number of binders')
    if ((source is None) and (_n_arguments is None)):
        raise RuntimeError('default_signature needs either the source or the number of arguments')
    ret_type = TupleType([DynamicType() for _ in range(_n_binders)])
    if (source is not None):
        # Parse the source to count the real (named) parameters.
        py_ast = ast.parse(source)
        if ((len(py_ast.body) != 1) or (not isinstance(py_ast.body[0], ast.FunctionDef))):
            raise RuntimeError('expected a single top-level function')
        py_def = py_ast.body[0]
        if (py_def.args.vararg is not None):
            # *args present: fall back to the caller-supplied argument count.
            arg_types = ([DynamicType()] * _n_arguments)
        else:
            arg_types = [DynamicType() for _ in py_def.args.args]
            if inspect.ismethod(fn):
                # Drop the implicit self/cls parameter of bound methods.
                arg_types = arg_types[1:]
    else:
        arg_types = ([DynamicType()] * _n_arguments)
    return (arg_types, ret_type)
class TestNewton(TestScalarRootFinders):
    """Tests for the Newton / secant / Halley scalar root finders.

    Fix applied: two ``parametrize`` decorators had lost their ``@pytest.mark``
    prefix (leaving syntactically invalid bare ``.parametrize(...)`` lines);
    they are restored below.  The garbled chained assert in
    test_gh17570_defaults is restored to the upstream form.
    """

    def test_newton_collections(self):
        known_fail = ['aps.13.00']
        known_fail += ['aps.12.05', 'aps.12.17']
        for collection in ['aps', 'complex']:
            self.run_collection(collection, zeros.newton, 'newton', smoothness=2, known_fail=known_fail)

    def test_halley_collections(self):
        known_fail = ['aps.12.06', 'aps.12.07', 'aps.12.08', 'aps.12.09', 'aps.12.10',
                      'aps.12.11', 'aps.12.12', 'aps.12.13', 'aps.12.14', 'aps.12.15',
                      'aps.12.16', 'aps.12.17', 'aps.12.18', 'aps.13.00']
        for collection in ['aps', 'complex']:
            self.run_collection(collection, zeros.newton, 'halley', smoothness=2, known_fail=known_fail)

    def test_newton(self):
        # Exercise secant (no fprime), secant with x1, Newton, and Halley paths.
        for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            x = zeros.newton(f, 3, tol=1e-06)
            assert_allclose(f(x), 0, atol=1e-06)
            x = zeros.newton(f, 3, x1=5, tol=1e-06)
            assert_allclose(f(x), 0, atol=1e-06)
            x = zeros.newton(f, 3, fprime=f_1, tol=1e-06)
            assert_allclose(f(x), 0, atol=1e-06)
            x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-06)
            assert_allclose(f(x), 0, atol=1e-06)

    def test_newton_by_name(self):
        # Newton via root_scalar, with and without an explicit derivative.
        for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            r = root_scalar(f, method='newton', x0=3, fprime=f_1, xtol=1e-06)
            assert_allclose(f(r.root), 0, atol=1e-06)
        for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            r = root_scalar(f, method='newton', x0=3, xtol=1e-06)
            assert_allclose(f(r.root), 0, atol=1e-06)

    def test_secant_by_name(self):
        for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            r = root_scalar(f, method='secant', x0=3, x1=2, xtol=1e-06)
            assert_allclose(f(r.root), 0, atol=1e-06)
            r = root_scalar(f, method='secant', x0=3, x1=5, xtol=1e-06)
            assert_allclose(f(r.root), 0, atol=1e-06)
        for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            r = root_scalar(f, method='secant', x0=3, xtol=1e-06)
            assert_allclose(f(r.root), 0, atol=1e-06)

    def test_halley_by_name(self):
        for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            r = root_scalar(f, method='halley', x0=3, fprime=f_1, fprime2=f_2, xtol=1e-06)
            assert_allclose(f(r.root), 0, atol=1e-06)

    def test_root_scalar_fail(self):
        message = 'fprime2 must be specified for halley'
        with pytest.raises(ValueError, match=message):
            root_scalar(f1, method='halley', fprime=f1_1, x0=3, xtol=1e-06)
        message = 'fprime must be specified for halley'
        with pytest.raises(ValueError, match=message):
            root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-06)

    def test_array_newton(self):
        # Vectorized Newton over a diode-equation-like system of 10 problems.
        def f1(x, *a):
            b = a[0] + x * a[3]
            return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x

        def f1_1(x, *a):
            b = a[3] / a[5]
            return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1

        def f1_2(x, *a):
            b = a[3] / a[5]
            return -a[2] * np.exp(a[0] / a[5] + x * b) * b ** 2

        a0 = np.array([5.0, 5.0, 5.0, 5.0, 4.0, 1.0, 5.0, 5.0, 5.0, 5.4204629])
        a1 = (np.sin(range(10)) + 1.0) * 7.0
        args = (a0, a1, 1e-09, 0.004, 10, 0.27456)
        x0 = [7.0] * 10
        x = zeros.newton(f1, x0, f1_1, args)
        x_expected = (6.0, 11.7702805, 12.2219954, 7.0, 1.0, 0.0, 4.0, 10.5419107, 12.755249, 8.0)
        assert_allclose(x, x_expected)
        x = zeros.newton(f1, x0, f1_1, args, fprime2=f1_2)
        assert_allclose(x, x_expected)
        x = zeros.newton(f1, x0, args=args)
        assert_allclose(x, x_expected)

    def test_array_newton_complex(self):
        def f(x):
            return x + 1 + 1j

        def fprime(x):
            return 1.0

        t = np.full(4, 1j)
        x = zeros.newton(f, t, fprime=fprime)
        assert_allclose(f(x), 0.0)
        # Should also work starting from real initial points.
        t = np.ones(4)
        x = zeros.newton(f, t, fprime=fprime)
        assert_allclose(f(x), 0.0)
        x = zeros.newton(f, t)
        assert_allclose(f(x), 0.0)

    def test_array_secant_active_zero_der(self):
        x = zeros.newton(lambda x, *a: x * x - a[0], x0=[4.123, 5],
                         args=[np.array([17, 25])])
        assert_allclose(x, (4.0, 5.0))

    def test_array_newton_integers(self):
        # Works with float and integer argument arrays alike.
        x = zeros.newton(lambda y, z: z - y ** 2, [4.0] * 2, args=([15.0, 17.0],))
        assert_allclose(x, (3.0, 4.0))
        x = zeros.newton(lambda y, z: z - y ** 2, [4] * 2, args=([15, 17],))
        assert_allclose(x, (3.0, 4.0))

    def test_array_newton_zero_der_failures(self):
        # A zero derivative at the start should warn, not crash.
        assert_warns(RuntimeWarning, zeros.newton, lambda y: y ** 2 - 2,
                     [0.0, 0.0], lambda y: 2 * y)
        with pytest.warns(RuntimeWarning):
            results = zeros.newton(lambda y: y ** 2 - 2, [0.0, 0.0],
                                   lambda y: 2 * y, full_output=True)
            assert_allclose(results.root, 0)
            assert results.zero_der.all()
            assert not results.converged.any()

    def test_newton_combined(self):
        def f1(x):
            return x ** 2 - 2 * x - 1

        def f1_1(x):
            return 2 * x - 2

        def f1_2(x):
            return 2.0 + 0 * x

        def f1_and_p_and_pp(x):
            # Returns (f, f', f'') in one call for the combined interface.
            return (x ** 2 - 2 * x - 1, 2 * x - 2, 2.0)

        sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1)
        sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True)
        assert_allclose(sol0.root, sol.root, atol=1e-08)
        assert_equal(2 * sol.function_calls, sol0.function_calls)
        sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2)
        sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True)
        assert_allclose(sol0.root, sol.root, atol=1e-08)
        assert_equal(3 * sol.function_calls, sol0.function_calls)

    def test_newton_full_output(self):
        # Verify iteration/function-call counts for 0, 1 and 2 derivatives,
        # and the behavior when maxiter is one short of convergence.
        x0 = 3
        expected_counts = [(6, 7), (5, 10), (3, 9)]
        for derivs in range(3):
            kwargs = {'tol': 1e-06, 'full_output': True}
            for k, v in [['fprime', f1_1], ['fprime2', f1_2]][:derivs]:
                kwargs[k] = v
            x, r = zeros.newton(f1, x0, disp=False, **kwargs)
            assert_(r.converged)
            assert_equal(x, r.root)
            assert_equal((r.iterations, r.function_calls), expected_counts[derivs])
            if derivs == 0:
                assert r.function_calls <= r.iterations + 1
            else:
                assert_equal(r.function_calls, (derivs + 1) * r.iterations)
            # One iteration short of convergence.
            iters = r.iterations - 1
            x, r = zeros.newton(f1, x0, maxiter=iters, disp=False, **kwargs)
            assert_(not r.converged)
            assert_equal(x, r.root)
            assert_equal(r.iterations, iters)
            if derivs == 1:
                msg = 'Failed to converge after %d iterations, value is .*' % iters
                with pytest.raises(RuntimeError, match=msg):
                    x, r = zeros.newton(f1, x0, maxiter=iters, disp=True, **kwargs)

    def test_deriv_zero_warning(self):
        def func(x):
            return x ** 2 - 2.0

        def dfunc(x):
            return 2 * x

        assert_warns(RuntimeWarning, zeros.newton, func, 0.0, dfunc, disp=False)
        with pytest.raises(RuntimeError, match='Derivative was zero'):
            zeros.newton(func, 0.0, dfunc)

    def test_newton_does_not_modify_x0(self):
        x0 = np.array([0.1, 3])
        x0_copy = x0.copy()
        newton(np.sin, x0, np.cos)
        assert_array_equal(x0, x0_copy)

    def test_gh17570_defaults(self):
        res_newton_default = root_scalar(f1, method='newton', x0=3, xtol=1e-06)
        res_secant_default = root_scalar(f1, method='secant', x0=3, x1=2, xtol=1e-06)
        res_secant = newton(f1, x0=3, x1=2, tol=1e-06, full_output=True)[1]
        assert_allclose(f1(res_newton_default.root), 0, atol=1e-06)
        assert res_newton_default.root.shape == tuple()
        assert_allclose(f1(res_secant_default.root), 0, atol=1e-06)
        assert res_secant_default.root.shape == tuple()
        assert_allclose(f1(res_secant.root), 0, atol=1e-06)
        assert res_secant.root.shape == tuple()
        # Both secant paths must agree exactly (restored upstream assertion).
        assert res_secant_default.root == res_secant.root
        assert (res_secant_default.iterations == (res_secant_default.function_calls - 1)
                == res_secant.iterations != res_newton_default.iterations
                == (res_newton_default.function_calls / 2))

    @pytest.mark.parametrize('kwargs', [dict(), {'method': 'newton'}])
    def test_args_gh19090(self, kwargs):
        def f(x, a, b):
            assert a == 3
            assert b == 1
            return x ** a - b

        res = optimize.root_scalar(f, x0=3, args=(3, 1), **kwargs)
        assert res.converged
        assert_allclose(res.root, 1)

    @pytest.mark.parametrize('method', ['secant', 'newton'])
    def test_int_x0_gh19280(self, method):
        # Integer x0 must be promoted to float, not overflow / stay integral.
        def f(x):
            return x ** -2 - 2

        res = optimize.root_scalar(f, x0=1, method=method)
        assert res.converged
        assert_allclose(abs(res.root), 2 ** -0.5)
        assert res.root.dtype == np.dtype(np.float64)
@pytest.mark.parametrize('gzip_response', [True, False])
@pytest.mark.parametrize('dataset_params', [{'data_id': 40675}, {'data_id': None, 'name': 'glass2', 'version': 1}])
def test_fetch_openml_inactive(monkeypatch, gzip_response, dataset_params):
    """Fetching the inactive glass2 dataset warns but still returns the data.

    Fix applied: both decorators had lost their ``@pytest.mark`` prefix,
    leaving invalid bare ``.parametrize(...)`` lines.
    """
    data_id = 40675
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    msg = 'Version 1 of dataset glass2 is inactive,'
    with pytest.warns(UserWarning, match=msg):
        glass2 = fetch_openml(cache=False, as_frame=False, parser='liac-arff', **dataset_params)
    assert glass2.data.shape == (163, 9)
    assert glass2.details['id'] == '40675'
def encode_spm(model, direction, prefix='', splits=['train', 'test', 'valid'], pairs_per_shard=None):
    """Encode raw parallel text into sentencepiece pieces for one language pair.

    NOTE(review): ``splits`` is a mutable default argument (shared across
    calls) and ``pairs_per_shard`` is unused -- both kept to preserve the
    interface.
    """
    (src, tgt) = direction.split('-')
    for split in splits:
        (src_raw, tgt_raw) = (f'{RAW_DIR}/{split}{prefix}.{direction}.{src}', f'{RAW_DIR}/{split}{prefix}.{direction}.{tgt}')
        # Skip splits whose raw source/target files are not both present.
        if (os.path.exists(src_raw) and os.path.exists(tgt_raw)):
            cmd = f'python {SPM_ENCODE} --model {model} --output_format=piece --inputs {src_raw} {tgt_raw} --outputs {BPE_DIR}/{direction}{prefix}/{split}.bpe.{src} {BPE_DIR}/{direction}{prefix}/{split}.bpe.{tgt} '
            print(cmd)
            # NOTE(review): passing a single command string to ``call`` only
            # works with shell=True or if ``call`` is a wrapper that splits it
            # -- confirm how ``call`` is defined/imported.  Paths are also
            # interpolated unquoted, so whitespace in them would break the
            # command.
            call(cmd)
class _OSA_stage(nn.Sequential):
    """One VoVNet stage: optional stride-2 max-pool followed by OSA modules."""

    def __init__(self, in_ch, stage_ch, concat_ch, block_per_stage, layer_per_block, stage_num, SE=False, depthwise=False):
        super(_OSA_stage, self).__init__()
        # Every stage except stage 2 begins by halving the spatial resolution.
        if stage_num != 2:
            self.add_module('Pooling', nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True))
        # With more than one block, SE is disabled for the first block.
        if block_per_stage != 1:
            SE = False
        name = f'OSA{stage_num}_1'
        self.add_module(name, _OSA_module(in_ch, stage_ch, concat_ch, layer_per_block, name, SE, depthwise=depthwise))
        for idx in range(block_per_stage - 1):
            # SE is cleared on all but the final block of the stage.
            if idx != block_per_stage - 2:
                SE = False
            name = f'OSA{stage_num}_{idx + 2}'
            self.add_module(name, _OSA_module(concat_ch, stage_ch, concat_ch, layer_per_block, name, SE, identity=True, depthwise=depthwise))
def header_to_xml(header_lines, book, output_xml_path):
    """Wrap runs of consecutive header line numbers in <header> elements and write the XML.

    NOTE(review): the XPath './/line[="N"]' is missing an attribute name in the
    predicate (expected something like './/line[@num="N"]'); as written lxml
    rejects it.  The predicate appears to have been corrupted -- confirm the
    real attribute name against the book schema.
    """
    # Collapse the flat line numbers into consecutive ranges, then map first -> last.
    header_lines = [[y for y in x] for x in group_ranges(header_lines)]
    d = {x[0]: x[(- 1)] for x in header_lines}
    ET.strip_tags(book, 'section')
    for (from_line, to_line) in d.items():
        f = book.find((('.//line[="' + str(from_line)) + '"]'))
        new_element = ET.Element('header')
        prev = f.getprevious()
        if (prev is not None):
            # Re-parent the whole run of <line> elements under one <header>,
            # inserted after the preceding sibling.
            for line_num in range(from_line, (to_line + 1)):
                e = book.find((('.//line[="' + str(line_num)) + '"]'))
                new_element.append(e)
            prev.addnext(new_element)
        else:
            # No preceding sibling: insert the header first under the parent.
            parent = f.getparent()
            for line_num in range(from_line, (to_line + 1)):
                e = book.find((('.//line[="' + str(line_num)) + '"]'))
                new_element.append(e)
            parent.insert(0, new_element)
    # Drop the now-redundant <line> wrappers, keeping their text.
    ET.strip_tags(book, 'line')
    with open(output_xml_path, 'wb') as f:
        f.write(ET.tostring(book, pretty_print=True))
def valuetype_type(t: Type) -> Optional[str]:
    """Translate a schema ``Type`` into its C++ value-type string.

    Returns None for tensor-like types (and bool-less lists), which must be
    handled by the caller through a different code path.
    """
    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            return None
        if t.name == BaseTy.int:
            return 'int64_t'
        if t.name == BaseTy.float:
            return 'double'
        if t.name == BaseTy.str:
            return 'std::string'
        # These map directly onto a C++ type of the same name.
        passthrough = (BaseTy.bool, BaseTy.QScheme, BaseTy.Scalar, BaseTy.ScalarType,
                       BaseTy.Generator, BaseTy.Storage, BaseTy.Layout, BaseTy.Device,
                       BaseTy.MemoryFormat, BaseTy.Dimname, BaseTy.ConstQuantizerPtr)
        if t.name in passthrough:
            return t.name.name
        raise AssertionError(f'unsupported type: {t}')
    if isinstance(t, OptionalType):
        elem = valuetype_type(t.elem)
        return None if elem is None else f'c10::optional<{elem}>'
    if isinstance(t, ListType):
        # Only fixed-size bool lists are value types (std::array<bool, N>).
        if str(t.elem) == 'bool':
            assert t.size is not None
            return f'std::array<bool,{t.size}>'
        return None
    raise AssertionError(f'unrecognized type {repr(t)}')
class RoIPool(torch.nn.Module):
    """Thin module wrapper that applies RoIPoolFunction with fixed pooling parameters."""

    def __init__(self, pooled_height, pooled_width, spatial_scale):
        super(RoIPool, self).__init__()
        # Normalize constructor arguments to plain Python scalars.
        self.pooled_width = int(pooled_width)
        self.pooled_height = int(pooled_height)
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        pool = RoIPoolFunction(self.pooled_height, self.pooled_width, self.spatial_scale)
        return pool(features, rois)
class DataProcessor():
    """Prepares a single table for modeling: constraints, transformers, formatting.

    Wraps an ``rdt.HyperTransformer`` plus SDV constraints so that ``fit`` /
    ``transform`` / ``reverse_transform`` round-trip a DataFrame between its
    real representation and the numeric representation a synthesizer consumes.
    """

    # Maps numpy dtype kind codes to SDV sdtypes.
    _DTYPE_TO_SDTYPE = {'i': 'numerical', 'f': 'numerical', 'O': 'categorical', 'b': 'boolean', 'M': 'datetime'}

    def _update_numerical_transformer(self, enforce_rounding, enforce_min_max_values):
        """Install a FloatFormatter honoring the rounding/min-max settings."""
        custom_float_formatter = rdt.transformers.FloatFormatter(
            missing_value_replacement='mean', missing_value_generation='random',
            learn_rounding_scheme=enforce_rounding,
            enforce_min_max_values=enforce_min_max_values)
        self._transformers_by_sdtype.update({'numerical': custom_float_formatter})

    def __init__(self, metadata, enforce_rounding=True, enforce_min_max_values=True, model_kwargs=None, table_name=None, locales=None):
        self.metadata = metadata
        self._enforce_rounding = enforce_rounding
        self._enforce_min_max_values = enforce_min_max_values
        self._model_kwargs = (model_kwargs or {})
        self._locales = locales
        self._constraints_list = []
        self._constraints = []
        self._constraints_to_reverse = []
        self._custom_constraint_classes = {}
        self._transformers_by_sdtype = deepcopy(get_default_transformers())
        # 'id' columns get regex generation; the legacy 'text' entry is dropped.
        self._transformers_by_sdtype['id'] = rdt.transformers.RegexGenerator()
        del self._transformers_by_sdtype['text']
        self.grouped_columns_to_transformers = {}
        self._update_numerical_transformer(enforce_rounding, enforce_min_max_values)
        self._hyper_transformer = rdt.HyperTransformer()
        self.table_name = table_name
        self._dtypes = None
        self.fitted = False
        self.formatters = {}
        self._primary_key = self.metadata.primary_key
        self._prepared_for_fitting = False
        # Keys (alternate + primary) are generated, never modeled.
        self._keys = deepcopy(self.metadata.alternate_keys)
        if self._primary_key:
            self._keys.append(self._primary_key)

    def _get_grouped_columns(self):
        """Return every column handled by a multi-column transformer."""
        return [col for col_tuple in self.grouped_columns_to_transformers for col in col_tuple]

    def _check_import_address_transformers(self):
        """Raise ImportError unless the enterprise address transformers exist."""
        has_randomlocationgenerator = hasattr(rdt.transformers, 'RandomLocationGenerator')
        has_regionalanonymizer = hasattr(rdt.transformers, 'RegionalAnonymizer')
        if ((not has_randomlocationgenerator) or (not has_regionalanonymizer)):
            raise ImportError('You must have SDV Enterprise with the address add-on to use the address features')

    def _get_columns_in_address_transformer(self):
        """Return columns covered by an address transformer ([] if unavailable)."""
        try:
            self._check_import_address_transformers()
            result = []
            for (col_tuple, transformer) in self.grouped_columns_to_transformers.items():
                is_randomlocationgenerator = isinstance(transformer, rdt.transformers.RandomLocationGenerator)
                is_regionalanonymizer = isinstance(transformer, rdt.transformers.RegionalAnonymizer)
                if (is_randomlocationgenerator or is_regionalanonymizer):
                    result.extend(list(col_tuple))
            return result
        except ImportError:
            return []

    def _get_address_transformer(self, anonymization_level):
        """Build the address transformer matching the anonymization level."""
        locales = (self._locales if self._locales else ['en_US'])
        self._check_import_address_transformers()
        if (anonymization_level == 'street_address'):
            return rdt.transformers.RegionalAnonymizer(locales=locales)
        return rdt.transformers.RandomLocationGenerator(locales=locales)

    def set_address_transformer(self, column_names, anonymization_level):
        """Register an address transformer for the given column tuple."""
        columns_to_sdtypes = {column: self.metadata.columns[column]['sdtype'] for column in column_names}
        transformer = self._get_address_transformer(anonymization_level)
        transformer._validate_sdtypes(columns_to_sdtypes)
        if self._prepared_for_fitting:
            self.update_transformers({column_names: transformer})
        self.grouped_columns_to_transformers[column_names] = transformer

    def get_model_kwargs(self, model_name):
        """Return a copy of the kwargs stored for *model_name* (or None)."""
        return deepcopy(self._model_kwargs.get(model_name))

    def set_model_kwargs(self, model_name, model_kwargs):
        """Store the kwargs to use for *model_name*."""
        self._model_kwargs[model_name] = model_kwargs

    def get_sdtypes(self, primary_keys=False):
        """Map column name -> sdtype; keys are excluded unless requested."""
        sdtypes = {}
        for (name, column_metadata) in self.metadata.columns.items():
            sdtype = column_metadata['sdtype']
            if (primary_keys or (name not in self._keys)):
                sdtypes[name] = sdtype
        return sdtypes

    def _validate_custom_constraint_name(self, class_name):
        """Reject custom constraint names that clash with built-in ones."""
        reserved_class_names = list(get_subclasses(Constraint))
        if (class_name in reserved_class_names):
            error_message = f"The name '{class_name}' is a reserved constraint name. Please use a different one for the custom constraint."
            raise InvalidConstraintsError(error_message)

    def _validate_custom_constraints(self, filepath, class_names, module):
        """Validate names and presence of custom constraint classes in *module*."""
        errors = []
        for class_name in class_names:
            try:
                self._validate_custom_constraint_name(class_name)
            except InvalidConstraintsError as err:
                errors += err.errors
            if (not hasattr(module, class_name)):
                errors.append(f"The constraint '{class_name}' is not defined in '{filepath}'.")
        if errors:
            raise InvalidConstraintsError(errors)

    def load_custom_constraint_classes(self, filepath, class_names):
        """Load custom constraint classes from a Python file on disk."""
        path = Path(filepath)
        module = load_module_from_path(path)
        self._validate_custom_constraints(filepath, class_names, module)
        for class_name in class_names:
            constraint_class = getattr(module, class_name)
            self._custom_constraint_classes[class_name] = constraint_class

    def add_custom_constraint_class(self, class_object, class_name):
        """Register an already-imported custom constraint class."""
        self._validate_custom_constraint_name(class_name)
        self._custom_constraint_classes[class_name] = class_object

    def _validate_constraint_dict(self, constraint_dict):
        """Validate one constraint definition dict against the metadata."""
        params = {'constraint_class', 'constraint_parameters'}
        keys = constraint_dict.keys()
        missing_params = (params - keys)
        if missing_params:
            raise SynthesizerInputError(f'A constraint is missing required parameters {missing_params}. Please add these parameters to your constraint definition.')
        extra_params = (keys - params)
        if extra_params:
            raise SynthesizerInputError(f'Unrecognized constraint parameter {extra_params}. Please remove these parameters from your constraint definition.')
        constraint_class = constraint_dict['constraint_class']
        constraint_parameters = constraint_dict['constraint_parameters']
        try:
            # Custom classes take priority over built-ins of the same name.
            if (constraint_class in self._custom_constraint_classes):
                constraint_class = self._custom_constraint_classes[constraint_class]
            else:
                constraint_class = Constraint._get_class_from_dict(constraint_class)
        except KeyError:
            raise InvalidConstraintsError(f"Invalid constraint class ('{constraint_class}').")
        if ('column_name' in constraint_parameters):
            column_names = [constraint_parameters.get('column_name')]
        else:
            column_names = constraint_parameters.get('column_names')
        # Constraints may not target columns owned by an address transformer.
        columns_in_address = self._get_columns_in_address_transformer()
        if (columns_in_address and column_names):
            address_constraint_columns = (set(column_names) & set(columns_in_address))
            if address_constraint_columns:
                to_print = "', '".join(address_constraint_columns)
                raise InvalidConstraintsError(f"The '{to_print}' columns are part of an address. You cannot add constraints to columns that are part of an address group.")
        constraint_class._validate_metadata(self.metadata, **constraint_parameters)

    def add_constraints(self, constraints):
        """Validate and queue constraint definitions; all-or-nothing on errors."""
        errors = []
        validated_constraints = []
        for constraint_dict in constraints:
            constraint_dict = deepcopy(constraint_dict)
            try:
                self._validate_constraint_dict(constraint_dict)
                validated_constraints.append(constraint_dict)
            except (AggregateConstraintsError, InvalidConstraintsError) as e:
                reformated_errors = '\n'.join(map(str, e.errors))
                errors.append(reformated_errors)
        if errors:
            raise InvalidConstraintsError(errors)
        self._constraints_list.extend(validated_constraints)
        # New constraints invalidate any previously prepared configuration.
        self._prepared_for_fitting = False

    def get_constraints(self):
        """Return a copy of the queued constraint definitions."""
        return deepcopy(self._constraints_list)

    def _load_constraints(self):
        """Instantiate Constraint objects from the queued definitions."""
        loaded_constraints = []
        default_constraints_classes = list(get_subclasses(Constraint))
        for constraint in self._constraints_list:
            if (constraint['constraint_class'] in default_constraints_classes):
                loaded_constraints.append(Constraint.from_dict(constraint))
            else:
                constraint_class = self._custom_constraint_classes[constraint['constraint_class']]
                loaded_constraints.append(constraint_class(**constraint.get('constraint_parameters', {})))
        return loaded_constraints

    def _fit_constraints(self, data):
        """Fit every constraint on the data, aggregating all failures."""
        self._constraints = self._load_constraints()
        errors = []
        for constraint in self._constraints:
            try:
                constraint.fit(data)
            except Exception as e:
                errors.append(e)
        if errors:
            raise AggregateConstraintsError(errors)

    def _transform_constraints(self, data, is_condition=False):
        """Apply constraint transforms; known failures fall back to reject sampling."""
        errors = []
        if (not is_condition):
            self._constraints_to_reverse = []
        for constraint in self._constraints:
            try:
                data = constraint.transform(data)
                if (not is_condition):
                    # Only constraints that transformed successfully are reversed.
                    self._constraints_to_reverse.append(constraint)
            except (MissingConstraintColumnError, FunctionError) as error:
                if isinstance(error, MissingConstraintColumnError):
                    LOGGER.info('Unable to transform %s with columns %s because they are not all available in the data. This happens due to multiple, overlapping constraints.', constraint.__class__.__name__, error.missing_columns)
                    log_exc_stacktrace(LOGGER, error)
                else:
                    LOGGER.info('Unable to transform %s with columns %s due to an error in transform: \n%s\nUsing the reject sampling approach instead.', constraint.__class__.__name__, constraint.column_names, str(error))
                    log_exc_stacktrace(LOGGER, error)
                if is_condition:
                    # Drop the constraint's columns so conditioning can proceed.
                    indices_to_drop = data.columns.isin(constraint.constraint_columns)
                    columns_to_drop = data.columns.where(indices_to_drop).dropna()
                    data = data.drop(columns_to_drop, axis=1)
            except Exception as error:
                errors.append(error)
        if errors:
            raise AggregateConstraintsError(errors)
        return data

    def _update_transformers_by_sdtypes(self, sdtype, transformer):
        """Override the default transformer used for *sdtype*."""
        self._transformers_by_sdtype[sdtype] = transformer

    # Fix: restored @staticmethod — _get_transformer_instance and
    # _create_config call this with four positional arguments, which would
    # mis-bind ``self`` as ``sdtype`` if it were an instance method.
    @staticmethod
    def create_anonymized_transformer(sdtype, column_metadata, enforce_uniqueness, locales=None):
        """Build a Faker-backed anonymizing transformer for a pii sdtype."""
        kwargs = {'locales': locales}
        for (key, value) in column_metadata.items():
            if (key not in ['pii', 'sdtype']):
                kwargs[key] = value
        if enforce_uniqueness:
            kwargs['enforce_uniqueness'] = True
        try:
            transformer = get_anonymized_transformer(sdtype, kwargs)
        except AttributeError as error:
            raise SynthesizerInputError(f"The sdtype '{sdtype}' is not compatible with any of the locales. To continue, try changing the locales or adding 'en_US' as a possible option.") from error
        return transformer

    def create_regex_generator(self, column_name, sdtype, column_metadata, is_numeric):
        """Build a RegexGenerator; keys additionally enforce uniqueness."""
        default_regex_format = ('\\d{30}' if is_numeric else '[0-1a-z]{5}')
        regex_format = column_metadata.get('regex_format', default_regex_format)
        transformer = rdt.transformers.RegexGenerator(regex_format=regex_format, enforce_uniqueness=(column_name in self._keys))
        return transformer

    def _get_transformer_instance(self, sdtype, column_metadata):
        """Instantiate the transformer for *sdtype*, honoring column metadata."""
        transformer = self._transformers_by_sdtype[sdtype]
        if isinstance(transformer, AnonymizedFaker):
            # The lexify/BaseProvider default stands in for "generic pii";
            # replace it with a properly configured anonymizer.
            is_lexify = (transformer.function_name == 'lexify')
            is_baseprovider = (transformer.provider_name == 'BaseProvider')
            if (is_lexify and is_baseprovider):
                return self.create_anonymized_transformer(sdtype, column_metadata, False, self._locales)
        kwargs = {key: value for (key, value) in column_metadata.items() if (key not in ['pii', 'sdtype'])}
        if (sdtype == 'datetime'):
            kwargs['enforce_min_max_values'] = self._enforce_min_max_values
        if (kwargs and (transformer is not None)):
            transformer_class = transformer.__class__
            return transformer_class(**kwargs)
        return deepcopy(transformer)

    def _update_constraint_transformers(self, data, columns_created_by_constraints, config):
        """Add transformers for constraint-created columns; drop vanished ones."""
        missing_columns = (set(columns_created_by_constraints) - config['transformers'].keys())
        for column in missing_columns:
            dtype_kind = data[column].dtype.kind
            if (dtype_kind in ('i', 'f')):
                config['sdtypes'][column] = 'numerical'
                config['transformers'][column] = rdt.transformers.FloatFormatter(
                    missing_value_replacement='mean', missing_value_generation='random',
                    enforce_min_max_values=self._enforce_min_max_values)
            else:
                sdtype = self._DTYPE_TO_SDTYPE.get(dtype_kind, 'categorical')
                config['sdtypes'][column] = sdtype
                config['transformers'][column] = self._get_transformer_instance(sdtype, {})
        for column in list(config['sdtypes'].keys()):
            if (column not in data):
                LOGGER.info(f"A constraint has dropped the column '{column}', removing the transformer from the 'HyperTransformer'.")
                config['sdtypes'].pop(column)
                config['transformers'].pop(column)
        return config

    def _create_config(self, data, columns_created_by_constraints):
        """Build the full HyperTransformer config for the (constrained) data."""
        sdtypes = {}
        transformers = {}
        columns_in_multi_col_transformer = self._get_grouped_columns()
        for column in (set(data.columns) - columns_created_by_constraints):
            column_metadata = self.metadata.columns.get(column)
            sdtype = column_metadata.get('sdtype')
            if (column in columns_in_multi_col_transformer):
                # Multi-column transformers are registered below, all at once.
                sdtypes[column] = sdtype
                continue
            pii = column_metadata.get('pii', (sdtype not in self._transformers_by_sdtype))
            sdtypes[column] = ('pii' if pii else sdtype)
            if (sdtype == 'id'):
                is_numeric = pd.api.types.is_numeric_dtype(data[column].dtype)
                if column_metadata.get('regex_format', False):
                    transformers[column] = self.create_regex_generator(column, sdtype, column_metadata, is_numeric)
                    sdtypes[column] = 'text'
                elif (column in self._keys):
                    prefix = None
                    if (not is_numeric):
                        prefix = 'sdv-id-'
                    transformers[column] = IDGenerator(prefix=prefix)
                    sdtypes[column] = 'text'
                else:
                    transformers[column] = AnonymizedFaker(provider_name=None, function_name='bothify', function_kwargs={'text': '#####'})
                    sdtypes[column] = 'pii'
            elif (sdtype == 'unknown'):
                transformers[column] = AnonymizedFaker(function_name='bothify')
                transformers[column].function_kwargs = {'text': 'sdv-pii-?????', 'letters': 'abcdefghijklmnopqrstuvwxyz'}
            elif pii:
                enforce_uniqueness = bool((column in self._keys))
                transformers[column] = self.create_anonymized_transformer(sdtype, column_metadata, enforce_uniqueness, self._locales)
            elif (sdtype in self._transformers_by_sdtype):
                transformers[column] = self._get_transformer_instance(sdtype, column_metadata)
            else:
                sdtypes[column] = 'categorical'
                transformers[column] = self._get_transformer_instance('categorical', column_metadata)
        for (columns, transformer) in self.grouped_columns_to_transformers.items():
            transformers[columns] = transformer
        config = {'transformers': transformers, 'sdtypes': sdtypes}
        config = self._update_constraint_transformers(data, columns_created_by_constraints, config)
        return config

    def update_transformers(self, column_name_to_transformer):
        """Override transformers after preparation; keys only allow generators."""
        if (self._hyper_transformer.field_transformers == {}):
            raise NotFittedError('The DataProcessor must be prepared for fitting before the transformers can be updated.')
        for (column, transformer) in column_name_to_transformer.items():
            if ((column in self._keys) and (not (type(transformer) in (AnonymizedFaker, RegexGenerator)))):
                raise SynthesizerInputError(f"Invalid transformer '{transformer.__class__.__name__}' for a primary or alternate key '{column}'. Please use 'AnonymizedFaker' or 'RegexGenerator' instead.")
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', module='rdt.hyper_transformer')
            self._hyper_transformer.update_transformers(column_name_to_transformer)

    def _fit_hyper_transformer(self, data):
        """Fit the wrapped HyperTransformer on the constrained data."""
        self._hyper_transformer.fit(data)

    def _fit_formatters(self, data):
        """Learn numerical/datetime output formats for non-key columns."""
        for column_name in data:
            column_metadata = self.metadata.columns.get(column_name)
            sdtype = column_metadata.get('sdtype')
            if ((sdtype == 'numerical') and (column_name != self._primary_key)):
                representation = column_metadata.get('computer_representation', 'Float')
                self.formatters[column_name] = NumericalFormatter(
                    enforce_rounding=self._enforce_rounding,
                    enforce_min_max_values=self._enforce_min_max_values,
                    computer_representation=representation)
                self.formatters[column_name].learn_format(data[column_name])
            elif ((sdtype == 'datetime') and (column_name != self._primary_key)):
                datetime_format = column_metadata.get('datetime_format')
                self.formatters[column_name] = DatetimeFormatter(datetime_format=datetime_format)
                self.formatters[column_name].learn_format(data[column_name])

    def prepare_for_fitting(self, data):
        """Fit formatters/constraints and configure the HyperTransformer once."""
        if (not self._prepared_for_fitting):
            LOGGER.info(f'Fitting table {self.table_name} metadata')
            self._dtypes = data[list(data.columns)].dtypes
            self.formatters = {}
            LOGGER.info(f'Fitting formatters for table {self.table_name}')
            self._fit_formatters(data)
            LOGGER.info(f'Fitting constraints for table {self.table_name}')
            if (len(self._constraints_list) != len(self._constraints)):
                self._fit_constraints(data)
            constrained = self._transform_constraints(data)
            columns_created_by_constraints = (set(constrained.columns) - set(data.columns))
            config = self._hyper_transformer.get_config()
            missing_columns = (columns_created_by_constraints - config.get('sdtypes').keys())
            if (not config.get('sdtypes')):
                LOGGER.info(f'Setting the configuration for the ``HyperTransformer`` for table {self.table_name}')
                config = self._create_config(constrained, columns_created_by_constraints)
                self._hyper_transformer.set_config(config)
            elif missing_columns:
                # Config exists but misses constraint-created columns: rebuild
                # the HyperTransformer with the patched config.
                config = self._update_constraint_transformers(constrained, missing_columns, config)
                self._hyper_transformer = rdt.HyperTransformer()
                self._hyper_transformer.set_config(config)
            self._prepared_for_fitting = True

    def fit(self, data):
        """Fully fit constraints, formatters and the HyperTransformer."""
        if data.empty:
            raise ValueError('The fit dataframe is empty, synthesizer will not be fitted.')
        self._prepared_for_fitting = False
        self.prepare_for_fitting(data)
        constrained = self._transform_constraints(data)
        if constrained.empty:
            raise ValueError('The constrained fit dataframe is empty, synthesizer will not be fitted.')
        LOGGER.info(f'Fitting HyperTransformer for table {self.table_name}')
        self._fit_hyper_transformer(constrained)
        self.fitted = True

    def reset_sampling(self):
        """Reset randomization so sampling becomes reproducible again."""
        self._hyper_transformer.reset_randomization()

    def generate_keys(self, num_rows, reset_keys=False):
        """Generate key columns for *num_rows* rows.

        NOTE(review): ``reset_keys`` is accepted but currently unused here —
        confirm whether resetting should happen before generation.
        """
        generated_keys = self._hyper_transformer.create_anonymized_columns(num_rows=num_rows, column_names=self._keys)
        return generated_keys

    def transform(self, data, is_condition=False):
        """Run constraints then the HyperTransformer over *data*."""
        data = data.copy()
        if (not self.fitted):
            raise NotFittedError()
        columns = [column for column in self.get_sdtypes(primary_keys=(not is_condition)) if (column in data.columns)]
        LOGGER.debug(f'Transforming constraints for table {self.table_name}')
        data = self._transform_constraints(data[columns], is_condition)
        LOGGER.debug(f'Transforming table {self.table_name}')
        if (self._keys and (not is_condition)):
            # Keep the primary key available as the index during transform.
            data = data.set_index(self._primary_key, drop=False)
        try:
            transformed = self._hyper_transformer.transform_subset(data)
        except (rdt.errors.NotFittedError, rdt.errors.ConfigNotSetError):
            transformed = data
        return transformed

    def reverse_transform(self, data, reset_keys=False):
        """Map transformed data back to its original representation."""
        if (not self.fitted):
            raise NotFittedError()
        reversible_columns = [column for column in self._hyper_transformer._output_columns if (column in data.columns)]
        reversed_data = data
        try:
            if (not data.empty):
                reversed_data = self._hyper_transformer.reverse_transform_subset(data[reversible_columns])
        except rdt.errors.NotFittedError:
            LOGGER.info(f'HyperTransformer has not been fitted for table {self.table_name}')
        # Multi-column transformers with no model output reverse separately.
        for transformer in self.grouped_columns_to_transformers.values():
            if (not transformer.output_columns):
                reversed_data = transformer.reverse_transform(reversed_data)
        num_rows = len(reversed_data)
        sampled_columns = list(reversed_data.columns)
        missing_columns = [column for column in (self.metadata.columns.keys() - set((sampled_columns + self._keys))) if self._hyper_transformer.field_transformers.get(column)]
        if (missing_columns and num_rows):
            # Anonymized columns are generated rather than reversed.
            anonymized_data = self._hyper_transformer.create_anonymized_columns(num_rows=num_rows, column_names=missing_columns)
            sampled_columns.extend(missing_columns)
            reversed_data[anonymized_data.columns] = anonymized_data[anonymized_data.notna()]
        if (self._keys and num_rows):
            generated_keys = self.generate_keys(num_rows, reset_keys)
            sampled_columns.extend(self._keys)
            reversed_data[generated_keys.columns] = generated_keys[generated_keys.notna()]
        for constraint in reversed(self._constraints_to_reverse):
            reversed_data = constraint.reverse_transform(reversed_data)
        new_columns = list((set(reversed_data.columns) - set(sampled_columns)))
        sampled_columns.extend(new_columns)
        # Restore the metadata's column ordering.
        sampled_columns = [column for column in self.metadata.columns.keys() if (column in sampled_columns)]
        for column_name in sampled_columns:
            column_data = reversed_data[column_name]
            dtype = self._dtypes[column_name]
            if (is_integer_dtype(dtype) and is_float_dtype(column_data.dtype)):
                column_data = column_data.round()
            reversed_data[column_name] = column_data[column_data.notna()]
            try:
                reversed_data[column_name] = reversed_data[column_name].astype(dtype)
            except ValueError as e:
                column_metadata = self.metadata.columns.get(column_name)
                sdtype = column_metadata.get('sdtype')
                if (sdtype not in self._DTYPE_TO_SDTYPE.values()):
                    # Non-standard sdtype: keep the data, warn, drop the formatter.
                    LOGGER.info(f"The real data in '{column_name}' was stored as '{dtype}' but the synthetic data could not be cast back to this type. If this is a problem, please check your input data and metadata settings.")
                    if (column_name in self.formatters):
                        self.formatters.pop(column_name)
                else:
                    raise ValueError(e)
        for column in sampled_columns:
            if (column in self.formatters):
                data_to_format = reversed_data[column]
                reversed_data[column] = self.formatters[column].format_data(data_to_format)
        return reversed_data[sampled_columns]

    def filter_valid(self, data):
        """Keep only rows that satisfy every fitted constraint."""
        for constraint in self._constraints:
            data = constraint.filter_valid(data)
        return data

    def to_dict(self):
        """Serialize metadata, constraints and model kwargs to a plain dict."""
        constraints_to_reverse = [cnt.to_dict() for cnt in self._constraints_to_reverse]
        return {'metadata': deepcopy(self.metadata.to_dict()), 'constraints_list': deepcopy(self._constraints_list), 'constraints_to_reverse': constraints_to_reverse, 'model_kwargs': deepcopy(self._model_kwargs)}

    # Fix: restored @classmethod — the method takes ``cls`` and is an
    # alternate constructor.
    @classmethod
    def from_dict(cls, metadata_dict, enforce_rounding=True, enforce_min_max_values=True):
        """Rebuild a DataProcessor from a ``to_dict`` payload."""
        instance = cls(metadata=SingleTableMetadata.load_from_dict(metadata_dict['metadata']), enforce_rounding=enforce_rounding, enforce_min_max_values=enforce_min_max_values, model_kwargs=metadata_dict.get('model_kwargs'))
        instance._constraints_to_reverse = [Constraint.from_dict(cnt) for cnt in metadata_dict.get('constraints_to_reverse', [])]
        instance._constraints_list = metadata_dict.get('constraints_list', [])
        return instance

    def to_json(self, filepath):
        """Write the ``to_dict`` payload to *filepath* as indented JSON."""
        with open(filepath, 'w') as out_file:
            json.dump(self.to_dict(), out_file, indent=4)

    # Fix: restored @classmethod (alternate constructor taking ``cls``).
    @classmethod
    def from_json(cls, filepath):
        """Rebuild a DataProcessor from a JSON file written by ``to_json``."""
        with open(filepath, 'r') as in_file:
            return cls.from_dict(json.load(in_file))
class EmailTrashPlayer(RecipePlayer):
    """Scripted player that clicks the trash control next to a given sender."""

    def __init__(self, state):
        fields = state.fields
        # Find the DOM element whose text matches the 'by' field and whose
        # ref is one of the known sender refs.
        matches = [
            el for el in state.dom_elements
            if el.text == fields['by'] and el.ref in EMAIL_SENDER_REFS
        ]
        sender = matches[0]
        # Sender refs and trash refs are parallel lists: pick the trash ref
        # at the same position as the matched sender.
        for sender_ref, trash_ref in zip(EMAIL_SENDER_REFS, EMAIL_TRASH_REFS):
            if sender.ref == sender_ref:
                trash = trash_ref
                break
        super(EmailTrashPlayer, self).__init__([(trash, None)])
def _parse(value, function, fmt): try: return function(value) except ValueError as e: raise_from(ValueError(fmt.format(e)), None)
def get_true_mi(syn_file_cat, z_dim):
    """Load the ground-truth KSG CMI estimate for a category / z dimension."""
    path = f'../data/cat{syn_file_cat}/ksg_gt.dz{z_dim}.npy'
    return float(np.load(path))
class Graph():
    """Builds bar charts of ScrawlD vulnerability statistics from result files.

    NOTE(review): both dicts below are *class* attributes, so counts are
    shared across instances and accumulate if the counting methods run more
    than once — confirm this is intended before creating multiple Graphs.
    """

    dict_contracts_per_vuln = {'ARTHM': 0, 'DOS': 0, 'LE': 0, 'RENT': 0, 'TimeM': 0, 'TimeO': 0, 'TX-Origin': 0, 'UE': 0}
    no_of_vuln = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0}

    def majority_warnings_per_vuln(self):
        """Count every warning occurrence per vulnerability and plot totals."""
        filename = (data_path + 'scrawld_majority_all.txt')
        # Fix: context manager — the original never closed the file handle.
        with open(filename, 'r') as result_file:
            lines = result_file.readlines()
        for line in lines:
            for key in self.dict_contracts_per_vuln.keys():
                if (key in line):
                    # count() tallies repeated warnings on the same line.
                    self.dict_contracts_per_vuln[key] += line.count(key)
        xlabel = 'Vulnerability Name'
        ylabel = 'Count of Warnings'
        is_rotate = 1
        self.plot_graph(self.dict_contracts_per_vuln, (graph_path + 'majority_warnings_per_vuln.pdf'), xlabel, ylabel, is_rotate)

    def contracts_per_vuln(self):
        """Count how many contracts mention each vulnerability and plot it."""
        filename = (data_path + 'scrawld_majority_unique.txt')
        # Fix: context manager — the original never closed the file handle.
        with open(filename, 'r') as result_file:
            lines = result_file.readlines()
        for line in lines:
            for key in self.dict_contracts_per_vuln.keys():
                if (key in line):
                    # One contract per line: +1 regardless of repeats.
                    self.dict_contracts_per_vuln[key] += 1
        xlabel = 'Vulnerability Name'
        ylabel = 'Number of Contracts'
        is_rotate = 1
        self.plot_graph(self.dict_contracts_per_vuln, (graph_path + 'contracts_per_vuln.pdf'), xlabel, ylabel, is_rotate)

    def zero_to_nan(self, values):
        """Drop zero-valued entries from *values* in place and return it."""
        for key in [k for (k, v) in values.items() if (v == 0)]:
            values.pop(key)
        return values

    def get_no_of_vuln(self):
        """Histogram contracts by distinct-vulnerability count, then plot."""
        filename = (data_path + 'scrawld_majority_unique.txt')
        # Fix: context manager — the original never closed the file handle.
        with open(filename, 'r') as result_file:
            lines = result_file.readlines()
        for line in lines:
            vuln_count = sum((1 for key in self.dict_contracts_per_vuln if (key in line)))
            # The original incremented separately for vuln_count > 0 and
            # vuln_count == 0 — together that is an unconditional increment.
            self.no_of_vuln[vuln_count] += 1
        xlabel = 'Number of Unique Vulnerabilities'
        ylabel = 'Number of Contracts'
        is_rotate = 0
        self.plot_graph(self.zero_to_nan(self.no_of_vuln), (graph_path + 'unique_vuln.pdf'), xlabel, ylabel, is_rotate)

    def plot_graph(self, dict, plot_name, xlabel, ylabel, is_rotate):
        """Render a log-scale labelled bar chart of *dict* into *plot_name*.

        NOTE(review): the parameter name ``dict`` shadows the builtin; kept
        unchanged for backward compatibility with keyword callers.
        """
        heights = []
        labels = []
        for key in dict.keys():
            if (dict[key] != 0):
                heights.append(dict[key])
                labels.append(key)
        ypoints = np.array(heights)
        xlocs = range(0, len(labels))
        p1 = plt.bar(xlocs, ypoints, color='gray')
        if (is_rotate == 1):
            plt.xticks(xlocs, labels, rotation=30)
        else:
            plt.xticks(xlocs, labels)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.bar_label(p1)
        plt.subplots_adjust(bottom=0.2)
        # Log scale keeps rare vulnerability classes visible next to common ones.
        plt.yscale('log')
        plt.savefig(plot_name)
        plt.clf()
def load_dataset(dataset_name, subset_name):
    """Load a HF dataset, falling back to the manual-download cache on demand.

    When the builder requires manually downloaded files, the data directory
    is resolved under PROMPTSOURCE_MANUAL_DATASET_DIR (or the default cache).
    """
    try:
        return datasets.load_dataset(dataset_name, subset_name)
    except datasets.builder.ManualDownloadError:
        cache_root_dir = os.environ.get('PROMPTSOURCE_MANUAL_DATASET_DIR', DEFAULT_PROMPTSOURCE_CACHE_HOME)
        if subset_name is None:
            data_dir = f'{cache_root_dir}/{dataset_name}'
        else:
            data_dir = f'{cache_root_dir}/{dataset_name}/{subset_name}'
        return datasets.load_dataset(dataset_name, subset_name, data_dir=data_dir)
class Lamb(Optimizer):
    """LAMB optimizer (layer-wise adaptive moments) with LR scheduling.

    Computes an Adam-style update per parameter, then rescales it by the
    trust ratio ||w|| / ||update||.  With ``adam=True`` the trust ratio is
    forced to 1, reducing the update to plain Adam.

    NOTE(review): the in-place calls use the legacy ``add_(scalar, tensor)``
    / ``addcmul_(scalar, t1, t2)`` signatures, which newer PyTorch versions
    deprecate in favor of ``alpha=``/``value=`` keywords — confirm the
    intended torch version before upgrading.
    """

    def __init__(self, params, lr=0.001, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0, adam=False, correct_bias=False):
        # Validate hyperparameters before handing them to the base Optimizer.
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if (not isinstance(schedule, _LRSchedule)):
            # A schedule name was given: build the _LRSchedule from SCHEDULES.
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        elif ((warmup != (- 1)) or (t_total != (- 1))):
            # A schedule object was given, so warmup/t_total here are ignored.
            logger.warning('warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. Please specify custom warmup and t_total in _LRSchedule object.')
        defaults = dict(lr=lr, betas=betas, eps=eps, schedule=schedule, weight_decay=weight_decay, correct_bias=correct_bias)
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def get_lr(self):
        """Return the current scheduled learning rate per stepped parameter.

        Parameters that have not been stepped yet (empty state) are skipped.
        """
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if (len(state) == 0):
                    pass
                else:
                    lr_scheduled = group['lr']
                    lr_scheduled *= group['schedule'].get_lr(state['step'])
                    lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Perform one optimization step.

        :param closure: optional callable re-evaluating the model loss.
        :returns: the closure's loss, or ``None`` when no closure is given.
        :raises RuntimeError: for sparse gradients (unsupported).
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    # NOTE(review): 'instad' typo is in the original message;
                    # left untouched here since this is a runtime string.
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.')
                state = self.state[p]
                # Lazy state initialization on the first step for this param.
                if (len(state) == 0):
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                (beta1, beta2) = group['betas']
                state['step'] += 1
                # Exponential moving averages of the gradient and its square.
                exp_avg.mul_(beta1).add_((1 - beta1), grad)
                exp_avg_sq.mul_(beta2).addcmul_((1 - beta2), grad, grad)
                step_size = group['lr']
                if group['correct_bias']:
                    # Adam-style bias correction of the moment estimates.
                    bias_correction1 = (1.0 - (beta1 ** state['step']))
                    bias_correction2 = (1.0 - (beta2 ** state['step']))
                    step_size = ((step_size * math.sqrt(bias_correction2)) / bias_correction1)
                lr_scheduled = (step_size * group['schedule'].get_lr(state['step']))
                weight_norm = p.data.pow(2).sum().sqrt()
                # The raw Adam update direction (with decoupled weight decay).
                adam_step = (exp_avg / exp_avg_sq.sqrt().add(group['eps']))
                if (group['weight_decay'] != 0):
                    adam_step.add_(group['weight_decay'], p.data)
                adam_norm = adam_step.pow(2).sum().sqrt()
                # Trust ratio: layer-wise rescaling; 1 when either norm is 0.
                if ((weight_norm == 0) or (adam_norm == 0)):
                    trust_ratio = 1
                else:
                    trust_ratio = (weight_norm / adam_norm)
                # Stored for inspection/debugging.
                state['weight_norm'] = weight_norm
                state['adam_norm'] = adam_norm
                state['trust_ratio'] = trust_ratio
                if self.adam:
                    trust_ratio = 1
                p.data.add_(((- lr_scheduled) * trust_ratio), adam_step)
        return loss
# Fix: the decorator had been truncated to a bare "(params=[...])", which is
# a syntax error; restored the @pytest.fixture name in front of it.
@pytest.fixture(params=['2.0', '3.0'])
def schema_with_optional_headers(request):
    """Schema fixture: a /data GET endpoint declaring a non-required header.

    Parametrized over Open API 2.0 (which marks optionality with the
    ``x-required`` vendor extension) and Open API 3.0 (which uses the
    standard ``required: false`` keyword on the header object).
    """
    if (request.param == '2.0'):
        base_schema = request.getfixturevalue('empty_open_api_2_schema')
        base_schema['paths'] = {'/data': {'get': {'responses': {'200': {'description': 'OK', 'schema': {'type': 'object'}, 'headers': {'X-Optional': {'description': 'Optional header', 'type': 'integer', 'x-required': False}}}}}}}
        return base_schema
    if (request.param == '3.0'):
        base_schema = request.getfixturevalue('empty_open_api_3_schema')
        base_schema['paths'] = {'/data': {'get': {'responses': {'200': {'description': 'OK', 'content': {'application/json': {'schema': {'type': 'object'}}}, 'headers': {'X-Optional': {'description': 'Optional header', 'schema': {'type': 'integer'}, 'required': False}}}}}}}
        return base_schema