code
stringlengths
101
5.91M
# NOTE(review): the decorator below lost its leading '@' (and possibly a name
# prefix, cf. numpy's array_function_from_c_func_and_dispatcher) during
# extraction; restored as a decorator so the definition is syntactically valid.
@_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
def result_type(*arrays_and_dtypes):
    """Dispatcher for result_type: hands all positional args to the C implementation."""
    return arrays_and_dtypes
def is_dist_avail_and_initialized():
    """Return True when some distributed backend (Horovod, BytePS, or torch.distributed) is active."""
    # Horovod / BytePS manage their own initialization, so either flag
    # short-circuits the torch.distributed checks.
    if _USE_HVD or _USE_BPS:
        return True
    return dist.is_available() and dist.is_initialized()
class TrackerBase():
    """Base class for trackers with a context-managed tracking window.

    Subclasses implement populate_report(); track() brackets a block of code
    between start_tracking() and stop_tracking().
    """

    def __init__(self):
        # True only while inside an active track() context.
        self._is_tracking = False

    def track(self):
        """Return a context manager that tracks for the duration of the with-block.

        Fix: the original body was a bare generator (its decorator was lost),
        so `with tracker.track():` failed; it is now wrapped in
        contextlib.contextmanager.
        """
        from contextlib import contextmanager

        @contextmanager
        def _cm():
            self.start_tracking()
            try:
                yield self
            finally:
                # Guarantee tracking stops even if the with-body raises.
                self.stop_tracking()
        return _cm()

    def start_tracking(self):
        self._is_tracking = True

    def stop_tracking(self):
        self._is_tracking = False

    def populate_report(self, builder):
        """Fill `builder` with tracked data; must be overridden by subclasses."""
        raise NotImplementedError
class ParallelWorkersTest(unittest.TestCase):
    """Tests for caffe2 parallel_workers: basic operation plus init_fun/shutdown_fun hooks."""

    def testParallelWorkers(self):
        # Workers enqueue their own id; expect only ids b'0'/b'1' and a clean stop.
        workspace.ResetWorkspace()
        queue = create_queue()
        dummy_worker = create_worker(queue, (lambda worker_id: str(worker_id)))
        worker_coordinator = parallel_workers.init_workers(dummy_worker)
        worker_coordinator.start()
        for _ in range(10):
            value = dequeue_value(queue)
            self.assertTrue((value in [b'0', b'1']), ('Got unexpected value ' + str(value)))
        self.assertTrue(worker_coordinator.stop())

    def testParallelWorkersInitFun(self):
        # init_fun must run before any worker produces values.
        workspace.ResetWorkspace()
        queue = create_queue()
        dummy_worker = create_worker(queue, (lambda worker_id: workspace.FetchBlob('data')))
        workspace.FeedBlob('data', 'not initialized')

        def init_fun(worker_coordinator, global_coordinator):
            workspace.FeedBlob('data', 'initialized')
        worker_coordinator = parallel_workers.init_workers(dummy_worker, init_fun=init_fun)
        worker_coordinator.start()
        for _ in range(10):
            value = dequeue_value(queue)
            self.assertEqual(value, b'initialized', ('Got unexpected value ' + str(value)))
        worker_coordinator.stop()

    def testParallelWorkersShutdownFun(self):
        # shutdown_fun must have run by the time stop() returns.
        workspace.ResetWorkspace()
        queue = create_queue()
        dummy_worker = create_worker(queue, (lambda worker_id: str(worker_id)))
        workspace.FeedBlob('data', 'not shutdown')

        def shutdown_fun():
            workspace.FeedBlob('data', 'shutdown')
        worker_coordinator = parallel_workers.init_workers(dummy_worker, shutdown_fun=shutdown_fun)
        worker_coordinator.start()
        self.assertTrue(worker_coordinator.stop())
        data = workspace.FetchBlob('data')
        self.assertEqual(data, b'shutdown', ('Got unexpected value ' + str(data)))
class ShapeDtypeStruct():
    """Lightweight record pairing an array shape with a dtype.

    __slots__ keeps instances small (no per-instance __dict__).
    """
    __slots__ = ['shape', 'dtype']

    def __init__(self, shape, dtype):
        for attr, value in (('shape', shape), ('dtype', dtype)):
            setattr(self, attr, value)
def decide_download(url):
    """Inspect the URL's Content-Length and ask the user to confirm downloads over 1 GB."""
    response = ur.urlopen(url)
    size_gb = int(response.info()['Content-Length']) / GBFACTOR
    # Small files are downloaded without prompting.
    if size_gb <= 1:
        return True
    prompt = 'This will download %.2fGB. Will you proceed? (y/N)\n' % size_gb
    return input(prompt).lower() == 'y'
def train(args, trainer, task, epoch_itr):
    """Train for one epoch (fairseq-style loop with gradient accumulation via update_freq)."""
    # Per-epoch update frequency; clamp to the last entry once epochs outrun the list.
    if (epoch_itr.epoch <= len(args.update_freq)):
        update_freq = args.update_freq[(epoch_itr.epoch - 1)]
    else:
        update_freq = args.update_freq[(- 1)]
    itr = epoch_itr.next_epoch_itr(fix_batches_to_gpus=args.fix_batches_to_gpus)
    # Groups update_freq mini-batches per call so trainer.train_step accumulates gradients.
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, no_progress_bar='simple')
    extra_meters = collections.defaultdict((lambda : AverageMeter()))
    first_valid = args.valid_subset.split(',')[0]
    max_update = (args.max_update or math.inf)
    # NOTE(review): num_batches is computed but never used below.
    num_batches = len(epoch_itr)
    for (i, samples) in enumerate(progress, start=epoch_itr.iterations_in_epoch):
        log_output = trainer.train_step(samples)
        if (log_output is None):
            continue
        stats = get_training_stats(trainer)
        for (k, v) in log_output.items():
            # Core keys are already covered by get_training_stats.
            if (k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']):
                continue
            if ('loss' in k):
                extra_meters[k].update(v, log_output['sample_size'])
            else:
                extra_meters[k].update(v)
            stats[k] = extra_meters[k].avg
        progress.log(stats)
        # Ignore the first mini-batch in the words-per-second measurement.
        if (i == 0):
            trainer.get_meter('wps').reset()
        num_updates = trainer.get_num_updates()
        # Mid-epoch validation/checkpointing every save_interval_updates updates.
        if ((args.save_interval_updates > 0) and ((num_updates % args.save_interval_updates) == 0) and (num_updates > 0)):
            valid_losses = validate(args, trainer, task, epoch_itr, [first_valid])
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        if (num_updates >= max_update):
            break
    # Log end-of-epoch stats, then reset epoch-level meters for the next epoch.
    stats = get_training_stats(trainer)
    for (k, meter) in extra_meters.items():
        stats[k] = meter.avg
    progress.print(stats)
    for k in ['train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip']:
        meter = trainer.get_meter(k)
        if (meter is not None):
            meter.reset()
def main():
    """CLI entry point: train an HRNet classifier from CSV-listed data, optionally validating each epoch."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--csvpath', type=str, required=True, default='./', help='Location to data csv file')
    parser.add_argument('--output_dir', type=str, default='./', help='output directory to save model')
    parser.add_argument('--n_classes', type=int, default=2, help='number of classes')
    parser.add_argument('--n_epoch', type=int, default=200, help='number of epochs')
    parser.add_argument('--batch_size', type=int, default=2, help='batch size')
    args = parser.parse_args()
    # Validation runs only when a valid.csv exists alongside the training data.
    validate = False
    os.makedirs(args.output_dir, exist_ok=True)
    if os.path.exists(os.path.join(args.csvpath, 'valid.csv')):
        validate = True
    initial_learning_rate = 0.01
    weight_decay = 0.0001
    train_object = classDataset(args.csvpath, 'train')
    train_loader = DataLoader(train_object, batch_size=args.batch_size, shuffle=True, num_workers=2)
    if validate:
        valid_object = classDataset(args.csvpath, 'valid')
        valid_loader = DataLoader(valid_object, batch_size=args.batch_size, shuffle=False, num_workers=2)
    model = hrnet(args.n_classes)
    # Class weights [0.05, 1] heavily down-weight class 0 — presumably a class
    # imbalance correction; confirm against the dataset.
    criterion = nn.CrossEntropyLoss(weight=torch.Tensor([0.05, 1])).cuda()
    optimizer = Ranger(model.parameters(), lr=initial_learning_rate, weight_decay=weight_decay)
    # Polynomial LR decay over the full training horizon (epochs * batches).
    lr_scheduler = polylr(optimizer, (args.n_epoch * len(train_loader)), initial_learning_rate)
    best_val_iou = 0
    if torch.cuda.is_available():
        model.cuda()
    for epoch in range(1, (args.n_epoch + 1)):
        train(epoch, args.n_epoch, model, train_loader, criterion, optimizer, lr_scheduler)
        # A checkpoint is written every epoch regardless of validation results.
        torch.save({'state_dict': model.state_dict(), 'epoch': epoch}, os.path.join(args.output_dir, 'epoch_{}.pth'.format(epoch)))
        if validate:
            best_val_iou = validate_model(epoch, model, valid_loader, criterion, best_val_iou, args.n_classes, args.output_dir)
def main(args):
    """Build a single-operator (optionally chained) caffe2 benchmark net with brew and export init/predict nets."""
    kwargs = {'order': 'NCHW'}
    kwargs.update(dict(args.kwargs))
    model = ModelHelper(name=args.benchmark_name)
    op_type = args.operator
    input_name = args.input_name
    output_name = args.output_name
    iters = int(args.iters)
    for i in range(iters):
        # When chaining, each op after the first reads the previous op's output.
        input_blob_name = (input_name + (str(i) if ((i > 0) and args.chain) else ''))
        output_blob_name = (output_name + str((i + 1)))
        add_op = getattr(brew, op_type)
        add_op(model, input_blob_name, output_blob_name, **kwargs)
        if args.chain:
            (input_name, output_name) = (output_name, input_name)
    workspace.RunNetOnce(model.param_init_net)
    extra_init_net_ops = []

    def make_blob_on_context(blob_name, blob_data, context):
        # Fill the blob on CPU first; append a copy op when targeting OpenGL.
        if (context.upper() != 'CPU'):
            blob_name_modified = '{}_CPU'.format(blob_name)
        else:
            blob_name_modified = blob_name
        fill_op = core.CreateOperator('GivenTensorFill', [], [blob_name_modified], arg=[utils.MakeArgument('shape', blob_data.shape), utils.MakeArgument('values', blob_data)])
        extra_init_net_ops.append(fill_op)
        if (context.upper() == 'OPENGL'):
            copy_op = core.CreateOperator('CopyToOpenGL', [blob_name_modified], [blob_name])
            extra_init_net_ops.append(copy_op)
    # args.blob entries look like "name=1,2,3" (shape dims).
    for unparsed_blob in args.blob:
        (name, unparsed_dims) = unparsed_blob.split('=')
        dims = [int(d) for d in unparsed_dims.split(',')]
        np_input = np.random.rand(*dims).astype(np.float32)
        make_blob_on_context(name, np_input, args.context)
    (init_net, predict_net) = mobile_exporter.Export(workspace, model.net, model.params)
    init_net.op.extend(extra_init_net_ops)
    if (args.context.upper() == 'OPENGL'):
        # Rewrite every predict op to its OpenGL variant.
        old_ops = [op for op in predict_net.op]
        del predict_net.op[:]
        for op in old_ops:
            op.type = 'OpenGL{}'.format(op.type)
        predict_net.op.extend(old_ops)
    if args.debug:
        print('init_net:')
        for op in init_net.op:
            print(' ', op.type, op.input, '-->', op.output)
        print('predict_net:')
        for op in predict_net.op:
            print(' ', op.type, op.input, '-->', op.output)
    with open(args.predict_net, 'wb') as f:
        f.write(predict_net.SerializeToString())
    with open(args.init_net, 'wb') as f:
        f.write(init_net.SerializeToString())
def _update_adamax(p, g, m, u, t, alpha, beta1, beta2, eps): alpha_t = (alpha / (1.0 - (beta1 ** t))) m[...] = ((beta1 * m) + ((1 - beta1) * g)) u[...] = np.maximum((beta2 * u), np.abs(g)) p[...] = (p - ((alpha_t * m) / (u + eps)))
def prune(data, cpnet_vocab_path):
    """Filter each item's question ('qc') and answer ('ac') concepts.

    Drops morphological near-duplicates ('-er'/'-e' forms whose base is also
    present), stopword-heavy concepts, and concepts missing from the ConceptNet
    vocabulary file.
    """
    with open(cpnet_vocab_path, 'r', encoding='utf8') as fin:
        cpnet_vocab = [l.strip() for l in fin]
    prune_data = []
    for item in tqdm(data):
        qc = item['qc']
        prune_qc = []
        for c in qc:
            # Skip '-er'/'-e' variants when the base form is also a candidate.
            if ((c[(- 2):] == 'er') and (c[:(- 2)] in qc)):
                continue
            if ((c[(- 1):] == 'e') and (c[:(- 1)] in qc)):
                continue
            # Question concepts are rejected if ANY token is a stopword.
            have_stop = False
            for t in c.split('_'):
                if (t in nltk_stopwords):
                    have_stop = True
            if ((not have_stop) and (c in cpnet_vocab)):
                prune_qc.append(c)
        ac = item['ac']
        prune_ac = []
        for c in ac:
            if ((c[(- 2):] == 'er') and (c[:(- 2)] in ac)):
                continue
            if ((c[(- 1):] == 'e') and (c[:(- 1)] in ac)):
                continue
            # Asymmetry with qc: answer concepts only need ONE non-stopword token.
            all_stop = True
            for t in c.split('_'):
                if (t not in nltk_stopwords):
                    all_stop = False
            if ((not all_stop) and (c in cpnet_vocab)):
                prune_ac.append(c)
        try:
            assert ((len(prune_ac) > 0) and (len(prune_qc) > 0))
        except Exception as e:
            # Deliberately best-effort: items pruned to empty lists are kept
            # rather than raising.
            pass
        item['qc'] = prune_qc
        item['ac'] = prune_ac
        prune_data.append(item)
    return prune_data
class CvtEncoder(nn.Module):
    """Stack of CvtStage modules; returns the last hidden state, the cls token, and optional per-stage hidden states."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.stages = nn.ModuleList([])
        # One CvtStage per entry in config.depth.
        for stage_idx in range(len(config.depth)):
            self.stages.append(CvtStage(config, stage_idx))

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = (() if output_hidden_states else None)
        hidden_state = pixel_values
        # cls_token stays None until a stage produces one.
        cls_token = None
        for (_, stage_module) in enumerate(self.stages):
            (hidden_state, cls_token) = stage_module(hidden_state)
            if output_hidden_states:
                all_hidden_states = (all_hidden_states + (hidden_state,))
        if (not return_dict):
            # Tuple output drops any None entries.
            return tuple((v for v in [hidden_state, cls_token, all_hidden_states] if (v is not None)))
        return BaseModelOutputWithCLSToken(last_hidden_state=hidden_state, cls_token_value=cls_token, hidden_states=all_hidden_states)
def radical_difference_family(K, k, l=1, existence=False, check=True):
    """Return a radical difference family over the finite field K (Sage).

    With existence=True, returns True/False/Unknown instead of constructing
    the family; with check=True, the constructed family is verified.
    """
    v = K.cardinality()
    x = K.multiplicative_generator()
    e = (k * (k - 1))
    # Necessary divisibility condition for a (v, k, l) difference family.
    if ((l * (v - 1)) % e):
        raise ValueError('k (k-1) = {} should be a multiple of l (v-1) ={}'.format((k * (k - 1)), (l * (v - 1))))
    t = ((l * (v - 1)) // e)
    if (t == 1):
        # A single block: this degenerates to a difference set.
        return radical_difference_set(K, k, l, existence=existence, check=check)
    elif (l == (k - 1)):
        if existence:
            return True
        else:
            # Nonzero cyclotomic cosets of the index-k subgroup.
            return K.cyclotomic_cosets((x ** ((v - 1) // k)))[1:]
    elif (l != 1):
        if existence:
            return Unknown
        # NOTE(review): the guard is l != 1 but the message says 'l > 2' — confirm intended wording.
        raise NotImplementedError('No radical families implemented for l > 2')
    else:
        D = one_radical_difference_family(K, k)
        if (D is None):
            if existence:
                return False
            raise EmptySetError('No such difference family')
        elif existence:
            return True
    if (check and (not is_difference_family(K, D, v, k, l))):
        raise RuntimeError('radical_difference_family produced a wrong difference family with parameters v={}, k={}, l={}. Please contact sage-'.format(v, k, l))
    return D
def mlp_gaussian_policy(x, a, hidden_sizes, activation, output_activation, action_space):
    """Diagonal-Gaussian MLP policy head (TF1).

    Returns (sampled action, log-prob of `a`, log-prob of the sample).
    """
    act_dim = a.shape.as_list()[-1]
    mu = mlp(x, list(hidden_sizes) + [act_dim], activation, output_activation)
    # One learned log-std per action dimension, initialized to -0.5.
    log_std = tf.get_variable(name='log_std', initializer=(-0.5) * np.ones(act_dim, dtype=np.float32))
    sigma = tf.exp(log_std)
    pi = mu + tf.random_normal(tf.shape(mu)) * sigma
    logp = gaussian_likelihood(a, mu, log_std)
    logp_pi = gaussian_likelihood(pi, mu, log_std)
    return (pi, logp, logp_pi)
# NOTE(review): the registration decorator was mangled to '_module()' during
# extraction; restored to mmdetection's standard '@DETECTORS.register_module()'
# — confirm against upstream. The truncated docstring is also repaired.
@DETECTORS.register_module()
class FSAF(SingleStageDetector):
    """Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None):
        super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained)
def make_current_context(device_id=None):
    """Initialize CUDA, push a primary context onto the current thread, and return it.

    When device_id is None, the current device's primary context is used.
    """
    torch.cuda.init()
    cuda_driver.init()
    if device_id is None:
        ctx = _get_primary_context_for_current_device()
    else:
        ctx = cuda_driver.Device(device_id).retain_primary_context()
    ctx.push()
    return ctx
def generate_header(m: GfxRuntime140, module_name: str, namespace: str, tcm: Optional[bytes]) -> List[str]:
    """Emit the lines of a generated C++ header for a Taichi AOT module.

    When tcm is given, the archive bytes are embedded as a static uint8_t array.
    """
    lines = [
        '// THIS IS A GENERATED HEADER; PLEASE DO NOT MODIFY.',
        '#pragma once',
        '#include <vector>',
        '#include <string>',
        '#include <taichi/cpp/taichi.hpp>',
        '',
    ]
    if namespace:
        lines += [f'namespace {namespace} {{', '']
    if tcm is not None:
        tcm_bytes = list(tcm)
        lines.append(f'static const uint8_t {module_name}_tcm[{len(tcm_bytes)}] = {{')
        # Emit the archive as rows of eight comma-separated byte values.
        for i in range(0, len(tcm_bytes), 8):
            row = ', '.join(str(x) for x in tcm_bytes[i:(i + 8)])
            lines.append(f'  {row},')
        lines += ['};', '', f'static const size_t {module_name}_tcm_size = {len(tcm_bytes)};', '']
    lines += generate_module_content(m, module_name)
    if namespace:
        lines += [f'}} // namespace {namespace}', '']
    return lines
def split_file_into_training_and_dev(filename, frac_that_should_be_dev):
    """Randomly split a *traindev.tsv file into sibling train.tsv and dev.tsv files.

    The header (first line) is copied to both outputs; each subsequent
    non-blank line goes to dev with probability frac_that_should_be_dev,
    otherwise to train. Uses the module-level random().

    Fix: the output files were opened without context managers and leaked on
    any exception; all three files are now managed by a single `with`.
    """
    assert filename.endswith('traindev.tsv')
    stem = filename[:filename.rfind('traindev.tsv')]
    num_training = 0
    num_dev = 0
    with open(stem + 'train.tsv', 'w') as train_f, open(stem + 'dev.tsv', 'w') as dev_f, open(filename, 'r') as f:
        first_line = True
        for line in f:
            if first_line:
                # Header goes to both splits.
                train_f.write(line)
                dev_f.write(line)
                first_line = False
                continue
            if line.strip() == '':
                continue
            if random() < frac_that_should_be_dev:
                dev_f.write(line)
                num_dev += 1
            else:
                train_f.write(line)
                num_training += 1
    print('Selected ' + str(num_training) + ' training instances.')
    print('Selected ' + str(num_dev) + ' dev instances.')
class ANY():
    """Equality wildcard for assertions: compares equal to any instance of the given types."""

    def __init__(self, *_types):
        self._types = _types

    def __eq__(self, other):
        # Matches whenever `other` is an instance of any accepted type.
        return isinstance(other, self._types)

    def __repr__(self):
        names = ', '.join(t.__name__ for t in self._types)
        return f"ANY({names})"
class Generator(BaseGenerator):
    """Generator that builds its graph on construction and samples random X/ADV inputs in [2, 3)."""

    def __init__(self, config, mode, X=None, ADV=None):
        super(Generator, self).__init__(config, mode)
        self.build_generator(X=X, ADV=ADV)

    def generate_random_X(self, shape):
        # Uniform in [0, 1), shifted by 2.0.
        return np.random.rand(*shape) + 2.0

    def generate_random_ADV(self, shape):
        # Same distribution as generate_random_X.
        return np.random.rand(*shape) + 2.0
class BeamFixedFree(CompositeBase):
    """Composite spectral basis for a beam fixed at the left end and free at the right.

    Boundary conditions: u(-1)=bc[0], u'(-1)=bc[1], u''(1)=bc[2], u'''(1)=bc[3].
    """

    def __init__(self, N, quad='LG', bc=(0, 0, 0, 0), domain=((- 1), 1), padding_factor=1, dealias_direct=False, dtype=float, coordinates=None, **kw):
        # Accept a plain 4-tuple of boundary values and wrap it in BoundaryConditions.
        if isinstance(bc, (tuple, list)):
            bc = BoundaryConditions({'left': {'D': bc[0], 'N': bc[1]}, 'right': {'N2': bc[2], 'N3': bc[3]}}, domain=domain)
        CompositeBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype, bc=bc, padding_factor=padding_factor, dealias_direct=dealias_direct, coordinates=coordinates)
        # Stencil mapping the composite basis onto the orthogonal one; `n` is the
        # symbolic expansion index used by the parent class.
        self._stencil = {0: 1, 1: ((4 * ((2 * n) + 3)) / (((n ** 2) + (6 * n)) + 9)), 2: ((2 * ((((((- 2) * (n ** 4)) - (17 * (n ** 3))) - (28 * (n ** 2))) + (17 * n)) + 30)) / (((((2 * (n ** 4)) + (27 * (n ** 3))) + (136 * (n ** 2))) + (303 * n)) + 252)), 3: ((- ((((8 * (n ** 3)) + (28 * (n ** 2))) + (32 * n)) + 12)) / (((((n ** 4) + (14 * (n ** 3))) + (73 * (n ** 2))) + (168 * n)) + 144)), 4: (((((((2 * (n ** 5)) + (15 * (n ** 4))) + (44 * (n ** 3))) + (63 * (n ** 2))) + (44 * n)) + 12) / ((((((2 * (n ** 5)) + (35 * (n ** 4))) + (244 * (n ** 3))) + (847 * (n ** 2))) + (1464 * n)) + 1008))}

    # NOTE(review): the two methods below take no self — they look like they
    # should be @staticmethod; the decorators may have been lost in extraction.
    def boundary_condition():
        return 'BeamFixedFree'

    def short_name():
        return 'BF'
def get_ref_index(length, sample_length):
    """Pick sample_length frame indices from range(length).

    With probability 0.5 returns a sorted random subset; otherwise a
    contiguous window starting at a random pivot.
    """
    if random.uniform(0, 1) > 0.5:
        chosen = random.sample(range(length), sample_length)
        chosen.sort()
    else:
        start = random.randint(0, length - sample_length)
        chosen = [start + offset for offset in range(sample_length)]
    return chosen
def save_checkpoint(state, is_best, epoch, path='./'):
    """Serialize `state` to checkpoint_<epoch>.pth.tar under `path`; mirror to model_best.pth.tar when best."""
    checkpoint_path = os.path.join(path, 'checkpoint_%d.pth.tar' % epoch)
    torch.save(state, checkpoint_path)
    if is_best:
        # Keep a stable alias for the best-performing checkpoint.
        shutil.copyfile(checkpoint_path, os.path.join(path, 'model_best.pth.tar'))
def transform_data(data, supports):
    """Project each column of `data` onto its support, folding out-of-support values into one extra bin."""
    df = data.df.copy()
    newdom = {}
    for col in data.domain:
        support = supports[col]
        size = support.sum()
        newdom[col] = int(size)
        if size < support.size:
            # Reserve one extra bucket for values outside the support.
            newdom[col] += 1
        mapping = {}
        idx = 0
        for i in range(support.size):
            if support[i]:
                mapping[i] = idx
                idx += 1
            else:
                # Unsupported values all collapse into the extra bucket.
                mapping[i] = size
        assert idx == size
        df[col] = df[col].map(mapping)
    return Dataset(df, Domain.fromdict(newdom))
# NOTE(review): the decorator was mangled to '.parametrize(...)' during
# extraction (invalid syntax); restored to '@pytest.mark.parametrize'.
@pytest.mark.parametrize('left, right, expected', ((MutationResult.SUCCESS, MutationResult.SUCCESS, MutationResult.SUCCESS), (MutationResult.FAILURE, MutationResult.SUCCESS, MutationResult.SUCCESS), (MutationResult.SUCCESS, MutationResult.FAILURE, MutationResult.SUCCESS), (MutationResult.FAILURE, MutationResult.FAILURE, MutationResult.FAILURE)))
def test_mutation_result_success(left, right, expected):
    """MutationResult `|` is SUCCESS-dominant, for both the binary and in-place forms."""
    assert ((left | right) == expected)
    left |= right
    assert (left == expected)
class GPTJ(CausalModel):
    """Causal LM wrapper for GPT-J, delegating engine setup to the CausalModel base."""
    # Registry key used to look this model configuration up by name.
    config_name: str = 'gptj'

    def __init__(self, weights_path: Optional[str]=None):
        # NOTE(review): the *engine's* config_name is passed to the base, not this
        # class's — presumably the base resolves the engine by that name; confirm.
        super().__init__(GPTJEngine.config_name, weights_path)
def cross_product(R, names=['X', 'Y', 'Z']):
    """Lie algebra of R^3 under the cross product, over the ring R (Sage)."""
    algebra = three_dimensional(R, 1, 1, 1, 0, names=names)
    algebra.rename('Lie algebra of RR^3 under cross product over {}'.format(R))
    return algebra
def convert_LinkProperty(model, prop, kwargs):
    """Convert a LinkProperty model field into a URL-validated text form field."""
    # Enforce URL syntax at the form-validation layer.
    kwargs['validators'].append(validators.url())
    return get_TextField(kwargs)
def detokenize(string: str):
    """Undo whitespace tokenization: reattach punctuation/clitics and repair split contractions.

    Special tokens are masked before the replacements and restored afterwards.
    """
    (string, exceptions) = mask_special_tokens(string)
    clitics_and_punct = ["'d", "n't", "'ve", "'m", "'re", "'ll", '.', ',', '?', '!', "'s", ')', ':', '-']
    for tok in clitics_and_punct:
        # Remove the space the tokenizer inserted before each token.
        string = string.replace(' ' + tok, tok)
    for src, dst in (('( ', '('), ('gon na', 'gonna'), ('wan na', 'wanna')):
        string = string.replace(src, dst)
    return unmask_special_tokens(string, exceptions)
def get_polyphonic_ratio(pianoroll, threshold=2):
    """Fraction of time steps (axis 0) with at least `threshold` simultaneously active notes."""
    notes_per_step = np.sum(pianoroll, 1)
    polyphonic_steps = np.sum(notes_per_step >= threshold)
    return polyphonic_steps / pianoroll.shape[0]
# NOTE(review): the decorator was mangled to '(scope=..., autouse=...)' during
# extraction (invalid syntax); restored to '@pytest.fixture'.
@pytest.fixture(scope='module', autouse=True)
def test_data_csv_png_10():
    """Module-scoped fixture yielding the path of a generated 10x14 test CSV/PNG set."""
    with generate_csv_png('test.csv', 10, 14) as csvfilename:
        yield csvfilename
def ResNeXt_4_20(in_ch=3, in_dim=32):
    """ResNeXt variant: 3 stages of 1 block each, cardinality 4, bottleneck width 20."""
    return ResNeXt(num_blocks=[1, 1, 1], cardinality=4, bottleneck_width=20, in_ch=in_ch, in_dim=in_dim)
def main():
    """Run the VPR tutorial pipeline: load a dataset, compute descriptors, match, and evaluate."""
    parser = argparse.ArgumentParser(description='Visual Place Recognition: A Tutorial. Code repository supplementing our paper.')
    parser.add_argument('--descriptor', type=str, default='HDC-DELF', choices=['HDC-DELF', 'AlexNet', 'NetVLAD', 'PatchNetVLAD', 'CosPlace', 'EigenPlaces', 'SAD'], help='Select descriptor (default: HDC-DELF)')
    parser.add_argument('--dataset', type=str, default='GardensPoint', choices=['GardensPoint', 'StLucia', 'SFU'], help='Select dataset (default: GardensPoint)')
    args = parser.parse_args()
    print(' Start VPR with {} descriptor on dataset {}'.format(args.descriptor, args.dataset))
    print('===== Load dataset')
    if (args.dataset == 'GardensPoint'):
        dataset = GardensPointDataset()
    elif (args.dataset == 'StLucia'):
        dataset = StLuciaDataset()
    elif (args.dataset == 'SFU'):
        dataset = SFUDataset()
    else:
        raise ValueError(('Unknown dataset: ' + args.dataset))
    (imgs_db, imgs_q, GThard, GTsoft) = dataset.load()
    # Feature extractors are imported lazily so only the selected backend's
    # dependencies need to be installed.
    if (args.descriptor == 'HDC-DELF'):
        from feature_extraction.feature_extractor_holistic import HDCDELF
        feature_extractor = HDCDELF()
    elif (args.descriptor == 'AlexNet'):
        from feature_extraction.feature_extractor_holistic import AlexNetConv3Extractor
        feature_extractor = AlexNetConv3Extractor()
    elif (args.descriptor == 'SAD'):
        from feature_extraction.feature_extractor_holistic import SAD
        feature_extractor = SAD()
    elif ((args.descriptor == 'NetVLAD') or (args.descriptor == 'PatchNetVLAD')):
        from feature_extraction.feature_extractor_patchnetvlad import PatchNetVLADFeatureExtractor
        from patchnetvlad.tools import PATCHNETVLAD_ROOT_DIR
        if (args.descriptor == 'NetVLAD'):
            configfile = os.path.join(PATCHNETVLAD_ROOT_DIR, 'configs/netvlad_extract.ini')
        else:
            configfile = os.path.join(PATCHNETVLAD_ROOT_DIR, 'configs/speed.ini')
        assert os.path.isfile(configfile)
        config = configparser.ConfigParser()
        config.read(configfile)
        feature_extractor = PatchNetVLADFeatureExtractor(config)
    elif (args.descriptor == 'CosPlace'):
        from feature_extraction.feature_extractor_cosplace import CosPlaceFeatureExtractor
        feature_extractor = CosPlaceFeatureExtractor()
    elif (args.descriptor == 'EigenPlaces'):
        from feature_extraction.feature_extractor_eigenplaces import EigenPlacesFeatureExtractor
        feature_extractor = EigenPlacesFeatureExtractor()
    else:
        raise ValueError(('Unknown descriptor: ' + args.descriptor))
    if ((args.descriptor != 'PatchNetVLAD') and (args.descriptor != 'SAD')):
        # Holistic descriptors: cosine similarity of L2-normalized features.
        print('===== Compute reference set descriptors')
        db_D_holistic = feature_extractor.compute_features(imgs_db)
        print('===== Compute query set descriptors')
        q_D_holistic = feature_extractor.compute_features(imgs_q)
        print('===== Compute cosine similarities S')
        db_D_holistic = (db_D_holistic / np.linalg.norm(db_D_holistic, axis=1, keepdims=True))
        q_D_holistic = (q_D_holistic / np.linalg.norm(q_D_holistic, axis=1, keepdims=True))
        S = np.matmul(db_D_holistic, q_D_holistic.transpose())
    elif (args.descriptor == 'SAD'):
        print('===== Compute reference set descriptors')
        db_D_holistic = feature_extractor.compute_features(imgs_db)
        print('===== Compute query set descriptors')
        q_D_holistic = feature_extractor.compute_features(imgs_q)
        print('===== Compute similarities S from sum of absolute differences (SAD)')
        # Negative mean absolute difference per pair, ignoring NaN entries.
        S = np.empty([len(imgs_db), len(imgs_q)], 'float32')
        for i in range(S.shape[0]):
            for j in range(S.shape[1]):
                diff = (db_D_holistic[i] - q_D_holistic[j])
                dim = (len(db_D_holistic[0]) - np.sum(np.isnan(diff)))
                diff[np.isnan(diff)] = 0
                S[(i, j)] = ((- np.sum(np.abs(diff))) / dim)
    else:
        print('=== WARNING: The PatchNetVLAD code in this repository is not optimised and will be slow and memory consuming.')
        print('===== Compute reference set descriptors')
        (db_D_holistic, db_D_patches) = feature_extractor.compute_features(imgs_db)
        print('===== Compute query set descriptors')
        (q_D_holistic, q_D_patches) = feature_extractor.compute_features(imgs_q)
        S = feature_extractor.local_matcher_from_numpy_single_scale(q_D_patches, db_D_patches)
    fig = plt.figure()
    plt.imshow(S)
    plt.axis('off')
    plt.title('Similarity matrix S')
    print('===== Match images')
    # M1: single best match per query; M2: automatic-threshold binarization.
    M1 = matching.best_match_per_query(S)
    M2 = matching.thresholding(S, 'auto')
    TP = np.argwhere((M2 & GThard))
    FP = np.argwhere((M2 & (~ GTsoft)))
    print('===== Evaluation')
    show_correct_and_wrong_matches.show(imgs_db, imgs_q, TP, FP)
    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    ax1.imshow(M1)
    ax1.axis('off')
    ax1.set_title('Best match per query')
    ax2 = fig.add_subplot(122)
    ax2.imshow(M2)
    ax2.axis('off')
    ax2.set_title('Thresholding S>=thresh')
    # Precision/recall curve, AUC, recall@100% precision, and recall@K.
    (P, R) = createPR(S, GThard, GTsoft, matching='multi', n_thresh=100)
    plt.figure()
    plt.plot(R, P)
    (plt.xlim(0, 1), plt.ylim(0, 1.01))
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Result on GardensPoint day_right--night_right')
    plt.grid('on')
    plt.draw()
    AUC = np.trapz(P, R)
    print(f''' ===== AUC (area under curve): {AUC:.3f}''')
    maxR = recallAt100precision(S, GThard, GTsoft, matching='multi', n_thresh=100)
    print(f''' ===== (maximum recall at 100% precision): {maxR:.2f}''')
    RatK = {}
    for K in [1, 5, 10]:
        RatK[K] = recallAtK(S, GThard, GTsoft, K=K)
    print(f''' ===== () -- : {RatK[1]:.3f}, : {RatK[5]:.3f}, : {RatK[10]:.3f}''')
    plt.show()
class AllPoleDigitalFilter(nn.Module):
    """Applies a time-varying all-pole (LPC synthesis) filter to an excitation signal."""

    def __init__(self, filter_order, frame_period, ignore_gain=False):
        super(AllPoleDigitalFilter, self).__init__()
        self.filter_order = filter_order
        self.frame_period = frame_period
        self.ignore_gain = ignore_gain
        assert (0 <= self.filter_order)
        # Upsamples frame-rate coefficients to sample rate.
        self.linear_intpl = LinearInterpolation(self.frame_period)

    def forward(self, x, a):
        """Filter excitation x with frame-wise LPC coefficients a = [K, a1..aM] per frame."""
        check_size(a.size((- 1)), (self.filter_order + 1), 'dimension of LPC coefficients')
        # x must cover exactly frame_period samples per coefficient frame.
        check_size(x.size((- 1)), (a.size((- 2)) * self.frame_period), 'sequence length')
        d = x.dim()
        if (d == 1):
            # Add a batch dimension for unbatched input; removed again below.
            a = a.unsqueeze(0)
            x = x.unsqueeze(0)
        a = self.linear_intpl(a)
        # Split the per-sample gain K from the denominator coefficients.
        (K, a) = torch.split(a, [1, self.filter_order], dim=(- 1))
        if (not self.ignore_gain):
            x = (K[(..., 0)] * x)
        y = sample_wise_lpc(x, a)
        if (d == 1):
            y = y.squeeze(0)
        return y
def main(pretrained_model_path: str, output_dir: str, train_data: Dict, validation_data: Dict, validation_steps: int=100, trainable_modules: Tuple[str]=('attn1.to_q', 'attn2.to_q', 'attn_temp'), train_batch_size: int=1, max_train_steps: int=500, learning_rate: float=3e-05, scale_lr: bool=False, lr_scheduler: str='constant', lr_warmup_steps: int=0, adam_beta1: float=0.9, adam_beta2: float=0.999, adam_weight_decay: float=0.01, adam_epsilon: float=1e-08, max_grad_norm: float=1.0, gradient_accumulation_steps: int=1, gradient_checkpointing: bool=True, checkpointing_steps: int=500, resume_from_checkpoint: Optional[str]=None, mixed_precision: Optional[str]='fp16', use_8bit_adam: bool=False, enable_xformers_memory_efficient_attention: bool=True, seed: Optional[int]=None):
    """Fine-tune a Tune-A-Video 3D UNet on one video+prompt with an accelerate training loop.

    Only modules whose names end with an entry of trainable_modules are trained;
    VAE, text encoder, and the rest of the UNet stay frozen. Periodically saves
    accelerate state checkpoints and runs DDIM-inversion-based validation sampling.
    """
    # Capture all call arguments so the run configuration can be saved to disk.
    (*_, config) = inspect.getargvalues(inspect.currentframe())
    accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps, mixed_precision=mixed_precision)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()
    if (seed is not None):
        set_seed(seed)
    if accelerator.is_main_process:
        os.makedirs(output_dir, exist_ok=True)
        os.makedirs(f'{output_dir}/samples', exist_ok=True)
        os.makedirs(f'{output_dir}/inv_latents', exist_ok=True)
        OmegaConf.save(config, os.path.join(output_dir, 'config.yaml'))
    # Load the frozen pretrained components and the trainable 3D UNet.
    noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_path, subfolder='scheduler')
    tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder='tokenizer')
    text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder='text_encoder')
    vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder='vae')
    unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder='unet')
    vae.requires_grad_(False)
    text_encoder.requires_grad_(False)
    unet.requires_grad_(False)
    # Unfreeze only the attention sub-modules listed in trainable_modules.
    for (name, module) in unet.named_modules():
        if name.endswith(tuple(trainable_modules)):
            for params in module.parameters():
                params.requires_grad = True
    if enable_xformers_memory_efficient_attention:
        if is_xformers_available():
            unet.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError('xformers is not available. Make sure it is installed correctly')
    if gradient_checkpointing:
        unet.enable_gradient_checkpointing()
    if scale_lr:
        # Scale the LR with effective batch size across accumulation and processes.
        learning_rate = (((learning_rate * gradient_accumulation_steps) * train_batch_size) * accelerator.num_processes)
    if use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError('Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`')
        optimizer_cls = bnb.optim.AdamW8bit
    else:
        optimizer_cls = torch.optim.AdamW
    optimizer = optimizer_cls(unet.parameters(), lr=learning_rate, betas=(adam_beta1, adam_beta2), weight_decay=adam_weight_decay, eps=adam_epsilon)
    train_dataset = TuneAVideoDataset(**train_data)
    # The single training prompt is tokenized once up front.
    train_dataset.prompt_ids = tokenizer(train_dataset.prompt, max_length=tokenizer.model_max_length, padding='max_length', truncation=True, return_tensors='pt').input_ids[0]
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=train_batch_size)
    validation_pipeline = TuneAVideoPipeline(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=DDIMScheduler.from_pretrained(pretrained_model_path, subfolder='scheduler'))
    validation_pipeline.enable_vae_slicing()
    ddim_inv_scheduler = DDIMScheduler.from_pretrained(pretrained_model_path, subfolder='scheduler')
    ddim_inv_scheduler.set_timesteps(validation_data.num_inv_steps)
    lr_scheduler = get_scheduler(lr_scheduler, optimizer=optimizer, num_warmup_steps=(lr_warmup_steps * gradient_accumulation_steps), num_training_steps=(max_train_steps * gradient_accumulation_steps))
    (unet, optimizer, train_dataloader, lr_scheduler) = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)
    # Frozen components run in the (possibly reduced) mixed-precision dtype.
    weight_dtype = torch.float32
    if (accelerator.mixed_precision == 'fp16'):
        weight_dtype = torch.float16
    elif (accelerator.mixed_precision == 'bf16'):
        weight_dtype = torch.bfloat16
    text_encoder.to(accelerator.device, dtype=weight_dtype)
    vae.to(accelerator.device, dtype=weight_dtype)
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / gradient_accumulation_steps))
    num_train_epochs = math.ceil((max_train_steps / num_update_steps_per_epoch))
    if accelerator.is_main_process:
        accelerator.init_trackers('text2video-fine-tune')
    total_batch_size = ((train_batch_size * accelerator.num_processes) * gradient_accumulation_steps)
    logger.info('***** Running training *****')
    logger.info(f' Num examples = {len(train_dataset)}')
    logger.info(f' Num Epochs = {num_train_epochs}')
    logger.info(f' Instantaneous batch size per device = {train_batch_size}')
    logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
    logger.info(f' Gradient Accumulation steps = {gradient_accumulation_steps}')
    logger.info(f' Total optimization steps = {max_train_steps}')
    global_step = 0
    first_epoch = 0
    if resume_from_checkpoint:
        if (resume_from_checkpoint != 'latest'):
            path = os.path.basename(resume_from_checkpoint)
        else:
            # Pick the newest checkpoint-<step> directory by step number.
            dirs = os.listdir(output_dir)
            dirs = [d for d in dirs if d.startswith('checkpoint')]
            dirs = sorted(dirs, key=(lambda x: int(x.split('-')[1])))
            path = dirs[(- 1)]
        accelerator.print(f'Resuming from checkpoint {path}')
        accelerator.load_state(os.path.join(output_dir, path))
        global_step = int(path.split('-')[1])
        first_epoch = (global_step // num_update_steps_per_epoch)
        resume_step = (global_step % num_update_steps_per_epoch)
    progress_bar = tqdm(range(global_step, max_train_steps), disable=(not accelerator.is_local_main_process))
    progress_bar.set_description('Steps')
    for epoch in range(first_epoch, num_train_epochs):
        unet.train()
        train_loss = 0.0
        for (step, batch) in enumerate(train_dataloader):
            # Skip already-seen steps when resuming mid-epoch.
            if (resume_from_checkpoint and (epoch == first_epoch) and (step < resume_step)):
                if ((step % gradient_accumulation_steps) == 0):
                    progress_bar.update(1)
                continue
            with accelerator.accumulate(unet):
                # Encode all frames through the VAE, then regroup into a video latent.
                pixel_values = batch['pixel_values'].to(weight_dtype)
                video_length = pixel_values.shape[1]
                pixel_values = rearrange(pixel_values, 'b f c h w -> (b f) c h w')
                latents = vae.encode(pixel_values).latent_dist.sample()
                latents = rearrange(latents, '(b f) c h w -> b c f h w', f=video_length)
                # 0.18215 is the SD VAE latent scaling factor.
                latents = (latents * 0.18215)
                noise = torch.randn_like(latents)
                bsz = latents.shape[0]
                timesteps = torch.randint(0, noise_scheduler.num_train_timesteps, (bsz,), device=latents.device)
                timesteps = timesteps.long()
                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
                encoder_hidden_states = text_encoder(batch['prompt_ids'])[0]
                # Target depends on the scheduler's configured prediction type.
                if (noise_scheduler.prediction_type == 'epsilon'):
                    target = noise
                elif (noise_scheduler.prediction_type == 'v_prediction'):
                    target = noise_scheduler.get_velocity(latents, noise, timesteps)
                else:
                    raise ValueError(f'Unknown prediction type {noise_scheduler.prediction_type}')
                model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
                loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
                # Average the loss across processes for logging.
                avg_loss = accelerator.gather(loss.repeat(train_batch_size)).mean()
                train_loss += (avg_loss.item() / gradient_accumulation_steps)
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    accelerator.clip_grad_norm_(unet.parameters(), max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            # sync_gradients marks the end of an accumulated optimizer step.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1
                accelerator.log({'train_loss': train_loss}, step=global_step)
                train_loss = 0.0
                if ((global_step % checkpointing_steps) == 0):
                    if accelerator.is_main_process:
                        save_path = os.path.join(output_dir, f'checkpoint-{global_step}')
                        accelerator.save_state(save_path)
                        logger.info(f'Saved state to {save_path}')
                if ((global_step % validation_steps) == 0):
                    if accelerator.is_main_process:
                        samples = []
                        generator = torch.Generator(device=latents.device)
                        generator.manual_seed(seed)
                        # Optionally start sampling from the DDIM-inverted training latent.
                        ddim_inv_latent = None
                        if validation_data.use_inv_latent:
                            inv_latents_path = os.path.join(output_dir, f'inv_latents/ddim_latent-{global_step}.pt')
                            ddim_inv_latent = ddim_inversion(validation_pipeline, ddim_inv_scheduler, video_latent=latents, num_inv_steps=validation_data.num_inv_steps, prompt='')[(- 1)].to(weight_dtype)
                            torch.save(ddim_inv_latent, inv_latents_path)
                        for (idx, prompt) in enumerate(validation_data.prompts):
                            sample = validation_pipeline(prompt, generator=generator, latents=ddim_inv_latent, **validation_data).videos
                            save_videos_grid(sample, f'{output_dir}/samples/sample-{global_step}/{prompt}.gif')
                            samples.append(sample)
                        samples = torch.concat(samples)
                        save_path = f'{output_dir}/samples/sample-{global_step}.gif'
                        save_videos_grid(samples, save_path)
                        logger.info(f'Saved samples to {save_path}')
            logs = {'step_loss': loss.detach().item(), 'lr': lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            if (global_step >= max_train_steps):
                break
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        # Save the final fine-tuned pipeline.
        unet = accelerator.unwrap_model(unet)
        pipeline = TuneAVideoPipeline.from_pretrained(pretrained_model_path, text_encoder=text_encoder, vae=vae, unet=unet)
        pipeline.save_pretrained(output_dir)
    accelerator.end_training()
def _process_dataset(name, directory, num_shards, labels_file):
    """Build a sharded dataset from the labeled images under *directory*.

    Args:
        name: dataset split name (e.g. 'train', 'validation').
        directory: root directory containing the image files.
        num_shards: number of output shards to write.
        labels_file: path to the file listing the label names.
    """
    # _find_image_files returns (filenames, texts, labels) in lockstep order.
    image_data = _find_image_files(directory, labels_file)
    _process_image_files(name, *image_data, num_shards)
def directedLogContagion(G, A):
    """Directed log-contagion score of activation pattern A on graph G.

    For every active node i (A[i] == 1) this adds
    log(1 + #active out-neighbours) and log(1 + #active in-neighbours).
    `A` maps node ids to 0/1 activity flags.
    """
    total = 0
    # out-neighbour contribution of each active node
    for node in G.nodeIterator():
        if A[node] == 1:
            active_out = sum(1 for u in G.outIterator(node) if A[u] == 1)
            total += log(active_out + 1)
    # in-neighbour contribution of each active node
    for node in G.nodeIterator():
        if A[node] == 1:
            active_in = sum(1 for u in G.inIterator(node) if A[u] == 1)
            total += log(active_in + 1)
    return total
def check_fit_args(distfn, arg, rvs):
    """Smoke-test ``distfn.fit`` on *rvs*.

    Fits with the default optimizer and with Powell, and asserts both return
    loc, scale plus one value per shape parameter in *arg*.
    """
    known_noise = (
        (DeprecationWarning, '.*frechet_'),
        (RuntimeWarning, 'The shape parameter of the erlang'),
        (RuntimeWarning, 'floating point number truncated'),
    )
    with np.errstate(all='ignore'), suppress_warnings() as sup:
        # silence warnings that some distributions emit during fitting
        for category, message in known_noise:
            sup.filter(category=category, message=message)
        default_fit = distfn.fit(rvs)
        powell_fit = distfn.fit(rvs, optimizer='powell')
        expected_len = 2 + len(arg)  # shapes + loc + scale
        npt.assert_(len(default_fit) == expected_len)
        npt.assert_(len(powell_fit) == expected_len)
def get_max_norm(data, ord=2):
    """Return the maximum row-wise vector norm of a 2-D matrix.

    Args:
        data: a ``scipy.sparse.csr_matrix`` or a dense 2-D array-like.
        ord: norm order passed to ``numpy.linalg.norm`` (e.g. 2, ``np.inf``).

    Returns:
        The largest norm over all rows.
    """
    if isinstance(data, csr_matrix):
        if ord == np.inf:
            # Bug fix: the inf-norm of a row is the maximum *absolute* value.
            # Using the raw stored values (`data.data`) returned a wrong
            # result whenever the matrix contained negative entries.
            norms = np.abs(data.data)
        else:
            # Only the stored (nonzero) entries of each row contribute to
            # a p-norm, so the per-row slice of .data is sufficient.
            norms = [np.linalg.norm(data.getrow(row_num).data, ord=ord)
                     for row_num in range(data.shape[0])]
    else:
        # np.asarray instead of the fragile `.data` memoryview attribute.
        norms = np.linalg.norm(np.asarray(data), axis=1, ord=ord)
    return np.max(norms)
class OrlikTeraoInvariantAlgebra(FiniteDimensionalInvariantModule):
    """Subalgebra of an Orlik-Terao algebra fixed by a (semi)group action.

    The basis is assembled degree by degree: each homogeneous component of
    the ambient Orlik-Terao algebra ``OT`` is intersected with the invariants
    under ``G``, and the lifted basis vectors are glued into one submodule
    with basis.
    """

    def __init__(self, R, M, G, action_on_groundset=None, *args, **kwargs):
        """Build the invariant algebra.

        Args:
            R: base ring.
            M: the matroid defining the Orlik-Terao algebra.
            G: the acting (semi)group.
            action_on_groundset: optional ``(g, x) -> g*x`` action on the
                matroid groundset; defaults to calling ``g(x)``.
        """
        ordering = kwargs.pop('ordering', None)
        OT = OrlikTeraoAlgebra(R, M, ordering)
        self._ambient = OT
        if (action_on_groundset is None):
            # default: group elements act on groundset elements by call
            def action_on_groundset(g, x):
                return g(x)
        self._groundset_action = action_on_groundset
        self._side = kwargs.pop('side', 'left')
        if ('category' in kwargs):
            category = kwargs.pop('category')
        else:
            from sage.categories.modules import Modules
            category = Modules(R).FiniteDimensional().WithBasis().Subobjects()
        # lift the groundset action to OT elements, monomial by monomial
        def action(g, m):
            return OT.sum(((c * self._basis_action(g, x)) for (x, c) in m._monomial_coefficients.items()))
        self._action = action
        max_deg = max([b.degree() for b in OT.basis()])
        B = []
        # collect an invariant basis from each homogeneous component
        for d in range((max_deg + 1)):
            OT_d = OT.homogeneous_component(d)
            OTG_d = OT_d.invariant_module(G, action=action, category=category)
            B += [OT_d.lift(OTG_d.lift(b)) for b in OTG_d.basis()]
        from sage.modules.with_basis.subquotient import SubmoduleWithBasis
        SubmoduleWithBasis.__init__(self, Family(B), *args, support_order=OT._compute_support_order(B), ambient=OT, unitriangular=False, category=category, **kwargs)
        self._semigroup = G

    def construction(self):
        """No functorial construction is associated with this algebra."""
        return None

    def _basis_action(self, g, f):
        """Act with ``g`` on the basis element indexed by the frozenset ``f``.

        The empty set maps to itself; otherwise the image subset is resolved
        through ``OT.subset_image`` (which handles non-basis subsets).
        """
        OT = self._ambient
        if (not f):
            return OT(f)
        fset = frozenset((self._groundset_action(g, e) for e in f))
        return OT.subset_image(fset)
def batchnorm_flop_jit(inputs, outputs):
    """FLOP counter for a batch-norm op.

    Counts 4 FLOPs per input element (normalize: subtract mean, divide by
    std; affine: scale, shift). Input must be 2-D to 5-D.
    """
    shape = get_shape(inputs[0])
    assert 2 <= len(shape) <= 5
    return Counter({'batchnorm': 4 * prod(shape)})
# NOTE(review): the '@' decorators were stripped by the extraction that
# produced this file, leaving bare argument tuples. They are reconstructed
# here as the standard click command/option/pass_context stack (click is
# already in scope — the options reference click.STRING).
@click.command('time')
@click.option('--start', '-s', metavar='TIMECODE', type=click.STRING, default='0', show_default=True, help='Time in video to begin detecting scenes. TIMECODE can be specified as exact number of frames (-s 100 to start at frame 100), time in seconds followed by s (-s 100s to start at 100 seconds), or a timecode in the format HH:MM:SS or HH:MM:SS.nnn (-s 00:01:40 to start at 1m40s).')
@click.option('--duration', '-d', metavar='TIMECODE', type=click.STRING, default=None, help='Maximum time in video to process. TIMECODE format is the same as other arguments. Mutually exclusive with --end / -e.')
@click.option('--end', '-e', metavar='TIMECODE', type=click.STRING, default=None, help='Time in video to end detecting scenes. TIMECODE format is the same as other arguments. Mutually exclusive with --duration / -d.')
@click.pass_context
def time_command(ctx, start, duration, end):
    """Set the time span of the video to process (start / duration / end)."""
    # parse_timecode resolves frames / seconds / HH:MM:SS strings against
    # the video's framerate stored on ctx.obj.
    start = parse_timecode(ctx.obj, start)
    duration = parse_timecode(ctx.obj, duration)
    end = parse_timecode(ctx.obj, end)
    ctx.obj.time_command(start, duration, end)
class SkipDeclarations(object):
    """Visitor mixin that leaves C declaration nodes untouched.

    Each ``visit_*`` method returns the node unchanged, which stops the
    tree transform from descending into declaration subtrees — they need
    no further processing by transforms that mix this class in.
    """

    def visit_CTypeDefNode(self, node):
        # ctypedef statements: keep as-is, do not recurse
        return node

    def visit_CVarDefNode(self, node):
        # cdef variable declarations: keep as-is
        return node

    def visit_CDeclaratorNode(self, node):
        # declarator (name/pointer/array) nodes: keep as-is
        return node

    def visit_CBaseTypeNode(self, node):
        # base type specifiers: keep as-is
        return node

    def visit_CEnumDefNode(self, node):
        # cdef enum definitions: keep as-is
        return node

    def visit_CStructOrUnionDefNode(self, node):
        # cdef struct/union definitions: keep as-is
        return node
def load_archive(archive_file: str, cuda_device: int = (-1), overrides: str = '') -> Archive:
    """Load a model archive (config + weights) and return an ``Archive``.

    Args:
        archive_file: path or URL of the ``tar.gz`` archive.
        cuda_device: device to load the model onto (-1 for CPU).
        overrides: HOCON string of config overrides.

    Returns:
        ``Archive(model=..., config=...)``.
    """
    archive_file = cached_path(archive_file)
    tempdir = tempfile.mkdtemp()
    logger.info('extracting archive file %s to temp dir %s', archive_file, tempdir)
    # Bug fix: the temp dir used to leak if extraction or Model.load raised;
    # the try/finally guarantees cleanup on every path.
    try:
        with tarfile.open(archive_file, 'r:gz') as archive:
            # NOTE(review): extractall on an untrusted archive is vulnerable
            # to path traversal — acceptable only for trusted archives.
            archive.extractall(tempdir)
        fta_filename = os.path.join(tempdir, _FTA_NAME)
        if os.path.exists(fta_filename):
            # the archive carries extra files; rewrite their config entries
            # to point at the extracted copies
            with open(fta_filename, 'r') as fta_file:
                files_to_archive = json.loads(fta_file.read())
            replacement_hocon = pyhocon.ConfigTree(root=True)
            for key in files_to_archive:
                replacement_filename = os.path.join(tempdir, f'fta/{key}')
                replacement_hocon.put(key, replacement_filename)
            # user overrides take precedence as a fallback layer
            overrides_hocon = pyhocon.ConfigFactory.parse_string(overrides)
            combined_hocon = replacement_hocon.with_fallback(overrides_hocon)
            overrides = json.dumps(combined_hocon)
        config = Params.from_file(os.path.join(tempdir, _CONFIG_NAME), overrides)
        config.loading_from_archive = True
        model = Model.load(config.duplicate(), weights_file=os.path.join(tempdir, _WEIGHTS_NAME), serialization_dir=tempdir, cuda_device=cuda_device)
    finally:
        shutil.rmtree(tempdir, ignore_errors=True)
    return Archive(model=model, config=config)
def register_Ns3Mac16AddressChecker_methods(root_module, cls):
    """Register the constructors of ns3::Mac16AddressChecker on *cls*.

    Adds the default constructor and the copy constructor; no other
    methods are exposed for this checker type.
    """
    constructor_signatures = (
        [],  # default constructor
        [param('ns3::Mac16AddressChecker const &', 'arg0')],  # copy constructor
    )
    for signature in constructor_signatures:
        cls.add_constructor(signature)
    return
def test_multivariatenormaltril_layer_fails_to_serialilze() -> None:
    """Document that MultivariateNormalTriL cannot round-trip through Keras
    (de)serialization — the whole round-trip is expected to raise.

    NOTE: the 'serialilze' typo in the name is kept on purpose; renaming
    would change the test's discovery id.
    """
    mvn_layer = tfp.layers.MultivariateNormalTriL(1)
    with pytest.raises(Exception):
        payload = tf.keras.utils.serialize_keras_object(mvn_layer)
        tf.keras.utils.deserialize_keras_object(
            payload,
            custom_objects={'MultivariateNormalTriL': tfp.layers.MultivariateNormalTriL})
class MultiTableMetadata():
    """Metadata for a multi-table (relational) dataset.

    Holds one ``SingleTableMetadata`` per table plus a list of
    parent/child relationships, and provides validation, detection from
    dataframes/CSVs, visualization and (de)serialization.

    NOTE(review): several methods below are defined without ``self``/with
    ``cls`` but carry no decorator — ``@staticmethod``/``@classmethod``
    decorators were most likely stripped by the extraction that produced
    this file. Each such method is flagged inline; confirm against upstream.
    """

    # version tag embedded in every serialized metadata dict
    METADATA_SPEC_VERSION = 'MULTI_TABLE_V1'

    def __init__(self):
        # table name -> SingleTableMetadata
        self.tables = {}
        # list of dicts with parent/child table names and key columns
        self.relationships = []

    def _validate_missing_relationship_keys(self, parent_table_name, parent_primary_key, child_table_name, child_foreign_key):
        """Ensure the referenced primary/foreign key columns actually exist."""
        parent_table = self.tables.get(parent_table_name)
        child_table = self.tables.get(child_table_name)
        if (parent_table.primary_key is None):
            raise InvalidMetadataError(f"The parent table '{parent_table_name}' does not have a primary key set. Please use 'set_primary_key' in order to set one.")
        missing_keys = set()
        parent_primary_key = cast_to_iterable(parent_primary_key)
        table_primary_keys = set(cast_to_iterable(parent_table.primary_key))
        for key in parent_primary_key:
            if (key not in table_primary_keys):
                missing_keys.add(key)
        if missing_keys:
            raise InvalidMetadataError(f'Relationship between tables ({parent_table_name}, {child_table_name}) contains an unknown primary key {missing_keys}.')
        # missing_keys is empty here, so it can be reused for the FK check
        for key in set(cast_to_iterable(child_foreign_key)):
            if (key not in child_table.columns):
                missing_keys.add(key)
        if missing_keys:
            raise InvalidMetadataError(f'Relationship between tables ({parent_table_name}, {child_table_name}) contains an unknown foreign key {missing_keys}.')

    # NOTE(review): no `self` but called as a bound method — likely a
    # stripped @staticmethod; confirm upstream.
    def _validate_no_missing_tables_in_relationship(parent_table_name, child_table_name, tables):
        """Ensure both endpoints of a relationship are known tables."""
        missing_table_names = ({parent_table_name, child_table_name} - set(tables))
        if missing_table_names:
            if (len(missing_table_names) == 1):
                raise InvalidMetadataError(f'Relationship contains an unknown table {missing_table_names}.')
            else:
                raise InvalidMetadataError(f'Relationship contains unknown tables {missing_table_names}.')

    # NOTE(review): no `self` — likely a stripped @staticmethod.
    def _validate_relationship_key_length(parent_table_name, parent_primary_key, child_table_name, child_foreign_key):
        """Ensure primary and foreign key have the same number of columns."""
        pk_len = len(set(cast_to_iterable(parent_primary_key)))
        fk_len = len(set(cast_to_iterable(child_foreign_key)))
        if (pk_len != fk_len):
            raise InvalidMetadataError(f"Relationship between tables ('{parent_table_name}', '{child_table_name}') is invalid. Primary key has length {pk_len} but the foreign key has length {fk_len}.")

    def _validate_relationship_sdtypes(self, parent_table_name, parent_primary_key, child_table_name, child_foreign_key):
        """Ensure each PK/FK column pair shares the same sdtype."""
        parent_table_columns = self.tables.get(parent_table_name).columns
        child_table_columns = self.tables.get(child_table_name).columns
        parent_primary_key = cast_to_iterable(parent_primary_key)
        child_foreign_key = cast_to_iterable(child_foreign_key)
        for (pk, fk) in zip(parent_primary_key, child_foreign_key):
            if (parent_table_columns[pk]['sdtype'] != child_table_columns[fk]['sdtype']):
                raise InvalidMetadataError(f"Relationship between tables ('{parent_table_name}', '{child_table_name}') is invalid. The primary and foreign key columns are not the same type.")

    def _validate_circular_relationships(self, parent, children=None, parents=None, child_map=None, errors=None):
        """DFS from *parent*; append *parent* to *errors* when it can reach itself."""
        parents = (set() if (parents is None) else parents)
        if (children is None):
            children = child_map[parent]
        if (parent in children):
            errors.append(parent)
        for child in children:
            if (child in parents):
                break
            parents.add(child)
            self._validate_circular_relationships(parent, children=child_map.get(child, set()), child_map=child_map, parents=parents, errors=errors)

    def _validate_child_map_circular_relationship(self, child_map):
        """Raise when the child map contains a circular dependency."""
        errors = []
        for table_name in self.tables.keys():
            self._validate_circular_relationships(table_name, child_map=child_map, errors=errors)
        if errors:
            raise InvalidMetadataError(f'The relationships in the dataset describe a circular dependency between tables {errors}.')

    def _validate_foreign_child_key(self, child_table_name, parent_table_name, child_foreign_key):
        """Forbid a foreign key that overlaps the child table's primary key."""
        child_primary_key = cast_to_iterable(self.tables[child_table_name].primary_key)
        child_foreign_key = cast_to_iterable(child_foreign_key)
        if set(child_foreign_key).intersection(set(child_primary_key)):
            raise InvalidMetadataError(f"Invalid relationship between table '{parent_table_name}' and table '{child_table_name}'. A relationship must connect a primary key with a non-primary key.")

    def _validate_relationship_does_not_exist(self, parent_table_name, parent_primary_key, child_table_name, child_foreign_key):
        """Raise when an identical relationship was already registered."""
        for relationship in self.relationships:
            already_exists = ((relationship['parent_table_name'] == parent_table_name) and (relationship['parent_primary_key'] == parent_primary_key) and (relationship['child_table_name'] == child_table_name) and (relationship['child_foreign_key'] == child_foreign_key))
            if already_exists:
                raise InvalidMetadataError('This relationship has already been added.')

    def _validate_relationship(self, parent_table_name, child_table_name, parent_primary_key, child_foreign_key):
        """Run all structural checks for one relationship."""
        self._validate_no_missing_tables_in_relationship(parent_table_name, child_table_name, self.tables.keys())
        self._validate_missing_relationship_keys(parent_table_name, parent_primary_key, child_table_name, child_foreign_key)
        self._validate_relationship_key_length(parent_table_name, parent_primary_key, child_table_name, child_foreign_key)
        self._validate_foreign_child_key(child_table_name, parent_table_name, child_foreign_key)
        self._validate_relationship_sdtypes(parent_table_name, parent_primary_key, child_table_name, child_foreign_key)

    def _get_parent_map(self):
        """Return child table -> set of parent tables."""
        parent_map = defaultdict(set)
        for relation in self.relationships:
            parent_name = relation['parent_table_name']
            child_name = relation['child_table_name']
            parent_map[child_name].add(parent_name)
        return parent_map

    def _get_child_map(self):
        """Return parent table -> set of child tables."""
        child_map = defaultdict(set)
        for relation in self.relationships:
            parent_name = relation['parent_table_name']
            child_name = relation['child_table_name']
            child_map[parent_name].add(child_name)
        return child_map

    def _get_foreign_keys(self, parent_table_name, child_table_name):
        """Return copies of the foreign keys linking the given parent/child pair."""
        foreign_keys = []
        for relation in self.relationships:
            if ((parent_table_name == relation['parent_table_name']) and (child_table_name == relation['child_table_name'])):
                foreign_keys.append(deepcopy(relation['child_foreign_key']))
        return foreign_keys

    def _get_all_foreign_keys(self, table_name):
        """Return copies of all foreign keys where *table_name* is the child."""
        foreign_keys = []
        for relation in self.relationships:
            if (table_name == relation['child_table_name']):
                foreign_keys.append(deepcopy(relation['child_foreign_key']))
        return foreign_keys

    def add_relationship(self, parent_table_name, child_table_name, parent_primary_key, child_foreign_key):
        """Validate and register a parent→child relationship."""
        self._validate_relationship(parent_table_name, child_table_name, parent_primary_key, child_foreign_key)
        # simulate the addition on a copy of the child map to detect cycles
        child_map = self._get_child_map()
        child_map[parent_table_name].add(child_table_name)
        self._validate_relationship_does_not_exist(parent_table_name, parent_primary_key, child_table_name, child_foreign_key)
        self._validate_child_map_circular_relationship(child_map)
        self.relationships.append({'parent_table_name': parent_table_name, 'child_table_name': child_table_name, 'parent_primary_key': deepcopy(parent_primary_key), 'child_foreign_key': deepcopy(child_foreign_key)})

    def remove_relationship(self, parent_table_name, child_table_name):
        """Remove every relationship between the given parent and child tables."""
        relationships_to_remove = []
        for relation in self.relationships:
            if ((relation['parent_table_name'] == parent_table_name) and (relation['child_table_name'] == child_table_name)):
                relationships_to_remove.append(relation)
        if (not relationships_to_remove):
            warning_msg = f"No existing relationships found between parent table '{parent_table_name}' and child table '{child_table_name}'."
            warnings.warn(warning_msg)
        else:
            for relation in relationships_to_remove:
                self.relationships.remove(relation)

    def _validate_table_exists(self, table_name):
        """Raise when *table_name* is not a known table."""
        if (table_name not in self.tables):
            raise InvalidMetadataError(f"Unknown table name ('{table_name}').")

    def add_column(self, table_name, column_name, **kwargs):
        """Add a column to one table's metadata (delegates to SingleTableMetadata)."""
        self._validate_table_exists(table_name)
        table = self.tables.get(table_name)
        table.add_column(column_name, **kwargs)

    def update_column(self, table_name, column_name, **kwargs):
        """Update a column of one table's metadata."""
        self._validate_table_exists(table_name)
        table = self.tables.get(table_name)
        table.update_column(column_name, **kwargs)

    def add_constraint(self, table_name, constraint_name, **kwargs):
        """Add a constraint to one table's metadata."""
        self._validate_table_exists(table_name)
        table = self.tables.get(table_name)
        table.add_constraint(constraint_name, **kwargs)

    def _validate_table_not_detected(self, table_name):
        """Raise when metadata for *table_name* was already created."""
        if (table_name in self.tables):
            raise InvalidMetadataError(f"Metadata for table '{table_name}' already exists. Specify a new table name or create a new MultiTableMetadata object for other data sources.")

    # NOTE(review): no `self` but called via self._log_detected_table —
    # likely a stripped @staticmethod.
    def _log_detected_table(single_table_metadata):
        """Log the detected single-table metadata as pretty-printed JSON."""
        table_dict = single_table_metadata.to_dict()
        table_dict.pop('METADATA_SPEC_VERSION', None)
        table_json = json.dumps(table_dict, indent=4)
        LOGGER.info(f'''Detected metadata: {table_json}''')

    def _validate_all_tables_connected(self, parent_map, child_map):
        """Raise when the relationship graph is not a single connected component."""
        nodes = list(self.tables.keys())
        if (len(nodes) == 1):
            return
        parent_nodes = list(parent_map.keys())
        # BFS/DFS over the undirected relationship graph
        queue = ([parent_nodes[0]] if parent_map else [])
        connected = {table_name: False for table_name in nodes}
        while queue:
            node = queue.pop()
            connected[node] = True
            for child in (list(child_map[node]) + list(parent_map[node])):
                if ((not connected[child]) and (child not in queue)):
                    queue.append(child)
        if (not all(connected.values())):
            disconnected_tables = [table for (table, value) in connected.items() if (not value)]
            if (len(disconnected_tables) > 1):
                table_msg = f'Tables {disconnected_tables} are not connected to any of the other tables.'
            else:
                table_msg = f'Table {disconnected_tables} is not connected to any of the other tables.'
            raise InvalidMetadataError(f'The relationships in the dataset are disjointed. {table_msg}')

    def _detect_relationships(self):
        """Heuristically add relationships where a child column matches a parent PK name."""
        for parent_candidate in self.tables.keys():
            primary_key = self.tables[parent_candidate].primary_key
            for child_candidate in (self.tables.keys() - {parent_candidate}):
                child_meta = self.tables[child_candidate]
                if (primary_key in child_meta.columns.keys()):
                    try:
                        original_foreign_key_sdtype = child_meta.columns[primary_key]['sdtype']
                        if (original_foreign_key_sdtype != 'id'):
                            self.update_column(child_candidate, primary_key, sdtype='id')
                        self.add_relationship(parent_candidate, child_candidate, primary_key, primary_key)
                    except InvalidMetadataError:
                        # roll back the sdtype change when the relationship is rejected
                        self.update_column(child_candidate, primary_key, sdtype=original_foreign_key_sdtype)
                        continue
        try:
            self._validate_all_tables_connected(self._get_parent_map(), self._get_child_map())
        except InvalidMetadataError as invalid_error:
            warning_msg = f'Could not automatically add relationships for all tables. {str(invalid_error)}'
            warnings.warn(warning_msg)

    def detect_table_from_dataframe(self, table_name, data):
        """Detect and store single-table metadata from a dataframe."""
        self._validate_table_not_detected(table_name)
        table = SingleTableMetadata()
        table._detect_columns(data)
        self.tables[table_name] = table
        self._log_detected_table(table)

    def detect_from_dataframes(self, data):
        """Detect metadata for every table in a {name: DataFrame} dict, then relationships."""
        if ((not data) or (not all((isinstance(df, pd.DataFrame) for df in data.values())))):
            raise ValueError('The provided dictionary must contain only pandas DataFrame objects.')
        for (table_name, dataframe) in data.items():
            self.detect_table_from_dataframe(table_name, dataframe)
        self._detect_relationships()

    def detect_table_from_csv(self, table_name, filepath, read_csv_parameters=None):
        """Detect and store single-table metadata from a CSV file."""
        self._validate_table_not_detected(table_name)
        table = SingleTableMetadata()
        data = load_data_from_csv(filepath, read_csv_parameters)
        table._detect_columns(data)
        self.tables[table_name] = table
        self._log_detected_table(table)

    def detect_from_csvs(self, folder_name, read_csv_parameters=None):
        """Detect metadata from every CSV in *folder_name*, then relationships."""
        folder_path = Path(folder_name)
        if folder_path.is_dir():
            csv_files = list(folder_path.rglob('*.csv'))
        else:
            raise ValueError(f"The folder '{folder_name}' does not exist.")
        if (not csv_files):
            raise ValueError(f"No CSV files detected in the folder '{folder_name}'.")
        for csv_file in csv_files:
            table_name = csv_file.stem
            self.detect_table_from_csv(table_name, str(csv_file), read_csv_parameters)
        self._detect_relationships()

    def set_primary_key(self, table_name, column_name):
        """Set the primary key of one table."""
        self._validate_table_exists(table_name)
        self.tables[table_name].set_primary_key(column_name)

    def set_sequence_key(self, table_name, column_name):
        """Set the sequence key of one table (sequential modeling unsupported)."""
        self._validate_table_exists(table_name)
        warnings.warn('Sequential modeling is not yet supported on SDV Multi Table models.')
        self.tables[table_name].set_sequence_key(column_name)

    def add_alternate_keys(self, table_name, column_names):
        """Add alternate keys to one table."""
        self._validate_table_exists(table_name)
        self.tables[table_name].add_alternate_keys(column_names)

    def set_sequence_index(self, table_name, column_name):
        """Set the sequence index of one table (sequential modeling unsupported)."""
        self._validate_table_exists(table_name)
        warnings.warn('Sequential modeling is not yet supported on SDV Multi Table models.')
        self.tables[table_name].set_sequence_index(column_name)

    def _validate_column_relationships_foreign_keys(self, table_column_relationships, foreign_keys):
        """Forbid column relationships that include foreign key columns."""
        for column_relationship in table_column_relationships:
            column_names = set(column_relationship.get('column_names', []))
            invalid_columns = column_names.intersection(foreign_keys)
            if invalid_columns:
                raise InvalidMetadataError(f'Cannot use foreign keys {invalid_columns} in column relationship.')

    def add_column_relationship(self, relationship_type, table_name, column_names):
        """Add a column relationship (e.g. address) to one table after FK checks."""
        self._validate_table_exists(table_name)
        foreign_keys = self._get_all_foreign_keys(table_name)
        relationships = ([{'type': relationship_type, 'column_names': column_names}] + self.tables[table_name].column_relationships)
        self._validate_column_relationships_foreign_keys(relationships, foreign_keys)
        self.tables[table_name].add_column_relationship(relationship_type, column_names)

    def _validate_single_table(self, errors):
        """Collect per-table validation errors into *errors* (mutated in place)."""
        foreign_key_cols = defaultdict(list)
        for relationship in self.relationships:
            child_table = relationship.get('child_table_name')
            child_foreign_key = relationship.get('child_foreign_key')
            foreign_key_cols[child_table].append(child_foreign_key)
        for (table_name, table) in self.tables.items():
            if (len(table.columns) == 0):
                error_message = f"Table '{table_name}' has 0 columns. Use 'add_column' to specify its columns."
                errors.append(error_message)
            try:
                table.validate()
            except Exception as error:
                errors.append('\n')
                title = f'Table: {table_name}'
                error = str(error).replace('The following errors were found in the metadata:\n', title)
                errors.append(error)
            try:
                self._validate_column_relationships_foreign_keys(table.column_relationships, foreign_key_cols[table_name])
            except Exception as col_relationship_error:
                errors.append(str(col_relationship_error))

    def _append_relationships_errors(self, errors, method, *args, **kwargs):
        """Run *method*, collecting any exception under a 'Relationships:' header."""
        try:
            method(*args, **kwargs)
        except Exception as error:
            if ('\nRelationships:' not in errors):
                errors.append('\nRelationships:')
            errors.append(error)

    def validate(self):
        """Validate the whole metadata; raise InvalidMetadataError with all issues."""
        errors = []
        self._validate_single_table(errors)
        for relation in self.relationships:
            self._append_relationships_errors(errors, self._validate_relationship, **relation)
        parent_map = self._get_parent_map()
        child_map = self._get_child_map()
        self._append_relationships_errors(errors, self._validate_child_map_circular_relationship, child_map)
        self._append_relationships_errors(errors, self._validate_all_tables_connected, parent_map, child_map)
        if errors:
            raise InvalidMetadataError(('The metadata is not valid' + '\n'.join((str(e) for e in errors))))

    def _validate_missing_tables(self, data):
        """Return errors for metadata tables missing from *data*."""
        errors = []
        missing_tables = (set(self.tables) - set(data))
        if missing_tables:
            errors.append(f'The provided data is missing the tables {missing_tables}.')
        return errors

    def _validate_all_tables(self, data):
        """Return per-table data-validation error strings."""
        errors = []
        for (table_name, table_data) in data.items():
            try:
                self.tables[table_name].validate_data(table_data)
            except InvalidDataError as error:
                error_msg = f"Table: '{table_name}'"
                for _error in error.errors:
                    error_msg += f''' Error: {_error}'''
                errors.append(error_msg)
            except ValueError as error:
                errors.append(str(error))
            except KeyError:
                # data contains a table that is not in the metadata; ignored here
                continue
        return errors

    def _validate_foreign_keys(self, data):
        """Return referential-integrity errors (FK values missing from the parent PK)."""
        error_msg = None
        errors = []
        for relation in self.relationships:
            child_table = data.get(relation['child_table_name'])
            parent_table = data.get(relation['parent_table_name'])
            if (isinstance(child_table, pd.DataFrame) and isinstance(parent_table, pd.DataFrame)):
                child_column = child_table[relation['child_foreign_key']]
                parent_column = parent_table[relation['parent_primary_key']]
                missing_values = child_column[(~ child_column.isin(parent_column))].unique()
                if any(missing_values):
                    # show at most five offending values in the message
                    message = ', '.join(missing_values[:5].astype(str))
                    if (len(missing_values) > 5):
                        message = f'({message}, + more)'
                    else:
                        message = f'({message})'
                    errors.append(f"Error: foreign key column '{relation['child_foreign_key']}' contains unknown references: {message}. All the values in this column must reference a primary key.")
        if errors:
            error_msg = 'Relationships:\n'
            error_msg += '\n'.join(errors)
        return ([error_msg] if error_msg else [])

    def validate_data(self, data):
        """Validate real data ({table: DataFrame}) against this metadata."""
        errors = []
        errors += self._validate_missing_tables(data)
        errors += self._validate_all_tables(data)
        errors += self._validate_foreign_keys(data)
        if errors:
            raise InvalidDataError(errors)

    def add_table(self, table_name):
        """Register a new, empty table."""
        if ((not isinstance(table_name, str)) or (table_name == '')):
            raise InvalidMetadataError("Invalid table name (''). The table name must be a non-empty string.")
        if (table_name in self.tables):
            raise InvalidMetadataError(f"Cannot add a table named '{table_name}' because it already exists in the metadata. Please choose a different name.")
        self.tables[table_name] = SingleTableMetadata()

    def visualize(self, show_table_details='full', show_relationship_labels=True, output_filepath=None):
        """Render the metadata as a graphviz diagram (optionally saved to file)."""
        if (show_table_details not in (None, True, False, 'full', 'summarized')):
            raise ValueError("'show_table_details' parameter should be 'full', 'summarized' or None.")
        if isinstance(show_table_details, bool):
            # boolean values are deprecated in favor of 'full'/None
            if show_table_details:
                future_warning_msg = "Using True or False for show_table_details is deprecated. Use show_table_details='full' to show all table details."
                show_table_details = 'full'
            else:
                future_warning_msg = "Using True or False for 'show_table_details' is deprecated. Use show_table_details=None to hide table details."
                show_table_details = None
            warnings.warn(future_warning_msg, FutureWarning)
        nodes = {}
        edges = []
        if (show_table_details == 'full'):
            for (table_name, table_meta) in self.tables.items():
                nodes[table_name] = {'columns': create_columns_node(table_meta.columns), 'primary_key': f'Primary key: {table_meta.primary_key}'}
        elif (show_table_details == 'summarized'):
            for (table_name, table_meta) in self.tables.items():
                nodes[table_name] = {'columns': create_summarized_columns_node(table_meta.columns), 'primary_key': f'Primary key: {table_meta.primary_key}'}
        elif (show_table_details is None):
            nodes = {table_name: None for table_name in self.tables}
        for relationship in self.relationships:
            parent = relationship.get('parent_table_name')
            child = relationship.get('child_table_name')
            foreign_key = relationship.get('child_foreign_key')
            primary_key = self.tables.get(parent).primary_key
            edge_label = (f' {foreign_key} {primary_key}' if show_relationship_labels else '')
            edges.append((parent, child, edge_label))
            if (show_table_details is not None):
                # annotate the child node with its foreign key columns
                child_node = nodes.get(child)
                foreign_key_text = f'Foreign key ({parent}): {foreign_key}'
                if ('foreign_keys' in child_node):
                    child_node.get('foreign_keys').append(foreign_key_text)
                else:
                    child_node['foreign_keys'] = [foreign_key_text]
        for (table, info) in nodes.items():
            if show_table_details:
                # '\l' is graphviz's left-justified line break inside record labels
                foreign_keys = '\\l'.join(info.get('foreign_keys', []))
                keys = '\\l'.join([info['primary_key'], foreign_keys])
                if foreign_keys:
                    label = f"{{{table}|{info['columns']}\l|{keys}\l}}"
                else:
                    label = f"{{{table}|{info['columns']}\l|{keys}}}"
            else:
                label = f'{table}'
            nodes[table] = label
        return visualize_graph(nodes, edges, output_filepath)

    def to_dict(self):
        """Return a plain-dict representation of this metadata."""
        metadata = {'tables': {}, 'relationships': []}
        for (table_name, single_table_metadata) in self.tables.items():
            table_dict = single_table_metadata.to_dict()
            table_dict.pop('METADATA_SPEC_VERSION', None)
            metadata['tables'][table_name] = table_dict
        metadata['relationships'] = deepcopy(self.relationships)
        metadata['METADATA_SPEC_VERSION'] = self.METADATA_SPEC_VERSION
        return metadata

    def _set_metadata_dict(self, metadata):
        """Populate tables and relationships from a metadata dict."""
        for (table_name, table_dict) in metadata.get('tables', {}).items():
            self.tables[table_name] = SingleTableMetadata.load_from_dict(table_dict)
        for relationship in metadata.get('relationships', []):
            self.relationships.append(relationship)

    # NOTE(review): takes `cls` with no decorator — likely a stripped
    # @classmethod; confirm upstream.
    def load_from_dict(cls, metadata_dict):
        """Build a MultiTableMetadata instance from a metadata dict."""
        instance = cls()
        instance._set_metadata_dict(metadata_dict)
        return instance

    def save_to_json(self, filepath):
        """Serialize this metadata to a new JSON file at *filepath*."""
        validate_file_does_not_exist(filepath)
        metadata = self.to_dict()
        with open(filepath, 'w', encoding='utf-8') as metadata_file:
            json.dump(metadata, metadata_file, indent=4)

    # NOTE(review): takes `cls` with no decorator — likely a stripped @classmethod.
    def load_from_json(cls, filepath):
        """Build a MultiTableMetadata instance from a JSON file."""
        metadata = read_json(filepath)
        return cls.load_from_dict(metadata)

    def __repr__(self):
        """Pretty-printed JSON of the metadata dict."""
        printed = json.dumps(self.to_dict(), indent=4)
        return printed

    # NOTE(review): takes `cls` with no decorator — likely a stripped @classmethod.
    def _convert_foreign_keys(cls, old_metadata, parent, child):
        """Return the old-format child fields whose 'ref' points at *parent*."""
        foreign_keys = []
        child_table = old_metadata.get('tables', {}).get(child, {})
        for (name, field) in child_table.get('fields').items():
            ref = field.get('ref')
            if (ref and (ref['table'] == parent)):
                foreign_keys.append(name)
        return foreign_keys

    # NOTE(review): takes `cls` with no decorator — likely a stripped @classmethod.
    def _convert_relationships(cls, old_metadata):
        """Derive new-format relationship dicts from old-format 'ref' fields."""
        tables = old_metadata.get('tables')
        parents = defaultdict(set)
        for (table, table_meta) in tables.items():
            for field_meta in table_meta['fields'].values():
                ref = field_meta.get('ref')
                if ref:
                    parent = ref['table']
                    parents[table].add(parent)
        relationships = [{'parent_table_name': parent, 'parent_primary_key': tables.get(parent).get('primary_key'), 'child_table_name': table, 'child_foreign_key': foreign_key} for table in tables for parent in list(parents[table]) for foreign_key in cls._convert_foreign_keys(old_metadata, parent, table)]
        return relationships

    # NOTE(review): takes `cls` with no decorator — likely a stripped @classmethod.
    def upgrade_metadata(cls, filepath):
        """Convert an old-format metadata JSON file into a MultiTableMetadata."""
        old_metadata = read_json(filepath)
        tables_metadata = {}
        for (table_name, metadata) in old_metadata.get('tables', {}).items():
            tables_metadata[table_name] = convert_metadata(metadata)
        relationships = cls._convert_relationships(old_metadata)
        metadata_dict = {'tables': tables_metadata, 'relationships': relationships, 'METADATA_SPEC_VERSION': cls.METADATA_SPEC_VERSION}
        metadata = cls.load_from_dict(metadata_dict)
        try:
            metadata.validate()
        except InvalidMetadataError as error:
            message = f'''Successfully converted the old metadata, but the metadata was not valid.To use this with the SDV, please fix the following errors. {str(error)}'''
            warnings.warn(message)
        return metadata
# NOTE(review): the extraction stripped '@' characters from this block —
# the `_cache(maxsize=1000)` remnant and the missing matrix-multiplication
# operators are reconstructed below as `@functools.lru_cache` and `@` matmuls.
@functools.lru_cache(maxsize=1000)
def measure_multiple_with_cache(state: Tuple[complex], basis: Tuple[Tuple[complex]], length_diff: int) -> Tuple[(List[array], List[float])]:
    """Measure *state* against each vector of *basis*, cached on the arguments.

    Each basis vector is turned into a projector, extended by the identity on
    ``length_diff`` extra qubits via a Kronecker product. Arguments must be
    hashable (tuples), which is why the state is converted to an array inside.

    Returns:
        (post-measurement states, probabilities); a state entry is None when
        its outcome has probability 0.
    """
    state = array(state)
    projectors = [None] * len(basis)
    probabilities = [0] * len(basis)
    for i, vector in enumerate(basis):
        vector = array(vector, dtype=complex)
        M = outer(vector.conj(), vector)
        projectors[i] = kron(M, identity(2 ** length_diff))
        # p_i = Re(<psi| P_i^dagger P_i |psi>)
        probabilities[i] = ((state.conj().transpose() @ projectors[i].conj().transpose() @ projectors[i]) @ state).real
        if probabilities[i] < 0:
            # clamp tiny negative values caused by floating-point error
            probabilities[i] = 0
    return_states = [None] * len(projectors)
    for i, proj in enumerate(projectors):
        if probabilities[i] > 0:
            # renormalized post-measurement state: P|psi> / sqrt(p)
            new_state = (proj @ state) / sqrt(probabilities[i])
            return_states[i] = new_state
    return (return_states, probabilities)
class SiqaScenario(Scenario):
    """Social IQa benchmark scenario (multiple-choice commonsense QA).

    NOTE(review): the extraction that produced this file stripped URLs out of
    string literals — the `description` assignment below has lost its URL and
    its closing quote (it swallows the `tags` assignment), and `source_url`
    in get_instances is likewise truncated. This class does NOT parse as-is;
    restore both literals from upstream before use.
    """
    name = 'siqa'
    description = 'Benchmark from tags = ['knowledge', 'multiple_choice']

    def get_instances(self, output_path: str) -> List[Instance]:
        """Download the socialiqa-train-dev archive and build train/val instances."""
        data_path = os.path.join(output_path, 'data')
        ensure_directory_exists(data_path)
        # NOTE(review): source_url literal truncated by URL stripping — see class note.
        ensure_file_downloaded(source_url=' target_path=os.path.join(data_path, 'socialiqa-train-dev'), unpack=True, unpack_type='unzip')
        # dataset names its dev split 'dev'; we expose it as 'val'
        split_mapping = {'train': 'train', 'val': 'dev'}
        instances = []
        for split in ['train', 'val']:
            base_path = os.path.join(data_path, 'socialiqa-train-dev', 'socialiqa-train-dev', f'{split_mapping[split]}')
            data = [json.loads(line) for line in open((base_path + '.jsonl'))]
            labels = [int(line.strip()) for line in open((base_path + '-labels.lst'))]
            assert (len(data) == len(labels))
            for (item, label) in zip(data, labels):
                instances.append(self.json_to_instance(item, label, split))
        return instances

    # NOTE(review): defined without `self` yet invoked as self.json_to_instance —
    # a @staticmethod decorator was most likely stripped; confirm upstream.
    def json_to_instance(item, label, split) -> Instance:
        """Convert one JSON record + 1-based label into an Instance."""
        question = f"{item['context']} {item['question']}"
        answers = [item['answerA'], item['answerB'], item['answerC']]
        # labels are 1-based in the .lst files
        correct_choice = (label - 1)
        correct_answer = answers[correct_choice]
        assert (len(item) == 5)
        assert (correct_choice in [0, 1, 2])
        return _make_instance(question, answers, correct_answer, split)
def triu(A, k=0, format=None): coo_sparse = (coo_array if isinstance(A, sparray) else coo_matrix) A = coo_sparse(A, copy=False) mask = ((A.row + k) <= A.col) row = A.row[mask] col = A.col[mask] data = A.data[mask] new_coo = coo_sparse((data, (row, col)), shape=A.shape, dtype=A.dtype) return new_coo.asformat(format)
def load_url(url, model_dir='../../../pretrained', map_location=None):
    """Download a checkpoint from *url* into *model_dir* (once) and load it.

    Args:
        url: source URL; the basename becomes the cached filename.
        model_dir: directory for cached downloads.
        map_location: forwarded to ``torch.load``.

    Returns:
        The deserialized checkpoint object.
    """
    # exist_ok avoids the check-then-create race of exists()+makedirs()
    os.makedirs(model_dir, exist_ok=True)
    filename = url.split('/')[-1]
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        urlretrieve(url, cached_file)
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted URLs.
    return torch.load(cached_file, map_location=map_location)
def KL_divergence_std(mu1: Tensor, log_var1: Tensor, mu2: Tensor, log_var2: Tensor):
    """KL divergence between two diagonal Gaussians given means and log-variances.

    NOTE(review): the result is kl_divergence(d2, d1), i.e. KL(N(mu2) || N(mu1))
    despite mu1 being the first argument — confirm this ordering is intended.
    """
    first = MultivariateNormal(mu1, covariance_matrix=torch.diag(log_var1.exp()))
    second = MultivariateNormal(mu2, covariance_matrix=torch.diag(log_var2.exp()))
    return kl_divergence(second, first)
def require_version(requirement: str, hint: Optional[str]=None) -> None:
    """Check that an installed package (or Python itself) satisfies *requirement*.

    Args:
        requirement: a bare package name ('tokenizers') or a pip-style spec
            ('package>=1.23'); 'python' compares against the interpreter.
        hint: optional extra text appended to error messages.

    Raises:
        ValueError: malformed requirement or unsupported comparison operator.
        pkg_resources.DistributionNotFound: package is not installed.
        pkg_resources.VersionConflict: installed version fails the comparison.
    """
    # NOTE(review): collapsed source renders this as a triple-quoted f-string
    # with a single space; upstream likely prefixes the hint with a newline —
    # confirm before relying on the exact message formatting.
    hint = (f''' {hint}''' if (hint is not None) else '')
    if re.match('^[\\w_\\-\\d]+$', requirement):
        # bare package name: only check that it is installed
        (pkg, op, want_ver) = (requirement, None, None)
    else:
        match = re.findall('^([^!=<>\\s]+)([\\s!=<>]{1,2})(.+)', requirement)
        if (not match):
            raise ValueError(f'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}')
        (pkg, op, want_ver) = match[0]
        # `ops` maps operator strings ('==', '>=', ...) to comparison callables
        if (op not in ops):
            raise ValueError(f'need one of {list(ops.keys())}, but got {op}')
    if (pkg == 'python'):
        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
        if (not ops[op](version.parse(got_ver), version.parse(want_ver))):
            raise pkg_resources.VersionConflict(f'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.')
        return
    try:
        got_ver = pkg_resources.get_distribution(pkg).version
    except pkg_resources.DistributionNotFound:
        raise pkg_resources.DistributionNotFound(requirement, ['this application', hint])
    # version check only applies when the requirement carried a spec
    if ((want_ver is not None) and (not ops[op](version.parse(got_ver), version.parse(want_ver)))):
        raise pkg_resources.VersionConflict(f'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}')
def train_step(data_iter):
    """Run ``STEPS_PER_TPU_CALL`` distributed training steps from ``data_iter``.

    Relies on the enclosing scope for ``model``, ``loss_fn``, ``optimizer``,
    ``strategy``, ``train_loss`` and ``STEPS_PER_TPU_CALL``.
    """

    def train_step_fn(images, labels):
        # Per-replica step: forward pass under a gradient tape, then update.
        # NOTE(review): the original indentation was lost; the tape context is
        # assumed to cover only the forward/loss computation — confirm.
        with tf.GradientTape() as tape:
            cosine = model(images)
            loss = loss_fn(labels, cosine)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_loss.update_state(loss)

    # tf.range keeps the loop inside the traced graph so several steps run in
    # a single TPU call.
    for _ in tf.range(STEPS_PER_TPU_CALL):
        strategy.run(train_step_fn, next(data_iter))
# NOTE(review): the leading ``.parametrize`` lines are decorator remnants —
# the ``@pytest.mark`` prefix was lost during extraction.
.parametrize('ext_name', ext_names)
.parametrize('numpy_type, torch_type', types)
def test_from_dlpack_new(ext_name, numpy_type, torch_type):
    """Import a torch tensor into NNabla via DLPack and check memory sharing."""
    ctx = get_extension_context(ext_name)
    device_name = ctx.backend[0].split(':')[0]
    if (device_name == 'cudnn'):
        # torch names the device 'cuda' even when NNabla uses the cudnn backend.
        device_name = 'cuda'
    nn.set_default_context(ctx)
    t = torch.ones((5, 5), dtype=torch_type, device=torch.device(device_name))
    dlp = torch.utils.dlpack.to_dlpack(t)
    a = nn.utils.dlpack.from_dlpack(dlp)
    assert (a.dtype == numpy_type)
    # The imported array shares memory with the torch tensor: an in-place
    # update on one side must be visible through the other.
    a += 1
    assert np.all((a.data == t.to('cpu').detach().numpy().copy()))
class Network(object):
    """Thin wrapper around a TF1 variable scope with save/load helpers."""

    def __init__(self, scope_name):
        self.scope_name = scope_name

    def build(self, input):
        """Construct the network graph; must be implemented by subclasses."""
        raise NotImplementedError

    def all_vars(self):
        """Return all global variables created under this network's scope."""
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope_name)

    def trainable_vars(self):
        """Return the trainable variables created under this network's scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope_name)

    def get_variable(self, name):
        """Return the variable with the exact given name, or None if absent."""
        # BUG FIX: ``all_vars`` is a method, not a property — the original
        # code iterated over the bound method object itself (a TypeError).
        for var in self.all_vars():
            if (var.name == name):
                return var
        return None

    def print_layers(self):
        """Print the variables belonging to this network."""
        print('Layers of {}'.format(self.scope_name))
        # BUG FIX: call the method instead of printing the bound method.
        print(self.all_vars())

    def save(self, sess, folder):
        """Checkpoint this network's variables to <folder>/model.ckpt."""
        # BUG FIX: pass the variable list, not the bound method, to Saver.
        saver = tf.train.Saver(self.all_vars())
        path = os.path.join(folder, 'model.ckpt')
        saver.save(sess, path)

    def load(self, sess, folder):
        """Restore this network's variables from <folder>/model.ckpt."""
        saver = tf.train.Saver(self.all_vars())
        path = os.path.join(folder, 'model.ckpt')
        saver.restore(sess, path)
class TransformerSpecPredictionHead(nn.Module):
    """Projection head mapping transformer hidden states to spectrogram frames.

    Applies dense -> activation -> layer-norm, then a final linear projection
    to ``output_dim``.  ``forward`` returns both the projection and the
    normalized hidden states.
    """

    def __init__(self, config, output_dim, input_dim=None):
        super(TransformerSpecPredictionHead, self).__init__()
        self.output_dim = output_dim
        # Default the input width to the model's hidden size.
        in_features = config.hidden_size if input_dim is None else input_dim
        self.dense = nn.Linear(in_features, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.output = nn.Linear(config.hidden_size, self.output_dim)

    def forward(self, hidden_states):
        """Return ``(prediction, transformed_hidden)`` for the given states."""
        transformed = self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
        return self.output(transformed), transformed
class InconsistentCandidate(ResolverException):
    """Raised when a provided candidate does not satisfy its criterion."""

    def __init__(self, candidate, criterion):
        super(InconsistentCandidate, self).__init__(candidate, criterion)
        self.candidate = candidate
        self.criterion = criterion

    def __str__(self):
        requirements = ', '.join(repr(r) for r in self.criterion.iter_requirement())
        return 'Provided candidate {!r} does not satisfy {}'.format(
            self.candidate, requirements
        )
def get_expected_shape(t_shape, granularity):
    """Map a tensor shape to the Hessian-score shape for ``granularity``."""
    if granularity == hessian_common.HessianInfoGranularity.PER_TENSOR:
        # One scalar score for the whole tensor.
        return (1,)
    if granularity == hessian_common.HessianInfoGranularity.PER_ELEMENT:
        # One score per tensor element.
        return t_shape
    # Remaining granularity: one score per entry of the leading axis.
    return (t_shape[0],)
def OpenAUC(open_set_pred_known, open_set_pred_unknown, close_set_pred_class, close_set_labels):
    """Compute the OpenAUC open-set recognition metric.

    Known samples keep their open-set score only when correctly classified on
    the closed set; misclassified known samples are assigned a score above
    every unknown score so they always rank as errors.
    """
    known_scores = open_set_pred_known.tolist()
    unknown_scores = open_set_pred_unknown.tolist()
    hits = (close_set_pred_class == close_set_labels).tolist()
    # Score ceiling: strictly larger than any unknown-sample score.
    ceiling = max(unknown_scores) + 1e-05
    known_part = [score if hit else ceiling for (score, hit) in zip(known_scores, hits)]
    y_score = known_part + unknown_scores
    y_true = [0] * len(known_scores) + [1] * len(unknown_scores)
    return roc_auc_score(y_true, y_score)
class docLanguageTypeSub(supermod.docLanguageType):
    """Generated subclass hook for ``docLanguageType`` (generateDS pattern)."""

    def __init__(self, langid=None, para=None):
        super(docLanguageTypeSub, self).__init__(langid, para)
def parse_args():
    """Parse command-line arguments.

    --source_file : path to the input PGN file (must exist).
    --output_dir  : output directory (created if missing).
    --max_games   : optional cap on the number of games parsed (None = all).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--source_file', type=str, help='PGN dir')
    parser.add_argument('--output_dir', type=str, help='Output directory')
    # BUG FIX: the original read ``default=,`` — a syntax error.  None means
    # "no limit".
    parser.add_argument('--max_games', default=None, type=int, help='Max games to parse')
    parsed_args = parser.parse_args()
    assert path.exists(parsed_args.source_file)
    if (not path.exists(parsed_args.output_dir)):
        os.makedirs(parsed_args.output_dir)
    return parsed_args
class ControlBlock(object):
    """Node of a control-flow graph used for data-flow analysis.

    Tracks predecessor/successor edges, the statements and source positions
    the block covers, and index bookkeeping for gen/kill computations.
    """

    def __init__(self):
        self.children = set()   # successor blocks
        self.parents = set()    # predecessor blocks
        self.positions = set()  # source positions covered by this block
        self.stats = []         # statements in this block
        self.gen = {}
        self.bounded = set()
        # Bit-set / index bookkeeping used by the data-flow solver.
        self.i_input = 0
        self.i_output = 0
        self.i_gen = 0
        self.i_kill = 0
        self.i_state = 0

    def empty(self):
        """True when the block holds no statements and no positions."""
        return not (self.stats or self.positions)

    def detach(self):
        """Remove this block from the graph, dropping all incident edges."""
        for successor in self.children:
            successor.parents.remove(self)
        for predecessor in self.parents:
            predecessor.children.remove(self)
        self.parents.clear()
        self.children.clear()

    def add_child(self, block):
        """Link ``block`` as a successor of this block (both directions)."""
        self.children.add(block)
        block.parents.add(self)
class ViltProcessor():
    """Bundles a ViLT feature extractor and a BERT fast tokenizer.

    Calling the processor tokenizes ``text`` and extracts image features,
    merging both into a single BatchEncoding.
    """

    def __init__(self, feature_extractor, tokenizer):
        if (not isinstance(feature_extractor, ViltFeatureExtractor)):
            raise ValueError(f'`feature_extractor` has to be of type {ViltFeatureExtractor.__class__}, but is {type(feature_extractor)}')
        if (not isinstance(tokenizer, BertTokenizerFast)):
            raise ValueError(f'`tokenizer` has to be of type {BertTokenizerFast.__class__}, but is {type(tokenizer)}')
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer
        self.current_processor = self.feature_extractor

    def save_pretrained(self, save_directory):
        """Persist both sub-components to ``save_directory``."""
        self.feature_extractor.save_pretrained(save_directory)
        self.tokenizer.save_pretrained(save_directory)

    # BUG FIX: the method takes ``cls`` and constructs an instance — it lost
    # its @classmethod decorator.
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the processor from a pretrained checkpoint."""
        feature_extractor = ViltFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
        tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path, **kwargs)
        return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, images, text: Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=False, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[(str, TensorType)]]=None, **kwargs) -> BatchEncoding:
        """Tokenize ``text`` and extract ``images`` features into one encoding."""
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        encoding_feature_extractor = self.feature_extractor(images, return_tensors=return_tensors)
        # Image features are merged into the text encoding in place.
        encoding.update(encoding_feature_extractor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
# NOTE(review): ``_utils.test(...)`` and ``_func`` below are decorator
# remnants (likely ``@test_utils.test`` / ``@ti.func``) whose ``@`` prefix was
# lost during extraction.
_utils.test(arch=[ti.cpu, ti.cuda], debug=True)
def test_ref_atomic():
    """Check that a ``ti.ref`` argument supports in-place (atomic) updates."""
    cur_arch = ti.lang.impl.get_runtime().prog.config().arch
    if ((cur_arch == ti.cuda) and (ti.lang.impl.get_cuda_compute_capability() < 70)):
        pytest.skip('Skip this test on Pascal (and potentially older) architecture, ask turbo0628/Proton for more information')

    _func
    def foo(a: ti.ref(ti.f32)):
        # ``a`` is passed by reference, so this doubles the caller's value.
        a += a

    def bar():
        a = 5.0
        foo(a)
        assert (a == 10.0)

    bar()
class Conceptual_Caption(RNGDataFlow):
    """Tensorpack dataflow over Conceptual Captions TSV feature shards.

    Streams pre-extracted Faster-R-CNN region features (boxes, features,
    class probabilities) together with the matching caption text.
    NOTE(review): input/output paths are hard-coded to a specific machine.
    """

    def __init__(self, corpus_path, shuffle=False):
        # ``shuffle`` is stored but never used by __iter__ below.
        self.shuffle = shuffle
        self.num_file = 16
        self.name = os.path.join(corpus_path, 'CC_resnet101_faster_rcnn_genome.tsv.%d')
        self.infiles = [(self.name % i) for i in range(self.num_file)]
        self.counts = []
        # Total caption count across all shards (fixed, known a priori).
        self.num_caps = 3103920
        self.captions = {}
        # Build the image-id -> caption map from the official training TSV.
        df = open_tsv('/mnt/yangan.ya/VL-BERT/data/conceptual-captions/utils/Train_GCC-training.tsv', 'training')
        for (i, img) in enumerate(df.iterrows()):
            caption = img[1]['caption']
            url = img[1]['url']
            image_id = str(i)
            self.captions[image_id] = caption
        # Persist the caption map for later reuse.
        json.dump(self.captions, open('/mnt3/yangan.ya/features_lmdb/CC/caption_train.json', 'w'))

    def __len__(self):
        return self.num_caps

    def __iter__(self):
        """Yield [features, cls_prob, boxes, num_boxes, h, w, image_id, caption]."""
        for infile in self.infiles:
            count = 0
            with open(infile) as tsv_in_file:
                reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
                for item in reader:
                    image_id = item['image_id']
                    image_h = item['image_h']
                    image_w = item['image_w']
                    num_boxes = item['num_boxes']
                    # Region features are stored as base64-encoded float32 blobs.
                    boxes = np.frombuffer(base64.b64decode(item['boxes']), dtype=np.float32).reshape(int(num_boxes), 4)
                    features = np.frombuffer(base64.b64decode(item['features']), dtype=np.float32).reshape(int(num_boxes), 2048)
                    cls_prob = np.frombuffer(base64.b64decode(item['cls_prob']), dtype=np.float32).reshape(int(num_boxes), 1601)
                    caption = self.captions[image_id]
                    (yield [features, cls_prob, boxes, num_boxes, image_h, image_w, image_id, caption])
class Lyric(Base):
    """A timed lyric event: ``lyric`` text occurring at tick ``time``."""

    # Serialization schema consumed by ``Base``.
    _attributes = OrderedDict([('time', int), ('lyric', str)])

    def __init__(self, time: int, lyric: str):
        self.time, self.lyric = time, lyric
def create_inputs_for_multiple_axes(rng, axes):
    """Build random batch-norm style inputs whose stats broadcast over ``axes``.

    Returns ``(x, beta, gamma, rmean, rvar)`` where the statistic arrays have
    size 1 on every axis except those in ``axes``.
    """
    x = rng.randn(2, 3, 4).astype(np.float32) * 2
    # Statistic shape: 1 everywhere except the normalized axes.
    stat_shape = [1] * x.ndim
    for axis in axes:
        stat_shape[axis] = x.shape[axis]
    beta = rng.randn(*stat_shape).astype(np.float32)
    gamma = rng.randn(*stat_shape).astype(np.float32)
    rmean = np.zeros(stat_shape, dtype=np.float32)
    rvar = np.zeros(stat_shape, dtype=np.float32)
    return (x, beta, gamma, rmean, rvar)
def get_world_size():
    """Number of distributed workers.

    Checks the MPI launcher environment variables (Intel MPI's PMI_SIZE
    first, then OpenMPI's OMPI_COMM_WORLD_SIZE) and falls back to the local
    CUDA device count when neither is set.
    """
    for env_var in ('PMI_SIZE', 'OMPI_COMM_WORLD_SIZE'):
        value = os.environ.get(env_var)
        if value is not None:
            # Empty string counts as a single worker.
            return int(value or 1)
    return torch.cuda.device_count()
class TransformerInfo(object):
    """Result object of a graph transform: maps original ops/tensors to copies.

    Wraps the raw ``info`` record produced by the transformer, exposing
    lookups in both directions (original -> transformed and back).
    """

    def __init__(self, info):
        self._graph = info.graph            # source graph
        self._scope = info.scope            # source scope
        self._graph_ = info.graph_          # destination graph
        self._scope_ = info.scope_          # destination scope
        self._transformed_ops = info.transformed_ops  # op -> transformed op
        self._transformed_ts = info.transformed_ts    # tensor -> transformed tensor

    def _get_transformed_map(self, top):
        """Select the op or tensor mapping depending on the type of ``top``."""
        if isinstance(top, tf_ops.Operation):
            return self._transformed_ops
        elif isinstance(top, tf_ops.Tensor):
            return self._transformed_ts
        else:
            raise TypeError('Expected a tf.Tensor or a tf.Operation, got a {}'.format(type(top)))

    def _transformed_elem(self, original_top, missing_fn=None):
        """Transformed counterpart of ``original_top`` (op, tensor or name)."""
        transformed_map = self._get_transformed_map(original_top)
        if isinstance(original_top, string_types):
            # Name lookup: linear scan over the mapping.
            for (original, transformed) in iteritems(transformed_map):
                if (original.name == original_top):
                    return transformed
            return (None if (missing_fn is None) else missing_fn(original_top))
        else:
            if (original_top not in transformed_map):
                return (None if (missing_fn is None) else missing_fn(original_top))
            return transformed_map[original_top]

    def _original_elem(self, transformed_top, missing_fn=None):
        """Original counterpart of ``transformed_top`` (reverse lookup)."""
        transformed_map = self._get_transformed_map(transformed_top)
        if isinstance(transformed_top, string_types):
            finder = (lambda transformed: (transformed.name == transformed_top))
        else:
            finder = (lambda transformed: (transformed == transformed_top))
        for (original, transformed) in iteritems(transformed_map):
            if finder(transformed):
                return original
        return (None if (missing_fn is None) else missing_fn(transformed_top))

    def transformed(self, original, missing_fn=None):
        """Map a (possibly nested) structure of originals to their copies."""
        transformed_elem = partial(self._transformed_elem, missing_fn=missing_fn)
        return util.transform_tree(original, transformed_elem)

    def original(self, transformed, missing_fn=None):
        """Map a (possibly nested) structure of copies back to originals."""
        original_elem = partial(self._original_elem, missing_fn=missing_fn)
        return util.transform_tree(transformed, original_elem)

    def __str__(self):
        res = StringIO()
        print('Transform result info:', file=res)
        if (self._graph == self._graph_):
            # Same graph: the transform was either in-place or scope-to-scope.
            in_place_str = ('' if self._scope_ else ' IN-PLACE')
            print(' Within graph[{}]{}'.format(id(self._graph), in_place_str), file=res)
        else:
            print(' graph[{}] => graph[{}]'.format(id(self._graph), id(self._graph_)), file=res)
        if self._scope:
            print(' Relative to source scope: {}'.format(self._scope), file=res)
        if self._scope_:
            print(' Scope destination: {}'.format(self._scope_), file=res)
        print('Operations mapping:', file=res)
        for (op, op_) in iteritems(self._transformed_ops):
            print(' {} => {}'.format(op.name, op_.name), file=res)
        return res.getvalue()
def convert_ua_images_2_videos(image_folder):
    """Convert every image sub-folder of ``image_folder`` into a 960x540 AVI.

    Uses the module-level ``convert`` helper and ``args.video_fps``; the
    output video is written next to each folder as ``<folder>.avi``.
    """
    for folder in tqdm(glob.glob(os.path.join(image_folder, '*'))):
        if not os.path.isdir(folder):
            continue
        convert(folder, folder + '.avi', args.video_fps, 960, 540)
class TestCMAES(TfGraphTestCase):
    """Smoke test: CMA-ES trains one epoch on CartPole without crashing."""

    def test_cma_es_cartpole(self):
        """Run a single CMA-ES training epoch end-to-end."""
        with LocalTFRunner(snapshot_config) as runner:
            env = GarageEnv(env_name='CartPole-v1')
            policy = CategoricalMLPPolicy(name='policy', env_spec=env.spec, hidden_sizes=(32, 32))
            baseline = LinearFeatureBaseline(env_spec=env.spec)
            # Population size per CMA-ES generation.
            n_samples = 20
            algo = CMAES(env_spec=env.spec, policy=policy, baseline=baseline, max_path_length=100, n_samples=n_samples)
            runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
            runner.train(n_epochs=1, batch_size=1000)
            env.close()
def register_Ns3WimaxConnection_methods(root_module, cls):
    """Auto-generated pybindgen registration for ``ns3::WimaxConnection``.

    NOTE: generated binding code — regenerate from the API scan rather than
    editing by hand.
    """
    cls.add_constructor([param('ns3::WimaxConnection const &', 'arg0')])
    cls.add_constructor([param('ns3::Cid', 'cid'), param('ns3::Cid::Type', 'type')])
    cls.add_method('ClearFragmentsQueue', 'void', [])
    cls.add_method('Dequeue', 'ns3::Ptr< ns3::Packet >', [param('ns3::MacHeaderType::HeaderType', 'packetType', default_value='::ns3::MacHeaderType::HeaderType::HEADER_TYPE_GENERIC')])
    cls.add_method('Dequeue', 'ns3::Ptr< ns3::Packet >', [param('ns3::MacHeaderType::HeaderType', 'packetType'), param('uint32_t', 'availableByte')])
    cls.add_method('Enqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::GenericMacHeader const &', 'hdr')])
    cls.add_method('FragmentEnqueue', 'void', [param('ns3::Ptr< ns3::Packet const >', 'fragment')])
    cls.add_method('GetCid', 'ns3::Cid', [], is_const=True)
    cls.add_method('GetFragmentsQueue', 'ns3::WimaxConnection::FragmentsQueue const', [], is_const=True)
    cls.add_method('GetQueue', 'ns3::Ptr< ns3::WimaxMacQueue >', [], is_const=True)
    cls.add_method('GetSchedulingType', 'uint8_t', [], is_const=True)
    cls.add_method('GetServiceFlow', 'ns3::ServiceFlow *', [], is_const=True)
    cls.add_method('GetType', 'ns3::Cid::Type', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetTypeStr', 'std::string', [], is_const=True)
    cls.add_method('HasPackets', 'bool', [], is_const=True)
    cls.add_method('HasPackets', 'bool', [param('ns3::MacHeaderType::HeaderType', 'packetType')], is_const=True)
    cls.add_method('SetServiceFlow', 'void', [param('ns3::ServiceFlow *', 'serviceFlow')])
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return
class MNIST(torchvision.datasets.MNIST):
    """MNIST wrapper exposing the digit class as an optional labeled factor.

    When ``labeled_factors`` is non-empty, items are ``(image, label_tensor)``
    pairs and per-class frequencies are pre-computed; otherwise items are the
    bare images.
    """

    def __init__(self, root, part, labeled_factors, transform):
        super().__init__(root, part == 'train', transform=transform, download=True)
        if labeled_factors:
            self.has_label = True
            self.nclass = [10]
            counts = self.targets.bincount(minlength=10)
            # Empirical class frequency over the split.
            self.class_freq = [counts.float() / self.data.size(0)]
        else:
            self.has_label = False
            self.nclass = []
            self.class_freq = []

    def __getitem__(self, k):
        img, target = super().__getitem__(k)
        if not self.has_label:
            return img
        return (img, torch.tensor([target]))
def load_state_dict(checkpoint_path):
    """Load a checkpoint file onto the CPU and return its contents."""
    return torch.load(checkpoint_path, map_location='cpu')
class Eckerle4(Benchmark):
    """NIST Eckerle4 nonlinear least-squares benchmark.

    Fits a scaled Gaussian ``(x0/x1) * exp(-(b - x2)^2 / (2*x1^2))`` to the
    circular-interference transmittance data; the objective is the sum of
    squared residuals.
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([0.0, 1.0, 10.0], [20, 20.0, 600.0]))
        self.global_optimum = [[1., 4., 451.]]
        self.fglob = 0.
        # Observed transmittance values (NIST reference data).
        self.a = asarray([0.0001575, 0.0001699, 0.000235, 0.0003102, 0.0004917, 0.000871, 0.0017418, 0.00464, 0.0065895, 0.0097302, 0.0149002, 0.023731, 0.0401683, 0.0712559, 0.1264458, 0.2073413, 0.2902366, 0.3445623, 0.3698049, 0.3668534, 0.3106727, 0.2078154, 0.1164354, 0.0616764, 0.03372, 0.0194023, 0.0117831, 0.0074357, 0.0022732, 0.00088, 0.0004579, 0.0002345, 0.0001586, 0.0001143, 7.1e-05])
        # Corresponding wavelengths.
        self.b = asarray([400.0, 405.0, 410.0, 415.0, 420.0, 425.0, 430.0, 435.0, 436.5, 438.0, 439.5, 441.0, 442.5, 444.0, 445.5, 447.0, 448.5, 450.0, 451.5, 453.0, 454.5, 456.0, 457.5, 459.0, 460.5, 462.0, 463.5, 465.0, 470.0, 475.0, 480.0, 485.0, 490.0, 495.0, 500.0])

    def fun(self, x, *args):
        """Sum-of-squares objective; also counts function evaluations."""
        self.nfev += 1
        model = (x[0] / x[1]) * exp(-((self.b - x[2]) ** 2) / (2 * (x[1] ** 2)))
        residuals = self.a - model
        return sum(residuals ** 2)
def replace_negative_size_with_batch_size(shape, batch_size):
    """Return ``shape`` with every negative (dynamic) dimension replaced by ``batch_size``."""
    return [batch_size if dim < 0 else dim for dim in shape]
class Op2DAddConstCollapsing(common.BaseSubstitution):
    """Graph substitution folding a constant-Add node into the preceding 2D op's bias.

    Matches an (op2d -> add) edge and replaces the pair with a single op2d
    node whose bias absorbs the added constant.
    """

    def __init__(self, first_node: NodeOperationMatcher, second_node: NodeOperationMatcher, op2d_collapsing_fn: Callable, bias_str: str, use_bias_str: str, layer_name_str: str=None):
        """Configure the matcher and the framework-specific attribute keys.

        ``op2d_collapsing_fn`` computes the folded bias; ``bias_str`` /
        ``use_bias_str`` are the framework keys for the bias weight and the
        use-bias flag; ``layer_name_str`` optionally names the attr holding
        the layer name.
        """
        super().__init__(matcher_instance=EdgeMatcher(first_node, second_node))
        self.op2d_collapsing_fn = op2d_collapsing_fn
        self.bias_str = bias_str
        self.use_bias_str = use_bias_str
        self.layer_name_str = layer_name_str

    def substitute(self, graph: Graph, edge_nodes: Tuple[(BaseNode, BaseNode)]) -> Graph:
        """Collapse the matched (op2d, add) pair into one node; return the graph."""
        (first_node, second_node, _) = edge_nodes
        # Reused nodes share weights with other instances — unsafe to rewrite.
        if (first_node.is_reused() or second_node.is_reused()):
            return graph
        # Only collapse a straight-line edge (no fan-out from the op2d node,
        # no extra fan-in to the add node).
        if ((len(graph.get_next_nodes(first_node)) > 1) or (len(graph.get_prev_nodes(second_node)) > 1)):
            return graph
        bias = self.op2d_collapsing_fn(first_node, second_node, self.bias_str)
        op2d_collapsed = copy.deepcopy(first_node)
        op2d_collapsed_name = (first_node.name + '_collapsed')
        op2d_collapsed.name = op2d_collapsed_name
        op2d_collapsed.framework_attr[self.use_bias_str] = True
        op2d_collapsed.set_weights_by_keys(self.bias_str, bias)
        if (self.layer_name_str is not None):
            op2d_collapsed.framework_attr[self.layer_name_str] = op2d_collapsed_name
        # Rewire: the collapsed node takes over both endpoints' edges, then
        # the original pair is removed.  Order matters here.
        graph.add_node(op2d_collapsed)
        graph.reconnect_out_edges(current_node=second_node, new_node=op2d_collapsed)
        graph.reconnect_in_edges(current_node=first_node, new_node=op2d_collapsed)
        graph.replace_output_node(current_node=second_node, new_node=op2d_collapsed)
        graph.remove_edge(first_node, second_node)
        graph.remove_node(first_node)
        graph.remove_node(second_node)
        return graph
class MultiplexedEnv():
    """Presents several goal-conditioned environments as one environment.

    Goal indices are flattened across sub-environments; ``set_goal_idx``
    routes a global goal index to the owning sub-env.
    """

    def __init__(self, envs, action_repeat, size=(64, 64), use_goal_idx=False, log_per_goal=False):
        self.use_goal_idx = use_goal_idx
        self.log_per_goal = log_per_goal
        self.envs = envs
        # Flattened per-env goal indices across all sub-envs.
        self.goals = sum(list((list(range(len(_env.get_goals()))) for _env in self.envs)), [])
        self.active_env_idx = 0
        self.goal_idx = 0

    def reset(self):
        """Reset the active sub-env (randomly chosen unless goals are external)."""
        if (not self.use_goal_idx):
            self.active_env_idx = (random.randint(1, len(self.envs)) - 1)
        return self.clean_obs(self.active.reset())

    def step(self, action):
        (o, r, d, i) = self.active.step(action)
        return (self.clean_obs(o), r, d, i)

    def clean_obs(self, obs):
        """Strip/remap metric keys and attach goal/state placeholders."""
        keys = list((k for k in obs.keys() if ('metric_' in k)))
        orig_obs = obs
        obs = subdict(obs, ['image', 'image_goal'])
        if self.use_goal_idx:
            # Shift per-env goal numbers in metric keys to the global index space.
            lens = np.cumsum(np.array(list((len(_env.get_goals()) for _env in self.envs))))
            alens = ([0] + list(lens))
            offset = alens[self.active_env_idx]
            new_keys = list([re.sub('(metric_.*/goal_)(\\d+)', (lambda e: f'{e.group(1)}{(int(e.group(2)) + offset)}'), k) for k in keys])
            for (i, k) in enumerate(keys):
                obs[new_keys[i]] = orig_obs.pop(k)
        obs['goal'] = np.zeros(10)
        obs['state'] = np.zeros(10)
        obs['env_idx'] = self.active_env_idx
        return obs

    def get_goal_idx(self):
        return self.goal_idx

    def set_goal_idx(self, idx):
        """Select the sub-env owning global goal ``idx`` and set its local goal."""
        self.goal_idx = idx
        lens = np.cumsum(np.array(list((len(_env.get_goals()) for _env in self.envs))))
        self.active_env_idx = np.argmax((idx < lens))
        self.active.reset()
        alens = ([0] + list(lens))
        # (removed a no-op expression here: the local index is computed below)
        self.active.set_goal_idx((idx - alens[self.active_env_idx]))

    def get_goals(self):
        return self.goals

    # BUG FIX: ``active`` is used throughout as an attribute
    # (``self.active.reset()``, ``getattr(self.active, ...)``) — the
    # ``@property`` decorator was lost; restore it.
    @property
    def active(self):
        """The currently selected sub-environment."""
        return self.envs[self.active_env_idx]

    def __getattr__(self, name):
        # Delegate unknown attributes to the active sub-env.
        return getattr(self.active, name)
class TFMinLengthLogitsProcessor(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless TF is installed."""

    # Backends this dummy stands in for.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class Polyhedron_base1(Polyhedron_base0, ConvexSet_closed):
    """Hashing, comparison, dimension and containment layer for Sage polyhedra.

    NOTE(review): the bare ``_binop`` / ``_method`` tokens below are decorator
    remnants (likely ``@coerce_binop`` / ``@cached_method``) whose ``@``
    prefix was lost in extraction; ``a_maximal_chain`` has also lost its body.
    """

    def __hash__(self):
        # Hash only cheap combinatorial invariants of the H/V representations.
        return hash((self.dim(), self.ambient_dim(), self.n_Hrepresentation(), self.n_Vrepresentation(), self.n_equations(), self.n_facets(), self.n_inequalities(), self.n_lines(), self.n_rays(), self.n_vertices()))

    def _repr_(self):
        """Assemble the human-readable description of the polyhedron."""
        desc = ''
        if (self.n_vertices() == 0):
            desc += 'The empty polyhedron'
        else:
            desc += (('A ' + repr(self.dim())) + '-dimensional polyhedron')
        desc += ' in '
        desc += self.parent()._repr_ambient_module()
        if (self.n_vertices() > 0):
            desc += ' defined as the convex hull of '
            desc += repr(self.n_vertices())
            if (self.n_vertices() == 1):
                desc += ' vertex'
            else:
                desc += ' vertices'
            if (self.n_rays() > 0):
                if (self.n_lines() > 0):
                    desc += ', '
                else:
                    desc += ' and '
                desc += repr(self.n_rays())
                if (self.n_rays() == 1):
                    desc += ' ray'
                else:
                    desc += ' rays'
            if (self.n_lines() > 0):
                if (self.n_rays() > 0):
                    desc += ', '
                else:
                    desc += ' and '
                desc += repr(self.n_lines())
                if (self.n_lines() == 1):
                    desc += ' line'
                else:
                    desc += ' lines'
        return desc

    def _richcmp_(self, other, op):
        """Compare by mutual inclusion; incomparable polyhedra are 'not equal'."""
        if ((self.Vrepresentation() is None) or (other.Vrepresentation() is None)):
            raise RuntimeError('some V representation is missing')
        if (self.ambient_dim() != other.ambient_dim()):
            # Different ambient spaces: only != can hold.
            return (op == op_NE)
        c0 = self._is_subpolyhedron(other)
        c1 = other._is_subpolyhedron(self)
        if (c0 and c1):
            return rich_to_bool(op, 0)
        elif c0:
            return rich_to_bool(op, (- 1))
        elif c1:
            return rich_to_bool(op, 1)
        else:
            return (op == op_NE)

    _binop
    def _is_subpolyhedron(self, other):
        # self ⊆ other iff every H-constraint of other holds on every
        # V-generator of self.
        return all((other_H.contains(self_V) for other_H in other.Hrepresentation() for self_V in self.Vrepresentation()))

    def is_empty(self):
        """True iff the polyhedron has no V-representation objects."""
        return (self.n_Vrepresentation() == 0)

    def is_universe(self):
        """True iff there are no H-constraints (whole ambient space)."""
        return (self.n_Hrepresentation() == 0)

    def dim(self):
        """Dimension: -1 for the empty polyhedron, else ambient dim minus #equations."""
        if (self.n_Vrepresentation() == 0):
            return (- 1)
        else:
            return (self.ambient_dim() - self.n_equations())
    dimension = dim

    def Vrepresentation_space(self):
        return self.parent().Vrepresentation_space()

    def Hrepresentation_space(self):
        return self.parent().Hrepresentation_space()
    ambient_space = Vrepresentation_space

    def ambient_vector_space(self, base_field=None):
        return self.Vrepresentation_space().vector_space(base_field=base_field)
    ambient = ambient_vector_space

    def ambient_dim(self):
        return self.parent().ambient_dim()

    def an_affine_basis(self):
        """Return V-representatives forming an affine basis of the affine hull."""
        chain = self.a_maximal_chain()[1:]
        chain_indices = [face.ambient_V_indices() for face in chain]
        basis_indices = []
        # Walk up the face chain, adding the one new index per dimension step.
        for (dim, face) in enumerate(chain_indices):
            if (dim == 0):
                basis_indices.extend(face[:])
                continue
            prev_face = chain_indices[(dim - 1)]
            for i in range(len(prev_face)):
                if (prev_face[i] != face[i]):
                    basis_indices.append(face[i])
                    break
            else:
                basis_indices.append(face[len(prev_face)])
        Vreps = [self.Vrepresentation()[i] for i in basis_indices]
        if all((vrep.is_vertex() for vrep in Vreps)):
            return Vreps
        # Shift rays/lines by some vertex so every element is an affine point.
        for vrep in Vreps:
            if vrep.is_vertex():
                vertex = vrep
        return [(vrep if vrep.is_vertex() else (vertex.vector() + vrep.vector())) for vrep in Vreps]

    _method
    def a_maximal_chain(self):
        # NOTE(review): the body was lost in extraction; only the
        # (de-decorated) signature survives.
        _method
    def representative_point(self):
        """Vertex average plus the sum of rays — a relative-interior point."""
        accumulator = vector(self.base_ring(), ([0] * self.ambient_dim()))
        for v in self.vertex_generator():
            accumulator += v.vector()
        accumulator /= self.n_vertices()
        for r in self.ray_generator():
            accumulator += r.vector()
        accumulator.set_immutable()
        return accumulator

    def _some_elements_(self):
        """Yield a few points of the polyhedron (generator)."""
        if self.is_empty():
            return
        (yield self.representative_point())
        vertex_iter = iter(self.vertex_generator())
        try:
            p = next(vertex_iter).vector()
            (yield vector(p, immutable=True))
            # Midpoints towards successive vertices give more sample points.
            for i in range(4):
                p = ((p + next(vertex_iter).vector()) / 2)
                (yield vector(p, immutable=True))
        except StopIteration:
            pass

    def contains(self, point):
        """True iff ``point`` (coercible to a vector) lies in the polyhedron."""
        try:
            p = vector(point)
        except TypeError:
            try:
                l = len(point)
            except TypeError:
                return False
            if (l > 0):
                return False
            else:
                # Zero-length input: the empty vector over the base ring.
                p = vector(self.base_ring(), [])
        if (len(p) != self.ambient_dim()):
            return False
        for H in self.Hrep_generator():
            if (not H.contains(p)):
                return False
        return True
    __contains__ = contains

    _method
    def interior(self):
        """The topological interior (empty unless full-dimensional)."""
        if self.is_open():
            return self
        if (not self.is_full_dimensional()):
            # Not full-dimensional: the interior is the empty polyhedron.
            return self.parent().element_class(self.parent(), None, None)
        return self.relative_interior()

    def interior_contains(self, point):
        """True iff ``point`` satisfies every H-constraint strictly."""
        try:
            p = vector(point)
        except TypeError:
            try:
                l = len(point)
            except TypeError:
                return False
            if (l > 0):
                return False
            else:
                p = vector(self.base_ring(), [])
        if (len(p) != self.ambient_dim()):
            return False
        for H in self.Hrep_generator():
            if (not H.interior_contains(p)):
                return False
        return True

    def is_relatively_open(self):
        # Relatively open iff there are no inequality facets at all.
        return (not self.inequalities())

    _method
    def relative_interior(self):
        """The relative interior as a convex set."""
        if self.is_relatively_open():
            return self
        return RelativeInterior(self)

    def relative_interior_contains(self, point):
        """True iff ``point`` is on all equations and strictly inside all inequalities."""
        try:
            p = vector(point)
        except TypeError:
            try:
                l = len(point)
            except TypeError:
                return False
            if (l > 0):
                return False
            else:
                p = vector(self.base_ring(), [])
        if (len(p) != self.ambient_dim()):
            return False
        for eq in self.equation_generator():
            if (not eq.contains(p)):
                return False
        for ine in self.inequality_generator():
            if (not ine.interior_contains(p)):
                return False
        return True
class DetrForObjectDetection(metaclass=DummyObject):
    """Import-time placeholder raising a helpful error unless ``timm`` is installed.

    CONSISTENCY FIX: sibling dummy classes in this file (e.g.
    MCTCTPreTrainedModel, TFMinLengthLogitsProcessor) declare
    ``metaclass=DummyObject`` and a ``_backends`` list, and decorate
    ``from_pretrained`` with ``@classmethod``; this class had lost all three.
    """

    _backends = ['timm']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['timm'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['timm'])
class MCTCTPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless torch is installed."""

    # Backends this dummy stands in for.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_parameters_fixed():
    """A spec with a fixed non-POI normfactor must build into a Model cleanly."""
    sample_unfixed = {'name': 'sample', 'data': [10.0], 'modifiers': [{'name': 'unfixed', 'type': 'normfactor', 'data': None}]}
    sample_poi = {'name': 'another_sample', 'data': [5.0], 'modifiers': [{'name': 'mypoi', 'type': 'normfactor', 'data': None}]}
    spec = {
        'channels': [{'name': 'channel', 'samples': [sample_unfixed, sample_poi]}],
        'parameters': [{'name': 'mypoi', 'inits': [1], 'fixed': True}],
    }
    # Construction itself is the assertion: it must not raise.
    pyhf.Model(spec, poi_name='mypoi')
class TestNDArrayArrayFunction(object):
    """Tests for ndarray.__array_function__ dispatch behaviour.

    NOTE(review): the bare ``_array_function`` token below is a decorator
    remnant (likely ``@requires_array_function``) whose ``@`` was lost in
    extraction.
    """

    _array_function
    def test_method(self):
        """Exercise dispatch across plain arrays, overriding types and subclasses."""

        class Other(object):
            # A non-ndarray type that participates in dispatch.
            __array_function__ = _return_not_implemented

        class NoOverrideSub(np.ndarray):
            # Subclass inheriting the default __array_function__.
            pass

        class OverrideSub(np.ndarray):
            __array_function__ = _return_not_implemented

        array = np.array([1])
        other = Other()
        no_override_sub = array.view(NoOverrideSub)
        override_sub = array.view(OverrideSub)
        # Plain ndarray-only dispatch runs the original implementation.
        result = array.__array_function__(func=dispatched_two_arg, types=(np.ndarray,), args=(array, 1.0), kwargs={})
        assert_equal(result, 'original')
        # An unrelated overriding type defers: NotImplemented.
        result = array.__array_function__(func=dispatched_two_arg, types=(np.ndarray, Other), args=(array, other), kwargs={})
        assert_((result is NotImplemented))
        # Subclasses (with or without their own override) still dispatch.
        result = array.__array_function__(func=dispatched_two_arg, types=(np.ndarray, NoOverrideSub), args=(array, no_override_sub), kwargs={})
        assert_equal(result, 'original')
        result = array.__array_function__(func=dispatched_two_arg, types=(np.ndarray, OverrideSub), args=(array, override_sub), kwargs={})
        assert_equal(result, 'original')
        with assert_raises_regex(TypeError, 'no implementation found'):
            np.concatenate((array, other))
        # Results keep the most-derived subclass type.
        expected = np.concatenate((array, array))
        result = np.concatenate((array, no_override_sub))
        assert_equal(result, expected.view(NoOverrideSub))
        result = np.concatenate((array, override_sub))
        assert_equal(result, expected.view(OverrideSub))

    def test_no_wrapper(self):
        """A func without an _implementation attribute raises AttributeError."""
        array = np.array(1)
        func = (lambda x: x)
        with assert_raises_regex(AttributeError, '_implementation'):
            array.__array_function__(func=func, types=(np.ndarray,), args=(array,), kwargs={})
def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta, gamma=0.0001, tau_min=0.1, tau_max=0.5): f_k = prev_fs[(- 1)] f_bar = max(prev_fs) alpha_p = 1 alpha_m = 1 alpha = 1 while True: xp = (x_k + (alpha_p * d)) (fp, Fp) = f(xp) if (fp <= ((f_bar + eta) - ((gamma * (alpha_p ** 2)) * f_k))): alpha = alpha_p break alpha_tp = (((alpha_p ** 2) * f_k) / (fp + (((2 * alpha_p) - 1) * f_k))) xp = (x_k - (alpha_m * d)) (fp, Fp) = f(xp) if (fp <= ((f_bar + eta) - ((gamma * (alpha_m ** 2)) * f_k))): alpha = (- alpha_m) break alpha_tm = (((alpha_m ** 2) * f_k) / (fp + (((2 * alpha_m) - 1) * f_k))) alpha_p = np.clip(alpha_tp, (tau_min * alpha_p), (tau_max * alpha_p)) alpha_m = np.clip(alpha_tm, (tau_min * alpha_m), (tau_max * alpha_m)) return (alpha, xp, fp, Fp)
class RefineGAN(Model):
    """Adversarially trained refiner stacked on a pretrained generator.

    Builds a Refiner network R on top of a pretrained GAN's generator output
    and trains it (optionally jointly with the generator) against a
    discriminator whose weights are shared with the pretrained model.
    NOTE(review): original indentation was lost in extraction; block
    boundaries below were reconstructed from the usual TF1 idioms.
    """

    def __init__(self, sess, config, pretrained, name='RefineGAN', reuse=None):
        super().__init__(sess, config, name)
        self.pretrained = pretrained
        print('[*] Building RefineGAN...')
        with tf.variable_scope(name, reuse=reuse) as scope:
            self.scope = scope
            self.build()

    def build(self):
        """Assemble the refiner, shared discriminator, losses and train ops."""
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        # Reuse the pretrained model's input placeholders.
        self.z = self.pretrained.z
        self.x = self.pretrained.x
        self.x_ = self.pretrained.x_
        # Slope of the binary-neuron activation; annealed during training.
        self.slope_tensor = tf.Variable(1.0)
        self.G = Refiner(self.pretrained.G.tensor_out, self.config, slope_tensor=self.slope_tensor, name='R')
        self.D_real = self.pretrained.D_real
        # Share discriminator weights with the pretrained model's scope.
        with tf.variable_scope(self.pretrained.scope, reuse=True):
            self.D_fake = Discriminator(self.G.tensor_out, self.config, name='D')
        self.components = (self.pretrained.G, self.G, self.D_fake)
        (self.g_loss, self.d_loss) = self.get_adversarial_loss(self.pretrained.scope)
        with tf.variable_scope('Optimizer'):
            self.g_optimizer = self.get_optimizer()
            if self.config['joint_training']:
                # Joint training also updates the pretrained generator.
                self.g_step = self.g_optimizer.minimize(self.g_loss, self.global_step, (self.G.vars + self.pretrained.G.vars))
            else:
                self.g_step = self.g_optimizer.minimize(self.g_loss, self.global_step, self.G.vars)
            self.d_optimizer = self.get_optimizer()
            self.d_step = self.d_optimizer.minimize(self.d_loss, self.global_step, self.D_fake.vars)
            # WGAN: clip discriminator weights after each critic update.
            if (self.config['gan']['type'] == 'wgan'):
                with tf.control_dependencies([self.d_step]):
                    self.d_step = tf.group(*(tf.assign(var, tf.clip_by_value(var, (- self.config['gan']['clip_value']), self.config['gan']['clip_value'])) for var in self.D_fake.vars))
        self.metrics = Metrics(self.config)
        self.saver = tf.train.Saver()
        self.print_statistics()
        self.save_statistics()
        self.print_summary()
        self.save_summary()

    def train(self, x_train, train_config):
        """Alternating critic/generator training loop with logging and sampling."""
        # Fixed z / x used for periodic sampling throughout training.
        self.z_sample = np.random.normal(size=(self.config['batch_size'], self.config['net_g']['z_dim']))
        self.x_sample = x_train[np.random.choice(len(x_train), self.config['batch_size'], False)]
        feed_dict_sample = {self.x: self.x_sample, self.z: self.z_sample}
        self.save_samples('x_train', x_train, save_midi=True)
        self.save_samples('x_sample', self.x_sample, save_midi=True)
        # Snapshot the pretrained generator: raw, thresholded, Bernoulli-sampled.
        pretrained_samples = self.sess.run(self.pretrained.G.tensor_out, feed_dict_sample)
        self.save_samples('pretrained', pretrained_samples)
        for threshold in [0.1, 0.3, 0.5, 0.7, 0.9]:
            pretrained_threshold = (pretrained_samples > threshold)
            self.save_samples('pretrained_threshold_{}'.format(threshold), pretrained_threshold, save_midi=True)
        for idx in range(5):
            pretrained_bernoulli = np.ceil((pretrained_samples - np.random.uniform(size=pretrained_samples.shape)))
            self.save_samples('pretrained_bernoulli_{}'.format(idx), pretrained_bernoulli, save_midi=True)
        log_step = open(os.path.join(self.config['log_dir'], 'step.log'), 'w')
        log_batch = open(os.path.join(self.config['log_dir'], 'batch.log'), 'w')
        log_epoch = open(os.path.join(self.config['log_dir'], 'epoch.log'), 'w')
        log_step.write('# epoch, step, negative_critic_loss\n')
        log_batch.write('# epoch, batch, time, negative_critic_loss, g_loss\n')
        log_epoch.write('# epoch, time, negative_critic_loss, g_loss\n')
        if (train_config['slope_annealing_rate'] != 1.0):
            slope_annealing_op = tf.assign(self.slope_tensor, (self.slope_tensor * train_config['slope_annealing_rate']))
        counter = 0
        epoch_counter = 0
        num_batch = (len(x_train) // self.config['batch_size'])
        print('{:=^80}'.format(' Training Start '))
        for epoch in range(train_config['num_epoch']):
            print('{:-^80}'.format(' Epoch {} Start '.format(epoch)))
            epoch_start_time = time.time()
            # Pre-draw all z batches and x indices for this epoch.
            z_random_batch = np.random.normal(size=(num_batch, self.config['batch_size'], self.config['net_g']['z_dim']))
            x_random_batch = np.random.choice(len(x_train), (num_batch, self.config['batch_size']), False)
            for batch in range(num_batch):
                feed_dict_batch = {self.x: x_train[x_random_batch[batch]], self.z: z_random_batch[batch]}
                # Periodically train the critic much longer (WGAN heuristic).
                if ((counter % 500) == 0):
                    num_critics = 100
                else:
                    num_critics = 5
                batch_start_time = time.time()
                for _ in range(num_critics):
                    (_, d_loss) = self.sess.run([self.d_step, self.d_loss], feed_dict_batch)
                    log_step.write('{}, {:14.6f}\n'.format(self.get_global_step_str(), (- d_loss)))
                # One generator update per batch.
                (_, d_loss, g_loss) = self.sess.run([self.g_step, self.d_loss, self.g_loss], feed_dict_batch)
                log_step.write('{}, {:14.6f}\n'.format(self.get_global_step_str(), (- d_loss)))
                time_batch = (time.time() - batch_start_time)
                if train_config['verbose']:
                    if (batch < 1):
                        print('epoch | batch | time | - D_loss | G_loss')
                    print(' {:2d} | {:4d}/{:4d} | {:6.2f} | {:14.6f} | {:14.6f}'.format(epoch, batch, num_batch, time_batch, (- d_loss), g_loss))
                log_batch.write('{:d}, {:d}, {:f}, {:f}, {:f}\n'.format(epoch, batch, time_batch, (- d_loss), g_loss))
                if train_config['sample_along_training']:
                    # Sample more frequently early in training.
                    if (((counter % 100) == 0) or ((counter < 300) and ((counter % 20) == 0))):
                        self.run_sampler(self.G.tensor_out, feed_dict_sample, (counter > 500))
                        self.run_sampler(self.G.preactivated, feed_dict_sample, False, postfix='preactivated')
                if train_config['evaluate_along_training']:
                    if ((counter % 10) == 0):
                        self.run_eval(self.G.tensor_out, feed_dict_sample)
                counter += 1
            time_epoch = (time.time() - epoch_start_time)
            if (not train_config['verbose']):
                if (epoch < 1):
                    print('epoch | time | - D_loss | G_loss')
                print(' {:2d} | {:8.2f} | {:14.6f} | {:14.6f}'.format(epoch, time_epoch, (- d_loss), g_loss))
            log_epoch.write('{:d}, {:f}, {:f}, {:f}\n'.format(epoch, time_epoch, (- d_loss), g_loss))
            self.save()
            if (train_config['slope_annealing_rate'] != 1.0):
                self.sess.run(slope_annealing_op)
            epoch_counter += 1
        print('{:=^80}'.format(' Training End '))
        log_step.close()
        log_batch.close()
        log_epoch.close()
def register_Ns3Dot11sPeerLinkCloseStart_methods(root_module, cls):
    """Register Python-binding metadata for ns3::dot11s::PeerLinkCloseStart."""
    cls.add_binary_comparison_operator('==')
    cls.add_constructor([])
    # (method name, return type, parameters, flags) — registered in order.
    method_specs = [
        ('Deserialize', 'uint32_t',
         [param('ns3::Buffer::Iterator', 'start')],
         {'is_virtual': True}),
        ('GetFields', 'ns3::dot11s::PeerLinkCloseStart::PlinkCloseStartFields',
         [],
         {'is_const': True}),
        ('GetInstanceTypeId', 'ns3::TypeId',
         [],
         {'is_const': True, 'is_virtual': True}),
        ('GetSerializedSize', 'uint32_t',
         [],
         {'is_const': True, 'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId',
         [],
         {'is_static': True}),
        ('Print', 'void',
         [param('std::ostream &', 'os')],
         {'is_const': True, 'is_virtual': True}),
        ('Serialize', 'void',
         [param('ns3::Buffer::Iterator', 'start')],
         {'is_const': True, 'is_virtual': True}),
        ('SetPlinkCloseStart', 'void',
         [param('ns3::dot11s::PeerLinkCloseStart::PlinkCloseStartFields', 'fields')],
         {}),
    ]
    for method_name, retval, params, flags in method_specs:
        cls.add_method(method_name, retval, params, **flags)
    return
class LPIPS(nn.Module):
    """Learned Perceptual Image Patch Similarity (LPIPS) metric.

    Wraps a pretrained feature network and per-layer 1x1 linear heads;
    the distance is the sum over layers of the spatially averaged,
    linearly weighted squared feature differences.
    """

    def __init__(self, net_type: str = 'alex', version: str = '0.1'):
        assert version in ['0.1'], 'v0.1 is only supported now'
        super(LPIPS, self).__init__()
        # Feature extractor and matching linear layers with pretrained weights.
        self.net = get_network(net_type)
        self.lin = LinLayers(self.net.n_channels_list)
        self.lin.load_state_dict(get_state_dict(net_type, version))

    def forward(self, x: torch.Tensor, y: torch.Tensor):
        feat_x = self.net(x)
        feat_y = self.net(y)
        per_layer = []
        # Squared difference per layer, weighted by its linear head,
        # averaged over the spatial dimensions (kept for broadcasting).
        for (fx, fy), layer in zip(zip(feat_x, feat_y), self.lin):
            sq_diff = (fx - fy) ** 2
            per_layer.append(layer(sq_diff).mean((2, 3), True))
        return torch.sum(torch.cat(per_layer, 0), 0, True)
def test_conversion_functions():
    """Cross-check elastic-constant conversion helpers against ElasticConstants."""
    import numpy as nm
    import sfepy.mechanics.matcoefs as mc

    lam = 1.0
    mu = 1.5
    ec = mc.ElasticConstants(lam=lam, mu=mu)
    young, poisson, bulk = ec.get(['young', 'poisson', 'bulk'])

    # Promote the scalar reference values to small arrays.
    lam = nm.array([lam] * 3)
    mu = nm.array([mu] * 3)
    young = nm.array([young] * 3)
    poisson = nm.array([poisson] * 3)

    def _check(label, expected, got):
        # Compare each expected/actual pair to machine precision and report.
        _ok = all(nm.allclose(e, g, rtol=0.0, atol=1e-14)
                  for e, g in zip(expected, got))
        tst.report(label + ':', _ok)
        if not _ok:
            tst.report('correct:', *expected)
            tst.report(' got:', *got)
        return _ok

    ok = True

    _lam, _mu = mc.lame_from_youngpoisson(young, poisson)
    ok = _check('lame_from_youngpoisson()', (lam, mu), (_lam, _mu)) and ok

    _bulk = mc.bulk_from_youngpoisson(young, poisson)
    ok = _check('bulk_from_youngpoisson()', (bulk,), (_bulk,)) and ok

    _bulk = mc.bulk_from_lame(lam, mu)
    ok = _check('bulk_from_lame()', (bulk,), (_bulk,)) and ok

    assert ok
def _convert(image, dtype, force_copy=False, uniform=False):
    """Convert `image` to `dtype`, rescaling intensity values between ranges.

    Conventions (established by the branches below): unsigned ints span
    [0, max], signed ints span [min, max], floats span [-1, 1], booleans
    map to the extremes of the target range.

    Parameters
    ----------
    image : array-like
        Input image.
    dtype : dtype-like
        Target dtype (``np.floating`` selects float64).
    force_copy : bool
        If True, return a copy even when no conversion is needed.
    uniform : bool
        For float -> int, spread values uniformly over the integer levels
        (floor-based binning) instead of rounding to the nearest level.

    Returns
    -------
    ndarray
        Image converted to `dtype`.

    Raises
    ------
    ValueError
        If either dtype is unsupported, or a float image lies outside [-1, 1].
    """
    image = np.asarray(image)
    dtypeobj_in = image.dtype
    # np.floating is an abstract type; concretize it as float64.
    if (dtype is np.floating):
        dtypeobj_out = np.dtype('float64')
    else:
        dtypeobj_out = np.dtype(dtype)
    dtype_in = dtypeobj_in.type
    dtype_out = dtypeobj_out.type
    kind_in = dtypeobj_in.kind
    kind_out = dtypeobj_out.kind
    itemsize_in = dtypeobj_in.itemsize
    itemsize_out = dtypeobj_out.itemsize

    # No-op path: input already satisfies the requested (possibly abstract) dtype.
    if np.issubdtype(dtype_in, np.core.numerictypes.obj2sctype(dtype)):
        if force_copy:
            image = image.copy()
        return image

    if (not ((dtype_in in _supported_types) and (dtype_out in _supported_types))):
        raise ValueError(f'Cannot convert from {dtypeobj_in} to {dtypeobj_out}.')

    # Integer limits, computed only for the integer kinds that need them.
    if (kind_in in 'ui'):
        imin_in = np.iinfo(dtype_in).min
        imax_in = np.iinfo(dtype_in).max
    if (kind_out in 'ui'):
        imin_out = np.iinfo(dtype_out).min
        imax_out = np.iinfo(dtype_out).max

    # Any kind -> bool: threshold at the midpoint of the input range.
    if (kind_out == 'b'):
        return (image > dtype_in((dtype_range[dtype_in][1] / 2)))

    # bool -> any kind: True maps to the top of the output range.
    if (kind_in == 'b'):
        result = image.astype(dtype_out)
        if (kind_out != 'f'):
            result *= dtype_out(dtype_range[dtype_out][1])
        return result

    # float input
    if (kind_in == 'f'):
        if (kind_out == 'f'):
            # float -> float: plain cast, no rescaling.
            return image.astype(dtype_out)
        if ((np.min(image) < (- 1.0)) or (np.max(image) > 1.0)):
            raise ValueError('Images of type float must be between -1 and 1.')
        # float -> int: choose a float width that can represent the output range.
        computation_type = _dtype_itemsize(itemsize_out, dtype_in, np.float32, np.float64)
        if (not uniform):
            # Round-to-nearest mapping onto the integer levels.
            if (kind_out == 'u'):
                image_out = np.multiply(image, imax_out, dtype=computation_type)
            else:
                image_out = np.multiply(image, ((imax_out - imin_out) / 2), dtype=computation_type)
                # Shift so rounding centers the signed range correctly.
                image_out -= (1.0 / 2.0)
            np.rint(image_out, out=image_out)
            np.clip(image_out, imin_out, imax_out, out=image_out)
        elif (kind_out == 'u'):
            # Uniform binning over imax_out + 1 levels.
            image_out = np.multiply(image, (imax_out + 1), dtype=computation_type)
            np.clip(image_out, 0, imax_out, out=image_out)
        else:
            # Uniform binning over the full signed range.
            image_out = np.multiply(image, (((imax_out - imin_out) + 1.0) / 2.0), dtype=computation_type)
            np.floor(image_out, out=image_out)
            np.clip(image_out, imin_out, imax_out, out=image_out)
        return image_out.astype(dtype_out)

    # int input -> float output
    if (kind_out == 'f'):
        # Choose a float width wide enough for the input's integer range.
        computation_type = _dtype_itemsize(itemsize_in, dtype_out, np.float32, np.float64)
        if (kind_in == 'u'):
            # [0, imax_in] -> [0, 1]
            image = np.multiply(image, (1.0 / imax_in), dtype=computation_type)
        elif (kind_in == 'i'):
            # [imin_in, imax_in] -> [-1, 1]; clamp the asymmetric minimum.
            image = np.multiply(image, (1.0 / imax_in), dtype=computation_type)
            np.maximum(image, (- 1.0), out=image)
        else:
            image = np.add(image, 0.5, dtype=computation_type)
            image *= (2 / (imax_in - imin_in))
        return np.asarray(image, dtype_out)

    # unsigned int -> int (any): bit-width rescale via _scale.
    if (kind_in == 'u'):
        if (kind_out == 'i'):
            # Drop one bit so the result fits the signed positive range.
            image = _scale(image, (8 * itemsize_in), ((8 * itemsize_out) - 1))
            return image.view(dtype_out)
        else:
            return _scale(image, (8 * itemsize_in), (8 * itemsize_out))

    # signed int -> unsigned int: negatives clamp to 0.
    if (kind_out == 'u'):
        image = _scale(image, ((8 * itemsize_in) - 1), (8 * itemsize_out))
        result = np.empty(image.shape, dtype_out)
        np.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe')
        return result

    # signed int -> narrower signed int: direct bit-width reduction.
    if (itemsize_in > itemsize_out):
        return _scale(image, ((8 * itemsize_in) - 1), ((8 * itemsize_out) - 1))

    # signed int -> wider signed int: widen, shift to unsigned, scale, shift back.
    image = image.astype(_dtype_bits('i', (itemsize_out * 8)))
    image -= imin_in
    image = _scale(image, (8 * itemsize_in), (8 * itemsize_out), copy=False)
    image += imin_out
    return image.astype(dtype_out)
def bounds_from_last_device(last_device: JaxDevice) -> HardwareMesh:
    """Derive the hardware mesh bounds from the highest-indexed device.

    Devices exposing `coords` (and `core_on_chip`) yield a 4-tuple of
    exclusive upper bounds; other backends fall back to
    (host count, local device count).
    """
    if not hasattr(last_device, 'coords'):
        # Non-mesh backends: hosts x devices-per-host.
        return (jax.host_count(), jax.local_device_count())
    x, y, z = last_device.coords
    # coords are zero-based indices, so bounds are index + 1.
    return (x + 1, y + 1, z + 1, last_device.core_on_chip + 1)
def save_checkpoint(state, model_dir):
    """Persist the replicated train state to `model_dir` (host 0 only)."""
    if jax.host_id() != 0:
        return
    # Un-replicate by taking each leaf's copy from the first device.
    host_state = jax.device_get(jax.tree_map(lambda x: x[0], state))
    checkpoints.save_checkpoint(model_dir, host_state, int(host_state.step), keep=3)
def remove_Zrot(pose):
    """Zero the Z component of the rotation in pose[:3]; mutates and returns `pose`."""
    euler = em2euler(pose[:3].copy())
    euler[2] = 0
    pose[:3] = euler2em(euler).copy()
    return pose
class ActNorm(nn.Module):
    """Activation normalization with data-dependent initialization.

    On the first training forward pass, `bias` and `logs` are initialized
    from the batch statistics so the output is roughly zero-mean with
    scale `scale`; afterwards they act as ordinary learned parameters.
    """

    def __init__(self, dim: int, scale: float = 1.0):
        super().__init__()
        shape = [1, dim]
        self.register_parameter('bias', nn.Parameter(torch.zeros(*shape)))
        self.register_parameter('logs', nn.Parameter(torch.zeros(*shape)))
        self.dim = dim
        self.scale = float(scale)
        # NOTE: original (misspelled) attribute name kept intentionally,
        # in case external code inspects it.
        self.initializeed = False

    def initialize_parameters(self, x: FloatTensor):
        # Data-dependent init happens only while training.
        if not self.training:
            return
        assert x.device == self.bias.device
        with torch.no_grad():
            mean = _mean(x.clone(), dim=0, keepdim=True)
            variance = _mean((x.clone() - mean) ** 2, dim=0, keepdim=True)
            logs = torch.log(self.scale / (torch.sqrt(variance) + 1e-06))
            self.bias.data.copy_(mean.data)
            self.logs.data.copy_(logs.data)
        self.initializeed = True

    def forward(self, x: FloatTensor) -> FloatTensor:
        if not self.initializeed:
            self.initialize_parameters(x)
        # Normalize: shift then scale.
        return (x - self.bias) * torch.exp(self.logs)

    def inv(self, x: FloatTensor) -> FloatTensor:
        # Exact inverse of forward: unscale then unshift.
        return x * torch.exp(-self.logs) + self.bias
def conv3d(norm_type, in_planes, out_planes, kernel_size=3, stride=1, num_groups=2):
    """Build a Conv3d -> [norm] -> LeakyReLU(0.2) block.

    norm_type selects the normalization layer: 'batch' -> BatchNorm3d,
    'group' -> GroupNorm(num_groups), anything else -> no normalization.
    Padding is chosen so odd kernel sizes preserve spatial dims at stride 1.
    """
    conv = nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                     stride=stride, padding=(kernel_size - 1) // 2, bias=True)
    act = nn.LeakyReLU(0.2, inplace=True)
    if norm_type == 'batch':
        norm = nn.BatchNorm3d(out_planes)
    elif norm_type == 'group':
        norm = nn.GroupNorm(num_groups, out_planes)
    else:
        # No normalization requested.
        return nn.Sequential(conv, act)
    return nn.Sequential(conv, norm, act)
def main():
    """Train and evaluate a GNN link predictor with PLM node features.

    Parses CLI args, loads the DGL graph and precomputed language-model
    embeddings, builds the chosen GNN + MLP link predictor, and runs
    `args.runs` independent training runs with periodic evaluation.
    """
    parser = argparse.ArgumentParser(description='Link-Prediction PLM/TCL')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--use_node_embedding', action='store_true')
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--batch_size', type=int, default=(64 * 1024))
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--gnn_model', type=str, help='GNN MOdel', default='GCN')
    parser.add_argument('--heads', type=int, default=4)
    parser.add_argument('--eval_steps', type=int, default=1)
    parser.add_argument('--runs', type=int, default=5)
    parser.add_argument('--test_ratio', type=float, default=0.08)
    parser.add_argument('--val_ratio', type=float, default=0.02)
    parser.add_argument('--neg_len', type=str, default='10000')
    parser.add_argument('--use_PLM', type=str,
                        default='/mnt/v-wzhuang/TAG/Finetune/Amazon/History/Bert/Base/emb.npy',
                        help='Use LM embedding as feature')
    parser.add_argument('--path', type=str,
                        default='/mnt/v-wzhuang/TAG/Link_Predction/History/',
                        help='Path to save splitting')
    parser.add_argument('--graph_path', type=str,
                        default='/mnt/v-wzhuang/Amazon/Books/Amazon-Books-History.pt',
                        help='Path to load the graph')
    args = parser.parse_args()
    wandb.config = args
    wandb.init(config=args, reinit=True)
    print(args)

    if not os.path.exists(f'{args.path}{args.neg_len}/'):
        os.makedirs(f'{args.path}{args.neg_len}/')

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    graph = dgl.load_graphs(f'{args.graph_path}')[0][0]
    # BUG FIX: previously hard-coded test_ratio=0.08, val_ratio=0.02, which
    # silently ignored the --test_ratio/--val_ratio CLI arguments.
    edge_split = split_edge(graph, test_ratio=args.test_ratio,
                            val_ratio=args.val_ratio, path=args.path,
                            neg_len=args.neg_len)

    # PLM embeddings as node features (redundant second .to(device) removed).
    x = torch.from_numpy(np.load(args.use_PLM).astype(np.float32)).to(device)

    edge_index = edge_split['train']['edge'].t()
    adj_t = SparseTensor.from_edge_index(edge_index).t()
    adj_t = adj_t.to_symmetric().to(device)

    if args.gnn_model == 'SAGE':
        model = SAGE(x.size(1), args.hidden_channels, args.hidden_channels,
                     args.num_layers, args.dropout).to(device)
    elif args.gnn_model == 'GCN':
        model = GCN(x.size(1), args.hidden_channels, args.hidden_channels,
                    args.num_layers, args.dropout).to(device)
    elif args.gnn_model == 'GAT':
        model = GAT(x.size(1), args.hidden_channels, args.hidden_channels,
                    args.num_layers, args.heads, args.dropout).to(device)
    else:
        raise ValueError('Not implemented')

    predictor = LinkPredictor(args.hidden_channels, args.hidden_channels, 1,
                              args.num_layers, args.dropout).to(device)
    evaluator = Evaluator(name='History')

    # FIXME(review): the original dict used three identical '' keys, which
    # collapse to a single entry; the intended per-metric names (e.g.
    # 'Hits@K') appear to have been lost. Keeping one logger under '' to
    # preserve current behavior — restore the real metric names from test().
    loggers = {'': Logger(args.runs, args)}

    for run in range(args.runs):
        model.reset_parameters()
        predictor.reset_parameters()
        optimizer = torch.optim.Adam(
            list(model.parameters()) + list(predictor.parameters()), lr=args.lr)
        for epoch in range(1, 1 + args.epochs):
            loss = train(model, predictor, x, adj_t, edge_split, optimizer,
                         args.batch_size)
            if epoch % args.eval_steps == 0:
                results = test(model, predictor, x, adj_t, edge_split,
                               evaluator, args.batch_size)
                for key, result in results.items():
                    loggers[key].add_result(run, result)
                if epoch % args.log_steps == 0:
                    for key, result in results.items():
                        train_hits, valid_hits, test_hits = result
                        print(key)
                        print(f'Run: {(run + 1):02d}, '
                              f'Epoch: {epoch:02d}, '
                              f'Loss: {loss:.4f}, '
                              f'Train: {(100 * train_hits):.2f}%, '
                              f'Valid: {(100 * valid_hits):.2f}%, '
                              f'Test: {(100 * test_hits):.2f}%')
                    print('---')
        for key in loggers.keys():
            print(key)
            loggers[key].print_statistics(run)
    for key in loggers.keys():
        print(key)
        loggers[key].print_statistics(key=key)
def get_uncertainty(models, unlabeled_loader):
    """Collect per-sample uncertainty scores over an unlabeled loader.

    For the 'LL4AL' sampling strategy, uncertainty is the loss predicted
    by the auxiliary module from the backbone's intermediate features.
    Returns a CPU tensor of concatenated scores.
    """
    models['backbone'].eval()
    models['module'].eval()
    uncertainty = torch.tensor([]).cuda()
    with torch.no_grad():
        for inputs, labels in unlabeled_loader:
            inputs = inputs.cuda()
            labels = labels.cuda()
            scores, cons_scores, features, features_list = models['backbone'](inputs)
            if SAMPLING == 'LL4AL':
                # Predicted loss acts as the uncertainty score.
                pred_loss = models['module'](features_list)
                pred_loss = pred_loss.view(pred_loss.size(0))
                uncertainty = torch.cat((uncertainty, pred_loss), dim=0)
    return uncertainty.cpu()
def make_pyproject_path(unpacked_source_directory):
    """Return the path to pyproject.toml inside the unpacked source tree."""
    path = os.path.join(unpacked_source_directory, 'pyproject.toml')
    # Python 2 filesystem APIs prefer encoded byte paths for unicode input.
    if six.PY2 and isinstance(path, six.text_type):
        return path.encode(sys.getfilesystemencoding())
    return path
def main() -> None:
    """Assemble the optimization plan and execute it in the current directory."""
    sim_space = create_sim_space('sim_fg.gds', 'sim_bg.gds')
    obj, monitors = create_objective(sim_space)
    transformations = create_transformations(
        obj, monitors, sim_space, cont_iters=100, min_feature=100)
    plan = optplan.OptimizationPlan(transformations=transformations)
    problem_graph.run_plan(plan, '.')