code
stringlengths
101
5.91M
class TPCheckpointWrapper(TorchWrapper):
    """Runs the wrapped module's forward pass through ``checkpoint_fn``.

    Keyword arguments are flattened into a positional tuple before the
    checkpoint call (checkpoint APIs track positional inputs only) and
    restored inside the checkpointed function.
    """

    def __init__(self, mod, checkpoint_fn):
        super().__init__(mod)
        self.checkpoint_fn = checkpoint_fn

    def forward(self, *args, **kwargs):
        # Flatten kwargs into positional form; remember the keys so the
        # original call signature can be rebuilt inside the checkpoint.
        flat_args, kwarg_keys = _pack_kwargs(*args, **kwargs)

        def run(*inputs):
            inner_args, inner_kwargs = _unpack_kwargs(inputs, kwarg_keys)
            # NOTE(review): ``_checkpoint_wrapped_module`` is presumably set
            # by TorchWrapper.__init__ — confirm against the base class.
            return self._checkpoint_wrapped_module(*inner_args, **inner_kwargs)

        return self.checkpoint_fn(run, True, *flat_args)
def ReadGt_ctw(fileroot, filename):
    """Read the CTW ground-truth file ``<fileroot><filename>.txt``.

    Returns the raw lines of the file (newline characters preserved)
    as a list of strings.
    """
    path = fileroot + filename + '.txt'
    with open(path) as handle:
        return handle.readlines()
def _report_metrics(result_dict):
    """Pull the reported metrics out of a train/eval result dict.

    Returns (align_err_w, H_YT, kl_s, kl_s2, LAS); H_YT is the
    sentence-level negative log-probability ('nlogp_s'), matching the
    original aliasing.
    """
    return (result_dict['align_err_w'], result_dict['nlogp_s'],
            result_dict['kl_w'], result_dict['kl_w2'], result_dict['LAS'])


def _print_summary(label, split, model, args, result_dict):
    """Print one final/mid-point evaluation summary line and flush stdout."""
    align_err_w, H_YT, kl_s, kl_s2, LAS = _report_metrics(result_dict)
    print(('%s: beta=%f, gamma=%f, tag_dim=%d, %s, elmo=%s, MI(X,T)=%.5f, '
           'H(Y|T)=%.5f, MI(X,Ti|Xi)=%.5f, err=%.5f, LAS=%.3f, UAS=%.3f'
           % (label, model.beta, model.gamma, args.tag_dim, split,
              args.embedding_source, kl_s, H_YT, kl_s2, align_err_w, LAS,
              (1 - align_err_w))))
    sys.stdout.flush()


def _print_epoch_report(label, e, model, args, result_dict):
    """Print one per-epoch report line (includes KL terms) and flush stdout.

    NOTE(review): the original printed the split as "dev" in both the
    train_epoch and dev_epoch lines; kept for log compatibility.
    """
    align_err_w, H_YT, kl_s, kl_s2, LAS = _report_metrics(result_dict)
    print(('%s%i: beta=%f, gamma=%f, tag_dim=%d, dev, elmo=%s, MI(X,T)=%.5f, '
           'H(Y|T)=%.5f, MI(X,Ti|Xi)=%.5f, err=%.5f, LAS=%.3f, UAS=%.3f, '
           'KL1=%.3f, KL2=%.3f'
           % (label, e, model.beta, model.gamma, args.tag_dim,
              args.embedding_source, kl_s, H_YT, kl_s2, align_err_w, LAS,
              (1 - align_err_w), kl_s, kl_s2)))
    sys.stdout.flush()


def main(args):
    """Train (``args.mode == 'train'``) or evaluate (``'evaluate'``) a VIB model.

    Loads dev (and optionally test) data, precomputed ELMo embeddings and
    either a continuous or discrete VIB model, then runs the requested mode.

    Returns:
        0 on completion.

    Raises:
        ValueError: if ``args.task`` is neither 'VIB_continuous' nor
            'VIB_discrete'.
    """
    _start_()
    data_loader = _load_data()
    corpus_dev, _batch_idx_dev, en_batch_dev, _tree_dev = data_loader.load_dev(args.dataset_dev)
    if args.test == 'yes':
        corpus_test, _batch_idx_test, en_batch_test, _tree_test = data_loader.load_dev(args.dataset_test)
    else:
        en_batch_test = None
    pre_compute_dict, embed_loader = _load_pretrain_emb(
        data_loader, en_batch_dev=en_batch_dev, en_batch_test=en_batch_test)
    elmo_embeds_train = pre_compute_dict['elmo_embeds_train']
    non_context_embeds_train = pre_compute_dict['non_context_embeds_train']
    elmo_embeds_dev = pre_compute_dict['elmo_embeds_dev']
    non_context_embeds_dev = pre_compute_dict['non_context_embeds_dev']
    if args.test == 'yes':
        elmo_embeds_test = pre_compute_dict['elmo_embeds_test']
        non_context_embeds_test = pre_compute_dict['non_context_embeds_test']

    if args.task == 'VIB_continuous':
        model = Continuous_VIB(args, data_loader.word_dict)
    elif args.task == 'VIB_discrete':
        model = Discrete_VIB(args, data_loader.word_dict)
    else:
        # Bug fix: the original only printed a message here and then crashed
        # with a NameError on the undefined `model`; fail fast instead.
        raise ValueError('invalid model type -- should be either VIB_continuous or VIB_discrete')
    model.to(args.device)

    if args.mode == 'train':
        training_total_data = sum(len(elem) for elem in data_loader.en_batch)
        print('the total amount of training sentence is {}'.format(training_total_data))
        print('total training batch number:', len(data_loader.corpus))
        print(('tag_dim=%d, beta=%f' % (args.tag_dim, args.beta)))
        print('start_training:')
        # Temperature step per batch (one full sweep per epoch).
        delta_temp = (1 / len(data_loader.corpus))
        for e in range(args.epoch):
            logger.info(('epoch %d/%d:' % (e, args.epoch)))
            result_dict = model.train_batch(
                data_loader.corpus, args.sent_per_epoch,
                elmo_embeds_train, non_context_embeds_train,
                delta_temp=delta_temp)
            # A checkpoint is written after every epoch.
            model.save_model(args.save_path + ('_epoch_%d' % e))
            _print_epoch_report('train_epoch', e, model, args, result_dict)
            result_dict = model.parse_dev_batch(
                corpus_dev, elmo_embeds_dev, non_context_embeds_dev,
                (args.out_path + '_middev'))
            _print_epoch_report('dev_epoch', e, model, args, result_dict)
            if e == 20:
                # Fixed mid-training checkpoint evaluation on the train split.
                print('mid-point evaluation:')
                print('train data total batches:', len(data_loader.corpus))
                result_dict = model.parse_dev_batch(
                    data_loader.corpus, elmo_embeds_train,
                    non_context_embeds_train, (args.out_path + '_midtrain'))
                _print_summary('summary_train_mid', 'train', model, args, result_dict)
        print('finished training [DONE]')
        model.save_model(args.save_path)
        print('train data total batches:', len(data_loader.corpus))
        result_dict = model.parse_dev_batch(
            data_loader.corpus, elmo_embeds_train, non_context_embeds_train,
            (args.out_path + '_train'))
        _print_summary('summary_train_final', 'train', model, args, result_dict)
        print('dev data total batches:', len(corpus_dev))
        result_dict = model.parse_dev_batch(
            corpus_dev, elmo_embeds_dev, non_context_embeds_dev,
            (args.out_path + '_dev'))
        _print_summary('summary_dev_final', 'dev', model, args, result_dict)
        if args.test == 'yes':
            print('test data total batches:', len(corpus_test))
            result_dict = model.parse_dev_batch(
                corpus_test, elmo_embeds_test, non_context_embeds_test,
                (args.out_path + '_test'))
            _print_summary('summary_test_final', 'test', model, args, result_dict)
        return 0
    elif args.mode == 'evaluate':
        model = model.load_model(args.checkpoint_path)
        print('dev data total batches:', len(corpus_dev))
        result_dict = model.parse_dev_batch(
            corpus_dev, elmo_embeds_dev, non_context_embeds_dev,
            (args.out_path + '_dev'))
        _print_summary('summary_dev_final', 'dev', model, args, result_dict)
        if args.test == 'yes':
            print('test data total batches:', len(corpus_test))
            result_dict = model.parse_dev_batch(
                corpus_test, elmo_embeds_test, non_context_embeds_test,
                (args.out_path + '_test'))
            _print_summary('summary_test_final', 'test', model, args, result_dict)
    else:
        print('invalid mode')
    return 0
class Path(object):
    """Resolves dataset names to their on-disk root directories."""

    # Single place to register a new dataset root.
    _ROOTS = {
        'sceneflow': './dataset/SceneFlow/',
        'kitti15': './dataset/kitti2015/training/',
        'kitti12': './dataset/kitti2012/training/',
        'middlebury': './dataset/MiddEval3/trainingH/',
    }

    @staticmethod
    def db_root_dir(dataset):
        """Return the root directory for ``dataset``.

        Bug fix: the original method lacked ``@staticmethod``, so calling it
        on an instance passed the instance as ``dataset``.

        Raises:
            NotImplementedError: for unknown dataset names (after printing a
                notice, matching the original behavior).
        """
        if dataset in Path._ROOTS:
            return Path._ROOTS[dataset]
        print('Dataset {} not available.'.format(dataset))
        raise NotImplementedError
def relative_path(path_map: Dict[Path, Path], filename: Path) -> Path:
    """Re-root ``filename`` using the first matching source dir in ``path_map``.

    Args:
        path_map: maps a source directory to its replacement directory.
        filename: path expected to live under one of the source directories.

    Returns:
        ``filename`` with its matching source prefix swapped for the mapped
        destination.

    Raises:
        ValueError: if no entry of ``path_map`` is an ancestor of ``filename``.
    """
    for src, dst in path_map.items():
        if src in filename.parents:
            return dst / filename.relative_to(src)
    # Bug fix: the original raised a bare ``Exception()`` with no message.
    # ValueError subclasses Exception, so existing handlers still catch it.
    raise ValueError('{} is not under any mapped directory'.format(filename))
def get_arg_parser():
    """Build the command-line parser for the training script.

    Covers training hyper-parameters, DeepRec feature toggles
    (``boolean_string``-typed flags) and cluster-mode options.

    Returns:
        argparse.ArgumentParser with all options registered.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_location', help='Full path of train data', required=False, default='./data')
    parser.add_argument('--steps', help='set the number of steps on train dataset', type=int, default=0)
    parser.add_argument('--batch_size', help='Batch size to train. Default is 512', type=int, default=512)
    parser.add_argument('--output_dir', help='Full path to model output directory. Default to ./result. Covered by --checkpoint. ', required=False, default='./result')
    parser.add_argument('--checkpoint', help='Full path to checkpoints input/output. Default to ./result/$MODEL_TIMESTAMP', required=False)
    parser.add_argument('--save_steps', help='set the number of steps on saving checkpoints', type=int, default=0)
    parser.add_argument('--seed', help='set the random seed for tensorflow', type=int, default=2021)
    parser.add_argument('--optimizer', type=str, choices=['adam', 'adamasync', 'adagraddecay', 'adagrad'], default='adamasync')
    parser.add_argument('--linear_learning_rate', help='Learning rate for linear model', type=float, default=0.2)
    parser.add_argument('--deep_learning_rate', help='Learning rate for deep model', type=float, default=0.01)
    parser.add_argument('--keep_checkpoint_max', help='Maximum number of recent checkpoint to keep', type=int, default=1)
    parser.add_argument('--timeline', help='number of steps on saving timeline. Default 0', type=int, default=0)
    parser.add_argument('--protocol', type=str, choices=['grpc', 'grpc++', 'star_server'], default='grpc')
    parser.add_argument('--inter', help='set inter op parallelism threads.', type=int, default=0)
    # Bug fix: the help text said "inter" (copy-paste from --inter above).
    parser.add_argument('--intra', help='set intra op parallelism threads.', type=int, default=0)
    parser.add_argument('--input_layer_partitioner', help='slice size of input layer partitioner, units MB. Default 8MB', type=int, default=8)
    parser.add_argument('--dense_layer_partitioner', help='slice size of dense layer partitioner, units KB. Default 16KB', type=int, default=16)
    parser.add_argument('--bf16', help='enable DeepRec BF16 in deep model. Default FP32', action='store_true')
    parser.add_argument('--no_eval', help='not evaluate trained model by eval dataset.', action='store_true')
    parser.add_argument('--tf', help='Use TF 1.15.5 API and disable DeepRec feature to run a baseline.', action='store_true')
    parser.add_argument('--smartstaged', help='Whether to enable smart staged feature of DeepRec, Default to True.', type=boolean_string, default=True)
    parser.add_argument('--emb_fusion', help='Whether to enable embedding fusion, Default to True.', type=boolean_string, default=True)
    parser.add_argument('--ev', help='Whether to enable DeepRec EmbeddingVariable. Default False.', type=boolean_string, default=False)
    parser.add_argument('--ev_elimination', help='Feature Elimination of EmbeddingVariable Feature. Default closed.', type=str, choices=[None, 'l2', 'gstep'], default=None)
    parser.add_argument('--ev_filter', help='Feature Filter of EmbeddingVariable Feature. Default closed.', type=str, choices=[None, 'counter', 'cbf'], default=None)
    parser.add_argument('--op_fusion', help='Whether to enable Auto graph fusion feature. Default to True', type=boolean_string, default=True)
    parser.add_argument('--micro_batch', help='Set num for Auto Mirco Batch. Default close.', type=int, default=0)
    parser.add_argument('--adaptive_emb', help='Whether to enable Adaptive Embedding. Default to False.', type=boolean_string, default=False)
    parser.add_argument('--dynamic_ev', help='Whether to enable Dynamic-dimension Embedding Variable. Default to False.', type=boolean_string, default=False)
    parser.add_argument('--incremental_ckpt', help='Set time of save Incremental Checkpoint. Default 0 to close.', type=int, default=0)
    parser.add_argument('--workqueue', help='Whether to enable Work Queue. Default to False.', type=boolean_string, default=False)
    parser.add_argument('--cluster_mode', help='The cluster mode, such as local, k8s and yarn.', type=str, default='local')
    parser.add_argument('--num_nodes', help='The number of nodes to use in the cluster.', type=int, default=1)
    parser.add_argument('--cores', help='The number of cpu cores to use on each node.', type=int, default=8)
    parser.add_argument('--instances_per_node', help='The number of ps and worker instances to run on each node.', type=int, default=1)
    parser.add_argument('--master', help='k8s master ip and port.', type=str, default=None)
    parser.add_argument('--num_ps', help='The number of parameter servers to use.', type=int, default=1)
    parser.add_argument('--in_memory', help='Whether to run the example based on in-memory data ingestion.', action='store_true')
    return parser
class FaultTolerantDistributedSampler(DistributedSampler):
    """DistributedSampler that can resume mid-epoch after a restart.

    ``counter`` tracks how many indices have been handed out in the current
    epoch; after ``load_state_dict`` the next iteration skips that many
    indices so training continues exactly where it stopped.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.counter = 0          # indices consumed in the current epoch
        self.restarting = False   # set by load_state_dict(); cleared by __iter__

    def state_dict(self):
        """Snapshot the progress needed to resume: epoch + consumed count."""
        return {'epoch': self.epoch, 'counter': self.counter}

    def load_state_dict(self, state_dict):
        """Restore progress; the next ``__iter__`` resumes from ``counter``."""
        self.epoch = state_dict['epoch']
        self.counter = state_dict['counter']
        self.restarting = True

    def __len__(self) -> int:
        # Remaining (not total) samples for this epoch.
        return self.num_samples - self.counter

    def __iter__(self):
        # Deterministic per-epoch ordering, identical on every rank.
        if self.shuffle:
            gen = torch.Generator()
            gen.manual_seed(self.seed + self.epoch)
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = list(range(len(self.dataset)))

        if self.drop_last:
            order = order[:self.total_size]
        else:
            # Pad by repeating from the front so every rank gets an equal share.
            shortfall = self.total_size - len(order)
            if shortfall <= len(order):
                order += order[:shortfall]
            else:
                order += (order * math.ceil(shortfall / len(order)))[:shortfall]
        assert len(order) == self.total_size

        # This rank's interleaved slice.
        order = order[self.rank:self.total_size:self.num_replicas]
        assert len(order) == self.num_samples

        if self.restarting:
            # Skip what was already consumed before the restart.
            order = order[self.counter:]
            self.restarting = False
        else:
            self.counter = 0

        for idx in order:
            self.counter += 1
            yield idx
def prepare_parser():
    """Build the shared argument parser for all BigGAN-style scripts.

    Option groups: dataset/dataloader, model architecture, optimizers,
    batching, training toggles, testing/saving, EMA, numerics, ortho
    regularization and logging.
    """
    usage = 'Parser for all scripts.'
    parser = ArgumentParser(description=usage)

    # --- Dataset / dataloader ---
    parser.add_argument('--dataset', type=str, default='I128_hdf5', help='Which Dataset to train on, out of I128, I256, C10, C100;Append "_hdf5" to use the hdf5 version for ISLVRC (default: %(default)s)')
    parser.add_argument('--augment', action='store_true', default=False, help='Augment with random crops and flips (default: %(default)s)')
    parser.add_argument('--num_workers', type=int, default=8, help='Number of dataloader workers; consider using less for HDF5 (default: %(default)s)')
    parser.add_argument('--no_pin_memory', action='store_false', dest='pin_memory', default=True, help='Pin data into memory through dataloader? (default: %(default)s)')
    parser.add_argument('--shuffle', action='store_true', default=False, help='Shuffle the data (strongly recommended)? (default: %(default)s)')
    parser.add_argument('--load_in_mem', action='store_true', default=False, help='Load all data into memory? (default: %(default)s)')
    parser.add_argument('--use_multiepoch_sampler', action='store_true', default=False, help='Use the multi-epoch sampler for dataloader? (default: %(default)s)')

    # --- Discretization options ---
    parser.add_argument('--dict_decay', type=float, default=0.8, help='discrete dict learning decay')
    parser.add_argument('--commitment', type=float, default=0.5, help='regularizer coefficient')
    parser.add_argument('--discrete_layer', type=str, default='2', help='which layer to add the discretization')
    parser.add_argument('--dict_size', type=int, default=10, help='number of keys in dict')

    # --- Model architecture ---
    parser.add_argument('--model', type=str, default='BigGAN', help='Name of the model module (default: %(default)s)')
    parser.add_argument('--G_param', type=str, default='SN', help='Parameterization style to use for G, spectral norm (SN) or SVD (SVD) or None (default: %(default)s)')
    parser.add_argument('--D_param', type=str, default='SN', help='Parameterization style to use for D, spectral norm (SN) or SVD (SVD) or None (default: %(default)s)')
    parser.add_argument('--G_ch', type=int, default=64, help='Channel multiplier for G (default: %(default)s)')
    parser.add_argument('--D_ch', type=int, default=64, help='Channel multiplier for D (default: %(default)s)')
    parser.add_argument('--G_depth', type=int, default=1, help='Number of resblocks per stage in G? (default: %(default)s)')
    parser.add_argument('--D_depth', type=int, default=1, help='Number of resblocks per stage in D? (default: %(default)s)')
    parser.add_argument('--D_thin', action='store_false', dest='D_wide', default=True, help='Use the SN-GAN channel pattern for D? (default: %(default)s)')
    parser.add_argument('--G_shared', action='store_true', default=False, help='Use shared embeddings in G? (default: %(default)s)')
    parser.add_argument('--shared_dim', type=int, default=0, help='Gs shared embedding dimensionality; if 0, will be equal to dim_z. (default: %(default)s)')
    parser.add_argument('--dim_z', type=int, default=128, help='Noise dimensionality: %(default)s)')
    parser.add_argument('--z_var', type=float, default=1.0, help='Noise variance: %(default)s)')
    parser.add_argument('--hier', action='store_true', default=False, help='Use hierarchical z in G? (default: %(default)s)')
    parser.add_argument('--cross_replica', action='store_true', default=False, help='Cross_replica batchnorm in G?(default: %(default)s)')
    parser.add_argument('--mybn', action='store_true', default=False, help='Use my batchnorm (which supports standing stats?) %(default)s)')
    parser.add_argument('--G_nl', type=str, default='relu', help='Activation function for G (default: %(default)s)')
    parser.add_argument('--D_nl', type=str, default='relu', help='Activation function for D (default: %(default)s)')
    parser.add_argument('--G_attn', type=str, default='64', help='What resolutions to use attention on for G (underscore separated) (default: %(default)s)')
    parser.add_argument('--D_attn', type=str, default='64', help='What resolutions to use attention on for D (underscore separated) (default: %(default)s)')
    parser.add_argument('--norm_style', type=str, default='bn', help='Normalizer style for G, one of bn [batchnorm], in [instancenorm], ln [layernorm], gn [groupnorm] (default: %(default)s)')

    # --- Init / optimizer ---
    parser.add_argument('--seed', type=int, default=0, help='Random seed to use; affects both initialization and dataloading. (default: %(default)s)')
    parser.add_argument('--G_init', type=str, default='ortho', help='Init style to use for G (default: %(default)s)')
    parser.add_argument('--D_init', type=str, default='ortho', help='Init style to use for D(default: %(default)s)')
    parser.add_argument('--skip_init', action='store_true', default=False, help='Skip initialization, ideal for testing when ortho init was used (default: %(default)s)')
    parser.add_argument('--G_lr', type=float, default=5e-05, help='Learning rate to use for Generator (default: %(default)s)')
    parser.add_argument('--D_lr', type=float, default=0.0002, help='Learning rate to use for Discriminator (default: %(default)s)')
    parser.add_argument('--G_B1', type=float, default=0.0, help='Beta1 to use for Generator (default: %(default)s)')
    parser.add_argument('--D_B1', type=float, default=0.0, help='Beta1 to use for Discriminator (default: %(default)s)')
    parser.add_argument('--G_B2', type=float, default=0.999, help='Beta2 to use for Generator (default: %(default)s)')
    parser.add_argument('--D_B2', type=float, default=0.999, help='Beta2 to use for Discriminator (default: %(default)s)')

    # --- Batching ---
    parser.add_argument('--batch_size', type=int, default=64, help='Default overall batchsize (default: %(default)s)')
    parser.add_argument('--G_batch_size', type=int, default=0, help='Batch size to use for G; if 0, same as D (default: %(default)s)')
    parser.add_argument('--num_G_accumulations', type=int, default=1, help='Number of passes to accumulate Gs gradients over (default: %(default)s)')
    parser.add_argument('--num_D_steps', type=int, default=2, help='Number of D steps per G step (default: %(default)s)')
    parser.add_argument('--num_D_accumulations', type=int, default=1, help='Number of passes to accumulate Ds gradients over (default: %(default)s)')
    parser.add_argument('--split_D', action='store_true', default=False, help='Run D twice rather than concatenating inputs? (default: %(default)s)')
    parser.add_argument('--num_epochs', type=int, default=100, help='Number of epochs to train for (default: %(default)s)')
    parser.add_argument('--parallel', action='store_true', default=False, help='Train with multiple GPUs (default: %(default)s)')

    # --- Precision / batchnorm stats ---
    parser.add_argument('--G_fp16', action='store_true', default=False, help='Train with half-precision in G? (default: %(default)s)')
    parser.add_argument('--D_fp16', action='store_true', default=False, help='Train with half-precision in D? (default: %(default)s)')
    parser.add_argument('--D_mixed_precision', action='store_true', default=False, help='Train with half-precision activations but fp32 params in D? (default: %(default)s)')
    parser.add_argument('--G_mixed_precision', action='store_true', default=False, help='Train with half-precision activations but fp32 params in G? (default: %(default)s)')
    parser.add_argument('--accumulate_stats', action='store_true', default=False, help='Accumulate "standing" batchnorm stats? (default: %(default)s)')
    parser.add_argument('--num_standing_accumulations', type=int, default=16, help='Number of forward passes to use in accumulating standing stats? (default: %(default)s)')

    # --- Testing / saving ---
    parser.add_argument('--G_eval_mode', action='store_true', default=False, help='Run G in eval mode (running/standing stats?) at sample/test time? (default: %(default)s)')
    parser.add_argument('--save_every', type=int, default=2000, help='Save every X iterations (default: %(default)s)')
    parser.add_argument('--num_save_copies', type=int, default=2, help='How many copies to save (default: %(default)s)')
    parser.add_argument('--num_best_copies', type=int, default=2, help='How many previous best checkpoints to save (default: %(default)s)')
    parser.add_argument('--which_best', type=str, default='FID', help='Which metric to use to determine when to save new "best"checkpoints, one of IS or FID (default: %(default)s)')
    parser.add_argument('--no_fid', action='store_true', default=False, help='Calculate IS only, not FID? (default: %(default)s)')
    parser.add_argument('--test_every', type=int, default=5000, help='Test every X iterations (default: %(default)s)')
    parser.add_argument('--num_inception_images', type=int, default=50000, help='Number of samples to compute inception metrics with (default: %(default)s)')
    parser.add_argument('--hashname', action='store_true', default=False, help='Use a hash of the experiment name instead of the full config (default: %(default)s)')
    parser.add_argument('--base_root', type=str, default='', help='Default location to store all weights, samples, data, and logs (default: %(default)s)')
    parser.add_argument('--data_root', type=str, default='data', help='Default location where data is stored (default: %(default)s)')
    parser.add_argument('--weights_root', type=str, default='weights', help='Default location to store weights (default: %(default)s)')
    parser.add_argument('--logs_root', type=str, default='logs', help='Default location to store logs (default: %(default)s)')
    parser.add_argument('--samples_root', type=str, default='samples', help='Default location to store samples (default: %(default)s)')
    parser.add_argument('--pbar', type=str, default='mine', help='Type of progressbar to use; one of "mine" or "tqdm" (default: %(default)s)')
    parser.add_argument('--name_suffix', type=str, default='', help='Suffix for experiment name for loading weights for sampling (consider "best0") (default: %(default)s)')
    parser.add_argument('--experiment_name', type=str, default='', help='Optionally override the automatic experiment naming with this arg. (default: %(default)s)')
    parser.add_argument('--config_from_name', action='store_true', default=False, help='Use a hash of the experiment name instead of the full config (default: %(default)s)')

    # --- EMA ---
    parser.add_argument('--ema', action='store_true', default=False, help='Keep an ema of Gs weights? (default: %(default)s)')
    parser.add_argument('--ema_decay', type=float, default=0.9999, help='EMA decay rate (default: %(default)s)')
    parser.add_argument('--use_ema', action='store_true', default=False, help='Use the EMA parameters of G for evaluation? (default: %(default)s)')
    parser.add_argument('--ema_start', type=int, default=0, help='When to start updating the EMA weights (default: %(default)s)')

    # --- Numerics ---
    parser.add_argument('--adam_eps', type=float, default=1e-08, help='epsilon value to use for Adam (default: %(default)s)')
    parser.add_argument('--BN_eps', type=float, default=1e-05, help='epsilon value to use for BatchNorm (default: %(default)s)')
    parser.add_argument('--SN_eps', type=float, default=1e-08, help='epsilon value to use for Spectral Norm(default: %(default)s)')
    parser.add_argument('--num_G_SVs', type=int, default=1, help='Number of SVs to track in G (default: %(default)s)')
    parser.add_argument('--num_D_SVs', type=int, default=1, help='Number of SVs to track in D (default: %(default)s)')
    parser.add_argument('--num_G_SV_itrs', type=int, default=1, help='Number of SV itrs in G (default: %(default)s)')
    parser.add_argument('--num_D_SV_itrs', type=int, default=1, help='Number of SV itrs in D (default: %(default)s)')

    # --- Ortho regularization / misc training ---
    parser.add_argument('--G_ortho', type=float, default=0.0, help='Modified ortho reg coefficient in G(default: %(default)s)')
    parser.add_argument('--D_ortho', type=float, default=0.0, help='Modified ortho reg coefficient in D (default: %(default)s)')
    parser.add_argument('--toggle_grads', action='store_true', default=True, help='Toggle D and Gs "requires_grad" settings when not training them? (default: %(default)s)')
    parser.add_argument('--which_train_fn', type=str, default='GAN', help='How2trainyourbois (default: %(default)s)')
    parser.add_argument('--load_weights', type=str, default='', help='Suffix for which weights to load (e.g. best0, copy0) (default: %(default)s)')
    parser.add_argument('--resume', action='store_true', default=False, help='Resume training? (default: %(default)s)')

    # --- Logging ---
    parser.add_argument('--logstyle', type=str, default='%3.3e', help='What style to use when logging training metrics?One of: %#.#f/ %#.#e (float/exp, text),pickle (python pickle),npz (numpy zip),mat (MATLAB .mat file) (default: %(default)s)')
    parser.add_argument('--log_G_spectra', action='store_true', default=False, help='Log the top 3 singular values in each SN layer in G? (default: %(default)s)')
    parser.add_argument('--log_D_spectra', action='store_true', default=False, help='Log the top 3 singular values in each SN layer in D? (default: %(default)s)')
    parser.add_argument('--sv_log_interval', type=int, default=10, help='Iteration interval for logging singular values (default: %(default)s)')
    return parser
def random_function(image, function, prob, seed=None, **kwargs):
    """Apply ``function`` to ``image`` with probability ``prob``.

    Draws a uniform sample in [0, 1); when it falls below ``prob`` the
    transform is applied, otherwise the image passes through unchanged.
    Extra ``kwargs`` are forwarded to ``function``.
    """
    with tf.name_scope(('random_' + function.__name__)):
        draw = tf.random.uniform([], 0, 1.0, seed=seed)
        apply_transform = tf.math.less(draw, prob)
        return tf.cond(apply_transform,
                       lambda: function(image, **kwargs),
                       lambda: image)
def FindNextMultiLineCommentEnd(lines, lineix):
    """Return the index of the line ending a C-style block comment.

    Scans forward from ``lineix`` for a line whose stripped text ends with
    '*/'; returns ``len(lines)`` if no such line exists.
    """
    for ix in range(lineix, len(lines)):
        if lines[ix].strip().endswith('*/'):
            return ix
    return len(lines)
def osnet_x1_0_ms25_a0d1(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """Construct OSNet x1.0 with MixStyle (alpha=0.1) on conv2 and conv5.

    When ``pretrained`` is True, weights are initialized from the
    'osnet_x1_0' checkpoint.
    """
    net = OSNet(
        num_classes,
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[64, 256, 384, 512],
        loss=loss,
        mixstyle_layers=['conv2', 'conv5'],
        mixstyle_alpha=0.1,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, key='osnet_x1_0')
    return net
def parse_args():
    """Parse CLI options for converting an MMSeg model to TorchScript."""
    parser = argparse.ArgumentParser(description='Convert MMSeg to TorchScript')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', help='checkpoint file', default=None)
    parser.add_argument('--show', action='store_true', help='show TorchScript graph')
    parser.add_argument('--verify', action='store_true', help='verify the TorchScript model')
    parser.add_argument('--output-file', type=str, default='tmp.pt')
    parser.add_argument('--shape', type=int, nargs='+', default=[512, 512],
                        help='input image size (height, width)')
    return parser.parse_args()
def test_actionAngleTorus_AutoFitWarning():
    """AutoFit failures in actionAngleTorus should surface as galpyWarnings.

    Runs each public evaluation method for an orbit known to make AutoFit
    exit with status -3 and asserts the corresponding warning is emitted.
    """
    from galpy.actionAngle import actionAngleTorus
    from galpy.potential import LogarithmicHaloPotential
    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAT = actionAngleTorus(pot=lp, tol=(10.0 ** (- 8.0)))
    jr, jp, jz = 0., 1., 0.6078445
    ar = numpy.array([1.])
    ap = numpy.array([6.])
    az = numpy.array([4.])
    import warnings
    expected = "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2"
    failmsg = "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"

    def check(call, reset_registry=False):
        # Run `call` under warning capture and assert the AutoFit warning.
        with warnings.catch_warnings(record=True) as w:
            if reset_registry and PY2:
                # Python 2 only: clear the registry so the warning re-fires.
                reset_warning_registry('galpy')
            warnings.simplefilter('always', galpyWarning)
            call()
            raised = False
            for wa in w:
                raised = (str(wa.message) == expected)
                if raised:
                    break
            assert raised, failmsg

    check(lambda: aAT(jr, jp, jz, ar, ap, az), reset_registry=True)
    check(lambda: aAT.xvFreqs(jr, jp, jz, ar, ap, az))
    check(lambda: aAT.Freqs(jr, jp, jz))
    check(lambda: aAT.hessianFreqs(jr, jp, jz))
    check(lambda: aAT.xvJacobianFreqs(jr, jp, jz, ar, ap, az))
    return None
def make_self_attn_gnn():
    """Build a self-attention GNN configured entirely from FLAGS.

    NOTE(review): the node embedding dimension is halved with true
    division, yielding a float — confirm make_mlp_model accepts that.
    """
    mlp_fn = partial(
        make_mlp_model,
        FLAGS.gnn_latent_dim,
        (FLAGS.node_embedding_dim / 2),
        FLAGS.gnn_num_layers,
        tf.nn.relu,
        FLAGS.gnn_l2_regularizer_weight,
        FLAGS.gnn_bias_init_stddev,
    )
    return self_attn_gnn(
        kq_dim=FLAGS.attn_kq_dim,
        v_dim=FLAGS.attn_v_dim,
        make_mlp_fn=mlp_fn,
        kq_dim_division=True,
    )
class BasicBlock(nn.Module):
    """ResNet basic block (two 3x3 convs + identity/projection shortcut).

    conv1 is followed by bn.BatchNorm2dEx, an extended BatchNorm that also
    receives the conv weight — presumably for pruning/masking; confirm
    against the bn module's implementation.
    """
    # Output channels = expansion * planes (1 for basic blocks).
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bnx1 = bn.BatchNorm2dEx(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut by default; 1x1 projection when the spatial
        # stride or the channel count changes.
        self.shortcut = nn.Sequential()
        if ((stride != 1) or (in_planes != (self.expansion * planes))):
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((self.expansion * planes)))

    def forward(self, x):
        # BatchNorm2dEx takes conv1's weight as a second argument.
        out = F.relu(self.bnx1(self.conv1(x), self.conv1.weight))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
def run_random(env_name):
    """Roll a fixed Point-robot 'defense' Engine environment with random actions.

    Steps the env with randomly sampled actions for just over 10000 steps,
    printing the per-step reward, resetting on episode end, rendering each
    frame, and finally printing the elapsed wall-clock time.

    Note: `env_name` is accepted for interface compatibility but the config
    below is hard-coded.
    """
    config = {'robot_base': 'xmls/point.xml', 'task': 'defense', 'goal_size': 0.5, 'observe_robbers': True, 'observe_hazards': True, 'constrain_hazards': True, 'constrain_indicator': False, 'lidar_num_bins': 16, 'hazards_num': 8, 'hazards_size': 0.3, 'robbers_num': 2, 'robbers_size': 0.3}
    config['num_steps'] = 2000
    config['robot_rot'] = 0
    env = Engine(config)
    env.reset()
    ep_ret = 0   # cumulative reward (kept for debugging/inspection)
    ep_cost = 0  # cumulative cost
    cnt = 0
    import time
    t = time.time()
    # Removed dead locals from the original: `done = False` (always assigned
    # by env.step before use), `T`, `ac`, `first_done`, and the unused
    # `a = info['cost']` (ep_cost already uses info.get('cost', 0)).
    while True:
        act = env.action_space.sample()
        cnt = (cnt + 1)
        (obs, reward, done, info) = env.step(act)
        print('reward', reward)
        ep_ret += reward
        ep_cost += info.get('cost', 0)
        if done:
            env.reset()
        if (cnt > 10000):
            break
        env.render()
    print('##', (time.time() - t))
class Model(torch.nn.Module):
    """Base model class: regularization penalty, batched inference that splits
    outputs per instance, metric reporting, and checkpoint loading.

    NOTE(review): this closely mirrors AllenNLP's ``Model`` base class, and
    ``_load``/``load`` take ``cls`` but carry no visible ``@classmethod``
    decorator — presumably stripped; confirm against the original source.
    """

    # Output keys already warned about. Class-level on purpose: warn once per
    # key per process, across instances.
    _warn_for_unseparable_batches: Set[str] = set()

    def __init__(self, regularizer=None) -> None:
        super().__init__()
        self._regularizer = regularizer

    def get_regularization_penalty(self) -> Union[(float, torch.Tensor)]:
        """Return the regularization penalty (0.0 when no regularizer is set)."""
        if (self._regularizer is None):
            return 0.0
        else:
            return self._regularizer(self)

    def get_parameters_for_histogram_tensorboard_logging(self) -> List[str]:
        """Names of all parameters to log as tensorboard histograms."""
        return [name for (name, _) in self.named_parameters()]

    def forward(self, inputs) -> Dict[(str, torch.Tensor)]:
        # Subclasses implement the actual computation.
        raise NotImplementedError

    def forward_on_instance(self, instance) -> Dict[(str, numpy.ndarray)]:
        # Subclass hook for single-instance inference.
        raise NotImplementedError

    def forward_on_instances(self, instances) -> List[Dict[(str, numpy.ndarray)]]:
        """Run inference on `instances` and split each output per instance.

        Outputs whose first dimension is not the batch size cannot be split;
        they are dropped with a one-time warning per key.
        """
        batch_size = len(instances)
        with torch.no_grad():
            device = self._get_prediction_device()
            dataset = Batch(instances)
            dataset.index_instances(self.vocab)
            model_input = move_to_device(dataset.as_tensor_dict(), device)
            outputs = self.decode(self(model_input))
            instance_separated_output: List[Dict[(str, numpy.ndarray)]] = [{} for _ in dataset.instances]
            # list() snapshot because `outputs` is mutated inside the loop.
            for (name, output) in list(outputs.items()):
                if isinstance(output, torch.Tensor):
                    if (output.dim() == 0):
                        # Give scalars a fake batch dimension so the size
                        # check below is well-defined.
                        output = output.unsqueeze(0)
                    if (output.size(0) != batch_size):
                        self._maybe_warn_for_unseparable_batches(name)
                        continue
                    output = output.detach().cpu().numpy()
                elif (len(output) != batch_size):
                    self._maybe_warn_for_unseparable_batches(name)
                    continue
                outputs[name] = output
                # Distribute the i-th batch element to the i-th instance dict.
                for (instance_output, batch_element) in zip(instance_separated_output, output):
                    instance_output[name] = batch_element
            return instance_separated_output

    def decode(self, output_dict: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
        """Hook to convert raw outputs into human-readable form; identity by default."""
        return output_dict

    def get_metrics(self, reset: bool=False) -> Dict[(str, float)]:
        """Metrics accumulated so far; empty by default."""
        return {}

    def _get_prediction_device(self):
        """Device shared by all parameters; raises when they are split across devices."""
        devices = {get_device_of(param) for param in self.parameters()}
        if (len(devices) > 1):
            devices_string = ', '.join((str(x) for x in devices))
            raise ConfigurationError(f'Parameters have mismatching cuda_devices: {devices_string}')
        elif ((len(devices) == 1) and all(((i >= 0) for i in devices))):
            # get_device_of returns >= 0 for CUDA devices.
            device = torch.device('cuda:{}'.format(devices.pop()))
        else:
            device = torch.device('cpu')
        return device

    def _maybe_warn_for_unseparable_batches(self, output_key: str):
        # Warn only once per output key (tracked in the class-level set).
        if (output_key not in self._warn_for_unseparable_batches):
            logger.warning(f"Encountered the {output_key} key in the model's return dictionary which couldn't be split by the batch size. Key will be ignored.")
            self._warn_for_unseparable_batches.add(output_key)

    def set_vocab(self, vocab):
        self.vocab = vocab

    def _load(cls, config: Params, serialization_dir: str, weights_file: str=None, device=None) -> 'Model':
        """Instantiate a model of type `cls` from config + saved weights.

        See the class-level NOTE: this reads like a stripped @classmethod.
        """
        weights_file = (weights_file or os.path.join(serialization_dir, _DEFAULT_WEIGHTS))
        vocab_dir = os.path.join(serialization_dir, 'vocabulary')
        vocab = Vocabulary.from_files(vocab_dir)
        model_params = config['model']
        # Pretrained-embedding file paths are only needed the first time the
        # model is trained, not when reloading.
        remove_pretrained_embedding_params(model_params)
        model = cls.from_params(vocab=vocab, params=model_params)
        model_state = torch.load(weights_file, map_location=device_mapping((- 1)))
        if (not isinstance(model, torch.nn.DataParallel)):
            # Strip DataParallel's 'module.' prefix from checkpoint keys.
            model_state = {re.sub('^module\\.', '', k): v for (k, v) in model_state.items()}
        model.load_state_dict(model_state)
        model.set_vocab(vocab)
        model.to(device)
        return model

    def load(cls, config: Params, serialization_dir: str, weights_file: str=None, device=None) -> 'Model':
        """Dispatch to the concrete model class named in config['model']['model_type']."""
        model_type = config['model']['model_type']
        return getattr(Models, model_type)._load(config, serialization_dir, weights_file, device)
def load_data(root_path, source_dir, target_dir, batch_size):
    """Build the three loaders used for domain adaptation.

    Returns (source train loader, target train loader, target test loader),
    all sharing the same worker/pinning settings.
    """
    loader_kwargs = {'num_workers': 4, 'pin_memory': True}
    source_loader = load_training(root_path, source_dir, batch_size, loader_kwargs)
    target_loader = load_training(root_path, target_dir, batch_size, loader_kwargs)
    # The test set is drawn from the *target* domain.
    test_loader = load_testing(root_path, target_dir, batch_size, loader_kwargs)
    return (source_loader, target_loader, test_loader)
def collate_fn(batch):
    """Collate a list of per-scenario sample dicts into one padded batch dict.

    Tensor features are padded along the first dimension; padding masks are
    padded with True (= padded position); ids are gathered into lists; origin
    and theta are concatenated.
    """
    padded_keys = ('x', 'x_attr', 'x_positions', 'x_centers', 'x_angles', 'x_velocity', 'x_velocity_diff', 'lane_positions', 'lane_centers', 'lane_angles', 'lane_attr', 'is_intersections')
    out = {key: pad_sequence([sample[key] for sample in batch], batch_first=True) for key in padded_keys}
    first = batch[0]
    if 'x_scored' in first:
        out['x_scored'] = pad_sequence([sample['x_scored'] for sample in batch], batch_first=True)
    if first['y'] is not None:
        out['y'] = pad_sequence([sample['y'] for sample in batch], batch_first=True)
    # Padding masks use True for padded slots, hence padding_value=True.
    for mask_key in ('x_padding_mask', 'lane_padding_mask'):
        out[mask_key] = pad_sequence([sample[mask_key] for sample in batch], batch_first=True, padding_value=True)
    # An actor/lane is fully padded iff every time-step slot is padding.
    out['x_key_padding_mask'] = out['x_padding_mask'].all(-1)
    out['lane_key_padding_mask'] = out['lane_padding_mask'].all(-1)
    out['num_actors'] = (~out['x_key_padding_mask']).sum(-1)
    out['num_lanes'] = (~out['lane_key_padding_mask']).sum(-1)
    out['scenario_id'] = [sample['scenario_id'] for sample in batch]
    out['track_id'] = [sample['track_id'] for sample in batch]
    out['origin'] = torch.cat([sample['origin'] for sample in batch], dim=0)
    out['theta'] = torch.cat([sample['theta'] for sample in batch])
    return out
class DistilBertOnnxConfig(OnnxConfig):
    # NOTE(review): used as a plain method here, but in transformers this is
    # normally an @property — the decorator may have been stripped; confirm.
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Dynamic-axis spec for ONNX export: batch (0) and sequence (1) axes
        for both input_ids and attention_mask."""
        dynamic_axes = {0: 'batch', 1: 'sequence'}
        return OrderedDict([('input_ids', dict(dynamic_axes)), ('attention_mask', dict(dynamic_axes))])
class Max(ZooKerasLayer):
    """Zoo Keras layer applying a max operation along dimension `dim`.

    num_input_dims defaults to the INTMIN sentinel (meaning "unspecified");
    return_value presumably selects values vs. indices on the backend side —
    confirm against the Scala-side constructor, whose positional argument
    order this wrapper mirrors.
    """
    def __init__(self, dim, num_input_dims=INTMIN, return_value=True, input_shape=None, **kwargs):
        # input_shape is converted to a plain list (or None) for the JVM bridge.
        super(Max, self).__init__(None, dim, num_input_dims, return_value, (list(input_shape) if input_shape else None), **kwargs)
# NOTE(review): the call below reads like a decorator whose '@' (and a name
# prefix) was stripped — in fairseq this is
# @register_model_architecture('s2t_conformer', 's2t_conformer') — confirm.
_model_architecture('s2t_conformer', 's2t_conformer')
def conformer_base_architecture(args):
    """Fill in defaults for the base s2t_conformer architecture.

    Each option falls back to its default only when not already present on
    `args`; remaining defaults are delegated to transformer_base_architecture.
    """
    args.attn_type = getattr(args, 'attn_type', None)
    # 'abs' = absolute positional encoding.
    args.pos_enc_type = getattr(args, 'pos_enc_type', 'abs')
    args.input_feat_per_channel = getattr(args, 'input_feat_per_channel', 80)
    args.input_channels = getattr(args, 'input_channels', 1)
    args.max_source_positions = getattr(args, 'max_source_positions', 6000)
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
    args.dropout = getattr(args, 'dropout', 0.1)
    args.encoder_layers = getattr(args, 'encoder_layers', 16)
    args.depthwise_conv_kernel_size = getattr(args, 'depthwise_conv_kernel_size', 31)
    transformer_base_architecture(args)
class rd_decoded_rm_depth_ahat():
    """File reader for RM depth (AHAT) streams that decodes payloads on read."""

    def open(self, filename, chunk, profile):
        """Open `filename`, set up the AHAT depth codec, and prime one read."""
        self._client = reader()
        self._client.open(filename, chunk)
        self._codec = hl2ss.decode_rm_depth_ahat(profile)
        self._codec.create()
        # Prime the codec/stream with an initial read (result discarded).
        self.read()

    def read(self):
        """Return the next packet with its payload decoded, or None at EOF."""
        data = self._client.read()
        if data is None:
            return data
        data.payload = self._codec.decode(data.payload)
        return data

    def close(self):
        """Close the underlying file client."""
        self._client.close()
class TokenLengthAnalysis():
    """Compute tiktoken token-length distributions over JSONL files and plot them."""

    def __init__(self, file_paths, types, encoding_name='cl100k_base'):
        # file_paths: JSONL files, one JSON object per line.
        # types: JSON keys to analyse (e.g. 'problem', 'solution').
        self.file_paths = file_paths
        self.types = types
        self.encoding_name = encoding_name
        # Lazily-created tiktoken encoding; fetched once instead of per string.
        self._encoding = None

    def num_tokens_from_string(self, string):
        """Token count of `string` under the configured encoding."""
        if self._encoding is None:
            # Fix: tiktoken.get_encoding was previously called for every
            # string, which is expensive; cache it on first use.
            self._encoding = tiktoken.get_encoding(self.encoding_name)
        return len(self._encoding.encode(string))

    def load_data(self, type):
        """Token lengths of field `type` across all files.

        Blank lines and records missing the key are skipped (previously a
        missing key fell through to data[None] and raised KeyError).
        """
        all_problem_solutions = []
        for file_path in self.file_paths:
            with open(file_path, 'r') as file:
                for line in file:
                    line = line.strip()
                    if not line:
                        continue
                    data = json.loads(line)
                    if type not in data:
                        continue
                    all_problem_solutions.append(self.num_tokens_from_string(data[type]))
        return all_problem_solutions

    def plot_data(self):
        """Plot binned (width 20) token-length curves for every type to Length.png."""
        (fig, ax) = plt.subplots(figsize=(4.8, 3))
        for type in self.types:
            data = self.load_data(type)
            width = 20
            # Bucket each length to its bin's lower edge.
            token_length = [((length // width) * width) for length in data]
            token_length_counts = Counter(token_length)
            x_values = sorted(list(token_length_counts.keys()))
            sorted_indices = np.argsort(x_values)
            # Counts in thousands.
            y_values = [(token_length_counts[x_values[i]] / 1000) for i in sorted_indices]
            # Blue for 'problem', orange otherwise. Fix: the original's
            # (0 / 37) was a typo for (0 / 256); both evaluate to 0.0.
            fill_color = (((0 / 256), (90 / 256), (146 / 256)) if (type == 'problem') else ((230 / 256), (120 / 256), (0 / 256)))
            ax.fill_between(x_values, 0, y_values, alpha=0.4, color=fill_color)
            ax.plot(x_values, y_values, linestyle='-', label=f'{type}')
        ax.set_xlim(left=0, right=700)
        ax.set_ylim(bottom=0)
        ax.set_xticks(np.arange(0, 700, 100))
        ax.set_yticks(np.arange(0, 8, 1))
        ax.set_xlabel('Number of Tokens', fontsize=14)
        ax.set_ylabel('#Count (Thousand)', fontsize=14)
        ax.legend(prop={'size': 10})
        plt.tight_layout()
        plt.savefig('Length.png')
        plt.show()
class CSPDarkNet(object):
    """CSPDarknet-53 backbone (PaddlePaddle static-graph fluid API).

    Builds a conv+BN+mish stem followed by five CSP stages; calling the
    instance returns the list of per-stage feature maps.
    """
    # Attributes shared across config instances (PaddleDetection convention).
    __shared__ = ['norm_type', 'weight_prefix_name']

    def __init__(self, depth=53, norm_type='bn', norm_decay=0.0, weight_prefix_name=''):
        assert (depth in [53]), 'unsupported depth value'
        self.depth = depth
        self.norm_type = norm_type
        self.norm_decay = norm_decay
        # depth -> (blocks per stage, block builder).
        self.depth_cfg = {53: ([1, 2, 8, 8, 4], self.basicblock)}
        self.prefix_name = weight_prefix_name

    def _softplus(self, input):
        # Clipped softplus: log(1 + exp(clip(x, -200, 50))) to avoid overflow.
        expf = fluid.layers.exp(fluid.layers.clip(input, (- 200), 50))
        return fluid.layers.log((1 + expf))

    def _mish(self, input):
        # mish(x) = x * tanh(softplus(x)).
        return (input * fluid.layers.tanh(self._softplus(input)))

    def _conv_norm(self, input, ch_out, filter_size, stride, padding, act='mish', name=None):
        """conv2d (no bias) + batch_norm, optionally followed by mish.

        `name` seeds the parameter names so pretrained weights can be matched.
        """
        conv = fluid.layers.conv2d(input=input, num_filters=ch_out, filter_size=filter_size, stride=stride, padding=padding, act=None, param_attr=ParamAttr(name=(name + '.conv.weights')), bias_attr=False)
        bn_name = (name + '.bn')
        bn_param_attr = ParamAttr(regularizer=L2Decay(float(self.norm_decay)), name=(bn_name + '.scale'))
        bn_bias_attr = ParamAttr(regularizer=L2Decay(float(self.norm_decay)), name=(bn_name + '.offset'))
        out = fluid.layers.batch_norm(input=conv, act=None, param_attr=bn_param_attr, bias_attr=bn_bias_attr, moving_mean_name=(bn_name + '.mean'), moving_variance_name=(bn_name + '.var'))
        if (act == 'mish'):
            out = self._mish(out)
        return out

    def _downsample(self, input, ch_out, filter_size=3, stride=2, padding=1, name=None):
        # Stride-2 conv halves the spatial resolution between stages.
        return self._conv_norm(input, ch_out=ch_out, filter_size=filter_size, stride=stride, padding=padding, name=name)

    def conv_layer(self, input, ch_out, filter_size=1, stride=1, padding=0, name=None):
        # 1x1 conv by default: channel projection.
        return self._conv_norm(input, ch_out=ch_out, filter_size=filter_size, stride=stride, padding=padding, name=name)

    def basicblock(self, input, ch_out, scale_first=False, name=None):
        """Residual block: 1x1 (halved when scale_first) -> 3x3 -> add input."""
        conv1 = self._conv_norm(input, ch_out=((ch_out // 2) if scale_first else ch_out), filter_size=1, stride=1, padding=0, name=(name + '.0'))
        conv2 = self._conv_norm(conv1, ch_out=ch_out, filter_size=3, stride=1, padding=1, name=(name + '.1'))
        out = fluid.layers.elementwise_add(x=input, y=conv2, act=None)
        return out

    def layer_warp(self, block_func, input, ch_out, count, keep_ch=False, scale_first=False, name=None):
        """One CSP stage: split into a block path and a shortcut, concat, fuse.

        keep_ch keeps the output channel count instead of doubling it.
        """
        if scale_first:
            ch_out = (ch_out * 2)
        # Right branch bypasses the residual blocks; the neck feeds them.
        right = self.conv_layer(input, ch_out, name='{}.route_in.right'.format(name))
        neck = self.conv_layer(input, ch_out, name='{}.neck'.format(name))
        out = block_func(neck, ch_out=ch_out, scale_first=scale_first, name='{}.0'.format(name))
        for j in six.moves.xrange(1, count):
            out = block_func(out, ch_out=ch_out, name='{}.{}'.format(name, j))
        left = self.conv_layer(out, ch_out, name='{}.route_in.left'.format(name))
        route = fluid.layers.concat([left, right], axis=1)
        out = self.conv_layer(route, ch_out=(ch_out if keep_ch else (ch_out * 2)), name='{}.conv_layer'.format(name))
        return out

    def __call__(self, input):
        """Build the backbone graph; returns the list of stage outputs."""
        (stages, block_func) = self.depth_cfg[self.depth]
        stages = stages[0:5]
        conv = self._conv_norm(input=input, ch_out=32, filter_size=3, stride=1, padding=1, act='mish', name=(self.prefix_name + 'conv'))
        blocks = []
        for (i, stage) in enumerate(stages):
            # Stage 0 consumes the stem; later stages chain off the previous block.
            input = (conv if (i == 0) else block)
            downsample_ = self._downsample(input=input, ch_out=(input.shape[1] * 2), name=(self.prefix_name + 'stage.{}.downsample'.format(i)))
            block = self.layer_warp(block_func=block_func, input=downsample_, ch_out=(32 * (2 ** i)), count=stage, keep_ch=(i == 0), scale_first=(i == 0), name=(self.prefix_name + 'stage.{}'.format(i)))
            blocks.append(block)
        return blocks
# NOTE(review): the call below reads like a class decorator whose '@' (and a
# name prefix, e.g. @bind_schema) was stripped — confirm against the original.
_schema(UchannelLOSchema)
class UchannelLO(BaseModel):
    """U-channel local-oscillator description: driven qubit index and scale factor."""
    def __init__(self, q, scale, **kwargs):
        self.q = q
        self.scale = scale
        # BaseModel also receives the fields so schema validation sees them.
        super().__init__(q=q, scale=scale, **kwargs)
def read_matches_files(data_dir, matches_file):
    """Parse a matches file into an (N, 3) LongTensor of [id_a, id_b, is_match].

    Each whitespace-separated line contributes columns 0 and 3 as the two
    point ids, plus 1 when columns 1 and 4 are equal (positive match), else 0.
    """
    rows = []
    path = os.path.join(data_dir, matches_file)
    with open(path, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.split()
            rows.append([int(fields[0]), int(fields[3]), int(fields[1] == fields[4])])
    return torch.LongTensor(rows)
# NOTE(review): the two bare names below read like test-skip decorators whose
# '@' prefix (and likely a 'require_' part) was stripped, i.e. @require_torch /
# @require_vision — confirm against the original test module.
_torch
_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for BridgeTowerImageProcessor: config attributes, PIL/numpy/torch
    call paths, and pad_and_create_pixel_mask equivalence."""

    # Class attribute is None when vision deps are missing (mixin then skips).
    image_processing_class = (BridgeTowerImageProcessor if is_vision_available() else None)

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    def image_processor_dict(self):
        # NOTE(review): referenced below as `self.image_processor_dict` with
        # no call, so this was presumably an @property originally — confirm.
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # The processor must expose every documented config attribute.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'size_divisor'))

    def test_batch_feature(self):
        # Intentionally empty: batch-feature behavior is covered elsewhere.
        pass

    def test_call_pil(self):
        """PIL input: single image -> batch of 1; list -> full batch with expected H/W."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Single image.
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Batched.
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width))

    def test_call_numpy(self):
        """Same contract as test_call_pil, with numpy array inputs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Single image.
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Batched.
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width))

    def test_call_pytorch(self):
        """Same contract as test_call_pil, with torch tensor inputs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Single image.
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Batched.
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width))

    def test_equivalence_pad_and_create_pixel_mask(self):
        """pad_and_create_pixel_mask must match calling a no-op processor directly."""
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        # Processor 2 disables all transforms so it only pads + masks.
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images_with_method = image_processing_1.pad_and_create_pixel_mask(image_inputs, return_tensors='pt')
        encoded_images = image_processing_2(image_inputs, return_tensors='pt')
        self.assertTrue(torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=0.0001))
        self.assertTrue(torch.allclose(encoded_images_with_method['pixel_mask'], encoded_images['pixel_mask'], atol=0.0001))
class Image2D(Dataset):
    """Dataset of grayscale images read from `<dataset_path>/img`.

    Each item is (transformed image, filename). The default transform is
    torchvision's ToTensor.
    """

    def __init__(self, dataset_path: str, transform: Callable=None):
        self.dataset_path = dataset_path
        self.input_path = os.path.join(dataset_path, 'img')
        # Snapshot the directory listing once; __len__/__getitem__ use it.
        self.images_list = os.listdir(self.input_path)
        if transform:
            self.transform = transform
        else:
            self.transform = T.ToTensor()

    def __len__(self):
        # Fix: previously re-ran os.listdir on every call, which was slower
        # and could disagree with __getitem__ if files appeared/disappeared
        # after construction; use the cached listing instead.
        return len(self.images_list)

    def __getitem__(self, idx):
        image_filename = self.images_list[idx]
        # Flag 0 -> load as single-channel grayscale.
        image = cv2.imread(os.path.join(self.input_path, image_filename), 0)
        image = correct_dims(image)
        image = self.transform(image)
        return (image, image_filename)
class DeepText(nn.Module):
    """Stack-of-RNNs (LSTM/GRU) text encoder with optional pretrained
    embeddings and an optional MLP head.

    `output_dim` ends up as hidden_dim (x2 if bidirectional), or the last
    head layer size when head_hidden_dims is given.
    """

    def __init__(self, vocab_size: int, rnn_type: str='lstm', hidden_dim: int=64, n_layers: int=3, rnn_dropout: float=0.1, bidirectional: bool=False, use_hidden_state: bool=True, padding_idx: int=1, embed_dim: Optional[int]=None, embed_matrix: Optional[np.ndarray]=None, embed_trainable: bool=True, head_hidden_dims: Optional[List[int]]=None, head_activation: str='relu', head_dropout: Optional[float]=None, head_batchnorm: bool=False, head_batchnorm_last: bool=False, head_linear_first: bool=False):
        super(DeepText, self).__init__()
        # A pretrained embedding matrix wins over a conflicting embed_dim.
        if ((embed_dim is not None) and (embed_matrix is not None) and (not (embed_dim == embed_matrix.shape[1]))):
            warnings.warn('the input embedding dimension {} and the dimension of the pretrained embeddings {} do not match. The pretrained embeddings dimension ({}) will be used'.format(embed_dim, embed_matrix.shape[1], embed_matrix.shape[1]), UserWarning)
        if (rnn_type.lower() not in ['lstm', 'gru']):
            raise ValueError(f"'rnn_type' must be 'lstm' or 'gru', got {rnn_type} instead")
        self.vocab_size = vocab_size
        self.rnn_type = rnn_type
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.rnn_dropout = rnn_dropout
        self.bidirectional = bidirectional
        # True -> encoder output is the final hidden state; False -> last output step.
        self.use_hidden_state = use_hidden_state
        self.padding_idx = padding_idx
        self.embed_dim = embed_dim
        self.embed_trainable = embed_trainable
        self.head_hidden_dims = head_hidden_dims
        self.head_activation = head_activation
        self.head_dropout = head_dropout
        self.head_batchnorm = head_batchnorm
        self.head_batchnorm_last = head_batchnorm_last
        self.head_linear_first = head_linear_first
        if isinstance(embed_matrix, np.ndarray):
            assert (embed_matrix.dtype == 'float32'), "'embed_matrix' must be of dtype 'float32', got dtype '{}'".format(str(embed_matrix.dtype))
            self.word_embed = nn.Embedding(vocab_size, embed_matrix.shape[1], padding_idx=padding_idx)
            if embed_trainable:
                self.word_embed.weight = nn.Parameter(torch.tensor(embed_matrix), requires_grad=True)
            else:
                # Frozen pretrained embeddings.
                self.word_embed.weight = nn.Parameter(torch.tensor(embed_matrix), requires_grad=False)
            # The pretrained matrix fixes the effective embedding dimension.
            embed_dim = embed_matrix.shape[1]
        else:
            self.word_embed = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
        rnn_params = {'input_size': embed_dim, 'hidden_size': hidden_dim, 'num_layers': n_layers, 'bidirectional': bidirectional, 'dropout': rnn_dropout, 'batch_first': True}
        if (self.rnn_type.lower() == 'lstm'):
            self.rnn: Union[(nn.LSTM, nn.GRU)] = nn.LSTM(**rnn_params)
        elif (self.rnn_type.lower() == 'gru'):
            self.rnn = nn.GRU(**rnn_params)
        self.output_dim = ((hidden_dim * 2) if bidirectional else hidden_dim)
        if (self.head_hidden_dims is not None):
            # The head's first layer must consume the encoder's output width.
            assert (self.head_hidden_dims[0] == self.output_dim), 'The hidden dimension from the stack or RNNs ({}) is not consistent with the expected input dimension ({}) of the fc-head'.format(self.output_dim, self.head_hidden_dims[0])
            self.texthead = MLP(head_hidden_dims, head_activation, head_dropout, head_batchnorm, head_batchnorm_last, head_linear_first)
            self.output_dim = head_hidden_dims[(- 1)]

    def forward(self, X: Tensor) -> Tensor:
        """Encode a batch of token ids into (batch, output_dim) features."""
        embed = self.word_embed(X.long())
        if (self.rnn_type.lower() == 'lstm'):
            (o, (h, c)) = self.rnn(embed)
        elif (self.rnn_type.lower() == 'gru'):
            (o, h) = self.rnn(embed)
        # (batch, seq, feat) -> (seq, batch, feat) so o[-1] is the last time step.
        o = o.permute(1, 0, 2)
        if self.bidirectional:
            # Concatenate final forward (h[-2]) and backward (h[-1]) hidden states.
            rnn_out = (torch.cat((h[(- 2)], h[(- 1)]), dim=1) if self.use_hidden_state else o[(- 1)])
        else:
            rnn_out = (h[(- 1)] if self.use_hidden_state else o[(- 1)])
        if (self.head_hidden_dims is not None):
            head_out = self.texthead(rnn_out)
            return head_out
        else:
            return rnn_out
def is_div_level(maybe_div, level):
    """Return True iff `maybe_div` is a <div> tag whose 'level' attribute
    equals str(level); False for None or any non-matching tag."""
    if maybe_div is None:
        return False
    if maybe_div.name != 'div':
        return False
    attributes = maybe_div.attrs
    return ('level' in attributes) and (attributes['level'] == str(level))
def get_only_chars(line):
    """Normalize `line` to lowercase ascii letters and single spaces.

    Apostrophes are removed, hyphens/tabs/newlines become spaces, any other
    non-letter character becomes a space, runs of spaces are collapsed, a
    single leading space is dropped, and the result is truncated to the
    first 100 space-delimited tokens.

    Fix: no longer raises IndexError when the input normalizes to "".
    (The original also contained a no-op line.replace('', '') — possibly a
    unicode character lost in transit; it had no effect and was removed.)
    """
    line = line.replace("'", '')
    line = line.replace('-', ' ')  # hyphenated words become separate tokens
    line = line.replace('\t', ' ')
    line = line.replace('\n', ' ')
    line = line.lower()
    # Keep only a-z and space; everything else turns into a space.
    kept = [(char if char in 'qwertyuiopasdfghjklzxcvbnm ' else ' ') for char in line]
    clean_line = re.sub(' +', ' ', ''.join(kept))
    # Guard the empty string before indexing (previously clean_line[0] crashed).
    if clean_line and (clean_line[0] == ' '):
        clean_line = clean_line[1:]
    clean_line = ' '.join(clean_line.split(' ')[:100])
    return clean_line
class Plateau(JavaValue):
    """Reduce-on-plateau learning-rate schedule, backed by a JVM object.

    monitor: metric name to watch; factor: LR multiplier applied on plateau;
    patience: epochs without improvement before reducing; mode: 'min'/'max';
    epsilon: improvement threshold; cooldown: epochs to wait after a cut;
    min_lr: lower bound on the learning rate; bigdl_type: JVM numeric type.
    """
    def __init__(self, monitor, factor=0.1, patience=10, mode='min', epsilon=0.0001, cooldown=0, min_lr=0.0, bigdl_type='float'):
        # Arguments are forwarded positionally to the JVM-side constructor.
        JavaValue.__init__(self, None, bigdl_type, monitor, factor, patience, mode, epsilon, cooldown, min_lr)
def get_metrics_names(metrics):
    """Return the metric names from the first entry of a metrics mapping.

    `metrics` maps some grouping key to a per-group dict of metric values;
    an empty mapping yields an empty list.
    """
    if not metrics:
        return []
    first_entry = next(iter(metrics.values()))
    return list(first_entry)
class OptimizationArguments():
    """Arguments controlling (auto-)distillation.

    NOTE(review): the fields use dataclasses.field(), which only takes effect
    under a @dataclass decorator — none is visible here, presumably stripped;
    confirm against the original source.
    """
    # Whether to run AutoDistillation at all.
    auto_distillation: bool = field(default=False, metadata={'help': 'Whether or not to apply distillation.'})
    teacher_config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    # Fix: the default was `False` on a str-typed field; use None, consistent
    # with teacher_config_name ("not provided").
    teacher_model_name_or_path: Optional[str] = field(default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    flash_distillation_steps: int = field(default=500, metadata={'help': 'Steps for each stage in knowledge transfer.'})
    regular_distillation_steps: int = field(default=25000, metadata={'help': 'Steps for each stage in regular distillation.'})
    max_trials: int = field(default=100, metadata={'help': 'Maximum trials for AutoDistillation.'})
def DenseNet(blocks, include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, residuals=False, **kwargs):
    """Build a (dilated) DenseNet keras model.

    blocks: dense-block sizes, e.g. [6, 12, 24, 16] for DenseNet-121.
    residuals: when 2 or 3, the returned model outputs the intermediate
    feature map captured after conv2 / conv3 instead of the classifier head.
    weights: None, 'imagenet', or a path to a weights file.
    """
    # Validate the weights / classes combination early.
    if (not ((weights in {'imagenet', None}) or os.path.exists(weights))):
        raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.')
    if ((weights == 'imagenet') and include_top and (classes != 1000)):
        raise ValueError('If using `weights` as `"imagenet"` with `include_top` as true, `classes` should be 1000')
    input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights)
    if (input_tensor is None):
        img_input = layers.Input(shape=input_shape)
    elif (not backend.is_keras_tensor(input_tensor)):
        img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor
    # BN axis depends on the data format (channels_last -> axis 3).
    bn_axis = (3 if (backend.image_data_format() == 'channels_last') else 1)
    res_outputs = []
    # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name='conv1/bn')(x)
    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)
    x = dense_block(x, blocks[0], name='conv2')
    if (residuals == 2):
        # Capture conv2 features for the residual-output variant.
        res_outputs = x
    x = transition_block(x, 0.5, name='pool2')
    x = dense_block(x, blocks[1], name='conv3')
    if (residuals == 3):
        res_outputs = x
    x = transition_block(x, 0.5, name='pool3')
    x = dense_block(x, blocks[2], name='conv4')
    # Final transition keeps spatial resolution (pool=False); the last dense
    # block uses dilation 2 instead of a further downsample.
    x = transition_block(x, 0.5, name='pool4', pool=False)
    x = dense_block(x, blocks[3], name='conv5', dilation=2)
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name='bn')(x)
    x = layers.Activation('relu', name='relu')(x)
    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='fc1000')(x)
    elif (pooling == 'avg'):
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    elif (pooling == 'max'):
        x = layers.GlobalMaxPooling2D(name='max_pool')(x)
    # Use the source inputs of input_tensor when one was supplied.
    if (input_tensor is not None):
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Model name follows the standard block configurations.
    if (blocks == [6, 12, 24, 16]):
        model = models.Model(inputs, x, name='densenet121')
    elif (blocks == [6, 12, 32, 32]):
        model = models.Model(inputs, x, name='densenet169')
    elif (blocks == [6, 12, 48, 32]):
        model = models.Model(inputs, x, name='densenet201')
    else:
        model = models.Model(inputs, x, name='densenet')
    if (weights == 'imagenet'):
        # Download the matching pretrained weight file (with/without top).
        if include_top:
            if (blocks == [6, 12, 24, 16]):
                weights_path = keras_utils.get_file('densenet121_weights_tf_dim_ordering_tf_kernels.h5', DENSENET121_WEIGHT_PATH, cache_subdir='models', file_hash='9d60b8095a5708f2dcce2bca79d332c7')
            elif (blocks == [6, 12, 32, 32]):
                weights_path = keras_utils.get_file('densenet169_weights_tf_dim_ordering_tf_kernels.h5', DENSENET169_WEIGHT_PATH, cache_subdir='models', file_hash='d699b8f76981ab1b30698df4c175e90b')
            elif (blocks == [6, 12, 48, 32]):
                weights_path = keras_utils.get_file('densenet201_weights_tf_dim_ordering_tf_kernels.h5', DENSENET201_WEIGHT_PATH, cache_subdir='models', file_hash='1ceb130c1ea1b78c3bf6114dbdfd8807')
        # include_top is False from here down: "no-top" weight files.
        elif (blocks == [6, 12, 24, 16]):
            weights_path = keras_utils.get_file('densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5', DENSENET121_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='30ee3e1110167f948a6b9946edeeb738')
        elif (blocks == [6, 12, 32, 32]):
            weights_path = keras_utils.get_file('densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5', DENSENET169_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='b8c4d4c20dd625c148057b9ff1c1176b')
        elif (blocks == [6, 12, 48, 32]):
            weights_path = keras_utils.get_file('densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5', DENSENET201_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='c13680b51ded0fb44dff2d8f86ac8bb1')
        model.load_weights(weights_path)
    elif (weights is not None):
        model.load_weights(weights)
    if residuals:
        # Re-root the model on the captured intermediate feature map.
        model = models.Model(inputs, res_outputs, name='densenet121')
    return model
def get_next_activity_model(max_case_length, vocab_size, output_dim, embed_dim=36, num_heads=4, ff_dim=64):
    """Build a single-block transformer over activity-id sequences for
    next-activity prediction.

    NOTE(review): the output layer uses a 'linear' activation although
    next-activity prediction is usually classification — presumably the loss
    applies softmax (e.g. from_logits=True); confirm with the training code.
    """
    inputs = layers.Input(shape=(max_case_length,))
    x = TokenAndPositionEmbedding(max_case_length, vocab_size, embed_dim)(inputs)
    x = TransformerBlock(embed_dim, num_heads, ff_dim)(x)
    # Pool over the sequence dimension before the dense head.
    x = layers.GlobalAveragePooling1D()(x)
    x = layers.Dropout(0.1)(x)
    x = layers.Dense(64, activation='relu')(x)
    x = layers.Dropout(0.1)(x)
    outputs = layers.Dense(output_dim, activation='linear')(x)
    transformer = tf.keras.Model(inputs=inputs, outputs=outputs, name='next_activity_transformer')
    return transformer
def detect_pattern_anomaly(y, yhat, th, dist_measure):
    """Return the indices where dist_measure.abs_dist(actual, predicted)
    exceeds the threshold `th`, comparing y and yhat element-wise."""
    return [index
            for (index, (actual, predicted)) in enumerate(zip(y, yhat))
            if dist_measure.abs_dist(actual, predicted) > th]
class CustomCallback(Callback):
    """Test-only callback: asserts that the logs dict carries both train and
    validation losses and that the callback is bound to a model, at every
    epoch end and at training end."""

    def on_train_end(self, logs=None):
        assert ('train_loss' in logs)
        assert ('val_loss' in logs)
        # self.model is set by the framework before callbacks fire.
        assert self.model

    def on_epoch_end(self, epoch, logs=None):
        assert ('train_loss' in logs)
        assert ('val_loss' in logs)
        assert self.model
def train_model_wrapper(config):
    """Adapter for a hyperparameter tuner: inject the tuned F/K/H values into
    the appliance's hparams, then delegate to train_model."""
    appliance = config['appliance']
    tuned = config['tune']
    # Overwrite the appliance hparams with the trial's tuned values.
    for hp_name in ('F', 'K', 'H'):
        appliance['hparams'][hp_name] = tuned[hp_name]
    return train_model(config['datapath'], config['output'], appliance,
                       config['hparams'], config['doplot'], config['reload'])
def test_plot_projected(tmp_path, corpus):
    """Plotting a document-projected textnet writes exactly one image file
    and produces a non-empty set of drawn objects."""
    n = tn.Textnet(corpus.tokenized())
    papers = n.project(node_type=tn.DOC)
    out = (tmp_path / 'plot-2.png')
    plot = papers.plot(show_clusters=True, label_nodes=True, target=str(out))
    # The returned plot should contain drawn artists...
    assert (len(plot._objects) > 0)
    # ...and exactly one file (the target PNG) should exist in tmp_path.
    assert (len(list(tmp_path.iterdir())) == 1)
def to_tensor(x, dtype=None) -> torch.Tensor:
    """Convert `x` (Tensor, ndarray, list, or tuple) into a torch.Tensor,
    optionally casting to `dtype`.

    Raises TypeError for unsupported inputs (previously the function fell
    through and implicitly returned None).
    """
    if isinstance(x, torch.Tensor):
        if dtype is not None:
            x = x.type(dtype)
        return x
    if isinstance(x, np.ndarray):
        x = torch.from_numpy(x)
        if dtype is not None:
            x = x.type(dtype)
        return x
    if isinstance(x, (list, tuple)):
        # Fix: the original used np.ndarray(x), which interprets x as a
        # *shape* and yields uninitialized memory; np.asarray builds an
        # array from the values.
        x = torch.from_numpy(np.asarray(x))
        if dtype is not None:
            x = x.type(dtype)
        return x
    raise TypeError(f'to_tensor: unsupported input type {type(x).__name__}')
def checkpoint_cb(checkpoint_path, steps_per_epoch=(- 1), num_epochs=10):
    """Create a Keras ModelCheckpoint callback writing cp-NNNN.ckpt files.

    Saves every epoch when steps_per_epoch is negative (the default);
    otherwise saves every num_epochs * steps_per_epoch batches.
    """
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(checkpoint_path, 'cp-{epoch:04d}.ckpt'), monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', save_freq=('epoch' if (steps_per_epoch < 0) else int((num_epochs * steps_per_epoch))))
    return checkpoint_callback
# NOTE(review): `_module()` reads like a registry decorator whose '@' and a
# name prefix were stripped (likely @DATASETS.register_module() in mmdet) —
# confirm against the original source.
_module()
class VOCDataset(XMLDataset):
    """Pascal VOC detection dataset with mAP / recall evaluation."""
    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')

    def __init__(self, **kwargs):
        super(VOCDataset, self).__init__(**kwargs)
        # The VOC year selects the AP convention (2007 -> 11-point metric).
        if ('VOC2007' in self.img_prefix):
            self.year = 2007
        elif ('VOC2012' in self.img_prefix):
            self.year = 2012
        else:
            raise ValueError('Cannot infer dataset year from img_prefix')

    def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None):
        """Evaluate detection `results` under the 'mAP' or 'recall' metric.

        NOTE(review): the recall branch iterates enumerate(iou_thr), which
        assumes iou_thr is already a sequence there — the float default would
        fail; confirm the intended argument type for that path.
        """
        # A single-element metric list is unwrapped to its string.
        if (not isinstance(metric, str)):
            assert (len(metric) == 1)
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if (metric not in allowed_metrics):
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = ([iou_thr] if isinstance(iou_thr, float) else iou_thr)
        if (metric == 'mAP'):
            assert isinstance(iou_thrs, list)
            if (self.year == 2007):
                ds_name = 'voc07'
            else:
                ds_name = self.CLASSES
            mean_aps = []
            for iou_thr in iou_thrs:
                print_log(f''' {('-' * 15)}iou_thr: {iou_thr}{('-' * 15)}''')
                (mean_ap, _) = eval_map(results, annotations, scale_ranges=None, iou_thr=iou_thr, dataset=ds_name, logger=logger)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int((iou_thr * 100)):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = (sum(mean_aps) / len(mean_aps))
        elif (metric == 'recall'):
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
            for (i, num) in enumerate(proposal_nums):
                for (j, iou) in enumerate(iou_thr):
                    eval_results[f'{num}{iou}'] = recalls[(i, j)]
            # Average recall across IoU thresholds when more than one was used.
            if (recalls.shape[1] > 1):
                ar = recalls.mean(axis=1)
                for (i, num) in enumerate(proposal_nums):
                    eval_results[f'{num}'] = ar[i]
        return eval_results
def dump_json(filename, data):
    """Serialize ``data`` as pretty-printed JSON, creating parent dirs first."""
    parent = os.path.dirname(filename)
    pathlib.Path(parent).mkdir(parents=True, exist_ok=True)
    with open(filename, 'w') as out:
        # LogEncoder handles project-specific types; keys are sorted for
        # deterministic, diff-friendly output.
        json.dump(data, out, indent=2, sort_keys=True, cls=LogEncoder)
class IndexedRowTableLinearize():
    """Flatten a ``{'header': ..., 'rows': ...}`` table into one string."""

    def process_table(self, table_content: Dict):
        """Linearize the header plus every row (1-indexed), space separated."""
        # PROMPT_MESSAGE is only evaluated if the assertion fails.
        assert (('header' in table_content) and ('rows' in table_content)), self.PROMPT_MESSAGE
        parts = [self.process_header(table_content['header'])]
        for idx, row in enumerate(table_content['rows']):
            parts.append(self.process_row(row, row_index=(idx + 1)))
        return ' '.join(parts).strip()

    def process_header(self, headers: List):
        """Render the header cells as ``col : a | b | ...``."""
        return 'col : ' + ' | '.join(headers)

    def process_row(self, row: List, row_index: int):
        """Render one row as ``row <i> : c1 | c2 | ...`` (ints stringified)."""
        cells = [str(v) if isinstance(v, int) else v for v in row]
        return f"row {row_index} : {' | '.join(cells)}"
def postprocess3D(data, isU=True, resFlag=0, num=None):
    """Render an isosurface of a 48^3 scalar field and save it as a PNG."""
    axis = np.linspace(-50, 50, 48)
    (x, y, z) = np.meshgrid(axis, axis, axis)
    appd = ['PeRCNNTruth']
    uv = ['v', 'u']
    # Isosurface thresholds differ between the u and v channels.
    iso_min = 0.3 if isU else 0.1
    iso_max = 0.5 if isU else 0.3
    fig = go.Figure(data=go.Isosurface(
        x=x.flatten(), y=y.flatten(), z=z.flatten(),
        value=data.flatten(),
        isomin=iso_min, isomax=iso_max,
        opacity=0.2, colorscale='RdBu', surface_count=2))
    fig.write_image('./figures/Iso_surf_%s_%s_%d.png' % (uv[isU], appd[resFlag], num))
    plt.close('all')
_module  # NOTE(review): looks like residue of a registry decorator — confirm upstream
class BFP(nn.Module):
    """Balanced Feature Pyramid: gather all levels at one resolution,
    refine the average, and scatter it back to every level as a residual."""

    def __init__(self, in_channels, num_levels, refine_level=2, refine_type=None, conv_cfg=None, norm_cfg=None):
        # refine_type: None (no refinement), 'conv', or 'non_local'.
        super(BFP, self).__init__()
        assert (refine_type in [None, 'conv', 'non_local'])
        self.in_channels = in_channels
        self.num_levels = num_levels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Index of the level whose spatial size is used for gathering.
        self.refine_level = refine_level
        self.refine_type = refine_type
        assert (0 <= self.refine_level < self.num_levels)
        if (self.refine_type == 'conv'):
            self.refine = ConvModule(self.in_channels, self.in_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
        elif (self.refine_type == 'non_local'):
            self.refine = NonLocal2D(self.in_channels, reduction=1, use_scale=False, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)

    def init_weights(self):
        # Xavier-initialize every conv layer in the module.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

    def forward(self, inputs):
        assert (len(inputs) == self.num_levels)
        feats = []
        # Bring every level to the refine_level resolution: finer levels are
        # pooled down, coarser levels are upsampled.
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if (i < self.refine_level):
                gathered = F.adaptive_max_pool2d(inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)
        # Balanced semantic feature: plain average across levels.
        bsf = (sum(feats) / len(feats))
        if (self.refine_type is not None):
            bsf = self.refine(bsf)
        # Scatter the refined map back to each level's resolution and add it
        # to the original input as a residual.
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if (i < self.refine_level):
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append((residual + inputs[i]))
        return tuple(outs)
class TestATSSHead(TestCase):
    """Unit test for ATSSHead loss computation."""

    def test_atss_head_loss(self):
        """With zero GT boxes only the cls loss is non-zero; with one GT box
        all three losses (cls, bbox, centerness) are non-zero."""
        s = 256
        img_metas = [{'img_shape': (s, s, 3), 'pad_shape': (s, s, 3), 'scale_factor': 1}]
        cfg = Config(dict(assigner=dict(type='ATSSAssigner', topk=9), allowed_border=(- 1), pos_weight=(- 1), debug=False))
        atss_head = ATSSHead(num_classes=4, in_channels=1, stacked_convs=1, feat_channels=1, norm_cfg=None, train_cfg=cfg, anchor_generator=dict(type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
        # One random feature map per FPN stride.
        feat = [torch.rand(1, 1, (s // feat_size), (s // feat_size)) for feat_size in [8, 16, 32, 64, 128]]
        (cls_scores, bbox_preds, centernesses) = atss_head.forward(feat)
        # Case 1: no ground-truth instances.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds, centernesses, [gt_instances], img_metas)
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
        self.assertGreater(empty_cls_loss.item(), 0, 'cls loss should be non-zero')
        self.assertEqual(empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes')
        self.assertEqual(empty_centerness_loss.item(), 0, 'there should be no centerness loss when there are no true boxes')
        # Case 2: a single ground-truth box of class 2.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds, centernesses, [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
        self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero')
        self.assertGreater(onegt_centerness_loss.item(), 0, 'centerness loss should be non-zero')
def test_warn_internal_when_use_physical():
    """galpy must warn when use_physical=True but ro/vo are not configured."""
    import warnings
    from galpy import potential
    from galpy.util import galpyWarning
    expected = 'Returning output(s) in internal units even though use_physical=True, because ro and/or vo not set'
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always', galpyWarning)
        potential.evaluateRforces(potential.MWPotential2014, 1.0, 0.0, use_physical=True)
    raisedWarning = any(str(entry.message) == expected for entry in caught)
    assert raisedWarning, 'No warning raised when returning internal-units with use_physical=True'
    return None
class TestPSI(FLTest):
    """Integration tests for Private Set Intersection against a local FL server."""

    def setUp(self) -> None:
        # Spin up a fresh FL server on the test port before every test.
        self.fl_server = FLServer()
        self.fl_server.set_port(self.port)
        self.fl_server.build()
        self.fl_server.start()

    def tearDown(self) -> None:
        self.fl_server.stop()

    def test_psi_get_salt(self):
        """The server must hand out a string salt."""
        init_fl_context(1, self.target)
        psi = PSI()
        salt = psi.get_salt()
        assert isinstance(salt, str)

    def test_psi_pipeline(self):
        """Full flow: fetch salt, upload a key set, download the intersection."""
        init_fl_context(1, self.target)
        psi = PSI()
        salt = psi.get_salt()
        key = ['k1', 'k2']
        psi.upload_set(key, salt)
        intersection = psi.download_intersection()
        assert isinstance(intersection, list)
        # Presumably with a single party both uploaded keys survive the
        # intersection — verify against the server's PSI semantics.
        self.assertEqual(len(intersection), 2)
class Position(NamedTuple):
    """A (row, col) grid coordinate supporting elementwise ``==`` and ``+``."""
    row: chex.Array
    col: chex.Array

    def __eq__(self, other: 'Position') -> chex.Array:
        # Elementwise comparison so batched (array-valued) positions work.
        if not isinstance(other, Position):
            return NotImplemented
        rows_match = self.row == other.row
        cols_match = self.col == other.col
        return rows_match & cols_match

    def __add__(self, other: 'Position') -> 'Position':
        # Componentwise translation by another position/offset.
        if not isinstance(other, Position):
            return NotImplemented
        return Position(row=(self.row + other.row), col=(self.col + other.col))
def test(model, test_loader):
    """Evaluate ``model`` on ``test_loader``; print loss/accuracy, return the
    number of correct predictions.

    Relies on a module-level ``args`` namespace for ``cuda`` and ``test_dir``.
    """
    model.eval()
    running_loss = 0
    correct = 0
    with torch.no_grad():
        for (data, target) in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            # The model takes (source, target, label); only the source-branch
            # output is scored here.
            (s_output, t_output) = model(data, data, target)
            log_probs = F.log_softmax(s_output, dim=1)
            running_loss += F.nll_loss(log_probs, target, reduction='sum').item()
            pred = s_output.data.max(1)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    running_loss /= len(test_loader.dataset)
    accuracy = (100.0 * correct) / len(test_loader.dataset)
    print(args.test_dir, '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        running_loss, correct, len(test_loader.dataset), accuracy))
    return correct
def load_data_normalised(root_path):
    """Load data from ``root_path`` and z-score it per feature (column-wise)."""
    (data, labels) = load_data(root_path)
    mean = data.mean(axis=0)
    std = data.std(axis=0)
    return ((data - mean) / std, labels)
class SqueezeNet(nn.Module):
    """SqueezeNet v1.0 / v1.1 classifier built from Fire modules."""

    def __init__(self, version=1.0, num_classes=1000):
        super(SqueezeNet, self).__init__()
        if (version not in [1.0, 1.1]):
            raise ValueError('Unsupported SqueezeNet version {version}:1.0 or 1.1 expected'.format(version=version))
        self.num_classes = num_classes
        if (version == 1.0):
            # v1.0: 7x7 stem conv with 96 channels, pooling after fire3/fire7.
            self.features = nn.Sequential(nn.Conv2d(3, 96, kernel_size=7, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(96, 16, 64, 64), Fire(128, 16, 64, 64), Fire(128, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 32, 128, 128), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(512, 64, 256, 256))
        else:
            # v1.1: 3x3 stem conv with 64 channels and earlier pooling.
            self.features = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2), Fire(64, 16, 64, 64), Fire(128, 16, 64, 64), nn.MaxPool2d(kernel_size=3, stride=2), Fire(128, 32, 128, 128), Fire(256, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), Fire(512, 64, 256, 256))
        # Classifier head: dropout -> 1x1 conv to num_classes -> 13x13 avg pool.
        final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
        self.classifier = nn.Sequential(nn.Dropout(p=0.5), final_conv, nn.ReLU(inplace=True), nn.AvgPool2d(13, stride=1))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # The final conv gets a narrow normal init; all other convs
                # get Kaiming-uniform.
                if (m is final_conv):
                    init.normal_(m.weight, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform_(m.weight)
                if (m.bias is not None):
                    init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x)
        # Flatten the 1x1 spatial output into (batch, num_classes) logits.
        return x.view(x.size(0), self.num_classes)
def overwrite_args_from_json(fpath, args):
    """Load a JSON file and copy every entry under its 'args' key onto ``args``.

    Returns the full parsed JSON dict (including keys other than 'args').
    """
    with open(fpath, 'r') as fh:
        json_dict = json.load(fh)
    if 'args' in json_dict:
        for name, value in json_dict['args'].items():
            setattr(args, name, value)
    return json_dict
class QLinearVQ(nn.Linear):
    """Quantized linear layer with a learned V/U input-transform pair.

    Quantization is gated by the module-level QUANTIZE flag; the
    QuantMeasure / QuantThUpdate helpers are defined elsewhere in the project.
    """

    def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, perC=True, biprecision=False, measure=False, cal_qparams=False):
        super(QLinearVQ, self).__init__(in_features, out_features, bias)
        self.num_bits = num_bits
        self.num_bits_weight = (num_bits_weight or num_bits)
        self.num_bits_grad = num_bits_grad
        self.biprecision = biprecision
        # Per-output-channel equalization scale and the V/U transform pair,
        # both initialised to identity.
        self.equ_scale = nn.Parameter(torch.ones(out_features, 1))
        self.V = nn.Parameter(torch.eye(in_features))
        self.U = nn.Parameter(torch.eye(in_features))
        if measure:
            # Measurement mode: collect range statistics instead of learning
            # quantization thresholds.
            self.quantize_input = QuantMeasure(self.num_bits, measure=measure, cal_qparams=cal_qparams)
            self.quantize_weight = QuantMeasure(self.num_bits, shape_measure=((out_features if perC else 1), 1), flatten_dims=((1, (- 1)) if perC else (0, (- 1))), measure=measure, reduce_dim=(None if perC else 0))
        else:
            self.quantize_input = QuantThUpdate(self.num_bits, measure=measure)
            self.quantize_weight = QuantThUpdate(self.num_bits, shape_measure=((out_features if perC else 1), 1), flatten_dims=((1, (- 1)) if perC else (0, (- 1))), measure=measure, reduce_dim=(None if perC else 0))
        self.measure = measure
        self.cal_params = cal_qparams
        self.quantize = QUANTIZE

    def reset(self):
        # Re-initialise U and V uniformly in [-1/sqrt(n), 1/sqrt(n)].
        stdv = (1.0 / math.sqrt(self.U.size(1)))
        self.U.data.uniform_((- stdv), stdv)
        self.V.data.uniform_((- stdv), stdv)

    def forward(self, input):
        # Project the input through V, quantize, and map back through U.
        vx = self.V.mm(input.transpose(0, 1).contiguous())
        qvx = (self.quantize_input(vx) if self.quantize else input)
        qinput = (self.U.mm(qvx).transpose(1, 0).contiguous() if self.quantize else input)
        qweight = (self.quantize_weight(self.weight) if (self.quantize and (not self.cal_params)) else self.weight)
        if ((not self.measure) and (os.environ.get('DEBUG') == 'True')):
            # Sanity check: quantized tensors must fit the configured bit-width.
            assert (qinput.unique().numel() <= (2 ** self.num_bits))
            assert (qweight[0].unique().numel() <= (2 ** self.num_bits_weight))
        if (self.bias is not None):
            # Bias uses the combined input + weight bit budget.
            qbias = (self.bias if (self.measure or (not self.quantize)) else quantize(self.bias, num_bits=(self.num_bits_weight + self.num_bits), flatten_dims=(0, (- 1))))
        else:
            qbias = None
        if ((not self.biprecision) or (self.num_bits_grad is None)):
            output = F.linear(qinput, qweight, qbias)
            if (self.num_bits_grad is not None):
                output = quantize_grad(output, num_bits=self.num_bits_grad)
        else:
            output = linear_biprec(qinput, qweight, qbias, self.num_bits_grad)
        return output
def test_game_2048__step_jit(game_2048: Game2048) -> None:
    """Stepping the jitted env must change the board while tracing only once."""
    key = jax.random.PRNGKey(0)
    (state, timestep) = game_2048.reset(key)
    chex.clear_trace_counter()
    # Fail if jit retraces the step function more than once across calls.
    step_fn = jax.jit(chex.assert_max_traces(game_2048.step, n=1))
    for step_idx in range(2):
        action = jnp.argmax(state.action_mask)
        (new_state, next_timestep) = step_fn(state, action)
        assert not jnp.array_equal(new_state.board, state.board)
        if step_idx == 0:
            # State leaves must be jax arrays so the state stays jit-friendly.
            assert_is_jax_array_tree(new_state)
        state = new_state
def main():
    """Convert every DIV2K image to grayscale using a multiprocessing pool."""
    input_folder = '/mnt/SSD/xtwang/BasicSR_datasets/DIV2K800/DIV2K800'
    save_folder = '/mnt/SSD/xtwang/BasicSR_datasets/DIV2K800/DIV2K800_gray'
    mode = 'gray'
    compression_level = 3  # PNG compression level passed to the worker
    n_thread = 20
    # Refuse to overwrite an existing output folder.
    if os.path.exists(save_folder):
        print('Folder [{:s}] already exists. Exit...'.format(save_folder))
        sys.exit(1)
    os.makedirs(save_folder)
    print('mkdir [{:s}] ...'.format(save_folder))
    img_list = []
    for (root, _, file_list) in sorted(os.walk(input_folder)):
        img_list.extend(os.path.join(root, name) for name in file_list)
    pbar = ProgressBar(len(img_list))

    def update(arg):
        # Called from the pool on each finished image.
        pbar.update(arg)

    pool = Pool(n_thread)
    for path in img_list:
        pool.apply_async(worker, args=(path, save_folder, mode, compression_level), callback=update)
    pool.close()
    pool.join()
    print('All subprocesses done.')
class AStar():
    """Abstract A* search; subclasses supply the graph via the four hooks."""
    __metaclass__ = ABCMeta
    __slots__ = ()

    class SearchNode():
        """Per-node bookkeeping during one search."""
        __slots__ = ('data', 'gscore', 'fscore', 'closed', 'came_from', 'out_openset')

        def __init__(self, data, gscore=Infinite, fscore=Infinite):
            self.data = data
            self.gscore = gscore  # best known cost from the start node
            self.fscore = fscore  # gscore + heuristic estimate to goal
            self.closed = False
            self.out_openset = True
            self.came_from = None

        def __lt__(self, b):
            # Heap ordering is by fscore only.
            return (self.fscore < b.fscore)

    class SearchNodeDict(dict):
        # Lazily creates a SearchNode the first time a graph node is seen.
        def __missing__(self, k):
            v = AStar.SearchNode(k)
            self.__setitem__(k, v)
            return v

    def heuristic_cost_estimate(self, current, goal):
        """Admissible estimate of the remaining cost; must be overridden."""
        raise NotImplementedError

    def distance_between(self, n1, n2):
        """Exact cost between two adjacent nodes; must be overridden."""
        raise NotImplementedError

    def neighbors(self, node):
        """Iterable of nodes adjacent to ``node``; must be overridden."""
        raise NotImplementedError

    def is_goal_reached(self, current, goal):
        # Default: plain equality; override for fuzzy goals.
        return (current == goal)

    def reconstruct_path(self, last, reversePath=False):
        """Follow came_from links back from ``last``; yields start->goal
        order unless reversePath is True."""
        def _gen():
            current = last
            while current:
                (yield current.data)
                current = current.came_from
        if reversePath:
            return _gen()
        else:
            return reversed(list(_gen()))

    def astar(self, start, goal, reversePath=False):
        """Run A* from start to goal.

        Returns [start] when already at the goal, a (path, cost) tuple on
        success, or None if the goal is unreachable.
        """
        if self.is_goal_reached(start, goal):
            return [start]
        searchNodes = AStar.SearchNodeDict()
        startNode = searchNodes[start] = AStar.SearchNode(start, gscore=0, fscore=self.heuristic_cost_estimate(start, goal))
        openSet = []
        heappush(openSet, startNode)
        while openSet:
            current = heappop(openSet)
            if self.is_goal_reached(current.data, goal):
                return (self.reconstruct_path(current, reversePath), current.gscore)
            # Popped nodes leave the open set and are closed for good.
            current.out_openset = True
            current.closed = True
            for neighbor in map((lambda n: searchNodes[n]), self.neighbors(current.data)):
                if neighbor.closed:
                    continue
                tentative_gscore = (current.gscore + self.distance_between(current.data, neighbor.data))
                if (tentative_gscore >= neighbor.gscore):
                    continue
                neighbor.came_from = current
                neighbor.gscore = tentative_gscore
                neighbor.fscore = (tentative_gscore + self.heuristic_cost_estimate(neighbor.data, goal))
                if neighbor.out_openset:
                    neighbor.out_openset = False
                    heappush(openSet, neighbor)
                else:
                    # Node already queued with a stale fscore: restore heap order.
                    heapify(openSet)
        return None
def find_intermediate_values(spin_df):
    """Run the full chain of spin-related derivations over ``spin_df``.

    Order matters: each step consumes columns produced by earlier steps.
    """
    pipeline = (
        find_release_point,
        find_release_time,
        find_release_velocity_components,
        find_flight_time,
        find_average_velocity_components,
        find_average_velocity,
        find_average_drag,
        find_magnus_acceleration_magnitude,
        find_average_magnus_acceleration,
        find_magnus_magnitude,
        find_phi,
        find_lift_coefficient,
        find_spin_factor,
        find_transverse_spin,
        find_spin_efficiency,
        find_theta,
    )
    for step in pipeline:
        spin_df = step(spin_df)
    return spin_df
class GaussianMixin():
    """Variational-dropout helpers for layers exposing ``weight`` and
    ``log_sigma2`` attributes."""

    def reset_variational_parameters(self):
        # uniform_(-10, -10) degenerates to a constant fill: every entry of
        # log sigma^2 starts at -10 (i.e. sigma^2 = e^-10, near zero noise).
        self.log_sigma2.data.uniform_(-10, -10)

    def log_alpha(self):
        # alpha = sigma^2 / w^2, so log(alpha) = log(sigma^2) - 2*log|w|.
        # The 1e-12 term guards against log(0) for exactly-zero weights.
        abs_w = abs(self.weight) + 1e-12
        return self.log_sigma2 - 2 * torch.log(abs_w)
def th_pack(tensor):
    """Prefix a zero pad of shape (B, 4, 3) along the last dimension.

    Input of shape (B, 4, C) becomes (B, 4, 3 + C) with zeros first.
    """
    batch_size = tensor.shape[0]
    pad = tensor.new_zeros((batch_size, 4, 3))
    # The pad is a constant; keep it out of autograd.
    pad.requires_grad = False
    return torch.cat([pad, tensor], 2)
def get_flow_combinations_randomly_initalised(flow_names):
    """Build a randomly-initialised flow and its 'easy inverse' twin by name.

    Accepts a single flow name or a (possibly nested) list of names, in which
    case the per-name results are wrapped in CompositeFlow objects.
    Returns a (flow_random, easy_inv_flow) tuple.
    """
    # List input: recurse per element and compose the results.
    if (type(flow_names) is list):
        flow_arr = []
        easy_inv_flow_arr = []
        for flow in flow_names:
            (flow_random, easy_inv_flow) = get_flow_combinations_randomly_initalised(flow)
            flow_arr.append(flow_random)
            easy_inv_flow_arr.append(easy_inv_flow)
        flow_random = CompositeFlow(flow_arr)
        easy_inv_flow = CompositeFlow(easy_inv_flow_arr)
        return (flow_random, easy_inv_flow)
    flow_name = flow_names
    if (flow_name == 'affine'):
        (_a, _b) = np.random.randn(2)
        flow_random = AffineFlow(_a, _b, set_restrictions=True)
        easy_inv_flow = flow_random
    elif (flow_name == 'arcsinh'):
        (_a, _b, _c, _d) = np.random.randn(4)
        flow_random = ArcsinhFlow(_a, _b, _c, _d, add_init_f0=False, set_restrictions=True)
        easy_inv_flow = flow_random
    elif (flow_name == 'inverse_arcsinh'):
        (_a, _b, _c, _d) = np.random.randn(4)
        flow_random = InverseArchsinhFlow(_a, _b, _c, _d, add_init_f0=False, set_restrictions=True)
        easy_inv_flow = flow_random
    elif (flow_name == 'sinh_arcsinhflow'):
        (_a, _b) = np.random.randn(2)
        flow_random = Sinh_ArcsinhFlow(_a, _b, add_init_f0=False, set_restrictions=True)
        easy_inv_flow = flow_random
    elif (flow_name == 'inverse_sinh_arcsinhflow'):
        (_a, _b) = np.random.randn(2)
        flow_random = Inverse_Sinh_ArcsinhFlow(_a, _b, add_init_f0=False, set_restrictions=True)
        easy_inv_flow = flow_random
    elif (flow_name == 'exp'):
        flow_random = ExpFlow()
        easy_inv_flow = flow_random
    elif (flow_name == 'softplus'):
        flow_random = SoftplusFlow()
        easy_inv_flow = flow_random
    elif (flow_name == 'inverse_boxcox'):
        n = 2.0
        n = 2.0  # NOTE(review): duplicated assignment in the original source
        # Keep the Box-Cox lambda inside (0.01, n + 0.01) via a sigmoid.
        boxcox_constraint = (lambda lam: ((n * torch.sigmoid(lam)) + 0.01))
        identity_init = 1.0
        flow_random = CompositeFlow([TranslationFlow(0), InverseBoxCoxFlow(0.01, add_init_f0=0.0, constraint=boxcox_constraint)])
        easy_inv_flow = flow_random
    elif (flow_name == 'step_flow'):
        # NOTE(review): y_train is not defined in this function's scope; this
        # branch relies on a module-level global — confirm before use.
        min_y = np.min(y_train)
        max_y = np.max(y_train)
        step_flow = initalize_step_flow_as_ladder(K=5, output_range=[min_y, max_y], smoothness_scale=0.01, remove_tails=False)
        step_flow = [instance_flow([f], is_composite=False)[0] for f in step_flow]
        step_flow = StepFlow(step_flow, add_init_f0=False)
        flow_random = step_flow
        easy_inv_flow = step_flow
    elif (flow_name == 'tukey_right'):
        (_g, _h) = np.random.randn(2)
        flow_random = TukeyRightFlow(init_g=_g, init_h=_h, add_init_f0=False)
        easy_inv_flow = flow_random
    else:
        # Unknown name: falls through and raises UnboundLocalError below.
        print('Flow ', flow_name, ' not found')
    return (flow_random, easy_inv_flow)
def _quantize(x, bin_edges): bin_edges = copy.copy(bin_edges) bin_edges = sorted(bin_edges) quantized = list(map((lambda y: bisect.bisect_right(bin_edges, y)), x)) return quantized
class TestLMPlots():
    """Smoke tests: the LM plotting helpers must write their PNGs without raising."""

    def test_save_rankings_plot(self, rankings_plot_data_1):
        # The fixture supplies the plot's keyword arguments.
        lm_plots.plot_inner_token_rankings(**rankings_plot_data_1, save_file_path='./tmp/ranking_1.png')

    def test_save_ranking_watch_plot(self, ranking_watch_data_1):
        lm_plots.plot_inner_token_rankings_watch(**ranking_watch_data_1, save_file_path='./tmp/ranking_watch_1.png')
_module()  # NOTE(review): residue of a registry decorator lost in extraction — confirm upstream
class InstaBoost(object):
    """Transform applying InstaBoost instance-level augmentation to a sample."""

    def __init__(self, action_candidate=('normal', 'horizontal', 'skip'), action_prob=(1, 0, 0), scale=(0.8, 1.2), dx=15, dy=15, theta=((- 1), 1), color_prob=0.5, hflag=False, aug_ratio=0.5):
        # Import lazily so the extra dependency is only required when the
        # transform is actually constructed.
        try:
            import instaboostfast as instaboost
        except ImportError:
            raise ImportError('Please run "pip install instaboostfast" to install instaboostfast first for instaboost augmentation.')
        self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob, scale, dx, dy, theta, color_prob, hflag)
        self.aug_ratio = aug_ratio  # probability of applying the augmentation

    def _load_anns(self, results):
        """Convert ann_info (xyxy boxes) into COCO-style per-instance dicts (xywh)."""
        labels = results['ann_info']['labels']
        masks = results['ann_info']['masks']
        bboxes = results['ann_info']['bboxes']
        n = len(labels)
        anns = []
        for i in range(n):
            label = labels[i]
            bbox = bboxes[i]
            mask = masks[i]
            (x1, y1, x2, y2) = bbox
            # xyxy -> xywh as expected by InstaBoost.
            bbox = [x1, y1, (x2 - x1), (y2 - y1)]
            anns.append({'category_id': label, 'segmentation': mask, 'bbox': bbox})
        return anns

    def _parse_anns(self, results, anns, img):
        """Write augmented annotations (converted back to xyxy) and the image
        into ``results``."""
        gt_bboxes = []
        gt_labels = []
        gt_masks_ann = []
        for ann in anns:
            (x1, y1, w, h) = ann['bbox']
            # Drop degenerate boxes the augmentation may have produced.
            if ((w <= 0) or (h <= 0)):
                continue
            bbox = [x1, y1, (x1 + w), (y1 + h)]
            gt_bboxes.append(bbox)
            gt_labels.append(ann['category_id'])
            gt_masks_ann.append(ann['segmentation'])
        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        gt_labels = np.array(gt_labels, dtype=np.int64)
        results['ann_info']['labels'] = gt_labels
        results['ann_info']['bboxes'] = gt_bboxes
        results['ann_info']['masks'] = gt_masks_ann
        results['img'] = img
        return results

    def __call__(self, results):
        img = results['img']
        # Remember the dtype so the augmented image can be cast back.
        orig_type = img.dtype
        anns = self._load_anns(results)
        # Apply the augmentation with probability aug_ratio.
        if np.random.choice([0, 1], p=[(1 - self.aug_ratio), self.aug_ratio]):
            try:
                import instaboostfast as instaboost
            except ImportError:
                raise ImportError('Please run "pip install instaboostfast" to install instaboostfast first.')
            (anns, img) = instaboost.get_new_data(anns, img.astype(np.uint8), self.cfg, background=None)
        results = self._parse_anns(results, anns, img.astype(orig_type))
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
        return repr_str
def patch_llama_for_ntk_scaled_rotary_embeddings(model, alpha):
    """Replace every attention layer's rotary embedding with an NTK-scaled one."""
    from .LlamaNTKScaledRotaryEmbedding import LlamaNTKScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        # Keep the replacement on the same device as the old inv_freq buffer.
        attn.rotary_emb = LlamaNTKScaledRotaryEmbedding(
            attn.head_dim, alpha=alpha, device=attn.rotary_emb.inv_freq.device)
class CUBDataset(ConfounderDataset):
    """CUB/Waterbirds-style dataset: binary label y crossed with a 'place'
    confounder, giving 2 x 2 = 4 groups."""

    def __init__(self, root_dir, target_name, confounder_names, augment_data=False, model_type=None):
        self.root_dir = root_dir
        self.target_name = target_name
        self.confounder_names = confounder_names
        self.model_type = model_type
        self.augment_data = augment_data
        self.data_dir = self.root_dir
        if (not os.path.exists(self.data_dir)):
            raise ValueError(f'{self.data_dir} does not exist yet. Please generate the dataset first.')
        # metadata.csv carries y, place, split, and the image filename per row.
        self.metadata_df = pd.read_csv(os.path.join(self.data_dir, 'metadata.csv'))
        self.y_array = self.metadata_df['y'].values
        self.n_classes = 2
        self.confounder_array = self.metadata_df['place'].values
        self.n_confounders = 1
        self.n_groups = pow(2, 2)
        # Group id = y * 2 + place, so groups enumerate every (y, place) pair.
        self.group_array = ((self.y_array * (self.n_groups / 2)) + self.confounder_array).astype('int')
        self.filename_array = self.metadata_df['img_filename'].values
        self.split_array = self.metadata_df['split'].values
        self.split_dict = {'train': 0, 'val': 1, 'test': 2}
        if (model_attributes[self.model_type]['feature_type'] == 'precomputed'):
            # Precomputed-feature models load a fixed feature matrix and skip
            # image transforms entirely.
            self.features_mat = torch.from_numpy(np.load(os.path.join(root_dir, 'features', model_attributes[self.model_type]['feature_filename']))).float()
            self.train_transform = None
            self.eval_transform = None
        else:
            self.features_mat = None
            self.train_transform = get_transform_cub(self.model_type, train=True, augment_data=augment_data)
            self.eval_transform = get_transform_cub(self.model_type, train=False, augment_data=augment_data)
# NOTE(review): the two leading `.parametrize(...)` lines appear to be residue
# of `@pytest.mark.parametrize` decorators lost in extraction — confirm upstream.
.parametrize('loss_class', [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, MSELoss, L1Loss, SmoothL1Loss, BalancedL1Loss])
.parametrize('input_shape', [(10, 4), (0, 4)])
def test_regression_losses(loss_class, input_shape):
    """Every regression loss must return a Tensor for the common call
    patterns, and must reject avg_factor with reduction_override='sum'."""
    pred = torch.rand(input_shape)
    target = torch.rand(input_shape)
    weight = torch.rand(input_shape)
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    loss = loss_class()(pred, target, weight)
    assert isinstance(loss, torch.Tensor)
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    # avg_factor combined with a 'sum' reduction override must be rejected.
    with pytest.raises(ValueError):
        reduction_override = 'sum'
        loss_class()(pred, target, avg_factor=10, reduction_override=reduction_override)
    # avg_factor is compatible with None/'none'/'mean' overrides.
    for reduction_override in [None, 'none', 'mean']:
        loss_class()(pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
def train(args, model):
    """Train ``model`` on CelebA, logging metrics and checkpointing.

    Saves the run configuration to args.yaml, supports resuming from
    ckpt.tar, and runs a final evaluation pass when training completes.
    """
    if (not osp.isdir(args.root)):
        os.makedirs(args.root)
    # Persist the run configuration next to the checkpoints.
    with open(osp.join(args.root, 'args.yaml'), 'w') as f:
        yaml.dump(args.__dict__, f)
    train_ds = CelebA(train=True)
    eval_ds = CelebA(train=False)
    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=args.train_batch_size, shuffle=True, num_workers=4)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Cosine decay over the full run: one scheduler step per batch.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=(len(train_loader) * args.num_epochs))
    if args.resume:
        # Resume model/optimizer/scheduler state and keep logging to the
        # original log file.
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        optimizer.load_state_dict(ckpt.optimizer)
        scheduler.load_state_dict(ckpt.scheduler)
        logfilename = ckpt.logfilename
        start_epoch = ckpt.epoch
    else:
        logfilename = osp.join(args.root, 'train_{}.log'.format(time.strftime('%Y%m%d-%H%M')))
        start_epoch = 1
    logger = get_logger(logfilename)
    ravg = RunningAverage()
    if (not args.resume):
        logger.info('Total number of parameters: {}\n'.format(sum((p.numel() for p in model.parameters()))))
    for epoch in range(start_epoch, (args.num_epochs + 1)):
        model.train()
        for (x, _) in tqdm(train_loader):
            # Turn a raw image batch into a context/target prediction task.
            batch = img_to_task(x, max_num_points=args.max_num_points, device='cuda')
            optimizer.zero_grad()
            outs = model(batch, num_samples=args.train_num_samples)
            outs.loss.backward()
            optimizer.step()
            scheduler.step()
            for (key, val) in outs.items():
                ravg.update(key, val)
        line = f'{args.model}:{args.expid} epoch {epoch} '
        line += f"lr {optimizer.param_groups[0]['lr']:.3e} "
        line += ravg.info()
        logger.info(line)
        if ((epoch % args.eval_freq) == 0):
            # NOTE(review): `eval` here is a project-level evaluation function
            # that shadows the builtin — confirm it returns a log string.
            logger.info((eval(args, model) + '\n'))
        ravg.reset()
        if (((epoch % args.save_freq) == 0) or (epoch == args.num_epochs)):
            ckpt = AttrDict()
            ckpt.model = model.state_dict()
            ckpt.optimizer = optimizer.state_dict()
            ckpt.scheduler = scheduler.state_dict()
            ckpt.logfilename = logfilename
            # Store the NEXT epoch so resume continues where we left off.
            ckpt.epoch = (epoch + 1)
            torch.save(ckpt, osp.join(args.root, 'ckpt.tar'))
    # Final evaluation pass after training ends.
    args.mode = 'eval'
    eval(args, model)
# NOTE(review): the leading `.dataclass` looks like residue of a
# `@flax.struct.dataclass` decorator lost in extraction — confirm upstream.
.dataclass
class FlaxNextSentencePredictorOutput(ModelOutput):
    """Output container for next-sentence prediction: logits plus optional
    per-layer hidden states and attention weights."""
    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
def process(words, labels, tokenizer, vocabulary, max_seq_length):
    """Convert one tagged sentence into fixed-length BERT-style features.

    Returns (input_id, attention_mask, segment_id, label_id, label_mask),
    each truncated/padded to max_seq_length.  Labels pad with -1 and their
    mask pads with 0.

    NOTE(review): tokenization can split a word into several sub-tokens, so
    input_id and label_id may fall out of alignment — verify the caller
    accounts for this.
    """
    words = ['[CLS]'] + words + ['[SEP]']
    labels = ['O'] + labels + ['O']
    tokens = []
    for word in words:
        tokens.extend(tokenizer.tokenize(word))
    input_id = tokenizer.convert_tokens_to_ids(tokens)
    attention_mask = [1] * len(input_id)
    segment_id = [0] * len(input_id)
    label_id = [vocabulary.to_index(label) for label in labels]
    label_mask = [1] * len(label_id)

    def _fit(seq, pad_value):
        # Truncate to max_seq_length, then right-pad with pad_value.
        seq = seq[:max_seq_length]
        return seq + [pad_value] * (max_seq_length - len(seq))

    return (_fit(input_id, 0), _fit(attention_mask, 0), _fit(segment_id, 0),
            _fit(label_id, -1), _fit(label_mask, 0))
class Pybind11Extension(_Extension):
    """setuptools Extension preconfigured for building pybind11 modules.

    NOTE(review): `def cxx_std` below is missing its `@property` decorator and
    the bare `_std.setter` is residue of `@cxx_std.setter` — both appear to
    have been lost in extraction; confirm against the upstream pybind11
    setup helpers.
    """

    def _add_cflags(self, flags: List[str]) -> None:
        # Prepend so flags passed by the user can still override these.
        self.extra_compile_args[:0] = flags

    def _add_ldflags(self, flags: List[str]) -> None:
        self.extra_link_args[:0] = flags

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self._cxx_level = 0
        cxx_std = kwargs.pop('cxx_std', 0)
        if ('language' not in kwargs):
            kwargs['language'] = 'c++'
        include_pybind11 = kwargs.pop('include_pybind11', True)
        super().__init__(*args, **kwargs)
        if include_pybind11:
            # Best effort: add pybind11's include dir if it is installed.
            try:
                import pybind11
                pyinc = pybind11.get_include()
                if (pyinc not in self.include_dirs):
                    self.include_dirs.append(pyinc)
            except ModuleNotFoundError:
                pass
        self.cxx_std = cxx_std
        cflags = []
        ldflags = []
        if WIN:
            cflags += ['/EHsc', '/bigobj']
        else:
            cflags += ['-fvisibility=hidden']
            env_cflags = os.environ.get('CFLAGS', '')
            env_cppflags = os.environ.get('CPPFLAGS', '')
            c_cpp_flags = (shlex.split(env_cflags) + shlex.split(env_cppflags))
            # Strip debug info unless the user explicitly asked for -g.
            if (not any((opt.startswith('-g') for opt in c_cpp_flags))):
                cflags += ['-g0']
            if MACOS:
                cflags += ['-stdlib=libc++']
                ldflags += ['-stdlib=libc++']
        self._add_cflags(cflags)
        self._add_ldflags(ldflags)

    def cxx_std(self) -> int:
        return self._cxx_level

    _std.setter
    def cxx_std(self, level: int) -> None:
        if self._cxx_level:
            warnings.warn('You cannot safely change the cxx_level after setting it!')
        # MSVC has no C++11 switch; 14 is the lowest selectable standard.
        if (WIN and (level == 11)):
            level = 14
        self._cxx_level = level
        if (not level):
            return
        cflags = [STD_TMPL.format(level)]
        ldflags = []
        if (MACOS and ('MACOSX_DEPLOYMENT_TARGET' not in os.environ)):
            # C++17 requires a newer macOS deployment target than C++11/14.
            current_macos = tuple((int(x) for x in platform.mac_ver()[0].split('.')[:2]))
            desired_macos = ((10, 9) if (level < 17) else (10, 14))
            macos_string = '.'.join((str(x) for x in min(current_macos, desired_macos)))
            macosx_min = f'-mmacosx-version-min={macos_string}'
            cflags += [macosx_min]
            ldflags += [macosx_min]
        self._add_cflags(cflags)
        self._add_ldflags(ldflags)
def check_file(filepath, md5sum):
    """Return True iff ``filepath`` exists and its MD5 digest equals ``md5sum``."""
    digest = hashlib.md5()
    try:
        with open(filepath, 'rb') as fh:
            # Stream in 4 KiB chunks so large files stay memory-friendly.
            for chunk in iter(lambda: fh.read(4096), b''):
                digest.update(chunk)
    except FileNotFoundError:
        return False
    return digest.hexdigest() == md5sum
class AutoModelForQuestionAnswering(metaclass=DummyObject):
    """Placeholder emitted when torch is unavailable; constructing it raises a
    helpful message via requires_backends instead of a bare ImportError."""
    # Backends that must be installed for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test():
    """Smoke-test VGG11: build it, print it, and run a random 96x96 batch."""
    net = VGG('VGG11', input_size=32, num_class=10)
    print(net)
    inputs = torch.randn(128, 3, 96, 96)
    outputs = net(inputs)
    print(outputs.size())
def file_based_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_file):
    """Serialize classification examples into a TFRecord file at output_file."""

    def _int64_feature(values):
        # Wrap a sequence of ints as a tf Feature.
        return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

    writer = tf.compat.v1.python_io.TFRecordWriter(output_file)
    for ex_index, example in enumerate(examples):
        # Progress logging every 10k examples.
        if ex_index % 10000 == 0:
            tf.compat.v1.logging.info('Writing example %d of %d' % (ex_index, len(examples)))
        feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer)
        features = collections.OrderedDict()
        features['input_ids'] = _int64_feature(feature.input_ids)
        features['input_mask'] = _int64_feature(feature.input_mask)
        features['segment_ids'] = _int64_feature(feature.segment_ids)
        features['label_ids'] = _int64_feature([feature.label_id])
        features['is_real_example'] = _int64_feature([int(feature.is_real_example)])
        tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        writer.write(tf_example.SerializeToString())
    writer.close()
def train_vanilla(args, io):
    """Train a point-cloud classifier (PointNet or DGCNN) on ModelNet40,
    evaluating after every epoch and checkpointing the best test accuracy.

    args: namespace of hyper-parameters (model, batch sizes, lr, epochs, ...).
    io:   logger exposing cprint() for console + file output.
    """
    train_loader = DataLoader(ModelNet40(args, partition='train'), num_workers=8, batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(args, partition='test'), num_workers=8, batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    device = torch.device(('cuda' if args.cuda else 'cpu'))
    # Model selection; anything but pointnet/dgcnn is rejected.
    if (args.model == 'pointnet'):
        model = PointNet(args).to(device)
    elif (args.model == 'dgcnn'):
        model = DGCNN(args).to(device)
    else:
        raise Exception('Not implemented')
    print(str(model))
    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), 'GPUs!')
    if args.use_sgd:
        print('Use SGD')
        # NOTE(review): SGD lr is args.lr * 100 — presumably args.lr is tuned
        # for Adam; confirm against the experiment configs.
        opt = optim.SGD(model.parameters(), lr=(args.lr * 100), momentum=args.momentum, weight_decay=0.0001)
    else:
        print('Use Adam')
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.0001)
    # Cosine decay of the learning rate over args.epochs, floored at eta_min.
    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
    criterion = cal_loss
    best_test_acc = 0
    for epoch in range(args.epochs):
        # ---------------- training pass ----------------
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for (data, label) in train_loader:
            (data, label) = (data.to(device), label.to(device).squeeze())
            # Loader yields (B, N, 3); models expect channels-first (B, 3, N).
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            # Weight loss by batch size so the epoch average is per-sample.
            train_loss += (loss.item() * batch_size)
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        scheduler.step()
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = ('Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch, ((train_loss * 1.0) / count), metrics.accuracy_score(train_true, train_pred), metrics.balanced_accuracy_score(train_true, train_pred)))
        io.cprint(outstr)
        # ---------------- evaluation pass ----------------
        # NOTE(review): no torch.no_grad() here, so autograd state is tracked
        # during evaluation; consider wrapping this loop.
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        for (data, label) in test_loader:
            (data, label) = (data.to(device), label.to(device).squeeze())
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            logits = model(data)
            loss = criterion(logits, label)
            preds = logits.max(dim=1)[1]
            count += batch_size
            test_loss += (loss.item() * batch_size)
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
        outstr = ('Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch, ((test_loss * 1.0) / count), test_acc, avg_per_class_acc))
        io.cprint(outstr)
        # Persist whenever test accuracy ties or beats the best so far.
        if (test_acc >= best_test_acc):
            best_test_acc = test_acc
            torch.save(model.state_dict(), ('checkpoints/%s/models/model.t7' % args.exp_name))
class GetMatrix(nn.Module):
    """Predict per-pixel (gamma, beta) modulation maps from a feature map
    using two bias-free 1x1 convolutions."""

    def __init__(self, dim_in, dim_out):
        super(GetMatrix, self).__init__()
        # 1x1 projections; bias-free so each output is a pure linear map.
        self.get_gamma = nn.Conv2d(dim_in, dim_out, kernel_size=1, stride=1, padding=0, bias=False)
        self.get_beta = nn.Conv2d(dim_in, dim_out, kernel_size=1, stride=1, padding=0, bias=False)

    def forward(self, x):
        """Return the (gamma, beta) pair for input feature map x."""
        return self.get_gamma(x), self.get_beta(x)
class LookupDuplicateError(Exception):
    """Raised when a lookup unexpectedly matches more than one entry.

    Fix: the original never forwarded *message* to Exception.__init__, so
    str(exc) and traceback output were empty.
    """

    def __init__(self, message: str):
        # Forward to Exception so str(exc)/args carry the message.
        super().__init__(message)
        self.message = message
def efficientnet_b5(in_size=(456, 456), **kwargs):
    """EfficientNet-B5 at its native 456x456 input size.

    Extra keyword arguments are forwarded to get_efficientnet unchanged.
    """
    return get_efficientnet(version='b5', model_name='efficientnet_b5', in_size=in_size, **kwargs)
def resnetbc14b_cub(num_classes=200, **kwargs):
    """ResNet-BC-14b tuned for CUB-200-2011 (200 bird classes).

    Bottleneck blocks, conv1_stride=False variant; extra kwargs pass through.
    """
    return get_resnet(blocks=14, bottleneck=True, conv1_stride=False, num_classes=num_classes, model_name='resnetbc14b_cub', **kwargs)
class ExampleClass():
    """Demonstration class exposing three constructor-set attributes,
    two defaulted attributes, and a few trivial methods."""

    def __init__(self, param1, param2, param3):
        # Stash the constructor arguments verbatim.
        self.attr1, self.attr2, self.attr3 = param1, param2, param3
        # Fresh list per instance — never share mutable state across objects.
        self.attr4 = ['attr4']
        self.attr5 = None

    def property1(self):
        """Return the fixed string 'property1'."""
        return 'property1'

    def method1(self, param1, param2):
        """Succeed unconditionally, ignoring both arguments."""
        return True

    def __special__(self):
        """Placeholder special hook; intentionally does nothing."""
        pass

    def _private(self):
        """Placeholder private helper; intentionally does nothing."""
        pass
class BasicTransformerBlock(nn.Module):
    """Pre-norm transformer block: self-attention, cross-attention, then a
    feed-forward network, each wrapped in a residual connection."""

    def __init__(self, dim, n_heads, d_head, dropout=0.0, context_dim=None, gated_ff=True, checkpoint=True):
        super().__init__()
        # NOTE(review): `checkpoint` is accepted but not used anywhere in this block.
        # Self-attention (no context_dim -> attends over x itself).
        self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout)
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        # Cross-attention over the optional conditioning context.
        self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout)
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)

    def forward(self, x, context=None):
        """Apply the three residual sub-layers in order and return the result."""
        x = x + self.attn1(self.norm1(x))
        x = x + self.attn2(self.norm2(x), context=context)
        x = x + self.ff(self.norm3(x))
        return x
def delete_oleans(lean_files: List[Path]):
    """Delete the compiled .olean sibling of every given .lean path, if it exists."""
    for lean_path in lean_files:
        compiled = lean_path.with_suffix('.olean')
        # Silently skip files that were never compiled.
        if compiled.exists():
            compiled.unlink()
class DataTrainingArguments():
    """Arguments controlling the data fed to the model for training and eval.

    NOTE(review): the fields use dataclasses.field(...), so this class is
    presumably decorated with @dataclass where it is declared — confirm
    at the declaration site (outside this view).
    """

    # Directory containing the CoNLL-2003-formatted .txt files (required).
    data_dir: str = field(metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
    # Optional path to a label file; falls back to the CoNLL-2003 label set.
    labels: Optional[str] = field(default=None, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'})
    # Hard cap on tokenized sequence length (longer truncated, shorter padded).
    max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    # When True, ignore any previously cached datasets and rebuild them.
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def test(args, model, device, x, y, criterion, task_id_nominal, curr_task_masks=None, mode='test'):
    """Evaluate `model` on the full (x, y) tensors in shuffled mini-batches.

    Returns (mean loss, accuracy in percent) over all samples.
    """
    model.eval()
    loss_sum = 0
    seen = 0
    n_correct = 0
    # Random evaluation order (does not affect the aggregate metrics).
    perm = np.arange(x.size(0))
    np.random.shuffle(perm)
    perm = torch.LongTensor(perm).to(device)
    with torch.no_grad():
        for start in range(0, len(perm), args.batch_size_test):
            end = start + args.batch_size_test
            # Final partial batch keeps whatever indices remain.
            idx = perm[start:end] if end <= len(perm) else perm[start:]
            data = x[idx].to(device)
            target = y[idx].to(device)
            mask = curr_task_masks if curr_task_masks else None
            output = model(data, task_id_nominal, mask=mask, mode=mode)
            loss = criterion(output, target)
            pred = output.argmax(dim=1, keepdim=True)
            n_correct += pred.eq(target.view_as(pred)).sum().item()
            # Weight by batch size so the mean is per-sample.
            loss_sum += loss.data.cpu().numpy().item() * len(idx)
            seen += len(idx)
    return (loss_sum / seen, (100.0 * n_correct) / seen)
class OneHotBool(enum.IntEnum):
    """Three-valued flag: unset (NONE) or an explicit one-hot TRUE/FALSE.

    Fix: from_bool is now a @staticmethod. Undecorated, accessing it via a
    member (e.g. OneHotBool.TRUE.from_bool(...)) would bind the member as
    the `b` argument and silently misbehave.
    """

    NONE = 0
    TRUE = 1
    FALSE = 2

    @staticmethod
    def from_bool(b):
        """Map a Python truth value to TRUE/FALSE (never NONE)."""
        if b:
            return OneHotBool.TRUE
        return OneHotBool.FALSE

    def __str__(self):
        # Bare member name, e.g. 'TRUE' rather than 'OneHotBool.TRUE'.
        return self.name

    def __repr__(self):
        return self.name
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker for (optionally distributed) linear evaluation on ImageNet.

    Builds the backbone, freezes everything except the final linear layer,
    optionally loads MoCo-style pretrained weights ('module.encoder_q.*'),
    wraps the model for DDP/DataParallel, then runs the train/validate loop.

    gpu:            GPU index for this process (None for CPU/DataParallel path).
    ngpus_per_node: number of GPUs on this node (used for rank/batch scaling).
    args:           parsed CLI namespace.
    """
    global best_acc1
    args.gpu = gpu
    # Silence print() on all but the rank-0 process of each node.
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    print("=> creating model '{}'".format(args.arch))
    if (args.arch == 'efficientb0'):
        model = efficientnet_b0(pretrained=False, num_classes=1000)
    elif (args.arch == 'efficientb1'):
        model = efficientnet_b1(pretrained=False, num_classes=1000)
    elif (args.arch == 'mobilenetv3'):
        model = mobilenetv3_large_100(num_classes=1000)
    else:
        # Fall back to any torchvision architecture by name.
        model = models.__dict__[args.arch]()
    print(model)
    # Freeze the whole backbone; only the final classifier/fc layer trains.
    for (name, param) in model.named_parameters():
        if ((args.arch in ['resnet18', 'resnet34', 'resnet50']) and (name not in ['fc.weight', 'fc.bias'])):
            param.requires_grad = False
        if ((args.arch in ['efficientb0', 'efficientb1', 'mobilenetv3']) and (name not in ['classifier.weight', 'classifier.bias'])):
            param.requires_grad = False
    # Re-initialize the linear head.
    if (args.arch in ['efficientb0', 'efficientb1', 'mobilenetv3']):
        model.classifier.weight.data.normal_(mean=0.0, std=0.01)
        model.classifier.bias.data.zero_()
    else:
        model.fc.weight.data.normal_(mean=0.0, std=0.01)
        model.fc.bias.data.zero_()
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location='cpu')
            state_dict = checkpoint['state_dict']
            # Rename MoCo query-encoder keys ('module.encoder_q.*') to plain
            # backbone keys, skipping the head; every original key is deleted.
            # NOTE(review): nesting of this if/elif reconstructed from a
            # collapsed source — verify against the original MoCo lincls code.
            for k in list(state_dict.keys()):
                if (args.arch in ['efficientb0', 'efficientb1', 'mobilenetv3']):
                    if (k.startswith('module.encoder_q') and (not k.startswith('module.encoder_q.classifier'))):
                        state_dict[k[len('module.encoder_q.'):]] = state_dict[k]
                elif (k.startswith('module.encoder_q') and (not k.startswith('module.encoder_q.fc'))):
                    state_dict[k[len('module.encoder_q.'):]] = state_dict[k]
                del state_dict[k]
            args.start_epoch = 0
            msg = model.load_state_dict(state_dict, strict=False)
            print(msg)
            # Only the freshly initialized head may be missing from the checkpoint.
            if (args.arch in ['efficientb0', 'efficientb1', 'mobilenetv3']):
                assert (set(msg.missing_keys) == {'classifier.weight', 'classifier.bias'})
            else:
                assert (set(msg.missing_keys) == {'fc.weight', 'fc.bias'})
            print("=> loaded pre-trained model '{}'".format(args.pretrained))
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))
    # Wrap the model for the chosen parallelism mode.
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split batch and workers evenly across this node's GPUs.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
        # AlexNet/VGG parallelize only the conv features (classic torchvision trick).
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    # Linear evaluation: exactly the head's weight+bias should be trainable.
    parameters = list(filter((lambda p: p.requires_grad), model.parameters()))
    assert (len(parameters) == 2)
    optimizer = torch.optim.SGD(parameters, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.resume:
        checkpoint_path = get_last_checkpoint(args.resume)
        if os.path.isfile(checkpoint_path):
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            if (args.gpu is None):
                checkpoint = torch.load(checkpoint_path)
            else:
                # Map tensors straight onto this process's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(checkpoint_path, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if (args.gpu is not None):
                best_acc1 = best_acc1.to(args.gpu)
            out = model.load_state_dict(checkpoint['state_dict'], strict=False)
            print(out)
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Standard ImageNet pipelines with the usual mean/std normalization.
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle the distributed sampler deterministically per epoch.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, criterion, optimizer, epoch, args)
        acc1 = validate(val_loader, model, criterion, args)
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        # Only one process per node writes checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best)
            if (epoch == args.start_epoch):
                # Verify the frozen backbone did not drift from the pretrained weights.
                sanity_check(model.state_dict(), args.pretrained, args)
class DPMSolverSinglestepScheduler(metaclass=DummyObject):
    """Import-time placeholder that raises a torch-required error on any use.

    Fix: from_config/from_pretrained already took `cls` as their first
    parameter but were missing the @classmethod decorators, so calling them
    on the class passed user arguments into `cls`.
    """

    # Backends needed for the real scheduler implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])
def add_data(filename, split, ours):
    """Read one WikiSQL-style JSON-lines file and append normalized entries to *ours*.

    Fixes: the file handle is now closed via a context manager (the original
    leaked it), and unused locals (`phase` and friends) are dropped.

    filename: path to a .jsonl file, one instance per line.
    split:    question-split tag recorded on every sentence.
    ours:     output list, mutated in place.
    """
    with open(filename) as fh:
        for line in fh:
            instance = json.loads(line)
            # Round-trip the SQL through Query to get its canonical string form.
            query = Query.from_dict(instance['sql'])
            info = {
                'query-split': 'N/A',
                'sentences': [{
                    'question-split': split,
                    'original': instance['question'],
                    'table-id': instance['table_id'],
                    'variables': {},
                }],
                'sql-original': [str(query)],
                'variables': [],
            }
            ours.append(info)
def collate(batch):
    """Assemble a list of (data, label) samples into a padded, masked batch dict.

    Returns {'x': padded data, 'y': labels, 'mask': validity mask, 'lengths': lengths}.
    """
    samples = [item[0] for item in batch]
    labels = [item[1] for item in batch]
    # Sequence length is taken from the innermost time axis of each sample.
    lengths = [len(item[0][0][0]) for item in batch]
    lengths_tensor = torch.as_tensor(lengths)
    return {
        'x': collate_tensors(samples),
        'y': torch.as_tensor(labels),
        'mask': lengths_to_mask(lengths_tensor),
        'lengths': lengths_tensor,
    }
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp10():
    """Hyper-parameter prototype: VHRED on the Ubuntu dialogue corpus with a
    Gaussian utterance-level latent variable (piecewise latent disabled),
    experiment 10.

    Starts from prototype_state() and overrides token symbols, dataset paths,
    architecture sizes and latent-variable settings; returns the state dict.
    """
    state = prototype_state()
    # --- special-token symbols (-1 marks symbols unused by this corpus) ---
    state['end_sym_utterance'] = '__eot__'
    state['unk_sym'] = 0
    state['eos_sym'] = 1
    state['eod_sym'] = (- 1)
    state['first_speaker_sym'] = (- 1)
    state['second_speaker_sym'] = (- 1)
    state['third_speaker_sym'] = (- 1)
    state['minor_speaker_sym'] = (- 1)
    state['voice_over_sym'] = (- 1)
    state['off_screen_sym'] = (- 1)
    state['pause_sym'] = (- 1)
    # --- dataset locations and output directory ---
    state['train_dialogues'] = '../UbuntuData/Training.dialogues.pkl'
    state['test_dialogues'] = '../UbuntuData/Test.dialogues.pkl'
    state['valid_dialogues'] = '../UbuntuData/Validation.dialogues.pkl'
    state['dictionary'] = '../UbuntuData/Dataset.dict.pkl'
    state['save_dir'] = 'Output'
    # --- optimization / bookkeeping ---
    state['max_grad_steps'] = 80
    state['valid_freq'] = 5000
    state['prefix'] = 'UbuntuModel_'
    state['updater'] = 'adam'
    # --- encoder/decoder architecture ---
    state['bidirectional_utterance_encoder'] = True
    state['deep_dialogue_encoder_input'] = False
    state['deep_utterance_decoder_out'] = True
    state['bs'] = 80
    state['utterance_decoder_gating'] = 'LSTM'
    state['direct_connection_between_encoders_and_decoder'] = True
    # Hidden sizes: utterance encoder / decoder / dialogue state / embeddings.
    state['qdim_encoder'] = 1000
    state['qdim_decoder'] = 2000
    state['sdim'] = 1000
    state['rankdim'] = 400
    # --- Gaussian latent variable (enabled for this experiment) ---
    state['add_latent_gaussian_per_utterance'] = True
    state['latent_gaussian_per_utterance_dim'] = 100
    state['scale_latent_gaussian_variable_variances'] = 0.1
    # --- piecewise latent variable (disabled; settings kept for reference) ---
    state['add_latent_piecewise_per_utterance'] = False
    state['latent_piecewise_per_utterance_dim'] = 100
    state['latent_piecewise_alpha_variables'] = 3
    state['scale_latent_piecewise_variable_alpha_use_softplus'] = False
    state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
    state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
    # --- latent-variable training scheme ---
    state['condition_latent_variable_on_dialogue_encoder'] = True
    state['train_latent_variables_with_kl_divergence_annealing'] = True
    # KL weight ramps up linearly over 75k updates.
    state['kl_divergence_annealing_rate'] = (1.0 / 75000.0)
    # Word dropout on decoder inputs (keeps 25% of previous tokens).
    state['decoder_drop_previous_input_tokens'] = True
    state['decoder_drop_previous_input_tokens_rate'] = 0.75
    state['deep_utterance_decoder_input'] = True
    state['patience'] = 20
    # KL weight is capped at 0.5 even after annealing completes.
    state['kl_divergence_max_weight'] = 0.5
    return state
def _isDissipative(obj):
    """Return True if *obj* contains any dissipative (non-conservative) force.

    obj: a Force/Potential instance or a (possibly nested) list thereof;
         nested lists are flattened first.

    Improvement: the list branch used `not numpy.prod(array_of_bools)` as an
    obfuscated any(); replaced with the short-circuiting builtin.
    """
    # Imports are local to avoid circular module dependencies.
    from .planarDissipativeForce import planarDissipativeForce
    from .Potential import flatten
    obj = flatten(obj)
    dissipative_types = (DissipativeForce, planarDissipativeForce)
    if isinstance(obj, list):
        return any(isinstance(p, dissipative_types) for p in obj)
    return isinstance(obj, dissipative_types)
def auto_augment_policy_v0(hparams):
    """Build the AutoAugment 'v0' policy: 25 sub-policies of two ops each.

    Each tuple is (op_name, probability, magnitude); *hparams* is forwarded
    to every AugmentOp. Returns a list of lists of AugmentOp, one inner list
    per sub-policy.
    """
    # Literal policy table; values match the published v0 search result.
    policy = [
        [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
        [('Color', 0.4, 9), ('Equalize', 0.6, 3)],
        [('Color', 0.4, 1), ('Rotate', 0.6, 8)],
        [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
        [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
        [('Color', 0.2, 0), ('Equalize', 0.8, 8)],
        [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
        [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
        [('Color', 0.6, 1), ('Equalize', 1.0, 2)],
        [('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
        [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
        [('Color', 0.4, 7), ('Equalize', 0.6, 0)],
        [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
        [('Solarize', 0.6, 8), ('Color', 0.6, 9)],
        [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
        [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
        [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
        [('ShearY', 0.8, 0), ('Color', 0.6, 4)],
        [('Color', 1.0, 0), ('Rotate', 0.6, 2)],
        [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
        [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
        [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
        [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
        [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
        [('Color', 0.8, 6), ('Rotate', 0.4, 5)],
    ]
    # Instantiate each (name, prob, magnitude) triple as an AugmentOp.
    pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
    return pc
class AutoModelForNextSentencePrediction():
    """Import-time placeholder that raises a PyTorch-required error on any use.

    Fix: from_pretrained is now a @classmethod. As an instance method it was
    uncallable in practice — callers invoke it on the class, and __init__
    raises before any instance can exist.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_pytorch(cls)
def cwh_to_whc(img: torch.Tensor) -> torch.Tensor:
    """Move the channel axis from front to back: CHW -> HWC, or NCHW -> NHWC.

    Raises ValueError for any rank other than 3 or 4.
    """
    ndim = len(img.shape)
    if ndim == 3:
        return img.permute(1, 2, 0)
    if ndim == 4:
        # Batched: keep the batch axis first, rotate the rest.
        return img.permute(0, 2, 3, 1)
    raise ValueError(f'Invalid shape for channel conversion. Expected 3 or 4 dims, got {len(img.shape)} (shape={img.shape})')
def sanity_check_labels(data_folder: Path, competitions: Optional[List[T4c22Competitions]]):
    """Audit the T4c22 training label files under *data_folder*.

    For the CORE competition: per-city congestion-class ('cc') files must be
    complete, NaN-free, in [0, 3], and their grand total must match CC_SUMS.
    For the EXTENDED competition: per-city 'eta' files must be NaN-free, have
    the expected row counts per (day, t), and total to ETA_SUMS.
    competitions=None checks both. Raises on any failed check.

    NOTE(review): several printed strings contain '()' where emoji check
    marks were presumably lost in transit — they are runtime output and are
    left untouched here.
    """
    summary = []
    all_good = True
    if ((competitions is None) or (T4c22Competitions.CORE in competitions)):
        print(f'/ start core competition check')
        d = {}
        for city in CITIES:
            cc_sum = 0.0
            label_file_list = list(((data_folder / 'train') / city).rglob('cc_labels*.parquet'))
            # Every city must have exactly the expected number of label files.
            assert (len(label_file_list) == EXPECTED_NUM_TRAINING_LABEL_FILES[city]), (len(label_file_list), EXPECTED_NUM_TRAINING_LABEL_FILES[city])
            for file in tqdm.tqdm(label_file_list, desc=city):
                df = load_df_from_parquet(file)
                # cc must be present and in the closed range [0, 3].
                assert (len(df[df['cc'].isna()]) == 0), (file, df[df['cc'].isna()])
                assert (df['cc'].min() >= 0), (file, df['cc'].min())
                assert (df['cc'].max() <= 3), (file, df['cc'].max())
                # float64 accumulation avoids precision loss in the checksum.
                cc_sum += df['cc'].astype(np.float64).sum()
            d[city] = cc_sum
            cc_sum_check = np.isclose(cc_sum, CC_SUMS[city])
            all_good = (all_good and cc_sum_check)
            python_check = ('()' if cc_sum_check else '()')
            msg = (str((city, (cc_sum, CC_SUMS[city]))) if (not cc_sum_check) else city)
            msg = f'{python_check} cc sum check {msg}'
            print('')
            print(msg)
            summary.append(msg)
        print(json.dumps(d))
        print(f'\ end core competition check -> OK!')
    if ((competitions is None) or (T4c22Competitions.EXTENDED in competitions)):
        print(f'/ start extended competition check')
        d = {}
        for city in CITIES:
            label_file_list = list(((data_folder / 'train') / city).rglob('eta_labels*.parquet'))
            eta_sum = 0.0
            assert (len(label_file_list) == EXPECTED_NUM_TRAINING_LABEL_FILES[city]), (len(label_file_list), EXPECTED_NUM_TRAINING_LABEL_FILES[city])
            for file in tqdm.tqdm(label_file_list, desc=city):
                df = load_df_from_parquet(file)
                assert (len(df[df['eta'].isna()]) == 0), (file, df[df['eta'].isna()])
                num_supersegments_city = NUM_SUPERSEGMENTS[city]
                # One row per supersegment per 15-minute slot (96 slots/day).
                assert (len(df) == (num_supersegments_city * 96)), (len(df), (num_supersegments_city * 96))
                # Each (day, t) group must cover every supersegment exactly once.
                assert (df.groupby(['day', 't'], sort=False).agg((lambda x: len(x.unique())))['identifier'] == num_supersegments_city).all(), file
                eta_sum += df['eta'].astype(np.float64).sum()
            d[city] = eta_sum
            eta_sum_check = np.isclose(eta_sum, ETA_SUMS[city])
            all_good = (all_good and eta_sum_check)
            python_check = ('()' if eta_sum_check else '()')
            msg = (str((city, (eta_sum, ETA_SUMS[city]))) if (not eta_sum_check) else city)
            msg = f'{python_check} eta sum check {msg}'
            print('')
            print(msg)
            summary.append(msg)
        print('')
        print(json.dumps(d))
        print(f'\ end extended competition check -> OK!')
    print('')
    print('')
    print('Summary:')
    print('\n'.join(summary))
    if (not all_good):
        raise Exception('() Not all checks successful')
    else:
        print('All checks succesful!')
class ModulatedDeformRoIPoolingPack(DeformRoIPooling):
    """Deformable RoI pooling whose per-bin offsets AND a sigmoid modulation
    mask are predicted by small fully-connected heads, fed by a first plain
    (non-deformed) pooling pass over the input features."""

    def __init__(self, spatial_scale, out_size, out_channels, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0, num_offset_fcs=3, num_mask_fcs=2, deform_fc_channels=1024):
        super(ModulatedDeformRoIPoolingPack, self).__init__(spatial_scale, out_size, out_channels, no_trans, group_size, part_size, sample_per_part, trans_std)
        self.num_offset_fcs = num_offset_fcs
        self.num_mask_fcs = num_mask_fcs
        self.deform_fc_channels = deform_fc_channels
        if (not no_trans):
            # Offset head: flattened pooled features -> 2*H*W values ((dy, dx) per bin).
            offset_fc_seq = []
            ic = ((self.out_size[0] * self.out_size[1]) * self.out_channels)
            for i in range(self.num_offset_fcs):
                if (i < (self.num_offset_fcs - 1)):
                    oc = self.deform_fc_channels
                else:
                    oc = ((self.out_size[0] * self.out_size[1]) * 2)
                offset_fc_seq.append(nn.Linear(ic, oc))
                ic = oc
                # ReLU between hidden layers only; linear final layer.
                if (i < (self.num_offset_fcs - 1)):
                    offset_fc_seq.append(nn.ReLU(inplace=True))
            self.offset_fc = nn.Sequential(*offset_fc_seq)
            # Zero-init the last linear layer so training starts with no deformation.
            self.offset_fc[(- 1)].weight.data.zero_()
            self.offset_fc[(- 1)].bias.data.zero_()
            # Mask head: flattened pooled features -> H*W sigmoid weights, one per bin.
            mask_fc_seq = []
            ic = ((self.out_size[0] * self.out_size[1]) * self.out_channels)
            for i in range(self.num_mask_fcs):
                if (i < (self.num_mask_fcs - 1)):
                    oc = self.deform_fc_channels
                else:
                    oc = (self.out_size[0] * self.out_size[1])
                mask_fc_seq.append(nn.Linear(ic, oc))
                ic = oc
                if (i < (self.num_mask_fcs - 1)):
                    mask_fc_seq.append(nn.ReLU(inplace=True))
                else:
                    mask_fc_seq.append(nn.Sigmoid())
            self.mask_fc = nn.Sequential(*mask_fc_seq)
            # Index -2 skips the trailing Sigmoid; zero-init -> mask starts at 0.5.
            self.mask_fc[(- 2)].weight.data.zero_()
            self.mask_fc[(- 2)].bias.data.zero_()

    def forward(self, data, rois):
        """Pool RoIs from `data`; with trans enabled, deform by predicted offsets
        and scale the result by the predicted modulation mask."""
        assert (data.size(1) == self.out_channels)
        if self.no_trans:
            # Plain RoI pooling: empty offset tensor, single pass.
            offset = data.new_empty(0)
            return deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
        else:
            n = rois.shape[0]
            offset = data.new_empty(0)
            # First pass with no_trans=True: undeformed features for the heads.
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, True, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, (- 1)))
            offset = offset.view(n, 2, self.out_size[0], self.out_size[1])
            mask = self.mask_fc(x.view(n, (- 1)))
            mask = mask.view(n, 1, self.out_size[0], self.out_size[1])
            # Second pass with the predicted offsets; modulate by the mask.
            return (deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std) * mask)