code
stringlengths
101
5.91M
def normalize(x, alpha=900, beta=1, num_iters=100, sample=1, method='gmm', use_cuda=False, verbose=False):
    """Normalize ``x`` to zero mean / unit scale and return (float32 array, metadata).

    method='affine' uses the plain sample mean/std of ``x``; any other method
    delegates the scale estimation to ``norm_fit`` (fit diagnostics are passed
    through in the metadata dict). When ``sample > 1`` the fit runs on a random
    subsample of roughly ``x.size / sample`` values, with ``scale``
    compensating for the reduced sample size.
    """
    if method == 'affine':
        center = float(x.mean())
        spread = float(x.std())
        normalized = ((x - center) / spread).astype(np.float32)
        return normalized, {'mu': center, 'std': spread, 'pi': 1}

    fit_data = x
    scale = 1
    if sample > 1:
        n = int(np.round(x.size / sample))
        scale = x.size / n
        fit_data = np.random.choice(x.ravel(), size=n, replace=False)

    mu, std, pi, logp, mus, stds, pis, logps = norm_fit(
        fit_data, alpha=alpha, beta=beta, scale=scale,
        num_iters=num_iters, use_cuda=use_cuda, verbose=verbose)

    normalized = ((x - mu) / std).astype(np.float32)
    metadata = {'mu': mu, 'std': std, 'pi': pi, 'logp': logp,
                'mus': mus, 'stds': stds, 'pis': pis, 'logps': logps,
                'alpha': alpha, 'beta': beta, 'sample': sample}
    return normalized, metadata
def read_args():
    """Parse the Rainbow-DQN training CLI and derive per-environment presets.

    Returns:
        (args, wandb_log_config): the parsed/derived argparse Namespace and a
        filtered copy of it suitable for logging to weights & biases.
    """
    # argparse `type=` callable that turns "true"/"False"/"0"-style strings into real bools
    parse_bool = (lambda b: bool(distutils.util.strtobool(b)))
    parser = argparse.ArgumentParser(
        description='Training framework for Rainbow DQN\n - supports environments from the ALE (via gym), gym-retro and procgen\n - individial components of Rainbow can be adjusted with cli args (below)\n - uses vectorized environments and batches environment steps for best performance\n - uses the large IMPALA-CNN (with 2x channels by default)',
        formatter_class=argparse.RawTextHelpFormatter)

    # NOTE(review): the original default for --training_frames was lost in extraction
    # ("default=," did not parse); 10_000_000 is a common ALE frame budget — TODO confirm upstream.
    parser.add_argument('--training_frames', type=int, default=10_000_000, help='train for n environment interactions ("game_frames" in the code)')
    parser.add_argument('--record_every', type=int, default=(60 * 50), help='wait at least x seconds between episode recordings (default is to use environment specific presets)')
    parser.add_argument('--seed', type=int, default=0, help='seed for pytorch, numpy, environments, random')
    parser.add_argument('--use_wandb', type=parse_bool, default=True, help='whether use "weights & biases" for tracking metrics, video recordings and model checkpoints')
    parser.add_argument('--use_amp', type=parse_bool, default=True, help='whether to enable automatic mixed precision for the forward passes')
    parser.add_argument('--der', type=parse_bool, default=False, help='enable data-efficient-rainbow profile (overrides some of the settings below)')
    parser.add_argument('--decorr', type=parse_bool, default=True, help='try to decorrelate state/progress in parallel envs')
    parser.add_argument('--env_name', type=str, default='gym:Qbert', help='the gym/procgen/retro environment name, should be either gym:[name], retro:[name] or procgen:[name]\nsome gym envs: MsPacman, Phoenix, Breakout, Qbert, Amidar, SpaceInvaders, Assault\nsome retro envs: SuperMarioWorld-Snes, MortalKombat3-Genesis, SpaceMegaforce-Snes, SmashTV-Nes, AirBuster-Genesis, NewZealandStory-Genesis, Paperboy-Nes\nprogcen envs: bigfish, bossfight, caveflyer, chaser, climber, coinrun, dodgeball, fruitbot, heist, leaper, maze, miner, ninja, plunder, starpilot')
    parser.add_argument('--procgen_distribution_mode', type=str, default='hard', help='what variant of the procgen levels to use, the options are "easy", "hard", "extreme", "memory", "exploration". All games support "easy" and "hard", while other options are game-specific. The default is "hard". Switching to "easy" will reduce the number of timesteps required to solve each game and is useful for testing or when working with limited compute resources.')
    parser.add_argument('--retro_state', type=str, default='default', help='initial gym-retro state name or "default" or "randomized" (to randomize on episode reset)')
    parser.add_argument('--time_limit', type=int, default=108000, help='environment time limit for gym & retro (in non-frameskipped native env frames)')
    parser.add_argument('--eid', type=int, default=None, help='')
    parser.add_argument('--wandb_tag', type=str, default=None, help='')
    parser.add_argument('--frame_skip', type=int, default=None, help='use only every nth env frame (default is to use environment specific presets)')
    parser.add_argument('--frame_stack', type=int, default=None, help='stack n frames (default is to use environment specific presets)')
    parser.add_argument('--grayscale', type=parse_bool, default=None, help='convert environment to grayscale (default is to use environment specific presets)')
    parser.add_argument('--resolution', type=int, default=None, help='environment resolution (default is to use environment specific presets)')
    parser.add_argument('--buffer_size', type=int, default=int((2 ** 20)), help='capacity of experience replay buffer (must be a power of two)')
    parser.add_argument('--burnin', type=int, default=100000, help='how many transitions should be in the buffer before start of training')
    parser.add_argument('--gamma', type=float, default=0.99, help='reward discount factor')
    parser.add_argument('--sync_dqn_target_every', type=int, default=32000, help='sync Q target net every n frames')
    parser.add_argument('--batch_size', type=int, default=256, help='sample size when sampling from the replay buffer')
    parser.add_argument('--parallel_envs', type=int, default=64, help='number of envs in the vectorized env')
    parser.add_argument('--train_count', type=int, default=2, help='how often to train on a batch_size batch for every step (of the vectorized env)')
    parser.add_argument('--subproc_vecenv', type=parse_bool, default=True, help="whether to run each environment in it's own subprocess (always enabled for gym-retro)")
    parser.add_argument('--network_arch', type=str, default='impala_large:2', help='which model architecture to use for the q-network; one of "nature", "dueling", "impala_small", "impala_large:c" (c is the number of channels in impala large)')
    parser.add_argument('--spectral_norm', type=str, default='all', help='where to use spectral norm in IMPALA-large residual blocks ("none", "last", "all")')
    parser.add_argument('--double_dqn', type=parse_bool, default=True, help='whether to use the double-dqn TD-target')
    parser.add_argument('--prioritized_er', type=parse_bool, default=True, help='whether to use prioritized experience replay')
    parser.add_argument('--prioritized_er_beta0', type=float, default=0.45, help='importance sampling exponent for PER (0.4 for rainbow, 0.5 for dopamine)')
    parser.add_argument('--prioritized_er_time', type=int, default=None, help='time period over which to increase the IS exponent (+inf for dopamine; default is value of training_frames)')
    parser.add_argument('--n_step', type=int, default=3, help='the n in n-step bootstrapping')
    parser.add_argument('--init_eps', type=float, default=1.0, help='initial dqn exploration epsilon (when not using noisy-nets)')
    parser.add_argument('--final_eps', type=float, default=0.01, help='final dqn exploration epsilon (when not using noisy-nets)')
    parser.add_argument('--eps_decay_frames', type=int, default=500000, help='exploration epsilon decay frames, 250_000 for rainbow paper, 1M for dopamine (when not using noisy-nets)')
    parser.add_argument('--noisy_dqn', type=parse_bool, default=True, help='whether to use noisy nets dqn')
    parser.add_argument('--noisy_sigma0', type=float, default=0.5, help='sigma_0 parameter for noisy nets dqn')
    parser.add_argument('--lr', type=float, default=0.00025, help='learning rate for adam (0.0000625 for rainbow paper/dopamine, 0.00025 for DQN/procgen paper)')
    parser.add_argument('--lr_decay_steps', type=int, default=None, help='learning rate is decayed every n game_steps (disabled by default)')
    parser.add_argument('--lr_decay_factor', type=float, default=None, help='factor by which lr is multiplied (disabled by default)')
    parser.add_argument('--adam_eps', type=float, default=None, help='epsilon for adam (0.00015 for rainbow paper/dopamine, 0.0003125 for DQN/procgen paper); default is to use 0.005/batch_size')
    parser.add_argument('--max_grad_norm', type=float, default=10, help='gradient will be clipped to ensure its l2-norm is less than this')
    parser.add_argument('--loss_fn', type=str, default='huber', help='loss function ("mse" or "huber")')
    parser.add_argument('--retro_stickyprob', type=float, default=0.25, help='sticky-action probability in the StochasticFrameSkip wrapper')
    parser.add_argument('--retro_action_patch', type=str, default='single_buttons', help='defines how to generate the action space from controller buttons, should be either "discrete" (each combination of buttons is an action) or "single_buttons" (each button is an action)\n')
    parser.add_argument('--procgen_num_levels', type=int, default=0, help='the number of unique levels that can be generated. Set to 0 to use unlimited levels. (this does not work correctly when parallel_envs > 1)')
    parser.add_argument('--procgen_start_level', type=int, default=0, help="the lowest seed that will be used to generated levels. 'start_level' and 'num_levels' fully specify the set of possible levels.")
    parser.add_argument('--procgen_paint_vel_info', type=parse_bool, default=False, help='paint player velocity info in the top left corner. Only supported by certain games.')
    parser.add_argument('--procgen_center_agent', type=parse_bool, default=True, help='determines whether observations are centered on the agent or display the full level. Override at your own risk.')
    parser.add_argument('--procgen_use_sequential_levels', type=parse_bool, default=False, help='when you reach the end of a level, the episode is ended and a new level is selected. If use_sequential_levels is set to True, reaching the end of a level does not end the episode, and the seed for the new level is derived from the current level seed. If you combine this with start_level=<some seed> and num_levels=1, you can have a single linear series of levels similar to a gym-retro or ALE game.')
    parser.add_argument('--procgen_use_generated_assets', type=parse_bool, default=False, help='use randomly generated assets in place of human designed assets.')
    parser.add_argument('--procgen_use_backgrounds', type=parse_bool, default=True, help='normally games use human designed backgrounds, if this flag is set to False, games will use pure black backgrounds.')
    parser.add_argument('--procgen_restrict_themes', type=parse_bool, default=False, help='some games select assets from multiple themes, if this flag is set to True, those games will only use a single theme.')
    parser.add_argument('--procgen_use_monochrome_assets', type=parse_bool, default=False, help='if set to True, games will use monochromatic rectangles instead of human designed assets. best used with restrict_themes=True.')
    args = parser.parse_args()

    # sanity checks on mutually dependent settings
    assert ((args.sync_dqn_target_every % args.parallel_envs) == 0)
    assert (args.loss_fn in ('mse', 'huber'))
    assert ((args.lr_decay_steps is None) == (args.lr_decay_factor is None))
    assert (args.burnin > args.batch_size)
    assert (args.spectral_norm in ('none', 'last', 'all'))

    # --eid is a shortcut indexing into the standard ALE benchmark game list
    if (args.eid is not None):
        envs = ['Alien', 'Amidar', 'Assault', 'Asterix', 'Asteroids', 'Atlantis', 'BankHeist', 'BattleZone', 'BeamRider',
                'Berzerk', 'Bowling', 'Boxing', 'Breakout', 'Centipede', 'ChopperCommand', 'CrazyClimber', 'Defender',
                'DemonAttack', 'DoubleDunk', 'Enduro', 'FishingDerby', 'Freeway', 'Frostbite', 'Gopher', 'Gravitar',
                'Hero', 'IceHockey', 'Kangaroo', 'Krull', 'KungFuMaster', 'MontezumaRevenge', 'MsPacman', 'NameThisGame',
                'Phoenix', 'Pitfall', 'Pong', 'PrivateEye', 'Qbert', 'RoadRunner', 'Robotank', 'Seaquest', 'Skiing',
                'Solaris', 'SpaceInvaders', 'StarGunner', 'Tennis', 'TimePilot', 'Tutankham', 'Venture', 'VideoPinball',
                'WizardOfWor', 'YarsRevenge', 'Zaxxon']
        args.env_name = ('gym:' + envs[args.eid])

    # derive the actual seed from the user-provided seed and the env name
    args.user_seed = args.seed
    args.seed = env_seeding(args.user_seed, args.env_name)
    if (args.adam_eps is None):
        args.adam_eps = (0.005 / args.batch_size)
    if (args.prioritized_er_time is None):
        args.prioritized_er_time = args.training_frames
    if (args.resolution is not None):
        args.resolution = (args.resolution, args.resolution)

    # environment-family presets, only applied where the user did not override
    if args.env_name.startswith('gym:'):
        if (args.frame_skip is None): args.frame_skip = 4
        if (args.frame_stack is None): args.frame_stack = 4
        if (args.resolution is None): args.resolution = (84, 84)
        if (args.grayscale is None): args.grayscale = True
    elif args.env_name.startswith('retro:'):
        if (args.frame_skip is None): args.frame_skip = 4
        if (args.frame_stack is None): args.frame_stack = 4
        if (args.resolution is None): args.resolution = (80, 80)
        if (args.grayscale is None): args.grayscale = False
        if (not args.subproc_vecenv):
            print('[WARNING] subproc_vecenv was forcibly enabled since retro envs need to run in subprocesses anyway!')
            args.subproc_vecenv = True
    elif args.env_name.startswith('procgen:'):
        if (args.frame_skip is None): args.frame_skip = 1
        if (args.frame_stack is None): args.frame_stack = 4
        if (args.resolution is None): args.resolution = (64, 64)  # (was a doubled `a = a = (64, 64)` assignment)
        if (args.grayscale is None): args.grayscale = False
        args.time_limit = None

    # data-efficient rainbow (DER) profile: overrides several settings above
    if args.der:
        args.parallel_envs = 1
        args.batch_size = 32
        args.train_count = 1
        args.burnin = 6400
        args.n_step = 20
        args.sync_dqn_target_every = 8000
        args.buffer_size = (2 ** 17)
        args.lr = 0.00025
        args.adam_eps = 0.0003125
        if args.noisy_dqn:
            args.init_eps = 0.002
            args.final_eps = 0.0
            args.eps_decay_frames = 20000

    args.instance = socket.gethostname()

    # build the config dict logged to wandb; drop keys that are irrelevant
    # for the selected environment family or not useful as metrics metadata
    wandb_log_config = deepcopy(vars(args))
    wandb_log_config['env_type'] = args.env_name[:args.env_name.find(':')]
    del wandb_log_config['record_every']
    del wandb_log_config['use_wandb']
    if (not args.env_name.startswith('retro:')):
        for k in list(wandb_log_config.keys()):
            if k.startswith('retro'):
                del wandb_log_config[k]
    if (not args.env_name.startswith('procgen:')):
        for k in list(wandb_log_config.keys()):
            if k.startswith('procgen'):
                del wandb_log_config[k]
    del wandb_log_config['wandb_tag']
    return (args, wandb_log_config)
def func():
    """Concatenate, in generation order, every 'x' or 'a' yielded by abc_xyz_generator()."""
    return ''.join(ch for ch in abc_xyz_generator() if ch in ('x', 'a'))
def conv4(in_planes, out_planes, stride=2):
    """Four 3x3 conv + PReLU pairs; only the first conv applies `stride` (downsampling)."""
    layers = [nn.Conv2d(in_planes, out_planes, 3, stride, 1), nn.PReLU(out_planes)]
    for _ in range(3):
        layers.append(nn.Conv2d(out_planes, out_planes, 3, 1, 1))
        layers.append(nn.PReLU(out_planes))
    return nn.Sequential(*layers)
# NOTE(review): the leading `.expansion` token is extraction residue (likely the
# tail of a line that was cut off before this chunk); the code below will not
# parse until it is removed or the missing prefix is restored.
.expansion
class ExpandPgemmMKLOpenMPI(ExpandTransformation):
    # Expansion lowering a PGEMM library node for Intel MKL ScaLAPACK + OpenMPI.
    environments = [environments.intel_mkl_openmpi.IntelMKLScaLAPACKOpenMPI]

    def expansion(node, parent_state, parent_sdfg, **kwargs):
        # Delegates to the MPICH variant's expansion — only the environment
        # (link/launch configuration) differs between the two classes.
        # NOTE(review): presumably intended to be a @staticmethod (no `self`
        # parameter) — confirm against the decorator lost in extraction.
        return ExpandPgemmMKLMPICH.expansion(node, parent_state, parent_sdfg, **kwargs)
class TestOldDoctestSageScript():
    """Exercise the legacy ``sage -t`` doctest runner against a known-failing input file."""

    def test_invoke_on_inputtest_file(self):
        # The input file contains a doctest expecting 44 where 42 is produced,
        # so the runner must exit with status 1 and report the failure.
        completed = subprocess.run(['sage', '-t', input_file], capture_output=True, text=True)
        assert completed.returncode == 1
        expected_report = 'Failed example:\n something()\nExpected:\n 44\nGot:\n 42\n'
        assert expected_report in completed.stdout
def get_args():
    """Build and parse the training CLI, allocate the job's log directory, and
    persist the resolved configuration to ``<log_dir>/input.yaml``.

    Returns the parsed argparse Namespace.
    """

    def _parse_bool(value):
        # BUG FIX: several flags previously used `type=bool`, for which argparse
        # returns True for ANY non-empty string — including "False". Parse
        # boolean-ish strings explicitly instead.
        if isinstance(value, bool):
            return value
        lowered = str(value).strip().lower()
        if lowered in ('true', 't', 'yes', 'y', '1', 'on'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0', 'off'):
            return False
        raise argparse.ArgumentTypeError('expected a boolean value, got %r' % (value,))

    parser = argparse.ArgumentParser(description='Training')
    parser.add_argument('--load-model', action=LoadFromCheckpoint, help='Restart training using a model checkpoint')
    parser.add_argument('--conf', '-c', type=open, action=LoadFromFile, help='Configuration yaml file')
    parser.add_argument('--num-epochs', default=300, type=int, help='number of epochs')
    parser.add_argument('--num-steps', default=None, type=int, help='Maximum number of gradient steps.')
    parser.add_argument('--batch-size', default=32, type=int, help='batch size')
    parser.add_argument('--inference-batch-size', default=None, type=int, help='Batchsize for validation and tests.')
    parser.add_argument('--lr', default=0.0001, type=float, help='learning rate')
    parser.add_argument('--lr-schedule', default='reduce_on_plateau', type=str, choices=['cosine', 'reduce_on_plateau'], help='Learning rate schedule.')
    parser.add_argument('--lr-patience', type=int, default=10, help='Patience for lr-schedule. Patience per eval-interval of validation')
    parser.add_argument('--lr-min', type=float, default=1e-06, help='Minimum learning rate before early stop')
    parser.add_argument('--lr-factor', type=float, default=0.8, help='Minimum learning rate before early stop')
    parser.add_argument('--lr-warmup-steps', type=int, default=0, help='How many steps to warm-up over. Defaults to 0 for no warm-up')
    parser.add_argument('--lr-cosine-length', type=int, default=400000, help='Cosine length if lr_schedule is cosine.')
    parser.add_argument('--early-stopping-patience', type=int, default=30, help='Stop training after this many epochs without improvement')
    parser.add_argument('--weight-decay', type=float, default=0.0, help='Weight decay strength')
    parser.add_argument('--ema-alpha-y', type=float, default=1.0, help='The amount of influence of new losses on the exponential moving average of y')
    parser.add_argument('--ema-alpha-dy', type=float, default=1.0, help='The amount of influence of new losses on the exponential moving average of dy')
    parser.add_argument('--ngpus', type=int, default=-1, help='Number of GPUs, -1 use all available. Use CUDA_VISIBLE_DEVICES=1, to decide gpus')
    parser.add_argument('--num-nodes', type=int, default=1, help='Number of nodes')
    parser.add_argument('--precision', type=int, default=32, choices=[16, 32], help='Floating point precision')
    parser.add_argument('--log-dir', '-l', default='/tmp/logs', help='log file')
    parser.add_argument('--splits', default=None, help='Npz with splits idx_train, idx_val, idx_test')
    parser.add_argument('--train-size', type=number, default=None, help='Percentage/number of samples in training set (None to use all remaining samples)')
    parser.add_argument('--val-size', type=number, default=0.05, help='Percentage/number of samples in validation set (None to use all remaining samples)')
    parser.add_argument('--test-size', type=number, default=0.1, help='Percentage/number of samples in test set (None to use all remaining samples)')
    parser.add_argument('--test-interval', type=int, default=10, help='Test interval, one test per n epochs (default: 10)')
    parser.add_argument('--save-interval', type=int, default=10, help='Save interval, one save per n epochs (default: 10)')
    parser.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
    parser.add_argument('--distributed-backend', default='ddp', help='Distributed backend: dp, ddp, ddp2')
    parser.add_argument('--num-workers', type=int, default=4, help='Number of workers for data prefetch')
    parser.add_argument('--redirect', type=_parse_bool, default=False, help='Redirect stdout and stderr to log_dir/log')
    parser.add_argument('--wandb-notes', default='', type=str, help='Notes passed to wandb experiment.')
    parser.add_argument('--job-id', default='auto', type=str, help='Job ID. If auto, pick the next available numeric job id.')
    parser.add_argument('--pretrained-model', default=None, type=str, help='Pre-trained weights checkpoint.')
    parser.add_argument('--dataset', default=None, type=str, choices=datasets.__all__, help='Name of the torch_geometric dataset')
    parser.add_argument('--dataset-root', default='data', type=str, help='Data storage directory (not used if dataset is "CG")')
    parser.add_argument('--dataset-arg', default=None, type=str, help='Additional dataset argument, e.g. target property for QM9 or molecule for MD17')
    parser.add_argument('--coord-files', default=None, type=str, help='Custom coordinate files glob')
    parser.add_argument('--embed-files', default=None, type=str, help='Custom embedding files glob')
    parser.add_argument('--energy-files', default=None, type=str, help='Custom energy files glob')
    parser.add_argument('--force-files', default=None, type=str, help='Custom force files glob')
    parser.add_argument('--energy-weight', default=1.0, type=float, help='Weighting factor for energies in the loss function')
    parser.add_argument('--force-weight', default=1.0, type=float, help='Weighting factor for forces in the loss function')
    parser.add_argument('--position-noise-scale', default=0.0, type=float, help='Scale of Gaussian noise added to positions.')
    parser.add_argument('--denoising-weight', default=0.0, type=float, help='Weighting factor for denoising in the loss function.')
    parser.add_argument('--denoising-only', type=_parse_bool, default=False, help='If the task is denoising only (then val/test datasets also contain noise).')
    parser.add_argument('--model', type=str, default='graph-network', choices=models.__all__, help='Which model to train')
    parser.add_argument('--output-model', type=str, default='Scalar', choices=output_modules.__all__, help='The type of output model')
    parser.add_argument('--prior-model', type=str, default=None, choices=priors.__all__, help='Which prior model to use')
    parser.add_argument('--output-model-noise', type=str, default=None, choices=(output_modules.__all__ + ['VectorOutput']), help='The type of output model for denoising')
    parser.add_argument('--embedding-dimension', type=int, default=256, help='Embedding dimension')
    parser.add_argument('--num-layers', type=int, default=6, help='Number of interaction layers in the model')
    parser.add_argument('--num-rbf', type=int, default=64, help='Number of radial basis functions in model')
    parser.add_argument('--activation', type=str, default='silu', choices=list(act_class_mapping.keys()), help='Activation function')
    parser.add_argument('--rbf-type', type=str, default='expnorm', choices=list(rbf_class_mapping.keys()), help='Type of distance expansion')
    parser.add_argument('--trainable-rbf', type=_parse_bool, default=False, help='If distance expansion functions should be trainable')
    parser.add_argument('--neighbor-embedding', type=_parse_bool, default=False, help='If a neighbor embedding should be applied before interactions')
    parser.add_argument('--aggr', type=str, default='add', help="Aggregation operation for CFConv filter output. Must be one of 'add', 'mean', or 'max'")
    parser.add_argument('--distance-influence', type=str, default='both', choices=['keys', 'values', 'both', 'none'], help='Where distance information is included inside the attention')
    parser.add_argument('--attn-activation', default='silu', choices=list(act_class_mapping.keys()), help='Attention activation function')
    parser.add_argument('--num-heads', type=int, default=8, help='Number of attention heads')
    parser.add_argument('--layernorm-on-vec', type=str, default=None, choices=['whitened'], help='Whether to apply an equivariant layer norm to vec features. Off by default.')
    parser.add_argument('--derivative', default=False, type=_parse_bool, help='If true, take the derivative of the prediction w.r.t coordinates')
    parser.add_argument('--cutoff-lower', type=float, default=0.0, help='Lower cutoff in model')
    parser.add_argument('--cutoff-upper', type=float, default=5.0, help='Upper cutoff in model')
    parser.add_argument('--atom-filter', type=int, default=-1, help='Only sum over atoms with Z > atom_filter')
    parser.add_argument('--max-z', type=int, default=100, help='Maximum atomic number that fits in the embedding matrix')
    parser.add_argument('--max-num-neighbors', type=int, default=32, help='Maximum number of neighbors to consider in the network')
    parser.add_argument('--standardize', type=_parse_bool, default=False, help='If true, multiply prediction by dataset std and add mean')
    parser.add_argument('--reduce-op', type=str, default='add', choices=['add', 'mean'], help='Reduce operation to apply to atomic predictions')
    args = parser.parse_args()

    if (args.job_id == 'auto'):
        # ROBUSTNESS: .get() so a missing CUDA_VISIBLE_DEVICES no longer raises
        # KeyError before the assert can produce its message.
        assert (len(os.environ.get('CUDA_VISIBLE_DEVICES', '').split(',')) == 1), 'Might be problematic with DDP.'
        # pick the next free numeric job id under log_dir
        if (Path(args.log_dir).exists() and (len(os.listdir(args.log_dir)) > 0)):
            next_job_id = str((max([int(x.name) for x in Path(args.log_dir).iterdir() if x.name.isnumeric()]) + 1))
        else:
            next_job_id = '1'
        args.job_id = next_job_id
    args.log_dir = str(Path(args.log_dir, args.job_id))
    Path(args.log_dir).mkdir(parents=True, exist_ok=True)
    if args.redirect:
        # send both streams (and lightning's logger) into <log_dir>/log
        sys.stdout = open(os.path.join(args.log_dir, 'log'), 'w')
        sys.stderr = sys.stdout
        logging.getLogger('pytorch_lightning').addHandler(logging.StreamHandler(sys.stdout))
    if (args.inference_batch_size is None):
        args.inference_batch_size = args.batch_size
    save_argparse(args, os.path.join(args.log_dir, 'input.yaml'), exclude=['conf'])
    return args
# NOTE(review): the leading `.parametrize(...)` lines appear to be
# `@pytest.mark.parametrize(...)` decorators whose `@pytest.mark` prefix was
# lost in extraction; restore it for the test to be collected.
.parametrize('seed', [412])
.parametrize('batch_size', [2, 16])
.parametrize('grid_size', [2, 8])
.parametrize('feature_size', [4])
.parametrize('m, M', [((- 1), 1)])
.parametrize('sym_backward', [False, True])
def test_tv_loss_on_triline_forward_backward(seed, batch_size, grid_size, feature_size, m, M, sym_backward):
    """Check that F.tv_loss_on_triline matches the composite reference
    implementation in both forward output and feature gradient."""
    nn.clear_parameters()
    ctx = get_extension_context('cudnn', device_id='0')
    nn.set_default_context(ctx)
    B = batch_size
    G = grid_size
    D = feature_size
    rng = np.random.RandomState(seed)
    # queries uniform in [m, M)^3; feature grids with a small gaussian init
    query_data = (m + (rng.rand(batch_size, 3) * (M - m)))
    initializer_data = (rng.randn(3, G, D) * 0.01)
    # reference path: composite implementation on its own parameter copy
    query_data0 = query_data.astype(np.float32)
    initializer_data0 = initializer_data.astype(np.float32)
    query0 = nn.Variable.from_numpy_array(query_data0).apply(need_grad=True)
    feature0 = nn.parameter.get_parameter_or_create('F0', (3, G, D), initializer_data0)
    output0 = tv_loss_on_triline_composite(query0, feature0, m, M, sym_backward)
    # candidate path: fused function under test, identical inputs
    query_data1 = query_data.astype(np.float32)
    initializer_data1 = initializer_data.astype(np.float32)
    query1 = nn.Variable.from_numpy_array(query_data1).apply(need_grad=True)
    feature1 = nn.parameter.get_parameter_or_create('F1', (3, G, D), initializer_data1)
    output1 = F.tv_loss_on_triline(query1, feature1, ([m] * 3), ([M] * 3), sym_backward=sym_backward)
    # forward outputs must agree
    output0.forward(clear_no_need_grad=True)
    output1.forward(clear_no_need_grad=True)
    np.testing.assert_allclose(output0.d, output1.d, atol=1e-06)
    # backward: feed the same upstream gradient and compare feature gradients
    feature0.grad.fill(0)
    feature1.grad.fill(0)
    ograd = rng.randn(*output0.shape).astype(np.float32)
    output0.backward(ograd, clear_buffer=True)
    output1.backward(ograd, clear_buffer=True)
    np.testing.assert_allclose(feature0.g, feature1.g, atol=0.0001)
class A000012(SloaneSequence):
    """OEIS A000012: the all-ones sequence, a(n) = 1 for every n >= 0."""

    def __init__(self):
        """Initialise the sequence with offset 0 (indexed from n = 0)."""
        super().__init__(offset=0)

    def _repr_(self):
        """Short description used by Sage's repr machinery."""
        return "The all 1's sequence."

    def _eval(self, n):
        """Return 1 (as a Sage integer) regardless of n."""
        return ZZ.one()
def filter_invalid_unicode(text):
    """Return (text, filtered): bytes input is dropped entirely -> ('', True);
    anything else passes through unchanged -> (text, False)."""
    if isinstance(text, bytes):
        return ('', True)
    return (text, False)
def uninstall() -> None:
    """Remove the custom 'uuid' string format previously registered with the
    package's openapi spec module."""
    from ....specs import openapi

    unregister = openapi.unregister_string_format
    unregister('uuid')
class HardTanhInterface(HardTanh):
    """Averaging interface for a hard-tanh channel with additive Gaussian noise.

    Uses ``self.var_noise`` (noise variance) and ``self.thres`` (saturation
    threshold) — both presumably set by the HardTanh base class; confirm there.
    """

    def __average(self, fun, a, v, rho):
        # Numerically integrate `_f` over the channel's three saturation
        # regions (last tuple element 0/1/2 selects the region) and sum.
        # `fun` selects which quantity `_f` computes (0/1/2, see callers).
        m = check_m(v, rho)
        v_eff = (self.var_noise + v)  # effective variance seen at the output
        H_f1 = H2(_f, args=(a, m, v_eff, self.thres, fun, 0), epsrel=1e-07)
        H_f2 = H2(_f, args=(a, m, v_eff, self.thres, fun, 1), epsrel=1e-07)
        H_f3 = H2(_f, args=(a, m, v_eff, self.thres, fun, 2), epsrel=1e-07)
        return ((H_f1 + H_f2) + H_f3)

    def iter_a(self, a, v, rho):
        # average with fun code 0 — name suggests the `a` update; see `_f`
        return self.__average(0, a, v, rho)

    def iter_v(self, a, v, rho):
        # average with fun code 1 — name suggests the `v` update; see `_f`
        return self.__average(1, a, v, rho)

    def eval_i(self, a, v, rho):
        # average with fun code 2 — name suggests an information/entropy term
        return self.__average(2, a, v, rho)

    def eval_rho(self, rho_prev):
        """Closed-form erf/erfc expression in `thres` and the effective
        variance — presumably the output second moment of the noisy
        hard-tanh channel; confirm against the accompanying derivation."""
        v_eff = (self.var_noise + rho_prev)
        res = ((self.thres ** 2) * erfc((self.thres / np.sqrt((2 * v_eff)))))
        res += (v_eff * erf((self.thres / np.sqrt((2 * v_eff)))))
        res -= ((self.thres * np.sqrt(((2 * v_eff) / np.pi))) * np.exp((((- 0.5) * (self.thres ** 2)) / v_eff)))
        return res
class SkewPolynomialRing(OrePolynomialRing):
    """Univariate skew polynomial ring over ``base_ring`` twisted by
    ``morphism`` — the derivation-free case of an Ore polynomial ring."""

    def __init__(self, base_ring, morphism, derivation, name, sparse, category=None):
        # A nonzero derivation belongs to a different OrePolynomialRing
        # subclass; this class only handles the pure-twist case.
        if (derivation is not None):
            raise NotImplementedError
        if (self.Element is None):
            # NOTE(review): import deferred — presumably to avoid an import
            # cycle at module load time; confirm.
            import sage.rings.polynomial.skew_polynomial_element
            self.Element = sage.rings.polynomial.skew_polynomial_element.SkewPolynomial_generic_dense
        # derivation slot is passed as None (checked above)
        OrePolynomialRing.__init__(self, base_ring, morphism, None, name, sparse, category)

    def minimal_vanishing_polynomial(self, eval_pts):
        """Return the minimal vanishing polynomial of ``eval_pts``, computed
        over the fraction field of the base ring."""
        return _minimal_vanishing_polynomial(_base_ring_to_fraction_field(self), eval_pts)

    def lagrange_polynomial(self, points):
        """Return the skew polynomial interpolating the given (point, value)
        pairs.

        Raises TypeError if a pair is malformed, if evaluation points repeat,
        or if a nonzero value is requested at 0 (skew polynomials always
        evaluate to 0 there).
        """
        l = len(points)
        if (not all(((len(pair) == 2) for pair in points))):
            raise TypeError('supplied points must be pairs of elements of base ring')
        eval_pts = [x for (x, _) in points]
        values = [y for (_, y) in points]
        if (l > len(set(eval_pts))):
            raise TypeError('the evaluation points must be distinct')
        # evaluation at 0 is forced to 0, so a nonzero value there is unsatisfiable
        zero_i = [i for i in range(l) if eval_pts[i].is_zero()]
        if (zero_i and (not values[zero_i[0]].is_zero())):
            raise TypeError('a skew polynomial always evaluates to 0 at 0, but a non-zero value was requested')
        return _lagrange_polynomial(_base_ring_to_fraction_field(self), eval_pts, values)
def scoreAlignment(aScore, bScore):
    """Align two music21 scores by dynamic time warping over onset pitch sets.

    Returns:
        (path, d): ``path`` is a list of (aOffset, bOffset) pairs tracing the
        optimal alignment from start to end; ``d`` is the full DTW cost matrix.
    """

    def convertScoreToListOfPitches(aScore):
        # Convert a score into [(offset, [midi pitches sounding at offset]), ...]

        def getPitches(el):
            # midi pitch numbers of a Note or Chord (None for other types;
            # callers only pass Notes/Chords — see flattenStream's filter)
            if isinstance(el, music21.note.Note):
                return [el.pitch.midi]
            elif isinstance(el, music21.chord.Chord):
                currentList = []
                for pitch in el.pitches:
                    currentList.append(pitch.midi)
                return currentList

        def convertStreamToList(aStream):
            # group consecutive elements by identical offset
            aList = []
            currentOffset = 0.0
            currentList = []
            for el in aStream:
                if (el.offset == currentOffset):
                    currentList += getPitches(el)
                else:
                    aList.append((currentOffset, currentList))
                    currentOffset = el.offset
                    currentList = getPitches(el)
            # BUG FIX: the group being accumulated when the stream ran out was
            # never emitted, silently dropping the notes at the final offset.
            if currentList:
                aList.append((currentOffset, currentList))
            return aList

        def flattenStream(aStream):
            # collapse nested containers into one stream of notes/chords,
            # preserving each element's absolute offset in the hierarchy
            newStream = music21.stream.Stream()
            for el in aStream.recurse():
                if (isinstance(el, music21.note.Note) or isinstance(el, music21.chord.Chord)):
                    newStream.insert(el.getOffsetInHierarchy(aStream), el)
            return newStream

        parts = aScore.getElementsByClass([music21.stream.PartStaff, music21.stream.Part])
        flat_notes = sorted(itertools.chain.from_iterable([flattenStream(part).elements for part in parts]), key=(lambda x: x.offset))
        return convertStreamToList(flat_notes)

    def compareSets(aSet, bSet):
        # local distance: size of the multiset symmetric difference of two
        # pitch lists (each shared pitch cancels one occurrence on each side)
        a = aSet.copy()
        b = bSet.copy()
        aTemp = []
        for obj in a:
            if (obj in b):
                b.remove(obj)
            else:
                aTemp.append(obj)
        a = aTemp
        return (len(a) + len(b))

    def costMatrix(s, t):
        # standard DTW accumulation with compareSets as the local distance;
        # row/column 0 are inf walls so paths start at (0, 0)
        m = len(s)
        n = len(t)
        d = np.zeros(((m + 1), (n + 1)))
        for i in range(1, (m + 1)):
            d[(i, 0)] = np.inf
        for j in range(1, (n + 1)):
            d[(0, j)] = np.inf
        for j in range(1, (n + 1)):
            for i in range(1, (m + 1)):
                cost = compareSets(s[(i - 1)][1], t[(j - 1)][1])
                # cheapest of the insertion / deletion / match predecessors
                d[(i, j)] = (cost + min(d[((i - 1), j)], d[(i, (j - 1))], d[((i - 1), (j - 1))]))
        return d

    aList = convertScoreToListOfPitches(aScore)
    bList = convertScoreToListOfPitches(bScore)
    d = costMatrix(aList, bList)
    # backtrace from the bottom-right corner, preferring (on ties) the same
    # move order as np.argmin: up, left, diagonal
    (i, j) = ((d.shape[0] - 1), (d.shape[1] - 1))
    path = []
    while (not ((i == 0) and (j == 0))):
        aOff = aList[(i - 1)][0]
        bOff = bList[(j - 1)][0]
        path = ([(aOff, bOff)] + path)
        idx = np.argmin([d[((i - 1), j)], d[(i, (j - 1))], d[((i - 1), (j - 1))]])
        if (idx == 0):
            i = (i - 1)
        elif (idx == 1):
            j = (j - 1)
        else:
            (i, j) = ((i - 1), (j - 1))
    return (path, d)
# NOTE(review): the GLUE and PRD URL-template string literals were destroyed in
# extraction (`GLUE = '` and `PRD = '` are truncated), so the code below does
# not parse as-is. Preserved verbatim — recover the download URL templates
# (each formatted with a task name via .format(...)) before use.
def english(): output_dir = 'data' GLUE = ' glue_dir = os.path.join(output_dir, 'GLUE') get_data(GLUE.format('CoLA'), glue_dir, 'COLA', dataset_dir=os.path.join(glue_dir, 'CoLA')) get_data(GLUE.format('SST-2'), glue_dir, 'SST-2', dataset_dir=os.path.join(glue_dir, 'SST-2')) get_data(GLUE.format('STS-B'), glue_dir, 'STS-B', dataset_dir=os.path.join(glue_dir, 'STS-B')) get_data(GLUE.format('QQP-clean'), glue_dir, 'QQP', dataset_dir=os.path.join(glue_dir, 'QQP')) get_data(GLUE.format('MNLI'), glue_dir, 'MNLI', dataset_dir=os.path.join(glue_dir, 'MNLI')) get_data(GLUE.format('QNLIv2'), glue_dir, 'QNLI', dataset_dir=os.path.join(glue_dir, 'QNLI')) get_data(GLUE.format('RTE'), glue_dir, 'RTE', dataset_dir=os.path.join(glue_dir, 'RTE')) get_data(GLUE.format('WNLI'), glue_dir, 'WNLI', dataset_dir=os.path.join(glue_dir, 'WNLI')) PRD = ' get_data(PRD.format('MRPC'), os.path.join(glue_dir, 'MRPC'), 'MRPC') get_data(PRD.format('AX'), os.path.join(glue_dir, 'AX'), 'AX')
def load_tiff(path, standardize=False):
    """Open a TIFF image, eagerly reading pixel data so the file handle can be
    closed immediately; optionally return a zero-mean/unit-std version."""
    image = Image.open(path)
    handle = image.fp
    image.load()    # force the full read now (PIL is lazy by default)
    handle.close()  # ...so the descriptor can be released right away
    if standardize:
        pixels = np.array(image, copy=False)
        pixels = (pixels - pixels.mean()) / pixels.std()
        image = Image.fromarray(pixels)
    return image
def _test_mpi(info, sdfg, dtype):
    """Ring send/recv smoke test for a compiled SDFG: every rank sends an array
    filled with its own rank id to the next rank and must receive the previous
    rank's id.

    ``info`` is unused here — presumably kept for a shared harness signature;
    confirm against the callers.
    """
    from mpi4py import MPI as MPI4PY
    comm = MPI4PY.COMM_WORLD
    rank = comm.Get_rank()
    commsize = comm.Get_size()
    # ring neighbours (destination = next rank, source = previous, wrapping)
    drank = ((rank + 1) % commsize)
    srank = (((rank - 1) + commsize) % commsize)
    mpi_sdfg = None
    if (commsize < 2):
        raise ValueError('This test is supposed to be run with at least two processes!')
    # compile one rank at a time, barrier between turns — presumably to
    # serialize access to a shared build cache; confirm.
    for r in range(0, commsize):
        if (r == rank):
            mpi_sdfg = sdfg.compile()
        comm.Barrier()
    size = 128
    A = np.full(size, rank, dtype=dtype)   # payload: this rank's id
    B = np.zeros(size, dtype=dtype)        # receive buffer
    src = np.array([srank], dtype=np.int32)
    dest = np.array([drank], dtype=np.int32)
    tag = np.array([23], dtype=np.int32)
    mpi_sdfg(x=A, y=B, src=src, dest=dest, tag=tag, n=size)
    # every received element must equal the previous rank's id
    if (not np.allclose(B, np.full(size, srank, dtype=dtype))):
        raise ValueError('The received values are not what I expected.')
def get_track_box(annotation: Dict[str, Any], center_coordinates: Tuple[float, float], center_pixels: Tuple[float, float], resolution: float = 0.1) -> np.ndarray:
    """Compute pixel-space corner coordinates of an annotated track box.

    Args:
        annotation: record with 'translation', 'rotation' (quaternion) and
            'size' fields.
        center_coordinates: world (x, y) of the raster center.
        center_pixels: (row, col) pixel location of the raster center.
        resolution: meters per pixel; must be positive.

    Returns:
        Array of box corner coordinates in pixel space.
    """
    assert resolution > 0
    xy = annotation['translation'][:2]
    heading = quaternion_yaw(Quaternion(annotation['rotation']))
    row_pixel, column_pixel = convert_to_pixel_coords(xy, center_coordinates, center_pixels, resolution)
    # Convert the box extents from meters to pixels.
    width_px = annotation['size'][0] / resolution
    length_px = annotation['size'][1] / resolution
    return pixels_to_box_corners(row_pixel, column_pixel, length_px, width_px, heading)
def fail_if_equal(actual, desired, err_msg=''):
    """Raise AssertionError if *actual* equals *desired* (recursively).

    Dicts are compared key-by-key, sequences element-by-element; ndarray
    operands are delegated to fail_if_array_equal.
    """
    if isinstance(desired, dict):
        if not isinstance(actual, dict):
            raise AssertionError(repr(type(actual)))
        fail_if_equal(len(actual), len(desired), err_msg)
        for key, value in desired.items():
            if key not in actual:
                raise AssertionError(repr(key))
            fail_if_equal(actual[key], value, 'key=%r\n%s' % (key, err_msg))
        return
    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
        fail_if_equal(len(actual), len(desired), err_msg)
        for pos in range(len(desired)):
            fail_if_equal(actual[pos], desired[pos], 'item=%r\n%s' % (pos, err_msg))
        return
    if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
        return fail_if_array_equal(actual, desired, err_msg)
    # Scalar leaf: fail when the two values are NOT unequal.
    msg = build_err_msg([actual, desired], err_msg)
    if not (desired != actual):
        raise AssertionError(msg)
class CaseInsensitiveChoices(list):
    """A list of strings whose membership test (``in``) ignores case.

    Bug fixed: the previous implementation evaluated ``any()`` over a list
    of the *matching elements themselves*, so a falsy match (e.g. the empty
    string ``""``) was incorrectly reported as absent.
    """

    def __init__(self, iterable):
        super().__init__(iterable)

    def __contains__(self, other):
        # Compare case-folded values and return True on the first match,
        # regardless of the element's own truthiness.
        return any(element.lower() == other.lower() for element in self)
def align_comments(tlist):
    # Recursively attach trailing SQL comments to the preceding TokenList
    # so a comment stays grouped with the statement it follows.
    [align_comments(sgroup) for sgroup in tlist.get_sublists()]
    idx = 0
    token = tlist.token_next_by_instance(idx, sql.Comment)
    while token:
        # Token immediately before the comment; only TokenLists can
        # absorb the comment tokens.
        before = tlist.token_prev(tlist.token_index(token))
        if isinstance(before, sql.TokenList):
            # Everything between `before` and the comment (excluding
            # `before` itself) is re-parented into `before`, then removed
            # from the outer list to avoid duplication.
            grp = tlist.tokens_between(before, token)[1:]
            before.tokens.extend(grp)
            for t in grp:
                tlist.tokens.remove(t)
            # Resume scanning just past the absorbing TokenList; indices
            # shifted because tokens were removed above.
            idx = (tlist.token_index(before) + 1)
        else:
            idx = (tlist.token_index(token) + 1)
        token = tlist.token_next_by_instance(idx, sql.Comment)
def dist_vector(point, exemplar_dict, data):
    """Distance from *point* to the nearest exemplar of each cluster.

    Returns a 1-D array ordered by the insertion order of *exemplar_dict*.
    """
    distances = [min_dist_to_exemplar(point, exemplars, data)
                 for exemplars in exemplar_dict.values()]
    return np.array(distances)
def inference(model, test_loader, num_query, return_f=False):
    """Run re-ID evaluation over *test_loader* and compute CMC/mAP.

    Features are averaged over the image and its horizontal flip (test-time
    augmentation) and L2-normalized before being fed to the metric.

    Returns (mAP, rank1, rank5, rank10, rank20[, features]) where the
    feature dict is keyed by filename and only present when return_f=True.
    """
    print('Test')
    model.eval()
    metric = R1_mAP(num_query, 500)
    features = OrderedDict()
    use_cuda = torch.cuda.device_count() >= 1
    with torch.no_grad():
        for batch in test_loader:
            data, pid, cmp, fnames = batch
            if use_cuda:
                data = data.to('cuda')
            # Flip-averaged embedding, then normalization.
            feat = 0.5 * (model(data) + model(fliplr(data)))
            feat = norm(feat)
            metric.update([feat, pid, cmp])
            if return_f:
                for fname, output in zip(fnames, feat):
                    features[fname] = output
    cmc, mAP = metric.compute()
    if return_f:
        return (mAP, cmc[0], cmc[4], cmc[9], cmc[19], features)
    return (mAP, cmc[0], cmc[4], cmc[9], cmc[19])
def test3d_float32():
    """Nearest-neighbor query against a float32 KDTree (squared dists)."""
    queries = np.array(
        [[787014.438, -340616.906, 6313018.0],
         [751763.125, -59925.969, 6326205.5],
         [769957.188, -202418.125, 6321069.5]],
        dtype=np.float32)
    tree = KDTree(data_pts_real.astype(np.float32))
    dist, idx = tree.query(queries, sqr_dists=True)
    eps = 1e-05
    # Expected nearest-neighbor indices for the three query points.
    for i, expected in enumerate((7, 93, 45)):
        assert idx[i] == expected
    assert dist[0] == 0
    # Relative tolerance on the squared distances.
    assert abs(dist[1] - 3.0) < eps * dist[1]
    assert abs(dist[2] - 20001.0) < eps * dist[2]
    # The tree must have kept the points in float32.
    assert tree.data_pts.dtype == np.float32
def sum(input, labels=None, index=None):
    """Sum of the input values, optionally grouped by label.

    Thin wrapper over _stats, which returns (count, sum); only the sum
    component is exposed here.
    """
    return _stats(input, labels, index)[1]
_level_function()
# NOTE(review): the line above looks like a truncated decorator (likely
# `@high_level_function()` or similar); as written it is a bare call, not
# a decorator. Confirm against the original source.
def sum(array, axis=None, *, keepdims=False, mask_identity=False, highlevel=True, behavior=None, attrs=None):
    # Generator-style wrapper: yields the operation's inputs first (the
    # surrounding framework appears to consume this for dispatch/tracing),
    # then delegates the actual reduction to _impl.
    (yield (array,))
    return _impl(array, axis, keepdims, mask_identity, highlevel, behavior, attrs)
def issequence(t) -> bool:
    """Return True if *t* is a flat (1-D, scalar-element) sequence.

    Accepts an empty list/tuple, a list/tuple whose first element is a
    scalar, or a one-dimensional ndarray.
    """
    if isinstance(t, np.ndarray):
        return t.ndim == 1
    if not isinstance(t, (list, tuple)):
        return False
    # Empty sequences count; otherwise the first element must be a scalar.
    return len(t) == 0 or np.isscalar(t[0])
def xpos_vocab_factory(data, shorthand):
    """Build the XPOS vocabulary appropriate for a treebank *shorthand*.

    Most treebanks use an XPOSVocab with a treebank-specific tag separator;
    a second group uses a plain WordVocab over the XPOS column. The groups
    are disjoint, so lookup order does not matter.
    """
    # Treebanks keyed by the separator character their XPOS tags use.
    sep_to_shorthands = {
        '': ['af_afribooms', 'grc_perseus', 'ar_padt', 'bg_btb', 'cs_cac', 'cs_fictree', 'cs_pdt', 'gl_ctg', 'gl_treegal', 'it_isdt', 'it_postwita', 'la_perseus', 'lv_lvtb', 'ro_rrt', 'sk_snk', 'sl_ssj', 'sl_sst', 'uk_iu'],
        '|': ['nl_alpino', 'nl_lassysmall', 'la_ittb', 'sv_talbanken'],
        '-': ['en_lines', 'sv_lines', 'ur_udtb'],
        ',': ['fi_ftb'],
        '+': ['id_gsd', 'ko_gsd', 'ko_kaist'],
        ':': ['pl_lfg', 'pl_sz'],
    }
    # Treebanks whose XPOS column is treated as an atomic word vocabulary.
    word_vocab_shorthands = ['grc_proiel', 'hy_armtdp', 'eu_bdt', 'br_keb', 'bxr_bdt', 'ca_ancora', 'zh_gsd', 'hr_set', 'cs_pud', 'da_ddt', 'en_ewt', 'en_gum', 'en_pud', 'et_edt', 'fo_oft', 'fi_pud', 'fi_tdt', 'fr_gsd', 'fr_sequoia', 'fr_spoken', 'de_gsd', 'got_proiel', 'el_gdt', 'he_htb', 'hi_hdtb', 'hu_szeged', 'ga_idt', 'ja_gsd', 'ja_modern', 'kk_ktb', 'kmr_mg', 'la_proiel', 'pcm_nsc', 'sme_giella', 'no_bokmaal', 'no_nynorsk', 'no_nynorsklia', 'cu_proiel', 'fro_srcmf', 'fa_seraji', 'pt_bosque', 'ru_syntagrus', 'ru_taiga', 'sr_set', 'es_ancora', 'sv_pud', 'th_pud', 'tr_imst', 'hsb_ufal', 'ug_udt', 'vi_vtb']
    for sep, shorthands in sep_to_shorthands.items():
        if shorthand in shorthands:
            return XPOSVocab(data, shorthand, idx=2, sep=sep)
    if shorthand in word_vocab_shorthands:
        return WordVocab(data, shorthand, idx=2, ignore=['_'])
    raise NotImplementedError('Language shorthand "{}" not found!'.format(shorthand))
class TestDataLayout(unittest.TestCase):
    """Unit tests for DataLayout: construction validation, fmap-range
    bookkeeping, hop counting (nhops_to), region containment (is_in),
    and concatenation (concat)."""

    def setUp(self):
        # Two small layouts used throughout: dl1 over a 2x2 DRAM region at
        # (1,1), dl2 over a 2x1 DRAM region at (0,0), each with a single
        # fmap range and matching partition scheme.
        self.frng1 = FmapRange((0, 0, 0, 0), (4, 4, 16, 16))
        self.region1 = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(1, 1), type=NodeRegion.DRAM)
        self.part1 = PartitionScheme(order=range(pe.NUM), pdims=(PhyDim2(1, 1), PhyDim2(2, 1), PhyDim2(1, 2), PhyDim2(1, 1)))
        self.frng2 = FmapRange((0, 0, 0, 0), (4, 3, 16, 16))
        self.region2 = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM)
        self.part2 = PartitionScheme(order=range(pe.NUM), pdims=(PhyDim2(2, 1), PhyDim2(1, 1), PhyDim2(1, 1), PhyDim2(1, 1)))
        self.dl1 = DataLayout(frngs=(self.frng1,), regions=(self.region1,), parts=(self.part1,))
        self.dl2 = DataLayout(frngs=(self.frng2,), regions=(self.region2,), parts=(self.part2,))

    def test_invalid_args(self):
        # Constructor must type-check the three tuples and their elements.
        with self.assertRaisesRegex(TypeError, 'DataLayout: .*frngs.*'):
            _ = DataLayout(frngs=None, regions=(self.region1,), parts=(self.part1,))
        with self.assertRaisesRegex(TypeError, 'DataLayout: .*elements in frngs.*'):
            _ = DataLayout(frngs=(None,), regions=(self.region1,), parts=(self.part1,))
        with self.assertRaisesRegex(TypeError, 'DataLayout: .*regions.*'):
            _ = DataLayout(frngs=(self.frng1,), regions=None, parts=(self.part1,))
        with self.assertRaisesRegex(TypeError, 'DataLayout: .*elements in regions.*'):
            _ = DataLayout(frngs=(self.frng1,), regions=self.region1, parts=(self.part1,))
        with self.assertRaisesRegex(TypeError, 'DataLayout: .*parts.*'):
            _ = DataLayout(frngs=(self.frng1,), regions=(self.region1,), parts=None)
        with self.assertRaisesRegex(TypeError, 'DataLayout: .*elements in parts.*'):
            _ = DataLayout(frngs=(self.frng1,), regions=(self.region1,), parts=self.part1)

    def test_invalid_frngs(self):
        # Fmap ranges must be non-empty, contiguous along the second
        # dimension, and agree with the first range on all other dims.
        with self.assertRaisesRegex(ValueError, 'DataLayout: .*frng.*'):
            _ = DataLayout(frngs=tuple(), regions=(self.region1,), parts=(self.part1,))
        with self.assertRaisesRegex(ValueError, 'DataLayout: .*frng.*'):
            _ = DataLayout(frngs=(FmapRange((0, 4, 0, 0), (4, 8, 16, 16)), self.frng1),
                           regions=(self.region1, self.region2), parts=(self.part1, self.part2))
        with self.assertRaisesRegex(ValueError, 'DataLayout: .*frng.*'):
            _ = DataLayout(frngs=(self.frng1, FmapRange((0, 4, 0, 0), (3, 8, 16, 16))),
                           regions=(self.region1, self.region2), parts=(self.part1, self.part2))
        with self.assertRaisesRegex(ValueError, 'DataLayout: .*frng.*'):
            _ = DataLayout(frngs=(self.frng1, FmapRange((0, 4, 0, 0), (4, 8, 12, 16))),
                           regions=(self.region1, self.region2), parts=(self.part1, self.part2))
        with self.assertRaisesRegex(ValueError, 'DataLayout: .*frng.*'):
            _ = DataLayout(frngs=(self.frng1, FmapRange((0, 4, 0, 0), (4, 8, 16, 12))),
                           regions=(self.region1, self.region2), parts=(self.part1, self.part2))
        with self.assertRaisesRegex(ValueError, 'DataLayout: .*frng.*'):
            _ = DataLayout(frngs=(self.frng1, FmapRange((0, 5, 0, 0), (4, 8, 16, 16))),
                           regions=(self.region1, self.region2), parts=(self.part1, self.part2))

    def test_invalid_parts(self):
        # Each partition scheme must fit its region's dimensions.
        with self.assertRaisesRegex(ValueError, 'DataLayout: .*part.*'):
            _ = DataLayout(frngs=(self.frng1,), regions=(self.region1,),
                           parts=(PartitionScheme(order=range(pe.NUM), pdims=(PhyDim2(1, 1), PhyDim2(1, 2), PhyDim2(1, 1), PhyDim2(2, 1))),))
        with self.assertRaisesRegex(ValueError, 'DataLayout: .*part.*'):
            _ = DataLayout(frngs=(self.frng1, FmapRange((0, 4, 0, 0), (4, 8, 16, 16))),
                           regions=(self.region2, self.region1), parts=(self.part1, self.part2))

    def test_invalid_args_length(self):
        # All three tuples must have equal length.
        with self.assertRaisesRegex(ValueError, 'DataLayout: .*length.*'):
            _ = DataLayout(frngs=(self.frng1,), regions=(self.region1, self.region2), parts=(self.part1,))

    def test_complete_fmap_range(self):
        # Two adjacent ranges merge into one complete covering range.
        dl = DataLayout(frngs=(self.frng1, FmapRange((0, 4, 0, 0), (4, 8, 16, 16))),
                        regions=(self.region1, self.region2), parts=(self.part1, self.part2))
        self.assertEqual(dl.complete_fmap_range(), FmapRange((0, 0, 0, 0), (4, 8, 16, 16)))

    def test_fmap_range_map(self):
        # The range map assigns every sub-range to the node holding it.
        dl = DataLayout(frngs=(self.frng1, FmapRange((0, 4, 0, 0), (4, 8, 16, 16))),
                        regions=(self.region1, self.region2), parts=(self.part1, self.part2))
        frmap = dl.fmap_range_map()
        self.assertEqual(frmap.complete_fmap_range(), dl.complete_fmap_range())
        self.assertSetEqual(set(frmap.items()),
                            {(FmapRange((0, 0, 0, 0), (2, 4, 8, 16)), PhyDim2(1, 1)),
                             (FmapRange((2, 0, 0, 0), (4, 4, 8, 16)), PhyDim2(1, 2)),
                             (FmapRange((0, 0, 8, 0), (2, 4, 16, 16)), PhyDim2(2, 1)),
                             (FmapRange((2, 0, 8, 0), (4, 4, 16, 16)), PhyDim2(2, 2)),
                             (FmapRange((0, 4, 0, 0), (4, 6, 16, 16)), PhyDim2(0, 0)),
                             (FmapRange((0, 6, 0, 0), (4, 8, 16, 16)), PhyDim2(1, 0))})

    def test_nhops_to(self):
        # nhops = data volume per node * Manhattan distance to the dest.
        fr = FmapRange(((0,) * 4), (4, 4, 16, 16))
        nhops = ((((2 * 4) * 8) * 16) * (((5 + 6) + 6) + 7))
        self.assertEqual(self.dl1.nhops_to(fr, PhyDim2((- 1), (- 2))), nhops)
        # A second fmap range outside `fr` adds nothing until `fr` grows.
        frng1 = FmapRange((0, 4, 0, 0), (4, 8, 16, 16))
        dl = DataLayout(frngs=(self.frng1, frng1), regions=(self.region1, self.region2), parts=(self.part1, self.part2))
        self.assertEqual(dl.nhops_to(fr, PhyDim2((- 1), (- 2))), nhops)
        fr = FmapRange(((0,) * 4), ((16,) * 4))
        nhops += ((((2 * 4) * 16) * 16) * (3 + 4))
        self.assertEqual(dl.nhops_to(fr, PhyDim2((- 1), (- 2))), nhops)

    def test_nhops_to_multidests(self):
        # Without forwarding, each destination is served independently.
        fr = FmapRange(((0,) * 4), (4, 4, 16, 16))
        nhops = ((((((2 * 4) * 8) * 16) * (((5 + 6) + 6) + 7)) + ((((2 * 4) * 8) * 16) * (((7 + 8) + 8) + 9))) + ((((2 * 4) * 8) * 16) * (((2 + 1) + 1) + 0)))
        self.assertEqual(self.dl1.nhops_to(fr, PhyDim2((- 1), (- 2)), PhyDim2((- 2), (- 3)), PhyDim2(2, 2)), nhops)
        frng1 = FmapRange((0, 4, 0, 0), (4, 8, 16, 16))
        dl = DataLayout(frngs=(self.frng1, frng1), regions=(self.region1, self.region2), parts=(self.part1, self.part2))
        self.assertEqual(dl.nhops_to(fr, PhyDim2((- 1), (- 2)), PhyDim2((- 2), (- 3)), PhyDim2(2, 2)), nhops)
        fr = FmapRange(((0,) * 4), ((16,) * 4))
        nhops += ((((2 * 4) * 16) * 16) * (((3 + 4) + (5 + 6)) + (4 + 3)))
        self.assertEqual(dl.nhops_to(fr, PhyDim2((- 1), (- 2)), PhyDim2((- 2), (- 3)), PhyDim2(2, 2)), nhops)

    def test_nhops_to_multidests_fwd(self):
        # With forwarding, data hops to the closest dest first and is then
        # relayed between destinations, reducing total hop count.
        fr = FmapRange(((0,) * 4), (4, 4, 16, 16))
        nhops = ((((((2 * 4) * 8) * 16) * (((2 + 1) + 1) + 0)) + ((((2 * 4) * 8) * 16) * (4 * 7))) + ((((2 * 4) * 8) * 16) * (4 * 2)))
        self.assertEqual(self.dl1.nhops_to(fr, PhyDim2((- 1), (- 2)), PhyDim2((- 2), (- 3)), PhyDim2(2, 2), forwarding=True), nhops)
        frng1 = FmapRange((0, 4, 0, 0), (4, 8, 16, 16))
        dl = DataLayout(frngs=(self.frng1, frng1), regions=(self.region1, self.region2), parts=(self.part1, self.part2))
        self.assertEqual(dl.nhops_to(fr, PhyDim2((- 1), (- 2)), PhyDim2((- 2), (- 3)), PhyDim2(2, 2), forwarding=True), nhops)
        nhops += ((((2 * 4) * 16) * 16) * (((3 + 4) + (2 * 7)) + (2 * 2)))
        fr = FmapRange(((0,) * 4), ((16,) * 4))
        self.assertEqual(dl.nhops_to(fr, PhyDim2((- 1), (- 2)), PhyDim2((- 2), (- 3)), PhyDim2(2, 2), forwarding=True), nhops)
        nhops += ((((4 * 8) * 16) * 16) * (9 + 8))
        self.assertEqual(dl.nhops_to(fr, PhyDim2((- 1), (- 2)), PhyDim2((- 2), (- 3)), PhyDim2(2, 2), PhyDim2(3, 10), PhyDim2(8, 4), forwarding=True), nhops)

    def test_nhops_to_invalid_kwargs(self):
        # Unknown keyword arguments must be rejected.
        fr = FmapRange(((0,) * 4), (4, 4, 16, 16))
        with self.assertRaisesRegex(ValueError, 'DataLayout: .*keyword.*'):
            _ = self.dl1.nhops_to(fr, PhyDim2(1, 1), f=True)

    def test_is_in(self):
        # is_in checks region type and full geometric containment; multiple
        # regions may jointly cover the layout.
        nr1 = self.region1
        self.assertTrue(self.dl1.is_in(nr1))
        nr2 = NodeRegion(dim=PhyDim2(5, 5), origin=nr1.origin, type=nr1.type)
        self.assertTrue(self.dl1.is_in(nr2))
        nr3 = NodeRegion(origin=PhyDim2(0, 0), dim=nr2.dim, type=nr2.type)
        self.assertTrue(self.dl1.is_in(nr3))
        nr4 = NodeRegion(type=NodeRegion.PROC, origin=nr3.origin, dim=nr3.dim)
        self.assertFalse(self.dl1.is_in(nr4))
        nr5 = NodeRegion(origin=PhyDim2(0, 0), dim=nr1.dim, type=nr1.type)
        self.assertFalse(self.dl1.is_in(nr5))
        nr6_1 = NodeRegion(origin=PhyDim2(1, 1), dim=PhyDim2(2, 1), type=nr1.type)
        nr6_2 = NodeRegion(origin=PhyDim2(1, 2), dim=PhyDim2(2, 1), type=nr1.type)
        self.assertTrue(self.dl1.is_in(nr6_1, nr6_2))
        frng1 = FmapRange((0, 4, 0, 0), (4, 8, 16, 16))
        dl = DataLayout(frngs=(self.frng1, frng1), regions=(self.region1, self.region2), parts=(self.part1, self.part2))
        self.assertFalse(dl.is_in(self.region1))
        self.assertTrue(dl.is_in(self.region1, self.region2))
        self.assertTrue(dl.is_in(NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(50, 50), type=self.region1.type)))

    def test_is_in_folded(self):
        # Folded regions (wtot/wbeg) wrap around; containment must account
        # for the folded geometry.
        nr1 = NodeRegion(origin=PhyDim2(1, 1), dim=PhyDim2(1, 10), type=self.region1.type, wtot=3, wbeg=2)
        nr2 = NodeRegion(origin=PhyDim2(1, 1), dim=PhyDim2(1, 3), type=self.region1.type, wtot=3, wbeg=2)
        self.assertTrue(self.dl1.is_in(nr1))
        self.assertFalse(self.dl1.is_in(nr2))
        region = NodeRegion(origin=PhyDim2(1, 2), dim=PhyDim2(2, 10), type=self.region1.type, wtot=3, wbeg=1)
        part = PartitionScheme(order=range(pe.NUM), pdims=(PhyDim2(1, 5), PhyDim2(2, 1), PhyDim2(1, 2), PhyDim2(1, 1)))
        dl = DataLayout(frngs=self.dl1.frngs, regions=(region,), parts=(part,))
        nr3 = NodeRegion(origin=PhyDim2(1, 1), dim=PhyDim2(2, 13), type=self.region1.type, wtot=4, wbeg=2)
        self.assertTrue(dl.is_in(nr3))
        self.assertFalse(dl.is_in(nr2))

    def test_concat(self):
        # concat joins layouts along the second (channel) dimension; the
        # result is order-insensitive for range size and hop counts.
        fr = FmapRange(((0,) * 4), ((30,) * 4))
        dl = DataLayout.concat(self.dl1, self.dl2)
        self.assertEqual(dl.complete_fmap_range(), FmapRange((0, 0, 0, 0), (4, 7, 16, 16)))
        self.assertEqual(dl.complete_fmap_range().size(), (self.dl1.complete_fmap_range().size() + self.dl2.complete_fmap_range().size()))
        self.assertEqual(dl.nhops_to(fr, PhyDim2(0, 0)), (self.dl1.nhops_to(fr, PhyDim2(0, 0)) + self.dl2.nhops_to(fr, PhyDim2(0, 0))))
        dl_ = DataLayout.concat(self.dl2, self.dl1)
        self.assertEqual(dl.complete_fmap_range(), dl_.complete_fmap_range())
        self.assertEqual(dl.nhops_to(fr, PhyDim2(0, 0)), dl_.nhops_to(fr, PhyDim2(0, 0)))

    def test_concat_invalid_type(self):
        # Only DataLayout instances can be concatenated.
        with self.assertRaisesRegex(TypeError, 'DataLayout: .*concat.*'):
            _ = DataLayout.concat(self.dl1, self.frng1)
        with self.assertRaisesRegex(TypeError, 'DataLayout: .*concat.*'):
            _ = DataLayout.concat(self.dl1, PhyDim2(1, 3))

    def test_concat_unmatch(self):
        # Layouts whose non-concat dimensions differ cannot be joined.
        for fr in [FmapRange(((0,) * 4), (4, 4, 10, 16)), FmapRange(((0,) * 4), (4, 4, 16, 32)),
                   FmapRange(((0,) * 4), (3, 4, 16, 16))]:
            dl = DataLayout(frngs=(fr,), regions=(self.region1,), parts=(self.part1,))
            with self.assertRaisesRegex(ValueError, 'DataLayout: .*match.*'):
                _ = DataLayout.concat(self.dl1, dl)
def compute_returns_yaml(f: NativeFunction) -> Tuple[List[Dict[str, str]], Dict[str, str]]:
    """Build the YAML 'returns' entries for a native function declaration.

    Returns a pair of (list of per-return dicts, mapping from out-argument
    name to the schema field name it populates).
    """
    name_to_field_name: Dict[str, str] = {}
    returns = []
    for i, r in enumerate(f.func.returns):
        # Pick the caller-visible name for this return value.
        if f.func.name.name.inplace:
            assert i == 0, 'illegal inplace function with multiple returns'
            name = 'self'
        elif f.func.is_out_fn():
            # Out functions return their out arguments, by the same name.
            name = f.func.out_arguments[i].name
        elif r.name:
            # Avoid colliding with an argument of the same name.
            conflicts = any(r.name == a.name for a in f.func.schema_order_arguments())
            name = f'{r.name}_return' if (conflicts and not f.func.is_out_fn()) else r.name
        else:
            name = 'result' if len(f.func.returns) == 1 else f'result{i}'
        entry = {'dynamic_type': dynamic_type(r.type), 'name': name, 'type': cpp.return_type(r)}
        if r.name:
            entry['field_name'] = r.name
            if f.func.is_out_fn():
                name_to_field_name[f.func.out_arguments[i].name] = r.name
        returns.append(entry)
    return returns, name_to_field_name
class ImageSoftmaxEngine(torchreid.engine.ImageSoftmaxEngine):
    """Softmax re-ID engine with best-checkpoint tracking and per-target
    TensorBoard logging added on top of the torchreid base engine."""

    def run(self, save_dir='log', max_epoch=0, start_epoch=1, print_freq=10, fixbase_epoch=0, open_layers=None, start_eval=1, eval_freq=(- 1), test_only=False, dist_metric='euclidean', normalize_feature=False, visrank=False, visrank_topk=10, use_metric_cuhk03=False, ranks=[1, 5, 10, 20], rerank=False, save_best=True):
        # NOTE(review): `ranks=[1, 5, 10, 20]` is a mutable default argument;
        # safe only if callers never mutate it -- confirm.
        # visrank produces per-query ranking visualizations and is only
        # meaningful when evaluating a fixed checkpoint.
        if (visrank and (not test_only)):
            raise ValueError('visrank can be set to True only if test_only=True')
        if ((self.writer is None) and (not test_only)):
            self.writer = SummaryWriter(log_dir=save_dir)
        # Evaluate once before training (epoch 0 baseline).
        self.epoch = 0
        self.test(dist_metric=dist_metric, normalize_feature=normalize_feature, visrank=visrank, visrank_topk=visrank_topk, save_dir=save_dir, use_metric_cuhk03=use_metric_cuhk03, ranks=ranks, rerank=rerank)
        if test_only:
            return
        time_start = time.time()
        rank1_best = 0
        self.start_epoch = start_epoch
        self.max_epoch = max_epoch
        print('=> Start training')
        for self.epoch in range(self.start_epoch, (self.max_epoch + 1)):
            self.train(print_freq=print_freq, fixbase_epoch=fixbase_epoch, open_layers=open_layers)
            # Periodic evaluation; the final epoch is evaluated separately
            # below, so it is excluded here.
            if ((self.epoch >= start_eval) and (eval_freq > 0) and ((self.epoch % eval_freq) == 0) and (self.epoch != self.max_epoch)):
                rank1 = self.test(dist_metric=dist_metric, normalize_feature=normalize_feature, visrank=visrank, visrank_topk=visrank_topk, save_dir=save_dir, use_metric_cuhk03=use_metric_cuhk03, ranks=ranks)
                # Track the best rank-1 so far and flag that checkpoint.
                is_best = False
                if (save_best and (rank1 >= rank1_best)):
                    rank1_best = rank1
                    is_best = True
                self.save_model(self.epoch, rank1, save_dir, is_best=is_best)
        if (self.max_epoch > 0):
            print('=> Final test')
            rank1 = self.test(dist_metric=dist_metric, normalize_feature=normalize_feature, visrank=visrank, visrank_topk=visrank_topk, save_dir=save_dir, use_metric_cuhk03=use_metric_cuhk03, ranks=ranks)
            self.save_model(self.epoch, rank1, save_dir)
        elapsed = round((time.time() - time_start))
        elapsed = str(datetime.timedelta(seconds=elapsed))
        print('Elapsed {}'.format(elapsed))
        if (self.writer is not None):
            self.writer.close()

    def save_model(self, epoch, rank1, save_dir, is_best=False):
        # Persist model/optimizer/scheduler state for every registered model.
        names = self.get_model_names()
        for name in names:
            save_checkpoint({'state_dict': self._models[name].state_dict(), 'epoch': (epoch + 1), 'rank1': rank1, 'optimizer': self._optims[name].state_dict(), 'scheduler': self._scheds[name].state_dict()}, osp.join(save_dir, name), is_best=is_best)

    def test(self, dist_metric='euclidean', normalize_feature=False, visrank=False, visrank_topk=10, save_dir='', use_metric_cuhk03=False, ranks=[1, 5, 10, 20], rerank=False):
        # Evaluate on every registered test target and log rank-1/mAP per
        # target plus the mean rank-1 across targets. Returns the rank-1 of
        # the LAST target evaluated.
        self.set_model_mode('eval')
        targets = list(self.test_loader.keys())
        rank1_list = []
        for name in targets:
            domain = ('source' if (name in self.datamanager.sources) else 'target')
            # NOTE(review): the trailing .format(name, domain) is a no-op on
            # an already-interpolated f-string -- harmless but redundant.
            print(f'##### Evaluating {name} ({domain}) #####'.format(name, domain))
            query_loader = self.test_loader[name]['query']
            gallery_loader = self.test_loader[name]['gallery']
            (rank1, mAP) = self._evaluate(dataset_name=name, query_loader=query_loader, gallery_loader=gallery_loader, dist_metric=dist_metric, normalize_feature=normalize_feature, visrank=visrank, visrank_topk=visrank_topk, save_dir=save_dir, use_metric_cuhk03=use_metric_cuhk03, ranks=ranks, rerank=rerank)
            if (self.writer is not None):
                self.writer.add_scalar(f'Test/{name}/rank1', rank1, self.epoch)
                self.writer.add_scalar(f'Test/{name}/mAP', mAP, self.epoch)
            rank1_list.append(rank1)
        print(f'##### MEAN targets Rank-1: {np.mean(rank1_list):.1%} #####')
        if (self.writer is not None):
            self.writer.add_scalar(f'Test/MEAN_rank1', np.mean(rank1_list), self.epoch)
        return rank1
class TestMinrelpath(object):
    """Unit tests for minrelpath path normalization."""

    def test_1(self):
        # Translate '/'-separated fixtures into the platform separator.
        n = lambda path: path.replace('/', sep)
        cases = [
            (n('aa/bb'), n('aa/bb')),
            ('..', '..'),
            (n('aa/..'), ''),
            (n('aa/../bb'), 'bb'),
            (n('aa/bb/..'), 'aa'),
            (n('aa/bb/../..'), ''),
            (n('aa/bb/../cc/../dd'), n('aa/dd')),
            # '..' components that cannot be collapsed are preserved.
            (n('.././..'), n('../..')),
            (n('aa/bb/.././../dd'), n('dd')),
        ]
        for given, expected in cases:
            assert_equal(minrelpath(given), expected)
class UniformHistogramMiner(BaseTupleMiner):
    """Tuple miner that resamples positive/negative pairs so their distance
    distribution is approximately uniform across a fixed number of bins.

    num_bins: number of histogram bins over the observed distance range.
    pos_per_bin / neg_per_bin: how many pairs to sample (with replacement)
        from each non-empty bin.
    """

    def __init__(self, num_bins=100, pos_per_bin=10, neg_per_bin=10, **kwargs):
        super().__init__(**kwargs)
        self.num_bins = num_bins
        self.pos_per_bin = pos_per_bin
        self.neg_per_bin = neg_per_bin
        self.add_to_recordable_attributes(list_of_names=['pos_per_bin', 'neg_per_bin'], is_stat=False)

    def mine(self, embeddings, labels, ref_emb, ref_labels):
        # Pairwise distance matrix between embeddings and references.
        mat = self.distance(embeddings, ref_emb)
        # (anchor, positive) and (anchor, negative) index pairs.
        (a1, p, a2, n) = lmu.get_all_pairs_indices(labels, ref_labels)
        pos_pairs = mat[(a1, p)]
        neg_pairs = mat[(a2, n)]
        # Resample each pair set to flatten its distance histogram.
        if (len(pos_pairs) > 0):
            (a1, p) = self.get_uniformly_distributed_pairs(pos_pairs, a1, p, self.pos_per_bin)
        if (len(neg_pairs) > 0):
            (a2, n) = self.get_uniformly_distributed_pairs(neg_pairs, a2, n, self.neg_per_bin)
        return (a1, p, a2, n)

    def get_bins(self, pairs):
        # num_bins + 1 evenly spaced edges spanning the observed distances.
        (device, dtype) = (pairs.device, pairs.dtype)
        return torch.linspace(torch.min(pairs), torch.max(pairs), steps=(self.num_bins + 1), device=device, dtype=dtype)

    def filter_by_bin(self, distances, bins, num_pairs):
        # For each bin, sample num_pairs indices (with replacement) from the
        # distances falling into it; empty bins are skipped.
        range_max = (len(bins) - 1)
        all_idx = []
        for i in range(range_max):
            (s, e) = (bins[i], bins[(i + 1)])
            low_condition = (s <= distances)
            # The last bin is closed on the right so the max distance is
            # not dropped.
            high_condition = ((distances < e) if (i != (range_max - 1)) else (distances <= e))
            condition = torch.where((low_condition & high_condition))[0]
            if (len(condition) == 0):
                continue
            # Uniform sampling with replacement within the bin (stochastic).
            idx = torch.multinomial(torch.ones_like(condition, device=condition.device, dtype=torch.float), num_pairs, replacement=True)
            all_idx.append(condition[idx])
        return torch.cat(all_idx, dim=0)

    def get_uniformly_distributed_pairs(self, distances, anchors, others, num_pairs):
        # Bin the distances, then keep a fixed-size sample from each bin.
        bins = self.get_bins(distances)
        idx = self.filter_by_bin(distances, bins, num_pairs)
        return (anchors[idx], others[idx])
def SResNet34(Q_l, input_channels=3, imsize=32, output_dim=10):
    """Construct an SResNet with the ResNet-34 stage layout (3-4-6-3)."""
    layer_plan = [3, 4, 6, 3]  # BasicBlock counts per stage, as in ResNet-34
    return SResNet(BasicBlock, layer_plan, Q_l, input_channels, imsize, output_dim)
def FinitelyGeneratedAbelianPresentation(int_list):
    """Return a finite presentation of the abelian group with the given
    invariant structure Z/n_1 x Z/n_2 x ... (0 entries give free factors).

    Raises ValueError if any entry is negative.
    """
    from sage.groups.free_group import _lexi_gen
    # Every entry must be a nonnegative integer.
    nonneg = [Integer(x) for x in int_list if Integer(x) >= 0]
    if len(nonneg) != len(int_list):
        raise ValueError('input list must contain nonnegative entries')
    # Smith-form invariants of Z^n modulo the diagonal lattice.
    col_sp = diagonal_matrix(int_list).column_space()
    invariants = FGP_Module(ZZ ** len(int_list), col_sp).invariants()
    names = _lexi_gen()
    F = FreeGroup([next(names) for _ in invariants])
    # Torsion relators g_i^{d_i} for each nonzero invariant d_i.
    relations = [F([i + 1]) ** invariants[i] for i in range(len(invariants)) if invariants[i] != 0]
    # Commutator relators [g_i, g_j] to make the presentation abelian.
    for i in range(F.ngens() - 1):
        for j in range(i + 1, F.ngens()):
            a, b = F.gen(i), F.gen(j)
            relations.append(((a ** (- 1)) * (b ** (- 1))) * a * b)
    return FinitelyPresentedGroup(F, tuple(relations))
class Adafactor(torch.optim.Optimizer):
    """Adafactor optimizer (Shazeer & Stern, 2018).

    Stores factored second-moment estimates (row/column means) for >=2-D
    parameters instead of a full tensor, greatly reducing optimizer memory.
    Supports relative step sizes, update clipping, and parameter-scale
    adaptation.

    Args:
        params: iterable of parameters or param groups.
        lr: external learning rate; must be None when relative_step=True.
        eps: (eps1, eps2) regularizers for squared gradient and param scale.
        clip_threshold: threshold on the RMS of the final update.
        decay_rate: exponent controlling the second-moment decay schedule.
        beta1: if not None, enables first-moment (momentum) accumulation.
        weight_decay: decoupled weight decay coefficient.
        scale_parameter: scale the step by max(eps2, RMS(param)).
        relative_step: compute the step size from the step count instead of lr.
        warmup_init: use a step-count-proportional warmup (needs relative_step).
    """

    def __init__(self, params, lr=None, eps=(1e-30, 0.001), clip_threshold=1.0, decay_rate=(- 0.8), beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False):
        # lr and relative_step are mutually exclusive ways to set the step.
        if ((lr is not None) and relative_step):
            raise ValueError('Cannot combine manual lr and relative_step options')
        if (warmup_init and (not relative_step)):
            raise ValueError('warmup_init requires relative_step=True')
        defaults = dict(lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init)
        super(Adafactor, self).__init__(params, defaults)

    # NOTE(review): in upstream implementations these two are usually
    # @property accessors -- confirm how callers invoke them here.
    def supports_memory_efficient_fp16(self):
        return True

    def supports_flat_params(self):
        return False

    def _get_lr(self, param_group, param_state):
        # Relative step: lr derived from the step count (with optional
        # warmup), optionally scaled by the parameter's RMS.
        rel_step_sz = param_group['lr']
        if param_group['relative_step']:
            min_step = ((1e-06 * param_state['step']) if param_group['warmup_init'] else 0.01)
            rel_step_sz = min(min_step, (1.0 / math.sqrt(param_state['step'])))
        param_scale = 1.0
        if param_group['scale_parameter']:
            param_scale = max(param_group['eps'][1], param_state['RMS'])
        return (param_scale * rel_step_sz)

    def _get_options(self, param_group, param_shape):
        # Factored second moments only make sense for matrices and higher.
        factored = (len(param_shape) >= 2)
        use_first_moment = (param_group['beta1'] is not None)
        return (factored, use_first_moment)

    def _rms(self, tensor):
        # Root-mean-square of all elements.
        return (tensor.norm(2) / (tensor.numel() ** 0.5))

    def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
        # Reconstruct the rank-1 approximation of 1/sqrt(second moment)
        # from the factored row/column statistics.
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=(- 1), keepdim=True)).rsqrt_().unsqueeze((- 1))
        c_factor = exp_avg_sq_col.unsqueeze((- 2)).rsqrt()
        return torch.mul(r_factor, c_factor)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                # Accumulate statistics in fp32 even for fp16/bf16 params.
                if (grad.dtype in {torch.float16, torch.bfloat16}):
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError('Adafactor does not support sparse gradients.')
                state = self.state[p]
                grad_shape = grad.shape
                (factored, use_first_moment) = self._get_options(group, grad_shape)
                if (len(state) == 0):
                    # Lazy state initialization on first step.
                    state['step'] = 0
                    if use_first_moment:
                        state['exp_avg'] = torch.zeros_like(grad)
                    if factored:
                        # Row stats drop the last dim; column stats drop the
                        # second-to-last dim.
                        state['exp_avg_sq_row'] = torch.zeros(grad_shape[:(- 1)]).to(grad)
                        state['exp_avg_sq_col'] = torch.zeros((grad_shape[:(- 2)] + grad_shape[(- 1):])).to(grad)
                    else:
                        state['exp_avg_sq'] = torch.zeros_like(grad)
                    state['RMS'] = 0
                else:
                    # Move stored stats to the gradient's device/dtype.
                    if use_first_moment:
                        state['exp_avg'] = state['exp_avg'].to(grad)
                    if factored:
                        state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
                        state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
                    else:
                        state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)
                p_data_fp32 = p.data
                if (p.data.dtype in {torch.float16, torch.bfloat16}):
                    p_data_fp32 = p_data_fp32.float()
                state['step'] += 1
                state['RMS'] = self._rms(p_data_fp32)
                group['lr'] = self._get_lr(group, state)
                # Decay approaches 1 as step grows (schedule from the paper).
                beta2t = (1.0 - math.pow(state['step'], group['decay_rate']))
                update = ((grad ** 2) + group['eps'][0])
                if factored:
                    exp_avg_sq_row = state['exp_avg_sq_row']
                    exp_avg_sq_col = state['exp_avg_sq_col']
                    exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=(- 1)), alpha=(1.0 - beta2t))
                    exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=(- 2)), alpha=(1.0 - beta2t))
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state['exp_avg_sq']
                    exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t))
                    update = exp_avg_sq.rsqrt().mul_(grad)
                # Clip the update so its RMS does not exceed clip_threshold.
                update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
                update.mul_(group['lr'])
                if use_first_moment:
                    exp_avg = state['exp_avg']
                    exp_avg.mul_(group['beta1']).add_(update, alpha=(1 - group['beta1']))
                    update = exp_avg
                if (group['weight_decay'] != 0):
                    # Decoupled weight decay, scaled by the effective lr.
                    p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * group['lr']))
                p_data_fp32.add_((- update))
                if (p.data.dtype in {torch.float16, torch.bfloat16}):
                    p.data.copy_(p_data_fp32)
        return loss
_dispatch
# NOTE(review): `_dispatch` above looks like a truncated decorator (likely
# `@_dispatch`); as written it is a bare name expression. Confirm against
# the original source.
def hfft2(x, s=None, axes=((- 2), (- 1)), norm=None, overwrite_x=False, workers=None, *, plan=None):
    # Multi-backend dispatch stub: returns the Dispatchable marker for `x`
    # so the uarray machinery routes the call to a concrete FFT backend;
    # the other parameters only define the public signature.
    return (Dispatchable(x, np.ndarray),)
def render_cat_num(itmdt: Intermediate, cfg: Config) -> Dict[(str, Any)]:
    """Render the categorical-vs-numerical bivariate plots (box plot and
    line chart) from a computed Intermediate, honoring plot config."""
    # Fall back to default dimensions when the config leaves them unset.
    plot_width = (cfg.plot.width if (cfg.plot.width is not None) else 450)
    plot_height = (cfg.plot.height if (cfg.plot.height is not None) else 400)
    tabs: List[Panel] = []
    # Per-tab "how to guide" help text, keyed by tab title.
    htgs: Dict[(str, List[Tuple[(str, str)]])] = {}
    (data, x, y) = (itmdt['data'], itmdt['x'], itmdt['y'])
    if cfg.box.enable:
        # Each group contributes 5 rows (box-plot quartile stats), so keep
        # the first 5 * ngroups rows, then pivot stats into columns.
        df = data['box'].to_frame().reset_index()[:(5 * cfg.box.ngroups)]
        df = df.pivot(index=x, columns='level_1', values=[0]).reset_index()
        # Flatten the MultiIndex columns; first column becomes 'grp'.
        df.columns = df.columns.get_level_values(1)
        df.columns = (['grp'] + list(df.columns[1:]))
        tabs.append(box_viz(df, x, plot_width, plot_height, cfg.box, y, data['ttl_grps']))
        htgs['Box Plot'] = cfg.box.nom_cont_how_to_guide(plot_height, plot_width)
    if cfg.line.enable:
        df = data['hist'].to_frame()[:cfg.line.ngroups]
        tabs.append(line_viz(df, x, y, cfg.line.yscale, plot_width, plot_height, data['ttl_grps']))
        htgs['Line Chart'] = cfg.line.nom_cont_how_to_guide(plot_height, plot_width)
    # Shrink the inner frame slightly so the tab chrome fits.
    for panel in tabs:
        panel.child.children[0].frame_width = int((plot_width * 0.9))
    return {'layout': [panel.child for panel in tabs], 'meta': [panel.title for panel in tabs], 'container_width': (plot_width + 170), 'how_to_guide': htgs}
def get_embedding_layer(num_embeddings, embedding_dim, padding_idx=None):
    """Create an nn.Embedding with N(0, embedding_dim**-0.5) initialization.

    Bug fixed: the padding-row zeroing previously ran unconditionally, and
    when ``padding_idx`` was ``None``, ``emb.weight[None]`` is a view of the
    ENTIRE weight tensor -- so the whole embedding matrix was zeroed out.
    The zeroing is now applied only when a padding index is given.

    Args:
        num_embeddings: vocabulary size.
        embedding_dim: size of each embedding vector.
        padding_idx: optional index whose embedding is pinned to zero.

    Returns:
        An initialized nn.Embedding module.
    """
    emb = nn.Embedding(num_embeddings, embedding_dim, padding_idx)
    # Transformer-style init: std scales as 1/sqrt(dim).
    nn.init.normal_(emb.weight, mean=0, std=(embedding_dim ** (- 0.5)))
    if padding_idx is not None:
        # Keep the padding vector at exactly zero after the normal init.
        nn.init.constant_(emb.weight[padding_idx], 0)
    return emb
class GPTNeoPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder for GPTNeoPreTrainedModel.

    Instantiating it raises a helpful error when the required backend
    (torch) is not installed.
    """

    # Backends that must be available for the real class to be usable.
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
def get_intervention(action, time):
    """Map a discrete action id (0-3) to an Intervention taking effect at
    *time*.

    The two incidence knobs are toggled between 0.0 and 0.05:
    action 0 = neither, 1 = illicit only, 2 = nonmedical only, 3 = both.
    Raises KeyError for any other action id.
    """
    incidence_by_action = {
        0: (0.0, 0.0),
        1: (0.0, 0.05),
        2: (0.05, 0.0),
        3: (0.05, 0.05),
    }
    action_to_intervention_map = {
        a: Intervention(time=time, nonmedical_incidence=nonmedical, illicit_incidence=illicit)
        for a, (nonmedical, illicit) in incidence_by_action.items()
    }
    return action_to_intervention_map[action]
.operations('create_user', 'get_user', 'update_user')
# NOTE(review): the line above looks like a truncated decorator (probably
# `@pytest.mark.operations(...)` or a schema-fixture marker); as written it
# is a syntax error. Confirm against the original test module.
def test_add_link_by_reference(schema_url):
    # Links can be added by JSON-pointer operationRef (with ~1 escaping for
    # '/') instead of an operationId, and the stored entry must echo the
    # reference plus the supplied parameters.
    schema = schemathesis.from_uri(schema_url)
    links = add_link(schema, '#/paths/~1users~1{user_id}/get', parameters={'userId': '$response.body#/id'})
    assert (links['#/paths/~1users~1{user_id}/get'] == {'operationRef': '#/paths/~1users~1{user_id}/get', **EXPECTED_LINK_PARAMETERS})
class ParseResults(object):
    """Container for parsed tokens (pyparsing-style).

    Behaves as a hybrid list/dict: tokens are stored in order in
    ``__toklist`` while named results live in ``__tokdict`` mapping
    name -> list of (value, offset) pairs (``_ParseResultsWithOffset``).
    """

    def __new__(cls, toklist=None, name=None, asList=True, modal=True):
        # Passing an existing ParseResults through returns it unchanged.
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        # Flag consumed by __init__ so re-wrapped instances are not re-initialized.
        retobj.__doinit = True
        return retobj

    def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance):
        # ``isinstance=isinstance`` binds the builtin as a fast local (hot path).
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            self.__asList = asList
            self.__modal = modal
            if (toklist is None):
                toklist = []
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            elif isinstance(toklist, _generatorType):
                self.__toklist = list(toklist)
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()
        if ((name is not None) and name):
            if (not modal):
                # Non-modal names accumulate all values instead of keeping the last.
                self.__accumNames[name] = 0
            if isinstance(name, int):
                name = _ustr(name)
            self.__name = name
            # Only attach the named value when toklist is a real, non-empty value.
            if (not (isinstance(toklist, (type(None), basestring, list)) and (toklist in (None, '', [])))):
                if isinstance(toklist, basestring):
                    toklist = [toklist]
                if asList:
                    if isinstance(toklist, ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(), 0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError, TypeError, IndexError):
                        self[name] = toklist

    def __getitem__(self, i):
        if isinstance(i, (int, slice)):
            return self.__toklist[i]
        elif (i not in self.__accumNames):
            # Modal name: return the most recently assigned value.
            return self.__tokdict[i][(- 1)][0]
        else:
            # Accumulating name: return all values gathered under that name.
            return ParseResults([v[0] for v in self.__tokdict[i]])

    def __setitem__(self, k, v, isinstance=isinstance):
        if isinstance(v, _ParseResultsWithOffset):
            self.__tokdict[k] = (self.__tokdict.get(k, list()) + [v])
            sub = v[0]
        elif isinstance(k, (int, slice)):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = (self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)])
            sub = v
        if isinstance(sub, ParseResults):
            # Weak ref avoids reference cycles between parent and child results.
            sub.__parent = wkref(self)

    def __delitem__(self, i):
        if isinstance(i, (int, slice)):
            mylen = len(self.__toklist)
            del self.__toklist[i]
            # Normalize a plain int index to a one-element slice.
            if isinstance(i, int):
                if (i < 0):
                    i += mylen
                i = slice(i, (i + 1))
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # Shift stored offsets left for every position that was removed.
            for (name, occurrences) in self.__tokdict.items():
                for j in removed:
                    for (k, (value, position)) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, (position - (position > j)))
        else:
            del self.__tokdict[i]

    def __contains__(self, k):
        return (k in self.__tokdict)

    def __len__(self):
        return len(self.__toklist)

    def __bool__(self):
        return (not (not self.__toklist))
    __nonzero__ = __bool__  # Python 2 spelling of __bool__

    def __iter__(self):
        return iter(self.__toklist)

    def __reversed__(self):
        return iter(self.__toklist[::(- 1)])

    def _iterkeys(self):
        if hasattr(self.__tokdict, 'iterkeys'):
            return self.__tokdict.iterkeys()
        else:
            return iter(self.__tokdict)

    def _itervalues(self):
        return (self[k] for k in self._iterkeys())

    def _iteritems(self):
        return ((k, self[k]) for k in self._iterkeys())

    # Expose dict-style views with the naming convention of the running
    # Python major version (iterators on Py3, iter*/list pairs on Py2).
    if PY_3:
        keys = _iterkeys
        values = _itervalues
        items = _iteritems
    else:
        iterkeys = _iterkeys
        itervalues = _itervalues
        iteritems = _iteritems

        def keys(self):
            return list(self.iterkeys())

        def values(self):
            return list(self.itervalues())

        def items(self):
            return list(self.iteritems())

    def haskeys(self):
        # True if any named results were stored.
        return bool(self.__tokdict)

    def pop(self, *args, **kwargs):
        """Remove and return an item; supports list-style pop(index) and
        dict-style pop(key, default=...)."""
        if (not args):
            args = [(- 1)]
        for (k, v) in kwargs.items():
            if (k == 'default'):
                args = (args[0], v)
            else:
                raise TypeError(("pop() got an unexpected keyword argument '%s'" % k))
        if (isinstance(args[0], int) or (len(args) == 1) or (args[0] in self)):
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
        else:
            defaultvalue = args[1]
            return defaultvalue

    def get(self, key, defaultValue=None):
        if (key in self):
            return self[key]
        else:
            return defaultValue

    def insert(self, index, insStr):
        self.__toklist.insert(index, insStr)
        # Shift stored offsets right for positions after the insertion point.
        for (name, occurrences) in self.__tokdict.items():
            for (k, (value, position)) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, (position + (position > index)))

    def append(self, item):
        self.__toklist.append(item)

    def extend(self, itemseq):
        if isinstance(itemseq, ParseResults):
            self += itemseq
        else:
            self.__toklist.extend(itemseq)

    def clear(self):
        del self.__toklist[:]
        self.__tokdict.clear()

    def __getattr__(self, name):
        # Unknown attributes resolve as named results; missing names yield ''.
        try:
            return self[name]
        except KeyError:
            return ''
        # NOTE(review): everything below is unreachable (both paths above
        # return); it appears to be a leftover from an older implementation.
        if (name in self.__tokdict):
            if (name not in self.__accumNames):
                return self.__tokdict[name][(- 1)][0]
            else:
                return ParseResults([v[0] for v in self.__tokdict[name]])
        else:
            return ''

    def __add__(self, other):
        ret = self.copy()
        ret += other
        return ret

    def __iadd__(self, other):
        if other.__tokdict:
            offset = len(self.__toklist)
            # Negative offsets mark "end-relative" positions; rebase the rest.
            addoffset = (lambda a: (offset if (a < 0) else (a + offset)))
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) for (k, vlist) in otheritems for v in vlist]
            for (k, v) in otherdictitems:
                self[k] = v
                if isinstance(v[0], ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update(other.__accumNames)
        return self

    def __radd__(self, other):
        # Supports sum() over ParseResults, which starts from 0.
        if (isinstance(other, int) and (other == 0)):
            return self.copy()
        else:
            return (other + self)

    def __repr__(self):
        return ('(%s, %s)' % (repr(self.__toklist), repr(self.__tokdict)))

    def __str__(self):
        return (('[' + ', '.join(((_ustr(i) if isinstance(i, ParseResults) else repr(i)) for i in self.__toklist))) + ']')

    def _asStringList(self, sep=''):
        out = []
        for item in self.__toklist:
            if (out and sep):
                out.append(sep)
            if isinstance(item, ParseResults):
                out += item._asStringList()
            else:
                out.append(_ustr(item))
        return out

    def asList(self):
        """Return the token list as a (recursively converted) plain list."""
        return [(res.asList() if isinstance(res, ParseResults) else res) for res in self.__toklist]

    def asDict(self):
        """Return named results as a (recursively converted) plain dict."""
        if PY_3:
            item_fn = self.items
        else:
            item_fn = self.iteritems

        def toItem(obj):
            if isinstance(obj, ParseResults):
                if obj.haskeys():
                    return obj.asDict()
                else:
                    return [toItem(v) for v in obj]
            else:
                return obj
        return dict(((k, toItem(v)) for (k, v) in item_fn()))

    def copy(self):
        """Shallow copy sharing nothing mutable except the token values."""
        ret = ParseResults(self.__toklist)
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update(self.__accumNames)
        ret.__name = self.__name
        return ret

    def asXML(self, doctag=None, namedItemsOnly=False, indent='', formatted=True):
        """Render the results as an XML fragment (names become tags)."""
        nl = '\n'
        out = []
        # Map token offset -> result name, for tagging positional children.
        namedItems = dict(((v[1], k) for (k, vlist) in self.__tokdict.items() for v in vlist))
        nextLevelIndent = (indent + ' ')
        if (not formatted):
            indent = ''
            nextLevelIndent = ''
            nl = ''
        selfTag = None
        if (doctag is not None):
            selfTag = doctag
        elif self.__name:
            selfTag = self.__name
        if (not selfTag):
            if namedItemsOnly:
                return ''
            else:
                selfTag = 'ITEM'
        out += [nl, indent, '<', selfTag, '>']
        for (i, res) in enumerate(self.__toklist):
            if isinstance(res, ParseResults):
                if (i in namedItems):
                    out += [res.asXML(namedItems[i], (namedItemsOnly and (doctag is None)), nextLevelIndent, formatted)]
                else:
                    out += [res.asXML(None, (namedItemsOnly and (doctag is None)), nextLevelIndent, formatted)]
            else:
                resTag = None
                if (i in namedItems):
                    resTag = namedItems[i]
                if (not resTag):
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = 'ITEM'
                xmlBodyText = _xml_escape(_ustr(res))
                out += [nl, nextLevelIndent, '<', resTag, '>', xmlBodyText, '</', resTag, '>']
        out += [nl, indent, '</', selfTag, '>']
        return ''.join(out)

    def __lookup(self, sub):
        # Reverse lookup: find the name under which ``sub`` was stored.
        for (k, vlist) in self.__tokdict.items():
            for (v, loc) in vlist:
                if (sub is v):
                    return k
        return None

    def getName(self):
        """Best-effort name of these results: own name, name in the parent,
        or the single stored name when unambiguous."""
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif ((len(self) == 1) and (len(self.__tokdict) == 1) and (next(iter(self.__tokdict.values()))[0][1] in (0, (- 1)))):
            return next(iter(self.__tokdict.keys()))
        else:
            return None

    def dump(self, indent='', depth=0, full=True):
        """Human-readable diagnostic listing of tokens and named results."""
        out = []
        NL = '\n'
        out.append((indent + _ustr(self.asList())))
        if full:
            if self.haskeys():
                items = sorted(((str(k), v) for (k, v) in self.items()))
                for (k, v) in items:
                    if out:
                        out.append(NL)
                    out.append(('%s%s- %s: ' % (indent, (' ' * depth), k)))
                    if isinstance(v, ParseResults):
                        if v:
                            out.append(v.dump(indent, (depth + 1)))
                        else:
                            out.append(_ustr(v))
                    else:
                        out.append(repr(v))
            elif any((isinstance(vv, ParseResults) for vv in self)):
                v = self
                for (i, vv) in enumerate(v):
                    if isinstance(vv, ParseResults):
                        out.append(('\n%s%s[%d]:\n%s%s%s' % (indent, (' ' * depth), i, indent, (' ' * (depth + 1)), vv.dump(indent, (depth + 1)))))
                    else:
                        out.append(('\n%s%s[%d]:\n%s%s%s' % (indent, (' ' * depth), i, indent, (' ' * (depth + 1)), _ustr(vv))))
        return ''.join(out)

    def pprint(self, *args, **kwargs):
        # Delegates to pprint.pprint on the plain-list form.
        pprint.pprint(self.asList(), *args, **kwargs)

    def __getstate__(self):
        # Weak parent ref is dereferenced for pickling (or None if dead).
        return (self.__toklist, (self.__tokdict.copy(), (((self.__parent is not None) and self.__parent()) or None), self.__accumNames, self.__name))

    def __setstate__(self, state):
        self.__toklist = state[0]
        (self.__tokdict, par, inAccumNames, self.__name) = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if (par is not None):
            self.__parent = wkref(par)
        else:
            self.__parent = None

    def __getnewargs__(self):
        return (self.__toklist, self.__name, self.__asList, self.__modal)

    def __dir__(self):
        # Include named results so they show up in tab-completion.
        return (dir(type(self)) + list(self.keys()))
class Partition9(nn.Module):
    """Auto-generated pipeline-parallel partition (stage 9) of a
    T5ForConditionalGeneration model.

    Covers the tail of encoder block 12 (self-attention output + FF) and the
    head of encoder block 13 (layer norm + Q/K projections). Layers are
    registered as ``l_0`` .. ``l_10`` in the order of ``LAYER_SCOPES``.
    """

    # Fully qualified scopes of the traced layers owned by this partition.
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5Block[12]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[12]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[12]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[12]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[12]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[12]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[12]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[12]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]']
    # No standalone parameters/buffers are assigned to this stage.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:9'):
        super().__init__()
        # Register layers in pipeline order as l_0 .. l_N.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register standalone parameters (p_i) and buffers (b_i), if any.
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Four flat inputs, no nesting (see forward()).
        self.input_structure = [1, 1, 1, 1]
        # Mapping from partition-local names to original model state-dict names.
        self.lookup = {'l_0': 'encoder.12.0.SelfAttention.dropout', 'l_1': 'encoder.12.0.SelfAttention.o', 'l_2': 'encoder.12.0.dropout', 'l_3': 'encoder.12.1.layer_norm', 'l_4': 'encoder.12.1.DenseReluDense.wi', 'l_5': 'encoder.12.1.DenseReluDense.dropout', 'l_6': 'encoder.12.1.DenseReluDense.wo', 'l_7': 'encoder.12.1.dropout', 'l_8': 'encoder.13.0.layer_norm', 'l_9': 'encoder.13.0.SelfAttention.q', 'l_10': 'encoder.13.0.SelfAttention.k'}
        self.to(self.device)

    def forward(self, *args):
        # x0: residual input, x1: value tensor, x2: batch size, x3: attention scores.
        (x0, x1, x2, x3) = unflatten(args, self.input_structure)
        # Reshape values to (batch, heads=32, seq, head_dim=128).
        t_0 = x1.view(x2, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        # Softmax over attention scores in float32 for numerical stability.
        t_1 = x3.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_1 = t_1.type_as(x3)
        t_1 = self.l_0(t_1)
        # Attention-weighted values, back to (batch, seq, 32*128=4096).
        t_0 = torch.matmul(t_1, t_0)
        t_0 = t_0.transpose(1, 2)
        t_0 = t_0.contiguous()
        t_0 = t_0.view(x2, (- 1), 4096)
        t_0 = self.l_1(t_0)
        t_0 = self.l_2(t_0)
        # Residual connection around self-attention.
        t_0 = (x0 + t_0)
        # Feed-forward sub-layer of block 12 with residual.
        t_1 = self.l_3(t_0)
        t_1 = self.l_4(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_5(t_1)
        t_1 = self.l_6(t_1)
        t_1 = self.l_7(t_1)
        t_1 = (t_0 + t_1)
        # Start of block 13: layer norm then Q/K projections.
        t_0 = self.l_8(t_1)
        t_2 = t_0.size()
        t_3 = self.l_9(t_0)
        t_4 = self.l_10(t_0)
        t_2 = t_2[0]
        t_3 = t_3.view(t_2, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_4 = t_4.view(t_2, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        return list(flatten((t_1, t_0, t_2, t_3, t_4)))

    # The methods below delegate to module-level helpers that rewrite
    # partition-local names via ``self.lookup``.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class ClassSpecificDistributedSampler(_DistributedSampler):
    """Distributed sampler that draws samples according to per-class
    probabilities exposed by the dataset (``dataset.class_prob``).

    With ``dynamic_length=True`` the epoch length follows the class
    probabilities; otherwise a fixed ``total_size`` is drawn via
    multinomial sampling.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, dynamic_length=True, shuffle=True, seed=0):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle
        # Unwrap RepeatDataset to reach the underlying annotated dataset.
        if (type(dataset).__name__ == 'RepeatDataset'):
            dataset = dataset.dataset
        assert hasattr(dataset, 'class_prob')
        self.class_prob = dataset.class_prob
        self.dynamic_length = dynamic_length
        self.seed = (seed if (seed is not None) else 0)

    def __iter__(self):
        # Seeded per-epoch generator so every replica derives the same order.
        g = torch.Generator()
        g.manual_seed((self.seed + self.epoch))
        class_indices = defaultdict(list)
        times = 1
        dataset = self.dataset
        if (type(dataset).__name__ == 'RepeatDataset'):
            times = dataset.times
            dataset = dataset.dataset
        # Bucket dataset indices by class label.
        for (i, item) in enumerate(dataset.video_infos):
            class_indices[item['label']].append(i)
        if self.dynamic_length:
            indices = []
            for (k, prob) in self.class_prob.items():
                prob = (prob * times)
                # Whole repetitions of the class bucket...
                for i in range(int((prob // 1))):
                    indices.extend(class_indices[k])
                # ...plus a random subset for the fractional remainder.
                # NOTE(review): rem_indices are positions WITHIN the class
                # bucket (0..len-1), not dataset indices — verify this is
                # intentional upstream.
                rem = int(((prob % 1) * len(class_indices[k])))
                rem_indices = torch.randperm(len(class_indices[k]), generator=g).tolist()[:rem]
                indices.extend(rem_indices)
            if self.shuffle:
                shuffle = torch.randperm(len(indices), generator=g).tolist()
                indices = [indices[i] for i in shuffle]
            # Epoch length is data-dependent; recompute the per-replica split.
            self.num_samples = math.ceil((len(indices) / self.num_replicas))
            self.total_size = (self.num_samples * self.num_replicas)
        else:
            # Fixed-length epoch: per-item probability is class_prob split
            # evenly over the items of that class.
            video_labels = [x['label'] for x in dataset.video_infos]
            probs = [(self.class_prob[lb] / len(class_indices[lb])) for lb in video_labels]
            indices = torch.multinomial(torch.Tensor(probs), self.total_size, replacement=True, generator=g)
            indices = indices.data.numpy().tolist()
        # Pad so indices split evenly, then take this rank's strided share.
        indices += indices[:(self.total_size - len(indices))]
        assert (len(indices) == self.total_size)
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert (len(indices) == self.num_samples)
        return iter(indices)
# NOTE(review): the decorator lines below appear truncated — presumably the
# "@pytest.mark" prefix was lost during extraction; confirm against the
# original test file.
.parametrize('num_of_slices', [2, 3, 5])
.parametrize('size', [197, 124])
.parametrize('batch_size', [1, 20])
.parametrize('shuffle', [False, True])
def test_sliced_data_iterator_race_condition(num_of_slices, size, batch_size, shuffle):
    """Exercise slicing of a cached data iterator to smoke out race conditions."""
    from nnabla.utils.data_source_implements import CacheDataSource
    from nnabla.utils.data_iterator import data_iterator_cache
    with generate_cache_dir(size) as cache_dir:
        rng = np.random.RandomState(313)
        # NOTE(review): the ``shuffle`` parameter is not forwarded here —
        # shuffle=True is hard-coded; verify whether that is intentional.
        iterator = data_iterator_cache(cache_dir, batch_size, shuffle=True)
        sliced_it = iterator.slice(rng, num_of_slices, 1)
        # Iterate past one full epoch to force wrap-around in the slice.
        for i in range((size + 5)):
            d = sliced_it.next()
        sliced_it.close()
        iterator.close()
class set_no_jit():
    """Scoped override of the module-global ``_NO_JIT`` flag.

    Note: the flag is flipped already in ``__init__`` (at construction time),
    not in ``__enter__``; ``__exit__`` restores the previous value.
    """

    def __init__(self, mode: bool) -> None:
        global _NO_JIT
        # Remember the current setting so __exit__ can restore it.
        self.prev = _NO_JIT
        _NO_JIT = mode

    def __enter__(self) -> None:
        # Nothing to do: the override already happened in __init__.
        pass

    def __exit__(self, *args: Any) -> bool:
        global _NO_JIT
        _NO_JIT = self.prev
        # Never suppress exceptions raised inside the with-block.
        return False
def gemm(A: dace.float32[(M, K)], B: dace.float32[(K, N)], C: dace.float32[(M, N)], alpha: dace.float32, beta: dace.float32):
    """General matrix-matrix multiply: ``C <- alpha * (A @ B) + beta * C``.

    Fix: the original body read ``(alpha * A) B`` — the matrix-multiply
    operator ``@`` between the two operands had been dropped, leaving a
    syntax error. Restored the ``@``.
    NOTE(review): a ``@dace.program`` decorator likely preceded this function
    and was lost with the same character stripping — verify upstream.
    """
    C[:] = (((alpha * A) @ B) + (beta * C))
class GenerationMixin(metaclass=DummyObject):
    """Import-error placeholder emitted when the ``torch`` backend is absent.

    Instantiation raises a helpful "please install torch" error via
    ``requires_backends``.
    """

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fail fast with an informative message instead of an obscure ImportError.
        requires_backends(self, ['torch'])
def main():
    """Entry point for CTRL-UDA ISL training: parse args, build model,
    discriminator, criteria, optimizers and data loaders, then train."""
    DEBUG = False
    parser = argparse.ArgumentParser(description='CTRL-UDA Training')
    parser.add_argument('--machine', type=int, default=(- 1), help='which machine to use')
    parser.add_argument('--expid', type=int, default=1, help='experiment id')
    parser.add_argument('--reso', type=str, default='FULL', help='inputs resolution full or low')
    parser.add_argument('--isl', type=str, default='true', help='activate the ISL training')
    parser.add_argument('--exp_root_dir', type=str, help='experiment root folder')
    parser.add_argument('--data_root', type=str, help='dataset root folder')
    parser.add_argument('--pret_model', type=str, help='pretrained weights to be used for initialization')
    parser.add_argument('--model_path', default='isl/model/path', type=str, help='trained model path for ISL training')
    cmdline_inputs = parser.parse_args()
    expid = cmdline_inputs.expid
    # Select the experiment config by id.
    # NOTE(review): an expid outside 1..5 leaves ``exp_file`` unbound and
    # raises NameError below — consider a default or explicit error.
    if (expid == 1):
        exp_file = 'ctrl/configs/synthia_to_cityscapes_16cls_isl.yml'
    elif (expid == 2):
        exp_file = 'ctrl/configs/synthia_to_cityscapes_7cls_fr_isl.yml'
    elif (expid == 3):
        exp_file = 'ctrl/configs/synthia_to_cityscapes_7cls_lr_isl.yml'
    elif (expid == 4):
        exp_file = 'ctrl/configs/synthia_to_mapillary_7cls_fr_isl.yml'
    elif (expid == 5):
        exp_file = 'ctrl/configs/synthia_to_mapillary_7cls_lr_isl.yml'
    cfg = convert_yaml_to_edict(exp_file)
    cfg = setup_exp_params(cfg, cmdline_inputs, DEBUG)
    # Seed every RNG source for reproducibility.
    torch.manual_seed(cfg.TRAIN.RANDOM_SEED)
    torch.cuda.manual_seed(cfg.TRAIN.RANDOM_SEED)
    np.random.seed(cfg.TRAIN.RANDOM_SEED)
    random.seed(cfg.TRAIN.RANDOM_SEED)
    # Tee stdout into the training log file.
    sys.stdout = Logger(cfg.TRAIN_LOG_FNAME)
    print_output_paths(cfg, is_isl_training=True)
    (model, discriminator, optim_state_dict, disc_optim_state_dict, resume_iteration) = get_model(cfg)
    if cfg.USE_DATA_PARALLEL:
        model = torch.nn.DataParallel(model)
    model = model.to(cfg.GPU_ID)
    if cfg.USE_DATA_PARALLEL:
        discriminator = torch.nn.DataParallel(discriminator)
    discriminator = discriminator.to(cfg.GPU_ID)
    criterion_dict = get_criterion()
    if cfg.USE_DATA_PARALLEL:
        criterion_dict['semseg'] = torch.nn.DataParallel(criterion_dict['semseg'])
        criterion_dict['depth'] = torch.nn.DataParallel(criterion_dict['depth'])
        criterion_dict['disc_loss'] = torch.nn.DataParallel(criterion_dict['disc_loss'])
    criterion_dict['semseg'].to(cfg.GPU_ID)
    criterion_dict['depth'].to(cfg.GPU_ID)
    criterion_dict['disc_loss'].to(cfg.GPU_ID)
    print(criterion_dict)
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = True
    (optimizer, optimizer_disc) = get_optimizer_v2(cfg, model, USeDataParallel=cfg.USE_DATA_PARALLEL, discriminator=discriminator, optim_state_dict=optim_state_dict, disc_optim_state_dict=disc_optim_state_dict)
    print(optimizer)
    if cfg.ENABLE_DISCRIMINATOR:
        print(optimizer_disc)
    (source_train_loader, target_train_loader, target_val_loader, source_train_nsamp, target_train_nsamp, target_test_nsamp) = get_data_loaders(cfg, get_target_train_loader=False)
    # Persist the effective config next to the snapshots.
    cfg_file = os.path.join(cfg.TRAIN.SNAPSHOT_DIR, 'cfg.yml')
    with open(cfg_file, 'w') as fp:
        yaml.dump(dict(cfg), fp)
    print('cfg written to: {}'.format(cfg_file))
    # NOTE(review): the discriminator optimizer built above is discarded here
    # and None is passed to train_model — presumably ISL training does not
    # update the discriminator; verify intent.
    optimizer_disc = None
    train_model(cfg, model, discriminator, resume_iteration, criterion_dict, optimizer, optimizer_disc, source_train_loader, target_train_loader, target_val_loader, source_train_nsamp, target_train_nsamp, target_test_nsamp)
def test_ClusterNodeSequence_init():
    """Constructor behaviour of ClusterNodeSequence: batch count equals the
    number of clusters, and invalid argument combinations raise ValueError."""
    graph = create_stellargraph()
    all_nodes = [list(graph.nodes())]

    # One cluster holding every node -> a single batch.
    seq = ClusterNodeSequence(graph=graph, clusters=all_nodes)
    assert (len(seq) == 1)

    # Three clusters -> three batches.
    seq = ClusterNodeSequence(graph=graph, clusters=[['a'], ['b', 'd'], ['c']])
    assert (len(seq) == 3)

    with pytest.raises(ValueError):
        ClusterNodeSequence(graph=graph, clusters=[list(graph.nodes())], q=1, targets=np.array([[0, 1]]))

    with pytest.raises(ValueError):
        ClusterNodeSequence(graph=graph, clusters=[list(graph.nodes())], q=1, targets=np.array([[0, 1], [1, 0]]), node_ids=['a'])

    with pytest.raises(ValueError):
        ClusterNodeSequence(graph=graph, clusters=[list(graph.nodes())], q=2)
def setup(app):
    """Sphinx extension entry point.

    Registers the link role once the builder is initialized and returns the
    extension metadata dict.
    """
    app.connect('builder-inited', setup_link_role)
    metadata = {'version': '0.1', 'parallel_read_safe': True}
    return metadata
class TMIn(TSIn):
    """SWIG proxy for the C++ ``TMIn`` (memory input stream) class.

    Auto-generated wrapper: every method forwards to the matching ``_snap``
    C entry point.
    """
    # Ownership flag: whether Python owns (and should destroy) the C++ object.
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        _snap.TMIn_swiginit(self, _snap.new_TMIn(*args))

    def New(*args):
        # Static factory for a new TMIn instance.
        return _snap.TMIn_New(*args)
    New = staticmethod(New)
    __swig_destroy__ = _snap.delete_TMIn

    def GetBfC(self):
        # Current buffer character position.
        return _snap.TMIn_GetBfC(self)

    def GetBfL(self):
        # Total buffer length.
        return _snap.TMIn_GetBfL(self)

    def SetBfC(self, Pos):
        # Seek the buffer position to Pos.
        return _snap.TMIn_SetBfC(self, Pos)

    def CountNewLinesInRange(self, Lb, Ub):
        return _snap.TMIn_CountNewLinesInRange(self, Lb, Ub)

    def GetLineStartPos(self, Ind):
        return _snap.TMIn_GetLineStartPos(self, Ind)

    def GetLineEndPos(self, Ind):
        return _snap.TMIn_GetLineEndPos(self, Ind)

    def GetLine(self, Ind):
        return _snap.TMIn_GetLine(self, Ind)

    def SkipCommentLines(self):
        return _snap.TMIn_SkipCommentLines(self)

    def GetBfAddr(self):
        return _snap.TMIn_GetBfAddr(self)
def test_animation():
    """Smoke-test a short SIS diffusion run with GIF animation enabled."""
    params = {
        'model': 'SIS',
        'b': 0.00208,
        'd': 0.01,
        'c': 1,
        'runs': 10,
        'steps': 500,
        'seed': 1,
        'diffusion': 'max',
        'method': 'add_edge_random',
        'k': 15,
        'plot_transition': False,
        'gif_animation': True,
    }
    run_test(params)
class Discriminator(nn.Module):
    """3D-convolutional discriminator for video clips.

    Input: a 5D tensor (N, 3, T, H, W); output: per-patch sigmoid scores
    flattened to shape (-1, 1). When ``opt.sample_duration == 16`` the
    fourth conv also downsamples the temporal axis.
    """

    def __init__(self, opt=None):
        super(Discriminator, self).__init__()
        self.nc = 32
        base = self.nc
        # Choose the temporal stride of the 4th conv from the clip length.
        if opt is not None:
            stride = (2, 2, 2) if opt.sample_duration == 16 else (1, 2, 2)
        else:
            stride = (1, 2, 2)
        layers = [
            nn.Conv3d(3, base, kernel_size=3, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv3d(base, base * 2, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm3d(base * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv3d(base * 2, base * 4, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm3d(base * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv3d(base * 4, base * 8, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm3d(base * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv3d(base * 8, base * 8, kernel_size=3, stride=(1, 2, 2), padding=1, bias=False),
            nn.BatchNorm3d(base * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv3d(base * 8, 1, kernel_size=(1, 4, 4), stride=(1, 2, 2), padding=0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        # Flatten the spatial score map to one column of probabilities.
        return self.main(x).view(-1, 1)
def ksp_options():
    """PETSc KSP options: one CG iteration preconditioned with BoomerAMG.

    Tolerances are zeroed and max_it is 1, so the solve performs exactly a
    single preconditioned iteration (with true-residual monitoring enabled).
    """
    options = {
        'ksp_type': 'cg',
        'pc_type': 'hypre',
        'pc_hypre_type': 'boomeramg',
        'ksp_rtol': 0.0,
        'ksp_atol': 0.0,
        'ksp_max_it': 1,
        'ksp_monitor_true_residual': None,
    }
    return options
class GreedyMaskCalculator():
    """Greedily builds per-SIMD-group pruning masks so the pruned graph's
    weight memory fits the target KPI.

    The mask starts fully pruned; groups are re-enabled one at a time (by
    score) until the memory budget would be exceeded.
    """

    def __init__(self, prunable_nodes: List[BaseNode], fw_info: FrameworkInfo, simd_groups_scores: Dict[(BaseNode, np.ndarray)], target_kpi: KPI, graph: Graph, fw_impl: PruningFrameworkImplementation, tpc: TargetPlatformCapabilities, simd_groups_indices: Dict[(BaseNode, List[List[int]])]):
        self.prunable_nodes = prunable_nodes
        self.fw_info = fw_info
        self.target_kpi = target_kpi
        self.graph = graph
        self.fw_impl = fw_impl
        self.tpc = tpc
        self.simd_groups_indices = simd_groups_indices
        self.simd_groups_scores = simd_groups_scores
        # Output-channel mask at SIMD-group granularity (starts fully pruned).
        self.oc_pruning_mask = PerSIMDGroupMask(prunable_nodes=prunable_nodes, fw_info=fw_info, simd_groups_indices=simd_groups_indices)
        self.memory_calculator = MemoryCalculator(graph=graph, fw_info=fw_info, fw_impl=fw_impl)

    def get_mask(self) -> Dict[(BaseNode, np.ndarray)]:
        """Return the current per-node channel mask."""
        return self.oc_pruning_mask.get_mask()

    def compute_mask(self):
        """Greedy loop: keep re-enabling the highest-scoring pruned SIMD group
        while memory stays under the target; revert the step that overshoots."""
        current_memory = self.memory_calculator.get_pruned_graph_memory(masks=self.oc_pruning_mask.get_mask(), include_padded_channels=self.tpc.is_simd_padding)
        if (current_memory > self.target_kpi.weights_memory):
            # Even the maximally pruned graph does not fit the KPI.
            Logger.error(f'Minimal required memory is {current_memory}, but target KPI is {self.target_kpi.weights_memory}')
        while ((current_memory < self.target_kpi.weights_memory) and self.oc_pruning_mask.has_pruned_channel()):
            (node_to_remain, group_to_remain_idx) = self._get_most_sensitive_simd_group_candidate()
            self.oc_pruning_mask.set_mask_value_for_simd_group(node=node_to_remain, group_index=group_to_remain_idx, mask_indicator=MaskIndicator.REMAINED)
            current_memory = self.memory_calculator.get_pruned_graph_memory(masks=self.oc_pruning_mask.get_mask(), include_padded_channels=self.tpc.is_simd_padding)
            if (current_memory > self.target_kpi.weights_memory):
                # Overshot the budget: undo the last re-enabled group.
                self.oc_pruning_mask.set_mask_value_for_simd_group(node=node_to_remain, group_index=group_to_remain_idx, mask_indicator=MaskIndicator.PRUNED)

    def _get_most_sensitive_simd_group_candidate(self) -> Tuple[(BaseNode, int)]:
        """Pick the (node, group) with the highest score among each node's
        first still-pruned SIMD group."""
        best_score = (- np.inf)
        best_node = None
        best_group_idx = (- 1)
        for (node, mask) in self.oc_pruning_mask.get_mask_simd().items():
            # First group index whose mask entry is 0 (still pruned).
            # NOTE(review): np.argmax(mask == 0) is also 0 when NO group is
            # pruned, so this skips a node whose FIRST group is pruned too —
            # presumably group 0 is always kept; verify.
            group_idx = int(np.argmax((mask == 0)))
            if (group_idx != 0):
                score = self.simd_groups_scores[node][group_idx]
                if (score > best_score):
                    best_score = score
                    best_node = node
                    best_group_idx = group_idx
        if (best_node is None):
            Logger.error('No prunable SIMD group found.')
        return (best_node, best_group_idx)
class ModuleFloatShadow(nn.Module):
    """Pairs a module with a float32 deep-copy ("shadow") of itself.

    The standard parameter/module accessors are redirected to the float
    copy, while the ``original_*`` variants expose the wrapped module
    unchanged.
    """

    def __init__(self, module):
        super(ModuleFloatShadow, self).__init__()
        self.original_module = module
        # Independent copy cast to float32; the original keeps its dtype.
        self.float_module = deepcopy(module)
        self.float_module.to(dtype=torch.float)

    # --- accessors redirected to the float shadow ---
    def parameters(self, *args, **kwargs):
        return self.float_module.parameters(*args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return self.float_module.named_parameters(*args, **kwargs)

    def modules(self, *args, **kwargs):
        return self.float_module.modules(*args, **kwargs)

    def named_modules(self, *args, **kwargs):
        return self.float_module.named_modules(*args, **kwargs)

    # --- accessors for the untouched original module ---
    def original_parameters(self, *args, **kwargs):
        return self.original_module.parameters(*args, **kwargs)

    def original_named_parameters(self, *args, **kwargs):
        return self.original_module.named_parameters(*args, **kwargs)

    def original_modules(self, *args, **kwargs):
        return self.original_module.modules(*args, **kwargs)

    def original_named_modules(self, *args, **kwargs):
        return self.original_module.named_modules(*args, **kwargs)
def function_namespace(declaration):
    """Choose the namespace for a generated function.

    Declarations taking TensorOptions, and ``*_like`` factory ops, live in
    the ``torch`` namespace; everything else goes to ``at``.
    """
    # Short-circuit preserved: op_name() is only consulted when the
    # declaration has no tensor options.
    in_torch_namespace = has_tensor_options(declaration) or op_name(declaration).endswith('_like')
    return 'torch' if in_torch_namespace else 'at'
class LandscapeAsModel(Model):
    """Adapter presenting a ground-truth Landscape through the Model API.

    Since the "model" is exact by construction, ``train`` is a no-op.
    """

    def __init__(self, landscape: flexs.Landscape):
        super().__init__(f'LandscapeAsModel={landscape.name}')
        self.landscape = landscape

    def _fitness_function(self, sequences: SEQUENCES_TYPE) -> np.ndarray:
        # Delegate straight to the underlying landscape.
        return self.landscape._fitness_function(sequences)

    def train(self, sequences: SEQUENCES_TYPE, labels: List[Any]):
        # Nothing to fit: the landscape already defines the exact fitness.
        pass
def _try_make_config_directory(path: PathLike) -> None:
    """Best-effort creation of the parent directory of ``path``.

    Filesystem errors (permissions, read-only media, ...) are deliberately
    swallowed: callers treat the directory as optional.
    """
    try:
        # 0o755 is the same value the original spelled as decimal 493.
        Path(path).parent.mkdir(mode=0o755, parents=True, exist_ok=True)
    except OSError:
        pass
def in_bounds(val: Any, domain: Any) -> bool:
    """Check whether ``val`` lies within ``domain`` (inclusive bounds).

    Supported combinations:
    - scalar val + (lo, hi) domain,
    - sequence val + shared (lo, hi) domain,
    - sequence val + per-element [(lo, hi), ...] domain (same length).
    Raises ValueError for mismatched shapes.
    """
    def is_sequence_like(obj):
        return isinstance(obj, (Sequence, np.ndarray))

    if is_sequence_like(val):
        if is_sequence_like(domain[0]):
            # Per-element domains must align one-to-one with val.
            if len(val) != len(domain):
                raise ValueError('if ``val`` is a Sequence, ``domain`` must have the same length as ``val``.')
            return all(d[0] <= v <= d[1] for v, d in zip(val, domain))
        # Single shared interval applied to every element.
        return all(domain[0] <= v <= domain[1] for v in val)
    if is_sequence_like(domain[0]):
        raise ValueError('if ``val`` is not a Sequence, ``domain`` must be a 2-tuple of numbers.')
    return domain[0] <= val <= domain[1]
def get_args_and_hdf5_file(activation, network):
    """Build the volnet training command line for one (activation, network) run.

    ``network`` is a (channels, num_layers) pair. Returns the argv list and
    the path of the HDF5 file the run will produce.
    """
    channels, num_layers = network
    output_name = ('run_%s_%s_%s' % (activation.replace(':', '-'), channels, num_layers))
    # Half of the channels not reserved for the 4 base inputs go to Fourier features.
    fourier_count = str((channels - 4) // 2)
    # num_layers - 1 hidden layers, all of width ``channels``.
    hidden_layers = ':'.join([str(channels)] * (num_layers - 1))
    parameters = [
        sys.executable, 'volnet/train_volnet.py', CONFIG_FILE,
        '--train:mode', 'world',
        '--train:samples', '256**3',
        '--train:batchsize', '64*64*128',
        '--train:sampler_importance', '0.01',
        '--val:copy_and_split',
        '--outputmode', 'density:direct',
        '--lossmode', 'density',
        '-l1', '1',
        '--lr_step', '50',
        '-i', '200',
        '--fouriercount', fourier_count,
        '--fourierstd', '1.0',
        '--activation', activation,
        '--layers', hidden_layers,
        '--logdir', 'volnet/results/eval_network_configs/log',
        '--modeldir', 'volnet/results/eval_network_configs/model',
        '--hdf5dir', 'volnet/results/eval_network_configs/hdf5',
        '--name', output_name,
        '--save_frequency', '50',
    ]
    hdf5_file = ('volnet/results/eval_network_configs/hdf5/' + output_name + '.hdf5')
    return (parameters, hdf5_file)
class ProgressBarLogger(ProgressLogger):
    """Progress logger that tracks one or more named progress bars.

    Bar state lives in ``self.state['bars']`` as an ordered mapping
    name -> dict(title, index, total, message, indent).

    Fix: ``bars`` was defined as a plain method, but the rest of the class
    uses it as a mapping (``bar in self.bars``, ``self.bars[bar][...]``),
    which raises TypeError. It is restored to a ``@property`` (matching the
    upstream proglog implementation).
    """

    # Number of spaces added per nesting level of logged bars.
    bar_indent = 2

    def __init__(self, init_state=None, bars=None, ignored_bars=None, logged_bars='all', min_time_interval=0, ignore_bars_under=0):
        """
        bars: None, or a list/tuple of bar names, or a prebuilt mapping.
        ignored_bars: iterable of bar names to skip, or 'all_others'.
        logged_bars: 'all', or the collection of bar names to log.
        min_time_interval: minimum seconds between bar-index updates.
        ignore_bars_under: skip bars whose iterable is shorter than this.
        """
        ProgressLogger.__init__(self, init_state)
        if (bars is None):
            bars = OrderedDict()
        elif isinstance(bars, (list, tuple)):
            bars = OrderedDict([(b, dict(title=b, index=(- 1), total=None, message=None, indent=0)) for b in bars])
        if isinstance(ignored_bars, (list, tuple)):
            ignored_bars = set(ignored_bars)
        self.ignored_bars = ignored_bars
        self.logged_bars = logged_bars
        self.state['bars'] = bars
        self.min_time_interval = min_time_interval
        self.ignore_bars_under = ignore_bars_under

    @property
    def bars(self):
        """The mapping of bar name -> bar state dict."""
        return self.state['bars']

    def bar_is_ignored(self, bar):
        if (self.ignored_bars is None):
            return False
        elif (self.ignored_bars == 'all_others'):
            # Only bars already registered are kept.
            return (bar not in self.bars)
        else:
            return (bar in self.ignored_bars)

    def bar_is_logged(self, bar):
        if (not self.logged_bars):
            return False
        elif (self.logged_bars == 'all'):
            return True
        else:
            return (bar in self.logged_bars)

    def iterable_is_too_short(self, iterable):
        # Unsized iterables are never considered too short.
        length = (len(iterable) if hasattr(iterable, '__len__') else None)
        return ((length is not None) and (length < self.ignore_bars_under))

    def iter_bar(self, bar_prefix='', **kw):
        """Wrap an iterable so iterating it updates the named bar.

        Called as ``iter_bar(barname=iterable)``; an optional ``bar_message``
        callable maps each item to a message string.
        """
        if ('bar_message' in kw):
            bar_message = kw.pop('bar_message')
        else:
            bar_message = None
        (bar, iterable) = kw.popitem()
        if (self.bar_is_ignored(bar) or self.iterable_is_too_short(iterable)):
            return iterable
        bar = (bar_prefix + bar)
        if hasattr(iterable, '__len__'):
            self(**{(bar + '__total'): len(iterable)})

        def new_iterable():
            last_time = time.time()
            i = 0
            for (i, it) in enumerate(iterable):
                now_time = time.time()
                # Throttle index updates to min_time_interval.
                if ((i == 0) or ((now_time - last_time) > self.min_time_interval)):
                    if (bar_message is not None):
                        self(**{(bar + '__message'): bar_message(it)})
                    self(**{(bar + '__index'): i})
                    last_time = now_time
                (yield it)
            # Ensure the final index (and completion, index == i + 1) is reported.
            if (self.bars[bar]['index'] != i):
                self(**{(bar + '__index'): i})
            self(**{(bar + '__index'): (i + 1)})
        return new_iterable()

    def bars_callback(self, bar, attr, value, old_value=None):
        """Hook invoked on every bar attribute change (no-op by default)."""
        pass

    def __call__(self, **kw):
        # Process '__total' updates first so new bars are sized before indexing.
        items = sorted(kw.items(), key=(lambda kv: (not kv[0].endswith('total'))))
        for (key, value) in items:
            if ('__' in key):
                (bar, attr) = key.split('__')
                if self.bar_is_ignored(bar):
                    continue
                kw.pop(key)
                if (bar not in self.bars):
                    self.bars[bar] = dict(title=bar, index=(- 1), total=None, message=None)
                old_value = self.bars[bar][attr]
                if self.bar_is_logged(bar):
                    # A decreasing index means the bar restarted (a new pass).
                    new_bar = ((attr == 'index') and (value < old_value))
                    if ((attr == 'total') or new_bar):
                        self.bars[bar]['indent'] = self.log_indent
                    else:
                        self.log_indent = self.bars[bar]['indent']
                    self.log(('[%s] %s: %s' % (bar, attr, value)))
                    self.log_indent += self.bar_indent
                self.bars[bar][attr] = value
                self.bars_callback(bar, attr, value, old_value)
        self.state.update(kw)
        self.callback(**kw)
def test(model):
    """Compare the PyTorch model's outputs against a TensorFlow reference dump.

    Runs a single 299x299 image through ``model`` and prints the distance to
    the activations stored in dump/InceptionV4/Logits.h5. Returns the
    softmaxed outputs.
    """
    model.eval()
    from scipy import misc
    # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this only
    # works on old SciPy with Pillow installed — consider imageio.imread.
    img = misc.imread('lena_299.png')
    inputs = torch.zeros(1, 299, 299, 3)
    inputs[0] = torch.from_numpy(img)
    # NHWC -> NCHW: (1, 299, 299, 3) -> (1, 3, 299, 299).
    inputs.transpose_(1, 3)
    inputs.transpose_(2, 3)
    outputs = model.forward(torch.autograd.Variable(inputs))
    h5f = h5py.File('dump/InceptionV4/Logits.h5', 'r')
    outputs_tf = torch.from_numpy(h5f['out'][()])
    h5f.close()
    # NOTE(review): softmax without an explicit ``dim`` relies on deprecated
    # implicit-dimension behaviour; newer torch warns/raises here.
    outputs = torch.nn.functional.softmax(outputs)
    print(torch.dist(outputs.data, outputs_tf))
    return outputs
class ConformerEncoderLayer(rf.Module):
    """One Conformer block: 0.5*FFN -> rel-pos self-attention -> conv module -> 0.5*FFN,
    each sub-block with pre-LayerNorm, dropout and a residual connection,
    followed by a final LayerNorm.
    """

    def __init__(self, out_dim: Dim=Dim(512, name='conformer-enc-default-out-dim'), *, ff_dim: Dim=NotSpecified, ff_activation: Callable[([Tensor], Tensor)]=rf.swish, dropout: float=0.1, conv_kernel_size: int=32, conv_norm: Union[(rf.BatchNorm, type, Any)]=NotSpecified, conv_norm_opts: Optional[Dict[(str, Any)]]=None, num_heads: int=4, self_att: Optional[Union[(rf.RelPosSelfAttention, rf.Module, type, Any)]]=None, self_att_opts: Optional[Dict[(str, Any)]]=None, att_dropout: float=0.1):
        """
        :param out_dim: model/residual dimension of the block
        :param ff_dim: hidden dim of both feed-forward modules; defaults to 4 * out_dim
        :param ff_activation: activation inside the feed-forward modules
        :param dropout: dropout applied after each sub-block
        :param conv_kernel_size: depthwise conv kernel size of the conv module
        :param conv_norm: norm inside the conv module (class, instance, or BatchNorm default)
        :param conv_norm_opts: options forwarded to the conv norm constructor
        :param num_heads: attention heads
        :param self_att: attention module/class override; default is RelPosSelfAttention
        :param self_att_opts: options forwarded to the attention constructor
        :param att_dropout: dropout on attention weights
        """
        super().__init__()
        self.dropout = dropout
        self.dropout_broadcast = rf.dropout_broadcast_default()
        self.out_dim = out_dim
        if ff_dim is None or ff_dim is NotSpecified:
            # BUG FIX: the default for `ff_dim` is NotSpecified, but previously only
            # `None` was checked, so the intended 4 * out_dim default never applied.
            ff_dim = (4 * out_dim)
        self.ffn1 = ConformerPositionwiseFeedForward(out_dim=out_dim, ff_dim=ff_dim, dropout=dropout, activation=ff_activation)
        self.ffn1_layer_norm = rf.LayerNorm(out_dim)
        self.ffn2 = ConformerPositionwiseFeedForward(out_dim=out_dim, ff_dim=ff_dim, dropout=dropout, activation=ff_activation)
        self.ffn2_layer_norm = rf.LayerNorm(out_dim)
        if ((conv_norm is NotSpecified) or (conv_norm is rf.BatchNorm)):
            # Default conv norm: BatchNorm without masking (common Conformer setup).
            conv_norm_opts = (conv_norm_opts.copy() if conv_norm_opts else {})
            conv_norm_opts.setdefault('use_mask', False)
            conv_norm = rf.BatchNorm(out_dim, **conv_norm_opts)
        elif isinstance(conv_norm, type):
            conv_norm = conv_norm(out_dim, **(conv_norm_opts or {}))
        self.conv_block = ConformerConvBlock(out_dim=out_dim, kernel_size=conv_kernel_size, norm=conv_norm)
        self.conv_layer_norm = rf.LayerNorm(out_dim)
        if ((self_att is None) or isinstance(self_att, type)):
            self_att_opts_ = dict(in_dim=out_dim, proj_dim=out_dim, key_dim_total=out_dim, value_dim_total=out_dim, num_heads=num_heads, att_dropout=att_dropout)
            if self_att_opts:
                self_att_opts_.update(self_att_opts)
            if (self_att is None):
                self.self_att = rf.RelPosSelfAttention(**self_att_opts_)
            else:
                self.self_att = self_att(**self_att_opts_)
        else:
            # Pre-constructed attention module: used as-is.
            self.self_att = self_att
        self.self_att_layer_norm = rf.LayerNorm(out_dim)
        self.final_layer_norm = rf.LayerNorm(out_dim)

    def __call__(self, inp: Tensor, *, spatial_dim: Dim) -> Tensor:
        """Apply the Conformer block to `inp` over `spatial_dim`."""
        # Macaron-style first feed-forward, half-weighted residual.
        x_ffn1_ln = self.ffn1_layer_norm(inp)
        x_ffn1 = self.ffn1(x_ffn1_ln)
        x_ffn1_out = ((0.5 * rf.dropout(x_ffn1, self.dropout, axis=(self.dropout_broadcast and self.out_dim))) + inp)
        # Multi-head self-attention with residual.
        x_mhsa_ln = self.self_att_layer_norm(x_ffn1_out)
        x_mhsa = self.self_att(x_mhsa_ln, axis=spatial_dim)
        x_mhsa = rf.dropout(x_mhsa, self.dropout, axis=(self.dropout_broadcast and self.out_dim))
        x_mhsa_out = (x_mhsa + x_ffn1_out)
        # Convolution module with residual.
        x_conv_ln = self.conv_layer_norm(x_mhsa_out)
        x_conv = self.conv_block(x_conv_ln, spatial_dim=spatial_dim)
        x_conv_out = (rf.dropout(x_conv, self.dropout, axis=(self.dropout_broadcast and self.out_dim)) + x_mhsa_out)
        # Second macaron feed-forward, half-weighted residual, then final norm.
        x_ffn2_ln = self.ffn2_layer_norm(x_conv_out)
        x_ffn2 = self.ffn2(x_ffn2_ln)
        x_ffn2_out = ((0.5 * rf.dropout(x_ffn2, self.dropout, axis=(self.dropout_broadcast and self.out_dim))) + x_conv_out)
        return self.final_layer_norm(x_ffn2_out)
class Network(nn.Module):
    """Classification network assembled from a config: backbone -> pooling module -> classifier."""

    def __init__(self, cfg, mode='train', num_classes=1000):
        super(Network, self).__init__()
        # Only load pretrained backbone weights for fresh training runs
        # (no resume checkpoint, pretrained path provided).
        pretrain = (True if ((mode == 'train') and (cfg.RESUME_MODEL == '') and (cfg.BACKBONE.PRETRAINED_MODEL != '')) else False)
        self.num_classes = num_classes
        self.cfg = cfg
        # NOTE(review): eval() on a config string — safe only if cfg is trusted;
        # a registry/dict lookup would be preferable.
        self.backbone = eval(self.cfg.BACKBONE.TYPE)(self.cfg, pretrain=pretrain, pretrained_model=cfg.BACKBONE.PRETRAINED_MODEL, last_layer_stride=2)
        self.module = self._get_module()
        self.classifier = self._get_classifer()
        self.feature_len = self.get_feature_length()

    def forward(self, x, **kwargs):
        # Flags select partial execution: feature extraction only, or
        # classifier only (x is then assumed to already be features).
        if (('feature_flag' in kwargs) or ('feature_cb' in kwargs) or ('feature_rb' in kwargs)):
            return self.extract_feature(x, **kwargs)
        elif ('classifier_flag' in kwargs):
            return self.classifier(x)
        x = self.backbone(x)
        x = self.module(x)
        x = x.view(x.shape[0], (- 1))
        x = self.classifier(x)
        return x

    def extract_feature(self, x, **kwargs):
        """Return flattened backbone features (pre-classifier)."""
        # BBN backbones accept the branch-selection kwargs directly.
        if ('bbn' in self.cfg.BACKBONE.TYPE):
            x = self.backbone(x, **kwargs)
        else:
            x = self.backbone(x)
        x = self.module(x)
        x = x.view(x.shape[0], (- 1))
        return x

    def freeze_backbone(self):
        """Disable gradients for all backbone parameters."""
        print('Freezing backbone .......')
        for p in self.backbone.parameters():
            p.requires_grad = False

    def load_backbone_model(self, backbone_path=''):
        self.backbone.load_model(backbone_path)
        print('Backbone has been loaded...')

    def load_model(self, model_path):
        """Load a full checkpoint, stripping any 'module.' DataParallel prefix."""
        pretrain_dict = torch.load(model_path, map_location=('cpu' if self.cfg.CPU_MODE else 'cuda'))
        pretrain_dict = (pretrain_dict['state_dict'] if ('state_dict' in pretrain_dict) else pretrain_dict)
        model_dict = self.state_dict()
        from collections import OrderedDict
        new_dict = OrderedDict()
        for (k, v) in pretrain_dict.items():
            if k.startswith('module'):
                # Drop the 7-char 'module.' prefix added by nn.DataParallel.
                new_dict[k[7:]] = v
            else:
                new_dict[k] = v
        model_dict.update(new_dict)
        self.load_state_dict(model_dict)
        print('Model has been loaded...')

    def get_feature_length(self):
        """Return backbone feature width: 64 for CIFAR variants, 2048 otherwise;
        doubled for BBN (two-branch) backbones."""
        if ('cifar' in self.cfg.BACKBONE.TYPE):
            num_features = 64
        else:
            num_features = 2048
        if ('bbn' in self.cfg.BACKBONE.TYPE):
            num_features = (num_features * 2)
        return num_features

    def _get_module(self):
        # Pooling module between backbone and classifier.
        module_type = self.cfg.MODULE.TYPE
        if (module_type == 'GAP'):
            module = GAP()
        elif (module_type == 'Identity'):
            module = Identity()
        else:
            raise NotImplementedError
        return module

    def _get_classifer(self):
        # NOTE(review): name keeps the original (misspelled) spelling since
        # __init__ calls it by this exact name.
        bias_flag = self.cfg.CLASSIFIER.BIAS
        num_features = self.get_feature_length()
        if (self.cfg.CLASSIFIER.TYPE == 'FCNorm'):
            classifier = FCNorm(num_features, self.num_classes)
        elif (self.cfg.CLASSIFIER.TYPE == 'FC'):
            classifier = nn.Linear(num_features, self.num_classes, bias=bias_flag)
        else:
            raise NotImplementedError
        return classifier
class BSDSDmat(SpectralMatrix):
    """Mass-type matrix between two Shen-Dirichlet (SD) bases.

    Stored as a banded dict {offset: diagonal values}.
    """

    def assemble(self, method):
        (test, trial) = (self.testfunction, self.trialfunction)
        assert isinstance(test[0], SD)
        assert isinstance(trial[0], SD)
        # d0[k] is the norm-squared of basis function k.
        d0 = get_norm_sq(test[0], trial[0], method)
        # Main diagonal couples phi_k = T_k - T_{k+2} with itself; the -2
        # offset carries the cross term.
        d = {0: (d0[:(- 2)] + d0[2:]), (- 2): (- d0[2:(- 2)])}
        if test[0].is_scaled():
            # Scaled basis: divide by the normalization factors sqrt(4k+6).
            k = np.arange((test[0].N - 2))
            d[0] /= ((4 * k) + 6)
            d[(- 2)] /= (np.sqrt(((4 * k[2:]) + 6)) * np.sqrt(((4 * k[:(- 2)]) + 6)))
            # NOTE(review): the +2 diagonal is only filled here, not in the
            # unscaled branch — confirm whether SpectralMatrix symmetrizes the
            # unscaled case elsewhere.
            d[2] = d[(- 2)].copy()
        return d

    def get_solver(self):
        # Matrix is tridiagonal in the stored even/odd sense; TDMA solves it.
        return TDMA
def test_pisa_retinanet_head_loss():
    """Sanity-check PISARetinaHead.loss on empty and single-object ground truth."""
    s = 256
    img_metas = [{'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_shape': (s, s, 3)}]
    # Minimal train config: IoU assigner + random sampler + PISA's ISR/CARL terms.
    cfg = mmcv.Config(dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=(- 1)), sampler=dict(type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=(- 1), add_gt_as_proposals=False), isr=dict(k=2.0, bias=0.0), carl=dict(k=1.0, bias=0.2), allowed_border=0, pos_weight=(- 1), debug=False))
    self = PISARetinaHead(num_classes=4, in_channels=1, train_cfg=cfg)
    # One single-channel feature map per FPN level, strides 4, 8, 16, ...
    feat = [torch.rand(1, 1, (s // (2 ** (i + 2))), (s // (2 ** (i + 2)))) for i in range(len(self.anchor_generator.strides))]
    (cls_scores, bbox_preds) = self.forward(feat)
    # Case 1: no ground truth — classification loss only (all anchors negative).
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
    empty_cls_loss = empty_gt_losses['loss_cls'].sum()
    empty_box_loss = empty_gt_losses['loss_bbox'].sum()
    assert (empty_cls_loss.item() > 0), 'cls loss should be non-zero'
    assert (empty_box_loss.item() == 0), 'there should be no box loss when there are no true boxes'
    # Case 2: one ground-truth box — both losses must be non-zero.
    gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls'].sum()
    onegt_box_loss = one_gt_losses['loss_bbox'].sum()
    assert (onegt_cls_loss.item() > 0), 'cls loss should be non-zero'
    assert (onegt_box_loss.item() > 0), 'box loss should be non-zero'
def IsDerivedFunction(clean_lines, linenum):
    """Return a truthy value if the function declared on `linenum` is marked `override`."""
    paren_pos = clean_lines.elided[linenum].find('(')
    if paren_pos < 0:
        # No parameter list opens on this line — not a function declaration.
        return False
    line, _, end_pos = CloseExpression(clean_lines, linenum, paren_pos)
    if end_pos < 0:
        # Could not find the matching closing parenthesis.
        return False
    # `override` after the parameter list marks the function as derived.
    return Search(r'\boverride\b', line[end_pos:])
class ZoneoutWrapper(RNNCell):
    """RNN cell wrapper applying zoneout: randomly preserve old state components
    during training, and average old/new state at inference."""

    def __init__(self, cell, zoneout_drop_prob, is_training=True):
        self._cell = cell
        # Probability of keeping (zoning out to) the previous state value.
        self._zoneout_prob = zoneout_drop_prob
        self._is_training = is_training

    def state_size(self):
        # NOTE(review): RNNCell normally exposes state_size/output_size as
        # @property — the decorator may have been lost; confirm against callers.
        return self._cell.state_size

    def output_size(self):
        return self._cell.output_size

    def __call__(self, inputs, state, scope=None):
        (output, new_state) = self._cell(inputs, state, scope)
        if (not isinstance(self._cell.state_size, tuple)):
            # Concatenated LSTM state: split into (c, h) halves for per-part zoneout.
            new_state = tf.split(value=new_state, num_or_size_splits=2, axis=1)
            state = tf.split(value=state, num_or_size_splits=2, axis=1)
        final_new_state = [new_state[0], new_state[1]]
        if self._is_training:
            # Training: per-element Bernoulli mask; with prob `zoneout_prob`
            # keep the old state element, otherwise take the new one.
            for (i, state_element) in enumerate(state):
                random_tensor = (1 - self._zoneout_prob)
                random_tensor += tf.random_uniform(tf.shape(state_element))
                binary_tensor = tf.floor(random_tensor)
                final_new_state[i] = (((new_state[i] - state_element) * binary_tensor) + state_element)
        else:
            # Inference: deterministic convex combination (expected value of the mask).
            for (i, state_element) in enumerate(state):
                final_new_state[i] = ((state_element * self._zoneout_prob) + (new_state[i] * (1 - self._zoneout_prob)))
        if isinstance(self._cell.state_size, tuple):
            return (output, tf.contrib.rnn.LSTMStateTuple(final_new_state[0], final_new_state[1]))
        # Re-concatenate the split state to match the wrapped cell's layout.
        return (output, tf.concat([final_new_state[0], final_new_state[1]], 1))
def rand_contrast(x):
    """Randomly rescale each sample's contrast by a factor drawn from U[0.5, 1.5)."""
    # Per-sample mean over channel and spatial dims, kept broadcastable.
    per_sample_mean = x.mean(dim=[1, 2, 3], keepdim=True)
    # One gain per batch element, broadcast over C/H/W.
    gain = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5
    return (x - per_sample_mean) * gain + per_sample_mean
def set_discrete_variable(var_names: List[str], discrete_variable_name: str):
    """Return a name -> bool map flagging only `discrete_variable_name` as discrete."""
    flags = dict.fromkeys(var_names, False)
    flags[discrete_variable_name] = True
    return flags
def aps15_fpp(x, n):
    """Second derivative for APS benchmark problem #15 (piecewise exponential)."""
    # The exponential branch only applies on [0, 0.002/(1+n)].
    upper = (2 * 0.001) / (1 + n)
    if 0 <= x <= upper:
        # ((n+1)/0.002)**2 * exp(x*(n+1)/0.002), factors kept in the original order.
        return np.exp((((n + 1) * x) / 2) * 1000) * (n + 1) / 2 * 1000 * (n + 1) / 2 * 1000
    # Outside the interval the curvature is the constant e - 1.859.
    return np.e - 1.859
def get_norm_layer(norm_type='instance'):
    """Return a normalization-layer factory for `norm_type`.

    'batch' and 'instance' give functools.partial constructors with the usual
    affine/tracking settings; 'none' gives a callable that ignores its argument
    and returns an Identity layer. Raises NotImplementedError otherwise.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if norm_type == 'none':
        def norm_layer(x):
            return Identity()
        return norm_layer
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
class IBMCloudProvider(CloudProvider):
    """Skyplane cloud-provider adapter for IBM Cloud VPC (lithops IBMVPCBackend per region)."""

    def __init__(self, key_prefix: str='skyplane', auth: Optional[IBMCloudAuthentication]=None):
        super().__init__()
        self.key_prefix = key_prefix
        self.auth = (auth if auth else IBMCloudAuthentication())
        # region name -> initialized IBMVPCBackend
        self.regions_vpc = {}
        # Cap concurrent provisioning operations.
        self.provisioning_semaphore = BoundedSemaphore(16)

    def name(self):
        # NOTE(review): siblings in other providers expose this as @property —
        # the decorator may have been stripped; confirm against CloudProvider.
        return 'ibmcloud'

    def region_list() -> List[str]:
        # NOTE(review): missing `self`/`cls`; almost certainly lost a
        # @staticmethod decorator — calling it on an instance would fail.
        return REGIONS

    def setup_global(self, iam_name: str='skyplane_gateway', attach_policy_arn: Optional[str]=None):
        # No global IAM setup needed for IBM Cloud.
        pass

    def setup_region(self, region: str):
        """Initialize and cache a VPC backend for `region`."""
        ibm_vpc_config = {'ibm': {'iam_api_key': self.auth.iam_api_key, 'user_agent': self.auth.user_agent}, 'ibm_gen2': {'region': region, 'resource_group_id': self.auth.ibmcloud_resource_group_id}}
        load_config(ibm_vpc_config)
        ibm_vpc_backend = IBMVPCBackend(ibm_vpc_config['ibm_gen2'])
        ibm_vpc_backend.init()
        self.regions_vpc[region] = ibm_vpc_backend

    def teardown_region(self, region):
        if (region in self.regions_vpc):
            self.regions_vpc[region].clean(all=True)

    def teardown_global(self):
        # Clean every region backend that was set up.
        for region in self.regions_vpc:
            self.regions_vpc[region].clean(all=True)

    def add_ips_to_security_group(self, cos_region: str, ips: Optional[List[str]]=None):
        return self.regions_vpc[cos_region].add_ips_to_security_group(ips)

    def remove_ips_from_security_group(self, cos_region: str, ips: List[str]):
        # Intentionally a no-op for IBM Cloud.
        pass

    # NOTE(review): the next line appears to be a mangled decorator, most likely
    # `@imports.inject('botocore.exceptions', pip_extra='ibmcloud')`, which would
    # explain the leading `exceptions` parameter of provision_instance. As
    # written it is not valid Python — restore the decorator from upstream.
    ('botocore.exceptions', pip_extra='ibmcloud')
    def provision_instance(exceptions, self, region: str, instance_class: str, zone_name: Optional[str]=None, name: Optional[str]=None, tags={'skyplane': 'true'}) -> IBMCloudServer:
        # NOTE(review): `tags` is a mutable default and is mutated below — the
        # added keys persist across calls; consider a None sentinel upstream.
        tags['node-type'] = 'master'
        tags['node-name'] = 'skyplane-master'
        (instance_id, vsi) = self.regions_vpc[region].create_vpc_instance()
        return IBMCloudServer(self.regions_vpc[region], f'ibmcloud:{region}', instance_id, vsi)
def update_packet_pbar(i, current_iteration, no_of_packets, total_iterations):
    """Advance the module-level packet progress bar by `i` packets, lazily
    (re)building bar layouts and resetting the bar on a new iteration.

    The bar's `postfix` string stores the 1-based iteration it is displaying.
    NOTE(review): nesting below is reconstructed from flattened source — verify
    against upstream; also `postfix = current_iteration` assigns an int where
    other writes store a str.
    """
    if (packet_pbar.postfix == ''):
        # First call: initialize the stored iteration marker.
        packet_pbar.postfix = '0'
    # Iteration the bar currently displays (0-based).
    bar_iteration = (int(packet_pbar.postfix) - 1)
    if (iterations_pbar.total == None):
        fix_bar_layout(iterations_pbar, total_iterations=total_iterations)
    if (packet_pbar.total == None):
        fix_bar_layout(packet_pbar, no_of_packets=no_of_packets)
    if (iterations_pbar.n == total_iterations):
        # All iterations done: rebuild the iterations bar (close the notebook
        # widget first, since it cannot be redrawn in place).
        if (type(iterations_pbar).__name__ == 'tqdm_notebook'):
            iterations_pbar.container.close()
        fix_bar_layout(iterations_pbar, total_iterations=total_iterations)
    if (bar_iteration > current_iteration):
        # Bar is ahead of the requested iteration (e.g. a rerun): rebuild it.
        packet_pbar.postfix = current_iteration
        if (type(packet_pbar).__name__ == 'tqdm_notebook'):
            packet_pbar.container.close()
        fix_bar_layout(packet_pbar, no_of_packets=no_of_packets)
    if (bar_iteration < current_iteration):
        # New iteration started: reset progress and record the new iteration.
        packet_pbar.reset(total=no_of_packets)
        packet_pbar.postfix = str((current_iteration + 1))
    packet_pbar.update(i)
def test_langid(basic_multilingual):
    """The multilingual pipeline should tag English and French documents correctly."""
    texts = ['This is an English sentence.', "C'est une phrase francaise."]
    docs = [Document([], text=t) for t in texts]
    # Pipeline annotates the documents in place.
    basic_multilingual(docs)
    assert [doc.lang for doc in docs] == ['en', 'fr']
def quaternion_matrix(quaternion):
    """Return the 4x4 homogeneous rotation matrix for quaternion (w, x, y, z)."""
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    norm_sq = numpy.dot(q, q)
    if norm_sq < _EPS:
        # Degenerate (near-zero) quaternion: identity rotation.
        return numpy.identity(4)
    # Scale so the outer product directly yields the 2*q_i*q_j terms.
    q *= math.sqrt(2.0 / norm_sq)
    outer = numpy.outer(q, q)
    row0 = [1.0 - outer[2, 2] - outer[3, 3], outer[1, 2] - outer[3, 0], outer[1, 3] + outer[2, 0], 0.0]
    row1 = [outer[1, 2] + outer[3, 0], 1.0 - outer[1, 1] - outer[3, 3], outer[2, 3] - outer[1, 0], 0.0]
    row2 = [outer[1, 3] - outer[2, 0], outer[2, 3] + outer[1, 0], 1.0 - outer[1, 1] - outer[2, 2], 0.0]
    return numpy.array([row0, row1, row2, [0.0, 0.0, 0.0, 1.0]])
def pdb_hook(type, value, tb):
    """sys.excepthook replacement: drop into a post-mortem debugger on uncaught exceptions.

    Falls back to the default hook when running interactively (sys.ps1 set) or
    when stderr is not a TTY (piped/redirected output), where a debugger prompt
    makes no sense. Prefers ipdb when installed, else the stdlib pdb.
    """
    if (hasattr(sys, 'ps1') or (not sys.stderr.isatty())):
        sys.__excepthook__(type, value, tb)
    else:
        import traceback
        try:
            import ipdb as pdb
        except ImportError:
            # BUG FIX: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit raised during the import.
            import pdb
        traceback.print_exception(type, value, tb)
        pdb.post_mortem(tb)
def resnet_v2_152(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, spatial_squeeze=True, reuse=None, scope='resnet_v2_152'):
    """ResNet-152 v2 (preactivation): 3-8-36-3 bottleneck units per stage.

    Thin wrapper that builds the standard block layout and delegates all
    construction options (classifier head, pooling, dilation, variable reuse)
    to `resnet_v2`.
    """
    # Stage depths follow the canonical ResNet-152 configuration; the final
    # block uses stride 1 so the root+blocks reach the nominal output stride.
    blocks = [resnet_v2_block('block1', base_depth=64, num_units=3, stride=2), resnet_v2_block('block2', base_depth=128, num_units=8, stride=2), resnet_v2_block('block3', base_depth=256, num_units=36, stride=2), resnet_v2_block('block4', base_depth=512, num_units=3, stride=1)]
    return resnet_v2(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=True, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope)
def train(model, loader):
    """Run one training epoch of `model` over `loader`; return mean loss per graph.

    Relies on the module-level `device`, `optimizer` and `F` (torch.nn.functional)
    bindings, matching the surrounding script's conventions.
    """
    model.train()
    total_loss = 0
    # BUG FIX: previously iterated the global `train_loader`, silently ignoring
    # the `loader` argument; the divisor below had the same problem.
    for data in loader:
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data.x, data.edge_index, data.batch)
        loss = F.cross_entropy(out, data.y)
        loss.backward()
        optimizer.step()
        # Weight by graphs in the batch so the epoch average is per-graph.
        total_loss += (float(loss) * data.num_graphs)
    return (total_loss / len(loader.dataset))
def main():
    """Build BOP-style test targets for LINEMOD by excluding the BB8 training split."""
    image_set_dir = 'training_range_BB8/'
    test_targets = []
    for (cls_idx, cls_name) in IDX2CLASS.items():
        print(cls_idx, cls_name)
        # The BB8 split files use 'cam.txt' for the camera class.
        if (cls_name == 'camera'):
            BB8_train_idx_file = osp.join(image_set_dir, 'cam.txt')
        else:
            BB8_train_idx_file = osp.join(image_set_dir, f'{cls_name}.txt')
        with open(BB8_train_idx_file, 'r') as f:
            BB8_train_ids = [int(line.strip('\r\n')) for line in f]
        BOP_gt_file = osp.join(data_root, f'test/{cls_idx:06d}/scene_gt.json')
        assert osp.exists(BOP_gt_file), BOP_gt_file
        gt_dict = mmcv.load(BOP_gt_file)
        all_ids = [int(k) for k in gt_dict.keys()]
        # Test images are everything not used for BB8 training.
        test_ids = [k for k in all_ids if (k not in BB8_train_ids)]
        print(len(test_ids))
        for idx in test_ids:
            # LINEMOD: scene id equals object id, one instance per image.
            target = {'im_id': idx, 'inst_count': 1, 'obj_id': cls_idx, 'scene_id': cls_idx}
            test_targets.append(target)
    res_file = osp.join(cur_dir, 'lm_test_targets_bb8.json')
    print(res_file)
    print(len(test_targets))
    mmcv.dump(test_targets, res_file)
    print('done')
def install_given_reqs(to_install, install_options, global_options=(), *args, **kwargs):
    """Install the given requirement objects, upgrading in place when needed.

    For each requirement that conflicts with an existing installation, the old
    version is uninstalled first; the uninstall is rolled back if the new
    install fails and committed once it succeeds. Returns `to_install`.
    """
    if to_install:
        logger.info('Installing collected packages: %s', ', '.join([req.name for req in to_install]))
    with indent_log():
        for requirement in to_install:
            if requirement.conflicts_with:
                logger.info('Found existing installation: %s', requirement.conflicts_with)
                with indent_log():
                    # Auto-confirm: the user already asked for this package.
                    uninstalled_pathset = requirement.uninstall(auto_confirm=True)
            try:
                requirement.install(install_options, global_options, *args, **kwargs)
            except Exception:
                # Restore the previously installed version if we removed one
                # and the replacement failed to install.
                should_rollback = (requirement.conflicts_with and (not requirement.install_succeeded))
                if should_rollback:
                    uninstalled_pathset.rollback()
                raise
            else:
                # Make the removal of the old version permanent.
                should_commit = (requirement.conflicts_with and requirement.install_succeeded)
                if should_commit:
                    uninstalled_pathset.commit()
            requirement.remove_temporary_source()
    return to_install
def sketch_move(mocap_track, data=None, ax=None, figsize=(16, 8)):
    """Overlay the skeleton of every 4th mocap frame on one axis, fading older
    frames via alpha (early frames faint, late frames opaque)."""
    if (ax is None):
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)
    if (data is None):
        # NOTE(review): the string indexing below ('%s_Xposition') suggests this
        # expects a DataFrame-like object; `mocap_track.values` would be a bare
        # ndarray — confirm the intended type upstream.
        data = mocap_track.values
    for frame in range(0, data.shape[0], 4):
        for joint in mocap_track.skeleton.keys():
            children_to_draw = [c for c in mocap_track.skeleton[joint]['children']]
            parent_x = data[('%s_Xposition' % joint)][frame]
            parent_y = data[('%s_Yposition' % joint)][frame]
            # Later frames drawn more opaque to show motion direction.
            frame_alpha = (frame / data.shape[0])
            for c in children_to_draw:
                child_x = data[('%s_Xposition' % c)][frame]
                child_y = data[('%s_Yposition' % c)][frame]
                ax.plot([parent_x, child_x], [parent_y, child_y], '-', lw=1, color='gray', alpha=frame_alpha)
def data_iterator_cityscapes(batch_size, data_dir, rng=None, train=True):
    """Build a segmentation data iterator over the CityScapes dataset rooted at `data_dir`."""
    dataset = CityScapesDatasetPath(data_dir)
    # Resolve the image/label file lists for the requested split.
    images = dataset.get_image_paths(train=train)
    labels = dataset.get_label_paths(train=train)
    return data_iterator_segmentation(batch_size, images, labels, rng, train)
def multiprocess(func, data, num_workers=1, granularity='shards', log_every=1000, verbose=False):
    """Map `func` over enumerate(data), in a worker pool or sequentially, and
    return the results ordered to match `data`.

    `func` must accept an (index, item) pair and return (index, result); the
    index is used to restore input order after unordered parallel execution.
    """
    start = time.time()
    if (num_workers > 1):
        if verbose:
            print('parallel processing')
        out = {}
        with Pool(num_workers) as p:
            count = 0
            # One chunk per worker (at least 1) to amortize IPC overhead.
            chunksize = max(1, (len(data) // num_workers))
            for (i, res) in p.imap_unordered(func, enumerate(data), chunksize=chunksize):
                out[i] = res
                count += 1
                if verbose:
                    # NOTE(review): `count` was already incremented, so this logs at
                    # counts log_every-1, 2*log_every-1, ... ('elasped' typo is part
                    # of the emitted message and preserved).
                    if (((count + 1) % log_every) == 0):
                        elasped = (time.time() - start)
                        elasped = str(datetime.timedelta(seconds=elasped))
                        print('{}/{} {} processed (elasped: {})'.format(count, len(data), granularity, elasped))
    else:
        if verbose:
            print('sequential processing')
        out = []
        count = 0
        for (i, x) in enumerate(data):
            # Same (index, item) calling convention as the parallel branch.
            (i, res) = func((i, x))
            out.append(res)
            count += 1
            if verbose:
                if (((count + 1) % log_every) == 0):
                    elasped = (time.time() - start)
                    elasped = str(datetime.timedelta(seconds=elasped))
                    print('{}/{} {} processed (elasped: {})'.format(count, len(data), granularity, elasped))
        # Convert to index->result so both branches finish the same way.
        out = dict(enumerate(out))
    if verbose:
        print('sorting multiprocess outputs')
    # Restore input order from the (possibly unordered) index map.
    out = [out[k] for k in sorted(list(out.keys()))]
    return out
class BottleneckWithFixedBatchNorm(Bottleneck):
    """Bottleneck residual block whose normalization layers are FrozenBatchNorm2d
    (affine transform with frozen statistics, standard for detection backbones)."""

    def __init__(self, in_channels, bottleneck_channels, out_channels, num_groups=1, stride_in_1x1=True, stride=1, dilation=1, dcn_config=None):
        # BUG FIX: `dcn_config={}` was a shared mutable default argument; use a
        # None sentinel and create a fresh dict per instance instead.
        if dcn_config is None:
            dcn_config = {}
        super(BottleneckWithFixedBatchNorm, self).__init__(
            in_channels=in_channels,
            bottleneck_channels=bottleneck_channels,
            out_channels=out_channels,
            num_groups=num_groups,
            stride_in_1x1=stride_in_1x1,
            stride=stride,
            dilation=dilation,
            norm_func=FrozenBatchNorm2d,
            dcn_config=dcn_config,
        )
class FSMTTokenizer(PreTrainedTokenizer):
    """FSMT (FairSeq MachineTranslation) tokenizer: Moses pre/post-processing
    plus BPE, with separate source and target vocabularies."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, langs=None, src_vocab_file=None, tgt_vocab_file=None, merges_file=None, do_lower_case=False, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', **kwargs):
        super().__init__(langs=langs, src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, do_lower_case=do_lower_case, unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, **kwargs)
        self.src_vocab_file = src_vocab_file
        self.tgt_vocab_file = tgt_vocab_file
        self.merges_file = merges_file
        self.do_lower_case = do_lower_case
        # Per-language caches for the (expensive to build) Moses helpers.
        self.cache_moses_punct_normalizer = dict()
        self.cache_moses_tokenizer = dict()
        self.cache_moses_detokenizer = dict()
        if (langs and (len(langs) == 2)):
            (self.src_lang, self.tgt_lang) = langs
        else:
            raise ValueError(
                f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. \n"
                "Usually that means that tokenizer can't find a mapping for the given model path in "
                "PRETRAINED_VOCAB_FILES_MAP, and other maps of this tokenizer.")
        # encoder: source token -> id; decoder: id -> target token (inverted map).
        with open(src_vocab_file, encoding='utf-8') as src_vocab_handle:
            self.encoder = json.load(src_vocab_handle)
        with open(tgt_vocab_file, encoding='utf-8') as tgt_vocab_handle:
            tgt_vocab = json.load(tgt_vocab_handle)
            self.decoder = {v: k for (k, v) in tgt_vocab.items()}
        # Merges file: one merge per line; last split entry is empty, hence [:-1].
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:(- 1)]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        # Earlier merges have lower rank (higher priority).
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # token -> BPE result memoization cache.
        self.cache = {}

    def get_vocab(self) -> Dict[(str, int)]:
        return self.get_src_vocab()

    def vocab_size(self) -> int:
        # NOTE(review): upstream defines this as a @property; the decorator
        # appears to have been stripped — confirm.
        return self.src_vocab_size

    def moses_punct_norm(self, text, lang):
        if (lang not in self.cache_moses_punct_normalizer):
            punct_normalizer = sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        return self.cache_moses_punct_normalizer[lang].normalize(text)

    def moses_tokenize(self, text, lang):
        if (lang not in self.cache_moses_tokenizer):
            moses_tokenizer = sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        return self.cache_moses_tokenizer[lang].tokenize(text, aggressive_dash_splits=True, return_str=False, escape=True)

    def moses_detokenize(self, tokens, lang):
        # NOTE(review): likely bugs — the guard checks cache_moses_tokenizer
        # (not cache_moses_detokenizer), which can raise KeyError below, and the
        # detokenizer is built with self.tgt_lang regardless of `lang`. Upstream
        # fixed both; confirm before relying on multi-lang detokenization.
        if (lang not in self.cache_moses_tokenizer):
            moses_detokenizer = sm.MosesDetokenizer(lang=self.tgt_lang)
            self.cache_moses_detokenizer[lang] = moses_detokenizer
        return self.cache_moses_detokenizer[lang].detokenize(tokens)

    def moses_pipeline(self, text, lang):
        """Moses pre-processing: unicode punct replacement, punct normalization,
        removal of non-printing characters."""
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    def src_vocab_size(self):
        # NOTE(review): likely a stripped @property (vocab_size() reads it
        # without calling).
        return len(self.encoder)

    def tgt_vocab_size(self):
        # NOTE(review): likely a stripped @property.
        return len(self.decoder)

    def get_src_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def get_tgt_vocab(self):
        return dict(self.decoder, **self.added_tokens_decoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single token; returns space-joined subwords."""
        # Terminal character carries the end-of-word marker.
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        if (token in self.cache):
            return self.cache[token]
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Greedily merge the lowest-ranked (earliest-learned) pair.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        # Special-case a bare newline token so it doesn't gain a space.
        if (word == '\n </w>'):
            word = '\n</w>'
        self.cache[token] = word
        return word

    def _tokenize(self, text, lang='en', bypass_tokenizer=False):
        # NOTE(review): the `lang` argument is immediately overwritten with
        # src_lang, so it is effectively ignored.
        lang = self.src_lang
        if self.do_lower_case:
            text = text.lower()
        if bypass_tokenizer:
            # Caller already tokenized: just whitespace-split before BPE.
            text = text.split()
        else:
            text = self.moses_pipeline(text, lang=lang)
            text = self.moses_tokenize(text, lang=lang)
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token):
        # Unknown tokens map to the unk id from the source vocab.
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        # Decoding uses the target vocab.
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Undo BPE ('</w>' markers back to spaces) and Moses-detokenize."""
        tokens = [t.replace(' ', '').replace('</w>', ' ') for t in tokens]
        tokens = ''.join(tokens).split()
        text = self.moses_detokenize(tokens, self.tgt_lang)
        return text

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        # FSMT format: X </s> [Y </s>] — no BOS.
        sep = [self.sep_token_id]
        if (token_ids_1 is None):
            return (token_ids_0 + sep)
        return (((token_ids_0 + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """1 marks a special token, 0 a sequence token."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is not None):
            return (((([0] * len(token_ids_0)) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([0] * len(token_ids_0)) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        # Sequence-pair mask: 0s for the first segment (+sep), 1s for the second.
        sep = [self.sep_token_id]
        if (token_ids_1 is None):
            return (len((token_ids_0 + sep)) * [0])
        return ((len((token_ids_0 + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    # NOTE(review): this looks like a stripped decorator, most likely
    # `@add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)` applied to
    # prepare_seq2seq_batch — restore from upstream.
    _start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
    def prepare_seq2seq_batch(self, src_texts: List[str], tgt_texts: Optional[List[str]]=None, max_length: Optional[int]=None, max_target_length: Optional[int]=None, return_tensors: str='pt', truncation=True, padding='longest', **unused) -> BatchEncoding:
        """Tokenize source (and optionally target) texts into a model-ready batch;
        target ids are attached as 'labels'."""
        if (type(src_texts) is not list):
            raise ValueError('src_texts is expected to be a list')
        if ('' in src_texts):
            raise ValueError(f'found empty string in src_texts: {src_texts}')
        tokenizer_kwargs = dict(add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, truncation=truncation, padding=padding)
        model_inputs: BatchEncoding = self(src_texts, **tokenizer_kwargs)
        if (tgt_texts is None):
            return model_inputs
        if (max_target_length is not None):
            tokenizer_kwargs['max_length'] = max_target_length
        model_inputs['labels'] = self(tgt_texts, **tokenizer_kwargs)['input_ids']
        return model_inputs

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write src/tgt vocab JSON files and the merges file into `save_directory`;
        returns the three file paths."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        src_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['src_vocab_file']))
        tgt_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['tgt_vocab_file']))
        merges_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(src_vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        with open(tgt_vocab_file, 'w', encoding='utf-8') as f:
            # decoder is id -> token; invert back to token -> id for saving.
            tgt_vocab = {v: k for (k, v) in self.decoder.items()}
            f.write(json.dumps(tgt_vocab, ensure_ascii=False))
        index = 0
        with open(merges_file, 'w', encoding='utf-8') as writer:
            # Write merges in rank order; warn if ranks are non-consecutive.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merges_file))
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (src_vocab_file, tgt_vocab_file, merges_file)
def mask_loss_evaluator():
    """Build the Mask R-CNN loss computation, wired to the module-level `cfg` thresholds."""
    # Matcher assigns proposals to ground truth by IoU; no low-quality matches.
    proposal_matcher = Matcher(
        cfg.FAST_RCNN.FG_IOU_THRESHOLD,
        cfg.FAST_RCNN.BG_IOU_THRESHOLD,
        allow_low_quality_matches=False,
    )
    return MaskRCNNLossComputation(proposal_matcher, cfg.MRCNN.RESOLUTION, cfg.MRCNN.MASKIOU_ON)
class GroupNorm(Module):
    """Applies Group Normalization over a mini-batch of inputs.

    The channel dimension is divided into `num_groups` groups; mean and
    variance are computed per group. When `affine` is True, learnable
    per-channel weight (init 1) and bias (init 0) are applied.
    """

    __constants__ = ['num_groups', 'num_channels', 'eps', 'affine']
    num_groups: int
    num_channels: int
    eps: float
    affine: bool

    def __init__(self, num_groups: int, num_channels: int, eps: float = 1e-05,
                 affine: bool = True, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(GroupNorm, self).__init__()
        self.num_groups = num_groups
        self.num_channels = num_channels
        self.eps = eps
        self.affine = affine
        if not self.affine:
            # Register as parameters-with-value-None so state_dict stays consistent.
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        else:
            self.weight = Parameter(torch.empty(num_channels, **factory_kwargs))
            self.bias = Parameter(torch.empty(num_channels, **factory_kwargs))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Reset weight to ones and bias to zeros (identity transform)."""
        if not self.affine:
            return
        init.ones_(self.weight)
        init.zeros_(self.bias)

    def forward(self, input: Tensor) -> Tensor:
        return F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps)

    def extra_repr(self) -> str:
        return '{num_groups}, {num_channels}, eps={eps}, affine={affine}'.format(**self.__dict__)
def test_badscope():
    """nest_state_subgraph must reject subgraphs that cut a map scope open
    and nodes that are not in the given state."""
    # Tasklet + map-entry without the exit: invalid (incomplete scope).
    with pytest.raises(ValueError):
        (sdfg, state, t, me, mx) = create_sdfg()
        nest_state_subgraph(sdfg, state, SubgraphView(state, [t, me]))
    # Tasklet + map-exit without the entry: equally invalid.
    with pytest.raises(ValueError):
        (sdfg, state, t, me, mx) = create_sdfg()
        nest_state_subgraph(sdfg, state, SubgraphView(state, [t, mx]))
    with pytest.raises(NodeNotFoundError):
        (sdfg, state, t, me, mx) = create_sdfg()
        b_node = state.sink_nodes()[0]
        # Deliberately re-create the SDFG so `b_node` belongs to a *stale*
        # state; the subgraph lookup must then fail with NodeNotFoundError.
        (sdfg, state, t, me, mx) = create_sdfg()
        nest_state_subgraph(sdfg, state, SubgraphView(state, [t, b_node]))
class InfDataLoader():
    """Wrap a torch DataLoader as a never-ending iterator that restarts every epoch."""

    def __init__(self, dataset, **kwargs):
        self.dataloader = torch.utils.data.DataLoader(dataset, **kwargs)

        def endless():
            # Cycle through the underlying loader forever, one (image, label)
            # batch at a time; a fresh epoch starts when it is exhausted.
            while True:
                for image, label in self.dataloader:
                    yield image, label

        self.inf_dataloader = endless()

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.inf_dataloader)

    def __del__(self):
        del self.dataloader
def process(filename, out_dir, n_frames, fps, skip_existing, ignore_exceptions, quiet):
    """Extract PNG frames from one video into out_dir/<instrument>/<youtube_id>/.

    The instrument is the parent directory name, the clip id is the file stem.
    Returns the filename on success, or None when skipped. When `n_frames` is
    set, ~10 frames are sampled across the clip (trimming the ends of long
    clips); otherwise frames are extracted at the fixed `fps`.
    """
    youtube_id = filename.stem
    instrument = filename.parent.name
    if (skip_existing and ((out_dir / instrument) / youtube_id).is_dir()):
        # Already extracted: nothing to do.
        return
    (out_dir / instrument).mkdir(exist_ok=True)
    ((out_dir / instrument) / youtube_id).mkdir(exist_ok=True)
    if (n_frames is not None):
        duration = get_duration(filename, ignore_exceptions)
        # Start 1s in; clips >20s also skip tail seconds via the `t` limit below.
        stream = ffmpeg.input(filename, ss=1)
        stream = ffmpeg.filter(stream, 'fps', fps=((10 / (duration - 5)) if (duration > 20) else (10 / duration)), round='up')
        # BUG FIX: this branch wrote '%6d.png' (space-padded) while the other
        # wrote '%06d.png' (zero-padded); frame files now sort consistently.
        stream = ffmpeg.output(stream, str((((out_dir / instrument) / youtube_id) / '%06d.png')), vcodec='png', t=((duration - 10) if (duration > 20) else duration))
        ffmpeg.run(stream, quiet=quiet, overwrite_output=True)
    else:
        stream = ffmpeg.input(filename)
        stream = ffmpeg.filter(stream, 'fps', fps=fps, round='up')
        stream = ffmpeg.output(stream, str((((out_dir / instrument) / youtube_id) / '%06d.png')), vcodec='png')
        ffmpeg.run(stream, quiet=quiet, overwrite_output=True)
    return filename
def force_iterable(value: Any) -> (list | tuple): if isinstance(value, (tuple, list)): return value return [value]
def define_treatments(name, t, c):
    """Package a treatment spec: variable `name` with treatment value `t` and control value `c`."""
    return {'var_name': name, 'treatment_value': t, 'control_value': c}
def freesurface(model, eq):
    """Generate free-surface versions of the given Devito equations over the
    'fsdomain' subdomain, using the odd-image (mirror with sign flip) method.
    """
    fs_eq = []
    for eq_i in eq:
        for p in eq_i._flatten:
            (lhs, rhs) = p.evaluate.args
            # Innermost dimension of the free-surface subdomain and its parent z.
            zfs = model.grid.subdomains['fsdomain'].dimensions[(- 1)]
            z = zfs.parent
            funcs = retrieve_functions(rhs.evaluate)
            mapper = {}
            for f in funcs:
                zind = f.indices[(- 1)]
                # Negative z-offset: the stencil reaches above the surface.
                if ((zind - z).as_coeff_Mul()[0] < 0):
                    # Sign of the mirrored contribution, evaluated on fsdomain.
                    s = sign((zind - z.symbolic_min).subs({z: zfs, z.spacing: 1}))
                    # Replace the above-surface access with its signed mirror image.
                    mapper.update({f: (s * f.subs({zind: INT(abs(zind))}))})
            fs_eq.append(Eq(lhs, (sign((lhs.indices[(- 1)] - z.symbolic_min)) * rhs.subs(mapper)), subdomain=model.grid.subdomains['fsdomain']))
    return fs_eq
class EventStorage():
    """In-memory store for training metrics — scalars (with optional median
    smoothing), images, and histograms — tagged with the current iteration.

    May be used as a context manager to push itself onto the global
    ``_CURRENT_STORAGE_STACK`` (defined elsewhere in this module).

    NOTE(review): the incoming source defined ``iter`` twice (the second
    definition silently shadowed the first, so ``storage.iter()`` raised a
    TypeError) and ``name_scope`` contained a bare ``yield`` while being
    unusable in a ``with`` statement — both are artifacts of stripped
    decorators. ``iter``/``iteration`` are restored as properties and
    ``name_scope`` now returns a real context manager.
    """

    def __init__(self, start_iter=0):
        """
        Args:
            start_iter (int): iteration number to start counting from.
        """
        self._history = defaultdict(HistoryBuffer)  # name -> HistoryBuffer
        self._smoothing_hints = {}   # name -> bool smoothing hint
        self._latest_scalars = {}    # name -> (latest value, iteration)
        self._iter = start_iter
        self._current_prefix = ''    # prefix applied by name_scope()
        self._vis_data = []          # (img_name, img_tensor, iteration) triples
        self._histograms = []        # TensorBoard-style histogram param dicts

    def put_image(self, img_name, img_tensor):
        """Record an image tensor to be visualized at the current iteration."""
        self._vis_data.append((img_name, img_tensor, self._iter))

    def put_scalar(self, name, value, smoothing_hint=True):
        """Record a scalar under ``current_prefix + name``.

        Raises:
            AssertionError: if *name* was previously recorded with a
                different *smoothing_hint*.
        """
        name = (self._current_prefix + name)
        history = self._history[name]
        value = float(value)
        history.update(value, self._iter)
        self._latest_scalars[name] = (value, self._iter)
        existing_hint = self._smoothing_hints.get(name)
        if (existing_hint is not None):
            assert (existing_hint == smoothing_hint), 'Scalar {} was put with a different smoothing_hint!'.format(name)
        else:
            self._smoothing_hints[name] = smoothing_hint

    def put_scalars(self, *, smoothing_hint=True, **kwargs):
        """Record several scalars at once as ``name=value`` keyword pairs."""
        for (k, v) in kwargs.items():
            self.put_scalar(k, v, smoothing_hint=smoothing_hint)

    def put_histogram(self, hist_name, hist_tensor, bins=1000):
        """Record a histogram of *hist_tensor* in TensorBoard parameter form."""
        (ht_min, ht_max) = (hist_tensor.min().item(), hist_tensor.max().item())
        hist_counts = torch.histc(hist_tensor, bins=bins)
        hist_edges = torch.linspace(start=ht_min, end=ht_max, steps=(bins + 1), dtype=torch.float32)
        hist_params = dict(tag=hist_name, min=ht_min, max=ht_max, num=len(hist_tensor), sum=float(hist_tensor.sum()), sum_squares=float(torch.sum((hist_tensor ** 2))), bucket_limits=hist_edges[1:].tolist(), bucket_counts=hist_counts.tolist(), global_step=self._iter)
        self._histograms.append(hist_params)

    def history(self, name):
        """Return the HistoryBuffer for *name*.

        Raises:
            KeyError: if no scalar was ever recorded under *name*.
        """
        ret = self._history.get(name, None)
        if (ret is None):
            raise KeyError('No history metric available for {}!'.format(name))
        return ret

    def histories(self):
        """Return the full name -> HistoryBuffer mapping."""
        return self._history

    def latest(self):
        """Return name -> (latest value, iteration) for all scalars."""
        return self._latest_scalars

    def latest_with_smoothing_hint(self, window_size=20):
        """Like ``latest()``, but scalars hinted for smoothing are replaced
        by their median over the last *window_size* recorded values."""
        result = {}
        for (k, (v, itr)) in self._latest_scalars.items():
            result[k] = ((self._history[k].median(window_size) if self._smoothing_hints[k] else v), itr)
        return result

    def smoothing_hints(self):
        """Return the name -> bool smoothing-hint mapping."""
        return self._smoothing_hints

    def step(self):
        """Advance the internal iteration counter by one."""
        self._iter += 1

    @property
    def iter(self):
        """Current iteration number (settable)."""
        return self._iter

    @iter.setter
    def iter(self, val):
        self._iter = int(val)

    @property
    def iteration(self):
        """Alias of ``iter``, kept for backward compatibility."""
        return self._iter

    def __enter__(self):
        _CURRENT_STORAGE_STACK.append(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        assert (_CURRENT_STORAGE_STACK[(- 1)] == self)
        _CURRENT_STORAGE_STACK.pop()

    def name_scope(self, name):
        """Context manager: prefix all scalar names with ``name + '/'``
        while the ``with`` block is active."""
        storage = self
        new_prefix = (name.rstrip('/') + '/')

        class _Scope():
            def __enter__(scope):
                scope._old_prefix = storage._current_prefix
                storage._current_prefix = new_prefix

            def __exit__(scope, exc_type, exc_val, exc_tb):
                storage._current_prefix = scope._old_prefix

        return _Scope()

    def clear_images(self):
        """Drop all recorded images."""
        self._vis_data = []

    def clear_histograms(self):
        """Drop all recorded histograms."""
        self._histograms = []
def _propagate_device_option(net): if (not net.HasField('device_option')): return for op in net.op: if (not op.HasField('device_option')): op.device_option.CopyFrom(net.device_option)
def register_Ns3TracedValue__Unsigned_int_methods(root_module, cls):
    """Register constructors and methods of ``ns3::TracedValue<unsigned int>``
    on the binding wrapper *cls* (pybindgen registration helper)."""
    # Constructors: default, copy, and from a plain unsigned int.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TracedValue< unsigned int > const &', 'o')])
    cls.add_constructor([param('unsigned int const &', 'v')])
    # Trace (dis)connection hooks, each with and without a context path.
    for verb in ('Connect', 'Disconnect'):
        cls.add_method(verb, 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
        cls.add_method((verb + 'WithoutContext'), 'void', [param('ns3::CallbackBase const &', 'cb')])
    # Value accessors.
    cls.add_method('Get', 'unsigned int', [], is_const=True)
    cls.add_method('Set', 'void', [param('unsigned int const &', 'v')])