code
stringlengths
101
5.91M
def unify_title(title):
    """Normalize a title: trim whitespace, lowercase, strip punctuation."""
    normalized = title.strip().lower()
    # str.translate with a deletion table removes every punctuation
    # character in one C-level pass.
    return normalized.translate(str.maketrans('', '', string.punctuation))
class TrackGenerator(object):
    """Runs model inference over video sequences and clusters the resulting
    embeddings into object tracks, delegating output writing to
    ``output_generator``."""

    def __init__(self, sequences, dataset_name, output_generator, output_dir, model_ckpt_path, max_tracks, preload_images, resize_scale, semseg_averaging_on_gpu, **kwargs):
        self.sequences = sequences
        self.dataset_name = dataset_name
        self.output_generator = output_generator
        # Dataset decides how the semantic-segmentation head output is read.
        if (self.dataset_name == 'kittimots'):
            semseg_output_type = 'argmax'
        elif (self.dataset_name == 'ytvis'):
            semseg_output_type = 'logits'
        else:
            semseg_output_type = None
        self.model = InferenceModel(model_ckpt_path, semseg_output_type=semseg_output_type, preload_images=preload_images, resize_scale=resize_scale, semseg_generation_on_gpu=semseg_averaging_on_gpu).cuda()
        self.resize_scale = resize_scale
        self.vis_output_dir = os.path.join(output_dir, 'vis')
        self.embeddings_output_dir = os.path.join(output_dir, 'embeddings')
        self.max_tracks = max_tracks
        # Optional knobs via kwargs.
        self.save_vis = kwargs.get('save_vis', False)
        self.seediness_fg_threshold = kwargs.get('seediness_thresh', 0.25)
        self.ignore_fg_masks = kwargs.get('ignore_fg_masks', False)
        self.frame_overlap = kwargs.get('frame_overlap', (- 1))
        self.clustering_device = kwargs.get('clustering_device', 'cuda:0')
        self.chainer = OnlineChainer(self.create_clusterer(), embedding_resize_factor=resize_scale)
        # Float so the fps division in start() is exact.
        self.total_frames_processed = 0.0

    def create_clusterer(self):
        """Build the sequential clusterer from global config thresholds."""
        _cfg = cfg.CLUSTERING
        return SequentialClustering(primary_prob_thresh=_cfg.PRIMARY_PROB_THRESHOLD, secondary_prob_thresh=_cfg.SECONDARY_PROB_THRESHOLD, min_seediness_prob=_cfg.MIN_SEEDINESS_PROB, n_free_dims=get_nb_free_dims(cfg.MODEL.EMBEDDING_DIM_MODE), free_dim_stds=cfg.TRAINING.LOSSES.EMBEDDING.FREE_DIM_STDS, device=self.clustering_device)

    def get_fg_masks_from_seediness(self, inference_output):
        """Average per-frame seediness over overlapping subsequences, then
        threshold at ``self.seediness_fg_threshold`` to get byte FG masks."""
        # Maps frame index t -> [sum of seediness, count of contributions].
        seediness_scores = defaultdict((lambda : [0.0, 0.0]))
        for (subseq_frames, _, _, subseq_seediness) in inference_output['embeddings']:
            subseq_seediness = subseq_seediness.cuda().squeeze(0)
            for (i, t) in enumerate(subseq_frames):
                seediness_scores[t][0] += subseq_seediness[i]
                seediness_scores[t][1] += 1.0
        fg_masks = [(seediness_scores[t][0] / seediness_scores[t][1]) for t in sorted(seediness_scores.keys())]
        return (torch.stack(fg_masks, 0) > self.seediness_fg_threshold).byte().cpu()

    def start(self, seqs_to_process):
        """Process all (or the selected subset of) sequences and print fps stats."""
        if (not isinstance(self.max_tracks, (list, tuple))):
            # Broadcast a scalar track budget to every sequence.
            self.max_tracks = ([self.max_tracks] * len(self.sequences))
        for i in range(len(self.sequences)):
            sequence = self.sequences[i]
            if (seqs_to_process and (str(sequence.seq_id) not in seqs_to_process)):
                continue
            print('Performing inference for sequence {}/{}'.format((i + 1), len(self.sequences)))
            self.process_sequence(sequence, self.max_tracks[i])
        print('')
        print('Model inference speed: {:.3f} fps'.format((self.total_frames_processed / Timer.get_duration('inference'))))
        print('Clustering and postprocessing speed: {:.3f} fps'.format((self.total_frames_processed / Timer.get_duration('postprocessing'))))
        print('Overall speed: {:.3f} fps'.format((self.total_frames_processed / Timer.get_durations_sum())))
        print('')

    def process_sequence(self, sequence, max_tracks):
        """Run inference then clustering for one sequence."""
        (embeddings, fg_masks, multiclass_masks) = self.do_inference(sequence)
        self.do_clustering(sequence, embeddings, fg_masks, multiclass_masks, max_tracks)
        self.total_frames_processed += len(sequence)

    # NOTE(review): the bare ``_duration('inference')`` below looks like the
    # residue of a mangled timing decorator (cf. Timer.get_duration('inference')
    # in start()) — confirm against the upstream file.
    _duration('inference')
    def do_inference(self, sequence):
        """Run the model over overlapping subsequences; return embeddings,
        foreground masks and multiclass masks."""
        (subseq_idxes, _) = get_subsequence_frames(len(sequence), cfg.INPUT.NUM_FRAMES, self.dataset_name, self.frame_overlap)
        image_paths = [os.path.join(sequence.base_dir, path) for path in sequence.image_paths]
        inference_output = self.model(image_paths, subseq_idxes)
        (fg_masks, multiclass_masks) = (inference_output['fg_masks'], inference_output['multiclass_masks'])
        if torch.is_tensor(fg_masks):
            print("Obtaining foreground mask from model's foreground mask output")
            fg_masks = (fg_masks > 0.5).byte()
        else:
            # Model produced no FG mask tensor: derive it from seediness.
            print('Obtaining foreground mask by thresholding seediness map at {}'.format(self.seediness_fg_threshold))
            fg_masks = self.get_fg_masks_from_seediness(inference_output)
        return (inference_output['embeddings'], fg_masks, multiclass_masks)

    # NOTE(review): same mangled-decorator residue as above — confirm.
    _duration('postprocessing')
    def do_clustering(self, sequence, all_embeddings, fg_masks, multiclass_masks, max_tracks):
        """Chain per-subsequence clusterings into sequence-level tracks and
        hand results to the output generator."""
        subseq_dicts = []
        for (i, (subseq_frames, embeddings, bandwidths, seediness)) in tqdm(enumerate(all_embeddings), total=len(all_embeddings)):
            subseq_dicts.append({'frames': subseq_frames, 'embeddings': embeddings, 'bandwidths': bandwidths, 'seediness': seediness})
        ((track_labels, instance_pt_counts, instance_lifetimes), framewise_mask_idxes, subseq_labels_list, fg_embeddings, subseq_clustering_meta_info) = self.chainer.process(fg_masks, subseq_dicts)
        self.output_generator.process_sequence(sequence, framewise_mask_idxes, track_labels, instance_pt_counts, instance_lifetimes, multiclass_masks, fg_masks.shape[(- 2):], 4.0, max_tracks, device=self.clustering_device)
def read_arpa(fstream):
    """Parse an ARPA-format n-gram language model from the open text stream.

    Returns a tuple ``(num_ngrams, ngrams_by_order, backoffs_by_order)``:
    declared counts per order, ``{context_tuple: {token: log-prob}}`` per
    order, and ``{ngram_tuple: backoff}`` per order.

    Raises ValueError on malformed input or when declared orders do not
    match the sections actually read.
    """
    _find_data_section(fstream)
    num_ngrams = {}
    # Header: 'ngram <order>=<count>' lines until a blank line or the first
    # '\\<n>-grams:' section marker.
    for line in fstream:
        line = line.strip()
        if (line[:5] == 'ngram'):
            (lhs, rhs) = line.split('=')
            order = int(lhs.split()[1])
            num_grams = int(rhs)
            num_ngrams[order] = num_grams
        elif (not line):
            (ended, order) = _next_section_or_end(fstream)
            break
        elif _starts_ngrams_section(line):
            ended = False
            order = _parse_order(line)
            break
        else:
            raise ValueError('Not a properly formatted line')
    ngrams_by_order = {}
    backoffs_by_order = {}
    while (not ended):
        probs = collections.defaultdict(dict)
        backoffs = {}
        # A line with an explicit backoff has order + 2 fields:
        # prob, <order> tokens, backoff.
        backoff_line_length = (order + 2)
        # The section scan is terminated by the first line whose leading
        # field is not a float (blank line / section marker) — that raises
        # ValueError/IndexError, which commits the accumulated n-grams.
        try:
            for line in fstream:
                line = line.strip()
                all_parts = tuple(line.split())
                prob = float(all_parts[0])
                if (len(all_parts) == backoff_line_length):
                    context = all_parts[1:(- 2)]
                    token = all_parts[(- 2)]
                    backoff = float(all_parts[(- 1)])
                    backoff_context = (context + (token,))
                    backoffs[backoff_context] = backoff
                else:
                    context = all_parts[1:(- 1)]
                    token = all_parts[(- 1)]
                    probs[context][token] = prob
        except (IndexError, ValueError):
            ngrams_by_order[order] = probs
            backoffs_by_order[order] = backoffs
        # Dispatch on the terminating line: blank -> find next section,
        # new '\\<n>-grams:' -> continue, '\\end\\' -> done.
        # NOTE(review): nesting reconstructed from collapsed source — confirm
        # this dispatch sits at loop level rather than inside the except.
        if (not line):
            (ended, order) = _next_section_or_end(fstream)
        elif _starts_ngrams_section(line):
            ended = False
            order = _parse_order(line)
        elif _ends_arpa(line):
            ended = True
            order = None
        else:
            raise ValueError('Not a properly formatted ARPA file')
    if (not (num_ngrams.keys() == ngrams_by_order.keys())):
        raise ValueError('Not a properly formatted ARPA file')
    return (num_ngrams, ngrams_by_order, backoffs_by_order)
class SubprocessStoppedError(Exception):
    """Raised when a managed subprocess stops unexpectedly.

    Attributes:
        code: the exit/status code reported by the subprocess (also stored
            in ``args`` for standard exception formatting).
    """

    def __init__(self, code):
        # Bug fix: the original called super(Exception, self).__init__(code),
        # which starts MRO lookup *after* Exception and so delegates to
        # BaseException, silently skipping Exception.__init__. Zero-argument
        # super() targets the correct parent.
        super().__init__(code)
        self.code = code
def def_mat(ts, mode, coors, term, pb):
    """sfepy material function: Neo-Hookean + bulk-penalty stress/tangent at
    quadrature points in the updated-Lagrangian setting.

    Returns a dict with 'E' (Green strain), 'A' (nonsymmetric tangent
    modulus, per-point (dim^2, dim^2)) and 'S' (stress, per-point (sym, 1)),
    or None for any mode other than 'qp'.
    """
    if (not (mode == 'qp')):
        return
    # Lazily attach the hyperelastic UL family-data container to the problem.
    if (not hasattr(pb, 'family_data')):
        pb.family_data = HyperElasticULFamilyData()
    update_var = pb.conf.options.mesh_update_variable
    if (pb.equations is None):
        state_u = pb.create_variables([update_var])[update_var]
    else:
        state_u = pb.get_variables()[update_var]
    if (state_u.data[0] is None):
        state_u.init_data()
    # Displacement = actual (deformed) mesh coordinates minus reference ones.
    state_u.set_data((pb.domain.get_mesh_coors(actual=True) - pb.domain.get_mesh_coors()))
    state_u.field.clear_mappings()
    family_data = pb.family_data(state_u, term.region, term.integral, list(term.geometry_types.values())[0])
    if (len(state_u.field.mappings0) == 0):
        state_u.field.save_mappings()
    (n_el, n_qp, dim, n_en, n_c) = state_u.get_data_shape(term.integral, term.act_integration, term.region.name)
    # Fetch 'mu' and 'K' from the first material whose key contains 'solid'.
    conf_mat = pb.conf.materials
    solid_key = [key for key in conf_mat.keys() if ('solid' in key)][0]
    solid_mat = conf_mat[solid_key].values
    mat = {}
    for mat_key in ['mu', 'K']:
        mat_fun = ConstantFunctionByRegion({mat_key: solid_mat[mat_key]})
        mat[mat_key] = mat_fun.function(ts=ts, coors=coors, mode='qp', term=term, problem=pb)[mat_key].reshape((n_el, n_qp, 1, 1))
    shape = family_data.green_strain.shape[:2]
    sym = family_data.green_strain.shape[(- 2)]
    dim2 = (dim ** 2)
    # Deviatoric (Neo-Hookean) contribution.
    fargs = [family_data.get(name) for name in NeoHookeanULTerm.family_data_names]
    stress = nm.empty((shape + (sym, 1)), dtype=nm.float64)
    tanmod = nm.empty((shape + (sym, sym)), dtype=nm.float64)
    NeoHookeanULTerm.stress_function(stress, mat['mu'], *fargs)
    NeoHookeanULTerm.tan_mod_function(tanmod, mat['mu'], *fargs)
    # Volumetric (bulk-penalty) contribution.
    fargs = [family_data.get(name) for name in BulkPenaltyULTerm.family_data_names]
    stress_p = nm.empty((shape + (sym, 1)), dtype=nm.float64)
    tanmod_p = nm.empty((shape + (sym, sym)), dtype=nm.float64)
    BulkPenaltyULTerm.stress_function(stress_p, mat['K'], *fargs)
    BulkPenaltyULTerm.tan_mod_function(tanmod_p, mat['K'], *fargs)
    # Expand symmetric storage into full nonsymmetric (dim^2, dim^2) form.
    stress_ns = nm.zeros((shape + (dim2, dim2)), dtype=nm.float64)
    tanmod_ns = nm.zeros((shape + (dim2, dim2)), dtype=nm.float64)
    sym2nonsym(stress_ns, (stress + stress_p))
    sym2nonsym(tanmod_ns, (tanmod + tanmod_p))
    npts = nm.prod(shape)
    J = family_data.det_f
    mtx_f = family_data.mtx_f.reshape((npts, dim, dim))
    # E = 0.5 * (F^T F - I); A and S are scaled by 1/J (UL push-forward).
    out = {'E': (0.5 * (la.dot_sequences(mtx_f, mtx_f, 'ATB') - nm.eye(dim))), 'A': ((tanmod_ns + stress_ns) / J).reshape((npts, dim2, dim2)), 'S': ((stress + stress_p) / J).reshape((npts, sym, 1))}
    return out
def pytest_addoption(parser):
    """pytest hook: register the ``--random-seed`` command-line option."""
    parser.addoption(
        '--random-seed',
        action='store',
        help='Fix the random seed',
    )
def iterate_random_conditioning(opts, batch_size, frontal_camera: bool=False):
    """Infinite generator yielding (class labels, camera params) batches.

    Yields forever; the caller pulls a finite number of batches. The dataset
    is only constructed when class labels or custom camera angles are needed.
    """
    if ((opts.G.c_dim != 0) or (opts.G.cfg.camera.origin.angles.dist == 'custom')):
        dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
    if frontal_camera:
        # Fixed frontal pose: zero yaw/roll, pitch at pi/2.
        yaw = torch.zeros(batch_size, device=opts.device)
        pitch = ((np.pi / 2) + torch.zeros(batch_size, device=opts.device))
        roll = torch.zeros(batch_size, device=opts.device)
        frontal_angles = torch.stack([yaw, pitch, roll], dim=1)
    else:
        frontal_angles = None
    if (opts.G.c_dim == 0):
        # Unconditional model: empty (batch, 0) label tensor, reused forever.
        c = torch.zeros([batch_size, opts.G.c_dim], device=opts.device)
    if ((opts.G.c_dim == 0) and (opts.G.cfg.camera.origin.angles.dist != 'custom')):
        # Fast path: no dataset needed, sample camera params directly.
        while True:
            camera_params = sample_camera_params(cfg=opts.G.cfg.camera, batch_size=batch_size, device=opts.device, origin_angles=(frontal_angles if frontal_camera else None))
            (yield (c, camera_params))
    else:
        # Dataset-backed path: draw random samples for labels and/or angles.
        while True:
            sample_idx = [np.random.randint(len(dataset)) for _ in range(batch_size)]
            if (opts.G.c_dim == 0):
                curr_c = c
            else:
                curr_c = [dataset.get_label(i) for i in sample_idx]
                curr_c = torch.from_numpy(np.stack(curr_c)).pin_memory().to(opts.device)
            if frontal_camera:
                camera_angles = frontal_angles
            elif (opts.G.cfg.camera.origin.angles.dist == 'custom'):
                camera_angles = [dataset.get_camera_angles(i) for i in sample_idx]
                camera_angles = torch.from_numpy(np.stack(camera_angles)).pin_memory().to(opts.device)
            else:
                camera_angles = None
            camera_params = sample_camera_params(opts.G.cfg.camera, len(sample_idx), opts.device, origin_angles=camera_angles)
            (yield (curr_c, camera_params))
def get_entity_types(_dialog_elem):
    """Return the element's 'entity_types' value, or [] when the element is
    None, the key is absent, or the stored value is falsy."""
    if _dialog_elem is not None:
        entity_types = _dialog_elem.get('entity_types')
        if entity_types:
            return entity_types
    return []
class ConvGRUCell(tf.nn.rnn_cell.RNNCell):
    """Convolutional GRU cell (TF1 API) with optional layer normalization."""

    def __init__(self, shape, filters, kernel, activation=tf.tanh, normalize=True, data_format='channels_last', reuse=None):
        super(ConvGRUCell, self).__init__(_reuse=reuse)
        self._filters = filters
        self._kernel = kernel
        self._activation = activation
        self._normalize = normalize
        if (data_format == 'channels_last'):
            self._size = tf.TensorShape((shape + [self._filters]))
            self._feature_axis = self._size.ndims
            self._data_format = None
        elif (data_format == 'channels_first'):
            self._size = tf.TensorShape(([self._filters] + shape))
            self._feature_axis = 0
            self._data_format = 'NC'
        else:
            raise ValueError('Unknown data_format')

    # NOTE(review): RNNCell normally declares state_size/output_size as
    # @property; they appear as plain methods here — the decorators may have
    # been lost in extraction. Confirm against the upstream source.
    def state_size(self):
        return self._size

    def output_size(self):
        return self._size

    def call(self, x, h):
        channels = x.shape[self._feature_axis].value
        with tf.variable_scope('gates'):
            # Reset (r) and update (u) gates from [x, h].
            inputs = tf.concat([x, h], axis=self._feature_axis)
            n = (channels + self._filters)
            m = ((2 * self._filters) if (self._filters > 1) else 2)
            W = tf.get_variable('kernel', (self._kernel + [n, m]))
            y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
            if self._normalize:
                # Layer-norm each gate separately; its offsets replace a bias.
                (r, u) = tf.split(y, 2, axis=self._feature_axis)
                r = tf.contrib.layers.layer_norm(r)
                u = tf.contrib.layers.layer_norm(u)
            else:
                # Ones-initialized bias biases both gates open at start.
                y += tf.get_variable('bias', [m], initializer=tf.ones_initializer())
                (r, u) = tf.split(y, 2, axis=self._feature_axis)
            (r, u) = (tf.sigmoid(r), tf.sigmoid(u))
        with tf.variable_scope('candidate'):
            # Candidate state from [x, r*h]; blended with h via u.
            inputs = tf.concat([x, (r * h)], axis=self._feature_axis)
            n = (channels + self._filters)
            m = self._filters
            W = tf.get_variable('kernel', (self._kernel + [n, m]))
            y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
            if self._normalize:
                y = tf.contrib.layers.layer_norm(y)
            else:
                y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
            h = ((u * h) + ((1 - u) * self._activation(y)))
        # NOTE(review): no return statement is visible — RNNCell.call is
        # expected to return (output, new_state), so a trailing
        # 'return h, h' was likely lost in extraction. Confirm upstream.
# NOTE(review): the bare '_model' below looks like the residue of a mangled
# '@register_model' decorator (timm convention) — confirm upstream.
_model
def tf_efficientnet_em(pretrained=False, **kwargs):
    """EfficientNet-Edge-Medium with TF-compatible defaults (TF batch-norm
    epsilon, 'same'-style padding); depth multiplier 1.1, width 1.0."""
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnet_edge('tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
    return model
def read_pid_from_pidfile(pidfile_path):
    """Read and return the PID recorded in *pidfile_path*.

    Returns None if the file cannot be opened or its first line is not a
    valid integer (best-effort semantics, preserved from the original).
    """
    pid = None
    try:
        pidfile = open(pidfile_path, 'r')
    except IOError:
        pass
    else:
        # Fix: the original closed the file only on the success path, leaking
        # the handle if readline() raised; the context manager always closes.
        with pidfile:
            line = pidfile.readline().strip()
        try:
            pid = int(line)
        except ValueError:
            pass
    return pid
def norm_box_xyxy(box, *, w, h):
    """Normalize absolute (x1, y1, x2, y2) coordinates to [0, 1] by image
    size, clamping out-of-range values and rounding to 3 decimals."""
    def _unit(value):
        # Clamp a normalized coordinate into the unit interval.
        return max(0.0, min(value, 1.0))

    x1, y1, x2, y2 = box
    scaled = (x1 / w, y1 / h, x2 / w, y2 / h)
    return tuple(round(_unit(v), 3) for v in scaled)
def model_params(n, m, d, n_z, n_samples, **kwargs):
    """Build the default model parameter dictionary, then overlay kwargs.

    Known keys are overridden silently; unknown keys are added with a
    printed notice (matching the original behavior).
    """
    params = {
        'n': n, 'm': m, 'd': d, 'n_z': n_z, 'n_samples': n_samples,
        'manifold': 'euclid', 'kernel': 'RBF', 'prior': 'Uniform',
        'likelihood': 'Gaussian', 'initialization': 'fa', 'Y': None,
        'latent_sigma': 1, 'latent_mu': None, 'diagonal': True,
        'learn_linear_scale': True, 'linear_scale': None,
        'RBF_scale': None, 'RBF_ell': None,
        'arp_p': 1, 'arp_eta': (np.ones(d) * 1), 'arp_learn_eta': True,
        'arp_learn_c': False, 'arp_learn_phi': True,
        'prior_ell': None, 'lik_gauss_std': None,
        # Time index tensor of shape (n_samples, 1, m).
        'ts': torch.arange(m)[(None, None, ...)].repeat(n_samples, 1, 1),
        'device': None,
    }
    for key, value in kwargs.items():
        if key not in params:
            print('key not found; adding', key)
        params[key] = value
    return params
def main(args):
    """Load a PointFlow checkpoint onto GPU(s) and run evaluation.

    Runs reconstruction evaluation when args.evaluate_recon is set,
    otherwise generation evaluation.
    """
    model = PointFlow(args)

    def _transform_(m):
        # Passed to multi_gpu_wrapper to spread the module across GPUs.
        return nn.DataParallel(m)
    model = model.cuda()
    model.multi_gpu_wrapper(_transform_)
    print(('Resume Path:%s' % args.resume_checkpoint))
    checkpoint = torch.load(args.resume_checkpoint)
    model.load_state_dict(checkpoint)
    model.eval()
    with torch.no_grad():
        if args.evaluate_recon:
            evaluate_recon(model, args)
        else:
            evaluate_gen(model, args)
def clean_manual(man_string):
    """Collapse *man_string* into a single line.

    Blank lines are dropped and every run of whitespace (spaces, tabs,
    newlines) is squeezed to a single space.
    """
    # str.split() with no argument splits on any whitespace and discards
    # empty fields, so this single pass reproduces the original
    # line-by-line strip/filter/normalize/join pipeline exactly.
    return ' '.join(man_string.split())
def test_count_operations(real_app_schema):
    """With count_operations=False, the first emitted event must carry no
    operations count (operations_count is None)."""
    # Pull only the first event from the lazy execution stream.
    event = next(from_schema(real_app_schema, count_operations=False).execute())
    assert (event.operations_count is None)
def schedule_per_step(step, learning_rate):
    """Return the exponentially-decayed learning rate for *step*.

    The base rate from PARAMS is scaled for multi-replica training unless
    the scale factor is exactly zero. The *learning_rate* argument is
    unused; it is kept for the scheduler callback signature.
    """
    base_lr = PARAMS['learning_rate']
    if PARAMS['num_replicas'] > 1 and PARAMS['scale'] != 0.0:
        # Linear-scaling rule: grow the rate with the replica count.
        base_lr = base_lr * (PARAMS['num_replicas'] * PARAMS['scale'])
    decay = ExponentialDecay(
        initial_learning_rate=base_lr,
        decay_steps=PARAMS['decay_steps'],
        decay_rate=PARAMS['decay_rate'],
        staircase=True,
    )
    return decay(step)
class NearToken(DistanceToken):
    """Distance token whose match relation is direct pixel adjacency."""

    def _neighbor_match(self, input_elem, output_elem):
        # Delegate the adjacency test to the shared pixel-neighborhood helper.
        return N.is_pixel_neighbor(input_elem, output_elem)

    def __str__(self):
        return 'Near({}, {})'.format(self._token, self._classes)
    # repr mirrors str for readable debugging output.
    __repr__ = __str__
def parse_token_line(line):
    """Extract the (token, pos) pair from one bracketed tagger-output line.

    Expects space-separated ``key=value`` fields, optionally wrapped in
    square brackets, where field 0 holds the token and field 3 holds the
    part-of-speech tag.
    """
    stripped = line.replace('[', '').replace(']', '')
    fields = stripped.split(' ')
    token = fields[0].split('=')[1]
    pos = fields[3].split('=')[1]
    return (token, pos)
# NOTE(review): the leading '.gpu' looks like the residue of a mangled
# decorator (e.g. '@pytest.mark.gpu') — confirm against the upstream file.
.gpu
def test_relu_vec():
    """GPU test: float16 ReLU program must accept exactly one Vectorization
    transform (vector_len=8) and still compute max(A, 0) correctly."""
    _config()

    def halftest(A: dace.float16[N]):
        out = np.ndarray([N], dace.float16)
        for i in dace.map[0:N]:
            with dace.tasklet:
                (a << A[i])
                (o >> out[i])
                o = max(a, dace.float16(0))
        return out
    A = np.random.rand(24).astype(np.float16)
    sdfg = halftest.to_sdfg()
    sdfg.apply_gpu_transformations()
    # Exactly one Vectorization application must succeed.
    assert (sdfg.apply_transformations(Vectorization, dict(vector_len=8)) == 1)
    out = sdfg(A=A, N=24)
    assert np.allclose(out, np.maximum(A, 0))
class RegularFaceRegularizer(BaseRegularizer):
    """RegularFace-style regularizer: penalizes each class weight vector's
    similarity to its single nearest *other* class."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # The distance must be a similarity (larger = closer), e.g. cosine.
        assert self.distance.is_inverted

    def compute_loss(self, weights):
        (dtype, device) = (weights.dtype, weights.device)
        num_classes = weights.size(0)
        cos = self.distance(weights)
        with torch.no_grad():
            cos1 = cos.clone()
            # Exclude self-similarity before searching for the closest class.
            cos1.fill_diagonal_(c_f.neg_inf(dtype))
            (_, indices) = self.distance.smallest_dist(cos1, dim=1)
        # One-hot mask selecting, per row, the nearest other class.
        mask = torch.zeros((num_classes, num_classes), dtype=dtype, device=device)
        row_nums = torch.arange(num_classes, device=device, dtype=torch.long)
        mask[(row_nums, indices)] = 1
        # Per-class loss is the similarity to that nearest class.
        losses = torch.sum((cos * mask), dim=1)
        return {'loss': {'losses': losses, 'indices': c_f.torch_arange_from_size(weights), 'reduction_type': 'element'}}

    def get_default_distance(self):
        return CosineSimilarity()
class DyReLU(BaseModule):
    """Dynamic ReLU: a piecewise-linear activation whose per-channel
    coefficients (a1, b1, a2, b2) are predicted from the input itself."""

    def __init__(self, channels, ratio=4, conv_cfg=None, act_cfg=(dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, divisor=6.0)), init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        if isinstance(act_cfg, dict):
            # A single act config is reused for both branch convs.
            act_cfg = (act_cfg, act_cfg)
        assert (len(act_cfg) == 2)
        assert mmcv.is_tuple_of(act_cfg, dict)
        self.channels = channels
        # Four coefficients are predicted per channel.
        self.expansion = 4
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.conv1 = ConvModule(in_channels=channels, out_channels=int((channels / ratio)), kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[0])
        self.conv2 = ConvModule(in_channels=int((channels / ratio)), out_channels=(channels * self.expansion), kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[1])

    def forward(self, x):
        """Return max(x*a1 + b1, x*a2 + b2) with input-conditioned coefficients."""
        coeffs = self.global_avgpool(x)
        coeffs = self.conv1(coeffs)
        # Shift the (H-sigmoid, [0, 1]) output to be centered at zero.
        coeffs = (self.conv2(coeffs) - 0.5)
        (a1, b1, a2, b2) = torch.split(coeffs, self.channels, dim=1)
        a1 = ((a1 * 2.0) + 1.0)  # slope centered at 1 (identity-like branch)
        a2 = (a2 * 2.0)          # slope centered at 0
        out = torch.max(((x * a1) + b1), ((x * a2) + b2))
        return out
def complex2array(z):
    """Split a complex array *z* into a real float array of shape
    (2, *z.shape): index 0 holds the real part, index 1 the imaginary."""
    out = np.zeros((2,) + z.shape)
    out[0] = z.real
    out[1] = z.imag
    return out
class Embeddings2D(nn.Module):
    """Resize an image batch to img_size x img_size and embed it into a
    sequence of non-overlapping patch tokens via a strided convolution."""

    def __init__(self, input_shape, patch_size=4, embed_dim=96, img_size=224, config=None):
        super().__init__()
        (self.resize, self.input_dimensions) = (transforms.Resize((img_size, img_size)), (img_size, img_size))
        self.patch_size = to_2tuple(patch_size)
        self.patched_dimensions = ((self.input_dimensions[0] // self.patch_size[0]), (self.input_dimensions[1] // self.patch_size[1]))
        ks = self.patch_size
        # NOTE(review): ks is self.patch_size, so this padding expression is
        # always 0 — confirm whether a different kernel size was intended.
        self.projection = nn.Conv2d(input_shape[1], embed_dim, kernel_size=ks, stride=self.patch_size, padding=((ks[0] - self.patch_size[0]) // 2))
        self.norm = nn.LayerNorm(embed_dim)
        # NOTE(review): num_patches is computed but never stored or used.
        num_patches = ((self.input_dimensions[1] // self.patch_size[1]) * (self.input_dimensions[0] // self.patch_size[0]))
        conv_init(self.projection)

    def maybe_pad(self, x, height, width):
        """Pad right/bottom so both spatial dims divide the patch size."""
        if ((width % self.patch_size[1]) != 0):
            pad_values = (0, (self.patch_size[1] - (width % self.patch_size[1])))
            x = nn.functional.pad(x, pad_values)
        if ((height % self.patch_size[0]) != 0):
            pad_values = (0, 0, 0, (self.patch_size[0] - (height % self.patch_size[0])))
            x = nn.functional.pad(x, pad_values)
        return x

    def forward(self, x, *args, **kwargs):
        """Return (tokens, patched_dimensions); tokens have shape (B, N, C)."""
        x = self.resize(x)
        (_, _, height, width) = x.shape
        x = self.maybe_pad(x, height, width)
        x = self.projection(x)
        # (B, C, H', W') -> (B, H'*W', C) token sequence.
        x = x.flatten(2).transpose(1, 2)
        x = self.norm(x)
        return (x, self.patched_dimensions)
def run(model, train_loader, test_loader, epochs, optimizer, scheduler, writer, device):
    """Train *model* for *epochs* epochs; after each epoch evaluate, step the
    LR scheduler, log via *writer*, and save a checkpoint."""
    # NOTE(review): these lists are never appended to or returned — confirm
    # whether per-epoch loss accumulation was intended.
    (train_losses, test_losses) = ([], [])
    for epoch in range(1, (epochs + 1)):
        t = time.time()
        train_loss = train(model, optimizer, train_loader, device)
        # Duration covers training only, not evaluation.
        t_duration = (time.time() - t)
        test_loss = test(model, test_loader, device)
        scheduler.step()
        info = {'current_epoch': epoch, 'epochs': epochs, 'train_loss': train_loss, 'test_loss': test_loss, 't_duration': t_duration}
        writer.print_info(info)
        writer.save_checkpoint(model, optimizer, scheduler, epoch)
class fcn32s_lite(fcn32s):
    """Lightweight FCN-32s variant: trims the tail of the inherited encoder
    and rebuilds fc6/fc7 with the smaller feature dimension (256)."""

    def __init__(self, num_classes):
        super().__init__(num_classes, 256)
        # Drop the last six modules inherited from the full encoder.
        del self.encoder[(- 6):]
        fc6 = [('fc6', nn.Conv2d(512, self.feat_dim, 7)), ('fc6_relu', nn.ReLU(inplace=True)), ('fc6_drop', nn.Dropout2d(p=0.5))]
        fc7 = [('fc7', nn.Conv2d(self.feat_dim, self.feat_dim, 1)), ('fc7_relu', nn.ReLU(inplace=True)), ('fc7_drop', nn.Dropout2d(p=0.5))]
        # Bug fix: the original iterated over (fc6 + fc6), so the fc7 modules
        # were never attached and the init loop below raised AttributeError
        # on self.encoder.fc7.
        for (n, m) in (fc6 + fc7):
            setattr(self.encoder, n, m)
        # Small-std init for the new conv layers, zero bias.
        for m in (self.encoder.fc6, self.encoder.fc7):
            nn.init.normal_(m.weight, 0.0, 0.001)
            nn.init.constant_(m.bias, 0.0)
class SIE_gens_constructor(SageInputExpression):
    """Sage-input expression for a parent constructor with named generators.

    When the preparser is active it emits the ``R.<x,y> = Constr(...)``
    generator syntax; otherwise it falls back to an ordinary assignment
    plus explicit ``.gen()`` / ``.gens()`` assignments.
    """

    def __init__(self, sib, constr, gen_names, gens_syntax=None):
        super().__init__(sib)
        self._sie_constr = constr
        self._sie_gen_names = gen_names
        self._sie_gens = None
        self._sie_gens_constr = gens_syntax
        # Set lazily when generators are actually referenced.
        self._sie_assign_gens = False
        # Guards against emitting the construction command twice.
        self._sie_generated = False

    def __repr__(self):
        return ('{constr_parent: %s with gens: %s}' % (repr(self._sie_constr), self._sie_gen_names))

    def _sie_referenced(self):
        return [self._sie_constr]

    def _sie_gens_referenced(self, sif):
        # The generators are used, so both the parent and every generator
        # need stable variable names in the generated code.
        self._sie_assign_gens = True
        self._sie_require_varname(sif)
        for gen in self._sie_gens:
            gen._sie_require_varname(sif)

    def _sie_add_command(self, sif):
        if (not self._sie_generated):
            # Prefer the preparser generator syntax when available and every
            # generator got its preferred name.
            if (self._sie_builder.preparse() and (self._sie_gens_constr is not None) and all((g._sie_got_preferred(sif) for g in self._sie_gens))):
                (s, _) = self._sie_gens_constr._sie_format(sif)
                sif._commands += ('%s.<%s> = %s\n' % (self._sie_get_varname(sif), ','.join(self._sie_gen_names), s))
            else:
                (s, _) = self._sie_constr._sie_format(sif)
                sif._commands += ('%s = %s\n' % (self._sie_get_varname(sif), s))
                # Without the preparser syntax, bind generators explicitly.
                if self._sie_assign_gens:
                    if (len(self._sie_gens) == 1):
                        sif._commands += ('%s = %s.gen()\n' % (self._sie_gens[0]._sie_get_varname(sif), self._sie_get_varname(sif)))
                    else:
                        sif._commands += ('%s = %s.gens()\n' % (','.join([g._sie_get_varname(sif) for g in self._sie_gens]), self._sie_get_varname(sif)))
            self._sie_generated = True

    def _sie_format(self, sif):
        if self._sie_assign_gens:
            # Generators are referenced: emit the command and refer to the
            # parent by its variable name.
            self._sie_add_command(sif)
            return (self._sie_get_varname(sif), _prec_atomic)
        return self._sie_constr._sie_format(sif)
def gen_ctx_vectors(ctx_rows: List[Tuple[(object, str, str)]], model: nn.Module, tensorizer: Tensorizer, insert_title: bool=True) -> List[Tuple[(object, np.array)]]:
    """Encode (id, text, title) passage rows into dense vectors, batched.

    Returns (passage_id, flat numpy embedding) tuples in input order.
    Relies on module-level ``args`` (batch_size, device) and ``logger``.
    """
    n = len(ctx_rows)
    bsz = args.batch_size
    total = 0
    results = []
    for (j, batch_start) in enumerate(range(0, n, bsz)):
        # Tokenize the batch; the title is prepended only when requested.
        batch_token_tensors = [tensorizer.text_to_tensor(ctx[1], title=(ctx[2] if insert_title else None)) for ctx in ctx_rows[batch_start:(batch_start + bsz)]]
        ctx_ids_batch = move_to_device(torch.stack(batch_token_tensors, dim=0), args.device)
        # All-zero segment ids: single-segment (BERT-style) encoding.
        ctx_seg_batch = move_to_device(torch.zeros_like(ctx_ids_batch), args.device)
        ctx_attn_mask = move_to_device(tensorizer.get_attn_mask(ctx_ids_batch), args.device)
        with torch.no_grad():
            (_, out, _) = model(ctx_ids_batch, ctx_seg_batch, ctx_attn_mask)
        out = out.cpu()
        ctx_ids = [r[0] for r in ctx_rows[batch_start:(batch_start + bsz)]]
        assert (len(ctx_ids) == out.size(0))
        total += len(ctx_ids)
        results.extend([(ctx_ids[i], out[i].view((- 1)).numpy()) for i in range(out.size(0))])
        if ((total % 10) == 0):
            logger.info('Encoded passages %d', total)
    return results
def getParentNode(parent, allNodes):
    """Return the first node in *allNodes* whose ``_id`` equals *parent*,
    or None when no such node exists."""
    # next() with a default replaces the explicit search loop.
    return next((node for node in allNodes if node._id == parent), None)
def dump_data_to_numpy(mode, output_file, workers=1, batchsize=1000, remove_missing_features='scan'):
    """Stream the dataset split *mode* through the batch generator and save
    all feature/label batches into a single .npz file.

    Relies on a module-level ``db_path``; labels saved are the 'malware'
    labels only. Progress is written to stdout per batch.
    """
    _generator = get_generator(path=db_path, mode=mode, batch_size=batchsize, use_malicious_labels=True, use_count_labels=False, use_tag_labels=False, num_workers=workers, remove_missing_features=remove_missing_features, shuffle=False)
    feature_array = []
    label_array = []
    for (i, (features, labels)) in enumerate(_generator):
        # deepcopy detaches the numpy views from torch's reused batch buffers.
        feature_array.append(deepcopy(features.numpy()))
        label_array.append(deepcopy(labels['malware'].numpy()))
        sys.stdout.write(f''' {i} / {len(_generator)}''')
        sys.stdout.flush()
    # Saved as positional arrays (arr_0 = features, arr_1 = labels).
    np.savez(output_file, feature_array, label_array)
    print(f''' Wrote output to {output_file}''')
# NOTE(review): the bare '()' below looks like the residue of a mangled
# decorator (e.g. '@gin.configurable()' / '@dataclasses.dataclass()') —
# confirm against the upstream file.
()
class Config():
    """Configuration for NeRF-style training/evaluation: dataset selection,
    batching, optimization schedule, loss weights, and rendering options.

    Fields annotated ``str`` that default to None are effectively optional
    paths/names.
    """
    # --- Dataset / batching ---
    dataset_loader: str = 'dtu'
    batching: str = 'single_image'
    batching_random: str = 'all_images'
    batch_size: int = 4096
    batch_size_random: int = 4096
    factor: int = 0
    render_factor: int = 0
    remap_to_hemisphere: bool = False
    render_path: bool = False
    render_train: bool = False
    render_path_frames: int = 240
    llffhold: int = 8
    dtuhold: int = 8
    dtu_light_cond: int = 3
    dtu_max_images: int = 49
    dtu_split_type: str = 'pixelnerf'
    use_tiffs: bool = False
    compute_disp_metrics: bool = False
    compute_normal_metrics: bool = False
    # --- Optimization schedule ---
    lr_init: float = 0.0005
    lr_final: float = 5e-05
    lr_delay_steps: int = 0
    lr_delay_mult: float = 0.0
    resample_padding_init: float = 0.01
    resample_padding_final: float = 0.01
    grad_max_norm: float = 0.0
    grad_max_val: float = 0.0
    gc_every: int = 10000
    disable_multiscale_loss: bool = False
    randomized: bool = True
    # --- Scene bounds / base losses ---
    near: float = 2.0
    far: float = 6.0
    near_origin: float = 0.0
    coarse_loss_mult: float = 0.1
    weight_decay_mult: float = 0.0
    white_background: bool = True
    # --- Paths (optional) ---
    checkpoint_dir: str = None
    render_dir: str = None
    data_dir: str = None
    render_chunk_size: int = 4096
    # --- Visualization ---
    num_showcase_images: int = 5
    deterministic_showcase: bool = True
    vis_dist_curve_fn: Callable[(Ellipsis, Any)] = (lambda x: x)
    vis_num_rays: int = 64
    # --- Scene selection ---
    dtu_scan: str = 'scan114'
    llff_scan: str = 'fern'
    blender_scene: str = 'lego'
    dtu_mask_path: str = None
    # --- Regularization losses ---
    depth_tvnorm_loss_mult: float = 0.0
    depth_tvnorm_selector: str = 'distance_mean_save'
    random_scales: int = 1
    random_scales_init: int = 0
    dietnerf_loss_mult: float = 0.0
    dietnerf_loss_resolution: int = 96
    dietnerf_loss_every: int = 10
    depth_tvnorm_decay: bool = False
    depth_tvnorm_maxstep: int = 0
    depth_tvnorm_loss_mult_start: float = 0.0
    depth_tvnorm_loss_mult_end: float = 0.0
    nll_loss_mult: float = 0.001
    nll_loss_decay: bool = True
    nll_loss_maxstep: int = 0
    nll_loss_weight_start: float = 0.0
    nll_loss_weight_end: float = 0.0
    flip_nll_loss_mult: float = 0.001
    flip_nll_loss_decay: bool = True
    flip_nll_loss_maxstep: int = 0
    flip_nll_loss_weight_start: float = 0.0
    flip_nll_loss_weight_end: float = 0.0
    ue_loss_mult: float = 0.001
    ue_loss_decay: bool = True
    ue_loss_maxstep: int = 0
    ue_loss_weight_start: float = 0.0
    ue_loss_weight_end: float = 0.0
    flip_ue_loss_weight: float = 0.001
    bfc_loss_weight: float = 0.001
    ori_loss_weight: float = 0.0001
    llff_flip_mask: bool = False
    eta: float = 2.0
    depth_tvnorm_mask_weight: float = 0.0
    flow_loss_mult: float = 0.0
    depth_tvnorm_type: str = 'l2'
    recon_loss_scales: int = 1
    sample_reconscale_dist: str = 'uniform_scale'
    # --- Training loop ---
    max_steps: int = 250000
    checkpoint_every: int = 25000
    print_every: int = 100
    train_render_every: int = 10000
    n_input_views: int = 9
    n_random_poses: int = 10000
    patch_size: int = 8
    load_random_rays: bool = True
    anneal_nearfar: bool = False
    anneal_nearfar_steps: int = 2000
    anneal_nearfar_perc: float = 0.2
    anneal_mid_perc: float = 0.5
    random_pose_type: str = 'renderpath'
    random_pose_focusptjitter: bool = True
    random_pose_radius: float = 1.0
    random_pose_add_test_poses: bool = False
    check_grad_for_nans: bool = False
    maxdeg_val: int = 16
    maxdeg_steps: int = 0
    # --- Loss toggles ---
    nll_loss: bool = False
    ue_loss: bool = False
    bfc_loss: bool = False
    ori_loss: bool = False
    # --- Evaluation ---
    eval_only_once: bool = True
    eval_save_output: bool = True
    eval_render_interval: int = 1
    eval_disable_lpips: bool = False
    dtu_no_mask_eval: bool = False
def to_json_operator(instring, tokensStart, retTokens):
    """pyparsing parse-action: convert a matched operator expression into the
    JSON-style expression tree ``{op: [operands]}``.

    Handles null-comparison rewrites (eq/neq/is with 'null' become
    missing/exists), ternary operators, and left-associative chains of
    repeated add/mul/and/or; mixed-operator chains recurse.
    """
    tok = retTokens[0]
    op = tok[1]
    clean_op = op.lower()
    clean_op = binary_ops.get(clean_op, clean_op)
    # Find the matched operator; tuple entries are ternary operators.
    for o in KNOWN_OPS:
        if isinstance(o, tuple):
            if o[0].matches(op):
                # Ternary: operands at positions 0, 2 and 4.
                return {clean_op: [tok[0], tok[2], tok[4]]}
        elif o.matches(op):
            break
    else:
        # for/else: no known operator matched.
        if (op == COLLATE_NOCASE.match):
            op = COLLATE_NOCASE.name
            return {op: tok[0]}
        else:
            raise Exception('not found')
    # Null comparisons map onto missing/exists predicates.
    if (clean_op == 'eq'):
        if (tok[2] == 'null'):
            return {'missing': tok[0]}
        elif (tok[0] == 'null'):
            return {'missing': tok[2]}
    elif (clean_op == 'neq'):
        if (tok[2] == 'null'):
            return {'exists': tok[0]}
        elif (tok[0] == 'null'):
            return {'exists': tok[2]}
    elif (clean_op == 'is'):
        if (tok[2] == 'null'):
            return {'missing': tok[0]}
        else:
            return {'exists': tok[0]}
    operands = [tok[0], tok[2]]
    simple = {clean_op: operands}
    if (len(tok) <= 3):
        # Plain binary expression, no chain to fold.
        return simple
    if (clean_op in {'add', 'mul', 'and', 'or'}):
        # Fold a chain of the *same* operator into one n-ary node; on the
        # first different operator, recurse with the folded prefix as the
        # leftmost operand.
        for i in range(3, len(tok), 2):
            if (tok[i] != op):
                return to_json_operator(None, None, [([simple] + tok[i:])])
            else:
                operands.append(tok[(i + 1)])
        return simple
    else:
        # Non-associative operator chains recurse left-to-right.
        return to_json_operator(None, None, [([simple] + tok[3:])])
def fit_gp_surrogate(surrogate, mll, X_train, Y_train, X_val, Y_val, X_test, Y_test, eval_bs=None, train_bs=None, shuffle_train=False, log_prefix='', encoder_obj='mll', resampling_temp=None, select_crit_key='val_nll'):
    """Fit a GP surrogate (optionally with a learned encoder) and return a list of
    per-epoch metric dicts.

    Workflow: optional bootstrap/weighted resampling of the data, dataloader
    construction, baseline evaluation (epoch 0 checkpoint), optional batch-norm
    warm-up and variational-parameter initialization, then the main training loop
    with early stopping on `select_crit_key` and a train-loss stopping criterion.

    `encoder_obj` selects the auxiliary encoder objective: 'mll', 'mlm', 'lanmt',
    or None.  Metrics are also pushed to wandb (best-effort) under `log_prefix`.
    """
    assert (encoder_obj in ['mll', 'mlm', 'lanmt', None]), 'unsupported encoder objective'
    num_val = (0 if (X_val is None) else X_val.shape[0])
    print(f'{X_train.shape[0]} train, {num_val} val, {X_test.shape[0]} test')
    # Bootstrap the training set unless it is already large enough and no
    # explicit bootstrap ratio was requested.
    if ((surrogate.bootstrap_ratio is None) and (X_train.shape[0] >= surrogate.min_num_train)):
        pass
    else:
        (X_train, Y_train) = draw_bootstrap(X_train, Y_train, bootstrap_ratio=surrogate.bootstrap_ratio, min_samples=surrogate.min_num_train)
    if (resampling_temp is not None):
        # Reweight/resample train and val sets; negated Y because
        # weighted_resampling favors small values (TODO confirm convention).
        print('\n---- resampling training and validation data ----')
        (_, train_weights, train_idxs) = weighted_resampling((- Y_train), k=resampling_temp)
        (_, val_weights, val_idxs) = weighted_resampling((- Y_val), k=resampling_temp)
        (X_train, Y_train) = (X_train[train_idxs], Y_train[train_idxs])
        (X_val, Y_val) = (X_val[val_idxs], Y_val[val_idxs])
    collate_fn = (lambda x: gfp_transforms.padding_collate_fn(x, surrogate.tokenizer.padding_idx))
    # Default: full-batch training.
    train_bs = (X_train.shape[0] if (train_bs is None) else train_bs)
    if (num_val > 0):
        (_, val_dataset) = surrogate._get_datasets(X_train, X_val, Y_train, Y_val)
    else:
        val_dataset = None
    (train_dataset, test_dataset) = surrogate._get_datasets(X_train, X_test, Y_train, Y_test)
    train_loader = DataLoader(train_dataset, batch_size=train_bs, shuffle=shuffle_train, collate_fn=collate_fn)
    # NOTE(review): this default crashes if X_val is None while eval_bs is left
    # unset — callers with no val split must pass eval_bs explicitly; confirm.
    eval_bs = (max(X_val.shape[0], X_test.shape[0]) if (eval_bs is None) else eval_bs)
    if (val_dataset is not None):
        val_loader = DataLoader(val_dataset, batch_size=eval_bs, shuffle=False, collate_fn=collate_fn)
    else:
        val_loader = None
    test_loader = DataLoader(test_dataset, batch_size=eval_bs, shuffle=False, collate_fn=collate_fn)
    # Match the targets' dtype to the model parameters (e.g. float32 vs float64).
    Y_train = to_tensor(Y_train, device=surrogate.device)
    Y_train = surrogate.reshape_targets(Y_train)
    Y_train = Y_train.to(dtype=list(surrogate.parameters())[0].dtype)
    if (len(list(surrogate.encoder.parameters())) > 0):
        has_encoder = True
    else:
        print('\n---- surrogate has no encoder ----')
        has_encoder = False
    # --- Baseline evaluation: record metrics before any training (epoch 0). ---
    print('\n---- preparing checkpoint ----')
    surrogate.eval()
    surrogate.requires_grad_(False)
    surrogate.set_train_data(X_train, Y_train, strict=False)
    if (val_loader is not None):
        start_metrics = surrogate.evaluate(val_loader, split='val')
    else:
        start_metrics = {}
    start_metrics.update(surrogate.evaluate(test_loader, split='test'))
    start_metrics['epoch'] = 0
    if (has_encoder and (encoder_obj == 'mlm')):
        if (val_loader is not None):
            start_metrics.update(mlm_eval_epoch(surrogate.encoder, val_loader, surrogate.encoder.mask_ratio, split='val'))
        start_metrics.update(mlm_eval_epoch(surrogate.encoder, test_loader, surrogate.encoder.mask_ratio, split='test'))
    if (has_encoder and (encoder_obj == 'lanmt')):
        if (val_loader is not None):
            start_metrics.update(lanmt_eval_epoch(surrogate.encoder.model, val_loader, split='val'))
        start_metrics.update(lanmt_eval_epoch(surrogate.encoder.model, test_loader, split='test'))
    best_score = start_metrics.get(select_crit_key, None)
    best_score_epoch = 0
    # Snapshot initial weights on CPU so the checkpoint survives device resets.
    surrogate.cpu()
    best_weights = copy.deepcopy(surrogate.state_dict())
    surrogate.to(surrogate.device)
    if (best_score is not None):
        print(f'starting val NLL: {best_score:.4f}')
    # Warm up batch-norm running statistics by streaming training batches through
    # the feature extractor (no gradients) before fitting.
    if any([isinstance(module, _BatchNorm) for module in surrogate.encoder.modules()]):
        print('\n---- initializing encoder normalization buffers ----')
        num_warmup_epochs = 8
        surrogate.train()
        surrogate.requires_grad_(False)
        for epoch in range(num_warmup_epochs):
            for (inputs, _) in train_loader:
                _ = surrogate.get_features(inputs.to(surrogate.device), surrogate.bs, transform=False)
    # Initialize inducing points / variational distribution for sparse GPs.
    if (hasattr(surrogate, 'init_inducing_points') and (surrogate.num_inducing_points <= X_train.shape[0])):
        print('\n---- initializing GP variational params ----')
        surrogate.eval()
        surrogate.requires_grad_(False)
        init_features = torch.cat(batched_call(surrogate.get_features, X_train, train_bs))
        try:
            surrogate.train()
            surrogate.init_inducing_points(init_features)
            initialize_var_dist_sgpr(surrogate, init_features, Y_train.to(init_features), noise_lb=1.0)
            print('variational initialization successful')
        except Exception as exp:
            # Best-effort: fall back to default initialization on failure.
            logging.exception(exp)
            print('variational initialization failed')
    mll.to(surrogate.device)
    if hasattr(mll, 'num_data'):
        mll.num_data = len(train_loader.dataset)
    stop_crit_key = 'train_loss'
    (best_loss, best_loss_epoch) = (None, 0)
    stop = False
    gp_optimizer = torch.optim.Adam(surrogate.param_groups)
    gp_lr_sched = torch.optim.lr_scheduler.ReduceLROnPlateau(gp_optimizer, patience=math.ceil((surrogate.patience / 2.0)), threshold=0.001)
    records = [start_metrics]
    # --- Main training loop. ---
    print('\n---- fitting all params ----')
    for epoch_idx in range(surrogate.num_epochs):
        metrics = {}
        if (has_encoder and (encoder_obj == 'mll')):
            enc_sup_loss = fit_encoder_only(surrogate, gp_optimizer, mll, train_loader, num_epochs=1)
        else:
            enc_sup_loss = 0.0
        avg_train_loss = enc_sup_loss
        surrogate.train()
        for (inputs, targets) in train_loader:
            # Optional auxiliary encoder step (MLM or LANMT) before the GP step.
            if (isinstance(surrogate.encoder, LanguageModel) and (encoder_obj == 'mlm')):
                surrogate.encoder.requires_grad_(True)
                (mlm_loss, _, _) = mlm_train_step(surrogate.encoder, gp_optimizer, inputs, surrogate.encoder.mask_ratio, loss_scale=1.0)
            elif (isinstance(surrogate.encoder, LanguageModel) and (encoder_obj == 'lanmt')):
                surrogate.encoder.requires_grad_(True)
                (mlm_loss, _, _) = lanmt_train_step(surrogate.encoder.model, gp_optimizer, inputs, loss_scale=1.0)
            else:
                mlm_loss = torch.zeros(1, device=surrogate.device)
            surrogate.requires_grad_(True)
            gp_loss = gp_train_step(surrogate, gp_optimizer, inputs, targets, mll)
            avg_train_loss += ((mlm_loss.detach() + gp_loss.detach()) / len(train_loader))
        gp_lr_sched.step(avg_train_loss)
        metrics.update({'epoch': (epoch_idx + 1), 'train_loss': avg_train_loss.item()})
        # Periodic evaluation on val/test splits.
        if (((epoch_idx + 1) % surrogate.eval_period) == 0):
            surrogate.requires_grad_(False)
            surrogate.eval()
            surrogate.set_train_data(X_train, Y_train, strict=False)
            if (val_loader is not None):
                metrics.update(surrogate.evaluate(val_loader, split='val'))
            metrics.update(surrogate.evaluate(test_loader, split='test'))
            if (has_encoder and (encoder_obj == 'mlm')):
                if (val_loader is not None):
                    metrics.update(mlm_eval_epoch(surrogate.encoder, val_loader, surrogate.encoder.mask_ratio, split='val'))
                metrics.update(mlm_eval_epoch(surrogate.encoder, test_loader, surrogate.encoder.mask_ratio, split='test'))
            elif (has_encoder and (encoder_obj == 'lanmt')):
                if (val_loader is not None):
                    metrics.update(lanmt_eval_epoch(surrogate.encoder.model, val_loader, split='val'))
                metrics.update(lanmt_eval_epoch(surrogate.encoder.model, test_loader, split='test'))
        # Model selection on the validation criterion (checkpoints weights).
        select_crit = metrics.get(select_crit_key, None)
        if (surrogate.early_stopping and (select_crit is not None)):
            assert (surrogate.holdout_ratio > 0.0), 'Need validation data for early stopping'
            (best_score, best_score_epoch, best_weights, _) = check_early_stopping(model=surrogate, best_score=best_score, best_epoch=best_score_epoch, best_weights=best_weights, curr_score=select_crit, curr_epoch=(epoch_idx + 1), patience=surrogate.patience, save_weights=True)
        metrics.update(dict(best_score=best_score, best_epoch=best_score_epoch))
        # Separate stopping criterion on training loss (no weight saving).
        stop_crit = metrics.get(stop_crit_key, None)
        if (stop_crit is not None):
            (best_loss, best_loss_epoch, _, stop) = check_early_stopping(model=surrogate, best_score=best_loss, best_epoch=best_loss_epoch, best_weights=None, curr_score=stop_crit, curr_epoch=(epoch_idx + 1), patience=surrogate.patience, save_weights=False)
            metrics.update(dict(best_loss=best_loss, best_loss_epoch=best_loss_epoch))
        records.append(metrics)
        if (len(log_prefix) > 0):
            metrics = {'/'.join((log_prefix, key)): val for (key, val) in metrics.items()}
        try:
            wandb.log(metrics)
        except Exception:
            # Logging is best-effort; never fail training because of wandb.
            pass
        if stop:
            break
    # Restore the best checkpoint, then refresh caches/train data for inference.
    if surrogate.early_stopping:
        print(f'''
---- loading checkpoint from epoch {best_score_epoch}
----''')
        surrogate.load_state_dict(best_weights)
    surrogate.requires_grad_(False)
    surrogate.train()
    surrogate.clear_cache()
    surrogate.eval()
    surrogate.set_train_data(X_train, Y_train, strict=False)
    return records
def _place_post_grad_agg_ops(ps_device, var_op_to_agg_grad, var_op_to_apply_grad_op):
    """Assign parameter-server tasks to the ops between gradient aggregation and
    gradient application, propagating placement from already-placed neighbors.

    Two passes: first forward (agg -> apply, parents to children), then backward
    (apply -> agg, children to parents).  Ops whose placement references disagree
    are marked SHARED and pinned to task 0.
    """

    def _find_agg_grad_descendant_ops(agg_grad_ops, apply_grad_ops):
        # DFS from the aggregation ops; stop expanding at apply ops.
        agg_grad_descendant_ops = set()
        queue = []
        queue.extend(agg_grad_ops)
        while (len(queue) > 0):
            curr_op = queue.pop()
            if (curr_op in agg_grad_descendant_ops):
                continue
            agg_grad_descendant_ops.add(curr_op)
            if (curr_op in apply_grad_ops):
                continue
            curr_op_consumers = get_consumers(curr_op)
            queue.extend(curr_op_consumers)
        return agg_grad_descendant_ops

    # Sentinel task id meaning "referenced from multiple tasks".
    SHARED = (- 1)

    def _assign(op_to_task, agg_grad_ops, apply_grad_ops, apply_grad_ancestor_ops, ancestors_diff_descendants, is_parent_to_child):
        # BFS in the chosen direction; `stop` ops bound the traversal.
        queue = []
        stop = set()
        if is_parent_to_child:
            queue.extend(agg_grad_ops)
            stop.update(apply_grad_ops)
        else:
            queue.extend(apply_grad_ops)
            stop.update(agg_grad_ops)
        visited = set()
        while (len(queue) > 0):
            curr_op = queue.pop(0)
            if (curr_op in visited):
                continue
            visited.add(curr_op)
            # Already placed (and not a boundary op): just keep traversing.
            if ((curr_op in op_to_task) and (curr_op not in stop)):
                if is_parent_to_child:
                    queue.extend([consumer for consumer in get_consumers(curr_op) if (consumer in apply_grad_ancestor_ops)])
                else:
                    queue.extend([input.op for input in curr_op.inputs])
                continue
            # Collect the neighbors whose placement determines this op's task.
            if is_parent_to_child:
                placement_reference_ops = set([input.op for input in curr_op.inputs])
                placement_reference_ops = placement_reference_ops.difference(ancestors_diff_descendants)
            else:
                placement_reference_ops = set(get_consumers(curr_op))
                placement_reference_ops = placement_reference_ops.intersection(apply_grad_ancestor_ops)
            # Defer this op until all its references have been placed.
            is_ready = True
            for ref_op in placement_reference_ops:
                if (ref_op not in op_to_task):
                    is_ready = False
                    break
            if is_ready:
                placement_reference_tasks = [op_to_task[ref_op] for ref_op in placement_reference_ops]
            else:
                queue.append(curr_op)
                continue
            unique_tasks = set(placement_reference_tasks)
            curr_op_task = None
            if (len(unique_tasks) == 0):
                raise RuntimeError(('Should have placement reference for operation %s' % curr_op.name))
            elif (len(unique_tasks) == 1):
                # All references agree: inherit their task.
                curr_op_task = unique_tasks.pop()
                op_to_task[curr_op] = curr_op_task
            else:
                # Ignore SHARED references; if one concrete task remains, use it,
                # otherwise the op itself becomes SHARED.
                if (SHARED in unique_tasks):
                    unique_tasks.remove(SHARED)
                if (len(unique_tasks) == 1):
                    curr_op_task = unique_tasks.pop()
                    op_to_task[curr_op] = curr_op_task
                else:
                    assert (len(unique_tasks) > 1)
                    curr_op_task = SHARED
                    op_to_task[curr_op] = SHARED
            parallax_log.debug(('post_grad_agg_op %s is assigned to ps task %d' % (curr_op.name, curr_op_task)))
            if (curr_op_task == SHARED):
                # SHARED ops are physically pinned to task 0.
                curr_op_task = 0
            ps_device.task = curr_op_task
            curr_op._set_device(ps_device)
            if (curr_op not in stop):
                if is_parent_to_child:
                    queue.extend([consumer for consumer in get_consumers(curr_op) if (consumer in apply_grad_ancestor_ops)])
                else:
                    queue.extend([input.op for input in curr_op.inputs])

    # Seed placements from the variables' own devices.
    op_to_task = {}
    agg_grad_ops = []
    for (var_op, agg_grad) in var_op_to_agg_grad.items():
        var_device = tf.DeviceSpec.from_string(var_op.device)
        # agg_grad is a pair (indices_op_or_None, values_op) — sparse grads have
        # both entries, dense grads only the second; TODO confirm.
        if (agg_grad[0] != None):
            agg_grad_ops.append(agg_grad[0].op)
            op_to_task[agg_grad[0].op] = var_device.task
        agg_grad_ops.append(agg_grad[1].op)
        op_to_task[agg_grad[1].op] = var_device.task
    apply_grad_ops = []
    for (var_op, apply_grad_op) in var_op_to_apply_grad_op.items():
        var_device = tf.DeviceSpec.from_string(var_op.device)
        apply_grad_ops.append(apply_grad_op)
        # Colocate each apply op with its variable.
        apply_grad_op._set_device(var_device)
        op_to_task[apply_grad_op] = var_device.task
    apply_grad_ancestor_ops = get_ancestors(apply_grad_ops, agg_grad_ops)
    agg_grad_descendant_ops = _find_agg_grad_descendant_ops(agg_grad_ops, apply_grad_ops)
    ancestors_diff_descendants = apply_grad_ancestor_ops.difference(agg_grad_descendant_ops)
    parallax_log.debug(('apply_grad_ancestor_ops: %d' % len(apply_grad_ancestor_ops)))
    parallax_log.debug(('agg_grad_descendant_ops: %d' % len(agg_grad_descendant_ops)))
    parallax_log.debug(('ancestors diff descendants: %d' % len(ancestors_diff_descendants)))
    parallax_log.debug(('descendants diff ancestors: %d' % len(agg_grad_descendant_ops.difference(apply_grad_ancestor_ops))))
    # Forward pass, then backward pass to cover ops missed by the first.
    _assign(op_to_task, agg_grad_ops, apply_grad_ops, apply_grad_ancestor_ops, ancestors_diff_descendants, is_parent_to_child=True)
    _assign(op_to_task, agg_grad_ops, apply_grad_ops, apply_grad_ancestor_ops, ancestors_diff_descendants, is_parent_to_child=False)
def _multivariate_polynomial_interpolation(evaluation, order, polynomial_ring):
    """Interpolate a multivariate polynomial of total degree <= `order` from its
    evaluations over the full grid of the base field (Sage).

    `evaluation[i]` is the value at the i-th grid point, with the last variable
    varying slowest (stride q**(num_of_var-1)).  Works one variable at a time:
    univariate Lagrange interpolation along the last variable, then recursion on
    the coefficient slices.
    """

    def _interpolate(evaluation, num_of_var, order):
        # Base case: a constant — its single evaluation is the polynomial.
        if ((num_of_var == 0) or (order == 0)):
            return evaluation[0]
        base_field = polynomial_ring.base_ring()
        q = base_field.cardinality()
        # Number of grid points with the last variable fixed.
        n_by_q = (q ** (num_of_var - 1))
        # Degree in the last variable is bounded by both `order` and q-1.
        d = min((order + 1), q)
        multipoint_evaluation_list = []
        uni_poly_ring = PolynomialRing(base_field, 'x')
        base_field_zero = base_field.zero()
        for k in range(n_by_q):
            # Lagrange-interpolate along the last variable at d field points.
            iterator = iter(base_field)
            points = []
            for i in range(d):
                xcoordinate = next(iterator)
                points.append((xcoordinate, evaluation[(k + (i * n_by_q))]))
            polyVector = uni_poly_ring.lagrange_polynomial(points).coefficients(sparse=False)
            if (len(polyVector) < d):
                # Pad with zeros so every coefficient vector has length d.
                polyVector += ([base_field_zero] * (d - len(polyVector)))
            multipoint_evaluation_list.append(polyVector)
        # Recombine: sum_k x^k * (interpolation of the k-th coefficient slice
        # in the remaining variables, with degree budget reduced by k).
        poly = polynomial_ring.zero()
        z = 1
        x = polynomial_ring.gen((num_of_var - 1))
        for k in range(d):
            poly = (poly + (z * _interpolate([multipoint_evaluation_list[i][k] for i in range(n_by_q)], (num_of_var - 1), (order - k))))
            z *= x
        return poly

    return _interpolate(evaluation, polynomial_ring.ngens(), order)
def deconv_flops_counter_hook(conv_module, input, output):
    """Forward hook accumulating FLOPs of a transposed convolution into
    ``conv_module.__flops__``.

    Parameters
    ----------
    conv_module : the ConvTranspose module (provides kernel_size, in_channels,
        out_channels, groups, bias).  Must already carry a ``__flops__`` counter.
    input : tuple of input tensors; only ``input[0]`` is used.
    output : the output tensor (its spatial shape sizes the bias term).
    """
    input = input[0]
    batch_size = input.shape[0]
    # For a deconvolution each *input* position contributes one kernel
    # application, so the multiply-accumulate count is sized by the input grid.
    (input_height, input_width) = input.shape[2:]
    (kernel_height, kernel_width) = conv_module.kernel_size
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups
    filters_per_channel = (out_channels // groups)
    conv_per_position_flops = (((kernel_height * kernel_width) * in_channels) * filters_per_channel)
    active_elements_count = ((batch_size * input_height) * input_width)
    overall_conv_flops = (conv_per_position_flops * active_elements_count)
    bias_flops = 0
    if (conv_module.bias is not None):
        # The bias is added once per *output* position.
        (output_height, output_width) = output.shape[2:]
        # BUGFIX: previously multiplied output_height by itself, under-counting
        # (or over-counting) bias FLOPs for non-square outputs.
        bias_flops = (((out_channels * batch_size) * output_height) * output_width)
    overall_flops = (overall_conv_flops + bias_flops)
    conv_module.__flops__ += int(overall_flops)
def get_cartoon_thresh_method(thresh_method):
    """Translate a ``CARTOON_THRESH_METHODS`` enum value into the matching
    OpenCV threshold flag.

    Raises KeyError for any value outside the supported set.
    """
    flag_by_method = {
        CARTOON_THRESH_METHODS.BINARY.value: cv2.THRESH_BINARY,
        CARTOON_THRESH_METHODS.BINARY_INV.value: cv2.THRESH_BINARY_INV,
        CARTOON_THRESH_METHODS.TRIANGLE.value: cv2.THRESH_TRIANGLE,
        CARTOON_THRESH_METHODS.MASK.value: cv2.THRESH_MASK,
        CARTOON_THRESH_METHODS.TRUNC.value: cv2.THRESH_TRUNC,
        CARTOON_THRESH_METHODS.OTSU.value: cv2.THRESH_OTSU,
        CARTOON_THRESH_METHODS.TOZERO.value: cv2.THRESH_TOZERO,
        CARTOON_THRESH_METHODS.TOZERO_INV.value: cv2.THRESH_TOZERO_INV,
    }
    return flag_by_method[thresh_method]
# NOTE(review): "_metric" below is decorator residue that lost its leading '@'
# during extraction (likely `@_metric`, registering this as a metric); confirm
# against the original source.
_metric
def rendering_val(opts):
    """Render validation outputs for the whole dataset (no size cap, no x-flip)
    and return a marker dict ``{'rendering_val': 1}``."""
    # Disable dataset subsampling/augmentation for validation rendering.
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    rendering_utils.render_val(opts, max_items=None)
    return dict(rendering_val=1)
# NOTE(review): the call below is decorator residue that lost its leading '@'
# during extraction (likely `@test_utils.test(arch=..., ...)`); confirm.
_utils.test(arch=[ti.cuda, ti.cpu], real_matrix_scalarize=False, debug=True)
def test_elementwise_ops():
    """Exercise elementwise matrix arithmetic (add, mul, div, shift, floordiv)
    on 2x2 Taichi matrices and check each entry against scalar arithmetic."""

    # NOTE(review): in upstream Taichi tests the inner `test` is decorated with
    # @ti.kernel; the decorator is absent in this extraction — confirm.
    def test():
        x = ti.Matrix([[1, 2], [3, 4]])
        # Scalar broadcast on the right: x + 10.
        t1 = (x + 10)
        ti.loop_config(serialize=True)
        for i in range(2):
            for j in range(2):
                assert (t1[(i, j)] == (x[(i, j)] + 10))
        # Scalar multiply.
        t2 = (x * 2)
        ti.loop_config(serialize=True)
        for i in range(2):
            for j in range(2):
                assert (t2[(i, j)] == (x[(i, j)] * 2))
        # Matrix + matrix.
        t3 = (t1 + t2)
        ti.loop_config(serialize=True)
        for i in range(2):
            for j in range(2):
                assert (t3[(i, j)] == (t1[(i, j)] + t2[(i, j)]))
        # Scalar broadcast on the left: 1 / M.
        t4 = (1 / t1)
        ti.loop_config(serialize=True)
        for i in range(2):
            for j in range(2):
                assert (t4[(i, j)] == (1 / t1[(i, j)]))
        # Left shift with scalar lhs.
        t5 = (1 << x)
        ti.loop_config(serialize=True)
        for i in range(2):
            for j in range(2):
                assert (t5[(i, j)] == (1 << x[(i, j)]))
        # Mixed scalar add and floordiv.
        t6 = (1 + (x // 2))
        ti.loop_config(serialize=True)
        for i in range(2):
            for j in range(2):
                assert (t6[(i, j)] == (1 + (x[(i, j)] // 2)))
        # Integer matrix // matrix.
        y = ti.Matrix([[1, 2], [3, 4]], dt=ti.i32)
        z = (y * 2)
        factors = (z // y)
        ti.loop_config(serialize=True)
        for i in range(2):
            for j in range(2):
                assert (factors[(i, j)] == 2)
        # Float matrix // matrix.
        y1 = ti.Matrix([[1, 2], [3, 4]], dt=ti.f32)
        z1 = (y1 * 2)
        factors1 = (z1 // y1)
        ti.loop_config(serialize=True)
        for i in range(2):
            for j in range(2):
                assert (factors1[(i, j)] == 2)

    test()
class Yolov3Optimizer(Optimizer):
    """Optimizer wrapper that restores a YOLOv3 checkpoint into an existing TF
    session and attaches the detection head tensors (boxes/confs/probs/scores).
    """

    # BUGFIX: this method has no `self` parameter but is invoked as
    # `self.get_input_tensor_name()` in __init__, which raised
    # "takes 0 positional arguments but 1 was given".  Declaring it a
    # staticmethod fixes the instance call and keeps the existing
    # `Yolov3Optimizer.get_input_tensor_name()` class-level call working.
    @staticmethod
    def get_input_tensor_name():
        """Name of the graph's input placeholder tensor."""
        return 'input_data:0'

    def __init__(self, saver, sess, **kwargs):
        """Restore the checkpoint and build the YOLOv3 head.

        kwargs must provide: yolov3_anchor_path, yolov3_class_name_path,
        restore_path, yolov3_new_size, batch_size.
        """
        args = argparse.Namespace(**kwargs)
        self.anchors = parse_anchors(args.yolov3_anchor_path)
        self.classes = read_class_names(args.yolov3_class_name_path)
        self.num_class = len(self.classes)
        self.restore_path = args.restore_path
        self.class_dict = {}
        self.new_size = args.yolov3_new_size
        self.saver = saver
        self.sess = sess
        self.saver.restore(sess, self.restore_path)
        self.input = tf.compat.v1.get_default_graph().get_tensor_by_name(self.get_input_tensor_name())
        (self.boxes, self.confs, self.probs, self.scores) = add_yolov3_head_instructions(args.batch_size, self.new_size, self.num_class, self.anchors)

    # NOTE(review): defined without `self`; when called on an instance,
    # `box_coords_tensor` receives the instance.  Since the body only returns
    # `slapped_input_tensor` this is harmless today, but the signature looks
    # unintentional — confirm intended calling convention before changing it.
    def slapped_input_to_network_input(box_coords_tensor, slapped_input_tensor, **kwargs):
        """Identity mapping: the slapped input already is the network input."""
        return slapped_input_tensor
def set_file_handler(logger, dir):
    """Attach a ``train.log`` FileHandler under ``dir`` to ``logger``.

    Creates ``dir`` (including parents) if it does not exist.  Each call adds a
    new handler; callers are responsible for not attaching duplicates.

    Parameters
    ----------
    logger : logging.Logger to receive the handler.
    dir : directory path for the log file (note: shadows the builtin ``dir``;
        kept for interface compatibility).
    """
    # exist_ok avoids the check-then-create race of the previous
    # `if not exists: makedirs` pattern (another process could create the
    # directory in between and crash makedirs).
    os.makedirs(dir, exist_ok=True)
    file_handler = logging.FileHandler(os.path.join(dir, 'train.log'))
    file_handler.setFormatter(logging.Formatter('%(levelname)-8s %(asctime)-12s %(message)s'))
    logger.addHandler(file_handler)
def test_reshape_input_from_parameters(tmpdir):
    """Smoke test: build a graph whose input is a registered parameter, reshape
    it, and save the graph definition as an .nnp file without raising."""
    v = nn.parameter.get_parameter_or_create('reshape_input_0', (2, 3, 4, 5), need_grad=False)
    # Collapse the two leading axes: (2, 3, 4, 5) -> (-1, 4, 5).
    h = F.reshape(v, ((- 1), 4, 5))
    ref_g = nn.graph_def.create_graph_from_variable('te_module', h)
    fn = str(tmpdir.join('tmp.nnp'))
    ref_g.save(fn)
    # Success criterion is simply that save() did not raise.
    assert True
def test_functional_conv1d_stride_same_padding():
    """Test rf.conv with stride 3 and 'same' padding on a dynamic time axis,
    run at two different max sequence lengths (7 and 9) to exercise the
    padding/stride interaction around the boundary."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(1, name='in')
    out_dim = Dim(1, name='out')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor) -> Tuple[(Tensor, Dim)]:
            # Replace the input with all-ones so the expected output is
            # determined purely by padding/stride, not by data values.
            x = rf.ones(x.dims, dtype=x.dtype)
            filter_size = Dim(4, name='filter_size')
            filters = rf.ones((filter_size, in_dim, out_dim), dtype=x.dtype)
            (y, (out_spatial_dim,)) = rf.conv(x, filter=filters, in_dim=in_dim, out_dim=out_dim, in_spatial_dims=[time_dim], filter_size=[filter_size], strides=3, padding='same')
            return (y, out_spatial_dim)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        (out, dim) = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, dim, out_dim))

    # Run twice with different dynamic max lengths.
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 7})
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 9})
def get_parser():
    """Build the command-line parser for the PCA-matrix computation tool.

    Positional: ``data`` (path to a numpy feature file).
    Required:   ``--output`` (destination path), ``--dim`` (target dimension).
    Optional:   ``--eigen-power`` (default 0; -0.5 applies whitening).
    """
    pca_parser = argparse.ArgumentParser(description='compute a pca matrix given an array of numpy features')
    pca_parser.add_argument('data', help='numpy file containing features')
    pca_parser.add_argument('--output', required=True, help='where to save the pca matrix')
    pca_parser.add_argument('--dim', required=True, type=int, help='dim for pca reduction')
    pca_parser.add_argument('--eigen-power', default=0, type=float, help='eigen power, -0.5 for whitening')
    return pca_parser
# NOTE(review): ".flaky" is decorator residue that lost its prefix during
# extraction — presumably `@pytest.mark.flaky`; confirm against the original.
def test_late_type_lognormal_size():
    """Check late_type_lognormal_size: scalar/array shapes, kpc-compatible
    units, and a KS test of 1000 samples against the analytic lognormal."""
    from skypy.galaxies.morphology import late_type_lognormal_size
    magnitude_scalar = (- 20.0)
    # Model parameters (alpha, beta, gamma, M0, sigma1, sigma2).
    (alpha, beta, gamma, M0) = (0.21, 0.53, (- 1.31), (- 20.52))
    (sigma1, sigma2) = (0.48, 0.25)
    # Scalar input -> scalar output with a length-compatible unit.
    size_scalar = late_type_lognormal_size(magnitude_scalar, alpha, beta, gamma, M0, sigma1, sigma2)
    assert np.isscalar(size_scalar.value)
    assert size_scalar.unit.is_equivalent(units.kpc)
    # Array input preserves shape.
    magnitude_array = np.array([(- 20.0), (- 21.0)])
    size_array = late_type_lognormal_size(magnitude_array, alpha, beta, gamma, M0, sigma1, sigma2)
    assert (np.shape(size_array.value) == np.shape(magnitude_array))
    # size= kwarg draws a sample of the requested length.
    size_sample = late_type_lognormal_size(magnitude_scalar, alpha, beta, gamma, M0, sigma1, sigma2, size=1000)
    assert (np.shape(size_sample.value) == (1000,))
    # Analytic lognormal parameters for this magnitude (mean in log10 space,
    # magnitude-dependent sigma), then a KS goodness-of-fit test.
    mean = (((((- 0.4) * alpha) * magnitude_scalar) + ((beta - alpha) * np.log10((1 + np.power(10, ((- 0.4) * (magnitude_scalar - M0))))))) + gamma)
    sigma = (sigma2 + ((sigma1 - sigma2) / (1.0 + np.power(10, ((- 0.8) * (magnitude_scalar - M0))))))
    arguments = (sigma, 0, np.power(10, mean))
    (d, p) = stats.kstest(size_sample, 'lognorm', args=arguments)
    # Loose threshold — this test is statistical and inherently flaky.
    assert (p > 0.01)
def test_prune_measurements(workspace_factory):
    """Pruning a measurement must remove it from the workspace; pruning the
    only measurement (or passing a bare string) must raise."""
    ws = workspace_factory()
    measurement = ws.measurement_names[0]
    if (len(ws.measurement_names) == 1):
        # A bare string is an invalid operation; a list that would empty the
        # workspace fails spec validation.
        with pytest.raises(pyhf.exceptions.InvalidWorkspaceOperation):
            ws.prune(measurements=measurement)
        with pytest.raises(pyhf.exceptions.InvalidSpecification):
            ws.prune(measurements=[measurement])
    else:
        new_ws = ws.prune(measurements=[measurement])
        assert new_ws
        assert (measurement not in new_ws.measurement_names)
        # Pruning is deterministic: repeating yields an equal workspace.
        new_ws_list = ws.prune(measurements=[measurement])
        assert (new_ws_list == new_ws)
def featurize_mol_df(df, featurizer, field, verbose=True, log_every_N=1000):
    """Featurize every molecule in ``df[field]``.

    Returns a tuple ``(features, valid_inds)`` where ``features`` is the
    squeezed numpy array of featurizations for molecules that produced a
    non-empty result, and ``valid_inds`` is a boolean mask over the original
    rows marking which succeeded.
    """
    molecules = df[field].tolist()
    raw_features = []
    for index, molecule in enumerate(molecules):
        # Periodic progress logging.
        if index % log_every_N == 0:
            log(('Featurizing sample %d' % index), verbose)
        raw_features.append(featurizer.featurize([molecule]))
    # A molecule failed featurization iff its result array is empty.
    valid_inds = np.array([1 if feat.size > 0 else 0 for feat in raw_features], dtype=bool)
    kept = [feat for keep, feat in zip(valid_inds, raw_features) if keep]
    return (np.squeeze(np.array(kept)), valid_inds)
def extract_tar(file_name):
    """Extract a gzip-compressed tarball into ``/tmp/``.

    Uses a context manager so the archive handle is closed even if extraction
    raises (the previous open/extract/close sequence leaked the file object on
    error).

    SECURITY NOTE: ``extractall`` without a member filter follows whatever paths
    the archive declares; do not call this on untrusted archives (path-traversal
    risk).  Consider ``tar.extractall(path='/tmp/', filter='data')`` on
    Python >= 3.12 if callers can tolerate the stricter behavior.
    """
    with tarfile.open(file_name, 'r:gz') as tar:
        tar.extractall(path='/tmp/')
def test_get_average_of_sum_avg_cat_col(having_generator):
    """_get_average_of_sum_avg_cat_col must return, for a categorical column,
    the mean of per-group sums and the mean of per-group averages of the
    numeric column — both rounded to 2 decimals, matching pandas."""
    cat_col = 'col2'
    num_col = 'col3'
    # Expected values computed directly with pandas groupby on the fixture table.
    target_sum = round(TABLE_DATAFRAME.groupby(cat_col).agg({num_col: sum}).mean().values[0], 2)
    target_avg = round(TABLE_DATAFRAME.groupby(cat_col).agg({num_col: 'mean'}).mean().values[0], 2)
    avg_cat_col = having_generator._get_average_of_sum_avg_cat_col(TABLE_NAME, cat_col, num_col)
    assert ((target_sum, target_avg) == avg_cat_col)
# NOTE(review): ".dataclass" is decorator residue garbled during extraction —
# in Hugging Face Flax modeling code this class is decorated with
# `@flax.struct.dataclass`; confirm against the upstream file.
class FlaxBaseModelOutput(ModelOutput):
    """Base container for Flax model outputs: the final hidden states plus
    optional per-layer hidden states and attention tensors."""
    # Hidden states of the model's last layer (presumably
    # (batch, seq_len, hidden) — shape not established here; confirm).
    last_hidden_state: jnp.ndarray = None
    # One entry per layer when output_hidden_states is requested; None otherwise.
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    # One attention tensor per layer when output_attentions is requested.
    attentions: Optional[Tuple[jnp.ndarray]] = None
def add_epoch_number(batch: Batch, epoch: int) -> Batch:
    """Stamp the current epoch number onto every instance in the batch.

    Each instance gains an ``'epoch_num'`` metadata field; the (mutated) batch
    is returned for convenience.
    """
    epoch_field = MetadataField(epoch)
    for inst in batch.instances:
        inst.fields['epoch_num'] = epoch_field
    return batch
def convert_images_from_uint8(images, drange=(-1, 1), nhwc_to_nchw=False):
    """Cast uint8 images to float32 and linearly rescale [0, 255] into ``drange``.

    Parameters
    ----------
    images : image tensor, values in [0, 255].
    drange : (low, high) target dynamic range.  Changed from a mutable list
        default ``[-1, 1]`` to an equivalent tuple — same values, but immune to
        accidental shared-state mutation.
    nhwc_to_nchw : when True, transpose from channels-last to channels-first
        before rescaling.

    Returns the rescaled float32 tensor.
    """
    images = tf.cast(images, tf.float32)
    if nhwc_to_nchw:
        images = tf.transpose(images, [0, 3, 1, 2])
    return ((images * ((drange[1] - drange[0]) / 255)) + drange[0])
def generate_batch(seq_idx, dataset):
    """Build a Batch containing the single sequence ``seq_idx`` in full,
    starting at frame 0 and spanning the sequence's entire length."""
    batch = Batch()
    batch.add_frames(seq_idx=seq_idx, seq_start_frame=0, length=dataset.get_seq_length(seq_idx))
    return batch
class pAdicValuation_base(DiscreteValuation):
    """Common base class (Sage) for p-adic valuations on rings whose residue
    characteristic is the prime ``p``."""

    def __init__(self, parent, p):
        """Store ``p`` as a Sage Integer after initializing the base valuation."""
        DiscreteValuation.__init__(self, parent)
        from sage.rings.integer_ring import ZZ
        self._p = ZZ(p)

    def p(self):
        """Return the prime this valuation is associated with."""
        return self._p

    def reduce(self, x):
        """Map ``x`` (of non-negative valuation) into the residue field."""
        x = self.domain().coerce(x)
        if (self(x) < 0):
            raise ValueError('reduction is only defined for elements of non-negative valuation')
        return self.residue_field()(x)

    def lift(self, x):
        """Lift a residue-field element back to the domain."""
        x = self.residue_field().coerce(x)
        return self.domain()(x)

    def is_unramified(self, G, include_steps=False, assume_squarefree=False):
        """Whether the extension defined by the monic squarefree polynomial
        ``G`` is unramified over this valuation.

        Runs MacLane approximation steps from the Gauss valuation; ramification
        index E > 1 at any step means ramified, residue degree F reaching
        deg(G) means unramified.  With ``include_steps`` the list of
        intermediate valuations is also returned.
        """
        R = G.parent()
        from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
        if ((not is_PolynomialRing(R)) or (R.base_ring() is not self.domain()) or (not G.is_monic())):
            raise ValueError('G must be a monic univariate polynomial over the domain of this valuation')
        if ((not assume_squarefree) and (not G.is_squarefree())):
            raise ValueError('G must be squarefree')
        from sage.rings.valuation.gauss_valuation import GaussValuation
        steps = [GaussValuation(R, self)]
        while True:
            v = steps[(- 1)]
            if (v.E() > 1):
                # Ramification appeared.
                ret = False
                break
            if (v.F() == G.degree()):
                # Full residue degree reached: unramified.
                ret = True
                break
            assert (v(G) is not infinity)
            if v.is_key(G):
                ret = True
                break
            next = v.mac_lane_step(G, assume_squarefree=True)
            if (len(next) > 1):
                # The valuation splits: not a single unramified extension.
                ret = False
                break
            steps.append(next[0])
        if include_steps:
            return (ret, steps)
        else:
            return ret

    def is_totally_ramified(self, G, include_steps=False, assume_squarefree=False):
        """Whether the extension defined by monic squarefree ``G`` is totally
        ramified: residue degree F must stay 1 while E climbs to deg(G)."""
        R = G.parent()
        from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
        if ((not is_PolynomialRing(R)) or (R.base_ring() is not self.domain()) or (not G.is_monic())):
            raise ValueError('G must be a monic univariate polynomial over the domain of this valuation')
        if ((not assume_squarefree) and (not G.is_squarefree())):
            raise ValueError('G must be squarefree')
        from sage.rings.valuation.gauss_valuation import GaussValuation
        steps = [GaussValuation(R, self)]
        while True:
            v = steps[(- 1)]
            if (v.F() > 1):
                # Residue degree grew: not totally ramified.
                ret = False
                break
            if (v.E() == G.degree()):
                ret = True
                break
            assert (v(G) is not infinity)
            if v.is_key(G):
                ret = False
                break
            next = v.mac_lane_step(G, assume_squarefree=True)
            if (len(next) > 1):
                ret = False
                break
            steps.append(next[0])
        if include_steps:
            return (ret, steps)
        else:
            return ret

    def change_domain(self, ring):
        """Return the p-adic valuation with the same prime on ``ring``."""
        return pAdicValuation(ring, self.p())

    def _extensions_to_quotient(self, ring, approximants=None):
        """Extensions of this valuation to a polynomial quotient ring, one per
        MacLane approximant of the modulus."""
        approximants = (approximants or self.mac_lane_approximants(ring.modulus().change_ring(self.domain()), assume_squarefree=True, require_incomparability=True))
        return [pAdicValuation(ring, approximant, approximants) for approximant in approximants]

    def extensions(self, ring):
        """Return all extensions of this valuation to ``ring``.

        Dispatches on the structure of ``ring``: identity, via the fraction
        field, polynomial quotient rings (directly or through their base ring),
        number fields via MacLane approximants, and finally the generic
        superclass mechanism.
        """
        if (self.domain() is ring):
            return [self]
        domain_fraction_field = _fraction_field(self.domain())
        if (domain_fraction_field is not self.domain()):
            if domain_fraction_field.is_subring(ring):
                # Go through the fraction field first.
                return pAdicValuation(domain_fraction_field, self).extensions(ring)
        if self.domain().is_subring(ring):
            from sage.rings.polynomial.polynomial_quotient_ring import is_PolynomialQuotientRing
            if is_PolynomialQuotientRing(ring):
                if is_PolynomialQuotientRing(self.domain()):
                    if (self.domain().modulus() == ring.modulus()):
                        # Same modulus: lift the base valuation's extensions.
                        base_extensions = self._base_valuation.extensions(self._base_valuation.domain().change_ring(self._base_valuation.domain().base_ring().fraction_field()))
                        return [pAdicValuation(ring, base._initial_approximation) for base in base_extensions]
                if (ring.base_ring() is self.domain()):
                    from sage.categories.integral_domains import IntegralDomains
                    if (ring in IntegralDomains()):
                        return self._extensions_to_quotient(ring)
                elif self.domain().is_subring(ring.base_ring()):
                    # Extend to the base ring first, then up to the quotient.
                    return sum([w.extensions(ring) for w in self.extensions(ring.base_ring())], [])
            from sage.rings.number_field.number_field_base import NumberField
            if isinstance(ring.fraction_field(), NumberField):
                if (ring.base_ring().fraction_field() is self.domain().fraction_field()):
                    # One extension per MacLane approximant of the defining polynomial.
                    approximants = self.mac_lane_approximants(ring.fraction_field().relative_polynomial().change_ring(self.domain()), assume_squarefree=True, require_incomparability=True)
                    return [pAdicValuation(ring, approximant, approximants) for approximant in approximants]
            if ((ring.base_ring() is not ring) and self.domain().is_subring(ring.base_ring())):
                return sum([w.extensions(ring) for w in self.extensions(ring.base_ring())], [])
        return super().extensions(ring)

    def restriction(self, ring):
        """Restrict this valuation to a subring of its domain."""
        if (ring is self.domain()):
            return self
        if (not ring.is_subring(self.domain())):
            raise ValueError(('ring must be a subring of the domain of this valuation but %r is not a subring of %r' % (ring, self.domain())))
        return pAdicValuation(ring, self.p())

    # NOTE(review): "_method" below is decorator residue garbled during
    # extraction — in Sage this method carries @cached_method; confirm.
    _method
    def value_semigroup(self):
        """Value semigroup: a group <-v, v> over a field, otherwise the
        semigroup generated by the uniformizer's valuation v."""
        from sage.categories.fields import Fields
        v = self(self.uniformizer())
        if (self.domain() in Fields()):
            return DiscreteValueSemigroup([(- v), v])
        else:
            return DiscreteValueSemigroup([v])
class OptionsEnv(gym.Wrapper):
    """Gym wrapper exposing a discrete high-level option space on top of a
    low-level environment.  Observations become a dict with the raw 'obs' plus
    a binary 'mask' of currently-available options."""

    def __init__(self, env, options=[(0, 5), (5, 5), (10, 5)], *args, **kwargs):
        # NOTE(review): mutable default list — benign as long as nothing
        # mutates self.options in place; confirm callers' behavior.
        super().__init__(env, *args, **kwargs)
        self.options = options
        num_hl_options = len(self.options)
        self.action_space = gym.spaces.Discrete(num_hl_options)
        self.observation_space = gym.spaces.Dict({'obs': env.observation_space, 'mask': gym.spaces.Box(low=0, high=1, shape=(num_hl_options,))})

    def _after_choice(self):
        # Hook: called after an option has been chosen; subclasses override.
        pass

    def _after_step(self):
        # Hook: called after each low-level env step; subclasses override.
        pass

    def _transitions(self):
        # Subclasses must yield transition tuples for the learner.
        raise NotImplementedError('Use `LLOptions` or `HLOptions` for sampling.')

    def sample(self, generator):
        """Infinite generator: repeatedly pick an option with the generator's
        policy, execute its low-level plan while it stays feasible, and yield
        transitions via the subclass hook ``_transitions``.

        State is deliberately kept on ``self`` (s, m, ch, plan, done, ...) so
        the ``_after_*`` hooks and ``_transitions`` can observe it.
        """
        self.done = True
        while True:
            self.episode_start = False
            if self.done:
                # Start a fresh episode.
                self.s = self.env.reset()
                self.done = False
                self.episode_start = True
            self.m = available_actions(self.env, self.options)
            if (not self.m.any()):
                # Never present an all-zero mask; fall back to option 0.
                self.m[0] = True
            (self.ch, self.value, self.log_prob) = generator.policy.forward({'obs': torch.tensor(self.s).unsqueeze(0).to(generator.policy.device), 'mask': self.m.unsqueeze(0).to(generator.policy.device)})
            self.plan = list(map(float, generate_plan(self.env, self.ch, self.options)))
            self._after_choice()
            assert (not self.done)
            assert self.plan
            # Execute the plan while it remains feasible (or no alternative
            # option exists), stepping the low-level env action by action.
            while ((not self.done) and self.plan and (feasible(self.env, safety_plan(self.env, self.plan)) or (self.m.sum() == 1))):
                (self.a, self.plan) = (self.plan[0], self.plan[1:])
                self.a = self.env._normalize(self.a)
                (self.nexts, _, self.done, _) = self.env.step(self.a)
                self._after_step()
                self.s = self.nexts
            (yield from self._transitions())
def small_recording_to_training_resnet(image, config, test=False, channel_last=False, name='bn-graph-ref'):
    """Build a small quantization-aware-training ResNet graph (NNabla) from a
    recorded graph: every activation and weight goes through a
    quantize/dequantize pair using recorded scale/zero-point parameters, with
    fake-quantization parameters created where no recording exists.

    Returns the pre-softmax prediction variable.

    NOTE(review): the ``name`` parameter is repeatedly reassigned inside the
    body ('s', 'sw', 'ReLU-2/s', ...), so the value passed by the caller is
    only used for the first conv's weights when its recorded scale exists —
    this shadowing looks unintentional; confirm against the reference graph.
    """
    axes = get_channel_axes(image, channel_last)
    # Quantization config shorthands: rounding mode, narrow range, dtype.
    rm = config.round_mode
    nr = config.narrow_range
    dt = config.dtype
    (sx, zpx, sw, zpw, sb, zpb) = (None, None, None, None, None, None)
    get_scale_zeropoint = config.recorder_activation.get_scale_zeropoint
    h = image
    # Input activation quantization.
    (sx, zpx) = get_scale_zeropoint(h, axes=axes, narrow_range=nr, name='s')
    if (sx is None):
        # No recorded scale: create trainable fake-quantization parameters.
        shape = ([1] * h.ndim)
        name = 's'
        (sx, zpx) = get_fake_quantization_parameter(shape, name)
    h = F.quantize_linear(h, sx, zpx, rm, nr, dt)
    with nn.parameter_scope('first-conv'):
        h = F.dequantize_linear(h, sx, zpx)
        (w, b) = create_conv_weight_bias(h, 16, kernel=(3, 3), channel_last=channel_last, name=name)
        # Weight quantization.
        (sw, zpw) = get_scale_zeropoint(w, axes=axes, narrow_range=nr, name='sw')
        if (sw is None):
            shape = ([1] * w.ndim)
            name = 'sw'
            (sw, zpw) = get_fake_quantization_parameter(shape, name)
        w = F.quantize_linear(w, sw, zpw, rm, nr, dt)
        w = F.dequantize_linear(w, sw, zpw)
        # Bias scale = input scale * weight scale (frozen as constants).
        sb = (sx.reshape([1]) * sw.reshape([1]))
        sb = nn.Variable.from_numpy_array(sb.d)
        zpb = zpx.reshape([1])
        zpb = nn.Variable.from_numpy_array(zpb.d)
        b = F.quantize_linear(b, sb, zpb, rm, nr, dt)
        b = F.dequantize_linear(b, sb, zpb)
        h = F.convolution(h, w, b, pad=(1, 1), stride=(1, 1), channel_last=channel_last)
        # Re-quantize the conv output.
        (sx, zpx) = get_scale_zeropoint(h, axes=axes, narrow_range=nr, name='s')
        if (sx is None):
            shape = ([1] * h.ndim)
            name = 's'
            (sx, zpx) = get_fake_quantization_parameter(shape, name)
        h = F.quantize_linear(h, sx, zpx, rm, nr, dt)
    with nn.parameter_scope('ReLU-2'):
        h = F.dequantize_linear(h, sx, zpx)
        h = F.relu(h)
        (sx, zpx) = get_scale_zeropoint(h, axes=axes, narrow_range=nr, name='s')
        if (sx is None):
            shape = ([1] * h.ndim)
            name = 'ReLU-2/s'
            (sx, zpx) = get_fake_quantization_parameter(shape, name)
        h = F.quantize_linear(h, sx, zpx, rm, nr, dt)
    with nn.parameter_scope('MaxPooling-3'):
        h = F.dequantize_linear(h, sx, zpx)
        h = F.max_pooling(h, (2, 2), channel_last=channel_last)
        (sx, zpx) = get_scale_zeropoint(h, axes=axes, narrow_range=nr, name='s')
        if (sx is None):
            shape = ([1] * h.ndim)
            name = 'MaxPooling-3/s'
            (sx, zpx) = get_fake_quantization_parameter(shape, name)
        h = F.quantize_linear(h, sx, zpx, rm, nr, dt)
    # Four residual blocks at 16 channels; each threads (h, sx, zpx) through.
    (h, sx, zpx) = recording_to_training_resblock(h, config, 16, sx, zpx, test=test, channel_last=channel_last, name='cb1')
    (h, sx, zpx) = recording_to_training_resblock(h, config, 16, sx, zpx, test=test, channel_last=channel_last, name='cb2')
    (h, sx, zpx) = recording_to_training_resblock(h, config, 16, sx, zpx, test=test, channel_last=channel_last, name='cb3')
    (h, sx, zpx) = recording_to_training_resblock(h, config, 16, sx, zpx, test=test, channel_last=channel_last, name='cb4')
    with nn.parameter_scope('AveragePooling-16'):
        h = F.dequantize_linear(h, sx, zpx)
        h = F.average_pooling(h, (2, 2), channel_last=channel_last)
        (sx, zpx) = get_scale_zeropoint(h, axes=axes, narrow_range=nr, name='s')
        if (sx is None):
            shape = ([1] * h.ndim)
            name = 'AveragePooling-16/s'
            (sx, zpx) = get_fake_quantization_parameter(shape, name)
        h = F.quantize_linear(h, sx, zpx, rm, nr, dt)
    with nn.parameter_scope('fc'):
        # Final quantized affine classifier (10 classes).
        h = F.dequantize_linear(h, sx, zpx)
        (w, b) = create_affine_weight_bias(h, 10, name=name)
        (sw, zpw) = get_scale_zeropoint(w, axes=axes, narrow_range=nr, name='w')
        if (sw is None):
            shape = ([1] * w.ndim)
            name = 'fc/sw'
            (sw, zpw) = get_fake_quantization_parameter(shape, name)
        w = F.quantize_linear(w, sw, zpw, rm, nr, dt)
        w = F.dequantize_linear(w, sw, zpw)
        sb = (sx.reshape([1]) * sw.reshape([1]))
        sb = nn.Variable.from_numpy_array(sb.d)
        zpb = zpx.reshape([1])
        zpb = nn.Variable.from_numpy_array(zpb.d)
        b = F.quantize_linear(b, sb, zpb, rm, nr, dt)
        b = F.dequantize_linear(b, sb, zpb)
        pred = F.affine(h, w, b)
    return pred
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_epochs, device, scheduler, config):
    """Run one training epoch and return the averaged metrics.

    Tokenizes each text batch, computes the model loss, backpropagates
    through apex-amp loss scaling and steps both optimizer and scheduler
    once per batch.

    Returns:
        Dict mapping each tracked meter name to its global average,
        formatted to four decimals.
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.8f}'))
    metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50
    step_size = 100
    for batch in metric_logger.log_every(data_loader, print_freq, header):
        images, text, targets = batch
        images = images.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        text_inputs = tokenizer(text, padding='longest', return_tensors='pt').to(device)
        loss = model(images, text_inputs, targets=targets, train=True)
        optimizer.zero_grad()
        # Scale the loss for mixed-precision training before backprop.
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        optimizer.step()
        scheduler.step()
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        metric_logger.update(loss=loss.item())
    # Aggregate meters across distributed processes before reporting.
    metric_logger.synchronize_between_processes()
    print('Train Averaged stats:', metric_logger.global_avg())
    return {meter_name: '{:.4f}'.format(meter.global_avg) for meter_name, meter in metric_logger.meters.items()}
def scatter(x: torch.Tensor, indices: torch.Tensor, bin_ids: torch.Tensor, weights: torch.Tensor, bins: torch.Tensor, top_k: int, num_bits: int=(- 1)):
    """Functional wrapper around the ``ScatterOp`` autograd Function.

    Forwards all arguments unchanged to ``ScatterOp.apply`` (defined
    elsewhere in this file) so the custom Function can be called like a
    plain function.

    NOTE(review): argument semantics are inferred from the names —
    ``indices``/``bin_ids``/``bins`` presumably describe the routing
    produced by the matching gather step, ``weights`` per-row scaling, and
    ``num_bits=-1`` appears to disable quantization; confirm against
    ``ScatterOp``.
    """
    return ScatterOp.apply(x, indices, bin_ids, weights, bins, top_k, num_bits)
def shape_data(node):
    """Infer the output shape of a data/input layer.

    Prefers an explicitly recorded ``node.output_shape``; otherwise falls
    back to the dims stored in the layer parameters.

    Args:
        node: graph node exposing ``output_shape`` and, as a fallback,
            ``parameters.shape[0].dim`` (an iterable of int-convertible dims).

    Returns:
        The recorded ``output_shape``, or a list of ints from the parameters.

    Raises:
        KaffeError: if the dimensions cannot be determined.
    """
    if node.output_shape:
        return node.output_shape
    try:
        # list(...) forces the conversion eagerly: a bare map() is lazy in
        # Python 3, so int() errors would escape this except clause and the
        # caller would receive an unsized one-shot iterator.
        return list(map(int, node.parameters.shape[0].dim))
    except Exception as exc:
        raise KaffeError('Cannot determine dimensions of data layer.\nSee comments in function shape_data for more info.') from exc
def load_examples_lama(path):
    """Load subject/object test pairs and cloze templates for LAMA relations.

    NOTE(review): the per-relation results are loaded in the loop at the
    bottom but discarded, and the function returns None — this looks
    truncated or work-in-progress; confirm against the original file.

    Args:
        path: root directory containing ``original_rob/P<id>/test.jsonl``
            files and ``relations.jsonl``.
    """
    def load_lama(which_lama):
        # Load all (subject, object) pairs for relation P<which_lama>.
        with open(f'{path}/original_rob/P{which_lama}/test.jsonl', 'r') as json_file:
            json_list = list(json_file)
        all_y_test = []
        all_x_test = []
        for json_str in json_list:
            result = json.loads(json_str)
            all_y_test.append(result['obj_label'])
            all_x_test.append(result['sub_label'])
        # Find this relation's cloze template in relations.jsonl.
        with open(f'{path}/relations.jsonl', 'r') as json_file:
            json_list = list(json_file)
        template = None
        for json_str in json_list:
            result = json.loads(json_str)
            # Relation ids look like 'P1234'; strip the leading 'P'.
            idx = int(result['relation'][1:])
            if (idx == which_lama):
                template = result['template']
                x_pos = template.find('[X]')
                y_pos = template.find('[Y]')
                assert ((x_pos >= 0) and (y_pos >= 0)), 'placeholder not found'
                if (x_pos > y_pos):
                    # [Y] appears before [X]: unusable for left-to-right decoding.
                    print('Not auto-regressive, skip')
                    template = 'INVALID'
                break
        return (all_x_test, all_y_test, template)
    all_lamas = [1001, 101, 103, 106, 108, 127, 1303, 131, 136, 1376, 138, 140, 1412, 159, 17, 176, 178, 19, 190, 20, 264, 27, 276, 279, 30, 31, 36, 361, 364, 37, 39, 407, 413, 449, 463, 47, 495, 527, 530, 740, 937]
    for which_lama in all_lamas:
        (all_x_test, all_y_test, template) = load_lama(which_lama)
def register_Ns3WifiRemoteStationManager_methods(root_module, cls):
    """Register Python bindings for ``ns3::WifiRemoteStationManager``.

    Generated-style pybindgen registration code: one ``add_method`` call per
    C++ method, grouped as public API, protected accessors, and private
    virtual hooks. Declaration order is significant for the generated code
    and is preserved as-is.
    """
    cls.add_constructor([param('ns3::WifiRemoteStationManager const &', 'arg0')])
    cls.add_constructor([])
    # --- Public API ---
    cls.add_method('AddAllSupportedMcs', 'void', [param('ns3::Mac48Address', 'address')])
    cls.add_method('AddAllSupportedModes', 'void', [param('ns3::Mac48Address', 'address')])
    cls.add_method('AddBasicMcs', 'void', [param('ns3::WifiMode', 'mcs')])
    cls.add_method('AddBasicMode', 'void', [param('ns3::WifiMode', 'mode')])
    cls.add_method('AddStationHeCapabilities', 'void', [param('ns3::Mac48Address', 'from'), param('ns3::HeCapabilities', 'hecapabilities')])
    cls.add_method('AddStationHtCapabilities', 'void', [param('ns3::Mac48Address', 'from'), param('ns3::HtCapabilities', 'htcapabilities')])
    cls.add_method('AddStationVhtCapabilities', 'void', [param('ns3::Mac48Address', 'from'), param('ns3::VhtCapabilities', 'vhtcapabilities')])
    cls.add_method('AddSupportedErpSlotTime', 'void', [param('ns3::Mac48Address', 'address'), param('bool', 'isShortSlotTimeSupported')])
    cls.add_method('AddSupportedMcs', 'void', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'mcs')])
    cls.add_method('AddSupportedMode', 'void', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'mode')])
    cls.add_method('AddSupportedPlcpPreamble', 'void', [param('ns3::Mac48Address', 'address'), param('bool', 'isShortPreambleSupported')])
    cls.add_method('DoGetCtsToSelfTxVector', 'ns3::WifiTxVector', [])
    cls.add_method('GetAckTxVector', 'ns3::WifiTxVector', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'dataMode')])
    cls.add_method('GetBasicMcs', 'ns3::WifiMode', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetBasicMode', 'ns3::WifiMode', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetBlockAckTxVector', 'ns3::WifiTxVector', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'dataMode')])
    cls.add_method('GetChannelWidthSupported', 'uint8_t', [param('ns3::Mac48Address', 'address')], is_const=True)
    cls.add_method('GetCtsToSelfTxVector', 'ns3::WifiTxVector', [param('ns3::WifiMacHeader const *', 'header'), param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('GetCtsTxVector', 'ns3::WifiTxVector', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'rtsMode')])
    cls.add_method('GetDataTxVector', 'ns3::WifiTxVector', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('GetDefaultMcs', 'ns3::WifiMode', [], is_const=True)
    cls.add_method('GetDefaultMode', 'ns3::WifiMode', [], is_const=True)
    cls.add_method('GetDefaultTxPowerLevel', 'uint8_t', [], is_const=True)
    cls.add_method('GetErpProtectionMode', 'ns3::WifiRemoteStationManager::ProtectionMode', [], is_const=True)
    cls.add_method('GetFragmentOffset', 'uint32_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint32_t', 'fragmentNumber')])
    cls.add_method('GetFragmentSize', 'uint32_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint32_t', 'fragmentNumber')])
    cls.add_method('GetFragmentationThreshold', 'uint32_t', [], is_const=True)
    cls.add_method('GetGreenfieldSupported', 'bool', [param('ns3::Mac48Address', 'address')], is_const=True)
    cls.add_method('GetHtProtectionMode', 'ns3::WifiRemoteStationManager::ProtectionMode', [], is_const=True)
    cls.add_method('GetInfo', 'ns3::WifiRemoteStationInfo', [param('ns3::Mac48Address', 'address')])
    cls.add_method('GetMaxNumberOfTransmitStreams', 'uint8_t', [])
    cls.add_method('GetMaxSlrc', 'uint32_t', [], is_const=True)
    cls.add_method('GetMaxSsrc', 'uint32_t', [], is_const=True)
    cls.add_method('GetNBasicMcs', 'uint32_t', [], is_const=True)
    cls.add_method('GetNBasicModes', 'uint32_t', [], is_const=True)
    cls.add_method('GetNNonErpBasicModes', 'uint32_t', [], is_const=True)
    cls.add_method('GetNonErpBasicMode', 'ns3::WifiMode', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetNonUnicastMode', 'ns3::WifiMode', [], is_const=True)
    cls.add_method('GetNumberOfAntennas', 'uint8_t', [])
    cls.add_method('GetQosSupported', 'bool', [param('ns3::Mac48Address', 'address')], is_const=True)
    cls.add_method('GetRifsPermitted', 'bool', [], is_const=True)
    cls.add_method('GetRtsCtsThreshold', 'uint32_t', [], is_const=True)
    cls.add_method('GetRtsTxVector', 'ns3::WifiTxVector', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('GetShortPreambleEnabled', 'bool', [], is_const=True)
    cls.add_method('GetShortPreambleSupported', 'bool', [param('ns3::Mac48Address', 'address')], is_const=True)
    cls.add_method('GetShortSlotTimeEnabled', 'bool', [], is_const=True)
    cls.add_method('GetShortSlotTimeSupported', 'bool', [param('ns3::Mac48Address', 'address')], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetUseGreenfieldProtection', 'bool', [], is_const=True)
    cls.add_method('GetUseNonErpProtection', 'bool', [], is_const=True)
    cls.add_method('GetUseNonHtProtection', 'bool', [], is_const=True)
    cls.add_method('GetVhtSupported', 'bool', [param('ns3::Mac48Address', 'address')], is_const=True)
    cls.add_method('HasHeSupported', 'bool', [], is_const=True)
    cls.add_method('HasHtSupported', 'bool', [], is_const=True)
    cls.add_method('HasQosSupported', 'bool', [], is_const=True)
    cls.add_method('HasVhtSupported', 'bool', [], is_const=True)
    cls.add_method('IsAssociated', 'bool', [param('ns3::Mac48Address', 'address')], is_const=True)
    cls.add_method('IsBrandNew', 'bool', [param('ns3::Mac48Address', 'address')], is_const=True)
    cls.add_method('IsLastFragment', 'bool', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint32_t', 'fragmentNumber')])
    cls.add_method('IsWaitAssocTxOk', 'bool', [param('ns3::Mac48Address', 'address')], is_const=True)
    cls.add_method('NeedCtsToSelf', 'bool', [param('ns3::WifiTxVector', 'txVector')])
    cls.add_method('NeedDataRetransmission', 'bool', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('NeedFragmentation', 'bool', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('NeedRts', 'bool', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('ns3::WifiTxVector', 'txVector')])
    cls.add_method('NeedRtsRetransmission', 'bool', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('PrepareForQueue', 'void', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('RecordDisassociated', 'void', [param('ns3::Mac48Address', 'address')])
    cls.add_method('RecordGotAssocTxFailed', 'void', [param('ns3::Mac48Address', 'address')])
    cls.add_method('RecordGotAssocTxOk', 'void', [param('ns3::Mac48Address', 'address')])
    cls.add_method('RecordWaitAssocTxOk', 'void', [param('ns3::Mac48Address', 'address')])
    cls.add_method('RemoveAllSupportedMcs', 'void', [param('ns3::Mac48Address', 'address')])
    cls.add_method('ReportAmpduTxStatus', 'void', [param('ns3::Mac48Address', 'address'), param('uint8_t', 'tid'), param('uint8_t', 'nSuccessfulMpdus'), param('uint8_t', 'nFailedMpdus'), param('double', 'rxSnr'), param('double', 'dataSnr')])
    cls.add_method('ReportDataFailed', 'void', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header')])
    cls.add_method('ReportDataOk', 'void', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('double', 'ackSnr'), param('ns3::WifiMode', 'ackMode'), param('double', 'dataSnr')])
    cls.add_method('ReportFinalDataFailed', 'void', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header')])
    cls.add_method('ReportFinalRtsFailed', 'void', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header')])
    cls.add_method('ReportRtsFailed', 'void', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header')])
    cls.add_method('ReportRtsOk', 'void', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('double', 'ctsSnr'), param('ns3::WifiMode', 'ctsMode'), param('double', 'rtsSnr')])
    cls.add_method('ReportRxOk', 'void', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMacHeader const *', 'header'), param('double', 'rxSnr'), param('ns3::WifiMode', 'txMode')])
    cls.add_method('Reset', 'void', [])
    cls.add_method('Reset', 'void', [param('ns3::Mac48Address', 'address')])
    cls.add_method('SetDefaultTxPowerLevel', 'void', [param('uint8_t', 'txPower')])
    cls.add_method('SetErpProtectionMode', 'void', [param('ns3::WifiRemoteStationManager::ProtectionMode', 'mode')])
    cls.add_method('SetFragmentationThreshold', 'void', [param('uint32_t', 'threshold')])
    cls.add_method('SetHeSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetHtProtectionMode', 'void', [param('ns3::WifiRemoteStationManager::ProtectionMode', 'mode')])
    cls.add_method('SetHtSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetMaxSlrc', 'void', [param('uint32_t', 'maxSlrc')])
    cls.add_method('SetMaxSsrc', 'void', [param('uint32_t', 'maxSsrc')])
    cls.add_method('SetQosSupport', 'void', [param('ns3::Mac48Address', 'from'), param('bool', 'qosSupported')])
    cls.add_method('SetQosSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetRifsPermitted', 'void', [param('bool', 'allow')])
    cls.add_method('SetRtsCtsThreshold', 'void', [param('uint32_t', 'threshold')])
    cls.add_method('SetShortPreambleEnabled', 'void', [param('bool', 'enable')])
    cls.add_method('SetShortSlotTimeEnabled', 'void', [param('bool', 'enable')])
    cls.add_method('SetUseGreenfieldProtection', 'void', [param('bool', 'enable')])
    cls.add_method('SetUseNonErpProtection', 'void', [param('bool', 'enable')])
    cls.add_method('SetUseNonHtProtection', 'void', [param('bool', 'enable')])
    cls.add_method('SetVhtSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetupMac', 'void', [param('ns3::Ptr< ns3::WifiMac > const', 'mac')], is_virtual=True)
    cls.add_method('SetupPhy', 'void', [param('ns3::Ptr< ns3::WifiPhy > const', 'phy')], is_virtual=True)
    cls.add_method('UpdateFragmentationThreshold', 'void', [])
    # --- Protected members ---
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Mac48Address', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetAggregation', 'bool', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetChannelWidth', 'uint8_t', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetGreenfield', 'bool', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetGuardInterval', 'uint16_t', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetHeSupported', 'bool', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetHtSupported', 'bool', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetLongRetryCount', 'uint32_t', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetMac', 'ns3::Ptr< ns3::WifiMac >', [], is_const=True, visibility='protected')
    cls.add_method('GetMcsSupported', 'ns3::WifiMode', [param('ns3::WifiRemoteStation const *', 'station'), param('uint32_t', 'i')], is_const=True, visibility='protected')
    cls.add_method('GetNMcsSupported', 'uint32_t', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetNNonErpSupported', 'uint32_t', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetNSupported', 'uint32_t', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetNess', 'uint32_t', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetNonErpSupported', 'ns3::WifiMode', [param('ns3::WifiRemoteStation const *', 'station'), param('uint32_t', 'i')], is_const=True, visibility='protected')
    cls.add_method('GetNumberOfSupportedStreams', 'uint8_t', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetPhy', 'ns3::Ptr< ns3::WifiPhy >', [], is_const=True, visibility='protected')
    cls.add_method('GetPreambleForTransmission', 'ns3::WifiPreamble', [param('ns3::WifiMode', 'mode'), param('ns3::Mac48Address', 'dest')], visibility='protected')
    cls.add_method('GetQosSupported', 'bool', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetShortGuardInterval', 'bool', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetShortRetryCount', 'uint32_t', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetStbc', 'bool', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    cls.add_method('GetSupported', 'ns3::WifiMode', [param('ns3::WifiRemoteStation const *', 'station'), param('uint32_t', 'i')], is_const=True, visibility='protected')
    cls.add_method('GetVhtSupported', 'bool', [param('ns3::WifiRemoteStation const *', 'station')], is_const=True, visibility='protected')
    # --- Private virtual hooks for rate-control subclasses ---
    cls.add_method('DoCreateStation', 'ns3::WifiRemoteStation *', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetAckTxChannelWidth', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ctsMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetAckTxGuardInterval', 'uint16_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ackMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetAckTxNess', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ackMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetAckTxNss', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ackMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetAckTxPowerLevel', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ackMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetAckTxStbc', 'bool', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ackMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetBlockAckTxChannelWidth', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ctsMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetBlockAckTxGuardInterval', 'uint16_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'blockAckMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetBlockAckTxNess', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'blockAckMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetBlockAckTxNss', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'blockAckMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetBlockAckTxPowerLevel', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'blockAckMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetBlockAckTxStbc', 'bool', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'blockAckMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetCtsTxChannelWidth', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ctsMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetCtsTxGuardInterval', 'uint16_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ctsMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetCtsTxNess', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ctsMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetCtsTxNss', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ctsMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetCtsTxPowerLevel', 'uint8_t', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ctsMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetCtsTxStbc', 'bool', [param('ns3::Mac48Address', 'address'), param('ns3::WifiMode', 'ctsMode')], visibility='private', is_virtual=True)
    cls.add_method('DoGetDataTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetRtsTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoNeedDataRetransmission', 'bool', [param('ns3::WifiRemoteStation *', 'station'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('bool', 'normally')], visibility='private', is_virtual=True)
    cls.add_method('DoNeedFragmentation', 'bool', [param('ns3::WifiRemoteStation *', 'station'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('bool', 'normally')], visibility='private', is_virtual=True)
    cls.add_method('DoNeedRts', 'bool', [param('ns3::WifiRemoteStation *', 'station'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('bool', 'normally')], visibility='private', is_virtual=True)
    cls.add_method('DoNeedRtsRetransmission', 'bool', [param('ns3::WifiRemoteStation *', 'station'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('bool', 'normally')], visibility='private', is_virtual=True)
    cls.add_method('DoReportAmpduTxStatus', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('uint8_t', 'nSuccessfulMpdus'), param('uint8_t', 'nFailedMpdus'), param('double', 'rxSnr'), param('double', 'dataSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoReportDataOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ackSnr'), param('ns3::WifiMode', 'ackMode'), param('double', 'dataSnr')], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ctsSnr'), param('ns3::WifiMode', 'ctsMode'), param('double', 'rtsSnr')], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoReportRxOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'rxSnr'), param('ns3::WifiMode', 'txMode')], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('IsLowLatency', 'bool', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    return
class AnalyticalLinearChannel(Channel):
    """Linear channel X = W Z whose matrix W is drawn from a spectral ensemble
    with analytically known transforms.

    NOTE(review): the `ensemble` contract (``generate``, ``eta_transform``,
    ``shannon_transform``, ``mean_spectrum``, ``alpha``) is defined elsewhere
    in this package.
    """

    def __init__(self, ensemble, name='W'):
        self.name = name
        self.alpha = ensemble.alpha
        self.repr_init()
        self.ensemble = ensemble

    def sample(self, Z):
        """Draw a matrix from the ensemble and apply it to Z.

        BUG FIX: the previous body read ``X = (F Z)``, which is a syntax
        error; the intended operation is the matrix product ``F @ Z``.
        """
        N = Z.shape[0]
        F = self.ensemble.generate(N)
        X = F @ Z
        return X

    def math(self):
        """LaTeX label used when rendering the factor graph."""
        return (('$' + self.name) + '$')

    def second_moment(self, tau_z):
        """Second moment of X given the second moment tau_z of Z."""
        tau_x = (tau_z * (self.ensemble.mean_spectrum / self.alpha))
        return tau_x

    def compute_n_eff(self, az, ax):
        """Effective fraction of observed modes via the ensemble eta-transform.

        Degenerate precisions are short-circuited (and logged) to their
        analytic limits.
        """
        if (ax == 0):
            logger.info(f'ax=0 in {self} compute_n_eff')
            return 0.0
        if ((az / ax) == 0):
            logger.info(f'az/ax=0 in {self} compute_n_eff')
            return min(1, self.alpha)
        gamma = (ax / az)
        n_eff = (1 - self.ensemble.eta_transform(gamma))
        return n_eff

    def compute_backward_error(self, az, ax, tau_z):
        """Posterior variance on Z; az is clipped away from zero to avoid
        a division by zero below."""
        if (az == 0):
            logger.info(f'az=0 in {self} compute_backward_error')
        az = np.maximum(1e-11, az)
        n_eff = self.compute_n_eff(az, ax)
        vz = ((1 - n_eff) / az)
        return vz

    def compute_forward_error(self, az, ax, tau_z):
        """Posterior variance on X; uses the analytic limit when ax == 0."""
        if (ax == 0):
            return (self.ensemble.mean_spectrum / (self.alpha * az))
        n_eff = self.compute_n_eff(az, ax)
        vx = (n_eff / (self.alpha * ax))
        return vx

    def compute_mutual_information(self, az, ax, tau_z):
        """Mutual information from the ensemble Shannon transform."""
        gamma = (ax / az)
        S = self.ensemble.shannon_transform(gamma)
        I = ((0.5 * np.log((az * tau_z))) + (0.5 * S))
        return I

    def compute_free_energy(self, az, ax, tau_z):
        """Free energy of the channel at the given precisions."""
        tau_x = self.second_moment(tau_z)
        I = self.compute_mutual_information(az, ax, tau_z)
        A = (((0.5 * ((az * tau_z) + ((self.alpha * ax) * tau_x))) - I) + (0.5 * np.log((((2 * np.pi) * tau_z) / np.e))))
        return A
def register_Ns3LteEnbNetDevice_methods(root_module, cls):
    """Register the Python bindings for ``ns3::LteEnbNetDevice``.

    The registration order matches the original generated bindings; only
    the repetitive accessor declarations are table-driven here.
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('Send', 'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'),
                    param('ns3::Address const &', 'dest'),
                    param('uint16_t', 'protocolNumber')],
                   is_virtual=True)
    # Parameterless const accessors for the device sub-objects and cell id.
    for accessor, return_type in (('GetMac', 'ns3::Ptr< ns3::LteEnbMac >'),
                                  ('GetPhy', 'ns3::Ptr< ns3::LteEnbPhy >'),
                                  ('GetRrc', 'ns3::Ptr< ns3::LteEnbRrc >'),
                                  ('GetCellId', 'uint16_t')):
        cls.add_method(accessor, return_type, [], is_const=True)
    # Get/Set pairs: (property name, C++ type, setter argument name).
    for prop, ctype, arg_name in (('UlBandwidth', 'uint8_t', 'bw'),
                                  ('DlBandwidth', 'uint8_t', 'bw'),
                                  ('DlEarfcn', 'uint16_t', 'earfcn'),
                                  ('UlEarfcn', 'uint16_t', 'earfcn'),
                                  ('CsgId', 'uint32_t', 'csgId'),
                                  ('CsgIndication', 'bool', 'csgIndication')):
        cls.add_method('Get' + prop, ctype, [], is_const=True)
        cls.add_method('Set' + prop, 'void', [param(ctype, arg_name)])
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    return
class TangentSpace(FiniteRankFreeModule):
    """Tangent space to a differentiable manifold at a given point.

    Implemented as a free module of finite rank over the symbolic ring (or
    a user-supplied base ring), with one basis per vector frame defined
    around the point, and with the manifold's changes of frame mirrored as
    changes of basis.
    """

    # Elements of this free module are tangent vectors.
    Element = TangentVector

    def __init__(self, point: ManifoldPoint, base_ring=None):
        # NOTE(review): this block was recovered from a flattened source;
        # statement grouping inside the loops below is inferred.
        manif = point._manifold
        name = 'T_{} {}'.format(point._name, manif._name)
        latex_name = ('T_{%s}\\,%s' % (point._latex_name, manif._latex_name))
        self._point = point
        self._manif = manif
        if (base_ring is None):
            base_ring = SR
        FiniteRankFreeModule.__init__(self, base_ring, manif._dim, name=name, latex_name=latex_name, start_index=manif._sindex)
        # One basis of this module per frame defined around the point
        # (identity destination map only).
        self._frame_bases = {}
        for frame in point.parent()._top_frames:
            if frame.destination_map().is_identity():
                if (point in frame._domain):
                    coframe = frame.coframe()
                    basis = self.basis(frame._symbol, latex_symbol=frame._latex_symbol, indices=frame._indices, latex_indices=frame._latex_indices, symbol_dual=coframe._symbol, latex_symbol_dual=coframe._latex_symbol)
                    self._frame_bases[frame] = basis
        def_frame = point.parent()._def_frame
        if (def_frame in self._frame_bases):
            self._def_basis = self._frame_bases[def_frame]
        # Mirror the manifold's changes of frame as changes of basis,
        # evaluating the symbolic components at the point.
        for (frame_pair, automorph) in point.parent()._frame_changes.items():
            if (point in automorph.domain()):
                (frame1, frame2) = (frame_pair[0], frame_pair[1])
                (fr1, fr2) = (None, None)
                for frame in self._frame_bases:
                    if (frame1 in frame._subframes):
                        fr1 = frame
                        break
                for frame in self._frame_bases:
                    if (frame2 in frame._subframes):
                        fr2 = frame
                        break
                if ((fr1 is not None) and (fr2 is not None)):
                    basis1 = self._frame_bases[fr1]
                    basis2 = self._frame_bases[fr2]
                    auto = self.automorphism()
                    for (frame, comp) in automorph._components.items():
                        try:
                            basis = None
                            if (frame is frame1):
                                basis = basis1
                            if (frame is frame2):
                                basis = basis2
                            if (basis is not None):
                                cauto = auto.add_comp(basis=basis)
                                # Evaluate each component function at the point.
                                for (ind, val) in comp._comp.items():
                                    cauto._comp[ind] = val(point)
                        except ValueError:
                            # Components that cannot be evaluated here are skipped.
                            pass
                    self._basis_changes[(basis1, basis2)] = auto

    def construction(self):
        # No functorial construction is available for tangent spaces.
        return None

    def _repr_(self):
        return 'Tangent space at {}'.format(self._point)

    def _an_element_(self):
        """Return a sample tangent vector (components 1..rank in the default basis).

        NOTE(review): if no default-frame basis was found in __init__,
        ``self._def_basis`` is never assigned and this raises
        AttributeError — confirm that is the intended behaviour.
        """
        resu = self.element_class(self)
        if (self._def_basis is not None):
            resu.set_comp()[:] = range(1, (self._rank + 1))
        return resu

    def dimension(self):
        """Dimension of the tangent space (equals the manifold dimension)."""
        return self._rank

    # Short alias kept for convenience.
    dim = dimension

    def base_point(self):
        """Return the manifold point at which this tangent space is taken."""
        return self._point
def create_hier_dataset(spark: pyspark.sql.SparkSession, df: pyspark.sql.DataFrame, time_col: str=None, index_cols: List[str]=None, agg_dict: Dict=None) -> Tuple[(pyspark.sql.DataFrame, np.ndarray)]:
    """Extend a multi-index time-series dataframe with aggregated series.

    For every prefix of ``index_cols`` (dropping the last key, then the last
    two, ... down to the grand total) an aggregated time series is appended
    to the dataframe, with '__aggregated__' placeholders in the dropped
    index columns and a fresh TSID. The returned matrix maps each base
    series into every (base + aggregated) series.

    NOTE(review): recovered from a flattened source; loop nesting is
    inferred.

    Args:
        spark: active Spark session (used to run the SQL aggregation).
        df: input dataframe; must contain the TSID column.
        time_col: time column; if None, the first non-index column is used
            and the remaining ones are treated as data columns.
        index_cols: hierarchy columns, ordered coarse -> fine.
        agg_dict: optional per-column aggregation function name
            (defaults to 'sum').

    Returns:
        (full_df, hier_matrix): the extended dataframe and the
        (n + n_agg) x n aggregation matrix.
    """
    index_cols = ([] if (index_cols is None) else index_cols)
    index_cols = [c for c in index_cols if (c != TSID_COL_NAME)]
    extended_index_cols = (index_cols + [TSID_COL_NAME])
    if (time_col is None):
        non_index_cols = [c for c in df.schema.fieldNames() if (c not in extended_index_cols)]
        time_col = non_index_cols[0]
        data_cols = non_index_cols[1:]
    else:
        data_cols = [c for c in df.schema.fieldNames() if (c not in (extended_index_cols + [time_col]))]
    # One row per base time series; used to map index values -> position.
    ts_index = df.groupBy(extended_index_cols).count().drop('count').toPandas()
    ts_index = ts_index.set_index(index_cols).sort_index()
    index_schema = StructType([df.schema[c] for c in extended_index_cols])
    n = len(ts_index)
    full_df = df
    hier_vecs = []
    df.createOrReplaceTempView('df')
    agg_dict = ({} if (agg_dict is None) else agg_dict)
    data_col_sql = [f"{agg_dict.get(c, 'sum').upper()}(`{c}`) AS `{c}`" for c in data_cols]
    for k in range(len(index_cols)):
        # Aggregate away the (k+1) finest index levels.
        gb_cols = index_cols[:(- (k + 1))]
        gb_col_sql = [f'`{c}`' for c in ([time_col] + gb_cols)]
        agg = spark.sql(f"SELECT {','.join((gb_col_sql + data_col_sql))} FROM df GROUP BY {','.join(gb_col_sql)};")
        if (len(gb_cols) == 0):
            # Grand total: a single series aggregating everything.
            dummy = [((['__aggregated__'] * len(index_cols)) + [(n + len(hier_vecs))])]
            full_df = full_df.unionByName(agg.join(spark.createDataFrame(dummy, schema=index_schema)))
            hier_vecs.append(np.ones(n))
            continue
        dummy = []
        for (i, (group, group_idxs)) in enumerate(ts_index.groupby(gb_cols).groups.items()):
            group = ([group] if (len(gb_cols) == 1) else list(group))
            locs = [ts_index.index.get_loc(j) for j in group_idxs]
            dummy.append(((group + (['__aggregated__'] * (k + 1))) + [(n + len(hier_vecs))]))
            # Indicator vector of the base series summed into this aggregate.
            x = np.zeros(n)
            x[locs] = 1
            hier_vecs.append(x)
        dummy = spark.createDataFrame(dummy, schema=index_schema)
        full_df = full_df.unionByName(agg.join(dummy, on=gb_cols))
    # Base series map to themselves (identity block) plus the aggregates.
    hier_matrix = np.concatenate([np.eye(n), np.stack(hier_vecs)])
    return (full_df, hier_matrix)
def calculate_backbone_feature_dim(backbone, input_shape: Tuple[(int, int, int)]) -> int:
    """Infer a backbone's output feature dimension with a dummy forward pass.

    Args:
        backbone: model exposing ``forward``; run on a batch of one.
        input_shape: per-sample input shape (without the batch dimension).

    Returns:
        The size of the last dimension of the backbone's output.
    """
    dummy_batch = torch.ones(1, *input_shape)
    features = backbone.forward(dummy_batch)
    return features.shape[-1]
class Timer(object):
    """A simple stopwatch, usable directly or as a context manager.

    When used as a context manager, the time spent inside the ``with``
    block is printed on exit using ``print_tmpl``.
    """

    def __init__(self, start=True, print_tmpl=None):
        self._is_running = False
        # Format string applied to the elapsed seconds printed on __exit__.
        self.print_tmpl = print_tmpl or '{:.3f}'
        if start:
            self.start()

    def is_running(self):
        """Return whether the timer is currently running."""
        return self._is_running

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        # Report the time spent inside the `with` block, then stop.
        print(self.print_tmpl.format(self.since_last_check()))
        self._is_running = False

    def start(self):
        """Start the timer; the start time is kept if already running."""
        if not self._is_running:
            self._t_start = time()
            self._is_running = True
        self._t_last = time()

    def since_start(self):
        """Return seconds elapsed since the timer was started.

        Raises:
            TimerError: if the timer is not running.
        """
        if not self._is_running:
            raise TimerError('timer is not running')
        self._t_last = time()
        return self._t_last - self._t_start

    def since_last_check(self):
        """Return seconds elapsed since the last check (or since start).

        Raises:
            TimerError: if the timer is not running.
        """
        if not self._is_running:
            raise TimerError('timer is not running')
        dur = time() - self._t_last
        self._t_last = time()
        return dur
def compute_functional_name(test_params_dict):
    """Derive the Python functional name from a C++ test-params dict.

    Either strips ``F::``/``FuncOptions`` from a ``cpp_options_args`` entry
    and converts the remaining CamelCase to snake_case, or takes the callee
    name from a ``cpp_function_call`` entry verbatim (minus ``F::``).

    Raises:
        RuntimeError: if neither entry is present.
    """
    def _to_snake_case(camel):
        # Insert "_" before every interior capital, then lowercase.
        return re.sub('(?<!^)(?=[A-Z])', '_', camel).lower()

    if 'cpp_options_args' in test_params_dict:
        camel_name = test_params_dict['cpp_options_args'].split('(')[0].replace('F::', '').replace('FuncOptions', '')
        return _to_snake_case(camel_name)
    if 'cpp_function_call' in test_params_dict:
        return test_params_dict['cpp_function_call'].split('(')[0].replace('F::', '')
    raise RuntimeError(
        '`cpp_options_args` or `cpp_function_call` entry must be present in test params dict:\n{}'.format(
            pprint.pformat(test_params_dict)))
def compute_correlation(df: DataFrame, col1: Optional[str]=None, col2: Optional[str]=None, *, cfg: Union[(Config, Dict[(str, Any)], None)]=None, display: Optional[List[str]]=None, value_range: Optional[Tuple[(float, float)]]=None, k: Optional[int]=None) -> Intermediate:
    """Dispatch a correlation computation.

    With no columns given, computes the full overview; with only ``col1``,
    the univariate analysis; with both, the bivariate analysis. Passing
    only ``col2`` is rejected.

    Args:
        df: the dataframe to analyse.
        col1: first column name (x).
        col2: second column name (y).
        cfg: configuration object or dict; defaults to ``Config()``.
        display: plot names, used when ``cfg`` is given as a dict.
        value_range: value restriction (overview/univariate only).
        k: keep only the top-k correlations.

    Returns:
        An ``Intermediate`` holding the computed results.
    """
    if isinstance(cfg, dict):
        cfg = Config.from_dict(display, cfg)
    elif not cfg:
        cfg = Config()
    frame = EDAFrame(df)
    if (col1 is None) and (col2 is None):
        with catch_warnings():
            # Large integer products can overflow; the warning is expected.
            filterwarnings('ignore', 'overflow encountered in long_scalars', category=RuntimeWarning)
            return _calc_overview(frame, cfg, value_range=value_range, k=k)
    if col2 is None:
        with catch_warnings():
            filterwarnings('ignore', 'overflow encountered in long_scalars', category=RuntimeWarning)
            return _calc_univariate(frame, col1, cfg, value_range=value_range, k=k)
    if col1 is None:
        raise ValueError('Please give the column name to x instead of y')
    if (col1 is not None) and (col2 is not None):
        return _calc_bivariate(frame, cfg, col1, col2, k=k)
    raise ValueError('Not Possible')
def text_progessbar(seq, total=None):
    """Wrap iterator *seq*, printing a one-line progress report per item.

    Parameters
    ----------
    seq : iterator
        Source iterator; items are yielded through unchanged.
    total : int, optional
        Expected number of items, shown in the report when given.

    Yields
    ------
    The items of ``seq``, one at a time.
    """
    step = 1
    tick = time.time()
    while True:
        time_diff = time.time() - tick
        # NOTE(review): this is seconds-per-iteration despite the historical
        # 'iter/sec' label below, kept for output compatibility.
        avg_speed = time_diff / step
        # BUG FIX: the original used '%n', which is not a valid printf-style
        # conversion and raised ValueError whenever ``total`` was truthy.
        total_str = ('of %s' % total) if total else ''
        print('step', step, '%.2f' % time_diff, 'avg: %.2f iter/sec' % avg_speed, total_str)
        step += 1
        # BUG FIX: a StopIteration escaping a generator becomes RuntimeError
        # under PEP 479; terminate cleanly when the source is exhausted.
        try:
            item = next(seq)
        except StopIteration:
            return
        yield item
class TestAllMaps(unittest.TestCase):
    """Sanity checks that run over every nuScenes map location."""

    # Dataset version to load; the mini split keeps the test light-weight.
    version = 'v1.0-mini'
    # Set to True to render each map's lane layer while the test runs.
    render = False

    def setUp(self):
        # Load every map once; requires the NUSCENES env var to point at the dataset root.
        self.nusc_maps = dict()
        for map_name in locations:
            nusc_map = NuScenesMap(map_name=map_name, dataroot=os.environ['NUSCENES'])
            if self.render:
                nusc_map.render_layers(['lane'], figsize=1)
                plt.show()
            self.nusc_maps[map_name] = nusc_map

    def test_layer_stats(self):
        """Compare per-layer record counts against hard-coded reference values."""
        layer_counts = defaultdict((lambda : []))
        # Expected record counts per non-geometric layer, per map.
        ref_counts = {'singapore-onenorth': [1, 783, 645, 936, 120, 838, 451, 39, 152, 357, 127], 'singapore-hollandvillage': [426, 167, 387, 601, 28, 498, 300, 0, 107, 220, 119], 'singapore-queenstown': [219, 260, 676, 910, 75, 457, 437, 40, 172, 257, 81], 'boston-seaport': [2, 928, 969, 1215, 340, 301, 775, 275, 377, 671, 307]}
        for map_name in locations:
            nusc_map = self.nusc_maps[map_name]
            for layer_name in nusc_map.non_geometric_layers:
                layer_objs = nusc_map.json_obj[layer_name]
                layer_counts[map_name].append(len(layer_objs))
            assert (ref_counts[map_name] == layer_counts[map_name]), ('Error: Map %s has a different number of layers: \n%s vs. \n%s' % (map_name, ref_counts[map_name], layer_counts[map_name]))

    def test_disconnected_lanes(self):
        """Every lane should be connected; report maps with orphaned lanes."""
        found_error = False
        for map_name in locations:
            nusc_map = self.nusc_maps[map_name]
            disconnected = get_disconnected_lanes(nusc_map)
            if (len(disconnected) > 0):
                print(('Error: Missing connectivity in map %s for %d lanes: \n%s' % (map_name, len(disconnected), disconnected)))
                found_error = True
        self.assertFalse(found_error, 'Error: Found missing connectivity. \nSee messages above!')

    def test_egoposes_on_map(self):
        """All ego poses (outside a known-bad whitelist) must lie on the drivable area."""
        nusc = NuScenes(version=self.version, dataroot=os.environ['NUSCENES'], verbose=False)
        # Scenes with known issues that are deliberately skipped.
        whitelist = ['scene-0499', 'scene-0501', 'scene-0502', 'scene-0515', 'scene-0517']
        invalid_scenes = []
        for scene in tqdm.tqdm(nusc.scene, leave=False):
            if (scene['name'] in whitelist):
                continue
            log = nusc.get('log', scene['log_token'])
            map_name = log['location']
            nusc_map = self.nusc_maps[map_name]
            ratio_valid = get_egoposes_on_drivable_ratio(nusc, nusc_map, scene['token'])
            if (ratio_valid != 1.0):
                print(('Error: Scene %s has a ratio of %f ego poses on the driveable area!' % (scene['name'], ratio_valid)))
                invalid_scenes.append(scene['name'])
        self.assertEqual(len(invalid_scenes), 0)
class ProtoGraphBuilder():
    """Incrementally assembles ProtoNetwork objects into a ProtoGraph.

    Modules are entered/left via :meth:`begin_module`/:meth:`end_module`;
    when the outermost module ends, the network under construction is
    committed under a unique name.
    """

    def __init__(self, **kwargs):
        self.networks = OrderedDict()
        self.stack = []       # nesting stack of modules being traced
        self.current = None   # network under construction, created lazily
        self.module = None    # module currently being traced
        self.names = {}       # bookkeeping for unique graph-name generation
        self.dirty_flag = False
        self.graph_name = kwargs.get('name', None)
        self.proto_graph = ProtoGraph(self.networks, kwargs.get('parameter_scope', None))

    def get_current(self):
        """Return the network being built, creating it on first use."""
        if self.current is None:
            self.current = ProtoNetwork(self.proto_graph)
        return self.current

    def get_module(self):
        """Return the active module, creating a FlatModule on first use."""
        if self.module is None:
            self.module = FlatModule(self.graph_name)
        return self.module

    def dirty(self):
        """Mark the builder as holding uncommitted changes."""
        self.dirty_flag = True

    def is_dirty(self):
        return self.dirty_flag

    def get_graph(self):
        return self.proto_graph

    def begin_module(self, module):
        """Enter *module*; returns the network under construction."""
        self.stack.append(module)
        self.module = module
        return self.get_current()

    def end_module(self):
        """Leave the innermost module; commit once the stack empties."""
        self.stack.pop()
        if self.stack:
            self.module = self.stack[-1]
        else:
            self.commit_network()

    def commit_network(self):
        """Commit the current network under a unique name and reset state."""
        if self.graph_name:
            g_name = self.graph_name
            self.graph_name = None  # the explicit name is consumed only once
        else:
            g_name = self.module.name
        g_name = _get_unique_name(self.names, g_name)
        # Map parameter data back to their scoped names for the commit.
        params = {v.data: k for (k, v) in self.module.get_parameters().items()}
        self.current.commit(g_name, self.networks, params)
        self.dirty_flag = False
        self.current = None
        self.module = None
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Learning-rate schedule with a polynomial warmup phase.

    While ``step < warmup_steps`` the rate ramps as
    ``initial_learning_rate * (step / warmup_steps) ** power``; afterwards it
    defers to ``decay_schedule_fn(step)``.
    """

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super(WarmUp, self).__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or 'WarmUp') as name:
            # Work in float32 graph ops throughout.
            step_f = tf.cast(step, tf.float32)
            warmup_f = tf.cast(self.warmup_steps, tf.float32)
            progress = step_f / warmup_f
            warmup_lr = self.initial_learning_rate * tf.math.pow(progress, self.power)
            # Ramp during warmup, then hand off to the decay schedule.
            return tf.cond(step_f < warmup_f,
                           lambda: warmup_lr,
                           lambda: self.decay_schedule_fn(step),
                           name=name)

    def get_config(self):
        """Keras serialization support."""
        return {
            'initial_learning_rate': self.initial_learning_rate,
            'decay_schedule_fn': self.decay_schedule_fn,
            'warmup_steps': self.warmup_steps,
            'power': self.power,
            'name': self.name,
        }
def _from_ctypes_scalar(t): if (getattr(t, '__ctype_be__', None) is t): return np.dtype(('>' + t._type_)) elif (getattr(t, '__ctype_le__', None) is t): return np.dtype(('<' + t._type_)) else: return np.dtype(t._type_)
# BUG FIX: the decorator was truncated to a bare ``.parametrize(...)`` (a
# syntax error); restored as the standard pytest parametrize marker.
# NOTE(review): confirm the module imports pytest at the top of the file.
@pytest.mark.parametrize('N', [100, 1000, 10000])
def test_calculate_p_values(N):
    """The CUDA calculate_p_values must match the numba reference implementation."""
    r = 1.0
    expected = formal_integral_numba.calculate_p_values(r, N)
    # Copy the device result into a host array before comparing.
    actual = np.zeros_like(expected, dtype=np.float64)
    actual[:] = formal_integral_cuda.calculate_p_values(r, N)
    ntest.assert_allclose(actual, expected, rtol=1e-14)
def amber_app(wd, feat_name, run=False):
    """Assemble (and optionally run) an Amber NAS application for one DeepSEA feature.

    Parameters: ``wd`` working directory for outputs; ``feat_name`` DeepSEA
    feature to train on; ``run`` when True starts the controller training loop.
    Returns the constructed Amber instance.
    """
    # Component implementations Amber should instantiate.
    type_dict = {'controller_type': 'GeneralController', 'knowledge_fn_type': 'zero', 'reward_fn_type': 'LossAucReward', 'modeler_type': 'KerasModelBuilder', 'manager_type': 'DistributedManager', 'env_type': 'ControllerTrainEnv'}
    # HDF5-backed batched generators for the train/validation splits.
    train_data_kwargs = get_data_config_deepsea_compiled(fp='./data/zero_shot_deepsea/train.h5', feat_name=feat_name, batch_size=1024, shuffle=True)
    validate_data_kwargs = get_data_config_deepsea_compiled(fp='./data/zero_shot_deepsea/val.h5', feat_name=feat_name, batch_size=1024, shuffle=False)
    os.makedirs(wd, exist_ok=True)
    # Fixed model head/tail: 1000bp x 4-channel input, single sigmoid output.
    input_node = [Operation('input', shape=(1000, 4), name='input')]
    output_node = [Operation('dense', units=1, activation='sigmoid', name='output')]
    model_compile_dict = {'loss': 'binary_crossentropy', 'optimizer': 'adam', 'metrics': ['acc']}
    (model_space, layer_embedding_sharing) = get_model_space()
    batch_size = 1024
    use_ppo = False
    # Full specification handed to Amber; top-level keys mirror type_dict.
    specs = {
        'model_space': model_space,
        # RL controller (LSTM) hyper-parameters; skip connections disabled.
        'controller': {'share_embedding': layer_embedding_sharing, 'with_skip_connection': False, 'skip_weight': None, 'lstm_size': 128, 'lstm_num_layers': 1, 'kl_threshold': 0.1, 'train_pi_iter': 100, 'optim_algo': 'adam', 'rescale_advantage_by_reward': False, 'temperature': 1.0, 'tanh_constant': 1.5, 'buffer_size': 10, 'batch_size': 5, 'use_ppo_loss': use_ppo},
        'model_builder': {'batch_size': batch_size, 'inputs_op': input_node, 'outputs_op': output_node, 'model_compile_dict': model_compile_dict},
        'knowledge_fn': {'data': None, 'params': {}},
        'reward_fn': {'method': 'auc'},
        # Child-model training manager: data generators plus fit settings.
        'manager': {'data': {'train_data': BatchedHDF5Generator, 'validation_data': BatchedHDF5Generator}, 'params': {'train_data_kwargs': train_data_kwargs, 'validate_data_kwargs': validate_data_kwargs, 'devices': ['/device:GPU:0'], 'epochs': 100, 'fit_kwargs': {'earlystop_patience': 40, 'steps_per_epoch': 100, 'max_queue_size': 50, 'workers': 3}, 'child_batchsize': batch_size, 'store_fn': 'model_plot', 'working_dir': wd, 'verbose': 0}},
        # Search loop budget: 75 episodes of 5 architectures, max 24h.
        'train_env': {'max_episode': 75, 'max_step_per_ep': 5, 'working_dir': wd, 'time_budget': '24:00:00', 'with_skip_connection': False, 'save_controller_every': 1},
    }
    amb = Amber(types=type_dict, specs=specs)
    if run:
        amb.run()
    return amb
class LogBuffer(object):
    """Accumulates scalar values with counts and computes weighted averages."""

    def __init__(self):
        self.val_history = OrderedDict()  # key -> list of recorded values
        self.n_history = OrderedDict()    # key -> list of matching counts
        self.output = OrderedDict()       # key -> latest computed average
        self.ready = False                # True once average() has run

    def clear(self):
        """Drop all recorded history and any computed output."""
        self.val_history.clear()
        self.n_history.clear()
        self.clear_output()

    def clear_output(self):
        """Drop computed averages only, keeping the raw history."""
        self.output.clear()
        self.ready = False

    def update(self, vars, count=1):
        """Record one value per key in *vars*, each weighted by *count*."""
        assert isinstance(vars, dict)
        for key, value in vars.items():
            self.val_history.setdefault(key, [])
            self.n_history.setdefault(key, [])
            self.val_history[key].append(value)
            self.n_history[key].append(count)

    def average(self, n=0):
        """Store the count-weighted mean of the last *n* records per key
        (all records when n == 0) in ``self.output``."""
        assert n >= 0
        for key in self.val_history:
            vals = np.array(self.val_history[key][-n:])
            counts = np.array(self.n_history[key][-n:])
            self.output[key] = np.sum(vals * counts) / np.sum(counts)
        self.ready = True
class IndexedGroup(IndexedMonoid):
    """A group whose generators are indexed by an arbitrary index set."""

    def order(self):
        """Return the cardinality of the group (alias of :meth:`cardinality`)."""
        return self.cardinality()

    def rank(self):
        """Return the number of group generators."""
        return self.group_generators().cardinality()

    # BUG FIX: the original contained a stray bare ``_method`` token here --
    # almost certainly a mangled ``@cached_method`` decorator; restored.
    # NOTE(review): confirm against upstream Sage source.
    @cached_method
    def group_generators(self):
        """Return the family of group generators indexed by ``self._indices``.

        For an infinite index set the generators are wrapped lazily in a
        PoorManMap so they are only constructed on demand.
        """
        if self._indices.cardinality() == infinity:
            gen = PoorManMap(self.gen, domain=self._indices, codomain=self, name='Generator map')
            return Family(self._indices, gen)
        return Family(self._indices, self.gen)

    gens = group_generators
def save_checkpoint(state, is_best, fpath):
    """Save *state* as ``<fpath>/checkpoint.pth``; when *is_best*, duplicate
    it as ``checkpoint_best.pth`` in the same directory.

    The legacy (non-zipfile) serialization keeps checkpoints loadable by
    older torch versions.
    """
    target = fpath
    if target:
        # Create the directory if needed, then save inside it.
        mkdir_if_missing(target)
        target = os.path.join(target, 'checkpoint.pth')
    torch.save(state, target, _use_new_zipfile_serialization=False)
    if is_best:
        shutil.copy(target, os.path.join(os.path.dirname(target), 'checkpoint_best.pth'))
def avg_pool_nd(dims, *args, **kwargs):
    """Create a 1-, 2- or 3-dimensional average-pooling module.

    ``*args``/``**kwargs`` are forwarded to ``nn.AvgPool{dims}d``.
    Raises ValueError for any other dimensionality.
    """
    pool_classes = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}
    if dims in pool_classes:
        return pool_classes[dims](*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')
def makeEmulatorBaseWith5StubASAndHosts(hosts_per_stub_as: int) -> Emulator:
    """Create a base emulation: 5 internet exchanges, 4 transit ASes, and five
    stub ASes (150-154), each stub holding *hosts_per_stub_as* hosts.

    Layers (base, routing, eBGP, iBGP, OSPF) are added but not rendered.
    """
    emu = Emulator()
    base = Base()
    routing = Routing()
    ebgp = Ebgp()
    ibgp = Ibgp()
    ospf = Ospf()
    # Internet exchanges, display-named after their city.
    ix100 = base.createInternetExchange(100)
    ix101 = base.createInternetExchange(101)
    ix102 = base.createInternetExchange(102)
    ix103 = base.createInternetExchange(103)
    ix104 = base.createInternetExchange(104)
    ix100.getPeeringLan().setDisplayName('NYC-100')
    ix101.getPeeringLan().setDisplayName('San Jose-101')
    ix102.getPeeringLan().setDisplayName('Chicago-102')
    ix103.getPeeringLan().setDisplayName('Miami-103')
    ix104.getPeeringLan().setDisplayName('Boston-104')
    # Transit ASes: (asn, exchanges joined, internal inter-exchange links).
    makeTransitAs(base, 2, [100, 101, 102], [(100, 101), (101, 102)])
    makeTransitAs(base, 3, [100, 103, 104], [(100, 103), (103, 104)])
    makeTransitAs(base, 4, [100, 102, 104], [(100, 104), (102, 104)])
    makeTransitAs(base, 12, [101, 104], [(101, 104)])
    # Stub ASes attached to an exchange, each with the requested host count.
    makeStubAsWithHosts(emu, base, 150, 100, hosts_per_stub_as)
    makeStubAsWithHosts(emu, base, 151, 100, hosts_per_stub_as)
    makeStubAsWithHosts(emu, base, 152, 101, hosts_per_stub_as)
    makeStubAsWithHosts(emu, base, 153, 101, hosts_per_stub_as)
    makeStubAsWithHosts(emu, base, 154, 102, hosts_per_stub_as)
    # Route-server (multilateral) peerings at selected exchanges.
    ebgp.addRsPeers(100, [2, 3, 4])
    ebgp.addRsPeers(102, [2, 4])
    ebgp.addRsPeers(104, [3, 4])
    # Private (bilateral) provider->customer peerings.
    ebgp.addPrivatePeerings(100, [2], [150, 151], PeerRelationship.Provider)
    ebgp.addPrivatePeerings(100, [3], [150], PeerRelationship.Provider)
    ebgp.addPrivatePeerings(101, [2], [12], PeerRelationship.Provider)
    ebgp.addPrivatePeerings(101, [12], [152, 153], PeerRelationship.Provider)
    ebgp.addPrivatePeerings(102, [2, 4], [154], PeerRelationship.Provider)
    emu.addLayer(base)
    emu.addLayer(routing)
    emu.addLayer(ebgp)
    emu.addLayer(ibgp)
    emu.addLayer(ospf)
    return emu
class WFRadiationMeshNy(RadiationField):
    """Glossary field exposing the number of vertical mesh points (ny) of a wavefront."""

    glossary_name = 'params/Mesh/ny'

    def __init__(self, wf):
        super(WFRadiationMeshNy, self).__init__(wf)
        self.attributes.update({'units': '-', 'limits': '[2:LONG_MAX]', 'alias': ''})

    # BUG FIX: the original defined two plain methods both named ``value``, so
    # the setter silently shadowed the getter; restored as a property pair.
    # NOTE(review): confirm sibling glossary classes use the same pattern.
    @property
    def value(self):
        """Number of mesh points along the vertical axis."""
        return self._wf._srwl_wf.mesh.ny

    @value.setter
    def value(self, val):
        self._wf._srwl_wf.mesh.ny = int(val)
class Adafactor(torch.optim.Optimizer):
    """Adafactor optimizer (Shazeer & Stern, 2018): an Adam variant that stores
    factored row/column second-moment statistics for parameters of rank >= 2,
    saving memory relative to full per-element statistics.

    With ``relative_step`` the learning rate is derived from the step count
    instead of ``lr``; with ``scale_parameter`` it is additionally scaled by
    the parameter's RMS.
    """

    def __init__(self, params, lr=None, eps=(1e-30, 0.001), clip_threshold=1.0, decay_rate=(- 0.8), beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False):
        defaults = dict(lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init)
        super(Adafactor, self).__init__(params, defaults)

    # NOTE(review): the fairseq original declares these two as @property; here
    # they are plain methods -- confirm callers invoke them with parentheses.
    def supports_memory_efficient_fp16(self):
        return True

    def supports_flat_params(self):
        return False

    def _get_lr(self, param_group, param_state):
        # Effective step size: relative (step-count based) when enabled,
        # otherwise the explicit lr; optionally scaled by the parameter RMS.
        rel_step_sz = param_group['lr']
        if param_group['relative_step']:
            min_step = ((1e-06 * param_state['step']) if param_group['warmup_init'] else 0.01)
            rel_step_sz = min(min_step, (1.0 / math.sqrt(param_state['step'])))
        param_scale = 1.0
        if param_group['scale_parameter']:
            param_scale = max(param_group['eps'][1], param_state['RMS'])
        return (param_scale * rel_step_sz)

    def _get_options(self, param_group, param_shape):
        # Factor the second moment only for matrices/higher-rank tensors;
        # first moment is kept only when beta1 is given.
        factored = (len(param_shape) >= 2)
        use_first_moment = (param_group['beta1'] is not None)
        return (factored, use_first_moment)

    def _rms(self, tensor):
        # Root-mean-square of a tensor.
        return (tensor.norm(2) / (tensor.numel() ** 0.5))

    def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col, output):
        # Reconstruct the (inverse sqrt of the) full second-moment estimate
        # from its row/column factors, writing the result into ``output``.
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=(- 1)).unsqueeze((- 1))).rsqrt_().unsqueeze((- 1))
        c_factor = exp_avg_sq_col.unsqueeze((- 2)).rsqrt()
        torch.mul(r_factor, c_factor, out=output)

    def step(self, closure=None):
        """Perform a single optimization step; *closure* re-evaluates the loss."""
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adafactor does not support sparse gradients.')
                state = self.state[p]
                grad_shape = grad.shape
                (factored, use_first_moment) = self._get_options(group, grad_shape)
                if (len(state) == 0):
                    # Lazy state init: factored row/col stats for matrices,
                    # a full second-moment tensor otherwise.
                    state['step'] = 0
                    if use_first_moment:
                        state['exp_avg'] = torch.zeros_like(grad)
                    if factored:
                        state['exp_avg_sq_row'] = torch.zeros(grad_shape[:(- 1)]).type_as(grad)
                        state['exp_avg_sq_col'] = torch.zeros((grad_shape[:(- 2)] + grad_shape[(- 1):])).type_as(grad)
                    else:
                        state['exp_avg_sq'] = torch.zeros_like(grad)
                    state['RMS'] = 0
                else:
                    # Keep state dtype in sync with the (float) gradient.
                    if use_first_moment:
                        state['exp_avg'] = state['exp_avg'].type_as(grad)
                    if factored:
                        state['exp_avg_sq_row'] = state['exp_avg_sq_row'].type_as(grad)
                        state['exp_avg_sq_col'] = state['exp_avg_sq_col'].type_as(grad)
                    else:
                        state['exp_avg_sq'] = state['exp_avg_sq'].type_as(grad)
                p_data_fp32 = p.data.float()
                state['step'] += 1
                state['RMS'] = self._rms(p_data_fp32)
                group['lr'] = self._get_lr(group, state)
                # beta2 schedule from the paper: 1 - t^decay_rate.
                beta2t = (1.0 - math.pow(state['step'], group['decay_rate']))
                update = ((grad ** 2) + group['eps'][0])
                if factored:
                    exp_avg_sq_row = state['exp_avg_sq_row']
                    exp_avg_sq_col = state['exp_avg_sq_col']
                    # NOTE(review): ``add_(scalar, tensor)`` is the deprecated
                    # overload, equivalent to ``add_(tensor, alpha=scalar)``.
                    exp_avg_sq_row.mul_(beta2t).add_((1.0 - beta2t), update.mean(dim=(- 1)))
                    exp_avg_sq_col.mul_(beta2t).add_((1.0 - beta2t), update.mean(dim=(- 2)))
                    self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col, update)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state['exp_avg_sq']
                    exp_avg_sq.mul_(beta2t).add_((1.0 - beta2t), update)
                    torch.rsqrt(exp_avg_sq, out=update).mul_(grad)
                # Clip the update RMS to clip_threshold, then apply the lr.
                update.div_(max(1.0, (self._rms(update) / group['clip_threshold'])))
                update.mul_(group['lr'])
                if use_first_moment:
                    exp_avg = state['exp_avg']
                    exp_avg.mul_(group['beta1']).add_((1 - group['beta1']), update)
                    update = exp_avg
                if (group['weight_decay'] != 0):
                    # Decoupled weight decay applied directly to fp32 weights.
                    p_data_fp32.add_(((- group['weight_decay']) * group['lr']), p_data_fp32)
                p_data_fp32.add_((- update))
                p.data.copy_(p_data_fp32)
        return loss
class OrderedSetPartitions_scomp(OrderedSetPartitions):
    """Ordered set partitions of a set whose block sizes follow a fixed composition."""

    def __init__(self, s, comp):
        OrderedSetPartitions.__init__(self, s)
        # The prescribed composition of block sizes.
        self.c = Composition(comp)

    def __repr__(self):
        return ('Ordered set partitions of %s into parts of size %s' % (Set(self._set), self.c))

    def __contains__(self, x):
        # Must be an ordered set partition AND have exactly the prescribed sizes.
        return (OrderedSetPartitions.__contains__(self, x) and ([len(z) for z in x] == self.c))

    def cardinality(self):
        """Multinomial coefficient n! / (c_1! c_2! ... c_k!)."""
        return multinomial(self.c)

    def __iter__(self):
        """Iterate via multiset permutations: each word over {0..N-1} with
        letter multiplicities given by the composition encodes one partition
        (letter at position p assigns element p to that block)."""
        part_sizes = self.c
        N = len(part_sizes)
        if (not N):
            # Empty composition: yield the unique empty partition.
            (yield self.element_class(self, [], check=False))
            return
        l = []
        lset = list(self._set)
        # Lexicographically smallest word: part_sizes[i] copies of letter i.
        for (i, j) in enumerate(part_sizes):
            l.extend(([i] * j))
        pi = multiset_permutation_to_ordered_set_partition(l, N)
        converted = [frozenset((lset[i] for i in part)) for part in pi]
        (yield self.element_class(self, converted, check=False))
        # Advance through the remaining words in lexicographic order.
        while multiset_permutation_next_lex(l):
            pi = multiset_permutation_to_ordered_set_partition(l, N)
            converted = [frozenset((lset[i] for i in part)) for part in pi]
            (yield self.element_class(self, converted, check=False))
class Experiment(object):
    """Pickle-backed experiment record identified by a UUID.

    ``Experiment()`` creates a fresh experiment; ``Experiment(id)`` loads the
    pickled experiment from ``experiments/<id>.pkl``.
    """

    pkl_path = 'experiments/{}.pkl'

    def __new__(cls, id=None):
        # With no id, build a brand-new experiment; otherwise unpickle it.
        if id is None:
            r = object.__new__(cls)
            r.create_new_experiment()
            return r
        return cls.load_experiment(id)

    def __init__(self, *args, **kwargs):
        # __init__ runs after __new__ even for loaded instances; skip
        # re-initialisation when the instance already carries an id.
        if hasattr(self, 'id'):
            return
        self.id = None
        self._init_fields()

    def _init_fields(self):
        # Shared field initialisation for __init__/create_new_experiment.
        self.version = '1.1'
        self.args = None
        self.optimization_iterations = 0
        self.optimization_iteration_loss = []
        self.random_state = None
        self.NN = None
        self.parent = None
        self.metrics = None
        self.original_loss = None
        self.noise_level = None
        self.noise_distribution = None

    def create_new_experiment(self):
        """Assign a fresh UUID and reset all fields to their defaults."""
        self.id = str(uuid.uuid4())
        self._init_fields()

    # BUG FIX: the three methods below were plain functions, so calls like
    # ``cls.load_experiment(id)`` in __new__ received the id as ``cls`` and
    # crashed; they are classmethods.
    @classmethod
    def load_experiment(cls, id):
        """Unpickle and return the experiment stored under *id*."""
        with open(cls.pkl_path.format(id), 'rb') as f:
            return pickle.load(f)

    @classmethod
    def exists(cls, id):
        """Return True when a pickle for *id* exists on disk."""
        return path.exists(cls.pkl_path.format(id))

    def save(self, prefix=''):
        """Pickle this experiment to ``<prefix>experiments/<id>.pkl``."""
        with open(f'{prefix}{self.pkl_path.format(self.id)}', 'wb') as f:
            return pickle.dump(self, f)

    def opt_callback(self, J):
        """Record one optimizer iteration with loss value *J*."""
        self.optimization_iterations += 1
        self.optimization_iteration_loss.append(float(J))

    @classmethod
    def spawn_child(cls, id):
        """Load experiment *id*; return a copy with a fresh UUID whose
        ``parent`` points back at *id*."""
        child = cls.load_experiment(id)
        child.parent = child.id
        child.id = str(uuid.uuid4())
        return child
class ResNetBlock(tf.keras.layers.Layer):
    """Basic two-conv residual block with an optional projection shortcut.

    Attribute names (conv1, bn1, ...) are kept as-is: Keras derives variable
    tracking paths from them.
    """

    def __init__(self, filters, strides=(1, 1), project=False, name='ResNetBlock'):
        super(ResNetBlock, self).__init__(name=name)
        # Main path: conv-bn-relu -> conv-bn; the second BN's gamma starts at
        # zero so the block initially acts as an identity mapping.
        self.conv1 = tf.keras.layers.Conv2D(filters, kernel_size=(3, 3), strides=strides, padding='SAME', use_bias=False, name='conv1')
        self.bn1 = tf.keras.layers.BatchNormalization(name='conv1/bn')
        self.conv2 = tf.keras.layers.Conv2D(filters, kernel_size=(3, 3), padding='SAME', use_bias=False, name='conv2')
        self.bn2 = tf.keras.layers.BatchNormalization(gamma_initializer=tf.zeros_initializer(), name='conv2/bn')
        self.project = project
        if project:
            # 1x1 projection matches channel count/stride on the shortcut.
            self.conv_shortcut = tf.keras.layers.Conv2D(filters, kernel_size=(1, 1), strides=strides, use_bias=False, name='conv_shortcut')
            self.bn_shortcut = tf.keras.layers.BatchNormalization(name='conv_shortcut/bn')

    def call(self, inputs, training=False):
        residual = self.bn1(self.conv1(inputs), training=training)
        residual = tf.nn.relu(residual)
        residual = self.bn2(self.conv2(residual), training=training)
        if self.project:
            shortcut = self.bn_shortcut(self.conv_shortcut(inputs), training=training)
        else:
            shortcut = inputs
        return tf.nn.relu(residual + shortcut)
class DataInputTest():
    """Batched iterator over test samples of the form ``(user, hist, (pos, neg))``.

    Each iteration returns ``(batch_index, (u, i, j, hist_i, sl))`` where
    ``hist_i`` is a right-zero-padded int64 matrix of per-sample histories and
    ``sl`` holds the true history lengths.
    """

    def __init__(self, data, batch_size):
        self.batch_size = batch_size
        self.data = data
        # Number of batches, rounding up for a trailing partial batch.
        self.epoch_size = len(self.data) // self.batch_size
        if self.epoch_size * self.batch_size < len(self.data):
            self.epoch_size += 1
        self.i = 0

    def __iter__(self):
        return self

    def __next__(self):
        # BUG FIX: the class only defined a Python-2 style ``next`` method and
        # was therefore not a valid Python-3 iterator; ``next`` is kept below
        # as a backwards-compatible alias.
        if self.i == self.epoch_size:
            raise StopIteration
        ts = self.data[self.i * self.batch_size:min((self.i + 1) * self.batch_size, len(self.data))]
        self.i += 1
        u, i, j, sl = [], [], [], []
        for t in ts:
            u.append(t[0])      # user id
            i.append(t[2][0])   # positive item
            j.append(t[2][1])   # negative item
            sl.append(len(t[1]))  # history length
        max_sl = max(sl)
        # Right-pad each history with zeros up to the longest in the batch.
        hist_i = np.zeros([len(ts), max_sl], np.int64)
        for k, t in enumerate(ts):
            for l in range(len(t[1])):
                hist_i[k][l] = t[1][l]
        return (self.i, (u, i, j, hist_i, sl))

    # Alias for callers still using the Python-2 ``obj.next()`` spelling.
    next = __next__
def Over9000(params, alpha=0.5, k=6, *args, **kwargs):
    """Build a RangerLars-style optimizer: Ralamb wrapped in Lookahead.

    ``alpha`` and ``k`` configure the Lookahead wrapper; remaining arguments
    are forwarded to Ralamb.
    """
    base_optimizer = Ralamb(params, *args, **kwargs)
    return Lookahead(base_optimizer, alpha, k)
def load_multi_foreign_key():
    """Build a two-table demo dataset where ``child`` references ``parent``
    through two foreign keys; returns ``(metadata, tables)``."""
    ids = range(10)
    parent_df = pd.DataFrame({'parent_id': ids, 'value': ids})
    child_df = pd.DataFrame({'parent_1_id': ids, 'parent_2_id': ids, 'value': ids})
    meta = Metadata()
    meta.add_table('parent', parent_df, primary_key='parent_id')
    # First FK is declared with the table, the second as an explicit relationship.
    meta.add_table('child', child_df, parent='parent', foreign_key='parent_1_id')
    meta.add_relationship('parent', 'child', 'parent_2_id')
    return (meta, {'parent': parent_df, 'child': child_df})
class TestFuture(TestCase):
    """Unit tests for torch.futures.Future: completion, chaining, error
    propagation, and the collect_all/wait_all helpers."""

    def test_done(self) -> None:
        f = Future[torch.Tensor]()
        self.assertFalse(f.done())
        f.set_result(torch.ones(2, 2))
        self.assertTrue(f.done())

    def test_done_exception(self) -> None:
        # A then-callback that raises marks the chained future done-with-error.
        err_msg = 'Intentional Value Error'

        def raise_exception(unused_future):
            raise RuntimeError(err_msg)
        f1 = Future[torch.Tensor]()
        self.assertFalse(f1.done())
        f1.set_result(torch.ones(2, 2))
        self.assertTrue(f1.done())
        f2 = f1.then(raise_exception)
        self.assertTrue(f2.done())
        with self.assertRaisesRegex(RuntimeError, err_msg):
            f2.wait()

    def test_wait(self) -> None:
        f = Future[torch.Tensor]()
        f.set_result(torch.ones(2, 2))
        self.assertEqual(f.wait(), torch.ones(2, 2))

    def test_wait_multi_thread(self) -> None:
        # wait() must block until another thread sets the result.
        def slow_set_future(fut, value):
            time.sleep(0.5)
            fut.set_result(value)
        f = Future[torch.Tensor]()
        t = threading.Thread(target=slow_set_future, args=(f, torch.ones(2, 2)))
        t.start()
        self.assertEqual(f.wait(), torch.ones(2, 2))
        t.join()

    def test_mark_future_twice(self) -> None:
        fut = Future[int]()
        fut.set_result(1)
        with self.assertRaisesRegex(RuntimeError, 'Future can only be marked completed once'):
            fut.set_result(1)

    def test_pickle_future(self):
        fut = Future[int]()
        errMsg = 'Can not pickle torch.futures.Future'
        with TemporaryFileName() as fname:
            with self.assertRaisesRegex(RuntimeError, errMsg):
                torch.save(fut, fname)

    def test_then(self):
        fut = Future[torch.Tensor]()
        then_fut = fut.then((lambda x: (x.wait() + 1)))
        fut.set_result(torch.ones(2, 2))
        self.assertEqual(fut.wait(), torch.ones(2, 2))
        self.assertEqual(then_fut.wait(), (torch.ones(2, 2) + 1))

    def test_chained_then(self):
        # Each link in the chain adds 1; verify every intermediate future.
        fut = Future[torch.Tensor]()
        futs = []
        last_fut = fut
        for _ in range(20):
            last_fut = last_fut.then(add_one)
            futs.append(last_fut)
        fut.set_result(torch.ones(2, 2))
        for i in range(len(futs)):
            self.assertEqual(futs[i].wait(), ((torch.ones(2, 2) + i) + 1))

    def _test_error(self, cb, errMsg):
        # Helper: callback *cb* must make the chained future raise errMsg,
        # while the source future still yields its result.
        fut = Future[int]()
        then_fut = fut.then(cb)
        fut.set_result(5)
        self.assertEqual(5, fut.wait())
        with self.assertRaisesRegex(RuntimeError, errMsg):
            then_fut.wait()

    def test_then_wrong_arg(self):
        def wrong_arg(tensor):
            return (tensor + 1)
        self._test_error(wrong_arg, 'unsupported operand type.*Future.*int')

    def test_then_no_arg(self):
        def no_arg():
            return True
        self._test_error(no_arg, 'takes 0 positional arguments but 1 was given')

    def test_then_raise(self):
        def raise_value_error(fut):
            raise ValueError('Expected error')
        self._test_error(raise_value_error, 'Expected error')

    def test_collect_all(self):
        fut1 = Future[int]()
        fut2 = Future[int]()
        fut_all = torch.futures.collect_all([fut1, fut2])

        def slow_in_thread(fut, value):
            time.sleep(0.1)
            fut.set_result(value)
        # One future completes in a worker thread, the other inline.
        t = threading.Thread(target=slow_in_thread, args=(fut1, 1))
        fut2.set_result(2)
        t.start()
        res = fut_all.wait()
        self.assertEqual(res[0].wait(), 1)
        self.assertEqual(res[1].wait(), 2)
        t.join()

    # NOTE(review): this bare tuple looks like a mangled decorator, presumably
    # ``@unittest.skipIf(IS_WINDOWS, ...)``; as written it has no effect and
    # the test below runs everywhere -- confirm against the upstream suite.
    (IS_WINDOWS, 'TODO: need to fix this testcase for Windows')
    def test_wait_all(self):
        fut1 = Future[int]()
        fut2 = Future[int]()
        fut1.set_result(1)
        fut2.set_result(2)
        res = torch.futures.wait_all([fut1, fut2])
        print(res)
        self.assertEqual(res, [1, 2])

        def raise_in_fut(fut):
            raise ValueError('Expected error')
        fut3 = fut1.then(raise_in_fut)
        with self.assertRaisesRegex(RuntimeError, 'Expected error'):
            torch.futures.wait_all([fut3, fut2])
def partition_model(model: nn.Module, model_args: tuple=(), model_kwargs: Optional[Dict]=None, n_iter: int=10, nparts: int=4, max_depth: int=100, basic_blocks: Optional[Union[(List[nn.Module], Tuple[nn.Module])]]=None, node_weight_function: Optional[NodeWeightFunction]=None, edge_weight_function: Optional[EdgeWeightFunction]=None, use_layers_only_graph: bool=True, recomputation: bool=True, partitioning_method: str='ACYCLIC', METIS_opt: Optional[Dict]=None, acyclic_opt: Optional[Dict]=None, binpack_opt: Optional[Dict]=None, mpipe_opt: Optional[Dict]=None, force_no_recomp_scopes: Optional[Callable[([str], bool)]]=None, use_graph_profiler: bool=True, use_network_profiler: bool=False, profile_ops: bool=True, save_memory_mode: bool=False, trace_on_gpu=False, graph: Optional[Graph]=None, use_virtual_stages: bool=True, async_pipe=False, trace_cache_name=None, profiles_cache_name=None, dont_use_async_meta_alg=False) -> Graph:
    """Profile *model* and partition its computation graph into pipeline stages.

    Two paths exist: the standard one (trace+profile, then partition into
    ``nparts`` stages), and an async-pipe meta-algorithm that re-partitions
    until the last stage needs no recomputation.  Returns the partitioned
    Graph.
    """
    # Default the option dicts so downstream code can index them freely.
    if (basic_blocks is None):
        basic_blocks = ()
    if (METIS_opt is None):
        METIS_opt = dict()
    if (acyclic_opt is None):
        acyclic_opt = dict()
    if (binpack_opt is None):
        binpack_opt = dict()
    if (mpipe_opt is None):
        mpipe_opt = dict()
    if ((not async_pipe) or (not recomputation) or dont_use_async_meta_alg):
        # Standard path: build (or reuse) a profiled graph, then partition once.
        if (graph is None):
            graph = compute_and_maybe_cache(build_profiled_graph, profiles_cache_name, model, _cache_cls_to_use=GraphCache, model_args=model_args, model_kwargs=model_kwargs, use_network_profiler=use_network_profiler, use_graph_profiler=use_graph_profiler, save_memory_mode=save_memory_mode, trace_on_gpu=trace_on_gpu, profile_ops=profile_ops, recomputation=recomputation, n_iter=n_iter, max_depth=max_depth, basic_blocks=basic_blocks, force_no_recomp_scopes=force_no_recomp_scopes, trace_cache_name=trace_cache_name)
        if (nparts > 1):
            graph = partition_profiled_graph(graph, model, nparts, partitioning_method, node_weight_function, edge_weight_function, use_virtual_stages, use_layers_only_graph, METIS_opt, acyclic_opt, binpack_opt, mpipe_opt)
    else:
        # Async-pipe path: trace first, collect full (recomputation and
        # no-recomputation) profiles, then iterate partitioning until the
        # last partition runs without recomputation.
        graph = build_graph_with_nparams_and_grad_reqs(model, model_args, model_kwargs, max_depth, basic_blocks, save_memory_mode, trace_on_gpu, res_cache_name=trace_cache_name)
        (weights, max_memory_usage_r, max_memory_usage_nr) = compute_and_maybe_cache(get_full_profiles, profiles_cache_name, graph, model, model_args, model_kwargs, n_iter, profile_ops, max_depth, basic_blocks, force_no_recomp_scopes, save_memory_mode, use_graph_profiler, use_network_profiler)
        # Bind every partitioning option now; the meta-algorithm only varies
        # the weights it feeds in.
        partition_profiled_graph_fn = functools.partial(partition_profiled_graph, model=model, nparts=nparts, partitioning_method=partitioning_method, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, use_virtual_stages=use_virtual_stages, use_layers_only_graph=use_layers_only_graph, METIS_opt=METIS_opt, acyclic_opt=acyclic_opt, binpack_opt=binpack_opt, mpipe_opt=mpipe_opt)
        graph = partition_and_match_weights_until_last_partition_is_with_no_recomputation(graph, weights, partitioning_method, partition_profiled_graph_fn, max_memory_usage_r=max_memory_usage_r, max_memory_usage_nr=max_memory_usage_nr)
    return graph
def test_multi_crop():
    """Cropping ((1, 2), (2, 1)) removes 1 top / 2 bottom rows and
    2 left / 1 right columns from a 9x5 array."""
    src = np.arange(45).reshape(9, 5)
    cropped = crop(src, ((1, 2), (2, 1)))
    assert_equal(cropped.shape, (6, 2))
    assert_array_equal(cropped[0], [7, 8])
    assert_array_equal(cropped[-1], [32, 33])
def get_all_dtypes(include_half=True, include_bfloat16=True, include_bool=True, include_complex=True, include_complex32=False) -> List[torch.dtype]:
    """Collect every torch dtype, with flags to opt out of bool/half/bfloat16
    and to optionally add complex32 alongside the other complex types."""
    dtypes = list(get_all_int_dtypes())
    dtypes.extend(get_all_fp_dtypes(include_half=include_half, include_bfloat16=include_bfloat16))
    if include_bool:
        dtypes.append(torch.bool)
    if include_complex:
        dtypes.extend(get_all_complex_dtypes(include_complex32))
    return dtypes
def test_get_all_generatable_types(module_test_cluster):
    """Generatable types = primitives + collections + the registered generator's type."""
    type_system = module_test_cluster.type_system
    mock_generator = MagicMock(GenericMethod)
    mock_generator.generated_type.return_value = type_system.convert_type_hint(MagicMock)
    module_test_cluster.add_generator(mock_generator)
    expected = {type_system.convert_type_hint(t) for t in [*PRIMITIVES, *COLLECTIONS, MagicMock]}
    assert set(module_test_cluster.get_all_generatable_types()) == set(expected)
class Bernoulli(nn.Module):
    """Linear head producing a FixedBernoulli distribution over independent
    binary outputs."""

    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
        super(Bernoulli, self).__init__()
        # Select the weight-initialisation scheme; biases start at zero.
        weight_init = nn.init.orthogonal_ if use_orthogonal else nn.init.xavier_uniform_

        def init_(m):
            return init(m, weight_init, lambda x: nn.init.constant_(x, 0), gain)

        self.linear = init_(nn.Linear(num_inputs, num_outputs))

    def forward(self, x):
        logits = self.linear(x)
        return FixedBernoulli(logits=logits)
def test_validate_parameters_invalid_dataset_name():
    """validate_parameters must reject a dataset name that is not registered."""
    with pytest.raises(ValueError):
        loader.validate_parameters('invalid_dataset_name', 'portuguese', 'full')
def register_Ns3DefaultDeleter__Ns3SpectrumModel_methods(root_module, cls):
    ## pybindgen-style generated registration code -- edit with care.
    ## DefaultDeleter<SpectrumModel>: default ctor, copy ctor, and the
    ## static Delete(SpectrumModel*) method.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::SpectrumModel > const &', 'arg0')])
    cls.add_method('Delete', 'void', [param('ns3::SpectrumModel *', 'object')], is_static=True)
    return
def slice_assign(sliced_tensor, assigned_tensor, *slice_args, verbose=0):
    """Functional equivalent of ``sliced_tensor[slice_args] = assigned_tensor``
    for TensorFlow tensors, built on ``tf.tensor_scatter_nd_update``.

    ``slice_args`` may contain slice objects, the strings ':' / '...', or
    Ellipsis.  Returns a new tensor; inputs are not mutated.  Raises
    ValueError for any other slice spec.
    """
    shape = sliced_tensor.shape
    # BUG FIX: removed a stray unconditional debug ``print(shape)``.
    n_dims = len(shape)
    n_slices = len(slice_args)
    dims_to_index = []
    corresponding_ranges = []
    ellipsis = False
    for (i_dim, slice_spec) in enumerate(slice_args):
        if isinstance(slice_spec, str):
            if slice_spec == ':':
                continue  # full-axis slice: nothing to index
            elif slice_spec == '...':
                ellipsis = True
            else:
                raise ValueError('Slices must be :, ..., or slice object.')
        elif slice_spec is Ellipsis:
            ellipsis = True
        else:
            (start, stop, step) = (slice_spec.start, slice_spec.stop, slice_spec.step)
            no_start = (start is None) or (start == 0)
            no_stop = (stop is None) or (stop == (-1))
            no_step = (step is None) or (step == 1)
            if no_start and no_stop and no_step:
                continue  # degenerate slice equivalent to ':'
            # After an ellipsis, remaining slice positions count from the end.
            if ellipsis:
                real_index = i_dim + (n_dims - n_slices)
            else:
                real_index = i_dim
            dims_to_index.append(real_index)
            # Fill in slice defaults before materialising the index range.
            if no_step:
                step = 1
            if no_stop:
                stop = shape[real_index]
            if no_start:
                start = 0
            corresponding_ranges.append(tf.range(start, stop, step))
    if not dims_to_index:
        if verbose > 0:
            print('Warning: no slicing performed')
        return assigned_tensor
    # Move the indexed dims to the front so scatter_nd can address them.
    dims_left_out = [i_dim for i_dim in range(n_dims) if i_dim not in dims_to_index]
    scatted_nd_perm = dims_to_index + dims_left_out
    inverse_scatter_nd_perm = list(np.argsort(scatted_nd_perm))
    sliced_tensor_reshaped = tf.transpose(sliced_tensor, perm=scatted_nd_perm)
    assigned_tensor_reshaped = tf.transpose(assigned_tensor, perm=scatted_nd_perm)
    left_out_shape = [shape[i_dim] for i_dim in dims_left_out]
    assigned_tensor_reshaped = tf.reshape(assigned_tensor_reshaped, [-1] + left_out_shape)
    # Cartesian product of the per-dim index ranges -> scatter indices.
    mesh_ranges = tf.meshgrid(*corresponding_ranges, indexing='ij')
    update_indices = tf.stack([tf.reshape(slicing_range, (-1,)) for slicing_range in mesh_ranges], axis=-1)
    sliced_tensor_reshaped = tf.tensor_scatter_nd_update(tensor=sliced_tensor_reshaped, indices=update_indices, updates=assigned_tensor_reshaped)
    # Undo the front-permutation to restore the original axis order.
    sliced_tensor_updated = tf.transpose(sliced_tensor_reshaped, perm=inverse_scatter_nd_perm)
    return sliced_tensor_updated
class gdk_pixbuf_2_info(_pkg_config_info):
    # system_info entry resolving GDK-Pixbuf 2.x via pkg-config.
    section = 'gdk_pixbuf_2'                    # config-file section name
    append_config_exe = 'gdk-pixbuf-2.0'        # pkg-config package name
    version_macro_name = 'GDK_PIXBUF_VERSION'   # C macro carrying the version
def define_common_flags():
    """Register the Gin command-line flags shared by all binaries."""
    flags.DEFINE_multi_string('gin_bindings', None, 'Gin parameter bindings.')
    flags.DEFINE_multi_string('gin_configs', None, 'Gin config files.')