code
stringlengths
101
5.91M
def get_span_score_pairs(ypi, yp2i):
    """Enumerate all candidate answer spans per sentence and score them.

    For each sentence f, pairs every start index j with every end index
    k >= j; the span is ((f, j), (f, k + 1)) and its score is the product
    of the start probability ypi[f][j] and end probability yp2i[f][k].
    """
    pairs = []
    for f, (start_probs, end_probs) in enumerate(zip(ypi, yp2i)):
        for j, p_start in enumerate(start_probs):
            for k in range(j, len(end_probs)):
                span = ((f, j), (f, k + 1))
                pairs.append((span, p_start * end_probs[k]))
    return pairs
def get_ground_truths(answer):
    """Collect all acceptable answer strings for one question.

    Combines the precomputed 'NormalizedAliases' with any 'HumanAnswers'
    (normalized on the fly); 'HumanAnswers' may be absent.
    """
    human = [normalize_answer(ans) for ans in answer.get('HumanAnswers', [])]
    return answer['NormalizedAliases'] + human
def test_is_nonpositive():
    """Check `is_nonpositive`: False for 1/2, True for -2/3, None (unknown)
    for a free symbol."""
    half = Rational(1, 2)
    minus_two_thirds = Rational(-2, 3)
    assert not half.is_nonpositive
    assert minus_two_thirds.is_nonpositive
    assert Symbol('x').is_nonpositive is None
def distillation(y, teacher_scores, labels, T, alpha):
    """Knowledge-distillation loss (Hinton-style).

    Blends a temperature-T KL term between student logits `y` and
    `teacher_scores` (scaled by T^2, normalized by batch size) with a
    hard-label cross-entropy term, weighted alpha / (1 - alpha).
    """
    student_log_probs = F.log_softmax(y / T, dim=1)
    teacher_probs = F.softmax(teacher_scores / T, dim=1)
    kl_term = F.kl_div(student_log_probs, teacher_probs, size_average=False) * (T ** 2) / y.shape[0]
    ce_term = F.cross_entropy(y, labels)
    return alpha * kl_term + (1.0 - alpha) * ce_term
# BUGFIX(review): the original text carried a garbled decorator residue
# `_module()` before this class — presumably `@...register_module()` from a
# registry (mmcv/mmcls style); restore it from the upstream file.
# _module()
class Res2Net(ResNet):
    """Res2Net backbone: a ResNet variant built from `Bottle2neck` blocks
    with multi-scale (`scales`) intra-block feature splits."""

    # depth -> (block class, per-stage block counts)
    arch_settings = {
        50: (Bottle2neck, (3, 4, 6, 3)),
        101: (Bottle2neck, (3, 4, 23, 3)),
        152: (Bottle2neck, (3, 8, 36, 3)),
    }

    def __init__(self, scales=4, base_width=26, style='pytorch',
                 deep_stem=True, avg_down=True, pretrained=None,
                 init_cfg=None, **kwargs):
        self.scales = scales
        self.base_width = base_width
        # NOTE(review): the `style` parameter is accepted but the parent is
        # always initialized with style='pytorch' (original behavior kept).
        super(Res2Net, self).__init__(
            style='pytorch', deep_stem=deep_stem, avg_down=avg_down,
            pretrained=pretrained, init_cfg=init_cfg, **kwargs)

    def make_res_layer(self, **kwargs):
        """Build one residual stage with Res2Net-specific arguments."""
        return Res2Layer(scales=self.scales, base_width=self.base_width,
                         base_channels=self.base_channels, **kwargs)
def mark_observed_custom_module(module, custom_module_class):
    """Tag `module` as an observed custom module and record the float class
    it corresponds to, via the `_is_observed_custom_module` and
    `_FLOAT_MODULE` attributes."""
    setattr(module, '_is_observed_custom_module', True)
    setattr(module, '_FLOAT_MODULE', custom_module_class)
def process_book(break_probs_dir, para_to_sent_dir, gt_dir, output_dir, book_id):
    """Turn one book's per-paragraph break probabilities into prediction
    pickles, one per alpha in {0.0, 0.1, ..., 1.0}.

    Paragraphs with break probability > 0.9 become peak sentence indices;
    the log-probability serves as each peak's prominence.
    """
    def _load(directory, suffix):
        # All inputs are pickles named <book_id><suffix>.
        with open(os.path.join(directory, book_id + suffix), 'rb') as fh:
            return pickle.load(fh)

    break_probs = _load(break_probs_dir, '.pkl')
    para_to_sent = _load(para_to_sent_dir, '.pkl')

    peaks, prominences = [], []
    for para_idx, prob in break_probs.items():
        if prob > 0.9:
            peaks.append(para_to_sent[para_idx])
            prominences.append(np.log(prob))

    gt = _load(gt_dir, '_gt_sents.pkl')
    max_sent_num = int(_load(gt_dir, '_max_sent_num.pkl'))
    num_preds = len(gt)

    for alpha in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
        preds = get_predictions(peaks, prominences, num_preds, max_sent_num, alpha)
        out_name = book_id + '_alpha_' + str(int(alpha * 100)) + '.pkl'
        with open(os.path.join(output_dir, out_name), 'wb') as fh:
            pickle.dump(preds, fh)

    print(book_id, 'success')
    return (book_id, 'Success')
class ODOC_seg_edge(nn.Module):
    """Segmentation network with an auxiliary edge branch on a Res2Net-50
    backbone; returns (sigmoid segmentation map, sigmoid edge features)."""

    def __init__(self, channel=64):
        super(ODOC_seg_edge, self).__init__()
        self.resnet = res2net50_v1b_26w_4s(pretrained=False)
        # 1x1 convs reducing each backbone stage to a common channel width.
        self.rfb2_1 = BasicConv2d(256, channel, 1)
        self.rfb3_1 = BasicConv2d(512, channel, 1)
        self.rfb4_1 = BasicConv2d(1024, channel, 1)
        self.rfb5_1 = BasicConv2d(2048, channel, 1)
        self.edge = EDGModule(channel)
        self.seg_layer = SEG_Module(channel)

    def forward(self, x):
        # Backbone stem.
        stem = self.resnet.maxpool(self.resnet.relu(self.resnet.bn1(self.resnet.conv1(x))))
        # Backbone stages.
        x1 = self.resnet.layer1(stem)
        x2 = self.resnet.layer2(x1)
        x3 = self.resnet.layer3(x2)
        x4 = self.resnet.layer4(x3)
        # Channel reduction per stage.
        f1 = self.rfb2_1(x1)
        f2 = self.rfb3_1(x2)
        f3 = self.rfb4_1(x3)
        f4 = self.rfb5_1(x4)
        edge_feat = self.edge(f3, f2, f1)
        edge = F.interpolate(edge_feat, size=(32, 32), mode='bilinear', align_corners=True)
        seg = self.seg_layer(f4, f3, f2, edge)
        return (torch.sigmoid(seg), torch.sigmoid(edge_feat))
def fork_rng(devices=None, enabled=True, _caller='fork_rng', _devices_kw='devices'):
    """Generator that snapshots the CPU (and selected CUDA) RNG states,
    yields, then restores them on exit.

    NOTE(review): this is a generator with a bare `yield`; it is presumably
    wrapped by `@contextlib.contextmanager` at its original definition site —
    confirm upstream.

    Args:
        devices: iterable of CUDA device ids whose RNG state to fork;
            defaults to all visible devices (warning once when more than
            one is present, since initializing every device can be slow).
        enabled: when False, do nothing (plain yield, no state handling).
        _caller, _devices_kw: names injected into the warning text so
            wrapping APIs can report themselves instead of `fork_rng`.
    """
    import torch.cuda
    global _fork_rng_warned_already
    # Fast path: forking disabled — just yield control, touch no RNG state.
    if (not enabled):
        (yield)
        return
    if (devices is None):
        num_devices = torch.cuda.device_count()
        # Warn once per process about implicitly forking every device.
        if ((num_devices > 1) and (not _fork_rng_warned_already)):
            warnings.warn("CUDA reports that you have {num_devices} available devices, and you have used {caller} without explicitly specifying which devices are being used. For safety, we initialize *every* CUDA device by default, which can be quite slow if you have a lot of GPUs. If you know that you are only making use of a few CUDA devices, set the environment variable CUDA_VISIBLE_DEVICES or the '{devices_kw}' keyword argument of {caller} with the set of devices you are actually using. For example, if you are using CPU only, set CUDA_VISIBLE_DEVICES= or devices=[]; if you are using GPU 0 only, set CUDA_VISIBLE_DEVICES=0 or devices=[0]. To initialize all devices and suppress this warning, set the '{devices_kw}' keyword argument to `range(torch.cuda.device_count())`.".format(num_devices=num_devices, caller=_caller, devices_kw=_devices_kw))
            _fork_rng_warned_already = True
        devices = list(range(num_devices))
    else:
        # Materialize so the iterable can be traversed twice (save + restore).
        devices = list(devices)
    cpu_rng_state = torch.get_rng_state()
    gpu_rng_states = []
    for device in devices:
        with torch.cuda.device(device):
            gpu_rng_states.append(torch.cuda.get_rng_state())
    try:
        (yield)
    finally:
        # Restore all saved states even if the body raised.
        torch.set_rng_state(cpu_rng_state)
        for (device, gpu_rng_state) in zip(devices, gpu_rng_states):
            with torch.cuda.device(device):
                torch.cuda.set_rng_state(gpu_rng_state)
class BSDSD1orp1mat(SpectralMatrix):
    """Banded spectral matrix for Shen–Dirichlet test/trial bases on
    Legendre–Gauss points; `assemble` returns the nonzero diagonals
    (offsets 0, +1, -1) as a dict of arrays."""

    def assemble(self, method):
        test, trial = self.testfunction, self.trialfunction
        # Both bases must be Shen–Dirichlet on a Legendre–Gauss grid.
        assert isinstance(test[0], SD)
        assert isinstance(trial[0], SD)
        assert test[0].quad == 'LG'
        k = np.arange(test[0].N - 2)
        return {
            0: 2 * (2 * k + 3) / (k + 1) / (k + 2),
            1: -2 / (k[:-1] + 2),
            -1: -2 / (k[:-1] + 2),
        }
def pythran_indexing_type(type_, indices):
    """Return the Pythran C++ type of `type_` indexed by `indices`, computed
    as a `decltype(...)` expression with any reference stripped."""
    expr = 'decltype(std::declval<%s>()%s)' % (
        pythran_type(type_),
        _index_access(_index_type_code, indices),
    )
    return type_remove_ref(expr)
class FDST(NWPU):
    """FDST crowd-counting dataset: reuses the NWPU preprocessing pipeline
    and only overrides how the file list (and val box ground truth) is
    built from the image list."""

    def __init__(self, root, list_path, num_samples=None, num_classes=1,
                 multi_scale=True, flip=True, ignore_label=-1, base_size=2048,
                 crop_size=(512, 1024), min_unit=(32, 32),
                 center_crop_test=False, downsample_rate=1,
                 scale_factor=(0.5, 1 / 0.5),
                 mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        super(FDST, self).__init__(root, list_path, num_samples, num_classes,
                                   multi_scale, flip, ignore_label, base_size,
                                   crop_size, min_unit, center_crop_test,
                                   downsample_rate, scale_factor, mean, std)

    def read_files(self):
        """Build per-image records; non-test splits carry a weight, and the
        val split additionally collects localization ground-truth boxes."""
        box_gt_Info = self.read_box_gt(os.path.join(self.root, 'val_gt_loc.txt'))
        files = []
        is_test = 'test' in self.list_path
        for item in self.img_list:
            image_id = item[0]
            record = {
                'img': 'images/' + image_id + '.jpg',
                'label': 'jsons/' + image_id + '.json',
                'name': image_id,
            }
            if not is_test:
                if 'val' in self.list_path:
                    self.box_gt.append(box_gt_Info[int(image_id)])
                record['weight'] = 1
            files.append(record)
        return files
def kl_loss(mu, logvar):
    """KL divergence of N(mu, exp(logvar)) from N(0, I), summed over the
    last axis and averaged over the batch:
    0.5 * sum(mu^2 + exp(logvar) - 1 - logvar)."""
    per_sample = 0.5 * tf.reduce_sum(
        tf.square(mu) + tf.exp(logvar) - 1 - logvar, axis=-1)
    return tf.reduce_mean(per_sample)
def test_binary_target() -> None:
    """`check_binary_zero_one` must reject targets that are not binary 0/1."""
    non_binary = np.array([0, 5, 4])
    with pytest.raises(ValueError, match='Please provide y_true as a bina*'):
        check_binary_zero_one(non_binary)
class Softplus_SENet(nn.Module):
    """SENet-style classifier for small images whose stem activation is
    Softplus instead of ReLU; four stages of `block`, then 4x4 average
    pooling and a linear head."""

    def __init__(self, block, num_blocks, num_classes=100):
        super(Softplus_SENet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first may downsample."""
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.softplus(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def partial(f, *args, **kwargs):
    """Like functools.partial, but also copies f's metadata (__name__,
    __doc__, ...) onto the partial object so it introspects like the
    wrapped function."""
    wrapped = functools.partial(f, *args, **kwargs)
    functools.update_wrapper(wrapped, f)
    return wrapped
class NLUEngineConfig(FromDict, ProcessingUnitConfig):
    """Configuration of a Snips NLU engine: an ordered list of intent-parser
    configurations plus an optional random seed."""

    def __init__(self, intent_parsers_configs=None, random_seed=None):
        from snips_nlu.intent_parser import IntentParser
        if intent_parsers_configs is None:
            # Default pipeline: deterministic parser first, probabilistic second.
            from snips_nlu.pipeline.configs import (
                ProbabilisticIntentParserConfig, DeterministicIntentParserConfig)
            intent_parsers_configs = [
                DeterministicIntentParserConfig(),
                ProbabilisticIntentParserConfig(),
            ]
        self.intent_parsers_configs = [
            IntentParser.get_config(c) for c in intent_parsers_configs]
        self.random_seed = random_seed

    def unit_name(self):
        from snips_nlu.nlu_engine.nlu_engine import SnipsNLUEngine
        return SnipsNLUEngine.unit_name

    def get_required_resources(self):
        # Start from the entity-parser usage and merge each parser's needs in.
        resources = {CUSTOM_ENTITY_PARSER_USAGE: CustomEntityParserUsage.WITHOUT_STEMS}
        for parser_config in self.intent_parsers_configs:
            resources = merge_required_resources(
                resources, parser_config.get_required_resources())
        return resources

    def to_dict(self):
        # NOTE(review): `self.unit_name` here references the bound method
        # object rather than calling it — presumably `unit_name` is a
        # property/classproperty upstream (decorator lost); confirm.
        return {
            'unit_name': self.unit_name,
            'intent_parsers_configs': [c.to_dict() for c in self.intent_parsers_configs],
        }
def create_scheduler(args, optimizer):
    """Build a learning-rate scheduler from `args.sched` ('cosine', 'tanh',
    'step', 'linear' or 'plateau'; anything else yields None).

    Returns:
        (lr_scheduler, num_epochs) — for cycle-based schedulers num_epochs
        is extended to the full cycle length plus cooldown epochs.
    """
    num_epochs = args.epochs
    # Optional LR-noise schedule: `lr_noise` gives fraction(s) of the total
    # epochs at which noise starts (a single value or a [start, end] range).
    if (getattr(args, 'lr_noise', None) is not None):
        lr_noise = getattr(args, 'lr_noise')
        if isinstance(lr_noise, (list, tuple)):
            noise_range = [(n * num_epochs) for n in lr_noise]
            if (len(noise_range) == 1):
                noise_range = noise_range[0]
        else:
            noise_range = (lr_noise * num_epochs)
    else:
        noise_range = None
    lr_scheduler = None
    if (args.sched == 'cosine'):
        lr_scheduler = CosineLRScheduler(optimizer, t_initial=num_epochs, t_mul=getattr(args, 'lr_cycle_mul', 1.0), lr_min=args.min_lr, decay_rate=args.decay_rate, warmup_lr_init=args.warmup_lr, warmup_t=args.warmup_epochs, cycle_limit=getattr(args, 'lr_cycle_limit', 1), t_in_epochs=True, noise_range_t=noise_range, noise_pct=getattr(args, 'lr_noise_pct', 0.67), noise_std=getattr(args, 'lr_noise_std', 1.0), noise_seed=getattr(args, 'seed', 42))
        # Train for the full (possibly multi-cycle) schedule plus cooldown.
        num_epochs = (lr_scheduler.get_cycle_length() + args.cooldown_epochs)
    elif (args.sched == 'tanh'):
        lr_scheduler = TanhLRScheduler(optimizer, t_initial=num_epochs, t_mul=getattr(args, 'lr_cycle_mul', 1.0), lr_min=args.min_lr, warmup_lr_init=args.warmup_lr, warmup_t=args.warmup_epochs, cycle_limit=getattr(args, 'lr_cycle_limit', 1), t_in_epochs=True, noise_range_t=noise_range, noise_pct=getattr(args, 'lr_noise_pct', 0.67), noise_std=getattr(args, 'lr_noise_std', 1.0), noise_seed=getattr(args, 'seed', 42))
        num_epochs = (lr_scheduler.get_cycle_length() + args.cooldown_epochs)
    elif (args.sched == 'step'):
        lr_scheduler = StepLRScheduler(optimizer, decay_t=args.decay_epochs, decay_rate=args.decay_rate, warmup_lr_init=args.warmup_lr, warmup_t=args.warmup_epochs, noise_range_t=noise_range, noise_pct=getattr(args, 'lr_noise_pct', 0.67), noise_std=getattr(args, 'lr_noise_std', 1.0), noise_seed=getattr(args, 'seed', 42))
    elif (args.sched == 'linear'):

        def lr_lambda(current_step: int):
            # Linear warmup to 1.0, then linear decay to 0 at num_training_steps.
            if (current_step < args.num_warmup_steps):
                return (float(current_step) / float(max(1, args.num_warmup_steps)))
            return max(0.0, (float((args.num_training_steps - current_step)) / float(max(1, (args.num_training_steps - args.num_warmup_steps)))))
        lr_scheduler = LambdaLR(optimizer, lr_lambda, args.last_epoch)
    elif (args.sched == 'plateau'):
        # Minimize loss-like eval metrics, maximize everything else.
        mode = ('min' if ('loss' in getattr(args, 'eval_metric', '')) else 'max')
        lr_scheduler = PlateauLRScheduler(optimizer, decay_rate=args.decay_rate, patience_t=args.patience_epochs, lr_min=args.min_lr, mode=mode, warmup_lr_init=args.warmup_lr, warmup_t=args.warmup_epochs, cooldown_t=0, noise_range_t=noise_range, noise_pct=getattr(args, 'lr_noise_pct', 0.67), noise_std=getattr(args, 'lr_noise_std', 1.0), noise_seed=getattr(args, 'seed', 42))
    return (lr_scheduler, num_epochs)
class Prompter(ABC):
    """Interface for prompt builders.

    Concrete subclasses construct prompt strings for the aggregation,
    improvement, generation, validation and scoring steps. The methods here
    are no-op stubs (they return None).
    """

    def aggregation_prompt(self, state_dicts: List[Dict], **kwargs) -> str:
        """Prompt asking to aggregate the given states into one."""
        pass

    def improve_prompt(self, **kwargs) -> str:
        """Prompt asking to improve a previous answer."""
        pass

    def generate_prompt(self, num_branches: int, **kwargs) -> str:
        """Prompt asking to generate `num_branches` continuations."""
        pass

    def validation_prompt(self, **kwargs) -> str:
        """Prompt asking to validate a candidate answer."""
        pass

    def score_prompt(self, state_dicts: List[Dict], **kwargs) -> str:
        """Prompt asking to score the given states."""
        pass
# BUGFIX(review): the original text carried a syntactically invalid decorator
# residue `(resources={'machine': 1})` before this function — presumably a
# Ray-style `@ray.remote(resources={'machine': 1})`; restore from upstream.
# (resources={'machine': 1})
def allgather(args_dict, notification_address, world_size, world_rank, object_size):
    """All-gather benchmark over a shared object store.

    Each rank puts one random float32 buffer of `object_size` bytes, waits
    at a barrier, then fetches every rank's buffer and reports the duration.

    NOTE(review): `notification_port` is read but never defined here —
    presumably a module-level constant; confirm.
    """
    store = utils.create_store_using_dict(args_dict)
    my_object_id = utils.object_id_from_int(world_rank)
    # object_size is in bytes; float32 entries are 4 bytes each.
    payload = np.random.rand(object_size // 4).astype(np.float32)
    buffer = store_lib.Buffer.from_buffer(payload)
    store.put(buffer, my_object_id)
    print('Buffer created, hash =', hash(buffer))
    object_ids = [utils.object_id_from_int(i) for i in range(world_size)]
    # Wait until every rank has published its buffer.
    barrier(notification_address, notification_port, world_size)
    start = time.time()
    buffers = [store.get(oid) for oid in object_ids]
    duration = (time.time() - start)
    print('AllGather completed, hash =', [hash(b) for b in buffers], 'duration =', duration)
def argParse():
    """Parse command-line arguments: --version, --autobrief, --debug and a
    required FILENAME positional (presumably for a doxygen input filter)."""
    arg_parser = ArgumentParser(prog=__applicationName__)
    arg_parser.add_argument('--version', action='version',
                            version='%(prog)s ' + __version__)
    arg_parser.add_argument('--autobrief', action='store_true',
                            help='use the docstring summary line as \\brief description')
    arg_parser.add_argument('--debug', action='store_true',
                            help='enable debug output on stderr')
    arg_parser.add_argument('filename', metavar='FILENAME')
    return arg_parser.parse_args()
def get_model_33(params):
    """Three-input dense model (legacy Keras 1.x API).

    Each metafeature input is L2-normalized, all three are concatenated and
    dropout-regularized, optionally passed through one hidden ReLU layer,
    then projected to `n_out` with `final_activation`; a linear final
    activation gets an extra L2 normalization on the output.
    """
    make_l2norm = lambda: Lambda(lambda t: K.l2_normalize(t, axis=1))

    inputs = Input(shape=(params['n_metafeatures'],))
    inputs2 = Input(shape=(params['n_metafeatures2'],))
    inputs3 = Input(shape=(params['n_metafeatures3'],))
    normalized = [make_l2norm()(t) for t in (inputs, inputs2, inputs3)]

    x = merge(normalized, mode='concat', concat_axis=1)
    x = Dropout(params['dropout_factor'])(x)

    if params['n_dense'] > 0:
        hidden = Dense(output_dim=params['n_dense'], init='uniform', activation='relu')
        x = hidden(x)
        logging.debug('Output CNN: %s' % str(hidden.output_shape))

    head = Dense(output_dim=params['n_out'], init='uniform',
                 activation=params['final_activation'])
    xout = head(x)
    logging.debug('Output CNN: %s' % str(head.output_shape))

    if params['final_activation'] == 'linear':
        xout = make_l2norm()(xout)

    return Model(input=[inputs, inputs2, inputs3], output=xout)
# NOTE(review): the line below is a garbled decorator residue — presumably
# `@test_utils.test(require=..., ad_stack_size=1, arch=[...])`; restore from
# the upstream file.
_utils.test(require=ti.extension.adstack, ad_stack_size=1, arch=[ti.cpu, ti.gpu])
def test_large_for_loops_fixed_stack_size():
    """Autodiff through deeply nested loops with a fixed AD stack size."""
    x = ti.field(dtype=float, shape=(), needs_grad=True)
    arr = ti.field(dtype=float, shape=2, needs_grad=True)
    loss = ti.field(dtype=float, shape=(), needs_grad=True)

    # NOTE(review): presumably decorated `@ti.kernel` upstream (decorator
    # lost in this text) — confirm.
    def test_large_loop():
        for i in range(5):
            for j in range(2000):
                for k in range(1000):
                    loss[None] += (ti.sin(x[None]) + 1.0)
    with ti.ad.Tape(loss=loss):
        test_large_loop()
    # NOTE(review): with sin(0) + 1.0 accumulated each iteration, expecting
    # loss == 0 looks inconsistent — verify against the upstream test.
    assert (loss[None] == .0)
    assert (x.grad[None] == .0)
def test_is_invertible_module_shared_outputs():
    """A module with multiple shared outputs should still pass the
    invertibility check, while emitting a UserWarning."""
    module = MultiSharedOutputs()
    sample = torch.rand(1, 2, 5, 5, dtype=torch.float32).requires_grad_()
    with pytest.warns(UserWarning):
        assert is_invertible_module(module, test_input_shape=(sample.shape,), atol=1e-06)
class SpeakerVerifi_test(Dataset):
    """Speaker-verification test set.

    Parses a metadata file of trial pairs ('label path1 path2' per line) and
    serves each unique utterance as (waveform ndarray, path); the pair table
    is kept for downstream scoring.
    """

    def __init__(self, vad_config, file_path, meta_data):
        self.root = file_path
        self.meta_data = meta_data
        self.necessary_dict = self.processing()
        self.vad_c = vad_config
        self.dataset = self.necessary_dict['spk_paths']
        self.pair_table = self.necessary_dict['pair_table']

    def processing(self):
        """Parse the trial list into unique utterance paths and pair rows."""
        pair_table = []
        spk_paths = set()
        with open(self.meta_data, 'r') as f:
            lines = f.readlines()
        for line in lines:
            fields = line.split()
            path_a = os.path.join(self.root, fields[1])
            path_b = os.path.join(self.root, fields[2])
            spk_paths.add(path_a)
            spk_paths.add(path_b)
            pair_table.append([fields[0], path_a, path_b])
        return {'spk_paths': list(spk_paths), 'total_spk_num': None,
                'pair_table': pair_table}

    def __len__(self):
        return len(self.necessary_dict['spk_paths'])

    def __getitem__(self, idx):
        path = self.dataset[idx]
        wav, _ = apply_effects_file(path, EFFECTS)
        return (wav.squeeze(0).numpy(), path)

    def collate_fn(self, data_sample):
        wavs, names = zip(*data_sample)
        return (wavs, names)
class BertOnlyMLMHead(nn.Module):
    """Masked-LM head only (no next-sentence prediction): delegates to
    `BertLMPredictionHead`, sharing the decoder embedding weights."""

    def __init__(self, config, decoder_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, decoder_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class ULIPWithImageLoss(nn.Module):
    """CLIP-style contrastive loss aligning point-cloud embeddings with both
    text and image embeddings, averaged over the two pairings; also reports
    pc-text and pc-image retrieval accuracies."""

    def __init__(self):
        super().__init__()
        self.labels = None
        self.last_local_batch_size = None

    def forward(self, outputs):
        pc_embed = outputs['pc_embed']
        text_embed = outputs['text_embed']
        image_embed = outputs['image_embed']
        logit_scale = outputs['logit_scale']
        local_batch_size = pc_embed.size(0)

        # Rebuild the diagonal target labels only when the batch size
        # changes; offset by rank so each worker matches its own shard in
        # the all-gathered similarity matrices.
        if local_batch_size != self.last_local_batch_size:
            self.labels = local_batch_size * utils.get_rank() + torch.arange(
                local_batch_size, device=pc_embed.device)
            self.last_local_batch_size = local_batch_size

        pc_embed = F.normalize(pc_embed, dim=-1, p=2)
        text_embed = F.normalize(text_embed, dim=-1, p=2)
        image_embed = F.normalize(image_embed, dim=-1, p=2)
        (pc_embed_all, text_embed_all, image_embed_all) = utils.all_gather_batch(
            [pc_embed, text_embed, image_embed])

        # BUGFIX(review): the original text was missing the `@` matmul
        # operator in these four similarity computations (garbled source);
        # restored as cosine-similarity logits scaled by logit_scale.
        logits_per_pc_text = logit_scale * pc_embed @ text_embed_all.t()
        logits_per_text_pc = logit_scale * text_embed @ pc_embed_all.t()
        logits_per_pc_image = logit_scale * pc_embed @ image_embed_all.t()
        logits_per_image_pc = logit_scale * image_embed @ pc_embed_all.t()

        loss = ((F.cross_entropy(logits_per_pc_text, self.labels)
                 + F.cross_entropy(logits_per_text_pc, self.labels)) / 2
                + (F.cross_entropy(logits_per_pc_image, self.labels)
                   + F.cross_entropy(logits_per_image_pc, self.labels)) / 2)

        # Top-1 retrieval accuracies (no gradients needed).
        with torch.no_grad():
            pred = torch.argmax(logits_per_pc_text, dim=-1)
            correct = pred.eq(self.labels).sum()
            pc_text_acc = 100 * correct / local_batch_size
            pred = torch.argmax(logits_per_pc_image, dim=-1)
            correct = pred.eq(self.labels).sum()
            pc_image_acc = 100 * correct / local_batch_size

        return {'loss': loss, 'ulip_loss': loss,
                'ulip_pc_image_acc': pc_image_acc,
                'ulip_pc_text_acc': pc_text_acc}
class DeformConv(nn.Module):
    """Deformable convolution layer: a standard conv whose sampling grid is
    shifted by a per-location `offset` tensor supplied in `forward`."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, deformable_groups=1, bias=False):
        super(DeformConv, self).__init__()
        self.with_bias = bias
        # Grouped convolution requires both channel counts to divide evenly.
        assert in_channels % groups == 0, \
            'in_channels {} cannot be divisible by groups {}'.format(in_channels, groups)
        assert out_channels % groups == 0, \
            'out_channels {} cannot be divisible by groups {}'.format(out_channels, groups)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deformable_groups = deformable_groups
        self.weight = nn.Parameter(torch.Tensor(
            out_channels, in_channels // self.groups, *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        nn.init.kaiming_uniform_(self.weight, nonlinearity='relu')
        if self.bias is not None:
            nn.init.constant_(self.bias, 0)

    def forward(self, x, offset):
        return deform_conv(x, offset, self.weight, self.bias, self.stride,
                           self.padding, self.dilation, self.groups,
                           self.deformable_groups)

    def extra_repr(self):
        parts = [
            'in_channels=' + str(self.in_channels),
            'out_channels=' + str(self.out_channels),
            'kernel_size=' + str(self.kernel_size),
            'stride=' + str(self.stride),
            'padding=' + str(self.padding),
            'dilation=' + str(self.dilation),
            'groups=' + str(self.groups),
            'deformable_groups=' + str(self.deformable_groups),
            'bias=False',
        ]
        return ', '.join(parts)
class KITTI(BaseDataset):
    """KITTI monocular-depth training dataset (Eigen split).

    Loads image/depth pairs listed in `kitti_eigen_train.txt`, keeps a
    random subset of `image_limitation` samples, and serves augmented
    (image, depth) items plus unaugmented originals.
    """

    def __init__(self, data_path='./data/', is_train=True, image_limitation=50, crop_size=(512, 512), scale_size=None, depth_scale=80):
        super().__init__(crop_size)
        self.is_train = is_train
        self.size = 512  # minimum short-edge size enforced in __getitem__
        self.image_limitation = image_limitation
        self.data_root = os.path.join(data_path, 'kitti')
        self.depth_scale = depth_scale
        self.img_dir = os.path.join(self.data_root, 'input')
        self.ann_dir = os.path.join(self.data_root, 'gt_depth')
        self.split = os.path.join(self.data_root, 'kitti_eigen_train.txt')
        self.img_infos = self.load_annotations(self.img_dir, self.ann_dir, self.split)
        # Randomly subsample down to the configured limitation.
        random.shuffle(self.img_infos)
        self.img_infos = self.img_infos[:self.image_limitation]
        print('Dataset: KITTI')
        print('Training Sample:', len(self.img_infos))
        print([i['filename'] for i in self.img_infos])

    def load_annotations(self, img_dir, ann_dir, split):
        """Read the split file; each line is '<image> <depth_map> ...'.
        Lines whose depth map is 'None' are counted and skipped."""
        self.invalid_depth_num = 0
        img_infos = []
        if (split is not None):
            with open(split) as f:
                for line in f:
                    img_info = dict()
                    if (ann_dir is not None):
                        depth_map = line.strip().split(' ')[1]
                        if (depth_map == 'None'):
                            self.invalid_depth_num += 1
                            continue
                        img_info['ann'] = dict(depth_map=depth_map)
                    img_name = line.strip().split(' ')[0]
                    img_info['filename'] = img_name
                    img_infos.append(img_info)
        else:
            print('Split should be specified, NotImplementedError')
            raise NotImplementedError
        img_infos = sorted(img_infos, key=(lambda x: x['filename']))
        print(f'Loaded {len(img_infos)} images. \nTotally {self.invalid_depth_num} invalid pairs are filtered')
        return img_infos

    def __len__(self):
        return len(self.img_infos)

    def get_ann_info(self, idx):
        return self.img_infos[idx]['ann']

    def pre_pipeline(self, results):
        """Populate shared fields (path prefixes, depth scale, per-date
        KITTI camera intrinsics) expected by downstream pipeline steps."""
        results['depth_fields'] = []
        results['img_prefix'] = self.img_dir
        results['depth_prefix'] = self.ann_dir
        results['depth_scale'] = self.depth_scale
        # 3x4 projection matrices keyed by recording date.
        results['cam_intrinsic_dict'] = {'2011_09_26': [[721.5377, 0.0, 609.5593, 44.85728], [0.0, 721.5377, 172.854, 0.2163791], [0.0, 0.0, 1.0, 0.]], '2011_09_28': [[707.0493, 0.0, 604.0814, 45.75831], [0.0, 707.0493, 180.5066, (- 0.3454157)], [0.0, 0.0, 1.0, 0.]], '2011_09_29': [[718.3351, 0.0, 600.3891, 44.50382], [0.0, 718.3351, 181.5122, (- 0.5951107)], [0.0, 0.0, 1.0, 0.]], '2011_09_30': [[707.0912, 0.0, 601.8873, 46.88783], [0.0, 707.0912, 183.1104, 0.1178601], [0.0, 0.0, 1.0, 0.]], '2011_10_03': [[718.856, 0.0, 607.1928, 45.38225], [0.0, 718.856, 185.2157, (- 0.1130887)], [0.0, 0.0, 1.0, 0.]]}

    def prepare_train_img(self, idx):
        img_info = self.img_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        self.pre_pipeline(results)
        return results

    def __getitem__(self, idx):
        results = self.prepare_train_img(idx)
        img_path = results['img_info']['filename']
        gt_path = results['img_info']['ann']['depth_map']
        img_path = os.path.join(self.img_dir, img_path)
        gt_path = os.path.join(self.ann_dir, gt_path)
        print(img_path)
        print(gt_path)
        image = cv2.imread(img_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Depth maps are stored as 16-bit PNGs; read unchanged, then float.
        depth = cv2.imread(gt_path, cv2.IMREAD_UNCHANGED).astype('float32')
        # Upscale so the short edge is at least self.size.
        short_edge = min(image.shape[0], image.shape[1])
        if (short_edge < self.size):
            scale = (self.size / short_edge)
            image = cv2.resize(image, dsize=None, fx=scale, fy=scale)
            depth = cv2.resize(depth, dsize=None, fx=scale, fy=scale)
        # Keep unaugmented copies; depth normalized to [0, 255] for display.
        original_depth = ((depth / depth.max()) * 255)
        original_image = image.copy()
        if self.is_train:
            (image, depth) = self.augment_training_data(image, depth)
        else:
            (image, depth) = self.augment_test_data(image, depth)
        # Convert raw units to meters (KITTI depth is presumably stored
        # x256 — confirm against the data spec).
        depth = (depth / 256.0)
        return {'image': image, 'depth': depth, 'filename': img_path, 'original_image': original_image, 'original_depth': original_depth, 'prompt': 'a photo of '}
def eval(params, model, epoch, eval_loader, writer=None):
    """Run one evaluation epoch.

    Returns (mean loss, word rate, structure rate, expression rate).
    Optionally logs per-step and per-epoch scalars to `writer`.

    NOTE(review): shadows the builtin `eval`; renaming would touch callers,
    so it is left as-is.
    """
    model.eval()
    device = params['device']
    loss_meter = Meter()
    (word_right, struct_right, exp_right, length, cal_num) = (0, 0, 0, 0, 0)
    with tqdm(eval_loader, total=len(eval_loader)) as pbar, torch.no_grad():
        for (batch_idx, (images, image_masks, labels, label_masks)) in enumerate(eval_loader):
            (images, image_masks, labels, label_masks) = (images.to(device), image_masks.to(device), labels.to(device), label_masks.to(device))
            # `time` is the label sequence length (shadows the time module
            # within this scope).
            (batch, time) = labels.shape[:2]
            (probs, loss) = model(images, image_masks, labels, label_masks, is_train=False)
            (word_loss, struct_loss) = loss
            loss = (word_loss + struct_loss)
            loss_meter.add(loss.item())
            (wordRate, structRate, ExpRate) = cal_score(probs, labels, label_masks)
            # Word/structure rates are weighted by sequence length; ExpRate
            # counts whole expressions and is accumulated per batch.
            word_right = (word_right + (wordRate * time))
            struct_right = (struct_right + (structRate * time))
            exp_right = (exp_right + ExpRate)
            length = (length + time)
            cal_num = (cal_num + batch)
            if writer:
                current_step = (((epoch * len(eval_loader)) + batch_idx) + 1)
                writer.add_scalar('eval/loss', loss.item(), current_step)
                writer.add_scalar('eval/word_loss', word_loss.item(), current_step)
                writer.add_scalar('eval/struct_loss', struct_loss.item(), current_step)
                writer.add_scalar('eval/WordRate', wordRate, current_step)
                writer.add_scalar('eval/structRate', structRate, current_step)
                writer.add_scalar('eval/ExpRate', ExpRate, current_step)
            pbar.set_description(f'Epoch: {(epoch + 1)} eval loss: {loss.item():.4f} word loss: {word_loss:.4f} struct loss: {struct_loss:.4f} WordRate: {(word_right / length):.4f} structRate: {(struct_right / length):.4f} ExpRate: {(exp_right / cal_num):.4f}')
    if writer:
        writer.add_scalar('epoch/eval_loss', loss_meter.mean, (epoch + 1))
        writer.add_scalar('epoch/eval_WordRate', (word_right / length), (epoch + 1))
        writer.add_scalar('epoch/eval_structRate', (struct_right / length), (epoch + 1))
        # NOTE(review): the epoch-level ExpRate divides by the dataset size,
        # while the return value divides by cal_num — confirm intentional.
        writer.add_scalar('epoch/eval_ExpRate', (exp_right / len(eval_loader.dataset)), (epoch + 1))
    return (loss_meter.mean, (word_right / length), (struct_right / length), (exp_right / cal_num))
def loss_chimera_psa(output, label):
    """Chimera loss with phase-sensitive approximation (PSA) targets: a
    deep-clustering embedding term (weight 0.975) plus a
    permutation-resolved PSA mask term (weight 0.025)."""
    embedding, mask_A, mask_B = output
    one_hot_label, mag_mix, mag_s1, mag_s2, cos_s1, cos_s2 = label
    batch_size, frame, frequency = mask_A.shape
    loss_embedding = loss_dc([embedding], [one_hot_label, mag_mix])

    # Phase-sensitive targets, clipped to [0, mag_mix].
    target1 = torch.min(mag_mix, F.relu(mag_s1 * cos_s1))
    target2 = torch.min(mag_mix, F.relu(mag_s2 * cos_s2))

    # Resolve source permutation by taking the cheaper assignment.
    straight = norm_1d(mask_A * mag_mix - target1) + norm_1d(mask_B * mag_mix - target2)
    swapped = norm_1d(mask_B * mag_mix - target1) + norm_1d(mask_A * mag_mix - target2)
    loss_mask = torch.min(straight, swapped)
    return loss_embedding * 0.975 + loss_mask * 0.025
def get_free_gpus():
    """Return indices of GPUs with more than 10000 MiB free memory,
    sorted most-free first, by querying nvidia-smi."""
    raw = subprocess.check_output(
        'nvidia-smi --query-gpu=memory.free --format=csv,nounits,noheader',
        shell=True)
    free_memory = [int(line) for line in raw.decode().strip().split('\n')]
    candidates = [idx for idx, mem in enumerate(free_memory) if mem > 10000]
    return sorted(candidates, key=lambda idx: free_memory[idx], reverse=True)
class RhombusPiece(PuzzlePiece):
    """Rhombus puzzle piece formed by stacking a delta (north) piece on a
    nabla (south) piece; exposes the four outer edge labels."""

    def __init__(self, north_piece, south_piece):
        self._north_piece = north_piece
        self._south_piece = south_piece
        # Outer edges come from the north piece's top and the south
        # piece's bottom; the shared inner edge is not exposed.
        self._edge_labels = {
            'north_west': north_piece['north_west'],
            'north_east': north_piece['north_east'],
            'south_east': south_piece['south_east'],
            'south_west': south_piece['south_west'],
        }

    def __eq__(self, other) -> bool:
        if not isinstance(other, RhombusPiece):
            return False
        return (self.border() == other.border()
                and self._north_piece == other._north_piece
                and self._south_piece == other._south_piece
                and self._edge_labels == other._edge_labels)

    def __hash__(self):
        return hash((RhombusPiece, self.border()))

    def __iter__(self):
        yield self._north_piece
        yield self._south_piece

    def north_piece(self) -> DeltaPiece:
        return self._north_piece

    def south_piece(self) -> NablaPiece:
        return self._south_piece

    def __repr__(self) -> str:
        return '%s/\\%s %s\\/%s' % (self['north_west'], self['north_east'],
                                    self['south_west'], self['south_east'])

    def edges(self) -> tuple:
        return ('north_west', 'north_east', 'south_east', 'south_west')
class Meteor():
    """Wrapper around the METEOR Java scorer, driven over the stdin/stdout
    of a persistent subprocess (`-stdio` mode). A lock serializes access
    because the wire protocol is stateful.

    NOTE(review): stdin/stdout writes use no explicit encoding or flush —
    this matches the original (Python-2-era) code; under Python 3 the pipes
    are binary and may require encode() + flush(). Confirm target runtime.
    """

    def __init__(self):
        self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR, '-', '-', '-stdio', '-l', 'en', '-norm']
        self.meteor_p = subprocess.Popen(self.meteor_cmd, cwd=os.path.dirname(os.path.abspath(__file__)), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.lock = threading.Lock()

    def compute_score(self, gts, res):
        """Score candidates `res` against references `gts` (dicts keyed by
        image id; each res[i] must hold exactly one caption).

        Returns (corpus score, per-image scores)."""
        assert (gts.keys() == res.keys())
        imgIds = gts.keys()
        scores = []
        eval_line = 'EVAL'
        self.lock.acquire()
        for i in imgIds:
            assert (len(res[i]) == 1)
            # One SCORE round-trip per image; its stats feed the EVAL line.
            stat = self._stat(res[i][0], gts[i])
            eval_line += ' ||| {}'.format(stat)
        self.meteor_p.stdin.write('{}\n'.format(eval_line))
        for i in range(0, len(imgIds)):
            scores.append(float(self.meteor_p.stdout.readline().strip()))
        # The final line after the per-image scores is the corpus score.
        score = float(self.meteor_p.stdout.readline().strip())
        self.lock.release()
        return (score, scores)

    def method(self):
        return 'METEOR'

    def _stat(self, hypothesis_str, reference_list):
        # Protocol line: SCORE ||| ref1 ||| ref2 ... ||| hypothesis.
        # NOTE(review): the second replace looks like a no-op here — likely
        # '  ' -> ' ' (double-space collapse) upstream; confirm.
        hypothesis_str = hypothesis_str.replace('|||', '').replace(' ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self.meteor_p.stdin.write('{}\n'.format(score_line))
        return self.meteor_p.stdout.readline().strip()

    def _score(self, hypothesis_str, reference_list):
        """Score a single hypothesis against its references (thread-safe)."""
        self.lock.acquire()
        hypothesis_str = hypothesis_str.replace('|||', '').replace(' ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self.meteor_p.stdin.write('{}\n'.format(score_line))
        stats = self.meteor_p.stdout.readline().strip()
        eval_line = 'EVAL ||| {}'.format(stats)
        self.meteor_p.stdin.write('{}\n'.format(eval_line))
        score = float(self.meteor_p.stdout.readline().strip())
        self.lock.release()
        return score

    def __exit__(self):
        # Shut the subprocess down cleanly.
        # NOTE(review): __exit__ normally takes (exc_type, exc_val, exc_tb);
        # this zero-argument signature matches the original text — confirm.
        self.lock.acquire()
        self.meteor_p.stdin.close()
        self.meteor_p.wait()
        self.lock.release()
def maxima_version():
    """Return the version string reported by `maxima --version` (the last
    whitespace-separated token of its output)."""
    with os.popen('{} --version'.format(MAXIMA)) as pipe:
        output = pipe.read()
    return output.split()[-1]
class LDMPipeline(DiffusionPipeline):
    """Unconditional latent-diffusion pipeline: DDIM sampling in VQ-VAE
    latent space, followed by VQ-VAE decoding to images."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        scheduler = scheduler.set_format('pt')
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    # BUGFIX(review): the original text had a garbled decorator residue
    # `_grad()` here; restored to `@torch.no_grad()` (inference-only call,
    # matching the upstream diffusers pipeline).
    @torch.no_grad()
    def __call__(self, batch_size: int=1, generator: Optional[torch.Generator]=None, eta: float=0.0, num_inference_steps: int=50, output_type: Optional[str]='pil', return_dict: bool=True, **kwargs) -> Union[(Tuple, ImagePipelineOutput)]:
        """Sample `batch_size` images; returns an ImagePipelineOutput (or a
        plain tuple when return_dict=False)."""
        # Deprecated `torch_device` kwarg: honor it, but warn.
        if ('torch_device' in kwargs):
            device = kwargs.pop('torch_device')
            warnings.warn('`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0. Consider using `pipe.to(torch_device)` instead.')
            if (device is None):
                device = ('cuda' if torch.cuda.is_available() else 'cpu')
            self.to(device)
        # Start from Gaussian noise in the latent space.
        latents = torch.randn((batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size), generator=generator)
        latents = latents.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        # Only DDIM-style schedulers accept `eta`.
        accepts_eta = ('eta' in set(inspect.signature(self.scheduler.step).parameters.keys()))
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            noise_prediction = self.unet(latents, t).sample
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        image = self.vqvae.decode(latents).sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        image = ((image / 2) + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if (output_type == 'pil'):
            image = self.numpy_to_pil(image)
        if (not return_dict):
            return (image,)
        return ImagePipelineOutput(images=image)
class RandomSampler(Sampler):
    """Sampler yielding the dataset's indices in a fresh uniformly-random
    permutation (no replacement) on every iteration."""

    def __init__(self, data_source):
        self.data_source = data_source

    def __iter__(self):
        permutation = torch.randperm(len(self.data_source))
        return iter(permutation.tolist())

    def __len__(self):
        return len(self.data_source)
def test_cond_param_assign3():
    """rf.cond with a parameter assignment inside one branch.

    With an even max time-dim size the true branch runs (param += 3, output
    42); with an odd size the false branch returns param * 3 and leaves the
    parameter untouched.
    """
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):

        def __init__(self):
            super().__init__()
            # Scalar int parameter starting at 2.
            self.param = rf.Parameter(dims=(), dtype='int32')
            self.param.initial = 2

        def __call__(self, x: Tensor) -> Tuple[(Tensor, Tensor)]:
            # True branch: tuple trick — evaluate the side-effecting
            # assign_add, then return the trailing constant 42.
            return (rf.cond(pred=((time_dim.get_dim_value_tensor() % 2) == 0), true_fn=(lambda : (self.param.assign_add(3), rf.convert_to_tensor(42))[(- 1)]), false_fn=(lambda : (self.param * 3))), self.param)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        (out, param) = model(extern_data['data'])
        out.mark_as_default_output(shape=())
        param.mark_as_output(shape=(), name='param')

    # Max size 5 -> odd -> false branch; max size 6 -> even -> true branch.
    out1 = run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 5})
    out2 = run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 6})
    assert ((out1['output'].raw_tensor == 6) and (out1['param'].raw_tensor == 2))
    assert ((out2['output'].raw_tensor == 42) and (out2['param'].raw_tensor == 5))
def register_Ns3CallbackImpl__Void_Unsigned_long_Unsigned_short_Unsigned_short_Ns3LteUeRrcState_Ns3LteUeRrcState_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register Python bindings for the ns-3 CallbackImpl specialization used by
    LteUeRrc state-change traces (generated binding code -- do not edit by hand)."""
    # Default constructor and copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< void, unsigned long long, unsigned short, unsigned short, ns3::LteUeRrc::State, ns3::LteUeRrc::State, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    # Static and virtual type-id accessors.
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # C++ operator() exposed to Python as __call__; pure virtual in the base.
    cls.add_method('operator()', 'void', [param('long unsigned int', 'arg0'), param('short unsigned int', 'arg1'), param('short unsigned int', 'arg2'), param('ns3::LteUeRrc::State', 'arg3'), param('ns3::LteUeRrc::State', 'arg4')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
class FactorizationMachineModel(keras.Model):
    """Rating-prediction model: a factorization machine over (user, item, features)
    when side features exist, plain matrix factorization otherwise."""

    def __init__(self, num_users, num_items, num_features, factors, lambda_weights, learning_rate=0.01, random_seed=42, name='FM', **kwargs):
        super().__init__(name=name, **kwargs)
        tf.random.set_seed(random_seed)
        self.num_users = num_users
        self.num_items = num_items
        self.num_features = num_features
        # Latent dimensionality of the embeddings.
        self.factors = factors
        # L2 regularization strength applied to the embedding kernels.
        self.lambda_weights = lambda_weights
        self.initializer = tf.initializers.GlorotUniform()
        # With side features use a full FM layer; otherwise fall back to plain MF.
        if self.num_features:
            self.factorization = FactorizationMachineLayer(field_dims=[self.num_users, self.num_items, self.num_features], factors=self.factors, kernel_initializer=self.initializer, kernel_regularizer=keras.regularizers.l2(self.lambda_weights))
        else:
            self.factorization = MatrixFactorizationLayer(num_users=self.num_users, num_items=num_items, factors=self.factors, kernel_initializer=self.initializer, kernel_regularizer=keras.regularizers.l2(self.lambda_weights))
        self.loss = keras.losses.MeanSquaredError()
        self.optimizer = tf.optimizers.Adam(learning_rate)

    def call(self, inputs, training=None, mask=None):
        # `inputs` is a batch of transactions (index tuples); forwarded as-is.
        transaction = inputs
        return self.factorization(inputs=transaction, training=training)

    def train_step(self, batch):
        """One gradient step on a (transaction, rating) batch; returns the MSE loss."""
        (transaction, label) = batch
        with tf.GradientTape() as tape:
            output = self.factorization(inputs=transaction, training=True)
            loss = self.loss(label, output)
        grads = tape.gradient(loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return loss

    def predict(self, inputs, training=False, **kwargs):
        output = self.call(inputs=inputs, training=training)
        return output

    def get_recs(self, inputs, training=False, **kwargs):
        """Score recommendation candidates; the FM variant scores row-by-row via map_fn."""
        if self.num_features:
            output = tf.map_fn((lambda row: self.call(inputs=row, training=training)), tf.convert_to_tensor(inputs))
        else:
            output = self.call(inputs=inputs, training=training)
        return tf.squeeze(output)

    def get_top_k(self, preds, train_mask, k=100):
        # Mask already-seen (training) items with -inf so they can never rank.
        return tf.nn.top_k(tf.where(train_mask, preds, (- np.inf)), k=k, sorted=True)
def _read_pretrained_word2vec_format_embedding_file(embeddings_filename: str, embedding_dim: int, vocab: Vocabulary, namespace: str='tokens') -> torch.FloatTensor:
    """Load gzipped word2vec-text embeddings for the words in `vocab`.

    Rows for words absent from the file are sampled from a normal distribution
    fitted to the mean/std of the embeddings that were found.

    Raises:
        ConfigurationError: if no line of the file matched `embedding_dim`.
    """
    words_to_keep = set(vocab.get_index_to_token_vocabulary(namespace).values())
    vocab_size = vocab.get_vocab_size(namespace)
    embeddings = {}
    logger.info('Reading embeddings from file')
    with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file:
        for line in embeddings_file:
            # word2vec text format: token followed by `embedding_dim` floats.
            fields = line.decode('utf-8').strip().split(' ')
            if ((len(fields) - 1) != embedding_dim):
                # Also skips the optional "<count> <dim>" header line.
                logger.warning('Found line with wrong number of dimensions (expected %d, was %d): %s ...', embedding_dim, (len(fields) - 1), line[:15])
                continue
            word = fields[0]
            if (word in words_to_keep):
                vector = numpy.asarray(fields[1:], dtype='float32')
                embeddings[word] = vector
    if (not embeddings):
        raise ConfigurationError("No embeddings of correct dimension found; you probably misspecified your embedding_dim parameter, or didn't pre-populate your Vocabulary")
    all_embeddings = numpy.asarray(list(embeddings.values()))
    embeddings_mean = float(numpy.mean(all_embeddings))
    embeddings_std = float(numpy.std(all_embeddings))
    logger.info('Initializing pre-trained embedding layer')
    # Initialize every row from the fitted normal, then overwrite known words.
    embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).normal_(embeddings_mean, embeddings_std)
    for i in range(0, vocab_size):
        word = vocab.get_token_from_index(i, namespace)
        if (word in embeddings):
            embedding_matrix[i] = torch.FloatTensor(embeddings[word])
        else:
            logger.debug('Word %s was not found in the embedding file. Initialising randomly.', word)
    return embedding_matrix
class CifarResNeXt(nn.Module):
    """ResNeXt for CIFAR: a 3x3 stem, three stages of grouped bottleneck blocks,
    8x8 average pooling and a linear classifier."""

    def __init__(self, block, depth, cardinality, base_width, num_classes, dropout):
        super(CifarResNeXt, self).__init__()
        self.num_classes = num_classes
        # Depth must be 9n+2: three stages of n blocks, three convs per block.
        assert (((depth - 2) % 9) == 0), 'depth should be one of 29, 38, 47, 56, 101'
        layer_blocks = ((depth - 2) // 9)
        self.cardinality = cardinality
        self.base_width = base_width
        self.num_classes = num_classes
        self.dropout = dropout
        self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
        self.bn_1 = nn.BatchNorm2d(64)
        self.inplanes = 64
        # Stage 1 keeps resolution; stages 2-3 halve it via stride 2.
        self.stage_1 = self._make_layer(block, 64, layer_blocks, 1)
        self.stage_2 = self._make_layer(block, 128, layer_blocks, 2)
        self.stage_3 = self._make_layer(block, 256, layer_blocks, 2)
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = nn.Linear((256 * block.expansion), num_classes)
        # He-style weight initialization. NOTE(review): `init.kaiming_normal`
        # is the deprecated alias of `kaiming_normal_` in modern torch.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                init.kaiming_normal(m.weight)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` ResNeXt blocks; a 1x1-conv downsample is added when the
        spatial size or channel count changes."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, self.cardinality, self.base_width, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, self.cardinality, self.base_width))
        return nn.Sequential(*layers)

    def forward(self, x):
        out = x
        out = self.conv_1_3x3(out)
        out = F.relu(self.bn_1(out), inplace=True)
        out = self.stage_1(out)
        out = self.stage_2(out)
        out = self.stage_3(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), (- 1))
        # Optional dropout just before the linear classifier.
        if self.dropout:
            out = F.dropout(out, p=0.5, training=self.training)
        out = self.classifier(out)
        return out
class HolisticIndexBlock(nn.Module):
    """IndexNet-style holistic index block: predicts 2x-upsampled encoder and
    decoder index maps from an input feature map."""

    def __init__(self, in_channels, norm_cfg=dict(type='BN'), use_context=False, use_nonlinear=False):
        super().__init__()
        # Context mode widens the receptive field (4x4 conv) at the same stride 2.
        if use_context:
            (kernel_size, padding) = (4, 1)
        else:
            (kernel_size, padding) = (2, 0)
        self.index_block = build_index_block(in_channels, 4, kernel_size, stride=2, padding=padding, groups=1, norm_cfg=norm_cfg, use_nonlinear=use_nonlinear, expansion=2)
        self.sigmoid = nn.Sigmoid()
        # Softmax over the 4 index channels normalizes the encoder indices.
        self.softmax = nn.Softmax(dim=1)
        # Rearranges the 4 index channels into one map at 2x spatial size.
        self.pixel_shuffle = nn.PixelShuffle(2)

    def forward(self, x):
        """Return (encoder_index_map, decoder_index_map), each at 2x spatial size."""
        x = self.index_block(x)
        y = self.sigmoid(x)
        # Encoder indices get an extra softmax; decoder uses the raw sigmoid.
        z = self.softmax(y)
        enc_idx_feat = self.pixel_shuffle(z)
        dec_idx_feat = self.pixel_shuffle(y)
        return (enc_idx_feat, dec_idx_feat)
class UpdateReadme(Step):
    """Release step that reminds the operator to refresh the README."""

    def action(self, context):
        # Purely instructional: nothing on disk is modified here.
        version = context['version']
        self.instruct(f"Update README for version: {version}")
def init_signal_handler():
    """Install process-level signal handlers: SIGUSR1 -> sig_handler, SIGTERM -> term_handler."""
    handlers = {signal.SIGUSR1: sig_handler, signal.SIGTERM: term_handler}
    for signum, handler in handlers.items():
        signal.signal(signum, handler)
class PowerParameter(_message.Message):
    """Generated protocol-buffer message for a power parameter (do not edit by hand)."""
    # Python-2 style metaclass: wires the descriptor into a concrete message class.
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POWERPARAMETER
def untar_file(filename, location):
    """Untar `filename` into `location`, guessing compression from the extension.

    Strips a single shared leading directory when every member has one,
    tolerates corrupt members with a warning, preserves mtimes, and re-grants
    execute permission to files that had any execute bit set.
    """
    ensure_dir(location)
    if (filename.lower().endswith('.gz') or filename.lower().endswith('.tgz')):
        mode = 'r:gz'
    elif filename.lower().endswith(BZ2_EXTENSIONS):
        mode = 'r:bz2'
    elif filename.lower().endswith(XZ_EXTENSIONS):
        mode = 'r:xz'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        # Unknown extension: let tarfile auto-detect the compression.
        logger.warning('Cannot determine compression type for file %s', filename)
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        # 'pax_global_header' is archive metadata, not a real member.
        leading = has_leading_dir([member.name for member in tar.getmembers() if (member.name != 'pax_global_header')])
        for member in tar.getmembers():
            fn = member.name
            if (fn == 'pax_global_header'):
                continue
            if leading:
                fn = split_leading_dir(fn)[1]
            path = os.path.join(location, fn)
            if member.isdir():
                ensure_dir(path)
            elif member.issym():
                try:
                    # Private tarfile API, but it handles symlink creation for us.
                    tar._extract_member(member, path)
                except Exception as exc:
                    # Corrupt tars can contain bad members; warn and continue.
                    logger.warning('In the tar file %s the member %s is invalid: %s', filename, member.name, exc)
                    continue
            else:
                try:
                    fp = tar.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    logger.warning('In the tar file %s the member %s is invalid: %s', filename, member.name, exc)
                    continue
                ensure_dir(os.path.dirname(path))
                with open(path, 'wb') as destfp:
                    shutil.copyfileobj(fp, destfp)
                fp.close()
                # Restore the member's modification time.
                tar.utime(member, path)
                # 73 == 0o111 (any execute bit); 511 == 0o777. If the member was
                # executable, chmod to 0o777-minus-umask with execute bits forced on.
                if (member.mode & 73):
                    os.chmod(path, ((511 - current_umask()) | 73))
    finally:
        tar.close()
class DGNNet(nn.Module):
    """Directional Graph Network for molecular property prediction: OGB atom/bond
    encoders, a stack of DGN layers (optionally interleaved with virtual-node
    layers), a graph readout and an MLP head with BCE loss."""

    def __init__(self, net_params):
        super().__init__()
        # Unpack the hyper-parameter dictionary.
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        decreasing_dim = net_params['decreasing_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.type_net = net_params['type_net']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.avg_d = net_params['avg_d']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.towers = net_params['towers']
        edge_dim = net_params['edge_dim']
        pretrans_layers = net_params['pretrans_layers']
        posttrans_layers = net_params['posttrans_layers']
        device = net_params['device']
        self.virtual_node = net_params['virtual_node']
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # OGB-style encoders for atom (node) and, optionally, bond (edge) features.
        self.embedding_h = AtomEncoder(emb_dim=hidden_dim)
        if self.edge_feat:
            self.embedding_e = BondEncoder(emb_dim=edge_dim)
        # n_layers - 1 hidden-dim DGN layers plus a final layer mapping to out_dim.
        self.layers = nn.ModuleList([DGNLayer(in_dim=hidden_dim, out_dim=hidden_dim, dropout=dropout, graph_norm=self.graph_norm, batch_norm=self.batch_norm, residual=self.residual, aggregators=self.aggregators, scalers=self.scalers, avg_d=self.avg_d, type_net=self.type_net, edge_features=self.edge_feat, edge_dim=edge_dim, pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers, towers=self.towers).model for _ in range((n_layers - 1))])
        self.layers.append(DGNLayer(in_dim=hidden_dim, out_dim=out_dim, dropout=dropout, graph_norm=self.graph_norm, batch_norm=self.batch_norm, residual=self.residual, aggregators=self.aggregators, scalers=self.scalers, avg_d=self.avg_d, type_net=self.type_net, edge_features=self.edge_feat, edge_dim=edge_dim, pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers, towers=self.towers).model)
        self.MLP_layer = MLPReadout(out_dim, 128, decreasing_dim=decreasing_dim)
        # Optional virtual-node layers, one between each pair of DGN layers.
        self.virtual_node_layers = None
        if ((self.virtual_node is not None) and (self.virtual_node.lower() != 'none')):
            self.virtual_node_layers = nn.ModuleList([VirtualNode(dim=hidden_dim, dropout=dropout, batch_norm=self.batch_norm, bias=True, vn_type=self.virtual_node, residual=self.residual) for _ in range((n_layers - 1))])

    def forward(self, g, h, e, snorm_n, snorm_e):
        """Run the DGN stack on DGL graph `g` with node feats `h`, edge feats `e`."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        if self.edge_feat:
            e = self.embedding_e(e)
        for (i, conv) in enumerate(self.layers):
            h_t = conv(g, h, e, snorm_n)
            h = h_t
            # Virtual-node update after each non-final layer (state starts at 0).
            if (self.virtual_node_layers is not None):
                if (i == 0):
                    vn_h = 0
                if (i < len(self.virtual_node_layers)):
                    (vn_h, h) = self.virtual_node_layers[i].forward(g, h, vn_h)
        g.ndata['h'] = h
        # Graph-level readout; unknown values fall back to mean pooling.
        if (self.readout == 'sum'):
            hg = dgl.sum_nodes(g, 'h')
        elif (self.readout == 'max'):
            hg = dgl.max_nodes(g, 'h')
        elif (self.readout == 'mean'):
            hg = dgl.mean_nodes(g, 'h')
        else:
            hg = dgl.mean_nodes(g, 'h')
        return self.MLP_layer(hg)

    def loss(self, scores, labels):
        # Multi-label binary classification objective.
        loss = torch.nn.BCEWithLogitsLoss()(scores, labels)
        return loss
def test_write_background_to_file_1(tmpdir):
    """Writing a default Background named 'train' must create train_bk.txt whose
    content equals str() of the object."""
    background = Background()
    background.write(filename='train', location=pathlib.Path(tmpdir))
    written = tmpdir.join('train_bk.txt').read()
    assert written == str(background)
class MinkUNet18_MCMC(nn.Module):
    """Wraps a sparse segmentation model with Monte-Carlo dropout applied to its
    backbone features before the final segmentation head."""

    def __init__(self, seg_model, p_drop=0.5):
        super().__init__()
        self.seg_model = seg_model
        # MinkowskiDropout provides the stochasticity for MC sampling.
        self.dropout = ME.MinkowskiDropout(p=p_drop)

    def forward(self, x, is_train=True):
        backbone_feats, bottleneck = self.seg_model(x, is_seg=False)
        backbone_feats = self.dropout(backbone_feats)
        seg_out = self.seg_model.final(backbone_feats)
        # Return dense feature tensors (.F) plus the raw bottleneck output.
        return (seg_out.F, backbone_feats.F, bottleneck)
def stable_var(x, mean=None, dim=1):
    """Numerically stable per-row (biased) variance of `x`.

    Squared deviations are divided by their row-max before averaging so large
    magnitudes cannot overflow; constant rows would produce NaN from 0/0 and
    are zeroed out at the end.
    """
    if mean is None:
        mean = x.mean(dim, keepdim=True)
    mean = mean.view((-1), 1)
    deviations_sq = torch.pow(x - mean, 2)
    row_max = torch.max(deviations_sq, dim, keepdim=True)[0]
    variance = torch.mean(deviations_sq / row_max, 1, keepdim=True) * row_max
    variance = variance.view((-1))
    # NaN != NaN: replace NaNs (from all-zero deviation rows) with zero.
    variance[variance != variance] = 0
    return variance
def dir_type(path):
    """argparse `type=` helper: pass through the empty string or an existing
    directory path; reject anything else."""
    if not path:
        # Empty path is accepted unchanged (treated as "not provided").
        return path
    if pth.isdir(path):
        return path
    raise argparse.ArgumentTypeError("'{0}' is not a directory".format(path))
def get_data(d, bgp=False, airports=False):
    """Load the first graph of dataset `d` and return it.

    Flags select the dataset family: `airports` / `bgp` take priority;
    otherwise the name picks WebKB, a Wikipedia network, or the film network.
    """
    if airports:
        dataset = Airports(root=('original_datasets/airports_dataset/' + d), dataset_name=d)
        original = dataset[0]
    elif bgp:
        dataset = BGP(root='original_datasets/bgp_dataset')
        original = dataset[0]
    else:
        if (d in ['cornell', 'texas', 'wisconsin']):
            dataset = WebKB(root='original_datasets/webkb', name=d)
        elif (d in ['chameleon', 'squirrel']):
            dataset = WikipediaNetwork(root='original_datasets/wiki', name=d)
        else:
            # Any other name falls back to the film network.
            dataset = FilmNetwork(root='original_datasets/film', name=d)
        original = dataset[0]
    print(original)
    return original
def _reroute_t(t0, t1, consumers1, can_modify=None, cannot_modify=None):
    """Make the ops in `consumers1` read tensor `t0` wherever they read `t1`.

    `can_modify` / `cannot_modify` are op sets filtering which consumers may be
    rewritten. Returns the number of op inputs updated.
    """
    nb_update_inputs = 0
    if (can_modify is not None):
        consumers1 &= can_modify
    if (cannot_modify is not None):
        consumers1 -= cannot_modify
    # First pass: record, for each consumer, which input slots hold t1 ...
    consumers1_indices = {}
    for consumer1 in consumers1:
        consumers1_indices[consumer1] = [i for (i, t) in enumerate(consumer1.inputs) if (t is t1)]
    # ... second pass: rewrite those slots (mutating inputs while enumerating
    # them would invalidate the scan).
    for consumer1 in consumers1:
        for i in consumers1_indices[consumer1]:
            consumer1._update_input(i, t0)
            nb_update_inputs += 1
    return nb_update_inputs
class SE(object):
    """SentEval-style evaluation engine: dispatches a task name to the matching
    evaluation class and runs it with the user-supplied batcher/prepare hooks."""

    def __init__(self, params, batcher, prepare=None):
        params = utils.dotdict(params)
        # Fill in defaults for any missing engine-level settings.
        params.usepytorch = (True if ('usepytorch' not in params) else params.usepytorch)
        params.seed = (1111 if ('seed' not in params) else params.seed)
        params.batch_size = (128 if ('batch_size' not in params) else params.batch_size)
        params.nhid = (0 if ('nhid' not in params) else params.nhid)
        params.kfold = (5 if ('kfold' not in params) else params.kfold)
        if (('classifier' not in params) or (not params['classifier'])):
            params.classifier = {'nhid': 0}
        assert ('nhid' in params.classifier), 'Set number of hidden units in classifier config!!'
        self.params = params
        self.batcher = batcher
        # Default prepare hook is a no-op.
        self.prepare = (prepare if prepare else (lambda x, y: None))
        self.list_tasks = get_task_names()

    def eval(self, name):
        """Evaluate one task by name, or a list of tasks (returns a dict of results)."""
        if isinstance(name, list):
            self.results = {x: self.eval(x) for x in name}
            return self.results
        tpath = self.params.task_path
        assert (name in self.list_tasks), ((str(name) + ' not in ') + str(self.list_tasks))
        task = get_task_by_name(name)
        task_dir = task['dir']
        task_path = f'{tpath}/downstream/{task_dir}'
        task_type = task['type']
        # Task type selects the evaluation strategy class.
        classes = {'classification': SentEvalClassifier, 'entailment': EntailmentEval, 'relatedness': RelatednessEval, 'ppc': PPCEval}
        eval_class = classes[task_type]
        # Classification tasks also need the number of classes.
        if (task_type == 'classification'):
            self.evaluation = eval_class(task_path, name, task['num_classes'], seed=self.params.seed)
        else:
            self.evaluation = eval_class(task_path, task_dir, seed=self.params.seed)
        self.params.current_task = name
        self.evaluation.do_prepare(self.params, self.prepare)
        self.results = self.evaluation.run(self.params, self.batcher)
        return self.results
# NOTE(review): the decorator below looks truncated -- upstream this is
# `@pytest.mark.usefixtures('enable_slep006')`; confirm against the original file.
.usefixtures('enable_slep006')
def test_transformer_fit_transform_with_metadata_in_transform():
    """fit_transform must warn when `transform` consumes routed metadata, and stay
    silent when no metadata is passed."""

    class CustomTransformer(BaseEstimator, TransformerMixin):
        def fit(self, X, y=None, prop=None):
            return self

        def transform(self, X, prop=None):
            return X

    # Metadata routed into transform through fit_transform triggers a UserWarning.
    with pytest.warns(UserWarning, match='`transform` method which consumes metadata'):
        CustomTransformer().set_transform_request(prop=True).fit_transform([[1]], [1], prop=1)
    # No metadata passed: no warning should be emitted at all.
    with warnings.catch_warnings(record=True) as record:
        CustomTransformer().set_transform_request(prop=True).fit_transform([[1]], [1])
    assert (len(record) == 0)
class MaxTestExecutionsStoppingCondition(StoppingCondition):
    """Stopping condition fulfilled once a fixed number of test-case executions
    has been observed."""

    def __init__(self, max_test_executions: int):
        assert max_test_executions > 0.0
        super().__init__(observes_execution=True)
        # Counter of test cases executed so far.
        self._num_executed_tests = 0
        self._max_test_executions = max_test_executions

    def current_value(self) -> int:
        """Number of test cases executed so far."""
        return self._num_executed_tests

    def limit(self) -> int:
        """Maximum number of executions before the search stops."""
        return self._max_test_executions

    def is_fulfilled(self) -> bool:
        # Fulfilled once the counter reaches (or passes) the limit.
        return self._num_executed_tests >= self._max_test_executions

    def reset(self) -> None:
        self._num_executed_tests = 0

    def set_limit(self, limit: int) -> None:
        self._max_test_executions = limit

    def before_search_start(self, start_time_ns: int) -> None:
        # A fresh search counts from zero again.
        self._num_executed_tests = 0

    def before_test_case_execution(self, test_case: tc.TestCase):
        self._num_executed_tests += 1

    def __str__(self):
        return f'Executed test cases: {self.current_value()}/{self.limit()}'
def segment(text):
    """Return segment ids for `text`: 0 for positions before the first 'sep',
    1 from 'sep' onward. Raises ValueError if 'sep' is absent."""
    sep_pos = text.index('sep')
    return [0] * sep_pos + [1] * (len(text) - sep_pos)
class BaseTransformer(pl.LightningModule):
    """PyTorch-Lightning wrapper around a HuggingFace transformer: loads
    config/tokenizer/model from `hparams` and wires the AdamW optimizer and a
    linear-warmup LR schedule."""

    def __init__(self, hparams, num_labels=None):
        super(BaseTransformer, self).__init__()
        # NOTE(review): direct assignment to self.hparams is rejected by newer
        # pytorch-lightning versions (use save_hyperparameters) -- confirm the
        # pinned PL version.
        self.hparams = hparams
        self.hparams.model_type = self.hparams.model_type.lower()
        (config_class, model_class, tokenizer_class) = MODEL_CLASSES[self.hparams.model_type]
        config = config_class.from_pretrained((self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path), num_labels=num_labels, cache_dir=(self.hparams.cache_dir if self.hparams.cache_dir else None))
        tokenizer = tokenizer_class.from_pretrained((self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path), do_lower_case=self.hparams.do_lower_case, cache_dir=(self.hparams.cache_dir if self.hparams.cache_dir else None))
        model = model_class.from_pretrained(self.hparams.model_name_or_path, from_tf=bool(('.ckpt' in self.hparams.model_name_or_path)), config=config, cache_dir=(self.hparams.cache_dir if self.hparams.cache_dir else None))
        (self.config, self.tokenizer, self.model) = (config, tokenizer, model)

    def is_logger(self):
        # Only the rank-0 process should log.
        return (self.trainer.proc_rank <= 0)

    def configure_optimizers(self):
        """AdamW with weight decay on everything except biases and LayerNorm weights."""
        model = self.model
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': self.hparams.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
        optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer
        return [optimizer]

    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):
        # TPU requires the xm wrapper; otherwise step manually and advance the
        # LR scheduler created in train_dataloader().
        if self.trainer.use_tpu:
            xm.optimizer_step(optimizer)
        else:
            optimizer.step()
        optimizer.zero_grad()
        self.lr_scheduler.step()

    def get_tqdm_dict(self):
        # Progress-bar extras: running loss and the last learning rate.
        tqdm_dict = {'loss': '{:.3f}'.format(self.trainer.avg_loss), 'lr': self.lr_scheduler.get_last_lr()[(- 1)]}
        return tqdm_dict

    def test_step(self, batch, batch_nb):
        # Test mirrors validation.
        return self.validation_step(batch, batch_nb)

    def test_end(self, outputs):
        return self.validation_end(outputs)

    def train_dataloader(self):
        """Build the train dataloader and a linear-warmup schedule sized to it."""
        train_batch_size = self.hparams.train_batch_size
        dataloader = self.load_dataset('train', train_batch_size)
        # Total optimizer steps = batches per epoch (across GPUs and grad
        # accumulation) times the number of epochs.
        t_total = (((len(dataloader.dataset) // (train_batch_size * max(1, self.hparams.n_gpu))) // self.hparams.gradient_accumulation_steps) * float(self.hparams.num_train_epochs))
        scheduler = get_linear_schedule_with_warmup(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total)
        self.lr_scheduler = scheduler
        return dataloader

    def val_dataloader(self):
        return self.load_dataset('dev', self.hparams.eval_batch_size)

    def test_dataloader(self):
        return self.load_dataset('test', self.hparams.eval_batch_size)

    # NOTE(review): defined without `self` -- upstream this is a @staticmethod
    # (the decorator appears to have been lost) and it returns `parser`; confirm.
    def add_model_specific_args(parser, root_dir):
        parser.add_argument('--model_type', default=None, type=str, required=True, help=('Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys())))
        parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help=('Path to pre-trained model or shortcut name selected in the list: ' + ', '.join(ALL_MODELS)))
        parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
        parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
        parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
        parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
        parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
        parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
        parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
        parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
        parser.add_argument('--num_train_epochs', default=3, type=int, help='Total number of training epochs to perform.')
        parser.add_argument('--train_batch_size', default=32, type=int)
        parser.add_argument('--eval_batch_size', default=32, type=int)
def test_simple_movement_up(env_single_agent):
    """Stepping FORWARD while facing UP must decrement y and leave x unchanged."""
    env = env_single_agent
    agent = env.agents[0]
    agent.x = 4
    agent.y = 25
    agent.dir = Direction.UP
    # Sync the grid with the manually placed agent before stepping.
    env._recalc_grid()
    env.step([Action.FORWARD])
    assert agent.x == 4
    assert agent.y == 24
def prepare_maestro(target_dir: str, cache_dir: str, dataset_root: str, test_fold: int=0, get_path_only: bool=False):
    """Prepare train/valid/test CSV manifests for the MAESTRO (hear) corpus.

    Fold `test_fold` becomes the test split, the next fold (cyclically) the
    validation split, and the remaining folds the training split. Audio is
    first resampled to 16 kHz. With `get_path_only`, only the CSV paths are
    returned without any processing.
    """
    target_dir: Path = Path(target_dir)
    train_csv = (target_dir / 'train.csv')
    valid_csv = (target_dir / 'valid.csv')
    test_csv = (target_dir / 'test.csv')
    if get_path_only:
        return (train_csv, valid_csv, [test_csv])
    assert (test_fold < MAESTRO_NUM_FOLDS), f"MAESTRO only has {MAESTRO_NUM_FOLDS} folds but get 'test_fold' arguments {test_fold}"
    resample_hear_corpus(dataset_root, target_sr=16000)
    dataset_root = Path(dataset_root)
    wav_root = (dataset_root / '16000')
    # NOTE(review): local NUM_FOLD duplicates MAESTRO_NUM_FOLDS -- confirm they agree.
    NUM_FOLD = 5
    test_id = test_fold
    valid_id = ((test_fold + 1) % NUM_FOLD)
    train_ids = [idx for idx in range(NUM_FOLD) if (idx not in [test_id, valid_id])]
    fold_metas = []
    fold_dfs = []
    for fold_id in range(NUM_FOLD):
        # f'fold{fold_id:2d}'.replace(' ', '0') zero-pads, e.g. 'fold00'.
        with open((dataset_root / f'fold{fold_id:2d}.json'.replace(' ', '0'))) as f:
            metadata = json.load(f)
        fold_metas.append(metadata)
        data = defaultdict(list)
        for utt in metadata:
            wav_path = ((wav_root / f'fold{fold_id:2d}'.replace(' ', '0')) / utt).resolve()
            info = torchaudio.info(wav_path)
            baseinfo = {'record_id': utt, 'wav_path': str(wav_path), 'duration': (info.num_frames / info.sample_rate)}
            # Each labeled segment of the recording becomes one manifest row.
            for segment in metadata[utt]:
                fullinfo = deepcopy(baseinfo)
                fullinfo['utt_id'] = f"{baseinfo['record_id']}-{int(segment['start'])}-{int(segment['end'])}"
                fullinfo['labels'] = segment['label']
                # start/end look like milliseconds; converted to seconds here.
                fullinfo['start_sec'] = (segment['start'] / 1000)
                fullinfo['end_sec'] = (segment['end'] / 1000)
                for (key, value) in fullinfo.items():
                    data[key].append(value)
        fold_dfs.append(pd.DataFrame(data=data))
    (test_meta, test_data) = (fold_metas[test_id], fold_dfs[test_id])
    (valid_meta, valid_data) = (fold_metas[valid_id], fold_dfs[valid_id])
    # Training split is the union of all remaining folds.
    (train_meta, train_data) = ({}, [])
    for idx in train_ids:
        train_meta.update(fold_metas[idx])
        train_data.append(fold_dfs[idx])
    train_data: pd.DataFrame = pd.concat(train_data)
    train_data.to_csv(train_csv, index=False)
    valid_data.to_csv(valid_csv, index=False)
    test_data.to_csv(test_csv, index=False)
    return (train_csv, valid_csv, [test_csv])
def concepts_to_adj_matrices_2hop_all_pair__use_LM__Part1(data):
    """Collect 1-hop bridge concepts shared by any pair of distinct
    question/answer concepts (uses the module-level `cpnet_simple` graph)."""
    qc_ids, ac_ids, question = data
    qa_nodes = set(qc_ids) | set(ac_ids)
    bridges = set()
    for src in qa_nodes:
        if src not in cpnet_simple.nodes:
            continue
        # Hoist the neighbor set of `src` out of the inner loop.
        src_neighbors = set(cpnet_simple[src])
        for dst in qa_nodes:
            if dst == src or dst not in cpnet_simple.nodes:
                continue
            bridges |= src_neighbors & set(cpnet_simple[dst])
    # Bridge nodes must be new: drop anything already in the q/a concept sets.
    bridges = bridges - qa_nodes
    return (sorted(qc_ids), sorted(ac_ids), question, sorted(bridges))
def group_by_generator(mock_database):
    """Fixture-style factory: a GroupByGenerator bound to the mock database."""
    return GroupByGenerator(mock_database)
class TMMNetCrossNetI(object):
    """SWIG-generated iterator over edges of a TMMNet cross-net (generated code
    -- do not edit by hand)."""
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        _snap.TMMNetCrossNetI_swiginit(self, _snap.new_TMMNetCrossNetI(*args))

    def Next(self):
        # Advance the iterator to the next cross-net edge.
        return _snap.TMMNetCrossNetI_Next(self)

    def __lt__(self, EdgeI):
        return _snap.TMMNetCrossNetI___lt__(self, EdgeI)

    def __eq__(self, EdgeI):
        return _snap.TMMNetCrossNetI___eq__(self, EdgeI)

    def GetCrossId(self):
        return _snap.TMMNetCrossNetI_GetCrossId(self)

    def GetCrossNet(self):
        return _snap.TMMNetCrossNetI_GetCrossNet(self)
    __swig_destroy__ = _snap.delete_TMMNetCrossNetI
# NOTE(review): the decorators below look truncated -- presumably
# `@pytest.mark.experimental` and `@pytest.mark.parametrize(...)`; confirm.
.experimental
.parametrize('als_model, metric', [(ALSWrap(seed=SEED), 'euclidean_distance_sim'), (ALSWrap(seed=SEED), 'dot_product'), (ALSWrap(seed=SEED), 'cosine_similarity')], ids=['als_euclidean', 'als_dot', 'als_cosine'])
def test_get_nearest_items(log, als_model, metric):
    """get_nearest_items must respect k, the queried item set, and candidate filtering."""
    # Item 3 is held out of training so it can never appear as a neighbor.
    als_model.fit(log.filter((sf.col('item_idx') != 3)))
    res = als_model.get_nearest_items(items=[0, 1], k=2, metric=metric)
    # Two query items times k=2 rows each.
    assert (res.count() == 4)
    assert (set(res.toPandas().to_dict()['item_idx'].values()) == {0, 1})
    res = als_model.get_nearest_items(items=[0, 1], k=1, metric=metric)
    assert (res.count() == 2)
    # Candidate restriction: of [0, 3], only item 0 is a trained, valid neighbor.
    res = als_model.get_nearest_items(items=[0, 1], k=4, metric=metric, candidates=[0, 3])
    assert (res.count() == 1)
    assert (len(set(res.toPandas().to_dict()['item_idx'].values()).difference({0, 1})) == 0)
def get_vectorized_gym_env(base_env, gym_env_name, agent_idx, featurize_fn=None, **kwargs):
    """Build a reward-shaping-wrapped SubprocVecEnv of `sim_threads` copies of
    the named gym environment, each initialized against `base_env`."""

    def make_env():
        env = gym.make(gym_env_name)
        # Joint-PPO runs use a joint action space across agents.
        if kwargs['RUN_TYPE'] == 'joint_ppo':
            env.custom_init(base_env, joint_actions=True, featurize_fn=featurize_fn, baselines=True, agent_idx=agent_idx)
        else:
            env.custom_init(base_env, featurize_fn=featurize_fn, baselines=True, agent_idx=agent_idx)
        return env

    return RewardShapingEnv(SubprocVecEnv([make_env] * kwargs['sim_threads']))
class ExceptionInfo():
    """Picklable carrier for an exception plus its tblib traceback, used to
    re-raise worker failures in the parent process."""
    # NOTE(review): bare annotations without a @dataclass decorator -- the
    # decorator was likely lost; confirm against the original file.
    ex: Optional[BaseException]
    tb: tblib.Traceback

    def restore(self):
        """Return an (exc_type, exc_value, traceback) triple like sys.exc_info()."""
        if (self.ex is not None):
            exc_value = self.ex.with_traceback(self.tb.as_traceback())
            return (self.ex.__class__, exc_value, self.tb.as_traceback())
        else:
            # No exception was captured: synthesize a generic one.
            return (Exception, Exception('Process failed with no exception'), self.tb.as_traceback())

    def reraise(self):
        """Re-raise the stored exception with its original traceback attached."""
        if (self.ex is not None):
            raise self.ex.with_traceback(self.tb.as_traceback())
        else:
            raise Exception('Process failed with no exception').with_traceback(self.tb.as_traceback())
def make_window(seed, static_out=True):
    """Build a stack of FIR windows for dynamic-feature (delta) computation.

    Two input modes:
      * ``seed`` is a list of coefficient lists: each is zero-padded to a
        shared odd length; with ``static_out`` a leading ``[1]`` identity
        window is prepended.
      * ``seed`` is a list of 1 or 2 half-window widths: a least-squares
        linear-regression (delta) window is generated, plus a quadratic
        (delta-delta) window if a second width is given.

    Args:
        seed: tuple/list of coefficient lists, or tuple/list of positive ints.
        static_out: if True, also emit the identity (pass-through) window.

    Returns:
        np.ndarray of shape (num_windows, window_length).

    Raises:
        ValueError: if ``seed`` is not a tuple/list, or a 3rd-order regression
            window is requested.
    """
    if not isinstance(seed, (tuple, list)):
        raise ValueError('seed must be tuple or list')

    if isinstance(seed[0], (tuple, list)):
        # Explicit-coefficient mode: pad every window to one shared odd length.
        if static_out:
            seed = ([[1]] + list(seed))
        max_len = max(len(coefficients) for coefficients in seed)
        if (max_len % 2) == 0:
            max_len += 1  # keep the window center well-defined
        window = []
        for coefficients in seed:
            diff = max_len - len(coefficients)
            # Even difference pads symmetrically; odd puts the extra zero right.
            if (diff % 2) == 0:
                left_pad = diff // 2
                right_pad = diff // 2
            else:
                left_pad = (diff - 1) // 2
                right_pad = (diff + 1) // 2
            window.append((([0] * left_pad) + coefficients) + ([0] * right_pad))
    else:
        # Width mode: seed holds half-window widths for regression windows.
        max_len = (max(seed) * 2) + 1
        assert 1 <= np.min(seed), 'width must be greater than zero'
        window = []
        if static_out:
            # Identity window: a centered one-hot.
            w = np.zeros(max_len)
            w[(max_len - 1) // 2] = 1
            window.append(w)
        # First-order (delta) regression window -- always emitted. (The
        # original code guarded this with a leftover `if True:`; removed.)
        n = seed[0]
        z = 1 / (((n * (n + 1)) * ((2 * n) + 1)) / 3)
        j = np.arange(-n, n + 1)
        pad_width = (max_len - ((n * 2) + 1)) // 2
        window.append(np.pad(j * z, pad_width))
        if 2 <= len(seed):
            # Second-order (delta-delta / acceleration) regression window.
            n = seed[1]
            a0 = (2 * n) + 1
            a1 = ((a0 * n) * (n + 1)) / 3
            a2 = (a1 * ((((3 * n) * n) + (3 * n)) - 1)) / 5
            z = 1 / (2 * ((a2 * a0) - (a1 * a1)))
            j = np.arange(-n, n + 1)
            pad_width = (max_len - ((n * 2) + 1)) // 2
            window.append(np.pad(((((a0 * j) * j) - a1) * z), pad_width))
        if 3 <= len(seed):
            raise ValueError('3rd order regression is not supported')
    window = np.asarray(window)
    return window
def get_numeracy_metric_specs(run_solver: bool=False) -> List[MetricSpec]:
    """Metric specs for numeracy scenarios; optionally include the solver-based
    distance metric (expensive, hence off by default)."""
    specs: List[MetricSpec] = get_basic_metric_specs(['exact_match', 'quasi_exact_match', 'absolute_value_difference'])
    if run_solver:
        specs.append(MetricSpec(class_name='helm.benchmark.metrics.numeracy_metrics.DistanceMetric', args={}))
    return specs
def test_fit_digraph(digraph_logistic_regression):
    """_fit_digraph in local mode must leave every local classifier fitted."""
    digraph_logistic_regression.n_jobs = 2
    digraph_logistic_regression.local_classifiers_ = [LogisticRegression(), LogisticRegression()]
    digraph_logistic_regression._fit_digraph(local_mode=True)
    for clf in digraph_logistic_regression.local_classifiers_:
        try:
            check_is_fitted(clf)
        except NotFittedError as e:
            pytest.fail(repr(e))
    assert 1
def easy_linear_polynomials_via_interpolation(p):
    """Collect degree-1 (linear) polynomials implied by `p`, found by
    interpolating over `p`'s zero set.

    NOTE(review): relies on PolyBoRi helpers (`variety_lex_leading_terms`,
    `nf_lex_points`); semantics inferred from their names -- confirm.
    """
    res = []
    # All variables of p as one monomial, and the 0/1 point space they span.
    p_vars = p.vars_as_monomial()
    space = p_vars.divisors()
    zeros = p.zeros_in(space)
    lex_leads = variety_lex_leading_terms(zeros, p_vars)
    for m in lex_leads:
        # Only linear lex leading terms can yield linear polynomials.
        if (m.deg() == 1):
            # Interpolated remainder makes `red` vanish on `zeros`.
            red = (m + nf_lex_points(m, zeros))
            if (red.lead_deg() == 1):
                res.append(red)
    return res
def _save_to_state_dict(module, destination, prefix, keep_vars): for (name, param) in module._parameters.items(): if (param is not None): destination[(prefix + name)] = (param if keep_vars else param.detach()) for (name, buf) in module._buffers.items(): if (buf is not None): destination[(prefix + name)] = (buf if keep_vars else buf.detach())
class GTSRB(Dataset):
    """German Traffic Sign Recognition Benchmark dataset indexed by a CSV file
    (first column: image path, second column: class id)."""

    base_folder = 'GTSRB'

    def __init__(self, train=False, transform=None):
        self.root_dir = './data'
        # Train and test splits live in separate subfolders with their own CSV.
        self.sub_directory = 'trainingset' if train else 'testset'
        self.csv_file_name = 'training.csv' if train else 'test.csv'
        index_path = os.path.join(self.root_dir, self.base_folder, self.sub_directory, self.csv_file_name)
        self.csv_data = pd.read_csv(index_path)
        self.transform = transform

    def __len__(self):
        # One sample per CSV row.
        return len(self.csv_data)

    def __getitem__(self, idx):
        row = self.csv_data.iloc
        img_path = os.path.join(self.root_dir, self.base_folder, self.sub_directory, row[(idx, 0)])
        label = row[(idx, 1)]
        img = Image.open(img_path)
        if self.transform is not None:
            img = self.transform(img)
        return (img, label)
def uniform_quantizer(tensor_data: np.ndarray, n_bits: int, signed: bool, quantization_params: dict, per_channel: bool, output_channels_axis: int) -> np.ndarray:
    """Uniformly quantize `tensor_data` into `n_bits` using the min/max range
    supplied in `quantization_params`."""
    range_min = quantization_params.get(RANGE_MIN)
    range_max = quantization_params.get(RANGE_MAX)
    # Both ends of the quantization range are mandatory.
    if any(bound is None for bound in (range_min, range_max)):
        Logger.error("'quantization range' parameters must be defined in 'quantization_params'")
    return uniform_quantize_tensor(tensor_data, range_min, range_max, n_bits)
def get_category_from_img_vector(img_vector, image_vectors):
    """Return the key in `image_vectors` whose vector is closest to `img_vector`
    by cosine distance; '' if no distance falls below 2 (the metric's ceiling)."""
    best_cat = ''
    best_dist = 2  # cosine distance upper bound
    for cat, vec in image_vectors.items():
        dist = cosine(img_vector, vec)
        # Strict '<' keeps the first-seen key on exact ties, as before.
        if dist < best_dist:
            best_dist = dist
            best_cat = cat
    return best_cat
def lattice_paths(t1, t2, length=None):
    """Enumerate monotone staircase paths through the grid t1 x t2.

    Without *length*, all maximal paths are returned; with *length*, only
    paths of exactly that many lattice points (diagonal steps allowed).
    """
    t1, t2 = tuple(t1), tuple(t2)

    def extend(paths):
        # Append the shared terminal corner to every partial path.
        corner = (t1[-1], t2[-1])
        return [path + [corner] for path in paths]

    if length is None:
        if not t1 or not t2:
            return [[]]
        if len(t1) == 1:
            return [[(t1[0], w) for w in t2]]
        if len(t2) == 1:
            return [[(v, t2[0]) for v in t1]]
        return (extend(lattice_paths(t1[:-1], t2))
                + extend(lattice_paths(t1, t2[:-1])))
    # Constrained length: a path can have at most len(t1)+len(t2)-1 points.
    if length > len(t1) + len(t2) - 1:
        return []
    if not t1 or not t2:
        return [[]] if length == 0 else []
    if len(t1) == 1:
        return [[(t1[0], w) for w in t2]] if length == len(t2) else []
    if len(t2) == 1:
        return [[(v, t2[0]) for v in t1]] if length == len(t1) else []
    return (extend(lattice_paths(t1[:-1], t2, length=(length - 1)))
            + extend(lattice_paths(t1, t2[:-1], length=(length - 1)))
            + extend(lattice_paths(t1[:-1], t2[:-1], length=(length - 1))))
# NOTE(review): the decorator below appears truncated in the extracted
# source — presumably `@pytest.mark.parametrize`; confirm upstream.
.parametrize('GradientBoosting, X, y', [(HistGradientBoostingClassifier, X_classification, y_classification), (HistGradientBoostingRegressor, X_regression, y_regression)])
def test_warm_start_yields_identical_results(GradientBoosting, X, y):
    """A 50-iteration fit warm-started up to 75 iterations must match a
    single cold 75-iteration fit with the same random state."""
    rng = 42
    gb_warm_start = GradientBoosting(n_iter_no_change=100, max_iter=50, random_state=rng, warm_start=True)
    gb_warm_start.fit(X, y).set_params(max_iter=75).fit(X, y)
    gb_no_warm_start = GradientBoosting(n_iter_no_change=100, max_iter=75, random_state=rng, warm_start=False)
    gb_no_warm_start.fit(X, y)
    # Check that both predictors are equal.
    _assert_predictor_equal(gb_warm_start, gb_no_warm_start, X)
def find_first_capital_letter(answer):
    """Return the first character of *answer* that is one of 'A'-'F',
    or '' when none occurs."""
    for ch in answer:
        if ch in 'ABCDEF':
            return ch
    return ''
class SelfParentPolicy(SetFactoryPolicy):
    """Set-factory policy whose constructed elements use ``self`` (the set
    being built) as their parent."""

    def __init__(self, factory, Element):
        # Remember the element class before delegating to the base policy.
        self._Element = Element
        SetFactoryPolicy.__init__(self, factory)

    def element_constructor_attributes(self, constraints):
        """Constructor attributes for elements parented by ``self``."""
        return self.self_element_constructor_attributes(self._Element)

    def _repr_(self):
        """Short human-readable description of this policy."""
        return 'Set factory policy for {} with parent ``self``'.format(self._Element)
def getSegmentList(corpusName, segmentList, **kwargs):
    """Sprint extern-interface hook: record the segment order in the module
    global and hand the list back unchanged."""
    print('SprintExternInterface: getSegmentList(%r), num segments: %i'
          % (corpusName, len(segmentList)))
    global segmentOrderList
    segmentOrderList = segmentList
    return segmentList
def download_permanent_water(date, bounds):
    """Fetch the JRC GSW yearly-history water image for *date*'s year,
    capped at 2019 (the newest GSW1_2 asset), clipped to *bounds*."""
    year = min(date.year, 2019)
    return ee.Image(f'JRC/GSW1_2/YearlyHistory/{year}').clip(bounds)
def scheduler(epoch, learning_rate):
    """Step-decay schedule: multiply the rate by LEARNING_RATE_DECAY every
    LEARING_RATE_DECAY_EVERY_N_EPOCHS epochs (epoch 0 is untouched)."""
    if epoch > 0 and epoch % LEARING_RATE_DECAY_EVERY_N_EPOCHS == 0:
        learning_rate = learning_rate * LEARNING_RATE_DECAY
        print('Change learning rate to', '{0:.6f}'.format(learning_rate))
    return learning_rate
def cosine_rampdown(current, rampdown_length):
    """Cosine rampdown from 1 to 0 over *rampdown_length* steps.

    *current* is clamped to [0, rampdown_length]; the return value is
    0.5 * (cos(pi * current / rampdown_length) + 1), i.e. 1 at step 0 and
    0 at the end of the ramp.  (The original docstring was truncated in
    the extracted source; this describes the formula the code implements.)
    """
    current = np.clip(current, 0.0, rampdown_length)
    return float((0.5 * (np.cos(((np.pi * current) / rampdown_length)) + 1)))
class ModelPlugin():
    """Base class for TF models: wires a dataset and file logger, manages
    the TF session, and handles checkpoint save/restore.  Subclasses must
    implement build / set_up_train / train."""

    def __init__(self, dataset, logfilepath, args):
        self.args = args
        selectGpuById(self.args.gpu)
        self.logfilepath = logfilepath
        self.logger = LoggerManager(self.logfilepath, __name__)
        self.set_dataset(dataset)

    def set_dataset(self, dataset):
        # Attach the dataset and cache its image tensor shape (NHWC).
        self.logger.info('Setting dataset starts')
        self.dataset = dataset
        self.image = self.dataset.image
        (self.ndata, self.height, self.width, self.nchannel) = self.image.shape
        self.logger.info('Setting dataset ends')

    def build(self, *args, **kwargs):
        raise NotImplementedError('`build` is not implemented for model class {}'.format(self.__class__.__name__))

    def set_up_train(self, *args, **kwargs):
        raise NotImplementedError('`set_up_train` is not implemented for model class {}'.format(self.__class__.__name__))

    def train(self, *args, **kwargs):
        raise NotImplementedError('`train` is not implemented for model class {}'.format(self.__class__.__name__))

    def generate_sess(self):
        # Lazily create the session only when one does not exist yet (EAFP).
        try:
            self.sess
        except AttributeError:
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)

    def initialize(self):
        self.logger.info('Model initialization starts')
        rest_initializer(self.sess)
        self.start_iter = 0
        self.logger.info('Model initialization ends')

    def save(self, global_step, save_dir, reset_option=True):
        # Checkpoint the session; with reset_option, wipe older files first.
        self.logger.info('Model save starts')
        if reset_option:
            for f in glob.glob((save_dir + '*')):
                os.remove(f)
        saver = tf.train.Saver(max_to_keep=5)
        saver.save(self.sess, os.path.join(save_dir, 'model'), global_step=global_step)
        self.logger.info(('Model save in %s' % save_dir))
        self.logger.info('Model save ends')

    def restore(self, save_dir, restore_iter=(- 1)):
        # Restore weights; restore_iter == -1 picks the latest checkpoint,
        # otherwise the explicit 'model-<restore_iter>' file is used.
        self.logger.info('Restoring model starts...')
        saver = tf.train.Saver()
        checkpoint = tf.train.latest_checkpoint(save_dir)
        if (restore_iter == (- 1)):
            # Parse the step number out of the 'model-<step>' basename.
            self.start_iter = int(os.path.basename(checkpoint)[(len('model') + 1):])
        else:
            self.start_iter = restore_iter
            checkpoint = (save_dir + ('model-%d' % restore_iter))
        # NOTE(review): this log string was split across lines in the
        # extracted source; rejoined here — confirm the exact wording.
        self.logger.info('Restoring from {}'.format(checkpoint))
        self.generate_sess()
        saver.restore(self.sess, checkpoint)
        self.logger.info('Restoring model done.')

    def regen_session(self):
        # Tear down the graph and session, then open a fresh session.
        tf.reset_default_graph()
        self.sess.close()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)

    def delete(self):
        tf.reset_default_graph()
        self.logger.remove()
        del self.logger
class Logger(mrl.Module):
    """mrl module that tracks per-env episode statistics and writes scalars,
    histograms and embeddings to TensorBoard plus per-tag CSV files."""

    def __init__(self, average_every=100):
        super().__init__('logger', required_agent_modules=['env'], locals=locals())
        self.average_every = average_every  # window size for console averages
        self.writer = None  # SummaryWriter, created lazily on first write

    def _setup(self):
        # Per-environment running accumulators (vectorized envs).
        self.rewards_per_env = np.zeros((self.env.num_envs,))
        self.steps_per_env = np.zeros((self.env.num_envs,))
        self.episode_rewards = []
        self.episode_steps = []
        self.steps = 0
        self.episodes = 0
        self.tabular = defaultdict(list)
        # Throttling state: last env step at which each tag was written.
        self.last_log_step = defaultdict(int)
        self.log_every_n_steps = self.config.log_every
        self.save_config()

    def lazy_init_writer(self):
        if (self.writer is None):
            self.writer = SummaryWriter(self.agent_folder)

    def update_csv(self, tag, value, step):
        # Append (wall_time, step, value) to the tag's CSV file, writing a
        # header row the first time the file is created.
        fields = ['wall_time', 'step', tag]
        path = os.path.join(self.agent_folder, (((self.agent_name + '__') + tag.replace('/', '__')) + '.csv'))
        if (not os.path.exists(path)):
            with open(path, 'w') as f:
                writer = csv.writer(f, delimiter=',')
                writer.writerow(fields)
        with open(path, 'a') as f:
            writer = csv.writer(f)
            writer.writerow([time.time(), step, value])

    def add_scalar(self, tag, value, log_every=1000, step=None):
        self.lazy_init_writer()
        if (step is None):
            step = self.config.env_steps
        # Rate-limit: write at most once per `log_every` steps per tag.
        if ((step - self.last_log_step[tag]) >= log_every):
            self.last_log_step[tag] = step
            self.writer.add_scalar(tag, value, step)
            self.update_csv(tag, value, step)

    def add_histogram(self, tag, values, log_every=1000, step=None, **kwargs):
        self.lazy_init_writer()
        # Normalize to float32 arrays for the TensorBoard writer.
        if isinstance(values, list):
            values = np.array(values, dtype=np.float32)
        elif isinstance(values, np.ndarray):
            values = values.astype(np.float32)
        if (step is None):
            step = self.config.env_steps
        if ((step - self.last_log_step[tag]) >= log_every):
            self.last_log_step[tag] = step
            self.writer.add_histogram(tag, values, step, **kwargs)

    def add_embedding(self, tag, values, log_every=1000, step=None, **kwargs):
        self.lazy_init_writer()
        if isinstance(values, list):
            values = np.array(values, dtype=np.float32)
        elif isinstance(values, np.ndarray):
            values = values.astype(np.float32)
        # Embeddings must be 2-D: (num_points, embedding_dim).
        assert (len(values.shape) == 2)
        if (step is None):
            step = self.config.env_steps
        if ((step - self.last_log_step[tag]) >= log_every):
            self.last_log_step[tag] = step
            self.writer.add_embedding(mat=values, tag=tag, global_step=step, **kwargs)

    def add_tabular(self, tag, value):
        # Accumulate values for the periodic console table.
        self.tabular[tag].append(value)

    def log_color(self, tag, value='', color='cyan'):
        print(colorize(tag, color=color, bold=True), value)

    def save_config(self):
        # Dump the merged agent/module config as pretty JSON in agent_folder.
        config_json = convert_json({**self.config, **record_attrs(self.module_dict.values())})
        config_json['agent_name'] = self.agent_name
        output = json.dumps(config_json, separators=(',', ':\t'), indent=4, sort_keys=True)
        print(colorize('\nAgent folder:', color='magenta', bold=True))
        print(self.agent_folder)
        print(colorize('\nSaving config:', color='cyan', bold=True))
        print(output)
        with open(os.path.join(self.agent_folder, 'config.json'), 'w') as out:
            out.write(output)

    def flush_console(self):
        # Print a summary table of rolling averages over `average_every`.
        table = [('Environment steps', self.steps), ('Total episodes', self.episodes), ('Avg rewards (last {})'.format(self.average_every), np.mean(self.episode_rewards[(- self.average_every):])), ('Avg episode len (last {})'.format(self.average_every), np.mean(self.episode_steps[(- self.average_every):]))]
        for (k, v) in self.tabular.items():
            table.append(((('Avg ' + k) + ' (last {})'.format(self.average_every)), np.mean(v[(- self.average_every):])))
        table = tabulate(table, headers=['Tag', 'Value'], tablefmt='psql', floatfmt='8.1f')
        print(table)

    def _process_experience(self, experience):
        (rewards, dones) = (experience.reward, experience.trajectory_over)
        self.rewards_per_env += rewards
        self.steps_per_env += 1
        if np.any(dones):
            # Close out finished episodes and reset their accumulators.
            self.episode_rewards += list(self.rewards_per_env[dones])
            self.episode_steps += list(self.steps_per_env[dones])
            self.rewards_per_env[dones] = 0
            self.steps_per_env[dones] = 0
            self.episodes += np.sum(dones)
        self.steps += self.env.num_envs
        # Fires once every log_every_n_steps (steps advance num_envs at a time).
        if ((self.steps % self.log_every_n_steps) < self.env.num_envs):
            self.flush_console()
            self.add_scalar('Train/Episode_rewards', np.mean(self.episode_rewards[(- 30):]))
            self.add_scalar('Train/Episode_steps', np.mean(self.episode_steps[(- 30):]))

    def save(self, save_folder):
        self._save_props(['episode_rewards', 'episode_steps', 'steps', 'episodes', 'tabular', 'last_log_step'], save_folder)

    def load(self, save_folder):
        self._load_props(['episode_rewards', 'episode_steps', 'steps', 'episodes', 'tabular', 'last_log_step'], save_folder)
class Stream_derivative(Stream_unary):
    """Stream of coefficients of the *shift*-fold derivative of a series."""

    def __init__(self, series, shift, is_sparse):
        # shift = order of differentiation.
        self._shift = shift
        super().__init__(series, is_sparse, False)

    # NOTE(review): the decorator below appears truncated in the extracted
    # source (likely `@lazy_attribute`) — confirm against the original.
    _attribute
    def _approximate_order(self):
        # Differentiation lowers the valuation by `shift`, but never below 0.
        if (0 <= self._series._approximate_order <= self._shift):
            return 0
        return (self._series._approximate_order - self._shift)

    def __getitem__(self, n):
        # Coefficient of x^n in d^shift/dx^shift: falling-factorial prefactor
        # (n+1)...(n+shift) times the source coefficient at n + shift.
        return (prod(((n + k) for k in range(1, (self._shift + 1)))) * self._series[(n + self._shift)])

    def __hash__(self):
        return hash((type(self), self._series, self._shift))

    def __eq__(self, other):
        return (isinstance(other, type(self)) and (self._shift == other._shift) and (self._series == other._series))

    def is_nonzero(self):
        return self._series.is_nonzero()
def load_sickr_test(dirpath: str) -> Dict[(str, List[Tuple[(Tuple[(str, str)], float)]])]:
    """Load the annotated SICK-R test split found under *dirpath*."""
    return {'test': load_data_sickr(os.path.join(dirpath, 'SICK_test_annotated.txt'))}
def train(segmentation_module, loader_train, optimizers, history, epoch, args):
    """Run one training epoch of the segmentation model.

    Tracks running loss / accuracy / per-class Jaccard, prints progress
    every 10 batches, appends epoch statistics to *history*, and finally
    adjusts the learning rate.

    BUG FIX: the original reduced the Jaccard tensors with
    ``for j in jaccard: j = j.float().mean()`` which only rebinds the loop
    variable and leaves the list untouched; a list comprehension now
    performs the reduction for real.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    ave_total_loss = AverageMeter()
    ave_acc = AverageMeter()
    # One Jaccard meter per foreground class (background excluded).
    ave_jaccards = [AverageMeter() for _ in range(args.num_class - 1)]
    segmentation_module.train(not args.fix_bn)
    tic = time.time()
    iter_count = 0
    if (epoch == args.start_epoch) and (args.start_epoch > 1):
        # Resuming: re-apply the polynomial LR decay for the skipped epochs.
        scale_running_lr = ((1.0 - (float(epoch - 1) / args.num_epoch)) ** args.lr_pow)
        args.running_lr_encoder = args.lr_encoder * scale_running_lr
        for param_group in optimizers[0].param_groups:
            param_group['lr'] = args.running_lr_encoder
    for batch_data in loader_train:
        data_time.update(time.time() - tic)
        batch_data['image'] = batch_data['image'].cuda()
        segmentation_module.zero_grad()
        (loss, acc) = segmentation_module(batch_data, epoch)
        loss = loss.mean()
        # Reduce the per-class Jaccard tensors across replicas (the fix).
        jaccard = [j.float().mean() for j in acc[1]]
        acc = acc[0].float().mean()
        loss.backward()
        for optimizer in optimizers:
            optimizer.step()
        batch_time.update(time.time() - tic)
        tic = time.time()
        iter_count += args.batch_size_per_gpu
        ave_total_loss.update(loss.data.item())
        ave_acc.update(acc.data.item() * 100)
        for (n, meter) in enumerate(ave_jaccards):
            meter.update(jaccard[n].data.item() * 100)
        if (iter_count % (args.batch_size_per_gpu * 10)) == 0:
            print('Epoch: [{}/{}], Iter: [{}], Time: {:.2f}, Data: {:.2f}, lr_unet: {:.6f}, Accuracy: {:4.2f}, Loss: {:.6f}, Jaccard: '.format(epoch, args.max_iters, iter_count, batch_time.average(), data_time.average(), args.running_lr_encoder, ave_acc.average(), ave_total_loss.average()), end=' ')
            for i in range(len(ave_jaccards)):
                if i == 0:
                    print('[', end=' ')
                print('{:4.2f}'.format(ave_jaccards[i].average()), end=' ')
                if i == (len(ave_jaccards) - 1):
                    print(']')
    # Epoch summary: mean Jaccard over classes, then record history.
    j_avg = sum(j.average() for j in ave_jaccards) / len(ave_jaccards)
    history['train']['epoch'].append(epoch)
    history['train']['loss'].append(loss.data.item())
    history['train']['acc'].append(acc.data.item())
    history['train']['jaccard'].append(j_avg)
    adjust_learning_rate(optimizers, epoch, args)
def get_augmentation(augmentation_type: Augmentation, crop_size: int=32, padding_size: int=4, resize_size: int=256, distributed=True, enable_auto_augmentation=False):
    """Assemble the train-time torchvision transform pipeline for the
    requested *augmentation_type* (ToTensor is appended only when not
    running in distributed mode)."""
    crop_family = [Augmentation.CropAndHorizontalFlip,
                   Augmentation.CropAndHorizontalFlipVerticalFlipRotation]
    ops = []
    if augmentation_type in crop_family:
        ops.append(transforms.RandomCrop(crop_size, padding=padding_size))
    if augmentation_type == Augmentation.ResizeCropAndHorizontalFlip:
        ops.append(transforms.RandomResizedCrop(crop_size))
    if augmentation_type == Augmentation.ResizeCenterCrop:
        ops.append(transforms.Resize(resize_size, interpolation=Image.BICUBIC))
        ops.append(transforms.CenterCrop(crop_size))
    if augmentation_type in (crop_family + [Augmentation.ResizeCropAndHorizontalFlip]):
        ops.append(transforms.RandomHorizontalFlip())
    if augmentation_type == Augmentation.CropAndHorizontalFlipVerticalFlipRotation:
        ops.append(transforms.RandomVerticalFlip())
        ops.append(transforms.RandomRotation([(- 90), 90]))
    # Auto-augment policies are keyed on the canonical crop sizes.
    if enable_auto_augmentation:
        if crop_size == 32:
            ops.append(CIFAR10_AUGMENT_POLICY)
        if crop_size == 224:
            ops.append(IMAGENET_AUGMENT_POLICY)
    if not distributed:
        ops.append(transforms.ToTensor())
    return transforms.Compose(ops)
def set_seeds(seed=0, fully_deterministic=True):
    """Seed every RNG in use (python, numpy, torch, hash seed).

    With *fully_deterministic*, also force deterministic cuDNN kernels and
    disable benchmark autotuning (bit-reproducible but slower).
    """
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    if fully_deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
class ExperimentStats():
    """Plain record of an experiment's progress: cumulative epoch /
    iteration / environment-step counters plus the last rollout path."""

    def __init__(self, total_epoch, total_itr, total_env_steps, last_path):
        # Store all fields verbatim on the instance.
        vars(self).update(total_epoch=total_epoch, total_itr=total_itr,
                          total_env_steps=total_env_steps, last_path=last_path)
class PyObjectPtrPrinter():
    """gdb pretty-printer proxy for a (PyObject*) value.

    The original guarded the proxyval/stringify branch with ``if True:``,
    making the ``else`` arm unreachable dead code; it is removed here
    without changing behavior.
    """

    def __init__(self, gdbval):
        self.gdbval = gdbval  # raw gdb.Value holding the PyObject*

    def to_string(self):
        """Render a truncated repr of the pointed-to Python object."""
        pyop = PyObjectPtr.from_pyobject_ptr(self.gdbval)
        return pyop.get_truncated_repr(MAX_OUTPUT_LEN)
def dumps(plan: optplan.OptimizationPlanSchema) -> str:
    """Serialize *plan* to a JSON string.

    Works on a deep copy: validates references, flattens the node graph
    into a model list with refs replaced by names, re-validates, then
    dumps the primitive representation.
    """
    plan_copy = copy.deepcopy(plan)
    validate_references(plan_copy)
    model_list = []
    _extract_nodes(plan_copy, model_list)
    _replace_ref_nodes_with_names(plan_copy, model_list)
    plan_copy.nodes = model_list
    validate(plan_copy)
    return json.dumps(plan_copy.to_primitive())
def test_enum_statement_delta(test_case_mock):
    """delta() on an enum statement must move to a different, in-range index."""
    enum_ = MagicMock(names=['FOO', 'BAR', 'BAZ'])
    statement = stmt.EnumPrimitiveStatement(test_case_mock, enum_)
    previous_value = statement.value
    statement.delta()
    assert statement.value != previous_value
    assert (0 <= statement.value <= 2)
def dict_matches(span, dictionary):
    """Return every contiguous word n-gram of *span* (lower-cased, space
    joined) that occurs in *dictionary*, ordered by start index then length.

    BUG FIX: the inner loop previously ran ``range(i + 1, len(toks))`` so
    slices never reached the end of the token list — any n-gram ending at
    the last token (including the final single token) could never match.
    The upper bound is now ``len(toks) + 1``.
    """
    matches = []
    toks = span.get_attrib_tokens('words')
    for i in range(len(toks)):
        for j in range(i + 1, len(toks) + 1):
            term = ' '.join(toks[i:j]).lower()
            if term in dictionary:
                matches.append(term)
    return matches
def split_on_punct(doc):
    """Yield sentence-like slices of *doc*: a break happens at the first
    non-punctuation token following a '.', '!' or '?'."""
    start = 0
    pending_break = False
    for word in doc:
        if pending_break and not word.is_punct:
            # First non-punct token after a terminator opens a new slice.
            yield doc[start:word.i]
            start = word.i
            pending_break = False
        elif word.text in ('.', '!', '?'):
            pending_break = True
    # Emit the trailing slice, if any tokens remain.
    if start < len(doc):
        yield doc[start:len(doc)]
def download_power(data_folder):
    """Download the UCI 'Individual household electric power consumption'
    dataset into *data_folder* and write a cleaned ``power.csv``.

    Steps: fetch/unzip, parse the semicolon CSV, index by datetime, add a
    one-step-ahead target column plus row counter and series id, forward-
    fill '?' gaps, and persist.

    FIX: the URL string literal was truncated/unterminated in the source
    (``url = '``); reconstructed as the canonical UCI archive location.
    """
    recreate_folder(data_folder)
    # NOTE(review): reconstructed download URL — confirm against the
    # original script / UCI dataset page.
    url = ('https://archive.ics.uci.edu/ml/machine-learning-databases/'
           '00235/household_power_consumption.zip')
    base_path = os.path.join(data_folder, 'household_power_consumption')
    zip_path = base_path + '.zip'
    csv_path = base_path + '.txt'
    output_path = os.path.join(data_folder, 'power.csv')
    download_and_unzip(url, zip_path, csv_path, data_folder)
    df = pd.read_csv(csv_path, sep=';')
    df.index = pd.to_datetime(df['Date'] + ' ' + df['Time'])
    df = df.sort_index()
    # One-step-ahead prediction target.
    df['Target_active_power'] = df['Global_active_power'].shift(-1)
    df['t'] = list(range(len(df)))
    df['id'] = 0
    # '?' marks missing readings; forward-fill, then drop the trailing NaN
    # row created by the shift.
    df = df.replace('?', np.nan).fillna(method='ffill').dropna()
    df.to_csv(output_path)
def test_poly_intersection():
    """Exercise utils.poly_intersection on disjoint, identical, degenerate,
    self-intersecting and half-overlapping polygons, including the
    return_poly variants."""
    # Non-polygon inputs must be rejected.
    with pytest.raises(AssertionError):
        utils.poly_intersection(0, 1)
    # points:  unit square; points1: far-away quad (no overlap);
    # points2: degenerate (all zeros); points3: self-intersecting bowtie;
    # points4: unit square shifted right by 0.5 (half overlap with points).
    points = [0, 0, 0, 1, 1, 1, 1, 0]
    points1 = [10, 20, 30, 40, 50, 60, 70, 80]
    points2 = [0, 0, 0, 0, 0, 0, 0, 0]
    points3 = [0, 0, 0, 1, 1, 0, 1, 1]
    points4 = [0.5, 0, 1.5, 0, 1.5, 1, 0.5, 1]
    poly = utils.points2polygon(points)
    poly1 = utils.points2polygon(points1)
    poly2 = utils.points2polygon(points2)
    poly3 = utils.points2polygon(points3)
    poly4 = utils.points2polygon(points4)
    area_inters = utils.poly_intersection(poly, poly1)
    assert (area_inters == 0)
    # A polygon fully intersects itself.
    area_inters = utils.poly_intersection(poly, poly)
    assert (area_inters == 1)
    area_inters = utils.poly_intersection(poly, poly4)
    assert (area_inters == 0.5)
    # Degenerate polygon has zero self-intersection area.
    assert (utils.poly_intersection(poly2, poly2) == 0)
    # Invalid (self-intersecting) inputs fall back to invalid_ret when set;
    # with invalid_ret=None the repaired geometry is used instead.
    assert (utils.poly_intersection(poly3, poly3, invalid_ret=1) == 1)
    assert (utils.poly_intersection(poly3, poly3, invalid_ret=None) == 0.25)
    (_, poly) = utils.poly_intersection(poly, poly4, return_poly=True)
    assert isinstance(poly, Polygon)
    (_, poly) = utils.poly_intersection(poly3, poly3, invalid_ret=None, return_poly=True)
    assert isinstance(poly, Polygon)
    # With a non-None invalid_ret no intersection polygon is produced.
    (_, poly) = utils.poly_intersection(poly2, poly3, invalid_ret=1, return_poly=True)
    assert (poly is None)
class TensorFieldModule(UniqueRepresentation, ReflexiveModule_tensor):
    """Module of type-(k,l) tensor fields on a differentiable manifold,
    possibly along a destination map into an ambient manifold."""

    Element = TensorField

    def __init__(self, vector_field_module, tensor_type, category=None):
        domain = vector_field_module._domain
        dest_map = vector_field_module._dest_map
        kcon = tensor_type[0]
        lcov = tensor_type[1]
        # Compose display names of the form T^(k,l)(M[,Phi]).
        name = 'T^({},{})({}'.format(kcon, lcov, domain._name)
        latex_name = '\\mathcal{{T}}^{{({},{})}}\\left({}'.format(kcon, lcov, domain._latex_name)
        if (dest_map is not domain.identity_map()):
            dm_name = dest_map._name
            dm_latex_name = dest_map._latex_name
            if (dm_name is None):
                dm_name = 'unnamed map'
            if (dm_latex_name is None):
                dm_latex_name = '\\mathrm{unnamed\\; map}'
            name += (',' + dm_name)
            latex_name += (',' + dm_latex_name)
        self._name = (name + ')')
        self._latex_name = (latex_name + '\\right)')
        self._vmodule = vector_field_module
        self._tensor_type = tensor_type
        self._ring = domain.scalar_field_algebra()
        category = Modules(self._ring).TensorProducts().or_subcategory(category)
        Parent.__init__(self, base=self._ring, category=category)
        self._domain = domain
        self._dest_map = dest_map
        self._ambient_domain = vector_field_module._ambient_domain

    def _element_constructor_(self, comp=[], frame=None, name=None, latex_name=None, sym=None, antisym=None):
        """Construct an element from zero, a diff form, a multivector field,
        an automorphism field, another tensor field, or raw components."""
        # EAFP: scalar-field-like inputs expose is_trivial_zero().
        try:
            if comp.is_trivial_zero():
                return self.zero()
        except AttributeError:
            if (comp == 0):
                return self.zero()
        if isinstance(comp, DiffForm):
            # A degree-p form converts to a type-(0,p) antisymmetric tensor.
            form = comp
            p = form.degree()
            if ((self._tensor_type != (0, p)) or (self._vmodule != form.base_module())):
                raise TypeError(('cannot convert the {}'.format(form) + ' to an element of {}'.format(self)))
            if (p == 1):
                asym = None
            else:
                asym = range(p)
            resu = self.element_class(self._vmodule, (0, p), name=form._name, latex_name=form._latex_name, antisym=asym)
            for (dom, rst) in form._restrictions.items():
                resu._restrictions[dom] = dom.tensor_field_module((0, p))(rst)
            return resu
        if isinstance(comp, MultivectorField):
            # A degree-p multivector converts to a type-(p,0) tensor.
            pvect = comp
            p = pvect.degree()
            if ((self._tensor_type != (p, 0)) or (self._vmodule != pvect.base_module())):
                raise TypeError(('cannot convert the {}'.format(pvect) + ' to an element of {}'.format(self)))
            if (p == 1):
                asym = None
            else:
                asym = range(p)
            resu = self.element_class(self._vmodule, (p, 0), name=pvect._name, latex_name=pvect._latex_name, antisym=asym)
            for (dom, rst) in pvect._restrictions.items():
                resu._restrictions[dom] = dom.tensor_field_module((p, 0))(rst)
            return resu
        if isinstance(comp, AutomorphismField):
            # Automorphism fields are type-(1,1) tensors.
            autom = comp
            if ((self._tensor_type != (1, 1)) or (self._vmodule != autom.base_module())):
                raise TypeError(('cannot convert the {}'.format(autom) + ' to an element of {}'.format(self)))
            resu = self.element_class(self._vmodule, (1, 1), name=autom._name, latex_name=autom._latex_name)
            for (dom, rest) in autom._restrictions.items():
                resu._restrictions[dom] = dom.tensor_field_module((1, 1))(rest)
            return resu
        if isinstance(comp, TensorField):
            # Same tensor type on a larger domain: restrict to our domain.
            if ((self._tensor_type == comp._tensor_type) and self._domain.is_subset(comp._domain) and self._ambient_domain.is_subset(comp._ambient_domain)):
                return comp.restrict(self._domain)
            else:
                raise TypeError(('cannot convert the {}'.format(comp) + ' to an element of {}'.format(self)))
        if (not isinstance(comp, (list, tuple))):
            raise TypeError(('cannot convert the {} '.format(comp) + 'to an element of {}'.format(self)))
        # Raw component list/tuple, set in the given frame.
        resu = self.element_class(self._vmodule, self._tensor_type, name=name, latex_name=latex_name, sym=sym, antisym=antisym)
        if comp:
            resu.set_comp(frame)[:] = comp
        return resu

    def _an_element_(self):
        # Sample element, restricted over the first non-trivial open cover
        # when one exists.
        resu = self.element_class(self._vmodule, self._tensor_type)
        for oc in self._domain.open_covers(trivial=False):
            for dom in oc:
                vmodule_dom = dom.vector_field_module(dest_map=self._dest_map.restrict(dom))
                tmodule_dom = vmodule_dom.tensor_module(*self._tensor_type)
                resu.set_restriction(tmodule_dom._an_element_())
            # NOTE(review): indentation of these returns is ambiguous in the
            # extracted source; returning after the first cover matches the
            # apparent intent — confirm upstream.
            return resu
        return resu

    def _coerce_map_from_(self, other):
        # Coercions from compatible tensor/diff-form/multivector modules and
        # the automorphism-field group.
        from sage.manifolds.differentiable.diff_form_module import DiffFormModule
        from sage.manifolds.differentiable.multivector_module import MultivectorModule
        from sage.manifolds.differentiable.automorphismfield_group import AutomorphismFieldGroup
        if isinstance(other, (TensorFieldModule, TensorFieldFreeModule)):
            return ((self._tensor_type == other._tensor_type) and self._domain.is_subset(other._domain) and self._ambient_domain.is_subset(other._ambient_domain))
        if isinstance(other, DiffFormModule):
            return ((self._vmodule is other.base_module()) and (self._tensor_type == (0, other.degree())))
        if isinstance(other, MultivectorModule):
            return ((self._vmodule is other.base_module()) and (self._tensor_type == (other.degree(), 0)))
        if isinstance(other, AutomorphismFieldGroup):
            return ((self._vmodule is other.base_module()) and (self._tensor_type == (1, 1)))
        return False

    def _repr_(self):
        description = 'Module '
        if (self._name is not None):
            description += (self._name + ' ')
        description += 'of type-({},{})'.format(self._tensor_type[0], self._tensor_type[1])
        description += ' tensors fields '
        if (self._dest_map is self._domain.identity_map()):
            description += 'on the {}'.format(self._domain)
        else:
            description += ('along the {}'.format(self._domain) + ' mapped into the {}'.format(self._ambient_domain))
        return description

    def _latex_(self):
        if (self._latex_name is None):
            return (('\\text{' + str(self)) + '}')
        else:
            return self._latex_name

    def base_module(self):
        # Underlying module of vector fields.
        return self._vmodule

    def tensor_type(self):
        # The pair (k, l) of contravariant/covariant degrees.
        return self._tensor_type

    # NOTE(review): the decorator below appears truncated in the extracted
    # source (likely `@cached_method`) — confirm against the original.
    _method
    def zero(self):
        """The zero tensor field, with vanishing components registered in
        every compatible frame; returned immutable."""
        resu = self._element_constructor_(name='zero', latex_name='0')
        for frame in self._domain._frames:
            if (self._dest_map.restrict(frame._domain) == frame._dest_map):
                resu.add_comp(frame)
        resu._is_zero = True
        resu.set_immutable()
        return resu
def put_acquire_memoryviewslice(lhs_cname, lhs_type, lhs_pos, rhs, code, have_gil=False, first_assignment=True):
    """Emit C code acquiring memoryview slice *rhs* into *lhs_cname*.

    A non-simple RHS is first spilled into a temporary, which is released
    again after the assignment is generated.
    """
    assert rhs.type.is_memoryviewslice
    rhs_is_simple = rhs.result_in_temp() or rhs.is_simple()
    if rhs_is_simple:
        rhstmp = rhs.result()
    else:
        # Materialize the RHS in a fresh unmanaged temp before assigning.
        rhstmp = code.funcstate.allocate_temp(lhs_type, manage_ref=False)
        code.putln('%s = %s;' % (rhstmp, rhs.result_as(lhs_type)))
    put_assign_to_memviewslice(lhs_cname, rhs, rhstmp, lhs_type, code,
                               have_gil=have_gil,
                               first_assignment=first_assignment)
    if not rhs_is_simple:
        code.funcstate.release_temp(rhstmp)