code
stringlengths
101
5.91M
@pytest.mark.serial  # NOTE(review): reconstructed from the garbled ".serial" fragment — confirm the original marker name
def test_log_multitask_performance_task_name():
    """log_multitask_performance should emit per-task-name CSV columns.

    Four trajectories over three task names:
      env1 -> 2 trajs, one with a success step -> SuccessRate 0.5
      env2 -> 1 traj with a success step       -> SuccessRate 1.0
      env3 -> 1 traj with a success step       -> SuccessRate 1.0
    """
    lengths = np.array([10, 5, 1, 1])
    batch = TrajectoryBatch(
        EnvSpec(akro.Box(np.array([0.0, 0.0, 0.0]), np.array([1.0, 1.0, 1.0])),
                akro.Box(np.array([-1.0, -1.0]), np.array([0.0, 0.0]))),
        observations=np.ones((sum(lengths), 3), dtype=np.float32),
        last_observations=np.ones((len(lengths), 3), dtype=np.float32),
        actions=np.zeros((sum(lengths), 2), dtype=np.float32),
        rewards=np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
                          0., 0., 0., 0.4332891]),
        terminals=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
                           dtype=bool),
        env_infos={
            'success': np.array([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1,
                                 0, 1], dtype=bool),
            'task_name': np.array((['env1'] * 10) + (['env2'] * 5) + ['env1']
                                  + ['env3']),
        },
        agent_infos={},
        lengths=lengths)
    # Log the batch through a temporary CSV output and read it back.
    log_file = tempfile.NamedTemporaryFile()
    csv_output = dowel.CsvOutput(log_file.name)
    logger.add_output(csv_output)
    log_multitask_performance(7, batch, 0.8)
    logger.log(tabular)
    logger.dump_output_type(dowel.CsvOutput)
    with open(log_file.name, 'r') as file:
        rows = list(csv.DictReader(file))
    res = {k: float(r) for (k, r) in rows[0].items()}
    assert res['env1/Iteration'] == 7
    assert res['env2/Iteration'] == 7
    assert res['env3/Iteration'] == 7
    assert res['env1/NumTrajs'] == 2
    assert res['env2/NumTrajs'] == 1
    assert res['env3/NumTrajs'] == 1
    assert math.isclose(res['env1/SuccessRate'], 0.5)
    assert math.isclose(res['env2/SuccessRate'], 1.0)
    assert math.isclose(res['env3/SuccessRate'], 1.0)
def test_cast_as_tensor_numpy_bool():
    """0-d numpy bool arrays should cast to torch.bool scalars."""
    for flag in (False, True):
        _test_cast(numpy.array(flag), torch.bool, 0)
class ConvUnet(nn.Module):
    """Coarse inpainting network: 4-channel input (masked RGB + mask), a
    strided encoder (x4 downsampling), four dilated bottleneck convs, and a
    decoder with two x2 nearest-neighbour upsamplings to a tanh RGB image."""

    def __init__(self):
        super().__init__()
        # Encoder: enc_1 and enc_4 carry stride 2 (x4 total downsampling).
        self.enc_0 = nn.Sequential(nn.ReflectionPad2d(1), Conv(4, 64, 3, 1, 0, normalize=False))
        self.enc_1 = nn.Sequential(nn.ReflectionPad2d(1), Conv(64, 128, 3, 2, 0))
        self.enc_2 = nn.Sequential(nn.ReflectionPad2d(1), Conv(128, 128, 3, 1, 0))
        self.enc_3 = nn.Sequential(nn.ReflectionPad2d(1), Conv(128, 128, 3, 1, 0))
        self.enc_4 = nn.Sequential(nn.ReflectionPad2d(1), Conv(128, 256, 3, 2, 0))
        # Dilated bottleneck: grows the receptive field at constant resolution.
        self.dil_0 = nn.Sequential(nn.ReflectionPad2d(2), Conv(256, 256, 3, 1, 0, dilation=2))
        self.dil_1 = nn.Sequential(nn.ReflectionPad2d(2), Conv(256, 256, 3, 1, 0, dilation=2))
        self.dil_2 = nn.Sequential(nn.ReflectionPad2d(2), Conv(256, 256, 3, 1, 0, dilation=2))
        self.dil_3 = nn.Sequential(nn.ReflectionPad2d(2), Conv(256, 256, 3, 1, 0, dilation=2))
        # Decoder mirrors the encoder.
        self.dec_5 = nn.Sequential(nn.ReflectionPad2d(1), Conv(256, 256, 3, 1, 0))
        self.dec_4 = nn.Sequential(nn.ReflectionPad2d(1), Conv(256, 256, 3, 1, 0))
        self.dec_3 = nn.Sequential(nn.ReflectionPad2d(1), Conv(256, 128, 3, 1, 0))
        self.dec_2 = nn.Sequential(nn.ReflectionPad2d(1), Conv(128, 128, 3, 1, 0))
        self.dec_1 = nn.Sequential(nn.ReflectionPad2d(1), Conv(128, 64, 3, 1, 0))
        self.dec_0 = nn.Sequential(nn.ReflectionPad2d(1), Conv(64, 32, 3, 1, 0),
                                   nn.ReflectionPad2d(1), Conv(32, 32, 3, 1, 0, normalize=False))
        self.post_dec = nn.Sequential(nn.Conv2d(32, 3, 1, 1, 0), nn.Tanh())

    def forward(self, occl_img, mask):
        """Inpaint `occl_img` under `mask`; returns a tanh-range RGB tensor."""
        feat = torch.cat([(occl_img * mask), mask], 1)
        for stage in (self.enc_0, self.enc_1, self.enc_2, self.enc_3, self.enc_4,
                      self.dil_0, self.dil_1, self.dil_2, self.dil_3,
                      self.dec_5, self.dec_4):
            feat = stage(feat)
        # F.interpolate replaces the deprecated F.upsample; mode='nearest'
        # matches F.upsample's default, so outputs are unchanged.
        feat = F.interpolate(feat, scale_factor=2, mode='nearest')
        feat = self.dec_3(feat)
        feat = self.dec_2(feat)
        feat = F.interpolate(feat, scale_factor=2, mode='nearest')
        feat = self.dec_1(feat)
        feat = self.dec_0(feat)
        return self.post_dec(feat)
class DeiTFeatureExtractor(DeiTImageProcessor):
    """Deprecated alias of DeiTImageProcessor, kept for backward compatibility.

    Warns on construction and otherwise behaves exactly like its parent.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Emit the deprecation notice, then defer entirely to the new class.
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use DeiTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
@app.route('/api', methods=['POST'])  # NOTE(review): decorator object reconstructed from truncated source — confirm the Flask app is named `app`
def API():
    """Handle POST /api: decode the JSON request body and return the
    generated API response."""
    args = json.loads(request.data)
    return generate_api_response(args)
def test_simulation_tvgoal():
    """Run a two-player simulation with time-varying goals and render the
    HTML report into OUT_TESTS_DIR."""
    sim_context = get_simple_scenario()
    # Decimal-valued timing: 10ms physics step, 100ms command step, keep
    # simulating 1s after a collision, hard stop at 3s.
    sim_params2 = SimParameters(dt=D('0.01'), dt_commands=D('0.1'), sim_time_after_collision=D(1), max_sim_time=D(3))
    sim_context.param = sim_params2
    # Time-varying goals: initial position x0 and velocity v0 per player.
    tv_goal1 = TVgoal(x0=(15, 5), v0=(3, 3))
    tv_goal2 = TVgoal(x0=(3, 3), v0=((- 3), 4))
    missions = {P1: tv_goal1, P2: tv_goal2}
    # fd(...) presumably freezes the mapping — confirm against its definition.
    sim_context.missions = fd(missions)
    sim_context = run_simulation(sim_context)
    report = generate_report(sim_context)
    report_file = os.path.join(OUT_TESTS_DIR, 'sim_tvgoals.html')
    report.to_html(report_file)
class Decoder(nn.Module):
    """Stack of `config.num_decoder_layers` identical DecoderLayer modules."""

    def __init__(self, config):
        super(Decoder, self).__init__()
        prototype = DecoderLayer(config)
        # Deep-copy one prototype layer per requested depth.
        self.layer = nn.ModuleList(
            [copy.deepcopy(prototype) for _ in range(config.num_decoder_layers)])

    def forward(self, hidden_states, encoder_outs, self_attn_mask, attention_mask,
                output_all_encoded_layers=False):
        """Run all layers; collect every layer's output when
        `output_all_encoded_layers`, else only the last one."""
        all_encoder_layers = []
        all_dec_att_probs = []
        dec_att_scores = None
        for decoder_layer in self.layer:
            hidden_states, dec_att_scores = decoder_layer(
                hidden_states, encoder_outs, self_attn_mask, attention_mask)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
                all_dec_att_probs.append(dec_att_scores)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
            all_dec_att_probs.append(dec_att_scores)
        return all_encoder_layers, all_dec_att_probs
def main():
    """Evaluate the rule-based evidence-inference baseline on the test split.

    Returns:
        (accuracy, macro-F1, macro-precision, macro-recall) over test prompts.
    """
    prompts = np.asarray(preprocessor.read_prompts())
    annotations = np.asarray(preprocessor.read_annotations())
    data = {}
    (preds, y_test) = ([], [])
    sentences = []
    # Index prompt rows by prompt id; columns assumed to be
    # (id, xml, outcome, intervention, comparator) — TODO confirm against preprocessor.
    for p in prompts:
        data[p[0]] = {'xml': p[1], 'outcome': p[2], 'intervention': p[3], 'comparator': p[4], 'answer': '', 'reasoning': ''}
    # Merge annotations; presumably a[3]/a[4] flag answer/reasoning rows and
    # a[1] is the prompt id — verify against the annotation schema.
    for a in annotations:
        if a[3]:
            data[a[1]]['answer'] = a[7]
        if a[4]:
            data[a[1]]['reasoning'] += (str(a[6]) + '; ')
    test_id = preprocessor.test_document_ids()
    for k in data.keys():
        id_ = data[k]['xml']
        # Only score prompts whose article is in the held-out test set.
        if (not (id_ in test_id)):
            continue
        out = try_except_parse(data[k]['outcome'])
        inter = try_except_parse(data[k]['intervention'])
        cmp = try_except_parse(data[k]['comparator'])
        ans = try_except_parse(data[k]['answer'])
        # Skip prompts without a gold answer.
        if (ans == ''):
            continue
        y_test.append(ans)
        article = preprocessor.get_article(id_)
        text = preprocessor.extract_raw_text(article).lower()
        (likely_sentence, pt_array) = locate_probable_sentence(text, out, inter, cmp)
        guess = eval_sentence(likely_sentence, out, inter, cmp)
        sentences.append(pt_array)
        # Map the textual verdict onto {-1, 0, +1} labels; any unrecognized
        # verdict falls through to +1 (significantly increased).
        if (guess == 'No significant difference'):
            preds.append(0)
        elif (guess == 'Significantly decreased'):
            preds.append((- 1))
        else:
            preds.append(1)
    acc = accuracy_score(y_test, preds)
    f1 = f1_score(y_test, preds, average='macro')
    prec = precision_score(y_test, preds, average='macro')
    rec = recall_score(y_test, preds, average='macro')
    return (acc, f1, prec, rec)
def test_set(doc):
    """Round-trip Python sets through the bound cast/load helpers."""
    s = m.cast_set()
    assert s == {'key1', 'key2'}
    s.add('key3')
    # Both mutable sets and frozensets must be accepted on the way in.
    assert m.load_set(s)
    assert m.load_set(frozenset(s))
    # Generated signatures as rendered by the doc helper.
    assert doc(m.cast_set) == 'cast_set() -> Set[str]'
    assert doc(m.load_set) == 'load_set(arg0: Set[str]) -> bool'
def build_scheduler(optimizer, total_iters_each_epoch, total_epochs, last_epoch, optim_cfg):
    """Build the LR scheduler (and optional warmup scheduler) from `optim_cfg`.

    Args:
        optimizer: the torch optimizer to schedule.
        total_iters_each_epoch (int): iterations per epoch.
        total_epochs (int): epochs to train.
        last_epoch (int): resume index (-1 starts fresh).
        optim_cfg: config exposing OPTIMIZER, LR, LR_DECAY, LR_CLIP,
            DECAY_STEP_LIST, LR_WARMUP, WARMUP_EPOCH, DIV_FACTOR, MOMS, PCT_START.

    Returns:
        (lr_scheduler, lr_warmup_scheduler); the warmup scheduler is None
        unless optim_cfg.LR_WARMUP is set on the non-onecycle path.
    """
    # Decay boundaries expressed in absolute iterations.
    decay_steps = [(x * total_iters_each_epoch) for x in optim_cfg.DECAY_STEP_LIST]

    def lr_lbmd(cur_epoch):
        # Multiply LR_DECAY once per boundary passed, floored at LR_CLIP/LR.
        cur_decay = 1
        for decay_step in decay_steps:
            if (cur_epoch >= decay_step):
                cur_decay = (cur_decay * optim_cfg.LR_DECAY)
        return max(cur_decay, (optim_cfg.LR_CLIP / optim_cfg.LR))

    lr_warmup_scheduler = None
    total_steps = (total_iters_each_epoch * total_epochs)
    if (optim_cfg.OPTIMIZER == 'adam_onecycle'):
        lr_scheduler = OneCycle(optimizer, total_steps, optim_cfg.LR, list(optim_cfg.MOMS), optim_cfg.DIV_FACTOR, optim_cfg.PCT_START)
    else:
        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)
        if optim_cfg.LR_WARMUP:
            # BUGFIX: the original computed T_max with len(total_iters_each_epoch),
            # a TypeError on an int; the warmup horizon is
            # WARMUP_EPOCH * iterations-per-epoch.
            lr_warmup_scheduler = CosineWarmupLR(optimizer, T_max=(optim_cfg.WARMUP_EPOCH * total_iters_each_epoch), eta_min=(optim_cfg.LR / optim_cfg.DIV_FACTOR))
    return (lr_scheduler, lr_warmup_scheduler)
def parse_example_proto(examples_serialized, have_image_id=False):
    """Parse one serialized TF Example holding an image plus rotated-bbox
    grasp labels.

    Returns the dict of decoded features: dense image/meta fields and
    variable-length bbox fields.
    """
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
        'image/filename': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
        'image/height': tf.FixedLenFeature([], dtype=tf.int64),
        'image/width': tf.FixedLenFeature([], dtype=tf.int64),
    }
    if have_image_id:
        feature_map['object/id'] = tf.FixedLenFeature([], dtype=tf.int64)
    # Four corner points of the rotated bounding box.
    for corner in range(4):
        feature_map['bbox/y' + str(corner)] = tf.VarLenFeature(dtype=tf.float32)
        feature_map['bbox/x' + str(corner)] = tf.VarLenFeature(dtype=tf.float32)
    # Center, orientation encodings, and size of the box.
    for key in ('bbox/cy', 'bbox/cx', 'bbox/tan', 'bbox/theta',
                'bbox/sin_theta', 'bbox/cos_theta', 'bbox/width', 'bbox/height'):
        feature_map[key] = tf.VarLenFeature(dtype=tf.float32)
    feature_map['bbox/grasp_success'] = tf.VarLenFeature(dtype=tf.int64)
    return tf.parse_single_example(examples_serialized, feature_map)
class DAM(BaseDAM):
    """CAM variant whose channel weights are the gradient means along axis 1."""

    def __init__(self, model, target_layer, use_cuda=False):
        super(DAM, self).__init__(model, target_layer, use_cuda)

    def get_cam_weights(self, input_tensor, target_category, activations, grads):
        # Each activation map is weighted by the mean of its gradients.
        # input_tensor/target_category/activations are unused here but kept
        # for the BaseDAM hook signature.
        return np.mean(grads, axis=1)
def retry_with_exponential_backoff(func, initial_delay: float=1, exponential_base: float=2, max_delay: float=8, jitter: bool=True, max_retries: int=20, errors: tuple=(openai.error.RateLimitError, openai.error.APIConnectionError, openai.error.APIError, openai.error.ServiceUnavailableError)):
    """Wrap `func` so transient OpenAI errors are retried with capped
    exponential backoff.

    Args:
        func: callable to wrap.
        initial_delay: first sleep in seconds.
        exponential_base: per-retry growth factor.
        max_delay: hard cap on any single sleep, in seconds.
        jitter: randomize growth by a factor in [1, 2) to avoid thundering herds.
        max_retries: raises after this many failed attempts.
        errors: exception types that trigger a retry; anything else propagates.

    Returns:
        The wrapped callable.
    """
    def wrapper(*args, **kwargs):
        num_retries = 0
        delay = initial_delay
        while True:
            try:
                return func(*args, **kwargs)
            except errors as e:
                num_retries += 1
                print('<error>', e, '</error>')
                if num_retries > max_retries:
                    raise Exception(f'Maximum number of retries ({max_retries}) exceeded.')
                # BUGFIX: the original wrote
                #   delay *= min(exponential_base * (1 + jitter*random), max_delay)
                # which caps the *growth factor* at max_delay and lets the
                # actual sleep grow without bound. Cap the delay itself.
                delay = min(delay * exponential_base * (1 + (jitter * random.random())), max_delay)
                time.sleep(delay)
            # Non-retryable exceptions propagate unchanged (the original's
            # `except Exception: raise e` was a no-op re-raise).
    return wrapper
def test_ocr_seg_dataset():
    """OCRSegDataset: pre_pipeline, annotation parsing/validation, and the
    word/char accuracy evaluation."""
    tmp_dir = tempfile.TemporaryDirectory()
    ann_file = osp.join(tmp_dir.name, 'fake_data.txt')
    (ann_info1, ann_info2) = _create_dummy_ann_file(ann_file)
    loader = _create_dummy_loader()
    dataset = OCRSegDataset(ann_file, loader, pipeline=[])
    # The annotation file is only read during construction, so it can go now.
    tmp_dir.cleanup()
    img_info = dataset.data_infos[0]
    results = dict(img_info=img_info)
    dataset.pre_pipeline(results)
    assert (results['img_prefix'] == dataset.img_prefix)
    # _parse_anno_info must reject non-list input and malformed entries.
    annos = ann_info1['annotations']
    with pytest.raises(AssertionError):
        dataset._parse_anno_info(annos[0])
    annos2 = ann_info2['annotations']
    with pytest.raises(AssertionError):
        dataset._parse_anno_info([{'char_text': 'i'}])
    with pytest.raises(AssertionError):
        dataset._parse_anno_info([{'char_box': [1, 2, 3, 4, 5, 6, 7, 8]}])
    # A char_box with the wrong arity must also be rejected.
    annos2[0]['char_box'] = [1, 2, 3]
    with pytest.raises(AssertionError):
        dataset._parse_anno_info(annos2)
    return_anno = dataset._parse_anno_info(annos)
    assert (return_anno['chars'] == ['F', 'r', 'o', 'm', ':'])
    assert (len(return_anno['char_rects']) == 5)
    expect_results = {'img_info': {'filename': 'sample1.png'}, 'img_prefix': '', 'ann_info': return_anno}
    data = dataset.prepare_train_img(0)
    assert (data == expect_results)
    # One of two predicted words fully correct -> word_acc 0.5; all predicted
    # chars occur in the gold text -> precision 1.0, recall 6/7 ~ 0.857.
    metric = 'acc'
    results = [{'text': 'From:'}, {'text': 'ou'}]
    eval_res = dataset.evaluate(results, metric)
    assert math.isclose(eval_res['word_acc'], 0.5, abs_tol=0.0001)
    assert math.isclose(eval_res['char_precision'], 1.0, abs_tol=0.0001)
    assert math.isclose(eval_res['char_recall'], 0.857, abs_tol=0.0001)
def get_model_complexity_info(model, input_shape, print_per_layer_stat=True, as_strings=True, input_constructor=None, flush=False, ost=sys.stdout):
    """Measure a model's FLOPs and parameter count with a dummy forward pass.

    Args:
        model (nn.Module): the model to profile.
        input_shape (tuple): shape of one sample, without the batch dim.
        print_per_layer_stat: also print a per-layer breakdown to `ost`.
        as_strings: return human-readable strings instead of raw numbers.
        input_constructor: optional callable mapping input_shape -> kwargs
            dict for models with non-tensor / multiple inputs.
        flush, ost: forwarded to the per-layer printer.

    Returns:
        (flops, params) — strings when `as_strings`, else raw counts.
    """
    assert type(input_shape) is tuple
    assert len(input_shape) >= 1
    assert isinstance(model, nn.Module)
    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count()
    if input_constructor:
        # Caller supplies the model kwargs (renamed from `input`, which
        # shadowed the builtin).
        inputs = input_constructor(input_shape)
        _ = flops_model(**inputs)
    else:
        try:
            # Match the model's own parameter dtype/device for the dummy batch.
            param = next(flops_model.parameters())
            batch = torch.ones(()).new_empty((1, *input_shape), dtype=param.dtype, device=param.device)
        except StopIteration:
            # Parameter-free model: fall back to a default CPU float tensor.
            batch = torch.ones(()).new_empty((1, *input_shape))
        _ = flops_model(batch)
    (flops_count, params_count) = flops_model.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(flops_model, flops_count, params_count, ost=ost, flush=flush)
    flops_model.stop_flops_count()
    if as_strings:
        return (flops_to_string(flops_count), params_to_string(params_count))
    return (flops_count, params_count)
class Stanford40Data(data.Dataset):
    """Stanford-40 actions dataset with optional few-shot subsampling and
    backdoor-trigger poisoning of a random `portion` of the samples.

    Poisoned samples get a trigger image pasted on and are relabeled to
    class 0. NOTE(review): indentation below is reconstructed from a
    collapsed source line — confirm against the original repository.
    """

    def __init__(self, root, is_train=False, transform=None, shots=(- 1), seed=0, preload=False, portion=0, only_change_pic=False, fixed_pic=False, four_corner=False, return_raw=False, is_poison=False):
        self.num_classes = 40
        self.transform = transform
        self.portion = portion          # fraction of samples to poison (0 disables)
        self.fixed_pic = fixed_pic      # paste trigger at a fixed location
        self.return_raw = return_raw    # also return the un-poisoned image
        self.four_corner = four_corner  # trigger in all four corners
        first_line = True
        self.cls_names = []
        # actions.txt: one header line, then one "<action>\t..." row per class.
        with open(os.path.join(root, 'ImageSplits', 'actions.txt')) as f:
            for line in f:
                if first_line:
                    first_line = False
                    continue
                self.cls_names.append(line.split('\t')[0].strip())
        if is_train:
            post = 'train'
        else:
            post = 'test'
        self.labels = []
        self.image_path = []
        # One "<class>_{train,test}.txt" split file per class.
        for (label, cls_name) in enumerate(self.cls_names):
            with open(os.path.join(root, 'ImageSplits', '{}_{}.txt'.format(cls_name, post))) as f:
                for line in f:
                    self.labels.append(label)
                    self.image_path.append(os.path.join(root, 'JPEGImages', line.strip()))
        if is_train:
            # Few-shot subsampling: keep up to `shots` images per class
            # (shots == -1 keeps everything, since count never equals -1).
            self.labels = np.array(self.labels)
            new_image_path = []
            new_labels = []
            for c in range(self.num_classes):
                ids = np.where((self.labels == c))[0]
                # Re-seeding per class makes the selection reproducible.
                random.seed(seed)
                random.shuffle(ids)
                count = 0
                for i in ids:
                    new_image_path.append(self.image_path[i])
                    new_labels.append(self.labels[i])
                    count += 1
                    if (count == shots):
                        break
            self.labels = new_labels
            self.image_path = new_image_path
        self.imgs = []
        if preload:
            # Optionally decode every image up front to trade memory for speed.
            for (idx, p) in enumerate(self.image_path):
                if ((idx % 100) == 0):
                    print('Loading {}/{}...'.format((idx + 1), len(self.image_path)))
                self.imgs.append(Image.open(p).convert('RGB'))
        self.chosen = []
        if self.portion:
            # Indices that receive the backdoor trigger in __getitem__.
            self.chosen = random.sample(range(len(self.labels)), int((self.portion * len(self.labels))))

    def __getitem__(self, index):
        # Use the preloaded cache when available.
        if (len(self.imgs) > 0):
            img = self.imgs[index]
        else:
            img = Image.open(self.image_path[index]).convert('RGB')
        ret_index = self.labels[index]
        raw_label = self.labels[index]
        # NOTE(review): the rest assumes `transform` is a sequence split into a
        # pre-trigger half ([:2]) and post-trigger half ([-2:]); a None
        # transform would leave transform_step2 failing — confirm callers
        # always pass one.
        if (self.transform is not None):
            transform_step1 = transforms.Compose(self.transform[:2])
            img = transform_step1(img)
        raw_img = img.copy()
        if (self.portion and (index in self.chosen)):
            firefox = Image.open('./backdoor_dataset/firefox.png')
            img = (add4trig(img, firefox) if self.four_corner else addtrigger(img, firefox, self.fixed_pic))
            ret_index = 0  # poisoned samples are relabeled to class 0
        transform_step2 = transforms.Compose(self.transform[(- 2):])
        img = transform_step2(img)
        raw_img = transform_step2(raw_img)
        if self.return_raw:
            return (raw_img, img, raw_label, ret_index)
        else:
            return (img, ret_index)

    def __len__(self):
        return len(self.labels)
def start_training(cfg):
    """End-to-end Horovod-distributed training loop for the VQA model.

    Sets up model/optimizer/AMP, derives an absolute step budget from the
    epoch budget, then runs gradient-accumulated training with periodic
    validation and checkpointing. Only rank 0 logs and writes artifacts.
    """
    set_random_seed(cfg.seed)
    n_gpu = hvd.size()
    cfg.n_gpu = n_gpu
    device = torch.device('cuda', hvd.local_rank())
    torch.cuda.set_device(hvd.local_rank())
    if (hvd.rank() != 0):
        LOGGER.disabled = True
    LOGGER.info('device: {} n_gpu: {}, rank: {}, 16-bits training: {}'.format(device, n_gpu, hvd.rank(), bool(cfg.fp16)))
    model = setup_model(cfg, device=device)
    model.train()
    optimizer = setup_e2e_optimizer(model, cfg)
    # Horovod: wrap the optimizer and broadcast initial state from rank 0.
    compression = hvd.Compression.none
    optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters(), compression=compression)
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(optimizer, root_rank=0)
    (model, optimizer) = amp.initialize(model, optimizer, enabled=cfg.fp16, opt_level='O2', keep_batchnorm_fp32=True)
    tokenizer = BertTokenizerFast.from_pretrained(cfg.tokenizer_dir)
    (train_loader, val_loader) = setup_dataloaders(cfg, tokenizer)
    # Convert the epoch budget into an absolute step budget; valid_steps is
    # rounded up to a multiple of min_valid_steps.
    total_n_examples = (len(train_loader.dataset) * cfg.max_n_example_per_group)
    total_train_batch_size = int((((n_gpu * cfg.train_batch_size) * cfg.gradient_accumulation_steps) * cfg.max_n_example_per_group))
    cfg.num_train_steps = int(math.ceil((((1.0 * cfg.num_train_epochs) * total_n_examples) / total_train_batch_size)))
    cfg.valid_steps = (int(math.ceil((((1.0 * cfg.num_train_steps) / cfg.num_valid) / cfg.min_valid_steps))) * cfg.min_valid_steps)
    actual_num_valid = (int(math.floor(((1.0 * cfg.num_train_steps) / cfg.valid_steps))) + 1)
    # Restore from a previous run if one exists; global_step resumes from it.
    restorer = TrainingRestorer(cfg, model, optimizer)
    global_step = restorer.global_step
    TB_LOGGER.global_step = global_step
    if (hvd.rank() == 0):
        # Rank 0 owns all on-disk artifacts: meta, TB logs, checkpoints, logfile.
        LOGGER.info('Saving training meta...')
        save_training_meta(cfg)
        path = join(cfg.output_dir, 'log', 'detectron2_model_cfg.yaml')
        with open(path, 'w') as f:
            f.write(model.cnn.config_file)
        LOGGER.info('Saving training done...')
        TB_LOGGER.create(join(cfg.output_dir, 'log'))
        model_saver = ModelSaver(join(cfg.output_dir, 'ckpt'))
        add_log_to_file(join(cfg.output_dir, 'log', 'log.txt'))
        pbar = tqdm(total=cfg.num_train_steps)
    else:
        # Other ranks get inert stand-ins so the loop body stays branch-free.
        LOGGER.disabled = True
        model_saver = NoOp()
        restorer = NoOp()
        pbar = NoOp()
    if (global_step > 0):
        pbar.update(global_step)
    LOGGER.info(cfg)
    LOGGER.info('Starting training...')
    LOGGER.info(f'***** Running training with {n_gpu} GPUs *****')
    LOGGER.info(f' Single-GPU Non-Accumulated batch size = {cfg.train_batch_size}')
    LOGGER.info(f' max_n_example_per_group = {cfg.max_n_example_per_group}')
    LOGGER.info(f' Accumulate steps = {cfg.gradient_accumulation_steps}')
    LOGGER.info(f' Total batch size = #GPUs * Single-GPU batch size * max_n_example_per_group * Accumulate steps [Image] = {total_train_batch_size}')
    LOGGER.info(f' Total #epochs = {cfg.num_train_epochs}')
    LOGGER.info(f' Total #steps = {cfg.num_train_steps}')
    LOGGER.info(f' Validate every {cfg.valid_steps} steps, in total {actual_num_valid} times')
    # Prime Horovod/AMP optimizer state when starting from scratch.
    with optimizer.skip_synchronize():
        optimizer.zero_grad()
        if (global_step == 0):
            optimizer.step()
    debug_step = 3
    running_loss = RunningMeter('train_loss')
    for (step, batch) in enumerate(InfiniteIterator(train_loader)):
        (outputs, question_ids) = forward_step(model, batch)
        loss = outputs['loss'].mean()
        # NOTE(review): loss is rescaled by num_labels — presumably undoing a
        # mean over the label dimension; confirm against the model's loss.
        loss = (loss.float() * cfg.num_labels)
        running_loss(loss.item())
        # Skip the allreduce on accumulation sub-steps.
        delay_unscale = (((step + 1) % cfg.gradient_accumulation_steps) != 0)
        with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale) as scaled_loss:
            scaled_loss.backward()
            zero_none_grad(model)
            optimizer.synchronize()
        if (((step + 1) % cfg.gradient_accumulation_steps) == 0):
            global_step += 1
            TB_LOGGER.add_scalar('train/loss', running_loss.val, global_step)
            n_epoch = int((((1.0 * total_train_batch_size) * global_step) / total_n_examples))
            # Separate LR schedules for the transformer and the CNN backbone.
            lr_this_step_transformer = get_lr_sched(global_step, cfg.decay, cfg.learning_rate, cfg.num_train_steps, warmup_ratio=cfg.warmup_ratio, decay_epochs=cfg.step_decay_epochs, multi_step_epoch=n_epoch)
            lr_this_step_cnn = get_lr_sched(global_step, cfg.cnn_lr_decay, cfg.cnn_learning_rate, cfg.num_train_steps, warmup_ratio=cfg.warmup_ratio, decay_epochs=cfg.cnn_step_decay_epochs, multi_step_epoch=n_epoch)
            # Param-group layout fixed by the optimizer setup: 0-3 transformer
            # (0/1 with LR multiplier), 4-7 CNN (4/5 with LR multiplier).
            assert (len(optimizer.param_groups) == 8)
            for (pg_n, param_group) in enumerate(optimizer.param_groups):
                if (pg_n in [0, 1]):
                    param_group['lr'] = (cfg.transformer_lr_mul * lr_this_step_transformer)
                elif (pg_n in [2, 3]):
                    param_group['lr'] = lr_this_step_transformer
                elif (pg_n in [4, 5]):
                    param_group['lr'] = (cfg.cnn_lr_mul * lr_this_step_cnn)
                else:
                    param_group['lr'] = lr_this_step_cnn
            TB_LOGGER.add_scalar('train/lr_transformer', lr_this_step_transformer, global_step)
            TB_LOGGER.add_scalar('train/lr_cnn', lr_this_step_cnn, global_step)
            # Optional gradient clipping on AMP master params (-1 disables).
            if (cfg.grad_norm != (- 1)):
                grad_norm = clip_grad_norm_(amp.master_params(optimizer), cfg.grad_norm)
                TB_LOGGER.add_scalar('train/grad_norm', grad_norm, global_step)
            TB_LOGGER.step()
            # Sanity check: every trainable parameter received a gradient.
            none_grads = [p[0] for p in model.named_parameters() if (p[1].requires_grad and (p[1].grad is None))]
            assert (len(none_grads) == 0), f'{none_grads}'
            with optimizer.skip_synchronize():
                optimizer.step()
                optimizer.zero_grad()
            restorer.step()
            pbar.update(1)
            if ((global_step % cfg.valid_steps) == 0):
                LOGGER.info(f'Step {global_step}: start validation')
                vqa_results = validate(model, val_loader, cfg, global_step)
                model_saver.save(step=global_step, model=model)
        if (global_step >= cfg.num_train_steps):
            break
        if (cfg.debug and (global_step >= debug_step)):
            break
    # Final validation/checkpoint when training didn't end on a valid step.
    if ((global_step % cfg.valid_steps) != 0):
        LOGGER.info(f'Step {global_step}: start validation')
        vqa_results = validate(model, val_loader, cfg, global_step)
        model_saver.save(step=global_step, model=model)
@torch.no_grad()  # NOTE(review): reconstructed from the truncated "_grad()" fragment — inference-only validation must not build autograd graphs
def validate(model, dataloader, device):
    """Embed every (audio, text) pair in `dataloader` and compute retrieval
    metrics in both directions.

    Returns:
        dict with 't2a' and 'a2t' lists of
        [r1, r5, r10, r50, medr, meanr, mAP10].
    """
    model.eval()
    (audio_embeds_all, text_embeds_all) = ([], [])
    for (batch_idx, (audio, text, idx)) in tqdm(enumerate(dataloader), total=len(dataloader)):
        audio = audio.to(device)
        audio_embeds = model.encode_audio(audio)
        text_embeds = model.encode_text(text)
        # Accumulate on CPU to keep GPU memory flat across the whole set.
        audio_embeds_all.append(audio_embeds.cpu())
        text_embeds_all.append(text_embeds.cpu())
    audio_embeds_all = torch.cat(audio_embeds_all, dim=0).numpy()
    text_embeds_all = torch.cat(text_embeds_all, dim=0).numpy()
    (r1, r5, r10, r50, medr, meanr, mAP10) = t2a(audio_embeds_all, text_embeds_all)
    (r1_a, r5_a, r10_a, r50_a, medr_a, meanr_a, mAP10_a) = a2t(audio_embeds_all, text_embeds_all)
    return {'t2a': [r1, r5, r10, r50, medr, meanr, mAP10], 'a2t': [r1_a, r5_a, r10_a, r50_a, medr_a, meanr_a, mAP10_a]}
class RLAlstm_ResNet(nn.Module):
    """ResNet backbone augmented with a recurrent layer-aggregation (RLA)
    channel carried across blocks by a ConvLSTM cell.

    NOTE(review): the hidden/cell states are allocated on device='cuda'
    unless self.flops is set, so CPU inference requires flops=True —
    confirm this is intentional.
    """

    def __init__(self, block, layers, num_classes=1000, rla_channel=32, SE=False, ECA=None, zero_init_last_bn=True, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(RLAlstm_ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # Each flag trades stride for dilation in stages 2-4.
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        if (ECA is None):
            ECA = ([None] * 4)
        elif (len(ECA) != 4):
            raise ValueError('argument ECA should be a 4-element tuple, got {}'.format(ECA))
        self.rla_channel = rla_channel
        self.flops = False  # when True, h/c live on CPU (for FLOPs counting)
        self.groups = groups
        self.base_width = width_per_group
        # Standard ResNet stem.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Per-stage parts: residual blocks, per-block BNs for the RLA branch,
        # a 1x1 conv into the RLA channel, and the ConvLSTM recurrence.
        conv_outs = ([None] * 4)
        recurrent_convs = ([None] * 4)
        stages = ([None] * 4)
        stage_bns = ([None] * 4)
        (stages[0], stage_bns[0], conv_outs[0], recurrent_convs[0]) = self._make_layer(block, 64, layers[0], rla_channel=rla_channel, SE=SE, ECA_size=ECA[0])
        (stages[1], stage_bns[1], conv_outs[1], recurrent_convs[1]) = self._make_layer(block, 128, layers[1], rla_channel=rla_channel, SE=SE, ECA_size=ECA[1], stride=2, dilate=replace_stride_with_dilation[0])
        (stages[2], stage_bns[2], conv_outs[2], recurrent_convs[2]) = self._make_layer(block, 256, layers[2], rla_channel=rla_channel, SE=SE, ECA_size=ECA[2], stride=2, dilate=replace_stride_with_dilation[1])
        (stages[3], stage_bns[3], conv_outs[3], recurrent_convs[3]) = self._make_layer(block, 512, layers[3], rla_channel=rla_channel, SE=SE, ECA_size=ECA[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.conv_outs = nn.ModuleList(conv_outs)
        self.recurrent_convs = nn.ModuleList(recurrent_convs)
        self.stages = nn.ModuleList(stages)
        self.stage_bns = nn.ModuleList(stage_bns)
        self.tanh = nn.Tanh()
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Classifier sees the last feature map concatenated with the RLA state.
        self.fc = nn.Linear(((512 * block.expansion) + rla_channel), num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_last_bn:
            # Zero-init the final BN of each RLA_Bottleneck residual branch so
            # blocks start as identity mappings.
            for m in self.modules():
                if isinstance(m, RLA_Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, rla_channel, SE, ECA_size, stride=1, dilate=False):
        """Build one stage: `blocks` residual blocks plus the stage's RLA parts."""
        conv_out = conv1x1((planes * block.expansion), rla_channel)
        recurrent_convlstm = ConvLSTMCell_layer(rla_channel, rla_channel, (3, 3))
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Replace the stage's stride with dilation (keeps resolution).
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        # First block may downsample; the rest keep the stage's resolution.
        layers.append(block(self.inplanes, planes, stride, downsample, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=previous_dilation, norm_layer=norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        # One norm per block for the RLA side-branch output.
        bns = [norm_layer(rla_channel) for _ in range(blocks)]
        return (nn.ModuleList(layers), nn.ModuleList(bns), conv_out, recurrent_convlstm)

    def _forward_impl(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        (batch, _, height, width) = x.size()
        # Initialize the ConvLSTM hidden/cell state at the post-stem resolution.
        if self.flops:
            h = torch.zeros(batch, self.rla_channel, height, width)
            c = torch.zeros(batch, self.rla_channel, height, width)
        else:
            h = torch.zeros(batch, self.rla_channel, height, width, device='cuda')
            c = torch.zeros(batch, self.rla_channel, height, width, device='cuda')
        for (layers, bns, conv_out, recurrent_convlstm) in zip(self.stages, self.stage_bns, self.conv_outs, self.recurrent_convs):
            for (layer, bn) in zip(layers, bns):
                # Each block emits its output x plus an RLA feature y, and
                # threads h/c through; y updates the recurrence after
                # 1x1-projection, BN, and tanh.
                (x, y, h, c) = layer(x, h, c)
                y_out = conv_out(y)
                y_out = bn(y_out)
                y_out = self.tanh(y_out)
                (h, c) = recurrent_convlstm(y_out, (h, c))
        # Classify on the concatenation of backbone features and RLA state.
        x = torch.cat((x, h), dim=1)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
class RuleOverlapPruner():
    """Prunes default-class rules whose covered region is not contested by
    any non-default rule, using either transaction- or range-based overlap."""

    def __init__(self, quantitative_dataset):
        # Dataset wrapper providing find_covered_by_rule_mask(rule).
        self.__dataframe = quantitative_dataset

    def transform(self, rules, default_class, transaction_based=True):
        """Return a pruned copy of `rules`; the input rules are not mutated."""
        copied_rules = [rule.copy() for rule in rules]
        if transaction_based:
            return self.prune_transaction_based(copied_rules, default_class)
        return self.prune_range_based(copied_rules, default_class)

    def prune_transaction_based(self, rules, default_class):
        """Drop a default-class rule when no non-default rule covers any of
        the same transactions.

        NOTE(review): only rules at or after the default rule's index are
        considered as clash candidates (assumes a precedence-ordered list) —
        confirm this is intended.
        """
        new_rules = [rule for rule in rules]
        for (idx, rule) in enumerate(rules):
            (rule_classname, rule_classval) = rule.consequent
            if (rule_classval != default_class):
                continue
            (correctly_covered_antecedent, correctly_covered_consequent) = self.__dataframe.find_covered_by_rule_mask(rule)
            correctly_covered = (correctly_covered_antecedent & correctly_covered_consequent)
            non_empty_intersection = False
            for candidate_clash in rules[idx:]:
                (cand_classname, cand_classval) = candidate_clash.consequent
                if (cand_classval == default_class):
                    continue
                (cand_clash_covered_antecedent, cand_clash_covered_consequent) = self.__dataframe.find_covered_by_rule_mask(candidate_clash)
                if any((cand_clash_covered_antecedent & correctly_covered)):
                    non_empty_intersection = True
                    break
            if not non_empty_intersection:
                new_rules.remove(rule)
        return new_rules

    def prune_range_based(self, rules, default_class):
        """Drop a default-class rule when no later non-default rule's
        antecedent intervals overlap it on every shared attribute."""
        new_rules = [rule for rule in rules]
        for (idx, rule) in enumerate(rules):
            (rule_classname, rule_classval) = rule.consequent
            if (rule_classval != default_class):
                continue
            literals = dict(rule.antecedent)
            attributes = literals.keys()
            clashing_rule_found = False
            for candidate_clash in rules[idx:]:
                (cand_classname, cand_classval) = candidate_clash.consequent
                if (cand_classval == default_class):
                    continue
                clash_cand_antecedent_dict = dict(candidate_clash.antecedent)
                shared_attributes = (set(attributes) & set(clash_cand_antecedent_dict.keys()))
                if (not shared_attributes):
                    # No shared attribute constrains them apart -> regions overlap.
                    clashing_rule_found = True
                    break
                at_least_one_attribute_disjunct = False
                for key in shared_attributes:
                    interval = clash_cand_antecedent_dict[key]
                    if (not interval.overlaps_with(literals[key])):
                        at_least_one_attribute_disjunct = True
                        break
                if not at_least_one_attribute_disjunct:
                    # BUGFIX: the original wrote `(clashing_rule_found == True)`,
                    # a no-op comparison, so genuine overlaps never marked the
                    # rule as clashing and it was pruned incorrectly.
                    clashing_rule_found = True
                    break
            if not clashing_rule_found:
                new_rules.remove(rule)
        return new_rules
class ImageNet(Dataset):
    """ImageNet (ILSVRC) wrapper around torchvision ImageFolder with the
    standard augmentation/eval pipelines and ImageNet normalization."""

    def __str__(self):
        return 'Imagenet Dataset'

    def __init__(self, target_size, dataset_path='./datasets/imagenet', train_transforms=None, test_transforms=None):
        self.mean = (0.485, 0.456, 0.406)
        self.std = (0.229, 0.224, 0.225)
        self.num_classes = 1000
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        # Defaults: random crop + flip for training, resize + center-crop for eval.
        if train_transforms is None:
            train_transforms = [transforms.RandomResizedCrop(224),
                                transforms.RandomHorizontalFlip(),
                                transforms.ToTensor(), normalize]
        if test_transforms is None:
            test_transforms = [transforms.Resize(256),
                               transforms.CenterCrop(224),
                               transforms.ToTensor(), normalize]
        super(ImageNet, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Reading {} data from {}'.format(str(self), dataset_path))
        train_dir = os.path.join(self.dataset_path, 'train')
        val_dir = os.path.join(self.dataset_path, 'val')
        self.train_dataset = datasets.ImageFolder(train_dir, transform=transforms.Compose(self.train_transforms))
        # No separate test split on disk: validation images double as the test set.
        self.val_dataset = datasets.ImageFolder(val_dir, transform=transforms.Compose(self.test_transforms))
        self.test_dataset = datasets.ImageFolder(val_dir, transform=transforms.Compose(self.test_transforms))
def draw_net_to_file(caffe_net, filename, rankdir='LR', phase=None):
    """Render `caffe_net` via draw_net and write the image bytes to `filename`.

    The output format is inferred from the filename extension.
    """
    ext = filename[filename.rfind('.') + 1:]
    with open(filename, 'wb') as out:
        out.write(draw_net(caffe_net, rankdir, ext, phase))
def reporthook(t):
    """Wrap a tqdm-like instance `t` into an urlretrieve-style callback.

    (The original source had a dangling quote where this docstring was
    truncated; reconstructed.)

    Args:
        t: object with a writable `total` attribute and an `update(n)` method.

    Returns:
        A callback `inner(b, bsize, tsize)` suitable for urllib.request.urlretrieve.
    """
    last_b = [0]  # blocks reported at the previous call (closure state)

    def inner(b=1, bsize=1, tsize=None):
        # b: blocks transferred so far; bsize: block size; tsize: total size.
        if tsize is not None:
            t.total = tsize
        # Advance by the bytes transferred since the previous callback.
        t.update((b - last_b[0]) * bsize)
        last_b[0] = b
    return inner
def clean_personachat_text(text):
    """Normalize PersonaChat text: standardize English, then re-attach
    contractions that tokenization split apart."""
    cleaned = standardize_english_text(text)
    # "do n ' t" -> "do n't"
    cleaned = re.sub(r"(\w)n ' (t\W)", r"\1 n'\2", cleaned)
    # " ' m" / " ' s" / " ' re" / ... -> " 'm" / " 's" / " 're" / ...
    cleaned = re.sub(r" ' (m|s|re|ve|d|ll)(\W)", r" '\1\2", cleaned)
    return cleaned
class ContextGate(nn.Module):
    """Sigmoid gate over the concatenated (embedding, decoder, attention)
    context, plus separate source/target projections.

    forward returns (z, proj_source, proj_target) where z is the gate value.
    """

    def __init__(self, embeddings_size, decoder_size, attention_size, output_size):
        super(ContextGate, self).__init__()
        combined_size = embeddings_size + decoder_size + attention_size
        self.gate = nn.Linear(combined_size, output_size, bias=True)
        self.sig = nn.Sigmoid()
        # Source side sees only the attention state; target side sees
        # the previous embedding together with the decoder state.
        self.source_proj = nn.Linear(attention_size, output_size)
        self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size)

    def forward(self, prev_emb, dec_state, attn_state):
        gate_input = torch.cat((prev_emb, dec_state, attn_state), dim=1)
        z = self.sig(self.gate(gate_input))
        proj_source = self.source_proj(attn_state)
        target_input = torch.cat((prev_emb, dec_state), dim=1)
        proj_target = self.target_proj(target_input)
        return z, proj_source, proj_target
def xavier_init(n_inputs, n_outputs, uniform=True):
    """Xavier/Glorot initializer factory (TF1).

    Returns a uniform initializer over [-sqrt(6/(fan_in+fan_out)), +...] when
    `uniform`, else a truncated normal with stddev sqrt(3/(fan_in+fan_out)).
    """
    fan_sum = n_inputs + n_outputs
    if uniform:
        limit = np.sqrt(6.0 / fan_sum)
        return tf.random_uniform_initializer(-limit, limit)
    stddev = np.sqrt(3.0 / fan_sum)
    return tf.truncated_normal_initializer(stddev=stddev)
def dino_resnet50(pretrained=True, **kwargs):
    """ResNet-50 backbone with DINO self-supervised weights and the
    classification head replaced by Identity.

    Args:
        pretrained: download and load the official DINO checkpoint.
        **kwargs: forwarded to the resnet50 constructor.

    Returns:
        The (optionally pretrained) headless model.
    """
    model = resnet50(pretrained=False, **kwargs)
    model.fc = torch.nn.Identity()
    if pretrained:
        # NOTE(review): the checkpoint URL was truncated in the source;
        # restored to the official facebookresearch/dino release location —
        # confirm against the repository README.
        state_dict = torch.hub.load_state_dict_from_url(
            url='https://dl.fbaipublicfiles.com/dino/dino_resnet50_pretrain/dino_resnet50_pretrain.pth',
            map_location='cpu')
        # strict=False tolerates the missing fc weights.
        model.load_state_dict(state_dict, strict=False)
    return model
class TransformerDecoderLayer(nn.Module):
    """Decoder layer pairing memory self-attention with sparse point cross-attention.

    Unusually, the self-attention sub-block here refreshes ``memory`` (the
    encoder features) rather than ``tgt``; the refreshed memory then serves
    as key/value for the SparsePointCorssAttention from ``tgt``. Both the
    updated ``tgt`` and the updated ``memory`` are returned.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, embeddings_proj_kernel_size=1, activation='relu', normalize_before=False):
        super().__init__()
        # Self-attention applied to the memory tokens.
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Sparse cross-attention from tgt queries into the memory.
        self.crossocr_attn = SparsePointCorssAttention(embed_dim=d_model, num_heads=nhead, dropout=dropout, kv_proj_kernel_size=embeddings_proj_kernel_size)
        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        # True -> pre-norm variant; False -> post-norm (original Transformer).
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Additive positional embedding; identity when pos is None.
        return (tensor if (pos is None) else (tensor + pos))

    def forward_post(self, tgt, memory, batch_idx, batch_size, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None, mem_pos: Optional[Tensor]=None):
        """Post-norm variant: sub-block first, LayerNorm after each residual add."""
        # Memory self-attention (queries == keys == memory + mem_pos).
        q = k = self.with_pos_embed(memory, mem_pos)
        memory2 = self.self_attn(q, k, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0]
        memory = (memory + self.dropout1(memory2))
        memory = self.norm1(memory)
        # tgt queries attend into the refreshed memory.
        tgt2 = self.crossocr_attn(query=self.with_pos_embed(tgt, query_pos), key=self.with_pos_embed(memory, pos), value=memory, batch_idx=batch_idx, batch_size=batch_size)
        tgt = (tgt + self.dropout2(tgt2))
        tgt = self.norm2(tgt)
        # Feed-forward block with residual.
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = (tgt + self.dropout3(tgt2))
        tgt = self.norm3(tgt)
        return (tgt, memory)

    def forward_pre(self, tgt, memory, batch_idx, batch_size, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None, mem_pos: Optional[Tensor]=None):
        """Pre-norm variant: LayerNorm before each sub-block."""
        memory2 = self.norm1(memory)
        q = k = self.with_pos_embed(memory2, mem_pos)
        memory2 = self.self_attn(q, k, value=memory2, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0]
        memory = (memory + self.dropout1(memory2))
        tgt2 = self.norm2(tgt)
        tgt2 = self.crossocr_attn(query=self.with_pos_embed(tgt2, query_pos), key=self.with_pos_embed(memory, pos), value=memory, batch_idx=batch_idx, batch_size=batch_size)
        tgt = (tgt + self.dropout2(tgt2))
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = (tgt + self.dropout3(tgt2))
        return (tgt, memory)

    def forward(self, tgt, memory, batch_idx, batch_size, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None, mem_pos: Optional[Tensor]=None):
        """Dispatch to the pre-norm or post-norm implementation."""
        if self.normalize_before:
            return self.forward_pre(tgt, memory, batch_idx, batch_size, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos, mem_pos)
        return self.forward_post(tgt, memory, batch_idx, batch_size, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos, mem_pos)
def packing(x, r=2):
    """Space-to-depth: fold each non-overlapping r x r spatial patch into channels.

    Input  (b, c, h, w) -> output (b, c*r*r, h//r, w//r); channel order is
    (original channel, patch row offset, patch column offset).
    """
    b, c, h, w = x.shape
    new_c = c * r * r
    new_h, new_w = h // r, w // r
    # Split each spatial axis into (coarse position, offset inside the patch).
    patches = x.contiguous().view(b, c, new_h, r, new_w, r)
    # Move the in-patch offsets next to the channel axis, then fold them in.
    patches = patches.permute(0, 1, 3, 5, 2, 4).contiguous()
    return patches.view(b, new_c, new_h, new_w)
def save_checkpoint(iteration, model, optimizer, lr_scheduler, args):
    """Save a Megatron-style checkpoint (model/optimizer/scheduler/RNG state).

    Only data-parallel rank 0 writes the checkpoint file; global rank 0 then
    updates the 'latest iteration' tracker file. The two barriers keep all
    ranks synchronized around the filesystem writes.
    """
    if isinstance(model, torchDDP):
        # Unwrap DistributedDataParallel so state_dict keys are unprefixed.
        model = model.module
    if (mpu.get_data_parallel_rank() == 0):
        checkpoint_name = get_checkpoint_name(args.save, iteration)
        print('global rank {} is saving checkpoint at iteration {:7d} to {}'.format(torch.distributed.get_rank(), iteration, checkpoint_name))
        sd = {}
        sd['iteration'] = iteration
        sd['model'] = model.state_dict()
        # Optimizer / LR-scheduler state (skippable via CLI flags).
        if (not args.no_save_optim):
            if (optimizer is not None):
                sd['optimizer'] = optimizer.state_dict()
            if (lr_scheduler is not None):
                sd['lr_scheduler'] = lr_scheduler.state_dict()
        # RNG state so training can resume bitwise-identically.
        if (not args.no_save_rng):
            sd['random_rng_state'] = random.getstate()
            sd['np_rng_state'] = np.random.get_state()
            sd['torch_rng_state'] = torch.get_rng_state()
            sd['cuda_rng_state'] = torch.cuda.get_rng_state()
            sd['rng_tracker_states'] = mpu.get_cuda_rng_tracker().get_states()
        ensure_directory_exists(checkpoint_name)
        torch.save(sd, checkpoint_name)
        print(' successfully saved {}'.format(checkpoint_name))
    # Wait for all saves to land before any rank touches the tracker file.
    torch.distributed.barrier()
    if (torch.distributed.get_rank() == 0):
        tracker_filename = get_checkpoint_tracker_filename(args.save)
        with open(tracker_filename, 'w') as f:
            f.write(str(iteration))
    torch.distributed.barrier()
class LLTMFunction(Function):
    """Autograd function computing acos(x) / 3 with clamped domain endpoints.

    Inputs within ``eps`` (module-level constant) of the boundary [-1, 1]
    are clamped to the endpoint values (pi/3 and 0) and get zero gradient,
    avoiding the infinite derivative of acos at +/-1.

    Fixes: ``forward``/``backward`` are now ``@staticmethod`` as required by
    modern ``torch.autograd.Function`` (use via ``LLTMFunction.apply``), and
    the deprecated ``ctx.saved_variables`` is replaced by ``ctx.saved_tensors``.
    """

    @staticmethod
    def forward(ctx, input):
        out = (torch.acos(input) / 3)
        low = (input <= ((- 1) + eps))   # near -1: acos(-1)/3 == pi/3
        out[low] = (math.pi / 3)
        high = (input >= (1 - eps))      # near +1: acos(1)/3 == 0
        out[high] = 0
        # low and high are disjoint, so ~(low ^ high) marks the interior.
        interior = (~ (low ^ high))
        ctx.save_for_backward(input, interior)
        return out

    @staticmethod
    def backward(ctx, grad_h):
        (X, interior) = ctx.saved_tensors
        d_input = torch.zeros(X.shape, device=X.device)
        # d/dx acos(x)/3 = -1 / (3 * sqrt(1 - x^2)); computed in double for stability.
        d_input[interior] = ((- 1) / (3 * (1 - X.double()[interior].pow(2)).sqrt().float()))
        d_input *= grad_h
        return d_input
def criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):
    """Apply each criterion module to its (input, target) pair on its own device, in parallel threads.

    Mirrors ``torch.nn.parallel.parallel_apply`` but also scatters ``targets``
    into each call. Worker exceptions are captured and re-raised in the
    calling thread; results are returned in input order.
    """
    assert (len(modules) == len(inputs))
    assert (len(targets) == len(inputs))
    if (kwargs_tup is not None):
        assert (len(modules) == len(kwargs_tup))
    else:
        kwargs_tup = (({},) * len(modules))
    if (devices is not None):
        assert (len(modules) == len(devices))
    else:
        devices = ([None] * len(modules))
    lock = threading.Lock()
    results = {}
    # Grad mode is thread-local; capture it to propagate into workers.
    grad_enabled = torch.is_grad_enabled()

    def _worker(i, module, input, target, kwargs, device=None):
        torch.set_grad_enabled(grad_enabled)
        if (device is None):
            # Infer the device from the first tensor found in the input.
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                # NOTE(review): assumes ``target`` is a list appended to the
                # positional args — confirm against callers.
                output = module(*(list(input) + target), **kwargs)
            with lock:
                results[i] = output
        except Exception as e:
            # Store the exception; it is re-raised by the main thread below.
            with lock:
                results[i] = e
    if (len(modules) > 1):
        threads = [threading.Thread(target=_worker, args=(i, module, input, target, kwargs, device)) for (i, (module, input, target, kwargs, device)) in enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single module: run inline, no thread overhead.
        _worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, Exception):
            raise output
        outputs.append(output)
    return outputs
def get_results(endpoint_url, URL):
    """Query Wikidata via SPARQL for occupation data about the entity ``URL`` describes.

    Returns the raw JSON result dict from SPARQLWrapper.
    """
    query = f'''SELECT ?uriLabel ?occupation ?occupationLabel ?dob ?dobLabel WHERE {{ <{URL}> schema:about ?uri . ?uri wdt:P106 ?occupation . SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en" }} }}'''
    # WDQS requires a descriptive User-Agent.
    user_agent = 'WDQS-example Python/%s.%s' % (sys.version_info[0], sys.version_info[1])
    client = SPARQLWrapper(endpoint_url, agent=user_agent)
    client.setQuery(query)
    client.setReturnFormat(JSON)
    return client.query().convert()
def test_statcast_batter_pitch_arsenal() -> None:
    """Sanity-check the 2019 batter pitch-arsenal leaderboard download."""
    min_pa = 25
    frame: pd.DataFrame = statcast_batter_pitch_arsenal(2019, min_pa)
    assert frame is not None
    assert not frame.empty
    # Expected leaderboard schema width.
    assert len(frame.columns) == 21
    assert len(frame) > 0
    # The min_pa filter must be honored server-side.
    assert len(frame[frame['pa'] < min_pa]) == 0
def acc_and_f1(preds, labels):
    """Return accuracy, F1, and their mean for ``preds`` vs ``labels`` (deprecated helper)."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_sklearn(acc_and_f1)
    accuracy = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        'acc': accuracy,
        'f1': f1,
        'acc_and_f1': (accuracy + f1) / 2,
    }
class BinaryOpBase(AbsOpBase):
    """Base class for elementwise binary ops over the generated float dtypes."""
    # Both operands share one dtype; the single output has the same dtype.
    in_dtypes = [(dt, dt) for dt in DTYPE_GEN_FLOATS]
    out_dtypes = [(dt,) for dt in DTYPE_GEN_FLOATS]

    def __init__(self):
        super().__init__()
        # Any output rank is admissible.
        self.out_ranks = [rank_all()]
def get_run_reward(x_velocity: float, move_speed: float, cos_pitch_cos_roll: float, terminate_pitch_roll_deg: float):
    """Reward = uprightness x forward speed, each a linear tolerance in [0, 1]."""
    # Uprightness cutoff: cos of the termination pitch/roll angle.
    cutoff = np.cos(np.deg2rad(terminate_pitch_roll_deg))
    upright = rewards.tolerance(
        cos_pitch_cos_roll,
        bounds=(cutoff, float('inf')),
        sigmoid='linear',
        margin=cutoff + 1,
        value_at_margin=0,
    )
    forward = rewards.tolerance(
        x_velocity,
        bounds=(move_speed, 2 * move_speed),
        margin=move_speed,
        value_at_margin=0,
        sigmoid='linear',
    )
    return upright * forward
def cnn_model_attack(input_shape, num_classes=25):
    """Build a single-hidden-layer softmax classifier for the attack model.

    Fix: the ``num_classes`` argument was previously ignored — the output
    layer was hard-coded to 25 units. The default preserves old behavior.

    Args:
        input_shape: shape of one input sample (without batch dimension).
        num_classes: number of softmax output units.
    """
    inputs = Input(shape=input_shape)
    x = Dense(30000, activation='relu')(inputs)
    x = Dropout(0.7)(x)
    output = Dense(num_classes, activation='softmax')(x)
    return Model(inputs=inputs, outputs=output)
def transfer_ckpt(ckpt):
    """Strip flow/teacher/criterion weights and un-prefix student weights.

    Keys containing 'flow', 'teacher', or 'criterion' are dropped; keys
    containing 'student' (but not 'criterion') are re-keyed with the
    'student.' prefix removed. Unrecognized keys are kept and printed.
    """
    if 'state_dict' in ckpt:
        ckpt = ckpt['state_dict']
    kept = ckpt.copy()
    for key in ckpt:
        if 'flow' in key or 'teacher' in key:
            del kept[key]
        elif 'student' in key:
            if 'criterion' not in key:
                kept[key.replace('student.', '')] = ckpt[key]
            del kept[key]
        elif 'criterion' in key:
            del kept[key]
        else:
            print(key)
    return kept
def run(max_iters=None):
    """Step the global ``world`` until it reports done (or ``max_iters`` steps).

    Mutates module-level state: advances ``world`` by ``update_timestep`` per
    step and increments the global counter ``iter`` (which shadows the
    builtin of the same name).
    """
    global update_timestep
    global world
    global iter
    done = False
    while (not done):
        update_world(world, update_timestep)
        done = world.isDone()
        iter += 1
        # Optional hard cap on the number of simulation steps.
        if max_iters:
            done = (iter >= max_iters)
    return
class DataFormatErrors():
    """Raise descriptive exceptions for missing files, directories, or columns.

    Fix: each method previously had an unreachable ``exit()`` call after its
    ``raise`` statement; the dead code has been removed.
    """

    def FileError(self, file):
        """Raise IOError for a missing file."""
        raise IOError('[-] File Not Found : ' + file)

    def DirectoryError(self, directory):
        """Raise IOError for a missing directory."""
        raise IOError('[-] Directory Not Found : ' + directory)

    def ColumnError(self, column):
        """Raise ValueError for a missing column."""
        raise ValueError('[-] Column Not Found : ' + column)
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """SWIG-generated helper: set attribute ``name`` on a wrapped object.

    'thisown' and 'this' receive special handling for the underlying C
    pointer; other names are routed through the class's
    ``__swig_setmethods__`` table. With ``static`` true, attributes with no
    registered setter raise instead of being added to ``__dict__``.
    """
    if (name == 'thisown'):
        # Ownership flag of the underlying C object.
        return self.this.own(value)
    if (name == 'this'):
        if (type(value).__name__ == 'SwigPyObject'):
            self.__dict__[name] = value
            return
    # Property-style setter registered by SWIG, if any.
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError(('You cannot add attributes to %s' % self))
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, max_steps, val_dataloader):
    """Run one training epoch; return (averaged total_loss, updated global_step).

    Relies on module-level ``logger``, ``best_score`` and ``meters``. Logs
    timing/loss every ``args.n_display`` steps on the main process.
    """
    global logger
    global best_score
    global meters
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    # NOTE(review): total_loss is never accumulated in the loop below, so the
    # returned value is always 0 — confirm whether accumulation was intended.
    total_loss = 0
    end = time.time()
    # NOTE(review): logit_scale is logged but never updated in this function.
    logit_scale = 0
    for (step, batch) in enumerate(train_dataloader, start=1):
        global_step += 1
        data_time = (time.time() - end)
        if (n_gpu == 1):
            # Single-GPU: move the batch explicitly (multi-GPU wrappers scatter it).
            batch = tuple((t.to(device=device, non_blocking=True) for t in batch))
        (text_ids, text_mask, video, video_mask, inds, idx) = batch
        loss = model(text_ids, text_mask, video, video_mask, idx, global_step)
        if (n_gpu > 1):
            # DataParallel returns one loss per GPU; average them.
            loss = loss.mean()
        # NOTE(review): detect_anomaly adds significant overhead and is
        # usually enabled only while debugging NaNs in the backward pass.
        with torch.autograd.detect_anomaly():
            loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        if (scheduler is not None):
            scheduler.step()
        optimizer.zero_grad()
        batch_time = (time.time() - end)
        end = time.time()
        # All-reduce the loss across workers for logging.
        reduced_l = reduce_loss(loss, args)
        meters.update(time=batch_time, data=data_time, loss=float(reduced_l))
        eta_seconds = (meters.time.global_avg * (max_steps - global_step))
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        if ((((global_step % log_step) == 0) or (global_step == 1)) and is_main_process()):
            # NOTE(review): the 'logit_scale: ...' format entry is missing a
            # separator before 'max mem' — kept as-is (runtime string).
            logger.info(meters.delimiter.join(['eta: {eta}', 'epoch: {epoch}/{max_epoch}', 'iteration: {iteration}/{max_iteration}', '{meters}', 'lr: {lr}', 'logit_scale: {logit_scale:.2f}max mem: {memory:.0f}']).format(eta=eta_string, epoch=epoch, max_epoch=args.epochs, iteration=global_step, max_iteration=max_steps, meters=str(meters), lr='/'.join([str(('%.9f' % itm)) for itm in sorted(list(set(optimizer.get_lr())))]), logit_scale=logit_scale, memory=((torch.cuda.max_memory_allocated() / 1024.0) / 1024.0)))
    total_loss = (total_loss / len(train_dataloader))
    return (total_loss, global_step)
class UniqueFinder(SearchAllLibCall):
    """Track unique library API calls and duplicated call expressions across snippets.

    Fix: the return annotation of :meth:`count` was the invalid type
    ``(int, int, int)``; it is now a proper ``Tuple[int, int, int]``
    (kept as a string so no typing import is required at runtime).
    """

    def __init__(self, lib_prefix: str):
        super().__init__(lib_prefix)
        self.unique_found_apis = {}       # api name -> occurrence count
        self.unique_found_call_exps = {}  # unparsed call expression -> occurrence count
        self.num_calls = 0

    def count(self, snippet: str) -> "Tuple[int, int, int]":
        """Accumulate calls found in ``snippet``.

        Returns:
            (number of unique APIs seen so far,
             number of duplicated call expressions,
             number of duplicated API uses).
        """
        lib_calls = self.search_from_code(snippet)
        for (node, api_call) in lib_calls:
            call_exp = astunparse.unparse(node).strip()
            # setdefault replaces the explicit membership checks.
            self.unique_found_call_exps.setdefault(call_exp, 0)
            self.unique_found_apis.setdefault(api_call, 0)
            self.unique_found_apis[api_call] += 1
            self.unique_found_call_exps[call_exp] += 1
        dup_exps = sum((v - 1) for v in self.unique_found_call_exps.values())
        dup_apis = sum((v - 1) for v in self.unique_found_apis.values())
        return (len(self.unique_found_apis), dup_exps, dup_apis)
def test_ingredient_config():
    """A function decorated with ``@ingredient.config`` becomes a registered ConfigScope.

    Fix: ``cfg`` was never registered with the ingredient (the ``@m.config``
    decorator was missing), so ``m.configurations`` would have been empty and
    the assertions could not pass.
    """
    m = Ingredient('somemod')

    @m.config
    def cfg():
        a = 5
        b = 'foo'

    assert (len(m.configurations) == 1)
    cfg = m.configurations[0]
    assert isinstance(cfg, ConfigScope)
    assert (cfg() == {'a': 5, 'b': 'foo'})
def test_parsers():
    """conversion.parse_* pass plain floats through and convert astropy Quantities."""
    from galpy.util import conversion
    tol = 1e-10
    # Unitless inputs must be returned unchanged.
    assert numpy.fabs(conversion.parse_length(2.0) - 2.0) < tol, 'parse_length does not parse unitless position correctly'
    assert numpy.fabs(conversion.parse_energy(3.0) - 3.0) < tol, 'parse_energy does not parse unitless energy correctly'
    assert numpy.fabs(conversion.parse_angmom(-1.5) + 1.5) < tol, 'parse_angmom does not parse unitless angular momentum correctly'
    ro, vo = 7.0, 230.0
    # Quantities convert to natural units (lengths in ro, velocities in vo).
    assert numpy.fabs(conversion.parse_length(2.0 * units.parsec, ro=ro, vo=vo) - 0.002 / ro) < tol, 'parse_length does parse Quantity position correctly'
    assert numpy.fabs(conversion.parse_energy(-30.0 * units.km ** 2 / units.s ** 2, ro=ro, vo=vo) - (-30.0 / vo ** 2)) < tol, 'parse_energy does parse Quantity energy correctly'
    assert numpy.fabs(conversion.parse_angmom(2200.0 * units.kpc * units.km / units.s, ro=ro, vo=vo) - 2200.0 / ro / vo) < tol, 'parse_angmom does parse Quantity angular momentum correctly'
    return None
class _Visualizer(object):
    """Render per-frame skeleton predictions onto video frames and archive the images."""

    def __init__(self, dataset: Dataset, results_dir: str, temp_dir: str, draw_original: bool):
        self.dataset = dataset
        self.results_dir = results_dir
        self.temp_dir = temp_dir
        # Whether to also overlay the ground-truth skeleton (drawn in white).
        self.draw_original = draw_original

    def export_to_images(self, video_name: str, results_scores, results_skel):
        """Draw predicted (and optionally original) skeletons per frame, then zip them.

        Produces <results_dir>/<video_name>/images_results.zip; the temporary
        image directory is removed afterwards.
        """
        results_dir = os.path.join(self.results_dir, video_name)
        images_temp_dir = tempfile.mkdtemp(dir=self.temp_dir)
        features = self.dataset.features_dataset[video_name]
        timestamp = features.timestamp
        # Zero-pad widths derived from the largest timestamp / result count.
        image_filename_format = 'time_{{:0{}d}}_frame_{{:0{}d}}_score_{{:.2f}}.png'.format(len(str(timestamp[(- 1)])), len(str(len(results_scores))))
        start = time.time()
        original_color = (255, 255, 255)  # white: ground truth
        predicted_color = (0, 255, 0)     # green: prediction
        with self.dataset.frames_dataset.open(video_name) as frames:
            for (cur_time, (score, skel)) in enumerate(zip(results_scores, results_skel)):
                frame_index = np.where((timestamp == cur_time))[0]
                if (len(frame_index) == 0):
                    # No captured frame for this time step; skip it.
                    continue
                cur_frame_index = frame_index[0]
                # Grayscale source frame -> BGR so colored overlays are visible.
                cur_raw_frame = cv2.cvtColor(frames[cur_frame_index], cv2.COLOR_GRAY2BGR)
                if self.draw_original:
                    draw_skeleton(cur_raw_frame, features.skeletons[cur_frame_index], original_color, original_color)
                draw_skeleton(cur_raw_frame, skel, predicted_color, predicted_color)
                cv2.imwrite(os.path.join(images_temp_dir, image_filename_format.format(cur_time, cur_frame_index, score)), cur_raw_frame)
        shutil.make_archive(os.path.join(results_dir, 'images_results'), 'zip', images_temp_dir)
        shutil.rmtree(images_temp_dir)
        end = time.time()
        logger.info(f'Exported result images for {os.path.basename(results_dir)} in {(end - start):.1f}s')
class ResnetCompleteNetworkTest(tf.test.TestCase):
    """End-to-end shape/value tests for a small ResNet-v2 built from resnet_utils blocks."""

    def _resnet_small(self, inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, reuse=None, scope='resnet_v2_small'):
        """Build a tiny 4-block ResNet-v2 so tests run fast."""
        bottleneck = resnet_v2.bottleneck
        # Each Block: (scope, unit_fn, [(depth, depth_bottleneck, stride), ...]);
        # the last unit of blocks 1-3 strides by 2.
        blocks = [resnet_utils.Block('block1', bottleneck, (([(4, 1, 1)] * 2) + [(4, 1, 2)])), resnet_utils.Block('block2', bottleneck, (([(8, 2, 1)] * 2) + [(8, 2, 2)])), resnet_utils.Block('block3', bottleneck, (([(16, 4, 1)] * 2) + [(16, 4, 2)])), resnet_utils.Block('block4', bottleneck, ([(32, 8, 1)] * 2))]
        return resnet_v2.resnet_v2(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, reuse=reuse, scope=scope)

    def testClassificationEndPoints(self):
        """Logits and the 'predictions' endpoint have [2, 1, 1, num_classes] shapes."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (logits, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, scope='resnet')
        self.assertTrue(logits.op.name.startswith('resnet/logits'))
        self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
        self.assertTrue(('predictions' in end_points))
        self.assertListEqual(end_points['predictions'].get_shape().as_list(), [2, 1, 1, num_classes])

    def testClassificationShapes(self):
        """Intermediate block endpoints have the expected strided shapes (224 input)."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, scope='resnet')
        endpoint_to_shape = {'resnet/block1': [2, 28, 28, 4], 'resnet/block2': [2, 14, 14, 8], 'resnet/block3': [2, 7, 7, 16], 'resnet/block4': [2, 7, 7, 32]}
        for endpoint in endpoint_to_shape:
            shape = endpoint_to_shape[endpoint]
            self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testFullyConvolutionalEndpointShapes(self):
        """Odd 321x321 input without global pooling yields odd spatial endpoints."""
        global_pool = False
        num_classes = 10
        inputs = create_test_input(2, 321, 321, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, scope='resnet')
        endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4], 'resnet/block2': [2, 21, 21, 8], 'resnet/block3': [2, 11, 11, 16], 'resnet/block4': [2, 11, 11, 32]}
        for endpoint in endpoint_to_shape:
            shape = endpoint_to_shape[endpoint]
            self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testRootlessFullyConvolutionalEndpointShapes(self):
        """Omitting the root block keeps the input resolution through block1."""
        global_pool = False
        num_classes = 10
        inputs = create_test_input(2, 128, 128, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, include_root_block=False, scope='resnet')
        endpoint_to_shape = {'resnet/block1': [2, 64, 64, 4], 'resnet/block2': [2, 32, 32, 8], 'resnet/block3': [2, 16, 16, 16], 'resnet/block4': [2, 16, 16, 32]}
        for endpoint in endpoint_to_shape:
            shape = endpoint_to_shape[endpoint]
            self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testAtrousFullyConvolutionalEndpointShapes(self):
        """With output_stride=8, later blocks keep 41x41 resolution via atrous conv."""
        global_pool = False
        num_classes = 10
        output_stride = 8
        inputs = create_test_input(2, 321, 321, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, output_stride=output_stride, scope='resnet')
        endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4], 'resnet/block2': [2, 41, 41, 8], 'resnet/block3': [2, 41, 41, 16], 'resnet/block4': [2, 41, 41, 32]}
        for endpoint in endpoint_to_shape:
            shape = endpoint_to_shape[endpoint]
            self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testAtrousFullyConvolutionalValues(self):
        """Atrous outputs, subsampled back to nominal stride, match the dense outputs."""
        nominal_stride = 32
        for output_stride in [4, 8, 16, 32, None]:
            with slim.arg_scope(resnet_utils.resnet_arg_scope()):
                with tf.Graph().as_default():
                    with self.test_session() as sess:
                        tf.set_random_seed(0)
                        inputs = create_test_input(2, 81, 81, 3)
                        (output, _) = self._resnet_small(inputs, None, is_training=False, global_pool=False, output_stride=output_stride)
                        if (output_stride is None):
                            factor = 1
                        else:
                            factor = (nominal_stride // output_stride)
                        output = resnet_utils.subsample(output, factor)
                        # Reuse the same variables for the reference network.
                        tf.get_variable_scope().reuse_variables()
                        (expected, _) = self._resnet_small(inputs, None, is_training=False, global_pool=False)
                        sess.run(tf.global_variables_initializer())
                        self.assertAllClose(output.eval(), expected.eval(), atol=0.0001, rtol=0.0001)

    def testUnknownBatchSize(self):
        """A None batch dimension is preserved statically and resolved at run time."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = True
        num_classes = 10
        inputs = create_test_input(None, height, width, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (logits, _) = self._resnet_small(inputs, num_classes, global_pool=global_pool, scope='resnet')
        self.assertTrue(logits.op.name.startswith('resnet/logits'))
        self.assertListEqual(logits.get_shape().as_list(), [None, 1, 1, num_classes])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(logits, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 1, 1, num_classes))

    def testFullyConvolutionalUnknownHeightWidth(self):
        """Unknown spatial dims are handled; a 65x65 input yields 3x3 features."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = False
        inputs = create_test_input(batch, None, None, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (output, _) = self._resnet_small(inputs, None, global_pool=global_pool)
        self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(output, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 3, 3, 32))

    def testAtrousFullyConvolutionalUnknownHeightWidth(self):
        """Same as above but with output_stride=8: a 65x65 input yields 9x9 features."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = False
        output_stride = 8
        inputs = create_test_input(batch, None, None, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (output, _) = self._resnet_small(inputs, None, global_pool=global_pool, output_stride=output_stride)
        self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(output, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 9, 9, 32))
def remap_tf_extras(orig_weights):
    """Map TF MobileNetV1-SSD 'extras' pointwise conv names to torch parameter names.

    Only keys from the MobileNetV1 feature extractor containing 'pointwise_'
    are remapped; everything else is ignored.
    """
    prefix = 'extras.'
    pattern = re.compile('(.*)Conv2d_(\\d+)_(\\d)x(\\d)')
    mapping = {}
    for key in orig_weights:
        if 'FeatureExtractor/MobilenetV1' not in key or 'pointwise_' not in key:
            continue
        match = pattern.match(key)
        layer = int(match.group(2)) - 2            # TF extras layers are numbered from 2
        pos = 0 if int(match.group(3)) == 1 else 2  # 1x1 convs at index 0, 3x3 at index 2
        wtype = 'weight' if 'weight' in key else 'bias'
        mapping[key] = '{}{}.{}.{}'.format(prefix, layer, pos, wtype)
    return mapping
def _get_all_options():
    """Collect every registered benchmark run into a single flat dict."""
    registries = (benchmark_algos, benchmark_policies, benchmark_baselines,
                  benchmark_q_functions, benchmark_auto)
    options = {}
    for registry in registries:
        options.update(_get_runs_dict(registry))
    return options
class PhrasalConstraint(metaclass=DummyObject):
    """Import-time placeholder: any instantiation fails unless torch is installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError when the torch backend is missing.
        requires_backends(self, ['torch'])
def get_benchmark_data_loader(args):
    """Resolve ``args.dataset`` to the task-loader function for that benchmark.

    Fix: the 'supported datasets' error message had an unbalanced quote;
    membership tests replace the chained equality comparisons.

    Raises:
        Exception: if ``args.dataset`` names an unsupported benchmark.
    """
    dataset = args.dataset
    if dataset in ('perm-mnist', 'permuted-mnist'):
        return get_permuted_mnist_tasks
    if dataset in ('rot-mnist', 'rotation-mnist'):
        return get_rotated_mnist_tasks
    if dataset in ('cifar-100', 'cifar100'):
        return get_split_cifar100_tasks
    raise Exception("Unknown dataset.\nThe code supports 'perm-mnist', 'rot-mnist', and 'cifar-100'.")
def fill_train_config(train_config):
    """Populate missing training options with their defaults (mutates in place)."""
    defaults = (
        ('fast_train', True),
        ('decay_rate', 1.0),
        ('decay_steps', 50000),
    )
    for option, value in defaults:
        set_missing(train_config, option, value)
def get_file_contents_as_list(file_path, encoding='utf-8', ignore_blanks=True):
    """Read ``file_path`` and return its lines, dropping empty lines unless ``ignore_blanks`` is False."""
    lines = get_file_contents(file_path, encoding=encoding).split('\n')
    if ignore_blanks:
        lines = [ln for ln in lines if ln != '']
    return lines
def conv33(out_planes, stride=1):
    """3x3 'same'-padded Keras convolution without bias."""
    return keras.layers.Conv2D(
        out_planes,
        kernel_size=3,
        strides=stride,
        padding='same',
        use_bias=False,
    )
def main():
    """Parse command-line arguments into a config dict and launch the run."""
    args = utils.prepare_parser().parse_args()
    config = vars(args)
    print(config)
    run(config)
def test_textnet_remove_weak_edges(corpus):
    """Pruning weak edges must still leave edges in the noun-phrase textnet."""
    net = tn.Textnet(corpus.noun_phrases(), remove_weak_edges=True)
    assert net.graph.ecount() > 0
class GroupViTVisionModel(metaclass=DummyObject):
    """Import-time placeholder: any instantiation fails unless torch is installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError when the torch backend is missing.
        requires_backends(self, ['torch'])
def epe_metric(disp_est, disp_gt, mask):
    """Mean end-point error |disp_gt - disp_est| over ``mask``; zero when the mask is empty."""
    if mask.sum() == 0:
        # No valid pixels: return a zero scalar on the estimate's device.
        return torch.tensor(0.0).to(disp_est.device)
    abs_err = torch.abs(disp_gt[mask] - disp_est[mask])
    return torch.mean(abs_err)
def get_model_zoo_configs() -> List[str]:
    """Link (or copy) the repo's configs/ into detectron2/model_zoo and list the YAMLs.

    Intended for packaging: makes the top-level config files importable as
    package data. Returns the relative 'configs/**/*.yaml' glob results.
    """
    source_configs_dir = path.join(path.dirname(path.realpath(__file__)), 'configs')
    destination = path.join(path.dirname(path.realpath(__file__)), 'detectron2', 'model_zoo', 'configs')
    if path.exists(source_configs_dir):
        # Clear any stale link/copy left over from a previous build.
        if path.islink(destination):
            os.unlink(destination)
        elif path.isdir(destination):
            shutil.rmtree(destination)
    if (not path.exists(destination)):
        try:
            os.symlink(source_configs_dir, destination)
        except OSError:
            # Symlinks may be unavailable (e.g. Windows without privileges); copy instead.
            shutil.copytree(source_configs_dir, destination)
    # NOTE(review): the glob is relative to the current working directory,
    # so this assumes the build runs from the repo root.
    config_paths = glob.glob('configs/**/*.yaml', recursive=True)
    return config_paths
def write_to_files(*args, readme=myfile_readme, webpage=myfile_out, add_header=False):
    """Write each line verbatim to the README and, reformatted, to the webpage file.

    Lines containing '###' (but not '####') open a collapsible '??? example'
    block in the MkDocs output; other lines are indented into the current
    block. NOTE(review): the defaults are module-level open file handles, so
    all calls share write position and the handles must remain open.
    """
    for line in args:
        readme.write(line)
        split = line.split('###')
        if (line.find('####') == (- 1)):
            if (len(split) > 1):
                # '###' heading -> collapsible example block titled by the heading text.
                webpage.write(((('\n??? example ' + '"') + split[(- 1)].strip()) + '"'))
                # Hidden div so the downstream parser can pick up the raw line.
                webpage.write('\n    <div class="meta_for_parser tablespecs"\n    style="font-size: 1pt;visibility:hidden" markdown>')
                webpage.write((('\n    ' + line.strip('\n\r')) + '\n    </div>\n\n'))
            elif (line[0] == '*'):
                # Bullet line: indent it under the current block.
                webpage.write(('    ' + line))
            else:
                webpage.write(line)
        else:
            # '####' sub-heading lines are only indented.
            webpage.write(('    ' + line))
        if add_header:
            # Additionally emit a block header derived from the '##' heading.
            split = line.split('##')
            webpage.write(((('\n??? example ' + '"') + split[(- 1)].strip()) + '"\n\n'))
class TestGamchanger(unittest.TestCase):
    """Placeholder test case for the gamchanger module.

    Fix: the method bodies were empty (a syntax error in Python); each now
    carries an explicit ``pass`` until real fixtures and assertions are
    written.
    """

    def setUp(self):
        # TODO: create gamchanger fixtures.
        pass

    def tearDown(self):
        # TODO: release fixtures created in setUp.
        pass

    def test_000_something(self):
        # TODO: replace with a real assertion.
        pass
class FetchEnv(robot_env.RobotEnv):
    """Superclass for Fetch robot manipulation environments (reach/push/pick-and-place)."""

    def __init__(self, model_path, n_substeps, gripper_extra_height, block_gripper, has_object, target_in_the_air, target_offset, obj_range, target_range, distance_threshold, initial_qpos, reward_type):
        """Store task configuration and initialize the underlying MuJoCo robot env.

        reward_type is 'sparse' (0/-1 around the distance threshold) or dense
        (negative goal distance). The action space has 4 dims: xyz + gripper.
        """
        self.gripper_extra_height = gripper_extra_height
        self.block_gripper = block_gripper
        self.has_object = has_object
        self.target_in_the_air = target_in_the_air
        self.target_offset = target_offset
        self.obj_range = obj_range
        self.target_range = target_range
        self.distance_threshold = distance_threshold
        self.reward_type = reward_type
        super(FetchEnv, self).__init__(model_path=model_path, n_substeps=n_substeps, n_actions=4, initial_qpos=initial_qpos)

    def compute_reward(self, achieved_goal, goal, info):
        # Sparse: -1 until within threshold; dense: negative Euclidean distance.
        d = goal_distance(achieved_goal, goal)
        if (self.reward_type == 'sparse'):
            return (- (d > self.distance_threshold).astype(np.float32))
        else:
            return (- d)

    def _step_callback(self):
        if self.block_gripper:
            # Keep the fingers closed for tasks that never use the gripper (e.g. push).
            self.sim.data.set_joint_qpos('robot0:l_gripper_finger_joint', 0.0)
            self.sim.data.set_joint_qpos('robot0:r_gripper_finger_joint', 0.0)
            self.sim.forward()

    def _set_action(self, action):
        """Apply a 4-dim action: 3-dim Cartesian displacement + gripper command."""
        assert (action.shape == (4,))
        action = action.copy()  # avoid mutating the caller's array
        (pos_ctrl, gripper_ctrl) = (action[:3], action[3])
        pos_ctrl *= 0.05  # limit the per-step Cartesian displacement
        rot_ctrl = [1.0, 0.0, 1.0, 0.0]  # fixed end-effector orientation (quaternion)
        gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl])  # symmetric fingers
        assert (gripper_ctrl.shape == (2,))
        if self.block_gripper:
            gripper_ctrl = np.zeros_like(gripper_ctrl)
        action = np.concatenate([pos_ctrl, rot_ctrl, gripper_ctrl])
        utils.ctrl_set_action(self.sim, action)
        utils.mocap_set_action(self.sim, action)

    def _get_obs(self):
        """Assemble the goal-env observation dict (proprioception + object state)."""
        grip_pos = self.sim.data.get_site_xpos('robot0:grip')
        # Velocities are scaled by the effective env timestep.
        dt = (self.sim.nsubsteps * self.sim.model.opt.timestep)
        grip_velp = (self.sim.data.get_site_xvelp('robot0:grip') * dt)
        (robot_qpos, robot_qvel) = utils.robot_get_obs(self.sim)
        if self.has_object:
            object_pos = self.sim.data.get_site_xpos('object0')
            object_rot = rotations.mat2euler(self.sim.data.get_site_xmat('object0'))
            object_velp = (self.sim.data.get_site_xvelp('object0') * dt)
            object_velr = (self.sim.data.get_site_xvelr('object0') * dt)
            object_rel_pos = (object_pos - grip_pos)
            object_velp -= grip_velp  # object velocity relative to the gripper
        else:
            # No object: use empty arrays so concatenation below still works.
            object_pos = object_rot = object_velp = object_velr = object_rel_pos = np.zeros(0)
        gripper_state = robot_qpos[(- 2):]
        gripper_vel = (robot_qvel[(- 2):] * dt)
        if (not self.has_object):
            achieved_goal = grip_pos.copy()
        else:
            achieved_goal = np.squeeze(object_pos.copy())
        obs = np.concatenate([grip_pos, object_pos.ravel(), object_rel_pos.ravel(), gripper_state, object_rot.ravel(), object_velp.ravel(), object_velr.ravel(), grip_velp, gripper_vel])
        return {'observation': obs.copy(), 'achieved_goal': achieved_goal.copy(), 'desired_goal': self.goal.copy()}

    def _viewer_setup(self):
        # Point the camera at the gripper link.
        body_id = self.sim.model.body_name2id('robot0:gripper_link')
        lookat = self.sim.data.body_xpos[body_id]
        for (idx, value) in enumerate(lookat):
            self.viewer.cam.lookat[idx] = value
        self.viewer.cam.distance = 2.5
        self.viewer.cam.azimuth = 132.0
        self.viewer.cam.elevation = (- 14.0)

    def _render_callback(self):
        # Move the target site marker to the current goal (visualization only).
        sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()
        site_id = self.sim.model.site_name2id('target0')
        self.sim.model.site_pos[site_id] = (self.goal - sites_offset[0])
        self.sim.forward()

    def _reset_sim(self):
        self.sim.set_state(self.initial_state)
        if self.has_object:
            # Randomize the object's XY position at least 0.1 m from the gripper.
            object_xpos = self.initial_gripper_xpos[:2]
            while (np.linalg.norm((object_xpos - self.initial_gripper_xpos[:2])) < 0.1):
                object_xpos = (self.initial_gripper_xpos[:2] + self.np_random.uniform((- self.obj_range), self.obj_range, size=2))
            object_qpos = self.sim.data.get_joint_qpos('object0:joint')
            assert (object_qpos.shape == (7,))  # free joint: 3 pos + 4 quat
            object_qpos[:2] = object_xpos
            self.sim.data.set_joint_qpos('object0:joint', object_qpos)
        self.sim.forward()
        return True

    def _sample_goal(self):
        if self.has_object:
            goal = (self.initial_gripper_xpos[:3] + self.np_random.uniform((- self.target_range), self.target_range, size=3))
            goal += self.target_offset
            goal[2] = self.height_offset  # place the goal on the table surface
            if (self.target_in_the_air and (self.np_random.uniform() < 0.5)):
                # Half of the time, lift the goal into the air (pick-and-place).
                goal[2] += self.np_random.uniform(0, 0.45)
        else:
            goal = (self.initial_gripper_xpos[:3] + self.np_random.uniform((- 0.15), 0.15, size=3))
        return goal.copy()

    def _is_success(self, achieved_goal, desired_goal):
        d = goal_distance(achieved_goal, desired_goal)
        return (d < self.distance_threshold).astype(np.float32)

    def _env_setup(self, initial_qpos):
        for (name, value) in initial_qpos.items():
            self.sim.data.set_joint_qpos(name, value)
        utils.reset_mocap_welds(self.sim)
        self.sim.forward()
        # Drive the end effector to its initial pose via the mocap body.
        gripper_target = (np.array([(- 0.498), 0.005, ((- 0.431) + self.gripper_extra_height)]) + self.sim.data.get_site_xpos('robot0:grip'))
        gripper_rotation = np.array([1.0, 0.0, 1.0, 0.0])
        self.sim.data.set_mocap_pos('robot0:mocap', gripper_target)
        self.sim.data.set_mocap_quat('robot0:mocap', gripper_rotation)
        for _ in range(10):
            self.sim.step()  # let the mocap constraint settle
        self.initial_gripper_xpos = self.sim.data.get_site_xpos('robot0:grip').copy()
        if self.has_object:
            # Table height of the object; used to place goals on the surface.
            self.height_offset = self.sim.data.get_site_xpos('object0')[2]
class StableDiffusionXLPipeline(metaclass=DummyObject):
    """Import-time placeholder requiring the torch and transformers backends.

    Fix: ``from_config`` and ``from_pretrained`` take ``cls`` and are
    alternate constructors; they are now ``@classmethod`` so the dummy
    mirrors the real pipeline's interface (callable on the class itself).
    """

    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
def validate(a_l, b_l, c_l, a_u, b_u, c_u, x_minus, x_plus, y_minus, y_plus, verify_and_modify_all=False, max_iter=100, plot=False, eps=1e-05, print_info=True):
    """Check that planes (a_l,b_l,c_l)/(a_u,b_u,c_u) lower/upper bound the surface
    over [x_minus, x_plus] x [y_minus, y_plus].

    Samples ``max_iter`` random elements (or all N when
    ``verify_and_modify_all``), raising on a violation larger than ``eps``.
    In verify-and-modify mode, sub-eps violations are repaired by shifting
    the plane offsets c_l / c_u. Returns the (possibly adjusted) offsets,
    reshaped to the original shape.
    """
    original_shape = c_l.shape
    # Flatten so a single index n addresses one element's pair of planes.
    a_l_new = a_l.view((- 1))
    b_l_new = b_l.view((- 1))
    c_l_new = c_l.view((- 1))
    a_u_new = a_u.view((- 1))
    b_u_new = b_u.view((- 1))
    c_u_new = c_u.view((- 1))
    x_minus_new = x_minus.view((- 1))
    x_plus_new = x_plus.view((- 1))
    y_minus_new = y_minus.view((- 1))
    y_plus_new = y_plus.view((- 1))
    N = a_l_new.size(0)
    if verify_and_modify_all:
        max_iter = N
    for i in range(max_iter):
        if verify_and_modify_all:
            # Exhaustive sweep over every element.
            # NOTE(review): n becomes a plain int here, but n.long() below is
            # a tensor method — confirm this path was ever exercised.
            n = i
        else:
            # Random spot check.
            n = torch.randint(0, N, [1])
        n = n.long()
        # hl_fl = lower plane minus function; hu_fu = upper plane minus function.
        (hl_fl, hu_fu) = plot_2_surface(x_minus_new[n], x_plus_new[n], y_minus_new[n], y_plus_new[n], a_l_new[n], b_l_new[n], c_l_new[n], a_u_new[n], b_u_new[n], c_u_new[n], plot=plot)
        if print_info:
            print(('x sigmoid iter: %d num: %d hl-fl max %.6f mean %.6f hu-fu min %.6f mean %.6f' % (i, n, hl_fl.max(), hl_fl.mean(), hu_fu.min(), hu_fu.mean())))
        if (hl_fl.max() > eps):
            # Lower plane rises above the function by more than eps: hard failure.
            print(x_minus_new[n], x_plus_new[n], y_minus_new[n], y_plus_new[n], a_l_new[n], b_l_new[n], c_l_new[n], a_u_new[n], b_u_new[n], c_u_new[n])
            plot_surface(x_minus_new[n], x_plus_new[n], y_minus_new[n], y_plus_new[n], a_l_new[n], b_l_new[n], c_l_new[n])
            print('hl-fl max', hl_fl.max())
            raise Exception('lower plane fail')
            break  # NOTE(review): unreachable (follows raise)
        if ((hl_fl.max() > 0) and verify_and_modify_all):
            # Sub-eps violation: push the lower plane down to restore soundness.
            c_l_new[n] = (c_l_new[n] - (hl_fl.max() * 2))
        if (hu_fu.min() < (- eps)):
            # Upper plane dips below the function by more than eps: hard failure.
            print(x_minus_new[n], x_plus_new[n], y_minus_new[n], y_plus_new[n], a_l_new[n], b_l_new[n], c_l_new[n], a_u_new[n], b_u_new[n], c_u_new[n])
            plot_surface(x_minus_new[n], x_plus_new[n], y_minus_new[n], y_plus_new[n], a_u_new[n], b_u_new[n], c_u_new[n])
            print('hu-fu min', hu_fu.min())
            raise Exception('upper plane fail')
            break  # NOTE(review): unreachable (follows raise)
        if ((hu_fu.min() < 0) and verify_and_modify_all):
            # Sub-eps violation: lift the upper plane (min is negative here).
            c_u_new[n] = (c_u_new[n] - (hu_fu.min() * 2))
    c_l_new = c_l_new.view(original_shape)
    c_u_new = c_u_new.view(original_shape)
    return (c_l_new, c_u_new)
def getLogger(snapshot, model_name):
    """Configure root logging to append INFO records to <snapshot>/<model_name>.log
    (creating the snapshot directory if needed) and return the root logger."""
    if not os.path.exists(snapshot):
        os.makedirs(snapshot)
    log_path = os.path.join(snapshot, model_name + '.log')
    # basicConfig is a no-op if the root logger was already configured.
    logging.basicConfig(filename=log_path, level=logging.INFO)
    return logging.getLogger()
class SepConv(nn.Module):
    """Depthwise convolution (groups == in_planes) followed by BatchNorm.

    No activation is applied; callers add their own nonlinearity.
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride):
        super(SepConv, self).__init__()
        # "Same"-style padding for odd kernel sizes.
        pad = (kernel_size - 1) // 2
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size, stride,
                               padding=pad, bias=False, groups=in_planes)
        self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        out = self.conv1(x)
        return self.bn1(out)
# NOTE(review): `_sensor` looks like the tail of a stripped decorator
# (e.g. `@registry.register_sensor`) — confirm against the upstream file.
_sensor
class PyRobotDepthSensor(DepthSensor):
    """Depth sensor wrapper for PyRobot observations.

    Converts raw millimeter depth to meters, clips to the configured range and
    optionally normalizes to [0, 1].
    """
    # Bounds of the reported observation space (set in __init__ from config).
    min_depth_value: float
    max_depth_value: float

    def __init__(self, config):
        # When normalizing, the observation space is [0, 1]; otherwise it is
        # the configured metric depth range.
        if config.NORMALIZE_DEPTH:
            self.min_depth_value = 0
            self.max_depth_value = 1
        else:
            self.min_depth_value = config.MIN_DEPTH
            self.max_depth_value = config.MAX_DEPTH
        super().__init__(config=config)

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        """Box of shape (HEIGHT, WIDTH, 1) bounded by the depth range above."""
        return spaces.Box(low=self.min_depth_value, high=self.max_depth_value, shape=(self.config.HEIGHT, self.config.WIDTH, 1), dtype=np.float32)

    def get_observation(self, robot_obs, *args: Any, **kwargs: Any):
        """Fetch this sensor's raw reading from `robot_obs` and post-process it."""
        obs = robot_obs.get(self.uuid, None)
        assert (obs is not None), 'Invalid observation for {} sensor'.format(self.uuid)
        obs = _resize_observation(obs, self.observation_space, self.config)
        # Raw PyRobot depth is in millimeters; convert to meters.
        obs = (obs / MM_IN_METER)
        obs = np.clip(obs, self.config.MIN_DEPTH, self.config.MAX_DEPTH)
        if self.config.NORMALIZE_DEPTH:
            # Rescale [MIN_DEPTH, MAX_DEPTH] -> [0, 1].
            obs = ((obs - self.config.MIN_DEPTH) / (self.config.MAX_DEPTH - self.config.MIN_DEPTH))
        # Add a trailing channel axis to match the (H, W, 1) observation space.
        obs = np.expand_dims(obs, axis=2)
        return obs
def load_vocab(vocab_file):
    """Load a vocabulary file into an OrderedDict token -> index.

    BERT's reserved '[unusedN]' placeholder tokens are remapped to the
    seq2seq special tokens used by this project.
    """
    # Static remappings for [unused1] and [unused12]..[unused18].
    remap = {
        '[unused1]': '[X_SEP]',
        '[unused12]': '[S2S_SEP]',
        '[unused13]': '[S2S_CLS]',
        '[unused14]': '[L2R_SEP]',
        '[unused15]': '[L2R_CLS]',
        '[unused16]': '[R2L_SEP]',
        '[unused17]': '[R2L_CLS]',
        '[unused18]': '[S2S_SOS]',
    }
    # [unused2]..[unused11] become [SEP_0]..[SEP_9].
    for k in range(10):
        remap['[unused{}]'.format(k + 2)] = '[SEP_{}]'.format(k)
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        for index, line in enumerate(reader):
            token = line.strip()
            vocab[remap.get(token, token)] = index
    return vocab
class resnet(_fasterRCNN):
    """Faster R-CNN with a ResNet-101 backbone (conv1..layer3 as the base
    feature extractor, layer4 as the RoI head)."""

    def __init__(self, classes, num_layers=101, pretrained=False, class_agnostic=False):
        # Path of Caffe-converted ImageNet weights loaded when pretrained=True.
        self.model_path = 'data/pretrained_model/resnet101_caffe.pth'
        # Channel depth of the base feature map fed to the RPN (layer3 output).
        self.dout_base_model = 1024
        self.pretrained = pretrained
        self.class_agnostic = class_agnostic
        _fasterRCNN.__init__(self, classes, class_agnostic)

    def _init_modules(self):
        """Build backbone/head modules and freeze layers per cfg.RESNET.FIXED_BLOCKS."""
        resnet = resnet101()
        if (self.pretrained == True):
            print(('Loading pretrained weights from %s' % self.model_path))
            state_dict = torch.load(self.model_path)
            # Only copy weights whose names exist in this model.
            resnet.load_state_dict({k: v for (k, v) in state_dict.items() if (k in resnet.state_dict())})
        # Base: conv1..layer3 produce the shared feature map; layer4 is the head.
        self.RCNN_base = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1, resnet.layer2, resnet.layer3)
        self.RCNN_top = nn.Sequential(resnet.layer4)
        self.RCNN_cls_score = nn.Linear(2048, self.n_classes)
        if self.class_agnostic:
            self.RCNN_bbox_pred = nn.Linear(2048, 4)
        else:
            self.RCNN_bbox_pred = nn.Linear(2048, (4 * self.n_classes))
        # conv1 and bn1 are always frozen.
        for p in self.RCNN_base[0].parameters():
            p.requires_grad = False
        for p in self.RCNN_base[1].parameters():
            p.requires_grad = False
        assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
        # Freeze layer1..layer3 progressively depending on FIXED_BLOCKS.
        if (cfg.RESNET.FIXED_BLOCKS >= 3):
            for p in self.RCNN_base[6].parameters():
                p.requires_grad = False
        if (cfg.RESNET.FIXED_BLOCKS >= 2):
            for p in self.RCNN_base[5].parameters():
                p.requires_grad = False
        if (cfg.RESNET.FIXED_BLOCKS >= 1):
            for p in self.RCNN_base[4].parameters():
                p.requires_grad = False

        def set_bn_fix(m):
            # Freeze every BatchNorm's affine parameters.
            classname = m.__class__.__name__
            if (classname.find('BatchNorm') != (- 1)):
                for p in m.parameters():
                    p.requires_grad = False
        self.RCNN_base.apply(set_bn_fix)
        self.RCNN_top.apply(set_bn_fix)

    def train(self, mode=True):
        """Switch to train mode but keep frozen layers and BatchNorms in eval."""
        nn.Module.train(self, mode)
        if mode:
            # Keep the whole base in eval, then re-enable layer2/layer3 only.
            self.RCNN_base.eval()
            self.RCNN_base[5].train()
            self.RCNN_base[6].train()

            def set_bn_eval(m):
                # BatchNorm running stats must stay fixed during fine-tuning.
                classname = m.__class__.__name__
                if (classname.find('BatchNorm') != (- 1)):
                    m.eval()
            self.RCNN_base.apply(set_bn_eval)
            self.RCNN_top.apply(set_bn_eval)

    def _head_to_tail(self, pool5):
        """Run layer4 on pooled RoIs and global-average-pool to a 2048-d vector."""
        fc7 = self.RCNN_top(pool5).mean(3).mean(2)
        return fc7
def check_graph(G):
    """Remove nodes lacking an 'emb' attribute from graph ``G`` (in place).

    Args:
        G: a networkx-style graph exposing ``nodes``, the ``node`` attribute
            dict and ``remove_nodes_from``.

    Returns:
        The same graph, with embedding-less nodes removed.
    """
    total_nodes = len(G.nodes)
    no_emb_nodes = 0
    nodes_to_delete = []
    for node_str in G.nodes:
        try:
            G.node[node_str]['emb']
        except KeyError:
            # Fix: was a bare `except:` which would also hide unrelated errors
            # (e.g. AttributeError from an incompatible networkx API) and then
            # silently delete every node. Only a missing 'emb' key counts.
            no_emb_nodes += 1
            nodes_to_delete.append(node_str)
    print(('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes)))
    G.remove_nodes_from(nodes_to_delete)
    return G
class EventBroker():
    """Routes training events to streamers (live) and sinks/accumulators (per epoch).

    NOTE(review): ``initialize`` and ``initialize_from_state`` take ``cls`` —
    almost certainly stripped ``@classmethod`` decorators; confirm upstream.
    """

    def __init__(self, processors, dataroot, consts, data):
        # Kept so state_dict()/initialize_from_state can round-trip construction.
        self.params = {'processors': processors, 'dataroot': dataroot}
        self.data = data
        self.epoch_accumulator = EpochEventAccumulator(dataroot=dataroot)
        self.const_accumulator = ConstEventAccumulator(dataroot=dataroot, consts=consts)
        self.metadata = MetadataKeeper(dataroot=dataroot).load_epochs_data(data, consts)
        self.streamers = []
        self.sinks = []
        # Instantiate each configured processor; dict configs expand to kwargs.
        for processor in processors:
            proc_cls = EVENTPROCESSORS[processor]
            if isinstance(processors[processor], dict):
                proc = proc_cls(**processors[processor], dataroot=dataroot)
            else:
                proc = proc_cls(processors[processor], dataroot=dataroot)
            # A processor may be both a streamer and a sink.
            if isinstance(proc, EventStreamer):
                self.streamers.append(proc)
            if isinstance(proc, EventSink):
                self.sinks.append(proc.load_epochs_data(data, consts))
            if (not isinstance(proc, (EventSink, EventStreamer))):
                raise ValueError(("Unsupported processor type '%s'" % type(proc)))

    def initialize(cls, processors, dataroot):
        """Fresh broker with empty consts and no epoch history."""
        return cls(processors, dataroot, {}, [])

    def register_data(self, epoch, relative_iteration, epoch_size, key, data, dtype):
        """Record one datum: fan out to streamers, then accumulate.

        epoch=None marks a constant (stored once), otherwise it is buffered
        into the current epoch accumulator.
        """
        params = {'epoch': epoch, 'timestamp': time.time(), 'relative_iteration': relative_iteration, 'epoch_size': epoch_size, 'key': key, 'data': data, 'dtype': dtype}
        for streamer in self.streamers:
            streamer.add_row(**params)
        if (epoch is None):
            self.const_accumulator.add_const(key=key, data=data, dtype=dtype)
        else:
            self.epoch_accumulator.add_row(**params)

    def close_epoch(self):
        """Aggregate the finished epoch, publish it to metadata/sinks, reset."""
        epoch = self.epoch_accumulator.epoch
        # Sanity check: exactly one aggregate per previously closed epoch.
        assert (len(self.data) == epoch), ('%s != %s' % (len(self.data), epoch))
        epoch_data = self.epoch_accumulator.aggregate()
        self.metadata.register_epoch_data(epoch, epoch_data, self.const_accumulator.consts)
        for sink in self.sinks:
            sink.register_epoch_data(epoch, epoch_data, self.const_accumulator.consts)
        self.data.append(epoch_data)
        # Start a fresh accumulator for the next epoch.
        self.epoch_accumulator = EpochEventAccumulator(dataroot=self.params['dataroot'])

    def state_dict(self):
        """Serializable snapshot consumed by initialize_from_state."""
        return {'name': self.__class__.__name__, 'params': self.params, 'consts': self.const_accumulator.consts, 'data': self.data}

    def initialize_from_state(cls, state_dict, params):
        """Rebuild a broker from state_dict(), optionally relocating dataroot."""
        assert (state_dict['name'] == cls.__name__)
        if (params is not None):
            # Processor config must match the checkpoint exactly.
            assert (params['processors'] == state_dict['params']['processors']), ('%s != %s' % (str(params['processors']), str(state_dict['params']['processors'])))
            state_dict['params']['dataroot'] = params['dataroot']
        return cls(**state_dict['params'], consts=state_dict['consts'], data=state_dict['data'])
class SlicesAt():
    """Builds an indexing tuple that applies ``idx`` along one axis of an
    ``ndim``-dimensional array while taking everything along the other axes.

    Example: ``arr[SlicesAt(1, 3)[k]]`` is ``arr[:, k, :]``.
    """

    def __init__(self, axis: int, ndim: int):
        if ndim <= 0:
            raise ValueError(f'`ndim` (which is {ndim}) should be a positive integer')
        if not ((- ndim) <= axis < ndim):
            raise ValueError(f'`axis` (which is {axis}) should be within [{(- ndim)}, {ndim})')
        # Normalize negative axes to their positive equivalent.
        self._axis = axis % ndim
        self._ndim = ndim

    def __getitem__(self, idx):
        full = [slice(None) for _ in range(self._ndim)]
        full[self._axis] = idx
        return tuple(full)
def filter_question_and_answers(qa_model, caption_qas):
    """Keep only question instances whose answer the QA model reproduces.

    Duplicate questions are dropped; multiple-choice agreement is required for
    all instances, and non-yes/no answers must additionally be confirmed by a
    free-form QA pass scoring above FREE_FORM_THRESHOLD.
    """
    kept = []
    seen_questions = set()
    for instance in caption_qas:
        question = instance['question']
        caption = instance['caption']
        choices = instance['choices']
        # Skip duplicates of an already-processed question.
        if question in seen_questions:
            continue
        seen_questions.add(question)
        # The model must pick the same multiple-choice answer.
        predicted = qa_model.mcqa(question, caption, choices=choices)
        if predicted != instance['answer']:
            continue
        # Yes/no answers need no further verification.
        if instance['answer'] not in ['yes', 'no']:
            free_form = qa_model.qa(question, caption).strip()
            reference = instance['answer']
            if reference.isnumeric():
                # Try to normalize spelled-out numbers ("three" -> "3").
                try:
                    free_form = str(w2n.word_to_num(free_form))
                except:
                    pass
            if compute_prf(reference.split(), free_form.split()) <= FREE_FORM_THRESHOLD:
                continue
        kept.append(instance)
    return kept
class resnetv1(Network):
    """TF1/slim Faster R-CNN network with a ResNet-v1 backbone (50/101/152).

    block1..block3 form the shared feature extractor; block4 is applied per
    RoI as the classification head.
    """

    def __init__(self, batch_size=1, num_layers=50):
        Network.__init__(self, batch_size=batch_size)
        self._num_layers = num_layers
        self._resnet_scope = ('resnet_v1_%d' % num_layers)

    def _crop_pool_layer(self, bottom, rois, name):
        """RoI pooling via crop_and_resize on normalized box coordinates."""
        with tf.variable_scope(name) as scope:
            # rois columns: [batch_id, x1, y1, x2, y2].
            batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [(- 1), 1], name='batch_id'), [1])
            bottom_shape = tf.shape(bottom)
            # Image-space extent of the feature map, for normalizing boxes to [0, 1].
            height = ((tf.to_float(bottom_shape[1]) - 1.0) * np.float32(self._feat_stride[0]))
            width = ((tf.to_float(bottom_shape[2]) - 1.0) * np.float32(self._feat_stride[0]))
            x1 = (tf.slice(rois, [0, 1], [(- 1), 1], name='x1') / width)
            y1 = (tf.slice(rois, [0, 2], [(- 1), 1], name='y1') / height)
            x2 = (tf.slice(rois, [0, 3], [(- 1), 1], name='x2') / width)
            y2 = (tf.slice(rois, [0, 4], [(- 1), 1], name='y2') / height)
            # Boxes are treated as fixed inputs — no gradient through coordinates.
            bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], 1))
            if cfg.RESNET.MAX_POOL:
                # Crop at double resolution, then 2x2 max-pool down.
                pre_pool_size = (cfg.POOLING_SIZE * 2)
                crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name='crops')
                crops = slim.max_pool2d(crops, [2, 2], padding='SAME')
            else:
                crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [cfg.POOLING_SIZE, cfg.POOLING_SIZE], name='crops')
        return crops

    def build_base(self):
        """Stem: 7x7/2 conv + explicit SAME-style pad + 3x3/2 max pool."""
        with tf.variable_scope(self._resnet_scope, self._resnet_scope):
            net = resnet_utils.conv2d_same(self._image, 64, 7, stride=2, scope='conv1')
            net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
            net = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='pool1')
        return net

    def build_network(self, sess, is_training=True):
        """Assemble backbone, RPN, proposal layers and RCNN head; returns
        (rois, cls_prob, bbox_pred)."""
        if cfg.TRAIN.TRUNCATED:
            initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
            initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)
        else:
            initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
            initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)
        bottleneck = resnet_v1.bottleneck
        # Block specs are (depth, depth_bottleneck, stride) tuples per unit.
        if (self._num_layers == 50):
            blocks = [resnet_utils.Block('block1', bottleneck, (([(256, 64, 1)] * 2) + [(256, 64, 2)])), resnet_utils.Block('block2', bottleneck, (([(512, 128, 1)] * 3) + [(512, 128, 2)])), resnet_utils.Block('block3', bottleneck, (([(1024, 256, 1)] * 5) + [(1024, 256, 1)])), resnet_utils.Block('block4', bottleneck, ([(2048, 512, 1)] * 3))]
        elif (self._num_layers == 101):
            blocks = [resnet_utils.Block('block1', bottleneck, (([(256, 64, 1)] * 2) + [(256, 64, 2)])), resnet_utils.Block('block2', bottleneck, (([(512, 128, 1)] * 3) + [(512, 128, 2)])), resnet_utils.Block('block3', bottleneck, (([(1024, 256, 1)] * 22) + [(1024, 256, 1)])), resnet_utils.Block('block4', bottleneck, ([(2048, 512, 1)] * 3))]
        elif (self._num_layers == 152):
            blocks = [resnet_utils.Block('block1', bottleneck, (([(256, 64, 1)] * 2) + [(256, 64, 2)])), resnet_utils.Block('block2', bottleneck, (([(512, 128, 1)] * 7) + [(512, 128, 2)])), resnet_utils.Block('block3', bottleneck, (([(1024, 256, 1)] * 35) + [(1024, 256, 1)])), resnet_utils.Block('block4', bottleneck, ([(2048, 512, 1)] * 3))]
        else:
            raise NotImplementedError
        assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
        # Fixed blocks run with is_training=False (frozen BN/weights).
        if (cfg.RESNET.FIXED_BLOCKS == 3):
            with slim.arg_scope(resnet_arg_scope(is_training=False)):
                net = self.build_base()
                (net_conv4, _) = resnet_v1.resnet_v1(net, blocks[0:cfg.RESNET.FIXED_BLOCKS], global_pool=False, include_root_block=False, scope=self._resnet_scope)
        elif (cfg.RESNET.FIXED_BLOCKS > 0):
            with slim.arg_scope(resnet_arg_scope(is_training=False)):
                net = self.build_base()
                (net, _) = resnet_v1.resnet_v1(net, blocks[0:cfg.RESNET.FIXED_BLOCKS], global_pool=False, include_root_block=False, scope=self._resnet_scope)
            with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
                (net_conv4, _) = resnet_v1.resnet_v1(net, blocks[cfg.RESNET.FIXED_BLOCKS:(- 1)], global_pool=False, include_root_block=False, scope=self._resnet_scope)
        else:
            with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
                net = self.build_base()
                (net_conv4, _) = resnet_v1.resnet_v1(net, blocks[0:(- 1)], global_pool=False, include_root_block=False, scope=self._resnet_scope)
        self._act_summaries.append(net_conv4)
        self._layers['head'] = net_conv4
        with tf.variable_scope(self._resnet_scope, self._resnet_scope):
            # RPN: 3x3 conv then parallel 1x1 score/box branches.
            self._anchor_component()
            rpn = slim.conv2d(net_conv4, 512, [3, 3], trainable=is_training, weights_initializer=initializer, scope='rpn_conv/3x3')
            self._act_summaries.append(rpn)
            rpn_cls_score = slim.conv2d(rpn, (self._num_anchors * 2), [1, 1], trainable=is_training, weights_initializer=initializer, padding='VALID', activation_fn=None, scope='rpn_cls_score')
            rpn_cls_score_reshape = self._reshape_layer(rpn_cls_score, 2, 'rpn_cls_score_reshape')
            rpn_cls_prob_reshape = self._softmax_layer(rpn_cls_score_reshape, 'rpn_cls_prob_reshape')
            rpn_cls_prob = self._reshape_layer(rpn_cls_prob_reshape, (self._num_anchors * 2), 'rpn_cls_prob')
            rpn_bbox_pred = slim.conv2d(rpn, (self._num_anchors * 4), [1, 1], trainable=is_training, weights_initializer=initializer, padding='VALID', activation_fn=None, scope='rpn_bbox_pred')
            if is_training:
                (rois, roi_scores) = self._proposal_layer(rpn_cls_prob, rpn_bbox_pred, 'rois')
                rpn_labels = self._anchor_target_layer(rpn_cls_score, 'anchor')
                # Ensure anchor targets are computed before sampling proposals.
                with tf.control_dependencies([rpn_labels]):
                    (rois, _) = self._proposal_target_layer(rois, roi_scores, 'rpn_rois')
            elif (cfg.TEST.MODE == 'nms'):
                (rois, _) = self._proposal_layer(rpn_cls_prob, rpn_bbox_pred, 'rois')
            elif (cfg.TEST.MODE == 'top'):
                (rois, _) = self._proposal_top_layer(rpn_cls_prob, rpn_bbox_pred, 'rois')
            else:
                raise NotImplementedError
            if (cfg.POOLING_MODE == 'crop'):
                pool5 = self._crop_pool_layer(net_conv4, rois, 'pool5')
            else:
                raise NotImplementedError
        # RCNN head: block4 on pooled RoIs, then global average pooling.
        with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
            (fc7, _) = resnet_v1.resnet_v1(pool5, blocks[(- 1):], global_pool=False, include_root_block=False, scope=self._resnet_scope)
        with tf.variable_scope(self._resnet_scope, self._resnet_scope):
            fc7 = tf.reduce_mean(fc7, axis=[1, 2])
            cls_score = slim.fully_connected(fc7, self._num_classes, weights_initializer=initializer, trainable=is_training, activation_fn=None, scope='cls_score')
            cls_prob = self._softmax_layer(cls_score, 'cls_prob')
            bbox_pred = slim.fully_connected(fc7, (self._num_classes * 4), weights_initializer=initializer_bbox, trainable=is_training, activation_fn=None, scope='bbox_pred')
        self._predictions['rpn_cls_score'] = rpn_cls_score
        self._predictions['rpn_cls_score_reshape'] = rpn_cls_score_reshape
        self._predictions['rpn_cls_prob'] = rpn_cls_prob
        self._predictions['rpn_bbox_pred'] = rpn_bbox_pred
        self._predictions['cls_score'] = cls_score
        self._predictions['cls_prob'] = cls_prob
        self._predictions['bbox_pred'] = bbox_pred
        self._predictions['rois'] = rois
        self._score_summaries.update(self._predictions)
        return (rois, cls_prob, bbox_pred)

    def get_variables_to_restore(self, variables, var_keep_dic):
        """Select checkpoint variables to restore; conv1 is deferred to fix_variables."""
        variables_to_restore = []
        for v in variables:
            # conv1 needs channel-order conversion (see fix_variables), skip here.
            if (v.name == (self._resnet_scope + '/conv1/weights:0')):
                self._variables_to_fix[v.name] = v
                continue
            if (v.name.split(':')[0] in var_keep_dic):
                print(('Varibles restored: %s' % v.name))
                variables_to_restore.append(v)
        return variables_to_restore

    def fix_variables(self, sess, pretrained_model):
        """Restore conv1 weights and flip RGB<->BGR channel order (Caffe weights)."""
        print('Fix Resnet V1 layers..')
        with tf.variable_scope('Fix_Resnet_V1') as scope:
            with tf.device('/cpu:0'):
                conv1_rgb = tf.get_variable('conv1_rgb', [7, 7, 3, 64], trainable=False)
                restorer_fc = tf.train.Saver({(self._resnet_scope + '/conv1/weights'): conv1_rgb})
                restorer_fc.restore(sess, pretrained_model)
                # Reverse the input-channel axis to convert RGB weights to BGR.
                sess.run(tf.assign(self._variables_to_fix[(self._resnet_scope + '/conv1/weights:0')], tf.reverse(conv1_rgb, [2])))
def preprocess_data(input_standoff_folder_train, output_conll_folder_train, output_conll_file_train, input_standoff_folder_test, output_conll_folder_test, output_conll_file_test, input_standoff_folder_dev, output_conll_folder_dev, output_conll_file_dev):
    """Convert the train, test and dev standoff annotation folders into single
    CoNLL files (in that order) via anntoconll_wlp."""
    splits = (
        (input_standoff_folder_train, output_conll_folder_train, output_conll_file_train),
        (input_standoff_folder_test, output_conll_folder_test, output_conll_file_test),
        (input_standoff_folder_dev, output_conll_folder_dev, output_conll_file_dev),
    )
    for standoff_folder, conll_folder, conll_file in splits:
        anntoconll_wlp.convert_standoff_conll_single_file(standoff_folder, conll_folder, conll_file)
def test_potential_from_data():
    """A simulation built from a sampled potential must reproduce the master
    matrix of the simulation built from the analytic potential U."""
    shared = dict(temperature=(1 / k), drag=1, extent=10, resolution=0.1, boundary=boundary.reflecting)
    sim_analytic = fokker_planck(potential=U, **shared)
    xs = np.linspace((- 5), 5, 100)
    sampled_potential = potential_from_data(xs, U(xs))
    sim_sampled = fokker_planck(potential=sampled_potential, **shared)
    assert np.allclose(sim_analytic.master_matrix._deduped_data(), sim_sampled.master_matrix._deduped_data(), atol=0.003, rtol=0)
class XLMRobertaForMaskedLM():
    """Import-guard stub: raises a helpful error (via ``requires_pytorch``)
    when PyTorch is not installed.

    NOTE(review): ``from_pretrained`` is normally a classmethod in these dummy
    classes — the decorator may have been stripped during extraction; confirm.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
class TQDMProgressBar(plc.TQDMProgressBar):
    """Progress bar that hides any metric whose name contains 'grad'."""

    def get_metrics(self, trainer, pl_module):
        metrics = super().get_metrics(trainer, pl_module)
        filtered = {}
        for key, value in metrics.items():
            # Gradient-norm style metrics clutter the bar; drop them.
            if 'grad' in key:
                continue
            filtered[key] = value
        return filtered
def preprocess_days_test(path=DATA_PATH, file=DATA_FILE, path_proc=DATA_PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH, days_test=DAYS_TEST):
    """Load the raw session file, filter rare items / short sessions, and write
    a train/test split where the last `days_test` days form the test set."""
    source = path + file
    (data, buys) = load_data(source)
    filtered = filter_data(data, min_item_support, min_session_length)
    split_data(filtered, path_proc + file, days_test)
class Logger(object):
    """Episode-statistics logger for vectorized RL environments.

    Buffers per-env rewards, aggregates finished episodes into rolling
    deques, and writes both a TensorBoard summary and a CSV log.
    """

    def __init__(self, n_envs, logdir):
        self.start_time = time.time()
        self.n_envs = n_envs
        self.logdir = logdir
        # One running reward list per parallel environment.
        self.episode_rewards = []
        for _ in range(n_envs):
            self.episode_rewards.append([])
        # Rolling windows over the last 40 finished episodes.
        self.episode_len_buffer = deque(maxlen=40)
        self.episode_reward_buffer = deque(maxlen=40)
        # Column order must match the list built in dump() and the key order
        # of _get_episode_statistics().
        self.log = pd.DataFrame(columns=['timesteps', 'wall_time', 'num_episodes', 'max_episode_rewards', 'mean_episode_rewards', 'min_episode_rewards', 'max_episode_len', 'mean_episode_len', 'min_episode_len'])
        self.writer = SummaryWriter(logdir)
        self.timesteps = 0
        self.num_episodes = 0

    def feed(self, rew_batch, done_batch):
        """Consume (steps, n_envs) reward/done arrays, closing episodes on done."""
        steps = rew_batch.shape[0]
        # Transpose to (n_envs, steps) so each row is one environment.
        rew_batch = rew_batch.T
        done_batch = done_batch.T
        for i in range(self.n_envs):
            for j in range(steps):
                self.episode_rewards[i].append(rew_batch[i][j])
                if done_batch[i][j]:
                    # Episode finished: record its length/return and reset.
                    self.episode_len_buffer.append(len(self.episode_rewards[i]))
                    self.episode_reward_buffer.append(np.sum(self.episode_rewards[i]))
                    self.episode_rewards[i] = []
                    self.num_episodes += 1
        self.timesteps += (self.n_envs * steps)

    def write_summary(self, summary):
        """Write an arbitrary {tag: scalar} dict at the current timestep."""
        for (key, value) in summary.items():
            self.writer.add_scalar(key, value, self.timesteps)

    def dump(self):
        """Append one row to the CSV log (and TensorBoard) and print it."""
        wall_time = (time.time() - self.start_time)
        if (self.num_episodes > 0):
            episode_statistics = self._get_episode_statistics()
            # Relies on dict insertion order matching the DataFrame columns.
            episode_statistics_list = list(episode_statistics.values())
            for (key, value) in episode_statistics.items():
                self.writer.add_scalar(key, value, self.timesteps)
        else:
            # No finished episodes yet: pad the six statistic columns.
            episode_statistics_list = ([None] * 6)
        log = ((([self.timesteps] + [wall_time]) + [self.num_episodes]) + episode_statistics_list)
        self.log.loc[len(self.log)] = log
        # Rewrite the whole CSV each dump so the file is always complete.
        with open((self.logdir + '/log.csv'), 'w') as f:
            self.log.to_csv(f, index=False)
        print(self.log.loc[(len(self.log) - 1)])

    def _get_episode_statistics(self):
        """Max/mean/min of reward and length over the rolling episode windows.

        Insertion order is significant: dump() maps values() onto columns.
        """
        episode_statistics = {}
        episode_statistics['Rewards/max_episodes'] = np.max(self.episode_reward_buffer)
        episode_statistics['Rewards/mean_episodes'] = np.mean(self.episode_reward_buffer)
        episode_statistics['Rewards/min_episodes'] = np.min(self.episode_reward_buffer)
        episode_statistics['Len/max_episodes'] = np.max(self.episode_len_buffer)
        episode_statistics['Len/mean_episodes'] = np.mean(self.episode_len_buffer)
        episode_statistics['Len/min_episodes'] = np.min(self.episode_len_buffer)
        return episode_statistics
class TestTriangularAttention(unittest.TestCase):
    """Shape and parity tests for OpenFold's TriangleAttention against the
    reference AlphaFold (jax/haiku) implementation."""

    def test_shape(self):
        """TriangleAttention must preserve the (B, N, N, c_z) input shape."""
        c_z = consts.c_z
        c = 12
        no_heads = 4
        starting = True
        tan = TriangleAttention(c_z, c, no_heads, starting)
        batch_size = consts.batch_size
        n_res = consts.n_res
        x = torch.rand((batch_size, n_res, n_res, c_z))
        shape_before = x.shape
        x = tan(x, chunk_size=None)
        shape_after = x.shape
        self.assertTrue((shape_before == shape_after))

    def _tri_att_compare(self, starting=False):
        """Run the same random input through AlphaFold's and OpenFold's
        triangle attention and assert the mean absolute difference < eps."""
        name = (('triangle_attention_' + ('starting' if starting else 'ending')) + '_node')

        def run_tri_att(pair_act, pair_mask):
            # Haiku-transformed reference module built from the AF2 config.
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            tri_att = alphafold.model.modules.TriangleAttention((c_e.triangle_attention_starting_node if starting else c_e.triangle_attention_ending_node), config.model.global_config, name=name)
            act = tri_att(pair_act=pair_act, pair_mask=pair_mask)
            return act
        f = hk.transform(run_tri_att)
        n_res = consts.n_res
        pair_act = (np.random.rand(n_res, n_res, consts.c_z) * 100)
        pair_mask = np.random.randint(low=0, high=2, size=(n_res, n_res))
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/evoformer_iteration/' + name))
        # Strip the leading (ensemble) axis from each weight array.
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        out_gt = f.apply(params, None, pair_act, pair_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        model = compare_utils.get_global_pretrained_openfold()
        module = (model.evoformer.blocks[0].core.tri_att_start if starting else model.evoformer.blocks[0].core.tri_att_end)
        # Deep-copy so mutating `starting` cannot affect the shared model.
        module = copy.deepcopy(module)
        module.starting = starting
        out_repro = module(torch.as_tensor(pair_act, dtype=torch.float32).cuda(), mask=torch.as_tensor(pair_mask, dtype=torch.float32).cuda(), chunk_size=None).cpu()
        self.assertTrue((torch.mean(torch.abs((out_gt - out_repro))) < consts.eps))

    # NOTE(review): likely a stripped skip decorator (e.g.
    # `@compare_utils.skip_unless_alphafold_installed()`); confirm upstream.
    _utils.skip_unless_alphafold_installed()
    def test_tri_att_end_compare(self):
        self._tri_att_compare()

    # NOTE(review): same stripped-decorator suspicion as above.
    _utils.skip_unless_alphafold_installed()
    def test_tri_att_start_compare(self):
        self._tri_att_compare(starting=True)
def init_distributed_mode(args):
    """Populate args.rank/world_size/gpu from the environment and initialize a
    NCCL process group; sets args.distributed to False when no launcher is found."""
    env = os.environ
    if 'RANK' in env and 'WORLD_SIZE' in env:
        # torchrun / torch.distributed.launch environment.
        args.rank = int(env['RANK'])
        args.world_size = int(env['WORLD_SIZE'])
        args.gpu = int(env['LOCAL_RANK'])
    elif 'SLURM_PROCID' in env:
        # SLURM launcher: derive the local GPU from the global rank.
        args.rank = int(env['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    elif hasattr(args, 'rank'):
        # Rank supplied explicitly by the caller.
        pass
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print(f'| distributed init (rank {args.rank}): {args.dist_url}', flush=True)
    from datetime import timedelta
    # Very long timeout so slow jobs don't trip the default 30-minute limit.
    delta = timedelta(days=20)
    dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, timeout=delta, rank=args.rank)
    dist.barrier()
    setup_for_distributed(args.rank == 0)
def load_city_result(city, model, data_dir):
    """Unpickle and return ``<data_dir><model>/result/<city>_result.pkl``.

    SECURITY NOTE: pickle.load executes arbitrary code — only call this on
    trusted result files.
    """
    result_path = ((((data_dir + model) + '/result/') + city) + '_result.pkl')
    # Fix: the original `pickle.load(open(...))` never closed the file handle.
    with open(result_path, 'rb') as f:
        return pickle.load(f)
def parse_nvidia_smi():
    """Run ``nvidia-smi -q`` and return its single-colon 'key : value' lines
    as a dict (later duplicate keys overwrite earlier ones)."""
    proc = subprocess.Popen(['nvidia-smi', '-q'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = proc.communicate()[0].decode('utf-8')
    parsed = dict()
    for line in output.split('\n'):
        # Lines with more or fewer than one ':' (headers, timestamps) are skipped.
        if line.count(':') != 1:
            continue
        key, val = [part.strip() for part in line.split(':')]
        parsed[key] = val
    return parsed
class ArgoverseProcess():
    """Converts one Argoverse tracking split into a pickled list of per-scene
    info dicts (lidar paths plus, for annotated splits, per-frame boxes)."""

    def __init__(self, dataset_path, out_path, version='sample'):
        assert (version in ['train', 'val', 'test', 'sample'])
        # Test splits ship no annotations, so boxes are skipped for them.
        self.is_test = ('test' in version)
        self.out_path = out_path
        self.dataset_path = join(dataset_path, version)
        self.argo = ArgoverseTrackingLoader(self.dataset_path)
        print(f'Total number of logs : {len(self.argo)}, version : {version}')
        self.version = version

    def convert(self):
        """Process every scene and dump the collected infos to a pickle file."""
        info = [self.process_scene(scene) for scene in self.argo]
        out_path = self.out_path
        makedirs(out_path, exist_ok=True)
        with open(join(out_path, f'infos_{self.version}.pkl'), 'wb') as f:
            pickle.dump(info, f)
        print(f"Saved {self.version} info at {join(out_path, f'infos_{self.version}.pkl')}")

    def process_scene(self, scene):
        """Build the info dict for one log: lidar metadata and labeled boxes."""
        info = {}
        info['num_pc'] = scene.lidar_count
        info['lidar_path'] = scene.lidar_list
        if self.is_test:
            info['bbox'] = []
            return info
        bbox_all = []
        for idx in tqdm(range(len(scene.lidar_list))):
            frame_boxes = []
            for label in scene.get_label_object(idx):
                frame_boxes.append({
                    'l': label.length,
                    'w': label.width,
                    'h': label.height,
                    '3d_coord': label.as_3d_bbox(),
                    '2d_coord': label.as_2d_bbox(),
                    'label_class': label.label_class,
                    'occlusion': label.occlusion,
                    'center': label.translation,
                    'quaternion': label.quaternion,
                })
            bbox_all.append(frame_boxes)
        info['bbox'] = bbox_all
        return info
class OptimizedModel():
    """Loader for Intel Neural Compressor (INC) optimized transformers models.

    Not directly instantiable — use ``from_pretrained``.

    NOTE(review): ``from_pretrained`` takes ``cls`` but shows no
    ``@classmethod`` decorator here — likely stripped during extraction.
    The exact line breaks inside the long error-message f-strings below were
    lost in extraction and have been reconstructed; confirm against upstream.
    """

    def __init__(self, *args, **kwargs):
        raise EnvironmentError(f'{self.__class__.__name__} is designed to be instantiated using the`{self.__class__.__name__}.from_pretrained(model_name_or_path)` method.')

    def from_pretrained(cls, model_name_or_path: str, **kwargs):
        """Load a model, dispatching on its quantization backend/dtype.

        Returns a frozen TorchScript module for IPEX-backed models, a plain
        transformers model when torch_dtype != int8, or an INC-restored
        quantized model otherwise.
        """
        from neural_compressor.utils.pytorch import load
        from neural_compressor import __version__
        config = kwargs.pop('config', None)
        cache_dir = kwargs.pop('cache_dir', None)
        force_download = kwargs.pop('force_download', False)
        resume_download = kwargs.pop('resume_download', False)
        use_auth_token = kwargs.pop('use_auth_token', None)
        revision = kwargs.pop('revision', None)
        if (config is None):
            config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, use_auth_token=use_auth_token, revision=revision, **kwargs)
        # Resolve the concrete transformers class from the config architecture.
        model_class = eval(f'transformers.{config.architectures[0]}')
        if (hasattr(config, 'backend') and (config.backend == 'ipex')):
            # IPEX models are stored as TorchScript; load and freeze directly.
            import intel_extension_for_pytorch
            logger.info('the INC IPEX quantization optimized model is loading.')
            weight_file = os.path.join(model_name_or_path, WEIGHTS_NAME)
            q_model = torch.jit.load(weight_file)
            q_model = torch.jit.freeze(q_model.eval())
            return q_model
        if (config.torch_dtype is not torch.int8):
            # Not quantized: defer entirely to the standard loader.
            model = model_class.from_pretrained(model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, use_auth_token=use_auth_token, revision=revision, **kwargs)
            return model
        else:
            logger.info('the quantization optimized model is loading.')
            # Temporarily widen the class-level ignore lists so loading the
            # fp32 skeleton does not warn about quantization-specific keys;
            # originals are restored after from_pretrained below.
            keys_to_ignore_on_load_unexpected = copy.deepcopy(getattr(model_class, '_keys_to_ignore_on_load_unexpected', None))
            keys_to_ignore_on_load_missing = copy.deepcopy(getattr(model_class, '_keys_to_ignore_on_load_missing', None))
            quantized_keys_to_ignore_on_load = ['zero_point', 'scale', 'packed_params', 'constant', 'module', 'best_configure']
            if (keys_to_ignore_on_load_unexpected is None):
                model_class._keys_to_ignore_on_load_unexpected = quantized_keys_to_ignore_on_load
            else:
                model_class._keys_to_ignore_on_load_unexpected.extend(quantized_keys_to_ignore_on_load)
            missing_keys_to_ignore_on_load = ['weight', 'bias']
            if (keys_to_ignore_on_load_missing is None):
                model_class._keys_to_ignore_on_load_missing = missing_keys_to_ignore_on_load
            else:
                model_class._keys_to_ignore_on_load_missing.extend(missing_keys_to_ignore_on_load)
            model = model_class.from_pretrained(model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, use_auth_token=use_auth_token, revision=revision, **kwargs)
            # Restore the original ignore lists (class-level mutation).
            model_class._keys_to_ignore_on_load_unexpected = keys_to_ignore_on_load_unexpected
            model_class._keys_to_ignore_on_load_missing = keys_to_ignore_on_load_missing
            dataloader = kwargs.get('dataloader', None)
            if ((not os.path.isdir(model_name_or_path)) and (not os.path.isfile(model_name_or_path))):
                # Hub identifier: download the weights file, with a
                # version-dependent transformers download API.
                if (Version(transformers.__version__) < Version('4.22.0')):
                    from transformers.file_utils import cached_path, hf_bucket_url
                    weights_file = hf_bucket_url(model_name_or_path, filename=WEIGHTS_NAME, revision=revision)
                    try:
                        resolved_weights_file = cached_path(weights_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, use_auth_token=use_auth_token)
                    except EnvironmentError as err:
                        logger.error(err)
                        msg = f'''Can't load weights for '{model_name_or_path}'. 
Make sure that: - '{model_name_or_path}' is a correct model identifier listed on ' (make sure '{model_name_or_path}' is not a path to a local directory with something else, in that case) - or '{model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME} '''
                        if (revision is not None):
                            msg += f'''- or '{revision}' is a valid git identifier (branch name, a tag name, or a commit id) that exists for this model name as listed on its model page on ' '''
                        raise EnvironmentError(msg)
                else:
                    from transformers.utils import cached_file
                    try:
                        resolved_weights_file = cached_file(model_name_or_path, filename=WEIGHTS_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, use_auth_token=use_auth_token)
                    except EnvironmentError as err:
                        logger.error(err)
                        msg = f'''Can't load weights for '{model_name_or_path}'. 
Make sure that: - '{model_name_or_path}' is a correct model identifier listed on ' (make sure '{model_name_or_path}' is not a path to a local directory with something else, in that case) - or '{model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME} '''
                        if (revision is not None):
                            msg += f'''- or '{revision}' is a valid git identifier (branch name, a tag name, or a commit id) that exists for this model name as listed on its model page on ' '''
                        raise EnvironmentError(msg)
                # INC >= 1.14 accepts a dataloader argument for calibration.
                q_model = (load(resolved_weights_file, model, dataloader=dataloader) if (Version(__version__) > Version('1.13')) else load(resolved_weights_file, model))
            else:
                # Local path: load the weights file directly.
                weights_file = os.path.join(os.path.abspath(os.path.expanduser(model_name_or_path)), WEIGHTS_NAME)
                q_model = (load(weights_file, model, dataloader=dataloader) if (Version(__version__) > Version('1.13')) else load(weights_file, model))
            # The fp32 skeleton is no longer needed once INC has rebuilt q_model.
            del model
            return q_model
class PreTrainedTokenizer(object):
    """Base class for all tokenizers.

    Handles special-token bookkeeping, the user-added token vocabulary, and
    loading/saving of tokenizer files.  Subclasses must implement
    ``_tokenize``, ``_convert_token_to_id``, ``_convert_id_to_token``,
    ``vocab_size`` and ``save_vocabulary``.

    NOTE(review): the reviewed copy had lost its decorators (it contained
    bare fragments such as ``_token.setter``).  The ``@property`` /
    ``@classmethod`` / ``@staticmethod`` decorators are restored here; they
    are required because this class itself uses attribute-style access
    (``self.unk_token``, ``self.all_special_tokens``, ``self.vocab_size``).
    The ``int(.0)`` (i.e. 0) max_len fallback is restored to ``int(1e12)``:
    a default of 0 would flag every sequence as over-long and clamp any
    model-provided ``max_len`` to 0.
    """

    # Per-subclass metadata: which files form the vocabulary, and the map of
    # pretrained shortcut names to their download locations / input sizes.
    vocab_files_names = {}
    pretrained_vocab_files_map = {}
    max_model_input_sizes = {}

    SPECIAL_TOKENS_ATTRIBUTES = ['bos_token', 'eos_token', 'unk_token', 'sep_token',
                                 'pad_token', 'cls_token', 'mask_token',
                                 'additional_special_tokens']

    @property
    def bos_token(self):
        # Beginning-of-sequence token; logs an error when accessed unset.
        if self._bos_token is None:
            logger.error('Using bos_token, but it is not set yet.')
        return self._bos_token

    @property
    def eos_token(self):
        # End-of-sequence token; logs an error when accessed unset.
        if self._eos_token is None:
            logger.error('Using eos_token, but it is not set yet.')
        return self._eos_token

    @property
    def unk_token(self):
        # Unknown token; logs an error when accessed unset.
        if self._unk_token is None:
            logger.error('Using unk_token, but it is not set yet.')
        return self._unk_token

    @property
    def sep_token(self):
        # Sentence-separator token; logs an error when accessed unset.
        if self._sep_token is None:
            logger.error('Using sep_token, but it is not set yet.')
        return self._sep_token

    @property
    def pad_token(self):
        # Padding token; logs an error when accessed unset.
        if self._pad_token is None:
            logger.error('Using pad_token, but it is not set yet.')
        return self._pad_token

    @property
    def cls_token(self):
        # Classification token; logs an error when accessed unset.
        if self._cls_token is None:
            logger.error('Using cls_token, but it is not set yet.')
        return self._cls_token

    @property
    def mask_token(self):
        # Mask token (for masked-LM training); logs an error when accessed unset.
        if self._mask_token is None:
            logger.error('Using mask_token, but it is not set yet.')
        return self._mask_token

    @property
    def additional_special_tokens(self):
        # Extra special tokens beyond the named ones above.
        if self._additional_special_tokens is None:
            logger.error('Using additional_special_tokens, but it is not set yet.')
        return self._additional_special_tokens

    @bos_token.setter
    def bos_token(self, value):
        self._bos_token = value

    @eos_token.setter
    def eos_token(self, value):
        self._eos_token = value

    @unk_token.setter
    def unk_token(self, value):
        self._unk_token = value

    @sep_token.setter
    def sep_token(self, value):
        self._sep_token = value

    @pad_token.setter
    def pad_token(self, value):
        self._pad_token = value

    @cls_token.setter
    def cls_token(self, value):
        self._cls_token = value

    @mask_token.setter
    def mask_token(self, value):
        self._mask_token = value

    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value

    def __init__(self, max_len=None, **kwargs):
        """Initialize special-token slots and register any passed in *kwargs*.

        Args:
            max_len: maximum sequence length; effectively unlimited when None.
            **kwargs: special-token values (must match SPECIAL_TOKENS_ATTRIBUTES).
        """
        self._bos_token = None
        self._eos_token = None
        self._unk_token = None
        self._sep_token = None
        self._pad_token = None
        self._cls_token = None
        self._mask_token = None
        self._additional_special_tokens = []
        # int(1e12) acts as "no limit" when the model defines none.
        self.max_len = max_len if max_len is not None else int(1e12)
        # token -> id and id -> token maps for tokens added after pretraining.
        self.added_tokens_encoder = {}
        self.added_tokens_decoder = {}
        for key, value in kwargs.items():
            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
                if key == 'additional_special_tokens':
                    assert (isinstance(value, (list, tuple)) and all(((isinstance(t, str) or (six.PY2 and isinstance(t, unicode))) for t in value)))
                else:
                    assert (isinstance(value, str) or (six.PY2 and isinstance(value, unicode)))
                setattr(self, key, value)

    @classmethod
    def from_pretrained(cls, *inputs, **kwargs):
        """Instantiate a tokenizer from a shortcut name or a local path."""
        return cls._from_pretrained(*inputs, **kwargs)

    @classmethod
    def _from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        # Resolve, download/cache and load every vocabulary file, then build
        # the tokenizer instance.  Returns None when nothing could be found.
        cache_dir = kwargs.pop('cache_dir', None)
        s3_models = list(cls.max_model_input_sizes.keys())
        vocab_files = {}
        if pretrained_model_name_or_path in s3_models:
            # Known shortcut name: take file URLs from the class-level map.
            for file_id, map_list in cls.pretrained_vocab_files_map.items():
                vocab_files[file_id] = map_list[pretrained_model_name_or_path]
        else:
            logger.info("Model name '{}' not found in model shortcut name list ({}). Assuming '{}' is a path or url to a directory containing tokenizer files.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path))
            for file_id, file_name in cls.vocab_files_names.items():
                if os.path.isdir(pretrained_model_name_or_path):
                    full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
                else:
                    # A direct path to a single vocabulary file.
                    full_file_name = pretrained_model_name_or_path
                if not os.path.exists(full_file_name):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name
            # Optional side-car files: added tokens and the special-token map.
            all_vocab_files_names = {'added_tokens_file': ADDED_TOKENS_FILE,
                                     'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE}
            saved_directory = pretrained_model_name_or_path
            if os.path.exists(saved_directory) and not os.path.isdir(saved_directory):
                # A file path was given: look for the side-cars next to it.
                saved_directory = os.path.dirname(saved_directory)
            for file_id, file_name in all_vocab_files_names.items():
                full_file_name = os.path.join(saved_directory, file_name)
                if not os.path.exists(full_file_name):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name
            if all(full_file_name is None for full_file_name in vocab_files.values()):
                logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find tokenizer filesat this path or url.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path))
                return None
        try:
            # Download (or fetch from cache) every resolved file.
            resolved_vocab_files = {}
            for file_id, file_path in vocab_files.items():
                if file_path is None:
                    resolved_vocab_files[file_id] = None
                else:
                    resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir)
        except EnvironmentError:
            if pretrained_model_name_or_path in s3_models:
                logger.error("Couldn't reach server to download vocabulary.")
            else:
                logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find files {} at this path or url.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path, str(vocab_files.keys())))
            return None
        for file_id, file_path in vocab_files.items():
            if file_path == resolved_vocab_files[file_id]:
                logger.info('loading file {}'.format(file_path))
            else:
                logger.info('loading file {} from cache at {}'.format(file_path, resolved_vocab_files[file_id]))
        if pretrained_model_name_or_path in cls.max_model_input_sizes:
            # Clamp max_len to the model's positional-embedding capacity.
            max_len = cls.max_model_input_sizes[pretrained_model_name_or_path]
            if max_len is not None and isinstance(max_len, (int, float)):
                kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
        added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
        special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
        # Remaining resolved files become constructor arguments (e.g. vocab_file).
        for args_name, file_path in resolved_vocab_files.items():
            if args_name not in kwargs:
                kwargs[args_name] = file_path
        if special_tokens_map_file is not None:
            special_tokens_map = json.load(open(special_tokens_map_file, encoding='utf-8'))
            for key, value in special_tokens_map.items():
                # Explicit kwargs take precedence over the saved map.
                if key not in kwargs:
                    kwargs[key] = value
        tokenizer = cls(*inputs, **kwargs)
        if added_tokens_file is not None:
            added_tok_encoder = json.load(open(added_tokens_file, encoding='utf-8'))
            added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
            tokenizer.added_tokens_encoder.update(added_tok_encoder)
            tokenizer.added_tokens_decoder.update(added_tok_decoder)
        return tokenizer

    def save_pretrained(self, save_directory):
        """Save the special-token map, added tokens and vocabulary to a directory.

        Returns the tuple of written file paths (vocab files first).
        """
        if not os.path.isdir(save_directory):
            logger.error('Saving directory ({}) should be a directory'.format(save_directory))
            return
        special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
        added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
        with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
        with open(added_tokens_file, 'w', encoding='utf-8') as f:
            if self.added_tokens_encoder:
                out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
            else:
                out_str = u'{}'
            f.write(out_str)
        vocab_files = self.save_vocabulary(save_directory)
        return vocab_files + (special_tokens_map_file, added_tokens_file)

    def save_vocabulary(self, save_directory):
        """Save the base vocabulary only (no added/special tokens). Subclass hook."""
        raise NotImplementedError

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens). Subclass hook."""
        raise NotImplementedError

    def __len__(self):
        # Full vocabulary size = base vocab + user-added tokens.
        return self.vocab_size + len(self.added_tokens_encoder)

    def add_tokens(self, new_tokens):
        """Add tokens absent from the vocabulary; returns how many were added."""
        if not new_tokens:
            return 0
        to_add_tokens = []
        for token in new_tokens:
            assert (isinstance(token, str) or (six.PY2 and isinstance(token, unicode)))
            # A token is "unknown" iff it maps to the same id as unk_token.
            if token != self.unk_token and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token):
                to_add_tokens.append(token)
                logger.info('Adding %s to the vocabulary', token)
        # New ids are appended after the current full vocabulary.
        added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(to_add_tokens))
        added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
        self.added_tokens_encoder.update(added_tok_encoder)
        self.added_tokens_decoder.update(added_tok_decoder)
        return len(to_add_tokens)

    def add_special_tokens(self, special_tokens_dict):
        """Register special tokens (adding them to the vocab if needed).

        Returns the number of tokens actually added to the vocabulary.
        """
        if not special_tokens_dict:
            return 0
        added_tokens = 0
        for key, value in special_tokens_dict.items():
            assert key in self.SPECIAL_TOKENS_ATTRIBUTES
            if key == 'additional_special_tokens':
                assert (isinstance(value, (list, tuple)) and all(((isinstance(t, str) or (six.PY2 and isinstance(t, unicode))) for t in value)))
                added_tokens += self.add_tokens(value)
            else:
                assert (isinstance(value, str) or (six.PY2 and isinstance(value, unicode)))
                added_tokens += self.add_tokens([value])
            logger.info('Assigning %s to the %s key of the tokenizer', value, key)
            setattr(self, key, value)
        return added_tokens

    def tokenize(self, text, **kwargs):
        """Tokenize *text*, keeping added/special tokens intact as single units."""
        def split_on_tokens(tok_list, text):
            # Recursively split on each added token; only text between added
            # tokens is handed to the subclass tokenizer.
            if not text:
                return []
            if not tok_list:
                return self._tokenize(text, **kwargs)
            tok = tok_list[0]
            split_text = text.split(tok)
            # Re-interleave the split pieces with the token, dropping the
            # trailing separator added by the sum.
            return sum(((split_on_tokens(tok_list[1:], sub_text.strip()) + [tok]) for sub_text in split_text), [])[:-1]

        added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens
        tokenized_text = split_on_tokens(added_tokens, text)
        return tokenized_text

    def _tokenize(self, text, **kwargs):
        """Convert a string to a list of tokens. Subclass hook."""
        raise NotImplementedError

    def convert_tokens_to_ids(self, tokens):
        """Map a token or list of tokens to id(s), consulting added tokens first."""
        if isinstance(tokens, str) or (six.PY2 and isinstance(tokens, unicode)):
            return self._convert_token_to_id_with_added_voc(tokens)
        ids = []
        for token in tokens:
            ids.append(self._convert_token_to_id_with_added_voc(token))
        if len(ids) > self.max_len:
            logger.warning('Token indices sequence length is longer than the specified maximum sequence length for this model ({} > {}). Running this sequence through the model will result in indexing errors'.format(len(ids), self.max_len))
        return ids

    def _convert_token_to_id_with_added_voc(self, token):
        # Added tokens shadow the base vocabulary.
        if token in self.added_tokens_encoder:
            return self.added_tokens_encoder[token]
        return self._convert_token_to_id(token)

    def _convert_token_to_id(self, token):
        """Map a single base-vocabulary token to its id. Subclass hook."""
        raise NotImplementedError

    def encode(self, text, text_pair=None, add_special_tokens=False):
        """Tokenize and convert one text (or a text pair) to ids.

        With a pair and add_special_tokens=False, returns a tuple of two lists.
        """
        if text_pair is None:
            if add_special_tokens:
                return self.add_special_tokens_single_sentence(self.convert_tokens_to_ids(self.tokenize(text)))
            else:
                return self.convert_tokens_to_ids(self.tokenize(text))
        first_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text)]
        second_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text_pair)]
        if add_special_tokens:
            return self.add_special_tokens_sentences_pair(first_sentence_tokens, second_sentence_tokens)
        else:
            return (first_sentence_tokens, second_sentence_tokens)

    def add_special_tokens_single_sentence(self, token_ids):
        """Add model-specific special tokens around one sequence. Subclass hook."""
        raise NotImplementedError

    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
        """Add model-specific special tokens around a pair. Subclass hook."""
        raise NotImplementedError

    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
        """Map an id or list of ids back to token string(s)."""
        if isinstance(ids, int):
            if ids in self.added_tokens_decoder:
                return self.added_tokens_decoder[ids]
            else:
                return self._convert_id_to_token(ids)
        tokens = []
        for index in ids:
            if index in self.all_special_ids and skip_special_tokens:
                continue
            if index in self.added_tokens_decoder:
                tokens.append(self.added_tokens_decoder[index])
            else:
                tokens.append(self._convert_id_to_token(index))
        return tokens

    def _convert_id_to_token(self, index):
        """Map a single base-vocabulary id to its token. Subclass hook."""
        raise NotImplementedError

    def convert_tokens_to_string(self, tokens):
        # NOTE(review): this feeds *tokens* through convert_ids_to_tokens,
        # which expects ids — presumably subclasses override this; confirm.
        return ' '.join(self.convert_ids_to_tokens(tokens))

    def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
        """Convert ids back to a string; splits on sep_token when present."""
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        text = self.convert_tokens_to_string(filtered_tokens)
        if self.sep_token is not None and self.sep_token in text:
            # Treat cls as another separator, then return one string per segment.
            text = text.replace(self.cls_token, self.sep_token)
            split_text = list(filter(lambda sentence: len(sentence) > 0, text.split(self.sep_token)))
            if clean_up_tokenization_spaces:
                clean_text = [self.clean_up_tokenization(text) for text in split_text]
                return clean_text
            else:
                return split_text
        elif clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    @property
    def special_tokens_map(self):
        """Dict of the special-token attributes that are currently set."""
        set_attr = {}
        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
            attr_value = getattr(self, '_' + attr)
            if attr_value:
                set_attr[attr] = attr_value
        return set_attr

    @property
    def all_special_tokens(self):
        """Deduplicated list of every set special-token value."""
        all_toks = []
        set_attr = self.special_tokens_map
        for attr_value in set_attr.values():
            all_toks = all_toks + (attr_value if isinstance(attr_value, (list, tuple)) else [attr_value])
        all_toks = list(set(all_toks))
        return all_toks

    @property
    def all_special_ids(self):
        """Ids corresponding to all_special_tokens."""
        all_toks = self.all_special_tokens
        all_ids = list(self._convert_token_to_id(t) for t in all_toks)
        return all_ids

    @staticmethod
    def clean_up_tokenization(out_string):
        """Undo spacing artifacts of whitespace tokenization around punctuation."""
        out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',').replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(' do not', " don't").replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
        return out_string
class EulerAncestralDiscreteSchedulerTest(SchedulerCommonTest):
    """Tests for the Euler-Ancestral discrete scheduler.

    Config sweeps delegate to ``check_over_configs`` from the common base;
    the full-loop tests run a fixed-seed denoising loop against a dummy model
    and pin the resulting tensor sum/mean as regression values.
    """

    # Single scheduler class under test; the common base iterates over this.
    scheduler_classes = (EulerAncestralDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the baseline scheduler config, with *kwargs* overriding fields."""
        config = {'num_train_timesteps': 1100, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear'}
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        # Sweep the training-timestep count.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        # Sweep matched (beta_start, beta_end) pairs.
        for (beta_start, beta_end) in zip([1e-05, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ['linear', 'scaled_linear']:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Full denoising loop with fixed seed; pins sum/mean regression values."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        # NOTE(review): init_noise_sigma is moved to CPU here but not in
        # test_full_loop_with_v_prediction — presumably a device-placement
        # workaround; confirm intent.
        sample = (self.dummy_sample_deter * scheduler.init_noise_sigma.cpu())
        sample = sample.to(torch_device)
        for (i, t) in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert (abs((result_sum.item() - 152.3192)) < 0.01)
        assert (abs((result_mean.item() - 0.1983)) < 0.001)

    def test_full_loop_with_v_prediction(self):
        """Same loop under v-prediction; different pinned regression values."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = (self.dummy_sample_deter * scheduler.init_noise_sigma)
        sample = sample.to(torch_device)
        for (i, t) in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert (abs((result_sum.item() - 108.4439)) < 0.01)
        assert (abs((result_mean.item() - 0.1412)) < 0.001)

    def test_full_loop_device(self):
        """Full loop with timesteps placed on the target device up front."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = (self.dummy_sample_deter * scheduler.init_noise_sigma.cpu())
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        # Same expected values as test_full_loop_no_noise: device placement
        # must not change the numerics.
        assert (abs((result_sum.item() - 152.3192)) < 0.01)
        assert (abs((result_mean.item() - 0.1983)) < 0.001)

    def test_full_loop_with_noise(self):
        """Resume denoising from a late timestep after add_noise (img2img-style)."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        # Start two steps before the end of the schedule.
        t_start = (self.num_inference_steps - 2)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = (self.dummy_sample_deter * scheduler.init_noise_sigma)
        noise = self.dummy_noise_deter
        noise = noise.to(sample.device)
        timesteps = scheduler.timesteps[(t_start * scheduler.order):]
        # Noise the clean sample to the first remaining timestep before looping.
        sample = scheduler.add_noise(sample, noise, timesteps[:1])
        for (i, t) in enumerate(timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert (abs((result_sum.item() - 56163.0508)) < 0.01), f' expected result sum 56163.0508, but get {result_sum}'
        assert (abs((result_mean.item() - 73.129)) < 0.001), f' expected result mean 73.1290, but get {result_mean}'
class UnaryOpBase(AbsOpBase):
    """Base class for elementwise single-input operators.

    Declares float dtypes for the single input and single output, and allows
    any rank for the output.
    """

    # One single-element dtype tuple per supported float dtype.
    in_dtypes = [(dt,) for dt in DTYPE_GEN_FLOATS]
    out_dtypes = [(dt,) for dt in DTYPE_GEN_FLOATS]

    def __init__(self):
        super().__init__()
        # The single output may take any rank.
        self.out_ranks = [rank_all()]
def convert_c2_detectron_names(weights):
    """Remap Caffe2 Detectron weight names to detectron2-style names.

    Args:
        weights: dict mapping original C2 parameter names to tensors.

    Returns:
        (new_weights, new_keys_to_original_keys): the renamed weight dict and
        a map from each new name back to its original name.

    The rename passes below are order-sensitive: more specific patterns
    (e.g. ``rpn.bbox.pred.fpn2``) must be rewritten before their generic
    counterparts (``rpn.bbox.pred``).
    """
    logger = logging.getLogger(__name__)
    logger.info('Remapping C2 weights ......')
    original_keys = sorted(weights.keys())
    layer_keys = copy.deepcopy(original_keys)
    # Shared basic renames (conv/bn naming etc.) done by the helper.
    layer_keys = convert_basic_c2_names(layer_keys)
    # --- RPN / proposal generator -------------------------------------
    layer_keys = [k.replace('conv.rpn.fpn2', 'proposal_generator.rpn_head.conv') for k in layer_keys]
    layer_keys = [k.replace('conv.rpn', 'proposal_generator.rpn_head.conv') for k in layer_keys]
    layer_keys = [k.replace('rpn.bbox.pred.fpn2', 'proposal_generator.rpn_head.anchor_deltas') for k in layer_keys]
    layer_keys = [k.replace('rpn.cls.logits.fpn2', 'proposal_generator.rpn_head.objectness_logits') for k in layer_keys]
    layer_keys = [k.replace('rpn.bbox.pred', 'proposal_generator.rpn_head.anchor_deltas') for k in layer_keys]
    layer_keys = [k.replace('rpn.cls.logits', 'proposal_generator.rpn_head.objectness_logits') for k in layer_keys]
    # --- Fast R-CNN box head / predictor ------------------------------
    layer_keys = [re.sub('^bbox\\.pred', 'bbox_pred', k) for k in layer_keys]
    layer_keys = [re.sub('^cls\\.score', 'cls_score', k) for k in layer_keys]
    layer_keys = [re.sub('^fc6\\.', 'box_head.fc1.', k) for k in layer_keys]
    layer_keys = [re.sub('^fc7\\.', 'box_head.fc2.', k) for k in layer_keys]
    layer_keys = [re.sub('^head\\.conv', 'box_head.conv', k) for k in layer_keys]

    # --- FPN lateral/output convs -------------------------------------
    def fpn_map(name):
        # Maps e.g. "fpn.inner.res3..." -> "fpn_lateral3..." and
        # "fpn.res3..." -> "fpn_output3...", preserving an optional ".norm".
        splits = name.split('.')
        norm = ('.norm' if ('norm' in splits) else '')
        if name.startswith('fpn.inner.'):
            stage = int(splits[2][len('res'):])
            return 'fpn_lateral{}{}.{}'.format(stage, norm, splits[(- 1)])
        elif name.startswith('fpn.res'):
            stage = int(splits[1][len('res'):])
            return 'fpn_output{}{}.{}'.format(stage, norm, splits[(- 1)])
        return name

    layer_keys = [fpn_map(k) for k in layer_keys]
    # --- Mask head ----------------------------------------------------
    layer_keys = [k.replace('.[mask].fcn', 'mask_head.mask_fcn') for k in layer_keys]
    layer_keys = [re.sub('^\\.mask\\.fcn', 'mask_head.mask_fcn', k) for k in layer_keys]
    layer_keys = [k.replace('mask.fcn.logits', 'mask_head.predictor') for k in layer_keys]
    layer_keys = [k.replace('conv5.mask', 'mask_head.deconv') for k in layer_keys]
    # --- Keypoint head ------------------------------------------------
    layer_keys = [k.replace('conv.fcn', 'roi_heads.keypoint_head.conv_fcn') for k in layer_keys]
    layer_keys = [k.replace('kps.score.lowres', 'roi_heads.keypoint_head.score_lowres') for k in layer_keys]
    layer_keys = [k.replace('kps.score.', 'roi_heads.keypoint_head.score.') for k in layer_keys]
    # The rename must stay a bijection.
    assert (len(set(layer_keys)) == len(layer_keys))
    assert (len(original_keys) == len(layer_keys))
    new_weights = {}
    new_keys_to_original_keys = {}
    for (orig, renamed) in zip(original_keys, layer_keys):
        new_keys_to_original_keys[renamed] = orig
        if (renamed.startswith('bbox_pred.') or renamed.startswith('mask_head.predictor.')):
            # C2 stores a background class first; drop its prediction rows
            # (4 box-delta rows for bbox_pred, 1 mask row for the predictor).
            new_start_idx = (4 if renamed.startswith('bbox_pred.') else 1)
            new_weights[renamed] = weights[orig][new_start_idx:]
            logger.info('Remove prediction weight for background class in {}. The shape changes from {} to {}.'.format(renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)))
        elif renamed.startswith('cls_score.'):
            # detectron2 puts the background class last instead of first.
            logger.info('Move classification weights for background class in {} from index 0 to index {}.'.format(renamed, (weights[orig].shape[0] - 1)))
            new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
        else:
            new_weights[renamed] = weights[orig]
    return (new_weights, new_keys_to_original_keys)
class ADE20K():
    """Scene-classification dataset over an ADE20K-style directory layout.

    Walks ``<path>/images/<split>/<letter>/<class>[/<subclass>]/*.jpg`` and
    maps each (sub)class directory to an integer label via the project-level
    ``S2I`` lookup table.  Images are returned through a TransformLoader
    pipeline (with augmentation for the training split).
    """

    def __init__(self, path, split, max_classes=None):
        # Args:
        #   path: dataset root containing an 'images/<split>' tree.
        #   split: 'training' or 'validation'.
        #   max_classes: optional cap on how many (sub)class dirs to load.
        assert (split in {'training', 'validation'})
        self.split = split
        self.path = path
        self.img_dir = os.path.join(self.path, 'images', self.split)
        self.max_classes = max_classes
        self.transform_loader = TransformLoader(224)
        # Augment only when training.
        augment = (self.split == 'training')
        self.transform = self.transform_loader.get_composed_transform(augment, to_pil=False)
        self.images = []
        self.classes = []
        self.load_images()
        self.unique_classes = np.unique(self.classes)
        # NOTE(review): hard-coded label-space size; presumably the full S2I
        # label count regardless of how many classes were loaded — confirm.
        self.n_classes = 365

    def load_images(self):
        """Populate self.images / self.classes from the split directory tree."""
        n_classes = 0
        for letterdir in os.listdir(self.img_dir):
            # ADE20K groups classes under single-letter directories.
            if (len(letterdir) != 1):
                continue
            letterdir = os.path.join(self.img_dir, letterdir)
            for cname in os.listdir(letterdir):
                cname_dir = os.path.join(letterdir, cname)
                # Some classes have a second nesting level of sub-classes.
                has_nested_dirs = any((os.path.isdir(os.path.join(cname_dir, i)) for i in os.listdir(cname_dir)))
                subcnames = []
                if has_nested_dirs:
                    for subcname in os.listdir(cname_dir):
                        assert os.path.isdir(os.path.join(cname_dir, subcname))
                        subcnames.append(subcname)
                else:
                    # Sentinel: a single pseudo sub-class for flat class dirs.
                    subcnames.append('')
                for subcname in subcnames:
                    if (subcname != ''):
                        # Special-cased names whose S2I keys don't follow the
                        # generic '<class>-<subclass>-s' pattern.
                        if (cname == 'waterfall'):
                            combname = 'waterfall-s'
                        elif (cname == 'car_interior'):
                            combname = 'car_interior-s'
                        elif ((cname == 'temple') and (subcname == 'east_asia')):
                            combname = 'temple-asia-s'
                        else:
                            combname = f'{cname}-{subcname}-s'
                        subcnamedir = os.path.join(cname_dir, subcname)
                    else:
                        combname = f'{cname}-s'
                        subcnamedir = cname_dir
                    # Skip directories with no entry in the label table.
                    if (combname not in S2I):
                        continue
                    for imgf in glob.glob(os.path.join(subcnamedir, '*.jpg')):
                        self.images.append(imgf)
                        self.classes.append(S2I[combname])
                    n_classes += 1
                    # Early exit once the requested class budget is reached.
                    if ((self.max_classes is not None) and (n_classes >= self.max_classes)):
                        return

    def __getitem__(self, i):
        """Return (transformed image tensor, integer class label)."""
        img_path = self.images[i]
        cl = self.classes[i]
        img = Image.open(img_path)
        img = self.transform(img)
        return (img, cl)

    def __len__(self):
        return len(self.images)
class TrialShortNamer():
    """Generates short, reversible run names from hyper-parameter dicts.

    Each parameter name is abbreviated to the shortest unambiguous prefix of
    its underscore-separated words; non-default values are appended to the
    class PREFIX (e.g. ``hp_lr0.3``).  ``parse_repr`` inverts ``shortname``.

    NOTE(review): the reviewed copy had lost the ``@classmethod`` /
    ``@staticmethod`` decorators; they are restored here to match how the
    methods are invoked (``cls.build_naming_info()``,
    ``TrialShortNamer.shortname_for_word(info, ...)``).
    """

    PREFIX = 'hp'
    DEFAULTS = {}
    # Lazily-built abbreviation tables shared by the class; see build_naming_info.
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        """Configure the prefix and default parameter values, then (re)build names."""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """Return (and memoize) the shortest unused prefix for *word*.

        Falls back to 'word#<letters>' when every prefix collides.
        Raises if *word* contains a digit (digits are reserved for values).
        """
        if (len(word) == 0):
            return ''
        short_word = None
        if any((char.isdigit() for char in word)):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if (word in info['short_word']):
            return info['short_word'][word]
        for prefix_len in range(1, (len(word) + 1)):
            prefix = word[:prefix_len]
            if (prefix in info['reverse_short_word']):
                continue
            else:
                short_word = prefix
                break
        if (short_word is None):
            # All prefixes taken: synthesize 'word#<suffix>' with an
            # alphabetic counter (digits 0-9 map to letters A-J).
            def int_to_alphabetic(integer):
                s = ''
                while (integer != 0):
                    s = (chr((ord('A') + (integer % 10))) + s)
                    integer //= 10
                return s

            i = 0
            while True:
                sword = ((word + '#') + int_to_alphabetic(i))
                if (sword in info['reverse_short_word']):
                    continue
                else:
                    short_word = sword
                    break
        info['short_word'][word] = short_word
        info['reverse_short_word'][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """Abbreviate a parameter name word-by-word; register it in *info*."""
        words = param_name.split('_')
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # Prefer the joined form without separators; fall back to '_'-joined,
        # then to the full name if both collide.
        separators = ['', '_']
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if (shortname not in info['reverse_short_param']):
                info['short_param'][param_name] = shortname
                info['reverse_short_param'][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        """Register *param_name* (and its short form) in the naming tables."""
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info['short_param'][param_name] = short_name
        info['reverse_short_param'][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        """Build the abbreviation tables once from DEFAULTS (idempotent)."""
        if (cls.NAMING_INFO is not None):
            return
        info = {'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}}
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """Encode *params* as '<PREFIX>_<k1><v1>_...', omitting default values."""
        cls.build_naming_info()
        assert (cls.PREFIX is not None)
        name = [copy.copy(cls.PREFIX)]
        for (k, v) in params.items():
            if (k not in cls.DEFAULTS):
                raise Exception(f'You should provide a default value for the param name {k} with value {v}')
            if (v == cls.DEFAULTS[k]):
                # Defaults are omitted so names only carry the deltas.
                continue
            key = cls.NAMING_INFO['short_param'][k]
            if isinstance(v, bool):
                v = (1 if v else 0)
            # Numbers attach directly to the key; other values use a dash.
            sep = ('' if isinstance(v, (int, float)) else '-')
            e = f'{key}{sep}{v}'
            name.append(e)
        return '_'.join(name)

    @classmethod
    def parse_repr(cls, repr):
        """Decode a shortname back to a full parameter dict (values as floats)."""
        repr = repr[(len(cls.PREFIX) + 1):]
        if (repr == ''):
            values = []
        else:
            values = repr.split('_')
        parameters = {}
        for value in values:
            if ('-' in value):
                (p_k, p_v) = value.split('-')
            else:
                # Numeric values were attached without a dash: split on digits.
                p_k = re.sub('[0-9.]', '', value)
                p_v = float(re.sub('[^0-9.]', '', value))
            key = cls.NAMING_INFO['reverse_short_param'][p_k]
            parameters[key] = p_v
        # Fill in everything that was omitted because it matched its default.
        for k in cls.DEFAULTS:
            if (k not in parameters):
                parameters[k] = cls.DEFAULTS[k]
        return parameters
class Conv4(BaseModel):
    """Four-stage convolutional feature extractor with a binary linear head.

    Each stage is conv(3x3) -> BatchNorm -> 2x2 max-pool; the flattened
    feature map (32 channels at 6x6) feeds a two-way classifier.
    """

    def __init__(self):
        super(Conv4, self).__init__()
        layers = []
        # (in_channels, out_channels, padding) for each of the four stages;
        # registration order inside Sequential matches the original layout.
        for in_ch, out_ch, pad in ((3, 32, 1), (32, 32, 1), (32, 32, 2), (32, 32, 2)):
            layers.append(DenseConv2d(in_ch, out_ch, kernel_size=3, padding=pad))
            layers.append(nn.BatchNorm2d(out_ch))
            layers.append(nn.MaxPool2d(2))
        self.features = nn.Sequential(*layers)
        self.classifier = DenseLinear(in_features=(32 * 6) * 6, out_features=2)

    def forward(self, inp):
        """Extract features, flatten per-sample, and classify."""
        feats = self.features(inp)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
def mace_check(condition, message):
    """Assert-style guard: no-op when *condition* holds, fatal otherwise.

    On failure, prints every frame of the current call stack and hands
    *message* to MaceLogger.error (which terminates at level 3).
    """
    if condition:
        return
    for frame_line in traceback.format_stack():
        print(frame_line.strip())
    MaceLogger.error(message, level=3)
def MAPE(y_true: 'ndarray', y_pred: 'ndarray', multioutput: str='raw_values') -> Union[(float64, 'ndarray')]:
    """Mean absolute percentage error, expressed in percent.

    Args:
        y_true: ground-truth values.
        y_pred: predicted values.
        multioutput: 'raw_values' returns one error per output (reshaped to
            the original output shape); anything else averages them.

    Returns:
        Per-output errors or their mean, depending on *multioutput*.
    """
    (y_true, y_pred, original_shape) = _standardize_input(y_true, y_pred, multioutput)
    # EPSILON guards against division by zero for zero-valued targets.
    relative_errors = np.abs((y_true - y_pred) / (y_true + EPSILON))
    output_errors = 100 * np.mean(relative_errors, axis=0)
    if (multioutput != 'raw_values'):
        return np.mean(output_errors)
    return output_errors.reshape(original_shape)
def calculate_fid(dataset_name, generated_dir, target_size=128):
    """Compute FID between pre-dumped real images and *generated_dir*.

    Retries up to 10 times (the image dumps may still be being written by a
    parallel process), sleeping 0.5s between attempts.

    Args:
        dataset_name: unused; kept for interface compatibility with callers.
        generated_dir: directory of generated images.
        target_size: resolution tag used in the real-image directory name.

    Returns:
        The FID score as computed by fid_score.calculate_fid_given_paths.

    Raises:
        RuntimeError: if all retry attempts fail (previously this fell
            through to an opaque NameError on the unbound ``fid``).
    """
    real_dir = os.path.join('EvalImages', (('real' + '_real_images_') + str(target_size)))
    print(real_dir, generated_dir)
    fid = None
    for i in range(10):
        try:
            fid = fid_score.calculate_fid_given_paths([real_dir, generated_dir], 128, 'cuda', 2048)
            break
        # Was a bare `except:` — narrow it so KeyboardInterrupt/SystemExit
        # still propagate instead of being retried away.
        except Exception:
            print(('failed to load evaluation images, try %02d times' % i))
            time.sleep(0.5)
    if fid is None:
        raise RuntimeError('FID computation failed after 10 attempts for {}'.format(generated_dir))
    torch.cuda.empty_cache()
    return fid
def do_train(cfg, model, data_loader, data_loader_val, optimizer, scheduler, checkpointer, device, checkpoint_period, test_period, arguments):
    """Main training loop (maskrcnn-benchmark style) with AMP and periodic validation.

    Args:
        cfg: experiment config node (MODEL/DATASETS/TEST sections are read).
        model: the detector; trained in place.
        data_loader: training loader; its length defines max_iter.
        data_loader_val: optional validation loader (evaluated every test_period).
        optimizer / scheduler: stepped once per iteration.
        checkpointer: saves 'model_<iter>' every checkpoint_period and 'model_final'.
        device: target device for images/targets.
        checkpoint_period / test_period: iteration intervals for saving / eval.
        arguments: mutable dict; 'iteration' is read to resume and updated as
            training progresses (so checkpoints carry the current iteration).

    The two branches below are near-duplicates: datasets whose name contains
    'ignore' yield an extra ``targets_ignore`` element per batch that is
    forwarded to the model; everything else is identical.
    """
    logger = logging.getLogger('maskrcnn_benchmark.trainer')
    logger.info('Start training')
    meters = MetricLogger(delimiter=' ')
    max_iter = len(data_loader)
    start_iter = arguments['iteration']
    model.train()
    start_training_time = time.time()
    end = time.time()
    # IoU types for evaluation depend on which heads are enabled.
    iou_types = ('bbox',)
    if cfg.MODEL.MASK_ON:
        iou_types = (iou_types + ('segm',))
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = (iou_types + ('keypoints',))
    dataset_names = cfg.DATASETS.TEST
    # Datasets named '...ignore...' supply ignore-region annotations.
    if ('ignore' in cfg.DATASETS.TRAIN[0]):
        is_ignore = True
    else:
        is_ignore = False
    if is_ignore:
        # ---- Branch 1: batches carry (images, targets, ids, targets_ignore).
        for (iteration, (images, targets, _, targets_ignore)) in enumerate(data_loader, start_iter):
            # Skip degenerate batches with an empty target (would break loss).
            if any(((len(target) < 1) for target in targets)):
                logger.error(f'Iteration={(iteration + 1)} || Image Ids used for training {_} || targets Length={[len(target) for target in targets]}')
                continue
            data_time = (time.time() - end)
            iteration = (iteration + 1)
            arguments['iteration'] = iteration
            images = images.to(device)
            targets = [target.to(device) for target in targets]
            targets_ignore = [target_ignore.to(device) for target_ignore in targets_ignore]
            loss_dict = model(images, targets, target_ignore=targets_ignore)
            losses = sum((loss for loss in loss_dict.values()))
            # Reduce losses across workers for logging only; the backward
            # pass uses the local (unreduced) losses.
            loss_dict_reduced = reduce_loss_dict(loss_dict)
            losses_reduced = sum((loss for loss in loss_dict_reduced.values()))
            meters.update(loss=losses_reduced, **loss_dict_reduced)
            optimizer.zero_grad()
            # apex AMP: scale the loss before backward.
            with amp.scale_loss(losses, optimizer) as scaled_losses:
                scaled_losses.backward()
            optimizer.step()
            scheduler.step()
            batch_time = (time.time() - end)
            end = time.time()
            meters.update(time=batch_time, data=data_time)
            eta_seconds = (meters.time.global_avg * (max_iter - iteration))
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            # Log every 20 iterations and at the very end.
            if (((iteration % 20) == 0) or (iteration == max_iter)):
                logger.info(meters.delimiter.join(['eta: {eta}', 'iter: {iter}', '{meters}', 'lr: {lr:.6f}', 'max mem: {memory:.0f}']).format(eta=eta_string, iter=iteration, meters=str(meters), lr=optimizer.param_groups[0]['lr'], memory=((torch.cuda.max_memory_allocated() / 1024.0) / 1024.0)))
            if ((iteration % checkpoint_period) == 0):
                checkpointer.save('model_{:07d}'.format(iteration), **arguments)
            if ((data_loader_val is not None) and (test_period > 0) and ((iteration % test_period) == 0)):
                # Periodic validation: full COCO-style inference, then a
                # loss-only pass over the validation loader for the meters.
                meters_val = MetricLogger(delimiter=' ')
                synchronize()
                _ = inference(model, make_data_loader(cfg, is_train=False, is_distributed=(get_world_size() > 1), is_for_period=True), dataset_name='[Validation]', iou_types=iou_types, box_only=(False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY), device=cfg.MODEL.DEVICE, expected_results=cfg.TEST.EXPECTED_RESULTS, expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, output_folder=None)
                synchronize()
                # inference() switches to eval mode; restore training mode.
                model.train()
                with torch.no_grad():
                    for (iteration_val, (images_val, targets_val, _)) in enumerate(tqdm(data_loader_val)):
                        images_val = images_val.to(device)
                        targets_val = [target.to(device) for target in targets_val]
                        loss_dict = model(images_val, targets_val)
                        losses = sum((loss for loss in loss_dict.values()))
                        loss_dict_reduced = reduce_loss_dict(loss_dict)
                        losses_reduced = sum((loss for loss in loss_dict_reduced.values()))
                        meters_val.update(loss=losses_reduced, **loss_dict_reduced)
                synchronize()
                logger.info(meters_val.delimiter.join(['[Validation]: ', 'eta: {eta}', 'iter: {iter}', '{meters}', 'lr: {lr:.6f}', 'max mem: {memory:.0f}']).format(eta=eta_string, iter=iteration, meters=str(meters_val), lr=optimizer.param_groups[0]['lr'], memory=((torch.cuda.max_memory_allocated() / 1024.0) / 1024.0)))
            if (iteration == max_iter):
                checkpointer.save('model_final', **arguments)
    else:
        # ---- Branch 2: identical to branch 1, but batches have no
        # targets_ignore element and the model is called without it.
        for (iteration, (images, targets, _)) in enumerate(data_loader, start_iter):
            if any(((len(target) < 1) for target in targets)):
                logger.error(f'Iteration={(iteration + 1)} || Image Ids used for training {_} || targets Length={[len(target) for target in targets]}')
                continue
            data_time = (time.time() - end)
            iteration = (iteration + 1)
            arguments['iteration'] = iteration
            images = images.to(device)
            targets = [target.to(device) for target in targets]
            loss_dict = model(images, targets)
            losses = sum((loss for loss in loss_dict.values()))
            loss_dict_reduced = reduce_loss_dict(loss_dict)
            losses_reduced = sum((loss for loss in loss_dict_reduced.values()))
            meters.update(loss=losses_reduced, **loss_dict_reduced)
            optimizer.zero_grad()
            with amp.scale_loss(losses, optimizer) as scaled_losses:
                scaled_losses.backward()
            optimizer.step()
            scheduler.step()
            batch_time = (time.time() - end)
            end = time.time()
            meters.update(time=batch_time, data=data_time)
            eta_seconds = (meters.time.global_avg * (max_iter - iteration))
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            if (((iteration % 20) == 0) or (iteration == max_iter)):
                logger.info(meters.delimiter.join(['eta: {eta}', 'iter: {iter}', '{meters}', 'lr: {lr:.6f}', 'max mem: {memory:.0f}']).format(eta=eta_string, iter=iteration, meters=str(meters), lr=optimizer.param_groups[0]['lr'], memory=((torch.cuda.max_memory_allocated() / 1024.0) / 1024.0)))
            if ((iteration % checkpoint_period) == 0):
                checkpointer.save('model_{:07d}'.format(iteration), **arguments)
            if ((data_loader_val is not None) and (test_period > 0) and ((iteration % test_period) == 0)):
                meters_val = MetricLogger(delimiter=' ')
                synchronize()
                _ = inference(model, make_data_loader(cfg, is_train=False, is_distributed=(get_world_size() > 1), is_for_period=True), dataset_name='[Validation]', iou_types=iou_types, box_only=(False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY), device=cfg.MODEL.DEVICE, expected_results=cfg.TEST.EXPECTED_RESULTS, expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, output_folder=None)
                synchronize()
                model.train()
                with torch.no_grad():
                    for (iteration_val, (images_val, targets_val, _)) in enumerate(tqdm(data_loader_val)):
                        images_val = images_val.to(device)
                        targets_val = [target.to(device) for target in targets_val]
                        loss_dict = model(images_val, targets_val)
                        losses = sum((loss for loss in loss_dict.values()))
                        loss_dict_reduced = reduce_loss_dict(loss_dict)
                        losses_reduced = sum((loss for loss in loss_dict_reduced.values()))
                        meters_val.update(loss=losses_reduced, **loss_dict_reduced)
                synchronize()
                logger.info(meters_val.delimiter.join(['[Validation]: ', 'eta: {eta}', 'iter: {iter}', '{meters}', 'lr: {lr:.6f}', 'max mem: {memory:.0f}']).format(eta=eta_string, iter=iteration, meters=str(meters_val), lr=optimizer.param_groups[0]['lr'], memory=((torch.cuda.max_memory_allocated() / 1024.0) / 1024.0)))
            if (iteration == max_iter):
                checkpointer.save('model_final', **arguments)
    total_training_time = (time.time() - start_training_time)
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info('Total training time: {} ({:.4f} s / it)'.format(total_time_str, (total_training_time / max_iter)))
def calibrate(model, loader):
    """Run one batch through *model* with quantization calibration enabled.

    Temporarily unwraps nn.DataParallel, flips every QModule submodule into
    calibration mode, forwards a single batch from *loader* on 'cuda:0',
    flips calibration off again, and restores the DataParallel wrapper.

    Returns the (possibly re-wrapped) model.
    """
    was_data_parallel = hasattr(model, 'module')
    if was_data_parallel:
        model = model.module
    print('\n==> start calibrate')
    start_time = time.time()

    def _toggle_calibration(enabled):
        # Flip calibration mode on every quantized submodule.
        for _, submodule in model.named_modules():
            if isinstance(submodule, QModule):
                submodule.set_calibrate(calibrate=enabled)

    _toggle_calibration(True)
    # A single representative batch is enough to collect calibration stats.
    inputs, _ = next(iter(loader))
    inputs = inputs.to('cuda:0', non_blocking=True)
    model(inputs)
    _toggle_calibration(False)
    print('==> end calibrate')
    print('calibrate time:{}'.format(time.time() - start_time))
    if was_data_parallel:
        model = nn.DataParallel(model)
    return model
# NOTE(review): this bare call looks like a stripped decorator
# (``@_lazy_imports('av')``) — the file shows the same decorator loss
# elsewhere; confirm against the original and restore the '@' if so.
_lazy_imports('av')
def create_video_file(root: Union[(pathlib.Path, str)], name: Union[(pathlib.Path, str)], size: Union[(Sequence[int], int)]=(1, 3, 10, 10), fps: float=25, **kwargs: Any) -> pathlib.Path:
    """Create a small test video at ``root/name`` and return its path.

    Args:
        root: directory in which to create the file.
        name: file name (joined onto *root*).
        size: int (square frame), (H, W), (C, H, W) or (T, C, H, W);
            missing leading dims are padded with C=3 and T=1.
        fps: frame rate passed through to ``torchvision.io.write_video``.
        **kwargs: forwarded to ``write_video``.

    Raises:
        UsageError: when *size* normalizes to something other than 4 dims.
    """
    # Square spatial size from a bare int.
    if isinstance(size, int):
        size = (size, size)
    # Pad to (C, H, W) then (T, C, H, W).
    if (len(size) == 2):
        size = (3, *size)
    if (len(size) == 3):
        size = (1, *size)
    if (len(size) != 4):
        raise UsageError(f"The 'size' argument should either be an int or a sequence of length 2, 3, or 4. Got {len(size)} instead")
    video = create_image_or_video_tensor(size)
    file = (pathlib.Path(root) / name)
    # write_video expects THWC; the tensor is TCHW, hence the permute.
    torchvision.io.write_video(str(file), video.permute(0, 2, 3, 1), fps, **kwargs)
    return file