code
stringlengths
101
5.91M
def test_resnet_backbone():
    """Smoke-test ResNet/ResNetV1d construction, stage freezing, plugins and
    forward output shapes on a 32x32 input."""

    def _assert_shapes(feats, channels, sizes=(8, 4, 2, 1)):
        # One feature map per requested stage, with the expected channels
        # and spatial size for a 32x32 input.
        assert len(feats) == len(channels)
        for f, c, s in zip(feats, channels, sizes):
            assert f.shape == torch.Size([1, c, s, s])

    # Invalid configurations must be rejected at construction time.
    with pytest.raises(KeyError):
        ResNet(20)
    with pytest.raises(AssertionError):
        ResNet(50, num_stages=0)
    with pytest.raises(AssertionError):
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        ResNet(50, dcn=dcn, stage_with_dcn=(True,))
    with pytest.raises(AssertionError):
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=(1.0 / 16)),
                stages=(False, True, True),
                position='after_conv3')
        ]
        ResNet(50, plugins=plugins)
    with pytest.raises(AssertionError):
        ResNet(50, num_stages=5)
    with pytest.raises(AssertionError):
        ResNet(50, strides=(1,), dilations=(1, 1), num_stages=3)
    with pytest.raises(TypeError):
        ResNet(50, pretrained=0)
    with pytest.raises(AssertionError):
        ResNet(50, style='tensorflow')

    # norm_eval keeps every norm layer in eval mode even after train().
    net = ResNet(50, norm_eval=True, base_channels=1)
    net.train()
    assert check_norm_state(net.modules(), False)

    net = ResNet(depth=50, norm_eval=True, pretrained='torchvision://resnet50')
    net.train()
    assert check_norm_state(net.modules(), False)

    # Frozen stages: the stem and the first stage stop training/updating.
    frozen_stages = 1
    net = ResNet(50, frozen_stages=frozen_stages, base_channels=1)
    net.train()
    assert net.norm1.training is False
    for layer in [net.conv1, net.norm1]:
        for param in layer.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(net, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # ResNetV1d replaces the 7x7 stem with a 9-module deep stem.
    net = ResNetV1d(depth=50, frozen_stages=frozen_stages, base_channels=2)
    assert len(net.stem) == 9
    net.train()
    assert check_norm_state(net.stem, False)
    for param in net.stem.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(net, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # Plain ResNet-18 forward.
    net = ResNet(18)
    net.train()
    inputs = torch.randn(1, 3, 32, 32)
    _assert_shapes(net(inputs), (64, 128, 256, 512))

    # with_cp propagates to every residual block.
    net = ResNet(18, with_cp=True)
    for m in net.modules():
        if is_block(m):
            assert m.with_cp

    # Default norm layers are BatchNorm.
    net = ResNet(50, base_channels=1)
    for m in net.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    net.train()
    inputs = torch.randn(1, 3, 32, 32)
    _assert_shapes(net(inputs), (4, 8, 16, 32))

    # Restricting out_indices trims the returned feature pyramid.
    net = ResNet(50, out_indices=(0, 1, 2), base_channels=1)
    net.train()
    inputs = torch.randn(1, 3, 32, 32)
    _assert_shapes(net(inputs), (4, 8, 16), sizes=(8, 4, 2))

    net = ResNet(50, with_cp=True, base_channels=1)
    for m in net.modules():
        if is_block(m):
            assert m.with_cp
    net.train()
    inputs = torch.randn(1, 3, 32, 32)
    _assert_shapes(net(inputs), (4, 8, 16, 32))

    # GroupNorm selected through norm_cfg.
    net = ResNet(
        50,
        base_channels=4,
        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True))
    for m in net.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    net.train()
    inputs = torch.randn(1, 3, 32, 32)
    _assert_shapes(net(inputs), (16, 32, 64, 128))

    # Mixed plugins with per-stage enabling.
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=(- 1),
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            stages=(False, True, True, True),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=(1.0 / 16)),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    net = ResNet(50, plugins=plugins, base_channels=8)
    for m in net.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'gen_attention_block')
            assert m.nonlocal_block.in_channels == 8
    for m in net.layer2.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 16
            assert m.gen_attention_block.in_channels == 16
            assert m.context_block.in_channels == 64
    for m in net.layer3.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 32
            assert m.gen_attention_block.in_channels == 32
            assert m.context_block.in_channels == 128
    for m in net.layer4.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 64
            assert m.gen_attention_block.in_channels == 64
            assert not hasattr(m, 'context_block')
    net.train()
    inputs = torch.randn(1, 3, 32, 32)
    _assert_shapes(net(inputs), (32, 64, 128, 256))

    # Two plugins of the same type, distinguished by postfix.
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=1),
            stages=(False, True, True, False),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=2),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    net = ResNet(50, plugins=plugins, base_channels=8)
    for m in net.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    for m in net.layer2.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 64
            assert m.context_block2.in_channels == 64
    for m in net.layer3.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 128
            assert m.context_block2.in_channels == 128
    for m in net.layer4.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    net.train()
    inputs = torch.randn(1, 3, 32, 32)
    _assert_shapes(net(inputs), (32, 64, 128, 256))

    # zero_init_residual zeroes the last norm layer of each residual block.
    net = ResNet(50, zero_init_residual=True, base_channels=1)
    net.init_weights()
    for m in net.modules():
        if isinstance(m, Bottleneck):
            assert assert_params_all_zeros(m.norm3)
        elif isinstance(m, BasicBlock):
            assert assert_params_all_zeros(m.norm2)
    net.train()
    inputs = torch.randn(1, 3, 32, 32)
    _assert_shapes(net(inputs), (4, 8, 16, 32))

    # ResNetV1d variant forward.
    net = ResNetV1d(depth=50, base_channels=2)
    net.train()
    inputs = torch.randn(1, 3, 32, 32)
    _assert_shapes(net(inputs), (8, 16, 32, 64))
def _make_divisible(v, divisor=8, min_value=None): if (min_value is None): min_value = divisor new_v = max(min_value, ((int((v + (divisor / 2))) // divisor) * divisor)) if (new_v < (0.9 * v)): new_v += divisor return new_v
class CamVid(BaseDataset):
    """CamVid segmentation dataset: RGB images paired with color-encoded
    label maps, converted to class-index maps via ``color_list``."""

    def __init__(self, root, list_path, num_classes=11, multi_scale=True,
                 flip=True, ignore_label=255, base_size=960,
                 crop_size=(720, 960), scale_factor=16,
                 mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
                 bd_dilate_size=4):
        super(CamVid, self).__init__(ignore_label, base_size, crop_size,
                                     scale_factor, mean, std)
        self.root = root
        self.list_path = list_path
        self.num_classes = num_classes
        self.multi_scale = multi_scale
        self.flip = flip
        # Fix: the original `open(root + list_path)` leaked the file handle;
        # a with-block closes it deterministically.
        with open(root + list_path) as list_file:
            self.img_list = [line.strip().split() for line in list_file]
        self.files = self.read_files()
        self.ignore_label = ignore_label
        # RGB colour per class index (images are loaded with convert('RGB')),
        # used by color2label / label2color.
        self.color_list = [[0, 128, 192], [128, 0, 0], [64, 0, 128],
                           [192, 192, 128], [64, 64, 128], [64, 64, 0],
                           [128, 64, 128], [0, 0, 192], [192, 128, 128],
                           [128, 128, 128], [128, 128, 0]]
        self.class_weights = None
        self.bd_dilate_size = bd_dilate_size

    def read_files(self):
        """Turn (image_path, label_path) pairs into per-sample dicts."""
        files = []
        for image_path, label_path in self.img_list:
            name = os.path.splitext(os.path.basename(label_path))[0]
            files.append({'img': image_path, 'label': label_path, 'name': name})
        return files

    def color2label(self, color_map):
        """Map an HxWx3 colour label image to HxW class indices.

        Pixels matching no entry in ``color_list`` become ``ignore_label``.
        """
        label = np.ones(color_map.shape[:2]) * self.ignore_label
        for i, v in enumerate(self.color_list):
            # A pixel belongs to class i when all three channels match.
            label[(color_map == v).sum(2) == 3] = i
        return label.astype(np.uint8)

    def label2color(self, label):
        """Map HxW class indices back to an HxWx3 colour image."""
        color_map = np.zeros(label.shape + (3,))
        for i, v in enumerate(self.color_list):
            color_map[label == i] = self.color_list[i]
        return color_map.astype(np.uint8)

    def __getitem__(self, index):
        """Load one sample and return (image, label, edge, size, name)."""
        item = self.files[index]
        name = item['name']
        image = Image.open(os.path.join(self.root, 'camvid',
                                        item['img'])).convert('RGB')
        image = np.array(image)
        size = image.shape
        color_map = Image.open(os.path.join(self.root, 'camvid',
                                            item['label'])).convert('RGB')
        color_map = np.array(color_map)
        label = self.color2label(color_map)
        image, label, edge = self.gen_sample(image, label, self.multi_scale,
                                             self.flip, edge_pad=False,
                                             edge_size=self.bd_dilate_size,
                                             city=False)
        return (image.copy(), label.copy(), edge.copy(), np.array(size), name)

    def single_scale_inference(self, config, model, image):
        """Run plain (single-scale) inference via the base-class helper."""
        pred = self.inference(config, model, image)
        return pred

    def save_pred(self, preds, sv_path, name):
        """Argmax the logits and write colourised PNGs, one per batch item."""
        preds = np.asarray(np.argmax(preds.cpu(), axis=1), dtype=np.uint8)
        for i in range(preds.shape[0]):
            pred = self.label2color(preds[i])
            save_img = Image.fromarray(pred)
            save_img.save(os.path.join(sv_path, name[i] + '.png'))
class BeitFeatureExtractor(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises unless the
    'vision' backend is available."""

    _backends = ['vision']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative error when 'vision' is missing.
        requires_backends(self, ['vision'])
class SST2Processor(DataProcessor):
    """Reads SST-2 TSV splits (sentence<TAB>label per row) into
    InputExample lists."""

    def __init__(self):
        super().__init__()
        self.labels = ['0', '1']

    def get_examples(self, data_dir, split):
        """Load ``{split}.tsv`` from ``data_dir``, skipping the header row."""
        path = os.path.join(data_dir, f'{split}.tsv')
        examples = []
        with open(path, encoding='utf-8') as f:
            lines = f.readlines()
        # lines[0] is the TSV header, so enumerate from the second row.
        for idx, line in enumerate(lines[1:]):
            columns = line.strip().split('\t')
            guid = '%s-%s' % (split, idx)
            examples.append(
                InputExample(guid=guid, text_a=columns[0],
                             label=self.get_label_id(columns[1])))
        return examples
def resnet18small(c, **kargs):
    """Small ResNet trunk ([2, 2, 2] blocks) followed by a 100->c FFNN head."""
    backbone = n.ResNet([2, 2, 2])
    head = n.FFNN([100, c], bias=True, last_lin=False, **kargs)
    return n.Seq(backbone, head)
class SelfKnowledgeDistillationLoss(KnowledgeDistillationFramework):
    """Base class for self-distillation losses computed over mapped
    (student_layer, teacher_layer) pairs.

    Framework-specific subclasses must implement ``init_loss_funcs``,
    ``teacher_model_forward`` and ``loss_cal``.
    """

    def __init__(self, layer_mappings=None, loss_types=None,
                 loss_weights=None, temperature=1.0, add_origin_loss=False,
                 student_model=None, teacher_model=None):
        super(SelfKnowledgeDistillationLoss, self).__init__(
            student_model=student_model, teacher_model=teacher_model)
        self.temperature = temperature
        # Fix: the original used a mutable default argument ([]); all
        # instances would have shared one list object.
        if layer_mappings is None:
            layer_mappings = []
        self.layer_mappings = []
        for items in layer_mappings:
            # Validate every pair before accepting the group.
            for value in items:
                assert len(value) == 2, (
                    'Each item in layer_mappings ' +
                    'should be a list or tuple of length 2, with format ' +
                    '[student_layer_name, teacher_layer_name].')
            self.layer_mappings.append(items)
        # Defaults: uniform weights and cross-entropy per mapping group.
        self.loss_weights = ([1.0 / len(self.layer_mappings)] *
                             len(self.layer_mappings)
                             if loss_weights is None else loss_weights)
        self.loss_types = (['CE'] * len(self.layer_mappings)
                           if loss_types is None else loss_types)
        self.add_origin_loss = add_origin_loss
        self.loss_funcs = []
        self.init_loss_funcs()
        assert (len(self.layer_mappings) == len(self.loss_weights)
                == len(self.loss_types)), (
            f'Wrong length for layer_mappings:{self.layer_mappings}, ' +
            f'loss_weights:{self.loss_weights} or loss_types:{self.loss_types}, ' +
            'all should be the same.')

    def init_loss_funcs(self):
        """Populate ``self.loss_funcs``; must be overridden per framework."""
        raise NotImplementedError(
            'Function init_loss_funcs should be framework related.')

    def teacher_model_forward(self, input, teacher_model=None):
        """Run the teacher forward pass; must be overridden per framework."""
        raise NotImplementedError(
            'Function teacher_model_forward should be framework related.')

    def loss_cal(self, student_outputs):
        """Compute the distillation loss; must be overridden per framework."""
        raise NotImplementedError(
            'Function loss_cal should be framework related.')

    def loss_cal_sloss(self, student_outputs, teacher_outputs, student_loss):
        """Combine the distillation loss with the original student loss
        when ``add_origin_loss`` is set."""
        loss = self.loss_cal(student_outputs)
        if self.add_origin_loss:
            loss += student_loss
        return loss

    def __call__(self, student_outputs, targets):
        # Intentionally a no-op here; real loss is computed via loss_cal*.
        return 0
@_distributed
class TestAutoTCN(TestCase):
    """End-to-end tests for AutoTCN hyper-parameter search (fit, predict,
    evaluate, ONNX and save/load round-trips)."""

    def setUp(self) -> None:
        from bigdl.orca import init_orca_context
        init_orca_context(cores=8, init_ray_on_spark=True)

    def tearDown(self) -> None:
        from bigdl.orca import stop_orca_context
        stop_orca_context()

    @_torch
    def test_fit_np(self):
        auto_tcn = get_auto_estimator()
        auto_tcn.fit(data=get_x_y(size=1000),
                     epochs=1,
                     batch_size=hp.choice([32, 64]),
                     validation_data=get_x_y(size=400),
                     n_sampling=1)
        assert auto_tcn.get_best_model()
        best_config = auto_tcn.get_best_config()
        # Sampled hyper-parameters must come from their search spaces.
        assert 0.1 <= best_config['dropout'] <= 0.2
        assert best_config['batch_size'] in (32, 64)
        assert 1 <= best_config['levels'] < 3

    @_tf2
    def test_fit_np_keras(self):
        keras_auto_tcn = get_auto_estimator('keras')
        keras_auto_tcn.fit(data=get_x_y(size=1000),
                           epochs=2,
                           batch_size=hp.choice([32, 64]),
                           validation_data=get_x_y(size=400),
                           n_sampling=1)
        assert keras_auto_tcn.get_best_model()
        best_config = keras_auto_tcn.get_best_config()
        assert 0.1 <= best_config['dropout'] <= 0.2
        assert best_config['batch_size'] in (32, 64)
        assert 1 <= best_config['levels'] < 3

    @_torch
    def test_fit_loader(self):
        auto_tcn = get_auto_estimator()
        auto_tcn.fit(data=train_dataloader_creator(config={'batch_size': 64}),
                     epochs=1,
                     validation_data=valid_dataloader_creator(
                         config={'batch_size': 64}),
                     n_sampling=1)
        assert auto_tcn.get_best_model()
        best_config = auto_tcn.get_best_config()
        assert 0.1 <= best_config['dropout'] <= 0.2
        assert 1 <= best_config['levels'] < 3

    @_torch
    def test_fit_data_creator(self):
        auto_tcn = get_auto_estimator()
        auto_tcn.fit(data=train_dataloader_creator,
                     epochs=1,
                     batch_size=hp.choice([32, 64]),
                     validation_data=valid_dataloader_creator,
                     n_sampling=1)
        assert auto_tcn.get_best_model()
        best_config = auto_tcn.get_best_config()
        assert 0.1 <= best_config['dropout'] <= 0.2
        assert best_config['batch_size'] in (32, 64)
        assert 1 <= best_config['levels'] < 3

    @_torch
    def test_num_channels(self):
        # Fixed num_channels should be returned untouched in the best config.
        auto_tcn = AutoTCN(input_feature_num=input_feature_dim,
                           output_target_num=output_feature_dim,
                           past_seq_len=past_seq_len,
                           future_seq_len=future_seq_len,
                           optimizer='Adam',
                           loss=torch.nn.MSELoss(),
                           metric='mse',
                           hidden_units=4,
                           levels=hp.randint(1, 3),
                           num_channels=[8] * 2,
                           kernel_size=hp.choice([2, 3]),
                           lr=hp.choice([0.001, 0.003, 0.01]),
                           dropout=hp.uniform(0.1, 0.2),
                           logs_dir='/tmp/auto_tcn',
                           cpus_per_trial=2,
                           name='auto_tcn')
        auto_tcn.fit(data=train_dataloader_creator,
                     epochs=1,
                     batch_size=hp.choice([32, 64]),
                     validation_data=valid_dataloader_creator,
                     n_sampling=1)
        assert auto_tcn.get_best_model()
        best_config = auto_tcn.get_best_config()
        assert best_config['num_channels'] == [8] * 2

    @_torch
    def test_predict_evaluation(self):
        auto_tcn = get_auto_estimator()
        auto_tcn.fit(data=train_dataloader_creator(config={'batch_size': 64}),
                     epochs=1,
                     validation_data=valid_dataloader_creator(
                         config={'batch_size': 64}),
                     n_sampling=1)
        test_data_x, test_data_y = get_x_y(size=100)
        auto_tcn.predict(test_data_x)
        auto_tcn.evaluate((test_data_x, test_data_y))

    @_torch
    @_inference
    def test_onnx_methods(self):
        auto_tcn = get_auto_estimator()
        auto_tcn.fit(data=train_dataloader_creator(config={'batch_size': 64}),
                     epochs=1,
                     validation_data=valid_dataloader_creator(
                         config={'batch_size': 64}),
                     n_sampling=1)
        test_data_x, test_data_y = get_x_y(size=100)
        pred = auto_tcn.predict(test_data_x)
        eval_res = auto_tcn.evaluate((test_data_x, test_data_y))
        try:
            import onnx
            import onnxruntime
            # ONNX inference must agree with the native model.
            pred_onnx = auto_tcn.predict_with_onnx(test_data_x)
            eval_res_onnx = auto_tcn.evaluate_with_onnx(
                (test_data_x, test_data_y))
            np.testing.assert_almost_equal(pred, pred_onnx, decimal=5)
            np.testing.assert_almost_equal(eval_res, eval_res_onnx, decimal=5)
        except ImportError:
            pass

    @_torch
    @_inference
    def test_save_load(self):
        auto_tcn = get_auto_estimator()
        auto_tcn.fit(data=train_dataloader_creator(config={'batch_size': 64}),
                     epochs=1,
                     validation_data=valid_dataloader_creator(
                         config={'batch_size': 64}),
                     n_sampling=1)
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            auto_tcn.save(tmp_dir_name)
            auto_tcn.load(tmp_dir_name)
            test_data_x, test_data_y = get_x_y(size=100)
            pred = auto_tcn.predict(test_data_x)
            eval_res = auto_tcn.evaluate((test_data_x, test_data_y))
            try:
                import onnx
                import onnxruntime
                pred_onnx = auto_tcn.predict_with_onnx(test_data_x)
                eval_res_onnx = auto_tcn.evaluate_with_onnx(
                    (test_data_x, test_data_y))
                np.testing.assert_almost_equal(pred, pred_onnx, decimal=5)
                np.testing.assert_almost_equal(eval_res, eval_res_onnx,
                                               decimal=5)
            except ImportError:
                pass

    @_tf2
    def test_save_load_keras(self):
        auto_keras_tcn = get_auto_estimator(backend='keras')
        auto_keras_tcn.fit(data=get_x_y(size=1000),
                           epochs=2,
                           batch_size=hp.choice([32, 64]),
                           validation_data=get_x_y(size=400),
                           n_sampling=1)
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            auto_keras_tcn.save(tmp_dir_name)
            auto_keras_tcn.load(tmp_dir_name)
            test_data_x, test_data_y = get_x_y(size=100)
            pred = auto_keras_tcn.predict(test_data_x)
            eval_res = auto_keras_tcn.evaluate((test_data_x, test_data_y))
class TestMaskedLanguageModel(unittest.TestCase):
    """Train tiny masked LMs and reuse their checkpoints to initialise a
    translation model (full and encoder-only)."""

    def test_masked_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_mlm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_masked_language_model(data_dir, 'xlm_base')

    def test_pretrained_masked_lm_for_translation(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_mlm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_masked_language_model(data_dir, arch='xlm_base')
                with tempfile.TemporaryDirectory(
                        'test_mlm_translation') as translation_dir:
                    create_dummy_data(translation_dir)
                    preprocess_translation_data(
                        translation_dir,
                        extra_flags=['--joined-dictionary'])
                    # Tiny transformer initialised from the MLM checkpoint.
                    flags = [
                        '--decoder-layers', '1',
                        '--decoder-embed-dim', '32',
                        '--decoder-attention-heads', '1',
                        '--decoder-ffn-embed-dim', '32',
                        '--encoder-layers', '1',
                        '--encoder-embed-dim', '32',
                        '--encoder-attention-heads', '1',
                        '--encoder-ffn-embed-dim', '32',
                        '--pretrained-xlm-checkpoint',
                        f'{data_dir}/checkpoint_last.pt',
                        '--encoder-learned-pos',
                        '--decoder-learned-pos',
                        '--activation-fn', 'gelu',
                        '--max-source-positions', '500',
                        '--max-target-positions', '500',
                    ]
                    train_translation_model(
                        translation_dir,
                        arch='transformer_from_pretrained_xlm',
                        extra_flags=flags,
                        task='translation_from_pretrained_xlm')

    def test_pretrained_masked_lm_for_translation_encoder_only(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_mlm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_masked_language_model(data_dir, arch='xlm_base')
                with tempfile.TemporaryDirectory(
                        'test_mlm_translation') as translation_dir:
                    create_dummy_data(translation_dir)
                    preprocess_translation_data(
                        translation_dir,
                        extra_flags=['--joined-dictionary'])
                    # Same as above, plus --init-encoder-only.
                    flags = [
                        '--decoder-layers', '1',
                        '--decoder-embed-dim', '32',
                        '--decoder-attention-heads', '1',
                        '--decoder-ffn-embed-dim', '32',
                        '--encoder-layers', '1',
                        '--encoder-embed-dim', '32',
                        '--encoder-attention-heads', '1',
                        '--encoder-ffn-embed-dim', '32',
                        '--pretrained-xlm-checkpoint',
                        f'{data_dir}/checkpoint_last.pt',
                        '--encoder-learned-pos',
                        '--decoder-learned-pos',
                        '--activation-fn', 'gelu',
                        '--max-source-positions', '500',
                        '--max-target-positions', '500',
                        '--init-encoder-only',
                    ]
                    train_translation_model(
                        translation_dir,
                        arch='transformer_from_pretrained_xlm',
                        extra_flags=flags,
                        task='translation_from_pretrained_xlm')
class Enrichment(nn.Module):
    """Four parallel dilated 3x3 convolutions over a shared base feature
    map; the base and all four branch outputs are summed."""

    def __init__(self, c_in, rate=2):
        super(Enrichment, self).__init__()
        self.rate = rate
        self.relu = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(c_in, 32, 3, stride=1, padding=1)
        # conv1..conv4 use dilations rate*1 .. rate*4 (padding == dilation
        # keeps the spatial size unchanged).
        for idx in range(1, 5):
            dilation = self.rate * idx if self.rate >= 1 else 1
            setattr(self, f'conv{idx}',
                    nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
                              padding=dilation))
        self._initialize_weights()

    def forward(self, x):
        base = self.relu(self.conv(x))
        out = base
        for idx in range(1, 5):
            out = out + self.relu(getattr(self, f'conv{idx}')(base))
        return out

    def _initialize_weights(self):
        # Small gaussian weights, zero biases for every conv layer.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                module.weight.data.normal_(0, 0.01)
                if module.bias is not None:
                    module.bias.data.zero_()
class MsImageDiscriminator(nn.Module):
    """Multi-scale patch discriminator: one CNN per scale applied to a
    progressively downsampled image pyramid."""

    def __init__(self, input_dim, opt):
        super(MsImageDiscriminator, self).__init__()
        self.n_layer = opt.n_layers_D
        self.dim = opt.ndf
        self.norm = 'none'
        self.activ = 'lrelu'
        self.num_scales = 3
        self.pad_type = 'reflect'
        self.input_dim = input_dim
        # 2x downsampling between scales.
        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1],
                                       count_include_pad=False)
        self.cnns = nn.ModuleList(
            self._make_net() for _ in range(self.num_scales))

    def _make_net(self):
        """Build one per-scale CNN: strided conv stack ending in a
        1-channel score map."""
        dim = self.dim
        blocks = [Conv2dBlock(self.input_dim, dim, 4, 2, 1, norm='none',
                              activation=self.activ, pad_type=self.pad_type)]
        for _ in range(self.n_layer - 1):
            blocks.append(Conv2dBlock(dim, dim * 2, 4, 2, 1, norm=self.norm,
                                      activation=self.activ,
                                      pad_type=self.pad_type))
            dim *= 2
        blocks.append(nn.Conv2d(dim, 1, 1, 1, 0))
        return nn.Sequential(*blocks)

    def forward(self, x):
        scores = []
        for cnn in self.cnns:
            scores.append(cnn(x))
            x = self.downsample(x)
        return scores
@_REGISTRY.register()
def resnet18_stylize(pretrained=False, **kwargs):
    """Build a StylizeResNet-18; optionally warm-start from the
    torchvision resnet18 weights."""
    model = StylizeResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        # strict=False: the stylize variant may add or rename modules.
        state = model_zoo.load_url(model_urls['resnet18'])
        model.load_state_dict(state, strict=False)
    return model
def sentence_pairing(sentences: List[str]) -> pandas.DataFrame:
    """Build every forward pair of sentences with differing text.

    For each i <= j, the pair (sentences[i], sentences[j]) is kept unless
    the two strings are equal.
    """
    pairs = [
        [left, right]
        for i, left in enumerate(sentences)
        for right in sentences[i:]
        if left != right
    ]
    return pandas.DataFrame(pairs, columns=['sent_1', 'sent_2'])
def freeze_modules(model, modules):
    """Disable gradients for parameters whose names start with any of the
    given prefixes; return the model and a lazy iterator over the
    parameters that remain trainable."""
    for name, param in model.named_parameters():
        if any(name.startswith(prefix) for prefix in modules):
            logging.info(f'freezing {name}, it will not be updated.')
            param.requires_grad = False
    # Lazy filter, matching the original return type.
    trainable = filter(lambda p: p.requires_grad, model.parameters())
    return (model, trainable)
def quantize(onnx_model_path: Path) -> Path:
    """Dynamically quantize an ONNX model to IntegerOps and write it next
    to the input with a '-quantized' suffix."""
    import onnx
    import onnxruntime
    from onnx.onnx_pb import ModelProto
    from onnxruntime.quantization import QuantizationMode
    from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer
    from onnxruntime.quantization.registry import IntegerOpsRegistry

    onnx_model = onnx.load(onnx_model_path.as_posix())
    if parse(onnx.__version__) < parse('1.5.0'):
        print('Models larger than 2GB will fail to quantize due to protobuf constraint.\nPlease upgrade to onnxruntime >= 1.5.0.')
    # Quantize a copy so the loaded model stays untouched.
    copy_model = ModelProto()
    copy_model.CopyFrom(onnx_model)
    common_kwargs = dict(model=copy_model,
                         per_channel=False,
                         reduce_range=False,
                         mode=QuantizationMode.IntegerOps,
                         static=False,
                         weight_qType=True,
                         tensors_range=None,
                         nodes_to_quantize=None,
                         nodes_to_exclude=None,
                         op_types_to_quantize=list(IntegerOpsRegistry))
    if parse(onnxruntime.__version__) < parse('1.13.1'):
        # Older onnxruntime named the activation parameter `input_qType`.
        quantizer = ONNXQuantizer(input_qType=False, **common_kwargs)
    else:
        quantizer = ONNXQuantizer(activation_qType=False, **common_kwargs)
    quantizer.quantize_model()
    quantized_model_path = generate_identified_filename(onnx_model_path,
                                                        '-quantized')
    print(f'Quantized model has been written at {quantized_model_path}: ')
    onnx.save_model(quantizer.model.model, quantized_model_path.as_posix())
    return quantized_model_path
def test():
    """Run a random CIFAR-sized input through resnet18 and print the
    output shape."""
    net = resnet18(nn.Conv2d, nn.Linear, 'kaiming_normal')
    out = net(torch.randn(1, 3, 32, 32))
    print(out.size())
def lenet(images):
    """Classic LeNet: two conv/pool stages, one hidden dense layer,
    10-way logits."""
    with tf.variable_scope('LeNet', [images]):
        x = tf.layers.conv2d(images, 32, (5, 5), activation=tf.nn.relu,
                             name='conv1')
        x = tf.layers.max_pooling2d(x, (2, 2), 2, name='pool1')
        x = tf.layers.conv2d(x, 64, (5, 5), activation=tf.nn.relu,
                             name='conv2')
        x = tf.layers.max_pooling2d(x, (2, 2), 2, name='pool2')
        x = tf.layers.flatten(x)
        x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='fc3')
        return tf.layers.dense(x, 10)
@_function('conv1d')
class AutogradConv1D(AutogradFunction):
    """Autograd rule for 1-D convolution: straightforward forward pass plus
    a manually-derived backward for input and kernel gradients."""

    def forward(ctx, input, kernel, padding=0, stride=1):
        # Stash everything backward needs to rebuild both gradients.
        ctx.save_multiple_for_backward((input, kernel, padding, stride))
        return input.conv1d(kernel, padding=padding, stride=stride)

    def backward(ctx, grad_output):
        (input, kernel, padding, stride) = ctx.saved_tensors
        batch_size = input.size(0)
        (out_channels, in_channels, kernel_size) = kernel.size()
        # Sanity-check shape agreement before doing any tensor surgery.
        assert (input.size(1) == in_channels), 'wrong number of input channels'
        assert (grad_output.size(1) == out_channels), 'wrong number of output channels'
        assert (grad_output.size(0) == batch_size), 'wrong batch size'
        # Gradient w.r.t. the input: transposed convolution of grad_output
        # with the kernel. NOTE(review): torch.nn.grad._grad_input_padding is
        # a private torch API -- confirm it exists on the pinned version.
        output_padding = torch.nn.grad._grad_input_padding(grad_output, input.size(), (stride,), (padding,), (kernel_size,))
        grad_input = grad_output.conv_transpose1d(kernel, stride=stride, padding=padding, output_padding=output_padding)
        # Gradient w.r.t. the kernel, expressed as a grouped convolution of
        # the input with grad_output (one group per batch*channel pair).
        grad_output = grad_output.repeat(1, in_channels, 1)
        grad_output = grad_output.view((grad_output.size(0) * grad_output.size(1)), 1, grad_output.size(2))
        input = input.view(1, (input.size(0) * input.size(1)), input.size(2))
        grad_kernel = input.conv1d(grad_output, padding=padding, dilation=stride, groups=(in_channels * batch_size))
        # Fold the per-batch groups back together, then reshape/transpose to
        # (out_channels, in_channels, kernel_size) and trim to kernel_size.
        grad_kernel = grad_kernel.view(batch_size, (grad_kernel.size(1) // batch_size), grad_kernel.size(2))
        grad_kernel = grad_kernel.sum(dim=0).view(in_channels, out_channels, grad_kernel.size(2)).transpose(0, 1).narrow(2, 0, kernel_size)
        return (grad_input, grad_kernel)
def get_sbms_model(dataset, args):
    """Build the graph model selected by ``args.model_name`` for the SBMs task.

    Args:
        dataset: 7-tuple ``(g, features, labels, train_mask, val_mask,
            test_mask, factor_graphs)``; only the graph ``g`` is used here.
        args: parsed CLI namespace carrying the architecture hyperparameters.

    Returns:
        An instantiated FactorGNN / GAT / DisenGCN model.

    Raises:
        NameError: if ``args.model_name`` is not one of the known models.
    """
    (g, features, labels, train_mask, val_mask, test_mask,
     factor_graphs) = dataset
    n_classes = 2  # binary node classification on SBMs
    if args.model_name == 'FactorGNN':
        model = FactorGNNSBMs(g, args.num_layers, args.in_dim,
                              args.num_hidden, args.num_latent, args.in_drop,
                              args.residual, n_classes)
    elif args.model_name == 'GAT':
        # Per-layer head counts, with a separate count for the output layer.
        heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
        model = GATSBMs(g, args.num_layers, args.in_dim, args.num_hidden,
                        heads, F.elu, args.in_drop, args.attn_drop,
                        args.negative_slope, args.residual)
    elif args.model_name == 'DisenGCN':
        model = DisenGCNSBMs(args.in_dim, 1, args, split_mlp=False)
    else:
        # Fix: corrected 'unknow' -> 'unknown' in the error message.
        raise NameError(f'unknown format of model name: {args.model_name}')
    return model
def register_datasets(datasets_data: Iterable[CocoDatasetInfo],
                      datasets_root: Optional[str]=None) -> None:
    """Register every dataset description, all rooted at ``datasets_root``."""
    for info in datasets_data:
        register_dataset(info, datasets_root)
class NeuralChatModel(BaseModel):
    """Adapter that recognises neural-chat checkpoints and picks the
    matching conversation template."""

    def match(self, model_path: str):
        # Case-insensitive substring check against the checkpoint path.
        return 'neural-chat' in model_path.lower()

    def get_default_conv_template(self, model_path: str) -> Conversation:
        lowered = model_path.lower()
        if 'neural-chat-7b-v2' in lowered:
            template_name = 'neural-chat-7b-v2'
        elif 'neural-chat-7b-v3' in lowered:
            template_name = 'neural-chat-7b-v3'
        else:
            # v1.1 is the fallback for any other neural-chat variant.
            template_name = 'neural-chat-7b-v1-1'
        return get_conv_template(template_name)
def attention_func(self, hidden_states, *args, **kwargs):
    """Stand-in attention: returns an uninitialised tensor with the same
    shape as the input, allocated on the module-level _DEVICE."""
    return torch.empty(hidden_states.shape, device=_DEVICE)
def plot_image(img):
    """Denormalise an NCHW tensor (assumed roughly in [-1, 1] -- confirm
    with the generator) and display the first image of the batch."""
    array = ((img.permute(0, 2, 3, 1) * 127.5) + 128).clamp(0, 255)
    array = array.to(torch.uint8).detach().cpu().numpy()
    plt.imshow(Image.fromarray(array[0]))
    plt.show()
def create_reward_transform(transform_type):
    """Return a reward-shaping function for scalars or tensors.

    'tanh' squashes to (-1, 1); 'clip' hard-limits to [-1, 1];
    'none'/None is the identity. Anything else raises ValueError.
    """
    if transform_type == 'tanh':
        def transform(r):
            return torch.tanh(r) if torch.is_tensor(r) else math.tanh(r)
    elif transform_type == 'clip':
        def transform(r):
            if torch.is_tensor(r):
                return torch.clip(r, -1, 1)
            return np.clip(r, -1, 1)
    elif transform_type == 'none' or transform_type is None:
        def transform(r):
            return r
    else:
        raise ValueError(transform_type)
    return transform
class CLIPImageProjection(metaclass=DummyObject):
    """Import-time placeholder used when torch/transformers are missing;
    any use raises an informative backend error."""

    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    def from_config(cls, *args, **kwargs):
        # NOTE(review): upstream dummy templates usually decorate this with
        # @classmethod -- confirm against the generator.
        requires_backends(cls, ['torch', 'transformers'])

    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
def dataframe_to_deepsurv_ds(df, event_col='Event', time_col='Time'):
    """Split a DataFrame into DeepSurv's {'x', 'e', 't'} numpy arrays:
    covariates (float32), event indicators (int32) and times (float32)."""
    events = df[event_col].values.astype(np.int32)
    times = df[time_col].values.astype(np.float32)
    covariates = df.drop([event_col, time_col], axis=1).values.astype(np.float32)
    return {'x': covariates, 'e': events, 't': times}
class feature_extraction(nn.Module):
    """PSMNet-style feature extractor: a strided stem, four residual
    stages and a spatial pyramid pooling head fused by a final conv."""

    def __init__(self):
        super(feature_extraction, self).__init__()
        self.inplanes = 32
        # Stem: three 3x3 convs, the first strided (1/2 resolution).
        self.firstconv = nn.Sequential(
            convbn(3, 32, 3, 2, 1, 1), nn.ReLU(inplace=True),
            convbn(32, 32, 3, 1, 1, 1), nn.ReLU(inplace=True),
            convbn(32, 32, 3, 1, 1, 1), nn.ReLU(inplace=True))
        self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)
        self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1)
        self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)
        self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)  # dilated
        # SPP branches: average-pool at four scales, reduce to 32 channels.
        self.branch1 = nn.Sequential(nn.AvgPool2d((64, 64), stride=(64, 64)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch2 = nn.Sequential(nn.AvgPool2d((32, 32), stride=(32, 32)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch3 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16, 16)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch4 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8, 8)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.lastconv = nn.Sequential(
            convbn(320, 128, 3, 1, 1, 1), nn.ReLU(inplace=True),
            nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1,
                      bias=False))

    def _make_layer(self, block, planes, blocks, stride, pad, dilation):
        """Stack ``blocks`` residual blocks; a 1x1 downsample projection is
        added when the stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion))
        layers = [block(self.inplanes, planes, stride, downsample, pad,
                        dilation)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):  # remaining blocks keep stride 1
            layers.append(block(self.inplanes, planes, 1, None, pad, dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        output = self.firstconv(x)
        output = self.layer1(output)
        output_raw = self.layer2(output)
        output = self.layer3(output_raw)
        output_skip = self.layer4(output)
        # Fix: F.upsample is deprecated; F.interpolate is the documented
        # drop-in replacement with identical semantics for these arguments.
        target_size = (output_skip.size()[2], output_skip.size()[3])
        branch_outputs = [
            F.interpolate(branch(output_skip), target_size, mode='bilinear',
                          align_corners=True)
            for branch in (self.branch1, self.branch2, self.branch3,
                           self.branch4)
        ]
        # Concatenation order matches the original: raw, skip, then the
        # branches from coarsest (branch4) to finest (branch1).
        output_feature = torch.cat(
            (output_raw, output_skip, branch_outputs[3], branch_outputs[2],
             branch_outputs[1], branch_outputs[0]), 1)
        return self.lastconv(output_feature)
def test_film_can_toggle_batch_norm(mocker):
    """FiLM should build and call BatchNorm1d only when use_batch_norm=True."""
    # Spy at the class level so any instantiation/forward anywhere is counted.
    spy_batch_norm_init = mocker.spy(torch.nn.BatchNorm1d, '__init__')
    spy_batch_norm_forward = mocker.spy(torch.nn.BatchNorm1d, 'forward')
    batch_size = 7
    in_channels = 13
    seq_len = 37
    film_embedding_size = 5
    x = torch.testing.make_tensor(batch_size, in_channels, seq_len, device='cpu', dtype=torch.float32)
    film_embedding = torch.testing.make_tensor(batch_size, film_embedding_size, device='cpu', dtype=torch.float32)
    # Batch norm enabled: exactly one BatchNorm1d constructed and invoked.
    film = FiLM(film_embedding_size, in_channels, use_batch_norm=True)
    film(x, film_embedding)
    assert (spy_batch_norm_init.call_count == 1)
    assert (spy_batch_norm_forward.call_count == 1)
    # Batch norm disabled: counters unchanged, i.e. no new BatchNorm1d use.
    film = FiLM(film_embedding_size, in_channels, use_batch_norm=False)
    film(x, film_embedding)
    assert (spy_batch_norm_init.call_count == 1)
    assert (spy_batch_norm_forward.call_count == 1)
class SparseConvolution(SparseModule):
    """N-dimensional sparse convolution over a SparseConvTensor.

    Supports regular, submanifold (`subm`), transposed and inverse variants.
    Computed indice pairs are cached on the input tensor's `indice_dict`
    under `indice_key`, so coupled convolutions can reuse them.
    """

    def __init__(self, ndim, in_channels, out_channels, kernel_size=3,
                 stride=1, padding=0, dilation=1, groups=1, bias=True,
                 subm=False, output_padding=0, transposed=False,
                 inverse=False, indice_key=None, fused_bn=False):
        super(SparseConvolution, self).__init__()
        assert (groups == 1)  # grouped sparse convolution is not supported
        # Broadcast scalar geometry arguments to one value per spatial dim.
        if (not isinstance(kernel_size, (list, tuple))):
            kernel_size = ([kernel_size] * ndim)
        if (not isinstance(stride, (list, tuple))):
            stride = ([stride] * ndim)
        if (not isinstance(padding, (list, tuple))):
            padding = ([padding] * ndim)
        if (not isinstance(dilation, (list, tuple))):
            dilation = ([dilation] * ndim)
        if (not isinstance(output_padding, (list, tuple))):
            output_padding = ([output_padding] * ndim)
        # Simultaneous stride > 1 and dilation > 1 is unsupported.
        for (d, s) in zip(dilation, stride):
            assert any([(s == 1), (d == 1)]), "don't support this."
        self.ndim = ndim
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # 1x1 kernels degenerate to a dense matmul fast path in forward().
        self.conv1x1 = (np.prod(kernel_size) == 1)
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.inverse = inverse
        self.output_padding = output_padding
        self.groups = groups
        self.subm = subm
        self.indice_key = indice_key
        self.fused_bn = fused_bn
        # Weight layout: (*kernel_size, in_channels, out_channels) — HWIO-like,
        # matching _calculate_fan_in_and_fan_out_hwio below.
        self.weight = Parameter(torch.Tensor(*kernel_size, in_channels, out_channels))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-uniform weight init; bias bounded by 1/sqrt(fan_in)."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if (self.bias is not None):
            (fan_in, _) = _calculate_fan_in_and_fan_out_hwio(self.weight)
            bound = (1 / math.sqrt(fan_in))
            init.uniform_(self.bias, (- bound), bound)

    def forward(self, input):
        """Apply the sparse convolution; returns a new SparseConvTensor."""
        assert isinstance(input, SparseConvTensor)
        features = input.features
        device = features.device
        indices = input.indices
        spatial_shape = input.spatial_shape
        batch_size = input.batch_size
        # Submanifold convs keep the spatial shape; others (de)convolve it.
        if (not self.subm):
            if self.transposed:
                out_spatial_shape = ops.get_deconv_output_size(spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation, self.output_padding)
            else:
                out_spatial_shape = ops.get_conv_output_size(spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation)
        else:
            out_spatial_shape = spatial_shape
        if self.conv1x1:
            # 1x1 fast path: dense matmul on the feature matrix; indices and
            # spatial shape pass through unchanged.
            features = torch.mm(input.features, self.weight.view(self.in_channels, self.out_channels))
            if (self.bias is not None):
                features += self.bias
            out_tensor = SparseConvTensor(features, input.indices, input.spatial_shape, input.batch_size)
            out_tensor.indice_dict = input.indice_dict
            out_tensor.grid = input.grid
            return out_tensor
        datas = input.find_indice_pair(self.indice_key)
        if self.inverse:
            # Inverse conv must reuse the indice pairs of its coupled conv.
            assert ((datas is not None) and (self.indice_key is not None))
            (_, outids, indice_pairs, indice_pair_num, out_spatial_shape) = datas
            assert (indice_pairs.shape[0] == np.prod(self.kernel_size)), 'inverse conv must have same kernel size as its couple conv'
        elif ((self.indice_key is not None) and (datas is not None)):
            # Reuse cached pairs from a previous conv with the same key.
            (outids, _, indice_pairs, indice_pair_num, _) = datas
        else:
            # Compute indice pairs from scratch and cache them on the input.
            (outids, indice_pairs, indice_pair_num) = ops.get_indice_pairs(indices, batch_size, spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation, self.output_padding, self.subm, self.transposed, grid=input.grid)
            input.indice_dict[self.indice_key] = (outids, indices, indice_pairs, indice_pair_num, spatial_shape)
        if self.fused_bn:
            # Fused conv+BN path: bias is mandatory and folded into the op.
            assert (self.bias is not None)
            out_features = ops.fused_indice_conv(features, self.weight, self.bias, indice_pairs.to(device), indice_pair_num, outids.shape[0], self.inverse, self.subm)
        else:
            if self.subm:
                out_features = Fsp.indice_subm_conv(features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0])
            elif self.inverse:
                out_features = Fsp.indice_inverse_conv(features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0])
            else:
                out_features = Fsp.indice_conv(features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0])
            # NOTE(review): bias add reconstructed as inside the non-fused
            # branch (fused path already includes it) — confirm upstream.
            if (self.bias is not None):
                out_features += self.bias
        out_tensor = SparseConvTensor(out_features, outids, out_spatial_shape, batch_size)
        out_tensor.indice_dict = input.indice_dict
        out_tensor.grid = input.grid
        return out_tensor
def _generate_waymo_train_dataset_config(): data_root = 'tests/data/waymo/kitti_format/' ann_file = 'tests/data/waymo/kitti_format/waymo_infos_train.pkl' classes = ['Car', 'Pedestrian', 'Cyclist'] pts_prefix = 'velodyne' point_cloud_range = [(- 74.88), (- 74.88), (- 2), 74.88, 74.88, 4] file_client_args = dict(backend='disk') db_sampler = dict(data_root=data_root, info_path=(data_root + 'waymo_dbinfos_train.pkl'), rate=1.0, prepare=dict(filter_by_difficulty=[(- 1)], filter_by_min_points=dict(Car=5)), classes=classes, sample_groups=dict(Car=15), points_loader=dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=[0, 1, 2, 3, 4], file_client_args=file_client_args)) pipeline = [dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=6, use_dim=5, file_client_args=file_client_args), dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, file_client_args=file_client_args), dict(type='ObjectSample', db_sampler=db_sampler), dict(type='RandomFlip3D', sync_2d=False, flip_ratio_bev_horizontal=0.5, flip_ratio_bev_vertical=0.5), dict(type='GlobalRotScaleTrans', rot_range=[(- 0.), 0.], scale_ratio_range=[0.95, 1.05]), dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), dict(type='PointShuffle'), dict(type='DefaultFormatBundle3D', class_names=classes), dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])] modality = dict(use_lidar=True, use_camera=False) split = 'training' return (data_root, ann_file, classes, pts_prefix, pipeline, modality, split)
def dev_token_loader2(dev_path, dim=1):
    """Load pickled (dev/test) embeddings and return entry `dim`.

    On any failure (missing/corrupt file), falls back to the companion
    '<dev_path>_np.pkl' numpy pickle and converts each element to a
    torch tensor.
    """
    try:
        with open(dev_path, 'rb') as f:
            elmo_embeds_train = pickle.load(f)
        return elmo_embeds_train[dim]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; all other load errors trigger the retry path.
        print('error with loading (dev/test) files, retry:')
        sys.stdout.flush()
        with open((dev_path + '_np.pkl'), 'rb') as f:
            elmo_embeds_train = pickle.load(f)
        return [torch.tensor(elem) for elem in elmo_embeds_train[dim]]
class IsIn(BaseRule):
    """Rule that matches when the configured keyword occurs in the target."""

    def __init__(self, keyword: str):
        # Membership probe used by __call__ (substring for str targets).
        self.keyword = keyword

    def __call__(self, target):
        """Return True if the keyword is contained in `target`."""
        return self.keyword in target
def parse_args():
    """Parse CLI arguments for the Conv+BN fusion script.

    Positional arguments: config path, checkpoint path, output path.
    """
    parser = argparse.ArgumentParser(
        description='fuse Conv and BN layers in a model')
    for name, help_text in (('config', 'config file path'),
                            ('checkpoint', 'checkpoint file path'),
                            ('out', 'output path of the converted model')):
        parser.add_argument(name, help=help_text)
    return parser.parse_args()
class TFCamembertModel():
    """Placeholder for the TensorFlow Camembert model.

    Stands in when TensorFlow is unavailable: every entry point simply
    defers to `requires_tf`, which raises an informative error.
    """

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        """Dummy loader — raises via requires_tf like the constructor."""
        requires_tf(self)
def add_generation_args(parser):
    """Attach the 'Generation' argument group to `parser` and return it.

    Adds the common evaluation flags first, then the flags derived from
    the GenerationConfig dataclass.
    """
    group = parser.add_argument_group('Generation')
    add_common_eval_args(group)
    gen_parser_from_dataclass(group, GenerationConfig())
    return group
class DefaultValues(object):
    """Default tuning constants for the training monitor / worker auto-scaler.

    Names follow the pattern SEC_* (seconds), STEP_* (training steps),
    FACTOR_* (divisors) — units inferred from the prefixes; confirm against
    the consuming code.
    """

    # Number of recent speed samples kept for training-speed estimation.
    TRAIN_SPEED_RECORD_NUM = 50
    # Seconds to wait before worker auto-scaling starts.
    SEC_TO_START_AUTOSCALE_WORKER = 90
    # Step interval at which the worker count is adjusted.
    STEP_TO_ADJUST_WORKER = 200
    # CPU-usage threshold (percent) considered "optimized" for a worker.
    OPTIMIZED_WORKER_CPU_THRESHOLD = 20
    # Seconds the worker count must hold steady to be considered stable.
    SEC_FOR_STABLE_WORKER_COUNT = 60
    # Seconds between optimization passes.
    SEC_INTERVAL_TO_OPTIMIZE = 300
    # Divisors applied to pending CPU/memory requests.
    FACTOR_TO_CUT_PENDING_CPU = 2
    FACTOR_TO_CUT_PENDING_MEM = 2
    # Maximum seconds to wait on a pending pod.
    SEC_TO_WAIT_PENDING_POD = 900
    # Jobs running longer than this (seconds) count as "huge" trainings.
    SEC_HUGE_TRAINING_THRESHOLD = 1800
    # Step samples required before auto worker decisions.
    STEP_SAMPLE_COUNT_TO_AUTO_WORKER = 5
    # Seconds before a parameter-server change / failed-PS wait.
    SEC_TO_CHANGE_PS = 3600
    SEC_TO_WAIT_FAILED_PS = 600
    # CPU usage rate below which a process is considered hung.
    HANG_CPU_USAGE_RATE = 0.05
class DeepFactorizationMachineModel(torch.nn.Module):
    """DeepFM: linear term + factorization machine + MLP over shared embeddings.

    The linear, FM and deep components share one feature-embedding table;
    their outputs are summed and squashed to a probability.
    """

    def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        # MLP consumes the flattened concatenation of all field embeddings.
        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)

    def forward(self, x):
        """x: tensor of field indices; returns a per-row probability."""
        embed_x = self.embedding(x)
        wide = self.linear(x) + self.fm(embed_x)
        deep = self.mlp(embed_x.view(-1, self.embed_output_dim))
        logits = wide + deep
        return torch.sigmoid(logits.squeeze(1))
def create_dataloader(opt):
    """Instantiate the dataset named by `opt.dataset_mode` and wrap it in a DataLoader."""
    dataset_cls = find_dataset_using_name(opt.dataset_mode)
    instance = dataset_cls()
    instance.initialize(opt)
    print('dataset [%s] of size %d was created' % (type(instance).__name__, len(instance)))
    # Shuffle unless serial batches are requested; drop the last partial
    # batch only while training.
    return torch.utils.data.DataLoader(
        instance,
        batch_size=opt.batchSize,
        shuffle=not opt.serial_batches,
        num_workers=int(opt.nThreads),
        drop_last=opt.isTrain)
def crop_video(video_f, video, crop_path, instanc_size):
    """Crop SiamFC-style (z, x) patch pairs for every frame of one LaSOT video.

    Relies on the module-level `lasot_base_path` and `crop_like_SiamFC`.
    Output files are named '<frame>.00.z.jpg' / '<frame>.00.x.jpg'.
    """
    video_crop_base_path = join(crop_path, video)
    if (not isdir(video_crop_base_path)):
        makedirs(video_crop_base_path)
    sub_set_base_path = join(lasot_base_path, video_f)
    video_base_path = join(sub_set_base_path, video)
    gts_path = join(video_base_path, 'groundtruth.txt')
    # Comma-separated GT rows; assumed (x, y, w, h) per frame — the
    # conversion below treats columns 2/3 as width/height.
    gts = np.loadtxt(open(gts_path, 'rb'), delimiter=',')
    jpgs = sorted(glob.glob(join(video_base_path, 'img', '*.jpg')))
    if (not jpgs):
        print('no jpg files, try png files')
        jpgs = sorted(glob.glob(join(video_base_path, '*.png')))
    if (not jpgs):
        print('no jpg and png files, check data please')
    for (idx, img_path) in enumerate(jpgs):
        gt = gts[idx]
        # Skip frames with a degenerate (zero width or height) box.
        if ((abs((gt[2] - 0)) < 1e-05) or (abs((gt[3] - 0)) < 1e-05)):
            continue
        im = cv2.imread(img_path)
        # Per-channel image mean is used as the padding color.
        avg_chans = np.mean(im, axis=(0, 1))
        bbox = [int(g) for g in gt]
        # (x, y, w, h) -> (x1, y1, x2, y2).
        bbox = [bbox[0], bbox[1], (bbox[0] + bbox[2]), (bbox[1] + bbox[3])]
        (z, x) = crop_like_SiamFC(im, bbox, instanc_size=instanc_size, padding=avg_chans)
        cv2.imwrite(join(video_crop_base_path, '{:06d}.{:02d}.z.jpg'.format(int(idx), 0)), z)
        cv2.imwrite(join(video_crop_base_path, '{:06d}.{:02d}.x.jpg'.format(int(idx), 0)), x)
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint into PyTorch weight + config files.

    An empty `gpt2_config_file` means: use the default GPT2Config.
    """
    if gpt2_config_file == '':
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
    # Destination paths inside the dump folder.
    weights_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    config_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print('Save PyTorch model to {}'.format(weights_path))
    torch.save(model.state_dict(), weights_path)
    print('Save configuration file to {}'.format(config_path))
    with open(config_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
def main():
    """End-to-end VSR training entry point (uses the module-level `args`)."""
    display_config()
    print('Contructing dataset...')
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    train_dataset = VSR_Dataset(dir=args.train_set, trans=transforms.Compose([RandomCrop(48, args.scale), DataAug(), ToTensor()]))
    model_factory = ModelFactory()
    model = model_factory.create_model(args.model)
    # Report trainable parameter count in millions.
    model_parameters = filter((lambda p: p.requires_grad), model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    print(((1.0 * params) / (1000 * 1000)))
    loss_fn = get_loss_fn(model.name)
    # Checkpoints go to <checkpoint>/<model>/<scale>x/.
    check_point = os.path.join(args.checkpoint, model.name, (str(args.scale) + 'x'))
    if (not os.path.exists(check_point)):
        os.makedirs(check_point)
    solver = Solver(model, check_point, model.name, loss_fn=loss_fn, batch_size=args.batch_size, num_epochs=args.num_epochs, learning_rate=args.learning_rate, fine_tune=args.fine_tune, verbose=args.verbose)
    print('Training...')
    # Validation set gets no augmentation, only tensor conversion.
    val_dataset = VSR_Dataset(dir=args.test_set, trans=transforms.Compose([ToTensor()]))
    solver.train(train_dataset, val_dataset)
# NOTE(review): the decorator prefix looks truncated in this chunk —
# presumably a sacred-style `@<name>_ingredient.config` decorator; verify
# against the original file.
_ingredient.config
def config():
    """Default hyper-parameters for the embedding model (sacred config style).

    The locals below are intentionally "unused": the config framework
    captures them by introspection.
    """
    arch = 'resnet18'
    pretrained = True
    num_features = 512
    dropout = 0.0
    norm_layer = None
    remap = False
    detach = False
    normalize = False
    set_bn_eval = True
    normalize_weight = False
def test_orbit_setup_lb_uvw_oddunits():
    """Orbit setup from Galactic (l, b, dist, U, V, W) Quantities in odd units.

    Each coordinate is read back with quantity=False and compared against
    the hand-converted value (rad -> deg, pc -> kpc; velocities given in
    pc/Myr, compared with a looser 1e-5 tolerance).
    """
    from galpy.orbit import Orbit
    o = Orbit([(1.0 * units.rad), ((- 0.25) * units.rad), (3000.0 * units.pc), (((- 30.0) * units.pc) / units.Myr), ((20.0 * units.pc) / units.Myr), ((130.0 * units.pc) / units.Myr)], lb=True, uvw=True)
    # l, b given in radians must read back in degrees.
    assert (numpy.fabs((o.ll(quantity=False) - ((1.0 / numpy.pi) * 180.0))) < (10.0 ** (- 8.0))), 'Orbit initialization with ll as Quantity does not work as expected'
    assert (numpy.fabs((o.bb(quantity=False) + ((0.25 / numpy.pi) * 180.0))) < (10.0 ** (- 8.0))), 'Orbit initialization with bb as Quantity does not work as expected'
    # Distance given in pc must read back in kpc.
    assert (numpy.fabs((o.dist(quantity=False) - 3.0)) < (10.0 ** (- 8.0))), 'Orbit initialization with distance as Quantity does not work as expected'
    assert (numpy.fabs((o.U(quantity=False) + (30.0 / 1.))) < (10.0 ** (- 5.0))), 'Orbit initialization with U as Quantity does not work as expected'
    assert (numpy.fabs((o.V(quantity=False) - (20.0 / 1.))) < (10.0 ** (- 5.0))), 'Orbit initialization with V as Quantity does not work as expected'
    assert (numpy.fabs((o.W(quantity=False) - (130.0 / 1.))) < (10.0 ** (- 5.0))), 'Orbit initialization with W as Quantity does not work as expected'
    return None
def _infunc(x, func, gfun, hfun, more_args, epsrel, epsabs): a = gfun(x) b = hfun(x) myargs = ((x,) + more_args) retval = quad(func, a, b, args=myargs, epsrel=epsrel, epsabs=epsabs) return retval[0]
def parse_args():
    """Parse the dataset-selection CLI flag (restricted to voc or coco)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        required=True,
                        choices=['voc', 'coco'],
                        help='Dataset to use')
    args = parser.parse_args()
    return args
def main(args):
    """Run one training and one testing pass per epoch over the configured range."""
    trainer = Trainer(args)
    for epoch in range(args.start_epoch, args.epochs):
        # Train first, then evaluate, for every epoch.
        trainer.training(epoch)
        trainer.testing(epoch)
def _parse_args(): parser = ArgumentParser() parser.add_argument('--cluster_mode', type=str, default='local', help='The cluster mode, such as local, yarn, standalone or spark-submit.') parser.add_argument('--master', type=str, default=None, help='The master url, only used when cluster mode is standalone.') parser.add_argument('--executor_cores', type=int, default=48, help='The executor core number.') parser.add_argument('--executor_memory', type=str, default='160g', help='The executor memory.') parser.add_argument('--num_executors', type=int, default=8, help='The number of executor.') parser.add_argument('--driver_cores', type=int, default=4, help='The driver core number.') parser.add_argument('--driver_memory', type=str, default='36g', help='The driver memory.') parser.add_argument('--days', type=str, required=True, help='Day range for preprocessing, such as 0-23, 0-1.') parser.add_argument('--input_folder', type=str, required=True, help='Path to the folder of parquet files.') parser.add_argument('--output_folder', type=str, default='.', help='The path to save the preprocessed data to parquet files. ') parser.add_argument('--frequency_limit', type=int, default=15, help='frequency below frequency_limit will be omitted from the encoding.') parser.add_argument('--cross_sizes', type=str, help='bucket sizes for cross columns', default='10000, 10000') args = parser.parse_args() (start, end) = args.days.split('-') args.day_range = list(range(int(start), (int(end) + 1))) args.days = len(args.day_range) args.cross_sizes = [int(x) for x in args.cross_sizes.split(',')] return args
def make_roi_box_predictor(cfg):
    """Instantiate the ROI box predictor class registered under the configured name."""
    predictor_cls = registry.ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
    return predictor_cls(cfg)
def resize_and_convert(img, size, quality=100):
    """Resize with Lanczos, center-crop to `size`, and return JPEG-encoded bytes."""
    resized = trans_fn.resize(img, size, Image.LANCZOS)
    cropped = trans_fn.center_crop(resized, size)
    # Encode to JPEG in memory rather than touching disk.
    buffer = BytesIO()
    cropped.save(buffer, format='jpeg', quality=quality)
    return buffer.getvalue()
def get_node_ip():
    """Best-effort lookup of this machine's primary IPv4 address.

    Uses a UDP "connect" (no packets are actually sent) to discover the
    outbound interface; on an unreachable network falls back to hostname
    resolution, and finally to 127.0.0.1.
    """
    import socket
    import errno
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.connect(('8.8.8.8', 80))
        node_ip_address = sock.getsockname()[0]
    except OSError as err:
        node_ip_address = '127.0.0.1'
        if err.errno == errno.ENETUNREACH:
            try:
                fqdn = socket.getfqdn(socket.gethostname())
                node_ip_address = socket.gethostbyname(fqdn)
            except Exception:
                # Keep the loopback fallback on any resolution failure.
                pass
    finally:
        sock.close()
    return node_ip_address
# NOTE(review): the decorator is truncated in this chunk — presumably
# `@pytest.mark.parametrize`; verify against the original file.
.parametrize('cfg_file', ['../configs/textrecog/sar/sar_r31_parallel_decoder_academic.py', '../configs/textrecog/abinet/abinet_academic.py', '../configs/textrecog/crnn/crnn_academic_dataset.py', '../configs/textrecog/seg/seg_r31_1by16_fpnocr_academic.py', '../configs/textdet/psenet/psenet_r50_fpnf_600e_icdar2017.py'])
def test_model_inference(cfg_file):
    """Smoke-test model_inference per config: rejects non-path/ndarray input,
    accepts both an image path and a pre-loaded image array."""
    # Configs are resolved relative to the tests directory's parent.
    tmp_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    config_file = os.path.join(tmp_dir, cfg_file)
    model = build_model(config_file)
    # Invalid input type must be rejected with an AssertionError.
    with pytest.raises(AssertionError):
        model_inference(model, 1)
    sample_img_path = os.path.join(tmp_dir, '../demo/demo_text_det.jpg')
    model_inference(model, sample_img_path)
    # Same image passed as an ndarray.
    img = imread(sample_img_path)
    model_inference(model, img)
def init_model(args, device, n_gpu, local_rank):
    """Build the HBI cross-model, optionally seeding it from `args.init_model` weights.

    `n_gpu` and `local_rank` are accepted for interface compatibility but
    not used here.
    """
    if args.init_model:
        model_state_dict = torch.load(args.init_model, map_location='cpu')
    else:
        model_state_dict = None
    # Cache downloaded weights in a distributed-safe location unless overridden.
    if args.cache_dir:
        cache_dir = args.cache_dir
    else:
        cache_dir = os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = HBI.from_pretrained(args.cross_model, cache_dir=cache_dir,
                                state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
def main(correct, fail=None):
    """Apply recorded expected-output corrections to test files.

    `correct` lists corrections as 'file;class;test;line' rows. When `fail`
    is given, only tests named in it ('file::class::test' per line) are
    rewritten.
    """
    if fail is not None:
        with open(fail, 'r') as f:
            test_failures = {line.strip() for line in f.readlines()}
    else:
        test_failures = None
    with open(correct, 'r') as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for entry in correct_lines:
        file, class_name, test_name, correct_line = entry.split(';')
        selected = test_failures is None or '::'.join([file, class_name, test_name]) in test_failures
        if selected:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
class UpSample(nn.Module):
    """Upsample by `factor` via a 1x1 projection followed by PixelShuffle."""

    def __init__(self, n_chan, factor=2):
        super(UpSample, self).__init__()
        # PixelShuffle consumes factor**2 channel groups, so project up first;
        # the output keeps n_chan channels at factor-times the resolution.
        out_chan = n_chan * factor * factor
        self.proj = nn.Conv2d(n_chan, out_chan, 1, 1, 0)
        self.up = nn.PixelShuffle(factor)
        self.init_weight()

    def forward(self, x):
        return self.up(self.proj(x))

    def init_weight(self):
        """Xavier-normal init for the projection weights."""
        nn.init.xavier_normal_(self.proj.weight, gain=1.0)
def masks_union(masks1, masks2):
    """Return the element-wise average ("soft union") of two equal-length mask batches.

    Both arguments must support len(), addition and scalar division
    (e.g. torch tensors or numpy arrays of identical shape).
    """
    assert len(masks1) == len(masks2)
    # Renamed local (was `masks_union`) so it no longer shadows the function name.
    averaged = (masks1 + masks2) / 2.0
    return averaged
def main():
    """Download every model listed in download_models.json into ./checkpoints."""
    # The config lives next to this script, regardless of the CWD.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    config_file_path = os.path.join(script_dir, 'download_models.json')
    download_dir = 'checkpoints'
    os.makedirs(download_dir, exist_ok=True)
    with open(config_file_path, 'r') as f:
        config = json.load(f)
    # Mapping is url -> target filename.
    for url, filename in config.items():
        download_file(url, filename, download_dir)
def lpips(x: torch.Tensor, y: torch.Tensor, net_type: str = 'alex', version: str = '0.1'):
    """Compute the LPIPS perceptual distance between image batches x and y.

    A fresh LPIPS network is instantiated on x's device for each call.
    """
    criterion = LPIPS(net_type, version).to(x.device)
    return criterion(x, y)
def set_mat(obj: Union[bpy.types.Object, str], mat: Union[bpy.types.Material, str], recursive: bool = True) -> None:
    """Assign material `mat` to Blender object `obj`, optionally recursing into children.

    Both arguments may be names or the Blender objects themselves; they are
    resolved through the zpy verify helpers. Objects without an
    `active_material` slot are skipped with a warning (children included).
    """
    obj = zpy.objects.verify(obj)
    mat = zpy.material.verify(mat)
    if hasattr(obj, 'active_material'):
        log.debug(f'Setting object {obj.name} material {mat.name}')
        obj.active_material = mat
    else:
        log.warning('Object does not have material property')
        return
    if recursive:
        # Children recurse with the default recursive=True themselves.
        for child in obj.children:
            set_mat(child, mat)
def _get_learningrate(lr, decay): if (decay is None): return lr if (decay[0] == 'inverse time'): return tf.keras.optimizers.schedules.InverseTimeDecay(lr, decay[1], decay[2]) if (decay[0] == 'cosine'): return tf.keras.optimizers.schedules.CosineDecay(lr, decay[1], alpha=decay[2]) raise NotImplementedError(f'{decay[0]} learning rate decay to be implemented for backend tensorflow.')
class Sign2TextTransformerEncoder(FairseqEncoder):
    """Transformer encoder over sign-language feature sequences.

    Projects keypoint (mediapipe/openpose) or i3d features into the model
    dimension, adds positional embeddings, then runs standard fairseq
    TransformerEncoderLayers. Follows the fairseq encoder-out dict contract.
    """

    def __init__(self, cfg, feats_type: SignFeatsType, feat_dim: int):
        super().__init__(None)  # no dictionary is needed for raw feature input
        self.num_updates = 0
        self.dropout_module = FairseqDropout(p=cfg.dropout, module_name=self.__class__.__name__)
        self.embed_scale = math.sqrt(cfg.encoder_embed_dim)
        if cfg.no_scale_embedding:
            self.embed_scale = 1.0
        self.padding_idx = 1
        self.feats_type = feats_type
        # Keypoint inputs are projected from feat_dim*3 — presumably three
        # coordinates per keypoint (TODO confirm upstream).
        if ((feats_type == SignFeatsType.mediapipe) or (feats_type == SignFeatsType.openpose)):
            self.feat_proj = nn.Linear((feat_dim * 3), cfg.encoder_embed_dim)
        if (feats_type == SignFeatsType.i3d):
            self.feat_proj = nn.Linear(feat_dim, cfg.encoder_embed_dim)
        self.embed_positions = PositionalEmbedding(cfg.max_source_positions, cfg.encoder_embed_dim, self.padding_idx)
        self.transformer_layers = nn.ModuleList([TransformerEncoderLayer(cfg) for _ in range(cfg.encoder_layers)])
        if cfg.encoder_normalize_before:
            self.layer_norm = LayerNorm(cfg.encoder_embed_dim)
        else:
            self.layer_norm = None

    def forward(self, src_tokens, encoder_padding_mask, return_all_hiddens=False):
        """Encode a batch of feature sequences; returns a fairseq encoder-out dict."""
        if (self.feats_type == SignFeatsType.mediapipe):
            # Flatten the trailing per-keypoint axes into one feature dim.
            src_tokens = src_tokens.view(src_tokens.shape[0], src_tokens.shape[1], (- 1))
        # Project then move to time-major (T, B, C) layout.
        x = self.feat_proj(src_tokens).transpose(0, 1)
        x = (self.embed_scale * x)
        positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
        x += positions
        x = self.dropout_module(x)
        encoder_states = []
        for layer in self.transformer_layers:
            x = layer(x, encoder_padding_mask)
            if return_all_hiddens:
                encoder_states.append(x)
        if (self.layer_norm is not None):
            x = self.layer_norm(x)
        # Empty padding-mask list when nothing is padded (fairseq convention).
        return {'encoder_out': [x], 'encoder_padding_mask': ([encoder_padding_mask] if encoder_padding_mask.any() else []), 'encoder_embedding': [], 'encoder_states': encoder_states, 'src_tokens': []}

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder batched encoder output for beam search (fairseq contract).

        encoder_out tensors are (T, B, C) so batch is dim 1; padding masks
        and embeddings are batch-first, so batch is dim 0.
        """
        new_encoder_out = ([] if (len(encoder_out['encoder_out']) == 0) else [x.index_select(1, new_order) for x in encoder_out['encoder_out']])
        new_encoder_padding_mask = ([] if (len(encoder_out['encoder_padding_mask']) == 0) else [x.index_select(0, new_order) for x in encoder_out['encoder_padding_mask']])
        new_encoder_embedding = ([] if (len(encoder_out['encoder_embedding']) == 0) else [x.index_select(0, new_order) for x in encoder_out['encoder_embedding']])
        encoder_states = encoder_out['encoder_states']
        if (len(encoder_states) > 0):
            for (idx, state) in enumerate(encoder_states):
                encoder_states[idx] = state.index_select(1, new_order)
        return {'encoder_out': new_encoder_out, 'encoder_padding_mask': new_encoder_padding_mask, 'encoder_embedding': new_encoder_embedding, 'encoder_states': encoder_states, 'src_tokens': []}

    def set_num_updates(self, num_updates):
        """Track the trainer's update counter (fairseq hook)."""
        super().set_num_updates(num_updates)
        self.num_updates = num_updates
def parse_arguments():
    """CLI options: optional input ONNX model (defaults to tiny-yolov3-11.onnx)
    and a required output model path."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_model', type=str, required=False, default='tiny-yolov3-11.onnx')
    parser.add_argument('--output_model', type=str, required=True)
    args = parser.parse_args()
    return args
class DreamEnvironment(object):
    """Environment assembled from an abstract scene description."""

    def __init__(self, abstract_scene_description):
        # Raw abstract scene description this environment was built from.
        self.description = abstract_scene_description
        # Populated later via set_scene_shoppinglist().
        self.scene_shoppinglists = None

    def set_scene_shoppinglist(self, shoppinglist: SceneShoppingList):
        """Attach a SceneShoppingList instance; returns self for chaining.

        Annotation fixed from Type[SceneShoppingList]: the isinstance check
        below shows an *instance* is expected, not the class itself.
        """
        assert isinstance(shoppinglist, SceneShoppingList)
        # NOTE(review): attribute name is plural but stores one object.
        self.scene_shoppinglists = shoppinglist
        return self
def read_annotations(path: str) -> Tuple[List[str], List[Dict]]:
    """Read a comma-separated annotations file.

    Returns the header row and one dict per data row, keyed by the header
    column names.
    """
    rows = []
    with open(path, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        try:
            header = next(reader, None)
        except OSError:
            raise OSError(f'Failed to open annotations file {path}')
        assert isinstance(header, list)
        for row in reader:
            rows.append({key: row[i] for i, key in enumerate(header)})
    return (header, rows)
def do_analyze(logdir, base_path=None):
    """Rebuild the TF inference graph from a run directory and evaluate it.

    Loads hypes + modules saved in `logdir`, restores the latest weights,
    runs the eval module and writes results/images under `logdir`/analyse.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)
    if (base_path is not None):
        hypes['dirs']['base_path'] = base_path
    with tf.Graph().as_default():
        # Single-image placeholder; the batch dimension of 1 is added here.
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)
        image.set_shape([1, None, None, 3])
        inf_out = core.build_inference_graph(hypes, modules, image=image)
        sess = tf.Session()
        saver = tf.train.Saver()
        core.load_weights(logdir, sess, saver)
        logging.info('Graph loaded succesfully. Starting evaluation.')
        output_dir = os.path.join(logdir, 'analyse')
        logging.info('Output Images will be written to: {}'.format(os.path.join(output_dir, 'images/')))
        # Mirror log output into analyse/output.log as well.
        logging_file = os.path.join(logdir, 'analyse/output.log')
        utils.create_filewrite_handler(logging_file)
        (eval_dict, images) = modules['eval'].evaluate(hypes, sess, image_pl, inf_out)
        logging.info('Evaluation Succesfull. Results:')
        utils.print_eval_dict(eval_dict)
        _write_images_to_logdir(images, output_dir)
def runBody(suite, test):
    """Dispatch to the dynamic or static runner depending on the suite kind."""
    runner = dynamicRun if isDynamic(suite) else staticRun
    return runner(suite, test)
class SynthTextDataLoaderFactory(BaseDataLoader):
    """Builds train/val DataLoaders over a SynthTextDataset with a configured split."""

    def __init__(self, config):
        super(SynthTextDataLoaderFactory, self).__init__(config)
        dataRoot = self.config['data_loader']['data_dir']
        self.workers = self.config['data_loader']['workers']
        ds = SynthTextDataset(dataRoot)
        (self.__trainDataset, self.__valDataset) = self.__train_val_split(ds)

    def train(self):
        """DataLoader over the training subset (base-class batch/shuffle settings)."""
        trainLoader = torchdata.DataLoader(self.__trainDataset, num_workers=self.num_workers, batch_size=self.batch_size, shuffle=self.shuffle, collate_fn=collate_fn)
        return trainLoader

    def val(self):
        """DataLoader over the validation subset; shuffling comes from config."""
        shuffle = self.config['validation']['shuffle']
        valLoader = torchdata.DataLoader(self.__valDataset, num_workers=self.num_workers, batch_size=self.batch_size, shuffle=shuffle, collate_fn=collate_fn)
        return valLoader

    def __train_val_split(self, ds):
        """Randomly split `ds` by the configured validation ratio."""
        split = self.config['validation']['validation_split']
        try:
            split = float(split)
        except (TypeError, ValueError):
            # Narrowed from a bare `except:` so unrelated errors (e.g.
            # KeyboardInterrupt) still propagate.
            raise RuntimeError('Train and val splitting ratio is invalid.')
        val_len = int(split * len(ds))
        train_len = len(ds) - val_len
        (train, val) = torchdata.random_split(ds, [train_len, val_len])
        return (train, val)

    def split_validation(self):
        # Not supported for this factory; use val() instead.
        raise NotImplementedError
class Mean(nn.Module):
    """Module wrapper around Tensor.mean over a fixed dimension."""

    def __init__(self, dim, keep_dim=False):
        super(Mean, self).__init__()
        # Dimension to reduce over and whether to retain it in the output.
        self.dim = dim
        self.keep_dim = keep_dim

    def forward(self, input):
        """Return the mean of `input` along the configured dimension."""
        return input.mean(self.dim, self.keep_dim)
def write_data(input_file, output_file, features):
    """Convert a DIMACS CNF file into node/edge files for a graph dataset.

    Writes `output_file[0]` (variables: id, feature vector, node-type label)
    and `output_file[1]` (relations: child-id <TAB> parent-id). Node-type
    labels: 0=global, 1=leaf/literal, 2=OR (clause), 3=AND (root).
    Relies on module-level `dimacs_to_cnf`, `prepare_clauses`, `clauses`,
    `annotation`, `idx2filename`, `converter`, `objs` and `_feature_leaf`.
    """
    print(input_file)
    # File id is embedded in the filename; '.s' files carry it one dot earlier.
    file_id = (input_file.split('/')[(- 1)].split('.')[(- 3)] if ('.s' in input_file) else input_file.split('/')[(- 1)].split('.')[(- 2)])
    file_id = int(file_id)
    print(file_id)
    (cnf, _) = dimacs_to_cnf(input_file)
    # NOTE(review): `clauses` here is a module-level global, not the local
    # `cnf` — confirm this is intentional.
    (_, r_container) = prepare_clauses(clauses, annotation[idx2filename[file_id]], converter, objs)
    cnf = [r_container.get_original_repr(c) for c in cnf]
    variables = open(output_file[0], 'w')
    relations = open(output_file[1], 'w')
    relations_str = ''
    variables_str = ''
    feature_OR = features['Or']
    feature_AND = features['And']
    feature_G = features['Global']
    feature = feature_G
    label = 0
    # Node 0: the global node with its feature vector and label 0.
    variables_str += (str(0) + '\t')
    for j in range(len(feature)):
        variables_str += (str(feature[j]) + '\t')
    variables_str += (str(label) + '\n')
    if ('.s' not in input_file):
        # Full CNF: one OR node per clause, de-duplicated leaf nodes per
        # literal, and a single AND root connecting every OR node.
        known_var = {}
        var_id = 1
        and_vars = []
        or_vars = []
        or_children = {}
        for c in cnf:
            or_vars.append(var_id)
            or_children[var_id] = []
            variables_str += (((str(var_id) + '\t') + '\t'.join(list(map(str, feature_OR)))) + '\t2\n')
            current_or = var_id
            var_id += 1
            for l in c:
                if (l not in known_var.keys()):
                    known_var[l] = var_id
                    variables_str += (((str(var_id) + '\t') + '\t'.join(list(map(str, _feature_leaf(int(l)))))) + '\t1\n')
                    this_var_id = var_id
                    var_id += 1
                else:
                    this_var_id = known_var[l]
                or_children[current_or].append(this_var_id)
        # NOTE(review): placement of the single AND root reconstructed from
        # the collapsed source — `and_vars[0]` below implies exactly one.
        and_vars.append(var_id)
        variables_str += (((str(var_id) + '\t') + '\t'.join(list(map(str, feature_AND)))) + '\t3\n')
        var_id += 1
        # Attach every node to the global node 0.
        for i in range(1, var_id):
            relations_str += ((str(i) + '\t') + '0\n')
        # Leaf -> OR edges, then OR -> AND-root edges.
        for or_var in or_vars:
            for or_child in or_children[or_var]:
                relations_str += (((str(or_child) + '\t') + str(or_var)) + '\n')
        for or_var in or_vars:
            relations_str += (((str(or_var) + '\t') + str(and_vars[0])) + '\n')
    else:
        # '.s' file: unit clauses only — leaves directly under one AND root.
        var_id = 1
        pseudo_clause = [i[0] for i in cnf]
        for l in pseudo_clause:
            variables_str += (((str(var_id) + '\t') + '\t'.join(list(map(str, _feature_leaf(int(l)))))) + '\t1\n')
            var_id += 1
        variables_str += (((str(var_id) + '\t') + '\t'.join(list(map(str, feature_AND)))) + '\t3\n')
        var_id += 1
        for i in range(1, var_id):
            relations_str += ((str(i) + '\t') + '0\n')
        for i in range(1, (var_id - 1)):
            relations_str += (((str(i) + '\t') + str((var_id - 1))) + '\n')
    relations.write(relations_str)
    variables.write(variables_str)
    relations.close()
    variables.close()
def _unflatten_params(flat_params, params_example): unflat_params = [] idx = 0 for (key, param) in params_example.items(): size_param = np.prod(param.shape) reshaped_param = np.reshape(flat_params[idx:(idx + size_param)], newshape=param.shape) unflat_params.append((key, reshaped_param)) idx += size_param return OrderedDict(unflat_params)
def get_arpabet(word, dictionary):
    """Return the first ARPAbet pronunciation of `word` wrapped in braces.

    Falls back to the word itself when the dictionary has no entry.
    """
    pronunciations = dictionary.lookup(word)
    if pronunciations is None:
        return word
    return '{' + pronunciations[0] + '}'
class ActNetwork(nn.Module):
    """Two-block CNN feature extractor; sizes come from the module-level `var_size` table."""

    def __init__(self, taskname):
        super(ActNetwork, self).__init__()
        self.taskname = taskname
        # Kernels span (1, ker_size): convolution along the time axis only.
        self.conv1 = nn.Sequential(nn.Conv2d(in_channels=var_size[taskname]['in_size'], out_channels=16, kernel_size=(1, var_size[taskname]['ker_size'])), nn.BatchNorm2d(16), nn.ReLU(), nn.MaxPool2d(kernel_size=(1, 2), stride=2))
        self.conv2 = nn.Sequential(nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(1, var_size[taskname]['ker_size'])), nn.BatchNorm2d(32), nn.ReLU(), nn.MaxPool2d(kernel_size=(1, 2), stride=2))
        # Flattened feature size after conv2, precomputed per task.
        self.in_features = var_size[taskname]['fc_size']

    def forward(self, x):
        """Return flattened (N, in_features) features."""
        x = self.conv2(self.conv1(x))
        x = x.view((- 1), self.in_features)
        return x

    def getfea(self, x):
        """Return the unflattened conv feature maps."""
        x = self.conv2(self.conv1(x))
        return x
def mock_wrapper_class() -> Type[Wrapper]:
    """Return a trivial Wrapper[FakeState] subclass for use in tests."""

    class MockWrapper(Wrapper[FakeState]):
        # No behavior added — the bare subclass is all tests need.
        pass

    return MockWrapper
# NOTE(review): the decorator is truncated in this chunk — presumably
# `@DATASETS.register_module()`; verify against the original file.
_module()
class BottomUpAicDataset(BottomUpCocoDataset):
    """AIC dataset for bottom-up pose estimation (14 keypoints, COCO-style API)."""

    def __init__(self, ann_file, img_prefix, data_cfg, pipeline, test_mode=False):
        # Deliberately skips BottomUpCocoDataset.__init__ and calls its
        # parent instead: the AIC-specific metadata below replaces the
        # COCO-specific setup.
        super(BottomUpCocoDataset, self).__init__(ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)
        # Left/right keypoint swap indices used for horizontal flips.
        self.ann_info['flip_index'] = [3, 4, 5, 0, 1, 2, 9, 10, 11, 6, 7, 8, 12, 13]
        self.ann_info['use_different_joint_weights'] = False
        self.ann_info['joint_weights'] = np.array([1.0, 1.2, 1.5, 1.0, 1.2, 1.5, 1.0, 1.2, 1.5, 1.0, 1.2, 1.5, 1.0, 1.0], dtype=np.float32).reshape((self.ann_info['num_joints'], 1))
        # Per-keypoint OKS sigmas (all zero here); used by the COCO evaluator.
        self.sigmas = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
        self.coco = COCO(ann_file)
        cats = [cat['name'] for cat in self.coco.loadCats(self.coco.getCatIds())]
        self.classes = (['__background__'] + cats)
        self.num_classes = len(self.classes)
        self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
        self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))
        self._coco_ind_to_class_ind = dict(((self._class_to_coco_ind[cls], self._class_to_ind[cls]) for cls in self.classes[1:]))
        self.img_ids = self.coco.getImgIds()
        if (not test_mode):
            # Training: keep only images that have at least one annotation.
            self.img_ids = [img_id for img_id in self.img_ids if (len(self.coco.getAnnIds(imgIds=img_id, iscrowd=None)) > 0)]
        self.num_images = len(self.img_ids)
        (self.id2name, self.name2id) = self._get_mapping_id_name(self.coco.imgs)
        self.dataset_name = 'aic'
        print(f'=> num_images: {self.num_images}')

    def _do_python_keypoint_eval(self, res_file):
        """Run COCO keypoint evaluation on `res_file`; returns (name, value) pairs."""
        stats_names = ['AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']
        with open(res_file, 'r') as file:
            res_json = json.load(file)
        if (not res_json):
            # Empty result file: report all-zero stats instead of crashing.
            info_str = list(zip(stats_names, ([0] * len(stats_names))))
            return info_str
        coco_det = self.coco.loadRes(res_file)
        coco_eval = COCOeval(self.coco, coco_det, 'keypoints', self.sigmas, use_area=False)
        coco_eval.params.useSegm = None
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        info_str = list(zip(stats_names, coco_eval.stats))
        return info_str
def plot_segs(track_segs, cd_scores, xtrack, pred=None, y=None, vabs=None, cbar=True, xticks=True, yticks=True):
    """Plot track segments on the current axes, colored by their CD scores.

    Args:
        track_segs: list of (start, end) index pairs into `xtrack`.
        cd_scores: one score (or per-point scores) per segment.
        xtrack: 1-D series of values to plot.
        pred, y: optional prediction/label shown in the title.
        vabs: color-scale half-range; defaults to max |cd_scores|.
        cbar, xticks, yticks: toggle colorbar and tick labels.

    Returns:
        The colorbar when `cbar` is True, else None. (Bug fix: previously
        `cb` was unbound when cbar=False, so `return cb` raised NameError.)
    """
    cm = LinearSegmentedColormap.from_list(name='orange-blue', colors=[((222 / 255), (85 / 255), (51 / 255)), 'lightgray', ((50 / 255), (129 / 255), (168 / 255))])
    if (vabs is None):
        vabs = np.max(np.abs(cd_scores))
    norm = matplotlib.colors.Normalize(vmin=(- vabs), vmax=vabs)
    for i in range(len(track_segs)):
        (s, e) = track_segs[i]
        cd_score = cd_scores[i]
        seq_len = (e - s)
        xs = np.arange(s, e)
        if (seq_len > 1):
            # Repeat the segment score so scatter gets one color per point.
            cd_score = ([cd_score] * seq_len)
        # NOTE(review): cd_score[0] assumes scores are indexable even for
        # single-point segments — confirm cd_scores element type.
        col = cm(norm(cd_score[0]))
        while (len(col) == 1):
            col = col[0]
        plt.plot(xs, xtrack[s:e], zorder=0, lw=2, color=col, alpha=0.5)
        plt.scatter(xs, xtrack[s:e], c=cd_score, cmap=cm, vmin=(- vabs), vmax=vabs, s=6)
    if (pred is not None):
        plt.title(f'Pred: {pred: .1f}, y: {y}', fontsize=24)
    cb = None  # stays None when no colorbar is requested
    if cbar:
        cb = plt.colorbar()
        cb.outline.set_visible(False)
    if (not xticks):
        plt.xticks([])
    if (not yticks):
        plt.yticks([])
    return cb
def test_intersection_with_broadcasting_module2() -> None:
    """HardIntersection must broadcast (1,2,2) against (2,1,2) boxes and be
    symmetric in its arguments."""
    box1 = BoxTensor(torch.tensor([[[[1, 1], [4, 4]], [[2, 2], [5, 5]]]]).float())
    assert box1.box_shape == (1, 2, 2)
    box2 = BoxTensor(torch.tensor([[[[3, 3], [7, 6]]], [[[1, 3], [3, 4]]]]).float())
    assert box2.box_shape == (2, 1, 2)
    expected = BoxTensor(torch.tensor([[[[3, 3], [4, 4]], [[3, 3], [5, 5]]],
                                       [[[1, 3], [3, 4]], [[2, 3], [3, 4]]]]).float())
    # fix: the original rebuilt box1/box2/expected a second time with identical
    # literals; the fixtures are pure data, so check both argument orders
    # against the same objects instead.
    assert expected == HardIntersection()(box1, box2)
    assert expected == HardIntersection()(box2, box1)
class STSEResUNetIN50(STResUNet50):
    """Sparse SE-ResUNet-50 variant that swaps in instance normalization and
    SE bottleneck blocks; all other behavior comes from STResUNet50."""

    NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
    BLOCK = SEBottleneckIN
def get_default_kwargs_q(kwargs_q, layer_type):
    """Fill ``kwargs_q`` in place with default quantization settings for
    ``layer_type`` and return it.

    Every known layer family gets a default bit width; quantized conv layers
    additionally get a default quantization mode. Explicit entries already in
    ``kwargs_q`` are kept.

    Raises NotImplementedError for unrecognized layer types.
    """
    default = {'nbits': 4}
    if isinstance(layer_type, _Conv2dQ):
        default.update({'mode': Qmodes.layer_wise})
    elif isinstance(layer_type, (_LinearQ, _ActQ)):
        pass  # bit-width default only
    else:
        # fix: was `assert NotImplementedError` (a class is always truthy, so
        # the assert never fired) followed by a silent bare `return` — callers
        # received None and crashed later. Fail loudly instead.
        raise NotImplementedError(f'Unsupported layer type: {type(layer_type)!r}')
    for k, v in default.items():
        kwargs_q.setdefault(k, v)
    return kwargs_q
class ModuleSepconv(torch.nn.Module):
    """Thin nn.Module wrapper around the separable-convolution autograd op."""

    def __init__(self):
        super(ModuleSepconv, self).__init__()

    def forward(self, tensorInput, tensorVertical, tensorHorizontal):
        # Delegate straight to the custom autograd function.
        output = _FunctionSepconv.apply(tensorInput, tensorVertical, tensorHorizontal)
        return output
# NOTE(review): '_HEADS_REGISTRY.register()' appears to be a truncated decorator
# (presumably '@ROI_HEADS_REGISTRY.register()') — confirm and restore the prefix.
_HEADS_REGISTRY.register()
class TridentRes5ROIHeads(Res5ROIHeads):
    """Res5 ROI heads for TridentNet: runs the parent Res5 head over
    multi-branch proposals and merges per-branch detections at test time."""

    def __init__(self, cfg, input_shape):
        super().__init__(cfg, input_shape)
        # Number of trident branches; TEST_BRANCH_IDX != -1 means only a single
        # branch is evaluated at test time ("fast" inference).
        self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH
        self.trident_fast = (cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != (- 1))

    def forward(self, images, features, proposals, targets=None):
        """Run the parent forward, replicating the targets once per active
        branch during training; merge branch outputs at inference."""
        # All branches are active while training or when fast mode is off.
        num_branch = (self.num_branch if (self.training or (not self.trident_fast)) else 1)
        all_targets = ((targets * num_branch) if (targets is not None) else None)
        (pred_instances, losses) = super().forward(images, features, proposals, all_targets)
        del images, all_targets, targets
        if self.training:
            return (pred_instances, losses)
        else:
            # Combine detections from all branches into one set per image.
            pred_instances = merge_branch_instances(pred_instances, num_branch, self.box_predictor.test_nms_thresh, self.box_predictor.test_topk_per_image)
            return (pred_instances, {})
def test_statcast_outfielder_jump() -> None:
    """The 2019 outfielder-jump query should return a populated 13-column
    frame filtered to the requested minimum attempt count."""
    minimum_attempts = 50
    result: pd.DataFrame = statcast_outfielder_jump(2019, minimum_attempts)
    assert result is not None
    assert not result.empty
    assert len(result.columns) == 13
    assert len(result) > 0
    # No returned row may fall below the attempt threshold.
    assert len(result.loc[result.n < minimum_attempts]) == 0
class CheckpointFunction(t.autograd.Function):
    """Gradient checkpointing: evaluate ``run_function`` in the forward pass
    without storing intermediate activations, then recompute them under
    ``enable_grad`` during backward.

    ``apply(run_function, length, *args)`` treats the first ``length`` entries
    of ``args`` as input tensors and the remainder as parameters.
    """

    # fix: torch.autograd.Function requires forward/backward to be static
    # methods; the originals were plain functions.
    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        # Run without grad tracking; the graph is rebuilt in backward().
        with t.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        # Detach saved inputs so the recomputation builds a fresh graph
        # rooted exactly at them (preserving each input's requires_grad).
        for i in range(len(ctx.input_tensors)):
            temp = ctx.input_tensors[i]
            ctx.input_tensors[i] = temp.detach()
            ctx.input_tensors[i].requires_grad = temp.requires_grad
        with t.enable_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        input_grads = t.autograd.grad(
            output_tensors,
            (ctx.input_tensors + ctx.input_params),
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del output_tensors
        # No gradients flow to run_function and length themselves.
        return ((None, None) + input_grads)
class HardtanhDiffSNN(MultivariateDiffThinningAlgorithmMixin, HardtanhActivationMixin, DiffSNNBase):
    """Differentiable SNN combining the multivariate thinning algorithm with a
    hardtanh activation; all behavior comes from the mixed-in bases."""
    pass
class ResNet152FeatModule(nn.Module):
    """Feature extractor built from the shared ResNet-152 with its last two
    child modules removed (presumably avgpool + fc — confirm upstream)."""

    def __init__(self):
        super(ResNet152FeatModule, self).__init__()
        trunk = list(RESNET152_MODEL.children())[:-2]
        self.feature_module = nn.Sequential(*trunk)

    def forward(self, x):
        # Pure pass-through of the truncated backbone.
        return self.feature_module(x)
def hotpot_biattention(config, is_train, h, u, h_mask, u_mask, indim, scope=None, tensor_dict=None):
    """Bi-attention between context ``h`` and query ``u`` (HotpotQA style).

    Builds a trilinear similarity matrix from per-position scalar projections
    of h and u plus a scaled bilinear cross term, masks invalid h positions,
    and returns the concatenation [h; h->u attention; h * att; gated term].

    Assumes h is (batch, h_len, indim) and u is (batch, u_len, indim)
    — TODO confirm against callers.
    """
    (h_len, u_len) = (tf.shape(h)[1], tf.shape(u)[1])
    with tf.variable_scope((scope or 'hotpot_biattention')):
        # Per-position scalar projections, tiled to (batch, h_len, u_len).
        h_dot = tf.squeeze(tf.tile(tf.expand_dims(tf.layers.dense(h, 1), 2), [1, 1, u_len, 1]), axis=(- 1))
        u_dot = tf.squeeze(tf.tile(tf.expand_dims(tf.layers.dense(u, 1), 1), [1, h_len, 1, 1]), axis=(- 1))
        # Learned per-dimension scaling for the bilinear h·u term.
        dot_scale = tf.get_variable('dot_scale', [indim])
        cross_dot = tf.einsum('ijk,ilk->ijl', (h * dot_scale), u)
        # Additive mask: push padded h positions toward -inf before softmax.
        att = (((h_dot + u_dot) + cross_dot) - (1e+30 * (1.0 - tf.cast(tf.tile(tf.expand_dims(h_mask, axis=2), [1, 1, u_len]), 'float32'))))
        weight_one = tf.nn.softmax(att)  # h -> u attention weights
        weight_two = tf.nn.softmax(tf.reduce_max(att, axis=(- 1)))  # max-pooled u -> h weights
        output_one = tf.einsum('ijk,ikl->ijl', weight_one, u)  # attended query per h position
        output_two = tf.einsum('ij,ijk->ik', weight_two, h)  # single attended context vector
        # NOTE(review): config, is_train, u_mask and tensor_dict are accepted
        # but unused here — confirm whether that is intentional.
        output = tf.concat([h, output_one, (h * output_one), tf.einsum('ik,ijk->ijk', output_two, output_one)], axis=(- 1))
        return output
class DeepLabHeadV3Plus(nn.Module):
    """DeepLabV3+ style decoder head: projects a low-level feature map, fuses
    it with an upsampled high-level map, and classifies the result.

    NOTE(review): ``in_channels`` and ``aspp_dilate`` are accepted but unused
    here — confirm whether an ASPP stage was removed upstream.
    """

    def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
        super(DeepLabHeadV3Plus, self).__init__()
        # 1x1 projection of the low-level features down to 48 channels.
        self.project = nn.Sequential(
            nn.Conv2d(low_level_channels, 48, 1, bias=False),
            nn.BatchNorm2d(48),
            nn.ReLU(inplace=True),
        )
        # Fused tensor is 48 (projected) + 512 (high-level) = 560 channels.
        self.classifier = nn.Sequential(
            nn.Conv2d(560, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, num_classes, 1),
        )
        self._init_weight()

    def forward(self, feature):
        """``feature`` is indexable: feature[0] is the low-level map and
        feature[3] the high-level map (presumably backbone stages — confirm)."""
        low = self.project(feature[0])
        high = F.interpolate(feature[3], size=low.shape[2:], mode='bilinear', align_corners=False)
        fused = torch.cat([low, high], dim=1)
        return self.classifier(fused)

    def _init_weight(self):
        # Kaiming init for convs; unit scale / zero shift for norm layers.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
def test_d2_skewness(barrel):
    """Second-derivative skewness should come back as a numpy array."""
    result = barrel.second_derivative_skewness()
    assert isinstance(result, np.ndarray)
# NOTE(review): '_pipeline_test', '_torch' and '_tf' below look like truncated
# decorators (presumably '@is_pipeline_test', '@require_torch', '@require_tf')
# — confirm against the original and restore them.
_pipeline_test
class SummarizationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
    """Pipeline tests for SummarizationPipeline across PT and TF backends."""

    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, feature_extractor):
        # Build the pipeline under test plus sample inputs for run_pipeline_test.
        summarizer = SummarizationPipeline(model=model, tokenizer=tokenizer)
        return (summarizer, ['(CNN)The Palestinian Authority officially became', 'Some other text'])

    def run_pipeline_test(self, summarizer, _):
        model = summarizer.model
        outputs = summarizer('(CNN)The Palestinian Authority officially became')
        self.assertEqual(outputs, [{'summary_text': ANY(str)}])
        outputs = summarizer('(CNN)The Palestinian Authority officially became ', num_beams=2, min_length=2, max_length=5)
        self.assertEqual(outputs, [{'summary_text': ANY(str)}])
        # T5 and LED tolerate over-long inputs; other models must raise unless
        # truncation is requested explicitly.
        if (not isinstance(model.config, (T5Config, LEDConfig))):
            with self.assertRaises(Exception):
                outputs = summarizer(('This ' * 1000))
        outputs = summarizer(('This ' * 1000), truncation=TruncationStrategy.ONLY_FIRST)

    _torch
    def test_small_model_pt(self):
        # Tiny model sanity check on the PyTorch backend.
        summarizer = pipeline(task='summarization', model='sshleifer/tiny-mbart', framework='pt')
        outputs = summarizer('This is a small test')
        self.assertEqual(outputs, [{'summary_text': ''}])

    _tf
    def test_small_model_tf(self):
        # Tiny model sanity check on the TensorFlow backend.
        summarizer = pipeline(task='summarization', model='sshleifer/tiny-mbart', framework='tf')
        outputs = summarizer('This is a small test')
        self.assertEqual(outputs, [{'summary_text': ''}])

    _torch
    def test_integration_torch_summarization(self):
        # End-to-end check: summarize a full CNN article and compare against a
        # fixed expected summary string.
        summarizer = pipeline(task='summarization', device=DEFAULT_DEVICE_NUM)
        cnn_article = ' (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and the United States, neither of which is an ICC member, opposed the Palestinians\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\'s ceremony, said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice," he said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\'s treaty should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the group. "What\'s objectionable is the attempts to undermine international justice, not Palestine\'s decision to join a treaty to which over 100 countries around the world are members." In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC," the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. "We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality." The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes. CNN\'s Vasco Cotovio, Kareem Khadder and Faith Karimi contributed to this report.'
        expected_cnn_summary = " The Palestinian Authority becomes the 123rd member of the International Criminal Court . The move gives the court jurisdiction over alleged crimes in Palestinian territories . Israel and the United States opposed the Palestinians' efforts to join the court . Rights group Human Rights Watch welcomes the move, says governments seeking to penalize Palestine should end pressure ."
        result = summarizer(cnn_article)
        self.assertEqual(result[0]['summary_text'], expected_cnn_summary)
def require_phonemizer(test_case):
    """Skip ``test_case`` unless the phonemizer package is available."""
    skip_unless = unittest.skipUnless(is_phonemizer_available(), 'test requires phonemizer')
    return skip_unless(test_case)
def parse_int_value(string: str) -> int:
    """Return the last standalone integer found in ``string``.

    Returns None when no standalone integer is present (NOTE: despite the
    declared ``int`` return type, which is kept for interface compatibility
    — callers should treat the result as optional).
    """
    # Raw string for the regex; only the last match matters, so avoid
    # converting every match to int as the original did.
    matches = re.findall(r'\b\d+\b', string)
    return int(matches[-1]) if matches else None
def SubnetResNet18(taskcla, nf=32, sparsity=0.5):
    """Build an 18-layer SubnetResNet (2-2-2-2 basic-block layout)."""
    layout = [2, 2, 2, 2]
    return SubnetResNet(SubnetBasicBlock, layout, taskcla, nf, sparsity=sparsity)
# NOTE(review): '_start_docstrings(...)' looks like a truncated decorator
# (presumably '@add_start_docstrings(...)') — confirm and restore the prefix.
_start_docstrings('Bert Based model to embed queries or document for document retrieval. ', RETRIBERT_START_DOCSTRING)
class RetriBertModel(RetriBertPreTrainedModel):
    """Bi-encoder retrieval model: a BERT query encoder plus an optionally
    shared document encoder, each followed by a bias-free linear projection,
    trained with a symmetric cross-entropy ranking loss."""

    def __init__(self, config):
        super().__init__(config)
        self.projection_dim = config.projection_dim
        self.bert_query = BertModel(config)
        # With share_encoders the query encoder doubles as the doc encoder.
        self.bert_doc = (None if config.share_encoders else BertModel(config))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
        self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
        self.ce_loss = nn.CrossEntropyLoss(reduction='mean')
        self.init_weights()

    def embed_sentences_checkpointed(self, input_ids, attention_mask, sent_encoder, checkpoint_batch_size=(- 1)):
        """Embed sentences with ``sent_encoder``, optionally gradient-
        checkpointing the transformer in mini-batches of
        ``checkpoint_batch_size`` (-1 disables checkpointing).
        Returns the pooled output."""
        if ((checkpoint_batch_size < 0) or (input_ids.shape[0] < checkpoint_batch_size)):
            # Batch small enough: run the full encoder in one pass; [1] is the
            # pooled output.
            return sent_encoder(input_ids, attention_mask=attention_mask)[1]
        else:
            device = input_ids.device
            input_shape = input_ids.size()
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
            head_mask = ([None] * sent_encoder.config.num_hidden_layers)
            extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(attention_mask, input_shape, device)

            def partial_encode(*inputs):
                # Encoder + pooler only; embeddings are computed outside the
                # checkpoint so they are not part of the recomputed segment.
                encoder_outputs = sent_encoder.encoder(inputs[0], attention_mask=inputs[1], head_mask=head_mask)
                sequence_output = encoder_outputs[0]
                pooled_output = sent_encoder.pooler(sequence_output)
                return pooled_output
            embedding_output = sent_encoder.embeddings(input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None)
            pooled_output_list = []
            # Checkpoint the encoder one mini-batch at a time.
            for b in range(math.ceil((input_ids.shape[0] / checkpoint_batch_size))):
                b_embedding_output = embedding_output[(b * checkpoint_batch_size):((b + 1) * checkpoint_batch_size)]
                b_attention_mask = extended_attention_mask[(b * checkpoint_batch_size):((b + 1) * checkpoint_batch_size)]
                pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
                pooled_output_list.append(pooled_output)
            return torch.cat(pooled_output_list, dim=0)

    def embed_questions(self, input_ids, attention_mask=None, checkpoint_batch_size=(- 1)):
        # Encode queries and project into the shared retrieval space.
        q_reps = self.embed_sentences_checkpointed(input_ids, attention_mask, self.bert_query, checkpoint_batch_size)
        return self.project_query(q_reps)

    def embed_answers(self, input_ids, attention_mask=None, checkpoint_batch_size=(- 1)):
        # Encode documents; fall back to the query encoder when encoders are shared.
        a_reps = self.embed_sentences_checkpointed(input_ids, attention_mask, (self.bert_query if (self.bert_doc is None) else self.bert_doc), checkpoint_batch_size)
        return self.project_doc(a_reps)

    def forward(self, input_ids_query, attention_mask_query, input_ids_doc, attention_mask_doc, checkpoint_batch_size=(- 1)):
        """Return the symmetric query<->doc cross-entropy ranking loss, where
        matching query/doc pairs share an index within the batch."""
        device = input_ids_query.device
        q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
        a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
        # Score every query against every doc; the diagonal holds positives.
        compare_scores = torch.mm(q_reps, a_reps.t())
        loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
        loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
        loss = ((loss_qa + loss_aq) / 2)
        return loss
def create_mapping_kernel(kernel_size=7):
    """Build a frozen one-hot "mapping" convolution kernel.

    Returns an nn.Parameter of shape (k*k, 1, k, k) where output channel
    ``h*k + w`` has a single 1.0 at spatial position (h, w) — i.e. each output
    channel extracts one position of the k x k neighborhood.
    """
    n = kernel_size * kernel_size
    # Identity matrix reshaped: row i holds its 1 at flat position i = h*k + w,
    # exactly the nested-loop assignment of the original.
    kernel_arr = np.eye(n, dtype=np.float32).reshape(n, kernel_size, kernel_size)
    kernel_tensor = torch.from_numpy(np.expand_dims(kernel_arr, axis=1))
    # Frozen parameter: never updated by the optimizer.
    # fix: removed the stray debug print of the parameter's type.
    return nn.Parameter(data=kernel_tensor.contiguous(), requires_grad=False)
class DistMult(BaseModel):
    """DistMult knowledge-graph embedding: scores a triple (h, r, t) as
    sum(e_h * w_r * e_t) over the embedding dimension, with an optional
    L2 penalty on the most recent batch's embeddings."""

    def __init__(self, entity_dict_len, relation_dict_len, embedding_dim, penalty_weight=0.0):
        super(DistMult, self).__init__(model_name='DistMult', penalty_weight=penalty_weight)
        self.entity_dict_len = entity_dict_len
        self.relation_dict_len = relation_dict_len
        self.embedding_dim = embedding_dim
        self.entity_embedding = nn.Embedding(self.entity_dict_len, self.embedding_dim)
        self.relation_embedding = nn.Embedding(self.relation_dict_len, self.embedding_dim)
        nn.init.xavier_uniform_(self.entity_embedding.weight.data)
        nn.init.xavier_uniform_(self.relation_embedding.weight.data)
        # Cached per-batch embeddings, consumed by get_penalty().
        self.head_batch_embedding = None
        self.relation_batch_embedding = None
        self.tail_batch_embedding = None

    def forward(self, data):
        head_idx, rel_idx, tail_idx = data[0], data[1], data[2]
        head_emb = self.entity_embedding(head_idx)
        rel_emb = self.relation_embedding(rel_idx)
        tail_emb = self.entity_embedding(tail_idx)
        # Remember this batch's embeddings so the regularizer can reuse them.
        self.head_batch_embedding = head_emb
        self.relation_batch_embedding = rel_emb
        self.tail_batch_embedding = tail_emb
        # Trilinear score reduced over the embedding dimension.
        return torch.sum(head_emb * rel_emb * tail_emb, (- 1)).flatten()

    def loss(self, data):
        """Ranking loss over positives and sampled negatives plus the weighted
        embedding-norm penalty."""
        pos_data = self.data_to_device(data)
        neg_data = self.data_to_device(self.model_negative_sampler.create_negative(data))
        pos_score = self.forward(pos_data)
        neg_score = self.forward(neg_data)
        return self.model_loss(pos_score, neg_score) + self.penalty_weight * self.get_penalty()

    def get_penalty(self):
        # Mean squared magnitude averaged over the three cached batches.
        parts = (self.head_batch_embedding, self.relation_batch_embedding, self.tail_batch_embedding)
        return sum(torch.mean(p ** 2) for p in parts) / 3
def get_metamodel(netstr, dim_in, dim_hidden, dim_out, num_layers=4, w0=30.0):
    """Factory for meta-learning implicit networks.

    ``netstr`` selects the architecture (currently only 'siren'); the
    remaining arguments configure layer sizes and the SIREN frequency ``w0``.

    Raises ValueError for unknown ``netstr`` values.
    """
    if netstr == 'siren':
        return MetaSirenNet(dim_in, dim_hidden, dim_out, num_layers, w0=w0, w0_initial=w0)
    # fix: the error now names the offending value instead of the previous
    # flippant, uninformative message.
    raise ValueError(f'unknown meta-model type: {netstr!r}')
def decomp_objective(model, x, K=1, beta=1.0, alpha=0.0, regs=None, components=False):
    """beta-VAE style decomposition objective: reconstruction log-likelihood
    minus a beta-weighted KL term minus an alpha-weighted extra regularizer.

    ``model(x, K)`` is expected to return the posterior q(z|x), the
    likelihood p(x|z) and K latent samples zs. When ``components`` is True the
    individual summed terms are returned alongside the total.
    """
    (qz_x, px_z, zs) = model(x, K)
    # Reconstruction log-likelihood, summed over all non-(sample, batch) dims.
    lpx_z = px_z.log_prob(x).view(*px_z.batch_shape[:2], (- 1)).sum((- 1))
    pz = model.pz(*model.pz_params)
    kld = kl_divergence(qz_x, pz, samples=zs).sum((- 1))
    # Optional regularizer: sample-based when regs.samples is set, otherwise
    # distribution-based; zero when no regularizer is supplied.
    reg = ((regs(pz.sample(torch.Size([x.size(0)])).view((- 1), zs.size((- 1))), zs.squeeze(0)) if regs.samples else regs(pz, qz_x)) if regs else torch.tensor(0))
    obj = ((lpx_z - (beta * kld)) - (alpha * reg))
    return (obj.sum() if (not components) else (obj.sum(), lpx_z.sum(), kld.sum(), reg.sum()))
class Neural_Engine(Neural_Engine_base):
    """Engine wrapper exposing accuracy evaluation and latency benchmarking."""

    def accuracy(self, batch_size, seq_len, dataset_name, task_name, data_dir, tokenizer_dir):
        """Evaluate graph accuracy over the given dataset/task."""
        log.info('Load dataset ......')
        dataset = DataLoader(batch_size, seq_len, dataset_name, task_name, data_dir, tokenizer_dir)
        log.info('Load metric ......')
        # Task-specific metric when both names are given, plain accuracy otherwise.
        if dataset_name and task_name is not None:
            metric = load_metric(dataset_name, task_name)
        else:
            metric = load_metric('accuracy')
        log.info('Start engine ......')
        for idx in tqdm(range(len(dataset))):
            batch_inputs, batch_labels = dataset[idx][0], dataset[idx][1]
            raw_outputs = self.graph.inference(batch_inputs)
            # The graph returns a dict of output tensors; use the first one.
            class_scores = list(raw_outputs.values())[0]
            metric.add_batch(predictions=np.argmax(class_scores, axis=1), references=batch_labels)
        log.info('Compute metrics ......')
        eval_metric = metric.compute()
        accuracy_value = eval_metric.get('accuracy')
        log.info(f'Accuracy: {accuracy_value}')

    def performance(self, batch_size, seq_len, iteration, warm_up):
        """Benchmark inference latency on dummy int32 inputs."""
        if warm_up >= iteration:
            log.error('Warm up should less than iteration.')
            raise ValueError()
        log.info('Generate dummy dataset ......')
        shape = [batch_size, seq_len]
        dataset = DummyDataLoader(
            shapes=[shape, shape, shape],
            lows=[0, 0, 0],
            highs=[128, 1, 1],
            dtypes=['int32', 'int32', 'int32'],
            iteration=iteration,
        )
        compute_performance(dataset, self.graph, log, self.log_file, warm_up, batch_size, seq_len)
# NOTE(review): the decorator here was truncated to '.config' in the source
# (presumably '@ex.config' from sacred) — confirm and restore it.
def config():
    """Experiment configuration values for the CUB-200-2011 dataset."""
    cub_dir = path.join('data', 'CUB_200_2011')
    # fix: the download URL was stripped from the source, leaving a broken
    # string literal (syntax error). Placeholder restores valid syntax —
    # TODO restore the real CUB-200-2011 download URL.
    cub_url = ''
    images_file = 'images.txt'
    train_file = 'train.txt'
    test_file = 'test.txt'
class FlaxKarrasDiffusionSchedulers(Enum):
    """Enumeration of the Flax scheduler classes usable with Karras-style
    diffusion pipelines; member values are stable integer identifiers."""
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
def main() -> None:
    """CLI entry point: convert an MGF/mzML/CSV source into an Arrow IPC file.

    The source type is taken from --source_type or inferred from the file
    extension; directories require an explicit --source_type.
    """
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('source', help='source file or folder')
    parser.add_argument('target', help='target ipc file to be saved')
    parser.add_argument('--source_type', default=None, choices=['mgf', 'mzml', 'csv'], help='type of input data')
    # fix: without type=int a user-supplied --max_charge arrived as a string.
    parser.add_argument('--max_charge', type=int, default=10, help='maximum charge to filter out')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--use_old_schema', action='store_true')
    args = parser.parse_args()
    source = Path(args.source)
    target = Path(args.target)
    source_type = args.source_type
    if source_type is None:
        if source.is_dir():
            # fix: dropped the pointless f-prefix (no placeholders).
            raise ValueError('Cannot infer source type from a directory. Please specify with --source_type')
        source_type = source.suffix[1:].lower()
    else:
        source_type = source_type.lower()
    if source_type == 'mgf':
        convert_mgf_ipc(source, target, args.max_charge, use_old_schema=args.use_old_schema, verbose=args.verbose)
    elif source_type == 'mzml':
        convert_mzml_ipc(source, target, args.max_charge, use_old_schema=args.use_old_schema, verbose=args.verbose)
    elif source_type == 'csv':
        # Round-trip through pandas then polars to write the IPC file.
        df = pl.from_pandas(pd.read_csv(source))
        target.parent.mkdir(parents=True, exist_ok=True)
        df.write_ipc(target)
    else:
        raise ValueError(f'Source type {source_type} not supported.')
def main():
    """Halide tutorial lesson 10 driver: build input/output image buffers,
    call the AOT-compiled pipeline through ctypes, and verify that
    output == input + offset (with uint8 wraparound)."""
    # Column-major (Fortran-order) 640x480 byte images, as Halide expects.
    input = np.empty((640, 480), dtype=np.uint8, order='F')
    for y in range(480):
        for x in range(640):
            input[(x, y)] = (x ^ (y + 1))
    output = np.empty((640, 480), dtype=np.uint8, order='F')
    if False:
        # Dead branch kept for reference: manual buffer_t population.
        (input_buf, output_buf) = (buffer_t(), buffer_t())
        for i in range(4):
            input_buf.stride[i] = 0
            input_buf.extent[i] = 0
            output_buf.stride[i] = 0
            output_buf.extent[i] = 0
        input_buf.host = input.ctypes.data
        output_buf.host = output.ctypes.data
        input_buf.stride[0] = output_buf.stride[0] = 1
        input_buf.stride[1] = output_buf.stride[1] = 640
        input_buf.extent[0] = output_buf.extent[0] = 640
        input_buf.extent[1] = output_buf.extent[1] = 480
        input_buf.elem_size = output_buf.elem_size = 1
    else:
        # Wrap the numpy arrays via the Halide Python binding's Buffer type
        # (presumably — confirm against the binding in use).
        input_buf = Buffer(input).buffer()
        output_buf = Buffer(output).buffer()
    # Load the AOT-compiled pipeline for the current platform.
    if (platform.system() == 'Linux'):
        lesson10 = ctypes.cdll.LoadLibrary('../build/lesson_10_halide.so')
    elif (platform.system() == 'Windows'):
        lesson10 = ctypes.windll.LoadLibrary('lesson_10_halide.dll')
    elif (platform.system() == 'Darwin'):
        lesson10 = ctypes.cdll.LoadLibrary('lesson_10_halide.o')
    else:
        raise Exception('unknown platform')
    assert (lesson10 != None)  # NOTE(review): prefer `is not None`

    class BufferStruct(ctypes.Structure):
        # ctypes mirror of Halide's buffer_t struct layout.
        _fields_ = [('dev', ctypes.c_uint64), ('host', ctypes.POINTER(ctypes.c_uint8)), ('extent', (ctypes.c_int32 * 4)), ('stride', (ctypes.c_int32 * 4)), ('min', (ctypes.c_int32 * 4)), ('elem_size', ctypes.c_int32), ('host_dirty', ctypes.c_bool), ('dev_dirty', ctypes.c_bool)]

    def buffer_t_to_buffer_struct(buffer):
        # Copy a Python-side Buffer into a ctypes BufferStruct.
        assert (type(buffer) == Buffer)
        b = buffer.raw_buffer()
        bb = BufferStruct()
        uint8_p_t = ctypes.POINTER(ctypes.c_ubyte)
        host_p = buffer.host_ptr_as_int()
        bb.host = ctypes.cast(host_p, uint8_p_t)
        bb.dev = b.dev
        bb.elem_size = b.elem_size
        bb.host_dirty = b.host_dirty
        bb.dev_dirty = b.dev_dirty
        for i in range(4):
            bb.extent[i] = b.extent[i]
            bb.stride[i] = b.stride[i]
            bb.min[i] = b.min[i]
        return bb
    input_buf_struct = buffer_t_to_buffer_struct(input_buf)
    output_buf_struct = buffer_t_to_buffer_struct(output_buf)
    input_buf_struct_p = ctypes.pointer(input_buf_struct)
    output_buf_struct_p = ctypes.pointer(output_buf_struct)
    # Sanity check: the struct's host pointer sees the same first bytes.
    for i in range(15):
        assert (input_buf_struct_p[0].host[i] == input[(i, 0)])
    offset_value = 5
    offset = ctypes.c_int(offset_value)
    # Invoke the AOT-compiled Halide function.
    error = lesson10.lesson_10_halide(input_buf_struct_p, offset, output_buf_struct_p)
    if error:
        print('Halide returned an error: ', error)
        return (- 1)
    # Verify every pixel; correct_val is uint8 so the addition wraps just
    # like the pipeline's arithmetic.
    correct_val = np.empty(1, dtype=np.uint8)
    for y in range(480):
        for x in range(640):
            input_val = input[(x, y)]
            output_val = output[(x, y)]
            correct_val[0] = input_val
            correct_val[0] += offset_value
            if (output_val != correct_val[0]):
                # NOTE(review): `correct_val` here is the 1-element array, not
                # the scalar — the %d formatting still works but is odd.
                raise Exception(('output(%d, %d) was %d instead of %d' % (x, y, output_val, correct_val)))
    print('Success!')
    return 0