@pytest.mark.parametrize(
    'model, initializer', [(model1, Uniform), (model2, KaimingNormal())]
)
def test_single_initializer(model, initializer):
    inp_weights = model.wide.wide_linear.weight.data.detach().cpu()
    n_model = c(model)
    trainer = Trainer(n_model, objective='binary', initializers=initializer)
    init_weights = trainer.model.wide.wide_linear.weight.data.detach().cpu()
    assert not torch.all(inp_weights == init_weights)
def test_warning_when_missing_initializer():
    wide = Wide(100, 1)
    deeptabular = TabMlp(
        column_idx=column_idx,
        cat_embed_input=embed_input,
        continuous_cols=colnames[-5:],
        mlp_hidden_dims=[32, 16],
        mlp_dropout=[0.5, 0.5],
    )
    deeptext = BasicRNN(vocab_size=vocab_size, embed_dim=32, padding_idx=0)
    model = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, pred_dim=1)
    with pytest.warns(UserWarning):
        trainer = Trainer(
            model, objective='binary', verbose=True, initializers=initializers_3
        )
def test_optimizer_scheduler_format():
    model = WideDeep(deeptabular=tabmlp)
    optimizers = {'deeptabular': torch.optim.Adam(model.deeptabular.parameters(), lr=0.01)}
    schedulers = torch.optim.lr_scheduler.StepLR(optimizers['deeptabular'], step_size=3)
    with pytest.raises(ValueError):
        trainer = Trainer(
            model, objective='binary', optimizers=optimizers, lr_schedulers=schedulers
        )
def test_non_instantiated_callbacks():
    model = WideDeep(wide=wide, deeptabular=tabmlp)
    callbacks = [EarlyStopping]
    trainer = Trainer(model, objective='binary', callbacks=callbacks)
    # the Trainer registers its own default callbacks first, so the
    # user-passed EarlyStopping ends up at index 2
    assert trainer.callbacks[2].__class__.__name__ == 'EarlyStopping'
def test_multiple_metrics():
    model = WideDeep(wide=wide, deeptabular=tabmlp)
    metrics = [Accuracy, Precision]
    trainer = Trainer(model, objective='binary', metrics=metrics)
    assert (
        trainer.metric._metrics[0].__class__.__name__ == 'Accuracy'
        and trainer.metric._metrics[1].__class__.__name__ == 'Precision'
    )
@pytest.mark.parametrize(
    'wide, deeptabular', [(wide, tabmlp), (wide, tabresnet), (wide, tabtransformer)]
)
def test_basic_run_with_metrics_binary(wide, deeptabular):
    model = WideDeep(wide=wide, deeptabular=deeptabular)
    trainer = Trainer(model, objective='binary', metrics=[Accuracy], verbose=False)
    trainer.fit(
        X_wide=X_wide, X_tab=X_tab, target=target, n_epochs=1, batch_size=16, val_split=0.2
    )
    assert 'train_loss' in trainer.history.keys() and 'train_acc' in trainer.history.keys()
def test_basic_run_with_metrics_multiclass():
    wide = Wide(np.unique(X_wide).shape[0], 3)
    deeptabular = TabMlp(
        mlp_hidden_dims=[32, 16],
        mlp_dropout=[0.5, 0.5],
        column_idx={k: v for v, k in enumerate(colnames)},
        cat_embed_input=embed_input,
        continuous_cols=colnames[-5:],
    )
    model = WideDeep(wide=wide, deeptabular=deeptabular, pred_dim=3)
    trainer = Trainer(model, objective='multiclass', metrics=[Accuracy], verbose=False)
    trainer.fit(
        X_wide=X_wide,
        X_tab=X_tab,
        target=target_multi,
        n_epochs=1,
        batch_size=16,
        val_split=0.2,
    )
    assert 'train_loss' in trainer.history.keys() and 'train_acc' in trainer.history.keys()
@pytest.mark.parametrize(
    'wide, deeptabular, deeptext, deepimage, X_wide, X_tab, X_text, X_img, target',
    [
        (wide, None, None, None, X_wide, None, None, None, target),
        (None, tabmlp, None, None, None, X_tab, None, None, target),
        (None, tabresnet, None, None, None, X_tab, None, None, target),
        (None, tabtransformer, None, None, None, X_tab, None, None, target),
        (None, None, basic_rnn, None, None, None, X_text, None, target),
        (None, None, basic_transformer, None, None, None, X_text, None, target),
        (None, None, None, deepimage, None, None, None, X_img, target),
    ],
)
def test_predict_with_individual_component(
    wide, deeptabular, deeptext, deepimage, X_wide, X_tab, X_text, X_img, target
):
    model = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage)
    trainer = Trainer(model, objective='binary', verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_img, target=target, batch_size=16)
    preds = trainer.predict(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_img)
    assert preds.shape[0] == 32 and 'train_loss' in trainer.history
def test_save_and_load():
    model = WideDeep(wide=wide, deeptabular=tabmlp)
    trainer = Trainer(model, objective='binary', verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, batch_size=16)
    wide_weights = model.wide.wide_linear.weight.data
    trainer.save('tests/test_model_functioning/model_dir/')
    n_model = torch.load('tests/test_model_functioning/model_dir/wd_model.pt')
    n_wide_weights = n_model.wide.wide_linear.weight.data
    assert torch.allclose(wide_weights, n_wide_weights)
def test_save_and_load_dict():
    wide = Wide(np.unique(X_wide).shape[0], 1)
    tabmlp = TabMlp(
        mlp_hidden_dims=[32, 16],
        column_idx={k: v for v, k in enumerate(colnames)},
        cat_embed_input=embed_input,
        continuous_cols=colnames[-5:],
    )
    model1 = WideDeep(wide=deepcopy(wide), deeptabular=deepcopy(tabmlp))
    trainer1 = Trainer(model1, objective='binary', verbose=0)
    trainer1.fit(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_img, target=target, batch_size=16)
    wide_weights = model1.wide.wide_linear.weight.data
    trainer1.save(path='tests/test_model_functioning/model_dir/', save_state_dict=True)

    model2 = WideDeep(wide=wide, deeptabular=tabmlp)
    trainer2 = Trainer(model2, objective='binary', verbose=0)
    trainer2.model.load_state_dict(torch.load('tests/test_model_functioning/model_dir/wd_model.pt'))
    n_wide_weights = trainer2.model.wide.wide_linear.weight.data
    same_weights = torch.allclose(wide_weights, n_wide_weights)
    history_saved = os.path.isfile(
        'tests/test_model_functioning/model_dir/history/train_eval_history.json'
    )
    shutil.rmtree('tests/test_model_functioning/model_dir/')
    assert same_weights and history_saved
def test_save_load_and_predict():
    fpath = 'tests/test_model_functioning/test_wd_model'
    if not os.path.exists(fpath):
        os.makedirs(fpath)
    model = WideDeep(deeptabular=tabmlp)
    trainer = Trainer(model, objective='binary', verbose=0)
    trainer.fit(X_tab=X_tab, target=target, batch_size=16)
    trainer.save(path=fpath, save_state_dict=True)

    model_new = WideDeep(deeptabular=tabmlp)
    model_new.load_state_dict(torch.load('/'.join([fpath, 'wd_model.pt'])))
    # predict with the reloaded model (the original code passed `model` here,
    # leaving `model_new` unused)
    trainer_new = Trainer(model_new, objective='binary', verbose=0)
    preds = trainer_new.predict(X_tab=X_tab, batch_size=16)
    shutil.rmtree(fpath)
    assert preds.shape[0] == X_tab.shape[0]
def create_test_dataset(input_type, input_type_2=None):
    df = pd.DataFrame()
    col1 = list(np.random.choice(input_type, 32))
    if input_type_2 is not None:
        col2 = list(np.random.choice(input_type_2, 32))
    else:
        col2 = list(np.random.choice(input_type, 32))
    df['col1'], df['col2'] = col1, col2
    return df
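For reference, a hypothetical invocation of `create_test_dataset` above; `some_letters` and `some_numbers` are illustrative vocabularies, not names from the original suite.

# two 32-row columns sampled from the given vocabularies
some_letters = ['a', 'b', 'c', 'd', 'e']
some_numbers = [1, 2, 3, 4, 5]
df_letters = create_test_dataset(some_letters)               # both columns from letters
df_mixed = create_test_dataset(some_letters, some_numbers)   # col1 letters, col2 numbers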
def test_handle_columns_with_dots():
    data = df.copy()
    data = data.rename(columns={'col1': 'col.1', 'a': 'a.1'})
    embed_cols = [('col.1', 5), ('col2', 5)]
    continuous_cols = ['col3', 'col4']
    tab_preprocessor = TabPreprocessor(cat_embed_cols=embed_cols, continuous_cols=continuous_cols)
    X_tab = tab_preprocessor.fit_transform(data)
    target = data.target.values
    tabmlp = TabMlp(
        mlp_hidden_dims=[32, 16],
        mlp_dropout=[0.5, 0.5],
        column_idx={k: v for v, k in enumerate(data.columns)},
        cat_embed_input=tab_preprocessor.cat_embed_input,
        continuous_cols=tab_preprocessor.continuous_cols,
    )
    model = WideDeep(deeptabular=tabmlp)
    trainer = Trainer(model, objective='binary', verbose=0)
    trainer.fit(X_tab=X_tab, target=target, batch_size=16)
    preds = trainer.predict(X_tab=X_tab, batch_size=16)
    assert preds.shape[0] == 32 and 'train_loss' in trainer.history
def test_lds_component_with_model():
    model = WideDeep(deeptabular=tabmlp)
    trainer = Trainer(model, objective='regression', verbose=0)
    trainer.fit(X_tab=X_tab, target=target, with_lds=True)
    preds = trainer.predict(X_tab=X_tab)
    assert preds.shape[0] == 32 and 'train_loss' in trainer.history
def test_lds_component_with_dataset():
    dataset_with_lds = WideDeepDataset(X_tab=X_tab, target=target, with_lds=True)
    assert dataset_with_lds.weights.shape[0] == 32
def test_Trainer_extract_kwargs():
    lds_args, dataloader_args, finetune_args = Trainer._extract_kwargs(
        {'pin_memory': True, 'lds_ks': 7, 'n_epochs': 10}
    )
    assert lds_args == {'lds_ks': 7}
    assert dataloader_args == {'pin_memory': True}
    assert finetune_args == {'n_epochs': 10}
@pytest.mark.parametrize('model_type', ['mlp', 'transformer'])
@pytest.mark.parametrize(
    'schedulers_type, len_loss_output, len_lr_output, init_lr',
    [
        ('step', 5, 5, 0.001),
        ('cyclic', 5, 11, 0.001),
        ('reducelronplateau', 5, 5, 0.001),
    ],
)
def test_lr_history(model_type, schedulers_type, len_loss_output, len_lr_output, init_lr):
    if model_type == 'mlp':
        model = TabMlp(
            column_idx=non_transf_preprocessor.column_idx,
            cat_embed_input=non_transf_preprocessor.cat_embed_input,
            continuous_cols=non_transf_preprocessor.continuous_cols,
            mlp_hidden_dims=[16, 8],
        )
        X, X_valid = X_tab, X_tab_valid
    elif model_type == 'transformer':
        model = TabTransformer(
            column_idx=transf_preprocessor.column_idx,
            cat_embed_input=transf_preprocessor.cat_embed_input,
            continuous_cols=transf_preprocessor.continuous_cols,
            embed_continuous=True,
            n_heads=2,
            n_blocks=2,
        )
        X, X_valid = X_tab_transf, X_tab_valid_transf

    optimizer = torch.optim.Adam(model.parameters())
    if schedulers_type == 'step':
        lr_scheduler = StepLR(optimizer, step_size=4)
    elif schedulers_type == 'cyclic':
        lr_scheduler = CyclicLR(
            optimizer, base_lr=0.001, max_lr=0.01, step_size_up=5, cycle_momentum=False
        )
    elif schedulers_type == 'reducelronplateau':
        lr_scheduler = ReduceLROnPlateau(optimizer, patience=2, threshold=0.5)

    if model_type == 'mlp':
        trainer = EncoderDecoderTrainer(
            encoder=model,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            callbacks=[LRHistory(n_epochs=5)],
            masked_prob=0.2,
            verbose=0,
        )
    elif model_type == 'transformer':
        trainer = ContrastiveDenoisingTrainer(
            model=model,
            preprocessor=transf_preprocessor,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            callbacks=[LRHistory(n_epochs=5)],
            verbose=0,
        )

    if schedulers_type == 'reducelronplateau':
        trainer.pretrain(X, X_tab_val=X_valid, n_epochs=5, batch_size=16)
    else:
        trainer.pretrain(X, n_epochs=5, batch_size=16)

    if schedulers_type == 'step':
        history_assert = len(trainer.history['train_loss']) == len_loss_output
        lr_history_assert = len(trainer.lr_history['lr_0']) == len_lr_output
        lr_assert = trainer.lr_history['lr_0'][-1] == init_lr / 10.0
    if schedulers_type == 'cyclic':
        history_assert = len(trainer.history['train_loss']) == len_loss_output
        lr_history_assert = len(trainer.lr_history['lr_0']) == len_lr_output
        lr_assert = trainer.lr_history['lr_0'][-1] == init_lr
    if schedulers_type == 'reducelronplateau':
        history_assert = len(trainer.history['train_loss']) == len_loss_output
        lr_history_assert = len(trainer.lr_history['lr_0']) == len_lr_output
        lr_assert = trainer.lr_history['lr_0'][-1] == init_lr * lr_scheduler.factor

    assert all([history_assert, lr_history_assert, lr_assert])
@pytest.mark.parametrize('model_type', ['mlp', 'transformer'])
def test_early_stop(model_type):
    if model_type == 'mlp':
        model = TabMlp(
            column_idx=non_transf_preprocessor.column_idx,
            cat_embed_input=non_transf_preprocessor.cat_embed_input,
            continuous_cols=non_transf_preprocessor.continuous_cols,
            mlp_hidden_dims=[16, 8],
        )
        trainer = EncoderDecoderTrainer(
            encoder=model,
            masked_prob=0.2,
            callbacks=[
                EarlyStopping(min_delta=5.0, patience=3, restore_best_weights=True, verbose=1)
            ],
            verbose=0,
        )
        X, X_valid = X_tab, X_tab_valid
    elif model_type == 'transformer':
        model = TabTransformer(
            column_idx=transf_preprocessor.column_idx,
            cat_embed_input=transf_preprocessor.cat_embed_input,
            continuous_cols=transf_preprocessor.continuous_cols,
            embed_continuous=True,
            n_heads=2,
            n_blocks=2,
        )
        trainer = ContrastiveDenoisingTrainer(
            model=model,
            preprocessor=transf_preprocessor,
            callbacks=[
                EarlyStopping(min_delta=100.0, patience=3, restore_best_weights=True, verbose=1)
            ],
            verbose=0,
        )
        X, X_valid = X_tab_transf, X_tab_valid_transf
    trainer.pretrain(X_tab=X, X_tab_val=X_valid, n_epochs=5, batch_size=16)
    # with a huge min_delta, no epoch ever "improves", so training stops after
    # patience (3) epochs beyond the first one
    assert len(trainer.history['train_loss']) == 3 + 1
@pytest.mark.parametrize('model_type', ['mlp', 'transformer'])
@pytest.mark.parametrize(
    'fpath, save_best_only, max_save, n_files',
    [
        ('tests/test_self_supervised/weights/test_weights', True, 2, 2),
        ('tests/test_self_supervised/weights/test_weights', False, 2, 2),
        ('tests/test_self_supervised/weights/test_weights', False, 0, 5),
        (None, False, 0, 0),
    ],
)
def test_checkpoint(model_type, fpath, save_best_only, max_save, n_files):
    if model_type == 'mlp':
        model = TabMlp(
            column_idx=non_transf_preprocessor.column_idx,
            cat_embed_input=non_transf_preprocessor.cat_embed_input,
            continuous_cols=non_transf_preprocessor.continuous_cols,
            mlp_hidden_dims=[16, 8],
        )
        trainer = EncoderDecoderTrainer(
            encoder=model,
            masked_prob=0.2,
            callbacks=[
                ModelCheckpoint(filepath=fpath, save_best_only=save_best_only, max_save=max_save)
            ],
            verbose=0,
        )
        X, X_valid = X_tab, X_tab_valid
    elif model_type == 'transformer':
        model = TabTransformer(
            column_idx=transf_preprocessor.column_idx,
            cat_embed_input=transf_preprocessor.cat_embed_input,
            continuous_cols=transf_preprocessor.continuous_cols,
            embed_continuous=True,
            n_heads=2,
            n_blocks=2,
        )
        trainer = ContrastiveDenoisingTrainer(
            model=model,
            preprocessor=transf_preprocessor,
            callbacks=[
                ModelCheckpoint(filepath=fpath, save_best_only=save_best_only, max_save=max_save)
            ],
            verbose=0,
        )
        X, X_valid = X_tab_transf, X_tab_valid_transf
    trainer.pretrain(X_tab=X, X_tab_val=X_valid, n_epochs=5, batch_size=16)
    if fpath:
        n_saved = len(os.listdir('tests/test_self_supervised/weights/'))
        shutil.rmtree('tests/test_self_supervised/weights/')
    else:
        n_saved = 0
    assert n_saved <= n_files
@pytest.mark.parametrize('model_type', ['mlp', 'transformer'])
def test_save_and_load(model_type):
    if model_type == 'mlp':
        model = TabMlp(
            column_idx=non_transf_preprocessor.column_idx,
            cat_embed_input=non_transf_preprocessor.cat_embed_input,
            continuous_cols=non_transf_preprocessor.continuous_cols,
            mlp_hidden_dims=[16, 8],
        )
        X = X_tab
    elif model_type == 'transformer':
        model = TabTransformer(
            column_idx=transf_preprocessor.column_idx,
            cat_embed_input=transf_preprocessor.cat_embed_input,
            continuous_cols=transf_preprocessor.continuous_cols,
            embed_continuous=True,
            n_heads=2,
            n_blocks=2,
        )
        X = X_tab_transf

    optimizer = torch.optim.Adam(model.parameters())
    lr_scheduler = StepLR(optimizer, step_size=4)

    if model_type == 'mlp':
        trainer = EncoderDecoderTrainer(
            encoder=model,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            callbacks=[LRHistory(n_epochs=5)],
            masked_prob=0.2,
            verbose=0,
        )
    elif model_type == 'transformer':
        trainer = ContrastiveDenoisingTrainer(
            model=model,
            preprocessor=transf_preprocessor,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            callbacks=[LRHistory(n_epochs=5)],
            verbose=0,
        )
    trainer.pretrain(X, n_epochs=5, batch_size=16)

    if model_type == 'mlp':
        col_embed_module = model.cat_and_cont_embed.cat_embed.embed_layers.emb_layer_col1
        embeddings = col_embed_module.weight.data
    elif model_type == 'transformer':
        embed_module = model.cat_and_cont_embed.cat_embed.embed
        embeddings = embed_module.weight.data

    trainer.save('tests/test_self_supervised/model_dir/', model_filename='ss_model.pt')
    new_model = torch.load('tests/test_self_supervised/model_dir/ss_model.pt')

    if model_type == 'mlp':
        new_col_embed_module = new_model.encoder.cat_and_cont_embed.cat_embed.embed_layers.emb_layer_col1
        new_embeddings = new_col_embed_module.weight.data
    elif model_type == 'transformer':
        new_embed_module = new_model.model.cat_and_cont_embed.cat_embed.embed
        new_embeddings = new_embed_module.weight.data

    shutil.rmtree('tests/test_self_supervised/model_dir/')
    assert torch.allclose(embeddings, new_embeddings)
def _build_model_and_trainer(model_type):
    if model_type == 'mlp':
        model = TabMlp(
            column_idx=non_transf_preprocessor.column_idx,
            cat_embed_input=non_transf_preprocessor.cat_embed_input,
            continuous_cols=non_transf_preprocessor.continuous_cols,
            mlp_hidden_dims=[16, 8],
        )
        trainer = EncoderDecoderTrainer(encoder=model, masked_prob=0.2, verbose=0)
    elif model_type == 'transformer':
        model = TabTransformer(
            column_idx=transf_preprocessor.column_idx,
            cat_embed_input=transf_preprocessor.cat_embed_input,
            continuous_cols=transf_preprocessor.continuous_cols,
            embed_continuous=True,
            n_heads=2,
            n_blocks=2,
        )
        trainer = ContrastiveDenoisingTrainer(model=model, preprocessor=transf_preprocessor, verbose=0)
    return model, trainer
@pytest.mark.parametrize('model_type', ['mlp', 'transformer'])
def test_save_and_load_dict(model_type):
    model1, trainer1 = _build_model_and_trainer(model_type)
    X = X_tab if model_type == 'mlp' else X_tab_transf
    trainer1.pretrain(X, n_epochs=5, batch_size=16)

    if model_type == 'mlp':
        col_embed_module = model1.cat_and_cont_embed.cat_embed.embed_layers.emb_layer_col1
        embeddings = col_embed_module.weight.data
    elif model_type == 'transformer':
        embed_module = model1.cat_and_cont_embed.cat_embed.embed
        embeddings = embed_module.weight.data

    trainer1.save(
        'tests/test_self_supervised/model_dir/',
        model_filename='ss_model.pt',
        save_state_dict=True,
    )

    model2, trainer2 = _build_model_and_trainer(model_type)
    if model_type == 'mlp':
        trainer2.ed_model.load_state_dict(torch.load('tests/test_self_supervised/model_dir/ss_model.pt'))
    elif model_type == 'transformer':
        trainer2.cd_model.load_state_dict(torch.load('tests/test_self_supervised/model_dir/ss_model.pt'))

    if model_type == 'mlp':
        new_col_embed_module = trainer2.ed_model.encoder.cat_and_cont_embed.cat_embed.embed_layers.emb_layer_col1
        new_embeddings = new_col_embed_module.weight.data
    elif model_type == 'transformer':
        new_embed_module = trainer2.cd_model.model.cat_and_cont_embed.cat_embed.embed
        new_embeddings = new_embed_module.weight.data

    same_weights = torch.allclose(embeddings, new_embeddings)
    history_saved = os.path.isfile(
        'tests/test_self_supervised/model_dir/history/train_eval_history.json'
    )
    shutil.rmtree('tests/test_self_supervised/model_dir/')
    assert same_weights and history_saved
def _build_enc_models(model_type, column_idx, cat_embed_input, continuous_cols):
    if model_type == 'mlp':
        encoder = TabMlpEncoder(
            column_idx=column_idx,
            cat_embed_input=cat_embed_input,
            continuous_cols=continuous_cols,
            mlp_hidden_dims=[16, 8],
        )
    if model_type == 'resnet':
        encoder = TabResnetEncoder(
            column_idx=column_idx,
            cat_embed_input=cat_embed_input,
            continuous_cols=continuous_cols,
            blocks_dims=[32, 16, 8],
        )
    if model_type == 'tabnet':
        encoder = TabNetEncoder(
            column_idx=column_idx,
            cat_embed_input=cat_embed_input,
            continuous_cols=continuous_cols,
        )
    return encoder
def _build_dec_models(model_type, encoder):
    if model_type == 'mlp':
        decoder = TabMlpDecoder(
            embed_dim=encoder.cat_and_cont_embed.output_dim,
            mlp_hidden_dims=[encoder.output_dim, encoder.output_dim * 2],
        )
    if model_type == 'resnet':
        decoder = TabResnetDecoder(
            embed_dim=encoder.cat_and_cont_embed.output_dim,
            blocks_dims=[encoder.output_dim, encoder.output_dim * 2, encoder.output_dim * 4],
        )
    if model_type == 'tabnet':
        decoder = TabNetDecoder(embed_dim=encoder.cat_and_cont_embed.output_dim)
    return decoder
@pytest.mark.parametrize('model_type', ['mlp', 'resnet', 'tabnet'])
@pytest.mark.parametrize('cat_or_cont', ['cat', 'cont', 'both'])
@pytest.mark.parametrize('decoder_model', ['custom', 'auto'])
def test_enc_dec_trainer(model_type, cat_or_cont, decoder_model):
    cat_embed_cols = ['col1', 'col2'] if cat_or_cont in ['cat', 'both'] else None
    continuous_cols = ['col3', 'col4'] if cat_or_cont in ['cont', 'both'] else None
    preprocessor = TabPreprocessor(cat_embed_cols=cat_embed_cols, continuous_cols=continuous_cols)
    X_tab = preprocessor.fit_transform(test_df)
    cat_embed_input = (
        preprocessor.cat_embed_input if hasattr(preprocessor, 'cat_embed_input') else None
    )
    encoder = _build_enc_models(model_type, preprocessor.column_idx, cat_embed_input, continuous_cols)
    if decoder_model == 'auto':
        decoder = None
    elif decoder_model == 'custom':
        decoder = _build_dec_models(model_type, encoder)
    ec_trainer = EncoderDecoderTrainer(encoder=encoder, decoder=decoder, masked_prob=0.2, verbose=0)
    ec_trainer.pretrain(X_tab, n_epochs=2, batch_size=16)
    assert len(ec_trainer.history['train_loss']) == 2
@pytest.mark.parametrize('method_name', ['pretrain', 'fit'])
def test_enc_dec_trainer_method_name(method_name):
    cat_embed_cols = ['col1', 'col2']
    continuous_cols = ['col3', 'col4']
    preprocessor = TabPreprocessor(cat_embed_cols=cat_embed_cols, continuous_cols=continuous_cols)
    X_tab = preprocessor.fit_transform(test_df)
    encoder = _build_enc_models(
        'mlp', preprocessor.column_idx, preprocessor.cat_embed_input, preprocessor.continuous_cols
    )
    ec_trainer = EncoderDecoderTrainer(encoder=encoder, masked_prob=0.2, verbose=0)
    if method_name == 'pretrain':
        ec_trainer.pretrain(X_tab, n_epochs=2, batch_size=16)
    elif method_name == 'fit':
        ec_trainer.fit(X_tab, n_epochs=2, batch_size=16)
    assert len(ec_trainer.history['train_loss']) == 2
@pytest.mark.parametrize(
    'transf_model',
    ['tabtransformer', 'saint', 'fttransformer', 'tabfastformer', 'contextattentionmlp', 'selfattentionmlp'],
)
@pytest.mark.parametrize('cat_or_cont', ['cat', 'cont', 'both'])
@pytest.mark.parametrize('with_cls_token', [True, False])
def test_cont_den_trainer_with_defaults(transf_model, cat_or_cont, with_cls_token):
    cat_embed_cols = ['col1', 'col2'] if cat_or_cont in ['cat', 'both'] else None
    continuous_cols = ['col3', 'col4'] if cat_or_cont in ['cont', 'both'] else None
    preprocessor = TabPreprocessor(
        cat_embed_cols=cat_embed_cols,
        continuous_cols=continuous_cols,
        with_attention=True,
        with_cls_token=with_cls_token,
    )
    X_tab = preprocessor.fit_transform(test_df)
    cat_embed_input = (
        preprocessor.cat_embed_input if hasattr(preprocessor, 'cat_embed_input') else None
    )
    tr_model = _build_transf_model(transf_model, preprocessor, cat_embed_input, continuous_cols)
    cd_trainer = ContrastiveDenoisingTrainer(model=tr_model, preprocessor=preprocessor, verbose=0)
    cd_trainer.pretrain(X_tab, n_epochs=2, batch_size=16)
    assert len(cd_trainer.history['train_loss']) == 2
@pytest.mark.parametrize('method_name', ['pretrain', 'fit'])
def test_cont_den_trainer_method_name(method_name):
    cat_embed_cols = ['col1', 'col2']
    continuous_cols = ['col3', 'col4']
    preprocessor = TabPreprocessor(
        cat_embed_cols=cat_embed_cols,
        continuous_cols=continuous_cols,
        with_attention=True,
        with_cls_token=True,
    )
    X_tab = preprocessor.fit_transform(test_df)
    tr_model = _build_transf_model(
        'tabtransformer', preprocessor, preprocessor.cat_embed_input, preprocessor.continuous_cols
    )
    cd_trainer = ContrastiveDenoisingTrainer(model=tr_model, preprocessor=preprocessor, verbose=0)
    if method_name == 'pretrain':
        cd_trainer.pretrain(X_tab, n_epochs=2, batch_size=16)
    elif method_name == 'fit':
        cd_trainer.fit(X_tab, n_epochs=2, batch_size=16)
    assert len(cd_trainer.history['train_loss']) == 2
@pytest.mark.parametrize('loss_type', ['contrastive', 'denoising', 'both'])
@pytest.mark.parametrize('proj_head_dims', [None, [32, 8]])
@pytest.mark.parametrize('mlp_type', ['single', 'multiple'])
@pytest.mark.parametrize('with_cls_token', [True, False])
def test_cont_den_trainer_with_varying_params(loss_type, proj_head_dims, mlp_type, with_cls_token):
    cat_embed_cols = ['col1', 'col2']
    continuous_cols = ['col3', 'col4']
    preprocessor = TabPreprocessor(
        cat_embed_cols=cat_embed_cols,
        continuous_cols=continuous_cols,
        with_attention=True,
        with_cls_token=with_cls_token,
    )
    X_tab = preprocessor.fit_transform(test_df)
    cat_embed_input = (
        preprocessor.cat_embed_input if hasattr(preprocessor, 'cat_embed_input') else None
    )
    tr_model = _build_transf_model('saint', preprocessor, cat_embed_input, continuous_cols)
    cd_trainer = ContrastiveDenoisingTrainer(
        model=tr_model,
        preprocessor=preprocessor,
        loss_type=loss_type,
        projection_head1_dims=proj_head_dims,
        projection_head2_dims=proj_head_dims,
        cat_mlp_type=mlp_type,
        cont_mlp_type=mlp_type,
        verbose=0,
    )
    cd_trainer.pretrain(X_tab, n_epochs=2, batch_size=16)
    assert len(cd_trainer.history['train_loss']) == 2
@pytest.mark.parametrize(
    'proj_head_dims', [[None, [16, 8]], [[16, 8], None], [[16, 8], [16, 8]]]
)
def test_projection_head_value_error(proj_head_dims):
    cat_embed_cols = ['col1', 'col2']
    continuous_cols = ['col3', 'col4']
    preprocessor = TabPreprocessor(
        cat_embed_cols=cat_embed_cols,
        continuous_cols=continuous_cols,
        with_attention=True,
        with_cls_token=True,
    )
    X_tab = preprocessor.fit_transform(test_df)
    tr_model = _build_transf_model(
        'saint', preprocessor, preprocessor.cat_embed_input, preprocessor.continuous_cols
    )
    with pytest.raises(ValueError):
        cd_trainer = ContrastiveDenoisingTrainer(
            model=tr_model,
            preprocessor=preprocessor,
            projection_head1_dims=proj_head_dims[0],
            projection_head2_dims=proj_head_dims[1],
            verbose=0,
        )
def create_df():
    cat_cols = [np.array(choices(c, k=5)) for c in [cat_col1_vals, cat_col2_vals]]
    cont_cols = [np.round(np.random.rand(5), 2) for _ in range(2)]
    target = [np.random.choice(2, 5, p=[0.8, 0.2])]
    return pd.DataFrame(np.vstack(cat_cols + cont_cols + target).transpose(), columns=colnames)
@pytest.mark.parametrize(
    'deeptabular, return_dataframe',
    [
        (tabmlp, True),
        (tabmlp, False),
        (tabresnet, True),
        (tabresnet, False),
        (tabnet, True),
        (tabnet, False),
    ],
)
def test_non_transformer_models(deeptabular, return_dataframe):
    model = WideDeep(deeptabular=deeptabular)
    t2v = Tab2Vec(model, tab_preprocessor, return_dataframe=return_dataframe)
    t2v_out, _ = t2v.fit_transform(df_t2v, target_col='target')
    embed_dim = sum([el[2] for el in tab_preprocessor.cat_embed_input])
    cont_dim = len(tab_preprocessor.continuous_cols)
    assert t2v_out.shape[1] == embed_dim + cont_dim
def _build_model(model_name, params):
    if model_name == 'tabtransformer':
        return TabTransformer(input_dim=8, n_heads=2, n_blocks=2, **params)
    if model_name == 'saint':
        return SAINT(input_dim=8, n_heads=2, n_blocks=2, **params)
    if model_name == 'fttransformer':
        return FTTransformer(n_blocks=2, n_heads=2, kv_compression_factor=0.5, **params)
    if model_name == 'tabfastformer':
        return TabFastFormer(n_blocks=2, n_heads=2, **params)
    if model_name == 'tabperceiver':
        return TabPerceiver(
            input_dim=8,
            n_cross_attn_heads=2,
            n_latents=2,
            latent_dim=8,
            n_latent_heads=2,
            n_perceiver_blocks=2,
            share_weights=False,
            **params,
        )
@pytest.mark.parametrize(
    'model_name, with_cls_token, share_embeddings, embed_continuous',
    [
        ('tabtransformer', False, False, False),
        ('tabtransformer', True, False, False),
        ('tabtransformer', False, True, False),
        ('tabtransformer', True, False, True),
    ],
)
def test_tab_transformer_models(model_name, with_cls_token, share_embeddings, embed_continuous):
    embed_cols = ['a', 'b']
    cont_cols = ['c', 'd']
    tab_preprocessor = TabPreprocessor(
        cat_embed_cols=embed_cols,
        continuous_cols=cont_cols,
        for_transformer=True,
        with_cls_token=with_cls_token,
        shared_embed=share_embeddings,
    )
    X_tab = tab_preprocessor.fit_transform(df_init)
    params = {
        'column_idx': tab_preprocessor.column_idx,
        'cat_embed_input': tab_preprocessor.cat_embed_input,
        'continuous_cols': tab_preprocessor.continuous_cols,
        'embed_continuous': embed_continuous,
    }
    deeptabular = _build_model(model_name, params)
    model = WideDeep(deeptabular=deeptabular)
    t2v = Tab2Vec(model, tab_preprocessor)
    X_vec = t2v.transform(df_t2v)
    if embed_continuous:
        out_dim = (len(embed_cols) + len(cont_cols)) * deeptabular.input_dim
    else:
        out_dim = len(embed_cols) * deeptabular.input_dim + len(cont_cols)
    assert X_vec.shape[1] == out_dim
@pytest.mark.parametrize('with_cls_token', [True, False])
@pytest.mark.parametrize('share_embeddings', [True, False])
@pytest.mark.parametrize('attention_name', ['context_attention', 'self_attention'])
def test_attentive_mlp(with_cls_token, share_embeddings, attention_name):
    embed_cols = ['a', 'b']
    cont_cols = ['c', 'd']
    tab_preprocessor = TabPreprocessor(
        cat_embed_cols=embed_cols,
        continuous_cols=cont_cols,
        with_attention=True,
        with_cls_token=with_cls_token,
        shared_embed=share_embeddings,
    )
    X_tab = tab_preprocessor.fit_transform(df_init)
    if attention_name == 'context_attention':
        deeptabular = ContextAttentionMLP(
            column_idx=tab_preprocessor.column_idx,
            cat_embed_input=tab_preprocessor.cat_embed_input,
            continuous_cols=tab_preprocessor.continuous_cols,
        )
    elif attention_name == 'self_attention':
        deeptabular = SelfAttentionMLP(
            column_idx=tab_preprocessor.column_idx,
            cat_embed_input=tab_preprocessor.cat_embed_input,
            continuous_cols=tab_preprocessor.continuous_cols,
        )
    model = WideDeep(deeptabular=deeptabular)
    t2v = Tab2Vec(model, tab_preprocessor)
    X_vec = t2v.transform(df_t2v)
    out_dim = (len(embed_cols) + len(cont_cols)) * deeptabular.input_dim
    assert X_vec.shape[1] == out_dim
@pytest.mark.parametrize(
    'model_name, with_cls_token, share_embeddings, return_dataframe',
    [
        ('saint', False, True, False),
        ('saint', True, True, False),
        ('saint', False, False, False),
        ('saint', False, True, True),
        ('saint', True, True, True),
        ('saint', False, False, True),
        ('fttransformer', False, True, False),
        ('fttransformer', True, True, False),
        ('fttransformer', False, False, False),
        ('fttransformer', False, True, True),
        ('fttransformer', True, True, True),
        ('fttransformer', False, False, True),
        ('tabfastformer', False, True, False),
        ('tabfastformer', True, True, False),
        ('tabfastformer', False, False, False),
        ('tabfastformer', False, True, True),
        ('tabfastformer', True, True, True),
        ('tabfastformer', False, False, True),
        ('tabperceiver', False, True, False),
        ('tabperceiver', False, False, False),
        ('tabperceiver', False, True, False),
        ('tabperceiver', False, False, True),
    ],
)
def test_transformer_family_models(model_name, with_cls_token, share_embeddings, return_dataframe):
    embed_cols = ['a', 'b']
    cont_cols = ['c', 'd']
    tab_preprocessor = TabPreprocessor(
        cat_embed_cols=embed_cols,
        continuous_cols=cont_cols,
        for_transformer=True,
        with_cls_token=with_cls_token,
        shared_embed=share_embeddings,
    )
    X_tab = tab_preprocessor.fit_transform(df_init)
    params = {
        'column_idx': tab_preprocessor.column_idx,
        'cat_embed_input': tab_preprocessor.cat_embed_input,
        'continuous_cols': tab_preprocessor.continuous_cols,
    }
    deeptabular = _build_model(model_name, params)
    model = WideDeep(deeptabular=deeptabular)
    t2v = Tab2Vec(model, tab_preprocessor, return_dataframe=return_dataframe)
    t2v_out = t2v.transform(df_t2v)
    out_dim = (len(embed_cols) + len(cont_cols)) * deeptabular.input_dim
    assert t2v_out.shape[1] == out_dim
class Evaluator:
    """ Computes intersection and union between prediction and ground-truth """

    @classmethod
    def initialize(cls):
        cls.ignore_index = 255

    @classmethod
    def classify_prediction(cls, pred_mask, batch):
        gt_mask = batch.get('query_mask')

        # mark boundary pixels (if provided) with the ignore index so they are
        # excluded from both prediction and ground truth
        query_ignore_idx = batch.get('query_ignore_idx')
        if query_ignore_idx is not None:
            assert torch.logical_and(query_ignore_idx, gt_mask).sum() == 0
            query_ignore_idx *= cls.ignore_index
            gt_mask = gt_mask + query_ignore_idx
            pred_mask[gt_mask == cls.ignore_index] = cls.ignore_index

        # compute intersection, prediction and ground-truth areas per sample
        area_inter, area_pred, area_gt = [], [], []
        for _pred_mask, _gt_mask in zip(pred_mask, gt_mask):
            _inter = _pred_mask[_pred_mask == _gt_mask]
            if _inter.size(0) == 0:  # torch.histc errors on an empty tensor
                _area_inter = torch.tensor([0, 0], device=_pred_mask.device)
            else:
                _area_inter = torch.histc(_inter, bins=2, min=0, max=1)
            area_inter.append(_area_inter)
            area_pred.append(torch.histc(_pred_mask, bins=2, min=0, max=1))
            area_gt.append(torch.histc(_gt_mask, bins=2, min=0, max=1))
        area_inter = torch.stack(area_inter).t()
        area_pred = torch.stack(area_pred).t()
        area_gt = torch.stack(area_gt).t()
        area_union = area_pred + area_gt - area_inter

        return area_inter, area_union
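A minimal, self-contained sketch (toy masks, CPU-only; not part of the original code) of the histc trick used by Evaluator above: histogramming a 0/1 mask with two bins counts background and foreground pixels in one call.

import torch

pred_mask = torch.tensor([[0., 1., 1.], [0., 0., 1.]])
gt_mask = torch.tensor([[0., 1., 0.], [1., 0., 1.]])

inter = pred_mask[pred_mask == gt_mask]                     # pixels where masks agree
area_inter = torch.histc(inter, bins=2, min=0, max=1)       # [bg inter, fg inter] -> [2., 2.]
area_pred = torch.histc(pred_mask, bins=2, min=0, max=1)    # [3., 3.]
area_gt = torch.histc(gt_mask, bins=2, min=0, max=1)        # [3., 3.]
area_union = area_pred + area_gt - area_inter               # fg union = 4
print(area_inter[1] / area_union[1])                        # foreground IoU = 0.5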
class AverageMeter:
    """ Stores loss, evaluation results """

    def __init__(self, dataset):
        self.benchmark = dataset.benchmark
        self.class_ids_interest = dataset.class_ids
        self.class_ids_interest = torch.tensor(self.class_ids_interest).cuda()

        if self.benchmark == 'pascal':
            self.nclass = 20
        elif self.benchmark == 'coco':
            self.nclass = 80
        elif self.benchmark == 'fss':
            self.nclass = 1000

        self.intersection_buf = torch.zeros([2, self.nclass]).float().cuda()
        self.union_buf = torch.zeros([2, self.nclass]).float().cuda()
        self.ones = torch.ones_like(self.union_buf)
        self.loss_buf = []

    def update(self, inter_b, union_b, class_id, loss):
        self.intersection_buf.index_add_(1, class_id, inter_b.float())
        self.union_buf.index_add_(1, class_id, union_b.float())
        if loss is None:
            loss = torch.tensor(0.0)
        self.loss_buf.append(loss)

    def compute_iou(self):
        iou = self.intersection_buf.float() / torch.max(torch.stack([self.union_buf, self.ones]), dim=0)[0]
        iou = iou.index_select(1, self.class_ids_interest)
        miou = iou[1].mean() * 100

        fb_iou = (
            self.intersection_buf.index_select(1, self.class_ids_interest).sum(dim=1)
            / self.union_buf.index_select(1, self.class_ids_interest).sum(dim=1)
        ).mean() * 100

        return miou, fb_iou

    def write_result(self, split, epoch):
        iou, fb_iou = self.compute_iou()

        loss_buf = torch.stack(self.loss_buf)
        msg = '\n*** %s ' % split
        msg += '[@Epoch %02d] ' % epoch
        msg += 'Avg L: %6.5f ' % loss_buf.mean()
        msg += 'mIoU: %5.2f ' % iou
        msg += 'FB-IoU: %5.2f ' % fb_iou
        msg += '***\n'
        Logger.info(msg)

    def write_process(self, batch_idx, datalen, epoch, write_batch_idx=20):
        if batch_idx % write_batch_idx == 0:
            msg = '[Epoch: %02d] ' % epoch if epoch != -1 else ''
            msg += '[Batch: %04d/%04d] ' % (batch_idx + 1, datalen)
            iou, fb_iou = self.compute_iou()
            if epoch != -1:
                loss_buf = torch.stack(self.loss_buf)
                msg += 'L: %6.5f ' % loss_buf[-1]
                msg += 'Avg L: %6.5f ' % loss_buf.mean()
            msg += 'mIoU: %5.2f | ' % iou
            msg += 'FB-IoU: %5.2f' % fb_iou
            Logger.info(msg)
class Logger:
    """ Writes evaluation results of training/testing """

    @classmethod
    def initialize(cls, args, training):
        logtime = datetime.datetime.now().__format__('_%m%d_%H%M%S')
        logpath = args.logpath if training else '_TEST_' + args.load.split('/')[-2].split('.')[0] + logtime
        if logpath == '':
            logpath = logtime

        cls.logpath = os.path.join('logs', logpath + '.log')
        cls.benchmark = args.benchmark
        os.makedirs(cls.logpath)

        logging.basicConfig(
            filemode='w',
            filename=os.path.join(cls.logpath, 'log.txt'),
            level=logging.INFO,
            format='%(message)s',
            datefmt='%m-%d %H:%M:%S',
        )

        # console log config
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)

        # tensorboard writer
        cls.tbd_writer = SummaryWriter(os.path.join(cls.logpath, 'tbd/runs'))

        # log arguments
        logging.info('\n:=========== Few-shot Seg. with HSNet ===========')
        for arg_key in args.__dict__:
            logging.info('| %20s: %-24s' % (arg_key, str(args.__dict__[arg_key])))
        logging.info(':================================================\n')

    @classmethod
    def info(cls, msg):
        """ Writes log message to log.txt """
        logging.info(msg)

    @classmethod
    def save_model_miou(cls, model, epoch, val_miou):
        torch.save(model.state_dict(), os.path.join(cls.logpath, 'best_model.pt'))
        cls.info('Model saved @%d w/ val. mIoU: %5.2f.\n' % (epoch, val_miou))

    @classmethod
    def log_params(cls, model):
        backbone_param = 0
        learner_param = 0
        for k in model.state_dict().keys():
            n_param = model.state_dict()[k].view(-1).size(0)
            if k.split('.')[0] in 'backbone':
                if k.split('.')[1] in ['classifier', 'fc']:
                    continue
                backbone_param += n_param
            else:
                learner_param += n_param
        Logger.info('Backbone # param.: %d' % backbone_param)
        Logger.info('Learnable # param.: %d' % learner_param)
        Logger.info('Total # param.: %d' % (backbone_param + learner_param))
def fix_randseed(seed):
    """ Set random seeds for reproducibility """
    if seed is None:
        seed = int(random.random() * 100000.0)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def mean(x):
    return sum(x) / len(x) if len(x) > 0 else 0.0
def to_cuda(batch):
    for key, value in batch.items():
        if isinstance(value, torch.Tensor):
            batch[key] = value.cuda()
    return batch
def to_cpu(tensor):
    return tensor.detach().clone().cpu()
class DatasetCOCO(Dataset):
    def __init__(self, datapath, fold, transform, split, shot, use_original_imgsize):
        self.split = 'val' if split in ['val', 'test'] else 'trn'
        self.fold = fold
        self.nfolds = 4
        self.nclass = 80
        self.benchmark = 'coco'
        self.shot = shot
        self.split_coco = split if split == 'val2014' else 'train2014'
        self.base_path = os.path.join(datapath, 'COCO2014')
        self.transform = transform
        self.use_original_imgsize = use_original_imgsize

        self.class_ids = self.build_class_ids()
        self.img_metadata_classwise = self.build_img_metadata_classwise()
        self.img_metadata = self.build_img_metadata()

    def __len__(self):
        return len(self.img_metadata) if self.split == 'trn' else 1000

    def __getitem__(self, idx):
        query_img, query_mask, support_imgs, support_masks, query_name, support_names, class_sample, org_qry_imsize = self.load_frame()

        query_img = self.transform(query_img)
        query_mask = query_mask.float()
        if not self.use_original_imgsize:
            query_mask = F.interpolate(
                query_mask.unsqueeze(0).unsqueeze(0).float(), query_img.size()[-2:], mode='nearest'
            ).squeeze()

        support_imgs = torch.stack([self.transform(support_img) for support_img in support_imgs])
        for midx, smask in enumerate(support_masks):
            support_masks[midx] = F.interpolate(
                smask.unsqueeze(0).unsqueeze(0).float(), support_imgs.size()[-2:], mode='nearest'
            ).squeeze()
        support_masks = torch.stack(support_masks)

        batch = {
            'query_img': query_img,
            'query_mask': query_mask,
            'query_name': query_name,
            'org_query_imsize': org_qry_imsize,
            'support_imgs': support_imgs,
            'support_masks': support_masks,
            'support_names': support_names,
            'class_id': torch.tensor(class_sample),
        }
        return batch

    def build_class_ids(self):
        nclass_trn = self.nclass // self.nfolds
        class_ids_val = [self.fold + self.nfolds * v for v in range(nclass_trn)]
        class_ids_trn = [x for x in range(self.nclass) if x not in class_ids_val]
        class_ids = class_ids_trn if self.split == 'trn' else class_ids_val
        return class_ids

    def build_img_metadata_classwise(self):
        with open('./data/splits/coco/%s/fold%d.pkl' % (self.split, self.fold), 'rb') as f:
            img_metadata_classwise = pickle.load(f)
        return img_metadata_classwise

    def build_img_metadata(self):
        img_metadata = []
        for k in self.img_metadata_classwise.keys():
            img_metadata += self.img_metadata_classwise[k]
        return sorted(list(set(img_metadata)))

    def read_mask(self, name):
        mask_path = os.path.join(self.base_path, 'annotations', name)
        mask = torch.tensor(np.array(Image.open(mask_path[:mask_path.index('.jpg')] + '.png')))
        return mask

    def load_frame(self):
        class_sample = np.random.choice(self.class_ids, 1, replace=False)[0]
        query_name = np.random.choice(self.img_metadata_classwise[class_sample], 1, replace=False)[0]
        query_img = Image.open(os.path.join(self.base_path, query_name)).convert('RGB')
        query_mask = self.read_mask(query_name)

        org_qry_imsize = query_img.size

        query_mask[query_mask != class_sample + 1] = 0
        query_mask[query_mask == class_sample + 1] = 1

        support_names = []
        while True:  # keep sampling until the support set excludes the query
            support_name = np.random.choice(self.img_metadata_classwise[class_sample], 1, replace=False)[0]
            if query_name != support_name:
                support_names.append(support_name)
            if len(support_names) == self.shot:
                break

        support_imgs = []
        support_masks = []
        for support_name in support_names:
            support_imgs.append(Image.open(os.path.join(self.base_path, support_name)).convert('RGB'))
            support_mask = self.read_mask(support_name)
            support_mask[support_mask != class_sample + 1] = 0
            support_mask[support_mask == class_sample + 1] = 1
            support_masks.append(support_mask)

        return query_img, query_mask, support_imgs, support_masks, query_name, support_names, class_sample, org_qry_imsize
class FSSDataset:

    @classmethod
    def initialize(cls, img_size, datapath, use_original_imgsize):
        cls.datasets = {
            'pascal': DatasetPASCAL,
            'coco': DatasetCOCO,
            'fss': DatasetFSS,
        }

        cls.img_mean = [0.485, 0.456, 0.406]
        cls.img_std = [0.229, 0.224, 0.225]
        cls.datapath = datapath
        cls.use_original_imgsize = use_original_imgsize

        cls.transform = transforms.Compose([
            transforms.Resize(size=(img_size, img_size)),
            transforms.ToTensor(),
            transforms.Normalize(cls.img_mean, cls.img_std),
        ])

    @classmethod
    def build_dataloader(cls, benchmark, bsz, nworker, fold, split, shot=1):
        shuffle = split == 'trn'
        nworker = nworker if split == 'trn' else 0
        dataset = cls.datasets[benchmark](
            cls.datapath,
            fold=fold,
            transform=cls.transform,
            split=split,
            shot=shot,
            use_original_imgsize=cls.use_original_imgsize,
        )
        dataloader = DataLoader(dataset, batch_size=bsz, shuffle=shuffle, num_workers=nworker)
        return dataloader
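A hedged usage sketch for FSSDataset above; '/path/to/datasets' is a hypothetical location and assumes the benchmark folders (e.g. VOC2012) exist on disk.

FSSDataset.initialize(img_size=400, datapath='/path/to/datasets', use_original_imgsize=False)
dataloader_trn = FSSDataset.build_dataloader('pascal', bsz=20, nworker=8, fold=0, split='trn')
dataloader_val = FSSDataset.build_dataloader('pascal', bsz=20, nworker=8, fold=0, split='val')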
class DatasetFSS(Dataset):
    def __init__(self, datapath, fold, transform, split, shot, use_original_imgsize):
        self.split = split
        self.benchmark = 'fss'
        self.shot = shot

        self.base_path = os.path.join(datapath, 'FSS-1000')

        with open('./data/splits/fss/%s.txt' % split, 'r') as f:
            self.categories = f.read().split('\n')[:-1]
        self.categories = sorted(self.categories)

        self.class_ids = self.build_class_ids()
        self.img_metadata = self.build_img_metadata()

        self.transform = transform

    def __len__(self):
        return len(self.img_metadata)

    def __getitem__(self, idx):
        query_name, support_names, class_sample = self.sample_episode(idx)
        query_img, query_mask, support_imgs, support_masks = self.load_frame(query_name, support_names)

        query_img = self.transform(query_img)
        query_mask = F.interpolate(
            query_mask.unsqueeze(0).unsqueeze(0).float(), query_img.size()[-2:], mode='nearest'
        ).squeeze()

        support_imgs = torch.stack([self.transform(support_img) for support_img in support_imgs])

        support_masks_tmp = []
        for smask in support_masks:
            smask = F.interpolate(
                smask.unsqueeze(0).unsqueeze(0).float(), support_imgs.size()[-2:], mode='nearest'
            ).squeeze()
            support_masks_tmp.append(smask)
        support_masks = torch.stack(support_masks_tmp)

        batch = {
            'query_img': query_img,
            'query_mask': query_mask,
            'query_name': query_name,
            'support_imgs': support_imgs,
            'support_masks': support_masks,
            'support_names': support_names,
            'class_id': torch.tensor(class_sample),
        }
        return batch

    def load_frame(self, query_name, support_names):
        query_img = Image.open(query_name).convert('RGB')
        support_imgs = [Image.open(name).convert('RGB') for name in support_names]

        query_id = query_name.split('/')[-1].split('.')[0]
        query_name = os.path.join(os.path.dirname(query_name), query_id) + '.png'
        support_ids = [name.split('/')[-1].split('.')[0] for name in support_names]
        support_names = [
            os.path.join(os.path.dirname(name), sid) + '.png'
            for name, sid in zip(support_names, support_ids)
        ]

        query_mask = self.read_mask(query_name)
        support_masks = [self.read_mask(name) for name in support_names]

        return query_img, query_mask, support_imgs, support_masks

    def read_mask(self, img_name):
        mask = torch.tensor(np.array(Image.open(img_name).convert('L')))
        mask[mask < 128] = 0
        mask[mask >= 128] = 1
        return mask

    def sample_episode(self, idx):
        query_name = self.img_metadata[idx]
        class_sample = self.categories.index(query_name.split('/')[-2])
        if self.split == 'val':
            class_sample += 520
        elif self.split == 'test':
            class_sample += 760

        support_names = []
        while True:  # keep sampling until the support set excludes the query
            support_name = np.random.choice(range(1, 11), 1, replace=False)[0]
            support_name = os.path.join(os.path.dirname(query_name), str(support_name)) + '.jpg'
            if query_name != support_name:
                support_names.append(support_name)
            if len(support_names) == self.shot:
                break

        return query_name, support_names, class_sample

    def build_class_ids(self):
        if self.split == 'trn':
            class_ids = range(0, 520)
        elif self.split == 'val':
            class_ids = range(520, 760)
        elif self.split == 'test':
            class_ids = range(760, 1000)
        return class_ids

    def build_img_metadata(self):
        img_metadata = []
        for cat in self.categories:
            img_paths = sorted([path for path in glob.glob('%s/*' % os.path.join(self.base_path, cat))])
            for img_path in img_paths:
                if os.path.basename(img_path).split('.')[1] == 'jpg':
                    img_metadata.append(img_path)
        return img_metadata
class DatasetPASCAL(Dataset):
    def __init__(self, datapath, fold, transform, split, shot, use_original_imgsize):
        self.split = 'val' if split in ['val', 'test'] else 'trn'
        self.fold = fold
        self.nfolds = 4
        self.nclass = 20
        self.benchmark = 'pascal'
        self.shot = shot
        self.use_original_imgsize = use_original_imgsize

        self.img_path = os.path.join(datapath, 'VOC2012/JPEGImages/')
        self.ann_path = os.path.join(datapath, 'VOC2012/SegmentationClassAug/')
        self.transform = transform

        self.class_ids = self.build_class_ids()
        self.img_metadata = self.build_img_metadata()
        self.img_metadata_classwise = self.build_img_metadata_classwise()

    def __len__(self):
        return len(self.img_metadata) if self.split == 'trn' else 1000

    def __getitem__(self, idx):
        idx %= len(self.img_metadata)  # for validation, as n_images may be < 1000
        query_name, support_names, class_sample = self.sample_episode(idx)
        query_img, query_cmask, support_imgs, support_cmasks, org_qry_imsize = self.load_frame(query_name, support_names)

        query_img = self.transform(query_img)
        if not self.use_original_imgsize:
            query_cmask = F.interpolate(
                query_cmask.unsqueeze(0).unsqueeze(0).float(), query_img.size()[-2:], mode='nearest'
            ).squeeze()
        query_mask, query_ignore_idx = self.extract_ignore_idx(query_cmask.float(), class_sample)

        support_imgs = torch.stack([self.transform(support_img) for support_img in support_imgs])

        support_masks = []
        support_ignore_idxs = []
        for scmask in support_cmasks:
            scmask = F.interpolate(
                scmask.unsqueeze(0).unsqueeze(0).float(), support_imgs.size()[-2:], mode='nearest'
            ).squeeze()
            support_mask, support_ignore_idx = self.extract_ignore_idx(scmask, class_sample)
            support_masks.append(support_mask)
            support_ignore_idxs.append(support_ignore_idx)
        support_masks = torch.stack(support_masks)
        support_ignore_idxs = torch.stack(support_ignore_idxs)

        batch = {
            'query_img': query_img,
            'query_mask': query_mask,
            'query_name': query_name,
            'query_ignore_idx': query_ignore_idx,
            'org_query_imsize': org_qry_imsize,
            'support_imgs': support_imgs,
            'support_masks': support_masks,
            'support_names': support_names,
            'support_ignore_idxs': support_ignore_idxs,
            'class_id': torch.tensor(class_sample),
        }
        return batch

    def extract_ignore_idx(self, mask, class_id):
        boundary = (mask / 255).floor()
        mask[mask != class_id + 1] = 0
        mask[mask == class_id + 1] = 1
        return mask, boundary

    def load_frame(self, query_name, support_names):
        query_img = self.read_img(query_name)
        query_mask = self.read_mask(query_name)
        support_imgs = [self.read_img(name) for name in support_names]
        support_masks = [self.read_mask(name) for name in support_names]
        org_qry_imsize = query_img.size
        return query_img, query_mask, support_imgs, support_masks, org_qry_imsize

    def read_mask(self, img_name):
        """Return segmentation mask in PIL Image"""
        mask = torch.tensor(np.array(Image.open(os.path.join(self.ann_path, img_name) + '.png')))
        return mask

    def read_img(self, img_name):
        """Return RGB image in PIL Image"""
        return Image.open(os.path.join(self.img_path, img_name) + '.jpg')

    def sample_episode(self, idx):
        query_name, class_sample = self.img_metadata[idx]

        support_names = []
        while True:  # keep sampling until the support set excludes the query
            support_name = np.random.choice(self.img_metadata_classwise[class_sample], 1, replace=False)[0]
            if query_name != support_name:
                support_names.append(support_name)
            if len(support_names) == self.shot:
                break

        return query_name, support_names, class_sample

    def build_class_ids(self):
        nclass_trn = self.nclass // self.nfolds
        class_ids_val = [self.fold * nclass_trn + i for i in range(nclass_trn)]
        class_ids_trn = [x for x in range(self.nclass) if x not in class_ids_val]

        if self.split == 'trn':
            return class_ids_trn
        else:
            return class_ids_val

    def build_img_metadata(self):

        def read_metadata(split, fold_id):
            fold_n_metadata = os.path.join('data/splits/pascal/%s/fold%d.txt' % (split, fold_id))
            with open(fold_n_metadata, 'r') as f:
                fold_n_metadata = f.read().split('\n')[:-1]
            fold_n_metadata = [
                [data.split('__')[0], int(data.split('__')[1]) - 1] for data in fold_n_metadata
            ]
            return fold_n_metadata

        img_metadata = []
        if self.split == 'trn':  # use the samples of all folds but the current one
            for fold_id in range(self.nfolds):
                if fold_id == self.fold:
                    continue
                img_metadata += read_metadata(self.split, fold_id)
        elif self.split == 'val':  # use the samples of the current fold
            img_metadata = read_metadata(self.split, self.fold)
        else:
            raise Exception('Undefined split %s: ' % self.split)

        print('Total (%s) images are : %d' % (self.split, len(img_metadata)))

        return img_metadata

    def build_img_metadata_classwise(self):
        img_metadata_classwise = {}
        for class_id in range(self.nclass):
            img_metadata_classwise[class_id] = []

        for img_name, img_class in self.img_metadata:
            img_metadata_classwise[img_class] += [img_name]
        return img_metadata_classwise
class CenterPivotConv4d(nn.Module):
    """ CenterPivot 4D conv """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True):
        super(CenterPivotConv4d, self).__init__()

        # two 2D convolutions: one over the query dims (ha, wa), one over the
        # support dims (hb, wb)
        self.conv1 = nn.Conv2d(
            in_channels, out_channels, kernel_size[:2], stride=stride[:2], bias=bias, padding=padding[:2]
        )
        self.conv2 = nn.Conv2d(
            in_channels, out_channels, kernel_size[2:], stride=stride[2:], bias=bias, padding=padding[2:]
        )

        self.stride34 = stride[2:]
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.idx_initialized = False

    def prune(self, ct):
        bsz, ch, ha, wa, hb, wb = ct.size()
        if not self.idx_initialized:
            idxh = torch.arange(start=0, end=hb, step=self.stride[2:][0], device=ct.device)
            idxw = torch.arange(start=0, end=wb, step=self.stride[2:][1], device=ct.device)
            self.len_h = len(idxh)
            self.len_w = len(idxw)
            self.idx = (idxw.repeat(self.len_h, 1) + idxh.repeat(self.len_w, 1).t() * wb).view(-1)
            self.idx_initialized = True
        ct_pruned = ct.view(bsz, ch, ha, wa, -1).index_select(4, self.idx).view(bsz, ch, ha, wa, self.len_h, self.len_w)

        return ct_pruned

    def forward(self, x):
        if self.stride[2:][-1] > 1:
            out1 = self.prune(x)
        else:
            out1 = x
        bsz, inch, ha, wa, hb, wb = out1.size()
        out1 = out1.permute(0, 4, 5, 1, 2, 3).contiguous().view(-1, inch, ha, wa)
        out1 = self.conv1(out1)
        outch, o_ha, o_wa = out1.size(-3), out1.size(-2), out1.size(-1)
        out1 = out1.view(bsz, hb, wb, outch, o_ha, o_wa).permute(0, 3, 4, 5, 1, 2).contiguous()

        bsz, inch, ha, wa, hb, wb = x.size()
        out2 = x.permute(0, 2, 3, 1, 4, 5).contiguous().view(-1, inch, hb, wb)
        out2 = self.conv2(out2)
        outch, o_hb, o_wb = out2.size(-3), out2.size(-2), out2.size(-1)
        out2 = out2.view(bsz, ha, wa, outch, o_hb, o_wb).permute(0, 3, 1, 2, 4, 5).contiguous()

        if out1.size()[-2:] != out2.size()[-2:] and self.padding[-2:] == (0, 0):
            out1 = out1.view(bsz, outch, o_ha, o_wa, -1).sum(dim=-1)
            out2 = out2.squeeze()

        y = out1 + out2
        return y
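A shape sketch (illustrative values, not from the original code) for CenterPivotConv4d above: with unit strides and matching padding, a random 6D correlation tensor of shape (bsz, ch, ha, wa, hb, wb) keeps its spatial extents.

import torch

conv4d = CenterPivotConv4d(
    in_channels=3, out_channels=16,
    kernel_size=(3, 3, 3, 3), stride=(1, 1, 1, 1), padding=(1, 1, 1, 1),
)
x = torch.randn(2, 3, 8, 8, 8, 8)   # (bsz, ch, ha, wa, hb, wb)
print(conv4d(x).shape)              # torch.Size([2, 16, 8, 8, 8, 8])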
class Correlation:

    @classmethod
    def multilayer_correlation(cls, query_feats, support_feats, stack_ids):
        eps = 1e-5

        corrs = []
        for idx, (query_feat, support_feat) in enumerate(zip(query_feats, support_feats)):
            bsz, ch, hb, wb = support_feat.size()
            support_feat = support_feat.view(bsz, ch, -1)
            support_feat = support_feat / (support_feat.norm(dim=1, p=2, keepdim=True) + eps)

            bsz, ch, ha, wa = query_feat.size()
            query_feat = query_feat.view(bsz, ch, -1)
            query_feat = query_feat / (query_feat.norm(dim=1, p=2, keepdim=True) + eps)

            corr = torch.bmm(query_feat.transpose(1, 2), support_feat).view(bsz, ha, wa, hb, wb)
            corr = corr.clamp(min=0)
            corrs.append(corr)

        corr_l4 = torch.stack(corrs[-stack_ids[0]:]).transpose(0, 1).contiguous()
        corr_l3 = torch.stack(corrs[-stack_ids[1]:-stack_ids[0]]).transpose(0, 1).contiguous()
        corr_l2 = torch.stack(corrs[-stack_ids[2]:-stack_ids[1]]).transpose(0, 1).contiguous()

        return [corr_l4, corr_l3, corr_l2]
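A minimal sketch (random features, illustrative sizes) of the single-layer correlation computed inside multilayer_correlation above: channel-wise L2-normalise both feature maps, then a batched matrix product yields a ReLU-clamped cosine-similarity 4D volume.

import torch

bsz, ch, ha, wa, hb, wb = 2, 8, 4, 4, 4, 4
query_feat = torch.randn(bsz, ch, ha, wa).view(bsz, ch, -1)
support_feat = torch.randn(bsz, ch, hb, wb).view(bsz, ch, -1)
query_feat = query_feat / (query_feat.norm(dim=1, p=2, keepdim=True) + 1e-5)
support_feat = support_feat / (support_feat.norm(dim=1, p=2, keepdim=True) + 1e-5)
corr = torch.bmm(query_feat.transpose(1, 2), support_feat).view(bsz, ha, wa, hb, wb)
corr = corr.clamp(min=0)   # keep only positive similarities
print(corr.shape)          # torch.Size([2, 4, 4, 4, 4])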
def extract_feat_vgg(img, backbone, feat_ids, bottleneck_ids=None, lids=None):
    """ Extract intermediate features from VGG """
    feats = []
    feat = img
    for lid, module in enumerate(backbone.features):
        feat = module(feat)
        if lid in feat_ids:
            feats.append(feat.clone())
    return feats
def extract_feat_res(img, backbone, feat_ids, bottleneck_ids, lids):
    """ Extract intermediate features from ResNet """
    feats = []

    # layer 0 (stem)
    feat = backbone.conv1.forward(img)
    feat = backbone.bn1.forward(feat)
    feat = backbone.relu.forward(feat)
    feat = backbone.maxpool.forward(feat)

    # layers 1-4: walk every bottleneck block, collecting pre-ReLU features
    for hid, (bid, lid) in enumerate(zip(bottleneck_ids, lids)):
        res = feat
        feat = backbone.__getattr__('layer%d' % lid)[bid].conv1.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].bn1.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].relu.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].conv2.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].bn2.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].relu.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].conv3.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].bn3.forward(feat)

        if bid == 0:
            res = backbone.__getattr__('layer%d' % lid)[bid].downsample.forward(res)

        feat += res

        if hid + 1 in feat_ids:
            feats.append(feat.clone())

        feat = backbone.__getattr__('layer%d' % lid)[bid].relu.forward(feat)

    return feats
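A hedged sketch of how the `bottleneck_ids` and `lids` arguments of extract_feat_res might be built for a ResNet50 backbone (3, 4, 6 and 3 bottleneck blocks in layers 1-4); `feat_ids` below is an illustrative selection, not taken from the original code.

nbottlenecks = [3, 4, 6, 3]                 # ResNet50 blocks per layer
bottleneck_ids = [bid for n in nbottlenecks for bid in range(n)]          # 0,1,2, 0,1,2,3, ...
lids = [lid + 1 for lid, n in enumerate(nbottlenecks) for _ in range(n)]  # 1,1,1, 2,2,2,2, ...
feat_ids = list(range(4, 17))               # hypothetical: keep the 13 deepest block outputs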
class HPNLearner(nn.Module):
    def __init__(self, inch):
        super(HPNLearner, self).__init__()

        def make_building_block(in_channel, out_channels, kernel_sizes, spt_strides, group=4):
            assert len(out_channels) == len(kernel_sizes) == len(spt_strides)

            building_block_layers = []
            for idx, (outch, ksz, stride) in enumerate(zip(out_channels, kernel_sizes, spt_strides)):
                inch = in_channel if idx == 0 else out_channels[idx - 1]
                ksz4d = (ksz,) * 4
                str4d = (1, 1) + (stride,) * 2
                pad4d = (ksz // 2,) * 4

                building_block_layers.append(Conv4d(inch, outch, ksz4d, str4d, pad4d))
                building_block_layers.append(nn.GroupNorm(group, outch))
                building_block_layers.append(nn.ReLU(inplace=True))

            return nn.Sequential(*building_block_layers)

        outch1, outch2, outch3 = 16, 64, 128

        # squeezing building blocks
        self.encoder_layer4 = make_building_block(inch[0], [outch1, outch2, outch3], [3, 3, 3], [2, 2, 2])
        self.encoder_layer3 = make_building_block(inch[1], [outch1, outch2, outch3], [5, 3, 3], [4, 2, 2])
        self.encoder_layer2 = make_building_block(inch[2], [outch1, outch2, outch3], [5, 5, 3], [4, 4, 2])

        # mixing building blocks
        self.encoder_layer4to3 = make_building_block(outch3, [outch3, outch3, outch3], [3, 3, 3], [1, 1, 1])
        self.encoder_layer3to2 = make_building_block(outch3, [outch3, outch3, outch3], [3, 3, 3], [1, 1, 1])

        # decoder layers
        self.decoder1 = nn.Sequential(
            nn.Conv2d(outch3, outch3, (3, 3), padding=(1, 1), bias=True),
            nn.ReLU(),
            nn.Conv2d(outch3, outch2, (3, 3), padding=(1, 1), bias=True),
            nn.ReLU(),
        )
        self.decoder2 = nn.Sequential(
            nn.Conv2d(outch2, outch2, (3, 3), padding=(1, 1), bias=True),
            nn.ReLU(),
            nn.Conv2d(outch2, 2, (3, 3), padding=(1, 1), bias=True),
        )

    def interpolate_support_dims(self, hypercorr, spatial_size=None):
        bsz, ch, ha, wa, hb, wb = hypercorr.size()
        hypercorr = hypercorr.permute(0, 4, 5, 1, 2, 3).contiguous().view(bsz * hb * wb, ch, ha, wa)
        hypercorr = F.interpolate(hypercorr, spatial_size, mode='bilinear', align_corners=True)
        o_hb, o_wb = spatial_size
        hypercorr = hypercorr.view(bsz, hb, wb, ch, o_hb, o_wb).permute(0, 3, 4, 5, 1, 2).contiguous()
        return hypercorr

    def forward(self, hypercorr_pyramid):
        # encode hypercorrelations from each layer
        hypercorr_sqz4 = self.encoder_layer4(hypercorr_pyramid[0])
        hypercorr_sqz3 = self.encoder_layer3(hypercorr_pyramid[1])
        hypercorr_sqz2 = self.encoder_layer2(hypercorr_pyramid[2])

        # propagate the encoded 4D tensor coarse-to-fine
        hypercorr_sqz4 = self.interpolate_support_dims(hypercorr_sqz4, hypercorr_sqz3.size()[-4:-2])
        hypercorr_mix43 = hypercorr_sqz4 + hypercorr_sqz3
        hypercorr_mix43 = self.encoder_layer4to3(hypercorr_mix43)

        hypercorr_mix43 = self.interpolate_support_dims(hypercorr_mix43, hypercorr_sqz2.size()[-4:-2])
        hypercorr_mix432 = hypercorr_mix43 + hypercorr_sqz2
        hypercorr_mix432 = self.encoder_layer3to2(hypercorr_mix432)

        # collapse the support dims by averaging
        bsz, ch, ha, wa, hb, wb = hypercorr_mix432.size()
        hypercorr_encoded = hypercorr_mix432.view(bsz, ch, ha, wa, -1).mean(dim=-1)

        # decode the encoded tensor into a 2-channel logit mask
        hypercorr_decoded = self.decoder1(hypercorr_encoded)
        upsample_size = (hypercorr_decoded.size(-1) * 2,) * 2
        hypercorr_decoded = F.interpolate(hypercorr_decoded, upsample_size, mode='bilinear', align_corners=True)
        logit_mask = self.decoder2(hypercorr_decoded)

        return logit_mask
def test(model, dataloader, nshot):
    """ Test HSNet """

    # freeze randomness during testing for reproducibility
    utils.fix_randseed(0)
    average_meter = AverageMeter(dataloader.dataset)

    for idx, batch in enumerate(dataloader):
        # 1. forward pass
        batch = utils.to_cuda(batch)
        pred_mask = model.module.predict_mask_nshot(batch, nshot=nshot)
        assert pred_mask.size() == batch['query_mask'].size()

        # 2. evaluate the prediction
        area_inter, area_union = Evaluator.classify_prediction(pred_mask.clone(), batch)
        average_meter.update(area_inter, area_union, batch['class_id'], loss=None)
        average_meter.write_process(idx, len(dataloader), epoch=-1, write_batch_idx=1)

        # visualize predictions
        if Visualizer.visualize:
            Visualizer.visualize_prediction_batch(
                batch['support_imgs'], batch['support_masks'],
                batch['query_img'], batch['query_mask'],
                pred_mask, batch['class_id'], idx,
                area_inter[1].float() / area_union[1].float(),
            )

    # write evaluation results
    average_meter.write_result('Test', 0)
    miou, fb_iou = average_meter.compute_iou()

    return miou, fb_iou
def train(epoch, model, dataloader, optimizer, training): ' Train HSNet ' (utils.fix_randseed(None) if training else utils.fix_randseed(0)) (model.module.train_mode() if training else model.module.eval()) average_meter = AverageMeter(dataloader.dataset) for (idx, batch) in enumerate(dataloader): batch = utils.to_cuda(batch) logit_mask = model(batch['query_img'], batch['support_imgs'].squeeze(1), batch['support_masks'].squeeze(1)) pred_mask = logit_mask.argmax(dim=1) loss = model.module.compute_objective(logit_mask, batch['query_mask']) if training: optimizer.zero_grad() loss.backward() optimizer.step() (area_inter, area_union) = Evaluator.classify_prediction(pred_mask, batch) average_meter.update(area_inter, area_union, batch['class_id'], loss.detach().clone()) average_meter.write_process(idx, len(dataloader), epoch, write_batch_idx=50) average_meter.write_result(('Training' if training else 'Validation'), epoch) avg_loss = utils.mean(average_meter.loss_buf) (miou, fb_iou) = average_meter.compute_iou() return (avg_loss, miou, fb_iou)
class SSFetcher(threading.Thread): def __init__(self, parent): threading.Thread.__init__(self) self.parent = parent self.rng = numpy.random.RandomState(self.parent.seed) self.indexes = numpy.arange(parent.data_len) def run(self): diter = self.parent self.rng.shuffle(self.indexes) offset = 0 while (not diter.exit_flag): last_batch = False dialogues = [] while (len(dialogues) < diter.batch_size): if (offset == diter.data_len): if (not diter.use_infinite_loop): last_batch = True break else: self.rng.shuffle(self.indexes) offset = 0 index = self.indexes[offset] s = diter.data[index] if (len(s) > 0): if isinstance(s[0], list): s = [item for sublist in s for item in sublist] offset += 1 if ((diter.max_len == (- 1)) or (len(s) <= diter.max_len)): dialogues.append([s]) if len(dialogues): diter.queue.put(dialogues) if last_batch: diter.queue.put(None) return
class SSIterator(object): def __init__(self, dialogue_file, batch_size, seed, max_len=(- 1), use_infinite_loop=True, dtype='int32'): self.dialogue_file = dialogue_file self.batch_size = batch_size args = locals() args.pop('self') self.__dict__.update(args) self.load_files() self.exit_flag = False def load_files(self): self.data = cPickle.load(open(self.dialogue_file, 'rb')) self.data_len = len(self.data) logger.debug(('Data len is %d' % self.data_len)) def start(self): self.exit_flag = False self.queue = Queue.Queue(maxsize=1000) self.gather = SSFetcher(self) self.gather.daemon = True self.gather.start() def __del__(self): if hasattr(self, 'gather'): self.exit_flag = True self.gather.join() def __iter__(self): return self def next(self): if self.exit_flag: return None batch = self.queue.get() if (batch is None): self.exit_flag = True return batch
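# Hedged usage sketch for SSIterator; the pickle path is borrowed from the
# test prototypes further down and stands in for a real dataset. start()
# launches the SSFetcher thread; next() blocks on the queue and returns
# None once the data is exhausted, since use_infinite_loop is disabled here.
it = SSIterator('./tests/data/ttrain.dialogues.pkl', batch_size=5, seed=1234, use_infinite_loop=False)
it.start()
n_batches = 0
while True:
    batch = it.next()
    if batch is None:
        break
    n_batches += 1  # each element of batch is [token_id_list] for one dialogue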
def sharedX(value, name=None, borrow=False, dtype=None): if (dtype is None): dtype = theano.config.floatX return theano.shared(theano._asarray(value, dtype=dtype), name=name, borrow=borrow)
def Adam(grads, lr=0.0002, b1=0.1, b2=0.001, e=1e-08): updates = [] i = sharedX(0.0) i_t = (i + 1.0) fix1 = (1.0 - ((1.0 - b1) ** i_t)) fix2 = (1.0 - ((1.0 - b2) ** i_t)) lr_t = (lr * (T.sqrt(fix2) / fix1)) for (p, g) in grads.items(): m = sharedX((p.get_value() * 0.0)) v = sharedX((p.get_value() * 0.0)) m_t = ((b1 * g) + ((1.0 - b1) * m)) v_t = ((b2 * T.sqr(g)) + ((1.0 - b2) * v)) g_t = (m_t / (T.sqrt(v_t) + e)) p_t = (p - (lr_t * g_t)) updates.append((m, m_t)) updates.append((v, v_t)) updates.append((p, p_t)) updates.append((i, i_t)) return updates
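# Hedged sketch wiring this Adam into a Theano training step. The tiny
# least-squares objective is illustrative only; the point is the interface:
# `grads` maps each named shared parameter to its symbolic gradient, and the
# returned update pairs go straight into theano.function.
import numpy
import theano
import theano.tensor as T

x = T.vector('x')
y = T.scalar('y')
w = sharedX(numpy.zeros(3), name='w')
cost = T.sqr(T.dot(x, w) - y).sum()
train_step = theano.function([x, y], cost, updates=Adam({w: T.grad(cost, w)}, lr=0.0002))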
def safe_pickle(obj, filename): if os.path.isfile(filename): logger.info(('Overwriting %s.' % filename)) else: logger.info(('Saving to %s.' % filename)) with open(filename, 'wb') as f: cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
class Model(object): def __init__(self): self.floatX = theano.config.floatX self.params = [] def save(self, filename): '\n Save the model to file `filename`\n ' vals = dict([(x.name, x.get_value()) for x in self.params]) numpy.savez(filename, **vals) def load(self, filename, parameter_strings_to_ignore=[]): '\n Load the model.\n\n Any parameter which has one of the strings inside parameter_strings_to_ignore as a substring,\n will not be loaded from the file (but instead initialized as a new model, which usually means random).\n ' vals = numpy.load(filename) for p in self.params: load_parameter = True for string_to_ignore in parameter_strings_to_ignore: if (string_to_ignore in p.name): logger.debug('Initializing parameter {} as in new model'.format(p.name)) load_parameter = False if load_parameter: if (p.name in vals): logger.debug('Loading {} of {}'.format(p.name, p.get_value(borrow=True).shape)) if (p.get_value().shape != vals[p.name].shape): raise Exception('Shape mismatch: {} != {} for {}'.format(p.get_value().shape, vals[p.name].shape, p.name)) p.set_value(vals[p.name]) else: logger.error('No parameter {} given: default initialization used'.format(p.name)) unknown = (set(vals.keys()) - {p.name for p in self.params}) if len(unknown): logger.error('Unknown parameters {} given'.format(unknown))
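# Hedged save/load round trip for Model. Parameters must be *named* shared
# variables, since names key the .npz archive; the filename and parameter
# name below are placeholders, and sharedX is assumed from the snippet above.
import numpy
m = Model()
m.params = [sharedX(numpy.zeros((2, 2)), name='W_test')]
m.save('tmp_model.npz')
m.load('tmp_model.npz')  # logs errors for missing or unknown names, see above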
class Timer(object): def __init__(self): self.total = 0 def start(self): self.start_time = time.time() def finish(self): self.total += (time.time() - self.start_time)
def parse_args(): parser = argparse.ArgumentParser('Sample (with beam-search) from the session model') parser.add_argument('--ignore-unk', action='store_false', help='Allows generation procedure to output unknown words (<unk> tokens)') parser.add_argument('model_prefix', help='Path to the model prefix (without _model.npz or _state.pkl)') parser.add_argument('context', help='File of input contexts') parser.add_argument('output', help='Output file') parser.add_argument('--beam_search', action='store_true', help='Use beam search instead of random search') parser.add_argument('--n-samples', default=1, type=int, help='Number of samples') parser.add_argument('--n-turns', default=1, type=int, help='Number of dialog turns to generate') parser.add_argument('--verbose', action='store_true', default=False, help='Be verbose') parser.add_argument('changes', nargs='?', default='', help='Changes to state') return parser.parse_args()
def main(): args = parse_args() state = prototype_state() state_path = (args.model_prefix + '_state.pkl') model_path = (args.model_prefix + '_model.npz') with open(state_path, 'rb') as src: state.update(cPickle.load(src)) logging.basicConfig(level=getattr(logging, state['level']), format='%(asctime)s: %(name)s: %(levelname)s: %(message)s') model = DialogEncoderDecoder(state) sampler = search.RandomSampler(model) if args.beam_search: sampler = search.BeamSampler(model) if os.path.isfile(model_path): logger.debug('Loading previous model') model.load(model_path) else: raise Exception('Must specify a valid model path') contexts = [[]] lines = open(args.context, 'r').readlines() if len(lines): contexts = [x.strip() for x in lines] print('Sampling started...') (context_samples, context_costs) = sampler.sample(contexts, n_samples=args.n_samples, n_turns=args.n_turns, ignore_unk=args.ignore_unk, verbose=args.verbose) print('Sampling finished.') print('Saving to file...') output_handle = open(args.output, 'w') for context_sample in context_samples: output_handle.write(('\t'.join(context_sample) + '\n')) output_handle.close() print('Saving to file finished.') print('All done!')
def _itersplit(l, splitters): current = [] for item in l: if (item in splitters): (yield current) current = [] else: current.append(item) (yield current)
def magicsplit(l, *splitters): return [subl for subl in _itersplit(l, splitters) if subl]
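# Worked example: magicsplit cuts a flat token-id stream at separator ids and
# drops both the separators and any empty segments -- e.g. splitting at the
# end-of-utterance symbol (id 1 in prototype_state below).
assert magicsplit([5, 6, 1, 7, 1, 1, 8], 1) == [[5, 6], [7], [8]]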
def prototype_state(): state = {} state['seed'] = 1234 state['level'] = 'DEBUG' state['oov'] = '<unk>' state['end_sym_utterance'] = '</s>' state['unk_sym'] = 0 state['eos_sym'] = 1 state['eod_sym'] = 2 state['first_speaker_sym'] = 3 state['second_speaker_sym'] = 4 state['third_speaker_sym'] = 5 state['minor_speaker_sym'] = 6 state['voice_over_sym'] = 7 state['off_screen_sym'] = 8 state['pause_sym'] = 9 state['reset_hidden_states_between_subsequences'] = False state['maxout_out'] = False state['deep_out'] = True state['deep_dialogue_input'] = False state['sent_rec_activation'] = 'lambda x: T.tanh(x)' state['dialogue_rec_activation'] = 'lambda x: T.tanh(x)' state['decoder_bias_type'] = 'all' state['utterance_encoder_gating'] = 'GRU' state['dialogue_encoder_gating'] = 'GRU' state['utterance_decoder_gating'] = 'GRU' state['bidirectional_utterance_encoder'] = False state['direct_connection_between_encoders_and_decoder'] = False state['deep_direct_connection'] = False state['collaps_to_standard_rnn'] = False state['reset_utterance_decoder_at_end_of_utterance'] = True state['reset_utterance_encoder_at_end_of_utterance'] = True state['qdim_encoder'] = 512 state['qdim_decoder'] = 512 state['sdim'] = 1000 state['rankdim'] = 256 state['add_latent_gaussian_per_utterance'] = False state['condition_latent_variable_on_dialogue_encoder'] = False state['condition_latent_variable_on_dcgm_encoder'] = False state['latent_gaussian_per_utterance_dim'] = 10 state['latent_gaussian_linear_dynamics'] = False state['scale_latent_variable_variances'] = 10 state['condition_decoder_only_on_latent_variable'] = False state['train_latent_gaussians_with_kl_divergence_annealing'] = False state['kl_divergence_annealing_rate'] = (1.0 / 60000.0) state['decoder_drop_previous_input_tokens'] = False state['decoder_drop_previous_input_tokens_rate'] = 0.75 state['initialize_from_pretrained_word_embeddings'] = False state['pretrained_word_embeddings_file'] = '' state['fix_pretrained_word_embeddings'] = False state['fix_encoder_parameters'] = False state['updater'] = 'adam' state['use_nce'] = False state['cutoff'] = 1.0 state['lr'] = 0.0002 state['patience'] = 20 state['cost_threshold'] = 1.003 state['bs'] = 80 state['sort_k_batches'] = 20 state['max_grad_steps'] = 80 state['save_dir'] = './' state['train_freq'] = 10 state['valid_freq'] = 5000 state['loop_iters'] = 3000000 state['time_stop'] = ((24 * 60) * 31) state['minerr'] = (- 1) return state
def prototype_test(): state = prototype_state() state['train_dialogues'] = './tests/data/ttrain.dialogues.pkl' state['test_dialogues'] = './tests/data/ttest.dialogues.pkl' state['valid_dialogues'] = './tests/data/tvalid.dialogues.pkl' state['dictionary'] = './tests/data/ttrain.dict.pkl' state['save_dir'] = './tests/models/' state['max_grad_steps'] = 20 state['initialize_from_pretrained_word_embeddings'] = False state['pretrained_word_embeddings_file'] = './tests/data/MT_WordEmb.pkl' state['fix_pretrained_word_embeddings'] = False state['valid_freq'] = 50 state['collaps_to_standard_rnn'] = False state['prefix'] = 'testmodel_' state['updater'] = 'adam' state['maxout_out'] = False state['deep_out'] = True state['deep_dialogue_input'] = True state['utterance_encoder_gating'] = 'GRU' state['dialogue_encoder_gating'] = 'GRU' state['utterance_decoder_gating'] = 'GRU' state['bidirectional_utterance_encoder'] = True state['direct_connection_between_encoders_and_decoder'] = True state['bs'] = 5 state['sort_k_batches'] = 1 state['use_nce'] = False state['decoder_bias_type'] = 'all' state['qdim_encoder'] = 15 state['qdim_decoder'] = 5 state['sdim'] = 10 state['rankdim'] = 10 return state
def prototype_test_variational(): state = prototype_state() state['train_dialogues'] = './tests/data/ttrain.dialogues.pkl' state['test_dialogues'] = './tests/data/ttest.dialogues.pkl' state['valid_dialogues'] = './tests/data/tvalid.dialogues.pkl' state['dictionary'] = './tests/data/ttrain.dict.pkl' state['save_dir'] = './tests/models/' state['max_grad_steps'] = 20 state['initialize_from_pretrained_word_embeddings'] = True state['pretrained_word_embeddings_file'] = './tests/data/MT_WordEmb.pkl' state['fix_pretrained_word_embeddings'] = True state['valid_freq'] = 5 state['collaps_to_standard_rnn'] = False state['prefix'] = 'testmodel_' state['updater'] = 'adam' state['maxout_out'] = False state['deep_out'] = True state['deep_dialogue_input'] = True state['direct_connection_between_encoders_and_decoder'] = True state['deep_direct_connection'] = True state['utterance_encoder_gating'] = 'GRU' state['dialogue_encoder_gating'] = 'GRU' state['utterance_decoder_gating'] = 'GRU' state['bidirectional_utterance_encoder'] = True state['add_latent_gaussian_per_utterance'] = True state['latent_gaussian_per_utterance_dim'] = 5 state['condition_latent_variable_on_dialogue_encoder'] = True state['condition_latent_variable_on_dcgm_encoder'] = False state['train_latent_gaussians_with_kl_divergence_annealing'] = True state['kl_divergence_annealing_rate'] = (1.0 / 60000.0) state['latent_gaussian_linear_dynamics'] = True state['decoder_drop_previous_input_tokens'] = True state['decoder_drop_previous_input_tokens_rate'] = 0.75 state['bs'] = 5 state['sort_k_batches'] = 1 state['use_nce'] = False state['decoder_bias_type'] = 'all' state['qdim_encoder'] = 15 state['qdim_decoder'] = 5 state['sdim'] = 10 state['rankdim'] = 10 return state
def prototype_twitter_lstm(): state = prototype_state() state['train_dialogues'] = '../TwitterData/Training.dialogues.pkl' state['test_dialogues'] = '../TwitterData/Test.dialogues.pkl' state['valid_dialogues'] = '../TwitterData/Validation.dialogues.pkl' state['dictionary'] = '../TwitterData/Dataset.dict.pkl' state['save_dir'] = 'Output' state['max_grad_steps'] = 80 state['valid_freq'] = 5000 state['prefix'] = 'TwitterModel_' state['updater'] = 'adam' state['deep_dialogue_input'] = True state['deep_out'] = True state['collaps_to_standard_rnn'] = True state['bs'] = 80 state['decoder_bias_type'] = 'all' state['direct_connection_between_encoders_and_decoder'] = False state['deep_direct_connection'] = False state['reset_utterance_decoder_at_end_of_utterance'] = False state['reset_utterance_encoder_at_end_of_utterance'] = False state['lr'] = 0.0001 state['qdim_encoder'] = 10 state['qdim_decoder'] = 2000 state['sdim'] = 10 state['rankdim'] = 400 return state
def prototype_twitter_HRED(): state = prototype_state() state['train_dialogues'] = '../TwitterData/Training.dialogues.pkl' state['test_dialogues'] = '../TwitterData/Test.dialogues.pkl' state['valid_dialogues'] = '../TwitterData/Validation.dialogues.pkl' state['dictionary'] = '../TwitterData/Dataset.dict.pkl' state['save_dir'] = 'Output' state['max_grad_steps'] = 80 state['valid_freq'] = 5000 state['prefix'] = 'TwitterModel_' state['updater'] = 'adam' state['bidirectional_utterance_encoder'] = True state['deep_dialogue_input'] = True state['deep_out'] = True state['bs'] = 80 state['decoder_bias_type'] = 'selective' state['direct_connection_between_encoders_and_decoder'] = False state['deep_direct_connection'] = False state['reset_utterance_decoder_at_end_of_utterance'] = True state['reset_utterance_encoder_at_end_of_utterance'] = False state['lr'] = 0.0001 state['qdim_encoder'] = 1000 state['qdim_decoder'] = 1000 state['sdim'] = 1000 state['rankdim'] = 400 state['utterance_decoder_gating'] = 'GRU' return state
def prototype_twitter_HRED_StandardBias(): state = prototype_state() state['train_dialogues'] = '../TwitterData/Training.dialogues.pkl' state['test_dialogues'] = '../TwitterData/Test.dialogues.pkl' state['valid_dialogues'] = '../TwitterData/Validation.dialogues.pkl' state['dictionary'] = '../TwitterData/Dataset.dict.pkl' state['save_dir'] = 'Output' state['max_grad_steps'] = 80 state['valid_freq'] = 5000 state['prefix'] = 'TwitterModel_' state['updater'] = 'adam' state['bidirectional_utterance_encoder'] = True state['deep_dialogue_input'] = True state['deep_out'] = True state['bs'] = 80 state['decoder_bias_type'] = 'all' state['direct_connection_between_encoders_and_decoder'] = False state['deep_direct_connection'] = False state['reset_utterance_decoder_at_end_of_utterance'] = True state['reset_utterance_encoder_at_end_of_utterance'] = True state['lr'] = 0.0002 state['qdim_encoder'] = 1000 state['qdim_decoder'] = 1000 state['sdim'] = 1000 state['rankdim'] = 400 state['utterance_decoder_gating'] = 'LSTM' return state
def prototype_twitter_VHRED(): state = prototype_state() state['train_dialogues'] = '../TwitterData/Training.dialogues.pkl' state['test_dialogues'] = '../TwitterData/Test.dialogues.pkl' state['valid_dialogues'] = '../TwitterData/Validation.dialogues.pkl' state['dictionary'] = '../TwitterData/Dataset.dict.pkl' state['save_dir'] = 'Output' state['max_grad_steps'] = 80 state['valid_freq'] = 5000 state['prefix'] = 'TwitterModel_' state['updater'] = 'adam' state['bidirectional_utterance_encoder'] = True state['deep_dialogue_input'] = True state['deep_out'] = True state['bs'] = 80 state['decoder_bias_type'] = 'selective' state['direct_connection_between_encoders_and_decoder'] = False state['deep_direct_connection'] = False state['reset_utterance_decoder_at_end_of_utterance'] = True state['reset_utterance_encoder_at_end_of_utterance'] = False state['lr'] = 0.0001 state['qdim_encoder'] = 1000 state['qdim_decoder'] = 1000 state['sdim'] = 1000 state['rankdim'] = 400 state['utterance_decoder_gating'] = 'GRU' state['add_latent_gaussian_per_utterance'] = True state['latent_gaussian_per_utterance_dim'] = 100 state['scale_latent_variable_variances'] = 0.1 state['condition_latent_variable_on_dialogue_encoder'] = True state['train_latent_gaussians_with_kl_divergence_annealing'] = True state['kl_divergence_annealing_rate'] = (1.0 / 60000.0) state['decoder_drop_previous_input_tokens'] = True state['decoder_drop_previous_input_tokens_rate'] = 0.75 state['patience'] = 20 return state
def prototype_twitter_VHRED_StandardBias(): state = prototype_state() state['train_dialogues'] = '../TwitterData/Training.dialogues.pkl' state['test_dialogues'] = '../TwitterData/Test.dialogues.pkl' state['valid_dialogues'] = '../TwitterData/Validation.dialogues.pkl' state['dictionary'] = '../TwitterData/Dataset.dict.pkl' state['save_dir'] = 'Output' state['max_grad_steps'] = 80 state['valid_freq'] = 5000 state['prefix'] = 'TwitterModel_' state['updater'] = 'adam' state['bidirectional_utterance_encoder'] = True state['deep_dialogue_input'] = True state['deep_out'] = True state['bs'] = 80 state['decoder_bias_type'] = 'all' state['direct_connection_between_encoders_and_decoder'] = False state['deep_direct_connection'] = False state['reset_utterance_decoder_at_end_of_utterance'] = True state['reset_utterance_encoder_at_end_of_utterance'] = True state['lr'] = 0.0002 state['qdim_encoder'] = 1000 state['qdim_decoder'] = 1000 state['sdim'] = 1000 state['rankdim'] = 400 state['utterance_decoder_gating'] = 'LSTM' state['add_latent_gaussian_per_utterance'] = True state['latent_gaussian_per_utterance_dim'] = 100 state['scale_latent_variable_variances'] = 0.1 state['condition_latent_variable_on_dialogue_encoder'] = True state['train_latent_gaussians_with_kl_divergence_annealing'] = True state['kl_divergence_annealing_rate'] = (1.0 / 60000.0) state['decoder_drop_previous_input_tokens'] = True state['decoder_drop_previous_input_tokens_rate'] = 0.75 state['patience'] = 20 return state
def prototype_ubuntu_LSTM(): state = prototype_state() state['end_sym_utterance'] = '__eot__' state['unk_sym'] = 0 state['eos_sym'] = 1 state['eod_sym'] = (- 1) state['first_speaker_sym'] = (- 1) state['second_speaker_sym'] = (- 1) state['third_speaker_sym'] = (- 1) state['minor_speaker_sym'] = (- 1) state['voice_over_sym'] = (- 1) state['off_screen_sym'] = (- 1) state['pause_sym'] = (- 1) state['train_dialogues'] = '../UbuntuData/Training.dialogues.pkl' state['test_dialogues'] = '../UbuntuData/Test.dialogues.pkl' state['valid_dialogues'] = '../UbuntuData/Validation.dialogues.pkl' state['dictionary'] = '../UbuntuData/Dataset.dict.pkl' state['save_dir'] = 'Output' state['max_grad_steps'] = 80 state['valid_freq'] = 5000 state['prefix'] = 'UbuntuModel_' state['updater'] = 'adam' state['bidirectional_utterance_encoder'] = False state['deep_dialogue_input'] = True state['deep_out'] = True state['collaps_to_standard_rnn'] = True state['bs'] = 80 state['decoder_bias_type'] = 'all' state['direct_connection_between_encoders_and_decoder'] = False state['deep_direct_connection'] = False state['reset_utterance_decoder_at_end_of_utterance'] = False state['reset_utterance_encoder_at_end_of_utterance'] = False state['lr'] = 0.0002 state['qdim_encoder'] = 10 state['qdim_decoder'] = 2000 state['sdim'] = 10 state['rankdim'] = 300 state['utterance_decoder_gating'] = 'LSTM' return state
def prototype_ubuntu_HRED(): state = prototype_state() state['end_sym_utterance'] = '__eot__' state['unk_sym'] = 0 state['eos_sym'] = 1 state['eod_sym'] = (- 1) state['first_speaker_sym'] = (- 1) state['second_speaker_sym'] = (- 1) state['third_speaker_sym'] = (- 1) state['minor_speaker_sym'] = (- 1) state['voice_over_sym'] = (- 1) state['off_screen_sym'] = (- 1) state['pause_sym'] = (- 1) state['train_dialogues'] = '../UbuntuData/Training.dialogues.pkl' state['test_dialogues'] = '../UbuntuData/Test.dialogues.pkl' state['valid_dialogues'] = '../UbuntuData/Validation.dialogues.pkl' state['dictionary'] = '../UbuntuData/Dataset.dict.pkl' state['save_dir'] = 'Output' state['max_grad_steps'] = 80 state['valid_freq'] = 5000 state['prefix'] = 'UbuntuModel_' state['updater'] = 'adam' state['bidirectional_utterance_encoder'] = False state['deep_dialogue_input'] = True state['deep_out'] = True state['bs'] = 80 state['reset_utterance_decoder_at_end_of_utterance'] = True state['reset_utterance_encoder_at_end_of_utterance'] = True state['utterance_decoder_gating'] = 'LSTM' state['lr'] = 0.0002 state['qdim_encoder'] = 500 state['qdim_decoder'] = 500 state['sdim'] = 1000 state['rankdim'] = 300 return state
def prototype_ubuntu_VHRED(): state = prototype_state() state['end_sym_utterance'] = '__eot__' state['unk_sym'] = 0 state['eos_sym'] = 1 state['eod_sym'] = (- 1) state['first_speaker_sym'] = (- 1) state['second_speaker_sym'] = (- 1) state['third_speaker_sym'] = (- 1) state['minor_speaker_sym'] = (- 1) state['voice_over_sym'] = (- 1) state['off_screen_sym'] = (- 1) state['pause_sym'] = (- 1) state['train_dialogues'] = '../UbuntuData/Training.dialogues.pkl' state['test_dialogues'] = '../UbuntuData/Test.dialogues.pkl' state['valid_dialogues'] = '../UbuntuData/Validation.dialogues.pkl' state['dictionary'] = '../UbuntuData/Dataset.dict.pkl' state['save_dir'] = 'Output' state['max_grad_steps'] = 80 state['valid_freq'] = 5000 state['prefix'] = 'UbuntuModel_' state['updater'] = 'adam' state['bidirectional_utterance_encoder'] = False state['deep_dialogue_input'] = True state['deep_out'] = True state['bs'] = 80 state['reset_utterance_decoder_at_end_of_utterance'] = True state['reset_utterance_encoder_at_end_of_utterance'] = True state['utterance_decoder_gating'] = 'LSTM' state['lr'] = 0.0002 state['qdim_encoder'] = 500 state['qdim_decoder'] = 500 state['sdim'] = 1000 state['rankdim'] = 300 state['add_latent_gaussian_per_utterance'] = True state['latent_gaussian_per_utterance_dim'] = 100 state['scale_latent_variable_variances'] = 0.1 state['condition_latent_variable_on_dialogue_encoder'] = True state['train_latent_gaussians_with_kl_divergence_annealing'] = True state['kl_divergence_annealing_rate'] = (1.0 / 75000.0) state['decoder_drop_previous_input_tokens'] = True state['decoder_drop_previous_input_tokens_rate'] = 0.75 state['patience'] = 20 return state
def DPrint(name, var): if (PRINT_VARS is False): return var return theano.printing.Print(name)(var)
def Adam(grads, lr=0.0002, b1=0.1, b2=0.001, e=1e-08): return adam.Adam(grads, lr, b1, b2, e)
def Adagrad(grads, lr): updates = OrderedDict() for param in grads.keys(): sum_square_grad = sharedX((param.get_value() * 0.0)) if (param.name is not None): sum_square_grad.name = ('sum_square_grad_' + param.name) new_sum_squared_grad = (sum_square_grad + T.sqr(grads[param])) delta_x_t = (((- lr) / T.sqrt((numpy.float32(1e-05) + new_sum_squared_grad))) * grads[param]) updates[sum_square_grad] = new_sum_squared_grad updates[param] = (param + delta_x_t) return updates
def Adadelta(grads, decay=0.95, epsilon=1e-06): updates = OrderedDict() for param in grads.keys(): mean_square_grad = sharedX((param.get_value() * 0.0)) mean_square_dx = sharedX((param.get_value() * 0.0)) if (param.name is not None): mean_square_grad.name = ('mean_square_grad_' + param.name) mean_square_dx.name = ('mean_square_dx_' + param.name) new_mean_squared_grad = ((decay * mean_square_grad) + ((1 - decay) * T.sqr(grads[param]))) rms_dx_tm1 = T.sqrt((mean_square_dx + epsilon)) rms_grad_t = T.sqrt((new_mean_squared_grad + epsilon)) delta_x_t = (((- rms_dx_tm1) / rms_grad_t) * grads[param]) new_mean_square_dx = ((decay * mean_square_dx) + ((1 - decay) * T.sqr(delta_x_t))) updates[mean_square_grad] = new_mean_squared_grad updates[mean_square_dx] = new_mean_square_dx updates[param] = (param + delta_x_t) return updates
def RMSProp(grads, lr, decay=0.95, eta=0.9, epsilon=1e-06): ' \n RMSProp gradient method\n ' updates = OrderedDict() for param in grads.keys(): mean_square_grad = sharedX((param.get_value() * 0.0)) mean_grad = sharedX((param.get_value() * 0.0)) delta_grad = sharedX((param.get_value() * 0.0)) if (param.name is None): raise ValueError('Model parameters must be named.') mean_square_grad.name = ('mean_square_grad_' + param.name) new_mean_grad = ((decay * mean_grad) + ((1 - decay) * grads[param])) new_mean_squared_grad = ((decay * mean_square_grad) + ((1 - decay) * T.sqr(grads[param]))) scaled_grad = (grads[param] / T.sqrt(((new_mean_squared_grad - (new_mean_grad ** 2)) + epsilon))) new_delta_grad = ((eta * delta_grad) - (lr * scaled_grad)) updates[delta_grad] = new_delta_grad updates[mean_grad] = new_mean_grad updates[mean_square_grad] = new_mean_squared_grad updates[param] = (param + new_delta_grad) return updates
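# Hedged sketch: Adam, Adagrad, Adadelta and RMSProp above all share one
# interface -- a dict {shared parameter: symbolic gradient} in, an update
# mapping out -- which is what lets the state dicts below switch optimizers
# behind a single 'updater' string. Minimal example with RMSProp:
import numpy
import theano
import theano.tensor as T

p = sharedX(numpy.ones(4), name='p')  # RMSProp requires named parameters
loss = T.sqr(p).sum()
step = theano.function([], loss, updates=RMSProp({p: T.grad(loss, p)}, lr=0.01))
for _ in range(10):
    step()  # each call shrinks p toward zero in place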
class Maxout(object): def __init__(self, maxout_part): self.maxout_part = maxout_part def __call__(self, x): shape = x.shape if (x.ndim == 2): shape1 = T.cast((shape[1] / self.maxout_part), 'int64') shape2 = T.cast(self.maxout_part, 'int64') x = x.reshape([shape[0], shape1, shape2]) x = x.max(2) else: shape1 = T.cast((shape[2] / self.maxout_part), 'int64') shape2 = T.cast(self.maxout_part, 'int64') x = x.reshape([shape[0], shape[1], shape1, shape2]) x = x.max(3) return x
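# Maxout check: with maxout_part=2, each consecutive pair of features is
# collapsed to its maximum, halving the last dimension.
import numpy
import theano
import theano.tensor as T

xm = T.matrix('xm')
maxout_fn = theano.function([xm], Maxout(2)(xm))
out = maxout_fn(numpy.array([[1.0, 4.0, 3.0, 2.0]], dtype=theano.config.floatX))
assert out.tolist() == [[4.0, 3.0]]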
def UniformInit(rng, sizeX, sizeY, lb=(- 0.01), ub=0.01): ' Uniform Init ' return rng.uniform(size=(sizeX, sizeY), low=lb, high=ub).astype(theano.config.floatX)
def OrthogonalInit(rng, sizeX, sizeY, sparsity=(- 1), scale=1): ' \n Orthogonal Initialization\n ' sizeX = int(sizeX) sizeY = int(sizeY) assert (sizeX == sizeY), 'for orthogonal init, sizeX == sizeY' if (sparsity < 0): sparsity = sizeY else: sparsity = numpy.minimum(sizeY, sparsity) values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX) for dx in xrange(sizeX): perm = rng.permutation(sizeY) new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,)) values[(dx, perm[:sparsity])] = new_vals if ((sizeX * sizeY) > 5000000): import scipy (u, s, v) = scipy.linalg.svd(values) else: (u, s, v) = numpy.linalg.svd(values) values = (u * scale) return values.astype(theano.config.floatX)
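# Quick numpy check of OrthogonalInit: the rows come from the U factor of an
# SVD, so W.dot(W.T) should equal (scale ** 2) times the identity, up to
# float32 tolerance.
import numpy
rng = numpy.random.RandomState(1234)
W = OrthogonalInit(rng, 8, 8, scale=1.0)
assert numpy.allclose(W.dot(W.T), numpy.eye(8), atol=1e-05)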
def GrabProbs(classProbs, target, gRange=None): if (classProbs.ndim > 2): classProbs = classProbs.reshape(((classProbs.shape[0] * classProbs.shape[1]), classProbs.shape[2])) else: classProbs = classProbs if (target.ndim > 1): tflat = target.flatten() else: tflat = target return T.diag(classProbs.T[tflat])
def NormalInit(rng, sizeX, sizeY, scale=0.01, sparsity=(- 1)): ' \n Normal Initialization\n ' sizeX = int(sizeX) sizeY = int(sizeY) if (sparsity < 0): sparsity = sizeY sparsity = numpy.minimum(sizeY, sparsity) values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX) for dx in xrange(sizeX): perm = rng.permutation(sizeY) new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,)) values[(dx, perm[:sparsity])] = new_vals return values.astype(theano.config.floatX)
def ConvertTimedelta(seconds_diff): hours = (seconds_diff // 3600) minutes = ((seconds_diff % 3600) // 60) seconds = (seconds_diff % 60) return (hours, minutes, seconds)
def SoftMax(x): x = T.exp((x - T.max(x, axis=(x.ndim - 1), keepdims=True))) return (x / T.sum(x, axis=(x.ndim - 1), keepdims=True))
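# The max-subtraction in SoftMax is the usual overflow guard: exp() only sees
# non-positive values, while the normalized result is mathematically
# unchanged. Check on inputs that would overflow a naive exp():
import numpy
import theano
import theano.tensor as T

xs = T.matrix('xs')
softmax_fn = theano.function([xs], SoftMax(xs))
probs = softmax_fn(numpy.array([[1000.0, 1001.0], [0.0, 0.0]], dtype=theano.config.floatX))
assert numpy.allclose(probs.sum(axis=1), 1.0)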
def VariableNormalization(x, mask=None, axes=0): if (mask is not None): mask = mask.dimshuffle(0, 1, 'x') x_masked = (x * mask) average = (T.sum(x_masked, axis=axes) / T.sum(mask, axis=axes)) if (average.ndim == 1): x_zero_average = (x_masked - average.dimshuffle('x', 'x', 0)) else: x_zero_average = (x_masked - average.dimshuffle('x', 0)) x_std = T.sqrt(((T.sum((x_zero_average ** 2), axis=axes) / T.sum(mask, axis=axes)) + 1e-07)) return (x_zero_average / x_std) else: return ((x - T.mean(x, axis=axes)) / T.sqrt((T.var(x, axis=axes) + 1e-07)))
@jit def function(x): return x
@njit def njit_f(x): return x
@jit('int32(int32, int32)') def int32_sum(a, b): return (a + b)
@jit def int32_sum_r1(a: int, b: int): return (a + b)
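# Usage note on the numba decorators above: the bare @jit/@njit versions
# compile lazily, specializing on the argument types seen at the first call,
# while the string signature compiles int32_sum eagerly for exactly
# (int32, int32) -> int32; the Python annotations in int32_sum_r1 are not
# used by classic @jit for typing.
print(njit_f(2.5))         # first call compiles a float64 specialization
print(int32_sum(2, 3))     # already compiled -> 5
print(int32_sum_r1(2, 3))  # specialized lazily from the runtime types -> 5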
def list_norm_inplace(buff): r_mean = np.mean(buff) r_std = np.std(buff) for ii in range(len(buff)): buff[ii] = ((buff[ii] - r_mean) / r_std)
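# Sanity check for list_norm_inplace: the buffer is standardized in place to
# zero mean and unit standard deviation.
import numpy as np
buff = [1.0, 2.0, 3.0, 4.0]
list_norm_inplace(buff)
assert abs(np.mean(buff)) < 1e-09
assert abs(np.std(buff) - 1.0) < 1e-09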
def plot_durations(episode_durations): plt.figure(2) plt.clf() durations_t = TC.FloatTensor(episode_durations) plt.title('Training...') plt.xlabel('Episode') plt.ylabel('Duration') plt.plot(durations_t.numpy()) if (len(durations_t) >= 100): means = durations_t.unfold(0, 100, 1).mean(1).view((- 1)) means = TC.cat((TC.zeros(99), means)) plt.plot(means.numpy()) plt.show()
def plot_durations_ii(ii, episode_durations, ee, ee_duration=100): episode_durations.append((ii + 1)) if (((ee + 1) % ee_duration) == 0): clear_output() plot_durations(episode_durations)
class PGNET(nn.Module): def __init__(self, num_state): super(PGNET, self).__init__() self.fc_in = nn.Linear(num_state, 24) self.fc_hidden = nn.Linear(24, 36) self.fc_out = nn.Linear(36, 1) def forward(self, x): x = F.relu(self.fc_in(x)) x = F.relu(self.fc_hidden(x)) x = TC.sigmoid(self.fc_out(x)) return x
class PGNET_AGENT(PGNET): def run(self, env): for ee in range(self.num_episode): self.run_episode(env, ee) self.train_episode(ee)
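# PGNET_AGENT relies on num_episode, run_episode and train_episode, none of
# which are defined in this snippet. A hedged sketch of run_episode for a
# binary-action environment (the single sigmoid output of PGNET suggests one
# Bernoulli action probability, e.g. CartPole with the classic gym API):
import torch as TC
from torch.distributions import Bernoulli

def run_episode(self, env, ee):
    state = env.reset()  # classic gym API assumed: reset() returns obs only
    self.log_probs, self.rewards = [], []
    done = False
    while not done:
        prob = self(TC.as_tensor(state, dtype=TC.float32))
        dist = Bernoulli(prob)
        action = dist.sample()
        state, reward, done, _ = env.step(int(action.item()))
        self.log_probs.append(dist.log_prob(action))
        self.rewards.append(reward)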