code stringlengths 17 6.64M |
|---|
def test_notfittederror():
    """Calling transform before fit must raise NotFittedError."""
    unfitted = WidePreprocessor(wide_cols, cross_cols)
    with pytest.raises(NotFittedError):
        unfitted.transform(df_letters)
|
def test_rules():
    """Spot-check each of the fastai-inspired text cleaning rules.

    BUG FIX: two inputs were mangled by an earlier HTML copy/extraction:
    the '&nbsp;' entity was stripped from the fix_html input (the expected
    'HTML& text' output only arises when 'nbsp;' is replaced by a space),
    and the repeated spaces were collapsed in the rm_useless_spaces input,
    making that assertion vacuous. Both inputs are restored here.
    """
    # '&nbsp;' -> '& ' ('nbsp;' becomes a space) and '<br />' -> '\n'
    assert (fix_html('Some HTML&nbsp;text<br />') == 'Some HTML& text\n')
    # runs of one repeated char collapse to 'xxrep <count> <char>'
    assert (replace_rep("I'm so excited!!!!!!!!") == "I'm so excited xxrep 8 ! ")
    # runs of one repeated word collapse to 'xxwrep <count> <word>'
    assert (replace_wrep("I've never ever ever ever ever ever ever ever done this.") == "I've never xxwrep 7 ever done this.")
    # multiple consecutive spaces collapse to one
    assert (rm_useless_spaces('Inconsistent   use  of spaces.') == 'Inconsistent use of spaces.')
    # special characters get surrounding spaces
    assert (spec_add_spaces('I #like to #put #hashtags #everywhere!') == 'I # like to # put # hashtags # everywhere!')
    # all-caps tokens are lowercased and prefixed with 'xxup'
    assert (replace_all_caps(['Mark', 'CAPITALIZED', 'Only']) == ['Mark', 'xxup', 'capitalized', 'Only'])
    # capitalised tokens are lowercased and prefixed with 'xxmaj'
    assert (deal_caps(['Mark', 'Capitalized', 'lower', 'All']) == ['xxmaj', 'mark', 'xxmaj', 'capitalized', 'lower', 'xxmaj', 'all'])
|
def test_tokenize():
    """Tokenizer splits text and inserts the 'xxmaj'/'xxup' case markers."""
    samples = [
        'one two three four',
        'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.',
        "I'm suddenly SHOUTING FOR NO REASON",
    ]
    tokens = Tokenizer(BaseTokenizer).process_all(samples)
    # plain lowercase text is split on whitespace
    assert tokens[0] == ['one', 'two', 'three', 'four']
    # capitalised words become lowercase preceded by 'xxmaj'
    assert tokens[1][:6] == ['xxmaj', 'lorem', 'ipsum', 'dolor', 'sit', 'amet,']
    # all-caps words become lowercase preceded by 'xxup'
    assert ' '.join(tokens[2]) == "xxmaj i'm suddenly xxup shouting xxup for xxup no xxup reason"
|
def test_tokenize_handles_empty_lines():
    """Blank lines survive tokenization as literal newline tokens."""
    tokens = Tokenizer(BaseTokenizer).process_all(
        ['= Markdown Title =\n\nMakrdown Title does not have spaces around']
    )
    expected = [
        '=', 'xxmaj', 'markdown', 'xxmaj', 'title', '=', '\n', '\n',
        'xxmaj', 'makrdown', 'xxmaj', 'title', 'does', 'not', 'have',
        'spaces', 'around',
    ]
    assert tokens[0] == expected
|
def test_tokenize_ignores_extraneous_space():
    """A trailing space must not produce an empty token."""
    result = Tokenizer(BaseTokenizer).process_all(['test '])
    assert result[0] == ['test']
|
def test_numericalize_and_textify():
    """Vocab maps tokens <-> indices; rare tokens map to index 0 (unknown)."""
    token_lists = [
        ['ok', '!', 'xxmaj', 'nice', '!', 'anti', '-', 'virus'],
        ['!', 'xxmaj', 'meg', 'xxmaj', 'nice', 'meg'],
    ]
    vocab = Vocab(max_vocab=20, min_freq=2).create(token_lists)
    # tokens seen fewer than min_freq times become 0 (the unknown id)
    assert vocab.numericalize(token_lists[0]) == [0, 9, 5, 10, 9, 0, 0, 0]
    assert vocab.textify([0, 3, 10, 11, 9]) == 'xxunk xxeos nice meg !'
|
@pytest.mark.parametrize('as_frame', [True, False])
def test_load_bio_kdd04(as_frame):
    """Dataset loads with the expected shape and container type."""
    data = load_bio_kdd04(as_frame=as_frame)
    expected_type = pd.DataFrame if as_frame else np.ndarray
    assert data.shape == (145751, 77)
    assert type(data) is expected_type
|
@pytest.mark.parametrize('as_frame', [True, False])
def test_load_adult(as_frame):
    """Dataset loads with the expected shape and container type."""
    data = load_adult(as_frame=as_frame)
    expected_type = pd.DataFrame if as_frame else np.ndarray
    assert data.shape == (48842, 15)
    assert type(data) is expected_type
|
@pytest.mark.parametrize('as_frame', [True, False])
def test_load_ecoli(as_frame):
    """Dataset loads with the expected shape and container type."""
    data = load_ecoli(as_frame=as_frame)
    expected_type = pd.DataFrame if as_frame else np.ndarray
    assert data.shape == (336, 9)
    assert type(data) is expected_type
|
@pytest.mark.parametrize('as_frame', [True, False])
def test_load_womens_ecommerce(as_frame):
    """Dataset loads with the expected shape and container type."""
    data = load_womens_ecommerce(as_frame=as_frame)
    expected_type = pd.DataFrame if as_frame else np.ndarray
    assert data.shape == (23486, 10)
    assert type(data) is expected_type
|
@pytest.mark.parametrize('as_frame', [True, False])
def test_load_rf1(as_frame):
    """Dataset loads with the expected shape and container type."""
    data = load_rf1(as_frame=as_frame)
    expected_type = pd.DataFrame if as_frame else np.ndarray
    assert data.shape == (4108, 72)
    assert type(data) is expected_type
|
@pytest.mark.parametrize('as_frame', [True, False])
def test_load_birds(as_frame):
    """Dataset loads with the expected shape and container type."""
    data = load_birds(as_frame=as_frame)
    expected_type = pd.DataFrame if as_frame else np.ndarray
    assert data.shape == (322, 279)
    assert type(data) is expected_type
|
@pytest.mark.parametrize('as_frame', [True, False])
def test_load_california_housing(as_frame):
    """Dataset loads with the expected shape and container type."""
    data = load_california_housing(as_frame=as_frame)
    expected_type = pd.DataFrame if as_frame else np.ndarray
    assert data.shape == (20640, 9)
    assert type(data) is expected_type
|
@pytest.mark.parametrize('as_frame', [True, False])
def test_load_movielens100k(as_frame):
    """All three MovieLens tables load with the expected shapes and types."""
    data, users, items = load_movielens100k(as_frame=as_frame)
    expected_type = pd.DataFrame if as_frame else np.ndarray
    assert data.shape == (100000, 4)
    assert users.shape == (943, 5)
    assert items.shape == (1682, 24)
    assert all(type(t) is expected_type for t in (data, users, items))
|
def _build_model_for_feat_imp_test(model_name, params):
if (model_name == 'tabtransformer'):
return TabTransformer(input_dim=6, n_blocks=2, n_heads=2, embed_continuous=True, **params)
if (model_name == 'saint'):
return SAINT(input_dim=6, n_blocks=2, n_heads=2, **params)
if (model_name == 'fttransformer'):
return FTTransformer(input_dim=6, n_blocks=2, n_heads=2, kv_compression_factor=1.0, **params)
if (model_name == 'tabfastformer'):
return TabFastFormer(input_dim=6, n_blocks=2, n_heads=2, **params)
if (model_name == 'self_attn_mlp'):
return SelfAttentionMLP(input_dim=6, n_blocks=2, n_heads=2, **params)
if (model_name == 'cxt_attn_mlp'):
return ContextAttentionMLP(input_dim=6, n_blocks=2, **params)
|
@pytest.mark.parametrize('with_cls_token', [True, False])
@pytest.mark.parametrize('model_name', ['tabtransformer', 'saint', 'fttransformer', 'tabfastformer', 'self_attn_mlp', 'cxt_attn_mlp'])
def test_feature_importances(with_cls_token, model_name):
    """Every attention-based model exposes global and per-sample importances."""
    preprocessor = TabPreprocessor(cat_embed_cols=cat_cols, continuous_cols=cont_cols, with_attention=True, with_cls_token=with_cls_token)
    X_train = preprocessor.fit_transform(df_tr).astype(float)
    X_test = preprocessor.transform(df_te).astype(float)
    shared_params = {
        'column_idx': preprocessor.column_idx,
        'cat_embed_input': preprocessor.cat_embed_input,
        'continuous_cols': preprocessor.continuous_cols,
    }
    deeptabular = _build_model_for_feat_imp_test(model_name, shared_params)
    trainer = Trainer(WideDeep(deeptabular=deeptabular), objective='binary')
    trainer.fit(X_tab=X_train, target=target, n_epochs=1, batch_size=16, feature_importance_sample_size=32)
    # one global importance value per input column...
    assert len(trainer.feature_importance) == df_tr.shape[1]
    # ...and one per-sample importance row per test observation
    assert trainer.explain(X_test).shape == df_te.shape
|
def test_fttransformer_valueerror():
    """FTTransformer with kv compression != 1 cannot yield feature importances."""
    preprocessor = TabPreprocessor(cat_embed_cols=cat_cols, continuous_cols=cont_cols, with_attention=True)
    X_train = preprocessor.fit_transform(df_tr).astype(float)
    ft_model = FTTransformer(
        input_dim=6,
        n_blocks=2,
        n_heads=2,
        kv_compression_factor=0.5,
        column_idx=preprocessor.column_idx,
        cat_embed_input=preprocessor.cat_embed_input,
        continuous_cols=preprocessor.continuous_cols,
    )
    trainer = Trainer(WideDeep(deeptabular=ft_model), objective='binary')
    with pytest.raises(ValueError) as ve:
        trainer.fit(X_tab=X_train, target=target, n_epochs=1, batch_size=16, feature_importance_sample_size=32)
    assert ve.value.args[0] == "Feature importance can only be computed if the compression factor 'kv_compression_factor' is set to 1"
|
def test_feature_importances_tabnet():
    """TabNet also exposes global and per-sample feature importances."""
    preprocessor = TabPreprocessor(cat_embed_cols=cat_cols, continuous_cols=cont_cols)
    X_train = preprocessor.fit_transform(df_tr).astype(float)
    X_test = preprocessor.transform(df_te).astype(float)
    tabnet = TabNet(
        column_idx=preprocessor.column_idx,
        cat_embed_input=preprocessor.cat_embed_input,
        continuous_cols=preprocessor.continuous_cols,
        embed_continuous=True,
    )
    trainer = Trainer(WideDeep(deeptabular=tabnet), objective='binary')
    trainer.fit(X_tab=X_train, target=target, n_epochs=1, batch_size=16, feature_importance_sample_size=32)
    assert len(trainer.feature_importance) == df_tr.shape[1]
    assert trainer.explain(X_test, save_step_masks=False).shape == df_te.shape
|
class TextModeTestClass(nn.Module):
    """Minimal embedding + LSTM model used as a custom deeptext component."""

    def __init__(self):
        super(TextModeTestClass, self).__init__()
        self.word_embed = nn.Embedding(5, 16, padding_idx=0)
        self.rnn = nn.LSTM(16, 8, batch_first=True)
        self.linear = nn.Linear(8, 1)

    def forward(self, X):
        embedded = self.word_embed(X.long())
        _, (hidden, _) = self.rnn(embedded)
        # hidden is (num_layers, batch, hidden_dim); project then flatten
        # to one logit per batch element
        return self.linear(hidden).view(-1, 1)
|
class ImageModeTestClass(nn.Module):
    """Small conv stack ending in adaptive avg pooling; custom deepimage component."""

    def __init__(self):
        super(ImageModeTestClass, self).__init__()
        self.conv_block = nn.Sequential(
            conv_layer(3, 64, 3),
            conv_layer(64, 128, 1, maxpool=False, adaptiveavgpool=True),
        )
        self.linear = nn.Linear(128, 1)

    def forward(self, X):
        features = self.conv_block(X)
        # flatten everything but the batch dimension before the head
        flat = features.view(features.size(0), -1)
        return self.linear(flat)
|
def loss_fn(y_pred, y_true):
    """Binary cross-entropy on logits, with targets reshaped to a column vector."""
    targets = y_true.view(-1, 1)
    return F.binary_cross_entropy_with_logits(y_pred, targets)
|
@pytest.mark.parametrize('model, modelname, loader, n_epochs, max_lr', [(wide, 'wide', mmloader, 1, 0.01), (tab_mlp, 'deeptabular', mmloader, 1, 0.01), (deeptext, 'deeptext', mmloader, 1, 0.01), (deepimage, 'deepimage', mmloader, 1, 0.01)])
def test_finetune_all(model, modelname, loader, n_epochs, max_lr):
    """The routine passes if it runs to completion without raising."""
    completed = False
    try:
        finetuner.finetune_all(model, modelname, loader, n_epochs, max_lr)
        completed = True
    except Exception:
        pass
    assert completed
|
@pytest.mark.parametrize('model, modelname, loader, max_lr, layers, routine', [(tab_mlp, 'deeptabular', mmloader, 0.01, tab_layers, 'felbo'), (tab_mlp, 'deeptabular', mmloader, 0.01, tab_layers, 'howard'), (deeptext, 'deeptext', mmloader, 0.01, text_layers, 'felbo'), (deeptext, 'deeptext', mmloader, 0.01, text_layers, 'howard'), (deepimage, 'deepimage', mmloader, 0.01, image_layers, 'felbo'), (deepimage, 'deepimage', mmloader, 0.01, image_layers, 'howard')])
def test_finetune_gradual(model, modelname, loader, max_lr, layers, routine):
    """Both gradual finetuning routines pass if they run without raising."""
    completed = False
    try:
        finetuner.finetune_gradual(model, modelname, loader, max_lr, layers, routine)
        completed = True
    except Exception:
        pass
    assert completed
|
def test_chunk_wide_processor_one_chunk():
    """Chunked fitting with a single chunk matches fitting in one go."""
    frame = pd.read_csv(os.path.join(data_folder, fname))
    full_processor = WidePreprocessor(wide_cols=cat_cols)
    X_wide_full = full_processor.fit_transform(frame)
    chunk_processor = ChunkWidePreprocessor(wide_cols=cat_cols, n_chunks=1)
    chunk_processor.partial_fit(frame)
    X_wide_chunked = chunk_processor.transform(frame)
    # both routes must invert back to the same original frame
    assert full_processor.inverse_transform(X_wide_full).equals(
        chunk_processor.inverse_transform(X_wide_chunked)
    )
|
def test_chunk_wide_processor():
    """Chunked fitting over several chunks matches fitting in one go."""
    frame = pd.read_csv(os.path.join(data_folder, fname))
    full_processor = WidePreprocessor(wide_cols=cat_cols)
    X_wide_full = full_processor.fit_transform(frame)
    chunk_processor = ChunkWidePreprocessor(wide_cols=cat_cols, n_chunks=n_chunks)
    for chunk in pd.read_csv(os.path.join(data_folder, fname), chunksize=chunksize):
        chunk_processor.partial_fit(chunk)
    X_wide_chunked = chunk_processor.transform(frame)
    assert full_processor.inverse_transform(X_wide_full).equals(
        chunk_processor.inverse_transform(X_wide_chunked)
    )
|
def test_chunk_tab_preprocessor_one_chunk():
    """A single-chunk ChunkTabPreprocessor reproduces TabPreprocessor output."""
    frame = pd.read_csv(os.path.join(data_folder, fname))
    X_tab_full = TabPreprocessor(cat_embed_cols=cat_cols, continuous_cols=num_cols).fit_transform(frame)
    chunk_processor = ChunkTabPreprocessor(cat_embed_cols=cat_cols, continuous_cols=num_cols, n_chunks=1)
    chunk_processor.partial_fit(frame)
    assert (X_tab_full == chunk_processor.transform(frame)).all()
|
def test_chunk_tab_preprocessor():
    """Chunked tab preprocessing inverts to the same frame as the full fit."""
    frame = pd.read_csv(os.path.join(data_folder, fname))
    full_processor = TabPreprocessor(cat_embed_cols=cat_cols, continuous_cols=num_cols)
    X_tab_full = full_processor.fit_transform(frame)
    chunk_processor = ChunkTabPreprocessor(cat_embed_cols=cat_cols, continuous_cols=num_cols, n_chunks=n_chunks)
    for chunk in pd.read_csv(os.path.join(data_folder, fname), chunksize=chunksize):
        chunk_processor.partial_fit(chunk)
    X_tab_chunked = chunk_processor.transform(frame)
    assert full_processor.inverse_transform(X_tab_full).equals(
        chunk_processor.inverse_transform(X_tab_chunked)
    )
|
@pytest.mark.parametrize('with_attention', [True, False])
@pytest.mark.parametrize('quantization_setup', [{'numeric2': [0.0, 50.0, 100.0]}, None])
def test_chunk_tab_preprocessor_with_params(with_attention, quantization_setup):
    """Chunked and full fits agree under scaling/attention/quantization options."""
    frame = pd.read_csv(os.path.join(data_folder, fname))
    full_processor = TabPreprocessor(
        cat_embed_cols=cat_cols,
        continuous_cols=num_cols,
        cols_to_scale=['numeric1'],
        with_attention=with_attention,
        with_cls_token=with_attention,
        quantization_setup=quantization_setup,
    )
    X_tab_full = full_processor.fit_transform(frame)
    chunk_processor = ChunkTabPreprocessor(
        n_chunks=n_chunks,
        cat_embed_cols=cat_cols,
        continuous_cols=num_cols,
        cols_to_scale=['numeric1'],
        with_attention=with_attention,
        with_cls_token=with_attention,
        cols_and_bins=quantization_setup,
    )
    for chunk in pd.read_csv(os.path.join(data_folder, fname), chunksize=chunksize):
        chunk_processor.partial_fit(chunk)
    X_tab_chunked = chunk_processor.transform(frame)
    # both pipelines must invert to the same original frame
    assert full_processor.inverse_transform(X_tab_full).equals(
        chunk_processor.inverse_transform(X_tab_chunked)
    )
|
def test_chunk_text_preprocessor_one_go():
    """A single-chunk ChunkTextPreprocessor reproduces TextPreprocessor output."""
    frame = pd.read_csv(os.path.join(data_folder, fname))
    full_processor = TextPreprocessor(text_col=text_col, n_cpus=1, maxlen=10, max_vocab=50)
    X_text_full = full_processor.fit_transform(frame)
    chunk_processor = ChunkTextPreprocessor(text_col=text_col, n_chunks=1, n_cpus=1, maxlen=10, max_vocab=50)
    chunk_processor.partial_fit(frame)
    assert (X_text_full == chunk_processor.transform(frame)).all()
|
def test_chunk_text_preprocessor():
    """Chunked text preprocessing inverts to the same text as the full fit."""
    frame = pd.read_csv(os.path.join(data_folder, fname))
    full_processor = TextPreprocessor(text_col=text_col, n_cpus=1, maxlen=10, max_vocab=50)
    X_text_full = full_processor.fit_transform(frame)
    chunk_processor = ChunkTextPreprocessor(text_col=text_col, n_chunks=n_chunks, n_cpus=1, maxlen=10, max_vocab=50)
    for chunk in pd.read_csv(os.path.join(data_folder, fname), chunksize=chunksize):
        chunk_processor.partial_fit(chunk)
    X_text_chunked = chunk_processor.transform(frame)
    assert full_processor.inverse_transform(X_text_full).equals(
        chunk_processor.inverse_transform(X_text_chunked)
    )
|
def test_tab_from_folder_alone():
    """TabFromFolder yields the same processed row as transforming in memory."""
    frame = pd.read_csv('/'.join([data_folder, fname]))
    preprocessor = ChunkTabPreprocessor(embed_cols=cat_cols, continuous_cols=num_cols, n_chunks=n_chunks)
    for chunk in pd.read_csv('/'.join([data_folder, fname]), chunksize=chunksize):
        preprocessor.fit(chunk)
    from_folder = TabFromFolder(fname=fname, directory=data_folder, target_col='target_regression', preprocessor=preprocessor)
    expected_row = preprocessor.transform(frame)[1]
    actual_row, _, _, _ = from_folder.get_item(1)
    assert (expected_row == actual_row).all()
|
def test_tab_from_folder_with_reference():
    """A reference-built TabFromFolder behaves like its source loader."""
    frame = pd.read_csv('/'.join([data_folder, fname]))
    preprocessor = ChunkTabPreprocessor(embed_cols=cat_cols, continuous_cols=num_cols, n_chunks=n_chunks)
    for chunk in pd.read_csv('/'.join([data_folder, fname]), chunksize=chunksize):
        preprocessor.fit(chunk)
    train_loader = TabFromFolder(fname=fname, directory=data_folder, target_col='target_regression', preprocessor=preprocessor)
    # the eval loader inherits everything from the train loader
    eval_loader = TabFromFolder(fname=fname, reference=train_loader)
    expected_row = preprocessor.transform(frame)[1]
    actual_row, _, _, _ = eval_loader.get_item(1)
    assert (expected_row == actual_row).all()
|
def test_text_from_folder_alone():
    """TextFromFolder yields the same token row as transforming in memory."""
    frame = pd.read_csv('/'.join([data_folder, fname]))
    chunk_text_processor = ChunkTextPreprocessor(text_col=text_col, n_chunks=1, n_cpus=1, maxlen=10, max_vocab=50)
    for chunk in pd.read_csv('/'.join([data_folder, fname]), chunksize=chunksize):
        chunk_text_processor.partial_fit(chunk)
    text_folder = TextFromFolder(preprocessor=chunk_text_processor)
    expected = chunk_text_processor.transform(frame)[1]
    actual = text_folder.get_item(frame.text.loc[1])
    assert (expected == actual).all()
|
def test_image_from_folder_alone():
    """ImageFromFolder yields images with the same shape as the preprocessor."""
    frame = pd.read_csv('/'.join([data_folder, fname]))
    img_preprocessor = ImagePreprocessor(img_col=img_col, img_path=img_folder)
    img_from_folder = ImageFromFolder(preprocessor=img_preprocessor)
    # transform returns HWC; the folder loader yields CHW, so transpose first
    expected = img_preprocessor.transform(frame)[1].transpose(2, 0, 1)
    actual = img_from_folder.get_item(frame.images.loc[1])
    assert expected.shape == actual.shape
|
def test_image_from_folder_with_transforms():
    """Applying torchvision transforms must yield a (3, 10, 10) tensor.

    BUG FIX: the function previously ended with ``return`` instead of
    ``assert``, so pytest discarded the boolean and the test could never
    fail. The comparison is now asserted.
    """
    df = pd.read_csv('/'.join([data_folder, fname]))
    img_transforms = transforms.Compose([transforms.CenterCrop(10), transforms.ToTensor()])
    img_from_folder = ImageFromFolder(directory=img_folder, transforms=img_transforms)
    processed_sample_from_folder = img_from_folder.get_item(df.images.loc[1])
    assert (processed_sample_from_folder.shape == torch.Size([3, 10, 10]))
|
def test_full_wide_deep_dataset_from_folder():
    """A dataset combining tab, text and image folders yields all components."""
    frame = pd.read_csv('/'.join([data_folder, fname]))
    tab_preprocessor = ChunkTabPreprocessor(embed_cols=cat_cols, continuous_cols=num_cols, n_chunks=n_chunks, default_embed_dim=8, verbose=0)
    text_preprocessor = ChunkTextPreprocessor(n_chunks=n_chunks, text_col=text_col, n_cpus=1, maxlen=10, max_vocab=50)
    img_preprocessor = ImagePreprocessor(img_col=img_col, img_path=img_folder)
    for chunk in pd.read_csv('/'.join([data_folder, fname]), chunksize=chunksize):
        tab_preprocessor.fit(chunk)
        text_preprocessor.fit(chunk)
    tab_from_folder = TabFromFolder(fname=fname, directory=data_folder, target_col='target_regression', preprocessor=tab_preprocessor, text_col=text_col, img_col=img_col)
    text_from_folder = TextFromFolder(preprocessor=text_preprocessor)
    img_from_folder = ImageFromFolder(preprocessor=img_preprocessor)
    dataset = WideDeepDatasetFromFolder(n_samples=frame.shape[0], tab_from_folder=tab_from_folder, text_from_folder=text_from_folder, img_from_folder=img_from_folder)
    X, y = dataset[1]
    # all three deep components must be present with the expected shapes
    assert all(k in X for k in ['deeptabular', 'deeptext', 'deepimage'])
    assert X['deeptabular'].shape[0] == len(cat_cols) + len(num_cols)
    assert X['deeptext'].shape[0] == text_preprocessor.maxlen
    assert X['deepimage'].shape == (3, 224, 224)
|
@pytest.mark.parametrize('tabular_component', ['wide', 'deeptabular'])
def test_wide_and_tab_optional(tabular_component):
    """Either the wide or the deeptabular folder loader may be used alone."""
    frame = pd.read_csv('/'.join([data_folder, fname]))
    use_wide = tabular_component == 'wide'
    if use_wide:
        tab_preprocessor = ChunkWidePreprocessor(wide_cols=cat_cols, n_chunks=n_chunks)
    else:
        tab_preprocessor = ChunkTabPreprocessor(embed_cols=cat_cols, continuous_cols=num_cols, n_chunks=n_chunks, default_embed_dim=8, verbose=0)
    text_preprocessor = ChunkTextPreprocessor(n_chunks=n_chunks, text_col=text_col, n_cpus=1, maxlen=10, max_vocab=50)
    img_preprocessor = ImagePreprocessor(img_col=img_col, img_path=img_folder)
    for chunk in pd.read_csv('/'.join([data_folder, fname]), chunksize=chunksize):
        tab_preprocessor.fit(chunk)
        text_preprocessor.fit(chunk)
    tab_from_folder = TabFromFolder(fname=fname, directory=data_folder, target_col='target_regression', preprocessor=tab_preprocessor, text_col=text_col, img_col=img_col)
    text_from_folder = TextFromFolder(preprocessor=text_preprocessor)
    img_from_folder = ImageFromFolder(preprocessor=img_preprocessor)
    dataset = WideDeepDatasetFromFolder(
        n_samples=frame.shape[0],
        tab_from_folder=None if use_wide else tab_from_folder,
        wide_from_folder=tab_from_folder if use_wide else None,
        text_from_folder=text_from_folder,
        img_from_folder=img_from_folder,
    )
    X, y = dataset[1]
    if use_wide:
        component, expected_len = 'wide', len(cat_cols)
    else:
        component, expected_len = 'deeptabular', len(cat_cols) + len(num_cols)
    assert all(k in X for k in [component, 'deeptext', 'deepimage'])
    assert X[component].shape[0] == expected_len
    assert X['deeptext'].shape[0] == text_preprocessor.maxlen
    assert X['deepimage'].shape == (3, 224, 224)
|
def _build_preprocessors(tab_params=None):
    """Fit wide/tab/text/image preprocessors over the csv, chunk by chunk.

    Parameters
    ----------
    tab_params: dict, optional
        extra keyword args forwarded to ChunkTabPreprocessor. (BUG FIX: this
        was a mutable default argument ``{}``, the classic shared-mutable
        pitfall; replaced with the ``None`` sentinel.)

    Returns
    -------
    tuple of (wide, tab, text, image) preprocessors, the first three fitted.
    """
    if tab_params is None:
        tab_params = {}
    wide_preprocessor = ChunkWidePreprocessor(wide_cols=cat_cols, n_chunks=n_chunks)
    tab_preprocessor = ChunkTabPreprocessor(embed_cols=cat_cols, continuous_cols=num_cols, n_chunks=n_chunks, default_embed_dim=8, verbose=0, **tab_params)
    text_preprocessor = ChunkTextPreprocessor(n_chunks=n_chunks, text_col=text_col, n_cpus=1, max_vocab=50, maxlen=10)
    # the image preprocessor needs no fitting pass
    img_preprocessor = ImagePreprocessor(img_col=img_col, img_path=img_folder)
    for chunk in pd.read_csv('/'.join([data_folder, fname]), chunksize=chunksize):
        wide_preprocessor.fit(chunk)
        tab_preprocessor.fit(chunk)
        text_preprocessor.fit(chunk)
    return (wide_preprocessor, tab_preprocessor, text_preprocessor, img_preprocessor)
|
def _build_data_mode_from_folder(wide_preprocessor, tab_preprocessor, text_preprocessor, img_preprocessor, target_col='target_regression'):
    """Wrap fitted preprocessors into their *FromFolder data loaders."""
    tab_loader = TabFromFolder(fname=fname, directory=data_folder, target_col=target_col, preprocessor=tab_preprocessor, img_col=img_col, text_col=text_col)
    # the wide loader shares file/column settings with the tab loader
    wide_loader = WideFromFolder(fname=fname, directory=data_folder, preprocessor=wide_preprocessor, reference=tab_loader)
    text_loader = TextFromFolder(preprocessor=text_preprocessor)
    img_loader = ImageFromFolder(preprocessor=img_preprocessor)
    return (wide_loader, tab_loader, text_loader, img_loader)
|
def _build_eval_and_test_data_mode_from_folder(wide_from_folder, tab_from_folder, eval_fname, test_fname):
    """Derive eval/test folder loaders that share state with the train ones."""
    # NOTE(review): the 'wide' loaders are constructed with TabFromFolder
    # rather than WideFromFolder; this appears to work because all state is
    # taken from the ``reference`` object, but it may be an oversight — confirm.
    eval_wide = TabFromFolder(fname=eval_fname, reference=wide_from_folder)
    eval_tab = TabFromFolder(fname=eval_fname, reference=tab_from_folder)
    # the test files carry no target column, hence ignore_target=True
    test_wide = TabFromFolder(fname=test_fname, reference=wide_from_folder, ignore_target=True)
    test_tab = TabFromFolder(fname=test_fname, reference=tab_from_folder, ignore_target=True)
    return (eval_wide, eval_tab, test_wide, test_tab)
|
def _buid_model(wide_preprocessor, tab_preprocessor, text_preprocessor, pred_dim=1, with_attention=False):
    """Assemble a four-component WideDeep model from fitted preprocessors.

    (The '_buid' typo in the name is preserved: callers use it.)
    """
    wide = Wide(input_dim=wide_preprocessor.wide_dim, num_class=pred_dim)
    tab_kwargs = dict(
        column_idx=tab_preprocessor.column_idx,
        cat_embed_input=tab_preprocessor.cat_embed_input,
        continuous_cols=tab_preprocessor.continuous_cols,
    )
    if with_attention:
        deeptabular = TabTransformer(input_dim=8, n_heads=2, n_blocks=2, **tab_kwargs)
    else:
        deeptabular = TabMlp(mlp_hidden_dims=[16, 8], **tab_kwargs)
    deeptext = BasicRNN(vocab_size=len(text_preprocessor.vocab.itos), embed_dim=8, hidden_dim=8)
    deepimage = Vision()
    return WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage, num_class=pred_dim)
|
@pytest.mark.parametrize('objective', ['regression', 'binary', 'multiclass'])
def test_trainer_from_loader_basic_inputs(objective):
    """TrainerFromFolder fits from a DataLoader for every objective."""
    wide_prep, tab_prep, text_prep, img_prep = _build_preprocessors()
    target_col = {
        'regression': 'target_regression',
        'binary': 'target_binary',
        'multiclass': 'target_multiclass',
    }[objective]
    wide_ff, tab_ff, text_ff, img_ff = _build_data_mode_from_folder(wide_prep, tab_prep, text_prep, img_prep, target_col)
    dataset = WideDeepDatasetFromFolder(n_samples=data_size, wide_from_folder=wide_ff, tab_from_folder=tab_ff, text_from_folder=text_ff, img_from_folder=img_ff)
    loader = DataLoader(dataset, batch_size=4)
    # the multiclass target has 3 classes; regression/binary use one output
    pred_dim = 3 if objective == 'multiclass' else 1
    model = _buid_model(wide_prep, tab_prep, text_prep, pred_dim=pred_dim)
    trainer = TrainerFromFolder(model, objective=objective, verbose=0)
    trainer.fit(train_loader=loader)
    assert len(trainer.history) > 0 and 'train_loss' in trainer.history.keys()
|
@pytest.mark.parametrize('pred_with_loader', [True, False])
def test_trainer_from_loader_with_valid_and_test(pred_with_loader):
    """Fit with train+eval loaders, then predict from a loader or from arrays."""
    wide_prep, tab_prep, text_prep, img_prep = _build_preprocessors()
    wide_ff, tab_ff, text_ff, img_ff = _build_data_mode_from_folder(wide_prep, tab_prep, text_prep, img_prep)
    eval_wide_ff, eval_tab_ff, test_wide_ff, test_tab_ff = _build_eval_and_test_data_mode_from_folder(wide_ff, tab_ff, fname, fname)
    train_ds = WideDeepDatasetFromFolder(n_samples=data_size, wide_from_folder=wide_ff, tab_from_folder=tab_ff, text_from_folder=text_ff, img_from_folder=img_ff)
    eval_ds = WideDeepDatasetFromFolder(n_samples=data_size, wide_from_folder=eval_wide_ff, tab_from_folder=eval_tab_ff, reference=train_ds)
    test_ds = WideDeepDatasetFromFolder(n_samples=data_size, wide_from_folder=test_wide_ff, tab_from_folder=test_tab_ff, reference=train_ds)
    train_loader = DataLoader(train_ds, batch_size=4)
    eval_loader = DataLoader(eval_ds, batch_size=4)
    test_loader = DataLoader(test_ds, batch_size=4)
    model = _buid_model(wide_prep, tab_prep, text_prep)
    trainer = TrainerFromFolder(model, objective='regression', verbose=0)
    trainer.fit(train_loader=train_loader, eval_loader=eval_loader)
    if pred_with_loader:
        preds = trainer.predict(test_loader=test_loader)
    else:
        # predict from in-memory arrays instead of a loader
        frame = pd.read_csv('/'.join([data_folder, fname]))
        preds = trainer.predict(
            X_wide=wide_prep.transform(frame),
            X_tab=tab_prep.transform(frame),
            X_text=text_prep.transform(frame),
            X_img=img_prep.fit_transform(frame),
            batch_size=4,
        )
    assert preds.shape[0] == data_size
    assert 'train_loss' in trainer.history.keys() and 'val_loss' in trainer.history.keys()
|
@pytest.mark.parametrize('tab_params', [{'with_attention': True, 'with_cls_token': True}, {'with_attention': True, 'with_cls_token': False}])
def test_trainer_from_loader_with_tab_params(tab_params):
    """Attention-based tab settings work end to end, incl. finetune + callbacks."""
    wide_prep, tab_prep, text_prep, img_prep = _build_preprocessors(tab_params=tab_params)
    wide_ff, tab_ff, text_ff, img_ff = _build_data_mode_from_folder(wide_prep, tab_prep, text_prep, img_prep)
    eval_wide_ff, eval_tab_ff, test_wide_ff, test_tab_ff = _build_eval_and_test_data_mode_from_folder(wide_ff, tab_ff, fname, fname)
    train_ds = WideDeepDatasetFromFolder(n_samples=data_size, wide_from_folder=wide_ff, tab_from_folder=tab_ff, text_from_folder=text_ff, img_from_folder=img_ff)
    eval_ds = WideDeepDatasetFromFolder(n_samples=data_size, wide_from_folder=eval_wide_ff, tab_from_folder=eval_tab_ff, reference=train_ds)
    test_ds = WideDeepDatasetFromFolder(n_samples=data_size, wide_from_folder=test_wide_ff, tab_from_folder=test_tab_ff, reference=train_ds)
    train_loader = DataLoader(train_ds, batch_size=4)
    eval_loader = DataLoader(eval_ds, batch_size=4)
    test_loader = DataLoader(test_ds, batch_size=4)
    model = _buid_model(wide_prep, tab_prep, text_prep, with_attention=tab_params['with_attention'])
    trainer = TrainerFromFolder(model, objective='regression', verbose=1, callbacks=[EarlyStopping(patience=10)])
    trainer.fit(train_loader=train_loader, eval_loader=eval_loader, n_epochs=2, finetune=True, finetune_epochs=1)
    preds = trainer.predict(test_loader=test_loader)
    assert preds.shape[0] == data_size
    assert 'train_loss' in trainer.history.keys()
    assert 'val_loss' in trainer.history.keys()
    # exactly one history entry per epoch
    assert len(trainer.history['train_loss']) == 2
|
def f2_score_bin(y_true, y_pred):
    """Binary F-beta with beta=2 (recall weighted over precision)."""
    score = fbeta_score(y_true, y_pred, beta=2)
    return score
|
@pytest.mark.parametrize('sklearn_metric, widedeep_metric', [(accuracy_score, Accuracy()), (precision_score, Precision()), (recall_score, Recall()), (f1_score, F1Score()), (f2_score_bin, FBetaScore(beta=2))])
def test_binary_metrics(sklearn_metric, widedeep_metric):
    """widedeep binary metrics agree with their sklearn counterparts."""
    # sklearn takes hard labels (hence round()); widedeep takes probabilities
    sk_value = sklearn_metric(y_true_bin_np, y_pred_bin_np.round())
    wd_value = widedeep_metric(y_pred_bin_pt, y_true_bin_pt)
    assert np.isclose(sk_value, wd_value)
|
@pytest.mark.parametrize('top_k, expected_acc', [(1, 0.33), (2, 0.66)])
def test_categorical_accuracy_topk(top_k, expected_acc):
    """Top-k accuracy on random 3-class data is ~k/3 (checked within atol).

    BUG FIX: the random draws were unseeded, making the test flaky — even
    with atol=0.3 an unlucky draw could fail. A fixed-seed Generator makes
    the result deterministic.
    """
    rng = np.random.default_rng(42)
    y_true = torch.from_numpy(rng.choice(3, 100))
    y_pred = torch.from_numpy(rng.random((100, 3)))
    metric = Accuracy(top_k=top_k)
    acc = metric(y_pred, y_true)
    assert np.isclose(acc, expected_acc, atol=0.3)
|
def f2_score_multi(y_true, y_pred, average):
    """Multiclass F-beta with beta=2 and the given averaging strategy."""
    score = fbeta_score(y_true, y_pred, average=average, beta=2)
    return score
|
@pytest.mark.parametrize('sklearn_metric, widedeep_metric', [(accuracy_score, Accuracy()), (precision_score, Precision()), (recall_score, Recall()), (f1_score, F1Score()), (f2_score_multi, FBetaScore(beta=2))])
def test_muticlass_metrics(sklearn_metric, widedeep_metric):
    """widedeep multiclass metrics agree with sklearn (macro averaging)."""
    hard_preds = y_pred_muli_np.argmax(axis=1)
    # accuracy_score has no 'average' keyword; the others use macro averaging
    if sklearn_metric.__name__ == 'accuracy_score':
        sk_value = sklearn_metric(y_true_multi_np, hard_preds)
    else:
        sk_value = sklearn_metric(y_true_multi_np, hard_preds, average='macro')
    wd_value = widedeep_metric(y_pred_multi_pt, y_true_multi_pt)
    assert np.isclose(sk_value, wd_value)
|
@pytest.mark.parametrize('sklearn_metric, widedeep_metric', [(precision_score, Precision(average=False)), (recall_score, Recall(average=False)), (f1_score, F1Score(average=False)), (f2_score_multi, FBetaScore(beta=2, average=False))])
def test_muticlass_metrics_without_average(sklearn_metric, widedeep_metric):
    """Per-class metric values: their mean must match sklearn's macro average.

    BUG FIX: a stray trailing comma previously made ``skm`` a 1-tuple; the
    test only passed because np.isclose broadcast over it. The intent is a
    scalar comparison.
    """
    skm = sklearn_metric(y_true_multi_np, y_pred_muli_np.argmax(axis=1), average='macro')
    wdm = widedeep_metric(y_pred_multi_pt, y_true_multi_pt)
    # average=False returns one value per class (3 classes here)
    assert (np.isclose(skm, np.mean(wdm)) and (wdm.shape[0] == 3))
|
@pytest.mark.parametrize('metric, metric_name', [(Accuracy(), 'accuracy'), (Precision(), 'precision'), (Recall(), 'recall'), (FBetaScore(beta=2), 'fbeta'), (F1Score(), 'f1'), (R2Score(), 'r2')])
def test_reset_methods(metric, metric_name):
    """After one update the accumulators are non-zero; reset() zeroes them."""

    def _accumulators(m):
        # collect the internal counters each metric type maintains
        if metric_name == 'accuracy':
            return [m.correct_count, m.total_count]
        if metric_name == 'precision':
            return [m.true_positives, m.all_positives]
        if metric_name == 'recall':
            return [m.true_positives, m.actual_positives]
        if metric_name == 'fbeta':
            return [
                m.precision.true_positives,
                m.precision.all_positives,
                m.recall.true_positives,
                m.recall.actual_positives,
            ]
        if metric_name == 'f1':
            return [
                m.f1.precision.true_positives,
                m.f1.precision.all_positives,
                m.f1.recall.true_positives,
                m.f1.recall.actual_positives,
            ]
        # r2
        return [m.numerator, m.num_examples, m.y_true_sum]

    # run one update to populate the internal state
    if metric_name == 'r2':
        metric(y_pred_reg_np, y_true_reg_np)
    else:
        metric(y_pred_bin_pt, y_true_bin_pt)
    # counters populated after one update...
    assert all(v != 0 for v in _accumulators(metric))
    metric.reset()
    # ...and cleared after reset
    assert all(v == 0 for v in _accumulators(metric))
|
def test_r2_score():
    """widedeep's R2Score must agree exactly with sklearn's r2_score."""
    expected = r2_score(y_true_reg_np, y_pred_reg_np)
    assert expected == R2Score()(y_pred_reg_pt, y_true_reg_pt)
|
def f2_score_bin(y_true, y_pred):
    """F-beta with beta fixed at 2 for binary targets (sklearn wrapper)."""
    beta = 2
    return fbeta_score(y_true, y_pred, beta=beta)
|
@pytest.mark.parametrize('metric_name, sklearn_metric, torch_metric', [('BinaryAccuracy', accuracy_score, Accuracy(task='binary')), ('BinaryPrecision', precision_score, Precision(task='binary')), ('BinaryRecall', recall_score, Recall(task='binary')), ('BinaryF1Score', f1_score, F1Score(task='binary')), ('BinaryFBetaScore', f2_score_bin, FBetaScore(task='binary', beta=2.0))])
def test_binary_metrics(metric_name, sklearn_metric, torch_metric):
    """torchmetrics binary metrics (via MultipleMetrics) must match sklearn."""
    expected = sklearn_metric(y_true_bin_np, y_pred_bin_np.round())
    logs = MultipleMetrics(metrics=[torch_metric])(y_pred_bin_pt, y_true_bin_pt)
    observed = logs[metric_name]
    # per-class output: keep the positive-class entry (index 1)
    if observed.size != 1:
        observed = observed[1]
    assert np.isclose(expected, observed)
|
def f2_score_multi(y_true, y_pred, average):
    """Multiclass F2 score: sklearn's fbeta_score with beta pinned to 2."""
    return fbeta_score(y_true=y_true, y_pred=y_pred, average=average, beta=2)
|
@pytest.mark.parametrize('metric_name, sklearn_metric, torch_metric', [('MulticlassAccuracy', accuracy_score, Accuracy(task='multiclass', num_classes=3, average='micro')), ('MulticlassPrecision', precision_score, Precision(task='multiclass', num_classes=3, average='macro')), ('MulticlassRecall', recall_score, Recall(task='multiclass', num_classes=3, average='macro')), ('MulticlassF1Score', f1_score, F1Score(task='multiclass', num_classes=3, average='macro')), ('MulticlassFBetaScore', f2_score_multi, FBetaScore(beta=3.0, task='multiclass', num_classes=3, average='macro'))])
def test_muticlass_metrics(metric_name, sklearn_metric, torch_metric):
    """torchmetrics multiclass metrics (via MultipleMetrics) vs sklearn.

    The comparison uses a loose atol=0.01 since the two libraries may differ
    slightly in averaging/rounding.
    """
    y_hat = y_pred_muli_np.argmax(axis=1)
    if metric_name == 'MulticlassAccuracy':
        expected = sklearn_metric(y_true_multi_np, y_hat)
    else:
        expected = sklearn_metric(y_true_multi_np, y_hat, average='macro')
    logs = MultipleMetrics(metrics=[torch_metric])(y_pred_multi_pt, y_true_multi_pt)
    assert np.isclose(expected, logs[metric_name], atol=0.01)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA to run')
def test_flash_standard_shapes():
    """Flash attention must produce the same output shape as standard attention."""
    standard_out = standard_attn(X)
    flash_out = flash_attn(X)
    assert standard_out.shape == flash_out.shape
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA to run')
def test_flash_standard_values():
    """Flash attention must be numerically equivalent to standard attention."""
    standard_out = standard_attn(X)
    flash_out = flash_attn(X)
    assert torch.allclose(standard_out, flash_out, atol=1e-07)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA to run')
def test_speedup_flash():
    """Flash attention should be at least ~30% faster over 500 repetitions."""
    t_standard = timeit.timeit((lambda: standard_attn(X)), number=500)
    t_flash = timeit.timeit((lambda: flash_attn(X)), number=500)
    assert t_flash < t_standard
    relative_speedup = (t_standard - t_flash) / t_standard
    assert relative_speedup > 0.3
|
def test_flash_standard_vs_linear_shapes():
    """Linear attention must keep the same output shape as standard attention."""
    standard_out = standard_attn(X)
    linear_out = linear_attn(X)
    assert standard_out.shape == linear_out.shape
|
def test_output_sizes():
    """Default Vision backbone maps X_images to a (10, 512) feature tensor."""
    features = Vision()(X_images)
    assert features.size(0) == 10
    assert features.size(1) == 512
|
def test_n_trainable():
    """Partially unfrozen resnet18 (n_trainable=6) still outputs (10, 512)."""
    vision = Vision(pretrained_model_setup='resnet18', n_trainable=6)
    features = vision(X_images)
    assert features.size(0) == 10
    assert features.size(1) == 512
|
@pytest.mark.parametrize('arch, expected_out_shape', [('shufflenet_v2_x0_5', 1024), ('resnext50_32x4d', 2048), ('wide_resnet50_2', 2048), ('mobilenet_v2', 1280), ('mnasnet1_0', 1280), ('squeezenet1_0', 512), ({'shufflenet_v2_x0_5': ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1}, 1024), ({'resnext50_32x4d': ResNeXt50_32X4D_Weights.IMAGENET1K_V2}, 2048), ({'wide_resnet50_2': Wide_ResNet50_2_Weights.IMAGENET1K_V2}, 2048), ({'mobilenet_v2': MobileNet_V2_Weights.IMAGENET1K_V2}, 1280), ({'mnasnet1_0': MNASNet1_0_Weights.IMAGENET1K_V1}, 1280), ({'squeezenet1_0': SqueezeNet1_0_Weights.IMAGENET1K_V1}, 512)])
def test_architectures(arch, expected_out_shape):
    """Each torchvision backbone (by name, or name->weights dict) yields a
    feature tensor of (batch=10, arch-specific feature dim)."""
    model = Vision(pretrained_model_setup=arch, n_trainable=0)
    out = model(X_images)
    assert ((out.size(0) == 10) and (out.size(1) == expected_out_shape))
|
def test_head():
    """An MLP head [256, 128] reduces the Vision output to (10, 128)."""
    vision = Vision(head_hidden_dims=[256, 128], head_dropout=0.1)
    features = vision(X_images)
    assert features.size(0) == 10
    assert features.size(1) == 128
|
def test_all_frozen():
    """With n_trainable=0 every parameter of the backbone must be frozen."""
    vision = Vision(pretrained_model_setup='resnet18', n_trainable=0)
    assert all(not p.requires_grad for p in vision.parameters())
|
@pytest.mark.parametrize('arch, expected_out_shape', [('resnet', 512), ('shufflenet', 1024), ('resnext', 2048), ('wide_resnet', 2048), ('regnet', 912), ('mobilenet', 1280), ('mnasnet', 1280), ('squeezenet', 512), ({'shufflenet': ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1}, 1024), ({'resnext': ResNeXt50_32X4D_Weights.IMAGENET1K_V2}, 2048)])
def test_pretrained_model_setup_defaults(arch, expected_out_shape):
    """Family aliases ('resnet', 'shufflenet', ...) resolve to a default
    variant whose feature dimension matches ``expected_out_shape``."""
    model = Vision(pretrained_model_setup=arch, n_trainable=0)
    out = model(X_images)
    assert ((out.size(0) == 10) and (out.size(1) == expected_out_shape))
|
@pytest.mark.skipif(IN_GITHUB_ACTIONS, reason='For reasons beyond me, when running in GH actions, throws a RuntimeError when trying to download the weights')
def test_pretrained_model_efficientnet():
    """The 'efficientnet' alias yields a (10, 1280) feature tensor."""
    vision = Vision(pretrained_model_setup='efficientnet', n_trainable=0)
    features = vision(X_images)
    assert features.size(0) == 10
    assert features.size(1) == 1280
|
def test_wide():
    """The module-level wide model maps ``inp`` to a (10, 1) output."""
    prediction = model(inp)
    assert prediction.size(0) == 10
    assert prediction.size(1) == 1
|
def test_deephead_and_head_layers_dim():
    """Supplying both a custom deephead and head_hidden_dims must raise ValueError."""
    custom_head = nn.Sequential(nn.Linear(32, 16), nn.Linear(16, 8))
    with pytest.raises(ValueError):
        WideDeep(wide=wide, deeptabular=tabmlp, head_hidden_dims=[16, 8], deephead=custom_head)
|
def test_no_deephead_and_head_layers_dim():
    """head_hidden_dims=[8, 4] builds head layers with the expected weight shapes."""
    model = WideDeep(wide=wide, deeptabular=tabmlp, head_hidden_dims=[8, 4])
    # nn.Linear weights are (out_features, in_features)
    expected_shapes = {
        'deephead.head_layer_0.0.weight': (8, 8),
        'deephead.head_layer_1.0.weight': (4, 8),
    }
    checks = [
        tuple(p.shape) == expected_shapes[n]
        for (n, p) in model.named_parameters()
        if n in expected_shapes
    ]
    assert all(checks)
|
def test_tabnet_warning():
    """Combining a wide component with TabNet should emit a UserWarning."""
    with pytest.warns(UserWarning):
        WideDeep(wide=wide, deeptabular=tabnet)
|
@pytest.mark.parametrize('optimizers, schedulers, len_loss_output, len_lr_output, init_lr, schedulers_type', [(optimizers_1, lr_schedulers_1, 5, 5, 0.001, 'step'), (optimizers_2, lr_schedulers_2, 5, 11, 0.001, 'cyclic'), (optimizers_3, lr_schedulers_3, 5, 5, None, None), (optimizers_4, lr_schedulers_4, 5, 11, None, None), (optimizers_5, lr_schedulers_5, 5, 11, 0.001, 'cyclic'), (optimizers_6, lr_schedulers_6, 5, 5, None, None)])
def test_history_callback(optimizers, schedulers, len_loss_output, len_lr_output, init_lr, schedulers_type):
    """LRHistory must record one train loss per epoch and the expected number
    of learning-rate entries, ending at the scheduler-specific final value
    (init_lr/10 after a step scheduler, back at init_lr for a cyclic one).
    """
    trainer = Trainer(model=model, objective='binary', optimizers=optimizers, lr_schedulers=schedulers, callbacks=[LRHistory(n_epochs=5)], verbose=0)
    trainer.fit(X_train={'X_wide': X_wide, 'X_tab': X_tab, 'target': target}, X_val={'X_wide': X_wide_val, 'X_tab': X_tab_val, 'target': target_val}, n_epochs=5, batch_size=16)
    out = []
    out.append((len(trainer.history['train_loss']) == len_loss_output))
    # The lr history layout depends on the optimizer/scheduler combination:
    # a list of per-epoch lists (flatten it), a flat list under the
    # 'lr_deeptabular_0' key, or a single 'lr_0' key — try each in turn.
    try:
        lr_list = list(chain.from_iterable(trainer.lr_history['lr_deeptabular_0']))
    except Exception:
        try:
            lr_list = trainer.lr_history['lr_deeptabular_0']
        except Exception:
            lr_list = trainer.lr_history['lr_0']
    out.append((len(lr_list) == len_lr_output))
    # Final lr value: step schedulers decay to init_lr/10, cyclic schedulers
    # return to the initial lr at the end of a full cycle.
    if ((init_lr is not None) and (schedulers_type == 'step')):
        out.append((lr_list[(- 1)] == (init_lr / 10)))
    elif ((init_lr is not None) and (schedulers_type == 'cyclic')):
        out.append((lr_list[(- 1)] == init_lr))
    assert all(out)
|
def test_early_stop():
    """With an unreachable min_delta, EarlyStopping(patience=3) halts training
    after the initial epoch plus `patience` epochs without improvement."""
    wide = Wide(np.unique(X_wide).shape[0], 1)
    deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[-5:], mlp_hidden_dims=[32, 16], mlp_dropout=[0.5, 0.5])
    model = WideDeep(wide=wide, deeptabular=deeptabular)
    stopper = EarlyStopping(min_delta=5.0, patience=3, restore_best_weights=True, verbose=1)
    trainer = Trainer(model=model, objective='binary', callbacks=[stopper], verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, val_split=0.2, n_epochs=5)
    # one baseline epoch + 3 patience epochs
    assert len(trainer.history['train_loss']) == 3 + 1
|
@pytest.mark.parametrize('fpath, save_best_only, max_save, n_files', [('tests/test_model_functioning/weights/test_weights', True, 2, 2), ('tests/test_model_functioning/weights/test_weights', False, 2, 2), ('tests/test_model_functioning/weights/test_weights', False, 0, 5), (None, False, 0, 0)])
def test_model_checkpoint(fpath, save_best_only, max_save, n_files):
    """ModelCheckpoint must never save more weight files than its settings allow."""
    wide = Wide(np.unique(X_wide).shape[0], 1)
    deeptabular = TabMlp(mlp_hidden_dims=[32, 16], mlp_dropout=[0.5, 0.5], column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[-5:])
    model = WideDeep(wide=wide, deeptabular=deeptabular)
    checkpoint = ModelCheckpoint(filepath=fpath, save_best_only=save_best_only, max_save=max_save)
    trainer = Trainer(model=model, objective='binary', callbacks=[checkpoint], verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, n_epochs=5, val_split=0.2)
    n_saved = 0
    if fpath:
        weights_dir = 'tests/test_model_functioning/weights/'
        n_saved = len(os.listdir(weights_dir))
        shutil.rmtree(weights_dir)  # clean up artifacts for the next run
    assert n_saved <= n_files
|
def test_filepath_error():
    """A filepath without a parent directory must make ModelCheckpoint raise."""
    wide = Wide(np.unique(X_wide).shape[0], 1)
    deeptabular = TabMlp(mlp_hidden_dims=[16, 4], column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[-5:])
    model = WideDeep(wide=wide, deeptabular=deeptabular)
    with pytest.raises(ValueError):
        Trainer(model=model, objective='binary', callbacks=[ModelCheckpoint(filepath='wrong_file_path')], verbose=0)
|
@pytest.mark.parametrize('optimizers, schedulers, len_loss_output, len_lr_output, init_lr, schedulers_type', [(optimizers_1, lr_schedulers_1, 5, 5, 0.001, 'step'), (optimizers_2, lr_schedulers_2, 5, 11, 0.001, 'cyclic'), (optimizers_3, lr_schedulers_3, 5, 5, None, None), (optimizers_4, lr_schedulers_4, 5, 11, None, None)])
def test_history_callback_w_tabtransformer(optimizers, schedulers, len_loss_output, len_lr_output, init_lr, schedulers_type):
    """Same LRHistory contract as ``test_history_callback`` but exercised on
    the module-level TabTransformer model (``model_tt``)."""
    trainer_tt = Trainer(model_tt, objective='binary', optimizers=optimizers, lr_schedulers=schedulers, callbacks=[LRHistory(n_epochs=5)], verbose=0)
    trainer_tt.fit(X_wide=X_wide, X_tab=X_tab, target=target, n_epochs=5, batch_size=16)
    out = []
    out.append((len(trainer_tt.history['train_loss']) == len_loss_output))
    # lr history layout depends on the setup: nested per-epoch lists (flatten
    # via chain), a flat list (TypeError from chain on floats), or a single
    # 'lr_0' key (KeyError caught by the broad except).
    try:
        lr_list = list(chain.from_iterable(trainer_tt.lr_history['lr_deeptabular_0']))
    except TypeError:
        lr_list = trainer_tt.lr_history['lr_deeptabular_0']
    except Exception:
        lr_list = trainer_tt.lr_history['lr_0']
    out.append((len(lr_list) == len_lr_output))
    # step schedulers decay to init_lr/10; cyclic ones end back at init_lr
    if ((init_lr is not None) and (schedulers_type == 'step')):
        out.append((lr_list[(- 1)] == (init_lr / 10)))
    elif ((init_lr is not None) and (schedulers_type == 'cyclic')):
        out.append((lr_list[(- 1)] == init_lr))
    assert all(out)
|
def test_modelcheckpoint_mode_warning():
    """An unrecognised ``mode`` must trigger a RuntimeWarning."""
    fpath = 'tests/test_model_functioning/modelcheckpoint/weights_out'
    with pytest.warns(RuntimeWarning):
        ModelCheckpoint(filepath=fpath, monitor='val_loss', mode='unknown')
    # the constructor creates the parent directory; remove it again
    shutil.rmtree('tests/test_model_functioning/modelcheckpoint/')
|
def test_modelcheckpoint_mode_options():
    """``mode`` handling: 'min'/'max' (explicit, or inferred from the monitored
    quantity's name) must set the right comparison op and initial best value.

    BUG FIX: the original stored checkpoint 4's results in ``auto_is_max`` /
    ``auto_best_minus_inf`` and then immediately overwrote both with
    checkpoint 5's results, so checkpoint 4 was never actually asserted.
    Each checkpoint now contributes its own entries to ``checks``.
    """
    fpath = 'tests/test_model_functioning/modelcheckpoint/weights_out'
    model_checkpoint_1 = ModelCheckpoint(filepath=fpath, monitor='val_loss', mode='min')
    model_checkpoint_2 = ModelCheckpoint(filepath=fpath, monitor='val_loss')
    model_checkpoint_3 = ModelCheckpoint(filepath=fpath, monitor='acc', mode='max')
    model_checkpoint_4 = ModelCheckpoint(filepath=fpath, monitor='acc')
    model_checkpoint_5 = ModelCheckpoint(filepath=None, monitor='acc')
    checks = [
        model_checkpoint_1.monitor_op is np.less,       # explicit 'min'
        model_checkpoint_1.best is np.Inf,
        model_checkpoint_2.monitor_op is np.less,       # inferred from 'val_loss'
        model_checkpoint_2.best is np.Inf,
        model_checkpoint_3.monitor_op is np.greater,    # explicit 'max'
        (- model_checkpoint_3.best) == np.Inf,
        model_checkpoint_4.monitor_op is np.greater,    # inferred from 'acc'
        (- model_checkpoint_4.best) == np.Inf,
        model_checkpoint_5.monitor_op is np.greater,    # filepath=None still sets mode
        (- model_checkpoint_5.best) == np.Inf,
    ]
    shutil.rmtree('tests/test_model_functioning/modelcheckpoint/')
    assert all(checks)
|
def test_modelcheckpoint_get_state():
    """A pickled/unpickled ModelCheckpoint must not carry trainer/model refs."""
    fpath = 'tests/test_model_functioning/modelcheckpoint/'
    model_checkpoint = ModelCheckpoint(filepath='/'.join([fpath, 'weights_out']), monitor='val_loss')
    trainer = Trainer(model, objective='binary', callbacks=[model_checkpoint], verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, n_epochs=1, batch_size=16)
    pickle_path = '/'.join([fpath, 'checkpoint.p'])
    with open(pickle_path, 'wb') as f:
        pickle.dump(model_checkpoint, f)
    with open(pickle_path, 'rb') as f:
        restored = pickle.load(f)
    restored_attrs = restored.__dict__.keys()
    shutil.rmtree('tests/test_model_functioning/modelcheckpoint/')
    assert ('trainer' not in restored_attrs) and ('model' not in restored_attrs)
|
def test_early_stop_mode_warning():
    """EarlyStopping with an unrecognised ``mode`` must raise a RuntimeWarning."""
    with pytest.warns(RuntimeWarning):
        EarlyStopping(monitor='val_loss', mode='unknown')
|
def test_early_stop_mode_options():
    """'min'/'max' (explicit, or inferred from the monitor name) must map to
    np.less / np.greater respectively."""
    cases = [
        (EarlyStopping(monitor='val_loss', mode='min'), np.less),
        (EarlyStopping(monitor='val_loss'), np.less),      # inferred 'min'
        (EarlyStopping(monitor='acc', mode='max'), np.greater),
        (EarlyStopping(monitor='acc'), np.greater),        # inferred 'max'
    ]
    assert all(stopper.monitor_op is expected for (stopper, expected) in cases)
|
def test_early_stopping_get_state():
    """A pickled/unpickled EarlyStopping must not carry trainer/model refs."""
    early_stopping_path = Path('tests/test_model_functioning/early_stopping')
    early_stopping_path.mkdir()
    early_stopping = EarlyStopping()
    trainer_tt = Trainer(model, objective='binary', callbacks=[early_stopping], verbose=0)
    trainer_tt.fit(X_train={'X_wide': X_wide, 'X_tab': X_tab, 'target': target}, X_val={'X_wide': X_wide_val, 'X_tab': X_tab_val, 'target': target_val}, target=target, n_epochs=1, batch_size=16)
    pickle_file = early_stopping_path / 'early_stopping.p'
    with open(pickle_file, 'wb') as f:
        pickle.dump(early_stopping, f)
    with open(pickle_file, 'rb') as f:
        restored = pickle.load(f)
    restored_attrs = restored.__dict__.keys()
    shutil.rmtree('tests/test_model_functioning/early_stopping/')
    assert ('trainer' not in restored_attrs) and ('model' not in restored_attrs)
|
def test_early_stopping_restore_weights_with_metric():
    """With restore_best_weights=True the restored model's validation loss must
    match the best value tracked by EarlyStopping."""
    early_stopping = EarlyStopping(restore_best_weights=True, min_delta=1000, patience=1000)
    trainer = Trainer(model, objective='regression', callbacks=[early_stopping], verbose=0)
    trainer.fit(X_train={'X_wide': X_wide, 'X_tab': X_tab, 'target': target}, X_val={'X_wide': X_wide_val, 'X_tab': X_tab_val, 'target': target_val}, target=target, n_epochs=2, batch_size=16)
    # the huge min_delta guarantees no epoch counts as an improvement
    assert early_stopping.wait > 0
    pred_val = trainer.predict(X_test={'X_wide': X_wide_val, 'X_tab': X_tab_val})
    restored_metric = trainer.loss_fn(torch.tensor(pred_val), torch.tensor(target_val)).item()
    assert np.allclose(restored_metric, early_stopping.best)
|
def test_early_stopping_restore_weights_with_state():
    """Weights restored by EarlyStopping must equal those saved on disk by
    ModelCheckpoint for the best epoch.

    Trains with both callbacks (min_delta=1000 forces early stopping), then
    reloads the checkpoint file of the best epoch into a fresh model and
    compares one layer's weights against the in-memory (restored) model.
    """
    wide = Wide(np.unique(X_wide).shape[0], 1)
    deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[16, 8])
    model = WideDeep(wide=wide, deeptabular=deeptabular)
    fpath = 'tests/test_model_functioning/modelcheckpoint/weights_out'
    model_checkpoint = ModelCheckpoint(filepath=fpath, save_best_only=False, max_save=10, min_delta=1000)
    early_stopping = EarlyStopping(patience=3, min_delta=1000, restore_best_weights=True)
    trainer = Trainer(model, objective='binary', callbacks=[early_stopping, model_checkpoint], verbose=0)
    trainer.fit(X_train={'X_wide': X_wide, 'X_tab': X_tab, 'target': target}, X_val={'X_wide': X_wide_val, 'X_tab': X_tab_val, 'target': target_val}, target=target, n_epochs=5, batch_size=16)
    new_wide = Wide(np.unique(X_wide).shape[0], 1)
    new_deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[16, 8])
    new_model = WideDeep(wide=new_wide, deeptabular=new_deeptabular)
    # Best epoch = stopped_epoch - patience (0-based); checkpoint files are
    # named "<filepath>_<1-based epoch>.p", hence the +1.
    full_best_epoch_path = '_'.join([model_checkpoint.filepath, (str(((early_stopping.stopped_epoch - early_stopping.patience) + 1)) + '.p')])
    new_model.load_state_dict(torch.load(full_best_epoch_path))
    new_model.to(next(model.parameters()).device)
    shutil.rmtree('tests/test_model_functioning/modelcheckpoint/')
    # compare one representative layer between disk-loaded and restored models
    assert torch.allclose(new_model.state_dict()['deeptabular.0.encoder.mlp.dense_layer_1.1.weight'], model.state_dict()['deeptabular.0.encoder.mlp.dense_layer_1.1.weight'])
|
def test_model_checkpoint_restore_weights():
    """With save_best_only=True, the weights on disk for ``best_epoch`` must
    match the in-memory model restored by ModelCheckpoint after training.

    min_delta=1000 ensures the first epoch remains the best throughout.
    """
    wide = Wide(np.unique(X_wide).shape[0], 1)
    deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[16, 8])
    model = WideDeep(wide=wide, deeptabular=deeptabular)
    fpath = 'tests/test_model_functioning/modelcheckpoint/weights_out'
    model_checkpoint = ModelCheckpoint(filepath=fpath, save_best_only=True, min_delta=1000)
    trainer = Trainer(model, objective='binary', callbacks=[model_checkpoint], verbose=0)
    trainer.fit(X_train={'X_wide': X_wide, 'X_tab': X_tab, 'target': target}, X_val={'X_wide': X_wide_val, 'X_tab': X_tab_val, 'target': target_val}, target=target, n_epochs=5, batch_size=16)
    new_wide = Wide(np.unique(X_wide).shape[0], 1)
    new_deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[16, 8])
    new_model = WideDeep(wide=new_wide, deeptabular=new_deeptabular)
    # checkpoint files are named "<filepath>_<1-based epoch>.p"
    full_best_epoch_path = '_'.join([model_checkpoint.filepath, (str((model_checkpoint.best_epoch + 1)) + '.p')])
    new_model.load_state_dict(torch.load(full_best_epoch_path))
    new_model.to(next(model.parameters()).device)
    shutil.rmtree('tests/test_model_functioning/modelcheckpoint/')
    # compare one representative layer between disk-loaded and trained models
    assert torch.allclose(new_model.state_dict()['deeptabular.0.encoder.mlp.dense_layer_1.1.weight'], model.state_dict()['deeptabular.0.encoder.mlp.dense_layer_1.1.weight'])
|
@pytest.mark.parametrize('X_wide, X_tab, X_text, X_img, X_train, X_val, target, val_split, transforms', [(X_wide, X_tab, X_text, X_img, None, None, target, None, transforms1), (X_wide, X_tab, X_text, X_img, None, None, target, None, transforms2), (X_wide, X_tab, X_text, X_img, None, None, target, None, None), (X_wide, X_tab, X_text, X_img_norm, None, None, target, None, transforms2), (X_wide, X_tab, X_text, X_img_norm, None, None, target, None, transforms1), (X_wide, X_tab, X_text, X_img_norm, None, None, target, None, None), (X_wide, X_tab, X_text, X_img, None, None, target, 0.2, None), (None, None, None, None, {'X_wide': X_wide, 'X_tab': X_tab, 'X_text': X_text, 'X_img': X_img, 'target': target}, None, None, None, None), (None, None, None, None, {'X_wide': X_wide, 'X_tab': X_tab, 'X_text': X_text, 'X_img': X_img, 'target': target}, None, None, None, transforms1), (None, None, None, None, {'X_wide': X_wide, 'X_tab': X_tab, 'X_text': X_text, 'X_img': X_img, 'target': target}, None, None, 0.2, None), (None, None, None, None, {'X_wide': X_wide, 'X_tab': X_tab, 'X_text': X_text, 'X_img': X_img, 'target': target}, None, None, 0.2, transforms2), (None, None, None, None, {'X_wide': X_wide_tr, 'X_tab': X_tab_tr, 'X_text': X_text_tr, 'X_img': X_img_tr, 'target': y_train}, {'X_wide': X_wide_val, 'X_tab': X_tab_val, 'X_text': X_text_val, 'X_img': X_img_val, 'target': y_val}, None, None, None), (None, None, None, None, {'X_wide': X_wide_tr, 'X_tab': X_tab_tr, 'X_text': X_text_tr, 'X_img': X_img_tr, 'target': y_train}, {'X_wide': X_wide_val, 'X_tab': X_tab_val, 'X_text': X_text_val, 'X_img': X_img_val, 'target': y_val}, None, None, transforms1)])
def test_widedeep_inputs(X_wide, X_tab, X_text, X_img, X_train, X_val, target, val_split, transforms):
    """Smoke test: Trainer.fit must run for every supported way of passing
    inputs (individual arrays, X_train dict, X_train+X_val dicts, with or
    without val_split and image transforms) and record a train loss."""
    model = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage)
    trainer = Trainer(model, objective='binary', transforms=transforms, verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_img, X_train=X_train, X_val=X_val, target=target, val_split=val_split, batch_size=16)
    assert (trainer.history['train_loss'] is not None)
|
@pytest.mark.parametrize('X_wide, X_tab, X_text, X_img, X_train, X_val, target', [(X_wide, X_tab, X_text, X_img, None, {'X_wide': X_wide_val, 'X_tab': X_tab_val, 'X_text': X_text_val, 'X_img': X_img_val, 'target': y_val}, target)])
def test_xtrain_xval_assertion(X_wide, X_tab, X_text, X_img, X_train, X_val, target):
    """Passing X_val without X_train (with loose arrays) must fail the
    Trainer's input validation with an AssertionError."""
    model = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage)
    trainer = Trainer(model, objective='binary', verbose=0)
    with pytest.raises(AssertionError):
        trainer.fit(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_img, X_train=X_train, X_val=X_val, target=target, batch_size=16)
|
@pytest.mark.parametrize('wide, deeptabular, deeptext, deepimage, X_wide, X_tab, X_text, X_img, target', [(wide, None, None, None, X_wide, None, None, None, target), (None, deeptabular, None, None, None, X_tab, None, None, target), (None, None, deeptext, None, None, None, X_text, None, target), (None, None, None, deepimage, None, None, None, X_img, target)])
def test_individual_inputs(wide, deeptabular, deeptext, deepimage, X_wide, X_tab, X_text, X_img, target):
    """Each component must be trainable on its own (only train_loss recorded)."""
    single_component_model = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage)
    trainer = Trainer(single_component_model, objective='binary', verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_img, target=target, batch_size=16)
    # no validation split -> history holds only the 'train_loss' series
    assert len(trainer.history) == 1
|
@pytest.mark.parametrize('deeptabular, deeptext, deepimage, X_tab, X_text, X_img, deephead, target', [(deeptabular, None, None, X_tab, None, None, deephead_ds, target), (None, deeptext, None, None, X_text, None, deephead_dt, target), (None, None, deepimage, None, None, X_img, deephead_di, target)])
def test_deephead_individual_components(deeptabular, deeptext, deepimage, X_tab, X_text, X_img, deephead, target):
    """Each component must also train when topped with a custom deephead."""
    model_with_head = WideDeep(deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage, deephead=deephead)
    trainer = Trainer(model_with_head, objective='binary', verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_img, target=target, batch_size=16)
    # no validation split -> history holds only the 'train_loss' series
    assert len(trainer.history) == 1
|
@pytest.mark.parametrize('deeptabular, deeptext, deepimage, X_tab, X_text, X_img, target', [(deeptabular, None, None, X_tab, None, None, target), (None, deeptext, None, None, X_text, None, target), (None, None, deepimage, None, None, X_img, target)])
def test_head_layers_individual_components(deeptabular, deeptext, deepimage, X_tab, X_text, X_img, target):
    """Each component must also train with an auto-built MLP head [8, 4]."""
    model_with_head = WideDeep(deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage, head_hidden_dims=[8, 4])
    trainer = Trainer(model_with_head, objective='binary', verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_img, target=target, batch_size=16)
    # no validation split -> history holds only the 'train_loss' series
    assert len(trainer.history) == 1
|
@pytest.mark.parametrize('X_wide, X_tab, target, objective, X_test, pred_dim, probs_dim, uncertainties_pred_dim', [(X_wide, X_tab, target_regres, 'regression', None, 1, None, 4), (X_wide, X_tab, target_binary, 'binary', None, 1, 2, 3), (X_wide, X_tab, target_multic, 'multiclass', None, 3, 3, 4), (X_wide, X_tab, target_regres, 'regression', X_test, 1, None, 4), (X_wide, X_tab, target_binary, 'binary', X_test, 1, 2, 3), (X_wide, X_tab, target_multic, 'multiclass', X_test, 3, 3, 4)])
def test_fit_objectives(X_wide, X_tab, target, objective, X_test, pred_dim, probs_dim, uncertainties_pred_dim):
    """Fit + predict/predict_proba/predict_uncertainty for each objective,
    checking output shapes (32 samples; probs are None for regression)."""
    wide = Wide(np.unique(X_wide).shape[0], pred_dim)
    deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[32, 16], mlp_dropout=[0.5, 0.5])
    model = WideDeep(wide=wide, deeptabular=deeptabular, pred_dim=pred_dim)
    trainer = Trainer(model, objective=objective, verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, batch_size=16)
    preds = trainer.predict(X_wide=X_wide, X_tab=X_tab, X_test=X_test)
    probs = trainer.predict_proba(X_wide=X_wide, X_tab=X_tab, X_test=X_test)
    unc_preds = trainer.predict_uncertainty(X_wide=X_wide, X_tab=X_tab, X_test=X_test, uncertainty_granularity=5)
    # regression: predict_proba returns None, compared directly to probs_dim
    if (objective == 'regression'):
        assert ((preds.shape[0], probs, unc_preds.shape[1]) == (32, probs_dim, uncertainties_pred_dim))
    else:
        assert ((preds.shape[0], probs.shape[1], unc_preds.shape[1]) == (32, probs_dim, uncertainties_pred_dim))
|
def test_fit_with_deephead():
    """Fit/predict with a custom deephead; check binary output shapes."""
    wide = Wide(np.unique(X_wide).shape[0], 1)
    deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[-5:], mlp_hidden_dims=[32, 16])
    custom_head = nn.Sequential(nn.Linear(16, 8), nn.Linear(8, 4))
    # WideDeep requires the head to advertise its output dimension
    custom_head.output_dim = 4
    model = WideDeep(wide=wide, deeptabular=deeptabular, pred_dim=1, deephead=custom_head)
    trainer = Trainer(model, objective='binary', verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target_binary, batch_size=16)
    preds = trainer.predict(X_wide=X_wide, X_tab=X_tab, X_test=X_test)
    probs = trainer.predict_proba(X_wide=X_wide, X_tab=X_tab, X_test=X_test)
    unc_preds = trainer.predict_uncertainty(X_wide=X_wide, X_tab=X_tab, X_test=X_test, uncertainty_granularity=5)
    assert preds.shape[0] == 32
    assert probs.shape[1] == 2
    assert unc_preds.shape[1] == 3
|
@pytest.mark.parametrize('X_wide, X_tab, target, objective, X_wide_test, X_tab_test, X_test, pred_dim, probs_dim, uncertainties_pred_dim', [(X_wide, X_tab, target_regres, 'regression', X_wide, X_tab, None, 1, None, 4), (X_wide, X_tab, target_binary, 'binary', X_wide, X_tab, None, 1, 2, 3), (X_wide, X_tab, target_multic, 'multiclass', X_wide, X_tab, None, 3, 3, 4), (X_wide, X_tab, target_regres, 'regression', None, None, X_test, 1, None, 4), (X_wide, X_tab, target_binary, 'binary', None, None, X_test, 1, 2, 3), (X_wide, X_tab, target_multic, 'multiclass', None, None, X_test, 3, 3, 4)])
def test_fit_objectives_tab_transformer(X_wide, X_tab, target, objective, X_wide_test, X_tab_test, X_test, pred_dim, probs_dim, uncertainties_pred_dim):
    """Same objective/shape checks as ``test_fit_objectives`` but with a
    TabTransformer as the deeptabular component."""
    wide = Wide(np.unique(X_wide).shape[0], pred_dim)
    tab_transformer = TabTransformer(column_idx={k: v for (v, k) in enumerate(colnames)}, cat_embed_input=embed_input_tt, continuous_cols=colnames[5:])
    model = WideDeep(wide=wide, deeptabular=tab_transformer, pred_dim=pred_dim)
    trainer = Trainer(model, objective=objective, verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, batch_size=16)
    preds = trainer.predict(X_wide=X_wide, X_tab=X_tab, X_test=X_test)
    probs = trainer.predict_proba(X_wide=X_wide, X_tab=X_tab, X_test=X_test)
    unc_preds = trainer.predict_uncertainty(X_wide=X_wide, X_tab=X_tab, X_test=X_test, uncertainty_granularity=5)
    # regression: predict_proba returns None, compared directly to probs_dim
    if (objective == 'regression'):
        assert ((preds.shape[0], probs, unc_preds.shape[1]) == (32, probs_dim, uncertainties_pred_dim))
    else:
        assert ((preds.shape[0], probs.shape[1], unc_preds.shape[1]) == (32, probs_dim, uncertainties_pred_dim))
|
@pytest.mark.parametrize('X_wide, X_tab, target, objective, X_wide_test, X_tab_test, X_test, pred_dim, probs_dim, uncertainties_pred_dim', [(X_wide, X_tab, target_regres, 'regression', X_wide, X_tab, None, 1, None, 4), (X_wide, X_tab, target_binary, 'binary', X_wide, X_tab, None, 1, 2, 3), (X_wide, X_tab, target_multic, 'multiclass', X_wide, X_tab, None, 3, 3, 4), (X_wide, X_tab, target_regres, 'regression', None, None, X_test, 1, None, 4), (X_wide, X_tab, target_binary, 'binary', None, None, X_test, 1, 2, 3), (X_wide, X_tab, target_multic, 'multiclass', None, None, X_test, 3, 3, 4)])
def test_fit_objectives_tabnet(X_wide, X_tab, target, objective, X_wide_test, X_tab_test, X_test, pred_dim, probs_dim, uncertainties_pred_dim):
    """Same objective/shape checks as ``test_fit_objectives`` but with TabNet
    as the deeptabular component (warnings silenced: wide+TabNet warns)."""
    warnings.filterwarnings('ignore')
    wide = Wide(np.unique(X_wide).shape[0], pred_dim)
    tab_transformer = TabNet(column_idx={k: v for (v, k) in enumerate(colnames)}, cat_embed_input=embed_input, continuous_cols=colnames[5:])
    model = WideDeep(wide=wide, deeptabular=tab_transformer, pred_dim=pred_dim)
    trainer = Trainer(model, objective=objective, verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, batch_size=16)
    preds = trainer.predict(X_wide=X_wide, X_tab=X_tab, X_test=X_test)
    probs = trainer.predict_proba(X_wide=X_wide, X_tab=X_tab, X_test=X_test)
    unc_preds = trainer.predict_uncertainty(X_wide=X_wide, X_tab=X_tab, X_test=X_test, uncertainty_granularity=5)
    # regression: predict_proba returns None, compared directly to probs_dim
    if (objective == 'regression'):
        assert ((preds.shape[0], probs, unc_preds.shape[1]) == (32, probs_dim, uncertainties_pred_dim))
    else:
        assert ((preds.shape[0], probs.shape[1], unc_preds.shape[1]) == (32, probs_dim, uncertainties_pred_dim))
|
def test_fit_with_regression_and_metric():
    """A metric class passed to a regression Trainer shows up in the history."""
    wide = Wide(np.unique(X_wide).shape[0], 1)
    deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[-5:], mlp_hidden_dims=[32, 16], mlp_dropout=[0.5, 0.5])
    model = WideDeep(wide=wide, deeptabular=deeptabular, pred_dim=1)
    trainer = Trainer(model, objective='regression', metrics=[R2Score], verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target_regres, batch_size=16)
    assert 'train_r2' in trainer.history
|
def test_aliases():
    """Trainer must accept the ``loss`` alias for ``objective`` and the
    ``warmup`` alias for ``finetune``, and record which aliases were used.

    NOTE(review): ``trainer.__wd_aliases_used`` is a dunder-style attribute
    accessed from outside any class, so no name mangling applies here —
    presumably Trainer stores it under exactly this name; confirm against the
    Trainer implementation.
    """
    wide = Wide(np.unique(X_wide).shape[0], 1)
    deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[32, 16], mlp_dropout=[0.5, 0.5])
    model = WideDeep(wide=wide, deeptabular=deeptabular, pred_dim=1)
    trainer = Trainer(model, loss='regression', verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target_regres, batch_size=16, warmup=True)
    assert (('train_loss' in trainer.history.keys()) and (trainer.__wd_aliases_used['objective'] == 'loss') and (trainer.__wd_aliases_used['finetune'] == 'warmup'))
|
def test_custom_dataloader():
    """A custom dataloader class (DataLoaderImbalanced) can be passed to fit()."""
    wide_component = Wide(np.unique(X_wide).shape[0], 1)
    tab_component = TabMlp(
        column_idx=column_idx,
        cat_embed_input=embed_input,
        continuous_cols=colnames[-5:],
        mlp_hidden_dims=[32, 16],
        mlp_dropout=[0.5, 0.5],
    )
    wd_model = WideDeep(wide=wide_component, deeptabular=tab_component)
    binary_trainer = Trainer(wd_model, loss='binary', verbose=0)
    binary_trainer.fit(
        X_wide=X_wide,
        X_tab=X_tab,
        target=target_binary_imbalanced,
        batch_size=16,
        custom_dataloader=DataLoaderImbalanced,
    )
    assert 'train_loss' in binary_trainer.history.keys()
|
def test_multiclass_warning():
    """Building a Trainer with a multiclass loss on a pred_dim=1 model raises ValueError."""
    wide_component = Wide(np.unique(X_wide).shape[0], 1)
    tab_component = TabMlp(
        column_idx=column_idx,
        cat_embed_input=embed_input,
        continuous_cols=colnames[-5:],
        mlp_hidden_dims=[32, 16],
        mlp_dropout=[0.5, 0.5],
    )
    wd_model = WideDeep(wide=wide_component, deeptabular=tab_component)
    with pytest.raises(ValueError):
        Trainer(wd_model, loss='multiclass', verbose=0)
|
@pytest.mark.parametrize('initializers, test_layers', [(initializers_1, test_layers), (initializers_2, test_layers)])
def test_initializers_1(initializers, test_layers):
    """After the Trainer applies initializers, every monitored layer must differ
    element-wise from its pre-initialization copy."""
    wide_component = Wide(np.unique(X_wide).shape[0], 1)
    tab_component = TabMlp(
        column_idx=column_idx,
        cat_embed_input=embed_input,
        continuous_cols=colnames[-5:],
        mlp_hidden_dims=[32, 16],
        mlp_dropout=[0.5, 0.5],
    )
    text_component = BasicRNN(vocab_size=vocab_size, embed_dim=32, padding_idx=0)
    image_component = Vision(pretrained_model_setup='resnet18', n_trainable=0)
    wd_model = WideDeep(
        wide=wide_component,
        deeptabular=tab_component,
        deeptext=text_component,
        deepimage=image_component,
        pred_dim=1,
    )
    # Deep-copy the model before the Trainer touches it, keyed by parameter name
    pristine = c(wd_model)
    before = {n: p for (n, p) in pristine.named_parameters() if n in test_layers}
    trainer = Trainer(wd_model, objective='binary', verbose=0, initializers=initializers)
    after = {n: p for (n, p) in trainer.model.named_parameters() if n in test_layers}
    # Every element of every monitored tensor must have changed
    assert all(bool(torch.all(before[name] != after[name])) for name in before)
|
def test_initializers_with_pattern():
    """Initializers declared with a name pattern must leave the text model's
    word embeddings exactly as they were before the Trainer was built."""
    wide_component = Wide(100, 1)
    tab_component = TabMlp(
        column_idx=column_idx,
        cat_embed_input=embed_input,
        continuous_cols=colnames[-5:],
        mlp_hidden_dims=[32, 16],
        mlp_dropout=[0.5, 0.5],
    )
    text_component = BasicRNN(vocab_size=vocab_size, embed_dim=32, padding_idx=0)
    wd_model = WideDeep(wide=wide_component, deeptabular=tab_component, deeptext=text_component, pred_dim=1)
    # Snapshot the embedding weights before initialization runs
    pristine = c(wd_model)
    original_embeds = [p for (n, p) in pristine.named_parameters() if 'word_embed' in n]
    trainer = Trainer(wd_model, objective='binary', verbose=0, initializers=initializers_2)
    current_embeds = [p for (n, p) in trainer.model.named_parameters() if 'word_embed' in n]
    # The pattern excludes word embeddings, so they must be untouched
    assert torch.all(original_embeds[0] == current_embeds[0].cpu())
|