code stringlengths 17 6.64M |
|---|
class RepeatedContinuousStratifiedGroupKFold(_RepeatedSplits):
    """Repeated Stratified-Groups K-Fold cross validator.

    Repeats :class:`julearn.model_selection.ContinuousStratifiedGroupKFold`
    n times with different randomization in each repetition.

    Parameters
    ----------
    n_bins : int
        Number of bins/quantiles to use.
    method : str, default="binning"
        Method used to stratify the groups. Can be either "binning" or
        "quantile". In the first case, the groups are stratified by binning
        the target variable. In the second case, the groups are stratified
        by quantiling the target variable.
    n_splits : int, default=5
        Number of folds. Must be at least 2.
    n_repeats : int, default=10
        Number of times cross-validator needs to be repeated.
    random_state : int, RandomState instance or None, default=None
        Controls the generation of the random states for each repetition.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Notes
    -----
    Randomized CV splitters may return different results for each call of
    split. You can make the results identical by setting `random_state`
    to an integer.
    """

    def __init__(self, n_bins: int, method: str='binning', n_splits: int=5, n_repeats: int=10, random_state: Optional[Union[(int, RandomState)]]=None):
        # Delegate the repetition machinery to sklearn's _RepeatedSplits;
        # it instantiates ContinuousStratifiedGroupKFold once per repeat
        # with a per-repeat random state derived from `random_state`.
        super().__init__(ContinuousStratifiedGroupKFold, n_bins=n_bins, method=method, n_repeats=n_repeats, random_state=random_state, n_splits=n_splits)
|
class StratifiedBootstrap(BaseShuffleSplit):
    """Stratified Bootstrap cross-validation iterator.

    Provides train/test indices using resampling with replacement, respecting
    the distribution of samples for each class.

    Parameters
    ----------
    n_splits : int, default=5
        Number of re-shuffling & splitting iterations.
    test_size : float, int, default=0.5
        If float, should be between 0.0 and 1.0 and represent the proportion
        of groups to include in the test split (rounded up). If int,
        represents the absolute number of test groups. If None, the value is
        set to the complement of the train size.
    train_size : float or int, default=None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the groups to include in the train split. If
        int, represents the absolute number of train groups. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState instance, default=None
        Controls the randomness of the training and testing indices produced.
        Pass an int for reproducible output across multiple function calls.
    """

    def __init__(self, n_splits: int=5, test_size: float=0.5, train_size: Optional[float]=None, random_state: Optional[Union[(int, RandomState)]]=None):
        super().__init__(n_splits=n_splits, test_size=test_size, train_size=train_size, random_state=random_state)

    def _iter_indices(self, X: np.ndarray, y: np.ndarray, groups: Optional[np.ndarray]=None):
        """Generate (train, test) indices.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like of shape (n_samples,)
            The target variable for supervised learning problems.
        groups : array-like of shape (n_samples,), default=None
            Group labels for stratifying the samples used while splitting the
            dataset into train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        # One bootstrap is drawn independently per class so the class
        # proportions of y are preserved in both partitions.
        y_labels = np.unique(y)
        y_inds = [np.where(y == t_y)[0] for t_y in y_labels]
        # Per-class (n_train, n_test) sizes, validated against
        # test_size/train_size exactly like sklearn's ShuffleSplit.
        n_samples = [
            _validate_shuffle_split(
                len(t_inds),
                self.test_size,
                self.train_size,
                default_test_size=self._default_test_size,
            )
            for t_inds in y_inds
        ]
        # BUGFIX: honor self.random_state instead of the global numpy RNG.
        # Previously np.random.choice was used, which made `random_state`
        # a silent no-op despite being documented as controlling the
        # randomness of the produced indices.
        rng = (
            self.random_state
            if isinstance(self.random_state, RandomState)
            else RandomState(self.random_state)
        )
        for _ in range(self.n_splits):
            train = []
            test = []
            for t_inds, (n_train, _) in zip(y_inds, n_samples):
                # Resample the class indices with replacement; the first
                # n_train draws go to train, the remainder to test.
                bs_inds = rng.choice(t_inds, len(t_inds), replace=True)
                train.extend(bs_inds[:n_train])
                test.extend(bs_inds[n_train:])
            yield train, test

    def split(self, X: np.ndarray, y: np.ndarray, groups: Optional[np.ndarray]=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
            Note that providing ``y`` is sufficient to generate the splits and
            hence ``np.zeros(n_samples)`` may be used as a placeholder for
            ``X`` instead of actual training data.
        y : array-like of shape (n_samples,) or (n_samples, n_labels)
            The target variable for supervised learning problems.
            Stratification is done based on the y labels.
        groups : array-like of shape (n_samples,), default=None
            Group labels for stratifying the samples used while splitting the
            dataset into train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.

        Notes
        -----
        Randomized CV splitters may return different results for each call of
        split. You can make the results identical by setting `random_state`
        to an integer.
        """
        return super().split(X, y, groups)
|
def test_register_searcher() -> None:
    """Test registering a searcher."""
    searcher_name = 'custom_grid'
    # An unregistered name must not resolve.
    with pytest.raises(ValueError, match='The specified searcher '):
        get_searcher(searcher_name)
    # After registration, lookup returns the registered class.
    register_searcher(searcher_name, GridSearchCV)
    assert get_searcher(searcher_name) == GridSearchCV
    # Re-registering without overwrite warns but proceeds...
    with pytest.warns(RuntimeWarning, match='searcher named custom_grid already exists.'):
        register_searcher(searcher_name, GridSearchCV)
    # ...overwrite=True is silent...
    register_searcher(searcher_name, GridSearchCV, overwrite=True)
    # ...and overwrite=False turns the collision into an error.
    with pytest.raises(ValueError, match='searcher named custom_grid already exists and '):
        register_searcher(searcher_name, GridSearchCV, overwrite=False)
    reset_searcher_register()
|
def test_reset_searcher() -> None:
    """Test resetting the searcher registry."""
    searcher_name = 'custom_grid'
    # Register and confirm the name resolves.
    register_searcher(searcher_name, GridSearchCV)
    get_searcher(searcher_name)
    # After a reset the custom name must be unknown again.
    reset_searcher_register()
    with pytest.raises(ValueError, match='The specified searcher '):
        get_searcher(searcher_name)
|
def test_continuous_stratified_kfold_binning() -> None:
    """Test continuous stratified K-fold generator using binning."""
    n_samples, n_features = 200, 20
    X = np.random.rand(n_samples, n_features)
    y = np.random.rand(n_samples)
    n_bins = 5
    # Reproduce the binning that the julearn splitter applies internally.
    edges = np.histogram_bin_edges(y, bins=n_bins)
    bins = np.digitize(y, bins=edges[:-1])
    assert len(np.unique(bins)) == n_bins

    def _assert_equal_splits(sk_cv, ju_cv):
        # The julearn splitter on continuous y must match the sklearn
        # splitter on the pre-binned labels, fold by fold.
        for (sk_train, sk_test), (ju_train, ju_test) in zip(
            sk_cv.split(X, bins), ju_cv.split(X, y)
        ):
            assert_array_equal(sk_train, ju_train)
            assert_array_equal(sk_test, ju_test)

    _assert_equal_splits(
        StratifiedKFold(n_splits=3, shuffle=True, random_state=42),
        ContinuousStratifiedKFold(
            n_bins=n_bins, n_splits=3, shuffle=True, random_state=42
        ),
    )
    _assert_equal_splits(
        RepeatedStratifiedKFold(n_repeats=4, n_splits=3, random_state=42),
        RepeatedContinuousStratifiedKFold(
            n_bins=n_bins, n_repeats=4, n_splits=3, random_state=42
        ),
    )
|
def test_continuous_stratified_kfold_quantile() -> None:
    """Test continuous stratified K-fold generator using quantiles."""
    n_samples, n_features = 200, 20
    X = np.random.rand(n_samples, n_features)
    y = np.random.normal(size=n_samples)
    n_bins = 5
    # Quantile edges yield equally populated bins.
    edges = np.quantile(y, np.linspace(0, 1, n_bins + 1))
    bins = np.digitize(y, bins=edges[:-1])
    assert len(np.unique(bins)) == n_bins
    assert all(v == n_samples / n_bins for v in Counter(bins).values())

    def _assert_equal_splits(sk_cv, ju_cv):
        # The julearn splitter on continuous y must match the sklearn
        # splitter on the pre-quantiled labels, fold by fold.
        for (sk_train, sk_test), (ju_train, ju_test) in zip(
            sk_cv.split(X, bins), ju_cv.split(X, y)
        ):
            assert_array_equal(sk_train, ju_train)
            assert_array_equal(sk_test, ju_test)

    _assert_equal_splits(
        StratifiedKFold(n_splits=3, shuffle=True, random_state=42),
        ContinuousStratifiedKFold(
            method='quantile', n_bins=n_bins, n_splits=3, shuffle=True, random_state=42
        ),
    )
    _assert_equal_splits(
        RepeatedStratifiedKFold(n_repeats=4, n_splits=3, random_state=42),
        RepeatedContinuousStratifiedKFold(
            method='quantile', n_bins=n_bins, n_repeats=4, n_splits=3, random_state=42
        ),
    )
|
def test_continuous_stratified_group_kfold_binning() -> None:
    """Test continuous stratified group K-fold generator using binning."""
    n_samples, n_features = 200, 20
    X = np.random.rand(n_samples, n_features)
    y = np.random.rand(n_samples)
    n_bins = 5
    # Reproduce the binning that the julearn splitter applies internally.
    edges = np.histogram_bin_edges(y, bins=n_bins)
    bins = np.digitize(y, bins=edges[:-1])
    assert len(np.unique(bins)) == n_bins
    groups = np.random.randint(0, 50, size=n_samples)
    sk_cv = StratifiedGroupKFold(n_splits=3, shuffle=True, random_state=42)
    ju_cv = ContinuousStratifiedGroupKFold(
        n_bins=n_bins, n_splits=3, shuffle=True, random_state=42
    )
    sk_folds = sk_cv.split(X, bins, groups=groups)
    ju_folds = ju_cv.split(X, y, groups=groups)
    # Fold-by-fold, the group-aware splits must be identical.
    for (sk_train, sk_test), (ju_train, ju_test) in zip(sk_folds, ju_folds):
        assert_array_equal(sk_train, ju_train)
        assert_array_equal(sk_test, ju_test)
|
def test_continuous_stratified_group_kfold_quantile() -> None:
    """Test continuous stratified group K-fold generator using quantiles."""
    n_samples, n_features = 200, 20
    X = np.random.rand(n_samples, n_features)
    y = np.random.normal(size=n_samples)
    n_bins = 5
    # Quantile edges yield equally populated bins.
    edges = np.quantile(y, np.linspace(0, 1, n_bins + 1))
    bins = np.digitize(y, bins=edges[:-1])
    assert len(np.unique(bins)) == n_bins
    assert all(v == n_samples / n_bins for v in Counter(bins).values())
    groups = np.random.randint(0, 50, size=n_samples)
    sk_cv = StratifiedGroupKFold(n_splits=3, shuffle=True, random_state=42)
    ju_cv = ContinuousStratifiedGroupKFold(
        method='quantile', n_bins=n_bins, n_splits=3, shuffle=True, random_state=42
    )
    sk_folds = sk_cv.split(X, bins, groups=groups)
    ju_folds = ju_cv.split(X, y, groups=groups)
    # Fold-by-fold, the group-aware splits must be identical.
    for (sk_train, sk_test), (ju_train, ju_test) in zip(sk_folds, ju_folds):
        assert_array_equal(sk_train, ju_train)
        assert_array_equal(sk_test, ju_test)
|
@pytest.mark.parametrize('n_classes, test_size', [(3, 0.2), (2, 0.5), (4, 0.8)])
def test_stratified_bootstrap(n_classes: int, test_size: float) -> None:
    """Test stratified bootstrap CV generator.

    Parameters
    ----------
    n_classes : int
        Number of classes.
    test_size : float
        Test size.
    """
    n_samples = 100
    X = np.random.rand(n_samples, 2)
    y = np.random.randint(0, n_classes, n_samples)
    cv = StratifiedBootstrap(n_splits=10, test_size=test_size)
    for train, test in cv.split(X, y):
        y_train, y_test = y[train], y[test]
        # Each class must keep its proportion in both partitions,
        # up to the rounding of the per-class split sizes.
        for klass in range(n_classes):
            n_class_total = (y == klass).sum()
            n_class_train = (y_train == klass).sum()
            n_class_test = (y_test == klass).sum()
            assert abs(n_class_train - n_class_total * (1 - test_size)) < 1
            assert abs(n_class_test - n_class_total * test_size) < 1
|
def list_models() -> List[str]:
    """List all the available model names.

    Returns
    -------
    list of str
        A list with all the available model names.
    """
    # The registry keys are exactly the public model names.
    return list(_available_models.keys())
|
def get_model(name: str, problem_type: str, **kwargs: Any) -> ModelLike:
    """Get a model.

    Parameters
    ----------
    name : str
        The model name.
    problem_type : str
        The type of problem. See :func:`.run_cross_validation`.
    **kwargs : dict
        Extra keyword arguments passed to the model constructor.

    Returns
    -------
    ModelLike
        The model object.

    Raises
    ------
    ValueError
        If the model name is not registered, or the registered model does
        not support the requested problem type.
    """
    if name not in _available_models:
        raise_error(
            f'The specified model ({name}) is not available. '
            f'Valid options are: {list(_available_models.keys())}'
        )
    if problem_type not in _available_models[name]:
        # BUGFIX: message previously read "({name})) is not suitable
        # for{problem_type}" — stray ")" and missing space.
        raise_error(
            f'The specified model ({name}) is not suitable for {problem_type}'
        )
    # Instantiate the registered class for the requested problem type.
    out = _available_models[name][problem_type](**kwargs)
    return out
|
def register_model(model_name: str, classification_cls: Optional[Type[ModelLike]]=None, regression_cls: Optional[Type[ModelLike]]=None, overwrite: Optional[bool]=None):
    """Register a model to julearn.

    This function allows you to add a model or models for different problem
    types to julearn. Afterwards, it behaves like every other julearn model
    and can be referred to by name. E.g. you can use it inside of
    `run_cross_validation` using `model=model_name`.

    Parameters
    ----------
    model_name : str
        Name by which the model will be referenced.
    classification_cls : ModelLike
        The class which will be used for classification.
    regression_cls : ModelLike
        The class which will be used for regression.
    overwrite : bool, optional
        Decides whether overwrite should be allowed.

        Options are:

        * None : overwrite is possible, but warns the user (default).
        * True : overwrite is possible without any warning.
        * False : overwrite is not possible, error is raised instead.

    Raises
    ------
    ValueError
        If `model_name` is already registered and `overwrite` is False.

    Warns
    -----
    RuntimeWarning
        If `model_name` is already registered and `overwrite` is None.
    """
    problem_types = ['classification', 'regression']
    for cls, problem_type in zip([classification_cls, regression_cls], problem_types):
        if cls is not None:
            if (t_available := _available_models.get(model_name)) is not None:
                if t_available.get(problem_type):
                    # BUGFIX: the messages used escaped braces
                    # ("{{problem_type}}"), so the literal text
                    # "{problem_type}" was emitted instead of the value.
                    # Also fixed "If you won't to reset" and the
                    # self-contradictory error wording.
                    if overwrite is None:
                        warn_with_log(
                            f'Model named {model_name} with problem type '
                            f'{problem_type} already exists. Therefore, '
                            f'{model_name} will be overwritten. To remove '
                            'this warning set overwrite=True. If you want '
                            'to reset this use '
                            '`julearn.estimators.reset_model_register`.'
                        )
                    elif overwrite is False:
                        raise_error(
                            f'Model named {model_name} with problem type '
                            f'{problem_type} already exists and overwrite '
                            'is set to False, therefore you cannot '
                            'overwrite existing models. Set overwrite=True '
                            'in case you want to overwrite existing models.'
                        )
                logger.info(f'registering model named {model_name} with problem_type {problem_type}')
                _available_models[model_name][problem_type] = cls
            else:
                # First registration under this name: create the entry.
                logger.info(f'registering model named {model_name} with problem_type {problem_type}')
                _available_models[model_name] = {problem_type: cls}
|
def reset_model_register() -> None:
    """Reset the model register to the default state."""
    global _available_models
    # Restore a deep copy of the pristine registry so that later
    # registrations cannot mutate the saved default state.
    _available_models = deepcopy(_available_models_reset)
|
def test_register_model() -> None:
    """Test the register model function."""
    register_model('dt', classification_cls=DecisionTreeClassifier, regression_cls=DecisionTreeRegressor)
    # Both problem types must now resolve to the registered classes.
    clf_model = get_model('dt', 'classification')
    reg_model = get_model('dt', 'regression')
    assert isinstance(clf_model, DecisionTreeClassifier)
    assert isinstance(reg_model, DecisionTreeRegressor)
    # After the reset, 'dt' must be unknown again.
    reset_model_register()
    with pytest.raises(ValueError, match='The specified model '):
        get_model('dt', 'classification')
|
def test_register_warning() -> None:
    """Test the register model function warnings."""
    # Overwriting an existing name without specifying overwrite warns.
    with pytest.warns(RuntimeWarning, match='Model name'):
        register_model('rf', regression_cls=RandomForestRegressor)
    reset_model_register()
    # With overwrite=False, the same collision becomes an error.
    with pytest.raises(ValueError, match='Model name'):
        register_model('rf', regression_cls=RandomForestRegressor, overwrite=False)
    reset_model_register()
    # With overwrite=True, no warning may be emitted at all: escalate
    # any warning to an error inside this context.
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        register_model('rf', regression_cls=RandomForestRegressor, overwrite=True)
    reset_model_register()
|
@fixture(params=['METADES', 'SingleBest', 'StaticSelection', 'StackedClassifier', 'KNORAU', 'KNORAE', 'DESP', 'OLA', 'MCB', 'KNOP'], scope='module')
def all_deslib_algorithms(request: FixtureRequest) -> str:
    """Return the name of a deslib algorithm (parametrized fixture).

    Parameters
    ----------
    request : pytest.FixtureRequest
        The request object.

    Returns
    -------
    str
        The name of the deslib algorithm to test.
    """
    return request.param
|
@pytest.mark.parametrize('algo_name', [lazy_fixture('all_deslib_algorithms')])
@pytest.mark.skip('Deslib is not compatible with new python. Waiting for PR.')
def test_algorithms(df_iris: pd.DataFrame, algo_name: str) -> None:
    """Test all the algorithms from deslib.

    Parameters
    ----------
    df_iris : pd.DataFrame
        Iris dataset.
    algo_name : str
        Name of the algorithm.
    """
    df_iris = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    features = ['sepal_length', 'sepal_width', 'petal_length']
    target = 'species'
    seed = 42
    ds_split = 0.2
    # Fit through the julearn wrapper (seed the global RNG first so both
    # code paths see the same random stream).
    np.random.seed(seed)
    base_ensemble = RandomForestClassifier(random_state=6)
    ju_model = DynamicSelection(ensemble=base_ensemble, algorithm=algo_name, random_state=seed, random_state_algorithm=seed, ds_split=ds_split)
    ju_model.fit(df_iris[features], df_iris[target])
    score_julearn = ju_model.score(df_iris[features], df_iris[target])
    pred_julearn = ju_model.predict(df_iris[features])
    # Reproduce the exact same procedure manually with deslib.
    np.random.seed(seed)
    X_train, X_dsel, y_train, y_dsel = train_test_split(df_iris[features], df_iris[target], test_size=ds_split, random_state=seed)
    pool_classifiers = RandomForestClassifier(random_state=6)
    pool_classifiers.fit(X_train, y_train)
    algo_cls = _algorithm_objects[algo_name]
    model_deslib = algo_cls(pool_classifiers, random_state=seed)
    model_deslib.fit(X_dsel, y_dsel)
    score_deslib = model_deslib.score(df_iris[features], df_iris[target])
    pred_deslib = model_deslib.predict(df_iris[features])
    # Wrapper and manual pipeline must agree exactly.
    assert score_deslib == score_julearn
    assert (pred_deslib == pred_julearn).all()
    if hasattr(model_deslib, 'predict_proba'):
        pred_proba_julearn = ju_model.predict_proba(df_iris[features])
        pred_proba_deslib = model_deslib.predict_proba(df_iris[features].values)
        assert (pred_proba_deslib == pred_proba_julearn).all()
|
def test_wrong_algo(df_iris: pd.DataFrame) -> None:
    """Test wrong algorithm.

    Parameters
    ----------
    df_iris : pd.DataFrame
        Iris dataset.
    """
    df_iris = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    features = ['sepal_length', 'sepal_width', 'petal_length']
    target = 'species'
    # An unsupported algorithm name must raise at construction/fit time.
    with pytest.raises(ValueError, match='wrong is not a valid or supported'):
        bad_model = DynamicSelection(ensemble=RandomForestClassifier(), algorithm='wrong')
        bad_model.fit(df_iris[features], df_iris[target])
|
@pytest.mark.parametrize('ds_split', [0.2, 0.3, [train_test_split(np.arange(20), test_size=0.4, shuffle=True)], ShuffleSplit(n_splits=1)])
@pytest.mark.skip('Deslib is not compatible with new python. Waiting for PR.')
def test_ds_split_parameter(ds_split: Any, df_iris: pd.DataFrame) -> None:
    """Test ds_split parameter.

    Parameters
    ----------
    ds_split : float or tuple or sklearn.model_selection._split.ShuffleSplit
        ds_split parameter.
    df_iris : pd.DataFrame
        Iris dataset.
    """
    subset = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    # Shuffle the rows to avoid any ordering effects.
    subset = subset.sample(n=len(subset))
    features = ['sepal_length', 'sepal_width', 'petal_length']
    target = 'species'
    # Each supported ds_split flavor must fit without errors.
    model = DynamicSelection(ensemble=RandomForestClassifier(), algorithm='METADES', ds_split=ds_split)
    model.fit(subset[features], subset[target])
|
@pytest.mark.parametrize('ds_split', [4, ShuffleSplit(n_splits=2)])
@pytest.mark.skip('Deslib is not compatible with new python. Waiting for PR.')
def test_ds_split_error(ds_split: Any, df_iris: pd.DataFrame) -> None:
    """Test ds_split errors.

    Parameters
    ----------
    ds_split : float or tuple or sklearn.model_selection._split.ShuffleSplit
        ds_split parameter.
    df_iris : pd.DataFrame
        Iris dataset.
    """
    subset = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    # Shuffle the rows to avoid any ordering effects.
    subset = subset.sample(n=len(subset))
    features = ['sepal_length', 'sepal_width', 'petal_length']
    target = 'species'
    # Invalid ds_split values must be rejected.
    with pytest.raises(ValueError, match='ds_split only allows'):
        model = DynamicSelection(ensemble=RandomForestClassifier(), algorithm='METADES', ds_split=ds_split)
        model.fit(subset[features], subset[target])
|
@pytest.mark.parametrize('model_name, model_class, model_params', [('nb_bernoulli', BernoulliNB, {}), ('nb_categorical', CategoricalNB, {}), ('nb_complement', ComplementNB, {}), ('nb_gaussian', GaussianNB, {}), ('nb_multinomial', MultinomialNB, {})])
def test_naive_bayes_estimators(df_iris: pd.DataFrame, model_name: str, model_class: Type[ModelLike], model_params: Dict[(str, Any)]) -> None:
    """Test all naive bayes estimators.

    Parameters
    ----------
    df_iris : pd.DataFrame
        Iris dataset.
    model_name : str
        Name of the model to test.
    model_class : ModelLike
        Class of the model to test.
    model_params : dict
        Parameters to pass to the model.
    """
    df_binary = df_iris[df_iris['species'].isin(['setosa', 'virginica'])]
    X = ['sepal_length', 'sepal_width', 'petal_length']
    X_types = {'continuous': X}
    y = 'species'
    # Build the julearn param dict and the reference sklearn model.
    if model_params:
        ju_model_params = {f'{model_name}__{t_param}': t_value for t_param, t_value in model_params.items()}
        sk_model = model_class(**model_params)
    else:
        ju_model_params = None
        sk_model = model_class()
    t_df_binary = df_binary.copy(deep=True)
    t_df = df_iris.copy(deep=True)
    if model_name == 'nb_categorical':
        # Categorical NB needs discrete features: binarize around the mean.
        t_df_binary[X] = t_df_binary[X] > t_df_binary[X].mean()
        t_df[X] = t_df[X] > t_df[X].mean()
    # Binary problem.
    scorers = ['accuracy']
    api_params = {'model': model_name, 'model_params': ju_model_params, 'preprocess': None, 'problem_type': 'classification'}
    do_scoring_test(X, y, X_types=X_types, data=t_df_binary, api_params=api_params, sklearn_model=make_pipeline(clone(sk_model)), scorers=scorers)
    # Multiclass problem.
    api_params = {'model': model_name, 'model_params': ju_model_params, 'preprocess': None, 'problem_type': 'classification'}
    do_scoring_test(X, y, X_types=X_types, data=t_df, api_params=api_params, sklearn_model=make_pipeline(clone(sk_model)), scorers=scorers)
    if model_name != 'nb_bernoulli':
        # Binary metrics with an explicit positive label.
        scorers = ['recall', 'precision', 'f1']
        sk_y = (t_df_binary[y] == 'setosa').values.astype(int)
        api_params = {'model': model_name, 'pos_labels': 'setosa', 'model_params': ju_model_params, 'preprocess': None, 'problem_type': 'classification'}
        do_scoring_test(X, y, X_types=X_types, data=t_df_binary, api_params=api_params, sklearn_model=make_pipeline(clone(sk_model)), scorers=scorers, sk_y=sk_y)
|
@pytest.mark.parametrize('model_name, model_class, model_params', [('svm', SVC, {}), ('rf', RandomForestClassifier, {'n_estimators': 10, 'random_state': 42}), ('et', ExtraTreesClassifier, {'n_estimators': 10, 'random_state': 42}), ('dummy', DummyClassifier, {'strategy': 'prior'}), ('gauss', GaussianProcessClassifier, {}), ('logit', LogisticRegression, {}), ('logitcv', LogisticRegressionCV, {}), ('ridge', RidgeClassifier, {}), ('ridgecv', RidgeClassifierCV, {}), ('sgd', SGDClassifier, {'random_state': 2}), ('adaboost', AdaBoostClassifier, {'random_state': 42}), ('bagging', BaggingClassifier, {'random_state': 42, 'estimator': DecisionTreeClassifier(random_state=42)}), ('gradientboost', GradientBoostingClassifier, {})])
def test_classificationestimators(df_binary: pd.DataFrame, model_name: str, model_class: Type[ModelLike], model_params: Dict) -> None:
    """Test all classification estimators.

    Parameters
    ----------
    df_binary : pd.DataFrame
        Binary classification dataset.
    model_name : str
        Name of the model to test.
    model_class : ModelLike
        Class of the model to test.
    model_params : dict
        Parameters to pass to the model.
    """
    # SGD is not fully deterministic across runs: relax the comparison.
    decimal = 5 if model_name != 'sgd' else -1
    X = ['sepal_length', 'sepal_width', 'petal_length']
    X_types = {'continuous': X}
    y = 'species'
    # Build the julearn param dict and the reference sklearn model.
    if model_params:
        ju_model_params = {f'{model_name}__{t_param}': t_value for t_param, t_value in model_params.items()}
        sk_model = model_class(**model_params)
    else:
        ju_model_params = None
        sk_model = model_class()
    scorers = ['accuracy']
    api_params = {'model': model_name, 'model_params': ju_model_params, 'problem_type': 'classification', 'preprocess': 'zscore'}
    do_scoring_test(X, y, X_types=X_types, data=df_binary, api_params=api_params, sklearn_model=make_pipeline(StandardScaler(), clone(sk_model)), scorers=scorers, decimal=decimal)
    if model_name != 'dummy':
        # Binary metrics with an explicit positive label.
        scorers = ['recall', 'precision', 'f1']
        sk_y = (df_binary[y] == 'setosa').values.astype(np.int64)
        api_params = {'model': model_name, 'pos_labels': 'setosa', 'model_params': ju_model_params, 'problem_type': 'classification', 'preprocess': 'zscore'}
        do_scoring_test(X, y, X_types=X_types, data=df_binary, api_params=api_params, sklearn_model=make_pipeline(StandardScaler(), clone(sk_model)), scorers=scorers, sk_y=sk_y, decimal=decimal)
|
@pytest.mark.parametrize('model_name, model_class, model_params', [('svm', SVR, {}), ('rf', RandomForestRegressor, {'n_estimators': 10, 'random_state': 42}), ('et', ExtraTreesRegressor, {'n_estimators': 10, 'random_state': 42}), ('dummy', DummyRegressor, {'strategy': 'mean'}), ('gauss', GaussianProcessRegressor, {'random_state': 42}), ('linreg', LinearRegression, {}), ('ridge', Ridge, {}), ('ridgecv', RidgeCV, {}), ('sgd', SGDRegressor, {'random_state': 2}), ('adaboost', AdaBoostRegressor, {'random_state': 2}), ('bagging', BaggingRegressor, {'random_state': 2}), ('gradientboost', GradientBoostingRegressor, {'random_state': 42})])
def test_regression_estimators(df_binary: pd.DataFrame, model_name: str, model_class: Type[ModelLike], model_params: Dict) -> None:
    """Test all regression estimators.

    Parameters
    ----------
    df_binary : pd.DataFrame
        Binary classification dataset.
    model_name : str
        Name of the model to test.
    model_class : ModelLike
        Class of the model to test.
    model_params : dict
        Parameters to pass to the model.
    """
    X = ['sepal_length', 'sepal_width', 'petal_length']
    X_types = {'continuous': X}
    y = 'petal_width'
    # Build the julearn param dict and the reference sklearn model.
    if model_params:
        ju_model_params = {f'{model_name}__{t_param}': t_value for t_param, t_value in model_params.items()}
        sk_model = model_class(**model_params)
    else:
        ju_model_params = None
        sk_model = model_class()
    scorers = ['neg_root_mean_squared_error', 'r2']
    api_params = {'model': model_name, 'model_params': ju_model_params, 'preprocess': 'zscore', 'problem_type': 'regression'}
    do_scoring_test(X, y, X_types=X_types, data=df_binary, api_params=api_params, sklearn_model=make_pipeline(StandardScaler(), clone(sk_model)), scorers=scorers, decimal=2)
|
def test_wrong_problem_types() -> None:
    """Test models with wrong problem types."""
    # A regression-only model cannot be requested for classification.
    with pytest.raises(ValueError, match='is not suitable for'):
        get_model('linreg', 'classification')
    # Unknown model names are rejected outright.
    with pytest.raises(ValueError, match='is not available'):
        get_model('wrong', 'classification')
|
def merge_pipelines(*pipelines: EstimatorLike, search_params: Dict) -> Pipeline:
    """Merge multiple pipelines into a single one.

    Parameters
    ----------
    pipelines : List[EstimatorLike]
        List of estimators that will be merged.
    search_params : Dict
        Dictionary with the search parameters.

    Returns
    -------
    merged : BaseSearchCV
        The merged pipeline as a searcher.
    """
    search_params = prepare_search_params(search_params)
    # Pass 1: every input must be a Pipeline or a searcher whose kind is
    # compatible with the requested search_params['kind'].
    for p in pipelines:
        if (not isinstance(p, (Pipeline, GridSearchCV, RandomizedSearchCV))):
            raise_error(f'Only pipelines and searchers are supported. Found {type(p)} instead.')
        if isinstance(p, GridSearchCV):
            if (search_params['kind'] != 'grid'):
                raise_error('At least one of the pipelines to merge is a GridSearchCV, but the search params do not specify a grid search. These pipelines cannot be merged.')
        elif isinstance(p, RandomizedSearchCV):
            if (search_params['kind'] != 'random'):
                raise_error('At least one of the pipelines to merge is a RandomizedSearchCV, but the search params do not specify a random search. These pipelines cannot be merged.')
    # The first pipeline (unwrapped from its searcher, if any) defines the
    # reference step structure all others must share.
    reference_pipeline = pipelines[0]
    if isinstance(reference_pipeline, (GridSearchCV, RandomizedSearchCV)):
        reference_pipeline = reference_pipeline.estimator
    step_names = reference_pipeline.named_steps.keys()
    # Pass 2: all inputs must be pipelines with identical step names.
    for p in pipelines:
        if isinstance(p, (GridSearchCV, RandomizedSearchCV)):
            p = p.estimator
        if (not isinstance(p, Pipeline)):
            raise_error('All searchers must use a pipeline.')
        if (step_names != p.named_steps.keys()):
            raise_error('All pipelines must have the same named steps.')
    # Pass 3: find the step names whose estimator differs from the
    # reference in at least one of the other pipelines. These become
    # hyperparameter choices in the merged search space.
    different_steps = []
    for t_step_name in step_names:
        t = reference_pipeline.named_steps[t_step_name]
        for s in pipelines[1:]:
            if isinstance(s, (GridSearchCV, RandomizedSearchCV)):
                if (s.estimator.named_steps[t_step_name] != t):
                    different_steps.append(t_step_name)
                    break
            elif (s.named_steps[t_step_name] != t):
                different_steps.append(t_step_name)
                break
    # Pass 4: build one grid per input pipeline. Start from the input's
    # own search grid/distributions (empty for plain pipelines), then pin
    # each differing step to that pipeline's own estimator so the searcher
    # evaluates each original configuration.
    all_grids = []
    for s in pipelines:
        if isinstance(s, GridSearchCV):
            t_grid = s.param_grid.copy()
        elif isinstance(s, RandomizedSearchCV):
            t_grid = s.param_distributions.copy()
        else:
            t_grid = {}
        for t_name in different_steps:
            if isinstance(s, (GridSearchCV, RandomizedSearchCV)):
                t_grid[t_name] = [s.estimator.named_steps[t_name]]
            else:
                t_grid[t_name] = [s.named_steps[t_name]]
        all_grids.append(t_grid)
    # Wrap the reference pipeline in a searcher over the combined grids.
    new_searcher = _prepare_hyperparameter_tuning(all_grids, search_params, reference_pipeline)
    return new_searcher
|
def _params_to_pipeline(param: Any, X_types: Dict[(str, List)], search_params: Optional[Dict]):
    """Recursively convert params to pipelines.

    Parameters
    ----------
    param : Any
        The parameter to convert.
    X_types : Dict[str, List]
        The types of the columns in the data.
    search_params : Optional[Dict]
        The parameters to tune for this step, by default None

    Returns
    -------
    Any
        The converted parameter.
    """
    # A creator is converted directly; containers are walked recursively
    # so nested creators are converted too. Anything else passes through.
    if isinstance(param, PipelineCreator):
        return param.to_pipeline(X_types=X_types, search_params=search_params)
    if isinstance(param, list):
        return [_params_to_pipeline(item, X_types, search_params) for item in param]
    if isinstance(param, dict):
        return {
            key: _params_to_pipeline(value, X_types, search_params)
            for key, value in param.items()
        }
    if isinstance(param, tuple):
        return tuple(
            _params_to_pipeline(item, X_types, search_params) for item in param
        )
    return param
|
@dataclass
class Step():
    """Step class.

    This class represents a step in a pipeline.

    Parameters
    ----------
    name : str
        The name of the step.
    estimator : Any
        The estimator to use.
    apply_to : ColumnTypesLike
        The types to apply this step to, by default "continuous".
    needed_types : Any, optional
        The types needed by this step (default is None).
    row_select_col_type : str or list of str or set of str or ColumnTypes
        The column types needed to select rows (default is None).
    row_select_vals : str, int, bool or list of str, int, bool
        The value(s) which should be selected in the row_select_col_type
        to select the rows used for training (default is None).
    params_to_tune : Optional[Dict], optional
        The parameters to tune for this step, by default None.
    """

    name: str
    estimator: Union[(JuEstimatorLike, EstimatorLike)]
    # default_factory is wrapped in a lambda so every instance gets its
    # own ColumnTypes object (dataclasses forbid mutable defaults).
    apply_to: ColumnTypes = field(default_factory=(lambda : ColumnTypes('continuous')))
    needed_types: Optional[ColumnTypesLike] = None
    params_to_tune: Optional[Dict] = None
    row_select_col_type: Optional[ColumnTypesLike] = None
    row_select_vals: Optional[Union[(str, int, list, bool)]] = None
    def __post_init__(self) -> None:
        """Normalize fields after dataclass initialization."""
        # Replace a None params_to_tune with an empty dict so consumers
        # can iterate/merge it unconditionally.
        self.params_to_tune = ({} if (self.params_to_tune is None) else self.params_to_tune)
|
class PipelineCreator:
    """Helper to create complex :class:`sklearn.pipeline.Pipeline` objects.

    As the creation of a pipeline is a bit more complicated than just
    adding steps to a pipeline, this helper class is provided so the user
    can easily create complex pipelines.

    Parameters
    ----------
    problem_type : {"classification", "regression"}
        The problem type for which the pipeline should be created.
    apply_to : ColumnTypesLike, optional
        To what should the transformers be applied to if not specified in
        the `add` method (default is continuous).
    """

    def __init__(self, problem_type: str, apply_to: ColumnTypesLike = "continuous"):
        if problem_type not in ["classification", "regression"]:
            raise_error(
                "`problem_type` should be either 'classification' or "
                "'regression'."
            )
        self._steps = []
        # Step-ordering state: transformers must come before an optional
        # target transformer, which must come before the model.
        self._added_target_transformer = False
        self._added_model = False
        self.apply_to = apply_to
        self.problem_type = problem_type

    def add(
        self,
        step: Union[EstimatorLike, str, TargetPipelineCreator],
        name: Optional[str] = None,
        apply_to: Optional[ColumnTypesLike] = None,
        row_select_col_type: Optional[ColumnTypesLike] = None,
        row_select_vals: Optional[Union[str, int, list, bool]] = None,
        **params: Any,
    ) -> "PipelineCreator":
        """Add a step to the PipelineCreator.

        Parameters
        ----------
        step : EstimatorLike
            The step that should be added. This can be an
            available_transformer or available_model as a str or a sklearn
            compatible transformer or model.
        name : str, optional
            The name of the step. If None, the name will be obtained from
            the step (default is None).
        apply_to : ColumnTypesLike, optional
            To what should the transformer or model be applied to. This can
            be a str representing a column type or a list of such str
            (defaults to the `PipelineCreator.apply_to` attribute).
        row_select_col_type : str or list of str or set of str or ColumnTypes
            The column types needed to select rows (default is None).
        row_select_vals : str, int, bool or list of str, int, bool
            The value(s) which should be selected in the
            row_select_col_type to select the rows used for training
            (default is None).
        **params
            Parameters for the step. This will mostly include
            hyperparameters or any other parameter for initialization. If
            you provide multiple options for hyperparameters then this
            will lead to a pipeline with a search.

        Returns
        -------
        PipelineCreator
            The PipelineCreator with the added step as its last step.

        Raises
        ------
        ValueError
            If the step is not a valid step, if the problem_type is
            specified in the params or if the step is a
            TargetPipelineCreator and the apply_to is not "target".
        """
        # The problem type is fixed at construction time; do not allow it
        # to sneak in through the step parameters.
        if "problem_type" in params:
            raise_error(
                "Please provide the problem_type directly and only to the "
                "PipelineCreator like this "
                "PipelineCreator(problem_type=problem_type)"
            )
        apply_to = self.apply_to if apply_to is None else apply_to
        apply_to = ColumnTypes(apply_to)
        # A TargetPipelineCreator is converted to a pipeline and can only
        # be applied to the target.
        if isinstance(step, TargetPipelineCreator):
            if apply_to != "target":
                raise_error(
                    "TargetPipelineCreator can only be added to the target."
                )
            step = step.to_pipeline()
        self._validate_step(step, apply_to)
        # Repeated step names are only allowed consecutively, as they
        # encode alternative configurations for hyperparameter search.
        if name is not None and name in [t_step.name for t_step in self._steps]:
            if self._steps[-1].name != name:
                raise_error(
                    "Repeated step names are only allowed to be added "
                    "consecutively. That means that all the steps with the "
                    "same name should be added one after the other. "
                    f"The step {name} was already added."
                )
        name = self._get_step_name(name, step)
        logger.info(f"Adding step {name} that applies to {apply_to}")
        # Split the given parameters into values to set directly and
        # values to tune: a non-string, non-estimator iterable with more
        # than one element is a hyperparameter search space.
        params_to_set = {}
        params_to_tune = {}
        for param, vals in params.items():
            if (
                hasattr(vals, "__iter__")
                and not hasattr(vals, "fit")
                and not isinstance(vals, str)
            ):
                if len(vals) > 1:
                    logger.info(f"Tuning hyperparameter {param} = {vals}")
                    params_to_tune[param] = vals
                else:
                    logger.info(f"Setting hyperparameter {param} = {vals[0]}")
                    params_to_set[param] = vals[0]
            else:
                logger.info(f"Setting hyperparameter {param} = {vals}")
                params_to_set[param] = vals
        if isinstance(step, str):
            step = self._get_estimator_from(
                step, self.problem_type, **params_to_set
            )
        # NOTE(review): when `step` is given as an estimator object,
        # `params_to_set` is not applied to it — confirm this is intended.
        if isinstance(step, JuEstimatorLike):
            if "apply_to" in step.get_params(deep=False):
                step.set_params(apply_to=apply_to)
            needed_types = step.get_needed_types()
        elif isinstance(step, JuTargetPipeline):
            needed_types = apply_to
            if step.needed_types is not None:
                needed_types.add(step.needed_types)
        else:
            needed_types = apply_to
        if isinstance(step, JuTransformer):
            step.set_params(
                row_select_col_type=row_select_col_type,
                row_select_vals=row_select_vals,
            )
            needed_types = step.get_needed_types()
        if apply_to == "target":
            name = f"target_{name}"
        # Prefix the tuning parameters with the step name, as required by
        # scikit-learn search parameter grids.
        params_to_tune = {
            f"{name}__{param}": val for param, val in params_to_tune.items()
        }
        self._steps.append(
            Step(
                name=name,
                estimator=step,
                apply_to=apply_to,
                needed_types=needed_types,
                params_to_tune=params_to_tune,
                row_select_col_type=row_select_col_type,
                row_select_vals=row_select_vals,
            )
        )
        logger.info("Step added")
        return self

    @property
    def steps(self) -> List[Step]:
        """Get the steps that have been added to the PipelineCreator."""
        return self._steps

    def __str__(self) -> str:
        """Get a string representation of the PipelineCreator."""
        out = "PipelineCreator:\n"
        for i_step, step in enumerate(self._steps):
            out += f" Step {i_step}: {step.name}\n"
            out += f" estimator: {step.estimator}\n"
            out += f" apply to: {step.apply_to}\n"
            out += f" needed types: {step.needed_types}\n"
            out += f" tuning params: {step.params_to_tune}\n"
        return out

    def has_model(self) -> bool:
        """Whether the PipelineCreator has a model."""
        return self._added_model

    def copy(self) -> "PipelineCreator":
        """Create a copy of the PipelineCreator.

        Returns
        -------
        PipelineCreator
            The copy of the PipelineCreator.
        """
        other = PipelineCreator(
            problem_type=self.problem_type, apply_to=self.apply_to
        )
        # NOTE: the _added_* flags are intentionally not copied here;
        # `split` restores them explicitly on every copy it makes.
        other._steps = self._steps.copy()
        return other

    @classmethod
    def from_list(
        cls,
        transformers: Union[str, list],
        model_params: dict,
        problem_type: str,
        apply_to: ColumnTypesLike = "continuous",
    ) -> "PipelineCreator":
        """Create a PipelineCreator from a list of transformers and params.

        Parameters
        ----------
        transformers : str or list
            The transformers that should be added to the PipelineCreator.
        model_params : dict
            The parameters for the model and the transformers, keyed as
            "STEP__PARAMETER".
        problem_type : str
            The problem_type for which the pipeline should be created.
        apply_to : ColumnTypesLike, optional
            To what should the transformers be applied to if not specified
            in the `add` method (default is continuous).

        Returns
        -------
        PipelineCreator
            The PipelineCreator with the steps added.
        """
        creator = cls(problem_type=problem_type, apply_to=apply_to)
        if isinstance(transformers, str):
            transformers = [transformers]
        for transformer_name in transformers:
            # Pick the parameters that belong to this transformer and
            # strip the "name__" prefix.
            t_params = {
                x.replace(f"{transformer_name}__", ""): y
                for x, y in model_params.items()
                if x.startswith(f"{transformer_name}__")
            }
            creator.add(transformer_name, **t_params)
        return creator

    def split(self) -> List["PipelineCreator"]:
        """Split the PipelineCreator into multiple PipelineCreators.

        If the PipelineCreator has at least two steps with the same name,
        this is considered a split point for hyperparameter tuning. This
        function will split the PipelineCreator into multiple
        PipelineCreators, one for each split point, recursively, creating
        as many PipelineCreators as needed to tune all the hyperparameter
        configurations.

        Returns
        -------
        List[PipelineCreator]
            A list of PipelineCreators, each one without repeated step
            names.
        """
        out = [
            PipelineCreator(
                problem_type=self.problem_type, apply_to=self.apply_to
            )
        ]
        # Unique step names, preserving insertion order.
        unique_names = []
        for t_step in self._steps:
            if t_step.name not in unique_names:
                unique_names.append(t_step.name)
        for t_name in unique_names:
            t_steps = [x for x in self._steps if x.name == t_name]
            if len(t_steps) == 1:
                # Single option: append to every pipeline built so far.
                for t_pipe in out:
                    t_pipe._steps.append(t_steps[0])
            else:
                # Multiple options: fork every pipeline once per option
                # (cartesian product of all the alternatives).
                new_out = []
                for t_pipe in out:
                    for t_step in t_steps:
                        new_pipe = t_pipe.copy()
                        new_pipe._steps.append(t_step)
                        new_out.append(new_pipe)
                out = new_out
        for t_out in out:
            t_out._added_model = self._added_model
            t_out._added_target_transformer = self._added_target_transformer
        return out

    def to_pipeline(
        self,
        X_types: Optional[Dict[str, List]] = None,
        search_params: Optional[Dict[str, Any]] = None,
    ) -> Pipeline:
        """Create a pipeline from the PipelineCreator.

        Parameters
        ----------
        X_types : dict of str to list, optional
            The types of the columns in the data (default is None).
        search_params : dict, optional
            The parameters for the hyperparameter search (default is
            None).

        Returns
        -------
        sklearn.pipeline.Pipeline
            The pipeline created from the PipelineCreator.

        Raises
        ------
        ValueError
            If no model was added to the PipelineCreator.
        """
        logger.debug("Creating pipeline")
        if not self.has_model():
            raise_error("Cannot create a pipeline without a model")
        pipeline_steps: List[Tuple[str, Any]] = [
            ("set_column_types", SetColumnTypes(X_types))
        ]
        X_types = self._check_X_types(X_types)
        # The model is always the last step; everything before it is
        # either a regular transformer or the (single) target transformer.
        model_step = self._steps[-1]
        target_transformer_step = None
        transformer_steps = []
        for _step in self._steps[:-1]:
            if "target" in _step.apply_to:
                target_transformer_step = _step
            else:
                transformer_steps.append(_step)
        params_to_tune = {}
        for step_dict in transformer_steps:
            logger.debug(f"Adding transformer {step_dict.name}")
            name = step_dict.name
            estimator = step_dict.estimator
            logger.debug(f" Estimator: {estimator}")
            step_params_to_tune = step_dict.params_to_tune
            logger.debug(f" Params to tune: {step_params_to_tune}")
            # Wrap in a column transformer so the step only sees the
            # column types it applies to.
            if self.wrap and not isinstance(estimator, JuTransformer):
                estimator = self._wrap_step(
                    name,
                    estimator,
                    step_dict.apply_to,
                    row_select_col_type=step_dict.row_select_col_type,
                    row_select_vals=step_dict.row_select_vals,
                )
            pipeline_steps.append((name, estimator))
            params_to_tune.update(step_params_to_tune)
        model_name = model_step.name
        model_estimator = model_step.estimator
        logger.debug(f"Adding model {model_name}")
        # Resolve nested pipeline creators passed as model parameters
        # (e.g. stacked estimators built from PipelineCreators).
        model_params = model_estimator.get_params(deep=False)
        model_params = {
            k: _params_to_pipeline(
                v, X_types=X_types, search_params=search_params
            )
            for k, v in model_params.items()
        }
        model_estimator.set_params(**model_params)
        if self.wrap and not isinstance(model_estimator, JuModelLike):
            logger.debug(f"Wrapping {model_name}")
            model_estimator = WrapModel(model_estimator, model_step.apply_to)
        step_params_to_tune = model_step.params_to_tune
        logger.debug(f" Estimator: {model_estimator}")
        logger.debug("\t Looking for nested pipeline creators")
        logger.debug(f" Params to tune: {step_params_to_tune}")
        if self._added_target_transformer:
            # The model and the target transformer are fused into a single
            # step, so the tuning parameter names must be re-prefixed.
            target_model_step = self._wrap_target_model(
                model_name, model_estimator, target_transformer_step
            )
            target_step_to_tune = {
                f"{model_name}_target_transform__transformer__{param}": val
                for param, val in (
                    target_transformer_step.params_to_tune.items()
                )
            }
            step_params_to_tune = {
                f"{model_name}_target_transform__model__{param}": val
                for param, val in step_params_to_tune.items()
            }
            pipeline_steps.append(target_model_step)
            params_to_tune.update(step_params_to_tune)
            params_to_tune.update(target_step_to_tune)
        else:
            params_to_tune.update(step_params_to_tune)
            pipeline_steps.append((model_name, model_estimator))
        pipeline = Pipeline(pipeline_steps).set_output(transform="pandas")
        out = _prepare_hyperparameter_tuning(
            params_to_tune, search_params, pipeline
        )
        logger.debug("Pipeline created")
        return out

    @staticmethod
    def _wrap_target_model(
        model_name: str,
        model: ModelLike,
        target_transformer_step: Step,
    ) -> Tuple[str, JuTransformedTargetModel]:
        """Wrap the model in a JuTransformedTargetModel.

        Parameters
        ----------
        model_name : str
            The name of the model.
        model : ModelLike
            The model to wrap.
        target_transformer_step : Step
            The step with the target transformer.

        Returns
        -------
        str
            The name of the wrapped model step.
        JuTransformedTargetModel
            The wrapped model.

        Raises
        ------
        ValueError
            If the target transformer is not a JuTargetPipeline.
        """
        transformer = target_transformer_step.estimator
        if not isinstance(transformer, JuTargetPipeline):
            raise_error(
                "The target transformer should be a JuTargetPipeline. "
                f"Got {type(transformer)}"
            )
        target_model = JuTransformedTargetModel(
            model=model, transformer=transformer
        )
        return (f"{model_name}_target_transform", target_model)

    def _validate_model_params(
        self, model_name: str, model_params: Dict[str, Any]
    ) -> None:
        """Validate the model parameters.

        Parameters
        ----------
        model_name : str
            The name of the model.
        model_params : dict
            The parameters of the model to validate.

        Raises
        ------
        ValueError
            If a parameter is specified for a step other than the model.
        """
        for param in model_params.keys():
            if "__" in param:
                est_name = param.split("__")[0]
                if est_name != model_name:
                    raise_error(
                        "Only parameters for the model should be specified. "
                        f"Got {param} for {est_name}."
                    )

    def _get_step_name(
        self, name: Optional[str], step: Union[EstimatorLike, str]
    ) -> str:
        """Get the name of a step, with a count suffix if it is repeated.

        Parameters
        ----------
        name : str, optional
            The name of the step (default is None).
        step : EstimatorLike or str
            The step to get the name for.

        Returns
        -------
        str
            The name of the step.
        """
        out = name
        if name is None:
            # Derive the name from the step itself and append the number
            # of existing steps with the same name, if any.
            name = (
                step
                if isinstance(step, str)
                else step.__class__.__name__.lower()
            )
            count = np.array(
                [_step.name == name for _step in self._steps]
            ).sum()
            out = f"{name}_{count}" if count > 0 else name
        return out

    def _validate_step(
        self, step: Union[EstimatorLike, str], apply_to: ColumnTypesLike
    ) -> None:
        """Validate a step.

        Parameters
        ----------
        step : EstimatorLike or str
            The step to validate.
        apply_to : ColumnTypesLike
            The type of data the step is applied to.

        Raises
        ------
        ValueError
            If the step is not a valid step, if the transformer is added
            after adding a model, or if a transformer is added after a
            target transformer.
        """
        if self._is_transfromer_step(step):
            if self._added_model:
                raise_error("Cannot add a transformer after adding a model")
            if self._added_target_transformer and not self._is_model_step(step):
                raise_error(
                    "Only a model can be added after a target transformer."
                )
            if apply_to == "target":
                self._added_target_transformer = True
        elif self._is_model_step(step):
            self._added_model = True
        else:
            raise_error(f"Cannot add a {step}. I don't know what it is.")

    def _check_X_types(
        self, X_types: Optional[Dict] = None
    ) -> Dict[str, List[str]]:
        """Check the X_types against the pipeline creator settings.

        Parameters
        ----------
        X_types : dict, optional
            The types of the columns in the data (default is None).

        Returns
        -------
        dict
            The types of the columns in the data after the check.

        Raises
        ------
        ValueError
            If there are extra types in the X_types that are not needed /
            used by the pipeline.

        Warns
        -----
        RuntimeWarning
            If there are types needed by the pipeline that are not
            specified in the X_types.
        """
        if X_types is None:
            X_types = {}
        # If no types are given, everything is assumed to be continuous.
        if X_types == {}:
            all_X_types = ColumnTypes("continuous")
        else:
            all_X_types = ColumnTypes(list(X_types.keys()))
        needed_types = []
        for step_dict in self._steps:
            if step_dict.needed_types is None:
                continue
            needed_types.extend(step_dict.needed_types)
        needed_types = set(needed_types)
        # Wildcard patterns consume every type, so the "unused types"
        # check does not apply.
        skip_need_error = ".*" in needed_types or "*" in needed_types
        if not skip_need_error:
            extra_types = [x for x in all_X_types if x not in needed_types]
            if len(extra_types) > 0:
                raise_error(
                    "Extra X_types were provided but never used by a "
                    "transformer.\n"
                    f"Extra types are {extra_types}\n"
                    f"Used types are {needed_types}"
                )
        # Warn about types the pipeline needs but that are not present in
        # the data, as a transformer must create them at runtime.
        available_types = {*all_X_types, "*", ".*", "target", "continuous"}
        for needed_type in needed_types:
            if needed_type not in available_types:
                warn_with_log(
                    f"{needed_type} is not in the provided X_types={X_types}."
                    " Make sure your pipeline has a transformer that creates"
                    " this type."
                )
        # Wrapping is only needed when the pipeline deals with more than
        # plain continuous data.
        self.wrap = needed_types != {"continuous"}
        return X_types

    @staticmethod
    def _is_transfromer_step(step: Union[str, EstimatorLike]) -> bool:
        """Check if a step is a transformer."""
        # NOTE: the method name keeps the historical typo ("transfromer")
        # to avoid breaking any external caller.
        if step in list_transformers():
            return True
        if hasattr(step, "fit") and hasattr(step, "transform"):
            return True
        return False

    @staticmethod
    def _is_model_step(step: Union[EstimatorLike, str]) -> bool:
        """Check if a step is a model."""
        if step in list_models():
            return True
        if hasattr(step, "fit") and hasattr(step, "predict"):
            return True
        return False

    @staticmethod
    def _wrap_step(
        name, step, column_types, row_select_col_type, row_select_vals
    ) -> JuColumnTransformer:
        """Wrap a step in a JuColumnTransformer.

        Parameters
        ----------
        name : str
            The name of the step.
        step : EstimatorLike
            The step to wrap.
        column_types : ColumnTypesLike
            The types of the columns the step is applied to.
        row_select_col_type : str or list of str or set of str or ColumnTypes
            The column types needed to select rows (default None).
        row_select_vals : str, int, bool or list of str, int, bool
            The value(s) which should be selected in the
            ``row_select_col_type`` to select the rows used for training
            (default None).
        """
        return JuColumnTransformer(
            name,
            step,
            column_types,
            row_select_col_type=row_select_col_type,
            row_select_vals=row_select_vals,
        )

    @staticmethod
    def _get_estimator_from(
        name: str, problem_type: str, **kwargs: Any
    ) -> EstimatorLike:
        """Get an estimator from a name.

        Parameters
        ----------
        name : str
            The name of the estimator.
        problem_type : str
            The problem type.
        **kwargs : dict
            The keyword arguments to pass to the estimator constructor.

        Returns
        -------
        EstimatorLike
            The estimator.

        Raises
        ------
        ValueError
            If the name is not a registered transformer or model.
        """
        if name in list_transformers():
            return get_transformer(name, **kwargs)
        if name in list_models():
            return get_model(name, problem_type, **kwargs)
        raise_error(
            f"{name} is neither a registered transformer "
            "nor a registered model."
        )
|
def _prepare_hyperparameter_tuning(
    params_to_tune: Union[Dict[str, Any], List[Dict[str, Any]]],
    search_params: Optional[Dict[str, Any]],
    pipeline: Pipeline,
):
    """Prepare hyperparameter tuning in the pipeline.

    Parameters
    ----------
    params_to_tune : dict or list of dict
        The parameters to tune. The keys of the dictionary should be named
        'STEP__PARAMETER', to be used as PARAMETER for STEP in the
        pipeline. Example: 'svm__probability': True will set the parameter
        'probability' of the 'svm' step. The value of the parameter must
        be a list of values to test. A list of such dictionaries defines
        several independent search sets.
    search_params : dict
        The parameters for the search. The following keys are accepted:

        * 'kind': The kind of search algorithm to use e.g.:
          'grid' or 'random'. All valid julearn searchers can be entered.
        * 'cv': If search is going to be used, the cross-validation
          splitting strategy to use. Defaults to same CV as for the model
          evaluation.
        * 'scoring': If search is going to be used, the scoring metric to
          evaluate the performance.

    pipeline : sklearn.pipeline.Pipeline
        The pipeline to apply/tune the hyperparameters.

    Returns
    -------
    sklearn.pipeline.Pipeline
        The modified pipeline. If there is nothing to tune, the pipeline
        is returned unchanged; otherwise it is wrapped in a searcher.
    """
    logger.info("= Model Parameters =")
    search_params = prepare_search_params(search_params)
    if len(params_to_tune) > 0:
        # Resolve the searcher: a registered name maps to its class, an
        # unknown string is an error, and anything else is assumed to be a
        # searcher-like callable provided by the user.
        search = search_params.pop("kind", "grid")
        cv_inner = search_params.get("cv", None)
        if search in list_searchers():
            logger.info(f"Tuning hyperparameters using {search}")
            search = get_searcher(search)
        elif isinstance(search, str):
            raise_error(
                f"The searcher {search} is not a valid julearn searcher. "
                "You can get a list of all available ones by using: "
                "julearn.model_selection.list_searchers(). You can also "
                "enter a valid scikit-learn searcher or register it."
            )
        else:
            warn_with_log(f"{search} is not a registered searcher. ")
            logger.info(
                f"Tuning hyperparameters using not registered {search}"
            )
        if isinstance(params_to_tune, dict):
            logger.info("Hyperparameters:")
            for k, v in params_to_tune.items():
                logger.info(f" {k}: {v}")
        else:
            logger.info("Hyperparameters list:")
            for i_list, t_params in enumerate(params_to_tune):
                logger.info(f" Set {i_list}")
                for k, v in t_params.items():
                    logger.info(f" {k}: {v}")
        cv_inner = check_cv(cv_inner)
        logger.info(f"Using inner CV scheme {cv_inner}")
        search_params["cv"] = cv_inner
        logger.info("Search Parameters:")
        for k, v in search_params.items():
            logger.info(f" {k}: {v}")
        pipeline = search(pipeline, params_to_tune, **search_params)
    logger.info("====================")
    logger.info("")
    return pipeline
|
class TargetPipelineCreator:
    """TargetPipelineCreator class.

    Analogous to the PipelineCreator class, this class allows to create
    :class:`julearn.pipeline.target_pipeline.JuTargetPipeline` objects in
    an easy way.
    """

    def __init__(self) -> None:
        # Ordered list of (name, estimator) tuples.
        self._steps = []

    def add(self, step: str, name: Optional[str]=None, **params) -> 'TargetPipelineCreator':
        """Add a step to the pipeline.

        Parameters
        ----------
        step : str
            The step to add to the pipeline.
        name : str, optional
            The name of the step. If None, the name will be obtained from
            the step (default is None).
        **params
            Parameters for the step, mostly hyperparameters or any other
            initialization parameter.
        """
        if name is None:
            if isinstance(step, str):
                name = step
            else:
                name = step.__class__.__name__.lower()
        name = self._get_step_name(name, step)
        # Target-specific transformers take precedence over the generic
        # transformer registry.
        if step in list_target_transformers():
            factory = get_target_transformer
        else:
            factory = get_transformer
        self._steps.append((name, factory(step, **params)))
        return self

    def to_pipeline(self) -> JuTargetPipeline:
        """Create a pipeline from the steps.

        Returns
        -------
        JuTargetPipeline
            The pipeline object.
        """
        return JuTargetPipeline(self._steps)

    def _get_step_name(self, name: Optional[str], step: Union[(EstimatorLike, str)]) -> str:
        """Get the name of a step, suffixed with a count if repeated.

        Parameters
        ----------
        name : str or None
            The name of the step.
        step : EstimatorLike or str
            The step to get the name for.

        Returns
        -------
        str
            The name of the step.
        """
        if name is None:
            name = step if isinstance(step, str) else step.__class__.__name__.lower()
        n_same = sum(existing_name == name for existing_name, _ in self._steps)
        return f'{name}_{n_same}' if n_same else name

    def __str__(self) -> str:
        """Get a string representation of the TargetPipelineCreator."""
        parts = ['TargetPipelineCreator:\n']
        for i_step, (t_name, t_est) in enumerate(self._steps):
            parts.append(f' Step {i_step}: {t_name}\n')
            parts.append(f' estimator: {t_est}\n')
        return ''.join(parts)
|
def test_merger_pipelines() -> None:
    """Test the pipeline merger."""
    # Two creators sharing the step names "scaler" and "rf".
    creator_a = PipelineCreator(problem_type='classification')
    creator_a.add('zscore', name='scaler', apply_to='continuous')
    creator_a.add('rf')
    creator_b = PipelineCreator(problem_type='classification')
    creator_b.add('scaler_robust', name='scaler', apply_to='continuous')
    creator_b.add('rf')
    pipe_a = creator_a.to_pipeline()
    pipe_b = creator_b.to_pipeline()

    merged = merge_pipelines(pipe_a, pipe_b, search_params=None)
    assert isinstance(merged, GridSearchCV)
    assert isinstance(merged.estimator, Pipeline)
    step_names = list(merged.estimator.named_steps.keys())
    assert len(step_names) == 3
    assert step_names[1] == 'scaler'
    assert step_names[2] == 'rf'
    assert len(merged.param_grid) == 2

    # A third creator that tunes a hyperparameter via random search.
    search_params = {'kind': 'random'}
    creator_c = PipelineCreator(problem_type='classification')
    creator_c.add('zscore', name='scaler', apply_to='continuous')
    creator_c.add('rf', max_features=[2, 3, 7, 42])
    pipe_c = creator_c.to_pipeline(search_params=search_params)

    merged = merge_pipelines(pipe_a, pipe_b, pipe_c, search_params=search_params)
    assert isinstance(merged, RandomizedSearchCV)
    assert isinstance(merged.estimator, Pipeline)
    step_names = list(merged.estimator.named_steps.keys())
    assert len(step_names) == 3
    assert step_names[1] == 'scaler'
    assert step_names[2] == 'rf'
    assert len(merged.param_distributions) == 3
    assert merged.param_distributions[-1]['rf__max_features'] == [2, 3, 7, 42]
|
def test_merger_errors() -> None:
    """Test that the merger raises errors when it should."""
    creator_a = PipelineCreator(problem_type='classification')
    creator_a.add('zscore', name='scaler', apply_to='continuous')
    creator_a.add('rf')
    creator_b = PipelineCreator(problem_type='classification')
    creator_b.add('scaler_robust', name='scaler', apply_to='continuous')
    creator_b.add('rf', n_estimators=[10, 100])
    pipe_a = creator_a.to_pipeline()
    pipe_b = creator_b.to_pipeline(search_params={'kind': 'grid'})

    # Only pipelines / searchers can be merged.
    with pytest.raises(ValueError, match='Only pipelines and searchers'):
        merge_pipelines(pipe_a, SVC(), search_params=None)

    # Mixing searcher kinds is not allowed: grid search inside, random
    # search requested for the merge.
    with pytest.raises(ValueError, match='At least one of the pipelines to merge is a GridSearchCV'):
        merge_pipelines(pipe_a, pipe_b, search_params={'kind': 'random'})

    # And the other way around.
    pipe_b = creator_b.to_pipeline(search_params={'kind': 'random'})
    with pytest.raises(ValueError, match='one of the pipelines to merge is a RandomizedSearchCV'):
        merge_pipelines(pipe_a, pipe_b, search_params={'kind': 'grid'})

    # Searchers must wrap a pipeline, not a bare estimator.
    bare_search = GridSearchCV(SVC(), param_grid={'C': [1, 10]})
    with pytest.raises(ValueError, match='All searchers must use a pipeline.'):
        merge_pipelines(pipe_a, bare_search, search_params=None)

    # Merged pipelines must share the same named steps.
    creator_c = PipelineCreator(problem_type='classification')
    creator_c.add('scaler_robust', name='scaler', apply_to='continuous')
    creator_c.add('pca')
    creator_c.add('rf', n_estimators=[10, 100])
    pipe_c = creator_c.to_pipeline(search_params={'kind': 'grid'})
    with pytest.raises(ValueError, match='must have the same named steps.'):
        merge_pipelines(pipe_a, pipe_c, search_params=None)
|
@pytest.mark.parametrize('model,preprocess,problem_type', [lazy_fixture(['models_all_problem_types', 'preprocessing', 'all_problem_types'])])
def test_construction_working(model: str, preprocess: List[str], problem_type: str) -> None:
    """Test that the pipeline constructions works as expected.

    Parameters
    ----------
    model : str
        The model to test.
    preprocess : List[str]
        The preprocessing steps to test.
    problem_type : str
        The problem type to test.
    """
    steps = preprocess if isinstance(preprocess, list) else [preprocess]
    creator = PipelineCreator(problem_type=problem_type)
    for t_step in steps:
        creator.add(t_step, apply_to='categorical')
    creator.add(model)
    pipeline = creator.to_pipeline(X_types={'categorical': ['A']})

    # Every preprocessing step must be wrapped in a JuColumnTransformer
    # around the registered transformer class.
    for t_step, (t_name, t_trans) in zip(steps, pipeline.steps[1:-1]):
        assert t_name.startswith(f'{t_step}')
        assert isinstance(t_trans, JuColumnTransformer)
        assert isinstance(t_trans.transformer, get_transformer(t_step).__class__)

    # The model is the last step, wrapped in a WrapModel.
    model_name, wrapped = pipeline.steps[-1]
    assert isinstance(wrapped, WrapModel)
    assert isinstance(wrapped.model, get_model(model_name, problem_type=problem_type).__class__)
    # set_column_types + preprocessing steps + model
    assert len(pipeline.steps) == len(steps) + 2
|
@pytest.mark.parametrize('model,preprocess,problem_type', [lazy_fixture(['models_all_problem_types', 'preprocessing', 'all_problem_types'])])
def test_fit_and_transform_no_error(X_iris: pd.DataFrame, y_iris: pd.Series, model: str, preprocess: List[str], problem_type: str) -> None:
    """Test that the pipeline fit and transform does not give an error.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset features.
    y_iris : pd.Series
        The iris dataset target variable.
    model : str
        The model to test.
    preprocess : List[str]
        The preprocessing steps to test.
    problem_type : str
        The problem type to test.
    """
    pipeline = (
        PipelineCreator.from_list(preprocess, model_params={}, problem_type=problem_type)
        .add(model)
        .to_pipeline({})
    )
    pipeline.fit(X_iris, y_iris)
    # Transforming through everything but the final model must also work.
    pipeline[:-1].transform(X_iris)
|
@pytest.mark.parametrize('model,preprocess,problem_type', [lazy_fixture(['models_all_problem_types', 'preprocessing', 'all_problem_types'])])
def test_hyperparameter_tuning(X_types_iris: Dict[(str, List[str])], model: str, preprocess: List[str], problem_type: str, get_tuning_params: Callable, search_params: Dict[(str, List)]) -> None:
    """Test that the pipeline hyperparameter tuning works as expected.

    Parameters
    ----------
    X_types_iris : Dict[str, List[str]]
        The iris dataset features types.
    model : str
        The model to test.
    preprocess : List[str]
        The preprocessing steps to test.
    problem_type : str
        The problem type to test.
    get_tuning_params : Callable
        A function that returns the tuning hyperparameters for a given
        step.
    search_params : dict of str and list
        The parameters for the search.
    """
    steps = [preprocess] if isinstance(preprocess, str) else preprocess
    used_types = (
        ['continuous']
        if X_types_iris in [None, {}]
        else list(X_types_iris.keys())
    )
    creator = PipelineCreator(problem_type=problem_type)
    expected_grid = {}
    # Build the creator and, in parallel, the parameter grid we expect the
    # resulting searcher to carry.
    for t_step in steps:
        t_params = get_tuning_params(t_step)
        creator.add(t_step, apply_to=used_types, **t_params)
        expected_grid.update({f'{t_step}__{k}': v for k, v in t_params.items()})
    model_params = get_tuning_params(model)
    creator.add(model, **model_params)
    expected_grid.update({f'{model}__{k}': v for k, v in model_params.items()})

    pipeline = creator.to_pipeline(X_types=X_types_iris, search_params=search_params)
    kind = 'grid' if search_params is None else search_params.get('kind', 'grid')
    if kind == 'grid':
        assert isinstance(pipeline, GridSearchCV)
        assert pipeline.param_grid == expected_grid
    else:
        assert isinstance(pipeline, RandomizedSearchCV)
        assert pipeline.param_distributions == expected_grid
|
@pytest.mark.parametrize('X_types,apply_to,warns', [({'duck': 'B'}, ['duck', 'chicken'], True), ({'duck': 'B'}, ['duck'], False), ({}, ['continuous'], False), (None, ['continuous'], False), ({'continuous': 'A', 'cat': 'B'}, ['continuous', 'cat'], False), ({'continuous': 'A'}, ['continuous', 'target'], False), ({'continuous': 'A', 'cat': 'B'}, ['*'], False), ({'continuous': 'A', 'cat': 'B'}, ['.*'], False)])
def test_X_types_to_pattern_warnings(X_types: Dict[(str, List[str])], apply_to: ColumnTypesLike, warns: bool) -> None:
    """Test that the X_types raises the expected warnings.

    Parameters
    ----------
    X_types : Dict[str, List[str]]
        The X_types to test.
    apply_to : ColumnTypesLike
        The apply_to to test.
    warns : bool
        Whether the test should raise a warning.
    """
    creator = PipelineCreator(problem_type='classification')
    creator.add('zscore', apply_to=apply_to)
    if not warns:
        creator._check_X_types(X_types)
    else:
        with pytest.warns(match='is not in the provided X_types'):
            creator._check_X_types(X_types)
|
@pytest.mark.parametrize('X_types,apply_to,error', [({}, ['duck'], True), ({'duck': 'B'}, ['duck'], False), ({}, ['continuous'], False), (None, ['continuous'], False), ({'continuous': 'A', 'cat': 'B'}, ['continuous', 'cat'], False), ({'continuous': 'A', 'cat': 'B'}, ['continuous'], True), ({'continuous': 'A', 'cat': 'B'}, ['*'], False), ({'continuous': 'A', 'cat': 'B'}, ['.*'], False)])
def test_X_types_to_pattern_errors(X_types: Dict[(str, List[str])], apply_to: ColumnTypesLike, error: bool) -> None:
    """Test that the X_types raises the expected errors.

    Parameters
    ----------
    X_types : Dict[str, List[str]]
        The X_types to test.
    apply_to : ColumnTypesLike
        The apply_to to test.
    error : bool
        Whether the test should raise an error.
    """
    creator = PipelineCreator(problem_type='classification')
    creator.add('zscore', apply_to=apply_to)
    if not error:
        creator._check_X_types(X_types)
    else:
        with pytest.raises(ValueError, match='Extra X_types were provided'):
            creator._check_X_types(X_types)
|
def test_pipelinecreator_default_apply_to() -> None:
    """Test pipeline creator using the default apply_to."""
    # A type that is never consumed must raise.
    creator = PipelineCreator(problem_type='classification')
    creator.add('rf', apply_to='chicken')
    with pytest.raises(ValueError, match='Extra X_types were provided'):
        creator._check_X_types({'duck': 'B'})

    # A needed type missing from the data must warn.
    creator = PipelineCreator(problem_type='classification')
    creator.add('rf', apply_to=['chicken', 'duck'])
    with pytest.warns(match='is not in the provided X_types'):
        creator._check_X_types({'chicken': 'teriyaki'})

    # The wildcard accepts any provided type.
    creator = PipelineCreator(problem_type='classification')
    creator.add('rf', apply_to='*')
    creator._check_X_types({'duck': 'teriyaki'})
|
def test_pipelinecreator_default_constructor_apply_to() -> None:
    """Test pipeline creator using a default apply_to in the constructor."""
    # The constructor default is used when `add` gives no apply_to.
    creator = PipelineCreator(problem_type='classification', apply_to='duck')
    creator.add('rf')
    creator._check_X_types({'duck': 'teriyaki'})

    # A per-step apply_to overrides the constructor default.
    creator = PipelineCreator(problem_type='classification', apply_to='duck')
    creator.add('zscore', apply_to='chicken')
    creator.add('rf')
    creator._check_X_types({'duck': 'teriyaki', 'chicken': '1'})
|
def test_added_model_target_transform() -> None:
    """Check the added-model and added-target-transformer flags."""
    creator = PipelineCreator(problem_type='classification')
    creator.add('zscore', apply_to='continuous')
    assert not creator._added_target_transformer
    # Applying a transformer to the target flips the flag.
    creator.add('zscore', apply_to='target')
    assert creator._added_target_transformer
    assert not creator._added_model
    # Adding a model flips the model flag.
    creator.add('rf')
    assert creator._added_model
|
def test_stacking(X_iris: pd.DataFrame, y_iris: pd.Series) -> None:
    """Fit a stacking model built from two column-filtered pipelines."""
    X_types = {
        'sepal': ['sepal_length', 'sepal_width'],
        'petal': ['petal_length', 'petal_width'],
    }
    # One sub-pipeline per feature group: filter, scale, fit.
    sepal_creator = PipelineCreator(problem_type='classification', apply_to='*')
    sepal_creator.add('filter_columns', keep='sepal')
    sepal_creator.add('zscore')
    sepal_creator.add('svm')
    petal_creator = PipelineCreator(problem_type='classification', apply_to='*')
    petal_creator.add('filter_columns', keep='petal')
    petal_creator.add('zscore')
    petal_creator.add('rf')
    # Stack both sub-pipelines as estimators of a stacking model.
    stack_creator = PipelineCreator(problem_type='classification')
    stack_creator.add(
        'stacking',
        estimators=[[('sepal', sepal_creator), ('petal', petal_creator)]],
        apply_to='*',
    )
    pipeline = stack_creator.to_pipeline(X_types)
    # The whole fit must run without emitting any warning.
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        pipeline.fit(X_iris, y_iris)
|
def test_added_repeated_transformers() -> None:
    """Repeated transformers get index-suffixed names."""
    creator = PipelineCreator(problem_type='classification')
    for target_type in ('continuous', 'duck'):
        creator.add('zscore', apply_to=target_type)
    creator.add('rf')
    assert len(creator._steps) == 3
    # The second 'zscore' is disambiguated with a suffix.
    assert creator._steps[0].name == 'zscore'
    assert creator._steps[1].name == 'zscore_1'
|
def test_target_pipe(X_iris, y_iris) -> None:
    """Fit a pipeline with a target pipeline and a random search."""
    X_types = {
        'continuous': ['sepal_length', 'sepal_width', 'petal_length'],
        'confounds': ['petal_width'],
    }
    # Remove confounds from the target before fitting.
    target_creator = TargetPipelineCreator()
    target_creator.add('confound_removal', confounds=['confounds', 'continuous'])
    creator = PipelineCreator(problem_type='regression')
    creator.add(target_creator, apply_to='target')
    creator.add('svm', C=[1, 2])
    pipe = creator.to_pipeline(X_types, search_params={'kind': 'random'})
    pipe.fit(X_iris, y_iris)
|
def test_raise_wrong_problem_type() -> None:
    """An invalid problem_type in the constructor raises a ValueError."""
    with pytest.raises(ValueError, match='`problem_type` should'):
        # 'binary' is not a valid problem type.
        PipelineCreator(problem_type='binary')
|
def test_raise_wrong_problem_type_added_to_step() -> None:
    """Passing problem_type to an added step raises a ValueError."""
    creator = PipelineCreator(problem_type='classification')
    with pytest.raises(ValueError, match='Please provide the problem_type'):
        # problem_type belongs to the creator, not to individual steps.
        creator.add('svm', problem_type='classification')
|
def test_raise_error_not_target_pipe() -> None:
    """A target pipeline applied to non-target columns raises."""
    target_creator = TargetPipelineCreator()
    target_creator.add('confound_removal', confounds=['confounds', 'continuous'])
    # Target pipelines can only be applied to the target.
    with pytest.raises(ValueError, match='TargetPipelineCreator can'):
        PipelineCreator(problem_type='regression').add(target_creator, apply_to='confounds')
|
def test_raise_pipe_no_model() -> None:
    """Building a pipeline without a model step raises."""
    X_types = {'continuous': ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']}
    # Only a transformer is added, no final model.
    creator = PipelineCreator(problem_type='regression')
    creator.add('zscore')
    with pytest.raises(ValueError, match='Cannot create a pipe'):
        creator.to_pipeline(X_types)
|
def test_raise_pipe_wrong_searcher() -> None:
    """An unknown searcher kind raises at pipeline-build time."""
    X_types = {'continuous': ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']}
    creator = PipelineCreator(problem_type='regression')
    # Hyperparameter list triggers a search, so the searcher is validated.
    creator.add('svm', C=[1, 2])
    with pytest.raises(ValueError, match='The searcher no_search is not a valid julearn searcher'):
        creator.to_pipeline(X_types, search_params={'kind': 'no_search'})
|
def test_PipelineCreator_repeated_steps() -> None:
    """Test naming of repeated steps in the pipeline creator."""
    # Unnamed repeated steps get an index suffix.
    creator = PipelineCreator(problem_type='classification')
    creator.add('zscore', apply_to='continuous')
    creator.add('zscore', apply_to='continuous')
    creator.add('rf')
    assert len(creator._steps) == 3
    assert creator._steps[0].name == 'zscore'
    assert creator._steps[1].name == 'zscore_1'
    # Explicitly named repeated steps keep the given name.
    named_creator = PipelineCreator(problem_type='classification')
    named_creator.add('zscore', name='scale', apply_to='continuous')
    named_creator.add('zscore', name='scale', apply_to='continuous')
    named_creator.add('rf')
    assert len(named_creator._steps) == 3
    assert named_creator._steps[0].name == 'scale'
    assert named_creator._steps[1].name == 'scale'
|
def test_PipelineCreator_repeated_steps_error() -> None:
    """Non-consecutive repeated step names raise an error."""
    creator = (
        PipelineCreator(problem_type='classification')
        .add('zscore', name='scale', apply_to='continuous')
        .add('pca', name='pca', apply_to='continuous')
    )
    # 'scale' was already used before 'pca'; re-using it now is invalid.
    with pytest.raises(ValueError, match='Repeated step names are only'):
        creator.add('scaler_robust', name='scale', apply_to='continuous')
|
def test_PipelineCreator_split() -> None:
    """Test splitting a creator into creators without repeated steps."""
    # No repeated names: split yields a single creator with all steps.
    creator1 = PipelineCreator(problem_type='classification')
    creator1.add('zscore', apply_to='continuous')
    creator1.add('zscore', apply_to='continuous')
    creator1.add('rf')
    assert len(creator1._steps) == 3
    assert creator1._steps[0].name == 'zscore'
    assert creator1._steps[1].name == 'zscore_1'
    out1 = creator1.split()
    assert len(out1) == 1
    assert [s.name for s in out1[0]._steps] == ['zscore', 'zscore_1', 'rf']

    # Two steps sharing a name: one creator per alternative.
    creator2 = PipelineCreator(problem_type='classification')
    creator2.add('zscore', name='scale', apply_to='continuous')
    creator2.add('zscore', name='scale', apply_to='continuous')
    creator2.add('rf')
    assert len(creator2._steps) == 3
    assert creator2._steps[0].name == 'scale'
    assert creator2._steps[1].name == 'scale'
    out2 = creator2.split()
    assert len(out2) == 2
    for split_creator in out2:
        assert [s.name for s in split_creator._steps] == ['scale', 'rf']

    # Three alternatives for the same named step.
    creator3 = PipelineCreator(problem_type='classification')
    creator3.add('zscore', name='scale', apply_to='continuous')
    creator3.add('zscore', name='scale', apply_to='continuous')
    creator3.add('scaler_robust', name='scale', apply_to='continuous')
    creator3.add('rf')
    assert len(creator3._steps) == 4
    assert all(s.name == 'scale' for s in creator3._steps[:3])
    out3 = creator3.split()
    assert len(out3) == 3
    for split_creator in out3:
        assert [s.name for s in split_creator._steps] == ['scale', 'rf']

    # Two repeated names: the cartesian product of the alternatives.
    creator4 = PipelineCreator(problem_type='classification')
    creator4.add('zscore', name='scale', apply_to='continuous')
    creator4.add('scaler_robust', name='scale', apply_to='continuous')
    creator4.add('pca', apply_to='continuous')
    creator4.add('rf', name='model')
    creator4.add('svm', name='model')
    assert len(creator4._steps) == 5
    assert [s.name for s in creator4._steps] == ['scale', 'scale', 'pca', 'model', 'model']
    out4 = creator4.split()
    assert len(out4) == 4
    for split_creator in out4:
        assert [s.name for s in split_creator._steps] == ['scale', 'pca', 'model']
    # Each split holds one scaler/model combination, in product order.
    expected_combos = [
        (StandardScaler, RandomForestClassifier),
        (StandardScaler, SVC),
        (RobustScaler, RandomForestClassifier),
        (RobustScaler, SVC),
    ]
    for split_creator, (scaler_cls, model_cls) in zip(out4, expected_combos):
        assert isinstance(split_creator._steps[0].estimator, scaler_cls)
        assert isinstance(split_creator._steps[2].estimator, model_cls)
|
def test_TargetPipelineCreator() -> None:
    """Build a target pipeline and check the resulting step types."""
    creator = TargetPipelineCreator()
    for step_name in ('zscore', 'scaler_minmax'):
        creator.add(step_name)
    creator.add('confound_removal', confounds='confounds')
    pipeline = creator.to_pipeline()
    assert isinstance(pipeline, JuTargetPipeline)
    assert len(pipeline.steps) == 3
    # Steps appear in insertion order with the expected estimator types.
    expected_types = (StandardScaler, MinMaxScaler, TargetConfoundRemover)
    for (_, estimator), expected in zip(pipeline.steps, expected_types):
        assert isinstance(estimator, expected)
|
def test_TargetPipelineCreator_repeated_names() -> None:
    """Repeated target steps get unique, suffixed names."""
    creator = TargetPipelineCreator()
    creator.add('zscore')
    creator.add('zscore')
    pipeline = creator.to_pipeline()
    assert isinstance(pipeline, JuTargetPipeline)
    assert len(pipeline.steps) == 2
    # The duplicate is disambiguated with an index suffix.
    assert [name for name, _ in pipeline.steps] == ['zscore', 'zscore_1']
|
def get_scorer(name: str) -> ScorerLike:
    """Get an available scorer by name.

    Looks the name up in the extra julearn scorers first, falling back to
    scikit-learn's scorer registry.

    Parameters
    ----------
    name : str
        Name of an available scorer.

    Returns
    -------
    scorer : ScorerLike
        Callable object that returns a scalar score; greater is better.
        Will be called using `(estimator, X, y)`.

    Raises
    ------
    ValueError
        If no scorer with that name is registered.
    """
    scorer = _extra_available_scorers.get(name)
    if (scorer is None):
        try:
            scorer = _scorer.get_scorer(name)
        except ValueError:
            # Fixed message: previously read "a listof possible scorers".
            raise_error(
                f'{name} is not a valid scorer please use '
                'julearn.scorers.list_scorers to get a list of possible scorers'
            )
    return scorer
|
def list_scorers() -> List[str]:
    """List all available scorers.

    Returns
    -------
    list of str
        A list containing all available scorers: scikit-learn's registry
        followed by the extra julearn-registered scorer names.
    """
    return [*get_scorer_names(), *_extra_available_scorers]
|
def register_scorer(scorer_name: str, scorer: ScorerLike, overwrite: Optional[bool]=None) -> None:
    """Register a scorer, so that it can be accessed by name.

    Parameters
    ----------
    scorer_name : str
        Name of the scorer you want to register.
    scorer : ScorerLike
        Callable object that returns a scalar score; greater is better.
        Will be called using `(estimator, X, y)`.
    overwrite : bool, optional
        Decides whether overwrite should be allowed. Options are:

        * None : overwrite is possible, but warns the user
        * True : overwrite is possible without any warning
        * False : overwrite is not possible, error is raised instead

        (default is None)

    Raises
    ------
    ValueError
        If overwrite is set to False and the scorer already exists.

    Warns
    -----
    UserWarning
        If overwrite is set to None and the scorer already exists.
    """
    if (scorer_name in list_scorers()):
        if (overwrite is None):
            warn_with_log(f'scorer named {scorer_name} already exists. Therefore, {scorer_name} will be overwritten. To remove this warning set overwrite=True ')
        elif (overwrite is False):
            raise_error(f'scorer named {scorer_name} already exists and overwrite is set to False, therefore you cannot overwrite existing scorers. Set overwrite=True in case you want to overwrite existing scorers')
    # Fixed: the registration used to be logged twice when overwrite=None
    # and the scorer already existed; log exactly once for every path.
    logger.info(f'registering scorer named {scorer_name}')
    _extra_available_scorers[scorer_name] = scorer
|
def reset_scorer_register():
    """Reset the scorer register to the default state.

    Discards every scorer added via :func:`register_scorer`, restoring the
    module-level registry from the pristine template.
    """
    global _extra_available_scorers
    # Deep-copy so future registrations cannot mutate the reset template.
    _extra_available_scorers = deepcopy(_extra_available_scorers_reset)
|
def check_scoring(estimator: EstimatorLike, scoring: Union[(ScorerLike, str, Callable, List[str], None)], wrap_score: bool) -> Union[(None, ScorerLike, Callable, Dict[(str, ScorerLike)])]:
    """Check and normalize the scoring argument.

    Parameters
    ----------
    estimator : EstimatorLike
        Estimator to check the scoring for.
    scoring : Union[ScorerLike, str, Callable, List[str], None]
        Scoring to check: None, a scorer name, a callable scorer, or a
        list of scorer names.
    wrap_score : bool
        Whether the score needs to be wrapped to handle non-inverse
        transformable target pipelines.
    """
    if scoring is None:
        return None
    if isinstance(scoring, str):
        # Resolve the name; the callable branch below then validates it.
        scoring = _extend_scorer(get_scorer(scoring), wrap_score)
    if callable(scoring):
        checked = sklearn_check_scoring(estimator, scoring=scoring)
        return _extend_scorer(checked, wrap_score)
    if isinstance(scoring, list):
        names = typing.cast(List[str], scoring)
        scorers = {
            t_name: _extend_scorer(get_scorer(t_name), wrap_score)
            for t_name in names
        }
        return _check_multimetric_scoring(estimator, scorers)
|
def _extend_scorer(scorer, extend):
if extend:
return _ExtendedScorer(scorer)
return scorer
|
class _ExtendedScorer():
    """Scorer wrapper that scores against the transformed target."""

    def __init__(self, scorer):
        # The underlying `(estimator, X, y)` scorer being wrapped.
        self.scorer = scorer

    def __call__(self, estimator, X, y):
        """Score `estimator` after passing y through its target transforms."""
        if hasattr(estimator, 'best_estimator_'):
            # Score the refitted pipeline of a search estimator.
            estimator = estimator.best_estimator_
        # Re-apply every transform but the last step to obtain the features
        # the final step saw, then transform the target accordingly.
        X_trans = X
        for _, t_step in estimator.steps[:-1]:
            X_trans = t_step.transform(X_trans)
        y_true = estimator.steps[-1][-1].transform_target(X_trans, y)
        with warnings.catch_warnings():
            warnings.filterwarnings(action='ignore', category=TransformedTargetWarning)
            return self.scorer(estimator, X, y_true)
|
def ensure_1d(y: ArrayLike) -> np.ndarray:
    """Ensure that y is a 1d numpy array.

    Parameters
    ----------
    y : ArrayLike
        The array to be checked.

    Returns
    -------
    np.ndarray
        The array as a 1d numpy array.

    Raises
    ------
    ValueError
        If y cannot be converted to a 1d numpy array.
    """
    out = y if isinstance(y, np.ndarray) else np.array(y)
    if out.ndim > 1:
        # Drop singleton axes; anything left over is genuinely multi-dim.
        out = np.squeeze(out)
        if out.ndim > 1:
            raise ValueError('y cannot be converted to 1d')
    return out
|
def r2_corr(y_true: ArrayLike, y_pred: ArrayLike) -> float:
    """Compute squared Pearson product-moment correlation coefficient.

    Parameters
    ----------
    y_true : ArrayLike
        The true values.
    y_pred : ArrayLike
        The predicted values.

    Returns
    -------
    float
        The squared Pearson product-moment correlation coefficient.
    """
    corr_matrix = np.corrcoef(ensure_1d(y_true), ensure_1d(y_pred))
    return corr_matrix[0, 1] ** 2
|
def r_corr(y_true: ArrayLike, y_pred: ArrayLike) -> float:
    """Compute Pearson product-moment correlation coefficient.

    Parameters
    ----------
    y_true : ArrayLike
        The true values.
    y_pred : ArrayLike
        The predicted values.

    Returns
    -------
    float
        Pearson product-moment correlation coefficient.
    """
    corr_matrix = np.corrcoef(ensure_1d(y_true), ensure_1d(y_pred))
    return corr_matrix[0, 1]
|
def _return_1(estimator: EstimatorLike, X: DataLike, y: DataLike) -> float:
    """Scorer stub that always returns 1 (used to test scorer registration)."""
    return 1
|
def test_register_scorer() -> None:
    """Test registering scorers."""
    # Unknown scorer before registration.
    with pytest.raises(ValueError, match='useless is not a valid scorer'):
        get_scorer('useless')
    useless_scorer = make_scorer(_return_1)
    register_scorer('useless', useless_scorer)
    _ = get_scorer('useless')
    # Explicit overwrite is silent.
    register_scorer('useless', useless_scorer, True)
    # Implicit overwrite warns.
    with pytest.warns(RuntimeWarning, match='scorer named useless already exists. '):
        register_scorer('useless', useless_scorer, None)
    # Forbidden overwrite raises.
    with pytest.raises(ValueError, match='scorer named useless already exists and'):
        register_scorer('useless', useless_scorer, False)
    reset_scorer_register()
|
def test_reset_scorer() -> None:
    """Test resetting the scorers registry."""
    no_scorer_msg = 'useless is not a valid scorer '
    with pytest.raises(ValueError, match=no_scorer_msg):
        get_scorer('useless')
    register_scorer('useless', make_scorer(_return_1))
    get_scorer('useless')
    reset_scorer_register()
    # After the reset, the custom scorer is gone again.
    with pytest.raises(ValueError, match=no_scorer_msg):
        get_scorer('useless')
|
def test_ensure_1d() -> None:
    """Test ensure_1d."""
    flat = [1, 2, 3, 4]
    assert np.all(ensure_1d(flat) == flat)
    # A nested single row is squeezed to 1d.
    nested = [[1, 2, 3, 4]]
    assert np.all(ensure_1d(nested) == nested[0])
    # A genuine 2d array cannot be squeezed.
    with pytest.raises(ValueError, match='cannot be converted to 1d'):
        ensure_1d([[1, 2, 3, 4], [2, 3, 4, 5]])
|
def test_r2_corr() -> None:
    """Test r2_corr."""
    values = [1, 2, 3, 4]
    shifted = [v + 1 for v in values]
    # Perfect and perfectly-shifted predictions both give r^2 == 1.
    assert r2_corr(values, values) == 1
    assert r2_corr(values, shifted) == 1
|
def test_r_corr() -> None:
    """Test r_corr."""
    values = [1, 2, 3, 4]
    shifted = [v + 1 for v in values]
    # Perfect and perfectly-shifted predictions both give r == 1.
    assert r_corr(values, values) == 1
    assert r_corr(values, shifted) == 1
|
def _corrected_std(differences: np.ndarray, n_train: int, n_test: int) -> float:
"Corrects standard deviation using Nadeau and Bengio's approach.\n\n Parameters\n ----------\n differences : ndarray of shape (n_samples,)\n Vector containing the differences in the score metrics of two models.\n n_train : int\n Number of samples in the training set.\n n_test : int\n Number of samples in the testing set.\n\n Returns\n -------\n corrected_std : float\n Variance-corrected standard deviation of the set of differences.\n "
kr = len(differences)
corrected_var = (np.var(differences, ddof=1, axis=0) * ((1 / kr) + (n_test / n_train)))
corrected_std = np.sqrt(corrected_var)
return corrected_std
|
def _compute_corrected_ttest(differences: np.ndarray, n_train: int, n_test: int, df: Optional[int]=None, alternative: str='two-sided') -> Tuple[(float, float)]:
    """Compute a paired t-test with variance correction.

    Parameters
    ----------
    differences : array-like of shape (n_samples,)
        Vector containing the differences in the score metrics of two models.
    n_train : int
        Number of samples in the training set.
    n_test : int
        Number of samples in the testing set.
    df : int, optional
        Degrees of freedom. Defaults to ``len(differences) - 1``.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis (default is 'two-sided'):

        * 'two-sided': the means of the underlying distributions are unequal.
        * 'less': the mean of the first distribution is less than that of the
          second.
        * 'greater': the mean of the first distribution is greater than that
          of the second.

    Returns
    -------
    t_stat : float
        Variance-corrected t-statistic.
    p_val : float
        Variance-corrected p-value.
    """
    if df is None:
        df = len(differences) - 1
    # t = mean difference over the Nadeau-Bengio corrected std.
    corrected = _corrected_std(differences, n_train=n_train, n_test=n_test)
    t_stat = differences.mean(axis=0) / corrected
    if alternative == 'less':
        p_val = special.stdtr(df, t_stat)
    elif alternative == 'greater':
        p_val = special.stdtr(df, -t_stat)
    elif alternative == 'two-sided':
        p_val = 2 * special.stdtr(df, -np.abs(t_stat))
    else:
        raise_error(f"Invalid alternative {alternative}. Should be 'two-sided', 'less' or 'greater'.")
    return (t_stat, p_val)
|
def corrected_ttest(*scores: pd.DataFrame, df: Optional[int]=None, method: str='bonferroni', alternative: str='two-sided') -> pd.DataFrame:
    """Perform corrected t-tests on the scores of two or more models.

    A Nadeau-Bengio variance-corrected paired t-test is computed for every
    pair of models and every train/test metric column.

    Parameters
    ----------
    *scores : pd.DataFrame
        DataFrames containing the scores of the models. The DataFrames must
        be the output of `run_cross_validation`.
    df : int, optional
        Degrees of freedom. If None, `n_samples - 1` is used.
    method : str
        Method used for testing and adjustment of pvalues. Can be either the
        full name or initial letters. Available methods are:

        * `bonferroni` : one-step correction
        * `sidak` : one-step correction
        * `holm-sidak` : step down method using Sidak adjustments
        * `holm` : step-down method using Bonferroni adjustments
        * `simes-hochberg` : step-up method (independent)
        * `hommel` : closed method based on Simes tests (non-negative)
        * `fdr_bh` : Benjamini/Hochberg (non-negative)
        * `fdr_by` : Benjamini/Yekutieli (negative)
        * `fdr_tsbh` : two stage fdr correction (non-negative)
        * `fdr_tsbky` : two stage fdr correction (non-negative)

    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

        * 'two-sided': the means of the distributions underlying the samples
          are unequal.
        * 'less': the mean of the distribution underlying the first sample
          is less than the mean of the distribution underlying the second
          sample.
        * 'greater': the mean of the distribution underlying the first
          sample is greater than the mean of the distribution underlying
          the second sample.

    Returns
    -------
    pd.DataFrame
        One row per metric and model pair, with the t-statistic, the raw
        p-value and the multiple-comparison-corrected p-value.
    """
    # Validate the score dataframes and check they use the same CV scheme.
    scores = check_scores_df(*scores, same_cv=True)
    if ((len(scores) > 2) and (alternative != 'two-sided')):
        raise_error('Only two-sided tests are supported for more than two models.')
    t_scores = [x.set_index(['fold', 'repeat']) for x in scores]
    all_stats = []
    # Test every pair of models.
    for (model_i, model_k) in combinations(range(len(t_scores)), 2):
        i_scores = t_scores[model_i]
        k_scores = t_scores[model_k]
        model_i_name = i_scores['model'].iloc[0]
        model_k_name = k_scores['model'].iloc[0]
        n_train = i_scores['n_train'].values
        n_test = i_scores['n_test'].values
        # The correction assumes a single train/test size; fall back to a
        # rounded average (with a warning) when folds differ in size.
        if (np.unique(n_train).size > 1):
            warn_with_log('The training set sizes are not the same. Will use a rounded average.')
            n_train = int(np.mean(n_train).round())
        else:
            n_train = n_train[0]
        if (np.unique(n_test).size > 1):
            warn_with_log('The testing set sizes are not the same. Will use a rounded average.')
            n_test = int(np.mean(n_test).round())
        else:
            n_test = n_test[0]
        # Keep only the metric columns (test_* / train_*).
        to_skip = ['cv_mdsum', 'n_train', 'n_test', 'model']
        to_keep = [x for x in i_scores.columns if ((x not in to_skip) and (x.startswith('test_') or x.startswith('train_')))]
        df1 = i_scores[to_keep]
        df2 = k_scores[to_keep]
        differences = (df1 - df2)
        # Fixed: `alternative` is now forwarded; previously it was validated
        # above but silently ignored for the two-model case.
        (t_stat, p_val) = _compute_corrected_ttest(differences, n_train=n_train, n_test=n_test, df=df, alternative=alternative)
        stat_df = t_stat.to_frame('t-stat')
        stat_df['p-val'] = p_val
        stat_df['model_1'] = model_i_name
        stat_df['model_2'] = model_k_name
        all_stats.append(stat_df)
    all_stats_df = pd.concat(all_stats)
    all_stats_df.index.name = 'metric'
    all_stats_df = all_stats_df.reset_index()
    # With more than two models, correct for multiple comparisons per metric.
    if (len(t_scores) > 2):
        corrected_stats = []
        for t_metric in all_stats_df['metric'].unique():
            metric_df = all_stats_df[(all_stats_df['metric'] == t_metric)].copy()
            corrected = multipletests(metric_df['p-val'], method=method)
            metric_df['p-val-corrected'] = corrected[1]
            corrected_stats.append(metric_df)
        all_stats_df = pd.concat(corrected_stats)
    else:
        # Only one comparison: the corrected p-value equals the raw one.
        all_stats_df['p-val-corrected'] = all_stats_df['p-val']
    return all_stats_df
|
def test__compute_corrected_ttest_alternatives():
    """Test the _compute_corrected_ttest function."""
    sample_a = stats.norm.rvs(loc=0.5, scale=0.2, size=20, random_state=42)
    sample_b = stats.norm.rvs(loc=0.51, scale=0.2, size=20, random_state=45)
    sample_c = stats.norm.rvs(loc=0.9, scale=0.2, size=20, random_state=50)
    # Similar means: high two-sided p-value.
    _, p_similar = _compute_corrected_ttest((sample_a - sample_b), n_train=70, n_test=30)
    assert p_similar > 0.7
    # Clearly different means: low two-sided p-value.
    _, p_different = _compute_corrected_ttest((sample_a - sample_c), n_train=70, n_test=30)
    assert p_different < 0.1
    # One-sided alternatives in each direction.
    _, p_less = _compute_corrected_ttest((sample_a - sample_c), n_train=70, n_test=30, alternative='less')
    assert p_less < 0.05
    _, p_greater = _compute_corrected_ttest((sample_a - sample_c), n_train=70, n_test=30, alternative='greater')
    assert p_greater > 0.9
    with pytest.raises(ValueError, match='Invalid alternative'):
        _compute_corrected_ttest((sample_a - sample_c), n_train=70, n_test=30, alternative='not_valid')
|
def test_corrected_ttest() -> None:
    """Test the corrected_ttest function."""

    def _make_scores(values: np.ndarray) -> pd.DataFrame:
        # Build a scores dataframe as run_cross_validation would produce:
        # 5 folds x 2 repeats with constant sizes and CV checksum.
        t_df = pd.DataFrame({
            'fold': np.arange(10) % 5,
            'repeat': np.arange(10) // 5,
            'test_score': values,
        })
        t_df['cv_mdsum'] = 'maradona'
        t_df['n_train'] = 100
        t_df['n_test'] = 20
        return t_df

    scores1 = _make_scores(np.random.rand(10))
    scores2 = _make_scores(np.random.rand(10) + 0.05)
    # Two models: no multiple-comparison correction, no warnings.
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        out = corrected_ttest(scores1, scores2)
    assert len(out) == 1
    assert 'p-val' in out
    assert 'p-val-corrected' in out
    assert out['p-val-corrected'][0] == out['p-val'][0]
    assert 'model_1' in out
    assert 'model_2' in out
    assert 'model_1' in out['model_1'].values
    assert 'model_2' in out['model_2'].values
    # Three models: all pairwise comparisons, still warning-free.
    scores3 = _make_scores(np.random.rand(10) + 0.1)
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        out = corrected_ttest(scores1, scores2, scores3)
    assert len(out) == 3
    assert 'p-val' in out
    assert 'p-val-corrected' in out
    assert 'model_1' in out
    assert 'model_2' in out
    assert 'model_1' in out['model_1'].values
    assert 'model_2' in out['model_1'].values
    assert 'model_2' in out['model_2'].values
    assert 'model_3' in out['model_2'].values
|
def test_corrected_ttest_errors() -> None:
    """Test the errors and warnings raised by corrected_ttest.

    Builds up the required score columns one by one, checking that each
    missing piece of metadata is reported, then checks the CV-consistency
    and fold-size diagnostics.
    """
    data1 = np.random.rand(10)
    data2 = (np.random.rand(10) + 0.05)
    scores1 = pd.DataFrame({'test_score': data1})
    scores2 = pd.DataFrame({'test_score': data2})
    # Missing CV checksum column.
    with pytest.raises(ValueError, match='cv_mdsum'):
        corrected_ttest(scores1, scores2)
    scores1['cv_mdsum'] = 'maradona'
    scores2['cv_mdsum'] = 'messi'
    # Missing fold column.
    with pytest.raises(ValueError, match='fold'):
        corrected_ttest(scores1, scores2)
    scores1['fold'] = (np.arange(10) % 5)
    scores2['fold'] = (np.arange(10) % 5)
    # Missing repeat column.
    with pytest.raises(ValueError, match='repeat'):
        corrected_ttest(scores1, scores2)
    scores1['repeat'] = (np.arange(10) // 5)
    scores2['repeat'] = (np.arange(10) // 5)
    # Missing train-size column.
    with pytest.raises(ValueError, match='n_train'):
        corrected_ttest(scores1, scores2)
    scores1['n_train'] = 100
    scores2['n_train'] = 100
    # Missing test-size column.
    with pytest.raises(ValueError, match='n_test'):
        corrected_ttest(scores1, scores2)
    scores1['n_test'] = 90
    scores2['n_test'] = 90
    # Checksums differ: the scores come from different CV schemes.
    with pytest.raises(ValueError, match='different CVs'):
        corrected_ttest(scores1, scores2)
    scores1['cv_mdsum'] = 'non-reproducible'
    scores2['cv_mdsum'] = 'non-reproducible'
    # A non-reproducible CV cannot be compared.
    with pytest.raises(ValueError, match='non-reproducible'):
        corrected_ttest(scores1, scores2)
    scores1['cv_mdsum'] = 'maradona'
    scores2['cv_mdsum'] = 'maradona'
    scores3 = scores2
    # Non two-sided alternatives require exactly two models.
    with pytest.raises(ValueError, match='two-sided'):
        corrected_ttest(scores1, scores2, scores3, alternative='wrong')
    scores1['n_train'] = (([100] * 9) + [90])
    scores1['n_test'] = 90
    # Unequal train sizes only warn (a rounded average is used).
    with pytest.warns(RuntimeWarning, match='training set'):
        corrected_ttest(scores1, scores2)
    scores1['n_train'] = 100
    scores1['n_test'] = (([100] * 9) + [90])
    # Unequal test sizes warn as well.
    with pytest.warns(RuntimeWarning, match='testing set'):
        corrected_ttest(scores1, scores2)
|
def test_run_cv_simple_binary(df_binary: pd.DataFrame, df_iris: pd.DataFrame) -> None:
    """Test a simple binary classification problem.

    Parameters
    ----------
    df_binary : pd.DataFrame
        The iris dataset as a binary classification problem.
    df_iris : pd.DataFrame
        The iris dataset as a multiclass classification problem.
    """
    X = ['sepal_length', 'sepal_width', 'petal_length']
    y = 'species'
    X_types = {'features': X}
    # Default scorers without explicit X_types: features are treated as
    # continuous, which must warn.
    scorers = ['accuracy', 'balanced_accuracy']
    api_params = {'model': 'svm', 'problem_type': 'classification'}
    sklearn_model = SVC()
    with pytest.warns(RuntimeWarning, match='treated as continuous'):
        do_scoring_test(X=X, y=y, data=df_binary, scorers=scorers, api_params=api_params, sklearn_model=sklearn_model)
    # Binarize the multiclass target via pos_labels and use binary scorers.
    scorers = ['recall', 'precision', 'f1']
    sk_y = (df_iris[y].values == 'virginica').astype(int)
    model = PipelineCreator(apply_to='features', problem_type='classification')
    model.add('svm')
    api_params = {'model': model, 'pos_labels': 'virginica'}
    sklearn_model = SVC()
    do_scoring_test(X, y, data=df_iris, api_params=api_params, X_types=X_types, sklearn_model=sklearn_model, scorers=scorers, sk_y=sk_y)
    # roc_auc with probability estimates enabled through model_params.
    X = ['sepal_length', 'petal_length']
    scorers = ['accuracy', 'roc_auc']
    sk_y = (df_iris[y].values == 'virginica').astype(int)
    with pytest.warns(RuntimeWarning, match='treated as continuous'):
        api_params = {'model': 'svm', 'pos_labels': 'virginica', 'problem_type': 'classification', 'model_params': {'svm__probability': True}}
        sklearn_model = SVC(probability=True)
        do_scoring_test(X, y, data=df_iris, api_params=api_params, sklearn_model=sklearn_model, scorers=scorers, sk_y=sk_y)
    # roc_auc without probability estimates.
    X = ['sepal_length', 'petal_length']
    scorers = ['accuracy', 'roc_auc']
    sk_y = (df_iris[y].values == 'virginica').astype(int)
    with pytest.warns(RuntimeWarning, match='treated as continuous'):
        api_params = {'model': 'svm', 'pos_labels': 'virginica', 'problem_type': 'classification'}
        sklearn_model = SVC(probability=False)
        do_scoring_test(X, y, data=df_iris, api_params=api_params, sklearn_model=sklearn_model, scorers=scorers, sk_y=sk_y)
|
def test_run_cv_simple_binary_groups(df_iris: pd.DataFrame) -> None:
    """Test a simple binary classification problem with groups in the CV.

    Parameters
    ----------
    df_iris : pd.DataFrame
        The iris dataset as a multiclass classification problem.
    """
    # Keep only two species to make the problem binary.
    binary_df = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])].copy()
    X = ['sepal_length', 'sepal_width', 'petal_length']
    y = 'species'
    # Derive the CV groups by binning sepal length into 20 intervals.
    bin_edges = np.histogram(binary_df['sepal_length'], bins=20)[1]
    binary_df['groups'] = np.digitize(binary_df['sepal_length'], bins=bin_edges)
    do_scoring_test(
        X=X,
        y=y,
        data=binary_df,
        X_types={'continuous': X},
        groups='groups',
        cv=GroupKFold(n_splits=2),
        scorers=['accuracy', 'balanced_accuracy'],
        api_params={'model': 'svm', 'problem_type': 'classification'},
        sklearn_model=SVC(),
    )
|
def test_run_cv_simple_binary_errors(df_binary: pd.DataFrame, df_iris: pd.DataFrame) -> None:
    """Test a simple classification problem errors.

    Parameters
    ----------
    df_binary : pd.DataFrame
        The iris dataset as a binary classification problem.
    df_iris : pd.DataFrame
        The iris dataset as a multiclass classification problem.
    """
    features = ['sepal_length', 'sepal_width', 'petal_length']
    # Binary-averaged scorers on a multiclass target must warn.
    with pytest.warns(UserWarning, match='Target is multiclass but average'):
        do_scoring_test(
            features,
            'species',
            data=df_iris,
            api_params={'model': 'svm', 'problem_type': 'classification'},
            sklearn_model=SVC(),
            scorers=['recall', 'precision', 'f1'],
        )
|
def test_run_cv_errors(df_iris: pd.DataFrame) -> None:
    """Test run_cross_validation errors and warnings.

    Parameters
    ----------
    df_iris : pd.DataFrame
        The iris dataset as a multiclass classification problem.
    """
    X = ['sepal_length', 'sepal_width', 'petal_length']
    y = 'species'
    X_types = {'continuous': X}
    # A plain scikit-learn pipeline is not accepted as model.
    model = make_pipeline(SVC())
    with pytest.raises(ValueError, match='a scikit-learn pipeline'):
        run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=model, problem_type='classification')
    # preprocess cannot be combined with a PipelineCreator.
    model = PipelineCreator(problem_type='classification')
    with pytest.raises(ValueError, match='preprocess should be None'):
        run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=model, preprocess='zscore')
    # problem_type must be set in the creator, not in run_cross_validation.
    model = PipelineCreator(problem_type='classification')
    with pytest.raises(ValueError, match='Problem type should be set'):
        run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=model, problem_type='classification')
    # model must be a PipelineCreator, a string or an estimator.
    model = 2
    with pytest.raises(ValueError, match='has to be a PipelineCreator'):
        run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=model)
    # A string model requires an explicit problem_type.
    model = 'svm'
    with pytest.raises(ValueError, match='`problem_type` must be specified'):
        run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=model)
    # preprocess must be a string or list of strings.
    model = 'svm'
    with pytest.raises(ValueError, match='preprocess has to be a string'):
        run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=model, preprocess=2, problem_type='classification')
    # model_params cannot be used with an estimator instance.
    model = SVC()
    model_params = {'svc__C': 1}
    with pytest.raises(ValueError, match='Cannot use model_params with a model object'):
        run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=model, model_params=model_params, problem_type='classification')
    # model_params cannot be used with a PipelineCreator either.
    model = PipelineCreator(problem_type='classification')
    model_params = {'svc__C': 1}
    with pytest.raises(ValueError, match='must be None'):
        run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=model, model_params=model_params)
    # Hyperparameters must be prefixed with the step name ('svm__', not 'svc__').
    model = 'svm'
    model_params = {'svc__C': 1}
    with pytest.raises(ValueError, match='model_params are incorrect'):
        run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=model, model_params=model_params, problem_type='classification')
    # Non-prefixed parameters mixed with prefixed ones are also rejected.
    model = 'svm'
    model_params = {'probability': True, 'svm__C': 1}
    with pytest.raises(ValueError, match='model_params are incorrect'):
        run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=model, model_params=model_params, problem_type='classification')
|
def test_run_cv_multiple_pipeline_errors(df_iris: pd.DataFrame) -> None:
    """Test run_cross_validation errors when passing multiple pipelines.

    Parameters
    ----------
    df_iris : pd.DataFrame
        The iris dataset as a multiclass classification problem.

    """
    features = ['sepal_length', 'sepal_width', 'petal_length']
    target = 'species'
    types = {'continuous': features}
    first_creator = PipelineCreator(problem_type='classification')
    first_creator.add('svm')
    # Mixing a PipelineCreator with a plain string model is rejected.
    with pytest.raises(ValueError, match='If model is a list, all'):
        run_cross_validation(X=features, y=target, data=df_iris, X_types=types, model=[first_creator, 'svm'])
    # All creators in the list must share the same problem_type.
    second_creator = PipelineCreator(problem_type='regression')
    second_creator.add('svm')
    with pytest.raises(ValueError, match='same problem_type'):
        run_cross_validation(X=features, y=target, data=df_iris, X_types=types, model=[first_creator, second_creator])
|
def test_tune_hyperparam_gridsearch(df_iris: pd.DataFrame) -> None:
    """Test run_cross_validation with hyperparameter tuning (gridsearch).

    Parameters
    ----------
    df_iris : pd.DataFrame
        The iris dataset as a multiclass classification problem.

    """
    # Restrict to two species -> binary classification problem.
    df_iris = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    X = ['sepal_length', 'sepal_width', 'petal_length']
    y = 'species'
    X_types = {'continuous': X}
    # Raw numpy views for the equivalent scikit-learn run below.
    sk_X = df_iris[X].values
    sk_y = df_iris[y].values
    scoring = 'accuracy'
    # Fix the seed so the julearn and scikit-learn CV splits match.
    np.random.seed(42)
    cv_outer = RepeatedKFold(n_splits=2, n_repeats=1)
    cv_inner = RepeatedKFold(n_splits=2, n_repeats=1)
    model_params = {'svm__C': [0.01, 0.001]}
    search_params = {'cv': cv_inner}
    (actual, actual_estimator) = run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model='svm', model_params=model_params, cv=cv_outer, scoring=[scoring], return_estimator='final', search_params=search_params, problem_type='classification')
    # Reproduce the same nested search directly with scikit-learn.
    np.random.seed(42)
    cv_outer = RepeatedKFold(n_splits=2, n_repeats=1)
    cv_inner = RepeatedKFold(n_splits=2, n_repeats=1)
    clf = make_pipeline(SVC())
    gs = GridSearchCV(clf, {'svc__C': [0.01, 0.001]}, cv=cv_inner)
    expected = cross_validate(gs, sk_X, sk_y, cv=cv_outer, scoring=[scoring])
    # julearn adds 5 extra metadata columns to the scores dataframe.
    assert (len(actual.columns) == (len(expected) + 5))
    assert (len(actual['test_accuracy']) == len(expected['test_accuracy']))
    assert all(((a == b) for (a, b) in zip(actual['test_accuracy'], expected['test_accuracy'])))
    # Compare the final fitted SVCs of both searches.
    clf1 = actual_estimator.best_estimator_.steps[(- 1)][1]
    clf2 = clone(gs).fit(sk_X, sk_y).best_estimator_.steps[(- 1)][1]
    compare_models(clf1, clf2)
|
def test_tune_hyperparam_gridsearch_groups(df_iris: pd.DataFrame) -> None:
    """Test run_cross_validation hyperparameter tuning with groups (gridsearch).

    Parameters
    ----------
    df_iris : pd.DataFrame
        The iris dataset as a multiclass classification problem.

    """
    # Restrict to two species -> binary classification problem.
    df_iris = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    df_iris = df_iris.copy()
    X = ['sepal_length', 'sepal_width', 'petal_length']
    y = 'species'
    X_types = {'continuous': X}
    # Build artificial groups by binning sepal length into 20 bins.
    df_iris['groups'] = np.digitize(df_iris['sepal_length'], bins=np.histogram(df_iris['sepal_length'], bins=20)[1])
    sk_X = df_iris[X].values
    sk_y = df_iris[y].values
    sk_groups = df_iris['groups'].values
    scoring = 'accuracy'
    # Fix the seed so the julearn and scikit-learn runs match.
    np.random.seed(42)
    cv_outer = GroupKFold(n_splits=2)
    cv_inner = GroupKFold(n_splits=2)
    model_params = {'svm__C': [0.01, 0.001]}
    search_params = {'cv': cv_inner}
    (actual, actual_estimator) = run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model='svm', model_params=model_params, cv=cv_outer, scoring=[scoring], groups='groups', return_estimator='final', search_params=search_params, problem_type='classification')
    # Reproduce the same grouped search directly with scikit-learn.
    np.random.seed(42)
    cv_outer = GroupKFold(n_splits=2)
    cv_inner = GroupKFold(n_splits=2)
    clf = make_pipeline(SVC())
    gs = GridSearchCV(clf, {'svc__C': [0.01, 0.001]}, cv=cv_inner)
    # groups must also reach the inner search, via fit_params.
    expected = cross_validate(gs, sk_X, sk_y, cv=cv_outer, scoring=[scoring], groups=sk_groups, fit_params={'groups': sk_groups})
    # julearn adds 5 extra metadata columns to the scores dataframe.
    assert (len(actual.columns) == (len(expected) + 5))
    assert (len(actual['test_accuracy']) == len(expected['test_accuracy']))
    assert all(((a == b) for (a, b) in zip(actual['test_accuracy'], expected['test_accuracy'])))
    # Compare the final fitted SVCs of both searches.
    clf1 = actual_estimator.best_estimator_.steps[(- 1)][1]
    clf2 = clone(gs).fit(sk_X, sk_y, groups=sk_groups).best_estimator_.steps[(- 1)][1]
    compare_models(clf1, clf2)
|
def test_tune_hyperparam_randomsearch(df_iris: pd.DataFrame) -> None:
    """Test run_cross_validation with hyperparameter tuning (randomsearch).

    Parameters
    ----------
    df_iris : pd.DataFrame
        The iris dataset as a multiclass classification problem.

    """
    # Restrict to two species -> binary classification problem.
    df_iris = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    X = ['sepal_length', 'sepal_width', 'petal_length']
    y = 'species'
    X_types = {'continuous': X}
    sk_X = df_iris[X].values
    sk_y = df_iris[y].values
    scoring = 'accuracy'
    # Fix the seed so the julearn and scikit-learn runs match.
    np.random.seed(42)
    cv_outer = RepeatedKFold(n_splits=2, n_repeats=1)
    cv_inner = RepeatedKFold(n_splits=2, n_repeats=1)
    model_params = {'svm__C': [0.01, 0.001]}
    # kind='random' selects RandomizedSearchCV; n_iter=2 covers the whole grid.
    search_params = {'kind': 'random', 'n_iter': 2, 'cv': cv_inner}
    (actual, actual_estimator) = run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model='svm', model_params=model_params, search_params=search_params, problem_type='classification', cv=cv_outer, scoring=[scoring], return_estimator='final')
    # Reproduce the same search directly with scikit-learn.
    np.random.seed(42)
    cv_outer = RepeatedKFold(n_splits=2, n_repeats=1)
    cv_inner = RepeatedKFold(n_splits=2, n_repeats=1)
    clf = make_pipeline(SVC())
    gs = RandomizedSearchCV(clf, {'svc__C': [0.01, 0.001]}, cv=cv_inner, n_iter=2)
    expected = cross_validate(gs, sk_X, sk_y, cv=cv_outer, scoring=[scoring])
    # julearn adds 5 extra metadata columns to the scores dataframe.
    assert (len(actual.columns) == (len(expected) + 5))
    assert (len(actual['test_accuracy']) == len(expected['test_accuracy']))
    assert all(((a == b) for (a, b) in zip(actual['test_accuracy'], expected['test_accuracy'])))
    # Compare the final fitted SVCs of both searches.
    clf1 = actual_estimator.best_estimator_.steps[(- 1)][1]
    clf2 = clone(gs).fit(sk_X, sk_y).best_estimator_.steps[(- 1)][1]
    compare_models(clf1, clf2)
|
def test_tune_hyperparams_multiple_grid(df_iris: pd.DataFrame) -> None:
    """Test run_cross_validation hyperparameter tuning (multiple grid).

    Parameters
    ----------
    df_iris : pd.DataFrame
        The iris dataset as a multiclass classification problem.

    """
    # Restrict to two species -> binary classification problem.
    df_iris = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    X = ['sepal_length', 'sepal_width', 'petal_length']
    y = 'species'
    X_types = {'continuous': X}
    # Variant 1: one creator with two same-named 'svm' steps -> two grids.
    creator1 = PipelineCreator(problem_type='classification')
    creator1.add('svm', kernel='linear', C=[0.01, 0.1], name='svm')
    creator1.add('svm', kernel='rbf', C=[0.01, 0.1], gamma=['scale', 'auto', 0.01, 0.001], name='svm')
    sk_X = df_iris[X].values
    sk_y = df_iris[y].values
    scoring = 'accuracy'
    # Fix the seed so all three runs see identical CV splits.
    np.random.seed(42)
    cv_outer = RepeatedKFold(n_splits=2, n_repeats=1)
    cv_inner = RepeatedKFold(n_splits=2, n_repeats=1)
    search_params = {'cv': cv_inner}
    (actual1, actual_estimator1) = run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=creator1, cv=cv_outer, scoring=[scoring], return_estimator='final', search_params=search_params)
    # Variant 2: same search expressed as a list of two single-grid creators.
    creator2_1 = PipelineCreator(problem_type='classification')
    creator2_1.add('svm', kernel='linear', C=[0.01, 0.1], name='svm')
    creator2_2 = PipelineCreator(problem_type='classification')
    creator2_2.add('svm', kernel='rbf', C=[0.01, 0.1], gamma=['scale', 'auto', 0.01, 0.001], name='svm')
    np.random.seed(42)
    cv_outer = RepeatedKFold(n_splits=2, n_repeats=1)
    cv_inner = RepeatedKFold(n_splits=2, n_repeats=1)
    search_params = {'cv': cv_inner}
    (actual2, actual_estimator2) = run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model=[creator2_1, creator2_2], cv=cv_outer, scoring=[scoring], return_estimator='final', search_params=search_params)
    # Reference: scikit-learn GridSearchCV with an explicit list of grids.
    np.random.seed(42)
    cv_outer = RepeatedKFold(n_splits=2, n_repeats=1)
    cv_inner = RepeatedKFold(n_splits=2, n_repeats=1)
    clf = make_pipeline(SVC())
    grid = [{'svc__C': [0.01, 0.1], 'svc__kernel': ['linear']}, {'svc__gamma': ['scale', 'auto', 0.01, 0.001], 'svc__kernel': ['rbf'], 'svc__C': [0.01, 0.1]}]
    gs = GridSearchCV(clf, grid, cv=cv_inner)
    expected = cross_validate(gs, sk_X, sk_y, cv=cv_outer, scoring=[scoring])
    # julearn adds 5 extra metadata columns to the scores dataframe.
    assert (len(actual1.columns) == (len(expected) + 5))
    assert (len(actual2.columns) == (len(expected) + 5))
    assert (len(actual1['test_accuracy']) == len(expected['test_accuracy']))
    assert (len(actual2['test_accuracy']) == len(expected['test_accuracy']))
    assert all(((a == b) for (a, b) in zip(actual1['test_accuracy'], expected['test_accuracy'])))
    assert all(((a == b) for (a, b) in zip(actual2['test_accuracy'], expected['test_accuracy'])))
    # All three searches must converge to the same best SVC.
    clf1 = actual_estimator1.best_estimator_.steps[(- 1)][1]
    clf2 = actual_estimator2.best_estimator_.steps[(- 1)][1]
    clf3 = clone(gs).fit(sk_X, sk_y).best_estimator_.steps[(- 1)][1]
    compare_models(clf1, clf2)
    compare_models(clf1, clf3)
|
def test_return_estimators(df_iris: pd.DataFrame) -> None:
    """Test the return_estimator options of run_cross_validation.

    Parameters
    ----------
    df_iris : pd.DataFrame
        The iris dataset as a multiclass classification problem.

    """
    # Restrict to two species -> binary classification problem.
    df_iris = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    X = ['sepal_length', 'sepal_width', 'petal_length']
    y = 'species'
    X_types = {'continuous': X}
    cv = StratifiedKFold(2)
    # Invalid value for return_estimator.
    with pytest.raises(ValueError, match='must be one of'):
        scores = run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model='svm', problem_type='classification', cv=cv, return_estimator=True)
    # None: only the scores dataframe, no estimators.
    scores = run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model='svm', problem_type='classification', cv=cv, return_estimator=None)
    assert isinstance(scores, pd.DataFrame)
    assert ('estimator' not in scores)
    # 'final': scores plus the model refit on the whole data.
    (scores, final) = run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model='svm', problem_type='classification', cv=cv, return_estimator='final')
    assert isinstance(scores, pd.DataFrame)
    assert ('estimator' not in scores)
    assert isinstance(final['svm'], SVC)
    # 'cv': per-fold estimators stored in the scores dataframe.
    scores = run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model='svm', problem_type='classification', cv=cv, return_estimator='cv')
    assert isinstance(scores, pd.DataFrame)
    assert ('estimator' in scores)
    # 'all': both the per-fold estimators and the final one.
    (scores, final) = run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model='svm', problem_type='classification', cv=cv, return_estimator='all')
    assert isinstance(scores, pd.DataFrame)
    assert ('estimator' in scores)
    assert isinstance(final['svm'], SVC)
|
def test_return_train_scores(df_iris: pd.DataFrame) -> None:
    """Test returning train scores.

    Parameters
    ----------
    df_iris : pd.DataFrame
        The iris dataset as a multiclass classification problem.

    """
    # Restrict to two species -> binary classification problem.
    df_iris = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    X = ['sepal_length', 'sepal_width', 'petal_length']
    y = 'species'
    scoring = ['accuracy', 'precision', 'recall']
    cv = StratifiedKFold(2)
    # No X_types given -> julearn warns that X is treated as continuous.
    with pytest.warns(RuntimeWarning, match='treated as continuous'):
        scores = run_cross_validation(X=X, y=y, data=df_iris, model='svm', problem_type='classification', cv=cv, scoring=scoring)
    # By default only test scores are reported.
    train_scores = [f'train_{s}' for s in scoring]
    test_scores = [f'test_{s}' for s in scoring]
    assert all(((s not in scores.columns) for s in train_scores))
    assert all(((s in scores.columns) for s in test_scores))
    with pytest.warns(RuntimeWarning, match='treated as continuous'):
        scores = run_cross_validation(X=X, y=y, data=df_iris, model='svm', problem_type='classification', cv=cv, scoring=scoring, return_train_score=True)
    # With return_train_score=True both train and test columns appear.
    train_scores = [f'train_{s}' for s in scoring]
    test_scores = [f'test_{s}' for s in scoring]
    assert all(((s in scores.columns) for s in train_scores))
    assert all(((s in scores.columns) for s in test_scores))
|
@pytest.mark.parametrize('cv1, cv2, expected', [(GroupKFold(2), KFold(3), False), (GroupKFold(2), GroupKFold(3), False), (GroupKFold(3), GroupKFold(3), True), (GroupShuffleSplit(2), GroupShuffleSplit(3), 'non-reproducible'), (GroupShuffleSplit(2, random_state=32), GroupShuffleSplit(3, random_state=32), False), (GroupShuffleSplit(3, random_state=32), GroupShuffleSplit(3, random_state=32), True), (GroupShuffleSplit(3, random_state=33), GroupShuffleSplit(3, random_state=32), False), (KFold(2), KFold(3), False), (KFold(2, shuffle=True), KFold(2, shuffle=True), 'non-reproducible'), (KFold(3, random_state=32, shuffle=True), KFold(3, random_state=32, shuffle=True), True), (KFold(3, random_state=33, shuffle=True), KFold(3, random_state=32, shuffle=True), False), (LeaveOneGroupOut(), LeaveOneGroupOut(), True), (LeavePGroupsOut(3), LeavePGroupsOut(3), True), (LeavePGroupsOut(3), LeavePGroupsOut(2), False), (LeaveOneOut(), LeaveOneOut(), True), (LeavePOut(2), LeavePOut(2), True), (LeavePOut(2), LeavePOut(3), False), (PredefinedSplit([1, 2, 3]), PredefinedSplit([1, 2, 3]), True), (PredefinedSplit([1, 2, 3]), PredefinedSplit([1, 2, 4]), False), (RepeatedKFold(n_splits=2), RepeatedKFold(n_splits=2), 'non-reproducible'), (RepeatedKFold(n_splits=2, random_state=32), RepeatedKFold(n_splits=3, random_state=32), False), (RepeatedKFold(n_splits=2, random_state=32), RepeatedKFold(n_splits=2, random_state=32), True), (RepeatedKFold(n_splits=2, n_repeats=2, random_state=32), RepeatedKFold(n_splits=2, n_repeats=3, random_state=32), False), (RepeatedStratifiedKFold(n_splits=2), RepeatedStratifiedKFold(n_splits=2), 'non-reproducible'), (RepeatedStratifiedKFold(n_splits=2, random_state=32), RepeatedStratifiedKFold(n_splits=3, random_state=32), False), (RepeatedStratifiedKFold(n_splits=2, random_state=32), RepeatedStratifiedKFold(n_splits=2, random_state=32), True), (RepeatedStratifiedKFold(n_splits=2, n_repeats=2, random_state=32), RepeatedStratifiedKFold(n_splits=2, n_repeats=3, 
random_state=32), False), (ShuffleSplit(n_splits=2), ShuffleSplit(n_splits=2), 'non-reproducible'), (ShuffleSplit(n_splits=2, random_state=32), ShuffleSplit(n_splits=3, random_state=32), False), (ShuffleSplit(n_splits=2, random_state=32), ShuffleSplit(n_splits=2, random_state=32), True), (ShuffleSplit(n_splits=2, test_size=2, random_state=32), ShuffleSplit(n_splits=2, test_size=3, random_state=32), False), (ShuffleSplit(n_splits=2, train_size=2, random_state=32), ShuffleSplit(n_splits=2, train_size=3, random_state=32), False), (StratifiedKFold(2), StratifiedKFold(3), False), (StratifiedKFold(2, shuffle=True), StratifiedKFold(2, shuffle=True), 'non-reproducible'), (StratifiedKFold(3, random_state=32, shuffle=True), StratifiedKFold(3, random_state=32, shuffle=True), True), (StratifiedKFold(3, random_state=33, shuffle=True), StratifiedKFold(3, random_state=32, shuffle=True), False), (StratifiedShuffleSplit(n_splits=2), StratifiedShuffleSplit(n_splits=2), 'non-reproducible'), (StratifiedShuffleSplit(n_splits=2, random_state=32), StratifiedShuffleSplit(n_splits=3, random_state=32), False), (StratifiedShuffleSplit(n_splits=2, random_state=32), StratifiedShuffleSplit(n_splits=2, random_state=32), True), (StratifiedShuffleSplit(n_splits=2, test_size=2, random_state=32), StratifiedShuffleSplit(n_splits=2, test_size=3, random_state=32), False), (StratifiedShuffleSplit(n_splits=2, train_size=2, random_state=32), StratifiedShuffleSplit(n_splits=2, train_size=3, random_state=32), False), (StratifiedGroupKFold(2), StratifiedGroupKFold(3), False), (StratifiedGroupKFold(3), StratifiedGroupKFold(3), True), (ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2), ContinuousStratifiedGroupKFold(n_bins=10, n_splits=3), False), (ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2), ContinuousStratifiedGroupKFold(n_bins=11, n_splits=2), False), (ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, method='quantile'), ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2), False), 
(ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, shuffle=True), ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, shuffle=True), 'non-reproducible'), (ContinuousStratifiedGroupKFold(n_bins=10, n_splits=3, random_state=32, shuffle=True), ContinuousStratifiedGroupKFold(n_bins=10, n_splits=3, random_state=32, shuffle=True), True), (ContinuousStratifiedGroupKFold(n_bins=10, n_splits=3, random_state=33, shuffle=True), ContinuousStratifiedGroupKFold(n_bins=10, n_splits=3, random_state=32, shuffle=True), False), (RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2), RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2), 'non-reproducible'), (RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, random_state=32), RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=3, random_state=32), False), (RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, random_state=32), RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, random_state=32), True), (RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, n_repeats=2, random_state=32), RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, n_repeats=3, random_state=32), False), ([(np.arange(2, 9), np.arange(0, 2)), (np.arange(0, 7), np.arange(7, 9))], [(np.arange(2, 9), np.arange(0, 2)), (np.arange(0, 7), np.arange(7, 9))], True), ([(np.arange(3, 9), np.arange(0, 3)), (np.arange(0, 7), np.arange(7, 9))], [(np.arange(2, 9), np.arange(0, 2)), (np.arange(0, 7), np.arange(7, 9))], False)])
def test__compute_cvmdsum(cv1, cv2, expected):
    """Check the md5-sum-based CV comparison helper.

    Two equivalent CV splitters must hash equal; shuffled splitters
    without a fixed random_state must hash to 'non-reproducible'.
    """
    md1 = _compute_cvmdsum(check_cv(cv1))
    md2 = _compute_cvmdsum(check_cv(cv2))
    if expected != 'non-reproducible':
        # expected is a bool: whether both splitters hash identically.
        assert (md1 == md2) is expected
    else:
        # Both hashes collapse to the sentinel string.
        assert md1 == md2 == expected
|
def test_api_stacking_models() -> None:
    """Test API of stacking models."""
    (X, y) = make_regression(n_features=6, n_samples=50)
    # Two feature types with three columns each.
    X_types = {'type1': [f'type1_{x}' for x in range(1, 4)], 'type2': [f'type2_{x}' for x in range(1, 4)]}
    X_names = (X_types['type1'] + X_types['type2'])
    data = pd.DataFrame(X)
    data.columns = X_names
    data['target'] = y
    # First-level model 1: SVM on type1 columns (C grid -> triggers a search).
    model_1 = PipelineCreator(problem_type='regression', apply_to='type1')
    model_1.add('filter_columns', apply_to='*', keep='type1')
    model_1.add('svm', C=[1, 2])
    # First-level model 2: random forest on type2 columns.
    model_2 = PipelineCreator(problem_type='regression', apply_to='type2')
    model_2.add('filter_columns', apply_to='*', keep='type2')
    model_2.add('rf')
    # Second level: stack both first-level models.
    model = PipelineCreator(problem_type='regression')
    model.add('stacking', estimators=[[('model_1', model_1), ('model_2', model_2)]], apply_to='*')
    (_, final) = run_cross_validation(X=X_names, X_types=X_types, y='target', data=data, model=model, seed=200, return_estimator='final')
    # The tuned first-level model must end up wrapped in a GridSearchCV.
    assert isinstance(final.steps[1][1].model.estimators[0][1], GridSearchCV)
|
def test_inspection_error(df_iris: pd.DataFrame) -> None:
    """Test that return_inspector requires keeping all estimators.

    Parameters
    ----------
    df_iris : pd.DataFrame
        The iris dataset.

    """
    features = ['sepal_length', 'sepal_width', 'petal_length']
    # Asking for an inspector while only keeping the final estimator fails.
    with pytest.raises(ValueError, match='return_inspector=True requires'):
        run_cross_validation(X=features, y='species', data=df_iris, model='rf', return_estimator='final', return_inspector=True, problem_type='classification')
    # With no explicit return_estimator the inspector is a third element.
    out = run_cross_validation(X=features, y='species', data=df_iris, model='rf', return_inspector=True, problem_type='classification')
    assert len(out) == 3
|
def test_final_estimator_picklable(tmp_path: Path, df_iris: pd.DataFrame) -> None:
    """Check that the final estimator survives a joblib round-trip.

    Parameters
    ----------
    tmp_path : pathlib.Path
        The path to the test directory.
    df_iris : pd.DataFrame
        The iris dataset.

    """
    dump_path = tmp_path / 'final_estimator.joblib'
    _, estimator = run_cross_validation(X=['sepal_length', 'sepal_width', 'petal_length'], y='species', data=df_iris, model='rf', problem_type='classification', return_estimator='final')
    # Dump and reload; any pickling failure raises here.
    joblib.dump(estimator, dump_path)
    joblib.load(dump_path)
|
def test_inspector_picklable(tmp_path: Path, df_iris: pd.DataFrame) -> None:
    """Check that the inspector survives a joblib round-trip.

    Parameters
    ----------
    tmp_path : pathlib.Path
        The path to the test directory.
    df_iris : pd.DataFrame
        The iris dataset.

    """
    dump_path = tmp_path / 'inspector.joblib'
    _, _, cv_inspector = run_cross_validation(X=['sepal_length', 'sepal_width', 'petal_length'], y='species', data=df_iris, model='rf', problem_type='classification', return_estimator='all', return_inspector=True)
    # Dump and reload; any pickling failure raises here.
    joblib.dump(cv_inspector, dump_path)
    joblib.load(dump_path)
|
def test_set_config_wrong_keys() -> None:
    """Test that set_config rejects a key that does not exist."""
    # Unknown configuration keys must raise a ValueError.
    with pytest.raises(ValueError, match='does not exist'):
        set_config('wrong_key', 1)
|
def test_set_get_config() -> None:
    """Test setting and getting config values.

    The original value is restored afterwards so the global config
    change does not leak into other tests (test pollution).
    """
    old_value = get_config('MAX_X_WARNS')
    new_value = (old_value + 1)
    try:
        set_config('MAX_X_WARNS', new_value)
        assert (get_config('MAX_X_WARNS') == new_value)
    finally:
        # Restore the previous value even if the assertion fails.
        set_config('MAX_X_WARNS', old_value)
|
def _check_df_input(prepared, X, y, groups, df):
(df_X, df_y, df_groups, _) = prepared
assert_array_equal(df[X].values, df_X[X].values)
assert_array_equal(df_y.values, df[y].values)
if (groups is not None):
assert_array_equal(df[groups].values, df_groups)
|
def test_prepare_input_data() -> None:
    """Test prepare input data (dataframe)."""
    data = np.random.rand(4, 10)
    columns = [f'f_{x}' for x in range(data.shape[1])]
    # Case 1: X as an explicit list of columns, no groups.
    X = columns[:(- 2)]
    y = columns[(- 1)]
    df = pd.DataFrame(data=data, columns=columns)
    X_types = {'continuous': X}
    prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=None, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=None, df=df)
    # Case 2: with a groups column.
    X = columns[:5]
    y = columns[7]
    groups = columns[8]
    X_types = {'continuous': X}
    prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    # Case 3: a different y/groups combination.
    X = columns[:5]
    y = columns[6]
    groups = columns[7]
    X_types = {'continuous': X}
    prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    # Case 4: X as a single column name (string).
    X = columns[2]
    y = columns[6]
    groups = columns[7]
    X_types = {'continuous': X}
    prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    # Case 5: X=[':'] selects every column except y.
    X = columns[:(- 1)]
    y = columns[(- 1)]
    groups = None
    X_types = {'continuous': X}
    prepared = prepare_input_data(X=[':'], y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    # Case 6: X=[':'] also excludes the groups column.
    X = columns[:(- 2)]
    y = columns[(- 2)]
    groups = columns[(- 1)]
    X_types = {'continuous': X}
    prepared = prepare_input_data(X=[':'], y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    # Regex selection: rename columns so features share the 'f_' prefix.
    columns = [f'f_{x}' for x in range((data.shape[1] - 2))]
    columns.append('t_8')
    columns.append('g_9')
    df = pd.DataFrame(data=data, columns=columns)
    X = columns[:(- 2)]
    y = 't_8'
    groups = 'g_9'
    X_types = {'continuous': X}
    # X as a regex string and as a list containing a regex.
    prepared = prepare_input_data(X='f_.*', y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    prepared = prepare_input_data(X=['f_.*'], y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    # X_types values may also be regexes (string or list), in any combination.
    X_types = {'continuous': 'f_.*'}
    prepared = prepare_input_data(X=['f_.*'], y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    X_types = {'continuous': ['f_.*']}
    prepared = prepare_input_data(X=['f_.*'], y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    X_types = {'continuous': ['f_.*']}
    prepared = prepare_input_data(X='f_.*', y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    X_types = {'continuous': 'f_.*'}
    prepared = prepare_input_data(X='f_.*', y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
|
def test_prepare_input_data_erors() -> None:
    """Test prepare input data (dataframe) errors."""
    # NOTE(review): function name has a typo ("erors"); kept for stability.
    data = np.random.rand(4, 10)
    columns = [f'f_{x}' for x in range(data.shape[1])]
    df = pd.DataFrame(data=data, columns=columns)
    # Non-string column names in the dataframe are rejected.
    with pytest.raises(ValueError, match='DataFrame columns must be strings'):
        X = 2
        y = columns[6]
        data = np.random.rand(4, 10)
        int_columns = ([f'f_{x}' for x in range((data.shape[1] - 1))] + [0])
        X = columns[:(- 2)]
        y = columns[(- 1)]
        df_wrong_cols = pd.DataFrame(data=data, columns=int_columns)
        prepared = prepare_input_data(X=X, y=y, df=df_wrong_cols, pos_labels=None, groups=None, X_types=None)
    # X of the wrong type.
    with pytest.raises(ValueError, match='X must be a string or list of strings'):
        X = 2
        y = columns[6]
        prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=None, X_types=None)
    # y of the wrong type.
    with pytest.raises(ValueError, match='y must be a string'):
        X = columns[:5]
        y = ['bad']
        prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=None, X_types=None)
    # groups of the wrong type.
    with pytest.raises(ValueError, match='groups must be a string'):
        X = columns[:5]
        y = columns[6]
        groups = 2
        prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=groups, X_types=None)
    # df of the wrong type.
    with pytest.raises(ValueError, match='df must be a pandas.DataFrame'):
        X = columns[:5]
        y = columns[6]
        prepared = prepare_input_data(X=X, y=y, df={}, pos_labels=None, groups=None, X_types=None)
    # A column listed in X that does not exist in df.
    X = (columns[:5] + ['wrong'])
    y = columns[6]
    groups = columns[7]
    with pytest.raises(ValueError, match="missing: \\['wrong'\\]"):
        prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=groups, X_types=None)
    # Disabling the X check silences the missing-column error.
    set_config('disable_x_check', True)
    prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=groups, X_types=None)
    set_config('disable_x_check', False)
    # Invalid y column.
    X = columns[:5]
    y = 'wrong'
    groups = columns[7]
    with pytest.raises(ValueError, match='not a valid column'):
        prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=groups, X_types=None)
    # Invalid groups column.
    X = columns[:5]
    y = columns[6]
    groups = 'wrong'
    with pytest.raises(ValueError, match='is not a valid column'):
        prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=groups, X_types=None)
    # Overlapping selections only warn: target in X ...
    X = columns[:5]
    y = columns[4]
    groups = columns[7]
    with pytest.warns(RuntimeWarning, match='contains the target'):
        prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=groups, X_types=None)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    # ... y and groups being the same column ...
    X = columns[:5]
    y = columns[6]
    groups = columns[6]
    with pytest.warns(RuntimeWarning, match='y and groups are the same column'):
        prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=groups, X_types=None)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
    # ... and groups being part of X.
    X = columns[:5]
    y = columns[6]
    groups = columns[3]
    with pytest.warns(RuntimeWarning, match='groups is part of X'):
        prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=groups, X_types=None)
    _check_df_input(prepared, X=X, y=y, groups=groups, df=df)
|
def test_prepare_input_data_pos_labels() -> None:
    """Test prepare input data (dataframe) pos_labels."""
    data = np.random.rand(20, 10)
    columns = [f'f_{x}' for x in range(data.shape[1])]
    df = pd.DataFrame(data=data, columns=columns)
    X = columns[:(- 1)]
    y = columns[(- 1)]
    X_types = {'continuous': X}
    # Binary target: pos_labels=1 keeps it unchanged, pos_labels=0 flips it.
    t_df = df.copy()
    t_df[y] = (t_df[y] > 0.5).astype(int)
    (_, prep_y, _, _) = prepare_input_data(X=X, y=y, df=t_df, pos_labels=1, groups=None, X_types=X_types)
    assert_series_equal(prep_y, t_df[y])
    (_, prep_y, _, _) = prepare_input_data(X=X, y=y, df=t_df, pos_labels=0, groups=None, X_types=X_types)
    assert_series_equal(prep_y, (1 - t_df[y]))
    # Three-level categorical target: low / mid / high.
    t_df = df.copy()
    t_df[y] = 'mid'
    target = t_df[y]
    high_mask = (df[y] > 0.8)
    low_mask = (df[y] < 0.2)
    with warnings.catch_warnings():
        # Ignore pandas chained-assignment warnings while building the target.
        warnings.simplefilter('ignore')
        target.loc[high_mask] = 'high'
        target.loc[low_mask] = 'low'
        t_df[y] = target
    # Single positive label: each level in turn becomes the 1 class.
    bin_y = (target == 'high').astype(int)
    assert (0 in bin_y.values)
    assert (1 in bin_y.values)
    (_, prep_y, _, _) = prepare_input_data(X=X, y=y, df=t_df, pos_labels='high', groups=None, X_types=X_types)
    assert_series_equal(prep_y, bin_y)
    bin_y = (target == 'low').astype(int)
    assert (0 in bin_y.values)
    assert (1 in bin_y.values)
    (_, prep_y, _, _) = prepare_input_data(X=X, y=y, df=t_df, pos_labels='low', groups=None, X_types=X_types)
    assert_series_equal(prep_y, bin_y)
    bin_y = (target == 'mid').astype(int)
    assert (0 in bin_y.values)
    assert (1 in bin_y.values)
    (_, prep_y, _, _) = prepare_input_data(X=X, y=y, df=t_df, pos_labels='mid', groups=None, X_types=X_types)
    assert_series_equal(prep_y, bin_y)
    # A list of positive labels is OR-combined.
    bin_y = target.isin(['low', 'mid']).astype(int)
    assert (0 in bin_y.values)
    assert (1 in bin_y.values)
    (_, prep_y, _, _) = prepare_input_data(X=X, y=y, df=t_df, pos_labels=['low', 'mid'], groups=None, X_types=X_types)
    assert_series_equal(prep_y, bin_y)
    # Labels missing from the target only warn.
    with pytest.warns(RuntimeWarning, match='labels are not in the target'):
        bin_y = (target == 'low').astype(int)
        assert (0 in bin_y.values)
        assert (1 in bin_y.values)
        (_, prep_y, _, _) = prepare_input_data(X=X, y=y, df=t_df, pos_labels=['low', 'missing'], groups=None, X_types=X_types)
        assert_series_equal(prep_y, bin_y)
    # Degenerate cases: all-positive and all-negative targets warn too.
    with pytest.warns(RuntimeWarning, match='All targets have been set to 1'):
        bin_y = target.isin(['low', 'mid', 'high']).astype(int)
        assert (0 not in bin_y.values)
        assert (1 in bin_y.values)
        (_, prep_y, _, _) = prepare_input_data(X=X, y=y, df=t_df, pos_labels=['low', 'mid', 'high'], groups=None, X_types=X_types)
        assert_series_equal(prep_y, bin_y)
    with pytest.warns(RuntimeWarning, match='All targets have been set to 0'):
        bin_y = target.isin(['wrong']).astype(int)
        assert (0 in bin_y.values)
        assert (1 not in bin_y.values)
        (_, prep_y, _, _) = prepare_input_data(X=X, y=y, df=t_df, pos_labels=['wrong'], groups=None, X_types=X_types)
        assert_series_equal(prep_y, bin_y)
|
def test_pick_columns_using_column_name() -> None:
    """Test pick columns using column names as regexes."""
    # Exact names are matched regardless of position in the column list.
    cols = ['conf_1', 'conf_2', 'feat_1', 'feat_2', 'Feat_3']
    assert _pick_columns(['conf_2', 'Feat_3'], cols) == ['conf_2', 'Feat_3']
    cols = ['Feat_3', 'conf_1', 'conf_2', 'feat_1', 'feat_2']
    assert _pick_columns(['conf_2', 'Feat_3'], cols) == ['conf_2', 'Feat_3']
    # Partial names must NOT match: prefixes, infixes and suffixes all fail.
    numeric_cols = ['120', '121', '122', '123', '124', '125']
    for fragment in ('12', '2', '24'):
        with pytest.raises(ValueError, match=f"following are missing: \\['{fragment}'\\]"):
            _pick_columns([fragment], numeric_cols)
    # A mix of existing and missing names reports only the missing ones.
    with pytest.raises(ValueError, match="following are missing: \\['130'\\]"):
        _pick_columns(['122', '125', '130'], numeric_cols)
    # All-existing names are returned in the requested order.
    assert _pick_columns(['122', '125'], numeric_cols) == ['122', '125']
|
def test_pick_columns_using_regex_match() -> None:
    """Test pick columns using regexes."""
    # Matching is case-sensitive: 'Feat_3' is not picked by '.*feat.*'.
    cols = ['conf_1', 'conf_2', 'feat_1', 'feat_2', 'Feat_3']
    assert _pick_columns(['.*conf.*', '.*feat.*'], cols) == cols[:(- 1)]
    # Overlapping regexes must not produce duplicated picks.
    cols = ['conf_1', 'conf_2', '_feat_1', 'feat_2', 'Feat_3']
    assert _pick_columns(['.*conf.*', '.*feat.*', 'feat_.*'], cols) == cols[:(- 1)]
|
def test_pick_columns_using_regex_and_column_name_match() -> None:
    """Test pick columns mixing regexes and exact column names."""
    cols = ['conf_1', 'conf_2', 'feat_1', 'feat_2', 'Feat_3']
    # Regexes cover the lowercase names; the exact name adds 'Feat_3'.
    picked = _pick_columns(['.*conf.*', '.*feat.*', 'Feat_3'], cols)
    assert picked == cols
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.