code stringlengths 17 6.64M |
|---|
def test_prepare_data_pick_regexp():
    """Test picking columns by regexp in prepare_input_data."""
    # 5 samples x 10 features whose names encode an _a*_b*_c*_ hierarchy so
    # that regular expressions can select groups of columns.
    data = np.random.rand(5, 10)
    columns = ['_a_b_c1_', '_a_b_c2_', '_a_b2_c3_', '_a_b2_c4_', '_a_b3_c5_', '_a_b3_c6_', '_a3_b2_c7_', '_a2_b_c7_', '_a2_b_c8_', '_a2_b_c9_']
    # Case 1: X given as explicit column names (all but the last); the last
    # column is the target. X_types use regexps that match subsets of X.
    X = columns[:(- 1)]
    y = columns[(- 1)]
    df = pd.DataFrame(data=data, columns=columns)
    X_types = {'numerical': ['_a_b.*'], 'categorical': ['_a[2-3]_b.*']}
    prepared = prepare_input_data(X=X, y=y, df=df, pos_labels=None, groups=None, X_types=X_types)
    (df_X, df_y, _, prep_X_types) = prepared
    assert all(((x in df_X.columns) for x in X))
    assert (y not in df_X.columns)
    assert (df_y.name == y)
    assert (X_types == prep_X_types)
    # Case 2: X=[':'] selects every column except y.
    prepared = prepare_input_data(X=[':'], y=y, df=df, pos_labels=None, groups=None, X_types=X_types)
    (df_X, df_y, _, prep_X_types) = prepared
    assert all(((x in df_X.columns) for x in X))
    assert (y not in df_X.columns)
    assert (df_y.name == y)
    assert (X_types == prep_X_types)
    # Case 3: same ':' selector, different target column.
    X = columns[:6]
    y = '_a3_b2_c7_'
    prepared = prepare_input_data(X=[':'], y=y, df=df, pos_labels=None, groups=None, X_types=X_types)
    (df_X, df_y, _, prep_X_types) = prepared
    assert all(((x in df_X.columns) for x in X))
    assert (y not in df_X.columns)
    assert (df_y.name == y)
    assert (X_types == prep_X_types)
    # Case 4: a groups column is specified; it must be excluded from X and
    # returned as a separate series.
    X = columns[:6]
    y = '_a3_b2_c7_'
    groups = columns[(- 1)]
    prepared = prepare_input_data(X=[':'], y=y, df=df, pos_labels=None, groups=groups, X_types=X_types)
    (df_X, df_y, df_groups, prep_X_types) = prepared
    assert all(((x in df_X.columns) for x in X))
    assert (y not in df_X.columns)
    assert (groups not in df_X.columns)
    assert (df_y.name == y)
    assert (df_groups.name == groups)
    assert (X_types == prep_X_types)
    # Case 5: pick X itself with a single regexp.
    X = columns[:6]
    y = '_a3_b2_c7_'
    X_types = {'numerical': ['_a_b.*']}
    prepared = prepare_input_data(X=['_a_.*'], y=y, df=df, pos_labels=None, groups=None, X_types=X_types)
    (df_X, df_y, _, prep_X_types) = prepared
    assert all(((x in df_X.columns) for x in X))
    assert (y not in df_X.columns)
    assert (df_y.name == y)
    assert (X_types == prep_X_types)
    # Case 6: pick X with several regexps combined.
    X = columns[:6]
    y = '_a3_b2_c7_'
    X_types = {'numerical': ['_a_b.*'], 'categorical': ['_a[2-3]_b.*']}
    prepared = prepare_input_data(X=['.*_b_.*', '.*a_b2_.*', '.*b3_.*'], y=y, df=df, pos_labels=None, groups=None, X_types=X_types)
    (df_X, df_y, _, prep_X_types) = prepared
    assert all(((x in df_X.columns) for x in X))
    assert (y not in df_X.columns)
    assert (df_y.name == y)
    assert (X_types == prep_X_types)
|
def test_check_consistency() -> None:
    """Test check_consistency function."""
    # Binary classification target: consistent, no warning may be raised
    # (simplefilter('error') turns any warning into a test failure).
    y = pd.Series(np.random.randint(0, 2, size=10))
    problem_type = 'classification'
    groups = None
    cv = 5
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        check_consistency(y=y, cv=cv, groups=groups, problem_type=problem_type)
    # Multiclass classification target: also consistent.
    y = pd.Series(np.random.randint(0, 5, size=10))
    problem_type = 'classification'
    groups = None
    cv = 5
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        check_consistency(y=y, cv=cv, groups=groups, problem_type=problem_type)
    # Continuous target with regression: consistent.
    y = pd.Series(np.random.randn(10))
    problem_type = 'regression'
    groups = None
    cv = 5
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        check_consistency(y=y, cv=cv, groups=groups, problem_type=problem_type)
    # Binary-looking target used for regression: warns about the low number
    # of distinct values.
    y = pd.Series(np.random.randint(0, 2, size=10))
    problem_type = 'regression'
    groups = None
    cv = 5
    with pytest.warns(RuntimeWarning, match='only 2 distinct values'):
        check_consistency(y=y, cv=cv, groups=groups, problem_type=problem_type)
    # Continuous target used for classification: warns about too many classes.
    y = pd.Series(np.random.rand(10))
    problem_type = 'classification'
    groups = None
    cv = 5
    with pytest.warns(RuntimeWarning, match='larger than the number'):
        check_consistency(y=y, cv=cv, groups=groups, problem_type=problem_type)
    # String target used for regression: warns it is not suitable.
    y = pd.Series((['A'] * 10))
    problem_type = 'regression'
    groups = None
    cv = 5
    with pytest.warns(RuntimeWarning, match='not suitable for a regression'):
        check_consistency(y=y, cv=cv, groups=groups, problem_type=problem_type)
    # Single-class classification target: hard error.
    y = pd.Series((['A'] * 10))
    problem_type = 'classification'
    groups = None
    cv = 5
    with pytest.raises(ValueError, match='only one class in y'):
        check_consistency(y=y, cv=cv, groups=groups, problem_type=problem_type)
    # groups given but a plain integer CV (not group-aware): warns.
    y = pd.Series(np.random.randint(0, 2, size=10))
    problem_type = 'classification'
    groups = pd.Series((['A'] * 10))
    cv = 5
    with pytest.warns(RuntimeWarning, match='groups was specified but the CV '):
        check_consistency(y=y, cv=cv, groups=groups, problem_type=problem_type)
    # Group-aware CV splitters must accept groups silently.
    valid_instances = (GroupKFold(), GroupShuffleSplit(), LeaveOneGroupOut(), LeavePGroupsOut(n_groups=2), StratifiedGroupKFold(), ContinuousStratifiedGroupKFold(n_bins=2), RepeatedContinuousStratifiedGroupKFold(n_bins=2))
    for cv in valid_instances:
        with warnings.catch_warnings():
            warnings.simplefilter('error')
            check_consistency(y=y, cv=cv, groups=groups, problem_type=problem_type)
    # Non group-aware CV splitters with groups: warns for each one.
    invalid_instances = (ShuffleSplit(), StratifiedKFold(), StratifiedShuffleSplit(), LeaveOneOut(), LeavePOut(p=2))
    for cv in invalid_instances:
        with pytest.warns(RuntimeWarning, match='groups was specified but the CV '):
            check_consistency(y=y, cv=cv, groups=groups, problem_type=problem_type)
|
def test__check_x_types() -> None:
    """Test checking for valid X types.

    Covers fully-specified X_types, scalar-to-list normalization, missing
    X_types, columns missing from X_types, invalid or duplicated entries,
    and the ``disable_xtypes_check`` global configuration flag.
    """
    # Fully specified X_types pass silently (any warning becomes an error).
    X = ['a', 'b', 'c']
    X_types = {'categorical': ['a', 'b'], 'continuous': ['c']}
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        checked_X_types = _check_x_types(X=X, X_types=X_types)
    assert (X_types == checked_X_types)
    # A scalar value is normalized to a one-element list.
    X = ['a', 'b', 'c']
    X_types = {'categorical': ['a', 'b'], 'continuous': 'c'}
    expected_X_types = {'categorical': ['a', 'b'], 'continuous': ['c']}
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        checked_X_types = _check_x_types(X=X, X_types=X_types)
    assert (expected_X_types == checked_X_types)
    # No X_types at all: warn and return an empty dict.
    with pytest.warns(RuntimeWarning, match='No type checking will be performed'):
        checked_X_types = _check_x_types(X=X, X_types=None)
    assert ({} == checked_X_types)
    # Columns not mentioned in X_types are treated as continuous (warning).
    X = ['a', 'b', 'c']
    X_types = {'categorical': ['a', 'b']}
    with pytest.warns(RuntimeWarning, match='will be treated as continuous'):
        checked_X_types = _check_x_types(X=X, X_types=X_types)
    assert (X_types == checked_X_types)
    # A column in X_types that is not in X is an error.
    X = ['a', 'b', 'c']
    X_types = {'categorical': ['a', 'b', 'd']}
    with pytest.raises(ValueError, match='in X_types but not in X'):
        _check_x_types(X=X, X_types=X_types)
    # A column assigned to more than one type is an error.
    X = ['a', 'b', 'c']
    X_types = {'categorical': ['a', 'b'], 'continuous': ['a', 'c']}
    with pytest.raises(ValueError, match='more than once in X_types'):
        _check_x_types(X=X, X_types=X_types)
    # With the check disabled, the invalid specifications above are accepted.
    # BUGFIX: restore the global flag in a ``finally`` block so a failure in
    # between does not leak ``disable_xtypes_check=True`` into other tests.
    set_config('disable_xtypes_check', True)
    try:
        X = ['a', 'b', 'c']
        X_types = {'categorical': ['a', 'b', 'd']}
        _check_x_types(X=X, X_types=X_types)
        X = ['a', 'b', 'c']
        X_types = {'categorical': ['a', 'b'], 'continuous': ['a', 'c']}
        _check_x_types(X=X, X_types=X_types)
    finally:
        set_config('disable_xtypes_check', False)
|
def test__check_x_types_regexp() -> None:
    """Test checking for valid X types using regexp."""
    X = ['_a_b_c1_', '_a_b_c2_', '_a_b2_c3_', '_a_b2_c4_', '_a_b3_c5_', '_a_b3_c6_', '_a3_b2_c7_', '_a2_b_c7_', '_a2_b_c8_', '_a2_b_c9_']
    # The regexps partition the columns between the two types: no warnings.
    X_types = {'categorical': ['.*a_b.*', '_a2.*'], 'continuous': ['_a3.*']}
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        checked_X_types = _check_x_types(X=X, X_types=X_types)
    assert (X_types == checked_X_types)
    # '_a2_b_c7_' is matched by '_a2.*' (categorical) AND listed explicitly
    # as continuous: assigned more than once -> error.
    X_types = {'categorical': ['.*a_b.*', '_a2.*'], 'continuous': ['_a2_b_c7_']}
    with pytest.raises(ValueError, match='more than once in X_types'):
        _check_x_types(X=X, X_types=X_types)
    # Some columns match no pattern: warn that they are not defined.
    X_types = {'categorical': ['.*a_b.*'], 'continuous': ['_a2_b_c7_']}
    with pytest.warns(RuntimeWarning, match='not defined in X_types'):
        checked_X_types = _check_x_types(X=X, X_types=X_types)
    assert (X_types == checked_X_types)
    # Overlapping patterns WITHIN the same type are accepted silently.
    X_types = {'categorical': ['.*a_b.*', '_a2.*', '_a_b_c.*'], 'continuous': ['_a3.*']}
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        checked_X_types = _check_x_types(X=X, X_types=X_types)
    assert (X_types == checked_X_types)
|
def list_transformers() -> List[str]:
    """Return the names of all registered transformers.

    Returns
    -------
    list of str
        The available transformer names.

    """
    return [t_name for t_name in _available_transformers]
|
def get_transformer(name: str, **params: Any) -> TransformerLike:
    """Build a transformer by name.

    Parameters
    ----------
    name : str
        The transformer name.
    **params : dict
        Parameters passed to the transformer's constructor.

    Returns
    -------
    scikit-learn compatible transformer
        The transformer object.

    """
    if name not in _available_transformers:
        raise_error(f'The specified transformer ({name}) is not available. Valid options are: {list(_available_transformers.keys())}')
    klass = _available_transformers[name]
    return klass(**params)
|
def register_transformer(transformer_name, transformer_cls, overwrite=None):
    """Register a transformer to julearn.

    This function allows you to add a transformer to julearn.
    Afterwards, it behaves like every other julearn transformer and can
    be referred to by name.

    Parameters
    ----------
    transformer_name : str
        Name by which the transformer will be referenced by
    transformer_cls : object
        The class by which the transformer can be initialized from.
    overwrite : bool, optional
        Whether overwrite should be allowed. Options are:

        * None : overwrite is possible, but warns the user (default).
        * True : overwrite is possible without any warning.
        * False : overwrite is not possible, error is raised instead.

    Raises
    ------
    ValueError
        If `transformer_name` is already registered and `overwrite` is False.

    Warns
    -----
    RuntimeWarning
        If `transformer_name` is already registered and `overwrite` is None.
    """
    # An existing registration under the same name triggers the overwrite
    # policy; a new name is always registered silently.
    if (_available_transformers.get(transformer_name) is not None):
        if (overwrite is None):
            # Default policy: allow the overwrite but warn the user.
            warn_with_log(f'Transformer named {transformer_name} already exists. Therefore, {transformer_name} will be overwritten. To remove this warning set overwrite=True.')
        elif (overwrite is False):
            raise_error(f'Transformer named {transformer_name} already exists and overwrite is set to False. Set `overwrite=True` in case you want to overwrite an existing transformer.')
    logger.info(f'registering transformer named {transformer_name}.')
    _available_transformers[transformer_name] = transformer_cls
|
def reset_transformer_register():
    """Reset the transformer register to its initial state.

    Returns
    -------
    dict
        The restored mapping of transformer names to classes.
    """
    global _available_transformers
    # Deep-copy the pristine register so subsequent registrations cannot
    # mutate the reset baseline.
    _available_transformers = deepcopy(_available_transformers_reset)
    return _available_transformers
|
class ChangeColumnTypes(JuTransformer):
    """Transformer to change the column types.

    Parameters
    ----------
    X_types_renamer : dict
        Mapping from old column type to new column type. Columns (within
        ``apply_to``) whose encoded type matches a key are re-encoded with
        the corresponding value.
    apply_to : ColumnTypesLike
        From which feature types ('X_types') to change the types.
    row_select_col_type : str or list of str or set of str or ColumnTypes
        The column types needed to select rows (default is None).
        Not really useful for this one, but here for compatibility.
    row_select_vals : str, int, bool or list of str, int, bool
        The value(s) which should be selected in the row_select_col_type
        to select the rows used for training (default is None).
        Not really useful for this one, but here for compatibility.
    """

    def __init__(self, X_types_renamer: Dict[(str, str)], apply_to: ColumnTypesLike, row_select_col_type: Optional[ColumnTypesLike]=None, row_select_vals: Optional[Union[(str, int, list, bool)]]=None):
        self.X_types_renamer = X_types_renamer
        super().__init__(apply_to=apply_to, needed_types=None, row_select_col_type=row_select_col_type, row_select_vals=row_select_vals)

    def _fit(self, X: pd.DataFrame, y: Optional[DataLike]=None) -> 'ChangeColumnTypes':
        """Fit the transformer.

        Learns the column renaming (old encoded type -> new encoded type).
        This will not transform the data yet.

        Parameters
        ----------
        X : pd.DataFrame
            Data with type-encoded column names.
        y : Data-Like, optional
            Target data. This data will not be used.

        Returns
        -------
        ChangeColumnTypes
            The fitted transformer.
        """
        self.feature_names_in_ = X.columns
        to_rename = {}
        # Only columns matching `apply_to` are considered for renaming.
        for col in self.filter_columns(X).columns.tolist():
            # Column types are encoded in the name as '<name>__:type:__<type>'.
            if ('__:type:__' in col):
                (name, old_type) = col.split('__:type:__')
                if (old_type in self.X_types_renamer):
                    to_rename[col] = f'{name}__:type:__{self.X_types_renamer[old_type]}'
        self._renamer = to_rename
        return self

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Change the column types.

        Parameters
        ----------
        X : pd.DataFrame
            Data to set the column types.

        Returns
        -------
        pd.DataFrame
            The transformed data (columns renamed; values unchanged).
        """
        return X.rename(columns=self._renamer)

    def get_feature_names_out(self, input_features: Optional[List[str]]=None) -> List[str]:
        """Get names of features to be returned.

        Parameters
        ----------
        input_features : None
            Parameter to ensure scikit-learn compatibility. It is not used by
            the method.

        Returns
        -------
        list
            Names of features to be kept in the output pd.DataFrame.
        """
        out = self.feature_names_in_
        # Restrict to the columns this transformer applies to.
        out = self.filter_columns(pd.DataFrame(columns=out)).columns
        # NOTE(review): Index.map with a dict maps keys absent from the
        # renamer to NaN — confirm that all filtered columns are always
        # present in self._renamer, otherwise output names may be NaN.
        out = out.map(self._renamer)
        return out
|
class DropColumns(JuTransformer):
    """Drop columns of a DataFrame.

    Parameters
    ----------
    apply_to : ColumnTypesLike
        The feature types ('X_types') to drop.
    row_select_col_type : str or list of str or set of str or ColumnTypes
        The column types needed to select rows (default is None).
        Not really useful for this one, but here for compatibility.
    row_select_vals : str, int, bool or list of str, int, bool
        The value(s) which should be selected in the row_select_col_type
        to select the rows used for training (default is None).
        Not really useful for this one, but here for compatibility.
    """

    def __init__(self, apply_to: ColumnTypesLike, row_select_col_type: Optional[ColumnTypesLike]=None, row_select_vals: Optional[Union[(str, int, list, bool)]]=None):
        super().__init__(apply_to=apply_to, needed_types=None, row_select_col_type=row_select_col_type, row_select_vals=row_select_vals)

    def _fit(self, X: pd.DataFrame, y: Optional[DataLike]=None) -> 'DropColumns':
        """Fit the transformer.

        Determines which columns match ``apply_to`` and builds the boolean
        support mask of the remaining (kept) columns.

        Parameters
        ----------
        X : pd.DataFrame
            Data to drop columns.
        y : Data-Like, optional
            Target data. This data will not be used.

        Returns
        -------
        DropColumns
            The fitted transformer.
        """
        # Start with "keep everything"; flip matched columns to False below.
        self.support_mask_ = pd.Series(True, index=X.columns, dtype=bool)
        try:
            self.drop_columns_ = self.filter_columns(X).columns
            self.support_mask_[self.drop_columns_] = False
        except ValueError:
            # filter_columns raised — presumably no column matches the
            # selector (TODO confirm), so nothing is dropped.
            self.drop_columns_ = []
        # Expose the mask as a plain numpy array (scikit-learn convention).
        self.support_mask_ = self.support_mask_.values
        return self

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Drop the columns.

        Parameters
        ----------
        X : pd.DataFrame
            Data to drop columns.

        Returns
        -------
        pd.DataFrame
            Data with dropped columns.
        """
        logger.debug(f'Dropping columns: {self.drop_columns_}')
        return X.drop(columns=self.drop_columns_)

    def get_support(self, indices: bool=False) -> Union[(ArrayLike, pd.Series)]:
        """Get the support mask.

        Parameters
        ----------
        indices : bool
            If true, return the integer indices of the kept columns instead
            of the boolean mask.

        Returns
        -------
        support_mask : numpy.array
            The support mask
        """
        if indices:
            return np.arange(len(self.support_mask_))[self.support_mask_]
        else:
            return self.support_mask_
|
class FilterColumns(JuTransformer):
    """Filter columns of a DataFrame.

    Parameters
    ----------
    keep : ColumnTypesLike, optional
        Which feature types ('X_types') to keep. If not specified, 'keep'
        defaults to 'continuous'.
    row_select_col_type : str or list of str or set of str or ColumnTypes
        The column types needed to select rows (default is None).
        Not really useful for this one, but here for compatibility.
    row_select_vals : str, int, bool or list of str, int, bool
        The value(s) which should be selected in the row_select_col_type
        to select the rows used for training (default is None).
        Not really useful for this one, but here for compatibility.
    """

    def __init__(self, keep: Optional[ColumnTypesLike]=None, row_select_col_type: Optional[ColumnTypesLike]=None, row_select_vals: Optional[Union[(str, int, list, bool)]]=None):
        if (keep is None):
            keep = 'continuous'
        self.keep: ColumnTypes = ensure_column_types(keep)
        # apply_to='*' — the transformer sees all columns; the filtering is
        # done by the inner ColumnTransformer built in _fit.
        super().__init__(apply_to='*', needed_types=keep, row_select_col_type=row_select_col_type, row_select_vals=row_select_vals)

    def _fit(self, X: pd.DataFrame, y: Optional[DataLike]=None) -> 'FilterColumns':
        """Fit the transformer.

        Parameters
        ----------
        X : pd.DataFrame
            The data to fit the transformer on.
        y : DataLike, optional
            The target data. This data will not be used.

        Returns
        -------
        FilterColumns
            The fitted transformer.
        """
        # Delegate the filtering to a ColumnTransformer that passes through
        # the kept types and drops everything else.
        apply_to_selector = self.keep.to_type_selector()
        self.filter_columns_ = ColumnTransformer(transformers=[('keep', 'passthrough', apply_to_selector)], remainder='drop', verbose_feature_names_out=False)
        self.filter_columns_.fit(X, y)
        return self

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Transform the data.

        Parameters
        ----------
        X : pd.DataFrame
            The data to filter the columns on.

        Returns
        -------
        DataLike
            The filtered data.
        """
        return self.filter_columns_.transform(X)

    def get_feature_names_out(self, input_features: Optional[List[str]]=None) -> List[str]:
        """Get names of features to be returned.

        Parameters
        ----------
        input_features : None
            Parameter to ensure scikit-learn compatibility. It is not used by
            the method.

        Returns
        -------
        list
            Names of features to be kept in the output pd.DataFrame.
        """
        out = self.filter_columns_.get_feature_names_out(input_features)
        return out
|
class SetColumnTypes(JuTransformer):
    """Transformer to set the column types.

    Parameters
    ----------
    X_types : dict, optional
        A dictionary with the column types to set. The keys are the column
        types and the values are the columns to set the type to. If None, will
        set all the column types to `continuous` (default is None).
    row_select_col_type : str or list of str or set of str or ColumnTypes
        The column types needed to select rows (default is None).
        Not really useful for this one, but here for compatibility.
    row_select_vals : str, int, bool or list of str, int, bool
        The value(s) which should be selected in the row_select_col_type
        to select the rows used for training (default is None).
        Not really useful for this one, but here for compatibility.
    """

    def __init__(self, X_types: Optional[Dict[(str, List[str])]]=None, row_select_col_type: Optional[ColumnTypesLike]=None, row_select_vals: Optional[Union[(str, int, list, bool)]]=None):
        if (X_types is None):
            X_types = {}
        # Validate eagerly: every type must map to a list of column patterns.
        for (X_type, columns) in X_types.items():
            if (not isinstance(columns, list)):
                raise_error(f'Each value of X_types must be a list. Found {X_type} with value {columns} of type {type(columns)}')
        self.X_types = X_types
        super().__init__(apply_to='*', needed_types=None, row_select_col_type=row_select_col_type, row_select_vals=row_select_vals)

    def _fit(self, X: pd.DataFrame, y: Optional[DataLike]=None) -> 'SetColumnTypes':
        """Fit the transformer.

        Learns the column-name mapping that encodes the requested types.
        This will not transform the data yet.

        Parameters
        ----------
        X : pd.DataFrame
            Data to add column types.
        y : Data-Like, optional
            Target data. This data remains unchanged.

        Returns
        -------
        SetColumnTypes
            The fitted transformer.
        """
        # Accept bare arrays too: wrap them and stringify the column labels.
        if (not isinstance(X, (pd.DataFrame, pd.Series))):
            X = pd.DataFrame(X)
            X.columns = X.columns.astype(str)
        self.feature_names_in_ = X.columns
        logger.debug(f'Setting column types for {self.feature_names_in_}')
        column_mapper_ = {}
        # First pass: keep already-encoded types, default the rest to
        # 'continuous'.
        for col in X.columns.tolist():
            if ('__:type:__' in col):
                (col_no_type, X_type) = col.split('__:type:__')
            else:
                (col_no_type, X_type) = (col, 'continuous')
            column_mapper_[col_no_type] = change_column_type(col_no_type, X_type)
        # Second pass: X_types entries (full-match regexps) override the
        # defaults from the first pass.
        for (X_type, columns) in self.X_types.items():
            t_columns = [col for col in X.columns if any((re.fullmatch(exp, col) for exp in columns))]
            column_mapper_.update({col: change_column_type(col, X_type) for col in t_columns})
        logger.debug(f'  Column mappers for {column_mapper_}')
        self.column_mapper_ = column_mapper_
        return self

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Transform the data.

        Parameters
        ----------
        X : pd.DataFrame
            Data to set the column types.

        Returns
        -------
        pd.DataFrame
            The same dataframe (renaming happens via get_feature_names_out).
        """
        return X

    def get_feature_names_out(self, input_features: Optional[List[str]]=None) -> List[str]:
        """Get names of features to be returned.

        Parameters
        ----------
        input_features : None
            Parameter to ensure scikit-learn compatibility. It is not used by
            the method.

        Returns
        -------
        list
            Names of features to be kept in the output pd.DataFrame.
        """
        out = self.feature_names_in_
        # Strip any pre-existing type suffix, then apply the learned mapping.
        out = out.map((lambda col: col.split('__:type:__')[0]))
        out = out.map(self.column_mapper_)
        return out
|
def test_DropColumns() -> None:
    """Test that DropColumns removes exactly the confound columns."""
    transformer = DropColumns(apply_to=['confound'])
    transformer.fit(X_with_types)
    X_trans = transformer.transform(X_with_types)
    support = transformer.get_support()
    kept = ['a__:type:__continuous', 'b__:type:__continuous', 'e__:type:__categorical', 'f__:type:__categorical']
    # The transformed frame equals the original minus the confounds ...
    assert_frame_equal(X_trans, X_with_types[kept])
    dropped = ['c__:type:__confound', 'd__:type:__confound']
    assert_frame_equal(X_with_types.drop(columns=dropped), X_trans)
    # ... and the support mask flags exactly the kept columns.
    assert all(support == [1, 1, 0, 0, 1, 1])
|
def test_FilterColumns() -> None:
    """Test that FilterColumns keeps only the continuous columns."""
    # Named so as not to shadow the builtin ``filter``.
    filter_columns = FilterColumns(keep=['continuous'])
    kept_columns = ['a__:type:__continuous', 'b__:type:__continuous']
    filter_columns.set_output(transform='pandas').fit(X_with_types)
    X_expected = X_with_types.copy()[kept_columns]
    X_trans = filter_columns.transform(X_with_types)
    assert isinstance(X_expected, pd.DataFrame)
    assert_frame_equal(X_expected, X_trans)
|
def test_SetColumnTypes(X_iris: pd.DataFrame, X_types_iris: Optional[Dict]) -> None:
    """Test SetColumnTypes.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset.
    X_types_iris : dict, optional
        The types to set in the iris dataset.
    """
    _X_types_iris = ({} if (X_types_iris is None) else X_types_iris)
    # Encode the explicitly specified types into the column names.
    to_rename = {col: f'{col}__:type:__{dtype}' for (dtype, columns) in _X_types_iris.items() for col in columns}
    X_iris_with_types = X_iris.rename(columns=to_rename, inplace=False)
    # BUGFIX: rename() returns a new frame and the result was previously
    # discarded — columns without an explicit type must be tagged as
    # continuous for the comparisons below.
    X_iris_with_types = X_iris_with_types.rename(columns=(lambda col: (col if ('__:type:__' in col) else f'{col}__:type:__continuous')))
    st = SetColumnTypes(X_types_iris).set_output(transform='pandas')
    Xt = st.fit_transform(X_iris)
    # Fitting on already-typed columns must be idempotent.
    Xt_iris_with_types = st.fit_transform(X_iris_with_types)
    assert_frame_equal(Xt, X_iris_with_types)
    assert_frame_equal(Xt_iris_with_types, X_iris_with_types)
|
def test_SetColumnTypes_input_validation(X_iris: pd.DataFrame) -> None:
    """Test that SetColumnTypes rejects non-list X_types values.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset.

    """
    bad_types = {'confound': 'chicken'}
    with pytest.raises(ValueError, match='Each value of X_types must be a list.'):
        SetColumnTypes(bad_types).fit(X_iris)
|
def test_SetColumnTypes_array(X_iris: pd.DataFrame, X_types_iris: Optional[Dict]) -> None:
    """Test SetColumnTypes on bare numpy arrays.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset.
    X_types_iris : dict, optional
        The types to set in the iris dataset.
    """
    _X_types_iris = ({} if (X_types_iris is None) else X_types_iris)
    to_rename = {col: f'{icol}__:type:__{dtype}' for (dtype, columns) in _X_types_iris.items() for (icol, col) in enumerate(columns)}
    X_iris_with_types = X_iris.rename(columns=to_rename, inplace=False)
    to_rename = {col: f'{icol}__:type:__continuous' for (icol, col) in enumerate(X_iris.columns) if ('__:type:__' not in col)}
    # BUGFIX: rename() returns a new frame; the result was previously
    # discarded, leaving the statement a no-op.
    X_iris_with_types = X_iris_with_types.rename(columns=to_rename)
    st = SetColumnTypes(X_types_iris).set_output(transform='pandas')
    # Passing .values strips the column names, so both calls see plain
    # arrays and must produce identically-typed outputs.
    Xt = st.fit_transform(X_iris.values)
    Xt_iris_with_types = st.fit_transform(X_iris_with_types.values)
    assert_frame_equal(Xt, Xt_iris_with_types)
|
class JuColumnTransformer(JuTransformer):
    """Column transformer that can be used in a julearn pipeline.

    This column transformer is a wrapper around the sklearn column
    transformer, so it can be used directly with julearn pipelines.

    Parameters
    ----------
    name : str
        Name of the transformer.
    transformer : EstimatorLike
        The transformer to apply to the columns.
    apply_to : ColumnTypesLike
        To which column types the transformer needs to be applied to.
    needed_types : ColumnTypesLike, optional
        Which feature types are needed for the transformer to work.
    row_select_col_type : str or list of str or set of str or ColumnTypes
        The column types needed to select rows (default is None).
    row_select_vals : str, int, bool or list of str, int, bool
        The value(s) which should be selected in the row_select_col_type
        to select the rows used for training (default is None).
    **params : dict
        Extra keyword arguments for the transformer.

    """

    def __init__(self, name: str, transformer: EstimatorLike, apply_to: ColumnTypesLike, needed_types: Optional[ColumnTypesLike]=None, row_select_col_type: Optional[ColumnTypesLike]=None, row_select_vals: Optional[Union[(str, int, List, bool)]]=None, **params: Any):
        self.name = name
        self.transformer = transformer
        self.apply_to = ensure_column_types(apply_to)
        self.needed_types = needed_types
        self.row_select_col_type = row_select_col_type
        self.row_select_vals = row_select_vals
        # Extra kwargs are routed to the inner transformer via set_params.
        self.set_params(**params)

    def _fit(self, X: pd.DataFrame, y: Optional[DataLike]=None, **fit_params: Any) -> 'JuColumnTransformer':
        """Fit the transformer.

        Fit the transformer to the data, only for the specified columns.

        Parameters
        ----------
        X : np.array
            Input features.
        y : np.array
            Target.
        **fit_params : dict
            Parameters for fitting the transformer.

        Returns
        -------
        JuColumnTransformer
            The fitted transformer.

        """
        # Prefix output feature names only when the inner transformer
        # advertises that it renames features (ClassNamePrefixFeaturesOutMixin).
        verbose_feature_names_out = isinstance(self.transformer, ClassNamePrefixFeaturesOutMixin)
        self.column_transformer_ = ColumnTransformer([(self.name, self.transformer, self.apply_to.to_type_selector())], verbose_feature_names_out=verbose_feature_names_out, remainder='passthrough')
        self.column_transformer_.fit(X, y, **fit_params)
        return self

    def transform(self, X: pd.DataFrame) -> DataLike:
        """Apply the transformer.

        Parameters
        ----------
        X : pd.DataFrame
            Data to be transformed.

        Returns
        -------
        pd.DataFrame
            Transformed data.

        """
        check_is_fitted(self)
        return self.column_transformer_.transform(X)

    def get_feature_names_out(self, input_features: Optional[List[str]]=None) -> List[str]:
        """Get names of features to be returned.

        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Input features to use.

            * If ``None``, then ``feature_names_in_`` is used as input
              feature names if it's defined. If ``feature_names_in_`` is
              undefined, then the following input feature names are
              generated: ``["x0", "x1", ..., "x(n_features_in_ - 1)"]``.
            * If ``array-like``, then ``input_features`` must match
              ``feature_names_in_`` if it's defined.

        Returns
        -------
        list of str
            Names of features to be kept in the output pd.DataFrame.

        """
        out = None
        try:
            out = self.column_transformer_.get_feature_names_out(input_features)
        except ValueError as e:
            # ColumnTransformer raises when output names would collide.
            raise_error('This transformer changes the names of the features. Unfortunately, this feature is already present and will create a repeated feature name. Please re-implement your transformer, inheriting from sklearn.base.ClassNamePrefixFeaturesOutMixin', klass=ValueError, exception=e)
        if self.column_transformer_.verbose_feature_names_out:
            # Strip the 'remainder__' prefix added to passthrough columns.
            out = [(x.replace('remainder__', '') if ('remainder__' in x) else x) for x in out]
        return out

    def get_params(self, deep: bool=True) -> Dict[(str, Any)]:
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, default=True
            Not used. Kept for compatibility with scikit-learn.

        Returns
        -------
        dict
            Parameter names mapped to their values.

        """
        # Inner transformer params are merged flatly (no '<name>__' prefix)
        # so they can be tuned directly; set_params routes them back.
        return dict(**self.transformer.get_params(True), name=self.name, apply_to=self.apply_to, needed_types=self.needed_types, row_select_col_type=self.row_select_col_type, row_select_vals=self.row_select_vals, transformer=self.transformer)

    def set_params(self, **kwargs: Any) -> 'JuColumnTransformer':
        """Set the parameters of this estimator.

        The method works on simple estimators as well as on nested objects
        (such as :class:`sklearn.pipeline.Pipeline`). The latter have
        parameters of the form ``<component>__<parameter>`` so that it's
        possible to update each component of a nested object.

        Parameters
        ----------
        **kwargs : dict
            Estimator parameters.

        Returns
        -------
        JuColumnTransformer
            JuColumnTransformer instance with params set.

        """
        # Params known to the inner transformer go there; everything else is
        # set as an attribute of this wrapper.
        transformer_params = list(self.transformer.get_params(True).keys())
        for (param, val) in kwargs.items():
            if (param in transformer_params):
                self.transformer.set_params(**{param: val})
            else:
                setattr(self, param, val)
        return self
|
def list_target_transformers() -> List[str]:
    """Return the names of all registered target transformers.

    Returns
    -------
    out : list of str
        The available target transformer names.
    """
    return [t_name for t_name in _available_target_transformers]
|
def get_target_transformer(name: str, **params: Any) -> JuTargetTransformer:
    """Build a target transformer by name.

    Parameters
    ----------
    name : str
        The target transformer name
    **params
        Parameters passed to the transformer's constructor.

    Returns
    -------
    JuTargetTransformer
        The transformer object.

    Raises
    ------
    ValueError
        If the specified target transformer name is not available.
    """
    if name not in _available_target_transformers:
        raise_error(f'The specified target transformer ({name}) is not available. Valid options are: {list(_available_target_transformers.keys())}')
    klass = _available_target_transformers[name]
    return klass(**params)
|
def register_target_transformer(transformer_name: str, transformer_cls: Type[JuTargetTransformer], overwrite: Optional[bool]=None):
    """Register a target transformer to julearn.

    Parameters
    ----------
    transformer_name : str
        Name by which the transformer will be referenced by
    transformer_cls : class(JuTargetTransformer)
        The class by which the transformer can be initialized from.
    overwrite : bool, optional
        decides whether overwrite should be allowed.
        Options are:

        * None : overwrite is possible, but warns the user (default).
        * True : overwrite is possible without any warning.
        * False : overwrite is not possible, error is raised instead.

    Raises
    ------
    ValueError
        If `transformer_name` is already registered and `overwrite` is False.

    Warns
    -----
    RuntimeWarning
        If `transformer_name` is already registered and `overwrite` is None.
    """
    # An existing registration under the same name triggers the overwrite
    # policy; a new name is always registered silently.
    if (_available_target_transformers.get(transformer_name) is not None):
        if (overwrite is None):
            warn_with_log(f'Target transformer named {transformer_name} already exists. Therefore, {transformer_name} will be overwritten. To remove this warning set overwrite=True.')
        elif (overwrite is False):
            raise_error(f'Target transformer named {transformer_name} already exists and overwrite is set to False. Set `overwrite=True` in case you want to overwrite an existing target transformer.')
    # BUGFIX: the log message said 'transformer' (copy-paste from
    # register_transformer); log the correct registry.
    logger.info(f'registering target transformer named {transformer_name}.')
    _available_target_transformers[transformer_name] = transformer_cls
|
def reset_target_transformer_register() -> None:
    """Reset the target transformer register to its initial state."""
    global _available_target_transformers
    # Deep-copy the pristine register so subsequent registrations cannot
    # mutate the reset baseline.
    _available_target_transformers = deepcopy(_available_target_transformers_reset)
|
class JuTargetTransformer():
    """Base class for target transformers.

    Unlike the scikit-learn transformer, this fits and transforms using both
    X and y. This is useful for pipelines that work on the target but require
    information from the input data, such as the TargetConfoundRemover or
    a target encoder that requires one of the features to be present.

    IMPORTANT: Using any of the transformers that transforms the target
    based on the input data will result in data leakage if the features
    are not dropped after the transformation.
    """

    def fit_transform(self, X: pd.DataFrame, y: DataLike) -> DataLike:
        """Fit the transformer and transform the target in one call.

        Parameters
        ----------
        X : pd.DataFrame
            The input data.
        y : DataLike
            The target.

        Returns
        -------
        DataLike
            The transformed target.

        """
        fitted = self.fit(X, y)
        return fitted.transform(X, y)

    def fit(self, X: pd.DataFrame, y: DataLike) -> 'JuTargetTransformer':
        """Fit the target transformer. Subclasses must override.

        Parameters
        ----------
        X : pd.DataFrame
            The input data.
        y : DataLike
            The target.

        Returns
        -------
        JuTargetTransformer
            The fitted transformer.

        """
        raise NotImplementedError('fit() not implemented')

    def transform(self, X: pd.DataFrame, y: DataLike) -> DataLike:
        """Transform the target. Subclasses must override.

        Parameters
        ----------
        X : pd.DataFrame
            The input data.
        y : DataLike
            The target.

        Returns
        -------
        DataLike
            The transformed target.

        """
        raise NotImplementedError('transform() not implemented')
|
def _wrapped_model_has(attr):
'Create a function to check if self.model_ has a given attribute.\n\n This function is usable by\n :func:`sklearn.utils.metaestimators.available_if`\n\n Parameters\n ----------\n attr : str\n The attribute to check for.\n\n Returns\n -------\n check : function\n The check function.\n\n '
def check(self):
'Check if self.model has a given attribute.\n\n Returns\n -------\n bool\n True if self.model_ has the attribute, False otherwise.\n '
return hasattr(self.model, attr)
return check
|
class TransformedTargetWarning(RuntimeWarning):
    """Warning used to notify the user that the target has been transformed.

    Issued when predictions remain in the transformed target space because
    the target transformation cannot be inverted.
    """
|
class JuTransformedTargetModel(JuBaseEstimator):
    """Model wrapper that transforms the target before fitting.

    This _model_ wraps another model (possibly a pipeline) and applies a
    target transformer to ``y`` before delegating to the wrapped model.

    Parameters
    ----------
    model : ModelLike
        The model to be wrapped. Can be a pipeline.
    transformer : JuTargetPipeline
        The transformer to be used to transform the target.
    """

    def __init__(self, model: ModelLike, transformer: "JuTargetPipeline"):
        self.model = model
        self.transformer = transformer

    def _ensure_fitted(self) -> None:
        """Raise if ``fit`` was not called yet; narrow the model_ type."""
        if not hasattr(self, "model_"):
            raise_error("Model not fitted yet.")
        # Narrow the attribute type for static checkers.
        self.model_ = typing.cast(ModelLike, self.model_)

    def fit(
        self, X: pd.DataFrame, y: DataLike, **fit_params: Any
    ) -> "JuTransformedTargetModel":
        """Fit the wrapped model on the transformed target.

        Parameters
        ----------
        X : pd.DataFrame
            The input data.
        y : DataLike
            The target.
        **fit_params : dict
            Additional parameters to be passed to the model fit method.

        Returns
        -------
        JuTransformedTargetModel
            The fitted model.

        """
        y = self.transformer.fit_transform(X, y)
        # Clone so repeated fits start from a fresh estimator.
        self.model_ = clone(self.model)
        self.model_.fit(X, y, **fit_params)
        return self

    def predict(self, X: pd.DataFrame) -> DataLike:
        """Predict using the wrapped model.

        If the target transformer is invertible, predictions are mapped
        back to the original target space; otherwise a warning is issued
        and predictions stay in the transformed space.

        Parameters
        ----------
        X : pd.DataFrame
            The data to predict on.

        Returns
        -------
        DataLike
            The predictions.

        """
        self._ensure_fitted()
        y_pred = self.model_.predict(X)
        if not self.transformer.can_inverse_transform():
            warn_with_log(
                "The target has been transformed to fit the model, but "
                "cannot inverse the model's prediction. The output of "
                "`predict(X)` is still in the transformed space. To remove "
                "this warning, use a suitable julearn scorer.",
                category=TransformedTargetWarning,
            )
            return y_pred
        return self.transformer.inverse_transform(X, y_pred)

    def score(self, X: pd.DataFrame, y: DataLike) -> float:
        """Score the wrapped model against the transformed target.

        Parameters
        ----------
        X : pd.DataFrame
            The input data.
        y : DataLike
            The target.

        Returns
        -------
        float
            Score for the model.

        """
        self._ensure_fitted()
        y_trans = self.transform_target(X, y)
        return self.model_.score(X, y_trans)

    @available_if(_wrapped_model_has("predict_proba"))
    def predict_proba(self, X: pd.DataFrame) -> np.ndarray:
        """Compute probabilities of possible outcomes for samples in X.

        Only available when the wrapped model provides ``predict_proba``.

        Parameters
        ----------
        X : pd.DataFrame
            The data to predict on.

        Returns
        -------
        np.ndarray
            Returns the probability of the sample for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.
        """
        self._ensure_fitted()
        return self.model_.predict_proba(X)

    @available_if(_wrapped_model_has("decision_function"))
    def decision_function(self, X: pd.DataFrame) -> np.ndarray:
        """Evaluate the decision function for the samples in X.

        Only available when the wrapped model provides
        ``decision_function``.

        Parameters
        ----------
        X : pd.DataFrame
            The data to obtain the decision function.

        Returns
        -------
        X : array-like of shape (n_samples, n_class * (n_class-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
        """
        self._ensure_fitted()
        return self.model_.decision_function(X)

    def transform_target(self, X: pd.DataFrame, y: DataLike) -> DataLike:
        """Transform the target using the configured transformer.

        Parameters
        ----------
        X : pd.DataFrame
            The input data.
        y : DataLike
            The target.

        Returns
        -------
        DataLike
            The transformed target.

        """
        return self.transformer.transform(X, y)

    def can_inverse_transform(self) -> bool:
        """Check if the target can be inverse transformed.

        Returns
        -------
        bool
            True if the target can be inverse transformed, False otherwise.

        """
        return self.transformer.can_inverse_transform()

    @property
    def classes_(self) -> np.ndarray:
        """Classes known to the wrapped (fitted) model."""
        self._ensure_fitted()
        return self.model_.classes_
|
def test_register_target_transformer() -> None:
    """Test registering target transformers."""
    # The name must be unknown before registration.
    with pytest.raises(ValueError, match=r"\(useless\) is not available"):
        get_target_transformer("useless")
    before = list_target_transformers()

    class MyTransformer(JuTargetTransformer):
        pass

    register_target_transformer("useless", MyTransformer)
    _ = get_target_transformer("useless")
    after = list_target_transformers()
    assert "useless" in after
    assert "useless" not in before
    # Explicit overwrite is silent.
    register_target_transformer("useless", MyTransformer, True)
    # Overwrite with the default setting warns.
    with pytest.warns(
        RuntimeWarning, match="Target transformer named useless already exists. "
    ):
        register_target_transformer("useless", MyTransformer, None)
    # Overwrite disabled raises.
    with pytest.raises(
        ValueError, match="Target transformer named useless already exists and"
    ):
        register_target_transformer("useless", MyTransformer, False)
    reset_target_transformer_register()
|
def test_reset_target_transformer() -> None:
    """Test resetting the target transformers registry."""
    # Unknown before registration.
    with pytest.raises(ValueError, match=r"\(useless\) is not available"):
        get_target_transformer("useless")

    class MyTransformer(JuTargetTransformer):
        pass

    # Register, fetch once, then reset: the name must be unknown again.
    register_target_transformer("useless", MyTransformer)
    get_target_transformer("useless")
    reset_target_transformer_register()
    with pytest.raises(ValueError, match=r"\(useless\) is not available"):
        get_target_transformer("useless")
|
def test_JuTargetTransformer_abstractness() -> None:
    """Test JuTargetTransformer is abstract base class."""
    base = JuTargetTransformer()
    # The base class must refuse to fit.
    with pytest.raises(NotImplementedError, match="fit"):
        base.fit("1", "2")
|
class Fish(BaseEstimator, TransformerMixin):
    """A (flying) fish.

    A do-nothing transformer used to exercise the transformer registry.

    Parameters
    ----------
    can_it_fly : bool
        Whether the fish can fly.

    """

    def __init__(self, can_it_fly: bool):
        self.can_it_fly = can_it_fly

    def fit(self, X: DataLike, y: Optional[DataLike] = None) -> "Fish":
        """Fit the fish (a no-op).

        Parameters
        ----------
        X : DataLike
            The data.
        y : DataLike, optional
            The target, by default None

        Returns
        -------
        Fish
            The fitted fish.
        """
        return self

    def transform(self, X: DataLike) -> DataLike:
        """Return the data unchanged.

        Parameters
        ----------
        X : DataLike
            The data.

        Returns
        -------
        DataLike
            The transformed (identical) data.
        """
        return X
|
def test_register_reset() -> None:
    """Test the register reset."""
    reset_transformer_register()
    # After a reset the name is unknown.
    with pytest.raises(ValueError, match="The specified transformer"):
        get_transformer("passthrough")
    register_transformer("passthrough", PassThroughTransformer)
    assert type(get_transformer("passthrough")) == PassThroughTransformer
    # Re-registering the same name warns.
    with pytest.warns(RuntimeWarning, match="Transformer named"):
        register_transformer("passthrough", PassThroughTransformer)
    reset_transformer_register()
    with pytest.raises(ValueError, match="The specified transformer"):
        get_transformer("passthrough")
    # Registration with an explicit apply_to also works.
    register_transformer("passthrough", PassThroughTransformer, "continuous")
    assert type(get_transformer("passthrough")) == PassThroughTransformer
|
def test_register_class_no_default_params():
    """Test the register with a class that has no default params."""
    reset_transformer_register()
    register_transformer("fish", Fish)
    # Construction parameters are forwarded to the registered class.
    _ = get_transformer("fish", can_it_fly="dont_be_stupid")
|
def test_register_warnings_errors():
    """Test the register warning / error."""
    # Overwriting an existing name warns by default.
    with pytest.warns(RuntimeWarning, match="Transformer name"):
        register_transformer("zscore", Fish)
    reset_transformer_register()
    # With overwrite disabled it is an error instead.
    with pytest.raises(ValueError, match="Transformer name"):
        register_transformer("zscore", Fish, overwrite=False)
    reset_transformer_register()
    # Explicit overwrite must stay silent.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        register_transformer("zscore", Fish, overwrite=True)
    reset_transformer_register()
|
def test_CBPM_posneg_correlated_features(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """Test the CBPM transformer with posneg correlated features.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset features
    y_iris : pd.Series
        The iris dataset target
    """
    pos_cols = ["sepal_length", "petal_length", "petal_width"]
    neg_cols = ["sepal_width"]
    out = CBPM(corr_sign="posneg", agg_method=np.mean).fit_transform(
        X_iris, y_iris
    )
    # Expected: one aggregated column per correlation sign.
    expected = np.column_stack(
        [
            X_iris[pos_cols].values.mean(axis=1),
            X_iris[neg_cols].values.mean(axis=1),
        ]
    )
    assert_array_equal(out, expected)
|
def test_CBPM_pos_correlated_features(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """Test the CBPM transformer with positive correlated features.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset features
    y_iris : pd.Series
        The iris dataset target
    """
    pos_cols = ["sepal_length", "petal_length", "petal_width"]
    # Fitting on only the positive features or on the full data must agree.
    out_subset = CBPM(corr_sign="pos", agg_method=np.mean).fit_transform(
        X_iris[pos_cols], y_iris
    )
    out_full = CBPM(corr_sign="pos", agg_method=np.mean).fit_transform(
        X_iris, y_iris
    )
    expected = X_iris[pos_cols].values.mean(axis=1)
    assert_array_equal(out_subset, out_full)
    assert_array_equal(out_subset, expected)
|
def test_CBPM_neg_correlated_features(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """Test the CBPM transformer with negatively correlated features.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset features
    y_iris : pd.Series
        The iris dataset target
    """
    X_neg = ['sepal_width']
    # Fitting on only the negative feature or on the full data must agree:
    # CBPM with corr_sign='neg' ignores the other features.
    trans_X_neg = CBPM(corr_sign='neg', agg_method=np.mean).fit_transform(X_iris[X_neg], y_iris)
    trans_X_neg_neg = CBPM(corr_sign='neg', agg_method=np.mean).fit_transform(X_iris, y_iris)
    trans_man = X_iris[X_neg].values.mean(axis=1)
    assert_array_equal(trans_X_neg, trans_X_neg_neg)
    assert_array_equal(trans_X_neg, trans_man)
|
def test_CBPM_warnings(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """Test the CBPM transformer warnings.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset features
    y_iris : pd.Series
        The iris dataset target
    """
    pos_cols = ["sepal_length", "petal_length", "petal_width"]
    neg_cols = ["sepal_width"]
    # No significant positive feature -> output falls back to the mean of y.
    with pytest.warns(RuntimeWarning, match="No feature with significant positive"):
        out = CBPM(corr_sign="pos", agg_method=np.mean).fit_transform(
            X_iris[neg_cols], y_iris
        )
    assert (out == y_iris.values.mean()).all()
    # No significant negative feature -> same fallback.
    with pytest.warns(RuntimeWarning, match="No feature with significant negative"):
        out = CBPM(corr_sign="neg", agg_method=np.mean).fit_transform(
            X_iris[pos_cols], y_iris
        )
    assert (out == y_iris.values.mean()).all()
    # posneg with only positive features degrades to pos (with a warning).
    reference_pos = CBPM(corr_sign="pos", agg_method=np.mean).fit_transform(
        X_iris[pos_cols], y_iris
    )
    with pytest.warns(RuntimeWarning, match="Only features with positive correlations"):
        out = CBPM(corr_sign="posneg", agg_method=np.mean).fit_transform(
            X_iris[pos_cols], y_iris
        )
    assert_array_equal(out, reference_pos)
    # posneg with only negative features degrades to neg (with a warning).
    reference_neg = CBPM(corr_sign="neg", agg_method=np.mean).fit_transform(
        X_iris[neg_cols], y_iris
    )
    with pytest.warns(RuntimeWarning, match="Only features with negative correlations"):
        out = CBPM(corr_sign="posneg", agg_method=np.mean).fit_transform(
            X_iris[neg_cols], y_iris
        )
    assert_array_equal(out, reference_neg)
    # Shuffled features are uncorrelated with y -> full fallback.
    df_shuffled_X = X_iris.sample(frac=1, random_state=42)
    with pytest.warns(
        RuntimeWarning, match="No feature with significant negative or positive"
    ):
        out = CBPM(corr_sign="posneg", agg_method=np.mean).fit_transform(
            df_shuffled_X, y_iris
        )
    assert (out == y_iris.values.mean()).all()
|
def test_CBPM_lower_sign_threshhold(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """Test the CBPM transformer with lower significance threshold.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset features
    y_iris : pd.Series
        The iris dataset target
    """
    # With a very strict threshold only the two petal features survive.
    out = CBPM(
        corr_sign="pos", significance_threshold=1e-50, agg_method=np.mean
    ).fit_transform(X_iris, y_iris)
    expected = X_iris[["petal_length", "petal_width"]].values.mean(axis=1)
    assert_array_equal(out, expected)
|
def test_CBPM_lower_sign_threshhold_no_sig(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """Test the CBPM transformer with an even lower significance threshold.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset features
    y_iris : pd.Series
        The iris dataset target
    """
    # Nothing survives a 1e-100 threshold -> fall back to the mean of y.
    with pytest.warns(
        RuntimeWarning, match="No feature with significant negative or positive"
    ):
        out = CBPM(
            corr_sign="posneg",
            significance_threshold=1e-100,
            agg_method=np.mean,
        ).fit_transform(X_iris, y_iris)
    assert (out == y_iris.values.mean()).all()
|
def test_CBPM_spearman(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """Test the CBPM transformer with spearman correlation.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset features
    y_iris : pd.Series
        The iris dataset target
    """
    pos_cols = ["sepal_length", "petal_length", "petal_width"]
    neg_cols = ["sepal_width"]
    out = CBPM(corr_method=spearmanr, agg_method=np.mean).fit_transform(
        X_iris, y_iris
    )
    # One aggregated column per correlation sign.
    expected = np.column_stack(
        [
            X_iris[pos_cols].values.mean(axis=1),
            X_iris[neg_cols].values.mean(axis=1),
        ]
    )
    assert_array_equal(out, expected)
|
def test_CBPM_set_output_posneg(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """Test the CBPM transformer for setting posneg output.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset features.
    y_iris : pd.Series
        The iris dataset target.

    """
    pos_cols = ["sepal_length", "petal_length", "petal_width"]
    neg_cols = ["sepal_width"]
    out = (
        CBPM(corr_method=spearmanr, agg_method=np.mean, corr_sign="posneg")
        .set_output(transform="pandas")
        .fit_transform(X_iris, y_iris)
    )
    # Pandas output carries one named column per correlation sign.
    expected = pd.DataFrame(
        np.column_stack(
            [
                X_iris[pos_cols].values.mean(axis=1),
                X_iris[neg_cols].values.mean(axis=1),
            ]
        ),
        columns=["positive", "negative"],
    )
    assert_frame_equal(out, expected)
|
def test_CBPM_set_output_pos(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """Test the CBPM transformer for setting pos output.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset features.
    y_iris : pd.Series
        The iris dataset target.

    """
    pos_cols = ["sepal_length", "petal_length", "petal_width"]
    out = (
        CBPM(corr_method=spearmanr, agg_method=np.mean, corr_sign="pos")
        .set_output(transform="pandas")
        .fit_transform(X_iris, y_iris)
    )
    expected = pd.DataFrame(
        X_iris[pos_cols].values.mean(axis=1), columns=["positive"]
    )
    assert_frame_equal(out, expected)
|
def test_CBPM_set_output_neg(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """Test the CBPM transformer for setting neg output.

    Parameters
    ----------
    X_iris : pd.DataFrame
        The iris dataset features.
    y_iris : pd.Series
        The iris dataset target.

    """
    neg_cols = ["sepal_width"]
    out = (
        CBPM(corr_method=spearmanr, agg_method=np.mean, corr_sign="neg")
        .set_output(transform="pandas")
        .fit_transform(X_iris, y_iris)
    )
    expected = pd.DataFrame(
        X_iris[neg_cols].values.mean(axis=1), columns=["negative"]
    )
    assert_frame_equal(out, expected)
|
@fixture
def df_X_confounds() -> pd.DataFrame:
    """Create a dataframe with confounds.

    Returns
    -------
    pd.DataFrame
        A dataframe with confounds.

    """
    # Column suffixes encode the julearn column type.
    data = {
        "a__:type:__continuous": np.arange(10) + np.random.rand(10),
        "b__:type:__continuous": np.arange(10, 20) + np.random.rand(10),
        "c__:type:__confound": np.arange(30, 40).astype(float),
        "d__:type:__confound": np.arange(40, 50).astype(float),
        "e__:type:__categorical": np.arange(50, 70, 2).astype(float),
        "f__:type:__categorical": np.arange(70, 100, 3).astype(float),
    }
    return pd.DataFrame(data)
|
@pytest.mark.parametrize(
    "name, klass, params",
    [
        ("zscore", StandardScaler, {}),
        ("scaler_robust", RobustScaler, {}),
        ("scaler_minmax", MinMaxScaler, {}),
        ("scaler_maxabs", MaxAbsScaler, {}),
        ("scaler_normalizer", Normalizer, {}),
        ("scaler_quantile", QuantileTransformer, {"n_quantiles": 10}),
        ("scaler_power", PowerTransformer, {}),
    ],
)
def test_JuColumnTransformer(name: str, klass: Type[EstimatorLike], params: Dict, df_X_confounds: pd.DataFrame):
    """Test JuColumnTransformer class."""
    transformer = JuColumnTransformer(
        name=name, transformer=klass(), apply_to=["continuous"]
    )
    transformer.set_params(**params)
    transformer.fit(df_X_confounds)
    df_trans = pd.DataFrame(
        transformer.transform(df_X_confounds.copy()),
        columns=transformer.get_feature_names_out(),
    )
    # All original columns survive the transformation.
    assert set(df_trans.columns) == {
        "a__:type:__continuous",
        "b__:type:__continuous",
        "c__:type:__confound",
        "d__:type:__confound",
        "e__:type:__categorical",
        "f__:type:__categorical",
    }
    untouched = [
        "c__:type:__confound",
        "d__:type:__confound",
        "e__:type:__categorical",
        "f__:type:__categorical",
    ]
    scaled = ["a__:type:__continuous", "b__:type:__continuous"]
    # Only the continuous columns are transformed; compare against a plain
    # sklearn transformer fitted on those columns alone.
    reference = klass(**params).fit_transform(df_X_confounds[scaled])
    assert_frame_equal(df_trans[untouched], df_X_confounds[untouched])
    assert_array_equal(df_trans[scaled].values, reference)
|
def test_JuColumnTransformer_row_select():
    """Test row selection for JuColumnTransformer."""
    X = pd.DataFrame(
        {
            "a__:type:__continuous": [0, 0, 1, 1],
            "b__:type:__healthy": [1, 1, 0, 0],
        }
    )

    def make_transformer(select_vals):
        """Build a zscore transformer restricted to the selected rows."""
        return JuColumnTransformer(
            name="zscore",
            transformer=StandardScaler(),
            apply_to="continuous",
            row_select_col_type=["healthy"],
            row_select_vals=select_vals,
        )

    transformer_healthy = make_transformer(1)
    transformer_unhealthy = make_transformer(0)
    transformer_both = make_transformer([0, 1])

    def fitted_mean(transformer):
        """Fit and return the inner scaler's learnt mean."""
        return transformer.fit(X).column_transformer_.transformers_[0][1].mean_

    mean_healthy = fitted_mean(transformer_healthy)
    mean_unhealthy = fitted_mean(transformer_unhealthy)
    mean_both = fitted_mean(transformer_both)
    # Row selection picks the expected index subsets.
    assert_almost_equal(
        transformer_healthy._select_rows(X, y=None)["X"].index.values, [0, 1]
    )
    assert_almost_equal(
        transformer_unhealthy._select_rows(X, None)["X"].index.values, [2, 3]
    )
    assert_almost_equal(
        transformer_both._select_rows(X, None)["X"].index.values, [0, 1, 2, 3]
    )
    # The scaler's mean reflects only the selected rows.
    assert_almost_equal(mean_unhealthy, [1])
    assert_almost_equal(mean_healthy, [0])
    assert_almost_equal(mean_both, [0.5])
|
def _recurse_to_list(a):
'Recursively convert a to a list.'
if isinstance(a, (list, tuple)):
return [_recurse_to_list(i) for i in a]
elif isinstance(a, np.ndarray):
return a.tolist()
else:
return a
|
def _compute_cvmdsum(cv):
    """Compute a checksum that identifies the CV generator configuration.

    Parameters
    ----------
    cv : object
        The cross-validation generator to summarise.

    Returns
    -------
    str
        The md5 hex digest of the generator's parameters, or the string
        ``'non-reproducible'`` if the generator shuffles without a fixed
        random state.

    """
    params = dict(vars(cv).items())
    params['class'] = cv.__class__.__name__
    out = None
    if ('random_state' in params):
        if (params['random_state'] is None):
            if (params.get('shuffle', True) is True):
                # Unseeded shuffling cannot be reproduced, so no hash.
                out = 'non-reproducible'
    if isinstance(cv, _CVIterableWrapper):
        splits = params.pop('cv')
        params['cv'] = _recurse_to_list(splits)
    if isinstance(cv, PredefinedSplit):
        # Numpy arrays are not JSON-serializable.
        params['test_fold'] = params['test_fold'].tolist()
        params['unique_folds'] = params['unique_folds'].tolist()
    if ('cv' in params):
        if inspect.isclass(params['cv']):
            # BUGFIX: use the class's own __name__. The previous
            # `params['cv'].__class__.__name__` returned the *metaclass*
            # name (e.g. "type" or "ABCMeta") for every class, making
            # different CV classes hash identically.
            params['cv'] = params['cv'].__name__
    if (out is None):
        out = hashlib.md5(json.dumps(params, sort_keys=True).encode('utf-8')).hexdigest()
    return out
|
def is_nonoverlapping_cv(cv) -> bool:
    """Check whether ``cv`` is a known CV scheme with non-overlapping folds.

    Parameters
    ----------
    cv : object
        The cross-validation generator to check.

    Returns
    -------
    bool
        True if ``cv`` is an instance of one of the recognised
        non-overlapping cross-validation classes.

    """
    return isinstance(
        cv,
        (
            KFold,
            GroupKFold,
            RepeatedKFold,
            RepeatedStratifiedKFold,
            StratifiedKFold,
            LeaveOneOut,
            LeaveOneGroupOut,
            StratifiedGroupKFold,
            ContinuousStratifiedGroupKFold,
            RepeatedContinuousStratifiedGroupKFold,
        ),
    )
|
def check_scores_df(*scores: pd.DataFrame, same_cv: bool=False) -> pd.DataFrame:
    """Check the output of `run_cross_validation`.

    Parameters
    ----------
    *scores : pd.DataFrame
        DataFrames containing the scores of the models. The DataFrames must
        be the output of `run_cross_validation`
    same_cv : bool, optional
        If True, the DataFrames must have the same CV scheme, by default False

    Returns
    -------
    named_scores : list of pd.DataFrame
        The validated input DataFrames, with a `model` column added if
        missing.

    Raises
    ------
    ValueError
        If the DataFrames are not the output of `run_cross_validation` or
        if they do not have the same CV scheme and `same_cv` is True.
    """
    # Every run_cross_validation output carries these bookkeeping columns.
    for column in ('cv_mdsum', 'fold', 'repeat', 'n_train', 'n_test'):
        if any(column not in x for x in scores):
            raise_error(
                f'The DataFrames must be the output of '
                f'`run_cross_validation`. Some of the DataFrames are '
                f'missing the `{column}` column.'
            )
    if same_cv:
        cv_mdsums = np.unique(
            np.hstack([x['cv_mdsum'].unique() for x in scores])
        )
        if cv_mdsums.size > 1:
            raise_error(
                "The CVs are not the same. Can't do a t-test on different "
                "CVs."
            )
        if cv_mdsums[0] == 'non-reproducible':
            raise_error(
                "The CV is non-reproducible. Can't reproduce the CV folds."
            )
    # Give nameless score frames a deterministic model label.
    named_scores = []
    for i, score in enumerate(scores, start=1):
        if 'model' not in score:
            score['model'] = f'model_{i}'
        named_scores.append(score)
    return named_scores
|
def _get_git_head(path: Path) -> str:
'Aux function to read HEAD from git.\n\n Parameters\n ----------\n path : pathlib.Path\n The path to read git HEAD from.\n\n Returns\n -------\n str\n Empty string if timeout expired for subprocess command execution else\n git HEAD information.\n\n '
if (not path.exists()):
raise ValueError(f'This path does not exist: {path}')
command = f'cd {path}; git rev-parse --verify HEAD'
process = Popen(args=command, stdout=PIPE, shell=True)
try:
(stdout, _) = process.communicate(timeout=10)
proc_stdout = stdout.strip().decode()
except TimeoutExpired:
process.kill()
proc_stdout = ''
return proc_stdout
|
def get_versions() -> Dict:
    """Import stuff and get versions if module.

    Returns
    -------
    module_versions : dict
        The module names and corresponding versions. Modules without a
        ``__version__`` attribute are recorded as the string ``'None'``;
        an empty version string is recorded as ``None``.

    """
    module_versions = {}
    # Iterate over a snapshot: resolving attributes below may trigger
    # imports that mutate ``sys.modules`` while we loop.
    for (name, module) in list(sys.modules.items()):
        # Only consider top-level modules.
        if ('.' in name):
            continue
        if (name in ['_curses']):
            continue
        # NOTE: str() keeps the historical behaviour of recording modules
        # without __version__ as the string 'None'. The previous
        # implementation relied on distutils' LooseVersion, which was
        # removed from the standard library in Python 3.12.
        module_version = str(getattr(module, '__version__', None))
        if (not module_version):
            # An empty version string is recorded as None.
            module_version = None
        elif ('git' in module_version):
            # Development install: append the git HEAD for traceability.
            git_path = Path(module.__file__).resolve().parent
            head = _get_git_head(git_path)
            module_version += f'-HEAD:{head}'
        module_versions[name] = module_version
    return module_versions
|
def _safe_log(versions: Dict, name: str) -> None:
    """Log a dependency's version only if it is present.

    Parameters
    ----------
    versions : dict
        The dictionary with keys as dependency names and values as the
        versions.
    name : str
        The dependency to look up in `versions`.

    """
    # Sentinel lookup so that a stored value of None is still logged.
    _missing = object()
    version = versions.get(name, _missing)
    if version is not _missing:
        logger.info(f'{name}: {version}')
|
def log_versions() -> None:
    """Log versions of the core libraries, for reproducibility purposes."""
    versions = get_versions()
    logger.info('===== Lib Versions =====')
    # Only the core scientific stack plus julearn itself.
    for dependency in ('numpy', 'scipy', 'sklearn', 'pandas', 'julearn'):
        _safe_log(versions, dependency)
    logger.info('========================')
|
def configure_logging(level: Union[int, str]='WARNING', fname: Optional[Union[str, Path]]=None, overwrite: Optional[bool]=None, output_format=None) -> None:
    """Configure the logging functionality.

    Parameters
    ----------
    level : int or {"DEBUG", "INFO", "WARNING", "ERROR"}
        The level of the messages to print. If string, it will be interpreted
        as elements of logging (default "WARNING").
    fname : str or pathlib.Path, optional
        Filename of the log to print to. If None, stdout is used
        (default None).
    overwrite : bool, optional
        Overwrite the log file (if it exists). Otherwise, statements
        will be appended to the log (default). None is the same as False,
        but additionally raises a warning to notify the user that log
        entries will be appended (default None).
    output_format : str, optional
        Format of the output messages. See the following for examples:
        https://docs.python.org/dev/howto/logging.html
        e.g., ``"%(asctime)s - %(levelname)s - %(message)s"``.
        If None, default string format is used
        (default ``"%(asctime)s - %(name)s - %(levelname)s - %(message)s"``).

    """
    # Drop any stream/file handlers installed by a previous call.
    _close_handlers(logger)
    if isinstance(level, str):
        level = _logging_types[level]
    if fname is None:
        # Wrap stdout dynamically so monkey-patched stdout keeps working.
        handler = logging.StreamHandler(WrapStdOut())
    else:
        if not isinstance(fname, Path):
            fname = Path(fname)
        if fname.exists() and overwrite is None:
            warnings.warn(f'File ({fname.absolute()!s}) exists. Messages will be appended. Use overwrite=True to overwrite or overwrite=False to avoid this message.', stacklevel=2)
            overwrite = False
        handler = logging.FileHandler(fname, mode='w' if overwrite else 'a')
    if output_format is None:
        output_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    handler.setFormatter(logging.Formatter(fmt=output_format))
    logger.setLevel(level)
    logger.addHandler(handler)
    # Record library versions right away for reproducibility.
    log_versions()
|
def _close_handlers(logger: logging.Logger) -> None:
'Safely close relevant handlers for logger.\n\n Parameters\n ----------\n logger : logging.logger\n The logger to close handlers for.\n\n '
for handler in list(logger.handlers):
if isinstance(handler, (logging.FileHandler, logging.StreamHandler)):
if isinstance(handler, logging.FileHandler):
handler.close()
logger.removeHandler(handler)
|
def raise_error(msg: str, klass: Type[Exception]=ValueError, exception: Optional[Exception]=None) -> NoReturn:
    """Raise error, but first log it.

    Parameters
    ----------
    msg : str
        The message for the exception.
    klass : subclass of Exception, optional
        The subclass of Exception to raise using (default ValueError).
    exception : Exception, optional
        The original exception to follow up on (default None).

    """
    logger.error(msg)
    if exception is None:
        raise klass(msg)
    # Chain onto the original exception to preserve the traceback context.
    raise klass(msg) from exception
|
def warn_with_log(msg: str, category: Optional[Type[Warning]]=RuntimeWarning) -> None:
    """Warn, but first log it.

    Parameters
    ----------
    msg : str
        Warning message.
    category : subclass of Warning, optional
        The warning subclass (default RuntimeWarning).

    """
    # Mirror the warning into the log unless the first matching warnings
    # filter ignores this category anyway.
    matching = [f for f in warnings.filters if issubclass(f[2], category)]
    if not (matching and matching[0][0] == 'ignore'):
        logger.warning(msg)
    warnings.warn(msg, category=category, stacklevel=2)
|
class WrapStdOut(logging.StreamHandler):
    """Dynamically wrap to sys.stdout.

    This makes packages that monkey-patch sys.stdout (e.g. doctest,
    sphinx-gallery) work properly.

    """

    def __getattr__(self, name: str) -> Any:
        """Forward unknown attribute lookups to the current ``sys.stdout``.

        Parameters
        ----------
        name : str
            The attribute being looked up.

        Returns
        -------
        Any
            The attribute fetched from ``sys.stdout``.

        Raises
        ------
        AttributeError
            If ``sys.stdout`` does not provide the attribute either.

        """
        # Resolve against sys.stdout at access time (not at handler
        # creation), so later monkey-patching is honoured.
        if hasattr(sys.stdout, name):
            return getattr(sys.stdout, name)
        # Fixed grammar of the error message ("has not" -> "has no").
        raise AttributeError(f"'file' object has no attribute '{name}'")
|
def compare_models(clf1: EstimatorLike, clf2: EstimatorLike) -> None:
    """Compare two fitted models for equality.

    Parameters
    ----------
    clf1 : EstimatorLike
        The first model.
    clf2 : EstimatorLike
        The second model.

    Raises
    ------
    AssertionError
        If the models are not equal.
    NotImplementedError
        If comparison is not implemented for the model class.
    """
    # Unwrap julearn model wrappers before comparing.
    if isinstance(clf1, WrapModel):
        clf1 = clf1.model
    if isinstance(clf2, WrapModel):
        clf2 = clf2.model
    if (clf1.__class__ != clf2.__class__):
        raise AssertionError('Different classes')
    if isinstance(clf1, (SVC, SVR)):
        # Support vectors may come in different orders; sort by index.
        idx1 = np.argsort(clf1.support_)
        v1 = clf1.support_vectors_[idx1]
        idx2 = np.argsort(clf2.support_)
        v2 = clf2.support_vectors_[idx2]
        if hasattr(clf1, 'probability'):
            assert (clf1.probability == clf2.probability)
    elif isinstance(clf1, (RandomForestClassifier, RandomForestRegressor, ExtraTreesClassifier, ExtraTreesRegressor, GradientBoostingClassifier, GradientBoostingRegressor)):
        v1 = clf1.feature_importances_
        v2 = clf2.feature_importances_
    elif isinstance(clf1, (DummyClassifier, DummyRegressor)):
        v1 = None
        v2 = None
        if hasattr(clf1, '_strategy'):
            assert (clf1._strategy == clf2._strategy)
        if hasattr(clf1, 'strategy'):
            assert (clf1.strategy == clf2.strategy)
        if hasattr(clf1, 'class_prior_'):
            assert_array_equal(clf1.class_prior_, clf2.class_prior_)
        if hasattr(clf1, 'constant_'):
            assert (clf1.constant_ == clf2.constant_)
        if hasattr(clf1, 'classes_'):
            assert_array_equal(clf1.classes_, clf2.classes_)
    elif isinstance(clf1, GaussianProcessClassifier):
        if hasattr(clf1.base_estimator_, 'estimators_'):
            # Multi-class: one estimator per class.
            est1 = clf1.base_estimator_.estimators_
            v1 = np.array([x.pi_ for x in est1])
            est2 = clf2.base_estimator_.estimators_
            v2 = np.array([x.pi_ for x in est2])
        else:
            v1 = clf1.base_estimator_.pi_
            v2 = clf2.base_estimator_.pi_
    elif isinstance(clf1, GaussianProcessRegressor):
        v1 = np.c_[(clf1.L_, clf1.alpha_)]
        v2 = np.c_[(clf2.L_, clf2.alpha_)]
    elif isinstance(clf1, (LogisticRegression, RidgeClassifier, RidgeClassifierCV, SGDClassifier, SGDRegressor, LinearRegression, Ridge, RidgeCV, BernoulliNB, ComplementNB, MultinomialNB)):
        v1 = _get_coef_over_versions(clf1)
        # BUGFIX: previously both v1 and v2 were taken from clf1, so this
        # comparison could never fail for linear models.
        v2 = _get_coef_over_versions(clf2)
    elif isinstance(clf1, CategoricalNB):
        v1 = None
        v2 = None
        # Per-feature coefficient arrays must match pairwise.
        for (c1, c2) in zip(_get_coef_over_versions(clf1), _get_coef_over_versions(clf2)):
            assert_array_equal(c1, c2)
    elif isinstance(clf1, GaussianNB):
        v1 = clf1.var_
        v2 = clf2.var_
    elif isinstance(clf1, (AdaBoostClassifier, AdaBoostRegressor, BaggingClassifier, BaggingRegressor)):
        est1 = clf1.estimators_
        v1 = np.array([x.feature_importances_ for x in est1])
        est2 = clf2.estimators_
        v2 = np.array([x.feature_importances_ for x in est2])
    else:
        raise NotImplementedError(f'Model comparison for {clf1} not yet implemented.')
    assert_array_equal(v1, v2)
|
def do_scoring_test(X: List[str], y: str, data: pd.DataFrame, api_params: Dict[(str, Any)], sklearn_model: EstimatorLike, scorers: List[str], groups: Optional[str]=None, X_types: Optional[Dict[(str, List[str])]]=None, cv: int=5, sk_y: Optional[np.ndarray]=None, decimal: int=5):
    'Test scoring for a model, using the julearn and sklearn API.\n\n Parameters\n ----------\n X : List[str]\n The feature names.\n y : str\n The target name.\n data : pd.DataFrame\n The data.\n groups : str, optional\n The group name, by default None.\n X_types : Dict[str, List[str]]\n The feature types.\n api_params : Dict[str, Any]\n The parameters for the julearn API.\n sklearn_model : EstimatorLike\n The sklearn model.\n scorers : list of str\n The scorers to use.\n cv : int, optional\n The number of folds to use, by default 5.\n sk_y : np.ndarray, optional\n The target values, by default None.\n decimal : int, optional\n The number of decimals to use for the comparison, by default 5.\n '
    # Plain numpy views of the data for the scikit-learn side of the run.
    sk_X = data[X].values
    if (sk_y is None):
        sk_y = data[y].values
    # Shallow copy of the API params (expanded as **kwargs below).
    params_dict = dict(api_params.items())
    # Build two identically-seeded splitters so both APIs see the same folds.
    if isinstance(cv, int):
        jucv = KFold(n_splits=cv, random_state=42, shuffle=True)
        sk_cv = KFold(n_splits=cv, random_state=42, shuffle=True)
    else:
        jucv = cv
        sk_cv = cv
    sk_groups = None
    if (groups is not None):
        sk_groups = data[groups].values
    # Seed before each run so any stochastic estimators behave identically.
    np.random.seed(42)
    (actual, actual_estimator) = run_cross_validation(X=X, y=y, X_types=X_types, data=data, groups=groups, scoring=scorers, cv=jucv, return_estimator='final', **params_dict)
    np.random.seed(42)
    expected = cross_validate(sklearn_model, sk_X, sk_y, cv=sk_cv, scoring=scorers, groups=sk_groups)
    # Compare the final fitted estimator against a freshly-fitted sklearn
    # clone; unwrap the last pipeline step when applicable.
    if isinstance(actual_estimator, Pipeline):
        clf1 = actual_estimator.steps[(- 1)][1]
    else:
        clf1 = actual_estimator
    if isinstance(sklearn_model, Pipeline):
        clf2 = clone(sklearn_model).fit(sk_X, sk_y).steps[(- 1)][1]
    else:
        clf2 = clone(sklearn_model).fit(sk_X, sk_y)
    compare_models(clf1, clf2)
    # decimal <= 0 disables the numeric score comparison (model comparison
    # above still runs).
    if (decimal > 0):
        for scoring in scorers:
            s_key = f'test_{scoring}'
            # julearn's result frame carries 5 extra columns on top of the
            # entries sklearn's cross_validate dict provides.
            assert (len(actual.columns) == (len(expected) + 5))
            assert (len(actual[s_key]) == len(expected[s_key]))
            assert_array_almost_equal(actual[s_key], expected[s_key], decimal=decimal)
|
class PassThroughTransformer(TransformerMixin, BaseEstimator):
    """Identity transformer: returns its input completely untouched."""

    def __init__(self):
        pass

    def fit(self, X: DataLike, y: Optional[DataLike]=None) -> 'PassThroughTransformer':
        """Do nothing and return the transformer itself.

        Parameters
        ----------
        X : DataLike
            The data.
        y : Optional[DataLike], optional
            The target, by default None.

        Returns
        -------
        PassThroughTransformer
            The fitted transformer.

        """
        return self

    def transform(self, X: DataLike) -> DataLike:
        """Return ``X`` unchanged.

        Parameters
        ----------
        X : DataLike
            The data.

        Returns
        -------
        DataLike
            The same, untouched data.

        """
        return X
|
class TargetPassThroughTransformer(PassThroughTransformer):
    """Identity transformer that operates on the target instead of the data."""

    def __init__(self):
        super().__init__()

    def transform(self, X: Optional[DataLike]=None, y: Optional[DataLike]=None) -> Optional[DataLike]:
        """Return the target unchanged.

        Parameters
        ----------
        X : DataLike, optional
            The data, by default None.
        y : DataLike, optional
            The target, by default None.

        Returns
        -------
        DataLike or None
            The same, untouched target.

        """
        return y

    def fit_transform(self, X: Optional[DataLike]=None, y: Optional[DataLike]=None) -> Optional[DataLike]:
        """Fit (a no-op) and return the target unchanged.

        Parameters
        ----------
        X : DataLike, optional
            The data, by default None.
        y : DataLike, optional
            The target, by default None.

        Returns
        -------
        DataLike or None
            The same, untouched target.

        """
        # ``fit`` returns self, so the calls can be chained.
        return self.fit(X, y).transform(X, y)
|
def _get_coef_over_versions(clf: EstimatorLike) -> np.ndarray:
    """Get the coefficients of a model, skipping warnings.

    Parameters
    ----------
    clf : EstimatorLike
        The model.

    Returns
    -------
    np.ndarray
        The coefficients.

    """
    naive_bayes_types = (BernoulliNB, ComplementNB, MultinomialNB, CategoricalNB)
    if not isinstance(clf, naive_bayes_types):
        return clf.coef_
    # For naive Bayes models read the log probabilities directly, turning any
    # future/deprecation warning into an error so version drift is noticed.
    with warnings.catch_warnings():
        warnings.filterwarnings('error', category=FutureWarning)
        warnings.filterwarnings('error', category=DeprecationWarning)
        return clf.feature_log_prob_
|
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_log_file() -> None:
    'Test logging to a file.'
    with tempfile.TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        # Default level: only warnings and errors reach the file.
        configure_logging(fname=(tmpdir / 'test1.log'))
        logger.debug('Debug message')
        logger.info('Info message')
        logger.warning('Warn message')
        logger.error('Error message')
        # Close the file handler so buffered records are flushed to disk.
        _close_handlers(logger)
        with open((tmpdir / 'test1.log')) as f:
            lines = f.readlines()
        assert (not any((('Debug message' in line) for line in lines)))
        assert (not any((('Info message' in line) for line in lines)))
        assert any((('Warn message' in line) for line in lines))
        assert any((('Error message' in line) for line in lines))
        # INFO level: info and above are logged, debug is not.
        configure_logging(fname=(tmpdir / 'test2.log'), level='INFO')
        logger.debug('Debug message')
        logger.info('Info message')
        logger.warning('Warn message')
        logger.error('Error message')
        _close_handlers(logger)
        with open((tmpdir / 'test2.log')) as f:
            lines = f.readlines()
        assert (not any((('Debug message' in line) for line in lines)))
        assert any((('Info message' in line) for line in lines))
        assert any((('Warn message' in line) for line in lines))
        assert any((('Error message' in line) for line in lines))
        # WARNING level: only warnings and errors.
        configure_logging(fname=(tmpdir / 'test3.log'), level='WARNING')
        logger.debug('Debug message')
        logger.info('Info message')
        logger.warning('Warn message')
        logger.error('Error message')
        _close_handlers(logger)
        with open((tmpdir / 'test3.log')) as f:
            lines = f.readlines()
        assert (not any((('Debug message' in line) for line in lines)))
        assert (not any((('Info message' in line) for line in lines)))
        assert any((('Warn message' in line) for line in lines))
        assert any((('Error message' in line) for line in lines))
        # ERROR level: errors only.
        configure_logging(fname=(tmpdir / 'test4.log'), level='ERROR')
        logger.debug('Debug message')
        logger.info('Info message')
        logger.warning('Warn message')
        logger.error('Error message')
        with open((tmpdir / 'test4.log')) as f:
            lines = f.readlines()
        assert (not any((('Debug message' in line) for line in lines)))
        assert (not any((('Info message' in line) for line in lines)))
        assert (not any((('Warn message' in line) for line in lines)))
        assert any((('Error message' in line) for line in lines))
        # Re-configuring onto an existing file without overwrite warns and
        # appends: the old records stay in the file.
        with pytest.warns(UserWarning, match='to avoid this message'):
            configure_logging(fname=(tmpdir / 'test4.log'), level='WARNING')
        logger.debug('Debug2 message')
        logger.info('Info2 message')
        logger.warning('Warn2 message')
        logger.error('Error2 message')
        with open((tmpdir / 'test4.log')) as f:
            lines = f.readlines()
        assert (not any((('Debug message' in line) for line in lines)))
        assert (not any((('Info message' in line) for line in lines)))
        assert (not any((('Warn message' in line) for line in lines)))
        assert any((('Error message' in line) for line in lines))
        assert (not any((('Debug2 message' in line) for line in lines)))
        assert (not any((('Info2 message' in line) for line in lines)))
        assert any((('Warn2 message' in line) for line in lines))
        assert any((('Error2 message' in line) for line in lines))
        # overwrite=True truncates the file: none of the previous messages
        # survive, only the new ones (at WARNING and above) appear.
        configure_logging(fname=(tmpdir / 'test4.log'), level='WARNING', overwrite=True)
        logger.debug('Debug3 message')
        logger.info('Info3 message')
        logger.warning('Warn3 message')
        logger.error('Error3 message')
        with open((tmpdir / 'test4.log')) as f:
            lines = f.readlines()
        assert (not any((('Debug message' in line) for line in lines)))
        assert (not any((('Info message' in line) for line in lines)))
        assert (not any((('Warn message' in line) for line in lines)))
        assert (not any((('Error message' in line) for line in lines)))
        assert (not any((('Debug2 message' in line) for line in lines)))
        assert (not any((('Info2 message' in line) for line in lines)))
        assert (not any((('Warn2 message' in line) for line in lines)))
        assert (not any((('Error2 message' in line) for line in lines)))
        assert (not any((('Debug3 message' in line) for line in lines)))
        assert (not any((('Info3 message' in line) for line in lines)))
        assert any((('Warn3 message' in line) for line in lines))
        assert any((('Error3 message' in line) for line in lines))
        # Raised warnings and errors are propagated AND written to the log.
        with pytest.warns(RuntimeWarning, match='Warn raised'):
            warn_with_log('Warn raised')
        with pytest.raises(ValueError, match='Error raised'):
            raise_error('Error raised')
        with open((tmpdir / 'test4.log')) as f:
            lines = f.readlines()
        assert any((('Warn raised' in line) for line in lines))
        assert any((('Error raised' in line) for line in lines))
|
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_log() -> None:
    'Simple log test.'
    # Smoke test: configuring logging and emitting a message must not raise.
    configure_logging()
    logger.info('Testing')
|
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_lib_logging() -> None:
    'Test that the dependency versions are reported in the log.'
    import numpy as np
    import pandas
    import scipy
    import sklearn
    with tempfile.TemporaryDirectory() as tmp:
        log_path = (Path(tmp) / 'test1.log')
        # At INFO level, configure_logging reports the library versions.
        configure_logging(fname=log_path, level='INFO')
        logger.info('first message')
        with open(log_path) as log_file:
            log_lines = log_file.readlines()
        for lib_name in ('numpy', 'scipy', 'sklearn', 'pandas', 'julearn'):
            assert any((lib_name in line) for line in log_lines)
|
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_log_file_warning_filter() -> None:
    'Test filtering warning when logging to a file.'
    with tempfile.TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        configure_logging(fname=(tmpdir / 'test_filter.log'))
        warn_with_log('Warn message 1')
        # A warning category ignored by the warnings filters must not reach
        # the log file either.
        warnings.filterwarnings('ignore', category=ImportWarning)
        warn_with_log('Warn message 2', RuntimeWarning)
        warn_with_log('Warn message 3', ImportWarning)
        # After resetting the filters, ImportWarning is logged again.
        warnings.resetwarnings()
        warn_with_log('Warn message 4', ImportWarning)
        with open((tmpdir / 'test_filter.log')) as f:
            lines = f.readlines()
        # Messages 1, 2 and 4 occupy consecutive lines; message 3 was
        # filtered out, so line 1 holds message 2 and line 2 holds message 4.
        assert ('Warn message 1' in lines[0])
        assert ('Warn message 2' in lines[1])
        assert ('Warn message 3' not in lines[1])
        assert ('Warn message 4' in lines[2])
|
def test_major_true() -> None:
    'Test that the major version check passes.'
    def is_above_one(part):
        return int(part) > 1
    assert check_version('3.5.1', is_above_one)
|
def test_major_false() -> None:
    'Test that the major version check fails.'
    def is_above_one(part):
        return int(part) > 1
    result = check_version('1.5.1', is_above_one)
    assert result is False
|
def test_minor_true() -> None:
    'Test that the minor version check passes.'
    def is_above_two(part):
        return int(part) > 2
    assert check_version('3.5.1', minor_check=is_above_two)
|
def test_minor_false() -> None:
    'Test that the minor version check fails.'
    def is_at_least_two(part):
        return int(part) >= 2
    result = check_version('3.1.1', minor_check=is_at_least_two)
    assert result is False
|
def test_patch_true() -> None:
    'Test that the patch version check passes.'
    def is_above_two(part):
        return int(part) > 2
    assert check_version('3.1.5', patch_check=is_above_two)
|
def test_patch_false() -> None:
    'Test that the patch version check fails.'
    def is_at_least_two(part):
        return int(part) >= 2
    result = check_version('3.1.1', patch_check=is_at_least_two)
    assert result is False
|
def test_multiple_true() -> None:
    'Test that combined major/minor/patch checks pass.'
    checks = dict(
        major_check=(lambda x: (int(x) == 3)),
        minor_check=(lambda x: (int(x) == 2)),
        patch_check=(lambda x: (int(x) >= 1)),
    )
    assert check_version('3.2.1', **checks)
|
def test_multiple_false() -> None:
    'Test that combined major/minor/patch checks fail.'
    checks = dict(
        major_check=(lambda x: (int(x) == 3)),
        minor_check=(lambda x: (int(x) == 3)),
        patch_check=(lambda x: (int(x) >= 2)),
    )
    result = check_version('3.2.1', **checks)
    assert result is False
|
def test_joblib_args_higer_1(monkeypatch: MonkeyPatch) -> None:
    'Test joblib args for sklearn >= 1.0.'
    with monkeypatch.context() as patched:
        # Pretend a modern scikit-learn is installed.
        patched.setattr('sklearn.__version__', '2.2.11')
        parallel_args = _joblib_parallel_args(prefer='threads')
        # Modern sklearn passes the kwargs through untouched.
        assert parallel_args['prefer'] == 'threads'
|
def test_joblib_args_lower_1(monkeypatch: MonkeyPatch) -> None:
    'Test joblib args for sklearn < 1.0.'
    with monkeypatch.context() as patched:
        import sklearn
        # Pretend an old scikit-learn is installed and provide the legacy
        # translation helper it used to ship.
        patched.setattr('sklearn.__version__', '0.24.2')
        patched.setattr(
            sklearn.utils.fixes,
            '_joblib_parallel_args',
            (lambda prefer: {'backend': 'threads'}),
            raising=False,
        )
        parallel_args = _joblib_parallel_args(prefer='threads')
        # Old sklearn translates the kwargs through the legacy helper.
        assert parallel_args['backend'] == 'threads'
|
@runtime_checkable
class EstimatorLikeFit1(Protocol):
    """Protocol for estimators whose ``fit`` accepts extra keyword arguments."""

    def fit(self, X: List[str], y: str, **kwargs: Any) -> 'EstimatorLikeFit1':
        """Fit the estimator.

        Parameters
        ----------
        X : list of str
            The features to use.
        y : str
            The target to use.
        **kwargs : dict
            Extra keyword arguments.

        Returns
        -------
        EstimatorLikeFit1
            The fitted estimator.

        """
        return self

    def get_params(self, deep: bool=True) -> Dict:
        """Get the estimator parameters.

        Parameters
        ----------
        deep : bool, optional
            Whether to get in a deep fashion (default True).

        Returns
        -------
        dict
            The parameters.

        """
        return {}

    def set_params(self, **params: Any) -> 'EstimatorLikeFit1':
        """Set the estimator parameters.

        Parameters
        ----------
        **params : dict
            The parameters to set.

        Returns
        -------
        EstimatorLikeFit1
            Estimator with set parameters.

        """
        return self
|
@runtime_checkable
class EstimatorLikeFit2(Protocol):
    """Protocol for estimators whose ``fit`` takes only features and target."""

    def fit(self, X: List[str], y: str) -> 'EstimatorLikeFit2':
        """Fit the estimator.

        Parameters
        ----------
        X : list of str
            The features to use.
        y : str
            The target to use.

        Returns
        -------
        EstimatorLikeFit2
            The fitted estimator.

        """
        return self

    def get_params(self, deep: bool=True) -> Dict:
        """Get the estimator parameters.

        Parameters
        ----------
        deep : bool, optional
            Whether to get in a deep fashion (default True).

        Returns
        -------
        dict
            The parameters.

        """
        return {}

    def set_params(self, **params: Any) -> 'EstimatorLikeFit2':
        """Set the estimator parameters.

        Parameters
        ----------
        **params : dict
            The parameters to set.

        Returns
        -------
        EstimatorLikeFit2
            Estimator with set parameters.

        """
        return self
|
@runtime_checkable
class EstimatorLikeFity(Protocol):
    """Protocol for estimators whose ``fit`` takes only the target."""

    def fit(self, y: str) -> 'EstimatorLikeFity':
        """Fit the estimator.

        Parameters
        ----------
        y : str
            The target to use.

        Returns
        -------
        EstimatorLikeFity
            The fitted estimator.

        """
        return self

    def get_params(self, deep: bool=True) -> Dict:
        """Get the estimator parameters.

        Parameters
        ----------
        deep : bool, optional
            Whether to get in a deep fashion (default True).

        Returns
        -------
        dict
            The parameters.

        """
        return {}

    def set_params(self, **params: Any) -> 'EstimatorLikeFity':
        """Set the estimator parameters.

        Parameters
        ----------
        **params : dict
            The parameters to set.

        Returns
        -------
        EstimatorLikeFity
            Estimator with set parameters.

        """
        return self
|
@runtime_checkable
class TransformerLike(EstimatorLikeFit1, Protocol):
    """Protocol for transformer-like objects (fit, transform, fit_transform)."""

    def fit(self, X: List[str], y: Optional[str]=None, **fit_params: Any) -> None:
        """Fit the transformer.

        Parameters
        ----------
        X : list of str
            The features to use.
        y : str, optional
            The target to use (default None).
        **fit_params : dict
            Fit parameters.

        """
        pass

    def transform(self, X: DataLike) -> DataLike:
        """Transform the data.

        Parameters
        ----------
        X : DataLike
            The features to use.

        Returns
        -------
        DataLike
            The transformed data.

        """
        return X

    def fit_transform(self, X: DataLike, y: Optional[DataLike]=None) -> DataLike:
        """Fit the transformer and transform the data.

        Parameters
        ----------
        X : DataLike
            The features to use.
        y : DataLike, optional
            The target to use (default None).

        Returns
        -------
        DataLike
            The transformed data.

        """
        return X
|
@runtime_checkable
class ModelLike(EstimatorLikeFit1, Protocol):
    """Protocol for model-like objects (predict and score on top of fit)."""

    # Class labels, following the scikit-learn fitted-attribute convention.
    classes_: np.ndarray

    def predict(self, X: pd.DataFrame) -> DataLike:
        """Predict using the model.

        Parameters
        ----------
        X : pd.DataFrame
            The data to predict on.

        Returns
        -------
        DataLike
            The predictions.

        """
        return np.zeros(1)

    def score(self, X: pd.DataFrame, y: DataLike, sample_weight: Optional[DataLike]=None) -> float:
        """Score the model.

        Parameters
        ----------
        X : pd.DataFrame
            The data to predict on.
        y : DataLike
            The true target values.
        sample_weight : DataLike, optional
            Sample weights to use when computing the score (default None).

        Returns
        -------
        float
            The score.

        """
        return 0.0
|
@runtime_checkable
class JuEstimatorLike(EstimatorLikeFit1, Protocol):
    """Protocol for julearn estimators that are aware of column types."""

    def get_needed_types(self) -> ColumnTypes:
        """Get the column types needed by the estimator.

        Returns
        -------
        ColumnTypes
            The column types needed by the estimator.

        """
        return ColumnTypes('placeholder')

    def get_apply_to(self) -> ColumnTypes:
        """Get the column types the estimator applies to.

        Returns
        -------
        ColumnTypes
            The column types the estimator applies to.

        """
        return ColumnTypes('placeholder')
|
@runtime_checkable
class JuModelLike(ModelLike, Protocol):
    """Protocol for julearn models that are aware of column types."""

    def get_needed_types(self) -> ColumnTypes:
        """Get the column types needed by the estimator.

        Returns
        -------
        ColumnTypes
            The column types needed by the estimator.

        """
        return ColumnTypes('placeholder')

    def get_apply_to(self) -> ColumnTypes:
        """Get the column types the estimator applies to.

        Returns
        -------
        ColumnTypes
            The column types the estimator applies to.

        """
        return ColumnTypes('placeholder')
|
def check_version(version: str, major_check: Optional[Callable]=None, minor_check: Optional[Callable]=None, patch_check: Optional[Callable]=None):
    """Check a ``major.minor.patch`` version string with per-part predicates.

    Each predicate receives the corresponding version component as a string
    and must return a boolean. A ``None`` check always passes. If the version
    string has fewer components than provided checks, the extra checks are
    skipped (same truncation behavior as zipping parts with checks).

    Parameters
    ----------
    version : str
        Version to check.
    major_check : Callable, optional
        Function to check the major version (default None).
    minor_check : Callable, optional
        Function to check the minor version (default None).
    patch_check : Callable, optional
        Function to check the patch version (default None).

    Returns
    -------
    bool
        Whether the version passes all the checks.

    """
    checks = (major_check, minor_check, patch_check)
    for part, check in zip(version.split('.'), checks):
        # A missing check means "no constraint" on that component.
        if (check is not None) and (not check(part)):
            return False
    return True
|
def _joblib_parallel_args(**kwargs: Any) -> Any:
    """Get joblib parallel args depending on the scikit-learn version.

    ``sklearn.utils.fixes._joblib_parallel_args`` was removed in
    scikit-learn 1.1; from then on, the keyword arguments can be passed to
    ``joblib.Parallel`` directly.

    Parameters
    ----------
    **kwargs : dict
        Keyword arguments to pass to joblib.Parallel.

    Returns
    -------
    dict
        The (possibly translated) keyword arguments.

    """
    sklearn_version = sklearn.__version__
    # BUG FIX: the previous check required BOTH major >= 1 AND minor >= 1, so
    # a future "2.0.x" release would wrongly fall back to the removed legacy
    # helper. Accept any major >= 2, or major >= 1 with minor >= 1 (>= 1.1).
    at_least_1_1 = check_version(sklearn_version, (lambda x: (int(x) >= 2))) or check_version(sklearn_version, (lambda x: (int(x) >= 1)), (lambda x: (int(x) >= 1)))
    if at_least_1_1:
        return kwargs
    else:
        from sklearn.utils.fixes import _joblib_parallel_args as _sk_parallel
        return _sk_parallel(**kwargs)
|
class _JulearnScoresViewer(param.Parameterized):
    'A class to visualize the scores for model comparison.\n\n Parameters\n ----------\n *scores : pd.DataFrame\n DataFrames containing the scores of the models. The DataFrames must\n be the output of `run_cross_validation`\n width : int\n The width of the plot (default is 800).\n height : int\n The height of the plot (default is 600).\n ci : int\n The confidence interval to use for the error bars (default is 0.95).\n '
    # Interactive widget state: which metric/models/sets to show, whether to
    # display the statistics table, and how to aggregate CV repeats.
    metric = param.Selector([], default=None)
    models = param.ListSelector(default=None, objects=[])
    sets = param.ListSelector(default=None, objects=[])
    show_stats = param.Boolean(False)
    group_repeats = param.Selector(objects=['mean', 'median', 'no'], default='no')
    def __init__(self, *params, **kwargs):
        # Pop the viewer-specific kwargs before delegating to Parameterized.
        scores = kwargs.pop('scores', None)
        if (scores is not None):
            self.set_data(scores)
        self.width = kwargs.pop('width', 800)
        self.height = kwargs.pop('height', 600)
        self.ci = kwargs.pop('ci', 0.95)
        super().__init__(*params, **kwargs)
    def set_data(self, scores):
        'Set the data to use for the plot.\n\n Parameters\n ----------\n scores : list of pd.DataFrame\n DataFrames containing the scores of the models. The DataFrames must\n be the output of `run_cross_validation`\n\n Returns\n -------\n self : _JulearnScoresViewer\n '
        self.scores = check_scores_df(*scores, same_cv=False)
        results_df = pd.concat(self.scores)
        # Reshape to long format: one row per (model, fold, repeat, metric),
        # with a 'set' column ('train'/'test') split off the metric name.
        all_metrics = [x for x in results_df.columns if (x.startswith('test_') or x.startswith('train_'))]
        long_df = results_df.set_index(['model', 'fold', 'repeat'])[all_metrics].stack()
        long_df.name = 'score'
        long_df.index.names = ['model', 'fold', 'repeat', 'metric']
        long_df = long_df.reset_index()
        long_df['set'] = long_df['metric'].str.split('_').str[0]
        long_df['metric'] = long_df['metric'].str.replace('train_', '').str.replace('test_', '')
        self.long_df = long_df
        # Populate the widget choices from the data and select everything.
        self.param.metric.objects = long_df['metric'].unique().tolist()
        self.param.models.objects = long_df['model'].unique().tolist()
        self.param.sets.objects = long_df['set'].unique().tolist()
        self.param.metric.default = self.param.metric.objects[0]
        self.param.models.default = self.param.models.objects
        self.param.sets.default = self.param.sets.objects
        self.metric = self.param.metric.default
        self.models = self.param.models.default
        self.sets = self.param.sets.default
        return self
    @param.depends('metric', 'sets', 'group_repeats', 'models')
    def plot_scores(self):
        # Build the bokeh scatter figure for the currently selected metric,
        # models and sets; re-rendered whenever a widget changes.
        if (len(self.sets) == 0):
            # Nothing selected: render a placeholder message instead.
            p = figure(width=self.width, height=self.height, title=self.metric)
            labels = Label(x=(self.width / 2), y=(self.height / 2), x_units='screen', y_units='screen', text_align='center', text='Please select a set to display', text_font_size='14pt')
            p.add_layout(labels)
            return p
        t_metric = self.metric
        group_repeats = self.group_repeats
        t_df = self.long_df[(self.long_df['metric'] == t_metric)]
        # Optionally aggregate the folds of each repeat into a single value.
        if (group_repeats != 'no'):
            t_group = t_df.groupby(['model', 'set', 'metric', 'repeat'])
            if (group_repeats == 'mean'):
                t_df = t_group.mean()['score'].reset_index()
            elif (group_repeats == 'median'):
                t_df = t_group.median()['score'].reset_index()
            t_df['fold'] = 'all'
        # Re-assign to trigger the param dependency machinery.
        self.models = self.models
        if (len(self.sets) > 1):
            # Grouped x-axis: (model, set) pairs, colored by set.
            x = [(m, s) for m in self.models for s in self.sets]
            x_values = list(map(tuple, t_df[['model', 'set']].values))
            color = factor_cmap('x', palette=Colorblind[3], factors=self.sets, start=1, end=3)
        else:
            # Single set: plain model axis with a fixed per-set color.
            x = list(self.models)
            t_df = t_df[(t_df['set'] == self.sets[0])]
            x_values = list(t_df['model'].values)
            if (self.sets[0] == 'test'):
                color = Colorblind[3][0]
            else:
                color = Colorblind[3][1]
        x_range = FactorRange(factors=x)
        scores = t_df['score'].values
        folds = t_df['fold'].values
        repeats = t_df['repeat'].values
        set_values = t_df['set'].values
        data_dict = {'x': x_values, 'score': scores, 'fold': folds, 'repeat': repeats, 'set': set_values}
        source = ColumnDataSource(data=data_dict)
        p = figure(width=self.width, height=self.height, x_range=x_range, title=t_metric, tools=SCORE_PLOT_TOOLS, tooltips=[('Fold', '@fold'), ('Repeat', '@repeat'), ('score', '@score')])
        if (len(self.models) == 0):
            labels = Label(x=(self.width / 2), y=(self.height / 2), x_units='screen', y_units='screen', text_align='center', text='Please select a at least one model to display', text_font_size='14pt')
            p.add_layout(labels)
            return p
        # One jittered dot per CV score.
        p.circle(x=jitter('x', width=0.7, range=p.x_range), y='score', alpha=0.5, source=source, size=10, line_color='white', color=color, legend_group='set')
        if (len(self.sets) > 1):
            p.add_layout(p.legend[0], 'right')
        else:
            p.legend.visible = False
        if (len(self.sets) > 1):
            g = t_df.groupby(['model', 'set'])
        else:
            g = t_df.groupby('model')
        # Whiskers for the confidence interval (quantile-based) per group.
        ci_upper = (self.ci + ((1 - self.ci) / 2))
        ci_lower = ((1 - self.ci) / 2)
        upper = g.score.quantile(ci_upper)
        lower = g.score.quantile(ci_lower)
        source = ColumnDataSource(data={'base': upper.index.values, 'upper': upper, 'lower': lower})
        error = Whisker(base='base', upper='upper', lower='lower', source=source, level='annotation', line_width=2)
        error.upper_head.size = 20
        error.lower_head.size = 20
        p.add_layout(error)
        # A degenerate whisker (upper == lower) renders the mean as a bar.
        mean_score = g.score.mean()
        source = ColumnDataSource(data={'base': mean_score.index.values, 'upper': mean_score, 'lower': mean_score})
        mean_bar = Whisker(base='base', upper='upper', lower='lower', source=source, level='annotation', line_width=2)
        mean_bar.upper_head.size = 10
        mean_bar.lower_head.size = 10
        p.add_layout(mean_bar)
        # Vertical separators between model groups, placed halfway into the
        # inter-group padding.
        if (len(self.sets) > 1):
            grp_pad = p.x_range.group_padding
            span_x = [(*list(t_x), (1 + ((grp_pad - 1.0) / 2.0))) for t_x in x[1:(- 1):len(self.sets)]]
        else:
            grp_pad = p.x_range.factor_padding
            span_x = [(t_x, (1 + ((grp_pad - 1.0) / 2.0))) for t_x in x[:(- 1)]]
        src_sep = ColumnDataSource(data={'base': span_x, 'lower': ([0] * len(span_x)), 'upper': ([(max(scores) * 1.05)] * len(span_x))})
        sep = Whisker(base='base', upper='upper', lower='lower', source=src_sep, level='annotation', line_width=2, line_color='lightgrey', dimension='height', line_alpha=1, upper_head=None, lower_head=None)
        p.add_layout(sep)
        if (len(self.sets) > 1):
            p.xaxis.major_tick_line_color = None
            p.xaxis.major_label_text_font_size = '0pt'
            p.xaxis.group_label_orientation = 'vertical'
        else:
            p.xaxis.major_label_orientation = 'vertical'
        p.xaxis.major_label_text_color = 'grey'
        p.xgrid.grid_line_color = None
        return p
    @param.depends('metric', 'sets', 'show_stats')
    def plot_stats(self):
        # Render the corrected t-test table when requested; otherwise an
        # empty markdown pane keeps the layout slot occupied.
        if (self.show_stats and (len(self.sets) > 0)):
            stats_df = corrected_ttest(*self.scores)
            # Same set/metric split as in set_data, then filter to the
            # current selection and sort by corrected p-value.
            stats_df['set'] = stats_df['metric'].str.split('_').str[0]
            stats_df['metric'] = stats_df['metric'].str.replace('train_', '').str.replace('test_', '')
            stats_df = stats_df[(stats_df['metric'] == self.metric)]
            stats_df = stats_df[stats_df['set'].isin(self.sets)]
            stats_df.sort_values(by='p-val-corrected', inplace=True, ascending=True)
            source = ColumnDataSource(stats_df)
            columns = [TableColumn(field='model_1', title='Model 1'), TableColumn(field='model_2', title='Model 2'), TableColumn(field='t-stat', title='t-stat', formatter=ScientificFormatter(precision=3)), TableColumn(field='p-val', title='p-value', formatter=ScientificFormatter(precision=3)), TableColumn(field='p-val-corrected', title='corrected_p-value', formatter=ScientificFormatter(precision=3))]
            if (len(self.sets) > 1):
                columns.append(TableColumn(field='set', title='Set'))
            data_table = DataTable(source=source, columns=columns, width=self.width, index_position=None)
            return data_table
        else:
            return pn.pane.Markdown('')
|
def plot_scores(*scores: pd.DataFrame, width: int=800, height: int=600, ci: float=0.95) -> pn.layout.Panel:
    """Plot the scores of the models on a panel dashboard.

    Parameters
    ----------
    *scores : pd.DataFrame
        DataFrames containing the scores of the models. The DataFrames must
        be the output of `run_cross_validation`
    width : int, optional
        Width of the plot (default is 800)
    height : int, optional
        Height of the plot (default is 600)
    ci : float, optional
        Confidence interval to use for the plots (default is 0.95)

    Returns
    -------
    pn.layout.Panel
        The assembled dashboard (header, widgets, plot and stats table).

    """
    viewer = _JulearnScoresViewer(scores=[*scores], width=width, height=height, ci=ci)
    pn.extension(template='fast')
    # Header: logo next to the dashboard title.
    dashboard_title = pn.panel('## Scores Viewer')
    logo = ((Path(__file__).parent / 'res') / 'julearn_logo_generalization.png')
    png = pn.panel(logo, width=200)
    header = pn.Row(png, pn.Spacer(width=50), dashboard_title)
    # Top row of controls: metric selector, stats toggle, repeat aggregation.
    widget_row = pn.Row(pn.Param(viewer.param.metric, name='Metric', show_name=True, widgets={'metric': {'type': pn.widgets.Select, 'button_type': 'primary', 'name': ''}}))
    widget_row.append(pn.Param(viewer.param.show_stats, name='Statistics', show_name=True, widgets={'show_stats': {'type': pn.widgets.Toggle, 'button_type': 'primary', 'name': 'Show'}}))
    widget_row.append(pn.Param(viewer.param.group_repeats, name='Aggregate Repeats', show_name=True, widgets={'group_repeats': {'type': pn.widgets.RadioButtonGroup, 'button_type': 'primary', 'options': ['no', 'median', 'mean']}}))
    # Side filters: the set selector only appears when there is a choice.
    filter_widgets = pn.Column()
    if (len(viewer.sets) > 1):
        filter_widgets.append(pn.Param(viewer.param.sets, name='Sets', show_name=True, widgets={'sets': {'type': pn.widgets.CheckButtonGroup, 'button_type': 'primary', 'orientation': 'vertical'}}))
    filter_widgets.append(pn.Param(viewer.param.models, name='Models', show_name=True, widgets={'models': {'type': pn.widgets.CheckButtonGroup, 'button_type': 'primary', 'orientation': 'vertical'}}))
    column = pn.Column(header, widget_row, pn.Row(viewer.plot_scores, filter_widgets), viewer.plot_stats)
    return column
|
class Normalize(nn.Module):
    """Per-channel input normalization layer: ``(x - mean) / std``.

    Parameters
    ----------
    mean : sequence of float
        Per-channel means.
    std : sequence of float
        Per-channel standard deviations.
    """

    def __init__(self, mean, std):
        super().__init__()
        # Buffers move with the module across devices but are not trained.
        self.register_buffer('mean', torch.Tensor(mean))
        self.register_buffer('std', torch.Tensor(std))

    def forward(self, x):
        """Normalize a batch of channel-first images.

        Parameters
        ----------
        x : torch.Tensor
            Input of shape ``(N, C, H, W)``.

        Returns
        -------
        torch.Tensor
            The normalized input, same shape as ``x``.
        """
        # Reshape to (1, C, 1, 1) so the statistics broadcast over the batch
        # and spatial dimensions. Using -1 generalizes the original
        # hard-coded 3-channel reshape to any channel count (backward
        # compatible for RGB inputs).
        mean = self.mean.reshape(1, -1, 1, 1)
        std = self.std.reshape(1, -1, 1, 1)
        return ((x - mean) / std)
|
def add_data_normalization(model, mean, std):
    """Prepend an input-normalization layer to ``model``.

    Parameters
    ----------
    model : torch.nn.Module
        The model to wrap.
    mean : sequence of float
        Per-channel means for the normalization layer.
    std : sequence of float
        Per-channel standard deviations for the normalization layer.

    Returns
    -------
    torch.nn.Sequential
        The normalization layer followed by the original model.
    """
    normalization = Normalize(mean=mean, std=std)
    return torch.nn.Sequential(normalization, model)
|
def apply_attack_on_dataset(model, dataloader, attack, epsilons, device, verbose=True):
    """Evaluate clean and adversarial accuracy of ``model`` over a dataloader.

    Parameters
    ----------
    model : callable
        Model mapping an image batch to class logits.
    dataloader : iterable
        Yields ``(images, labels)`` batches; iterated once for the clean
        pass and once per epsilon.
    attack : object
        Attack callable as ``attack(images, labels)``; its ``eps`` attribute
        is overwritten for each evaluated epsilon.
    epsilons : iterable of float
        Perturbation budgets, one robust accuracy per value.
    device : str or torch.device
        Device the batches are moved to.
    verbose : bool, optional
        Print progress and show a tqdm bar per epsilon (default True).

    Returns
    -------
    tuple
        ``(clean_accuracy, robust_accuracy)`` where ``robust_accuracy`` is a
        list with one entry per epsilon.
    """
    robust_accuracy = []
    c_a = []
    # Clean pass: per-batch accuracy collected, then averaged.
    # NOTE(review): this is a mean of per-batch accuracies, which equals the
    # overall accuracy only when all batches have the same size.
    for (images, labels) in dataloader:
        (images, labels) = (images.to(device), labels.to(device))
        outputs = model(images)
        (_, pre) = torch.max(outputs.data, 1)
        correct_predictions = (pre == labels)
        c_a.append((correct_predictions.sum() / len(correct_predictions)).cpu().numpy())
    clean_accuracy = np.mean(c_a)
    print('Clean accuracy: ', clean_accuracy)
    for epsilon in epsilons:
        # The same attack object is reused; only its budget changes.
        attack.eps = epsilon
        r_a = []
        if verbose:
            print('Epsilon: ', epsilon)
            # Progress bar only exists when verbose (t is referenced below
            # only under the same condition).
            t = trange(len(dataloader))
        for (images, labels) in dataloader:
            (images, labels) = (images.to(device), labels.to(device))
            adv_images = attack(images, labels)
            outputs = model(adv_images)
            (_, pre) = torch.max(outputs.data, 1)
            correct_predictions = (pre == labels)
            r_a.append((correct_predictions.sum() / len(correct_predictions)).cpu().numpy())
            if verbose:
                t.update(1)
        robust_acc = np.mean(r_a)
        if verbose:
            print('Robust accuracy: ', robust_acc)
        robust_accuracy.append(robust_acc)
    return (clean_accuracy, robust_accuracy)
|
def apply_attack_on_batch(model, images, labels, attack, device):
    """Run an adversarial attack on a single batch and measure accuracies.

    Parameters
    ----------
    model : callable
        Model mapping an image batch to class logits.
    images : torch.Tensor
        Batch of input images.
    labels : torch.Tensor
        Ground-truth labels for the batch.
    attack : callable
        Attack taking ``(images, labels)`` and returning adversarial images.
    device : str or torch.device
        Device to run on.

    Returns
    -------
    tuple
        ``(adv_images_cpu, adversarial_success, clean_accuracy,
        robust_accuracy)``. Accuracies are fractions in ``[0, 1]``;
        ``adversarial_success`` flags samples predicted correctly on clean
        input but incorrectly after the attack.
    """
    (images, labels) = (images.to(device), labels.to(device))
    # Clean pass.
    outputs = model(images)
    (_, pre) = torch.max(outputs.data, 1)
    correct_predictions = (pre == labels)
    correct_predictions = correct_predictions.cpu().numpy()
    clean_accuracy = (correct_predictions.sum() / len(correct_predictions))
    # Adversarial pass.
    adv_images = attack(images, labels)
    outputs = model(adv_images)
    (_, pre) = torch.max(outputs.data, 1)
    correct_predictions_adv = (pre == labels)
    correct_predictions_adv = correct_predictions_adv.cpu().numpy()
    robust_accuracy = (correct_predictions_adv.sum() / len(correct_predictions_adv))
    # An attack succeeds on a sample iff the clean prediction was right and
    # the adversarial one is wrong.
    adversarial_success = [bool(pred_c and (not pred_r)) for (pred_c, pred_r) in zip(correct_predictions, correct_predictions_adv)]
    # BUG FIX: accuracies are fractions, so scale by 100 before appending
    # the '%' sign (previous output printed e.g. "0.5%" for 50%).
    print('Clean Accuracy on Batch: {:.2f}%'.format(clean_accuracy * 100))
    print('Robust Accuracy on Batch: {:.2f}%'.format(robust_accuracy * 100))
    return (adv_images.cpu(), adversarial_success, clean_accuracy, robust_accuracy)
|
def plot_accuracy(x, accuracy, methods, title, xlabel='x', ylabel='accuracy'):
    """Plot one accuracy curve per method on a shared matplotlib figure.

    Parameters
    ----------
    x : sequence
        Shared x-axis values.
    accuracy : sequence of sequences
        One accuracy curve per method, indexed like ``methods``.
    methods : sequence of str
        Legend label for each curve.
    title : str
        Figure title.
    xlabel, ylabel : str, optional
        Axis labels (default ``'x'`` and ``'accuracy'``).
    """
    for idx, method_name in enumerate(methods):
        plt.plot(x, accuracy[idx], label=method_name)
    plt.legend()
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.show()
|
def imshow(img, title):
    """Display a channel-first tensor image with matplotlib.

    Parameters
    ----------
    img : torch.Tensor
        Image tensor in ``(C, H, W)`` layout (anything with ``.numpy()``).
    title : str
        Figure title.
    """
    npimg = img.numpy()
    # The figure handle was previously bound to an unused local; the call is
    # kept for its side effect (sizing the new figure).
    plt.figure(figsize=(15, 15))
    # Convert (C, H, W) -> (H, W, C) for matplotlib.
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.title(title)
    plt.show()
|
class Conv2dGrad(autograd.Function):
    """Autograd Function wrapping ``conv2d`` with an explicit backward pass.

    NOTE(review): a second class with the same name is defined later in this
    file and shadows this one at import time — confirm which definition
    callers actually use.
    """

    @staticmethod
    def forward(context, input, weight, bias, stride, padding, dilation, groups):
        """Run the convolution and stash everything needed for backward."""
        context.stride = stride
        context.padding = padding
        context.dilation = dilation
        context.groups = groups
        context.save_for_backward(input, weight, bias)
        return torch.nn.functional.conv2d(
            input,
            weight,
            bias=bias,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
        )

    @staticmethod
    def backward(context, grad_output):
        """Compute gradients w.r.t. input, weight and bias."""
        (input, weight, bias) = context.saved_tensors
        grad_input = None
        grad_weight = None
        grad_bias = None
        if context.needs_input_grad[0]:
            grad_input = torch.nn.grad.conv2d_input(
                input_size=input.shape,
                weight=weight,
                grad_output=grad_output,
                stride=context.stride,
                padding=context.padding,
                dilation=context.dilation,
                groups=context.groups,
            )
        if context.needs_input_grad[1]:
            grad_weight = torch.nn.grad.conv2d_weight(
                input=input,
                weight_size=weight.shape,
                grad_output=grad_output,
                stride=context.stride,
                padding=context.padding,
                dilation=context.dilation,
                groups=context.groups,
            )
        if ((bias is not None) and context.needs_input_grad[2]):
            # Sum over batch and both spatial dims, keeping the channel dim.
            grad_bias = grad_output.sum(0).sum(2).sum(1)
        # One gradient slot per forward argument; the hyper-parameters get
        # None.
        return (grad_input, grad_weight, grad_bias, None, None, None, None)
|
class LinearGrad(autograd.Function):
    """Linear layer with a hand-written backward pass.

    Forward delegates to ``torch.nn.functional.linear``; backward computes the
    standard input/weight/bias gradients explicitly.

    NOTE(review): another class named ``LinearGrad`` is defined later in this
    module and shadows this one at import time — confirm which is intended.
    """

    @staticmethod
    def forward(context, input, weight, bias=None):
        context.save_for_backward(input, weight, bias)
        output = torch.nn.functional.linear(input, weight, bias)
        return output

    @staticmethod
    def backward(context, grad_output):
        input, weight, bias = context.saved_tensors
        grad_input = grad_weight = grad_bias = None
        if context.needs_input_grad[0]:
            grad_input = grad_output.mm(weight)
        if context.needs_input_grad[1]:
            grad_weight = grad_output.t().mm(input)
        if (bias is not None) and context.needs_input_grad[2]:
            # BUG FIX: the previous ``.sum(0).squeeze(0)`` collapsed the bias
            # gradient to a 0-d tensor when out_features == 1, which autograd
            # rejects as a shape mismatch with the (1,)-shaped bias. The batch
            # reduction alone already yields the correct (out_features,) shape.
            grad_bias = grad_output.sum(0)
        return (grad_input, grad_weight, grad_bias)
|
class Conv2dGrad(autograd.Function):
    """Conv2d whose backward pass routes through ``weight_backward``.

    The forward pass convolves with ``weight`` as usual; the backward pass
    substitutes ``weight_backward`` when computing the input and weight
    gradients. ``weight_backward`` and ``bias_backward`` themselves never
    receive gradients (their slots stay ``None``).
    """

    @staticmethod
    def forward(context, input, weight, weight_backward, bias, bias_backward,
                stride, padding, dilation, groups):
        # Remember the conv hyper-parameters and all tensors for backward().
        context.stride = stride
        context.padding = padding
        context.dilation = dilation
        context.groups = groups
        context.save_for_backward(input, weight, weight_backward, bias,
                                  bias_backward)
        return torch.nn.functional.conv2d(
            input,
            weight,
            bias=bias,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
        )

    @staticmethod
    def backward(context, grad_output):
        input, weight, weight_backward, bias, bias_backward = \
            context.saved_tensors
        grad_input = grad_weight = grad_weight_backward = None
        grad_bias = grad_bias_backward = None
        if context.needs_input_grad[0]:
            # Input gradient computed with the *backward* weights, not the
            # forward ones (feedback-alignment style).
            grad_input = torch.nn.grad.conv2d_input(
                input_size=input.shape,
                weight=weight_backward,
                grad_output=grad_output,
                stride=context.stride,
                padding=context.padding,
                dilation=context.dilation,
                groups=context.groups,
            )
        if context.needs_input_grad[1]:
            grad_weight = torch.nn.grad.conv2d_weight(
                input=input,
                weight_size=weight_backward.shape,
                grad_output=grad_output,
                stride=context.stride,
                padding=context.padding,
                dilation=context.dilation,
                groups=context.groups,
            )
        if bias is not None and context.needs_input_grad[3]:
            # One bias-gradient value per output channel.
            grad_bias = grad_output.sum(0).sum(2).sum(1)
        # One slot per forward() argument; the four hyper-parameters are
        # non-tensors and receive None.
        return (grad_input, grad_weight, grad_weight_backward, grad_bias,
                grad_bias_backward, None, None, None, None)
|
class LinearGrad(autograd.Function):
    """Linear layer whose backward pass routes through ``weight_backward``.

    The forward pass is a standard affine map using ``weight``/``bias``; the
    backward pass substitutes ``weight_backward`` when computing the input
    gradient (feedback-alignment style). ``weight_backward`` and
    ``bias_backward`` never receive gradients (their slots stay ``None``).
    """

    @staticmethod
    def forward(context, input, weight, weight_backward, bias=None,
                bias_backward=None):
        context.save_for_backward(input, weight, weight_backward, bias,
                                  bias_backward)
        output = input.mm(weight.t())
        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output

    @staticmethod
    def backward(context, grad_output):
        input, weight, weight_backward, bias, bias_backward = \
            context.saved_tensors
        grad_input = grad_weight = grad_weight_backward = None
        grad_bias = grad_bias_backward = None
        if context.needs_input_grad[0]:
            # Input gradient uses the *backward* weights, not the forward ones.
            grad_input = grad_output.mm(weight_backward)
        if context.needs_input_grad[1]:
            grad_weight = grad_output.t().mm(input)
        if (bias is not None) and context.needs_input_grad[3]:
            # BUG FIX: the previous ``.sum(0).squeeze(0)`` collapsed the bias
            # gradient to a 0-d tensor when out_features == 1, which autograd
            # rejects as a shape mismatch with the (1,)-shaped bias. The batch
            # reduction alone already yields the correct (out_features,) shape.
            grad_bias = grad_output.sum(0)
        return (grad_input, grad_weight, grad_weight_backward, grad_bias,
                grad_bias_backward)
|
def select_loss_function(loss_function_config):
    """Instantiate the loss function described by a config mapping.

    Args:
        loss_function_config: Mapping with a ``'name'`` key identifying the
            loss; only ``'cross_entropy'`` is currently supported.

    Returns:
        torch.nn.CrossEntropyLoss: The instantiated loss module.

    Raises:
        ValueError: If the configured name is not supported. (The previous
            version silently returned ``None``, deferring the failure to the
            first training step.)
    """
    if loss_function_config['name'] == 'cross_entropy':
        return torch.nn.CrossEntropyLoss()
    raise ValueError(
        'Unsupported loss function: {!r}'.format(loss_function_config['name'])
    )
|
# NOTE(review): the three lines below are dataset-viewer page residue
# ("Subsets and Splits" footer text), not Python code; as bare text they
# make this module unparseable. Commented out here — they should be removed.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.