repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test_vaegmm.py | from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.od import OutlierVAEGMM
from alibi_detect.version import __version__
# Hyperparameter grid for the OutlierVAEGMM tests; the cartesian product of
# these lists is what the parametrized test below iterates over.
threshold = [None, 5.]
n_gmm = [1, 2]
w_energy = [.1, .5]
w_recon = [0., 1e-7]
samples = [1, 10]
threshold_perc = [90.]
return_instance_score = [True, False]
tests = list(product(threshold, n_gmm, w_energy, w_recon, samples, threshold_perc, return_instance_score))
n_tests = len(tests)
# load and preprocess MNIST data
(X_train, _), (X_test, _) = tf.keras.datasets.mnist.load_data()
X = X_train.reshape(X_train.shape[0], -1)[:1000]  # flatten images; only train on 1000 instances
X = X.astype(np.float32)
X /= 255  # scale pixel values to [0, 1]
input_dim = X.shape[1]
latent_dim = 2
@pytest.fixture
def vaegmm_params(request):
    """Return the hyperparameter tuple selected by the indirect parametrization index."""
    return tests[request.param]
@pytest.mark.parametrize('vaegmm_params', list(range(n_tests)), indirect=True)
def test_vaegmm(vaegmm_params):
    """End-to-end check of OutlierVAEGMM: init, fit, threshold inference and prediction."""
    # OutlierVAEGMM parameters
    threshold, n_gmm, w_energy, w_recon, samples, threshold_perc, return_instance_score = vaegmm_params
    # define encoder, decoder and GMM density net
    encoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(input_dim,)),
            Dense(128, activation=tf.nn.relu),
            Dense(latent_dim, activation=None)
        ]
    )
    decoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(latent_dim,)),
            Dense(128, activation=tf.nn.relu),
            Dense(input_dim, activation=tf.nn.sigmoid)
        ]
    )
    gmm_density_net = tf.keras.Sequential(
        [
            # latent_dim + 2: the density net input is wider than the latent
            # sample — presumably extra reconstruction features are appended; confirm against OutlierVAEGMM.
            InputLayer(input_shape=(latent_dim + 2,)),
            Dense(10, activation=tf.nn.relu),
            Dense(n_gmm, activation=tf.nn.softmax)
        ]
    )
    # init OutlierVAEGMM
    vaegmm = OutlierVAEGMM(
        threshold=threshold,
        encoder_net=encoder_net,
        decoder_net=decoder_net,
        gmm_density_net=gmm_density_net,
        n_gmm=n_gmm,
        latent_dim=latent_dim,
        samples=samples
    )
    assert vaegmm.threshold == threshold
    assert vaegmm.meta == {'name': 'OutlierVAEGMM', 'detector_type': 'outlier', 'data_type': None,
                           'online': False, 'version': __version__}
    # fit OutlierVAEGMM, infer threshold and compute scores
    vaegmm.fit(X, w_recon=w_recon, w_energy=w_energy, epochs=5, batch_size=1000, verbose=False)
    vaegmm.infer_threshold(X, threshold_perc=threshold_perc)
    energy = vaegmm.score(X)
    perc_score = 100 * (energy < vaegmm.threshold).astype(int).sum() / energy.shape[0]
    # the inferred threshold should leave roughly threshold_perc% of the data below it
    assert threshold_perc + 5 > perc_score > threshold_perc - 5
    # make and check predictions
    od_preds = vaegmm.predict(X, return_instance_score=return_instance_score)
    assert od_preds['meta'] == vaegmm.meta
    assert od_preds['data']['is_outlier'].shape == (X.shape[0],)
    if return_instance_score:
        # outlier labels must be consistent with thresholding the instance scores
        assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
                                                        > vaegmm.threshold).astype(int).sum()
    else:
        assert od_preds['data']['instance_score'] is None
| 3,175 | 32.083333 | 106 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__gmm/test__gmm_pytorch_backend.py | import pytest
import numpy as np
import torch
from alibi_detect.od.pytorch.gmm import GMMTorch
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
def test_gmm_pytorch_scoring():
    """Test GMM detector pytorch scoring method.

    Tests the scoring method of the GMMTorch pytorch backend detector.
    """
    gmm_torch = GMMTorch(n_components=1)
    mean = [8, 8]
    cov = [[2., 0.], [0., 1.]]
    x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    gmm_torch.fit(x_ref)
    # the distribution mean should be the least outlying point
    x_1 = torch.tensor(np.array([[8., 8.]]))
    scores_1 = gmm_torch.score(x_1)
    # a random in-distribution draw scores higher than the mean
    x_2 = torch.tensor(np.random.multivariate_normal(mean, cov, 1))
    scores_2 = gmm_torch.score(x_2)
    # a point far from the distribution scores highest
    x_3 = torch.tensor(np.array([[-10., 10.]]))
    scores_3 = gmm_torch.score(x_3)
    # test correct ordering of scores given outlyingness of data
    assert scores_1 < scores_2 < scores_3
    # test that detector correctly detects true outlier
    gmm_torch.infer_threshold(x_ref, 0.01)
    x = torch.cat((x_1, x_2, x_3))
    outputs = gmm_torch.predict(x)
    assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
    assert torch.all(gmm_torch(x) == torch.tensor([False, False, True]))
    # test that 0.01 of the in distribution data is flagged as outliers
    x = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    outputs = gmm_torch.predict(x)
    # NOTE(review): no abs() here, so this only bounds the flag rate from above;
    # an under-flagging detector would still pass — confirm intent.
    assert (outputs.is_outlier.sum()/1000) - 0.01 < 0.01
def test_gmm_torch_backend_ts(tmp_path):
    """Test GMM detector backend is torch-scriptable and savable."""
    gmm_torch = GMMTorch(n_components=2)
    # third row scaled by 100 to act as a clear outlier
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    x_ref = torch.randn((1024, 10))
    gmm_torch.fit(x_ref)
    gmm_torch.infer_threshold(x_ref, 0.1)
    pred_1 = gmm_torch(x)
    # scripting must not change predictions
    gmm_torch = torch.jit.script(gmm_torch)
    pred_2 = gmm_torch(x)
    assert torch.all(pred_1 == pred_2)
    # a save/load round trip must not change predictions either
    gmm_torch.save(tmp_path / 'gmm_torch.pt')
    gmm_torch = torch.load(tmp_path / 'gmm_torch.pt')
    pred_2 = gmm_torch(x)
    assert torch.all(pred_1 == pred_2)
def test_gmm_pytorch_backend_fit_errors():
    """Check GMMTorch raises the expected errors around fit/threshold state.

    Verifies NotFittedError before fitting, the fitted flag after fitting, and
    ThresholdNotInferredError for the forward call without an inferred threshold.
    """
    detector = GMMTorch(n_components=2)
    assert not detector.fitted
    x = torch.tensor(np.random.randn(1, 10))
    # Calling the detector before fitting must fail.
    with pytest.raises(NotFittedError) as excinfo:
        detector(x)
    assert str(excinfo.value) == 'GMMTorch has not been fit!'
    # Predicting before fitting must fail in the same way.
    with pytest.raises(NotFittedError) as excinfo:
        detector.predict(x)
    assert str(excinfo.value) == 'GMMTorch has not been fit!'
    # Fitting flips the fitted flag.
    x_ref = torch.tensor(np.random.randn(1024, 10))
    detector.fit(x_ref)
    assert detector.fitted
    # The forward call still fails until a threshold has been inferred ...
    with pytest.raises(ThresholdNotInferredError) as excinfo:
        detector(x)
    assert str(excinfo.value) == 'GMMTorch has no threshold set, call `infer_threshold` to fit one!'
    # ... but predict works without an inferred threshold.
    assert detector.predict(x)
def test_gmm_pytorch_fit():
    """Test GMM detector pytorch fit method.

    Tests pytorch detector checks for convergence and stops early if it does.
    """
    gmm_torch = GMMTorch(n_components=1)
    mean = [8, 8]
    cov = [[2., 0.], [0., 1.]]
    x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    # single Gaussian fit to Gaussian data should converge well inside the epoch budget
    fit_results = gmm_torch.fit(x_ref, tol=0.01)
    assert fit_results['converged']
    assert fit_results['n_epochs'] < 10
    assert fit_results['lower_bound'] < 1
| 3,906 | 33.575221 | 96 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__gmm/test__gmm.py | import pytest
import numpy as np
import torch
from alibi_detect.od._gmm import GMM
from alibi_detect.exceptions import NotFittedError
from sklearn.datasets import make_moons
@pytest.mark.parametrize('backend', ['pytorch', 'sklearn'])
def test_unfitted_gmm_score(backend):
    """Test GMM detector raises exceptions when not fitted."""
    detector = GMM(n_components=2, backend=backend)
    x = np.array([[0, 10], [0.1, 0]])
    x_ref = np.random.randn(100, 2)
    expected_msg = 'GMM has not been fit!'
    # infer_threshold, score and predict must all refuse to run before fit
    with pytest.raises(NotFittedError) as excinfo:
        detector.infer_threshold(x_ref, 0.1)
    assert str(excinfo.value) == expected_msg
    with pytest.raises(NotFittedError) as excinfo:
        detector.score(x)
    assert str(excinfo.value) == expected_msg
    with pytest.raises(NotFittedError) as excinfo:
        detector.predict(x)
    assert str(excinfo.value) == expected_msg
@pytest.mark.parametrize('backend', ['pytorch', 'sklearn'])
def test_fitted_gmm_score(backend):
    """Test GMM detector score method.

    Test GMM detector that has been fitted on reference data but has not had a threshold
    inferred can still score data using the predict method. Test that it does not raise an error
    but does not return `threshold`, `p_value` and `is_outlier` values.
    """
    gmm_detector = GMM(n_components=1, backend=backend)
    x_ref = np.random.randn(100, 2)
    gmm_detector.fit(x_ref)
    x = np.array([[0, 10], [0.1, 0]])
    scores = gmm_detector.score(x)
    y = gmm_detector.predict(x)
    y = y['data']
    # first instance lies far from the reference data, second is close
    assert y['instance_score'][0] > 5
    assert y['instance_score'][1] < 2
    # predict and score must agree on instance scores
    assert all(y['instance_score'] == scores)
    # threshold-dependent fields stay unset until infer_threshold is called
    assert not y['threshold_inferred']
    assert y['threshold'] is None
    assert y['is_outlier'] is None
    assert y['p_value'] is None
@pytest.mark.parametrize('backend', ['pytorch', 'sklearn'])
def test_fitted_gmm_predict(backend):
    """Test GMM detector predict method.

    Test GMM detector that has been fitted on reference data and has had a threshold
    inferred can score data using the predict method as well as predict outliers. Test that it
    returns `threshold`, `p_value` and `is_outlier` values.
    """
    gmm_detector = GMM(n_components=1, backend=backend)
    x_ref = np.random.randn(100, 2)
    gmm_detector.fit(x_ref)
    gmm_detector.infer_threshold(x_ref, 0.1)
    x = np.array([[0, 10], [0, 0.1]])
    y = gmm_detector.predict(x)
    y = y['data']
    # first instance lies far from the reference data, second is close
    assert y['instance_score'][0] > 5
    assert y['instance_score'][1] < 2
    # threshold-dependent fields are populated once infer_threshold has run
    assert y['threshold_inferred']
    assert y['threshold'] is not None
    assert y['p_value'].all()
    assert (y['is_outlier'] == [True, False]).all()
@pytest.mark.parametrize('backend', ['pytorch', 'sklearn'])
def test_gmm_integration(backend):
    """Test GMM detector on moons dataset.

    Test GMM detector on a more complex 2d example. Test that the detector can be fitted
    on reference data and infer a threshold. Test that it differentiates between inliers and outliers.
    """
    gmm_detector = GMM(n_components=8, backend=backend)
    X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
    # hold out the last sample as a known inlier
    X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
    gmm_detector.fit(X_ref)
    gmm_detector.infer_threshold(X_ref, 0.1)
    result = gmm_detector.predict(x_inlier)
    result = result['data']['is_outlier'][0]
    assert not result
    # a point away from both moons should be flagged
    x_outlier = np.array([[-1, 1.5]])
    result = gmm_detector.predict(x_outlier)
    result = result['data']['is_outlier'][0]
    assert result
def test_gmm_torchscript(tmp_path):
    """Tests user can torch-script gmm detector."""
    gmm_detector = GMM(n_components=8, backend='pytorch')
    X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
    # hold out the last sample as a known inlier
    X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
    gmm_detector.fit(X_ref)
    gmm_detector.infer_threshold(X_ref, 0.1)
    x_outlier = np.array([[-1, 1.5]])
    # batch with one inlier and one outlier
    x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
    # the scripted backend must reproduce the detector's labels
    ts_gmm = torch.jit.script(gmm_detector.backend)
    y = ts_gmm(x)
    assert torch.all(y == torch.tensor([False, True]))
    # and survive a save/load round trip
    ts_gmm.save(tmp_path / 'gmm.pt')
    ts_gmm = torch.load(tmp_path / 'gmm.pt')
    y = ts_gmm(x)
    assert torch.all(y == torch.tensor([False, True]))
@pytest.mark.parametrize('backend', ['pytorch', 'sklearn'])
def test_gmm_fit(backend):
    """Test GMM detector fit method.

    Tests detector checks for convergence and stops early if it does.
    """
    gmm = GMM(n_components=1, backend=backend)
    mean = [8, 8]
    cov = [[2., 0.], [0., 1.]]
    x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    # single Gaussian fit to Gaussian data should converge quickly
    fit_results = gmm.fit(x_ref, tol=0.01, batch_size=32)
    assert isinstance(fit_results['lower_bound'], float)
    assert fit_results['converged']
    assert fit_results['lower_bound'] < 1
| 4,890 | 34.963235 | 102 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__lof/test__lof_backend.py | import pytest
import torch
from alibi_detect.od.pytorch.lof import LOFTorch
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.od.pytorch.ensemble import Ensembler, PValNormalizer, AverageAggregator
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
@pytest.fixture(scope='function')
def ensembler(request):
    """Provide a fresh Ensembler (p-value normalization + mean aggregation) per test."""
    normalizer = PValNormalizer()
    aggregator = AverageAggregator()
    return Ensembler(normalizer=normalizer, aggregator=aggregator)
def test_lof_torch_backend():
    """
    Test the lof torch backend can be correctly initialized, fit and used to
    predict outliers.
    """
    lof_torch = LOFTorch(k=5)
    # third row scaled by 100 to act as a clear outlier
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    x_ref = torch.randn((1024, 10))
    lof_torch.fit(x_ref)
    outputs = lof_torch.predict(x)
    assert outputs.instance_score.shape == (3, )
    # no threshold inferred yet, so labels and p-values are unavailable
    assert outputs.is_outlier is None
    assert outputs.p_value is None
    # predict and score must agree on the instance scores
    scores = lof_torch.score(x)
    assert torch.all(scores == outputs.instance_score)
    # after inferring a threshold only the scaled row is flagged
    lof_torch.infer_threshold(x_ref, 0.1)
    outputs = lof_torch.predict(x)
    assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
    assert torch.all(lof_torch(x) == torch.tensor([False, False, True]))
def test_lof_torch_backend_ensemble(ensembler):
    """
    Test the lof torch backend can be correctly initialized as an ensemble, fit
    on data and used to predict outliers.
    """
    # two k values make this an ensemble, requiring the ensembler
    lof_torch = LOFTorch(k=[4, 5], ensembler=ensembler)
    x_ref = torch.randn((1024, 10))
    lof_torch.fit(x_ref)
    # third row scaled by 100 to act as a clear outlier
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    lof_torch.infer_threshold(x_ref, 0.1)
    outputs = lof_torch.predict(x)
    assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
    assert torch.all(lof_torch(x) == torch.tensor([False, False, True]))
def test_lof_torch_backend_ensemble_ts(tmp_path, ensembler):
    """
    Test the lof torch backend can be initialized as an ensemble and
    torch scripted, as well as saved and loaded to and from disk.
    """
    lof_torch = LOFTorch(k=[4, 5], ensembler=ensembler)
    # third row scaled by 100 to act as a clear outlier
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    x_ref = torch.randn((1024, 10))
    lof_torch.fit(x_ref)
    lof_torch.infer_threshold(x_ref, 0.1)
    pred_1 = lof_torch(x)
    # scripting must not change predictions
    lof_torch = torch.jit.script(lof_torch)
    pred_2 = lof_torch(x)
    assert torch.all(pred_1 == pred_2)
    # a save/load round trip must not change predictions either
    lof_torch.save(tmp_path / 'lof_torch.pt')
    lof_torch = torch.load(tmp_path / 'lof_torch.pt')
    pred_2 = lof_torch(x)
    assert torch.all(pred_1 == pred_2)
def test_lof_torch_backend_ts(tmp_path):
    """
    Test the lof torch backend can be initialized and torch scripted, as well as
    saved and loaded to and from disk.
    """
    lof_torch = LOFTorch(k=7)
    # third row scaled by 100 to act as a clear outlier
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    x_ref = torch.randn((1024, 10))
    lof_torch.fit(x_ref)
    lof_torch.infer_threshold(x_ref, 0.1)
    pred_1 = lof_torch(x)
    # scripting must not change predictions
    lof_torch = torch.jit.script(lof_torch)
    pred_2 = lof_torch(x)
    assert torch.all(pred_1 == pred_2)
    # a save/load round trip must not change predictions either
    lof_torch.save(tmp_path / 'lof_torch.pt')
    lof_torch = torch.load(tmp_path / 'lof_torch.pt')
    pred_2 = lof_torch(x)
    assert torch.all(pred_1 == pred_2)
def test_lof_kernel(ensembler):
    """
    Test the lof torch backend can be correctly initialized with a kernel, fit
    on data and used to predict outliers.
    """
    # use a Gaussian RBF kernel in place of the default distance metric
    kernel = GaussianRBF(sigma=torch.tensor((1)))
    lof_torch = LOFTorch(k=[4, 5], kernel=kernel, ensembler=ensembler)
    x_ref = torch.randn((1024, 10))
    lof_torch.fit(x_ref)
    # third row scaled by 100 to act as a clear outlier
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    lof_torch.infer_threshold(x_ref, 0.1)
    outputs = lof_torch.predict(x)
    assert torch.all(outputs.is_outlier == torch.tensor([0, 0, 1]))
    assert torch.all(lof_torch(x) == torch.tensor([0, 0, 1]))
@pytest.mark.skip(reason="Can't convert GaussianRBF to torch script due to torch script type constraints")
def test_lof_kernel_ts(ensembler):
    """
    Test the lof torch backend can be correctly initialized with a kernel,
    and torch scripted, as well as saved and loaded to and from disk.
    """
    kernel = GaussianRBF(sigma=torch.tensor((0.25)))
    lof_torch = LOFTorch(k=[4, 5], kernel=kernel, ensembler=ensembler)
    x_ref = torch.randn((1024, 10))
    lof_torch.fit(x_ref)
    # third row scaled by 100 to act as a clear outlier
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    lof_torch.infer_threshold(x_ref, 0.1)
    pred_1 = lof_torch(x)
    # scripting should preserve predictions (currently skipped, see reason above)
    lof_torch = torch.jit.script(lof_torch)
    pred_2 = lof_torch(x)
    assert torch.all(pred_1 == pred_2)
def test_lof_torch_backend_ensemble_fit_errors(ensembler):
    """Tests the correct errors are raised when using the LOFTorch backend as an ensemble."""
    detector = LOFTorch(k=[4, 5], ensembler=ensembler)
    x = torch.randn((1, 10))
    # Both the forward call and predict must refuse to run before fit.
    with pytest.raises(NotFittedError) as excinfo:
        detector(x)
    assert str(excinfo.value) == 'LOFTorch has not been fit!'
    with pytest.raises(NotFittedError) as excinfo:
        detector.predict(x)
    assert str(excinfo.value) == 'LOFTorch has not been fit!'
    # Fitting flips the fitted flag.
    x_ref = torch.randn((1024, 10))
    detector.fit(x_ref)
    assert detector.fitted
    # With an ensembler configured, both forward and predict still require an
    # inferred threshold (the ensembler is fit during infer_threshold).
    expected = 'LOFTorch has no threshold set, call `infer_threshold` to fit one!'
    with pytest.raises(ThresholdNotInferredError) as excinfo:
        detector(x)
    assert str(excinfo.value) == expected
    with pytest.raises(ThresholdNotInferredError) as excinfo:
        detector.predict(x)
    assert str(excinfo.value) == expected
def test_lof_torch_backend_fit_errors():
    """Tests the correct errors are raised when using the LOFTorch backend as a single detector."""
    detector = LOFTorch(k=4)
    x = torch.randn((1, 10))
    # Both the forward call and predict must refuse to run before fit.
    with pytest.raises(NotFittedError) as excinfo:
        detector(x)
    assert str(excinfo.value) == 'LOFTorch has not been fit!'
    with pytest.raises(NotFittedError) as excinfo:
        detector.predict(x)
    assert str(excinfo.value) == 'LOFTorch has not been fit!'
    # Fitting flips the fitted flag.
    x_ref = torch.randn((1024, 10))
    detector.fit(x_ref)
    assert detector.fitted
    # The forward call still needs an inferred threshold ...
    with pytest.raises(ThresholdNotInferredError) as excinfo:
        detector(x)
    assert str(excinfo.value) == 'LOFTorch has no threshold set, call `infer_threshold` to fit one!'
    # ... but predict works without one for a single (non-ensemble) detector.
    detector.predict(x)
def test_lof_infer_threshold_value_errors():
    """Tests the correct errors are raised when using incorrect choice of fpr for the LOFTorch backend detector."""
    detector = LOFTorch(k=4)
    x = torch.randn((1024, 10))
    detector.fit(x)
    # fpr below 1/len(x) would exclude every point in the reference dataset
    with pytest.raises(ValueError) as excinfo:
        detector.infer_threshold(x, 1/1025)
    assert str(excinfo.value) == '`fpr` must be greater than `1/len(x)=0.0009765625`.'
    # fpr must lie strictly between 0 and 1
    with pytest.raises(ValueError) as excinfo:
        detector.infer_threshold(x, 1.1)
    assert str(excinfo.value) == '`fpr` must be in `(0, 1)`.'
    # boundary-adjacent but valid values succeed
    detector.infer_threshold(x, 0.99)
    detector.infer_threshold(x, 1/1023)
| 7,975 | 35.090498 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__lof/test__lof.py | import pytest
import numpy as np
import torch
from alibi_detect.od._lof import LOF
from alibi_detect.od.pytorch.ensemble import AverageAggregator, TopKAggregator, MaxAggregator, \
MinAggregator, ShiftAndScaleNormalizer, PValNormalizer
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
from sklearn.datasets import make_moons
def make_lof_detector(k=5, aggregator=None, normalizer=None):
    """Build a LOF detector fitted on 100 random 2-d points with a threshold inferred at fpr=0.1."""
    detector = LOF(k=k, aggregator=aggregator, normalizer=normalizer)
    reference = np.random.randn(100, 2)
    detector.fit(reference)
    detector.infer_threshold(reference, 0.1)
    return detector
def test_unfitted_lof_single_score():
    """infer_threshold, score and predict must all raise NotFittedError before fit."""
    detector = LOF(k=10)
    x = np.array([[0, 10], [0.1, 0]])
    x_ref = np.random.randn(100, 2)
    expected_msg = 'LOF has not been fit!'
    with pytest.raises(NotFittedError) as excinfo:
        detector.infer_threshold(x_ref, 0.1)
    assert str(excinfo.value) == expected_msg
    with pytest.raises(NotFittedError) as excinfo:
        detector.score(x)
    assert str(excinfo.value) == expected_msg
    with pytest.raises(NotFittedError) as excinfo:
        detector.predict(x)
    assert str(excinfo.value) == expected_msg
def test_fitted_lof_score():
    """
    Test fitted but not threshold inferred non-ensemble detectors can still score data using the predict method.

    Unlike the ensemble detectors, the non-ensemble detectors do not require the ensembler to be fit in the
    infer_threshold method. See the test_fitted_lof_ensemble_score test for the ensemble case.
    """
    lof_detector = LOF(k=10)
    x_ref = np.random.randn(100, 2)
    lof_detector.fit(x_ref)
    x = np.array([[0, 10], [0.1, 0]])
    y = lof_detector.predict(x)
    y = y['data']
    # the far-away instance should score higher than the nearby one
    assert y['instance_score'][0] > y['instance_score'][1]
    # threshold-dependent fields stay unset until infer_threshold is called
    assert not y['threshold_inferred']
    assert y['threshold'] is None
    assert y['is_outlier'] is None
    assert y['p_value'] is None
def test_fitted_lof_ensemble_score():
    """
    Test fitted but not threshold inferred ensemble detectors correctly raise an error when calling
    the predict method. This is because the ensembler is fit in the infer_threshold method.
    """
    detector = LOF(k=[10, 14, 18])
    detector.fit(np.random.randn(100, 2))
    x = np.array([[0, 10], [0.1, 0]])
    # both entry points depend on the (still unfitted) ensembler
    for method in (detector.predict, detector.score):
        with pytest.raises(ThresholdNotInferredError):
            method(x)
def test_incorrect_lof_ensemble_init():
    """Passing multiple k values with no aggregator must raise a ValueError."""
    with pytest.raises(ValueError) as excinfo:
        LOF(k=[8, 9, 10], aggregator=None)
    expected = ('If `k` is a `np.ndarray`, `list` or `tuple`, '
                'the `aggregator` argument cannot be ``None``.')
    assert str(excinfo.value) == expected
def test_fitted_lof_predict():
    """
    Test that a detector fitted on data and with threshold inferred correctly, will score
    and label outliers, as well as return the p-values using the predict method. Also Check
    that the score method gives the same results.
    """
    lof_detector = make_lof_detector(k=10)
    # re-infer the threshold on a fresh reference sample
    x_ref = np.random.randn(100, 2)
    lof_detector.infer_threshold(x_ref, 0.1)
    x = np.array([[0, 10], [0, 0.1]])
    y = lof_detector.predict(x)
    y = y['data']
    scores = lof_detector.score(x)
    # predict and score must agree on the instance scores
    assert np.all(y['instance_score'] == scores)
    assert y['instance_score'][0] > y['instance_score'][1]
    assert y['threshold_inferred']
    assert y['threshold'] is not None
    assert y['p_value'].all()
    assert (y['is_outlier'] == [True, False]).all()
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_unfitted_lof_ensemble(aggregator, normalizer):
lof_detector = LOF(
k=[8, 9, 10],
aggregator=aggregator(),
normalizer=normalizer()
)
x = np.array([[0, 10], [0.1, 0]])
# Test unfit lof ensemble raises exception when calling predict method.
with pytest.raises(NotFittedError) as err:
_ = lof_detector.predict(x)
assert str(err.value) == 'LOF has not been fit!'
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_fitted_lof_ensemble(aggregator, normalizer):
lof_detector = LOF(
k=[8, 9, 10],
aggregator=aggregator(),
normalizer=normalizer()
)
x_ref = np.random.randn(100, 2)
lof_detector.fit(x_ref)
x = np.array([[0, 10], [0, 0.1]])
# test ensemble raises ThresholdNotInferredError if only fit and not threshold inferred and
# the normalizer is not None.
if normalizer() is not None:
with pytest.raises(ThresholdNotInferredError):
lof_detector.predict(x)
else:
lof_detector.predict(x)
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_fitted_lof_ensemble_predict(aggregator, normalizer):
lof_detector = make_lof_detector(
k=[8, 9, 10],
aggregator=aggregator(),
normalizer=normalizer()
)
x = np.array([[0, 10], [0, 0.1]])
# test fitted detectors with inferred thresholds can score data using the predict method.
y = lof_detector.predict(x)
y = y['data']
assert y['threshold_inferred']
assert y['threshold'] is not None
assert y['p_value'].all()
assert (y['is_outlier'] == [True, False]).all()
# test fitted detectors with inferred thresholds can score data using the score method.
scores = lof_detector.score(x)
assert np.all(y['instance_score'] == scores)
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_lof_ensemble_torch_script(aggregator, normalizer):
lof_detector = make_lof_detector(k=[5, 6, 7], aggregator=aggregator(), normalizer=normalizer())
ts_lof = torch.jit.script(lof_detector.backend)
x = torch.tensor([[0, 10], [0, 0.1]])
# test torchscripted ensemble lof detector can be saved and loaded correctly.
y = ts_lof(x)
assert torch.all(y == torch.tensor([True, False]))
def test_lof_single_torchscript():
    """Single-k LOF backends must keep their labels after torch.jit.script."""
    detector = make_lof_detector(k=5)
    scripted = torch.jit.script(detector.backend)
    x = torch.tensor([[0, 10], [0, 0.1]])
    labels = scripted(x)
    assert torch.all(labels == torch.tensor([True, False]))
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator, lambda: 'AverageAggregator',
lambda: 'TopKAggregator', lambda: 'MaxAggregator',
lambda: 'MinAggregator'])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None,
lambda: 'ShiftAndScaleNormalizer', lambda: 'PValNormalizer'])
def test_lof_ensemble_integration(tmp_path, aggregator, normalizer):
"""Test lof ensemble detector on moons dataset.
Tests ensemble lof detector with every combination of aggregator and normalizer on the moons dataset.
Fits and infers thresholds in each case. Verifies that the detector can correctly detect inliers
and outliers and that it can be serialized using the torchscript.
"""
lof_detector = LOF(
k=[10, 14, 18],
aggregator=aggregator(),
normalizer=normalizer()
)
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
lof_detector.fit(X_ref)
lof_detector.infer_threshold(X_ref, 0.1)
result = lof_detector.predict(x_inlier)
result = result['data']['is_outlier'][0]
assert not result
x_outlier = np.array([[-1, 1.5]])
result = lof_detector.predict(x_outlier)
result = result['data']['is_outlier'][0]
assert result
ts_lof = torch.jit.script(lof_detector.backend)
x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
y = ts_lof(x)
assert torch.all(y == torch.tensor([False, True]))
ts_lof.save(tmp_path / 'lof.pt')
lof_detector = torch.load(tmp_path / 'lof.pt')
y = lof_detector(x)
assert torch.all(y == torch.tensor([False, True]))
def test_lof_integration(tmp_path):
    """Test lof detector on moons dataset.

    Tests lof detector on the moons dataset. Fits and infers thresholds and verifies that the detector can
    correctly detect inliers and outliers. Checks that it can be serialized using the torchscript.
    """
    lof_detector = LOF(k=18)
    X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
    # hold out the last sample as a known inlier
    X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
    lof_detector.fit(X_ref)
    lof_detector.infer_threshold(X_ref, 0.1)
    result = lof_detector.predict(x_inlier)
    result = result['data']['is_outlier'][0]
    assert not result
    # a point away from both moons should be flagged
    x_outlier = np.array([[-1, 1.5]])
    result = lof_detector.predict(x_outlier)
    result = result['data']['is_outlier'][0]
    assert result
    # the scripted backend must reproduce the labels ...
    ts_lof = torch.jit.script(lof_detector.backend)
    x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
    y = ts_lof(x)
    assert torch.all(y == torch.tensor([False, True]))
    # ... and survive a save/load round trip
    ts_lof.save(tmp_path / 'lof.pt')
    lof_detector = torch.load(tmp_path / 'lof.pt')
    y = lof_detector(x)
    assert torch.all(y == torch.tensor([False, True]))
| 10,349 | 37.333333 | 112 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__knn/test__knn.py | import pytest
import numpy as np
import torch
from alibi_detect.od._knn import KNN
from alibi_detect.od.pytorch.ensemble import AverageAggregator, TopKAggregator, MaxAggregator, \
MinAggregator, ShiftAndScaleNormalizer, PValNormalizer
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
from sklearn.datasets import make_moons
def make_knn_detector(k=5, aggregator=None, normalizer=None):
    """Build a KNN detector fitted on 100 random 2-d points with a threshold inferred at fpr=0.1."""
    detector = KNN(k=k, aggregator=aggregator, normalizer=normalizer)
    reference = np.random.randn(100, 2)
    detector.fit(reference)
    detector.infer_threshold(reference, 0.1)
    return detector
def test_unfitted_knn_single_score():
    """infer_threshold, score and predict must all raise NotFittedError before fit."""
    detector = KNN(k=10)
    x = np.array([[0, 10], [0.1, 0]])
    x_ref = np.random.randn(100, 2)
    expected_msg = 'KNN has not been fit!'
    with pytest.raises(NotFittedError) as excinfo:
        detector.infer_threshold(x_ref, 0.1)
    assert str(excinfo.value) == expected_msg
    with pytest.raises(NotFittedError) as excinfo:
        detector.score(x)
    assert str(excinfo.value) == expected_msg
    with pytest.raises(NotFittedError) as excinfo:
        detector.predict(x)
    assert str(excinfo.value) == expected_msg
def test_fitted_knn_score():
    """
    Test fitted but not threshold inferred non-ensemble detectors can still score data using the predict method.

    Unlike the ensemble detectors, the non-ensemble detectors do not require the ensembler to be fit in the
    infer_threshold method. See the test_fitted_knn_ensemble_score test for the ensemble case.
    """
    knn_detector = KNN(k=10)
    x_ref = np.random.randn(100, 2)
    knn_detector.fit(x_ref)
    x = np.array([[0, 10], [0.1, 0]])
    y = knn_detector.predict(x)
    y = y['data']
    # first instance lies far from the reference data, second is close
    assert y['instance_score'][0] > 5
    assert y['instance_score'][1] < 1
    # threshold-dependent fields stay unset until infer_threshold is called
    assert not y['threshold_inferred']
    assert y['threshold'] is None
    assert y['is_outlier'] is None
    assert y['p_value'] is None
def test_fitted_knn_ensemble_score():
    """
    Test fitted but not threshold inferred ensemble detectors correctly raise an error when calling
    the predict method. This is because the ensembler is fit in the infer_threshold method.
    """
    detector = KNN(k=[10, 14, 18])
    detector.fit(np.random.randn(100, 2))
    x = np.array([[0, 10], [0.1, 0]])
    # both entry points depend on the (still unfitted) ensembler
    for method in (detector.predict, detector.score):
        with pytest.raises(ThresholdNotInferredError):
            method(x)
def test_incorrect_knn_ensemble_init():
    """Passing multiple k values with no aggregator must raise a ValueError."""
    with pytest.raises(ValueError) as excinfo:
        KNN(k=[8, 9, 10], aggregator=None)
    expected = ('If `k` is a `np.ndarray`, `list` or `tuple`, '
                'the `aggregator` argument cannot be ``None``.')
    assert str(excinfo.value) == expected
def test_fitted_knn_predict():
    """
    Test that a detector fitted on data and with threshold inferred correctly, will score
    and label outliers, as well as return the p-values using the predict method. Also Check
    that the score method gives the same results.
    """
    knn_detector = make_knn_detector(k=10)
    # re-infer the threshold on a fresh reference sample
    x_ref = np.random.randn(100, 2)
    knn_detector.infer_threshold(x_ref, 0.1)
    x = np.array([[0, 10], [0, 0.1]])
    y = knn_detector.predict(x)
    y = y['data']
    scores = knn_detector.score(x)
    # predict and score must agree on the instance scores
    assert np.all(y['instance_score'] == scores)
    assert y['instance_score'][0] > 5
    assert y['instance_score'][1] < 1
    assert y['threshold_inferred']
    assert y['threshold'] is not None
    assert y['p_value'].all()
    assert (y['is_outlier'] == [True, False]).all()
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_unfitted_knn_ensemble(aggregator, normalizer):
knn_detector = KNN(
k=[8, 9, 10],
aggregator=aggregator(),
normalizer=normalizer()
)
x = np.array([[0, 10], [0.1, 0]])
# Test unfit knn ensemble raises exception when calling predict method.
with pytest.raises(NotFittedError) as err:
_ = knn_detector.predict(x)
assert str(err.value) == 'KNN has not been fit!'
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
                                        MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_fitted_knn_ensemble(aggregator, normalizer):
    """A fitted-but-unthresholded ensemble only predicts when no normalizer is set."""
    detector = KNN(k=[8, 9, 10], aggregator=aggregator(), normalizer=normalizer())
    detector.fit(np.random.randn(100, 2))
    queries = np.array([[0, 10], [0, 0.1]])
    # With a normalizer, the ensembler is only fit during `infer_threshold`, so
    # predicting beforehand must raise; without one, prediction is allowed.
    if normalizer() is None:
        detector.predict(queries)
    else:
        with pytest.raises(ThresholdNotInferredError):
            detector.predict(queries)
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
                                        MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_fitted_knn_ensemble_predict(aggregator, normalizer):
    """Fitted and thresholded ensembles score and label data through `predict`."""
    detector = make_knn_detector(
        k=[8, 9, 10],
        aggregator=aggregator(),
        normalizer=normalizer()
    )
    queries = np.array([[0, 10], [0, 0.1]])
    preds = detector.predict(queries)['data']
    assert preds['threshold_inferred']
    assert preds['threshold'] is not None
    assert preds['p_value'].all()
    assert (preds['is_outlier'] == [True, False]).all()
    # `score` must agree with the `instance_score` returned by `predict`
    assert np.all(preds['instance_score'] == detector.score(queries))
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
                                        MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_knn_ensemble_torch_script(aggregator, normalizer):
    """The ensemble KNN backend must survive torch.jit.script and still label outliers."""
    detector = make_knn_detector(k=[5, 6, 7], aggregator=aggregator(), normalizer=normalizer())
    scripted = torch.jit.script(detector.backend)
    batch = torch.tensor([[0, 10], [0, 0.1]])
    labels = scripted(batch)
    assert torch.all(labels == torch.tensor([True, False]))
def test_knn_single_torchscript():
    """A single-k KNN backend must survive torch.jit.script and still label outliers."""
    detector = make_knn_detector(k=5)
    scripted = torch.jit.script(detector.backend)
    batch = torch.tensor([[0, 10], [0, 0.1]])
    assert torch.all(scripted(batch) == torch.tensor([True, False]))
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
                                        MaxAggregator, MinAggregator, lambda: 'AverageAggregator',
                                        lambda: 'TopKAggregator', lambda: 'MaxAggregator',
                                        lambda: 'MinAggregator'])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None,
                                        lambda: 'ShiftAndScaleNormalizer', lambda: 'PValNormalizer'])
def test_knn_ensemble_integration(tmp_path, aggregator, normalizer):
    """Test knn ensemble detector on moons dataset.
    Tests ensemble knn detector with every combination of aggregator and normalizer on the moons dataset.
    Fits and infers thresholds in each case. Verifies that the detector can correctly detect inliers
    and outliers and that it can be serialized using the torchscript.
    """
    # Aggregators/normalizers are parametrized both as callables and as their
    # string names, so both construction paths of KNN are exercised.
    knn_detector = KNN(
        k=[10, 14, 18],
        aggregator=aggregator(),
        normalizer=normalizer()
    )
    # Hold out the last moons sample as a known inlier.
    X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
    X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
    knn_detector.fit(X_ref)
    knn_detector.infer_threshold(X_ref, 0.1)
    result = knn_detector.predict(x_inlier)
    result = result['data']['is_outlier'][0]
    assert not result
    # A point far off both moons must be flagged as an outlier.
    x_outlier = np.array([[-1, 1.5]])
    result = knn_detector.predict(x_outlier)
    result = result['data']['is_outlier'][0]
    assert result
    # The torch backend must be scriptable and agree with eager predictions.
    ts_knn = torch.jit.script(knn_detector.backend)
    x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
    y = ts_knn(x)
    assert torch.all(y == torch.tensor([False, True]))
    # Round-trip the scripted module through disk and re-check the labels.
    ts_knn.save(tmp_path / 'knn.pt')
    knn_detector = torch.load(tmp_path / 'knn.pt')
    y = knn_detector(x)
    assert torch.all(y == torch.tensor([False, True]))
def test_knn_integration(tmp_path):
    """Test knn detector on moons dataset.
    Tests knn detector on the moons dataset. Fits and infers thresholds and verifies that the detector can
    correctly detect inliers and outliers. Checks that it can be serialized using the torchscript.
    """
    knn_detector = KNN(k=18)
    # Hold out the last moons sample as a known inlier.
    X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
    X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
    knn_detector.fit(X_ref)
    knn_detector.infer_threshold(X_ref, 0.1)
    result = knn_detector.predict(x_inlier)
    result = result['data']['is_outlier'][0]
    assert not result
    # A point far off both moons must be flagged as an outlier.
    x_outlier = np.array([[-1, 1.5]])
    result = knn_detector.predict(x_outlier)
    result = result['data']['is_outlier'][0]
    assert result
    # The torch backend must be scriptable and agree with eager predictions.
    ts_knn = torch.jit.script(knn_detector.backend)
    x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
    y = ts_knn(x)
    assert torch.all(y == torch.tensor([False, True]))
    # Round-trip the scripted module through disk and re-check the labels.
    ts_knn.save(tmp_path / 'knn.pt')
    knn_detector = torch.load(tmp_path / 'knn.pt')
    y = knn_detector(x)
    assert torch.all(y == torch.tensor([False, True]))
| 10,379 | 37.161765 | 112 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__knn/test__knn_backend.py | import pytest
import torch
from alibi_detect.od.pytorch.knn import KNNTorch
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.od.pytorch.ensemble import Ensembler, PValNormalizer, AverageAggregator
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
@pytest.fixture(scope='function')
def ensembler(request):
    """Provide a fresh PVal-normalizing, average-aggregating ensembler per test."""
    normalizer = PValNormalizer()
    aggregator = AverageAggregator()
    return Ensembler(normalizer=normalizer, aggregator=aggregator)
def test_knn_torch_backend():
    """
    The KNNTorch backend can be initialized, fit and used to flag outliers;
    before `infer_threshold` only raw scores are available, afterwards the
    scaled third row is labelled as the sole outlier.
    """
    backend = KNNTorch(k=5)
    # scale the last row so it is an obvious outlier relative to the reference
    queries = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    reference = torch.randn((1024, 10))
    backend.fit(reference)
    out = backend.predict(queries)
    assert out.instance_score.shape == (3, )
    # no threshold yet -> no labels or p-values
    assert out.is_outlier is None
    assert out.p_value is None
    assert torch.all(backend.score(queries) == out.instance_score)
    backend.infer_threshold(reference, 0.1)
    out = backend.predict(queries)
    expected = torch.tensor([False, False, True])
    assert torch.all(out.is_outlier == expected)
    assert torch.all(backend(queries) == expected)
def test_knn_torch_backend_ensemble(ensembler):
    """
    An ensemble KNNTorch backend fits on reference data and flags the scaled
    third row as the only outlier.
    """
    backend = KNNTorch(k=[4, 5], ensembler=ensembler)
    reference = torch.randn((1024, 10))
    backend.fit(reference)
    queries = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    backend.infer_threshold(reference, 0.1)
    expected = torch.tensor([False, False, True])
    assert torch.all(backend.predict(queries).is_outlier == expected)
    assert torch.all(backend(queries) == expected)
def test_knn_torch_backend_ensemble_ts(tmp_path, ensembler):
    """
    Test the knn torch backend can be initialized as an ensemble and
    torchscripted, as well as saved and loaded to and from disk.
    """
    knn_torch = KNNTorch(k=[4, 5], ensembler=ensembler)
    # Scale the last row so it is an obvious outlier relative to x_ref.
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    x_ref = torch.randn((1024, 10))
    knn_torch.fit(x_ref)
    knn_torch.infer_threshold(x_ref, 0.1)
    pred_1 = knn_torch(x)
    # Scripting must not change the predictions.
    knn_torch = torch.jit.script(knn_torch)
    pred_2 = knn_torch(x)
    assert torch.all(pred_1 == pred_2)
    # Neither must a save/load round-trip through disk.
    knn_torch.save(tmp_path / 'knn_torch.pt')
    knn_torch = torch.load(tmp_path / 'knn_torch.pt')
    pred_2 = knn_torch(x)
    assert torch.all(pred_1 == pred_2)
def test_knn_torch_backend_ts(tmp_path):
    """
    Test the knn torch backend can be initialized and torchscripted, as well as
    saved and loaded to and from disk.
    """
    knn_torch = KNNTorch(k=7)
    # Scale the last row so it is an obvious outlier relative to x_ref.
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    x_ref = torch.randn((1024, 10))
    knn_torch.fit(x_ref)
    knn_torch.infer_threshold(x_ref, 0.1)
    pred_1 = knn_torch(x)
    # Scripting must not change the predictions.
    knn_torch = torch.jit.script(knn_torch)
    pred_2 = knn_torch(x)
    assert torch.all(pred_1 == pred_2)
    # Neither must a save/load round-trip through disk.
    knn_torch.save(tmp_path / 'knn_torch.pt')
    knn_torch = torch.load(tmp_path / 'knn_torch.pt')
    pred_2 = knn_torch(x)
    assert torch.all(pred_1 == pred_2)
def test_knn_kernel(ensembler):
    """
    A kernel-based ensemble KNNTorch backend fits on reference data and flags
    the scaled third row as the only outlier.
    """
    rbf = GaussianRBF(sigma=torch.tensor((0.25)))
    backend = KNNTorch(k=[4, 5], kernel=rbf, ensembler=ensembler)
    reference = torch.randn((1024, 10))
    backend.fit(reference)
    queries = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    backend.infer_threshold(reference, 0.1)
    expected = torch.tensor([False, False, True])
    assert torch.all(backend.predict(queries).is_outlier == expected)
    assert torch.all(backend(queries) == expected)
@pytest.mark.skip(reason="Can't convert GaussianRBF to torchscript due to torchscript type constraints")
def test_knn_kernel_ts(ensembler):
    """
    Test the knn torch backend can be correctly initialized with a kernel,
    and torchscripted, as well as saved and loaded to and from disk.
    """
    kernel = GaussianRBF(sigma=torch.tensor((0.25)))
    knn_torch = KNNTorch(k=[4, 5], kernel=kernel, ensembler=ensembler)
    x_ref = torch.randn((1024, 10))
    knn_torch.fit(x_ref)
    # Scale the last row so it is an obvious outlier relative to x_ref.
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    knn_torch.infer_threshold(x_ref, 0.1)
    pred_1 = knn_torch(x)
    # Scripting must not change the predictions.
    knn_torch = torch.jit.script(knn_torch)
    pred_2 = knn_torch(x)
    assert torch.all(pred_1 == pred_2)
def test_knn_torch_backend_ensemble_fit_errors(ensembler):
    """Tests the correct errors are raised when using the KNNTorch backend as an ensemble."""
    knn_torch = KNNTorch(k=[4, 5], ensembler=ensembler)
    # Test that the backend raises an error if it is not fitted before
    # calling forward method.
    x = torch.randn((1, 10))
    with pytest.raises(NotFittedError) as err:
        knn_torch(x)
    assert str(err.value) == 'KNNTorch has not been fit!'
    # Test that the backend raises an error if it is not fitted before
    # predicting.
    with pytest.raises(NotFittedError) as err:
        knn_torch.predict(x)
    assert str(err.value) == 'KNNTorch has not been fit!'
    # Test the backend updates fitted flag on fit.
    x_ref = torch.randn((1024, 10))
    knn_torch.fit(x_ref)
    assert knn_torch.fitted
    # Test that the backend raises an error if the forward method is called
    # without the threshold being inferred.
    with pytest.raises(ThresholdNotInferredError) as err:
        knn_torch(x)
    assert str(err.value) == 'KNNTorch has no threshold set, call `infer_threshold` to fit one!'
    # Unlike the single-k backend, an ensemble also requires the threshold to be
    # inferred (which fits the ensembler) before `predict` can be called.
    with pytest.raises(ThresholdNotInferredError) as err:
        knn_torch.predict(x)
    assert str(err.value) == 'KNNTorch has no threshold set, call `infer_threshold` to fit one!'
def test_knn_torch_backend_fit_errors():
    """Tests the correct errors are raised when using the KNNTorch backend as a single detector."""
    knn_torch = KNNTorch(k=4)
    # Test that the backend raises an error if it is not fitted before
    # calling forward method.
    x = torch.randn((1, 10))
    with pytest.raises(NotFittedError) as err:
        knn_torch(x)
    assert str(err.value) == 'KNNTorch has not been fit!'
    # Test that the backend raises an error if it is not fitted before
    # predicting.
    with pytest.raises(NotFittedError) as err:
        knn_torch.predict(x)
    assert str(err.value) == 'KNNTorch has not been fit!'
    # Test the backend updates fitted flag on fit.
    x_ref = torch.randn((1024, 10))
    knn_torch.fit(x_ref)
    assert knn_torch.fitted
    # Test that the backend raises an error if the forward method is called
    # without the threshold being inferred.
    with pytest.raises(ThresholdNotInferredError) as err:
        knn_torch(x)
    assert str(err.value) == 'KNNTorch has no threshold set, call `infer_threshold` to fit one!'
    # Test that the backend can call predict without the threshold being inferred.
    knn_torch.predict(x)
def test_knn_infer_threshold_value_errors():
    """Bad `fpr` choices passed to `infer_threshold` must raise `ValueError`."""
    backend = KNNTorch(k=4)
    data = torch.randn((1024, 10))
    backend.fit(data)
    # An fpr at or below 1/len(x) would exclude every reference point.
    with pytest.raises(ValueError) as err:
        backend.infer_threshold(data, 1/1025)
    assert str(err.value) == '`fpr` must be greater than `1/len(x)=0.0009765625`.'
    # fpr is a rate and must lie strictly inside (0, 1).
    with pytest.raises(ValueError) as err:
        backend.infer_threshold(data, 1.1)
    assert str(err.value) == '`fpr` must be in `(0, 1)`.'
    # Values just inside the valid range are accepted.
    backend.infer_threshold(data, 0.99)
    backend.infer_threshold(data, 1/1023)
| 7,995 | 35.180995 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__pca/test__pca.py | import pytest
import numpy as np
import torch
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.od._pca import PCA
from alibi_detect.exceptions import NotFittedError
from sklearn.datasets import make_moons
def fit_PCA_detector(detector):
    """Build a detector from the given factory, fit it on random reference data
    and infer its threshold at fpr=0.1."""
    fitted = detector()
    reference = np.random.randn(100, 3)
    fitted.fit(reference)
    fitted.infer_threshold(reference, 0.1)
    return fitted
@pytest.mark.parametrize('detector', [
    lambda: PCA(n_components=3),
    lambda: PCA(n_components=3, kernel=GaussianRBF())
])
def test_unfitted_PCA_single_score(detector):
    """An unfit (kernel) PCA detector must raise `NotFittedError` everywhere."""
    pca = detector()
    queries = np.array([[0, 10, 11], [0.1, 0, 11]])
    reference = np.random.randn(100, 3)
    expected_msg = f'{pca.__class__.__name__} has not been fit!'
    # threshold inference requires a prior fit
    with pytest.raises(NotFittedError) as err:
        pca.infer_threshold(reference, 0.1)
    assert str(err.value) == expected_msg
    # scoring requires a prior fit
    with pytest.raises(NotFittedError) as err:
        pca.score(queries)
    assert str(err.value) == expected_msg
    # prediction requires a prior fit
    with pytest.raises(NotFittedError) as err:
        pca.predict(queries)
    assert str(err.value) == expected_msg
def test_pca_value_errors():
    """Constructor/fit validation for PCA `n_components`.

    Covers the three invalid configurations: non-positive `n_components`,
    linear PCA with `n_components` >= number of features, and kernel PCA with
    `n_components` >= number of reference instances.
    """
    with pytest.raises(ValueError) as err:
        PCA(n_components=0)
    assert str(err.value) == 'n_components must be at least 1'
    with pytest.raises(ValueError) as err:
        pca = PCA(n_components=4)
        pca.fit(np.random.randn(100, 3))
    # Assert moved OUTSIDE the `with` block: previously it sat after the raising
    # `fit` call inside the block and was therefore never executed.
    assert str(err.value) == 'n_components must be less than the number of features.'
    with pytest.raises(ValueError) as err:
        pca = PCA(n_components=10, kernel=GaussianRBF())
        pca.fit(np.random.randn(9, 3))
    # Same fix as above: this message check now actually runs.
    assert str(err.value) == 'n_components must be less than the number of reference instances.'
@pytest.mark.parametrize('detector', [
    lambda: PCA(n_components=2),
    lambda: PCA(n_components=2, kernel=GaussianRBF())
])
def test_fitted_PCA_score(detector):
    """A fitted-but-unthresholded (kernel) PCA detector can still score via `predict`.

    Without an inferred threshold, `predict` must not raise but must leave the
    threshold-dependent fields (`threshold`, `p_value`, `is_outlier`) unset.
    """
    pca_detector = detector()
    pca_detector.fit(np.random.randn(100, 3))
    queries = np.array([[0, 10, 0], [0.1, 0, 0]])
    preds = pca_detector.predict(queries)['data']
    # the far-away first point must score higher than the near second point
    assert preds['instance_score'][0] > preds['instance_score'][1]
    assert not preds['threshold_inferred']
    assert preds['threshold'] is None
    assert preds['is_outlier'] is None
    assert preds['p_value'] is None
@pytest.mark.parametrize('detector', [
    lambda: PCA(n_components=2),
    lambda: PCA(n_components=2, kernel=GaussianRBF())
])
def test_fitted_PCA_predict(detector):
    """A fitted and thresholded (kernel) PCA detector returns full prediction metadata."""
    pca_detector = fit_PCA_detector(detector)
    reference = np.random.randn(100, 3)
    pca_detector.infer_threshold(reference, 0.1)
    queries = np.array([[0, 10, 0], [0.1, 0, 0]])
    preds = pca_detector.predict(queries)['data']
    # the far-away first point must score higher than the near second point
    assert preds['instance_score'][0] > preds['instance_score'][1]
    assert preds['threshold_inferred']
    assert preds['threshold'] is not None
    assert preds['p_value'].all()
    assert (preds['is_outlier'] == [True, False]).all()
def test_PCA_integration(tmp_path):
    """Test Linear PCA detector on moons dataset.

    Fits on reference data, infers a threshold, checks an inlier/outlier pair,
    and verifies the backend round-trips through torchscript save/load.
    """
    pca_detector = PCA(n_components=1)
    # Hold out the last moons sample as a known inlier.
    X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
    X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
    pca_detector.fit(X_ref)
    pca_detector.infer_threshold(X_ref, 0.1)
    result = pca_detector.predict(x_inlier)
    result = result['data']['is_outlier'][0]
    assert not result
    # A point far below both moons must be flagged as an outlier.
    x_outlier = np.array([[0, -3]])
    result = pca_detector.predict(x_outlier)
    result = result['data']['is_outlier'][0]
    assert result
    # The torch backend must be scriptable and agree with eager predictions.
    ts_PCA = torch.jit.script(pca_detector.backend)
    x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
    y = ts_PCA(x)
    assert torch.all(y == torch.tensor([False, True]))
    ts_PCA.save(tmp_path / 'pca.pt')
    # Load the scripted backend back from disk. (A redundant
    # `PCA(n_components=1)` re-construction that was immediately overwritten
    # here has been removed — it was dead code.)
    pca_detector = torch.load(tmp_path / 'pca.pt')
    y = pca_detector(x)
    assert torch.all(y == torch.tensor([False, True]))
def test_kernel_PCA_integration():
    """Kernel PCA separates inliers from outliers on the moons dataset.

    Fits the detector on reference data, infers a threshold and checks one
    held-out inlier and one hand-picked outlier.
    """
    detector = PCA(n_components=10, kernel=GaussianRBF())
    moons, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
    reference, inlier = moons[0:1000], moons[1000][None]
    detector.fit(reference)
    detector.infer_threshold(reference, 0.1)
    assert not detector.predict(inlier)['data']['is_outlier'][0]
    # a point between the two moons is off-manifold for the kernel embedding
    outlier = np.array([[1, 1]])
    assert detector.predict(outlier)['data']['is_outlier'][0]
@pytest.mark.skip(reason='GaussianRBF kernel does not have torchscript support yet.')
def test_kernel_PCA_integration_ts():
    """Scripting the kernel PCA backend (currently unsupported, hence skipped)."""
    detector = PCA(n_components=10, kernel=GaussianRBF())
    moons, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
    reference, inlier = moons[0:1000], moons[1000][None]
    detector.fit(reference)
    detector.infer_threshold(reference, 0.1)
    outlier = np.array([[1, 1]])
    scripted = torch.jit.script(detector.backend)
    batch = torch.tensor([inlier[0], outlier[0]], dtype=torch.float32)
    assert torch.all(scripted(batch) == torch.tensor([False, True]))
| 6,603 | 35.893855 | 106 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__pca/test__pca_backend.py | import pytest
import torch
import numpy as np
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.od.pytorch.pca import LinearPCATorch, KernelPCATorch
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
@pytest.mark.parametrize('backend_detector', [
    lambda: LinearPCATorch(n_components=5),
    lambda: KernelPCATorch(n_components=5, kernel=GaussianRBF())
])
def test_pca_torch_backend_fit_errors(backend_detector):
    """Test Linear and Kernel PCA detector backend fit errors.
    Test that an unfit detector backend raises an error when calling predict or score. Test that the
    detector backend raises an error when calling the forward method while the threshold has not been
    inferred.
    """
    pca_torch = backend_detector()
    assert not pca_torch.fitted
    x = torch.randn((1, 10))
    # Both __call__ and predict must refuse to run before fitting.
    with pytest.raises(NotFittedError) as err:
        pca_torch(x)
    assert str(err.value) == f'{pca_torch.__class__.__name__} has not been fit!'
    with pytest.raises(NotFittedError) as err:
        pca_torch.predict(x)
    assert str(err.value) == f'{pca_torch.__class__.__name__} has not been fit!'
    x_ref = torch.randn((1024, 10))
    pca_torch.fit(x_ref)
    assert pca_torch.fitted
    # After fitting, __call__ still requires an inferred threshold...
    with pytest.raises(ThresholdNotInferredError) as err:
        pca_torch(x)
    assert str(err.value) == (f'{pca_torch.__class__.__name__} has no threshold set, '
                              'call `infer_threshold` to fit one!')
    # ...but predict succeeds without one.
    assert pca_torch.predict(x)
@pytest.mark.parametrize('backend_detector', [
    lambda: LinearPCATorch(n_components=1),
    lambda: KernelPCATorch(n_components=1, kernel=GaussianRBF())
])
def test_pca_scoring(backend_detector):
    """Test Linear and Kernel PCATorch detector backend scoring methods.
    Test that the detector correctly detects true outliers and that the correct proportion of in
    distribution data is flagged as outliers.
    """
    pca_torch = backend_detector()
    mean = [8, 8]
    cov = [[2., 0.], [0., 1.]]
    x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    pca_torch.fit(x_ref)
    # Three probes of increasing distance from the reference distribution mean.
    x_1 = torch.tensor([[8., 8.]], dtype=torch.float64)
    scores_1 = pca_torch.score(x_1)
    x_2 = torch.tensor([[10., 8.]], dtype=torch.float64)
    scores_2 = pca_torch.score(x_2)
    x_3 = torch.tensor([[8., 20.]], dtype=torch.float64)
    scores_3 = pca_torch.score(x_3)
    # test correct ordering of scores given outlyingness of data
    assert scores_1 < scores_2 < scores_3
    # test that detector correctly detects true Outlier
    pca_torch.infer_threshold(x_ref, 0.01)
    x = torch.cat((x_1, x_2, x_3))
    outputs = pca_torch.predict(x)
    assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
    assert torch.all(pca_torch(x) == torch.tensor([False, False, True]))
    # test that 0.01 of the in distribution data is flagged as outliers
    x = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    outputs = pca_torch.predict(x)
    # NOTE(review): this bound is one-sided — if fewer than 1% are flagged the
    # left side is negative and the assert passes trivially; abs() may be intended.
    assert (outputs.is_outlier.sum()/1000) - 0.01 < 0.005
def test_pca_linear_torch_backend_ts(tmp_path):
    """The linear PCA backend is torch-scriptable and save/load round-trips cleanly."""
    backend = LinearPCATorch(n_components=5)
    # scale the last row so it is an obvious outlier relative to the reference
    queries = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    reference = torch.randn((1024, 10))
    backend.fit(reference)
    backend.infer_threshold(reference, 0.1)
    expected = backend(queries)
    # scripting must not change the predictions
    backend = torch.jit.script(backend)
    assert torch.all(backend(queries) == expected)
    # neither must a save/load round-trip through disk
    artefact = tmp_path / 'pca_torch.pt'
    backend.save(artefact)
    backend = torch.load(artefact)
    assert torch.all(backend(queries) == expected)
@pytest.mark.skip(reason='GaussianRBF kernel does not have torchscript support yet.')
def test_pca_kernel_torch_backend_ts(tmp_path):
    """The kernel PCA backend should be torch-scriptable and savable (currently skipped)."""
    backend = KernelPCATorch(n_components=5, kernel=GaussianRBF())
    # scale the last row so it is an obvious outlier relative to the reference
    queries = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    reference = torch.randn((1024, 10))
    backend.fit(reference)
    backend.infer_threshold(reference, 0.1)
    expected = backend(queries)
    # scripting must not change the predictions
    backend = torch.jit.script(backend)
    assert torch.all(backend(queries) == expected)
    # neither must a save/load round-trip through disk
    artefact = tmp_path / 'pca_torch.pt'
    backend.save(artefact)
    backend = torch.load(artefact)
    assert torch.all(backend(queries) == expected)
| 4,470 | 34.768 | 101 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__svm/test__svm_pytorch_backend.py | import pytest
import numpy as np
import torch
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.od.pytorch.svm import BgdSVMTorch, SgdSVMTorch
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
@pytest.mark.parametrize('backend_cls', [BgdSVMTorch, SgdSVMTorch])
def test_svm_pytorch_scoring(backend_cls):
    """Test SVM detector pytorch scoring method.
    Tests the scoring method of the SVMTorch pytorch backend detector.
    """
    sigma = torch.tensor(2)
    svm_torch = backend_cls(
        n_components=100,
        kernel=GaussianRBF(sigma=sigma),
        nu=0.1
    )
    mean = [8, 8]
    cov = [[2., 0.], [0., 1.]]
    x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    svm_torch.fit(x_ref)
    # Three probes of increasing distance from the reference distribution mean.
    x_1 = torch.tensor(np.array([[8., 8.]]))
    scores_1 = svm_torch.score(x_1)
    x_2 = torch.tensor(np.array([[13., 13.]]))
    scores_2 = svm_torch.score(x_2)
    x_3 = torch.tensor(np.array([[-100., 100.]]))
    scores_3 = svm_torch.score(x_3)
    # test correct ordering of scores given relative outlyingness of data
    assert scores_1 < scores_2 < scores_3
    # test that detector correctly detects true outlier
    svm_torch.infer_threshold(x_ref, 0.01)
    x = torch.cat((x_1, x_2, x_3))
    outputs = svm_torch.predict(x)
    assert torch.all(outputs.is_outlier == torch.tensor([False, True, True]))
    assert torch.all(svm_torch(x) == torch.tensor([False, True, True]))
    # test that 0.01 of the in distribution data is flagged as outliers
    x = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    outputs = svm_torch.predict(x)
    # NOTE(review): this bound is one-sided — if fewer than 1% are flagged the
    # left side is negative and the assert passes trivially; abs() may be intended.
    assert (outputs.is_outlier.sum()/1000) - 0.01 < 0.01
@pytest.mark.skip(reason="Can't convert GaussianRBF to torchscript due to torchscript type constraints")
@pytest.mark.parametrize('backend_cls', [BgdSVMTorch, SgdSVMTorch])
def test_svm_torch_backend_ts(tmp_path, backend_cls):
    """Test SVM detector backend is torch-scriptable and savable."""
    svm_torch = backend_cls(n_components=10, kernel=GaussianRBF())
    # Scale the last row so it is an obvious outlier relative to x_ref.
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    x_ref = torch.randn((1024, 10))
    # `nu` is supplied at fit time here rather than at construction.
    svm_torch.fit(x_ref, nu=0.01)
    svm_torch.infer_threshold(x_ref, 0.1)
    pred_1 = svm_torch(x)
    # Scripting must not change the predictions.
    svm_torch = torch.jit.script(svm_torch)
    pred_2 = svm_torch(x)
    assert torch.all(pred_1 == pred_2)
    # Neither must a save/load round-trip through disk.
    svm_torch.save(tmp_path / 'svm_torch.pt')
    svm_torch = torch.load(tmp_path / 'svm_torch.pt')
    pred_2 = svm_torch(x)
    assert torch.all(pred_1 == pred_2)
@pytest.mark.parametrize('backend_cls', [BgdSVMTorch, SgdSVMTorch])
def test_svm_pytorch_backend_fit_errors(backend_cls):
    """Test SVM detector pytorch backend fit errors.
    Tests the correct errors are raised when using the SVMTorch pytorch backend detector.
    """
    svm_torch = backend_cls(n_components=100, kernel=GaussianRBF(), nu=0.1)
    assert not svm_torch.fitted
    # Test that the backend raises an error if it is not fitted before
    # calling forward method.
    x = torch.tensor(np.random.randn(1, 10))
    with pytest.raises(NotFittedError) as err:
        svm_torch(x)
    assert str(err.value) == f'{backend_cls.__name__} has not been fit!'
    # Test that the backend raises an error if it is not fitted before
    # predicting.
    with pytest.raises(NotFittedError) as err:
        svm_torch.predict(x)
    assert str(err.value) == f'{backend_cls.__name__} has not been fit!'
    # Test the backend updates _fitted flag on fit.
    x_ref = torch.tensor(np.random.randn(1024, 10))
    svm_torch.fit(x_ref)
    assert svm_torch.fitted
    # Test that the backend raises an error if the forward method is called
    # without the threshold being inferred.
    with pytest.raises(ThresholdNotInferredError) as err:
        svm_torch(x)
    assert str(err.value) == f'{backend_cls.__name__} has no threshold set, call `infer_threshold` to fit one!'
    # Test that the backend can call predict without the threshold being inferred.
    assert svm_torch.predict(x)
@pytest.mark.parametrize('backend_cls', [BgdSVMTorch, SgdSVMTorch])
def test_svm_pytorch_fit(backend_cls):
    """Fitting converges early on easy Gaussian data and reports diagnostics."""
    kernel = GaussianRBF(torch.tensor(1.))
    backend = backend_cls(n_components=1, kernel=kernel, nu=0.01)
    mean = [8, 8]
    cov = [[2., 0.], [0., 1.]]
    reference = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    diagnostics = backend.fit(reference, tol=0.01)
    # convergence should be detected well before the iteration budget
    assert diagnostics['converged']
    assert diagnostics['n_iter'] < 100
    assert diagnostics.get('lower_bound', 0) < 1
def test_sgd_bgd_diffs():
    """Scores from the SGD and BGD SVM backends should closely agree."""
    n_components = 300
    bgd_svm = BgdSVMTorch(n_components=n_components, kernel=GaussianRBF(sigma=torch.tensor(2)), nu=0.05)
    sgd_svm = SgdSVMTorch(n_components=n_components, kernel=GaussianRBF(sigma=torch.tensor(2)), nu=0.05)
    mean = [8, 8]
    cov = [[2., 0.], [0., 1.]]
    x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    bgd_svm.fit(x_ref)
    sgd_svm.fit(x_ref)
    # score the reference data with both optimizers and compare the mean gap
    test_x = x_ref[:1000]
    score_gap = (sgd_svm.score(test_x) - bgd_svm.score(test_x)).numpy()
    assert np.abs(score_gap.mean()) < 0.1
| 5,250 | 36.241135 | 111 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__svm/test__svm.py | import pytest
import numpy as np
import torch
from alibi_detect.od._svm import SVM
from alibi_detect.exceptions import NotFittedError
from alibi_detect.utils.pytorch import GaussianRBF
from sklearn.datasets import make_moons
@pytest.mark.parametrize('optimization', ['sgd', 'bgd'])
def test_unfitted_svm_score(optimization):
    """Every scoring entry point of an unfit SVM detector must raise `NotFittedError`."""
    svm_detector = SVM(
        n_components=10,
        backend='pytorch',
        kernel=GaussianRBF(torch.tensor(2)),
        optimization=optimization,
        nu=0.1
    )
    queries = np.array([[0, 10], [0.1, 0]])
    reference = np.random.randn(100, 2)
    # infer_threshold, score and predict all require a prior fit
    for raising_call in (
        lambda: svm_detector.infer_threshold(reference, 0.1),
        lambda: svm_detector.score(queries),
        lambda: svm_detector.predict(queries),
    ):
        with pytest.raises(NotFittedError) as err:
            raising_call()
        assert str(err.value) == 'SVM has not been fit!'
@pytest.mark.parametrize('optimization,device', [('sgd', 'gpu'), ('bgd', 'cpu')])
def test_svm_device_warnings(optimization, device):
    """Test SVM detector device warnings."""
    # Expected warning text per optimization option; matched verbatim below.
    warning_msgs = {
        'sgd': ('If using the `sgd` optimization option with GPU then only the Nystroem approximation'
                ' portion of the method will utilize the GPU. Consider using the `bgd` option which will'
                ' run everything on the GPU.'),
        'bgd': ('The `bgd` optimization option is best suited for GPU. '
                'If you want to use CPU, consider using the `sgd` option.')
    }
    # Each parametrized (optimization, device) pair is a mismatched combination
    # that should trigger exactly one UserWarning at construction.
    with pytest.warns(UserWarning) as warning:
        _ = SVM(
            n_components=10,
            backend='pytorch',
            kernel=GaussianRBF(torch.tensor(2)),
            optimization=optimization,
            device=device,
            nu=0.1
        )
    assert len(warning) == 1
    assert str(warning[0].message) == warning_msgs[optimization]
def test_svm_optimization_error():
    """An unrecognized `optimization` option must be rejected at construction."""
    bad_kwargs = dict(
        n_components=10,
        backend='pytorch',
        kernel=GaussianRBF(torch.tensor(2)),
        optimization='not_an_option',
        device='cpu',
        nu=0.1,
    )
    with pytest.raises(ValueError) as err:
        _ = SVM(**bad_kwargs)
    assert str(err.value) == 'Optimization not_an_option not recognized. Choose from `sgd` or `bgd`.'
def test_svm_n_components_error():
    """A non-positive `n_components` must be rejected at construction."""
    bad_kwargs = dict(
        n_components=0,
        backend='pytorch',
        kernel=GaussianRBF(torch.tensor(2)),
        optimization='bgd',
        device='cpu',
        nu=0.1,
    )
    with pytest.raises(ValueError) as err:
        _ = SVM(**bad_kwargs)
    assert str(err.value) == 'n_components must be a positive integer, got 0.'
@pytest.mark.parametrize('optimization', [('sgd'), ('bgd')])
def test_fitted_svm_score(optimization):
    """Test SVM detector score method.
    Test SVM detector that has been fitted on reference data but has not had a threshold
    inferred can still score data using the predict method. Test that it does not raise an error
    but does not return `threshold`, `p_value` and `is_outlier` values.
    """
    svm_detector = SVM(
        n_components=10,
        backend='pytorch',
        kernel=GaussianRBF(torch.tensor(2)),
        optimization=optimization,
        nu=0.1
    )
    x_ref = np.random.randn(100, 2)
    svm_detector.fit(x_ref)
    x = np.array([[0, 10], [0.1, 0]])
    scores = svm_detector.score(x)
    y = svm_detector.predict(x)
    y = y['data']
    # First point is far from the reference data (high score), second is close.
    assert y['instance_score'][0] > -0.01
    assert y['instance_score'][1] < -0.8
    # `score` must agree with the `instance_score` returned by `predict`.
    assert all(y['instance_score'] == scores)
    # Threshold-dependent fields stay unset because no threshold was inferred.
    assert not y['threshold_inferred']
    assert y['threshold'] is None
    assert y['is_outlier'] is None
    assert y['p_value'] is None
@pytest.mark.parametrize('optimization', [('sgd'), ('bgd')])
def test_fitted_svm_predict(optimization):
    """Test SVM detector predict method.
    Test SVM detector that has been fitted on reference data and has had a threshold
    inferred can score data using the predict method as well as predict outliers. Test that it
    returns `threshold`, `p_value` and `is_outlier` values.
    """
    svm_detector = SVM(
        n_components=10,
        backend='pytorch',
        kernel=GaussianRBF(torch.tensor(2)),
        optimization=optimization,
        nu=0.1
    )
    x_ref = np.random.randn(100, 2)
    svm_detector.fit(x_ref)
    svm_detector.infer_threshold(x_ref, 0.1)
    x = np.array([[0, 10], [0, 0.1]])
    y = svm_detector.predict(x)
    y = y['data']
    # First point is far from the reference data (high score), second is close.
    assert y['instance_score'][0] > -0.01
    assert y['instance_score'][1] < -0.8
    # Threshold-dependent fields are populated because a threshold was inferred.
    assert y['threshold_inferred']
    assert y['threshold'] is not None
    assert y['p_value'].all()
    assert (y['is_outlier'] == [True, False]).all()
@pytest.mark.parametrize('optimization', ['sgd', 'bgd'])
@pytest.mark.parametrize('n_components', [None, 100])
@pytest.mark.parametrize('kernel', [None, GaussianRBF(torch.tensor(2))])
def test_svm_integration(optimization, n_components, kernel):
    """Test SVM detector on moons dataset.

    Fits and thresholds the detector on a more complex 2d dataset, then checks that
    it labels a held-out inlier as normal and an obvious outlier as anomalous.
    """
    detector = SVM(
        n_components=n_components,
        nu=0.1,
        backend='pytorch',
        kernel=kernel,
        optimization=optimization,
    )
    data, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
    x_ref, x_inlier = data[0:1000], data[1000][None]
    detector.fit(x_ref)
    detector.infer_threshold(x_ref, 0.1)
    assert not detector.predict(x_inlier)['data']['is_outlier'][0]
    x_outlier = np.array([[-1, 1.5]])
    assert detector.predict(x_outlier)['data']['is_outlier'][0]
@pytest.mark.skip(reason="Can't convert default kernel GaussianRBF to torchscript due to torchscript type constraints")
def test_svm_torchscript(tmp_path):
    """Tests user can torch-script svm detector.

    Scripts the fitted backend, saves the TorchScript archive and reloads it,
    checking predictions survive the round trip.
    """
    sigma = torch.tensor(0.2)
    svm_detector = SVM(
        n_components=100,
        backend='pytorch',
        kernel=GaussianRBF(sigma=sigma)
    )
    X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
    X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
    svm_detector.fit(X_ref, nu=0.1)
    svm_detector.infer_threshold(X_ref, 0.1)
    x_outlier = np.array([[-1, 1.5]])
    x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
    ts_svm = torch.jit.script(svm_detector.backend)
    y = ts_svm(x)
    assert torch.all(y == torch.tensor([False, True]))
    ts_svm.save(tmp_path / 'svm.pt')
    # Fix: TorchScript archives are loaded with `torch.jit.load`; `torch.load` only
    # succeeds via a fallback that warns and redirects to `torch.jit.load`.
    ts_svm = torch.jit.load(tmp_path / 'svm.pt')
    y = ts_svm(x)
    assert torch.all(y == torch.tensor([False, True]))
@pytest.mark.parametrize('optimization', ['sgd', 'bgd'])
def test_svm_fit(optimization):
    """Test SVM detector fit method.

    Tests pytorch detector checks for convergence and stops early if it does.
    """
    detector = SVM(
        n_components=10,
        kernel=GaussianRBF(torch.tensor(1.)),
        nu=0.01,
        optimization=optimization,
    )
    mean, cov = [8, 8], [[2., 0.], [0., 1.]]
    reference = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    fit_results = detector.fit(reference, tol=0.01)
    assert fit_results['converged']
    assert fit_results['n_iter'] < 100
    assert fit_results.get('lower_bound', 0) < 1
    # only the 'bgd' optimizer reports a loss lower bound
    if optimization == 'bgd':
        assert isinstance(fit_results['lower_bound'], float)
| 7,983 | 32.974468 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__mahalanobis/test__mahalanobis.py | import pytest
import numpy as np
import torch
from alibi_detect.od._mahalanobis import Mahalanobis
from alibi_detect.exceptions import NotFittedError
from sklearn.datasets import make_moons
def make_mahalanobis_detector():
    """Build a Mahalanobis detector fitted on random reference data with an inferred threshold."""
    detector = Mahalanobis()
    reference = np.random.randn(100, 2)
    detector.fit(reference)
    detector.infer_threshold(reference, 0.1)
    return detector
def test_unfitted_mahalanobis_single_score():
    """Test Mahalanobis detector throws errors when not fitted."""
    detector = Mahalanobis()
    x = np.array([[0, 10], [0.1, 0]])
    x_ref = np.random.randn(100, 2)
    # every public entry point must refuse to run before `fit`
    with pytest.raises(NotFittedError) as err:
        detector.infer_threshold(x_ref, 0.1)
    assert str(err.value) == 'Mahalanobis has not been fit!'
    with pytest.raises(NotFittedError) as err:
        detector.score(x)
    assert str(err.value) == 'Mahalanobis has not been fit!'
    with pytest.raises(NotFittedError) as err:
        detector.predict(x)
    assert str(err.value) == 'Mahalanobis has not been fit!'
def test_fitted_mahalanobis_score():
    """Test Mahalanobis detector score method.

    A detector fitted on reference data, but with no threshold inferred, must still
    score instances via `score`/`predict` while reporting `threshold`, `p_value` and
    `is_outlier` as ``None``.
    """
    detector = Mahalanobis()
    detector.fit(np.random.randn(100, 2))
    x = np.array([[0, 10], [0.1, 0]])
    scores = detector.score(x)
    preds = detector.predict(x)['data']
    assert preds['instance_score'][0] > 5
    assert preds['instance_score'][1] < 1
    assert all(preds['instance_score'] == scores)
    assert not preds['threshold_inferred']
    assert preds['threshold'] is None
    assert preds['is_outlier'] is None
    assert preds['p_value'] is None
def test_fitted_mahalanobis_predict():
    """Test Mahalanobis detector predict method.

    After fitting and threshold inference, `predict` must return scores together with
    populated `threshold`, `p_value` and `is_outlier` values.
    """
    detector = make_mahalanobis_detector()
    x_ref = np.random.randn(100, 2)
    detector.infer_threshold(x_ref, 0.1)
    preds = detector.predict(np.array([[0, 10], [0, 0.1]]))['data']
    assert preds['instance_score'][0] > 5
    assert preds['instance_score'][1] < 1
    assert preds['threshold_inferred']
    assert preds['threshold'] is not None
    assert preds['p_value'].all()
    assert (preds['is_outlier'] == [True, False]).all()
def test_mahalanobis_integration(tmp_path):
    """Test Mahalanobis detector on moons dataset.

    Test Mahalanobis detector on a more complex 2d example. Test that the detector can be fitted
    on reference data and infer a threshold. Test that it differentiates between inliers and outliers.
    Test that the detector can be scripted, saved and reloaded.
    """
    mahalanobis_detector = Mahalanobis()
    X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
    X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
    mahalanobis_detector.fit(X_ref)
    mahalanobis_detector.infer_threshold(X_ref, 0.1)
    result = mahalanobis_detector.predict(x_inlier)
    result = result['data']['is_outlier'][0]
    assert not result
    x_outlier = np.array([[-1, 1.5]])
    result = mahalanobis_detector.predict(x_outlier)
    result = result['data']['is_outlier'][0]
    assert result
    ts_mahalanobis = torch.jit.script(mahalanobis_detector.backend)
    x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
    y = ts_mahalanobis(x)
    assert torch.all(y == torch.tensor([False, True]))
    ts_mahalanobis.save(tmp_path / 'mahalanobis.pt')
    # Fix: load the TorchScript archive with `torch.jit.load`; `torch.load` only
    # succeeds via a fallback that warns and redirects to `torch.jit.load`.
    mahalanobis_detector = torch.jit.load(tmp_path / 'mahalanobis.pt')
    y = mahalanobis_detector(x)
    assert torch.all(y == torch.tensor([False, True]))
| 4,177 | 35.973451 | 102 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test__mahalanobis/test__mahalanobis_backend.py | import pytest
import torch
import numpy as np
from alibi_detect.od.pytorch.mahalanobis import MahalanobisTorch
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
def test_mahalanobis_linear_scoring():
    """Test Mahalanobis detector linear scoring method.

    Test that the Mahalanobis detector `_compute_linear_proj` method correctly whitens the x_ref data
    and that the score method correctly orders different test points. Test that the detector correctly
    detects true outliers and that the correct proportion of in distribution data is flagged as
    outliers.
    """
    mahalanobis_torch = MahalanobisTorch()
    mean = [8, 8]
    cov = [[2., 0.], [0., 1.]]
    x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    mahalanobis_torch.fit(x_ref)
    p = mahalanobis_torch._compute_linear_proj(mahalanobis_torch.x_ref)
    # test that the x_ref is whitened by the data: mean ~ 0 and std ~ 1.
    # Fix: wrap in abs() so deviations in either direction fail — without it, e.g.
    # a std of 0.2 would wrongly satisfy `p.std() - 1 < 0.1`.
    assert abs(p.mean()) < 0.1
    assert abs(p.std() - 1) < 0.1
    x_1 = torch.tensor([[8., 8.]])
    scores_1 = mahalanobis_torch.score(x_1)
    x_2 = torch.tensor(np.random.multivariate_normal(mean, cov, 1))
    scores_2 = mahalanobis_torch.score(x_2)
    x_3 = torch.tensor([[-10., 10.]])
    scores_3 = mahalanobis_torch.score(x_3)
    # test correct ordering of scores given outlyingness of data
    assert scores_1 < scores_2 < scores_3
    # test that detector correctly detects true Outlier
    mahalanobis_torch.infer_threshold(x_ref, 0.01)
    x = torch.cat((x_1, x_2, x_3))
    outputs = mahalanobis_torch.predict(x)
    assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
    assert torch.all(mahalanobis_torch(x) == torch.tensor([False, False, True]))
    # test that close to 0.01 of the in distribution data is flagged as outliers
    # (fix: abs() so an outlier rate far below 0.005 no longer passes trivially)
    x = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
    outputs = mahalanobis_torch.predict(x)
    assert abs(outputs.is_outlier.sum()/1000 - 0.01) < 0.005
def test_mahalanobis_torch_backend_ts(tmp_path):
    """Test Mahalanobis detector backend is torch-scriptable and savable."""
    mahalanobis_torch = MahalanobisTorch()
    x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
    x_ref = torch.randn((1024, 10))
    mahalanobis_torch.fit(x_ref)
    mahalanobis_torch.infer_threshold(x_ref, 0.1)
    pred_1 = mahalanobis_torch(x)
    mahalanobis_torch = torch.jit.script(mahalanobis_torch)
    pred_2 = mahalanobis_torch(x)
    assert torch.all(pred_1 == pred_2)
    mahalanobis_torch.save(tmp_path / 'mahalanobis_torch.pt')
    # Fix: TorchScript archives are loaded with `torch.jit.load`; `torch.load` only
    # succeeds via a fallback that warns and redirects to `torch.jit.load`.
    mahalanobis_torch = torch.jit.load(tmp_path / 'mahalanobis_torch.pt')
    pred_2 = mahalanobis_torch(x)
    assert torch.all(pred_1 == pred_2)
def test_mahalanobis_torch_backend_fit_errors():
    """Test Mahalanobis detector backend fit errors.

    An unfit backend must raise on `__call__` and `predict`; once fitted, `__call__`
    must still raise until a threshold has been inferred, while `predict` succeeds.
    """
    backend = MahalanobisTorch()
    assert not backend.fitted
    x = torch.randn((1, 10))
    with pytest.raises(NotFittedError) as err:
        backend(x)
    assert str(err.value) == 'MahalanobisTorch has not been fit!'
    with pytest.raises(NotFittedError) as err:
        backend.predict(x)
    assert str(err.value) == 'MahalanobisTorch has not been fit!'
    x_ref = torch.randn((1024, 10))
    backend.fit(x_ref)
    assert backend.fitted
    with pytest.raises(ThresholdNotInferredError) as err:
        backend(x)
    assert str(err.value) == 'MahalanobisTorch has no threshold set, call `infer_threshold` to fit one!'
    assert backend.predict(x)
| 3,764 | 36.277228 | 104 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/sklearn/base.py | from typing import List, Union, Optional, Dict
from dataclasses import dataclass, asdict
from abc import ABC, abstractmethod
from typing_extensions import Self
import numpy as np
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
@dataclass
class SklearnOutlierDetectorOutput:
    """Output of the outlier detector."""
    threshold_inferred: bool  # whether `infer_threshold` has been called on the detector
    instance_score: np.ndarray  # outlier score per instance
    threshold: Optional[np.ndarray]  # score threshold; ``None`` until inferred
    is_outlier: Optional[np.ndarray]  # 0/1 outlier labels; ``None`` until threshold inferred
    p_value: Optional[np.ndarray]  # p-value per instance; ``None`` until threshold inferred
class FitMixinSklearn(ABC):
    """Mixin tracking the fitted state of sklearn backend detectors."""
    fitted = False  # flipped to True by `_set_fitted`

    @abstractmethod
    def fit(self, x_ref: np.ndarray) -> Self:
        """Abstract fit method.

        Parameters
        ----------
        x_ref
            `np.ndarray` to fit object on.
        """
        return self

    def _set_fitted(self) -> Self:
        """Sets the fitted attribute to True.

        Should be called within the object fit method.
        """
        self.fitted = True
        return self

    def check_fitted(self):
        """Checks to make sure object has been fitted.

        Raises
        ------
        NotFittedError
            Raised if method called and object has not been fit.
        """
        if not self.fitted:
            raise NotFittedError(self.__class__.__name__)
class SklearnOutlierDetector(FitMixinSklearn, ABC):
    """Base class for sklearn backend outlier detection algorithms."""
    threshold_inferred = False  # set to True by `infer_threshold`
    threshold = None  # score threshold, populated by `infer_threshold`

    @abstractmethod
    def score(self, x: np.ndarray) -> np.ndarray:
        """Score the data.

        Parameters
        ----------
        x
            Data to score.
        """
        pass

    def check_threshold_inferred(self):
        """Check if threshold is inferred.

        Raises
        ------
        ThresholdNotInferredError
            Raised if threshold is not inferred.
        """
        if not self.threshold_inferred:
            raise ThresholdNotInferredError(self.__class__.__name__)

    @staticmethod
    def _to_frontend_dtype(
        arg: Union[np.ndarray, SklearnOutlierDetectorOutput]
    ) -> Union[np.ndarray, Dict[str, np.ndarray]]:
        """Converts input to frontend data format.

        This is an interface method that ensures that the output of the outlier detector is in a common format for
        different backends. If `arg` is a `SklearnOutlierDetectorOutput` object, we unpack it into a `dict` and
        return it.

        Parameters
        ----------
        arg
            Data to convert.

        Returns
        -------
        `np.ndarray` or dictionary containing frontend compatible data.
        """
        if isinstance(arg, SklearnOutlierDetectorOutput):
            return asdict(arg)
        return arg

    @staticmethod
    def _to_backend_dtype(x: Union[List, np.ndarray]) -> np.ndarray:
        """Converts data from the frontend to the backend format.

        This is an interface method that ensures that the input of the chosen outlier detector backend is in the
        correct format. In the case of the Sklearn backend, we ensure the data is a numpy array.

        Parameters
        ----------
        x
            Data to convert.
        """
        return np.asarray(x)

    def _classify_outlier(self, scores: np.ndarray) -> Optional[np.ndarray]:
        """Classify the data as outlier or not.

        Parameters
        ----------
        scores
            Scores to classify. Larger scores indicate more likely outliers.

        Returns
        -------
        `np.ndarray` of 0/1 labels, or ``None`` if no threshold has been inferred.
        """
        if self.threshold_inferred and self.threshold is not None:
            return (scores > self.threshold).astype(int)
        return None

    def _p_vals(self, scores: np.ndarray) -> Optional[np.ndarray]:
        """Compute p-values for the scores.

        Each p-value is the (add-one smoothed) fraction of validation scores strictly
        greater than the corresponding test score.

        Parameters
        ----------
        scores
            Scores to compute p-values for.

        Returns
        -------
        `np.ndarray` of p-values, or ``None`` if no threshold has been inferred.
        """
        return (1 + (scores[:, None] < self.val_scores).sum(-1))/len(self.val_scores) \
            if self.threshold_inferred else None

    def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
        """Infer the threshold for the data. Prerequisite for outlier predictions.

        Parameters
        ----------
        x
            Data to infer the threshold for.
        fpr
            False positive rate to use for threshold inference.

        Raises
        ------
        ValueError
            Raised if `fpr` is not in ``(0, 1)``.
        ValueError
            Raised if `fpr` is less than ``1/len(x)``.
        """
        if not 0 < fpr < 1:
            raise ValueError('`fpr` must be in `(0, 1)`.')
        if fpr < 1/len(x):
            raise ValueError(f'`fpr` must be greater than `1/len(x)={1/len(x)}`.')
        self.val_scores = self.score(x)
        # Fix: `method` replaces the `interpolation` keyword, which was deprecated in
        # NumPy 1.22 and removed in NumPy 2.0 (requires NumPy >= 1.22).
        self.threshold = np.quantile(self.val_scores, 1-fpr, method='higher')
        self.threshold_inferred = True

    def predict(self, x: np.ndarray) -> SklearnOutlierDetectorOutput:
        """Predict outlier labels for the data.

        Computes the outlier scores. If the detector is not fit on reference data we raise an error.
        If the threshold is inferred, the outlier labels and p-values are also computed and returned.
        Otherwise, the outlier labels and p-values are set to ``None``.

        Parameters
        ----------
        x
            Data to predict.

        Returns
        -------
        `SklearnOutlierDetectorOutput`
            Output of the outlier detector.

        Raises
        ------
        NotFittedError
            Raised if the detector is not fit on reference data.
        """
        self.check_fitted()
        scores = self.score(x)
        return SklearnOutlierDetectorOutput(
            instance_score=scores,
            is_outlier=self._classify_outlier(scores),
            p_value=self._p_vals(scores),
            threshold_inferred=self.threshold_inferred,
            threshold=self.threshold
        )

    def __call__(self, x: np.ndarray) -> np.ndarray:
        """Classify outliers.

        Parameters
        ----------
        x
            Data to classify.
        """
        scores = self.score(x)
        self.check_threshold_inferred()
        return self._classify_outlier(scores)
| 6,383 | 28.018182 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/pytorch/svm.py | import warnings
from typing import Callable, Dict, Optional, Tuple, Union
import numpy as np
import torch
from sklearn.linear_model import SGDOneClassSVM
from sklearn.utils.extmath import safe_sparse_dot
from tqdm import tqdm
from typing_extensions import Literal, Self
from alibi_detect.od.pytorch.base import TorchOutlierDetector
from alibi_detect.utils.pytorch.losses import hinge_loss
from alibi_detect.utils.pytorch.kernels import GaussianRBF
class SVMTorch(TorchOutlierDetector):
    """Shared Pytorch backend base for the Support Vector Machine (SVM) outlier detector."""
    ensemble = False

    def __init__(
        self,
        nu: float,
        kernel: Optional['torch.nn.Module'] = None,  # fix: default is None, so annotation must be Optional
        n_components: Optional[int] = None,
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ):
        """Pytorch backend for the Support Vector Machine (SVM) outlier detector.

        Parameters
        ----------
        nu
            The proportion of the training data that should be considered outliers. Note that this does
            not necessarily correspond to the false positive rate on test data, which is still defined when
            calling the `infer_threshold` method.
        kernel
            Kernel function to use for outlier detection. Defaults to a `GaussianRBF` kernel.
        n_components
            Number of components in the Nystroem approximation, by default uses all of them.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
            passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
        """
        super().__init__(device=device)
        self.n_components = n_components
        if kernel is None:
            kernel = GaussianRBF()
        self.kernel = kernel
        self.nystroem = _Nystroem(
            self.kernel,
            self.n_components
        )
        self.nu = nu

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Detect if `x` is an outlier.

        Parameters
        ----------
        x
            `torch.Tensor` with leading batch dimension.

        Returns
        -------
        `torch.Tensor` of ``bool`` values with leading batch dimension.

        Raises
        ------
        ThresholdNotInferredError
            If called before detector has had `infer_threshold` method called.
        """
        scores = self.score(x)
        if not torch.jit.is_scripting():
            self.check_threshold_inferred()
        preds = scores > self.threshold
        return preds
class SgdSVMTorch(SVMTorch):
    """One-class SVM backend optimized with sklearn's `SGDOneClassSVM` (CPU optimizer)."""
    ensemble = False

    def __init__(
        self,
        nu: float,
        kernel: Optional['torch.nn.Module'] = None,
        n_components: Optional[int] = None,
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ):
        """SGD Optimization backend for the One class support vector machine (SVM) outlier detector.

        Parameters
        ----------
        nu
            The proportion of the training data that should be considered outliers. Note that this does
            not necessarily correspond to the false positive rate on test data, which is still defined when
            calling the `infer_threshold` method.
        kernel
            Kernel function to use for outlier detection.
        n_components
            Number of components in the Nystroem approximation, by default uses all of them.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
            passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
        """
        # The sklearn optimizer itself runs on CPU; only the Nystroem step can use a GPU.
        if (isinstance(device, str) and device in ('gpu', 'cuda')) or \
                (isinstance(device, torch.device) and device.type == 'cuda'):
            warnings.warn(('If using the `sgd` optimization option with GPU then only the Nystroem approximation'
                           ' portion of the method will utilize the GPU. Consider using the `bgd` option which will'
                           ' run everything on the GPU.'))
        super().__init__(
            device=device,
            n_components=n_components,
            kernel=kernel,
            nu=nu,
        )

    def fit(  # type: ignore[override]
        self,
        x_ref: torch.Tensor,
        tol: float = 1e-6,
        max_iter: int = 1000,
        verbose: int = 0,
    ) -> Dict:
        """Fit the Nystroem approximation and Sklearn `SGDOneClassSVM` SVM model.

        Parameters
        ----------
        x_ref
            Training data.
        tol
            The decrease in loss required over the previous ``n_iter_no_change`` iterations in order to
            continue optimizing.
        max_iter
            The maximum number of optimization steps.
        verbose
            Verbosity level during training. ``0`` is silent, ``1`` a progress bar.

        Returns
        -------
        Dictionary with fit results. The dictionary contains the following keys:
            - converged: `bool` indicating whether training converged.
            - n_iter: number of iterations performed.
        """
        x_nys = self.nystroem.fit(x_ref).transform(x_ref)
        self.svm = SGDOneClassSVM(
            tol=tol,
            max_iter=max_iter,
            verbose=verbose,
            nu=self.nu
        )
        # sklearn operates on numpy arrays, so move the Nystroem features off-device.
        x_nys = x_nys.cpu().numpy()
        self.svm = self.svm.fit(x_nys)
        self._set_fitted()
        return {
            'converged': self.svm.n_iter_ < max_iter,
            'n_iter': self.svm.n_iter_,
        }

    def format_fit_kwargs(self, fit_kwargs: Dict) -> Dict:
        """Format kwargs for `fit` method.

        Parameters
        ----------
        fit_kwargs
            dictionary of Kwargs to format. See `fit` method for details.

        Returns
        -------
        Formatted kwargs.
        """
        # NOTE(review): the fallback default for `tol` here (1e-3) differs from the
        # `fit` signature default (1e-6) — confirm which tolerance is intended.
        return dict(
            tol=fit_kwargs.get('tol', 1e-3),
            max_iter=fit_kwargs.get('max_iter', 1000),
            verbose=fit_kwargs.get('verbose', 0),
        )

    def score(self, x: torch.Tensor) -> torch.Tensor:
        """Computes the score of `x`

        Parameters
        ----------
        x
            `torch.Tensor` with leading batch dimension.

        Returns
        -------
        `torch.Tensor` of scores with leading batch dimension.

        Raises
        ------
        NotFittedError
            Raised if method called and detector has not been fit.
        """
        self.check_fitted()
        x_nys = self.nystroem.transform(x)
        x_nys = x_nys.cpu().numpy()
        # NOTE(review): scales the coefficients by their *squared* L2 norm, whereas
        # `BgdSVMTorch.score` uses unit (L2) normalization — confirm the asymmetry
        # is intentional.
        coef_ = self.svm.coef_ / (self.svm.coef_ ** 2).sum()
        # `_validate_data`/`safe_sparse_dot` are sklearn internals; this mirrors
        # `SGDOneClassSVM.decision_function` but without the fitted offset —
        # NOTE(review): confirm omitting `offset_` is deliberate.
        x_nys = self.svm._validate_data(x_nys, accept_sparse="csr", reset=False)
        result = safe_sparse_dot(x_nys, coef_.T, dense_output=True).ravel()
        # Negate so that larger scores correspond to more anomalous instances.
        return - self._to_backend_dtype(result)
class BgdSVMTorch(SVMTorch):
    """One-class SVM backend optimized with a custom batch gradient descent (GPU-friendly)."""
    ensemble = False

    def __init__(
        self,
        nu: float,
        kernel: Optional['torch.nn.Module'] = None,
        n_components: Optional[int] = None,
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ):
        """Pytorch backend for the Support Vector Machine (SVM) outlier detector.

        Parameters
        ----------
        nu
            The proportion of the training data that should be considered outliers. Note that this does
            not necessarily correspond to the false positive rate on test data, which is still defined when
            calling the `infer_threshold` method.
        kernel
            Kernel function to use for outlier detection.
        n_components
            Number of components in the Nystroem approximation, by default uses all of them.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
            passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
        """
        if (isinstance(device, str) and device == 'cpu') or \
                (isinstance(device, torch.device) and device.type == 'cpu'):
            warnings.warn(('The `bgd` optimization option is best suited for GPU. If '
                           'you want to use CPU, consider using the `sgd` option.'))
        super().__init__(
            device=device,
            n_components=n_components,
            kernel=kernel,
            nu=nu,
        )

    def fit(  # type: ignore[override]
        self,
        x_ref: torch.Tensor,
        step_size_range: Tuple[float, float] = (1e-8, 1.0),
        n_step_sizes: int = 16,
        tol: float = 1e-6,
        n_iter_no_change: int = 25,
        max_iter: int = 1000,
        verbose: int = 0,
    ) -> Dict:
        """Fit the Nystroem approximation and python SVM model.

        Parameters
        ----------
        x_ref
            Training data.
        step_size_range
            The range of values to be considered for the gradient descent step size at each iteration. This is
            specified as a tuple of the form `(min_eta, max_eta)`.
        n_step_sizes
            The number of step sizes in the defined range to be tested for loss reduction. This many points are spaced
            equidistantly along the range in log space.
        tol
            The decrease in loss required over the previous n_iter_no_change iterations in order to continue optimizing.
        n_iter_no_change
            The number of iterations over which the loss must decrease by `tol` in order for optimization to continue.
        max_iter
            The maximum number of optimization steps.
        verbose
            Verbosity level during training. ``0`` is silent, ``1`` a progress bar.

        Returns
        -------
        Dictionary with fit results. The dictionary contains the following keys:
            - converged: `bool` indicating whether training converged.
            - n_iter: number of iterations performed.
            - lower_bound: loss lower bound.
        """
        x_nys = self.nystroem.fit(x_ref).transform(x_ref)
        n, d = x_nys.shape
        min_eta, max_eta = step_size_range
        # Candidate step sizes are spaced equidistantly in log space.
        etas = torch.tensor(
            np.linspace(
                np.log(min_eta),
                np.log(max_eta),
                n_step_sizes
            ),
            dtype=x_nys.dtype,
            device=self.device
        ).exp()

        # Initialise coeffs/preds/loss
        coeffs = torch.zeros(d, dtype=x_nys.dtype, device=self.device)
        intercept = torch.zeros(1, dtype=x_nys.dtype, device=self.device)
        preds = x_nys @ coeffs + intercept
        loss = self.nu * (coeffs.square().sum()/2 + intercept) + hinge_loss(preds)
        min_loss, min_loss_coeffs, min_loss_intercept = loss, coeffs, intercept
        iter, t_since_improv = 0, 0
        converged = False

        with tqdm(total=max_iter, disable=not verbose) as pbar:
            while not converged:
                pbar.update(1)
                # First two lines give form of sgd update (for each candidate step size)
                sup_vec_inds = (preds < 1)
                cand_coeffs = coeffs[:, None] * \
                    (1-etas*self.nu) + etas*(x_nys[sup_vec_inds].sum(0)/n)[:, None]
                # NOTE(review): unlike the coeffs update, the support-vector term
                # `sup_vec_inds.sum()/n` is not scaled by `etas` — confirm this
                # matches the intended subgradient step.
                cand_intercept = intercept - etas*self.nu + (sup_vec_inds.sum()/n)

                # Compute loss for each candidate step size and choose the best
                cand_preds = x_nys @ cand_coeffs + cand_intercept
                cand_losses = self.nu * (cand_coeffs.square().sum(0)/2 + cand_intercept) + hinge_loss(cand_preds)
                best_step_size = cand_losses.argmin()
                coeffs, intercept = cand_coeffs[:, best_step_size], cand_intercept[best_step_size]
                preds, loss = cand_preds[:, best_step_size], cand_losses[best_step_size]

                # Keep track of best performing coefficients and time since improving (by more than tol)
                if loss < min_loss:
                    if loss < min_loss - tol:
                        t_since_improv = 0
                    min_loss, min_loss_coeffs, min_loss_intercept = loss, coeffs, intercept
                else:
                    t_since_improv += 1

                # Decide whether to continue
                # NOTE(review): `converged` is reported True even when the loop exits
                # because `iter > max_iter` — confirm this is the intended semantics.
                if iter > max_iter or t_since_improv > n_iter_no_change:
                    self.coeffs = min_loss_coeffs
                    self.intercept = min_loss_intercept
                    converged = True
                    break
                else:
                    iter += 1

                if verbose and isinstance(pbar, tqdm):
                    pbar.set_postfix(dict(loss=loss.cpu().detach().numpy().item()))

        self._set_fitted()
        return {
            'converged': converged,
            'lower_bound': self._to_frontend_dtype(min_loss),
            'n_iter': iter
        }

    def format_fit_kwargs(self, fit_kwargs: Dict) -> Dict:
        """Format kwargs for `fit` method.

        Parameters
        ----------
        fit_kwargs
            dictionary of Kwargs to format. See `fit` method for details.

        Returns
        -------
        Formatted kwargs.
        """
        return dict(
            step_size_range=fit_kwargs.get('step_size_range', (1e-8, 1.0)),
            n_iter_no_change=fit_kwargs.get('n_iter_no_change', 25),
            tol=fit_kwargs.get('tol', 1e-6),
            verbose=fit_kwargs.get('verbose', 0),
            n_step_sizes=fit_kwargs.get('n_step_sizes', 16),
            max_iter=fit_kwargs.get('max_iter', 1000)
        )

    def score(self, x: torch.Tensor) -> torch.Tensor:
        """Computes the score of `x`

        Parameters
        ----------
        x
            `torch.Tensor` with leading batch dimension.

        Returns
        -------
        `torch.Tensor` of scores with leading batch dimension.

        Raises
        ------
        NotFittedError
            Raised if method called and detector has not been fit.
        """
        if not torch.jit.is_scripting():
            self.check_fitted()
        x_nys = self.nystroem.transform(x)
        # Project onto the unit-normalized coefficient vector; negate so that larger
        # scores correspond to more anomalous instances.
        coeffs = torch.nn.functional.normalize(self.coeffs, dim=-1)
        preds = x_nys @ coeffs
        return -preds
class _Nystroem:
    def __init__(
        self,
        kernel: Callable,
        n_components: Optional[int] = None
    ) -> None:
        """Nystroem Approximation of a kernel.

        Parameters
        ----------
        kernel
            Kernel function.
        n_components
            Number of components in the Nystroem approximation. By default uses all of them.
        """
        self.kernel = kernel
        self.n_components = n_components

    def fit(
        self,
        x: torch.Tensor
    ) -> Self:
        """Fit the Nystroem approximation.

        Samples landmark points from `x` and factorises their kernel matrix so that
        `transform` can project data into the approximate feature space.

        Parameters
        ----------
        x
            `torch.Tensor` of shape ``(n, d)`` where ``n`` is the number of samples and ``d`` is the dimensionality of
            the data.
        """
        num_instances = len(x)
        num_components = num_instances if self.n_components is None else self.n_components
        landmark_inds = torch.randperm(num_instances)[:num_components]
        self.z = x[landmark_inds]
        K_zz = self.kernel(self.z, self.z)
        # NOTE(review): this adds 1e-16 to every entry plus a full identity matrix to
        # K_zz — confirm a scaled-identity jitter (e.g. `1e-16 * eye`) was not intended.
        K_zz += 1e-16 + torch.eye(num_components, device=K_zz.device)
        U, S, V = torch.linalg.svd(K_zz)
        self.K_zz_root_inv = (U / S.sqrt()) @ V
        return self

    def transform(
        self,
        x: torch.Tensor
    ) -> torch.Tensor:
        """Transform `x` into the Nystroem approximation.

        Parameters
        ----------
        x
            `torch.Tensor` of shape ``(n, d)`` where ``n`` is the number of samples and ``d`` is the dimensionality of
            the data.

        Returns
        -------
        `torch.Tensor` of shape ``(n, n_components)`` where ``n_components`` is the number of components in the
        Nystroem approximation.
        """
        return self.kernel(x, self.z) @ self.K_zz_root_inv
| 15,972 | 34.416851 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/pytorch/base.py | from typing import List, Union, Optional, Dict
from typing_extensions import Literal
from dataclasses import dataclass, fields
from abc import ABC, abstractmethod
import numpy as np
import torch
from alibi_detect.od.pytorch.ensemble import FitMixinTorch
from alibi_detect.utils.pytorch.misc import get_device
from alibi_detect.exceptions import ThresholdNotInferredError
@dataclass
class TorchOutlierDetectorOutput:
    """Output of the outlier detector."""
    threshold_inferred: bool
    instance_score: torch.Tensor
    threshold: Optional[torch.Tensor]
    is_outlier: Optional[torch.Tensor]
    p_value: Optional[torch.Tensor]

    def to_frontend_dtype(self):
        """Return the fields as a dict, converting any `torch.Tensor` values to `np.ndarray`."""
        def _convert(value):
            if isinstance(value, torch.Tensor):
                return value.cpu().detach().numpy()
            return value
        return {f.name: _convert(getattr(self, f.name)) for f in fields(self)}
def _tensor_to_frontend_dtype(x: Union[torch.Tensor, np.ndarray, float]) -> Union[np.ndarray, float]:
    """Convert `x` to a frontend value: tensors become numpy arrays, and 0-d arrays
    are unwrapped into plain Python scalars."""
    result = x.cpu().detach().numpy() if isinstance(x, torch.Tensor) else x
    if isinstance(result, np.ndarray) and result.ndim == 0:
        return result.item()
    return result  # type: ignore[return-value]
def _raise_type_error(x):
    """Raise a `TypeError` reporting the unsupported type of `x`."""
    raise TypeError(
        f'x is type={type(x)} but must be one of TorchOutlierDetectorOutput or a torch Tensor'
    )
def to_frontend_dtype(x: Union[torch.Tensor, TorchOutlierDetectorOutput]) -> Union[np.ndarray, Dict[str, np.ndarray]]:
    """Converts any `torch` tensors found in input to `numpy` arrays.

    Takes a `torch` tensor or `TorchOutlierDetectorOutput` and converts any `torch` tensors found to `numpy` arrays.

    Parameters
    ----------
    x
        Data to convert.

    Returns
    -------
    `np.ndarray` or dictionary of containing `numpy` arrays

    Raises
    ------
    TypeError
        Raised (via `_raise_type_error`) when `x` is neither of the supported types.
    """
    # Dispatch on the runtime class name, falling back to an error for anything else.
    handlers = {
        'TorchOutlierDetectorOutput': lambda arg: arg.to_frontend_dtype(),
        'Tensor': _tensor_to_frontend_dtype,
    }
    handler = handlers.get(x.__class__.__name__, _raise_type_error)
    return handler(x)
class TorchOutlierDetector(torch.nn.Module, FitMixinTorch, ABC):
"""Base class for torch backend outlier detection algorithms."""
threshold_inferred = False
threshold = None
    def __init__(
        self,
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ):
        """Initialise the detector on the resolved torch device.

        Parameters
        ----------
        device
            Device type used. Either a string (``'cuda'``, ``'gpu'`` or ``'cpu'``) or an
            instance of ``torch.device``; ``None`` defers the choice to `get_device`.
        """
        self.device = get_device(device)
        super().__init__()
    @abstractmethod
    def score(self, x: torch.Tensor) -> torch.Tensor:
        """Score the data.

        Parameters
        ----------
        x
            Data to score.

        Returns
        -------
        `torch.Tensor` of scores; larger scores indicate more likely outliers.
        """
        pass
@torch.jit.unused
def check_threshold_inferred(self):
"""Check if threshold is inferred.
Raises
------
ThresholdNotInferredError
Raised if threshold is not inferred.
"""
if not self.threshold_inferred:
raise ThresholdNotInferredError(self.__class__.__name__)
    @staticmethod
    def _to_frontend_dtype(
        arg: Union[torch.Tensor, TorchOutlierDetectorOutput]
    ) -> Union[np.ndarray, Dict[str, np.ndarray]]:
        """Converts input to frontend data format.

        This is an interface method that ensures that the output of the outlier detector is in a common format for
        different backends. Mostly this means converting `torch.tensors` to `np.ndarray`. If `arg` is a
        `TorchOutlierDetectorOutput` object, we call its `to_frontend_dtype` method. Otherwise, if `arg` is a
        `torch.Tensor`, we convert it to a `numpy` array.

        Parameters
        ----------
        arg
            Data to convert.

        Returns
        -------
        `np.ndarray` or dictionary of containing `numpy` arrays
        """
        return to_frontend_dtype(arg)
def _to_backend_dtype(self, x: Union[List, np.ndarray]) -> torch.Tensor:
"""Converts data from the frontend to the backend format.
This is an interface method that ensures that the input of the chosen outlier detector backend is in the correct
format.
Parameters
----------
x
Data to convert.
"""
return torch.as_tensor(x, dtype=torch.float32, device=self.device)
    def _ensembler(self, x: torch.Tensor) -> torch.Tensor:
        """Aggregates and normalizes the data

        If the detector has an ensembler attribute we use it to aggregate and normalize the data.

        Parameters
        ----------
        x
            Data to aggregate and normalize.

        Returns
        -------
        `torch.Tensor` or original data without alteration

        Raises
        ------
        ThresholdNotInferredError
            If the detector is an ensemble, and the ensembler used to aggregate the outlier scores has a fittable
            component, then the detector threshold must be inferred before predictions can be made. This is because
            while the scoring functionality of the detector is fit within the `.fit` method on the training data
            the ensembler has to be fit on the validation data along with the threshold and this is done in the
            `.infer_threshold` method.
        """
        # Non-ensemble detectors have no ensembler attribute; scores pass through unchanged.
        if hasattr(self, 'ensembler') and self.ensembler is not None:
            # `type: ignore` here because self.ensembler here causes an error with mypy when using torch.jit.script.
            # For some reason it thinks self.ensembler is a torch.Tensor and therefore is not callable.
            if not torch.jit.is_scripting():
                if not self.ensembler.fitted:  # type: ignore
                    self.check_threshold_inferred()
            return self.ensembler(x)  # type: ignore
        else:
            return x
def _classify_outlier(self, scores: torch.Tensor) -> torch.Tensor:
"""Classify the data as outlier or not.
Parameters
----------
scores
Scores to classify. Larger scores indicate more likely outliers.
Returns
-------
`torch.Tensor` or ``None``
"""
return (scores > self.threshold).to(torch.int8) if self.threshold_inferred else None
def _p_vals(self, scores: torch.Tensor) -> torch.Tensor:
"""Compute p-values for the scores.
Parameters
----------
scores
Scores to compute p-values for.
Returns
-------
`torch.Tensor` or ``None``
"""
return (1 + (scores[:, None] < self.val_scores).sum(-1))/len(self.val_scores) \
if self.threshold_inferred else None
def infer_threshold(self, x: torch.Tensor, fpr: float):
    """Infer the threshold for the data. Prerequisite for outlier predictions.

    Scores the data, fits the ensembler on the resulting scores when the
    detector is an ensemble, and sets the threshold at the ``1 - fpr``
    quantile of the validation scores.

    Parameters
    ----------
    x
        Data to infer the threshold for.
    fpr
        False positive rate to use for threshold inference.

    Raises
    ------
    ValueError
        Raised if `fpr` is not in ``(0, 1)``.
    ValueError
        Raised if `fpr` is less than ``1/len(x)``.
    """
    # guard clauses: fpr must be a meaningful quantile for this sample size
    if fpr <= 0 or fpr >= 1:
        raise ValueError('`fpr` must be in `(0, 1)`.')
    min_fpr = 1/len(x)
    if fpr < min_fpr:
        raise ValueError(f'`fpr` must be greater than `1/len(x)={min_fpr}`.')
    self.val_scores = self.score(x)
    if self.ensemble:
        # the ensembler is fit on the validation scores, alongside the threshold
        self.val_scores = self.ensembler.fit(self.val_scores).transform(self.val_scores)  # type: ignore
    # 'higher' interpolation guarantees the threshold is an observed score
    self.threshold = torch.quantile(self.val_scores, 1 - fpr, interpolation='higher')
    self.threshold_inferred = True
def predict(self, x: torch.Tensor) -> TorchOutlierDetectorOutput:
    """Predict outlier labels for the data.

    Computes the outlier scores. If the detector is not fit on reference data we raise an error.
    If the threshold is inferred, the outlier labels and p-values are also computed and returned.
    Otherwise, the outlier labels and p-values are set to ``None``.

    Parameters
    ----------
    x
        Data to predict.

    Returns
    -------
    Output of the outlier detector. Includes the p-values, outlier labels, instance scores and threshold.

    Raises
    ------
    NotFittedError
        Raised if the detector is not fit on reference data.
    """
    self.check_fitted()
    raw_scores = self.score(x)
    # aggregate/normalize per-detector scores when this detector is an
    # ensemble; `_ensembler` is a pass-through for single detectors
    scores = self._ensembler(raw_scores)
    return TorchOutlierDetectorOutput(
        instance_score=scores,
        is_outlier=self._classify_outlier(scores),
        p_value=self._p_vals(scores),
        threshold_inferred=self.threshold_inferred,
        threshold=self.threshold
    )
| 8,752 | 32.408397 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/pytorch/knn.py | from typing import Optional, Union, List, Tuple
from typing_extensions import Literal
import numpy as np
import torch
from alibi_detect.od.pytorch.ensemble import Ensembler
from alibi_detect.od.pytorch.base import TorchOutlierDetector
class KNNTorch(TorchOutlierDetector):
    def __init__(
        self,
        k: Union[np.ndarray, List, Tuple, int],
        kernel: Optional[torch.nn.Module] = None,
        ensembler: Optional[Ensembler] = None,
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ):
        """PyTorch backend for KNN detector.

        Parameters
        ----------
        k
            Number of nearest neighbors to compute distance to. `k` can be a single value or
            an array of integers. If `k` is a single value the outlier score is the distance/kernel
            similarity to the `k`-th nearest neighbor. If `k` is a list then it returns the distance/kernel
            similarity to each of the specified `k` neighbors.
        kernel
            If a kernel is specified then instead of using `torch.cdist` the kernel defines the `k` nearest
            neighbor distance.
        ensembler
            If `k` is an array of integers then the ensembler must not be ``None``. Should be an instance
            of :py:obj:`alibi_detect.od.pytorch.ensemble.ensembler`. Responsible for combining
            multiple scores into a single score.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
            ``torch.device``.
        """
        super().__init__(device=device)
        self.kernel = kernel
        self.ensemble = isinstance(k, (np.ndarray, list, tuple))
        # Fix: create `ks` on the detector device in the ensemble case as well.
        # Previously only the single-`k` branch passed `device=`, leaving the
        # ensemble index tensor on the CPU irrespective of `self.device`.
        self.ks = torch.tensor(k, device=self.device) if self.ensemble \
            else torch.tensor([k], device=self.device)
        self.ensembler = ensembler

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Detect if `x` is an outlier.

        Parameters
        ----------
        x
            `torch.Tensor` with leading batch dimension.

        Returns
        -------
        `torch.Tensor` of ``bool`` values with leading batch dimension.

        Raises
        ------
        ThresholdNotInferredError
            If called before detector has had `infer_threshold` method called.
        """
        raw_scores = self.score(x)
        scores = self._ensembler(raw_scores)
        if not torch.jit.is_scripting():
            self.check_threshold_inferred()
        preds = scores > self.threshold
        return preds

    def score(self, x: torch.Tensor) -> torch.Tensor:
        """Computes the score of `x`

        Parameters
        ----------
        x
            The tensor of instances. First dimension corresponds to batch.

        Returns
        -------
        Tensor of scores for each element in `x`.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        self.check_fitted()
        # negate kernel similarities so that smaller values mean nearer neighbors,
        # matching the semantics of `torch.cdist` distances
        K = -self.kernel(x, self.x_ref) if self.kernel is not None else torch.cdist(x, self.x_ref)
        # one `topk` pass up to the largest requested k, then select each requested k
        bot_k_dists = torch.topk(K, int(torch.max(self.ks)), dim=1, largest=False)
        all_knn_dists = bot_k_dists.values[:, self.ks-1]
        return all_knn_dists if self.ensemble else all_knn_dists[:, 0]

    def fit(self, x_ref: torch.Tensor):
        """Fits the detector

        Parameters
        ----------
        x_ref
            The Dataset tensor.
        """
        self.x_ref = x_ref
        self._set_fitted()
| 3,635 | 34.647059 | 107 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/pytorch/gmm.py | from typing import Optional, Union, Dict, Type
from typing_extensions import Literal
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.od.pytorch.base import TorchOutlierDetector
from alibi_detect.models.pytorch.gmm import GMMModel
from alibi_detect.utils.pytorch.misc import get_optimizer
class GMMTorch(TorchOutlierDetector):
    # single-detector backend: scores are never ensembled
    ensemble = False

    def __init__(
        self,
        n_components: int,
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ):
        """Pytorch backend for the Gaussian Mixture Model (GMM) outlier detector.

        Parameters
        ----------
        n_components
            Number of components in gaussian mixture model.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
            passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.

        Raises
        ------
        ValueError
            If `n_components` is less than 1.
        """
        super().__init__(device=device)
        if n_components < 1:
            raise ValueError('n_components must be at least 1')
        self.n_components = n_components

    def fit(  # type: ignore[override]
        self,
        x_ref: torch.Tensor,
        optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,
        learning_rate: float = 0.1,
        max_epochs: int = 10,
        batch_size: int = 32,
        tol: float = 1e-3,
        n_iter_no_change: int = 25,
        verbose: int = 0,
    ) -> Dict:
        """Fit the GMM model.

        Parameters
        ----------
        x_ref
            Training data.
        optimizer
            Optimizer used to train the model.
        learning_rate
            Learning rate used to train the model.
        max_epochs
            Maximum number of training epochs.
        batch_size
            Batch size used to train the model.
        tol
            Convergence threshold. Training iterations will stop when the lower bound average
            gain is below this threshold.
        n_iter_no_change
            The number of iterations over which the loss must decrease by `tol` in order for
            optimization to continue.
        verbose
            Verbosity level during training. 0 is silent, 1 a progress bar.

        Returns
        -------
        Dictionary with fit results. The dictionary contains the following keys:
            - converged: bool indicating whether training converged.
            - n_epochs: number of gradient descent iterations performed.
            - lower_bound: log-likelihood lower bound.
        """
        self.model = GMMModel(self.n_components, x_ref.shape[-1]).to(self.device)
        x_ref = x_ref.to(torch.float32)
        batch_size = len(x_ref) if batch_size is None else batch_size
        dataset = TorchDataset(x_ref)
        dataloader = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=True
        )
        optimizer_instance: torch.optim.Optimizer = optimizer(  # type: ignore[call-arg]
            self.model.parameters(),
            lr=learning_rate
        )
        self.model.train()
        min_loss = None
        # Fix: initialize the no-improvement counter before the loop so it can
        # never be referenced before assignment.
        t_since_improv = 0
        converged = False
        epoch = 0
        while not converged and epoch < max_epochs:
            epoch += 1
            dl = tqdm(
                enumerate(dataloader),
                total=len(dataloader),
                disable=not verbose
            )
            loss_ma = 0
            for step, x in dl:
                x = x.to(self.device)
                nll = self.model(x).mean()
                optimizer_instance.zero_grad()
                nll.backward()
                optimizer_instance.step()
                if verbose and isinstance(dl, tqdm):
                    loss_ma = loss_ma + (nll.item() - loss_ma) / (step + 1)
                    # Fix: `epoch` is already incremented at the top of the
                    # loop, so display it directly (previously off by one,
                    # showing e.g. 'Epoch 2/10' during the first epoch).
                    dl.set_description(f'Epoch {epoch}/{max_epochs}')
                    dl.set_postfix(dict(loss_ma=loss_ma))
                if min_loss is None or nll < min_loss - tol:
                    t_since_improv = 0
                    # Fix: detach the best loss so the bookkeeping does not
                    # retain the autograd graph of this training step.
                    min_loss = nll.detach()
                else:
                    t_since_improv += 1
                if t_since_improv > n_iter_no_change:
                    converged = True
                    break
        self._set_fitted()
        return {
            'converged': converged,
            'lower_bound': self._to_frontend_dtype(min_loss),
            'n_epochs': epoch
        }

    def format_fit_kwargs(self, fit_kwargs: Dict) -> Dict:
        """Format kwargs for `fit` method.

        Parameters
        ----------
        fit_kwargs
            dictionary of kwargs to format. See `fit` method for details.

        Returns
        -------
        Formatted kwargs.
        """
        max_epochs = fit_kwargs.get('max_epochs', None)
        return dict(
            optimizer=get_optimizer(fit_kwargs.get('optimizer')),
            learning_rate=fit_kwargs.get('learning_rate', 0.1),
            batch_size=fit_kwargs.get('batch_size', None),
            # `None` means "use the default of 10" (simplified from a lambda)
            max_epochs=10 if max_epochs is None else max_epochs,
            verbose=fit_kwargs.get('verbose', 0),
            tol=fit_kwargs.get('tol', 1e-3),
            n_iter_no_change=fit_kwargs.get('n_iter_no_change', 25)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Detect if `x` is an outlier.

        Parameters
        ----------
        x
            `torch.Tensor` with leading batch dimension.

        Returns
        -------
        `torch.Tensor` of ``bool`` values with leading batch dimension.

        Raises
        ------
        ThresholdNotInferredError
            If called before detector has had `infer_threshold` method called.
        """
        scores = self.score(x)
        if not torch.jit.is_scripting():
            self.check_threshold_inferred()
        preds = scores > self.threshold
        return preds

    def score(self, x: torch.Tensor) -> torch.Tensor:
        """Computes the score of `x`

        Parameters
        ----------
        x
            `torch.Tensor` with leading batch dimension.

        Returns
        -------
        `torch.Tensor` of scores with leading batch dimension.

        Raises
        ------
        NotFittedError
            Raised if method called and detector has not been fit.
        """
        if not torch.jit.is_scripting():
            self.check_fitted()
        x = x.to(torch.float32)
        # the model returns the negative log-likelihood, used directly as the score
        preds = self.model(x.to(self.device))
        return preds
| 6,651 | 31.291262 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/pytorch/pca.py | from typing import Optional, Union, Callable
from typing_extensions import Literal
import torch
from alibi_detect.od.pytorch.base import TorchOutlierDetector
class PCATorch(TorchOutlierDetector):
    # single-detector backend: scores are never ensembled
    ensemble = False

    def __init__(
        self,
        n_components: int,
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ):
        """PyTorch backend for PCA detector.

        Parameters
        ----------
        n_components:
            The number of dimensions in the principal subspace. For linear PCA should have
            ``1 <= n_components < dim(data)``. For kernel pca should have ``1 <= n_components < len(data)``.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
            passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.

        Raises
        ------
        ValueError
            If `n_components` is less than 1.
        """
        super().__init__(device=device)
        self.n_components = n_components
        if n_components < 1:
            raise ValueError('n_components must be at least 1')

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Detect if `x` is an outlier.

        Parameters
        ----------
        x
            `torch.Tensor` with leading batch dimension.

        Returns
        -------
        `torch.Tensor` of ``bool`` values with leading batch dimension.

        Raises
        ------
        ThresholdNotInferredError
            If called before detector has had `infer_threshold` method called.
        """
        scores = self.score(x)
        if not torch.jit.is_scripting():
            self.check_threshold_inferred()
        preds = scores > self.threshold
        return preds

    def score(self, x: torch.Tensor) -> torch.Tensor:
        """Computes the score of `x`

        Parameters
        ----------
        x
            The tensor of instances. First dimension corresponds to batch.

        Returns
        -------
        Tensor of scores for each element in `x`.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        self.check_fitted()
        # `_score` is implemented by the linear/kernel subclasses
        score = self._score(x)
        return score

    def fit(self, x_ref: torch.Tensor) -> None:
        """Fits the PCA detector.

        Parameters
        ----------
        x_ref
            The Dataset tensor.
        """
        # `_fit` is implemented by the linear/kernel subclasses
        self.pcs = self._fit(x_ref)
        self._set_fitted()

    def _fit(self, x: torch.Tensor) -> torch.Tensor:
        # subclass hook: compute and return the principal components
        raise NotImplementedError

    def _score(self, x: torch.Tensor) -> torch.Tensor:
        # subclass hook: compute the outlier score from the fitted components
        raise NotImplementedError
class LinearPCATorch(PCATorch):
    def __init__(
        self,
        n_components: int,
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ):
        """Linear variant of the PyTorch backend for PCA detector.

        Parameters
        ----------
        n_components:
            The number of dimensions in the principal subspace.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
            passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
        """
        super().__init__(device=device, n_components=n_components)

    def _fit(self, x: torch.Tensor) -> torch.Tensor:
        """Compute the principal components of the reference data.

        We compute the principal components of the reference data using the covariance matrix and then
        remove the largest `n_components` eigenvectors. The remaining eigenvectors correspond to the
        invariant dimensions of the data. Changes in these dimensions are used to compute the outlier
        score which is the distance to the principal subspace spanned by the first `n_components`
        eigenvectors.

        Parameters
        ----------
        x
            The reference data.

        Returns
        -------
        The principal components of the reference data.

        Raises
        ------
        ValueError
            If `n_components` is greater than or equal to number of features
        """
        if self.n_components >= x.shape[1]:
            raise ValueError("n_components must be less than the number of features.")
        self.x_ref_mean = x.mean(0)
        # Fix: center a copy rather than `x -= mean`, which mutated the
        # caller's reference tensor in place.
        x = x - self.x_ref_mean
        cov_mat = (x.t() @ x)/(len(x)-1)
        # eigh returns eigenvalues in ascending order, so the last
        # `n_components` columns of V are the top principal directions
        _, V = torch.linalg.eigh(cov_mat)
        return V[:, :-self.n_components]

    def _score(self, x: torch.Tensor) -> torch.Tensor:
        """Compute the outlier score.

        Centers the data and projects it onto the principal components. The score is then the sum of the
        squared projections.

        Parameters
        ----------
        x
            The test data.

        Returns
        -------
        The outlier score.
        """
        x_cen = x - self.x_ref_mean
        x_pcs = x_cen @ self.pcs
        return (x_pcs**2).sum(1)
class KernelPCATorch(PCATorch):
    def __init__(
        self,
        n_components: int,
        kernel: Optional[Callable],
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ):
        """Kernel variant of the PyTorch backend for PCA detector.

        Parameters
        ----------
        n_components:
            The number of dimensions in the principal subspace.
        kernel
            Kernel function to use for outlier detection.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
            passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
        """
        super().__init__(device=device, n_components=n_components)
        self.kernel = kernel

    def _fit(self, x: torch.Tensor) -> torch.Tensor:
        """Compute the principal components of the reference data.

        We compute the principal components of the reference data using the kernel matrix and then
        return the largest `n_components` eigenvectors. These are then normalized to have length
        equal to `1/eigenvalue`. Note that this differs from the linear case where we remove the
        largest eigenvectors.

        Parameters
        ----------
        x
            The reference data.

        Returns
        -------
        The principal components of the reference data.

        Raises
        ------
        ValueError
            If `n_components` is greater than or equal to the number of reference samples.
        """
        if self.n_components >= x.shape[0]:
            raise ValueError("n_components must be less than the number of reference instances.")
        self.x_ref = x
        K = self.compute_kernel_mat(x)
        # eigh returns eigenvalues in ascending order; scaling by 1/sqrt(D)
        # normalizes each eigenvector in feature space
        D, V = torch.linalg.eigh(K)
        pcs = V / torch.sqrt(D)[None, :]
        # keep the eigenvectors with the largest eigenvalues
        return pcs[:, -self.n_components:]

    def _score(self, x: torch.Tensor) -> torch.Tensor:
        """Compute the outlier score.

        Centers the test kernel vectors w.r.t. the reference kernel statistics and
        projects them onto the principal components. The score combines the negative
        mean kernel similarity to the reference data with the negative squared norm
        of the projection, so larger values indicate more anomalous instances.

        Parameters
        ----------
        x
            The test data.

        Returns
        -------
        The outlier score.
        """
        k_xr = self.kernel(x, self.x_ref)
        k_xr_row_sums = k_xr.sum(1)
        n, m = k_xr.shape
        # center using the reference kernel column sums / total sum stored in `_fit`
        k_xr_cen = k_xr - self.k_col_sums[None, :]/m - k_xr_row_sums[:, None]/n + self.k_sum/(m*n)
        x_pcs = k_xr_cen @ self.pcs
        scores = -2 * k_xr.mean(-1) - (x_pcs**2).sum(1)
        return scores

    def compute_kernel_mat(self, x: torch.Tensor) -> torch.Tensor:
        """Computes the centered kernel matrix.

        Also caches the column sums and total sum of the uncentered kernel matrix;
        these are reused by `_score` to center test kernel vectors consistently.

        Parameters
        ----------
        x
            The reference data.

        Returns
        -------
        The centered kernel matrix.
        """
        n = len(x)
        k = self.kernel(x, x)
        self.k_col_sums = k.sum(0)
        k_row_sums = k.sum(1)
        self.k_sum = k_row_sums.sum()
        # double centering: subtract row/column means, add back the grand mean
        k_cen = k - self.k_col_sums[None, :]/n - k_row_sums[:, None]/n + self.k_sum/(n**2)
        return k_cen
| 8,313 | 30.255639 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/pytorch/__init__.py | from alibi_detect.utils.missing_optional_dependency import import_optional
KNNTorch = import_optional('alibi_detect.od.pytorch.knn', ['KNNTorch'])
LOFTorch = import_optional('alibi_detect.od.pytorch.lof', ['LOFTorch'])
MahalanobisTorch = import_optional('alibi_detect.od.pytorch.mahalanobis', ['MahalanobisTorch'])
KernelPCATorch, LinearPCATorch = import_optional('alibi_detect.od.pytorch.pca', ['KernelPCATorch', 'LinearPCATorch'])
Ensembler = import_optional('alibi_detect.od.pytorch.ensemble', ['Ensembler'])
GMMTorch = import_optional('alibi_detect.od.pytorch.gmm', ['GMMTorch'])
BgdSVMTorch, SgdSVMTorch = import_optional('alibi_detect.od.pytorch.svm', ['BgdSVMTorch', 'SgdSVMTorch'])
| 691 | 68.2 | 117 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/pytorch/ensemble.py | from abc import ABC, abstractmethod
from typing import Optional
from typing_extensions import Self
import torch
import numpy as np
from torch.nn import Module
from alibi_detect.exceptions import NotFittedError
class BaseTransformTorch(Module):
    def __init__(self):
        """Base class for transforms mapping `torch` tensors to `torch` tensors.

        Subclasses implement `transform`; `forward` delegates to it so every
        transform can also be used as a regular `torch.nn.Module`.
        """
        super().__init__()

    def transform(self, x: torch.Tensor):
        """Public transform method — must be overridden by subclasses.

        Parameters
        ----------
        x
            `torch.Tensor` array to be transformed
        """
        raise NotImplementedError()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # delegate to the subclass-provided transform
        return self.transform(x)
class FitMixinTorch(ABC):
    # set to True by `_set_fitted` once `fit` has completed
    fitted = False

    @abstractmethod
    def fit(self, x_ref: torch.Tensor) -> Self:
        """Abstract fit method.

        Parameters
        ----------
        x_ref
            `torch.Tensor` to fit object on.
        """
        pass

    def _set_fitted(self) -> Self:
        """Sets the fitted attribute to True.

        Should be called at the end of each `fit` implementation.
        """
        self.fitted = True
        return self

    def check_fitted(self):
        """Checks to make sure object has been fitted.

        Raises
        ------
        NotFittedError
            Raised if method called and object has not been fit.
        """
        # the raise path is not scriptable, so skip the check under torch.jit
        if not torch.jit.is_scripting():
            self._check_fitted()

    @torch.jit.unused
    def _check_fitted(self):
        """Raises error if parent object instance has not been fit."""
        if not self.fitted:
            raise NotFittedError(self.__class__.__name__)
class PValNormalizer(BaseTransformTorch, FitMixinTorch):
    def __init__(self):
        """Normalizer converting scores into one minus their empirical p-values.

        Must be fit on reference scores before use. For each score the transform
        returns ``1 - p`` where ``p`` is the (add-one smoothed) proportion of
        validation scores exceeding it, so outputs lie in ``(0, 1)`` and larger
        values indicate more anomalous instances.
        """
        super().__init__()
        self.val_scores = None

    def fit(self, val_scores: torch.Tensor) -> Self:
        """Store the reference scores later used to compute p-values.

        Parameters
        ----------
        val_scores
            score outputs of ensemble of detectors applied to reference data.
        """
        self.val_scores = val_scores
        return self._set_fitted()

    def transform(self, scores: torch.Tensor) -> torch.Tensor:
        """Map scores to ``1 -`` p-values.

        Parameters
        ----------
        scores
            `Torch.Tensor` of scores from ensemble of detectors.

        Returns
        -------
        `Torch.Tensor` of 1 - p-values.
        """
        self.check_fitted()
        # count, per detector column, how many validation scores exceed each score
        exceed_counts = (scores[:, None, :] < self.val_scores[None, :, :]).sum(1)
        p_vals = (exceed_counts + 1)/(len(self.val_scores) + 1)
        return 1 - p_vals
class ShiftAndScaleNormalizer(BaseTransformTorch, FitMixinTorch):
    def __init__(self):
        """Normalizer standardizing scores per detector column.

        Must be fit on reference scores before use. The transform subtracts the
        fitted mean and divides by the fitted standard deviation.
        """
        super().__init__()
        self.val_means = None
        self.val_scales = None

    def fit(self, val_scores: torch.Tensor) -> Self:
        """Compute and store the per-column mean and standard deviation.

        Parameters
        ----------
        val_scores
            `Torch.Tensor` of scores from ensemble of detectors.
        """
        self.val_means = val_scores.mean(0)[None, :]
        self.val_scales = val_scores.std(0)[None, :]
        return self._set_fitted()

    def transform(self, scores: torch.Tensor) -> torch.Tensor:
        """Standardize scores using the fitted mean and scale.

        Parameters
        ----------
        scores
            `Torch.Tensor` of scores from ensemble of detectors.

        Returns
        -------
        `Torch.Tensor` of normalized scores.
        """
        self.check_fitted()
        shifted = scores - self.val_means
        return shifted/self.val_scales
class TopKAggregator(BaseTransformTorch):
    def __init__(self, k: Optional[int] = None):
        """Aggregator returning the mean of each instance's top `k` scores.

        Parameters
        ----------
        k
            number of scores to take the mean of. If `k` is left ``None`` then will be set to
            half the number of scores passed in the forward call.
        """
        super().__init__()
        self.k = k

    def transform(self, scores: torch.Tensor) -> torch.Tensor:
        """Compute the mean of the top `k` scores per instance.

        Parameters
        ----------
        scores
            `Torch.Tensor` of scores from ensemble of detectors.

        Returns
        -------
        `Torch.Tensor` of mean of top `k` scores.
        """
        if self.k is None:
            # default: half the number of detectors, rounded up (cached on first call)
            self.k = int(np.ceil(scores.shape[1]/2))
        ordered, _ = torch.sort(scores, 1)
        top_k = ordered[:, -self.k:]
        return top_k.mean(-1)
class AverageAggregator(BaseTransformTorch):
    def __init__(self, weights: Optional[torch.Tensor] = None):
        """Aggregator returning the (optionally weighted) mean of detector scores.

        Parameters
        ----------
        weights
            Optional parameter to weight the scores. If `weights` is left ``None`` then will be set to
            a vector of ones.

        Raises
        ------
        ValueError
            If `weights` does not sum to ``1``.
        """
        super().__init__()
        if weights is not None and not np.isclose(weights.sum(), 1):
            raise ValueError("Weights must sum to 1.")
        self.weights = weights

    def transform(self, scores: torch.Tensor) -> torch.Tensor:
        """Compute the weighted average of the detector scores.

        Parameters
        ----------
        scores
            `Torch.Tensor` of scores from ensemble of detectors.

        Returns
        -------
        `Torch.Tensor` of mean of scores.
        """
        if self.weights is None:
            # default to uniform weights, created lazily on the scores' device
            n_detectors = scores.shape[-1]
            self.weights = torch.ones(n_detectors, device=scores.device)/n_detectors
        return torch.matmul(scores, self.weights)
class MaxAggregator(BaseTransformTorch):
    def __init__(self):
        """Aggregator returning the maximum of the detector scores per instance."""
        super().__init__()

    def transform(self, scores: torch.Tensor) -> torch.Tensor:
        """Take the maximum score over the detectors for each instance.

        Parameters
        ----------
        scores
            `Torch.Tensor` of scores from ensemble of detectors.

        Returns
        -------
        `Torch.Tensor` of maximum scores.
        """
        return torch.max(scores, dim=-1).values
class MinAggregator(BaseTransformTorch):
    def __init__(self):
        """Aggregator returning the minimum of the detector scores per instance."""
        super().__init__()

    def transform(self, scores: torch.Tensor) -> torch.Tensor:
        """Take the minimum score over the detectors for each instance.

        Parameters
        ----------
        scores
            `Torch.Tensor` of scores from ensemble of detectors.

        Returns
        -------
        `Torch.Tensor` of minimum scores.
        """
        return torch.min(scores, dim=-1).values
class Ensembler(BaseTransformTorch, FitMixinTorch):
    def __init__(self,
                 normalizer: Optional[BaseTransformTorch] = None,
                 aggregator: Optional[BaseTransformTorch] = None):
        """An Ensembler applies normalization and aggregation operations to the scores of an ensemble of detectors.

        Parameters
        ----------
        normalizer
            `BaseFittedTransformTorch` object to normalize the scores. If ``None`` then no normalization
            is applied.
        aggregator
            `BaseTransformTorch` object to aggregate the scores. If ``None`` defaults to `AverageAggregator`.
        """
        super().__init__()
        self.normalizer = normalizer
        if self.normalizer is None:
            # nothing to fit when there is no normalizer
            self.fitted = True
        if aggregator is None:
            aggregator = AverageAggregator()
        self.aggregator = aggregator

    def transform(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the normalizer and aggregator to the scores.

        Parameters
        ----------
        x
            `Torch.Tensor` of scores from ensemble of detectors.

        Returns
        -------
        `Torch.Tensor` of aggregated and normalized scores.
        """
        # normalize first (if configured), then aggregate to a single score per instance
        if self.normalizer is not None:
            x = self.normalizer(x)
        x = self.aggregator(x)
        return x

    def fit(self, x: torch.Tensor) -> Self:
        """Fit the normalizer to the scores.

        Parameters
        ----------
        x
            `Torch.Tensor` of scores from ensemble of detectors.
        """
        if self.normalizer is not None:
            self.normalizer.fit(x)  # type: ignore
        return self._set_fitted()
| 9,337 | 28.644444 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/pytorch/mahalanobis.py | from typing import Optional, Union
from typing_extensions import Literal
import torch
from alibi_detect.od.pytorch.base import TorchOutlierDetector
class MahalanobisTorch(TorchOutlierDetector):
    # single-detector backend: scores are never ensembled
    ensemble = False

    def __init__(
        self,
        min_eigenvalue: float = 1e-6,
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ):
        """PyTorch backend for Mahalanobis detector.

        Parameters
        ----------
        min_eigenvalue
            Eigenvectors with eigenvalues below this value will be discarded.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
            passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
        """
        super().__init__(device=device)
        self.min_eigenvalue = min_eigenvalue

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Detect if `x` is an outlier.

        Parameters
        ----------
        x
            `torch.Tensor` with leading batch dimension.

        Returns
        -------
        `torch.Tensor` of ``bool`` values with leading batch dimension.

        Raises
        ------
        ThresholdNotInferredError
            If called before detector has had `infer_threshold` method called.
        """
        scores = self.score(x)
        if not torch.jit.is_scripting():
            self.check_threshold_inferred()
        preds = scores > self.threshold
        return preds

    def score(self, x: torch.Tensor) -> torch.Tensor:
        """Computes the score of `x`

        The score is the squared norm of the centered data projected onto the
        whitened principal components, i.e. the Mahalanobis distance from the
        mean of the reference data.

        Parameters
        ----------
        x
            The tensor of instances. First dimension corresponds to batch.

        Returns
        -------
        Tensor of scores for each element in `x`.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        self.check_fitted()
        x_pcs = self._compute_linear_proj(x)
        return (x_pcs**2).sum(-1)

    def fit(self, x_ref: torch.Tensor):
        """Fits the detector

        Parameters
        ----------
        x_ref
            The Dataset tensor.
        """
        self.x_ref = x_ref
        self._compute_linear_pcs(self.x_ref)
        self._set_fitted()

    def _compute_linear_pcs(self, x: torch.Tensor):
        """Computes the principal components of the data.

        Eigenvectors with eigenvalues below `min_eigenvalue` are discarded and the
        remaining ones are scaled by ``1/sqrt(eigenvalue)`` (whitening), so squared
        projections directly give Mahalanobis distances.

        Parameters
        ----------
        x
            The reference dataset.
        """
        self.means = x.mean(0)
        x = x - self.means
        cov_mat = (x.t() @ x)/(len(x)-1)
        D, V = torch.linalg.eigh(cov_mat)
        # drop near-zero eigendirections for numerical stability
        non_zero_inds = D > self.min_eigenvalue
        self.pcs = V[:, non_zero_inds] / D[None, non_zero_inds].sqrt()

    def _compute_linear_proj(self, x: torch.Tensor) -> torch.Tensor:
        """Projects the data point being tested onto the principal components.

        Parameters
        ----------
        x
            The data point being tested.
        """
        x_cen = x - self.means
        x_proj = x_cen @ self.pcs
        return x_proj
| 3,162 | 27.495495 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/pytorch/lof.py | from typing import Optional, Union, List, Tuple
from typing_extensions import Literal
import numpy as np
import torch
from alibi_detect.od.pytorch.ensemble import Ensembler
from alibi_detect.od.pytorch.base import TorchOutlierDetector
class LOFTorch(TorchOutlierDetector):
    def __init__(
        self,
        k: Union[np.ndarray, List, Tuple, int],
        kernel: Optional[torch.nn.Module] = None,
        ensembler: Optional[Ensembler] = None,
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ):
        """PyTorch backend for LOF detector.

        Parameters
        ----------
        k
            Number of nearest neighbors used to compute the local outlier factor. `k` can be a single
            value or an array of integers. If `k` is a single value the score method uses the
            distance/kernel similarity to the `k`-th nearest neighbor. If `k` is a list then it uses
            the distance/kernel similarity to each of the specified `k` neighbors.
        kernel
            If a kernel is specified then instead of using `torch.cdist` the kernel defines the `k` nearest
            neighbor distance.
        ensembler
            If `k` is an array of integers then the ensembler must not be ``None``. Should be an instance
            of :py:obj:`alibi_detect.od.pytorch.ensemble.ensembler`. Responsible for combining
            multiple scores into a single score.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
            ``torch.device``.
        """
        # use `super()` for consistency with the other backends in this package
        super().__init__(device=device)
        self.kernel = kernel
        self.ensemble = isinstance(k, (np.ndarray, list, tuple))
        # Fix: create `ks` on the detector device in the ensemble case as well.
        # Previously only the single-`k` branch passed `device=`, leaving the
        # ensemble index tensor on the CPU irrespective of `self.device`.
        self.ks = torch.tensor(k, device=self.device) if self.ensemble \
            else torch.tensor([k], device=self.device)
        self.ensembler = ensembler

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Detect if `x` is an outlier.

        Parameters
        ----------
        x
            `torch.Tensor` with leading batch dimension.

        Returns
        -------
        `torch.Tensor` of ``bool`` values with leading batch dimension.

        Raises
        ------
        ThresholdNotInferredError
            If called before detector has had `infer_threshold` method called.
        """
        raw_scores = self.score(x)
        scores = self._ensembler(raw_scores)
        if not torch.jit.is_scripting():
            self.check_threshold_inferred()
        preds = scores > self.threshold
        return preds

    def _make_mask(self, reachabilities: torch.Tensor):
        """Generate a mask for computing the average reachability.

        If k is an array then we need to compute the average reachability for each k separately. To do
        this we use a mask to weight the reachability of each k-close neighbor by 1/k and the rest to 0.
        """
        mask = torch.zeros_like(reachabilities[0], device=self.device)
        for i, k in enumerate(self.ks):
            mask[:k, i] = torch.ones(k, device=self.device)/k
        return mask

    def _compute_K(self, x, y):
        """Compute the distance matrix matrix between `x` and `y`.

        With a kernel, `exp(-kernel)` maps similarity to a distance-like
        quantity; otherwise plain Euclidean distances are used.
        """
        return torch.exp(-self.kernel(x, y)) if self.kernel is not None else torch.cdist(x, y)

    def score(self, x: torch.Tensor) -> torch.Tensor:
        """Computes the score of `x`

        Parameters
        ----------
        x
            The tensor of instances. First dimension corresponds to batch.

        Returns
        -------
        Tensor of scores for each element in `x`.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        self.check_fitted()
        # compute the distance matrix between x and x_ref
        K = self._compute_K(x, self.x_ref)

        # compute k nearest neighbors for maximum k in self.ks
        max_k = torch.max(self.ks)
        bot_k_items = torch.topk(K, int(max_k), dim=1, largest=False)
        bot_k_inds, bot_k_dists = bot_k_items.indices, bot_k_items.values

        # To compute the reachabilities we get the k-distances of each object in the instances
        # k nearest neighbors. Then we take the maximum of their k-distances and the distance
        # to the instance.
        lower_bounds = self.knn_dists_ref[bot_k_inds]
        reachabilities = torch.max(bot_k_dists[:, :, None], lower_bounds)

        # Compute the average reachability for each instance. We use a mask to manage each k in
        # self.ks separately.
        mask = self._make_mask(reachabilities)
        avg_reachabilities = (reachabilities*mask[None, :, :]).sum(1)

        # Compute the LOF score for each instance. Note we don't take 1/avg_reachabilities as
        # avg_reachabilities is the denominator in the LOF formula.
        factors = (self.ref_inv_avg_reachabilities[bot_k_inds] * mask[None, :, :]).sum(1)
        lofs = (avg_reachabilities * factors)
        return lofs if self.ensemble else lofs[:, 0]

    def fit(self, x_ref: torch.Tensor):
        """Fits the detector

        Precomputes the reference k-distances and inverse average reachabilities
        needed by `score`.

        Parameters
        ----------
        x_ref
            The Dataset tensor.
        """
        # compute the distance matrix
        K = self._compute_K(x_ref, x_ref)

        # set diagonal to max distance to prevent torch.topk from returning the instance itself
        K += torch.eye(len(K), device=self.device) * torch.max(K)

        # compute k nearest neighbors for maximum k in self.ks
        max_k = torch.max(self.ks)
        bot_k_items = torch.topk(K, int(max_k), dim=1, largest=False)
        bot_k_inds, bot_k_dists = bot_k_items.indices, bot_k_items.values

        # store the k-distances for each instance for each k.
        self.knn_dists_ref = bot_k_dists[:, self.ks-1]

        # To compute the reachabilities we get the k-distances of each object in the instances
        # k nearest neighbors. Then we take the maximum of their k-distances and the distance
        # to the instance.
        lower_bounds = self.knn_dists_ref[bot_k_inds]
        reachabilities = torch.max(bot_k_dists[:, :, None], lower_bounds)

        # Compute the average reachability for each instance. We use a mask to manage each k in
        # self.ks separately.
        mask = self._make_mask(reachabilities)
        avg_reachabilities = (reachabilities*mask[None, :, :]).sum(1)

        # Compute the inverse average reachability for each instance.
        self.ref_inv_avg_reachabilities = 1/avg_reachabilities

        self.x_ref = x_ref
        self._set_fitted()
| 6,709 | 39.666667 | 107 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/pytorch/embedding.py | from functools import partial
import torch
import torch.nn as nn
from transformers import AutoModel, AutoConfig
from typing import Dict, List
def hidden_state_embedding(hidden_states: torch.Tensor, layers: List[int],
                           use_cls: bool, reduce_mean: bool = True) -> torch.Tensor:
    """
    Extract embeddings from hidden attention state layers.
    Parameters
    ----------
    hidden_states
        Attention hidden states in the transformer model.
    layers
        List of layers to use for the embedding.
    use_cls
        Whether to use the next sentence token (CLS) to extract the embeddings.
    reduce_mean
        Whether to take the mean of the output tensor.
    Returns
    -------
    Tensor with embeddings.
    """
    # Keep only the CLS token per layer if requested, otherwise the full sequence.
    selected = [
        hidden_states[layer][:, 0:1, :] if use_cls else hidden_states[layer]
        for layer in layers
    ]
    stacked = torch.cat(selected, dim=1)  # type: ignore
    # Average over the concatenated token/layer axis when reducing.
    return stacked.mean(dim=1) if reduce_mean else stacked  # type: ignore
class TransformerEmbedding(nn.Module):
    """Compute text embeddings with a (pretrained) HuggingFace transformer.

    The embedding returned by `forward` is selected by `embedding_type`:
    'pooler_output', 'last_hidden_state' (mean over tokens), 'hidden_state'
    (averaged hidden states of the layers in `layers`) or 'hidden_state_cls'
    (same, but using only the CLS token).
    """
    def __init__(self, model_name_or_path: str, embedding_type: str, layers: List[int] = None) -> None:
        super().__init__()
        # All hidden states are needed for the 'hidden_state*' embedding types.
        self.config = AutoConfig.from_pretrained(model_name_or_path, output_hidden_states=True)
        self.model = AutoModel.from_pretrained(model_name_or_path, config=self.config)
        self.emb_type = embedding_type
        self.hs_emb = partial(hidden_state_embedding, layers=layers, use_cls=embedding_type.endswith('cls'))
    def forward(self, tokens: Dict[str, torch.Tensor]) -> torch.Tensor:
        output = self.model(**tokens)
        if self.emb_type == 'pooler_output':
            return output.pooler_output
        if self.emb_type == 'last_hidden_state':
            return output.last_hidden_state.mean(dim=1)
        if self.emb_type.startswith('hidden_state'):
            # Index 0 holds the embedding-layer output; keep the attention blocks only.
            return self.hs_emb(output.hidden_states[1:])
        raise ValueError('embedding_type needs to be one of pooler_output, '
                         'last_hidden_state, hidden_state, or hidden_state_cls.')
| 2,138 | 38.611111 | 108 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/pytorch/gmm.py | from torch import nn
import torch
class GMMModel(nn.Module):
    def __init__(self, n_components: int, dim: int) -> None:
        """Gaussian Mixture Model (GMM).
        Parameters
        ----------
        n_components
            The number of mixture components.
        dim
            The dimensionality of the data.
        """
        super().__init__()
        # Unconstrained logits; softmax turns them into mixture weights.
        self.weight_logits = nn.Parameter(torch.zeros(n_components))
        self.means = nn.Parameter(torch.randn(n_components, dim))
        # Factor F of the inverse covariance; F @ F^T is PSD by construction.
        self.inv_cov_factor = nn.Parameter(torch.randn(n_components, dim, dim)/10)
    @property
    def _inv_cov(self) -> torch.Tensor:
        # Inverse covariance per component, guaranteed symmetric PSD.
        return torch.bmm(self.inv_cov_factor, self.inv_cov_factor.transpose(1, 2))
    @property
    def _weights(self) -> torch.Tensor:
        # Mixture weights, normalized over components.
        return nn.functional.softmax(self.weight_logits, dim=0)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute the log-likelihood of the data.
        Parameters
        ----------
        x
            Data to score.
        """
        inv_cov = self._inv_cov
        det = torch.linalg.det(inv_cov)  # Note det(A^-1)=1/det(A)
        diffs = x[:, None, :] - self.means[None, :, :]
        # Mahalanobis-style quadratic form per instance and component.
        quad = torch.einsum(
            'bke,bke->bk', (torch.einsum('bkd,kde->bke', diffs, inv_cov), diffs)
        )
        likelihood = ((-0.5 * quad).exp() * det[None, :] * self._weights[None, :]).sum(-1)
        return -likelihood.log()
| 1,402 | 31.627907 | 106 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/pytorch/__init__.py | from alibi_detect.utils.missing_optional_dependency import import_optional
# Public API of alibi_detect.models.pytorch. These symbols live in modules that
# require optional dependencies (torch / transformers), so they are resolved via
# `import_optional` rather than imported directly.
# NOTE(review): `import_optional` presumably returns a placeholder that raises an
# informative error on use when the optional dependency is missing — confirm in
# alibi_detect.utils.missing_optional_dependency.
TransformerEmbedding = import_optional(
    'alibi_detect.models.pytorch.embedding',
    names=['TransformerEmbedding'])
trainer = import_optional(
    'alibi_detect.models.pytorch.trainer',
    names=['trainer'])
__all__ = [
    "TransformerEmbedding",
    "trainer"
]
| 349 | 20.875 | 74 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/pytorch/trainer.py | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from typing import Callable, Union
def trainer(
        model: Union[nn.Module, nn.Sequential],
        loss_fn: Callable,
        dataloader: DataLoader,
        device: torch.device,
        optimizer: Callable = torch.optim.Adam,
        learning_rate: float = 1e-3,
        preprocess_fn: Callable = None,
        epochs: int = 20,
        reg_loss_fn: Callable = (lambda model: 0),
        verbose: int = 1,
) -> None:
    """
    Train PyTorch model.
    Parameters
    ----------
    model
        Model to train.
    loss_fn
        Loss function used for training.
    dataloader
        PyTorch dataloader.
    device
        Device used for training.
    optimizer
        Optimizer used for training.
    learning_rate
        Optimizer's learning rate.
    preprocess_fn
        Preprocessing function applied to each training batch.
    epochs
        Number of training epochs.
    reg_loss_fn
        The regularisation term reg_loss_fn(model) is added to the loss function being optimized.
    verbose
        Whether to print training progress.
    """
    opt = optimizer(model.parameters(), lr=learning_rate)
    model.train()
    for epoch in range(epochs):
        # Only wrap in a progress bar when verbose output is requested.
        batches = enumerate(dataloader)
        if verbose == 1:
            batches = tqdm(batches, total=len(dataloader))
        loss_ma = 0
        for step, (x, y) in batches:
            if isinstance(preprocess_fn, Callable):  # type: ignore
                x = preprocess_fn(x)
            x, y = x.to(device), y.to(device)
            opt.zero_grad()  # type: ignore
            loss = loss_fn(model(x), y) + reg_loss_fn(model)
            loss.backward()
            opt.step()  # type: ignore
            if verbose == 1:
                # Running mean of the loss for the progress bar display.
                loss_ma += (loss.item() - loss_ma) / (step + 1)
                batches.set_description(f'Epoch {epoch + 1}/{epochs}')
                batches.set_postfix(dict(loss_ma=loss_ma))
| 2,027 | 30.6875 | 106 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/tensorflow/embedding.py | from functools import partial
import tensorflow as tf
from transformers import TFAutoModel, AutoConfig
from typing import Dict, List
def hidden_state_embedding(hidden_states: tf.Tensor, layers: List[int],
                           use_cls: bool, reduce_mean: bool = True) -> tf.Tensor:
    """
    Extract embeddings from hidden attention state layers.
    Parameters
    ----------
    hidden_states
        Attention hidden states in the transformer model.
    layers
        List of layers to use for the embedding.
    use_cls
        Whether to use the next sentence token (CLS) to extract the embeddings.
    reduce_mean
        Whether to take the mean of the output tensor.
    Returns
    -------
    Tensor with embeddings.
    """
    # Keep only the CLS token per layer if requested, otherwise the full sequence.
    selected = [
        hidden_states[layer][:, 0:1, :] if use_cls else hidden_states[layer]
        for layer in layers
    ]
    stacked = tf.concat(selected, axis=1)
    # Average over the concatenated token/layer axis when reducing.
    return tf.reduce_mean(stacked, axis=1) if reduce_mean else stacked
class TransformerEmbedding(tf.keras.Model):
    def __init__(
            self,
            model_name_or_path: str,
            embedding_type: str,
            layers: List[int] = None
    ) -> None:
        """
        Extract text embeddings from transformer models.
        Parameters
        ----------
        model_name_or_path
            Name of or path to the model.
        embedding_type
            Type of embedding to extract. Needs to be one of pooler_output,
            last_hidden_state, hidden_state or hidden_state_cls.
            From the HuggingFace documentation:
            - pooler_output
                Last layer hidden-state of the first token of the sequence
                (classification token), further processed by a Linear layer and a
                Tanh activation trained on the next sentence prediction objective.
                This is usually not a good summary of the semantic content of the
                input; averaging or pooling the sequence of hidden states for the
                whole input is often better.
            - last_hidden_state
                Sequence of hidden-states at the output of the last layer of the model.
            - hidden_state
                Hidden states of the model at the output of each layer.
            - hidden_state_cls
                See hidden_state but use the CLS token output.
        layers
            If "hidden_state" or "hidden_state_cls" is used as embedding
            type, layers has to be a list with int's referring to the hidden layers
            used to extract the embedding.
        """
        super(TransformerEmbedding, self).__init__()
        # All hidden states are needed for the 'hidden_state*' embedding types.
        self.config = AutoConfig.from_pretrained(model_name_or_path, output_hidden_states=True)
        self.model = TFAutoModel.from_pretrained(model_name_or_path, config=self.config)
        self.emb_type = embedding_type
        self.hs_emb = partial(hidden_state_embedding, layers=layers, use_cls=embedding_type.endswith('cls'))
    def call(self, tokens: Dict[str, tf.Tensor]) -> tf.Tensor:
        output = self.model(tokens)
        if self.emb_type == 'pooler_output':
            return output.pooler_output
        if self.emb_type == 'last_hidden_state':
            return tf.reduce_mean(output.last_hidden_state, axis=1)
        if self.emb_type.startswith('hidden_state'):
            # Index 0 holds the embedding-layer output; keep the attention blocks only.
            return self.hs_emb(output.hidden_states[1:])
        raise ValueError('embedding_type needs to be one of pooler_output, '
                         'last_hidden_state, hidden_state, or hidden_state_cls.')
| 3,718 | 40.322222 | 108 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/tensorflow/pixelcnn.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import warnings
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.distributions import categorical
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import independent
from tensorflow_probability.python.distributions import logistic
from tensorflow_probability.python.distributions import mixture_same_family
from tensorflow_probability.python.distributions import quantized_distribution
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'Shift',
]
class WeightNorm(tf.keras.layers.Wrapper):
    def __init__(self, layer, data_init: bool = True, **kwargs):
        """Layer wrapper to decouple magnitude and direction of the layer's weights.
        This wrapper reparameterizes a layer by decoupling the weight's
        magnitude and direction. This speeds up convergence by improving the
        conditioning of the optimization problem. It has an optional data-dependent
        initialization scheme, in which initial values of weights are set as functions
        of the first minibatch of data. Both the weight normalization and data-
        dependent initialization are described in [Salimans and Kingma (2016)][1].
        Parameters
        ----------
        layer
            A `tf.keras.layers.Layer` instance. Supported layer types are
            `Dense`, `Conv2D`, and `Conv2DTranspose`. Layers with multiple inputs
            are not supported.
        data_init
            If `True` use data dependent variable initialization.
        **kwargs
            Additional keyword args passed to `tf.keras.layers.Wrapper`.
        Raises
        ------
        ValueError
            If `layer` is not a `tf.keras.layers.Layer` instance.
        """
        if not isinstance(layer, tf.keras.layers.Layer):
            raise ValueError(
                'Please initialize `WeightNorm` layer with a `tf.keras.layers.Layer` '
                'instance. You passed: {input}'.format(input=layer)
            )
        layer_type = type(layer).__name__
        if layer_type not in ['Dense', 'Conv2D', 'Conv2DTranspose']:
            warnings.warn('`WeightNorm` is tested only for `Dense`, `Conv2D`, and '
                          '`Conv2DTranspose` layers. You passed a layer of type `{}`'
                          .format(layer_type))
        super(WeightNorm, self).__init__(layer, **kwargs)
        self.data_init = data_init
        self._track_trackable(layer, name='layer')
        # NOTE(review): for `Conv2DTranspose` the filter axis is taken to be -2
        # rather than -1 — presumably because its kernel layout places the output
        # filters on the second-to-last axis; confirm against the Keras kernel shape.
        self.filter_axis = -2 if layer_type == 'Conv2DTranspose' else -1
    def _compute_weights(self):
        """Generate weights with normalization."""
        # Determine the axis along which to expand `g` so that `g` broadcasts to
        # the shape of `v`.
        new_axis = -self.filter_axis - 3
        # kernel = g * v / ||v||, normalized over all non-filter axes.
        self.layer.kernel = tf.nn.l2_normalize(self.v, axis=self.kernel_norm_axes) * tf.expand_dims(self.g, new_axis)
    def _init_norm(self):
        """Set the norm of the weight vector."""
        kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.v), axis=self.kernel_norm_axes))
        self.g.assign(kernel_norm)
    def _data_dep_init(self, inputs):
        """Data dependent initialization."""
        # Normalize kernel first so that calling the layer calculates
        # `tf.dot(v, x)/tf.norm(v)` as in (5) in ([Salimans and Kingma, 2016][1]).
        self._compute_weights()
        # Temporarily disable the activation (and zero the bias) so the raw
        # pre-activation statistics can be measured on this minibatch.
        activation = self.layer.activation
        self.layer.activation = None
        use_bias = self.layer.bias is not None
        if use_bias:
            bias = self.layer.bias
            self.layer.bias = tf.zeros_like(bias)
        # Since the bias is initialized as zero, setting the activation to zero and
        # calling the initialized layer (with normalized kernel) yields the correct
        # computation ((5) in Salimans and Kingma (2016))
        x_init = self.layer(inputs)
        norm_axes_out = list(range(x_init.shape.rank - 1))
        m_init, v_init = tf.nn.moments(x_init, norm_axes_out)
        # 1e-10 guards against division by zero for near-constant activations.
        scale_init = 1. / tf.sqrt(v_init + 1e-10)
        self.g.assign(self.g * scale_init)
        if use_bias:
            self.layer.bias = bias
            self.layer.bias.assign(-m_init * scale_init)
        self.layer.activation = activation
    def build(self, input_shape=None):
        """Build `Layer`.
        Parameters
        ----------
        input_shape
            The shape of the input to `self.layer`.
        Raises
        ------
        ValueError
            If `Layer` does not contain a `kernel` of weights.
        """
        input_shape = tf.TensorShape(input_shape).as_list()
        input_shape[0] = None
        self.input_spec = tf.keras.layers.InputSpec(shape=input_shape)
        if not self.layer.built:
            self.layer.build(input_shape)
            if not hasattr(self.layer, 'kernel'):
                raise ValueError('`WeightNorm` must wrap a layer that contains a `kernel` for weights')
            # Normalize over every kernel axis except the filter axis.
            self.kernel_norm_axes = list(range(self.layer.kernel.shape.ndims))
            self.kernel_norm_axes.pop(self.filter_axis)
            self.v = self.layer.kernel
            # to avoid a duplicate `kernel` variable after `build` is called
            self.layer.kernel = None
            # `g` holds one magnitude per filter.
            self.g = self.add_weight(
                name='g',
                shape=(int(self.v.shape[self.filter_axis]),),
                initializer='ones',
                dtype=self.v.dtype,
                trainable=True
            )
            # Boolean flag tracked as a variable so it survives tf.function tracing.
            self.initialized = self.add_weight(
                name='initialized',
                dtype=tf.bool,
                trainable=False
            )
            self.initialized.assign(False)
        super(WeightNorm, self).build()
    @tf.function
    def call(self, inputs):
        """Call `Layer`."""
        # One-time initialization of `g` on the first batch seen.
        if not self.initialized:
            if self.data_init:
                self._data_dep_init(inputs)
            else: # initialize `g` as the norm of the initialized kernel
                self._init_norm()
            self.initialized.assign(True)
        self._compute_weights()
        output = self.layer(inputs)
        return output
    def compute_output_shape(self, input_shape):
        return tf.TensorShape(self.layer.compute_output_shape(input_shape).as_list())
class Shift(bijector.Bijector):
    """Bijector computing `Y = X + shift` for a numeric `shift` tensor."""
    def __init__(self,
                 shift,
                 validate_args=False,
                 name='shift'):
        """Instantiates the `Shift` bijector which computes `Y = g(X; shift) = X + shift`
        where `shift` is a numeric `Tensor`.
        Parameters
        ----------
        shift
            Floating-point `Tensor`.
        validate_args
            Python `bool` indicating whether arguments should be checked for correctness.
        name
            Python `str` name given to ops managed by this object.
        """
        with tf.name_scope(name) as name:
            common_dtype = dtype_util.common_dtype([shift], dtype_hint=tf.float32)
            self._shift = tensor_util.convert_nonref_to_tensor(shift, dtype=common_dtype, name='shift')
            super().__init__(
                forward_min_event_ndims=0,
                is_constant_jacobian=True,
                dtype=common_dtype,
                validate_args=validate_args,
                name=name
            )
    @property
    def shift(self):
        """The `shift` `Tensor` in `Y = X + shift`."""
        return self._shift
    @classmethod
    def _is_increasing(cls):
        # Adding a constant preserves ordering.
        return True
    def _forward(self, x):
        return x + self.shift
    def _inverse(self, y):
        return y - self.shift
    def _forward_log_det_jacobian(self, x):
        # The Jacobian of a pure shift is the identity, so its log-determinant is
        # zero. Since `is_constant_jacobian=True`, a scalar is enough: TFP tiles it
        # to match `event_ndims`.
        return tf.zeros([], dtype=dtype_util.base_dtype(x.dtype))
class PixelCNN(distribution.Distribution):
    """Pixel CNN++ distribution over images (Salimans et al., 2017)."""
    def __init__(self,
                 image_shape: tuple,
                 conditional_shape: tuple = None,
                 num_resnet: int = 5,
                 num_hierarchies: int = 3,
                 num_filters: int = 160,
                 num_logistic_mix: int = 10,
                 receptive_field_dims: tuple = (3, 3),
                 dropout_p: float = 0.5,
                 resnet_activation: str = 'concat_elu',
                 l2_weight: float = 0.,
                 use_weight_norm: bool = True,
                 use_data_init: bool = True,
                 high: int = 255,
                 low: int = 0,
                 dtype=tf.float32,
                 name: str = 'PixelCNN') -> None:
        """
        Construct Pixel CNN++ distribution.
        Parameters
        ----------
        image_shape
            3D `TensorShape` or tuple for the `[height, width, channels]` dimensions of the image.
        conditional_shape
            `TensorShape` or tuple for the shape of the conditional input, or `None` if there is no conditional input.
        num_resnet
            The number of layers (shown in Figure 2 of [2]) within each highest-level block of Figure 2 of [1].
        num_hierarchies
            The number of highest-level blocks (separated by expansions/contractions of dimensions in Figure 2 of [1].)
        num_filters
            The number of convolutional filters.
        num_logistic_mix
            Number of components in the logistic mixture distribution.
        receptive_field_dims
            Height and width in pixels of the receptive field of the convolutional layers above and to the left
            of a given pixel. The width (second element of the tuple) should be odd. Figure 1 (middle) of [2]
            shows a receptive field of (3, 5) (the row containing the current pixel is included in the height).
            The default of (3, 3) was used to produce the results in [1].
        dropout_p
            The dropout probability. Should be between 0 and 1.
        resnet_activation
            The type of activation to use in the resnet blocks. May be 'concat_elu', 'elu', or 'relu'.
        l2_weight
            The L2 regularization weight.
        use_weight_norm
            If `True` then use weight normalization (works only in Eager mode).
        use_data_init
            If `True` then use data-dependent initialization (has no effect if `use_weight_norm` is `False`).
        high
            The maximum value of the input data (255 for an 8-bit image).
        low
            The minimum value of the input data.
        dtype
            Data type of the `Distribution`.
        name
            The name of the `Distribution`.
        """
        parameters = dict(locals())
        with tf.name_scope(name) as name:
            super(PixelCNN, self).__init__(
                dtype=dtype,
                reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
                validate_args=False,
                allow_nan_stats=True,
                parameters=parameters,
                name=name
            )
            if not tensorshape_util.is_fully_defined(image_shape):
                raise ValueError('`image_shape` must be fully defined.')
            if conditional_shape is not None and not tensorshape_util.is_fully_defined(conditional_shape):
                raise ValueError('`conditional_shape` must be fully defined`')
            if tensorshape_util.rank(image_shape) != 3:
                raise ValueError('`image_shape` must have length 3, representing [height, width, channels] dimensions.')
            self._high = tf.cast(high, self.dtype)
            self._low = tf.cast(low, self.dtype)
            self._num_logistic_mix = num_logistic_mix
            self.network = _PixelCNNNetwork(
                dropout_p=dropout_p,
                num_resnet=num_resnet,
                num_hierarchies=num_hierarchies,
                num_filters=num_filters,
                num_logistic_mix=num_logistic_mix,
                receptive_field_dims=receptive_field_dims,
                resnet_activation=resnet_activation,
                l2_weight=l2_weight,
                use_weight_norm=use_weight_norm,
                use_data_init=use_data_init,
                dtype=dtype
            )
            # Build the network eagerly with a `None` (unknown) batch dimension.
            image_input_shape = tensorshape_util.concatenate([None], image_shape)
            if conditional_shape is None:
                input_shape = image_input_shape
            else:
                conditional_input_shape = tensorshape_util.concatenate([None], conditional_shape)
                input_shape = [image_input_shape, conditional_input_shape]
            self.image_shape = image_shape
            self.conditional_shape = conditional_shape
            self.network.build(input_shape)
    def _make_mixture_dist(self, component_logits, locs, scales, return_per_feature: bool = False):
        """Builds a mixture of quantized logistic distributions.
        Parameters
        ----------
        component_logits
            4D `Tensor` of logits for the Categorical distribution
            over Quantized Logistic mixture components. Dimensions are `[batch_size,
            height, width, num_logistic_mix]`.
        locs
            4D `Tensor` of location parameters for the Quantized Logistic
            mixture components. Dimensions are `[batch_size, height, width,
            num_logistic_mix, num_channels]`.
        scales
            4D `Tensor` of location parameters for the Quantized Logistic
            mixture components. Dimensions are `[batch_size, height, width,
            num_logistic_mix, num_channels]`.
        return_per_feature
            If True, return per pixel level log prob.
        Returns
        -------
        dist
            A quantized logistic mixture `tfp.distribution` over the input data.
        """
        mixture_distribution = categorical.Categorical(logits=component_logits)
        # Convert distribution parameters for pixel values in
        # `[self._low, self._high]` for use with `QuantizedDistribution`
        locs = self._low + 0.5 * (self._high - self._low) * (locs + 1.)
        scales *= 0.5 * (self._high - self._low)
        logistic_dist = quantized_distribution.QuantizedDistribution(
            distribution=transformed_distribution.TransformedDistribution(
                distribution=logistic.Logistic(loc=locs, scale=scales),
                bijector=Shift(shift=tf.cast(-0.5, self.dtype))),
            low=self._low, high=self._high)
        # mixture with logistics for the loc and scale on each pixel for each component
        dist = mixture_same_family.MixtureSameFamily(
            mixture_distribution=mixture_distribution,
            components_distribution=independent.Independent(logistic_dist, reinterpreted_batch_ndims=1))
        if return_per_feature:
            return dist
        else:
            # Treat height and width as event dimensions: one log-prob per image.
            return independent.Independent(dist, reinterpreted_batch_ndims=2)
    def _log_prob(self, value, conditional_input=None, training=None, return_per_feature=False):
        """Log probability function with optional conditional input.
        Calculates the log probability of a batch of data under the modeled
        distribution (or conditional distribution, if conditional input is
        provided).
        Parameters
        ----------
        value
            `Tensor` or Numpy array of image data. May have leading batch
            dimension(s), which must broadcast to the leading batch dimensions of
            `conditional_input`.
        conditional_input
            `Tensor` on which to condition the distribution (e.g.
            class labels), or `None`. May have leading batch dimension(s), which
            must broadcast to the leading batch dimensions of `value`.
        training
            `bool` or `None`. If `bool`, it controls the dropout layer,
            where `True` implies dropout is active. If `None`, it defaults to
            `tf.keras.backend.learning_phase()`.
        return_per_feature
            `bool`. If True, return per pixel level log prob.
        Returns
        -------
        log_prob_values: `Tensor`.
        """
        # Determine the batch shape of the input images
        image_batch_shape = prefer_static.shape(value)[:-3]
        # Broadcast `value` and `conditional_input` to the same batch_shape
        if conditional_input is None:
            image_batch_and_conditional_shape = image_batch_shape
        else:
            conditional_input = tf.convert_to_tensor(conditional_input)
            conditional_input_shape = prefer_static.shape(conditional_input)
            conditional_batch_rank = (prefer_static.rank(conditional_input) -
                                      tensorshape_util.rank(self.conditional_shape))
            conditional_batch_shape = conditional_input_shape[:conditional_batch_rank]
            image_batch_and_conditional_shape = prefer_static.broadcast_shape(
                image_batch_shape, conditional_batch_shape)
            conditional_input = tf.broadcast_to(
                conditional_input,
                prefer_static.concat([image_batch_and_conditional_shape, self.conditional_shape], axis=0))
            value = tf.broadcast_to(value, prefer_static.concat(
                [image_batch_and_conditional_shape, self.event_shape], axis=0))
            # Flatten batch dimension for input to Keras model
            conditional_input = tf.reshape(
                conditional_input,
                prefer_static.concat([(-1,), self.conditional_shape], axis=0))
        value = tf.reshape(value, prefer_static.concat([(-1,), self.event_shape], axis=0))
        # Rescale pixel values from [low, high] to the [-1, 1] network input range.
        transformed_value = (2. * (value - self._low) / (self._high - self._low)) - 1.
        inputs = transformed_value if conditional_input is None else [transformed_value, conditional_input]
        params = self.network(inputs, training=training)
        num_channels = self.event_shape[-1]
        if num_channels == 1:
            component_logits, locs, scales = params
        else:
            # If there is more than one channel, we create a linear autoregressive
            # dependency among the location parameters of the channels of a single
            # pixel (the scale parameters within a pixel are independent). For a pixel
            # with R/G/B channels, the `r`, `g`, and `b` saturation values are
            # distributed as:
            #
            # r ~ Logistic(loc_r, scale_r)
            # g ~ Logistic(coef_rg * r + loc_g, scale_g)
            # b ~ Logistic(coef_rb * r + coef_gb * g + loc_b, scale_b)
            # The dependency is applied by splitting locs/coeffs per channel and
            # accumulating channel contributions into each location below.
            component_logits, locs, scales, coeffs = params
            num_coeffs = num_channels * (num_channels - 1) // 2
            loc_tensors = tf.split(locs, num_channels, axis=-1)
            coef_tensors = tf.split(coeffs, num_coeffs, axis=-1)
            channel_tensors = tf.split(value, num_channels, axis=-1)
            coef_count = 0
            for i in range(num_channels):
                channel_tensors[i] = channel_tensors[i][..., tf.newaxis, :]
                for j in range(i):
                    loc_tensors[i] += channel_tensors[j] * coef_tensors[coef_count]
                    coef_count += 1
            locs = tf.concat(loc_tensors, axis=-1)
        dist = self._make_mixture_dist(component_logits, locs, scales, return_per_feature=return_per_feature)
        log_px = dist.log_prob(value)
        if return_per_feature:
            return log_px
        else:
            return tf.reshape(log_px, image_batch_and_conditional_shape)
    def _sample_n(self, n, seed=None, conditional_input=None, training=False):
        """Samples from the distribution, with optional conditional input.
        Parameters
        ----------
        n
            `int`, number of samples desired.
        seed
            `int`, seed for RNG. Setting a random seed enforces reproducibility
            of the samples between sessions (not within a single session).
        conditional_input
            `Tensor` on which to condition the distribution (e.g.
            class labels), or `None`.
        training
            `bool` or `None`. If `bool`, it controls the dropout layer,
            where `True` implies dropout is active. If `None`, it defers to Keras'
            handling of train/eval status.
        Returns
        -------
        samples
            a `Tensor` of shape `[n, height, width, num_channels]`.
        """
        if conditional_input is not None:
            conditional_input = tf.convert_to_tensor(conditional_input, dtype=self.dtype)
            conditional_event_rank = tensorshape_util.rank(self.conditional_shape)
            conditional_input_shape = prefer_static.shape(conditional_input)
            conditional_sample_rank = prefer_static.rank(conditional_input) - conditional_event_rank
            # If `conditional_input` has no sample dimensions, prepend a sample
            # dimension
            if conditional_sample_rank == 0:
                conditional_input = conditional_input[tf.newaxis, ...]
                conditional_sample_rank = 1
            # Assert that the conditional event shape in the `PixelCnnNetwork` is the
            # same as that implied by `conditional_input`.
            conditional_event_shape = conditional_input_shape[conditional_sample_rank:]
            with tf.control_dependencies([tf.assert_equal(self.conditional_shape, conditional_event_shape)]):
                conditional_sample_shape = conditional_input_shape[:conditional_sample_rank]
                repeat = n // prefer_static.reduce_prod(conditional_sample_shape)
                h = tf.reshape(conditional_input, prefer_static.concat([(-1,), self.conditional_shape], axis=0))
                h = tf.tile(h, prefer_static.pad([repeat], paddings=[[0, conditional_event_rank]], constant_values=1))
        # Start from uniform noise in [-1, 1] and refine pixel-by-pixel below.
        samples_0 = tf.random.uniform(
            prefer_static.concat([(n,), self.event_shape], axis=0),
            minval=-1., maxval=1., dtype=self.dtype, seed=seed)
        inputs = samples_0 if conditional_input is None else [samples_0, h]
        params_0 = self.network(inputs, training=training)
        samples_0 = self._sample_channels(*params_0, seed=seed)
        image_height, image_width, _ = tensorshape_util.as_list(self.event_shape)
        def loop_body(index, samples):
            """Loop for iterative pixel sampling.
            Parameters
            ----------
            index
                0D `Tensor` of type `int32`. Index of the current pixel.
            samples
                4D `Tensor`. Images with pixels sampled in raster order, up to
                pixel `[index]`, with dimensions `[batch_size, height, width,
                num_channels]`.
            Returns
            -------
            samples
                4D `Tensor`. Images with pixels sampled in raster order, up to \
                and including pixel `[index]`, with dimensions `[batch_size, height, \
                width, num_channels]`.
            """
            inputs = samples if conditional_input is None else [samples, h]
            params = self.network(inputs, training=training)
            samples_new = self._sample_channels(*params, seed=seed)
            # Update the current pixel
            samples = tf.transpose(samples, [1, 2, 3, 0])
            samples_new = tf.transpose(samples_new, [1, 2, 3, 0])
            row, col = index // image_width, index % image_width
            updates = samples_new[row, col, ...][tf.newaxis, ...]
            samples = tf.tensor_scatter_nd_update(samples, [[row, col]], updates)
            samples = tf.transpose(samples, [3, 0, 1, 2])
            return index + 1, samples
        index0 = tf.zeros([], dtype=tf.int32)
        # Construct the while loop for sampling
        total_pixels = image_height * image_width
        loop_cond = lambda ind, _: tf.less(ind, total_pixels) # noqa: E731
        init_vars = (index0, samples_0)
        # parallel_iterations=1 because each pixel depends on the previous ones.
        _, samples = tf.while_loop(loop_cond, loop_body, init_vars, parallel_iterations=1)
        # Map samples back from [-1, 1] to [low, high] and quantize.
        transformed_samples = (self._low + 0.5 * (self._high - self._low) * (samples + 1.))
        return tf.round(transformed_samples)
    def _sample_channels(self, component_logits, locs, scales, coeffs=None, seed=None):
        """Sample a single pixel-iteration and apply channel conditioning.
        Parameters
        ----------
        component_logits
            4D `Tensor` of logits for the Categorical distribution
            over Quantized Logistic mixture components. Dimensions are `[batch_size,
            height, width, num_logistic_mix]`.
        locs
            4D `Tensor` of location parameters for the Quantized Logistic
            mixture components. Dimensions are `[batch_size, height, width,
            num_logistic_mix, num_channels]`.
        scales
            4D `Tensor` of location parameters for the Quantized Logistic
            mixture components. Dimensions are `[batch_size, height, width,
            num_logistic_mix, num_channels]`.
        coeffs
            4D `Tensor` of coefficients for the linear dependence among color
            channels, or `None` if there is only one channel. Dimensions are
            `[batch_size, height, width, num_logistic_mix, num_coeffs]`, where
            `num_coeffs = num_channels * (num_channels - 1) // 2`.
        seed
            `int`, random seed.
        Returns
        -------
        samples
            4D `Tensor` of sampled image data with autoregression among \
            channels. Dimensions are `[batch_size, height, width, num_channels]`.
        """
        num_channels = self.event_shape[-1]
        # sample mixture components once for the entire pixel
        component_dist = categorical.Categorical(logits=component_logits)
        mask = tf.one_hot(indices=component_dist.sample(seed=seed), depth=self._num_logistic_mix)
        mask = tf.cast(mask[..., tf.newaxis], self.dtype)
        # apply mixture component mask and separate out RGB parameters
        masked_locs = tf.reduce_sum(locs * mask, axis=-2)
        loc_tensors = tf.split(masked_locs, num_channels, axis=-1)
        masked_scales = tf.reduce_sum(scales * mask, axis=-2)
        scale_tensors = tf.split(masked_scales, num_channels, axis=-1)
        if coeffs is not None:
            num_coeffs = num_channels * (num_channels - 1) // 2
            masked_coeffs = tf.reduce_sum(coeffs * mask, axis=-2)
            coef_tensors = tf.split(masked_coeffs, num_coeffs, axis=-1)
        channel_samples = []
        coef_count = 0
        for i in range(num_channels):
            loc = loc_tensors[i]
            # Shift the location by linear contributions of previously sampled channels.
            for c in channel_samples:
                loc += c * coef_tensors[coef_count]
                coef_count += 1
            logistic_samp = logistic.Logistic(loc=loc, scale=scale_tensors[i]).sample(seed=seed)
            logistic_samp = tf.clip_by_value(logistic_samp, -1., 1.)
            channel_samples.append(logistic_samp)
        return tf.concat(channel_samples, axis=-1)
    def _batch_shape(self):
        # A single PixelCNN instance models one (possibly conditional) image
        # distribution, so there are no batch dimensions.
        return tf.TensorShape([])
    def _event_shape(self):
        return tf.TensorShape(self.image_shape)
class _PixelCNNNetwork(tf.keras.layers.Layer):
    """Keras `Layer` to parameterize a Pixel CNN++ distribution.
    This is a Keras implementation of the Pixel CNN++ network, as described in
    Salimans et al. (2017)[1] and van den Oord et al. (2016)[2].
    (https://github.com/openai/pixel-cnn).
    #### References
    [1]: Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik P. Kingma.
    PixelCNN++: Improving the PixelCNN with Discretized Logistic Mixture
    Likelihood and Other Modifications. In _International Conference on
    Learning Representations_, 2017.
    https://pdfs.semanticscholar.org/9e90/6792f67cbdda7b7777b69284a81044857656.pdf
    Additional details at https://github.com/openai/pixel-cnn
    [2]: Aaron van den Oord, Nal Kalchbrenner, Oriol Vinyals, Lasse Espeholt,
    Alex Graves, and Koray Kavukcuoglu. Conditional Image Generation with
    PixelCNN Decoders. In _30th Conference on Neural Information Processing
    Systems_, 2016.
    https://papers.nips.cc/paper/6527-conditional-image-generation-with-pixelcnn-decoders.pdf.
    """
    def __init__(self,
                 dropout_p: float = 0.5,
                 num_resnet: int = 5,
                 num_hierarchies: int = 3,
                 num_filters: int = 160,
                 num_logistic_mix: int = 10,
                 receptive_field_dims: tuple = (3, 3),
                 resnet_activation: str = 'concat_elu',
                 l2_weight: float = 0.,
                 use_weight_norm: bool = True,
                 use_data_init: bool = True,
                 dtype=tf.float32) -> None:
        """Initialize the neural network for the Pixel CNN++ distribution.
        Parameters
        ----------
        dropout_p
            `float`, the dropout probability. Should be between 0 and 1.
        num_resnet
            `int`, the number of layers (shown in Figure 2 of [2]) within
            each highest-level block of Figure 2 of [1].
        num_hierarchies
            `int`, the number of highest-level blocks (separated by
            expansions/contractions of dimensions in Figure 2 of [1].)
        num_filters
            `int`, the number of convolutional filters.
        num_logistic_mix
            `int`, number of components in the logistic mixture
            distribution.
        receptive_field_dims
            `tuple`, height and width in pixels of the receptive
            field of the convolutional layers above and to the left of a given
            pixel. The width (second element of the tuple) should be odd. Figure 1
            (middle) of [2] shows a receptive field of (3, 5) (the row containing
            the current pixel is included in the height). The default of (3, 3) was
            used to produce the results in [1].
        resnet_activation
            `string`, the type of activation to use in the resnet
            blocks. May be 'concat_elu', 'elu', or 'relu'.
        l2_weight
            `float`, the L2 regularization weight.
        use_weight_norm
            `bool`, if `True` then use weight normalization.
        use_data_init
            `bool`, if `True` then use data-dependent initialization
            (has no effect if `use_weight_norm` is `False`).
        dtype
            Data type of the layer.
        """
        super(_PixelCNNNetwork, self).__init__(dtype=dtype)
        self._dropout_p = dropout_p
        self._num_resnet = num_resnet
        self._num_hierarchies = num_hierarchies
        self._num_filters = num_filters
        self._num_logistic_mix = num_logistic_mix
        self._receptive_field_dims = receptive_field_dims  # first set desired receptive field, then infer kernel
        self._resnet_activation = resnet_activation
        self._l2_weight = l2_weight
        if use_weight_norm:
            # Wrap every layer class in WeightNorm so the wrapped constructor
            # produces weight-normalized layers transparently.
            def layer_wrapper(layer):
                def wrapped_layer(*args, **kwargs):
                    return WeightNorm(layer(*args, **kwargs), data_init=use_data_init)
                return wrapped_layer
            self._layer_wrapper = layer_wrapper
        else:
            # No weight norm: the wrapper is the identity on layer classes.
            self._layer_wrapper = lambda layer: layer
    def build(self, input_shape):
        """Build the functional-API Keras model parameterizing the distribution.

        `input_shape` is either a single batched image shape, or a 2-element
        list/tuple `[batch_image_shape, batch_conditional_shape]` when a
        conditional input (e.g. class labels or encodings) is used. The built
        model is stored in `self._network`.
        """
        dtype = self.dtype
        if len(input_shape) == 2:
            batch_image_shape, batch_conditional_shape = input_shape
            conditional_input = tf.keras.layers.Input(shape=batch_conditional_shape[1:], dtype=dtype)
        else:
            batch_image_shape = input_shape
            conditional_input = None
        image_shape = batch_image_shape[1:]
        image_input = tf.keras.layers.Input(shape=image_shape, dtype=dtype)
        if self._resnet_activation == 'concat_elu':
            # concat_elu(x) = elu([x, -x]) doubles the channel dimension.
            activation = tf.keras.layers.Lambda(lambda x: tf.nn.elu(tf.concat([x, -x], axis=-1)), dtype=dtype)
        else:
            activation = tf.keras.activations.get(self._resnet_activation)
        # Define layers with default inputs and layer wrapper applied
        Conv2D = functools.partial(  # pylint:disable=invalid-name
            self._layer_wrapper(tf.keras.layers.Convolution2D),
            filters=self._num_filters,
            padding='same',
            kernel_regularizer=tf.keras.regularizers.l2(self._l2_weight),
            dtype=dtype)
        Dense = functools.partial(  # pylint:disable=invalid-name
            self._layer_wrapper(tf.keras.layers.Dense),
            kernel_regularizer=tf.keras.regularizers.l2(self._l2_weight),
            dtype=dtype)
        Conv2DTranspose = functools.partial(  # pylint:disable=invalid-name
            self._layer_wrapper(tf.keras.layers.Conv2DTranspose),
            filters=self._num_filters,
            padding='same',
            strides=(2, 2),
            kernel_regularizer=tf.keras.regularizers.l2(self._l2_weight),
            dtype=dtype)
        rows, cols = self._receptive_field_dims
        # Define the dimensions of the valid (unmasked) areas of the layer kernels
        # for stride 1 convolutions in the internal layers.
        kernel_valid_dims = {'vertical': (rows - 1, cols),  # vertical stack
                             'horizontal': (2, cols // 2 + 1)}  # horizontal stack
        # Define the size of the kernel necessary to center the current pixel
        # correctly for stride 1 convolutions in the internal layers.
        kernel_sizes = {'vertical': (2 * rows - 3, cols), 'horizontal': (3, cols)}
        # Make the kernel constraint functions for stride 1 convolutions in internal
        # layers.
        kernel_constraints = {
            k: _make_kernel_constraint(kernel_sizes[k], (0, v[0]), (0, v[1]))
            for k, v in kernel_valid_dims.items()}
        # Build the initial vertical stack/horizontal stack convolutional layers,
        # as shown in Figure 1 of [2]. The receptive field of the initial vertical
        # stack layer is a rectangular area centered above the current pixel.
        vertical_stack_init = Conv2D(
            kernel_size=(2 * rows - 1, cols),
            kernel_constraint=_make_kernel_constraint((2 * rows - 1, cols), (0, rows - 1), (0, cols)))(image_input)
        # In Figure 1 [2], the receptive field of the horizontal stack is
        # illustrated as the pixels in the same row and to the left of the current
        # pixel. [1] increases the height of this receptive field from one pixel to
        # two (`horizontal_stack_left`) and additionally includes a subset of the
        # row of pixels centered above the current pixel (`horizontal_stack_up`).
        horizontal_stack_up = Conv2D(
            kernel_size=(3, cols),
            kernel_constraint=_make_kernel_constraint((3, cols), (0, 1), (0, cols)))(image_input)
        horizontal_stack_left = Conv2D(
            kernel_size=(3, cols),
            kernel_constraint=_make_kernel_constraint((3, cols), (0, 2), (0, cols // 2)))(image_input)
        horizontal_stack_init = tf.keras.layers.add([horizontal_stack_up, horizontal_stack_left], dtype=dtype)
        layer_stacks = {
            'vertical': [vertical_stack_init],
            'horizontal': [horizontal_stack_init]
        }
        # Build the downward pass of the U-net (left-hand half of Figure 2 of [1]).
        # Each `i` iteration builds one of the highest-level blocks (identified as
        # 'Sequence of 6 layers' in the figure, consisting of `num_resnet=5` stride-
        # 1 layers, and one stride-2 layer that contracts the height/width
        # dimensions). The `_` iterations build the stride 1 layers. The layers of
        # the downward pass are stored in lists, since we'll later need them to make
        # skip-connections to layers in the upward pass of the U-net (the skip-
        # connections are represented by curved lines in Figure 2 [1]).
        for i in range(self._num_hierarchies):
            for _ in range(self._num_resnet):
                # Build a layer shown in Figure 2 of [2]. The 'vertical' iteration
                # builds the layers in the left half of the figure, and the 'horizontal'
                # iteration builds the layers in the right half.
                for stack in ['vertical', 'horizontal']:
                    input_x = layer_stacks[stack][-1]
                    x = activation(input_x)
                    x = Conv2D(kernel_size=kernel_sizes[stack],
                               kernel_constraint=kernel_constraints[stack])(x)
                    # Add the vertical-stack layer to the horizontal-stack layer
                    if stack == 'horizontal':
                        h = activation(layer_stacks['vertical'][-1])
                        h = Dense(self._num_filters)(h)
                        x = tf.keras.layers.add([h, x], dtype=dtype)
                    x = activation(x)
                    x = tf.keras.layers.Dropout(self._dropout_p, dtype=dtype)(x)
                    # Double the filters: half feed the gate, half the activation
                    # in `_apply_sigmoid_gating` below.
                    x = Conv2D(filters=2*self._num_filters,
                               kernel_size=kernel_sizes[stack],
                               kernel_constraint=kernel_constraints[stack])(x)
                    if conditional_input is not None:
                        h_projection = _build_and_apply_h_projection(conditional_input,
                                                                     self._num_filters, dtype=dtype)
                        x = tf.keras.layers.add([x, h_projection], dtype=dtype)
                    x = _apply_sigmoid_gating(x)
                    # Add a residual connection from the layer's input.
                    out = tf.keras.layers.add([input_x, x], dtype=dtype)
                    layer_stacks[stack].append(out)
            if i < self._num_hierarchies - 1:
                # Build convolutional layers that contract the height/width dimensions
                # on the downward pass between each set of layers (e.g. contracting from
                # 32x32 to 16x16 in Figure 2 of [1]).
                for stack in ['vertical', 'horizontal']:
                    # Define kernel dimensions/masking to maintain the autoregressive property.
                    x = layer_stacks[stack][-1]
                    h, w = kernel_valid_dims[stack]
                    kernel_height = 2 * h
                    if stack == 'vertical':
                        kernel_width = w + 1
                    else:
                        kernel_width = 2 * w
                    kernel_size = (kernel_height, kernel_width)
                    kernel_constraint = _make_kernel_constraint(kernel_size, (0, h), (0, w))
                    x = Conv2D(strides=(2, 2), kernel_size=kernel_size,
                               kernel_constraint=kernel_constraint)(x)
                    layer_stacks[stack].append(x)
        # Upward pass of the U-net (right-hand half of Figure 2 of [1]). We stored
        # the layers of the downward pass in a list, in order to access them to make
        # skip-connections to the upward pass. For the upward pass, we need to keep
        # track of only the current layer, so we maintain a reference to the
        # current layer of the horizontal/vertical stack in the `upward_pass` dict.
        # The upward pass begins with the last layer of the downward pass.
        upward_pass = {key: stack.pop() for key, stack in layer_stacks.items()}
        # As with the downward pass, each `i` iteration builds a highest level block
        # in Figure 2 [1], and the `_` iterations build individual layers within the
        # block.
        for i in range(self._num_hierarchies):
            num_resnet = self._num_resnet if i == 0 else self._num_resnet + 1
            for _ in range(num_resnet):
                # Build a layer as shown in Figure 2 of [2], with a skip-connection
                # from the symmetric layer in the downward pass.
                for stack in ['vertical', 'horizontal']:
                    input_x = upward_pass[stack]
                    x_symmetric = layer_stacks[stack].pop()
                    x = activation(input_x)
                    x = Conv2D(kernel_size=kernel_sizes[stack],
                               kernel_constraint=kernel_constraints[stack])(x)
                    # Include the vertical-stack layer of the upward pass in the layers
                    # to be added to the horizontal layer.
                    if stack == 'horizontal':
                        x_symmetric = tf.keras.layers.Concatenate(axis=-1,
                                                                  dtype=dtype)([upward_pass['vertical'],
                                                                                x_symmetric])
                    # Add a skip-connection from the symmetric layer in the downward
                    # pass to the layer `x` in the upward pass.
                    h = activation(x_symmetric)
                    h = Dense(self._num_filters)(h)
                    x = tf.keras.layers.add([h, x], dtype=dtype)
                    x = activation(x)
                    x = tf.keras.layers.Dropout(self._dropout_p, dtype=dtype)(x)
                    x = Conv2D(filters=2*self._num_filters,
                               kernel_size=kernel_sizes[stack],
                               kernel_constraint=kernel_constraints[stack])(x)
                    if conditional_input is not None:
                        h_projection = _build_and_apply_h_projection(conditional_input, self._num_filters, dtype=dtype)
                        x = tf.keras.layers.add([x, h_projection], dtype=dtype)
                    x = _apply_sigmoid_gating(x)
                    upward_pass[stack] = tf.keras.layers.add([input_x, x], dtype=dtype)
            # Define deconvolutional layers that expand height/width dimensions on the
            # upward pass (e.g. expanding from 8x8 to 16x16 in Figure 2 of [1]), with
            # the correct kernel dimensions/masking to maintain the autoregressive
            # property.
            if i < self._num_hierarchies - 1:
                for stack in ['vertical', 'horizontal']:
                    h, w = kernel_valid_dims[stack]
                    kernel_height = 2 * h - 2
                    if stack == 'vertical':
                        kernel_width = w + 1
                        kernel_constraint = _make_kernel_constraint(
                            (kernel_height, kernel_width), (h - 2, kernel_height), (0, w))
                    else:
                        kernel_width = 2 * w - 2
                        kernel_constraint = _make_kernel_constraint(
                            (kernel_height, kernel_width), (h - 2, kernel_height),
                            (w - 2, kernel_width))
                    x = upward_pass[stack]
                    x = Conv2DTranspose(kernel_size=(kernel_height, kernel_width),
                                        kernel_constraint=kernel_constraint)(x)
                    upward_pass[stack] = x
        x_out = tf.keras.layers.ELU(dtype=dtype)(upward_pass['horizontal'])
        # Build final Dense/Reshape layers to output the correct number of
        # parameters per pixel.
        num_channels = tensorshape_util.as_list(image_shape)[-1]
        num_coeffs = num_channels * (num_channels - 1) // 2  # alpha, beta, gamma in eq.3 of paper
        num_out = num_channels * 2 + num_coeffs + 1  # mu, s + alpha, beta, gamma + 1 (mixture weight)
        num_out_total = num_out * self._num_logistic_mix
        params = Dense(num_out_total)(x_out)
        params = tf.reshape(params, prefer_static.concat(  # [-1,H,W,nb mixtures, params per mixture]
            [[-1], image_shape[:-1], [self._num_logistic_mix, num_out]], axis=0))
        # If there is one color channel, split the parameters into a list of three
        # output `Tensor`s: (1) component logits for the Quantized Logistic mixture
        # distribution, (2) location parameters for each component, and (3) scale
        # parameters for each component. If there is more than one color channel,
        # return a fourth `Tensor` for the coefficients for the linear dependence
        # among color channels (e.g. alpha, beta, gamma).
        # [logits, mu, s, linear dependence]
        splits = 3 if num_channels == 1 else [1, num_channels, num_channels, num_coeffs]
        outputs = tf.split(params, splits, axis=-1)
        # Squeeze singleton dimension from component logits
        outputs[0] = tf.squeeze(outputs[0], axis=-1)
        # Ensure scales are positive and do not collapse to near-zero
        outputs[2] = tf.nn.softplus(outputs[2]) + tf.cast(tf.exp(-7.), self.dtype)
        inputs = image_input if conditional_input is None else [image_input, conditional_input]
        self._network = tf.keras.Model(inputs=inputs, outputs=outputs)
        super(_PixelCNNNetwork, self).build(input_shape)
    def call(self, inputs, training=None):
        """Call the Pixel CNN network model.
        Parameters
        ----------
        inputs
            4D `Tensor` of image data with dimensions [batch size, height,
            width, channels] or a 2-element `list`. If `list`, the first element is
            the 4D image `Tensor` and the second element is a `Tensor` with
            conditional input data (e.g. VAE encodings or class labels) with the
            same leading batch dimension as the image `Tensor`.
        training
            `bool` or `None`. If `bool`, it controls the dropout layer,
            where `True` implies dropout is active. If `None`, it defaults to
            `tf.keras.backend.learning_phase()`
        Returns
        -------
        outputs
            a 3- or 4-element `list` of `Tensor`s in the following order: \
            component_logits: 4D `Tensor` of logits for the Categorical distribution \
            over Quantized Logistic mixture components. Dimensions are \
            `[batch_size, height, width, num_logistic_mix]`.
        locs
            4D `Tensor` of location parameters for the Quantized Logistic \
            mixture components. Dimensions are `[batch_size, height, width, \
            num_logistic_mix, num_channels]`.
        scales
            4D `Tensor` of scale parameters for the Quantized Logistic \
            mixture components. Dimensions are `[batch_size, height, width, \
            num_logistic_mix, num_channels]`.
        coeffs
            4D `Tensor` of coefficients for the linear dependence among \
            color channels, included only if the image has more than one channel. \
            Dimensions are `[batch_size, height, width, num_logistic_mix, \
            num_coeffs]`, where `num_coeffs = num_channels * (num_channels - 1) // 2`.
        """
        return self._network(inputs, training=training)
def _make_kernel_constraint(kernel_size, valid_rows, valid_columns):
    """Make the masking function for layer kernels.

    Returns a callable that zeroes every kernel entry outside the
    `[valid_rows) x [valid_columns)` window, preserving autoregressive masking.
    """
    row_lo, row_hi = valid_rows
    col_lo, col_hi = valid_columns
    mask = np.zeros(kernel_size)
    mask[row_lo:row_hi, col_lo:col_hi] = 1.
    # broadcast over the input/output channel dimensions of the kernel
    mask = mask[:, :, np.newaxis, np.newaxis]

    def constraint(kernel):
        return kernel * mask

    return constraint
def _build_and_apply_h_projection(h, num_filters, dtype):
    """Project the conditional input to `2 * num_filters` channels per pixel."""
    flattened = tf.keras.layers.Flatten(dtype=dtype)(h)
    projected = tf.keras.layers.Dense(
        2*num_filters, kernel_initializer='random_normal', dtype=dtype)(flattened)
    # insert singleton height/width axes so the projection broadcasts over pixels
    return projected[..., tf.newaxis, tf.newaxis, :]
def _apply_sigmoid_gating(x):
    """Apply the sigmoid gating in Figure 2 of [2]."""
    # first half of the channels carries the activation, second half the gate
    activations, gates = tf.split(x, 2, axis=-1)
    return tf.keras.layers.multiply([tf.sigmoid(gates), activations], dtype=x.dtype)
| 48,470 | 45.383732 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/tensorflow/losses.py | from typing import Optional
import tensorflow as tf
from tensorflow.keras.layers import Flatten
from tensorflow.keras.losses import kld, categorical_crossentropy
import tensorflow_probability as tfp
from alibi_detect.models.tensorflow.gmm import gmm_params, gmm_energy
def elbo(y_true: tf.Tensor,
         y_pred: tf.Tensor,
         cov_full: Optional[tf.Tensor] = None,
         cov_diag: Optional[tf.Tensor] = None,
         sim: Optional[float] = None
         ) -> tf.Tensor:
    """
    Compute the ELBO loss under a multivariate Gaussian observation model.

    The covariance can be supplied in one of three mutually exclusive ways:
    as the full matrix (`cov_full`), as its diagonal (`cov_diag`) or as a
    scale identity multiplier (`sim`). If none is given, the identity
    matrix is used.

    Parameters
    ----------
    y_true
        Labels.
    y_pred
        Predictions.
    cov_full
        Full covariance matrix.
    cov_diag
        Diagonal (variance) of covariance matrix.
    sim
        Scale identity multiplier.

    Returns
    -------
    ELBO loss value.

    Example
    -------
    >>> import tensorflow as tf
    >>> from alibi_detect.models.tensorflow.losses import elbo
    >>> y_true = tf.constant([[0.0, 1.0], [1.0, 0.0]])
    >>> y_pred = tf.constant([[0.1, 0.9], [0.8, 0.2]])
    >>> elbo(y_true, y_pred, sim=1.0)  # scale identity multiplier
    """
    n_cov_args = sum(arg is not None for arg in (cov_full, cov_diag, sim))
    if n_cov_args > 1:
        raise ValueError('Only one of cov_full, cov_diag or sim should be specified.')
    flat_pred = Flatten()(y_pred)
    if isinstance(cov_full, tf.Tensor):
        dist = tfp.distributions.MultivariateNormalFullCovariance(flat_pred,
                                                                  covariance_matrix=cov_full)
    else:
        if sim:
            # a scale identity multiplier expands to a constant diagonal
            cov_diag = sim * tf.ones(flat_pred.shape[-1])
        dist = tfp.distributions.MultivariateNormalDiag(flat_pred,
                                                        scale_diag=cov_diag)
    return -tf.reduce_mean(dist.log_prob(Flatten()(y_true)))
def loss_aegmm(x_true: tf.Tensor,
               x_pred: tf.Tensor,
               z: tf.Tensor,
               gamma: tf.Tensor,
               w_energy: float = .1,
               w_cov_diag: float = .005
               ) -> tf.Tensor:
    """
    Loss function used for OutlierAEGMM: mean squared reconstruction error
    plus weighted GMM sample energy and covariance regularisation terms.

    Parameters
    ----------
    x_true
        Batch of instances.
    x_pred
        Batch of reconstructed instances by the autoencoder.
    z
        Latent space values.
    gamma
        Membership prediction for mixture model components.
    w_energy
        Weight on sample energy loss term.
    w_cov_diag
        Weight on covariance regularizing loss term.

    Returns
    -------
    Loss value.
    """
    err_recon = tf.reduce_mean((x_true - x_pred) ** 2)
    phi, mu, cov, L, log_det_cov = gmm_params(z, gamma)
    energy, cov_diag_term = gmm_energy(z, phi, mu, cov, L, log_det_cov, return_mean=True)
    return err_recon + w_energy * energy + w_cov_diag * cov_diag_term
def loss_vaegmm(x_true: tf.Tensor,
                x_pred: tf.Tensor,
                z: tf.Tensor,
                gamma: tf.Tensor,
                w_recon: float = 1e-7,
                w_energy: float = .1,
                w_cov_diag: float = .005,
                cov_full: tf.Tensor = None,
                cov_diag: tf.Tensor = None,
                sim: float = .05
                ) -> tf.Tensor:
    """
    Loss function used for OutlierVAEGMM: weighted ELBO reconstruction term
    plus weighted GMM sample energy and covariance regularisation terms.

    Parameters
    ----------
    x_true
        Batch of instances.
    x_pred
        Batch of reconstructed instances by the variational autoencoder.
    z
        Latent space values.
    gamma
        Membership prediction for mixture model components.
    w_recon
        Weight on elbo loss term.
    w_energy
        Weight on sample energy loss term.
    w_cov_diag
        Weight on covariance regularizing loss term.
    cov_full
        Full covariance matrix.
    cov_diag
        Diagonal (variance) of covariance matrix.
    sim
        Scale identity multiplier.

    Returns
    -------
    Loss value.
    """
    err_recon = elbo(x_true, x_pred, cov_full=cov_full, cov_diag=cov_diag, sim=sim)
    phi, mu, cov, L, log_det_cov = gmm_params(z, gamma)
    energy, cov_diag_term = gmm_energy(z, phi, mu, cov, L, log_det_cov)
    return w_recon * err_recon + w_energy * energy + w_cov_diag * cov_diag_term
def loss_adv_ae(x_true: tf.Tensor,
                x_pred: tf.Tensor,
                model: tf.keras.Model = None,
                model_hl: list = None,
                w_model: float = 1.,
                w_recon: float = 0.,
                w_model_hl: list = None,
                temperature: float = 1.
                ) -> tf.Tensor:
    """
    Loss function used for AdversarialAE: K-L divergence between the model's
    predictions on the original and reconstructed instances, optionally
    augmented with hidden-layer K-L terms and a scaled reconstruction error.

    Parameters
    ----------
    x_true
        Batch of instances.
    x_pred
        Batch of reconstructed instances by the autoencoder.
    model
        A trained tf.keras model with frozen layers (layers.trainable = False).
    model_hl
        List with tf.keras models used to extract feature maps and make predictions on hidden layers.
    w_model
        Weight on model prediction loss term.
    w_recon
        Weight on MSE reconstruction error loss term.
    w_model_hl
        Weights assigned to the loss of each model in model_hl.
    temperature
        Temperature used for model prediction scaling.
        Temperature <1 sharpens the prediction probability distribution.

    Returns
    -------
    Loss value.
    """
    y_true = model(x_true)
    y_pred = model(x_pred)
    if temperature != 1.:
        # temperature scaling: T < 1 sharpens the reference distribution
        y_true = y_true ** (1 / temperature)
        y_true = y_true / tf.reshape(tf.reduce_sum(y_true, axis=-1), (-1, 1))
    # K-L divergence between predictions on original and reconstructed instances
    kld_batch = kld(y_true, y_pred)
    std_kld = tf.math.reduce_std(kld_batch)
    loss = tf.reduce_mean(kld_batch)
    # optional K-L divergence terms on hidden layer feature maps
    if isinstance(model_hl, list):
        weights_hl = w_model_hl if w_model_hl is not None else list(tf.ones(len(model_hl)))
        for hidden_model, weight in zip(model_hl, weights_hl):
            hidden_kld = tf.reduce_mean(kld(hidden_model(x_true), hidden_model(x_pred)))
            loss += tf.constant(weight) * hidden_kld
    loss *= w_model
    # optional reconstruction loss, rescaled to the K-L divergence's spread
    if w_recon > 0.:
        sq_err = (x_true - x_pred) ** 2
        w_scale = std_kld / (tf.math.reduce_std(sq_err) + 1e-10)
        loss += w_recon * w_scale * tf.reduce_mean(sq_err)
    return loss
def loss_distillation(x_true: tf.Tensor,
                      y_pred: tf.Tensor,
                      model: tf.keras.Model = None,
                      loss_type: str = 'kld',
                      temperature: float = 1.,
                      ) -> tf.Tensor:
    """
    Loss function used for Model Distillation: divergence between the
    (temperature-scaled) teacher predictions and the distilled model's output.

    Parameters
    ----------
    x_true
        Batch of data points.
    y_pred
        Batch of prediction from the distilled model.
    model
        tf.keras model.
    loss_type
        Type of loss for distillation. Supported: 'kld', 'xent'.
    temperature
        Temperature used for model prediction scaling.
        Temperature <1 sharpens the prediction probability distribution.

    Returns
    -------
    Loss value.

    Raises
    ------
    NotImplementedError
        If `loss_type` is not one of 'kld' or 'xent'.
    """
    y_true = model(x_true)
    if temperature != 1.:
        # temperature scaling: T < 1 sharpens the teacher distribution
        y_true = y_true ** (1 / temperature)
        y_true = y_true / tf.reshape(tf.reduce_sum(y_true, axis=-1), (-1, 1))
    if loss_type == 'kld':
        pointwise_loss = kld(y_true, y_pred)
    elif loss_type == 'xent':
        pointwise_loss = categorical_crossentropy(y_true, y_pred, from_logits=False)
    else:
        raise NotImplementedError
    return tf.reduce_mean(pointwise_loss)
| 8,256 | 29.925094 | 111 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/tensorflow/resnet.py | # implementation adopted from https://github.com/tensorflow/models
# TODO: proper train-val-test split
import argparse
import numpy as np
import os
from pathlib import Path
import tensorflow as tf
from tensorflow.keras.callbacks import Callback, ModelCheckpoint
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.layers import (Activation, Add, BatchNormalization, Conv2D,
Dense, Input, ZeroPadding2D)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.regularizers import l2
from typing import Callable, Tuple, Union
# parameters specific for CIFAR-10 training
BATCH_NORM_DECAY = 0.997  # momentum of the batch-norm moving statistics
BATCH_NORM_EPSILON = 1e-5  # numerical-stability constant for batch norm
L2_WEIGHT_DECAY = 2e-4  # L2 regularisation strength for conv/dense kernels
LR_SCHEDULE = [(0.1, 91), (0.01, 136), (0.001, 182)]  # (multiplier, epoch to start) tuples
BASE_LEARNING_RATE = 0.1  # learning rate before linear batch-size scaling
HEIGHT, WIDTH, NUM_CHANNELS = 32, 32, 3  # CIFAR-10 image dimensions
def l2_regulariser(l2_regularisation: bool = True):
    """
    Return an L2 kernel regulariser, or `None` when regularisation is disabled.

    Parameters
    ----------
    l2_regularisation
        Whether to apply L2 regularisation.

    Returns
    -------
    Kernel regularisation.
    """
    if not l2_regularisation:
        return None
    return l2(L2_WEIGHT_DECAY)
def identity_block(x_in: tf.Tensor,
                   filters: Tuple[int, int],
                   kernel_size: Union[int, list, Tuple[int]],
                   stage: int,
                   block: str,
                   l2_regularisation: bool = True) -> tf.Tensor:
    """
    Identity block in ResNet: two conv+batch-norm layers with an unmodified
    skip connection from the block input.

    Parameters
    ----------
    x_in
        Input Tensor.
    filters
        Number of filters for each of the 2 conv layers.
    kernel_size
        Kernel size for the conv layers.
    stage
        Stage of the block in the ResNet.
    block
        Block within a stage in the ResNet.
    l2_regularisation
        Whether to apply L2 regularisation.

    Returns
    -------
    Output Tensor of the identity block.
    """
    # layer name prefixes encode the stage and block position
    conv_prefix = f'res{stage}_{block}_branch'
    bn_prefix = f'bn{stage}_{block}_branch'
    n_filters_a, n_filters_b = filters
    channel_axis = 3  # channels last format

    def batch_norm(tensor: tf.Tensor, suffix: str) -> tf.Tensor:
        # batch norm configured with the constants shared across the ResNet
        return BatchNormalization(axis=channel_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_prefix + suffix)(tensor)

    x = Conv2D(n_filters_a,
               kernel_size,
               padding='same',
               use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2_regulariser(l2_regularisation),
               name=conv_prefix + '2a')(x_in)
    x = batch_norm(x, '2a')
    x = Activation('relu')(x)
    x = Conv2D(n_filters_b,
               kernel_size,
               padding='same',
               use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2_regulariser(l2_regularisation),
               name=conv_prefix + '2b')(x)
    x = batch_norm(x, '2b')
    # residual connection straight from the block input
    x = Add()([x, x_in])
    return Activation('relu')(x)
def conv_block(x_in: tf.Tensor,
               filters: Tuple[int, int],
               kernel_size: Union[int, list, Tuple[int]],
               stage: int,
               block: str,
               strides: Tuple[int, int] = (2, 2),
               l2_regularisation: bool = True) -> tf.Tensor:
    """
    Conv block in ResNet with a parameterised skip connection to reduce the width and height
    controlled by the strides.

    Parameters
    ----------
    x_in
        Input Tensor.
    filters
        Number of filters for each of the 2 conv layers.
    kernel_size
        Kernel size for the conv layers.
    stage
        Stage of the block in the ResNet.
    block
        Block within a stage in the ResNet.
    strides
        Stride size applied to reduce the image size.
    l2_regularisation
        Whether to apply L2 regularisation.

    Returns
    -------
    Output Tensor of the conv block.
    """
    # layer name prefixes encode the stage and block position
    conv_prefix = f'res{stage}_{block}_branch'
    bn_prefix = f'bn{stage}_{block}_branch'
    n_filters_a, n_filters_b = filters
    channel_axis = 3  # channels last format

    def batch_norm(tensor: tf.Tensor, suffix: str) -> tf.Tensor:
        # batch norm configured with the constants shared across the ResNet
        return BatchNormalization(axis=channel_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_prefix + suffix)(tensor)

    # main branch: strided conv to shrink spatial dims, then a stride-1 conv
    x = Conv2D(n_filters_a,
               kernel_size,
               strides=strides,
               padding='same',
               use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2_regulariser(l2_regularisation),
               name=conv_prefix + '2a')(x_in)
    x = batch_norm(x, '2a')
    x = Activation('relu')(x)
    x = Conv2D(n_filters_b,
               kernel_size,
               padding='same',
               use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2_regulariser(l2_regularisation),
               name=conv_prefix + '2b')(x)
    x = batch_norm(x, '2b')
    # shortcut branch: 1x1 strided conv so shapes match the main branch
    shortcut = Conv2D(n_filters_b,
                      (1, 1),
                      strides=strides,
                      use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2_regulariser(l2_regularisation),
                      name=conv_prefix + '1')(x_in)
    shortcut = batch_norm(shortcut, '1')
    x = Add()([x, shortcut])
    return Activation('relu')(x)
def resnet_block(x_in: tf.Tensor,
                 size: int,
                 filters: Tuple[int, int],
                 kernel_size: Union[int, list, Tuple[int]],
                 stage: int,
                 strides: Tuple[int, int] = (2, 2),
                 l2_regularisation: bool = True) -> tf.Tensor:
    """
    Block in ResNet combining one conv block with `size - 1` identity blocks.

    Parameters
    ----------
    x_in
        Input Tensor.
    size
        The ResNet block consists of 1 conv block and size-1 identity blocks.
    filters
        Number of filters for each of the conv layers.
    kernel_size
        Kernel size for the conv layers.
    stage
        Stage of the block in the ResNet.
    strides
        Stride size applied to reduce the image size.
    l2_regularisation
        Whether to apply L2 regularisation.

    Returns
    -------
    Output Tensor of the conv block.
    """
    # the stage opens with a (possibly strided) conv block ...
    x = conv_block(x_in,
                   filters,
                   kernel_size,
                   stage,
                   'block0',
                   strides=strides,
                   l2_regularisation=l2_regularisation)
    # ... followed by `size - 1` shape-preserving identity blocks
    for idx in range(1, size):
        x = identity_block(x,
                           filters,
                           kernel_size,
                           stage,
                           f'block{idx}',
                           l2_regularisation=l2_regularisation)
    return x
def resnet(num_blocks: int,
           classes: int = 10,
           input_shape: Tuple[int, int, int] = (32, 32, 3)) -> tf.keras.Model:
    """
    Define a CIFAR-10 style ResNet classifier.

    Parameters
    ----------
    num_blocks
        Number of ResNet blocks.
    classes
        Number of classification classes.
    input_shape
        Input shape of an image.

    Returns
    -------
    ResNet as a tf.keras.Model.
    """
    channel_axis = 3  # channels last format
    x_in = Input(shape=input_shape)
    # stem: pad, 3x3 conv, batch norm, relu
    x = ZeroPadding2D(padding=(1, 1), name='conv1_pad')(x_in)
    x = Conv2D(16,
               (3, 3),
               strides=(1, 1),
               padding='valid',
               use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2_regulariser(True),
               name='conv1')(x)
    x = BatchNormalization(axis=channel_axis,
                           momentum=BATCH_NORM_DECAY,
                           epsilon=BATCH_NORM_EPSILON,
                           name='bn_conv1')(x)
    x = Activation('relu')(x)
    # three stages: filters double while spatial dims halve (except stage 2)
    stage_config = [
        (2, (16, 16), (1, 1)),
        (3, (32, 32), (2, 2)),
        (4, (64, 64), (2, 2)),
    ]
    for stage, stage_filters, stage_strides in stage_config:
        x = resnet_block(x_in=x,
                         size=num_blocks,
                         filters=stage_filters,
                         kernel_size=3,
                         stage=stage,
                         strides=stage_strides,
                         l2_regularisation=True)
    x = tf.reduce_mean(x, axis=(1, 2))  # global average pool over height and width
    x_out = Dense(classes,
                  activation='softmax',
                  kernel_initializer=RandomNormal(stddev=.01),
                  kernel_regularizer=l2(L2_WEIGHT_DECAY),
                  bias_regularizer=l2(L2_WEIGHT_DECAY),
                  name='fc10')(x)
    return Model(x_in, x_out, name='resnet')
def learning_rate_schedule(current_epoch: int,
                           current_batch: int,
                           batches_per_epoch: int,
                           batch_size: int) -> float:
    """
    Linear learning rate scaling with batch size, plus step decay at the
    epochs listed in `LR_SCHEDULE`.

    Parameters
    ----------
    current_epoch
        Current training epoch.
    current_batch
        Current batch with current epoch, not used.
    batches_per_epoch
        Number of batches or steps in an epoch, not used.
    batch_size
        Batch size.

    Returns
    -------
    Adjusted learning rate.
    """
    del current_batch, batches_per_epoch  # not used
    # linear scaling rule: lr grows proportionally with the batch size
    base_rate = BASE_LEARNING_RATE * batch_size / 128
    multiplier = 1.0
    # LR_SCHEDULE entries are ordered by start epoch; keep the last one reached
    for mult, start_epoch in LR_SCHEDULE:
        if current_epoch < start_epoch:
            break
        multiplier = mult
    return base_rate * multiplier
class LearningRateBatchScheduler(Callback):
    """Keras callback that updates the learning rate on every batch instead of every epoch."""

    def __init__(self, schedule: Callable, batch_size: int, steps_per_epoch: int):
        """
        Callback to update learning rate on every batch instead of epoch.

        Parameters
        ----------
        schedule
            Function taking the epoch and batch index as input which returns the new
            learning rate as output.
        batch_size
            Batch size.
        steps_per_epoch
            Number of batches or steps per epoch.
        """
        super(LearningRateBatchScheduler, self).__init__()
        self.schedule = schedule
        self.steps_per_epoch = steps_per_epoch
        self.batch_size = batch_size
        # epoch counter starts at -1 so the first `on_epoch_begin` sets it to 0
        self.epochs = -1
        # last learning rate applied; -1 forces an update on the first batch
        self.prev_lr = -1

    def on_epoch_begin(self, epoch, logs=None):
        # fail fast if the optimizer does not expose a mutable learning rate
        if not hasattr(self.model.optimizer, 'learning_rate'):
            raise ValueError('Optimizer must have a "learning_rate" attribute.')
        self.epochs += 1

    def on_batch_begin(self, batch, logs=None):
        """Executes before step begins."""
        new_lr = self.schedule(self.epochs,
                               batch,
                               self.steps_per_epoch,
                               self.batch_size)
        if not isinstance(new_lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function should be float.')
        if new_lr == self.prev_lr:
            return  # nothing to do: rate unchanged since the last batch
        self.model.optimizer.learning_rate = new_lr  # lr should be a float
        self.prev_lr = new_lr
        tf.compat.v1.logging.debug(
            'Epoch %05d Batch %05d: LearningRateBatchScheduler '
            'change learning rate to %s.', self.epochs, batch, new_lr)
def preprocess_image(x: np.ndarray, is_training: bool = True) -> np.ndarray:
    """Augment and standardise a single image for training.

    Note: all transformations, including the per-image standardisation, are
    only applied when `is_training` is True; otherwise `x` is returned
    unchanged (test data is standardised elsewhere, e.g. `scale_by_instance`).
    """
    if is_training:
        # resize image and add 4 pixels to each side
        x = tf.image.resize_with_crop_or_pad(x, HEIGHT + 8, WIDTH + 8)
        # randomly crop a [HEIGHT, WIDTH] section of the image
        x = tf.image.random_crop(x, [HEIGHT, WIDTH, NUM_CHANNELS])
        # randomly flip the image horizontally
        x = tf.image.random_flip_left_right(x)
        # standardise by image
        x = tf.image.per_image_standardization(x).numpy().astype(np.float32)
    return x
def scale_by_instance(x: np.ndarray, eps: float = 1e-12) -> np.ndarray:
    """
    Standardise each instance in a batch to zero mean and unit variance,
    using per-instance statistics over the trailing (H, W, C) axes.

    Parameters
    ----------
    x
        Batch of instances with shape (N, H, W, C).
    eps
        Small constant added to the standard deviation for numerical stability.

    Returns
    -------
    Standardised batch with the same shape as `x`.
    """
    instance_axes = (1, 2, 3)
    mean = x.mean(axis=instance_axes, keepdims=True)
    std = x.std(axis=instance_axes, keepdims=True)
    return (x - mean) / (std + eps)
def run(num_blocks: int,
        epochs: int,
        batch_size: int,
        model_dir: Union[str, os.PathLike],
        num_classes: int = 10,
        input_shape: Tuple[int, int, int] = (32, 32, 3),
        validation_freq: int = 10,
        verbose: int = 2,
        seed: int = 1,
        serving: bool = False
        ) -> None:
    """
    Train a ResNet classifier on CIFAR-10.

    Checkpoints the best model (by validation accuracy) to `model_dir` and,
    if `serving` is True, additionally exports it in SavedModel format.

    Parameters
    ----------
    num_blocks
        Number of residual blocks passed to the `resnet` model builder.
    epochs
        Number of training epochs.
    batch_size
        Training batch size.
    model_dir
        Directory to save the checkpointed model (and SavedModel export) to.
    num_classes
        Number of output classes.
    input_shape
        Input image shape (H, W, C).
    validation_freq
        Run validation every `validation_freq` epochs.
    verbose
        Keras fit verbosity level.
    seed
        Seed for the data augmentation generator.
    serving
        Whether to also export the model with `tf.saved_model.save`.
    """
    # load and preprocess CIFAR-10 data
    (X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data()
    # training data is standardised per batch by the ImageDataGenerator below
    X_train = X_train.astype('float32')
    X_test = scale_by_instance(X_test.astype('float32'))  # can already preprocess test data
    y_train = y_train.astype('int64').reshape(-1, )
    y_test = y_test.astype('int64').reshape(-1, )
    # define and compile model
    model = resnet(num_blocks, classes=num_classes, input_shape=input_shape)
    optimizer = SGD(learning_rate=BASE_LEARNING_RATE, momentum=0.9)
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=optimizer,
        metrics=['sparse_categorical_accuracy']
    )
    # set up callbacks
    steps_per_epoch = X_train.shape[0] // batch_size
    ckpt_path = Path(model_dir).joinpath('model.h5')
    callbacks = [
        ModelCheckpoint(
            ckpt_path,
            monitor='val_sparse_categorical_accuracy',
            save_best_only=True,
            save_weights_only=False
        ),
        # per-batch learning rate updates (see LearningRateBatchScheduler)
        LearningRateBatchScheduler(
            schedule=learning_rate_schedule,
            batch_size=batch_size,
            steps_per_epoch=steps_per_epoch
        )
    ]
    # data augmentation and preprocessing
    datagen = ImageDataGenerator(preprocessing_function=preprocess_image)
    # train
    model.fit(
        x=datagen.flow(X_train, y_train, batch_size=batch_size, shuffle=True, seed=seed),
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        callbacks=callbacks,
        validation_freq=validation_freq,
        validation_data=(X_test, y_test),
        shuffle=True,
        verbose=verbose
    )
    if serving:
        tf.saved_model.save(model, model_dir)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Train ResNet on CIFAR-10.")
    parser.add_argument('--num_blocks', type=int, default=5)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--model_dir', type=str, default='./model/')
    parser.add_argument('--num_classes', type=int, default=10)
    parser.add_argument('--validation_freq', type=int, default=10)
    parser.add_argument('--verbose', type=int, default=2)
    parser.add_argument('--seed', type=int, default=1)
    # Fix: `type=bool` is broken with argparse -- bool('False') evaluates to True,
    # so ANY value passed on the command line enabled serving. Use a flag instead
    # (default remains False when the flag is absent).
    parser.add_argument('--serving', action='store_true',
                        help='Also export the trained model in SavedModel format.')
    args = parser.parse_args()
    run(
        args.num_blocks,
        args.epochs,
        args.batch_size,
        args.model_dir,
        num_classes=args.num_classes,
        validation_freq=args.validation_freq,
        verbose=args.verbose,
        seed=args.seed,
        serving=args.serving
    )
| 15,500 | 28.469582 | 92 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/tensorflow/autoencoder.py | import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Bidirectional, Concatenate, Dense, Flatten, Layer, LSTM
from typing import Callable, List, Tuple
from alibi_detect.utils.tensorflow.distance import relative_euclidean_distance
class Sampling(Layer):
    """ Reparametrization trick. Uses (z_mean, z_log_var) to sample the latent vector z. """
    def call(self, inputs: Tuple[tf.Tensor, tf.Tensor]) -> tf.Tensor:
        """
        Draw a latent sample z ~ N(z_mean, exp(z_log_var)) via the
        reparametrization trick.

        Parameters
        ----------
        inputs
            Tuple with mean and log variance.

        Returns
        -------
        Sampled vector z.
        """
        mean, log_var = inputs
        shape = tf.shape(mean)
        # standard normal noise of shape (batch, latent_dim)
        noise = tf.keras.backend.random_normal(shape=(shape[0], shape[1]))
        return mean + tf.exp(0.5 * log_var) * noise
class EncoderVAE(Layer):
    def __init__(self,
                 encoder_net: tf.keras.Model,
                 latent_dim: int,
                 name: str = 'encoder_vae') -> None:
        """
        Encoder of VAE.
        Parameters
        ----------
        encoder_net
            Layers for the encoder wrapped in a tf.keras.Sequential class.
        latent_dim
            Dimensionality of the latent space.
        name
            Name of encoder.
        """
        super(EncoderVAE, self).__init__(name=name)
        self.encoder_net = encoder_net
        # heads projecting encoder features to the Gaussian posterior parameters
        self.fc_mean = Dense(latent_dim, activation=None)
        self.fc_log_var = Dense(latent_dim, activation=None)
        self.sampling = Sampling()
    def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
        """Return (z_mean, z_log_var, z) for input batch x."""
        x = self.encoder_net(x)
        # flatten any non-2D encoder output before the dense heads
        if len(x.shape) > 2:
            x = Flatten()(x)
        z_mean = self.fc_mean(x)
        z_log_var = self.fc_log_var(x)
        # reparametrized sample from N(z_mean, exp(z_log_var))
        z = self.sampling((z_mean, z_log_var))
        return z_mean, z_log_var, z
class Decoder(Layer):
    def __init__(self,
                 decoder_net: tf.keras.Model,
                 name: str = 'decoder') -> None:
        """
        Decoder of (V)AE.
        Parameters
        ----------
        decoder_net
            Layers for the decoder wrapped in a tf.keras.Sequential class.
        name
            Name of decoder.
        """
        super().__init__(name=name)
        self.decoder_net = decoder_net
    def call(self, x: tf.Tensor) -> tf.Tensor:
        """Reconstruct the input from its latent representation."""
        reconstruction = self.decoder_net(x)
        return reconstruction
class VAE(tf.keras.Model):
    def __init__(self,
                 encoder_net: tf.keras.Model,
                 decoder_net: tf.keras.Model,
                 latent_dim: int,
                 beta: float = 1.,
                 name: str = 'vae') -> None:
        """
        Combine encoder and decoder in VAE.
        Parameters
        ----------
        encoder_net
            Layers for the encoder wrapped in a tf.keras.Sequential class.
        decoder_net
            Layers for the decoder wrapped in a tf.keras.Sequential class.
        latent_dim
            Dimensionality of the latent space.
        beta
            Beta parameter for KL-divergence loss term.
        name
            Name of VAE model.
        """
        super(VAE, self).__init__(name=name)
        self.encoder = EncoderVAE(encoder_net, latent_dim)
        self.decoder = Decoder(decoder_net)
        self.beta = beta
        self.latent_dim = latent_dim
    def call(self, x: tf.Tensor) -> tf.Tensor:
        """Encode, sample and decode; registers the (beta-weighted) KL loss
        as a model loss so it is picked up during training."""
        z_mean, z_log_var, z = self.encoder(x)
        x_recon = self.decoder(z)
        # add KL divergence loss term
        kl_loss = -.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
        self.add_loss(self.beta * kl_loss)
        return x_recon
class EncoderAE(Layer):
    def __init__(self,
                 encoder_net: tf.keras.Model,
                 name: str = 'encoder_ae') -> None:
        """
        Encoder of AE.
        Parameters
        ----------
        encoder_net
            Layers for the encoder wrapped in a tf.keras.Sequential class.
        name
            Name of encoder.
        """
        super().__init__(name=name)
        self.encoder_net = encoder_net
    def call(self, x: tf.Tensor) -> tf.Tensor:
        """Map the input to its latent representation."""
        encoding = self.encoder_net(x)
        return encoding
class AE(tf.keras.Model):
    def __init__(self,
                 encoder_net: tf.keras.Model,
                 decoder_net: tf.keras.Model,
                 name: str = 'ae') -> None:
        """
        Combine encoder and decoder in AE.
        Parameters
        ----------
        encoder_net
            Layers for the encoder wrapped in a tf.keras.Sequential class.
        decoder_net
            Layers for the decoder wrapped in a tf.keras.Sequential class.
        name
            Name of autoencoder model.
        """
        super().__init__(name=name)
        self.encoder = EncoderAE(encoder_net)
        self.decoder = Decoder(decoder_net)
    def call(self, x: tf.Tensor) -> tf.Tensor:
        """Encode then decode, returning the reconstruction of x."""
        return self.decoder(self.encoder(x))
class EncoderLSTM(Layer):
    def __init__(self,
                 latent_dim: int,
                 name: str = 'encoder_lstm') -> None:
        """
        Bidirectional LSTM encoder.
        Parameters
        ----------
        latent_dim
            Latent dimension. Must be an even number given the bidirectional encoder.
        name
            Name of encoder.
        """
        super(EncoderLSTM, self).__init__(name=name)
        # each direction gets latent_dim // 2 units so the concatenated state is latent_dim
        self.encoder_net = Bidirectional(LSTM(latent_dim // 2, return_state=True, return_sequences=True))
    def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, List[tf.Tensor]]:
        """Return the encoded sequence and the combined [h, c] state."""
        # bidirectional LSTM returns forward and backward hidden/cell states
        enc_out, fwd_h, fwd_c, bwd_h, bwd_c = self.encoder_net(x)
        # concatenate directions so the state matches the latent_dim decoder state
        h = Concatenate()([fwd_h, bwd_h])
        c = Concatenate()([fwd_c, bwd_c])
        return enc_out, [h, c]
class DecoderLSTM(Layer):
    def __init__(self,
                 latent_dim: int,
                 output_dim: int,
                 output_activation: str = None,
                 name: str = 'decoder_lstm') -> None:
        """
        LSTM decoder.
        Parameters
        ----------
        latent_dim
            Latent dimension.
        output_dim
            Decoder output dimension.
        output_activation
            Activation used in the Dense output layer.
        name
            Name of decoder.
        """
        super(DecoderLSTM, self).__init__(name=name)
        self.decoder_net = LSTM(latent_dim, return_state=True, return_sequences=True)
        # projects the LSTM hidden sequence back to the feature dimension
        self.dense = Dense(output_dim, activation=output_activation)
    def call(self, x: tf.Tensor, init_state: List[tf.Tensor]) -> Tuple[tf.Tensor, tf.Tensor, List[tf.Tensor]]:
        """Decode one (sub)sequence; returns (output, hidden sequence, [h, c])."""
        # x: decoder input; init_state: [h, c] carried over from encoder or previous step
        x, h, c = self.decoder_net(x, initial_state=init_state)
        dec_out = self.dense(x)
        return dec_out, x, [h, c]
class Seq2Seq(tf.keras.Model):
    def __init__(self,
                 encoder_net: EncoderLSTM,
                 decoder_net: DecoderLSTM,
                 threshold_net: tf.keras.Model,
                 n_features: int,
                 score_fn: Callable = tf.math.squared_difference,
                 beta: float = 1.,
                 name: str = 'seq2seq') -> None:
        """
        Sequence-to-sequence model.
        Parameters
        ----------
        encoder_net
            Encoder network.
        decoder_net
            Decoder network.
        threshold_net
            Regression network used to estimate threshold.
        n_features
            Number of features.
        score_fn
            Function used for outlier score.
        beta
            Weight on the threshold estimation loss term.
        name
            Name of the seq2seq model.
        """
        super(Seq2Seq, self).__init__(name=name)
        self.encoder = encoder_net
        self.decoder = decoder_net
        self.threshold_net = threshold_net
        # final dense head mapping threshold_net output to per-feature thresholds
        self.threshold_est = Dense(n_features, activation=None)
        self.score_fn = score_fn
        self.beta = beta
    def call(self, x: tf.Tensor) -> tf.Tensor:
        """ Forward pass used for teacher-forcing training. """
        # reconstruct input via encoder-decoder
        init_state = self.encoder(x)[1]
        # teacher forcing: the decoder receives the ground-truth sequence x
        x_recon, z, _ = self.decoder(x, init_state=init_state)
        # compute outlier score
        err_recon = self.score_fn(x, x_recon)
        # estimate outlier threshold from hidden state of decoder
        z = self.threshold_net(z)
        threshold_est = self.threshold_est(z)
        # add threshold estimate loss
        threshold_loss = tf.reduce_mean((err_recon - threshold_est) ** 2)
        self.add_loss(self.beta * threshold_loss)
        return x_recon
    def decode_seq(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """ Sequence decoding and threshold estimation used for inference. """
        seq_len = np.shape(x)[1]
        n_batch = x.shape[0]
        # use encoder to get state vectors
        init_state = self.encoder(x)[1]
        # generate start of target sequence
        decoder_input = np.reshape(x[:, 0, :], (n_batch, 1, -1))
        # initialize hidden states used to compute outlier thresholds
        z = np.zeros((n_batch, seq_len, init_state[0].numpy().shape[1])).astype(np.float32)
        # sequential prediction of time series
        decoded_seq = np.zeros_like(x)
        # the first timestep is copied from the input; decoding starts at i=1
        decoded_seq[:, 0, :] = x[:, 0, :]
        i = 1
        while i < seq_len:
            # decode step in sequence
            decoder_output = self.decoder(decoder_input, init_state=init_state)
            decoded_seq[:, i:i+1, :] = decoder_output[0].numpy()
            # carry the decoder [h, c] state forward to the next step
            init_state = decoder_output[2]
            # update hidden state decoder used for outlier threshold
            z[:, i:i+1, :] = decoder_output[1].numpy()
            # update next decoder input: feed back the decoder's own prediction
            decoder_input = np.zeros_like(decoder_input)
            decoder_input[:, :1, :] = decoder_output[0].numpy()
            i += 1
        # compute outlier thresholds
        z = self.threshold_net(z)
        threshold_est = self.threshold_est(z).numpy()
        return decoded_seq, threshold_est
def eucl_cosim_features(x: tf.Tensor,
                        y: tf.Tensor,
                        max_eucl: float = 1e2) -> tf.Tensor:
    """
    Compute features extracted from the reconstructed instance using the
    relative Euclidean distance and cosine similarity between 2 tensors.
    Parameters
    ----------
    x
        Tensor used in feature computation.
    y
        Tensor used in feature computation.
    max_eucl
        Maximum value to clip relative Euclidean distance by.
    Returns
    -------
    Tensor concatenating the relative Euclidean distance and cosine similarity features.
    """
    # flatten both tensors if either has rank > 2
    if len(x.shape) > 2 or len(y.shape) > 2:
        x = Flatten()(x)
        y = Flatten()(y)
    rec_cos = tf.reshape(tf.keras.losses.cosine_similarity(y, x, -1), (-1, 1))
    rec_euc = tf.reshape(relative_euclidean_distance(y, x, -1), (-1, 1))
    # rec_euc could become very large so should be clipped
    rec_euc = tf.clip_by_value(rec_euc, 0, max_eucl)
    # output shape: (batch, 2)
    return tf.concat([rec_cos, rec_euc], -1)
class AEGMM(tf.keras.Model):
    def __init__(self,
                 encoder_net: tf.keras.Model,
                 decoder_net: tf.keras.Model,
                 gmm_density_net: tf.keras.Model,
                 n_gmm: int,
                 recon_features: Callable = eucl_cosim_features,
                 name: str = 'aegmm') -> None:
        """
        Deep Autoencoding Gaussian Mixture Model.
        Parameters
        ----------
        encoder_net
            Layers for the encoder wrapped in a tf.keras.Sequential class.
        decoder_net
            Layers for the decoder wrapped in a tf.keras.Sequential class.
        gmm_density_net
            Layers for the GMM network wrapped in a tf.keras.Sequential class.
        n_gmm
            Number of components in GMM.
        recon_features
            Function to extract features from the reconstructed instance by the decoder.
        name
            Name of the AEGMM model.
        """
        super(AEGMM, self).__init__(name=name)
        self.encoder = encoder_net
        self.decoder = decoder_net
        self.gmm_density = gmm_density_net
        self.n_gmm = n_gmm
        self.recon_features = recon_features
    def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
        """Return (reconstruction, latent+recon features z, GMM memberships gamma)."""
        enc = self.encoder(x)
        x_recon = self.decoder(enc)
        # append reconstruction-error features to the latent encoding
        recon_features = self.recon_features(x, x_recon)
        z = tf.concat([enc, recon_features], -1)
        # soft GMM component memberships for each instance
        gamma = self.gmm_density(z)
        return x_recon, z, gamma
class VAEGMM(tf.keras.Model):
    def __init__(self,
                 encoder_net: tf.keras.Model,
                 decoder_net: tf.keras.Model,
                 gmm_density_net: tf.keras.Model,
                 n_gmm: int,
                 latent_dim: int,
                 recon_features: Callable = eucl_cosim_features,
                 beta: float = 1.,
                 name: str = 'vaegmm') -> None:
        """
        Variational Autoencoding Gaussian Mixture Model.
        Parameters
        ----------
        encoder_net
            Layers for the encoder wrapped in a tf.keras.Sequential class.
        decoder_net
            Layers for the decoder wrapped in a tf.keras.Sequential class.
        gmm_density_net
            Layers for the GMM network wrapped in a tf.keras.Sequential class.
        n_gmm
            Number of components in GMM.
        latent_dim
            Dimensionality of the latent space.
        recon_features
            Function to extract features from the reconstructed instance by the decoder.
        beta
            Beta parameter for KL-divergence loss term.
        name
            Name of the VAEGMM model.
        """
        super(VAEGMM, self).__init__(name=name)
        self.encoder = EncoderVAE(encoder_net, latent_dim)
        self.decoder = decoder_net
        self.gmm_density = gmm_density_net
        self.n_gmm = n_gmm
        self.latent_dim = latent_dim
        self.recon_features = recon_features
        self.beta = beta
    def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
        """Return (reconstruction, latent+recon features z, GMM memberships gamma);
        registers the beta-weighted KL loss as a model loss."""
        enc_mean, enc_log_var, enc = self.encoder(x)
        x_recon = self.decoder(enc)
        # append reconstruction-error features to the sampled latent encoding
        recon_features = self.recon_features(x, x_recon)
        z = tf.concat([enc, recon_features], -1)
        gamma = self.gmm_density(z)
        # add KL divergence loss term
        kl_loss = -.5 * tf.reduce_mean(enc_log_var - tf.square(enc_mean) - tf.exp(enc_log_var) + 1)
        self.add_loss(self.beta * kl_loss)
        return x_recon, z, gamma
| 14,709 | 31.472406 | 110 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/tensorflow/trainer.py | from functools import partial
import numpy as np
import tensorflow as tf
from typing import Callable, Tuple
def trainer(
        model: tf.keras.Model,
        loss_fn: tf.keras.losses,
        x_train: np.ndarray,
        y_train: np.ndarray = None,
        dataset: tf.keras.utils.Sequence = None,
        optimizer: tf.keras.optimizers = tf.keras.optimizers.Adam,
        loss_fn_kwargs: dict = None,
        preprocess_fn: Callable = None,
        epochs: int = 20,
        reg_loss_fn: Callable = (lambda model: 0),
        batch_size: int = 64,
        buffer_size: int = 1024,
        verbose: bool = True,
        log_metric: Tuple[str, "tf.keras.metrics"] = None,
        callbacks: tf.keras.callbacks = None
) -> None:
    """
    Train TensorFlow model.
    Parameters
    ----------
    model
        Model to train.
    loss_fn
        Loss function used for training.
    x_train
        Training data.
    y_train
        Training labels.
    dataset
        Training dataset which returns (x, y).
    optimizer
        Optimizer used for training.
    loss_fn_kwargs
        Kwargs for loss function.
    preprocess_fn
        Preprocessing function applied to each training batch.
    epochs
        Number of training epochs.
    reg_loss_fn
        Allows an additional regularisation term to be defined as reg_loss_fn(model)
    batch_size
        Batch size used for training.
    buffer_size
        Maximum number of elements that will be buffered when prefetching.
    verbose
        Whether to print training progress.
    log_metric
        Additional metrics whose progress will be displayed if verbose equals True.
    callbacks
        Callbacks used during training.
    """
    # instantiate the optimizer if a class (rather than an instance) was passed
    optimizer = optimizer() if isinstance(optimizer, type) else optimizer
    # batches yield (x, y) pairs when a Sequence is given or labels are provided
    return_xy = False if not isinstance(dataset, tf.keras.utils.Sequence) and y_train is None else True
    if not isinstance(dataset, tf.keras.utils.Sequence):  # create dataset
        train_data = x_train if y_train is None else (x_train, y_train)
        dataset = tf.data.Dataset.from_tensor_slices(train_data)
        dataset = dataset.shuffle(buffer_size=buffer_size).batch(batch_size)
    n_minibatch = len(dataset)
    if loss_fn_kwargs:
        loss_fn = partial(loss_fn, **loss_fn_kwargs)
    # iterate over epochs
    for epoch in range(epochs):
        if verbose:
            pbar = tf.keras.utils.Progbar(n_minibatch, 1)
        if hasattr(dataset, 'on_epoch_end'):
            dataset.on_epoch_end()
        loss_val_ma = 0.
        for step, data in enumerate(dataset):
            x, y = data if return_xy else (data, None)
            if isinstance(preprocess_fn, Callable):  # type: ignore
                x = preprocess_fn(x)
            with tf.GradientTape() as tape:
                y_hat = model(x)
                # autoencoder-style training: target defaults to the input
                y = x if y is None else y
                if isinstance(loss_fn, Callable):  # type: ignore
                    # multi-output models (e.g. AEGMM) pass all outputs to the loss
                    args = [y, y_hat] if tf.is_tensor(y_hat) else [y] + list(y_hat)
                    loss = loss_fn(*args)
                else:
                    loss = 0.
                if model.losses:  # additional model losses
                    loss += sum(model.losses)
                loss += reg_loss_fn(model)  # alternative way they might be specified
            grads = tape.gradient(loss, model.trainable_weights)
            optimizer.apply_gradients(zip(grads, model.trainable_weights))
            if verbose:
                loss_val = loss.numpy()
                if loss_val.shape:
                    if loss_val.shape[0] != batch_size:
                        # pad a smaller final batch's per-instance loss with its
                        # mean so the moving average stays comparable across steps
                        if len(loss_val.shape) == 1:
                            shape = (batch_size - loss_val.shape[0], )
                        elif len(loss_val.shape) == 2:
                            shape = (batch_size - loss_val.shape[0], loss_val.shape[1])  # type: ignore
                        add_mean = np.ones(shape) * loss_val.mean()
                        loss_val = np.r_[loss_val, add_mean]
                # incremental (running) mean of the loss over the epoch
                loss_val_ma = loss_val_ma + (loss_val - loss_val_ma) / (step + 1)
                pbar_values = [('loss_ma', loss_val_ma)]
                if log_metric is not None:
                    log_metric[1](y, y_hat)
                    pbar_values.append((log_metric[0], log_metric[1].result().numpy()))
                pbar.add(1, values=pbar_values)
| 4,358 | 37.919643 | 103 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/tensorflow/tests/test_autoencoder_tf.py | import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.models.tensorflow.autoencoder import AE, AEGMM, VAE, VAEGMM, Seq2Seq, EncoderLSTM, DecoderLSTM
from alibi_detect.models.tensorflow.losses import loss_aegmm, loss_vaegmm
from alibi_detect.models.tensorflow.trainer import trainer
# shared encoder/decoder networks used by both the AE and VAE under test
input_dim = 784
latent_dim = 50
encoder_net = tf.keras.Sequential(
    [
        InputLayer(input_shape=(input_dim,)),
        Dense(128, activation=tf.nn.relu),
        Dense(latent_dim, activation=None)
    ]
)
decoder_net = tf.keras.Sequential(
    [
        InputLayer(input_shape=(latent_dim,)),
        Dense(128, activation=tf.nn.relu),
        Dense(input_dim, activation=tf.nn.sigmoid)
    ]
)
ae = AE(encoder_net, decoder_net)
vae = VAE(encoder_net, decoder_net, latent_dim)
tests = [ae, vae]
@pytest.fixture
def tf_v_ae_mnist(request):
    # load and preprocess MNIST data
    (X_train, _), (X_test, _) = tf.keras.datasets.mnist.load_data()
    X = X_train.reshape(60000, input_dim)[:1000]  # only train on 1000 instances
    X = X.astype(np.float32)
    X /= 255
    # init model, predict with untrained model, train and predict with trained model
    model = request.param
    X_recon_untrained = model(X).numpy()
    model_weights = model.weights[1].numpy().copy()
    model.compile(optimizer='adam', loss='mse')
    model.fit(X, X, epochs=5)
    X_recon = model(X).numpy()
    # training must have updated the weights ...
    assert (model_weights != model.weights[1].numpy()).any()
    # ... and reduced the reconstruction error
    assert np.sum((X - X_recon_untrained)**2) > np.sum((X - X_recon)**2)
@pytest.mark.parametrize('tf_v_ae_mnist', tests, indirect=True)
def test_ae_vae(tf_v_ae_mnist):
    # all assertions live in the (indirectly parametrized) fixture above
    pass
# GMM density network shared by the AEGMM and VAEGMM under test
n_gmm = 1
gmm_density_net = tf.keras.Sequential(
    [
        InputLayer(input_shape=(latent_dim + 2,)),
        Dense(10, activation=tf.nn.relu),
        Dense(n_gmm, activation=tf.nn.softmax)
    ]
)
aegmm = AEGMM(encoder_net, decoder_net, gmm_density_net, n_gmm)
vaegmm = VAEGMM(encoder_net, decoder_net, gmm_density_net, n_gmm, latent_dim)
tests = [(aegmm, loss_aegmm), (vaegmm, loss_vaegmm)]
n_tests = len(tests)
@pytest.fixture
def tf_v_aegmm_mnist(request):
    # load and preprocess MNIST data
    (X_train, _), (X_test, _) = tf.keras.datasets.mnist.load_data()
    X = X_train.reshape(60000, input_dim)[:1000]  # only train on 1000 instances
    X = X.astype(np.float32)
    X /= 255
    # init model, predict with untrained model, train and predict with trained model
    model, loss_fn = tests[request.param]
    X_recon_untrained, z, gamma = model(X)
    # latent vector includes 2 extra reconstruction-error features
    assert X_recon_untrained.shape == X.shape
    assert z.shape[1] == latent_dim + 2
    assert gamma.shape[1] == n_gmm
    model_weights = model.weights[1].numpy().copy()
    trainer(model, loss_fn, X, epochs=5, verbose=False, batch_size=1000)
    # training must have updated the weights
    assert (model_weights != model.weights[1].numpy()).any()
@pytest.mark.parametrize('tf_v_aegmm_mnist', list(range(n_tests)), indirect=True)
def test_aegmm_vaegmm(tf_v_aegmm_mnist):
    # all assertions live in the (indirectly parametrized) fixture above
    pass
# seq2seq test cases: decoders with 1 and 2 output features
seq_len = 10
tests_seq2seq = [(DecoderLSTM(latent_dim, 1, None), 1),
                 (DecoderLSTM(latent_dim, 2, None), 2)]
n_tests = len(tests_seq2seq)
@pytest.fixture
def tf_seq2seq_sine(request):
    # create artificial sine time series
    X = np.sin(np.linspace(-50, 50, 10000)).astype(np.float32)
    # init model
    decoder_net_, n_features = tests_seq2seq[request.param]
    encoder_net = EncoderLSTM(latent_dim)
    threshold_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(seq_len, latent_dim)),
            Dense(10, activation=tf.nn.relu)
        ]
    )
    model = Seq2Seq(encoder_net, decoder_net_, threshold_net, n_features)
    # reshape data into (batch, seq_len, n_features); target y is the series
    # shifted one step ahead of the input X
    shape = (-1, seq_len, n_features)
    y = np.roll(X, -1, axis=0).reshape(shape)
    X = X.reshape(shape)
    # predict with untrained model, train and predict with trained model
    X_recon_untrained = model(X)
    assert X_recon_untrained.shape == X.shape
    model_weights = model.weights[1].numpy().copy()
    trainer(model, tf.keras.losses.mse, X, y_train=y, epochs=2, verbose=False, batch_size=64)
    X_recon = model(X).numpy()
    # training must have updated the weights and reduced reconstruction error
    assert (model_weights != model.weights[1].numpy()).any()
    assert np.sum((X - X_recon_untrained)**2) > np.sum((X - X_recon)**2)
@pytest.mark.parametrize('tf_seq2seq_sine', list(range(n_tests)), indirect=True)
def test_seq2seq(tf_seq2seq_sine):
    # all assertions live in the (indirectly parametrized) fixture above
    pass
| 4,397 | 31.338235 | 112 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/tensorflow/tests/test_losses_tf.py | import pytest
import numpy as np
import tensorflow as tf
from alibi_detect.models.tensorflow.losses import elbo, loss_adv_ae, loss_aegmm, loss_vaegmm, loss_distillation
# N instances, K GMM components, D latent dims, F features
N, K, D, F = 10, 5, 1, 3
x = np.random.rand(N, F).astype(np.float32)
y = np.random.rand(N, F).astype(np.float32)
# equivalent covariance specifications: scalar sim, diagonal and full identity
sim = 1.
cov_diag = tf.ones(x.shape[1])
cov_full = tf.eye(x.shape[1])
def test_elbo():
    # all three (equivalent) covariance parametrizations must agree
    assert elbo(x, y, cov_full=cov_full) == elbo(x, y, cov_diag=cov_diag) == elbo(x, y, sim=sim)
    assert elbo(x, y) == elbo(x, y, sim=1.)  # Passing no kwarg's should lead to an identity covariance matrix
    # reconstruction of unrelated y scores worse than perfect reconstruction of x
    assert elbo(x, y, sim=.05).numpy() > 0
    assert elbo(x, x, sim=.05).numpy() < 0
def test_elbo_error():
    """Passing more than one covariance specification must raise ValueError.

    Fix: previously all three calls shared a single ``pytest.raises`` block, so
    the first call raised and the remaining two were never executed. Each
    invalid combination now gets its own context manager and is actually
    checked.
    """
    with pytest.raises(ValueError):
        elbo(x, y, cov_full=cov_full, cov_diag=cov_diag)
    with pytest.raises(ValueError):
        elbo(x, y, cov_full=cov_full, sim=sim)
    with pytest.raises(ValueError):
        elbo(x, y, cov_diag=cov_diag, sim=sim)
# latent vectors and (soft) GMM memberships used by the (V)AEGMM loss tests
z = np.random.rand(N, D).astype(np.float32)
gamma = np.random.rand(N, K).astype(np.float32)
def test_loss_aegmm():
    # each enabled loss term must strictly increase the total loss
    loss = loss_aegmm(x, y, z, gamma, w_energy=.1, w_cov_diag=.005)
    loss_no_cov = loss_aegmm(x, y, z, gamma, w_energy=.1, w_cov_diag=0.)
    loss_xx = loss_aegmm(x, x, z, gamma, w_energy=.1, w_cov_diag=0.)
    assert loss > loss_no_cov
    # perfect reconstruction (x, x) yields a lower loss than (x, y)
    assert loss_no_cov > loss_xx
def test_loss_vaegmm():
    # each enabled loss term must strictly increase the total loss
    loss = loss_vaegmm(x, y, z, gamma, w_recon=1e-7, w_energy=.1, w_cov_diag=.005)
    loss_no_recon = loss_vaegmm(x, y, z, gamma, w_recon=0., w_energy=.1, w_cov_diag=.005)
    loss_no_recon_cov = loss_vaegmm(x, y, z, gamma, w_recon=0., w_energy=.1, w_cov_diag=0.)
    loss_xx = loss_vaegmm(x, x, z, gamma, w_recon=1e-7, w_energy=.1, w_cov_diag=.005)
    assert loss > loss_no_recon
    assert loss_no_recon > loss_no_recon_cov
    # perfect reconstruction (x, x) yields a lower loss than (x, y)
    assert loss > loss_xx
# small classifier used as the attacked model in the adversarial AE loss test
inputs = tf.keras.Input(shape=(x.shape[1],))
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
def test_loss_adv_ae():
    loss = loss_adv_ae(x, y, model, w_model=1., w_recon=0.)
    loss_with_recon = loss_adv_ae(x, y, model, w_model=1., w_recon=1.)
    assert loss > 0.
    # adding the reconstruction term must increase the total loss
    assert loss_with_recon > loss
# student model used for the model-distillation loss test
layers = [tf.keras.layers.InputLayer(input_shape=(x.shape[1],)),
          tf.keras.layers.Dense(5, activation=tf.nn.softmax)]
distilled_model = tf.keras.Sequential(layers)
def test_loss_adv_md():
    # use the distilled model's own predictions as distillation targets
    y_true = distilled_model(x).numpy()
    loss_kld = loss_distillation(x, y_true, model, loss_type='kld')
    loss_xent = loss_distillation(x, y_true, model, loss_type='xent')
    # both supported loss types must produce a positive loss
    assert loss_kld > 0.
    assert loss_xent > 0.
| 2,563 | 34.123288 | 111 | py |
alibi-detect | alibi-detect-master/alibi_detect/models/tensorflow/tests/test_trainer_tf.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.losses import categorical_crossentropy
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.utils.tensorflow.data import TFDataset
# N instances with F features; one-hot targets split half zeros / half ones
N, F = 100, 2
x = np.random.rand(N, F).astype(np.float32)
y = np.concatenate([np.zeros((N, 1)), np.ones((N, 1))], axis=1).astype(np.float32)
inputs = tf.keras.Input(shape=(x.shape[1],))
outputs = tf.keras.layers.Dense(F, activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
# snapshot of the initial weights to verify training changes them
check_model_weights = model.weights[0].numpy()
def preprocess_fn(x: np.ndarray) -> np.ndarray:
    # identity preprocessing; only checks the preprocess_fn code path runs
    return x
# cartesian product of trainer argument combinations under test
X_train = [x]
y_train = [None, y]
dataset = [partial(TFDataset, batch_size=10), None]
loss_fn_kwargs = [None, {'from_logits': False}]
preprocess = [preprocess_fn, None]
verbose = [False, True]
tests = list(product(X_train, y_train, dataset, loss_fn_kwargs, preprocess, verbose))
n_tests = len(tests)
@pytest.fixture
def trainer_params(request):
    x_train, y_train, dataset, loss_fn_kwargs, preprocess, verbose = tests[request.param]
    return x_train, y_train, dataset, loss_fn_kwargs, preprocess, verbose
@pytest.mark.parametrize('trainer_params', list(range(n_tests)), indirect=True)
def test_trainer(trainer_params):
    x_train, y_train, dataset, loss_fn_kwargs, preprocess, verbose = trainer_params
    # a Sequence dataset is only constructed when labels are available
    if dataset is not None and y_train is not None:
        ds = dataset(x_train, y_train)
    else:
        ds = None
    trainer(model, categorical_crossentropy, x_train, y_train=y_train, dataset=ds,
            loss_fn_kwargs=loss_fn_kwargs, preprocess_fn=preprocess, epochs=2, verbose=verbose)
    # training must have updated the model weights
    assert (model.weights[0].numpy() != check_model_weights).any()
| 1,795 | 34.215686 | 95 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/loading.py | import logging
import os
from functools import partial
from importlib import import_module
from pathlib import Path
from typing import Any, Callable, Optional, Union, Type, TYPE_CHECKING
import dill
import numpy as np
import toml
from transformers import AutoTokenizer
from alibi_detect.saving.registry import registry
from alibi_detect.saving._tensorflow import load_detector_legacy, load_embedding_tf, load_kernel_config_tf, \
load_model_tf, load_optimizer_tf, prep_model_and_emb_tf, get_tf_dtype
from alibi_detect.saving._pytorch import load_embedding_pt, load_kernel_config_pt, load_model_pt, \
load_optimizer_pt, prep_model_and_emb_pt, get_pt_dtype
from alibi_detect.saving._keops import load_kernel_config_ke
from alibi_detect.saving._sklearn import load_model_sk
from alibi_detect.saving.validate import validate_config
from alibi_detect.base import Detector, ConfigurableDetector, StatefulDetectorOnline
from alibi_detect.utils.frameworks import has_tensorflow, has_pytorch, Framework
from alibi_detect.saving.schemas import supported_models_tf, supported_models_torch
from alibi_detect.utils.missing_optional_dependency import import_optional
get_device = import_optional('alibi_detect.utils.pytorch.misc', names=['get_device'])
if TYPE_CHECKING:
import tensorflow as tf
import torch
STATE_PATH = 'state/'  # directory (relative to detector directory) where state is saved (and loaded from)
logger = logging.getLogger(__name__)
# Fields to resolve in resolve_config ("resolve" meaning either load local artefact or resolve @registry, conversion to
# tuple, np.ndarray and np.dtype are dealt with separately).
# Note: For fields consisting of nested dicts, they must be listed in order from deepest to shallowest, so that the
# deepest fields are resolved first. e.g. 'preprocess_fn.src' must be resolved before 'preprocess_fn'.
# Each entry is a key path into the (nested) config dict.
FIELDS_TO_RESOLVE = [
    ['preprocess_fn', 'src'],
    ['preprocess_fn', 'model'],
    ['preprocess_fn', 'embedding'],
    ['preprocess_fn', 'tokenizer'],
    ['preprocess_fn', 'preprocess_batch_fn'],
    ['preprocess_fn'],
    ['x_ref'],
    ['c_ref'],
    ['model'],
    ['optimizer'],
    ['reg_loss_fn'],
    ['dataset'],
    ['kernel', 'src'],
    ['kernel', 'proj'],
    ['kernel', 'init_sigma_fn'],
    ['kernel', 'kernel_a', 'src'],
    ['kernel', 'kernel_a', 'init_sigma_fn'],
    ['kernel', 'kernel_b', 'src'],
    ['kernel', 'kernel_b', 'init_sigma_fn'],
    ['kernel'],
    ['x_kernel', 'src'],
    ['x_kernel', 'init_sigma_fn'],
    ['x_kernel'],
    ['c_kernel', 'src'],
    ['c_kernel', 'init_sigma_fn'],
    ['c_kernel'],
    ['initial_diffs'],
    ['tokenizer']
]
# Fields to convert from str to dtype (key paths, same format as above)
FIELDS_TO_DTYPE = [
    ['preprocess_fn', 'dtype']
]
def load_detector(filepath: Union[str, os.PathLike], **kwargs) -> Union[Detector, ConfigurableDetector]:
    """
    Load outlier, drift or adversarial detector.
    Parameters
    ----------
    filepath
        Load directory.
    Returns
    -------
    Loaded outlier or adversarial detector object.
    """
    path = Path(filepath)
    # a direct reference to a 'config.toml' goes straight to the config loader
    if path.name == 'config.toml':
        return _load_detector_config(path)
    if not path.is_dir():
        raise ValueError("load_detector accepts only a filepath to a directory, or a config.toml file.")
    # inside a directory, prefer config.toml over the legacy meta.* formats
    contents = {entry.name for entry in path.iterdir() if entry.is_file()}
    if 'config.toml' in contents:
        return _load_detector_config(path.joinpath('config.toml'))
    if 'meta.dill' in contents:
        return load_detector_legacy(path, '.dill', **kwargs)
    if 'meta.pickle' in contents:
        return load_detector_legacy(path, '.pickle', **kwargs)
    raise ValueError(f'Neither meta.dill, meta.pickle or config.toml exist in {path}.')
# TODO - will eventually become load_detector
def _load_detector_config(filepath: Union[str, os.PathLike]) -> ConfigurableDetector:
    """
    Loads a drift detector specified in a detector config dict. Validation is performed with pydantic.
    Parameters
    ----------
    filepath
        Filepath to the `config.toml` file.
    Returns
    -------
    The instantiated detector.
    """
    # Load toml if needed
    if isinstance(filepath, (str, os.PathLike)):
        config_file = Path(filepath)
        config_dir = config_file.parent
        cfg = read_config(config_file)
    else:
        raise ValueError("`filepath` should point to a directory containing a 'config.toml' file.")
    # Resolve and validate config. The config is validated twice: once with raw
    # (unresolved) artefact references and once after the artefacts are loaded.
    cfg = validate_config(cfg)
    logger.info('Validated unresolved config.')
    cfg = resolve_config(cfg, config_dir=config_dir)
    cfg = validate_config(cfg, resolved=True)
    logger.info('Validated resolved config.')
    # Init detector from config
    logger.info('Instantiating detector.')
    detector = _init_detector(cfg)
    # Load state if it exists (and detector supports it)
    # TODO - this will be removed in follow-up offline state PR, as loading to be moved to __init__ (w/ state_dir kwarg)
    if isinstance(detector, StatefulDetectorOnline):
        state_dir = config_dir.joinpath(STATE_PATH)
        if state_dir.is_dir():
            detector.load_state(state_dir)
    logger.info('Finished loading detector.')
    return detector
def _init_detector(cfg: dict) -> ConfigurableDetector:
    """
    Instantiates a detector from a fully resolved config dictionary.
    Parameters
    ----------
    cfg
        The detector's resolved config dictionary.
    Returns
    -------
    The instantiated detector.
    """
    detector_name = cfg.pop('name')
    # look up the detector class by name in alibi_detect.cd and build it from config
    detector_cls = getattr(import_module('alibi_detect.cd'), detector_name)
    detector = detector_cls.from_config(cfg)
    logger.info('Instantiated drift detector {}'.format(detector_name))
    return detector
def _load_kernel_config(cfg: dict, backend: str = Framework.TENSORFLOW) -> Callable:
    """
    Loads a kernel from a kernel config dict.
    Parameters
    ----------
    cfg
        A kernel config dict. (see pydantic schema's).
    backend
        The backend.
    Returns
    -------
    The kernel.
    """
    # Dispatch to the backend-specific loader; any backend other than tensorflow/pytorch is keops.
    if backend == Framework.TENSORFLOW:
        return load_kernel_config_tf(cfg)
    if backend == Framework.PYTORCH:
        return load_kernel_config_pt(cfg)
    return load_kernel_config_ke(cfg)
def _load_preprocess_config(cfg: dict) -> Optional[Callable]:
    """
    This function builds a preprocess_fn from the preprocess dict in a detector config dict. The dict format is
    expected to match that generated by serialize_preprocess in alibi_detect.utils.saving (also see pydantic schema).
    The model, tokenizer and preprocess_batch_fn are expected to be already resolved.
    Parameters
    ----------
    cfg
        A preprocess_fn config dict. (see pydantic schemas).
    Returns
    -------
    The preprocess_fn function.
    """
    preprocess_fn = cfg.pop('src')
    if callable(preprocess_fn):
        if preprocess_fn.__name__ == 'preprocess_drift':
            # If preprocess_drift function, kwargs is preprocess cfg minus 'src' and 'kwargs'
            cfg.pop('kwargs')
            kwargs = cfg.copy()
            # Final processing of model (and/or embedding)
            model = kwargs['model']
            emb = kwargs.pop('embedding')  # embedding passed to preprocess_drift as `model` therefore remove
            # Backend specifics: chain the embedding to the model for the relevant framework; if no
            # model is given, the embedding alone becomes the `model`.
            if has_tensorflow and isinstance(model, supported_models_tf):
                model = prep_model_and_emb_tf(model, emb)
            elif has_pytorch and isinstance(model, supported_models_torch):
                model = prep_model_and_emb_pt(model, emb)
            elif model is None:
                model = emb
            if model is None:
                raise ValueError("A 'model' and/or `embedding` must be specified when "
                                 "preprocess_fn='preprocess_drift'")
            # NOTE(review): `model` is re-inserted again below after the device move, so this first
            # update is redundant (but harmless) for the torch branch.
            kwargs.update({'model': model})
            # Set`device` if a PyTorch model, otherwise remove from kwargs
            if isinstance(model, supported_models_torch):
                device = get_device(cfg['device'])
                model = model.to(device).eval()
                kwargs.update({'device': device})
                kwargs.update({'model': model})
            else:
                kwargs.pop('device')
        else:
            kwargs = cfg['kwargs']  # If generic callable, kwargs is cfg['kwargs']
    else:
        # `src` failed to resolve to a callable — warn and disable preprocessing rather than error.
        logger.warning('Unable to process preprocess_fn. No preprocessing function is defined.')
        return None
    if kwargs == {}:
        return preprocess_fn
    else:
        return partial(preprocess_fn, **kwargs)
def _load_model_config(cfg: dict) -> Callable:
    """
    Loads supported models from a model config dict.
    Parameters
    ----------
    cfg
        Model config dict. (see pydantic model schemas).
    Returns
    -------
    The loaded model.
    Raises
    ------
    FileNotFoundError
        If `src` is not a recognised directory.
    ValueError
        If `flavour` is not a recognised model flavour.
    """
    # Load model
    flavour = cfg['flavour']
    src = cfg['src']
    custom_obj = cfg['custom_objects']
    layer = cfg['layer']
    src = Path(src)
    if not src.is_dir():
        raise FileNotFoundError("The `src` field is not a recognised directory. It should be a directory containing "
                                "a compatible model.")
    if flavour == Framework.TENSORFLOW:
        model = load_model_tf(src, custom_objects=custom_obj, layer=layer)
    elif flavour == Framework.PYTORCH:
        model = load_model_pt(src, layer=layer)
    elif flavour == Framework.SKLEARN:
        model = load_model_sk(src)
    else:
        # Previously an unrecognised flavour fell through and surfaced as an opaque
        # UnboundLocalError on `model`; raise an explicit error instead. (Normally unreachable
        # since pydantic constrains `flavour`, but this function is also callable directly.)
        raise ValueError(f"Model `flavour` not recognised: {flavour}.")
    return model
def _load_embedding_config(cfg: dict) -> Callable:  # TODO: Could type return more tightly
    """
    Load a pre-trained text embedding from an embedding config dict.
    Parameters
    ----------
    cfg
        An embedding config dict. (see the pydantic schemas).
    Returns
    -------
    The loaded embedding.
    """
    # Pick the backend-specific loader, then build the embedding from the config fields.
    loader = load_embedding_tf if cfg['flavour'] == Framework.TENSORFLOW else load_embedding_pt
    return loader(cfg['src'], embedding_type=cfg['type'], layers=cfg['layers'])
def _load_tokenizer_config(cfg: dict) -> AutoTokenizer:
    """
    Loads a text tokenizer from a tokenizer config dict.
    Parameters
    ----------
    cfg
        A tokenizer config dict. (see the pydantic schemas).
    Returns
    -------
    The loaded tokenizer.
    """
    # `src` may be a HuggingFace model name or a local directory; kwargs are forwarded verbatim.
    return AutoTokenizer.from_pretrained(Path(cfg['src']), **cfg['kwargs'])
def _load_optimizer_config(cfg: dict, backend: str) \
        -> Union['tf.keras.optimizers.Optimizer', Type['tf.keras.optimizers.Optimizer'],
                 Type['torch.optim.Optimizer']]:
    """
    Loads an optimzier from an optimizer config dict.
    Parameters
    ----------
    cfg
        The optimizer config dict.
    backend
        The backend.
    Returns
    -------
    The loaded optimizer.
    """
    # TensorFlow optimizers have their own loader; everything else is treated as PyTorch.
    if backend == Framework.TENSORFLOW:
        return load_optimizer_tf(cfg)
    return load_optimizer_pt(cfg)
def _get_nested_value(dic: dict, keys: list) -> Any:
    """
    Get a value from a nested dictionary.
    Parameters
    ----------
    dic
        The dictionary.
    keys
        List of keys to "walk" to nested value.
        For example, to extract the value `dic['key1']['key2']['key3']`, set `keys = ['key1', 'key2', 'key3']`.
    Returns
    -------
    The nested value specified by `keys`.
    """
    node = dic
    try:
        for key in keys:
            node = node[key]
    except (TypeError, KeyError):
        # A missing key, or indexing into a non-subscriptable leaf, means the path does not exist.
        return None
    return node
def _set_nested_value(dic: dict, keys: list, value: Any):
    """
    Set a value in a nested dictionary.
    Parameters
    ----------
    dic
        The dictionary.
    keys
        List of keys to "walk" to nested value.
        For example, to set the value `dic['key1']['key2']['key3']`, set `keys = ['key1', 'key2', 'key3']`.
    value
        The value to set.
    """
    # Walk (creating intermediate dicts as needed) to the parent of the target key, then assign.
    node = dic
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value
def _set_dtypes(cfg: dict):
    """
    Converts str's in the config dictionary to dtypes e.g. 'np.float32' is converted to np.float32.
    Parameters
    ----------
    cfg
        The config dictionary.
    Raises
    ------
    ValueError
        If a dtype string is not in the format np.<dtype>, tf.<dtype> or torch.<dtype>.
    """
    # TODO - we could explore a custom pydantic generic type for this (similar to how we handle NDArray)
    for key in FIELDS_TO_DTYPE:
        val = _get_nested_value(cfg, key)
        if val is None:
            continue
        # Expect '<lib>.<dtype>' e.g. 'np.float32'. Extra dotted parts are ignored, matching the
        # previous `lib, dtype, *_ = val.split('.')` behaviour.
        parts = val.split('.')
        if len(parts) < 2 or parts[0] not in ('np', 'tf', 'torch'):
            # Previously a malformed string raised an opaque unpacking error and an unknown library
            # prefix surfaced as a KeyError (the old `if lib is None` guard was unreachable, since
            # str.split never yields None); raise the intended ValueError instead.
            raise ValueError("`dtype` must be in format np.<dtype>, tf.<dtype> or torch.<dtype>.")
        lib, dtype = parts[0], parts[1]
        if lib == 'tf':
            _set_nested_value(cfg, key, get_tf_dtype(dtype))
        elif lib == 'torch':
            _set_nested_value(cfg, key, get_pt_dtype(dtype))
        else:  # lib == 'np'
            _set_nested_value(cfg, key, getattr(np, dtype))
def read_config(filepath: Union[os.PathLike, str]) -> dict:
    """
    This function reads a detector toml config file and returns a dict specifying the detector.
    Parameters
    ----------
    filepath
        The filepath to the config.toml file.
    Returns
    -------
    Parsed toml dictionary.
    """
    config_path = Path(filepath)
    # toml.load types return as MutableMapping, force to dict
    cfg = dict(toml.load(config_path))
    logger.info('Loaded config file from {}'.format(str(config_path)))
    # This is necessary as no None/null in toml spec., and missing values are set to defaults set in pydantic models.
    # But we sometimes need to explicitly spec as None.
    return _replace(cfg, "None", None)
def resolve_config(cfg: dict, config_dir: Optional[Path]) -> dict:
    """
    Resolves artefacts in a config dict. For example x_ref='x_ref.npy' is resolved by loading the np.ndarray from
    the .npy file. For a list of fields that are resolved, see
    https://docs.seldon.io/projects/alibi-detect/en/stable/overview/config_file.html.
    Parameters
    ----------
    cfg
        The unresolved config dict.
    config_dir
        Filepath to directory the `config.toml` is located in. Only required if different from the
        runtime directory, and artefacts are specified with filepaths relative to the config.toml file.
    Returns
    -------
    The resolved config dict.
    """
    # Convert selected str's to required dtype's (all other type coercion is performed by pydantic)
    _set_dtypes(cfg)
    # Before main resolution, update filepaths relative to config file
    if config_dir is not None:
        _prepend_cfg_filepaths(cfg, config_dir)
    # Resolve filepaths (load files) and resolve function/object registries
    for key in FIELDS_TO_RESOLVE:
        logger.info('Resolving config field: {}.'.format(key))
        src = _get_nested_value(cfg, key)
        obj = None
        # Resolve string references to registered objects and filepaths
        if isinstance(src, str):
            # Resolve registry references ('@'-prefixed strings)
            if src.startswith('@'):
                src = src[1:]
                if src in registry.get_all():
                    obj = registry.get(src)
                else:
                    raise ValueError(
                        f"Can't find {src} in the custom function registry, It may be misspelled or missing "
                        "if you have incorrect optional dependencies installed. Make sure the loading environment"
                        " is the same as the saving environment. For more information, check the Installation "
                        "documentation at "
                        "https://docs.seldon.io/projects/alibi-detect/en/stable/overview/getting_started.html."
                    )
                logger.info('Successfully resolved registry entry {}'.format(src))
            # Resolve dill or numpy file references
            elif Path(src).is_file():
                if Path(src).suffix == '.dill':
                    # Fix: previously `dill.load(open(src, 'rb'))` leaked the file handle.
                    with open(src, 'rb') as fh:
                        obj = dill.load(fh)
                if Path(src).suffix == '.npy':
                    obj = np.load(src)
        # Resolve artefact dicts (nested config sections), dispatched on the final key name
        elif isinstance(src, dict):
            backend = cfg.get('backend', Framework.TENSORFLOW)
            if key[-1] in ('model', 'proj'):
                obj = _load_model_config(src)
            elif key[-1] == 'embedding':
                obj = _load_embedding_config(src)
            elif key[-1] == 'tokenizer':
                obj = _load_tokenizer_config(src)
            elif key[-1] == 'optimizer':
                obj = _load_optimizer_config(src, backend)
            elif key[-1] == 'preprocess_fn':
                obj = _load_preprocess_config(src)
            elif key[-1] in ('kernel', 'x_kernel', 'c_kernel'):
                obj = _load_kernel_config(src, backend)
        # Put the resolved function into the cfg dict
        if obj is not None:
            _set_nested_value(cfg, key, obj)
    return cfg
def _replace(cfg: dict, orig: Optional[str], new: Optional[str]) -> dict:
    """
    Recursively traverse a nested dictionary and replace values.
    Parameters
    ----------
    cfg
        The dictionary.
    orig
        Original value to search.
    new
        Value to replace original with.
    Returns
    -------
    The updated dictionary (also modified in-place).
    """
    for key, val in cfg.items():
        if isinstance(val, dict):
            _replace(val, orig, new)
            continue
        # Evaluate the comparison once (previously computed twice). The `isinstance(..., bool)`
        # guard skips values whose __eq__ does not return a plain bool (e.g. np.ndarray's
        # elementwise comparison), which would otherwise raise in the truth test.
        equal = val == orig
        if isinstance(equal, bool) and equal:
            cfg[key] = new
    return cfg
def _prepend_cfg_filepaths(cfg: dict, prepend_dir: Path):
    """
    Recursively traverse through a nested dictionary and prepend a directory to any filepaths.
    Parameters
    ----------
    cfg
        The dictionary.
    prepend_dir
        The filepath to prepend to any filepaths in the dictionary.
    Returns
    -------
    The updated config dictionary.
    """
    for key, val in cfg.items():
        if isinstance(val, dict):
            _prepend_cfg_filepaths(val, prepend_dir)
        elif isinstance(val, str):
            candidate = prepend_dir.joinpath(Path(val))
            # Only update if prepending prepend_dir turned the value into a real file/dir path
            if candidate.is_file() or candidate.is_dir():
                cfg[key] = str(candidate)
| 18,954 | 31.624785 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/schemas.py | """
Pydantic models used by :func:`~alibi_detect.utils.validate.validate_config` to validate configuration dictionaries.
The `resolved` kwarg of :func:`~alibi_detect.utils.validate.validate_config` determines whether the *unresolved* or
*resolved* pydantic models are used:
- The *unresolved* models expect any artefacts specified within it to not yet have been resolved.
The artefacts are still string references to local filepaths or registries (e.g. `x_ref = 'x_ref.npy'`).
- The *resolved* models expect all artefacts to be have been resolved into runtime objects. For example, `x_ref`
should have been resolved into an `np.ndarray`.
.. note::
For detector pydantic models, the fields match the corresponding detector's args/kwargs. Refer to the
detector's api docs for a full description of each arg/kwarg.
"""
from typing import Callable, Dict, List, Optional, Type, Union, Any
import numpy as np
from pydantic import BaseModel, validator
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import (Literal, supported_models_all, supported_models_tf,
supported_models_sklearn, supported_models_torch, supported_optimizers_tf,
supported_optimizers_torch, supported_optimizers_all)
from alibi_detect.saving.validators import NDArray, validate_framework, coerce_int2list, coerce_2_tensor
class SupportedModel:
    """
    Pydantic custom type to check the model is one of the supported types (conditional on what optional deps
    are installed).
    """
    @classmethod
    def __get_validators__(cls):
        # pydantic v1 custom-type hook: yields the validator(s) pydantic runs for fields of this type
        yield cls.validate_model
    @classmethod
    def validate_model(cls, model: Any, values: dict) -> Any:
        # `values` holds the previously-validated fields of the parent schema; `backend` is assumed
        # to have been declared (and validated) before the model field — TODO confirm field order.
        backend = values['backend']
        err_msg = f"`backend={backend}` but the `model` doesn't appear to be a {backend} supported model, "\
                  f"or {backend} is not installed. Model: {model}"
        if backend == Framework.TENSORFLOW and not isinstance(model, supported_models_tf):
            raise TypeError(err_msg)
        elif backend == Framework.PYTORCH and not isinstance(model, supported_models_torch):
            raise TypeError(err_msg)
        elif backend == Framework.SKLEARN and not isinstance(model, supported_models_sklearn):
            raise TypeError(f"`backend={backend}` but the `model` doesn't appear to be a {backend} supported model.")
        elif isinstance(model, supported_models_all):  # If model supported and no `backend` incompatibility
            return model
        else:  # Catch any other unexpected issues
            raise TypeError('The model is not recognised as a supported type.')
class SupportedOptimizer:
    """
    Pydantic custom type to check the optimizer is one of the supported types (conditional on what optional deps
    are installed).
    """
    @classmethod
    def __get_validators__(cls):
        # pydantic v1 custom-type hook: yields the validator(s) pydantic runs for fields of this type
        yield cls.validate_optimizer
    @classmethod
    def validate_optimizer(cls, optimizer: Any, values: dict) -> Any:
        # `values` holds the previously-validated fields of the parent schema (incl. `backend`).
        backend = values['backend']
        err_msg = f"`backend={backend}` but the `optimizer` doesn't appear to be a {backend} supported optimizer, "\
                  f"or {backend} is not installed. Optimizer: {optimizer}"
        if backend == Framework.TENSORFLOW and not isinstance(optimizer, supported_optimizers_tf):
            raise TypeError(err_msg)
        elif backend == Framework.PYTORCH and not isinstance(optimizer, supported_optimizers_torch):
            raise TypeError(err_msg)
        elif isinstance(optimizer, supported_optimizers_all):  # If optimizer supported and no `backend` incompatibility
            return optimizer
        else:  # Catch any other unexpected issues
            # Fix: this message previously said "model" (copy-paste from SupportedModel).
            raise TypeError('The optimizer is not recognised as a supported type.')
# TODO - We could add validator to check `model` and `embedding` type when chained together. Leave this until refactor
# of preprocess_drift.
# Custom BaseModel so that we can set default config
class CustomBaseModel(BaseModel):
    """
    Base pydantic model schema. The default pydantic settings are set here.
    """
    class Config:
        # pydantic v1 model configuration
        arbitrary_types_allowed = True  # since we have np.ndarray's etc
        extra = 'forbid'  # Forbid extra fields so that we catch misspelled fields
# Custom BaseModel with additional kwarg's allowed
class CustomBaseModelWithKwargs(BaseModel):
    """
    Base pydantic model schema. The default pydantic settings are set here.
    """
    class Config:
        # pydantic v1 model configuration
        arbitrary_types_allowed = True  # since we have np.ndarray's etc
        extra = 'allow'  # Allow extra fields, so arbitrary kwargs can be passed through to artefacts
class MetaData(CustomBaseModel):
    """
    Metadata stored in a detector config. `version` is the library version the config was saved
    with; `version_warning` records whether a version-mismatch warning was raised on load.
    """
    version: str
    version_warning: bool = False
class DetectorConfig(CustomBaseModel):
    """
    Base detector config schema. Only fields universal across all detectors are defined here.
    """
    name: str
    "Name of the detector e.g. `MMDDrift`."
    meta: Optional[MetaData] = None
    "Config metadata. Should not be edited."
    # Note: Although not all detectors have a backend, we define in base class as `backend` also determines
    # whether tf or torch models used for preprocess_fn.
    # backend validation (only applied if the detector config has a `backend` field)
    _validate_backend = validator('backend', allow_reuse=True, pre=False, check_fields=False)(validate_framework)
class ModelConfig(CustomBaseModel):
    """
    Unresolved schema for (ML) models. Note that the model "backend" e.g. 'tensorflow', 'pytorch', 'sklearn', is set
    by `backend` in :class:`DetectorConfig`.
    Examples
    --------
    A TensorFlow classifier model stored in the `model/` directory, with the softmax layer extracted:
    .. code-block :: toml
        [model]
        flavour = "tensorflow"
        src = "model/"
        layer = -1
    """
    flavour: Literal['tensorflow', 'pytorch', 'sklearn']
    """
    Whether the model is a `tensorflow`, `pytorch` or `sklearn` model. XGBoost models following the scikit-learn API
    are also included under `sklearn`.
    """
    src: str
    """
    Filepath to directory storing the model (relative to the `config.toml` file, or absolute). At present,
    TensorFlow models must be stored in
    `H5 format <https://www.tensorflow.org/guide/keras/save_and_serialize#keras_h5_format>`_.
    """
    custom_objects: Optional[dict] = None
    """
    Dictionary of custom objects. Passed to the tensorflow
    `load_model <https://www.tensorflow.org/api_docs/python/tf/keras/models/load_model>`_ function. This can be
    used to pass custom registered functions and classes to a model.
    """
    layer: Optional[int] = None
    """
    Optional index of hidden layer to extract. If not `None`, a
    :class:`~alibi_detect.cd.tensorflow.preprocess.HiddenOutput` or
    :class:`~alibi_detect.cd.pytorch.preprocess.HiddenOutput` model is returned (dependent on `flavour`).
    Only applies to 'tensorflow' and 'pytorch' models.
    """
    # Validators
    # Check `flavour` is a framework that is actually installed.
    _validate_flavour = validator('flavour', allow_reuse=True, pre=False)(validate_framework)
class EmbeddingConfig(CustomBaseModel):
    """
    Unresolved schema for text embedding models. Currently, only pre-trained
    `HuggingFace transformer <https://github.com/huggingface/transformers>`_ models are supported.
    Examples
    --------
    Using the hidden states at the output of each layer of a TensorFlow
    `BERT base <https://huggingface.co/bert-base-cased>`_ model as text embeddings:
    .. code-block :: toml
        [embedding]
        flavour = "tensorflow"
        src = "bert-base-cased"
        type = "hidden_state"
        layers = [-1, -2, -3, -4, -5, -6, -7, -8]
    """
    flavour: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    """
    Whether the embedding model is a `tensorflow` or `pytorch` model.
    """
    type: Literal['pooler_output', 'last_hidden_state', 'hidden_state', 'hidden_state_cls']
    """
    The type of embedding to be loaded. See `embedding_type` in
    :class:`~alibi_detect.models.tensorflow.embedding.TransformerEmbedding`.
    """
    layers: Optional[List[int]] = None
    "List specifying the hidden layers to be used to extract the embedding."
    # TODO - add check conditional on embedding type (see docstring in above)
    src: str
    """
    Model name e.g. `"bert-base-cased"`, or a filepath to directory storing the model to extract embeddings from
    (relative to the `config.toml` file, or absolute).
    """
    # Validators
    # Check `flavour` is a framework that is actually installed.
    _validate_flavour = validator('flavour', allow_reuse=True, pre=False)(validate_framework)
class TokenizerConfig(CustomBaseModel):
    """
    Unresolved schema for text tokenizers. Currently, only pre-trained
    `HuggingFace tokenizer <https://github.com/huggingface/tokenizers>`_ models are supported.
    Examples
    --------
    `BERT base <https://huggingface.co/bert-base-cased>`_ tokenizer with additional keyword arguments passed to the
    HuggingFace :meth:`~transformers.AutoTokenizer.from_pretrained` method:
    .. code-block :: toml
        [tokenizer]
        src = "bert-base-cased"
        [tokenizer.kwargs]
        use_fast = false
        force_download = true
    """
    src: str
    """
    Model name e.g. `"bert-base-cased"`, or a filepath to directory storing the tokenizer model (relative to the
    `config.toml` file, or absolute). Passed to passed to :meth:`transformers.AutoTokenizer.from_pretrained`.
    """
    kwargs: Optional[dict] = {}
    "Dictionary of keyword arguments to pass to :meth:`transformers.AutoTokenizer.from_pretrained`."
class PreprocessConfig(CustomBaseModel):
    """
    Unresolved schema for drift detector preprocess functions, to be passed to a detector's `preprocess_fn` kwarg.
    Once loaded, the function is wrapped in a :func:`~functools.partial`, to be evaluated within the detector.
    If `src` specifies a generic Python function, the dictionary specified by `kwargs` is passed to it. Otherwise,
    if `src` specifies :func:`~alibi_detect.cd.tensorflow.preprocess.preprocess_drift`
    (`src='@cd.tensorflow.preprocess.preprocess_drift'`), all fields (except `kwargs`) are passed to it.
    Examples
    --------
    Preprocessor with a `model`, text `embedding` and `tokenizer` passed to
    :func:`~alibi_detect.cd.tensorflow.preprocess.preprocess_drift`:
    .. code-block :: toml
        [preprocess_fn]
        src = "@cd.tensorflow.preprocess.preprocess_drift"
        batch_size = 32
        max_len = 100
        tokenizer.src = "tokenizer/"  # TokenizerConfig
        [preprocess_fn.model]
        # ModelConfig
        src = "model/"
        [preprocess_fn.embedding]
        # EmbeddingConfig
        src = "embedding/"
        type = "hidden_state"
        layers = [-1, -2, -3, -4, -5, -6, -7, -8]
    A serialized Python function with keyword arguments passed to it:
    .. code-block :: toml
        [preprocess_fn]
        src = 'myfunction.dill'
        kwargs = {'kwarg1'=0.7, 'kwarg2'=true}
    """
    src: str = "@cd.tensorflow.preprocess.preprocess_drift"
    """
    The preprocessing function. A string referencing a filepath to a serialized function in `dill` format, or an
    object registry reference.
    """
    # Below kwargs are only passed if src == @preprocess_drift
    model: Optional[Union[str, ModelConfig]] = None
    """
    Model used for preprocessing. Either an object registry reference, or a
    :class:`~alibi_detect.utils.schemas.ModelConfig`.
    """
    # TODO - make model required field when src is preprocess_drift
    embedding: Optional[Union[str, EmbeddingConfig]] = None
    """
    A text embedding model. Either a string referencing a HuggingFace transformer model name, an object registry
    reference, or a :class:`~alibi_detect.utils.schemas.EmbeddingConfig`. If `model=None`, the `embedding` is passed to
    :func:`~alibi_detect.cd.tensorflow.preprocess.preprocess_drift` as `model`. Otherwise, the `model` is chained to
    the output of the `embedding` as an additional preprocessing step.
    """
    tokenizer: Optional[Union[str, TokenizerConfig]] = None
    """
    Optional tokenizer for text drift. Either a string referencing a HuggingFace tokenizer model name, or a
    :class:`~alibi_detect.utils.schemas.TokenizerConfig`.
    """
    device: Optional[Literal['cpu', 'cuda']] = None
    """
    Device type used. The default `None` tries to use the GPU and falls back on CPU if needed. Only relevant if
    `src='@cd.torch.preprocess.preprocess_drift'`
    """
    preprocess_batch_fn: Optional[str] = None
    """
    Optional batch preprocessing function. For example to convert a list of objects to a batch which can be processed
    by the `model`.
    """
    max_len: Optional[int] = None
    "Optional max token length for text drift."
    batch_size: Optional[int] = int(1e10)
    "Batch size used during prediction."
    dtype: str = 'np.float32'
    "Model output type, e.g. `'tf.float32'`"
    # Additional kwargs
    kwargs: dict = {}
    """
    Dictionary of keyword arguments to be passed to the function specified by `src`. Only used if `src` specifies a
    generic Python function.
    """
class KernelConfig(CustomBaseModelWithKwargs):
    """
    Unresolved schema for kernels, to be passed to a detector's `kernel` kwarg.
    If `src` specifies a :class:`~alibi_detect.utils.tensorflow.GaussianRBF` kernel, the `sigma`, `trainable` and
    `init_sigma_fn` fields are passed to it. Otherwise, all fields except `src` are passed as kwargs.
    Examples
    --------
    A :class:`~alibi_detect.utils.tensorflow.GaussianRBF` kernel, with three different bandwidths:
    .. code-block :: toml
        [kernel]
        src = "@alibi_detect.utils.tensorflow.GaussianRBF"
        trainable = false
        sigma = [0.1, 0.2, 0.3]
    A serialized kernel with keyword arguments passed:
    .. code-block :: toml
        [kernel]
        src = "mykernel.dill"
        sigma = 0.42
        custom_setting = "xyz"
    """
    src: str
    "A string referencing a filepath to a serialized kernel in `.dill` format, or an object registry reference."
    # Below kwargs are only passed if kernel == @GaussianRBF
    flavour: Literal['tensorflow', 'pytorch', 'keops']
    """
    Whether the kernel is a `tensorflow`, `pytorch` or `keops` kernel.
    """
    sigma: Optional[Union[float, List[float]]] = None
    """
    Bandwidth used for the kernel. Needn’t be specified if being inferred or trained. Can pass multiple values to eval
    kernel with and then average.
    """
    trainable: bool = False
    "Whether or not to track gradients w.r.t. sigma to allow it to be trained."
    init_sigma_fn: Optional[str] = None
    """
    Function used to compute the bandwidth `sigma`. Used when `sigma` is to be inferred. The function's signature
    should match :py:func:`~alibi_detect.utils.tensorflow.kernels.sigma_median`. If `None`, it is set to
    :func:`~alibi_detect.utils.tensorflow.kernels.sigma_median`.
    """
    # Validators
    # Check `flavour` is installed, and coerce scalar/list `sigma` to a backend tensor.
    _validate_flavour = validator('flavour', allow_reuse=True, pre=False)(validate_framework)
    _coerce_sigma2tensor = validator('sigma', allow_reuse=True, pre=False)(coerce_2_tensor)
class DeepKernelConfig(CustomBaseModel):
    """
    Unresolved schema for :class:`~alibi_detect.utils.tensorflow.kernels.DeepKernel`'s.
    Examples
    --------
    A :class:`~alibi_detect.utils.tensorflow.DeepKernel`, with a trainable
    :class:`~alibi_detect.utils.tensorflow.GaussianRBF` kernel applied to the projected inputs and a custom
    serialized kernel applied to the raw inputs:
    .. code-block :: toml
        [kernel]
        eps = 0.01
        [kernel.kernel_a]
        src = "@utils.tensorflow.kernels.GaussianRBF"
        trainable = true
        [kernel.kernel_b]
        src = "custom_kernel.dill"
        sigma = [ 1.2,]
        trainable = false
        [kernel.proj]
        src = "model/"
    """
    proj: Union[str, ModelConfig]
    """
    The projection to be applied to the inputs before applying `kernel_a`. This should be a Tensorflow or PyTorch
    model, specified as an object registry reference, or a :class:`~alibi_detect.utils.schemas.ModelConfig`.
    """
    kernel_a: Union[str, KernelConfig] = "@utils.tensorflow.kernels.GaussianRBF"
    """
    The kernel to apply to the projected inputs. Defaults to a
    :class:`~alibi_detect.utils.tensorflow.kernels.GaussianRBF` with trainable bandwidth.
    """
    kernel_b: Optional[Union[str, KernelConfig]] = "@utils.tensorflow.kernels.GaussianRBF"
    """
    The kernel to apply to the raw inputs. Defaults to a :class:`~alibi_detect.utils.tensorflow.kernels.GaussianRBF`
    with trainable bandwidth. Set to `None` in order to use only the deep component (i.e. `eps=0`).
    """
    eps: Union[float, str] = 'trainable'
    """
    The proportion (in [0,1]) of weight to assign to the kernel applied to raw inputs. This can be either specified or
    set to `'trainable'`. Only relevant is `kernel_b` is not `None`.
    """
class OptimizerConfig(CustomBaseModelWithKwargs):
    """
    Unresolved schema for optimizers. The `optimizer` dictionary has two possible formats:
    1. A configuration dictionary compatible with
    `tf.keras.optimizers.deserialize <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/deserialize>`_.
    For `backend='tensorflow'` only.
    2. A dictionary containing only `class_name`, where this is a string referencing the optimizer name e.g.
    `optimizer.class_name = 'Adam'`. In this case, the tensorflow or pytorch optimizer class of the same name is
    loaded. For `backend='tensorflow'` and `backend='pytorch'`.
    Examples
    --------
    A TensorFlow Adam optimizer:
    .. code-block :: toml
        [optimizer]
        class_name = "Adam"
        [optimizer.config]
        name = "Adam"
        learning_rate = 0.001
        decay = 0.0
    A PyTorch Adam optimizer:
    .. code-block :: toml
        [optimizer]
        class_name = "Adam"
    """
    # Optimizer class name e.g. 'Adam'; `config` is only used in format 1 (tensorflow deserialize).
    class_name: str
    config: Optional[Dict[str, Any]] = None
class DriftDetectorConfig(DetectorConfig):
    """
    Unresolved base schema for drift detectors.
    """
    # args/kwargs shared by all drift detectors
    x_ref: str
    "Data used as reference distribution. Should be a string referencing a NumPy `.npy` file."
    preprocess_fn: Optional[Union[str, PreprocessConfig]] = None
    """
    Function to preprocess the data before computing the data drift metrics. A string referencing a serialized function
    in `.dill` format, an object registry reference, or a :class:`~alibi_detect.utils.schemas.PreprocessConfig`.
    """
    input_shape: Optional[tuple] = None
    "Optionally pass the shape of the input data. Used when saving detectors."
    data_type: Optional[str] = None
    "Specify data type added to the metadata. E.g. `‘tabular’` or `‘image’`."
    x_ref_preprocessed: bool = False
    """
    Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only the test
    data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference data will also be
    preprocessed.
    """
class DriftDetectorConfigResolved(DetectorConfig):
    """
    Resolved base schema for drift detectors. Here, artefacts such as `x_ref` and `preprocess_fn`
    have already been resolved into runtime objects.
    """
    # args/kwargs shared by all drift detectors
    x_ref: Union[np.ndarray, list]
    "Data used as reference distribution."
    preprocess_fn: Optional[Callable] = None
    "Function to preprocess the data before computing the data drift metrics."
    input_shape: Optional[tuple] = None
    "Optionally pass the shape of the input data. Used when saving detectors."
    data_type: Optional[str] = None
    "Specify data type added to the metadata. E.g. `‘tabular’` or `‘image’`."
    x_ref_preprocessed: bool = False
    """
    Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only the test
    data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference data will also be
    preprocessed.
    """
class KSDriftConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `KSDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/ksdrift.html>`_ detector.
    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.KSDrift` documentation for a description of each field.
    """
    # Defaults mirror the alibi_detect.cd.KSDrift signature.
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
    alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'
    n_features: Optional[int] = None
class KSDriftConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `KSDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/ksdrift.html>`_ detector.
    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.KSDrift` documentation for a description of each field.
    """
    p_val: float = .05
    preprocess_at_init: bool = True  # Note: Duplication needed to avoid mypy error (unless we allow reassignment)
    update_x_ref: Optional[Dict[str, int]] = None
    correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
    alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'
    n_features: Optional[int] = None
class ChiSquareDriftConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `ChiSquareDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/chisquaredrift.html>`_ detector.
    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.ChiSquareDrift` documentation for a description of each field.
    """
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
    # Annotation made explicitly Optional: pydantic v1 already treats the `None` default as
    # optional, so runtime behaviour is unchanged; this just makes the type mypy-correct.
    categories_per_feature: Optional[Dict[int, Union[int, List[int]]]] = None
    n_features: Optional[int] = None
class ChiSquareDriftConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `ChiSquareDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/chisquaredrift.html>`_ detector.
    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.ChiSquareDrift` documentation for a description of each field.
    """
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    # NOTE(review): the unresolved schema constrains this to Literal['bonferroni', 'fdr'];
    # consider matching that here — left as plain str to avoid changing validation behaviour.
    correction: str = 'bonferroni'
    # Annotation made explicitly Optional (pydantic v1 already treats the None default as optional).
    categories_per_feature: Optional[Dict[int, Union[int, List[int]]]] = None
    n_features: Optional[int] = None
class TabularDriftConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `TabularDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/tabulardrift.html>`_ detector.
    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.TabularDrift` documentation for a description of each field.
    """
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
    # Annotation made explicitly Optional (pydantic v1 already treats the None default as optional).
    categories_per_feature: Optional[Dict[int, Optional[Union[int, List[int]]]]] = None
    alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'
    n_features: Optional[int] = None
class TabularDriftConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `TabularDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/tabulardrift.html>`_ detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.TabularDrift` documentation for a description of each field.
    """
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
    # Outer `Optional` made explicit: default is `None` (see TabularDriftConfig for rationale).
    categories_per_feature: Optional[Dict[int, Optional[Union[int, List[int]]]]] = None
    alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'
    n_features: Optional[int] = None
class CVMDriftConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `CVMDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/cvmdrift.html>`_ detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.CVMDrift` documentation for a description of each field.
    """
    # Defaults below must stay in sync with the alibi_detect.cd.CVMDrift constructor.
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    correction: Literal['bonferroni', 'fdr'] = 'bonferroni'  # multivariate correction scheme
    n_features: Optional[int] = None
class CVMDriftConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `CVMDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/cvmdrift.html>`_ detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.CVMDrift` documentation for a description of each field.
    """
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    # `Literal` instead of bare `str` for consistency with the unresolved CVMDriftConfig and the
    # other resolved schemas; a bare `str` would accept correction methods the detector rejects.
    correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
    n_features: Optional[int] = None
class FETDriftConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `FETDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/fetdrift.html>`_ detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.FETDrift` documentation for a description of each field.
    """
    # Defaults below must stay in sync with the alibi_detect.cd.FETDrift constructor.
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    correction: Literal['bonferroni', 'fdr'] = 'bonferroni'  # multivariate correction scheme
    alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'  # test alternative hypothesis
    n_features: Optional[int] = None
class FETDriftConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `FETDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/fetdrift.html>`_ detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.FETDrift` documentation for a description of each field.
    """
    # Fields are identical to FETDriftConfig: this detector has no artefact references to resolve.
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
    alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'
    n_features: Optional[int] = None
class MMDDriftConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `MMDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/mmddrift.html>`_ detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.MMDDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow'
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    # Unresolved: the kernel may be given as a string reference or as a nested KernelConfig.
    kernel: Optional[Union[str, KernelConfig]] = None
    sigma: Optional[NDArray[np.float32]] = None  # NDArray coerces lists from config to np.ndarray
    configure_kernel_from_x_ref: bool = True
    n_permutations: int = 100
    batch_size_permutations: int = 1000000
    device: Optional[Literal['cpu', 'cuda']] = None
class MMDDriftConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `MMDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/mmddrift.html>`_ detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.MMDDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow'
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    # Resolved: the string/KernelConfig reference from MMDDriftConfig is now a runtime callable.
    kernel: Optional[Callable] = None
    sigma: Optional[NDArray[np.float32]] = None
    configure_kernel_from_x_ref: bool = True
    n_permutations: int = 100
    batch_size_permutations: int = 1000000
    device: Optional[Literal['cpu', 'cuda']] = None
class LSDDDriftConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `LSDDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/lsdddrift.html>`_ detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.LSDDDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'  # no keops backend for LSDD
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    sigma: Optional[NDArray[np.float32]] = None  # NDArray coerces lists from config to np.ndarray
    n_permutations: int = 100
    n_kernel_centers: Optional[int] = None
    lambda_rd_max: float = 0.2
    device: Optional[Literal['cpu', 'cuda']] = None
class LSDDDriftConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `LSDDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/lsdddrift.html>`_ detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.LSDDDrift` documentation for a description of each field.
    """
    # Fields are identical to LSDDDriftConfig: this detector has no artefact references to resolve.
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    sigma: Optional[NDArray[np.float32]] = None
    n_permutations: int = 100
    n_kernel_centers: Optional[int] = None
    lambda_rd_max: float = 0.2
    device: Optional[Literal['cpu', 'cuda']] = None
class ClassifierDriftConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `ClassifierDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/classifierdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.ClassifierDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch', 'sklearn'] = 'tensorflow'
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    model: Union[str, ModelConfig]  # required: the classifier used to discriminate ref vs test data
    preds_type: Literal['probs', 'logits'] = 'probs'
    binarize_preds: bool = False
    reg_loss_fn: Optional[str] = None  # unresolved string reference to a registered function
    train_size: Optional[float] = .75
    n_folds: Optional[int] = None
    retrain_from_scratch: bool = True
    seed: int = 0
    optimizer: Optional[Union[str, OptimizerConfig]] = None
    learning_rate: float = 1e-3
    batch_size: int = 32
    preprocess_batch_fn: Optional[str] = None
    epochs: int = 3
    verbose: int = 0
    train_kwargs: Optional[dict] = None
    dataset: Optional[str] = None
    device: Optional[Literal['cpu', 'cuda']] = None
    dataloader: Optional[str] = None  # TODO: placeholder, will need to be updated for pytorch implementation
    # The three fields below apply to the sklearn backend -- TODO confirm they are ignored elsewhere.
    use_calibration: bool = False
    calibration_kwargs: Optional[dict] = None
    use_oob: bool = False
class ClassifierDriftConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `ClassifierDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/classifierdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.ClassifierDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch', 'sklearn'] = 'tensorflow'
    p_val: float = .05
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    # Resolved: string/config references from ClassifierDriftConfig are now runtime objects
    # (model instance, callables), hence the Optional[...] = None defaults below.
    model: Optional[SupportedModel] = None
    preds_type: Literal['probs', 'logits'] = 'probs'
    binarize_preds: bool = False
    reg_loss_fn: Optional[Callable] = None
    train_size: Optional[float] = .75
    n_folds: Optional[int] = None
    retrain_from_scratch: bool = True
    seed: int = 0
    optimizer: Optional[SupportedOptimizer] = None
    learning_rate: float = 1e-3
    batch_size: int = 32
    preprocess_batch_fn: Optional[Callable] = None
    epochs: int = 3
    verbose: int = 0
    train_kwargs: Optional[dict] = None
    dataset: Optional[Callable] = None
    device: Optional[Literal['cpu', 'cuda']] = None
    dataloader: Optional[Callable] = None  # TODO: placeholder, will need to be updated for pytorch implementation
    use_calibration: bool = False
    calibration_kwargs: Optional[dict] = None
    use_oob: bool = False
class SpotTheDiffDriftConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `SpotTheDiffDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/spotthediffdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.SpotTheDiffDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    p_val: float = .05
    binarize_preds: bool = False
    train_size: Optional[float] = .75
    n_folds: Optional[int] = None
    retrain_from_scratch: bool = True
    seed: int = 0
    optimizer: Optional[Union[str, OptimizerConfig]] = None
    learning_rate: float = 1e-3
    batch_size: int = 32
    preprocess_batch_fn: Optional[str] = None  # unresolved string reference to a registered function
    epochs: int = 3
    verbose: int = 0
    train_kwargs: Optional[dict] = None
    dataset: Optional[str] = None
    kernel: Optional[Union[str, KernelConfig]] = None  # string reference or nested kernel config
    n_diffs: int = 1
    initial_diffs: Optional[str] = None  # unresolved reference to a stored array artefact
    l1_reg: float = 0.01
    device: Optional[Literal['cpu', 'cuda']] = None
    dataloader: Optional[str] = None  # TODO: placeholder, will need to be updated for pytorch implementation
class SpotTheDiffDriftConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `SpotTheDiffDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/spotthediffdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.SpotTheDiffDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    p_val: float = .05
    binarize_preds: bool = False
    train_size: Optional[float] = .75
    n_folds: Optional[int] = None
    retrain_from_scratch: bool = True
    seed: int = 0
    # Resolved: string/config references from SpotTheDiffDriftConfig are now runtime objects.
    optimizer: Optional[SupportedOptimizer] = None
    learning_rate: float = 1e-3
    batch_size: int = 32
    preprocess_batch_fn: Optional[Callable] = None
    epochs: int = 3
    verbose: int = 0
    train_kwargs: Optional[dict] = None
    dataset: Optional[Callable] = None
    kernel: Optional[Callable] = None
    n_diffs: int = 1
    initial_diffs: Optional[np.ndarray] = None
    l1_reg: float = 0.01
    device: Optional[Literal['cpu', 'cuda']] = None
    dataloader: Optional[Callable] = None  # TODO: placeholder, will need to be updated for pytorch implementation
class LearnedKernelDriftConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `LearnedKernelDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/learnedkerneldrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.LearnedKernelDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow'
    p_val: float = .05
    kernel: Union[str, DeepKernelConfig]  # required: the trainable deep kernel (reference or config)
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    n_permutations: int = 100
    batch_size_permutations: int = 1000000
    var_reg: float = 1e-5
    reg_loss_fn: Optional[str] = None  # unresolved string reference to a registered function
    train_size: Optional[float] = .75
    retrain_from_scratch: bool = True
    optimizer: Optional[Union[str, OptimizerConfig]] = None
    learning_rate: float = 1e-3
    batch_size: int = 32
    batch_size_predict: int = 1000000
    preprocess_batch_fn: Optional[str] = None
    epochs: int = 3
    num_workers: int = 0
    verbose: int = 0
    train_kwargs: Optional[dict] = None
    dataset: Optional[str] = None
    device: Optional[Literal['cpu', 'cuda']] = None
    dataloader: Optional[str] = None  # TODO: placeholder, will need to be updated for pytorch implementation
class LearnedKernelDriftConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `LearnedKernelDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/learnedkerneldrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.LearnedKernelDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow'
    p_val: float = .05
    # Resolved: references from LearnedKernelDriftConfig are now runtime objects; note `kernel`
    # becomes Optional here even though it is required in the unresolved schema.
    kernel: Optional[Callable] = None
    preprocess_at_init: bool = True
    update_x_ref: Optional[Dict[str, int]] = None
    n_permutations: int = 100
    batch_size_permutations: int = 1000000
    var_reg: float = 1e-5
    reg_loss_fn: Optional[Callable] = None
    train_size: Optional[float] = .75
    retrain_from_scratch: bool = True
    optimizer: Optional[SupportedOptimizer] = None
    learning_rate: float = 1e-3
    batch_size: int = 32
    batch_size_predict: int = 1000000
    preprocess_batch_fn: Optional[Callable] = None
    epochs: int = 3
    num_workers: int = 0
    verbose: int = 0
    train_kwargs: Optional[dict] = None
    dataset: Optional[Callable] = None
    device: Optional[Literal['cpu', 'cuda']] = None
    dataloader: Optional[Callable] = None  # TODO: placeholder, will need to be updated for pytorch implementation
class ContextMMDDriftConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `ContextMMDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/contextmmddrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.ContextMMDDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    p_val: float = .05
    c_ref: str  # required: unresolved reference to the stored context (conditioning) data
    preprocess_at_init: bool = True
    update_ref: Optional[Dict[str, int]] = None  # note: `update_ref`, not `update_x_ref` as elsewhere
    x_kernel: Optional[Union[str, KernelConfig]] = None  # kernel on the data
    c_kernel: Optional[Union[str, KernelConfig]] = None  # kernel on the context
    n_permutations: int = 100
    prop_c_held: float = 0.25
    n_folds: int = 5
    batch_size: Optional[int] = 256
    verbose: bool = False
    device: Optional[Literal['cpu', 'cuda']] = None
class ContextMMDDriftConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `ContextMMDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/contextmmddrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.ContextMMDDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    p_val: float = .05
    # Resolved: `c_ref` is now the loaded context array, and the kernels are runtime callables.
    c_ref: np.ndarray
    preprocess_at_init: bool = True
    update_ref: Optional[Dict[str, int]] = None
    x_kernel: Optional[Callable] = None
    c_kernel: Optional[Callable] = None
    n_permutations: int = 100
    prop_c_held: float = 0.25
    n_folds: int = 5
    batch_size: Optional[int] = 256
    verbose: bool = False
    device: Optional[Literal['cpu', 'cuda']] = None
class MMDDriftOnlineConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `MMDDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinemmddrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.MMDDriftOnline` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    ert: float  # required: expected run time (in absence of drift)
    window_size: int  # required: size of the sliding test window
    kernel: Optional[Union[str, KernelConfig]] = None
    # NOTE(review): typed as plain np.ndarray here while the offline schemas use the coercing
    # NDArray type -- confirm list values from config files are still coerced correctly.
    sigma: Optional[np.ndarray] = None
    n_bootstraps: int = 1000
    device: Optional[Literal['cpu', 'cuda']] = None
    verbose: bool = True
class MMDDriftOnlineConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `MMDDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinemmddrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.MMDDriftOnline` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    ert: float
    window_size: int
    kernel: Optional[Callable] = None  # resolved from the string/KernelConfig reference
    sigma: Optional[np.ndarray] = None
    n_bootstraps: int = 1000
    device: Optional[Literal['cpu', 'cuda']] = None
    verbose: bool = True
class LSDDDriftOnlineConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `LSDDDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinelsdddrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.LSDDDriftOnline` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    ert: float  # required: expected run time (in absence of drift)
    window_size: int  # required: size of the sliding test window
    sigma: Optional[np.ndarray] = None
    n_bootstraps: int = 1000
    n_kernel_centers: Optional[int] = None
    lambda_rd_max: float = 0.2
    device: Optional[Literal['cpu', 'cuda']] = None
    verbose: bool = True
class LSDDDriftOnlineConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `LSDDDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinelsdddrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.LSDDDriftOnline` documentation for a description of each field.
    """
    # Fields are identical to LSDDDriftOnlineConfig: no artefact references to resolve.
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    ert: float
    window_size: int
    sigma: Optional[np.ndarray] = None
    n_bootstraps: int = 1000
    n_kernel_centers: Optional[int] = None
    lambda_rd_max: float = 0.2
    device: Optional[Literal['cpu', 'cuda']] = None
    verbose: bool = True
class CVMDriftOnlineConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `CVMDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinecvmdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.CVMDriftOnline` documentation for a description of each field.
    """
    ert: float  # required: expected run time (in absence of drift)
    window_sizes: List[int]  # required: one or more sliding window sizes
    n_bootstraps: int = 10000
    batch_size: int = 64
    n_features: Optional[int] = None
    verbose: bool = True

    # validators
    # Allows a bare int `window_sizes` in config; coerced to a single-element list before validation.
    _coerce_int2list = validator('window_sizes', allow_reuse=True, pre=True)(coerce_int2list)
class CVMDriftOnlineConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `CVMDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinecvmdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.CVMDriftOnline` documentation for a description of each field.
    """
    # Fields are identical to CVMDriftOnlineConfig: no artefact references to resolve.
    ert: float
    window_sizes: List[int]
    n_bootstraps: int = 10000
    batch_size: int = 64
    n_features: Optional[int] = None
    verbose: bool = True

    # validators
    _coerce_int2list = validator('window_sizes', allow_reuse=True, pre=True)(coerce_int2list)
class FETDriftOnlineConfig(DriftDetectorConfig):
    """
    Unresolved schema for the
    `FETDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinefetdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.FETDriftOnline` documentation for a description of each field.
    """
    ert: float  # required: expected run time (in absence of drift)
    window_sizes: List[int]  # required: one or more sliding window sizes
    n_bootstraps: int = 10000
    t_max: Optional[int] = None
    alternative: Literal['greater', 'less'] = 'greater'  # no 'two-sided' option for the online variant
    lam: float = 0.99
    n_features: Optional[int] = None
    verbose: bool = True

    # validators
    # Allows a bare int `window_sizes` in config; coerced to a single-element list before validation.
    _coerce_int2list = validator('window_sizes', allow_reuse=True, pre=True)(coerce_int2list)
class FETDriftOnlineConfigResolved(DriftDetectorConfigResolved):
    """
    Resolved schema for the
    `FETDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinefetdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.FETDriftOnline` documentation for a description of each field.
    """
    # Fields are identical to FETDriftOnlineConfig: no artefact references to resolve.
    ert: float
    window_sizes: List[int]
    n_bootstraps: int = 10000
    t_max: Optional[int] = None
    alternative: Literal['greater', 'less'] = 'greater'
    lam: float = 0.99
    n_features: Optional[int] = None
    verbose: bool = True

    # validators
    _coerce_int2list = validator('window_sizes', allow_reuse=True, pre=True)(coerce_int2list)
# The uncertainty detectors don't inherit from DriftDetectorConfig since their kwargs are a little different from the
# other drift detectors (e.g. no preprocess_fn). Subject to change in the future.
class ClassifierUncertaintyDriftConfig(DetectorConfig):
    """
    Unresolved schema for the
    `ClassifierUncertaintyDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/modeluncdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.ClassifierUncertaintyDrift` documentation for a description of each field.
    """
    # Inherits from DetectorConfig (not DriftDetectorConfig), so `x_ref` etc. are declared here.
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    x_ref: str  # unresolved reference to the stored reference data
    model: Union[str, ModelConfig]  # required: the classifier whose prediction uncertainty is monitored
    p_val: float = .05
    x_ref_preprocessed: bool = False
    update_x_ref: Optional[Dict[str, int]] = None
    preds_type: Literal['probs', 'logits'] = 'probs'
    uncertainty_type: Literal['entropy', 'margin'] = 'entropy'
    margin_width: float = 0.1
    batch_size: int = 32
    preprocess_batch_fn: Optional[str] = None
    device: Optional[str] = None
    tokenizer: Optional[Union[str, TokenizerConfig]] = None
    max_len: Optional[int] = None
    input_shape: Optional[tuple] = None
    data_type: Optional[str] = None
class ClassifierUncertaintyDriftConfigResolved(DetectorConfig):
    """
    Resolved schema for the
    `ClassifierUncertaintyDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/modeluncdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.ClassifierUncertaintyDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    x_ref: Union[np.ndarray, list]  # resolved: the loaded reference data itself
    model: Optional[SupportedModel] = None
    p_val: float = .05
    x_ref_preprocessed: bool = False
    update_x_ref: Optional[Dict[str, int]] = None
    preds_type: Literal['probs', 'logits'] = 'probs'
    uncertainty_type: Literal['entropy', 'margin'] = 'entropy'
    margin_width: float = 0.1
    batch_size: int = 32
    preprocess_batch_fn: Optional[Callable] = None
    device: Optional[str] = None
    # NOTE(review): still admits `str` here, whereas RegressorUncertaintyDriftConfigResolved uses
    # Optional[Callable] only -- confirm whether the string form is intentional post-resolution.
    tokenizer: Optional[Union[str, Callable]] = None
    max_len: Optional[int] = None
    input_shape: Optional[tuple] = None
    data_type: Optional[str] = None
class RegressorUncertaintyDriftConfig(DetectorConfig):
    """
    Unresolved schema for the
    `RegressorUncertaintyDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/modeluncdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.RegressorUncertaintyDrift` documentation for a description of each field.
    """
    # Inherits from DetectorConfig (not DriftDetectorConfig), so `x_ref` etc. are declared here.
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    x_ref: str  # unresolved reference to the stored reference data
    model: Union[str, ModelConfig]  # required: the regressor whose prediction uncertainty is monitored
    p_val: float = .05
    x_ref_preprocessed: bool = False
    update_x_ref: Optional[Dict[str, int]] = None
    uncertainty_type: Literal['mc_dropout', 'ensemble'] = 'mc_dropout'
    n_evals: int = 25
    batch_size: int = 32
    preprocess_batch_fn: Optional[str] = None
    device: Optional[str] = None
    tokenizer: Optional[Union[str, TokenizerConfig]] = None
    max_len: Optional[int] = None
    input_shape: Optional[tuple] = None
    data_type: Optional[str] = None
class RegressorUncertaintyDriftConfigResolved(DetectorConfig):
    """
    Resolved schema for the
    `RegressorUncertaintyDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/modeluncdrift.html>`_
    detector.

    Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
    :class:`~alibi_detect.cd.RegressorUncertaintyDrift` documentation for a description of each field.
    """
    backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
    x_ref: Union[np.ndarray, list]  # resolved: the loaded reference data itself
    model: Optional[SupportedModel] = None
    p_val: float = .05
    x_ref_preprocessed: bool = False
    update_x_ref: Optional[Dict[str, int]] = None
    uncertainty_type: Literal['mc_dropout', 'ensemble'] = 'mc_dropout'
    n_evals: int = 25
    batch_size: int = 32
    preprocess_batch_fn: Optional[Callable] = None
    device: Optional[str] = None
    tokenizer: Optional[Callable] = None
    max_len: Optional[int] = None
    input_shape: Optional[tuple] = None
    data_type: Optional[str] = None
# Unresolved schema dictionary (used in alibi_detect.utils.loading)
# Maps each detector class name to its unresolved pydantic schema, so loading code can validate a
# raw (still string-referenced) config dict. Keys must match the detector class names exactly.
DETECTOR_CONFIGS: Dict[str, Type[DetectorConfig]] = {
    'KSDrift': KSDriftConfig,
    'ChiSquareDrift': ChiSquareDriftConfig,
    'TabularDrift': TabularDriftConfig,
    'CVMDrift': CVMDriftConfig,
    'FETDrift': FETDriftConfig,
    'MMDDrift': MMDDriftConfig,
    'LSDDDrift': LSDDDriftConfig,
    'ClassifierDrift': ClassifierDriftConfig,
    'SpotTheDiffDrift': SpotTheDiffDriftConfig,
    'LearnedKernelDrift': LearnedKernelDriftConfig,
    'ContextMMDDrift': ContextMMDDriftConfig,
    'MMDDriftOnline': MMDDriftOnlineConfig,
    'LSDDDriftOnline': LSDDDriftOnlineConfig,
    'CVMDriftOnline': CVMDriftOnlineConfig,
    'FETDriftOnline': FETDriftOnlineConfig,
    'ClassifierUncertaintyDrift': ClassifierUncertaintyDriftConfig,
    'RegressorUncertaintyDrift': RegressorUncertaintyDriftConfig,
}
# Resolved schema dictionary (used in alibi_detect.utils.loading)
# Same keys as DETECTOR_CONFIGS, but mapping to the resolved schemas (artefact references already
# replaced by runtime objects). The two dicts must stay in lockstep.
DETECTOR_CONFIGS_RESOLVED: Dict[str, Type[DetectorConfig]] = {
    'KSDrift': KSDriftConfigResolved,
    'ChiSquareDrift': ChiSquareDriftConfigResolved,
    'TabularDrift': TabularDriftConfigResolved,
    'CVMDrift': CVMDriftConfigResolved,
    'FETDrift': FETDriftConfigResolved,
    'MMDDrift': MMDDriftConfigResolved,
    'LSDDDrift': LSDDDriftConfigResolved,
    'ClassifierDrift': ClassifierDriftConfigResolved,
    'SpotTheDiffDrift': SpotTheDiffDriftConfigResolved,
    'LearnedKernelDrift': LearnedKernelDriftConfigResolved,
    'ContextMMDDrift': ContextMMDDriftConfigResolved,
    'MMDDriftOnline': MMDDriftOnlineConfigResolved,
    'LSDDDriftOnline': LSDDDriftOnlineConfigResolved,
    'CVMDriftOnline': CVMDriftOnlineConfigResolved,
    'FETDriftOnline': FETDriftOnlineConfigResolved,
    'ClassifierUncertaintyDrift': ClassifierUncertaintyDriftConfigResolved,
    'RegressorUncertaintyDrift': RegressorUncertaintyDriftConfigResolved,
}
| 52,612 | 39.193277 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/validators.py | import sys
from typing import Any, Generic, Optional, Type, TypeVar, Union, List
import numpy as np
from numpy.lib import NumpyVersion
from pydantic.fields import ModelField
from alibi_detect.utils.frameworks import has_tensorflow, has_pytorch, has_keops, Framework
if has_tensorflow:
import tensorflow as tf
if has_pytorch:
import torch
def coerce_int2list(value: int) -> List[int]:
    """Validator to coerce int to list (pydantic doesn't do this by default)."""
    return value if isinstance(value, list) else [value]
# Framework validator (validates `flavour` and `backend` fields)
def validate_framework(framework: str, field: ModelField) -> str:
    """Check that the requested framework's optional dependency is installed."""
    # Map each framework to the availability of its optional dependency.
    available = {
        Framework.TENSORFLOW: has_tensorflow,
        Framework.PYTORCH: has_pytorch,
        Framework.KEOPS: has_keops,
        Framework.SKLEARN: True,  # sklearn is a core dep
    }
    if available.get(framework, False):
        return framework
    raise ImportError(f"`{field.name} = '{framework}'` not possible since {framework} is not installed.")
# NumPy NDArray pydantic validator type
# The code below is adapted from https://github.com/cheind/pydantic-numpy.
T = TypeVar("T", bound=np.generic)

# Two definitions of NDArray are needed because the `else` branch subscripts `np.ndarray[Any, T]`,
# which requires numpy >= 1.22 and Python >= 3.9; older environments fall back to the
# unparameterised `np.ndarray` base. Both variants expose the same pydantic validator hooks.
if NumpyVersion(np.__version__) < "1.22.0" or sys.version_info < (3, 9):
    class NDArray(Generic[T], np.ndarray):
        """
        A Generic pydantic model to coerce to np.ndarray's.
        """
        @classmethod
        def __get_validators__(cls):
            # pydantic v1 hook: yields the validator(s) applied to values for fields of this type.
            yield cls.validate

        @classmethod
        def validate(cls, val: Any, field: ModelField) -> np.ndarray:
            return _coerce_2_ndarray(cls, val, field)

else:
    class NDArray(Generic[T], np.ndarray[Any, T]):  # type: ignore[no-redef, type-var]
        """
        A Generic pydantic model to coerce to np.ndarray's.
        """
        @classmethod
        def __get_validators__(cls):
            # pydantic v1 hook: yields the validator(s) applied to values for fields of this type.
            yield cls.validate

        @classmethod
        def validate(cls, val: Any, field: ModelField) -> Optional[np.ndarray]:
            return _coerce_2_ndarray(cls, val, field)
def _coerce_2_ndarray(cls: Type, val: Any, field: ModelField) -> np.ndarray:
if field.sub_fields is not None:
dtype_field = field.sub_fields[0]
return np.asarray(val, dtype=dtype_field.type_)
else:
return np.asarray(val)
def coerce_2_tensor(value: Union[float, List[float]], values: dict):
    """
    Coerce a float (or list of floats) to a tensor of the framework named by the already-validated
    `backend`/`flavour` field in `values`. `None` passes through unchanged.
    """
    if value is None:
        return value
    # `values` holds the previously validated fields of the pydantic model being built.
    framework = values.get('backend') or values.get('flavour')
    if framework is None:
        raise ValueError('`coerce_2tensor` failed since no framework identified.')
    elif framework == Framework.TENSORFLOW and has_tensorflow:
        return tf.convert_to_tensor(value)
    elif (framework == Framework.PYTORCH and has_pytorch) or (framework == Framework.KEOPS and has_keops):
        # keops kernels operate on torch tensors, hence the shared branch.
        return torch.tensor(value)
    else:
        # Error should not be raised since `flavour` should have already been validated.
        raise ImportError(f'Cannot coerce to {framework} Tensor since {framework} is not installed.')
| 3,113 | 35.635294 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/registry.py | """
This registry allows Python objects to be registered and accessed by their string reference later on. The primary usage
is to register objects so that they can be specified in a `config.toml` file. A number of Alibi Detect functions are
also pre-registered in the registry for convenience. See the
`Registering artefacts <https://docs.seldon.io/projects/alibi-detect/en/stable/overview/config_files.html#registering-artefacts>`_ # noqa: E501
documentation.
Examples
--------
Registering a simple function using the `@registry.register` decorator, and immediately fetching it:
.. code-block :: python
import numpy as np
from alibi_detect.saving import registry
# Register a simple function
@registry.register('my_function.v1')
def my_function(x: np.ndarray) -> np.ndarray:
"A custom function to normalise input data."
return (x - x.mean()) / x.std()
# Get function from registry
fetched_function = registry.get('my_function.v1')
Instead of using a decorator, objects can also be registered by directly using the `registry.register()` function:
.. code-block :: python
from alibi_detect.saving import registry
my_object = ...
registry.register("my_object.v1", func=my_object)
"""
import catalogue
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, has_keops
if has_tensorflow:
from alibi_detect.cd.tensorflow import \
preprocess_drift as preprocess_drift_tf
from alibi_detect.utils.tensorflow.data import TFDataset as TFDataset_tf
from alibi_detect.utils.tensorflow.kernels import \
GaussianRBF as GaussianRBF_tf, sigma_median as sigma_median_tf
from alibi_detect.cd.tensorflow.context_aware import _sigma_median_diag as _sigma_median_diag_tf
if has_pytorch:
from alibi_detect.cd.pytorch import \
preprocess_drift as preprocess_drift_torch
from alibi_detect.utils.pytorch.kernels import \
GaussianRBF as GaussianRBF_torch, sigma_median as sigma_median_torch
from alibi_detect.cd.pytorch.context_aware import _sigma_median_diag as _sigma_median_diag_torch
if has_keops:
from alibi_detect.utils.keops.kernels import \
GaussianRBF as GaussianRBF_keops, sigma_mean as sigma_mean_keops
# Create registry
# `catalogue.create` namespaces the registry under ("alibi_detect", "registry").
registry = catalogue.create("alibi_detect", "registry")

# Register alibi-detect classes/functions
# Registration is gated on the relevant optional dependency being installed; registered names
# mirror the module path of the object so config files can reference them by string.
if has_tensorflow:
    registry.register('utils.tensorflow.kernels.GaussianRBF', func=GaussianRBF_tf)
    registry.register('utils.tensorflow.kernels.sigma_median', func=sigma_median_tf)
    registry.register('cd.tensorflow.context_aware._sigma_median_diag', func=_sigma_median_diag_tf)
    registry.register('cd.tensorflow.preprocess.preprocess_drift', func=preprocess_drift_tf)
    registry.register('utils.tensorflow.data.TFDataset', func=TFDataset_tf)

if has_pytorch:
    registry.register('utils.pytorch.kernels.GaussianRBF', func=GaussianRBF_torch)
    registry.register('utils.pytorch.kernels.sigma_median', func=sigma_median_torch)
    registry.register('cd.pytorch.context_aware._sigma_median_diag', func=_sigma_median_diag_torch)
    registry.register('cd.pytorch.preprocess.preprocess_drift', func=preprocess_drift_torch)

if has_keops:
    registry.register('utils.keops.kernels.GaussianRBF', func=GaussianRBF_keops)
    registry.register('utils.keops.kernels.sigma_mean', func=sigma_mean_keops)
| 3,373 | 41.708861 | 144 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/saving.py | import logging
import os
import shutil
import warnings
from functools import partial
from pathlib import Path
from typing import Callable, Optional, Tuple, Union, Any, Dict, TYPE_CHECKING
import dill
import numpy as np
import toml
from transformers import PreTrainedTokenizerBase
from alibi_detect.saving._typing import VALID_DETECTORS
from alibi_detect.saving.loading import _replace, validate_config, STATE_PATH
from alibi_detect.saving.registry import registry
from alibi_detect.utils._types import supported_models_all, supported_models_tf, supported_models_torch, \
supported_models_sklearn
from alibi_detect.base import Detector, ConfigurableDetector, StatefulDetectorOnline
from alibi_detect.saving._tensorflow import save_detector_legacy, save_model_config_tf, save_optimizer_config_tf
from alibi_detect.saving._pytorch import save_model_config_pt
from alibi_detect.saving._sklearn import save_model_config_sk
if TYPE_CHECKING:
import tensorflow as tf
# do not extend pickle dispatch table so as not to change pickle behaviour
dill.extend(use_dill=False)
logger = logging.getLogger(__name__)
X_REF_FILENAME = 'x_ref.npy'
C_REF_FILENAME = 'c_ref.npy'
def save_detector(
    detector: Union[Detector, ConfigurableDetector],
    filepath: Union[str, os.PathLike],
    legacy: bool = False,
) -> None:
    """
    Save outlier, drift or adversarial detector.

    Parameters
    ----------
    detector
        Detector object.
    filepath
        Save directory.
    legacy
        Whether to save in the legacy .dill format instead of via a config.toml file. Default is `False`.
        This option will be removed in a future version.

    Raises
    ------
    NotImplementedError
        If the detector type is not in `VALID_DETECTORS`.
    RuntimeError
        If saving fails. Files created by the failed save are removed before raising.
    """
    if legacy:
        warnings.warn('The `legacy` option will be removed in a future version.', DeprecationWarning)

    detector_name = detector.__class__.__name__
    if detector_name not in VALID_DETECTORS:
        raise NotImplementedError(f'{detector_name} is not supported by `save_detector`.')

    # Saving is wrapped in a try, with cleanup in except, to prevent a half-saved detector remaining upon error.
    # The pre-existing contents of `filepath` must be snapshotted BEFORE anything is written: taking the
    # snapshot inside `except` would include the partially-written files, making the cleanup a no-op.
    filepath = Path(filepath)
    orig_files = set(filepath.iterdir()) if filepath.is_dir() else set()
    try:
        # Create directory if it doesn't exist
        if not filepath.is_dir():
            logger.warning('Directory {} does not exist and is now created.'.format(filepath))
            filepath.mkdir(parents=True, exist_ok=True)

        # If a drift detector, wrap drift detector save method
        if isinstance(detector, ConfigurableDetector) and not legacy:
            _save_detector_config(detector, filepath)

        # Otherwise, save via the previous meta and state_dict approach
        else:
            save_detector_legacy(detector, filepath)

    except Exception as error:
        # Remove any files created by this (failed) save, leaving pre-existing files untouched.
        _cleanup_filepath(orig_files, filepath)
        raise RuntimeError(f'Saving failed. The save directory {filepath} has been cleaned.') from error

    logger.info('finished saving.')
def _cleanup_filepath(orig_files: set, filepath: Path):
"""
Cleans up the `filepath` directory in the event of a saving failure.
Parameters
----------
orig_files
Set of original files (not to delete).
filepath
The directory to clean up.
"""
# Find new files
new_files = set(filepath.iterdir())
files_to_rm = new_files - orig_files
# Delete new files
for file in files_to_rm:
if file.is_dir():
shutil.rmtree(file)
elif file.is_file():
file.unlink()
# Delete filepath directory if it is now empty
if filepath is not None:
if not any(filepath.iterdir()):
filepath.rmdir()
# TODO - eventually this will become save_detector (once outlier and adversarial updated to save via config.toml)
def _save_detector_config(detector: ConfigurableDetector,
                          filepath: Union[str, os.PathLike]):
    """
    Save a drift detector. The detector is saved as a toml config file. Artefacts such as
    `preprocess_fn`, models, embeddings, tokenizers etc are serialized, and their filepaths are
    added to the config file.
    The detector can be loaded again by passing the resulting config file or filepath to `load_detector`.
    Parameters
    ----------
    detector
        The detector to save.
    filepath
        File path to save serialized artefacts to.
    Raises
    ------
    NotImplementedError
        If the detector does not expose a `get_config` method.
    """
    # detector name
    detector_name = detector.__class__.__name__
    # Process file paths
    filepath = Path(filepath)
    if not filepath.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(filepath))
        filepath.mkdir(parents=True, exist_ok=True)
    # Get the detector config (with artefacts still within it)
    if hasattr(detector, 'get_config'):
        cfg = detector.get_config()  # TODO - remove once all detectors have get_config
        cfg = validate_config(cfg, resolved=True)
    else:
        raise NotImplementedError(f'{detector_name} does not yet support config.toml based saving.')
    # Save state if an online detector and online state exists (self.t > 0)
    if isinstance(detector, StatefulDetectorOnline):
        if detector.t > 0:
            detector.save_state(filepath.joinpath(STATE_PATH))
    # Save x_ref; the array in the config is replaced with its relative filepath
    save_path = filepath.joinpath(X_REF_FILENAME)
    np.save(str(save_path), cfg['x_ref'])
    cfg.update({'x_ref': X_REF_FILENAME})
    # Save c_ref (only present for context-aware detectors)
    c_ref = cfg.get('c_ref')
    if c_ref is not None:
        save_path = filepath.joinpath(C_REF_FILENAME)
        np.save(str(save_path), cfg['c_ref'])
        cfg.update({'c_ref': C_REF_FILENAME})
    # Save preprocess_fn
    preprocess_fn = cfg.get('preprocess_fn')
    if preprocess_fn is not None:
        logger.info('Saving the preprocess_fn function.')
        preprocess_cfg = _save_preprocess_config(preprocess_fn, cfg['input_shape'], filepath)
        cfg['preprocess_fn'] = preprocess_cfg
    # Serialize kernels
    for kernel_str in ('kernel', 'x_kernel', 'c_kernel'):
        kernel = cfg.get(kernel_str)
        if kernel is not None:
            cfg[kernel_str] = _save_kernel_config(kernel, filepath, Path(kernel_str))
            if 'proj' in cfg[kernel_str]:  # serialise proj from DeepKernel - do here as need input_shape
                cfg[kernel_str]['proj'], _ = _save_model_config(cfg[kernel_str]['proj'], base_path=filepath,
                                                                input_shape=cfg['input_shape'])
    # ClassifierDrift and SpotTheDiffDrift specific artefacts.
    # Serialize detector model
    model = cfg.get('model')
    if model is not None:
        model_cfg, _ = _save_model_config(model, base_path=filepath, input_shape=cfg['input_shape'])
        cfg['model'] = model_cfg
    # Serialize optimizer
    optimizer = cfg.get('optimizer')
    if optimizer is not None:
        cfg['optimizer'] = _save_optimizer_config(optimizer)
    # Serialize dataset
    dataset = cfg.get('dataset')
    if dataset is not None:
        dataset_cfg, dataset_kwargs = _serialize_object(dataset, filepath, Path('dataset'))
        cfg.update({'dataset': dataset_cfg})
        if len(dataset_kwargs) != 0:  # partial-function kwargs are stored under a nested 'kwargs' key
            cfg['dataset']['kwargs'] = dataset_kwargs
    # Serialize reg_loss_fn
    reg_loss_fn = cfg.get('reg_loss_fn')
    if reg_loss_fn is not None:
        reg_loss_fn_cfg, _ = _serialize_object(reg_loss_fn, filepath, Path('reg_loss_fn'))
        cfg['reg_loss_fn'] = reg_loss_fn_cfg
    # Save initial_diffs
    initial_diffs = cfg.get('initial_diffs')
    if initial_diffs is not None:
        save_path = filepath.joinpath('initial_diffs.npy')
        np.save(str(save_path), initial_diffs)
        cfg.update({'initial_diffs': 'initial_diffs.npy'})
    # Save config (written as config.toml inside `filepath`)
    write_config(cfg, filepath)
def write_config(cfg: dict, filepath: Union[str, os.PathLike]):
    """
    Save an unresolved detector config dict to a TOML file named 'config.toml'.

    Parameters
    ----------
    cfg
        Unresolved detector config dict.
    filepath
        Filepath to directory to save 'config.toml' file in.
    """
    # Make sure the target directory exists.
    save_dir = Path(filepath)
    if not save_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(save_dir))
        save_dir.mkdir(parents=True, exist_ok=True)

    # Stringify any pathlib.Path values, then validate. Validation must happen before the
    # None -> "None" replacement below, which would otherwise break it.
    cfg = _path2str(cfg)
    validate_config(cfg)

    # TOML has no null value and requires string keys, hence these final tweaks.
    # TODO: Subject to change depending on toml library updates
    cfg = _replace(cfg, None, "None")
    cfg = _int2str_keys(cfg)

    # Write to TOML file
    config_path = save_dir.joinpath('config.toml')
    logger.info('Writing config to {}'.format(config_path))
    with open(config_path, 'w') as f:
        toml.dump(cfg, f, encoder=toml.TomlNumpyEncoder())  # type: ignore[misc]
def _save_preprocess_config(preprocess_fn: Callable,
                            input_shape: Optional[tuple],
                            filepath: Path) -> dict:
    """
    Serializes a drift detectors preprocess_fn. Artefacts are saved to disk, and a config dict containing filepaths
    to the saved artefacts is returned.
    Parameters
    ----------
    preprocess_fn
        The preprocess function to be serialized.
    input_shape
        Input shape for a model (if a model exists).
    filepath
        Directory to save serialized artefacts to.
    Returns
    -------
    The config dictionary, containing references to the serialized artefacts. The format of this dict matches that \
    of the `preprocess` field in the drift detector specification.
    """
    preprocess_cfg: Dict[str, Any] = {}
    local_path = Path('preprocess_fn')
    # Serialize function (registry string or dill filepath); func_kwargs holds any functools.partial keywords
    func, func_kwargs = _serialize_object(preprocess_fn, filepath, local_path.joinpath('function'))
    preprocess_cfg.update({'src': func})
    # Process partial function kwargs (if they exist). Each kwarg is serialized according to its type;
    # the isinstance checks below are ordered, so e.g. a model is caught before the generic callable branch.
    kwargs = {}
    for k, v in func_kwargs.items():
        # Model/embedding
        if isinstance(v, supported_models_all):
            cfg_model, cfg_embed = _save_model_config(v, filepath, input_shape, local_path)
            kwargs.update({k: cfg_model})
            if cfg_embed is not None:
                kwargs.update({'embedding': cfg_embed})
        # Tokenizer
        elif isinstance(v, PreTrainedTokenizerBase):
            cfg_token = _save_tokenizer_config(v, filepath, local_path)
            kwargs.update({k: cfg_token})
        # torch device (duck-typed by class name)
        elif v.__class__.__name__ == 'device':  # avoiding torch import in case not installed
            kwargs.update({k: v.type})
        # Arbitrary function
        elif callable(v):
            src, _ = _serialize_object(v, filepath, local_path.joinpath(k))
            kwargs.update({k: src})
        # Put remaining kwargs directly into cfg
        else:
            kwargs.update({k: v})
    # `preprocess_drift` functions use a flat config layout; other callables nest kwargs under 'kwargs'.
    if 'preprocess_drift' in func:
        preprocess_cfg.update(kwargs)
    else:
        preprocess_cfg.update({'kwargs': kwargs})
    return preprocess_cfg
def _serialize_object(obj: Callable, base_path: Path,
                      local_path: Path = Path('.')) -> Tuple[str, dict]:
    """
    Serializes a python object. If the object is in the object registry, the registry str is returned.
    Otherwise the object is pickled to disk with dill. If the object is wrapped in a
    functools.partial, the partial's keywords are returned alongside.

    Parameters
    ----------
    obj
        The object to serialize.
    base_path
        Base directory to save in.
    local_path
        A local (relative) filepath to append to base_path.

    Returns
    -------
    Tuple containing a string referencing the save filepath and a dict of kwargs.
    """
    # Unwrap a functools.partial so the underlying function is what gets serialized.
    kwargs: dict = {}
    if isinstance(obj, partial):
        kwargs = obj.keywords
        obj = obj.func

    # A registry entry takes precedence over pickling, but only if the match is unambiguous.
    matches = [name for name, artefact in registry.get_all().items() if obj == artefact]
    if len(matches) == 1:
        return '@' + matches[0], kwargs

    # Fall back to dill serialization on disk.
    save_path = base_path.joinpath(local_path)
    if not save_path.parent.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(save_path.parent))
        save_path.parent.mkdir(parents=True, exist_ok=True)
    logger.info('Saving object to {}.'.format(save_path.with_suffix('.dill')))
    with open(save_path.with_suffix('.dill'), 'wb') as f:
        dill.dump(obj, f)
    return str(local_path.with_suffix('.dill')), kwargs
def _path2str(cfg: dict, absolute: bool = False) -> dict:
"""
Private function to traverse a config dict and convert pathlib Path's to strings.
Parameters
----------
cfg
The config dict.
absolute
Whether to convert to absolute filepaths.
Returns
-------
The converted config dict.
"""
for k, v in cfg.items():
if isinstance(v, dict):
_path2str(v, absolute)
elif isinstance(v, Path):
if absolute:
v = v.resolve()
cfg.update({k: str(v.as_posix())})
return cfg
def _int2str_keys(dikt: dict) -> dict:
"""
Private function to traverse a dict and convert any dict's with int keys to str keys (e.g.
`categories_per_feature` kwarg for `TabularDrift`.
Parameters
----------
dikt
The dictionary.
Returns
-------
The converted dictionary.
"""
dikt_copy = dikt.copy()
for k, v in dikt.items():
if isinstance(k, int):
dikt_copy[str(k)] = dikt[k]
dikt_copy.pop(k)
if isinstance(v, dict):
dikt_copy[k] = _int2str_keys(v)
return dikt_copy
def _save_model_config(model: Any,
                       base_path: Path,
                       input_shape: Optional[tuple] = None,
                       path: Path = Path('.')) -> Tuple[dict, Optional[dict]]:
    """
    Save a model to a config dictionary. When a model has a text embedding model contained within it,
    this is extracted and saved separately.
    Parameters
    ----------
    model
        The model to save.
    base_path
        Base filepath to save to.
    input_shape
        The input dimensions of the model (after the optional embedding has been applied).
    path
        A local (relative) filepath to append to base_path.
    Returns
    -------
    A tuple containing the model and embedding config dicts.
    Raises
    ------
    NotImplementedError
        If the model does not match any of the supported framework type tuples.
    """
    # Dispatch on the model's framework via the supported_models_* type tuples.
    if isinstance(model, supported_models_tf):
        return save_model_config_tf(model, base_path, input_shape, path)
    elif isinstance(model, supported_models_torch):
        return save_model_config_pt(model, base_path, path)
    elif isinstance(model, supported_models_sklearn):
        # sklearn models have no embedding component, hence None for the embedding config.
        return save_model_config_sk(model, base_path, path), None
    else:
        raise NotImplementedError("Support for saving the given model is not yet implemented")
def _save_tokenizer_config(tokenizer: PreTrainedTokenizerBase,
                           base_path: Path,
                           path: Path = Path('.')) -> dict:
    """
    Saves HuggingFace tokenizers via their `save_pretrained` method.

    Parameters
    ----------
    tokenizer
        The tokenizer.
    base_path
        Base filepath to save to.
    path
        A local (relative) filepath to append to base_path.

    Returns
    -------
    The tokenizer config dict, referencing the save location relative to `base_path`.
    """
    # create folder to save tokenizer in
    save_dir = base_path.joinpath(path).joinpath('tokenizer')
    if not save_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(save_dir))
        save_dir.mkdir(parents=True, exist_ok=True)

    logger.info('Saving tokenizer to {}.'.format(save_dir))
    tokenizer.save_pretrained(save_dir)

    # The config stores the location relative to the config file, not the absolute path.
    return {'src': path.joinpath('tokenizer')}
def _save_kernel_config(kernel: Callable,
                        base_path: Path,
                        local_path: Path = Path('.')) -> dict:
    """Function to save kernel.
    If the kernel is stored in the artefact registry, the registry key (and kwargs) are written
    to config. If the kernel is a generic callable, it is pickled.
    Parameters
    ----------
    kernel
        The kernel to save.
    base_path
        Base directory to save in.
    local_path
        A local (relative) filepath to append to base_path.
    Returns
    -------
    The kernel config dictionary.
    Raises
    ------
    AttributeError
        If a kernel object does not expose a `get_config` method.
    """
    # if a DeepKernel (detected by the presence of a `proj` attribute).
    # NOTE: the DeepKernel's 'proj' model itself is serialized by the caller
    # (see _save_detector_config), since the detector's input_shape is needed there.
    if hasattr(kernel, 'proj'):
        if hasattr(kernel, 'get_config'):
            cfg_kernel = kernel.get_config()
        else:
            raise AttributeError("The detector's `kernel` must have a .get_config() method for it to be saved.")
        # Serialize the component kernels (if needed) by recursing into this function.
        # Already-serialized components are plain strings and are left untouched.
        kernel_a = cfg_kernel.get('kernel_a')
        kernel_b = cfg_kernel.get('kernel_b')
        if not isinstance(kernel_a, str):
            cfg_kernel['kernel_a'] = _save_kernel_config(cfg_kernel['kernel_a'], base_path, Path('kernel_a'))
        if not isinstance(kernel_b, str) and kernel_b is not None:
            cfg_kernel['kernel_b'] = _save_kernel_config(cfg_kernel['kernel_b'], base_path, Path('kernel_b'))
    # If any other kernel, serialize the class to disk and get config
    else:
        if isinstance(kernel, type):  # if still a class
            kernel_class = kernel
            cfg_kernel = {}
        else:  # if an object
            kernel_class = kernel.__class__
            if hasattr(kernel, 'get_config'):
                cfg_kernel = kernel.get_config()
                cfg_kernel['init_sigma_fn'], _ = _serialize_object(cfg_kernel['init_sigma_fn'], base_path,
                                                                   local_path.joinpath('init_sigma_fn'))
            else:
                raise AttributeError("The detector's `kernel` must have a .get_config() method for it to be saved.")
        # Serialize the kernel class (registry reference or dill file)
        cfg_kernel['src'], _ = _serialize_object(kernel_class, base_path, local_path.joinpath('kernel'))
    return cfg_kernel
def _save_optimizer_config(optimizer: Union['tf.keras.optimizers.Optimizer', type]) -> dict:
"""
Function to save tensorflow or pytorch optimizers.
Parameters
----------
optimizer
The optimizer to save.
Returns
-------
Optimizer config dict.
"""
if isinstance(optimizer, type):
return {'class_name': optimizer.__name__}
else:
return save_optimizer_config_tf(optimizer)
| 19,169 | 34.369004 | 116 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/_sklearn/loading.py | import os
from pathlib import Path
from typing import Union
import joblib
from sklearn.base import BaseEstimator
def load_model(filepath: Union[str, os.PathLike],
               ) -> BaseEstimator:
    """
    Load scikit-learn (or xgboost) model. Models are assumed to be a subclass of
    :class:`~sklearn.base.BaseEstimator`. This includes xgboost models following the
    scikit-learn API
    (see https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn).

    Parameters
    ----------
    filepath
        Saved model directory.

    Returns
    -------
    Loaded model.
    """
    # The model is always persisted as 'model.joblib' inside the directory (see saving.save_model).
    return joblib.load(Path(filepath).joinpath('model.joblib'))
| 706 | 25.185185 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/_sklearn/saving.py | import logging
import os
from pathlib import Path
from typing import Union
import joblib
from sklearn.base import BaseEstimator
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
def save_model_config(model: BaseEstimator,
                      base_path: Path,
                      local_path: Path = Path('.')) -> dict:
    """
    Save a scikit-learn (or xgboost) model to a config dictionary.
    Models are assumed to be a subclass of :class:`~sklearn.base.BaseEstimator`. This includes
    xgboost models following the scikit-learn API
    (see https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn).

    Parameters
    ----------
    model
        The model to save.
    base_path
        Base filepath to save to (the location of the `config.toml` file).
    local_path
        A local (relative) filepath to append to base_path.

    Returns
    -------
    The model config dict, with the model location stored relative to `base_path`.
    """
    save_model(model, filepath=base_path.joinpath(local_path), save_dir='model')
    return {
        'flavour': Framework.SKLEARN.value,
        'src': local_path.joinpath('model'),
    }
def save_model(model: BaseEstimator,
               filepath: Union[str, os.PathLike],
               save_dir: Union[str, os.PathLike] = 'model') -> None:
    """
    Save scikit-learn (and xgboost) models. Models are assumed to be a subclass of
    :class:`~sklearn.base.BaseEstimator`. This includes xgboost models following the
    scikit-learn API
    (see https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn).

    Parameters
    ----------
    model
        The scikit-learn compatible model to save.
    filepath
        Save directory.
    save_dir
        Name of folder to save to within the filepath directory.
    """
    # create folder to save model in
    target_dir = Path(filepath).joinpath(save_dir)
    if not target_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(target_dir))
        target_dir.mkdir(parents=True, exist_ok=True)

    # persist the model via joblib under a fixed filename, matching loading.load_model
    joblib.dump(model, target_dir.joinpath('model.joblib'))
| 2,235 | 30.942857 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/_sklearn/tests/test_saving_sk.py | from pytest_cases import param_fixture, parametrize, parametrize_with_cases
from alibi_detect.saving.tests.datasets import ContinuousData
from alibi_detect.saving.tests.models import classifier_model, xgb_classifier_model
from alibi_detect.saving.loading import _load_model_config
from alibi_detect.saving.saving import _path2str, _save_model_config
from alibi_detect.saving.schemas import ModelConfig
# `backend` fixture is fixed to 'sklearn' for all tests in this module.
backend = param_fixture("backend", ['sklearn'])
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd, prefix='data_')
@parametrize('model', [classifier_model, xgb_classifier_model])
def test_save_model_sk(data, model, tmp_path):
    """
    Unit test for _save_model_config and _load_model_config with scikit-learn and xgboost model.
    Round-trips an (untrained) model through save -> schema validation -> load and checks the
    loaded object is of the same type.
    """
    # Save model
    filepath = tmp_path
    cfg_model, _ = _save_model_config(model, base_path=filepath)
    cfg_model = _path2str(cfg_model)
    cfg_model = ModelConfig(**cfg_model).dict()  # pydantic schema validation of the model config
    assert tmp_path.joinpath('model').is_dir()
    assert tmp_path.joinpath('model/model.joblib').is_file()
    # Adjust config
    cfg_model['src'] = tmp_path.joinpath('model')  # Need to manually set to absolute path here
    # Load model
    model_load = _load_model_config(cfg_model)
    assert isinstance(model_load, type(model))
| 1,295 | 38.272727 | 96 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/tests/models.py | from functools import partial
from importlib import import_module
import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from requests.exceptions import HTTPError
import pytest
from pytest_cases import fixture, parametrize
from transformers import AutoTokenizer
from alibi_detect.cd.pytorch import UAE as UAE_pt
from alibi_detect.cd.pytorch import preprocess_drift as preprocess_drift_pt
from alibi_detect.cd.tensorflow import UAE as UAE_tf
from alibi_detect.cd.tensorflow import preprocess_drift as preprocess_drift_tf
from alibi_detect.utils.pytorch.kernels import GaussianRBF as GaussianRBF_pt
from alibi_detect.utils.pytorch.kernels import DeepKernel as DeepKernel_pt
from alibi_detect.utils.tensorflow.kernels import GaussianRBF as GaussianRBF_tf
from alibi_detect.utils.tensorflow.kernels import DeepKernel as DeepKernel_tf
from alibi_detect.models.pytorch import TransformerEmbedding as TransformerEmbedding_pt
from alibi_detect.models.tensorflow import TransformerEmbedding as TransformerEmbedding_tf
from alibi_detect.cd.pytorch import HiddenOutput as HiddenOutput_pt
from alibi_detect.cd.tensorflow import HiddenOutput as HiddenOutput_tf
from alibi_detect.utils.frameworks import has_keops
if has_keops: # pykeops only installed in Linux CI
from alibi_detect.utils.keops.kernels import GaussianRBF as GaussianRBF_ke
from alibi_detect.utils.keops.kernels import DeepKernel as DeepKernel_ke
LATENT_DIM = 2 # Must be less than input_dim set in ./datasets.py
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
@fixture
def encoder_model(backend, current_cases):
    """
    An untrained encoder of given input dimension and backend (this is a "custom" model, NOT an Alibi Detect UAE).
    Maps input_dim -> 5 -> LATENT_DIM.
    """
    # Input dimension is taken from the parametrized dataset case currently in use.
    _, _, data_params = current_cases["data"]
    _, input_dim = data_params['data_shape']
    if backend == 'tensorflow':
        model = tf.keras.Sequential(
            [
                tf.keras.layers.InputLayer(input_shape=(input_dim,)),
                tf.keras.layers.Dense(5, activation=tf.nn.relu),
                tf.keras.layers.Dense(LATENT_DIM, activation=None)
            ]
        )
    elif backend in ('pytorch', 'keops'):  # keops detectors use torch models for projections
        model = nn.Sequential(nn.Linear(input_dim, 5),
                              nn.ReLU(),
                              nn.Linear(5, LATENT_DIM))
    else:
        pytest.skip('`encoder_model` only implemented for tensorflow and pytorch.')
    return model
@fixture
def encoder_dropout_model(backend, current_cases):
    """
    An untrained encoder with dropout, of given input dimension and backend.
    Dropout rate is 0.0 so that outputs remain deterministic across calls.
    TODO: consolidate this model (and encoder_model above) with models like that in test_model_uncertainty.py
    """
    _, _, data_params = current_cases["data"]
    _, input_dim = data_params['data_shape']
    if backend == 'tensorflow':
        model = tf.keras.Sequential(
            [
                tf.keras.layers.InputLayer(input_shape=(input_dim,)),
                tf.keras.layers.Dense(5, activation=tf.nn.relu),
                tf.keras.layers.Dropout(0.0),  # 0.0 to ensure determinism
                tf.keras.layers.Dense(LATENT_DIM, activation=None)
            ]
        )
    elif backend in ('pytorch', 'keops'):
        model = nn.Sequential(nn.Linear(input_dim, 5),
                              nn.ReLU(),
                              nn.Dropout(0.0),  # 0.0 to ensure determinism
                              nn.Linear(5, LATENT_DIM))
    else:
        pytest.skip('`encoder_dropout_model` only implemented for tensorflow and pytorch.')
    return model
@fixture
def preprocess_uae(encoder_model):
    """
    Preprocess function with Untrained Autoencoder.
    The backend is inferred from the type of `encoder_model` rather than the `backend` fixture.
    """
    if isinstance(encoder_model, tf.keras.Model):
        preprocess_fn = partial(preprocess_drift_tf, model=encoder_model)
    else:
        # torch models additionally need a device
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        preprocess_fn = partial(preprocess_drift_pt, model=encoder_model, device=device)
    return preprocess_fn
@fixture
def kernel(request, backend):
    """
    Gaussian RBF kernel for given backend. Settings are parametrised in the test function.
    If `request.param` is a dict it is treated as kwargs and a kernel is built for the backend;
    any other value (e.g. an already-built kernel or None) is returned unchanged.
    """
    kernel = request.param
    if isinstance(kernel, dict):  # dict of kwargs
        kernel_cfg = kernel.copy()
        sigma = kernel_cfg.pop('sigma', None)
        if backend == 'tensorflow':
            # sigma must be a backend-native tensor before being passed to the kernel
            if sigma is not None and not isinstance(sigma, tf.Tensor):
                sigma = tf.convert_to_tensor(sigma)
            kernel = GaussianRBF_tf(sigma=sigma, **kernel_cfg)
        elif backend == 'pytorch':
            if sigma is not None and not isinstance(sigma, torch.Tensor):
                sigma = torch.tensor(sigma)
            kernel = GaussianRBF_pt(sigma=sigma, **kernel_cfg)
        elif backend == 'keops':
            if sigma is not None and not isinstance(sigma, torch.Tensor):
                sigma = torch.tensor(sigma)
            kernel = GaussianRBF_ke(sigma=sigma, **kernel_cfg)
        else:
            pytest.skip('`kernel` only implemented for tensorflow, pytorch and keops.')
    return kernel
@fixture
def optimizer(request, backend):
    """
    Optimizer for given backend. The optimizer is expected to be passed via `request` either as a
    string (e.g. "Adam"), which is resolved to the corresponding optimizer class in
    `tensorflow.keras.optimizers` or `torch.optim` (NOT instantiated), or as an already-resolved
    optimizer, which is returned unchanged.
    """
    optimizer = request.param  # Get parametrized setting
    if backend not in ('tensorflow', 'pytorch', 'keops'):
        pytest.skip('`optimizer` only implemented for tensorflow, pytorch and keops.')
    if isinstance(optimizer, str):
        # keops detectors train with torch optimizers
        module = 'tensorflow.keras.optimizers' if backend == 'tensorflow' else 'torch.optim'
        try:
            optimizer = getattr(import_module(module), optimizer)
        except AttributeError:
            raise ValueError(f"{optimizer} is not a recognised optimizer in {module}.")
    return optimizer
@fixture
def deep_kernel(request, backend, encoder_model):
    """
    Deep kernel, built using the `encoder_model` fixture for the projection, and using the kernel_a and eps
    parametrised in the test function. kernel_a/kernel_b options may be given either as a dict of
    GaussianRBF kwargs or as the literal string 'rbf'.
    """
    # Get DeepKernel options (with defaults)
    kernel_a = request.param.get('kernel_a', 'rbf')
    kernel_b = request.param.get('kernel_b', 'rbf')
    eps = request.param.get('eps', 'trainable')
    # Proj model (backend managed in encoder_model fixture)
    proj = encoder_model
    # Build DeepKernel
    if backend == 'tensorflow':
        kernel_a = GaussianRBF_tf(**kernel_a) if isinstance(kernel_a, dict) else kernel_a
        kernel_b = GaussianRBF_tf(**kernel_b) if isinstance(kernel_b, dict) else kernel_b
        deep_kernel = DeepKernel_tf(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
    elif backend == 'pytorch':
        kernel_a = GaussianRBF_pt(**kernel_a) if isinstance(kernel_a, dict) else kernel_a
        kernel_b = GaussianRBF_pt(**kernel_b) if isinstance(kernel_b, dict) else kernel_b
        deep_kernel = DeepKernel_pt(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
    elif backend == 'keops':
        kernel_a = GaussianRBF_ke(**kernel_a) if isinstance(kernel_a, dict) else kernel_a
        kernel_b = GaussianRBF_ke(**kernel_b) if isinstance(kernel_b, dict) else kernel_b
        deep_kernel = DeepKernel_ke(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
    else:
        pytest.skip('`deep_kernel` only implemented for tensorflow and pytorch.')
    return deep_kernel
@fixture
def classifier_model(backend, current_cases):
    """
    Classification model with given input dimension and backend. For tensorflow/pytorch/keops this
    is a single softmax layer over 2 classes; for sklearn it is an (untrained) random forest.
    """
    _, _, data_params = current_cases["data"]
    _, input_dim = data_params['data_shape']
    if backend == 'tensorflow':
        model = tf.keras.Sequential(
            [
                tf.keras.layers.InputLayer(input_shape=(input_dim,)),
                tf.keras.layers.Dense(2, activation=tf.nn.softmax),
            ]
        )
    elif backend in ('pytorch', 'keops'):
        model = nn.Sequential(nn.Linear(input_dim, 2),
                              nn.Softmax(1))
    elif backend == 'sklearn':
        model = RandomForestClassifier()
    else:
        pytest.skip('`classifier_model` only implemented for tensorflow, pytorch, keops and sklearn.')
    return model
@fixture
def xgb_classifier_model():
    """An untrained XGBoost classifier following the scikit-learn estimator API."""
    return XGBClassifier()
@fixture(unpack_into=('tokenizer, embedding, max_len, enc_dim'))
@parametrize('model_name, max_len', [('bert-base-cased', 100)])
@parametrize('uae', [True, False])
def nlp_embedding_and_tokenizer(model_name, max_len, uae, backend):
    """
    A fixture to build nlp embedding and tokenizer models based on the HuggingFace pre-trained models.
    When `uae` is True the transformer embedding is wrapped in an Untrained AutoEncoder projecting
    to `enc_dim` dimensions. Tests are skipped if the pre-trained weights cannot be downloaded.
    """
    backend = 'tf' if backend == 'tensorflow' else 'pt'
    # Load tokenizer
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except (OSError, HTTPError):
        pytest.skip(f"Problem downloading {model_name} from huggingface.co")
    X = 'A dummy string'  # this will be padded to max_len
    # NOTE(review): list(X[:5]) tokenizes the first 5 *characters* as separate strings — looks
    # intentional for a tiny fixture input, but confirm.
    tokens = tokenizer(list(X[:5]), pad_to_max_length=True,
                       max_length=max_len, return_tensors=backend)
    # Load embedding model (last `n_layers` hidden states are used)
    emb_type = 'hidden_state'
    n_layers = 8
    layers = [-_ for _ in range(1, n_layers + 1)]
    enc_dim = 32
    if backend == 'tf':
        try:
            embedding = TransformerEmbedding_tf(model_name, emb_type, layers)
        except (OSError, HTTPError):
            pytest.skip(f"Problem downloading {model_name} from huggingface.co")
        if uae:
            # A forward pass is needed to determine the embedding output shape for the UAE
            x_emb = embedding(tokens)
            shape = (x_emb.shape[1],)
            embedding = UAE_tf(input_layer=embedding, shape=shape, enc_dim=enc_dim)
    elif backend == 'pt':
        try:
            embedding = TransformerEmbedding_pt(model_name, emb_type, layers)
        except (OSError, HTTPError):
            pytest.skip(f"Problem downloading {model_name} from huggingface.co")
        if uae:
            x_emb = embedding(tokens)
            shape = (x_emb.shape[1],)
            embedding = UAE_pt(input_layer=embedding, shape=shape, enc_dim=enc_dim)
    return tokenizer, embedding, max_len, enc_dim
def preprocess_simple(x: np.ndarray, **kwargs):
    """
    Simple function to test serialization of generic Python function within preprocess_fn.

    Extra keyword arguments (such as those attached via `functools.partial` in
    `preprocess_simple_with_kwargs`) are accepted and ignored, so the resulting partial is
    callable as well as serializable. Previously the partial raised a TypeError if invoked.
    """
    return x*2.0
@fixture
def preprocess_simple_with_kwargs():
    """
    Simple function to test serialization of generic Python function with kwargs, within preprocess_fn.
    """
    # NOTE(review): `preprocess_simple` is declared with a single `x` parameter, so this partial is
    # serializable but would raise TypeError if actually called with these kwargs — confirm it is
    # only ever serialized, never invoked.
    return partial(preprocess_simple, kwarg1=42, kwarg2=True)
@fixture
def preprocess_nlp(embedding, tokenizer, max_len, backend):
    """
    Preprocess function with Untrained Autoencoder.
    Combines the HuggingFace embedding/tokenizer fixtures with `preprocess_simple` as the
    batch-preprocessing step.
    """
    if backend == 'tensorflow':
        preprocess_fn = partial(preprocess_drift_tf, model=embedding, tokenizer=tokenizer,
                                max_len=max_len, preprocess_batch_fn=preprocess_simple)
    elif backend in ('pytorch', 'keops'):
        # torch-based preprocessing additionally needs a device
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        preprocess_fn = partial(preprocess_drift_pt, model=embedding, tokenizer=tokenizer, max_len=max_len,
                                preprocess_batch_fn=preprocess_simple, device=device)
    else:
        pytest.skip('`preprocess_nlp` only implemented for tensorflow, pytorch and keops.')
    return preprocess_fn
@fixture
def preprocess_hiddenoutput(classifier_model, current_cases, backend):
    """
    Preprocess function to extract the softmax layer of a classifier (with the HiddenOutput utility function).
    layer=-1 selects the model's final (softmax) layer as the output.
    """
    _, _, data_params = current_cases["data"]
    _, input_dim = data_params['data_shape']
    if backend == 'tensorflow':
        model = HiddenOutput_tf(classifier_model, layer=-1, input_shape=(None, input_dim))
        preprocess_fn = partial(preprocess_drift_tf, model=model)
    elif backend in ('pytorch', 'keops'):
        model = HiddenOutput_pt(classifier_model, layer=-1)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        preprocess_fn = partial(preprocess_drift_pt, model=model, device=device)
    else:
        pytest.skip('`preprocess_hiddenoutput` only implemented for tensorflow, pytorch and keops.')
    return preprocess_fn
| 12,456 | 39.313916 | 114 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/tests/test_saving.py | # type: ignore
"""
Tests for saving/loading of detectors via config.toml files.
Internal functions such as save_kernel/load_kernel_config etc are also tested.
"""
from functools import partial
import os
from pathlib import Path
from typing import Callable
import sklearn.base
import toml
import dill
import numpy as np
import pytest
import scipy
import tensorflow as tf
import torch
import torch.nn as nn
from .datasets import BinData, CategoricalData, ContinuousData, MixedData, TextData
from .models import (encoder_model, preprocess_uae, preprocess_hiddenoutput, preprocess_simple, # noqa: F401
preprocess_simple_with_kwargs,
preprocess_nlp, LATENT_DIM, classifier_model, kernel, deep_kernel, nlp_embedding_and_tokenizer,
embedding, tokenizer, max_len, enc_dim, encoder_dropout_model, optimizer)
from alibi_detect.utils._random import fixed_seed
from packaging import version
from pytest_cases import param_fixture, parametrize, parametrize_with_cases
from sklearn.model_selection import StratifiedKFold
from alibi_detect.cd import (ChiSquareDrift, ClassifierUncertaintyDrift, RegressorUncertaintyDrift,
ClassifierDrift, FETDrift, KSDrift, LearnedKernelDrift, LSDDDrift, MMDDrift,
SpotTheDiffDrift, TabularDrift, ContextMMDDrift, MMDDriftOnline, LSDDDriftOnline,
CVMDriftOnline, FETDriftOnline)
from alibi_detect.models.pytorch import TransformerEmbedding as TransformerEmbedding_pt
from alibi_detect.models.tensorflow import TransformerEmbedding as TransformerEmbedding_tf
from alibi_detect.saving import (load_detector, read_config, registry,
resolve_config, save_detector, write_config)
from alibi_detect.saving.loading import (_get_nested_value, _replace,
_set_dtypes, _set_nested_value, _prepend_cfg_filepaths)
from alibi_detect.saving.saving import _serialize_object
from alibi_detect.saving.saving import (_path2str, _int2str_keys, _save_kernel_config, _save_model_config,
_save_preprocess_config)
from alibi_detect.saving.schemas import DeepKernelConfig, KernelConfig, ModelConfig, PreprocessConfig
from alibi_detect.utils.pytorch.kernels import DeepKernel as DeepKernel_pt
from alibi_detect.utils.tensorflow.kernels import DeepKernel as DeepKernel_tf
from alibi_detect.utils.frameworks import has_keops
if has_keops: # pykeops only installed in Linux CI
from pykeops.torch import LazyTensor
from alibi_detect.utils.keops.kernels import DeepKernel as DeepKernel_ke
if version.parse(scipy.__version__) >= version.parse('1.7.0'):
from alibi_detect.cd import CVMDrift
# TODO: We currently parametrize encoder_model etc (in models.py) with backend, so the same flavour of
# preprocessing is used as the detector backend. In the future we could decouple this in tests.
# Backends to parametrize all detector tests over (param_fixture exposes `backend` to tests)
backends = ['tensorflow', 'pytorch', 'sklearn']
if has_keops:  # pykeops only installed in Linux CI
    backends.append('keops')
backend = param_fixture("backend", backends)

# Shared detector settings, kept small so tests run quickly
P_VAL = 0.05  # significance level for offline detectors
ERT = 10  # passed as `ert` to online detectors
N_PERMUTATIONS = 10
N_BOOTSTRAPS = 100
WINDOW_SIZE = 5
REGISTERED_OBJECTS = registry.get_all()

# Define a detector config dict
MMD_CFG = {
    'name': 'MMDDrift',
    'x_ref': np.array([[-0.30074928], [1.50240758], [0.43135768], [2.11295779], [0.79684913]]),
    'p_val': 0.05,
    'n_permutations': 150,
    'data_type': 'tabular'
}
CFGS = [MMD_CFG]
# TODO - future: Some of the fixtures can/should be moved elsewhere (i.e. if they can be recycled for use elsewhere)
@parametrize('cfg', CFGS)
def test_load_simple_config(cfg, tmp_path):
    """
    Test that a bare-bones `config.toml` without a [meta] field can be loaded by `load_detector`.
    """
    cfg_path = tmp_path.joinpath('config.toml')
    # Dump x_ref to disk and point the config at the file instead of the raw array.
    np.save(str(tmp_path.joinpath('x_ref.npy')), cfg['x_ref'])
    cfg['x_ref'] = 'x_ref.npy'
    # Serialise the config and instantiate a detector from it.
    with open(cfg_path, 'w') as f:
        toml.dump(cfg, f)
    cd = load_detector(cfg_path)
    assert cd.__class__.__name__ == cfg['name']
    # The original cfg is not fully specified, so only compare the fields it contains.
    cfg_new = cd.get_config()
    for key, value in cfg.items():
        expected = 'x_ref.npy' if key == 'x_ref' else cfg_new[key]
        assert value == expected
@parametrize('preprocess_fn', [preprocess_uae, preprocess_hiddenoutput])
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_ksdrift(data, preprocess_fn, tmp_path):
    """
    Round-trip KSDrift through save_detector/load_detector on continuous data, using both UAE and
    classifier softmax output as preprocess_fn's (only this detector exercises both preprocessing
    strategies; others would see the same preprocess_fn output). The reloaded detector must be
    equivalent to the original.
    """
    X_ref, X_h0 = data
    detector = KSDrift(X_ref, p_val=P_VAL, preprocess_fn=preprocess_fn, preprocess_at_init=True)
    save_detector(detector, tmp_path)
    loaded = load_detector(tmp_path)

    # The reloaded detector must hold the preprocessed reference data and original settings.
    np.testing.assert_array_equal(preprocess_fn(X_ref), loaded.x_ref)
    assert loaded.n_features == LATENT_DIM
    assert loaded.p_val == P_VAL
    assert isinstance(loaded.preprocess_fn, Callable)
    assert loaded.preprocess_fn.func.__name__ == 'preprocess_drift'
    # p-values must be identical before and after the save/load round-trip.
    np.testing.assert_array_equal(detector.predict(X_h0)['data']['p_val'],
                                  loaded.predict(X_h0)['data']['p_val'])
@pytest.mark.skipif(backend == 'sklearn', reason="Don't test with sklearn preprocessing.")
# NOTE(review): `backend` above is the module-level pytest-cases param_fixture object, not a string,
# so this skipif condition is evaluated at collection time — presumably always False; confirm intent.
@parametrize('preprocess_fn', [preprocess_nlp])
@parametrize_with_cases("data", cases=TextData.movie_sentiment_data, prefix='data_')
def test_save_ksdrift_nlp(data, preprocess_fn, enc_dim, tmp_path):  # noqa: F811
    """
    Test KSDrift on continuous datasets, with UAE and classifier_model softmax output as preprocess_fn's. Only this
    detector is tested with embedding and embedding+uae, as other detectors should see the same preprocessed data.
    Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
    """
    # Detector save/load (small slices of the movie sentiment data keep the test fast)
    X_ref, X_h0 = data['X_train'][:5], data['X_test'][:5]
    cd = KSDrift(X_ref,
                 p_val=P_VAL,
                 preprocess_fn=preprocess_fn,
                 preprocess_at_init=True,
                 input_shape=(768,),  # hardcoded to bert-base-cased for now
                 )
    save_detector(cd, tmp_path, legacy=False)
    cd_load = load_detector(tmp_path)

    # Assert
    np.testing.assert_array_equal(preprocess_fn(X_ref), cd_load.x_ref)
    # n_features depends on whether preprocess_fn is a raw transformer embedding or embedding+UAE
    if isinstance(preprocess_fn.keywords['model'], (TransformerEmbedding_tf, TransformerEmbedding_pt)):
        assert cd_load.n_features == 768  # hardcoded to bert-base-cased for now
    else:
        assert cd_load.n_features == enc_dim  # encoder dim
    assert cd_load.p_val == P_VAL
    assert isinstance(cd_load.preprocess_fn, Callable)
    assert cd_load.preprocess_fn.func.__name__ == 'preprocess_drift'
    # p-values must match before and after the save/load round-trip
    np.testing.assert_array_equal(cd.predict(X_h0)['data']['p_val'],
                                  cd_load.predict(X_h0)['data']['p_val'])
@pytest.mark.skipif(version.parse(scipy.__version__) < version.parse('1.7.0'),
                    reason="Requires scipy version >= 1.7.0")
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_cvmdrift(data, preprocess_uae, tmp_path):
    """
    Test CVMDrift on continuous datasets, with UAE as preprocess_fn.
    Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
    """
    # Detector save/load
    X_ref, X_h0 = data
    cd = CVMDrift(X_ref,
                  p_val=P_VAL,
                  preprocess_fn=preprocess_uae,
                  preprocess_at_init=True,
                  )
    save_detector(cd, tmp_path)
    cd_load = load_detector(tmp_path)

    # Assert
    # The reloaded detector holds the preprocessed reference data and original settings
    np.testing.assert_array_equal(preprocess_uae(X_ref), cd_load.x_ref)
    assert cd_load.n_features == LATENT_DIM
    assert cd_load.p_val == P_VAL
    assert isinstance(cd_load.preprocess_fn, Callable)
    assert cd_load.preprocess_fn.func.__name__ == 'preprocess_drift'
    # p-values must be identical before and after the save/load round-trip
    np.testing.assert_array_equal(cd.predict(X_h0)['data']['p_val'],
                                  cd_load.predict(X_h0)['data']['p_val'])
@parametrize('kernel', [
    None,  # Use default kernel
    {'sigma': 0.5, 'trainable': False},  # pass kernel as object
    ], indirect=True
             )
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_mmddrift(data, kernel, preprocess_uae, backend, tmp_path, seed):  # noqa: F811
    """
    Test MMDDrift on continuous datasets, with UAE as preprocess_fn.
    Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
    """
    if backend not in ('tensorflow', 'pytorch', 'keops'):
        pytest.skip("Detector doesn't have this backend")
    # Init detector and make predictions
    X_ref, X_h0 = data
    kwargs = {
        'p_val': P_VAL,
        'backend': backend,
        'preprocess_fn': preprocess_uae,
        'n_permutations': N_PERMUTATIONS,
        'preprocess_at_init': True,
        'kernel': kernel,
        'configure_kernel_from_x_ref': False,
        'sigma': np.array([0.5], dtype=np.float32)
    }
    if backend in ('pytorch', 'keops'):
        kwargs['device'] = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Fix the RNG so the permutation test yields identical p-values pre/post save
    with fixed_seed(seed):
        cd = MMDDrift(X_ref, **kwargs)
        preds = cd.predict(X_h0)
    save_detector(cd, tmp_path)

    # Load and make predictions
    with fixed_seed(seed):
        cd_load = load_detector(tmp_path)
        preds_load = cd_load.predict(X_h0)

    # assertions
    np.testing.assert_array_equal(preprocess_uae(X_ref), cd_load._detector.x_ref)
    assert not cd_load._detector.infer_sigma
    assert cd_load._detector.n_permutations == N_PERMUTATIONS
    assert cd_load._detector.p_val == P_VAL
    assert isinstance(cd_load._detector.preprocess_fn, Callable)
    assert cd_load._detector.preprocess_fn.func.__name__ == 'preprocess_drift'
    assert cd._detector.kernel.sigma == cd_load._detector.kernel.sigma
    assert cd._detector.kernel.init_sigma_fn == cd_load._detector.kernel.init_sigma_fn
    assert preds['data']['p_val'] == preds_load['data']['p_val']
# @parametrize('preprocess_fn', [preprocess_uae, preprocess_hiddenoutput])
@parametrize('preprocess_at_init', [True, False])
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_lsdddrift(data, preprocess_at_init, backend, tmp_path, seed):
    """
    Test LSDDDrift on continuous datasets.
    Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
    """
    if backend not in ('tensorflow', 'pytorch'):
        pytest.skip("Detector doesn't have this backend")
    preprocess_fn = preprocess_simple
    # TODO - TensorFlow based preprocessors currently cause un-deterministic behaviour with LSDD permutations. Replace
    # preprocess_simple with parametrized preprocess_fn's once above issue resolved.

    # Init detector and make predictions
    X_ref, X_h0 = data
    with fixed_seed(seed):  # Init and predict with a fixed random state
        cd = LSDDDrift(X_ref,
                       p_val=P_VAL,
                       backend=backend,
                       preprocess_fn=preprocess_fn,
                       preprocess_at_init=preprocess_at_init,
                       n_permutations=N_PERMUTATIONS
                       )
        preds = cd.predict(X_h0)
    save_detector(cd, tmp_path)

    # Load and make predictions
    with fixed_seed(seed):  # Again, load and predict with fixed random state
        cd_load = load_detector(tmp_path)
        preds_load = cd_load.predict(X_h0)

    # assertions
    # The x_ref returned by get_config is preprocessed only when preprocess_at_init was set
    if preprocess_at_init:
        np.testing.assert_array_almost_equal(cd_load.get_config()['x_ref'], preprocess_fn(X_ref), 5)
    else:
        np.testing.assert_array_almost_equal(cd_load.get_config()['x_ref'], X_ref, 5)
    np.testing.assert_array_almost_equal(cd._detector.x_ref, cd_load._detector.x_ref, 5)
    assert cd_load._detector.n_permutations == N_PERMUTATIONS
    assert cd_load._detector.p_val == P_VAL
    assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
    assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize_with_cases("data", cases=BinData, prefix='data_')
def test_save_fetdrift(data, tmp_path):
"""
Test FETDrift on binary datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Detector save/load
X_ref, X_h0 = data
input_dim = X_ref.shape[1]
cd = FETDrift(X_ref,
p_val=P_VAL,
alternative='less',
)
preds = cd.predict(X_h0)
save_detector(cd, tmp_path)
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0)
# Assert
np.testing.assert_array_equal(X_ref, cd_load.x_ref)
assert not cd_load.x_ref_preprocessed
assert cd_load.n_features == input_dim
assert cd_load.p_val == P_VAL
assert cd_load.alternative == 'less'
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize_with_cases("data", cases=CategoricalData, prefix='data_')
def test_save_chisquaredrift(data, tmp_path):
"""
Test ChiSquareDrift on categorical datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Detector save/load
X_ref, X_h0 = data
input_dim = X_ref.shape[1]
cd = ChiSquareDrift(X_ref,
p_val=P_VAL,
)
preds = cd.predict(X_h0)
save_detector(cd, tmp_path)
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0)
# Assert
np.testing.assert_array_equal(X_ref, cd_load.x_ref)
assert cd_load.n_features == input_dim
assert cd_load.p_val == P_VAL
assert isinstance(cd_load.x_ref_categories, dict)
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
assert cd_load.x_ref_categories == cd.x_ref_categories
@parametrize_with_cases("data", cases=MixedData, prefix='data_')
def test_save_tabulardrift(data, tmp_path):
"""
Test TabularDrift on mixed datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Detector save/load
X_ref, X_h0 = data
input_dim = X_ref.shape[1]
cd = TabularDrift(X_ref,
p_val=P_VAL,
categories_per_feature={0: None},
)
preds = cd.predict(X_h0)
save_detector(cd, tmp_path)
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0)
# Assert
np.testing.assert_array_equal(X_ref, cd_load.x_ref)
assert cd_load.n_features == input_dim
assert cd_load.p_val == P_VAL
assert isinstance(cd_load.x_ref_categories, dict)
assert cd_load.x_ref_categories == cd.x_ref_categories
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize('optimizer', [None, "Adam"], indirect=True)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_classifierdrift(data, optimizer, classifier_model, backend, tmp_path, seed):  # noqa: F811
    """
    Test ClassifierDrift on continuous datasets.
    """
    if backend not in ('tensorflow', 'pytorch', 'sklearn'):
        pytest.skip("Detector doesn't have this backend")
    # Init detector and predict
    X_ref, X_h0 = data
    with fixed_seed(seed):
        cd = ClassifierDrift(X_ref,
                             model=classifier_model,
                             p_val=P_VAL,
                             optimizer=optimizer,
                             n_folds=5,
                             backend=backend,
                             train_size=None)
        preds = cd.predict(X_h0)  # noqa: F841
    save_detector(cd, tmp_path)

    # Load detector and make another prediction
    with fixed_seed(seed):
        cd_load = load_detector(tmp_path)
        preds_load = cd_load.predict(X_h0)  # noqa: F841

    # Assert
    np.testing.assert_array_equal(X_ref, cd_load._detector.x_ref)
    assert isinstance(cd_load._detector.skf, StratifiedKFold)
    assert cd_load._detector.p_val == P_VAL
    if backend != 'sklearn':
        assert isinstance(cd_load._detector.train_kwargs, dict)
    # The reloaded model must be of the native type for the chosen backend
    if backend == 'tensorflow':
        assert isinstance(cd_load._detector.model, tf.keras.Model)
    elif backend == 'pytorch':
        assert isinstance(cd_load._detector.model, nn.Module)
    elif backend == 'sklearn':
        assert isinstance(cd_load._detector.model, sklearn.base.BaseEstimator)
    # TODO - detector still not deterministic, investigate in future
    # assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
    # assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_spotthediff(data, classifier_model, backend, tmp_path, seed): # noqa: F811
"""
Test SpotTheDiffDrift on continuous datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and predict
X_ref, X_h0 = data
with fixed_seed(seed):
cd = SpotTheDiffDrift(X_ref,
p_val=P_VAL,
n_folds=5,
train_size=None,
backend=backend)
preds = cd.predict(X_h0) # noqa: F841
save_detector(cd, tmp_path)
# Load detector and make another prediction
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0) # noqa: F841
# Assert
np.testing.assert_array_equal(X_ref, cd_load._detector._detector.x_ref)
assert isinstance(cd_load._detector._detector.skf, StratifiedKFold)
assert cd_load._detector._detector.p_val == P_VAL
assert isinstance(cd_load._detector._detector.train_kwargs, dict)
if backend == 'tensorflow':
assert isinstance(cd_load._detector._detector.model, tf.keras.Model)
elif backend == 'pytorch':
assert isinstance(cd_load._detector._detector.model, nn.Module)
# TODO - detector still not deterministic, investigate in future
# assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
# assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize('deep_kernel', [
    {'kernel_a': 'rbf', 'eps': 0.01}  # Default for kernel_a
    ], indirect=True
             )
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_learnedkernel(data, deep_kernel, backend, tmp_path, seed):  # noqa: F811
    """
    Test LearnedKernelDrift on continuous datasets.
    Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
    """
    if backend not in ('tensorflow', 'pytorch', 'keops'):
        pytest.skip("Detector doesn't have this backend")
    # Init detector and predict
    X_ref, X_h0 = data
    with fixed_seed(seed):
        cd = LearnedKernelDrift(X_ref,
                                deep_kernel,
                                p_val=P_VAL,
                                backend=backend,
                                train_size=0.7,
                                num_workers=0)
        preds = cd.predict(X_h0)  # noqa: F841
    save_detector(cd, tmp_path)
    with fixed_seed(seed):
        cd_load = load_detector(tmp_path)
        preds_load = cd_load.predict(X_h0)  # noqa: F841

    # Assert
    np.testing.assert_array_equal(X_ref, cd_load._detector.x_ref)
    assert not cd_load._detector.x_ref_preprocessed
    assert cd_load._detector.p_val == P_VAL
    assert isinstance(cd_load._detector.train_kwargs, dict)
    # The reloaded deep kernel must be of the native type for the chosen backend
    if backend == 'tensorflow':
        assert isinstance(cd_load._detector.kernel, DeepKernel_tf)
    elif backend == 'pytorch':
        assert isinstance(cd_load._detector.kernel, DeepKernel_pt)
    else:  # backend == keops
        assert isinstance(cd_load._detector.kernel, DeepKernel_ke)
    # TODO: Not yet deterministic
    # assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
    # assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize('kernel', [
    None,  # Default kernel
    {'sigma': 0.5, 'trainable': False},  # pass kernels as GaussianRBF objects, with default sigma_median fn
    ], indirect=True
             )
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_contextmmddrift(data, kernel, backend, tmp_path, seed):  # noqa: F811
    """
    Test ContextMMDDrift on continuous datasets, with UAE as preprocess_fn.
    Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
    """
    if backend not in ('tensorflow', 'pytorch'):
        pytest.skip("Detector doesn't have this backend")
    # Init detector and make predictions
    X_ref, X_h0 = data
    # Context variables derived from the first feature of each instance
    C_ref, C_h0 = (X_ref[:, 0] + 1).reshape(-1, 1), (X_h0[:, 0] + 1).reshape(-1, 1)
    with fixed_seed(seed):
        cd = ContextMMDDrift(X_ref,
                             C_ref,
                             p_val=P_VAL,
                             backend=backend,
                             preprocess_fn=preprocess_simple,
                             n_permutations=N_PERMUTATIONS,
                             preprocess_at_init=True,
                             x_kernel=kernel,
                             c_kernel=kernel
                             )
        preds = cd.predict(X_h0, C_h0)
    save_detector(cd, tmp_path)

    # Load and make another prediction
    with fixed_seed(seed):
        cd_load = load_detector(tmp_path)
        preds_load = cd_load.predict(X_h0, C_h0)

    # assertions
    np.testing.assert_array_equal(preprocess_simple(X_ref), cd_load._detector.x_ref)
    np.testing.assert_array_equal(C_ref, cd_load._detector.c_ref)
    assert cd_load._detector.n_permutations == N_PERMUTATIONS
    assert cd_load._detector.p_val == P_VAL
    assert isinstance(cd_load._detector.preprocess_fn, Callable)
    assert cd_load._detector.preprocess_fn.__name__ == 'preprocess_simple'
    # Both the x- and c-kernels must round-trip with their sigma and init_sigma_fn intact
    assert cd._detector.x_kernel.sigma == cd_load._detector.x_kernel.sigma
    assert cd._detector.c_kernel.sigma == cd_load._detector.c_kernel.sigma
    assert cd._detector.x_kernel.init_sigma_fn == cd_load._detector.x_kernel.init_sigma_fn
    assert cd._detector.c_kernel.init_sigma_fn == cd_load._detector.c_kernel.init_sigma_fn
    assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
    assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_classifieruncertaintydrift(data, classifier_model, backend, tmp_path, seed): # noqa: F811
""" Test ClassifierDrift on continuous datasets."""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and predict
X_ref, X_h0 = data
with fixed_seed(seed):
cd = ClassifierUncertaintyDrift(X_ref,
model=classifier_model,
p_val=P_VAL,
backend=backend,
preds_type='probs',
uncertainty_type='entropy')
preds = cd.predict(X_h0) # noqa: F841
save_detector(cd, tmp_path)
# Load detector and make another prediction
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0) # noqa: F841
# Assert
np.testing.assert_array_equal(cd._detector.preprocess_fn(X_ref), cd_load._detector.x_ref)
assert cd_load._detector.p_val == P_VAL
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
@parametrize('regressor', [encoder_dropout_model])
def test_save_regressoruncertaintydrift(data, regressor, backend, tmp_path, seed):
""" Test RegressorDrift on continuous datasets."""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and predict
X_ref, X_h0 = data
with fixed_seed(seed):
cd = RegressorUncertaintyDrift(X_ref,
model=regressor,
p_val=P_VAL,
backend=backend,
uncertainty_type='mc_dropout'
)
preds = cd.predict(X_h0) # noqa: F841
save_detector(cd, tmp_path)
# Load detector and make another prediction
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0) # noqa: F841
# Assert
np.testing.assert_array_equal(cd._detector.preprocess_fn(X_ref), cd_load._detector.x_ref)
assert cd_load._detector.p_val == P_VAL
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize('kernel', [
    None,  # Use default kernel
    {'sigma': 0.5, 'trainable': False},  # pass kernel as object
    ], indirect=True
             )
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_onlinemmddrift(data, kernel, preprocess_uae, backend, tmp_path, seed):  # noqa: F811
    """
    Test MMDDriftOnline on continuous datasets, with UAE as preprocess_fn.
    Detector is saved and then loaded, with assertions checking that the reinstantiated detector is
    equivalent and reproduces the original stream of test statistics.
    """
    if backend not in ('tensorflow', 'pytorch'):
        pytest.skip("Detector doesn't have this backend")
    # Init detector and make predictions (seed fixed so threshold bootstrapping is reproducible)
    X_ref, X_h0 = data
    with fixed_seed(seed):
        cd = MMDDriftOnline(X_ref,
                            ert=ERT,
                            backend=backend,
                            preprocess_fn=preprocess_uae,
                            n_bootstraps=N_BOOTSTRAPS,
                            kernel=kernel,
                            window_size=WINDOW_SIZE
                            )
        stats = []
        for i, x_t in enumerate(X_h0):
            pred = cd.predict(x_t)
            if i >= WINDOW_SIZE:  # test stats garbage until window full
                stats.append(pred['data']['test_stat'])
    save_detector(cd, tmp_path)

    # Load and make predictions
    with fixed_seed(seed):
        cd_load = load_detector(tmp_path)
        stats_load = []
        for i, x_t in enumerate(X_h0):
            # BUG FIX: predict with the *loaded* detector (was `cd`), otherwise the round-trip
            # comparison below trivially compared the original detector against itself.
            pred = cd_load.predict(x_t)
            if i >= WINDOW_SIZE:
                stats_load.append(pred['data']['test_stat'])

    # assertions
    np.testing.assert_array_equal(preprocess_uae(X_ref), cd_load._detector.x_ref)
    assert cd_load._detector.n_bootstraps == N_BOOTSTRAPS
    assert cd_load._detector.ert == ERT
    assert isinstance(cd_load._detector.preprocess_fn, Callable)
    assert cd_load._detector.preprocess_fn.func.__name__ == 'preprocess_drift'
    assert cd._detector.kernel.sigma == cd_load._detector.kernel.sigma
    assert cd._detector.kernel.init_sigma_fn == cd_load._detector.kernel.init_sigma_fn
    # The loaded detector must reproduce the same stream of test statistics
    np.testing.assert_array_equal(stats, stats_load)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_onlinelsdddrift(data, preprocess_uae, backend, tmp_path, seed):
"""
Test LSDDDriftOnline on continuous datasets, with UAE as preprocess_fn.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and make predictions
X_ref, X_h0 = data
with fixed_seed(seed):
cd = LSDDDriftOnline(X_ref,
ert=ERT,
backend=backend,
preprocess_fn=preprocess_uae,
n_bootstraps=N_BOOTSTRAPS,
window_size=WINDOW_SIZE
)
stats = []
for i, x_t in enumerate(X_h0):
pred = cd.predict(x_t)
if i >= WINDOW_SIZE: # test stats garbage until window full
stats.append(pred['data']['test_stat'])
save_detector(cd, tmp_path)
# Load and make predictions
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
stats_load = []
for i, x_t in enumerate(X_h0):
pred = cd.predict(x_t)
if i >= WINDOW_SIZE:
stats_load.append(pred['data']['test_stat'])
# assertions
np.testing.assert_array_almost_equal(preprocess_uae(X_ref), cd_load.get_config()['x_ref'], 5)
assert cd_load._detector.n_bootstraps == N_BOOTSTRAPS
assert cd_load._detector.ert == ERT
assert isinstance(cd_load._detector.preprocess_fn, Callable)
assert cd_load._detector.preprocess_fn.func.__name__ == 'preprocess_drift'
assert cd._detector.kernel.sigma == cd_load._detector.kernel.sigma
assert cd._detector.kernel.init_sigma_fn == cd_load._detector.kernel.init_sigma_fn
np.testing.assert_array_equal(stats, stats_load)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_onlinecvmdrift(data, preprocess_uae, tmp_path, seed):
"""
Test CVMDriftOnline on continuous datasets, with UAE as preprocess_fn.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Init detector and make predictions
X_ref, X_h0 = data
with fixed_seed(seed):
cd = CVMDriftOnline(X_ref,
ert=ERT,
preprocess_fn=preprocess_uae,
n_bootstraps=N_BOOTSTRAPS,
window_sizes=[WINDOW_SIZE]
)
stats = []
for i, x_t in enumerate(X_h0):
pred = cd.predict(x_t)
if i >= WINDOW_SIZE: # test stats garbage until at least one window full
stats.append(pred['data']['test_stat'])
save_detector(cd, tmp_path)
# Load and make predictions
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
stats_load = []
for i, x_t in enumerate(X_h0):
pred = cd.predict(x_t)
if i >= WINDOW_SIZE: # test stats garbage until at least one window full
stats_load.append(pred['data']['test_stat'])
# assertions
np.testing.assert_array_almost_equal(preprocess_uae(X_ref), cd_load.get_config()['x_ref'], 5)
assert cd_load.n_bootstraps == N_BOOTSTRAPS
assert cd_load.ert == ERT
assert isinstance(cd_load.preprocess_fn, Callable)
assert cd_load.preprocess_fn.func.__name__ == 'preprocess_drift'
np.testing.assert_array_equal(stats, stats_load)
@parametrize_with_cases("data", cases=BinData, prefix='data_')
def test_save_onlinefetdrift(data, tmp_path, seed):
"""
Test FETDriftOnline on binary datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and make predictions
X_ref, X_h0 = data
with fixed_seed(seed):
cd = FETDriftOnline(X_ref,
ert=ERT,
n_bootstraps=N_BOOTSTRAPS,
window_sizes=[WINDOW_SIZE]
)
stats = []
for i, x_t in enumerate(X_h0):
pred = cd.predict(x_t)
if i >= WINDOW_SIZE: # test stats garbage until at least one window full
stats.append(pred['data']['test_stat'])
save_detector(cd, tmp_path)
# Load and make predictions
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
stats_load = []
for i, x_t in enumerate(X_h0):
pred = cd.predict(x_t)
if i >= WINDOW_SIZE: # test stats garbage until at least one window full
stats_load.append(pred['data']['test_stat'])
# assertions
np.testing.assert_array_equal(X_ref, cd_load.get_config()['x_ref'])
assert cd_load.n_bootstraps == N_BOOTSTRAPS
assert cd_load.ert == ERT
np.testing.assert_array_almost_equal(stats, stats_load, 4)
@parametrize("detector", [MMDDriftOnline, LSDDDriftOnline])
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_multivariate_online_state(detector, data, backend, seed, tmp_path):
"""
Test the saving (and loading) of multivariate online detectors' state via `save_detector`.
"""
# Skip if backend not `tensorflow` or `pytorch`
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and make prediction to update state
X_ref, X_h0 = data
with fixed_seed(seed):
dd = detector(X_ref, ert=100, window_size=10, backend=backend)
# Run for 10 time-steps
test_stats = []
for t, x_t in enumerate(X_h0[:10]):
if t == 5:
# Save detector (with state)
save_detector(dd, tmp_path)
test_stats.append(dd.predict(x_t)['data']['test_stat'])
# Check state file created
assert dd._detector.state_dir == tmp_path.joinpath('state')
# Load
with fixed_seed(seed):
dd_new = load_detector(tmp_path)
# Check attributes and compare predictions at t=5
assert dd_new.t == 5
if detector == LSDDDriftOnline: # Often a small (~1e-6) difference in LSDD test stats post-load # TODO - why?
np.testing.assert_array_almost_equal(dd_new.predict(X_h0[5])['data']['test_stat'], test_stats[5], 5)
else:
np.testing.assert_array_equal(dd_new.predict(X_h0[5])['data']['test_stat'], test_stats[5])
# Check that error raised if no state file inside `state/` dir
for child in tmp_path.joinpath('state').glob('*'):
if child.is_file():
child.unlink()
with pytest.raises(FileNotFoundError):
load_detector(tmp_path)
@parametrize("detector", [CVMDriftOnline])
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_cvm_online_state(detector, data, tmp_path):
"""
Test the saving (and loading) of the CVM online detector's state via `save_detector`.
"""
# Init detector and make prediction to update state
X_ref, X_h0 = data
dd = detector(X_ref, ert=100, window_sizes=[10])
# Run for 10 time-steps
test_stats = []
for t, x_t in enumerate(X_h0[:10]):
if t == 5:
# Save detector (with state)
save_detector(dd, tmp_path)
test_stats.append(dd.predict(x_t)['data']['test_stat'])
# Check state file created
assert dd.state_dir == tmp_path.joinpath('state')
# Load
dd_new = load_detector(tmp_path)
# Check attributes and compare predictions at t=5
assert dd_new.t == 5
np.testing.assert_array_equal(dd_new.predict(X_h0[5])['data']['test_stat'], test_stats[5])
# Check that error raised if no state file inside `state/` dir
for child in tmp_path.joinpath('state').glob('*'):
if child.is_file():
child.unlink()
with pytest.raises(FileNotFoundError):
load_detector(tmp_path)
@parametrize("detector", [FETDriftOnline])
@parametrize_with_cases("data", cases=BinData, prefix='data_')
def test_save_fet_online_state(detector, data, tmp_path):
"""
Test the saving (and loading) of the FET online detector's state via `save_detector`.
"""
# Init detector and make prediction to update state
X_ref, X_h0 = data
dd = detector(X_ref, ert=100, window_sizes=[10])
# Run for 10 time-steps
test_stats = []
for t, x_t in enumerate(X_h0[:10]):
if t == 5:
# Save detector (with state)
save_detector(dd, tmp_path)
test_stats.append(dd.predict(x_t)['data']['test_stat'])
# Check state file created
assert dd.state_dir == tmp_path.joinpath('state')
# Load
dd_new = load_detector(tmp_path)
# Check attributes and compare predictions at t=5
assert dd_new.t == 5
np.testing.assert_array_equal(dd_new.predict(X_h0[5])['data']['test_stat'], test_stats[5])
# Check that error raised if no state file inside `state/` dir
for child in tmp_path.joinpath('state').glob('*'):
if child.is_file():
child.unlink()
with pytest.raises(FileNotFoundError):
load_detector(tmp_path)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_online_state_t0(data, tmp_path):
"""
Test that state is not saved when t=0.
"""
# Init detector
X_ref, X_h0 = data
dd = CVMDriftOnline(X_ref, ert=100, window_sizes=[10])
# Check state NOT saved when t=0
state_dir = tmp_path.joinpath('state')
save_detector(dd, tmp_path)
assert not state_dir.is_dir()
# Check state IS saved when t>0
dd.predict(X_h0[0])
save_detector(dd, tmp_path)
assert state_dir.is_dir()
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd)
def test_load_absolute(data, tmp_path):
    """
    Test that load_detector() works with absolute paths in config.

    A detector is saved normally, then its config is re-written to a second directory
    with `x_ref` pointing (as an absolute path) at the original save location. Loading
    from the new directory should recover the same reference data.
    """
    # Init detector and save
    X_ref, X_h0 = data
    cd = KSDrift(X_ref, p_val=P_VAL)
    save_detector(cd, tmp_path)
    # Write a new cfg file elsewhere, with x_ref reference inside it an absolute path to original x_ref location
    cfg = read_config(tmp_path.joinpath('config.toml'))
    x_ref_path = tmp_path.joinpath(Path(cfg['x_ref'])).resolve()  # Absolute path for x_ref
    cfg['x_ref'] = x_ref_path.as_posix()  # we always write paths to config.toml as Posix not Windows paths
    new_cfg_dir = tmp_path.joinpath('new_config_dir')
    new_cfg_dir.mkdir()
    write_config(cfg, new_cfg_dir)
    # Reload
    cd_new = load_detector(new_cfg_dir)
    # Assertions
    np.testing.assert_array_equal(cd.x_ref, cd_new.x_ref)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_version_warning(data, tmp_path):
    """
    Test that a version mismatch warning is raised if a detector is loaded from a config generated with a
    different alibi_detect version, then saved, then loaded again (warning is still expected on final load).
    This is only tested on one detector since the functionality lies outside of the actual detector classes.
    """
    X_ref, X_h0 = data
    cd = KSDrift(X_ref, p_val=P_VAL)
    # First save (just to create a config)
    save_detector(cd, tmp_path)
    # Emulate version mismatch by overwriting the version recorded in the saved config
    cfg = read_config(tmp_path.joinpath('config.toml'))
    cfg['meta']['version'] = '0.1.x'
    _ = write_config(cfg, tmp_path)
    # Reload and save again
    cd = load_detector(tmp_path)
    save_detector(cd, tmp_path)
    # Check saved config contains a "version_warning"
    cfg = read_config(tmp_path.joinpath('config.toml'))
    assert cfg['meta']['version_warning']
    # Final load (we expect a warning to be raised here)
    with pytest.warns(Warning):  # error will be raised if a warning IS NOT raised
        cd_new = load_detector(tmp_path)
    assert cd_new.meta.get('version_warning', False)
@parametrize('kernel', [
    {'sigma': 0.5, 'trainable': False, 'init_sigma_fn': None},
    {'sigma': [0.5, 0.8], 'trainable': False, 'init_sigma_fn': None},
    {'sigma': None, 'trainable': True, 'init_sigma_fn': None},
], indirect=True
)
def test_save_kernel(kernel, backend, tmp_path):  # noqa: F811
    """
    Unit test for _save/_load_kernel_config, when kernel is a GaussianRBF kernel.
    Kernels are saved and then loaded, with assertions to check equivalence.
    """
    # Save kernel to config
    filepath = tmp_path
    filename = Path('mykernel')
    cfg_kernel = _save_kernel_config(kernel, filepath, filename)
    cfg_kernel = KernelConfig(**cfg_kernel).dict()  # Pass through validator to test, and coerce sigma to Tensor
    # A GaussianRBF is saved by registry reference; any other kernel is dilled to file
    if kernel.__class__.__name__ == 'GaussianRBF':
        assert cfg_kernel['src'] == '@utils.' + backend + '.kernels.GaussianRBF'
    else:
        assert Path(cfg_kernel['src']).suffix == '.dill'
    assert cfg_kernel['trainable'] == kernel.trainable
    if not kernel.trainable and cfg_kernel['sigma'] is not None:
        np.testing.assert_array_almost_equal(cfg_kernel['sigma'], kernel.sigma, 6)
    # Resolve and load config (_load_kernel_config is called within resolve_config)
    cfg = {'kernel': cfg_kernel, 'backend': backend}
    _prepend_cfg_filepaths(cfg, tmp_path)
    kernel_loaded = resolve_config(cfg, tmp_path)['kernel']
    # Call kernels (smoke test on backend-appropriate random data)
    if backend == 'tensorflow':
        X = tf.random.normal((10, 1), dtype=tf.float32)
    elif backend == 'pytorch':
        X = torch.randn((10, 1), dtype=torch.float32)
    else:  # backend == 'keops'
        X = torch.randn((10, 1), dtype=torch.float32)
        X = LazyTensor(X[None, :])
    kernel(X, X)
    kernel_loaded(X, X)
    # Final checks
    assert type(kernel_loaded) == type(kernel)
    if backend == 'tensorflow':
        np.testing.assert_array_almost_equal(np.array(kernel_loaded.sigma), np.array(kernel.sigma), 5)
    else:
        np.testing.assert_array_almost_equal(kernel_loaded.sigma.detach().numpy(), kernel.sigma.detach().numpy(), 5)
    assert kernel_loaded.trainable == kernel.trainable
    assert kernel_loaded.init_sigma_fn == kernel.init_sigma_fn
# `data` passed below as needed in encoder_model, which is used in deep_kernel
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd)
@parametrize('deep_kernel', [
    {'kernel_a': 'rbf', 'kernel_b': 'rbf', 'eps': 'trainable'},  # Default for kernel_a and kernel_b, trainable eps
    {'kernel_a': {'trainable': True}, 'kernel_b': 'rbf', 'eps': 0.01},  # Explicit kernel_a, fixed eps
], indirect=True
)
def test_save_deepkernel(data, deep_kernel, backend, tmp_path):  # noqa: F811
    """
    Unit test for _save/_load_kernel_config, when kernel is a DeepKernel kernel.
    Kernels are saved and then loaded, with assertions to check equivalence.
    """
    # Get data dim
    if backend == 'tensorflow':
        X = tf.random.normal((10, 1), dtype=tf.float32)
    elif backend == 'pytorch':
        X = torch.randn((10, 1), dtype=torch.float32)
    else:  # backend == 'keops'
        X = torch.randn((10, 1), dtype=torch.float32)
        X = LazyTensor(X[None, :])
    #    X, _ = data
    input_shape = (X.shape[1],)
    # Save kernel to config (the projection model is saved separately via _save_model_config)
    filepath = tmp_path
    filename = 'mykernel'
    cfg_kernel = _save_kernel_config(deep_kernel, filepath, filename)
    cfg_kernel['proj'], _ = _save_model_config(cfg_kernel['proj'], base_path=filepath, input_shape=input_shape)
    cfg_kernel = _path2str(cfg_kernel)
    cfg_kernel['proj'] = ModelConfig(**cfg_kernel['proj']).dict()  # Pass thru ModelConfig to set `layers` etc
    cfg_kernel = DeepKernelConfig(**cfg_kernel).dict()  # pydantic validation
    assert cfg_kernel['proj']['src'] == 'model'
    assert cfg_kernel['proj']['custom_objects'] is None
    assert cfg_kernel['proj']['layer'] is None
    # Resolve and load config
    cfg = {'kernel': cfg_kernel, 'backend': backend}
    kernel_loaded = resolve_config(cfg, tmp_path)['kernel']  # implicitly calls _load_kernel_config
    # Call kernels (smoke test both component kernels, original and loaded)
    deep_kernel.kernel_a(X, X)
    deep_kernel.kernel_b(X, X)
    kernel_loaded.kernel_a(X, X)
    kernel_loaded.kernel_b(X, X)
    # Final checks
    assert isinstance(kernel_loaded.proj, (torch.nn.Module, tf.keras.Model))
    if backend == 'tensorflow':
        assert pytest.approx(deep_kernel.eps.numpy(), abs=1e-4) == kernel_loaded.eps.numpy()
    else:
        assert pytest.approx(deep_kernel.eps.detach().numpy(), abs=1e-4) == kernel_loaded.eps.detach().numpy()
    assert kernel_loaded.kernel_a.sigma == deep_kernel.kernel_a.sigma
    assert kernel_loaded.kernel_b.sigma == deep_kernel.kernel_b.sigma
@parametrize('preprocess_fn', [preprocess_uae, preprocess_hiddenoutput])
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd, prefix='data_')
def test_save_preprocess_drift(data, preprocess_fn, tmp_path, backend):
    """
    Test saving/loading of the inbuilt `preprocess_drift` preprocessing functions when containing a `model`, with the
    `model` either being a simple tf/torch model, or a `HiddenOutput` class.
    """
    # The keops backend shares the pytorch preprocessing utilities
    registry_str = 'tensorflow' if backend == 'tensorflow' else 'pytorch'
    # Save preprocess_fn to config
    filepath = tmp_path
    X_ref, X_h0 = data
    input_shape = (X_ref.shape[1],)
    cfg_preprocess = _save_preprocess_config(preprocess_fn, input_shape=input_shape, filepath=filepath)
    cfg_preprocess = _path2str(cfg_preprocess)
    cfg_preprocess = PreprocessConfig(**cfg_preprocess).dict()  # pydantic validation
    assert cfg_preprocess['src'] == '@cd.' + registry_str + '.preprocess.preprocess_drift'
    assert cfg_preprocess['model']['src'] == 'preprocess_fn/model'
    # TODO - check layer details here once implemented
    # Resolve and load preprocess config
    cfg = {'preprocess_fn': cfg_preprocess, 'backend': backend}
    preprocess_fn_load = resolve_config(cfg, tmp_path)['preprocess_fn']  # tests _load_preprocess_config implicitly
    if backend == 'tensorflow':
        assert preprocess_fn_load.func.__name__ == 'preprocess_drift'
        assert isinstance(preprocess_fn_load.keywords['model'], tf.keras.Model)
    else:  # pytorch and keops backend
        assert preprocess_fn_load.func.__name__ == 'preprocess_drift'
        assert isinstance(preprocess_fn_load.keywords['model'], nn.Module)
@parametrize('preprocess_fn', [preprocess_simple, preprocess_simple_with_kwargs])
def test_save_preprocess_custom(preprocess_fn, tmp_path):
    """
    Test saving/loading of custom preprocessing functions, without and with kwargs.
    """
    # Save preprocess_fn to config (custom functions are dilled to file)
    filepath = tmp_path
    cfg_preprocess = _save_preprocess_config(preprocess_fn, input_shape=None, filepath=filepath)
    cfg_preprocess = _path2str(cfg_preprocess)
    cfg_preprocess = PreprocessConfig(**cfg_preprocess).dict()  # pydantic validation
    assert tmp_path.joinpath(cfg_preprocess['src']).is_file()
    assert cfg_preprocess['src'] == os.path.join('preprocess_fn', 'function.dill')
    if isinstance(preprocess_fn, partial):  # kwargs expected
        assert cfg_preprocess['kwargs'] == preprocess_fn.keywords
    else:  # no kwargs expected
        assert cfg_preprocess['kwargs'] == {}
    # Resolve and load preprocess config
    cfg = {'preprocess_fn': cfg_preprocess}
    preprocess_fn_load = resolve_config(cfg, tmp_path)['preprocess_fn']  # tests _load_preprocess_config implicitly
    if isinstance(preprocess_fn, partial):
        # For a partial, both the wrapped function and its keywords should round-trip
        assert preprocess_fn_load.func == preprocess_fn.func
        assert preprocess_fn_load.keywords == preprocess_fn.keywords
    else:
        assert preprocess_fn_load == preprocess_fn
@parametrize('preprocess_fn', [preprocess_nlp])
@parametrize_with_cases("data", cases=TextData.movie_sentiment_data, prefix='data_')
def test_save_preprocess_nlp(data, preprocess_fn, tmp_path, backend):
    """
    Test saving/loading of the inbuilt `preprocess_drift` preprocessing functions when containing a `model`, text
    `tokenizer` and text `embedding` model.
    """
    # The keops backend shares the pytorch preprocessing utilities
    registry_str = 'tensorflow' if backend == 'tensorflow' else 'pytorch'
    # Save preprocess_fn to config
    filepath = tmp_path
    cfg_preprocess = _save_preprocess_config(preprocess_fn,
                                             input_shape=(768,),  # hardcoded to bert-base-cased for now
                                             filepath=filepath)
    cfg_preprocess = _path2str(cfg_preprocess)
    cfg_preprocess = PreprocessConfig(**cfg_preprocess).dict()  # pydantic validation
    assert cfg_preprocess['src'] == '@cd.' + registry_str + '.preprocess.preprocess_drift'
    assert cfg_preprocess['embedding']['src'] == 'preprocess_fn/embedding'
    assert cfg_preprocess['tokenizer']['src'] == 'preprocess_fn/tokenizer'
    assert tmp_path.joinpath(cfg_preprocess['preprocess_batch_fn']).is_file()
    assert cfg_preprocess['preprocess_batch_fn'] == os.path.join('preprocess_fn', 'preprocess_batch_fn.dill')
    # When the model is a bare embedding, no separate 'model' entry is saved
    if isinstance(preprocess_fn.keywords['model'], (TransformerEmbedding_tf, TransformerEmbedding_pt)):
        assert cfg_preprocess['model'] is None
    else:
        assert cfg_preprocess['model']['src'] == 'preprocess_fn/model'
    # Resolve and load preprocess config
    cfg = {'preprocess_fn': cfg_preprocess, 'backend': backend}
    preprocess_fn_load = resolve_config(cfg, tmp_path)['preprocess_fn']  # tests _load_preprocess_config implicitly
    assert isinstance(preprocess_fn_load.keywords['tokenizer'], type(preprocess_fn.keywords['tokenizer']))
    assert isinstance(preprocess_fn_load.keywords['model'], type(preprocess_fn.keywords['model']))
    # Extract the embedding (the first layer/child of the encoder when wrapped) for comparison
    if isinstance(preprocess_fn.keywords['model'], (TransformerEmbedding_tf, TransformerEmbedding_pt)):
        emb = preprocess_fn.keywords['model']
        emb_load = preprocess_fn_load.keywords['model']
    else:
        if backend == 'tensorflow':
            emb = preprocess_fn.keywords['model'].encoder.layers[0]
            emb_load = preprocess_fn_load.keywords['model'].encoder.layers[0]
        else:  # pytorch and keops backends
            emb = list(preprocess_fn.keywords['model'].encoder.children())[0]
            emb_load = list(preprocess_fn_load.keywords['model'].encoder.children())[0]
    assert isinstance(emb_load.model, type(emb.model))
    assert emb_load.emb_type == emb.emb_type
    assert emb_load.hs_emb.keywords['layers'] == emb.hs_emb.keywords['layers']
def test_nested_value():
    """
    Unit test for _get_nested_value and _set_nested_value: values written at a
    nested key path should be readable back via the same path.
    """
    nested = {'dict2': {'dict3': {}}}
    writes = [
        (['dict2', 'dict3', 'a string'], 'hello'),
        (['a float'], 42.0),
        (['dict2', 'a list'], [1, 2, 3]),
    ]
    for path, value in writes:
        _set_nested_value(nested, path, value)
    # Each written value should be retrievable both via the helper and direct indexing
    assert _get_nested_value(nested, ['dict2', 'dict3', 'a string']) == nested['dict2']['dict3']['a string']
    assert _get_nested_value(nested, ['a float']) == nested['a float']
    assert _get_nested_value(nested, ['dict2', 'a list']) == nested['dict2']['a list']
def test_replace():
    """
    A unit test for _replace: every occurrence of a value (here ``None``) in a
    nested dict should be substituted, with all other entries left untouched.
    """
    original = {
        'key1': 'key1',
        'key7': None,
        'dict2': {
            'key2': 'key2',
            'key4': None,
            'dict3': {
                'key5': 'key5',
                'key6': None
            }
        }
    }
    replaced = _replace(original, None, 'None')
    # All None's, at every nesting depth, should have become the string 'None'
    assert replaced['key7'] == 'None'
    assert replaced['dict2']['key4'] == 'None'
    assert replaced['dict2']['dict3']['key6'] == 'None'
    # Non-None entries are unchanged
    assert replaced['key1'] == original['key1']
def test_path2str(tmp_path):
    """
    A unit test for _path2str: Path objects nested inside a config dict are
    converted to posix strings, optionally resolved to absolute paths.
    """
    cfg = {'dict': {'a path': tmp_path}}
    # Default: path converted to a (possibly relative) posix string
    rel_path = _path2str(cfg)['dict']['a path']
    assert isinstance(rel_path, str)
    assert rel_path == str(tmp_path.as_posix())
    # absolute=True: path resolved before conversion
    abs_path = _path2str(cfg, absolute=True)['dict']['a path']
    assert isinstance(abs_path, str)
    assert abs_path == str(tmp_path.resolve().as_posix())
def test_int2str_keys():
    """
    A unit test for _int2str_keys: integer dict keys (at any nesting level) are
    converted to their string representation, other items left untouched.
    """
    cfg = {
        'dict': {'0': 'A', '1': 3, 2: 'C'},
        3: 'D',
        '4': 'E'
    }
    cfg_fixed = _int2str_keys(cfg)
    # The int keys (2 and 3) should now be '2' and '3', with values unchanged
    assert cfg['dict'].pop(2) == cfg_fixed['dict'].pop('2')
    assert cfg.pop(3) == cfg_fixed.pop('3')
    # With the int-keyed items popped from both dicts, the remainder should match
    assert cfg == cfg_fixed
    assert cfg  # sanity check: the remaining (already-string-keyed) items are non-empty
def generic_function(x: float, add: float = 0.0, invert: bool = True):
    """
    Toy function used by the serialization tests below.

    Returns ``1/x + add`` when `invert` is True (the default), else ``x + add``.
    """
    base = 1 / x if invert else x
    return base + add
@parametrize('function', [generic_function])
def test_serialize_function_partial(function, tmp_path):
    """
    Unit tests for _serialize_function, with a functools.partial function.

    The wrapped function is expected to be dilled to file, with the partial's
    keywords returned separately, such that applying the returned kwargs to the
    reloaded function reproduces the partial's behaviour.
    """
    partial_func = partial(function, invert=False, add=1.0)
    src, kwargs = _serialize_object(partial_func, base_path=tmp_path, local_path=Path('function'))
    filepath = tmp_path.joinpath('function.dill')
    assert filepath.is_file()
    with open(filepath, 'rb') as f:
        partial_func_load = dill.load(f)
    # Reloaded function + returned kwargs should reproduce the original partial
    x = 2.0
    assert partial_func_load(x, **kwargs) == partial_func(x)
def test_serialize_function_registry(tmp_path):
    """
    Unit tests for _serialize_object with a function that is pre-registered in the
    alibi-detect registry: it should be serialized by registry reference (an '@'
    prefixed string), not dilled to file, and with no kwargs.
    """
    registry_ref = 'cd.tensorflow.preprocess.preprocess_drift'
    registered_fn = registry.get(registry_ref)
    src, kwargs = _serialize_object(registered_fn, base_path=tmp_path, local_path=Path('function'))
    assert src == '@' + registry_ref
    assert kwargs == {}
def test_registry_get():
    """
    Unit test for alibi_detect.utils.registry.get(). This will make more sense once we have a more automated
    process for pre-registering alibi-detect objects, as then can compare against list of objects we wish to register.
    """
    for k, v in REGISTERED_OBJECTS.items():
        obj = registry.get(k)
        # `is` (identity) is the idiomatic comparison for type objects (flake8 E721)
        assert type(obj) is type(v)
def test_set_dtypes(backend):
    """
    Unit test for _set_dtypes: dtype strings in a config dict are resolved
    in-place to the corresponding framework dtype objects.
    """
    dtype_strs = {'tensorflow': 'tf.float32', 'pytorch': 'torch.float32'}
    expected_dtypes = {'tensorflow': tf.float32, 'pytorch': torch.float32}
    if backend not in dtype_strs:
        pytest.skip('Only test set_dtypes for tensorflow and pytorch.')
    cfg = {
        'preprocess_fn': {
            'dtype': dtype_strs[backend]
        }
    }
    _set_dtypes(cfg)
    # The dtype string should have been resolved in-place to the framework dtype
    assert cfg['preprocess_fn']['dtype'] == expected_dtypes[backend]
def test_cleanup(tmp_path):
    """
    Test that the filepath given to save_detector is deleted in the event of an error whilst saving.
    Also check that the error is caught and raised.
    """
    # Build a detector, then sabotage it so that saving is guaranteed to fail
    X_ref = np.random.normal(size=(5, 1))
    detector = KSDrift(X_ref)
    detector.preprocess_fn = detector.x_ref  # an ndarray is not a valid preprocess_fn
    # Saving should surface the failure as a RuntimeError...
    with pytest.raises(RuntimeError):
        save_detector(detector, tmp_path)
    # ...and the partially-written save directory should have been removed
    assert not tmp_path.is_dir()
| 56,177 | 40.065789 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/tests/test_validate.py | import numpy as np
import pytest
from pydantic import ValidationError
from alibi_detect.saving import validate_config
from alibi_detect.saving.schemas import KernelConfig
from alibi_detect.saving.saving import X_REF_FILENAME
from alibi_detect.version import __version__
from copy import deepcopy
import tensorflow as tf
import torch
# Define a detector config dict (a minimal, resolved MMDDrift config used as a fixture below)
mmd_cfg = {
    'meta': {
        'version': __version__,
    },
    'name': 'MMDDrift',
    'x_ref': np.array([[-0.30074928], [1.50240758], [0.43135768], [2.11295779], [0.79684913]]),
    'p_val': 0.05,
}
# Define a detector config dict without meta (as simple as it gets!)
mmd_cfg_nometa = deepcopy(mmd_cfg)
mmd_cfg_nometa.pop('meta')
@pytest.mark.parametrize('cfg', [mmd_cfg])
def test_validate_config(cfg):
    """
    Tests for validate_config: a valid config passes and retains its values, a version
    mismatch warns and sets a `version_warning` field, and an unrecognised detector
    name / field raises.
    """
    # Original cfg
    # Check original cfg doesn't raise errors
    cfg_full = validate_config(cfg, resolved=True)
    # Check cfg is returned with correct metadata
    meta = cfg_full.get('meta')  # pop as don't want to compare meta to cfg in next bit
    assert meta['version'] == __version__
    assert not meta.pop('version_warning')  # pop this one to remove from next check
    # Check remaining values of items in cfg unchanged
    for k, v in cfg.items():
        assert np.all((v == cfg_full[k]))  # use np.all to deal with x_ref comparision
    # Check original cfg doesn't raise errors in the unresolved case.
    # NOTE: deepcopy (not dict.copy) is used for the copies below, so that mutating the
    # nested 'meta' dict of a copy cannot leak back into the shared `cfg` fixture.
    cfg_unres = deepcopy(cfg)
    cfg_unres['x_ref'] = X_REF_FILENAME
    _ = validate_config(cfg_unres)
    assert not cfg.get('meta').get('version_warning')
    # Check warning raised and warning field added if version different
    cfg_err = deepcopy(cfg)
    cfg_err['meta']['version'] = '0.1.x'
    with pytest.warns(Warning):  # error will be raised if a warning IS NOT raised
        cfg_err = validate_config(cfg_err, resolved=True)
    assert cfg_err.get('meta').get('version_warning')
    # Check ValueError raised if name unrecognised
    cfg_err = deepcopy(cfg)
    cfg_err['name'] = 'MMDDriftWrong'
    with pytest.raises(ValueError):
        cfg_err = validate_config(cfg_err, resolved=True)
    assert not cfg_err.get('meta').get('version_warning')
    # Check ValidationError raised if unrecognised field or type wrong
    cfg_err = deepcopy(cfg)
    cfg_err['p_val'] = [cfg['p_val']]  # p_val should be float not list
    with pytest.raises(ValidationError):
        cfg_err = validate_config(cfg_err, resolved=True)
    assert not cfg_err.get('meta').get('version_warning')
    cfg_err = deepcopy(cfg)
    cfg_err['wrong_var'] = 42.0
    with pytest.raises(ValidationError):
        cfg_err = validate_config(cfg_err, resolved=True)
    assert not cfg_err.get('meta').get('version_warning')
@pytest.mark.parametrize('cfg', [mmd_cfg_nometa])
def test_validate_config_wo_meta(cfg):
    """A config without a 'meta' dict should validate without error."""
    # Resolved case
    _ = validate_config(cfg, resolved=True)
    # Unresolved case: x_ref referenced by filename rather than given as an array
    unresolved_cfg = cfg.copy()
    unresolved_cfg['x_ref'] = X_REF_FILENAME
    _ = validate_config(unresolved_cfg)
@pytest.mark.parametrize('sigma', [
    0.5,
    [0.5, 1.0],
    None
])
@pytest.mark.parametrize('flavour', ['tensorflow', 'pytorch'])
def test_validate_kernel_and_coerce_2_tensor(flavour, sigma):
    """
    Pass a kernel config through the KernelConfig pydantic model. This implicitly
    tests the coerce_2_tensor validator.
    """
    # Define a kernel config
    kernel_cfg = {
        'src': f'@utils.{flavour}.kernels.GaussianRBF',
        'flavour': flavour,
        'sigma': sigma
    }
    # Pass through validation and check results
    kernel_cfg_val = KernelConfig(**kernel_cfg).dict()
    assert kernel_cfg_val['src'] == kernel_cfg['src']
    assert kernel_cfg_val['flavour'] == flavour
    # A None sigma passes through untouched; otherwise it should be coerced to a
    # tensor of the requested flavour
    if sigma is None:
        assert kernel_cfg_val['sigma'] is None
    else:
        if flavour == 'tensorflow':
            assert isinstance(kernel_cfg_val['sigma'], tf.Tensor)
        else:
            assert isinstance(kernel_cfg_val['sigma'], torch.Tensor)
| 3,998 | 32.889831 | 95 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/_pytorch/loading.py | import logging
import os
from importlib import import_module
from pathlib import Path
from typing import Callable, Optional, Union, Type
import dill
import torch
import torch.nn as nn
from alibi_detect.cd.pytorch import UAE, HiddenOutput
from alibi_detect.cd.pytorch.preprocess import _Encoder
from alibi_detect.models.pytorch import TransformerEmbedding
from alibi_detect.utils.pytorch.kernels import DeepKernel
logger = logging.getLogger(__name__)
def load_model(filepath: Union[str, os.PathLike],
               layer: Optional[int] = None,
               ) -> nn.Module:
    """
    Load PyTorch model.

    Parameters
    ----------
    filepath
        Saved model filepath (a directory containing ``model.pt``).
    layer
        Optional index of a hidden layer to extract. If not `None`, a
        :py:class:`~alibi_detect.cd.pytorch.HiddenOutput` model is returned.

    Returns
    -------
    Loaded model.
    """
    model_file = Path(filepath) / 'model.pt'
    # dill is used as the pickle module, matching how models are serialized on save
    model = torch.load(model_file, pickle_module=dill)
    if isinstance(layer, int):
        # Wrap the model so that the output of the requested hidden layer is returned
        model = HiddenOutput(model, layer=layer)
    return model
def prep_model_and_emb(model: nn.Module, emb: Optional[TransformerEmbedding]) -> nn.Module:
    """
    Function to perform final preprocessing of model (and/or embedding) before it is passed to preprocess_drift.

    Parameters
    ----------
    model
        A compatible model.
    emb
        An optional text embedding model.

    Returns
    -------
    The final model ready to passed to preprocess_drift.
    """
    # Unwrap an existing UAE so we don't end up with a UAE nested inside a UAE
    if isinstance(model, UAE):
        model = model.encoder
    # If a text embedding is given, chain it in front of the model
    if emb is not None:
        model = _Encoder(emb, mlp=model)
    return UAE(encoder_net=model)
def load_kernel_config(cfg: dict) -> Callable:
    """
    Loads a kernel from a kernel config dict.

    Parameters
    ----------
    cfg
        A kernel config dict. (see pydantic schema's).

    Returns
    -------
    The kernel.
    """
    if 'src' in cfg:  # Standard kernel config
        kernel = cfg.pop('src')
        # If the resolved object supports config-based construction, build it from the
        # remaining config entries; otherwise return the resolved object as-is.
        if hasattr(kernel, 'from_config'):
            kernel = kernel.from_config(cfg)
        return kernel
    if 'proj' in cfg:  # DeepKernel config
        # Recursively load the two component kernels (unless the 'rbf' default is requested)
        for component in ('kernel_a', 'kernel_b'):
            if cfg[component] != 'rbf':
                cfg[component] = load_kernel_config(cfg[component])
        # Assemble deep kernel
        return DeepKernel.from_config(cfg)
    raise ValueError('Unable to process kernel. The kernel config dict must either be a `KernelConfig` with a '
                     '`src` field, or a `DeepkernelConfig` with a `proj` field.)')
def load_optimizer(cfg: dict) -> Type[torch.optim.Optimizer]:
    """
    Imports a PyTorch torch.optim.Optimizer class from an optimizer config dict.

    Parameters
    ----------
    cfg
        The optimizer config dict. Must contain a 'class_name' entry naming a class
        in `torch.optim` (e.g. 'Adam').

    Returns
    -------
    The loaded optimizer class.

    Raises
    ------
    ValueError
        If 'class_name' is missing, or does not name a `torch.optim` optimizer.
    """
    class_name = cfg.get('class_name')
    # Guard against a missing 'class_name': getattr(module, None) would otherwise raise
    # a confusing TypeError rather than a clear error message.
    if class_name is None:
        raise ValueError("The optimizer config dict must contain a 'class_name' field.")
    try:
        return getattr(import_module('torch.optim'), class_name)
    except AttributeError:
        raise ValueError(f"{class_name} is not a recognised optimizer in `torch.optim`.")
def load_embedding(src: str, embedding_type, layers) -> TransformerEmbedding:
    """
    Load a pre-trained PyTorch text embedding from a directory.
    See the `:py:class:~alibi_detect.models.pytorch.TransformerEmbedding` documentation for a
    full description of the `embedding_type` and `layers` kwargs.

    Parameters
    ----------
    src
        Name of or path to the model.
    embedding_type
        Type of embedding to extract. Needs to be one of pooler_output,
        last_hidden_state, hidden_state or hidden_state_cls.
    layers
        A list with int's referring to the hidden layers used to extract the embedding.

    Returns
    -------
    The loaded embedding.
    """
    # Construction is delegated entirely to TransformerEmbedding
    return TransformerEmbedding(src, embedding_type=embedding_type, layers=layers)
| 4,175 | 27.8 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/_pytorch/conversions.py | import torch
def get_pt_dtype(dtype_str: str):
    """Return the ``torch`` dtype named by `dtype_str` (e.g. ``'float32'`` -> ``torch.float32``)."""
    return getattr(torch, dtype_str)
| 143 | 17 | 55 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/_pytorch/saving.py | import os
import logging
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Tuple, Union
import dill # dispatch table setting not done here as done in top-level saving.py file
import torch
import torch.nn as nn
from alibi_detect.cd.pytorch import UAE, HiddenOutput
from alibi_detect.models.pytorch import TransformerEmbedding
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
def save_model_config(model: Callable,
                      base_path: Path,
                      local_path: Path = Path('.')) -> Tuple[dict, Optional[dict]]:
    """
    Save a PyTorch model to a config dictionary. When a model has a text embedding model contained within it,
    this is extracted and saved separately.

    Parameters
    ----------
    model
        The model to save.
    base_path
        Base filepath to save to (the location of the `config.toml` file).
    local_path
        A local (relative) filepath to append to base_path.

    Returns
    -------
    A tuple containing the model and embedding config dicts.
    """
    cfg_model: Optional[Dict[str, Any]] = None
    cfg_embed: Optional[Dict[str, Any]] = None
    if isinstance(model, UAE):
        layers = list(model.encoder.children())
        if isinstance(layers[0], TransformerEmbedding):  # if UAE contains embedding and encoder
            # embedding
            embed = layers[0]
            cfg_embed = save_embedding_config(embed, base_path, local_path.joinpath('embedding'))
            # preprocessing encoder
            model = layers[1]
        else:  # If UAE is simply an encoder
            model = model.encoder
    elif isinstance(model, TransformerEmbedding):
        # A bare embedding: saved as the embedding config, with no model left to save
        cfg_embed = save_embedding_config(model, base_path, local_path.joinpath('embedding'))
        model = None
    elif isinstance(model, HiddenOutput):
        # Unwrap to the underlying model
        model = model.model
    elif isinstance(model, nn.Module):  # Last as TransformerEmbedding and UAE are nn.Module's
        model = model
    else:
        raise ValueError('Model not recognised, cannot save.')
    if model is not None:
        filepath = base_path.joinpath(local_path)
        save_model(model, filepath=filepath)
        cfg_model = {
            'flavour': Framework.PYTORCH.value,
            'src': local_path.joinpath('model')
        }
    return cfg_model, cfg_embed
def save_model(model: nn.Module,
               filepath: Union[str, os.PathLike],
               save_dir: Union[str, os.PathLike] = 'model') -> None:
    """
    Save PyTorch model.

    Parameters
    ----------
    model
        The PyTorch model to save.
    filepath
        Save directory.
    save_dir
        Name of folder to save to within the filepath directory.
    """
    # Create the target folder (filepath/save_dir) if it does not yet exist
    target_dir = Path(filepath) / save_dir
    if not target_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(target_dir))
        target_dir.mkdir(parents=True, exist_ok=True)
    # Serialize the model (dill is used as the pickle module)
    if not isinstance(model, nn.Module):
        raise ValueError('The extracted model to save is not a `nn.Module`. Cannot save.')
    torch.save(model, target_dir / 'model.pt', pickle_module=dill)
def save_embedding_config(embed: TransformerEmbedding,
                          base_path: Path,
                          local_path: Path = Path('.')) -> dict:
    """
    Save embeddings for text drift models.

    Parameters
    ----------
    embed
        Embedding model.
    base_path
        Base filepath to save to (the location of the `config.toml` file).
    local_path
        A local (relative) filepath to append to base_path.

    Returns
    -------
    The embedding config dict.
    """
    # create folder to save model in
    filepath = base_path.joinpath(local_path)
    if not filepath.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(filepath))
        filepath.mkdir(parents=True, exist_ok=True)
    # Populate config dict (embedding type, layers used, save location and flavour)
    cfg_embed: Dict[str, Any] = {}
    cfg_embed.update({'type': embed.emb_type})
    cfg_embed.update({'layers': embed.hs_emb.keywords['layers']})
    cfg_embed.update({'src': local_path})
    cfg_embed.update({'flavour': Framework.PYTORCH.value})
    # Save embedding model
    logger.info('Saving embedding model to {}.'.format(filepath))
    embed.model.save_pretrained(filepath)
    return cfg_embed
| 4,430 | 32.568182 | 109 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/_pytorch/__init__.py | from alibi_detect.utils.missing_optional_dependency import import_optional
# Optional imports: these resolve to the real functions when the pytorch (and other
# optional) dependencies are installed, and to informative error-raising stubs otherwise.
load_kernel_config_pt, load_embedding_pt, load_model_pt, load_optimizer_pt, \
    prep_model_and_emb_pt = import_optional(
        'alibi_detect.saving._pytorch.loading',
        names=['load_kernel_config',
               'load_embedding',
               'load_model',
               'load_optimizer',
               'prep_model_and_emb'])
save_model_config_pt = import_optional(
    'alibi_detect.saving._pytorch.saving',
    names=['save_model_config']
)
get_pt_dtype = import_optional(
    'alibi_detect.saving._pytorch.conversions',
    names=['get_pt_dtype']
)
# Public API of this subpackage
__all__ = [
    "load_kernel_config_pt",
    "load_embedding_pt",
    "load_model_pt",
    "load_optimizer_pt",
    "prep_model_and_emb_pt",
    "save_model_config_pt",
    "get_pt_dtype"
]
| 836 | 26 | 77 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/_pytorch/tests/test_saving_pt.py | from pytest_cases import param_fixture, parametrize, parametrize_with_cases
from alibi_detect.saving.tests.datasets import ContinuousData
from alibi_detect.saving.tests.models import encoder_model
from alibi_detect.cd.pytorch import HiddenOutput as HiddenOutput_pt
from alibi_detect.saving.loading import _load_model_config, _load_optimizer_config
from alibi_detect.saving.saving import _path2str, _save_model_config
from alibi_detect.saving.schemas import ModelConfig
backend = param_fixture("backend", ['pytorch'])
# Note: The full save/load functionality of optimizers (inc. validation) is tested in test_save_classifierdrift.
def test_load_optimizer(backend):
    """
    Test _load_optimizer_config with a pytorch optimizer, when the `torch.optim.Optimizer` class name is specified.
    For pytorch, we expect a `torch.optim` class to be returned.
    """
    expected_name = 'Adam'
    optimizer = _load_optimizer_config({'class_name': expected_name}, backend=backend)
    # An uninstantiated optimizer class with the requested name should be returned
    assert isinstance(optimizer, type)
    assert optimizer.__name__ == expected_name
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd, prefix='data_')
@parametrize('model', [encoder_model])
@parametrize('layer', [None, -1])
def test_save_model_pt(data, model, layer, tmp_path):
    """
    Unit test for _save_model_config and _load_model_config with pytorch model.
    """
    # Save model
    filepath = tmp_path
    input_shape = (data[0].shape[1],)
    cfg_model, _ = _save_model_config(model, base_path=filepath, input_shape=input_shape)
    cfg_model = _path2str(cfg_model)
    cfg_model = ModelConfig(**cfg_model).dict()
    assert tmp_path.joinpath('model').is_dir()
    assert tmp_path.joinpath('model/model.pt').is_file()
    # Adjust config
    cfg_model['src'] = tmp_path.joinpath('model')  # Need to manually set to absolute path here
    if layer is not None:
        cfg_model['layer'] = layer
    # Load model
    model_load = _load_model_config(cfg_model)
    # With no layer requested the original model type is returned; with a layer, a HiddenOutput wrapper
    if layer is None:
        assert isinstance(model_load, type(model))
    else:
        assert isinstance(model_load, HiddenOutput_pt)
| 2,131 | 38.481481 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/_tensorflow/loading.py | import logging
import os
from importlib import import_module
import warnings
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Union, Type
import dill
import tensorflow as tf
from tensorflow_probability.python.distributions.distribution import \
Distribution
from transformers import AutoTokenizer
from alibi_detect.ad import AdversarialAE, ModelDistillation
from alibi_detect.ad.adversarialae import DenseHidden
from alibi_detect.cd import (ChiSquareDrift, ClassifierDrift, KSDrift, MMDDrift, TabularDrift)
from alibi_detect.cd.tensorflow import UAE, HiddenOutput
from alibi_detect.cd.tensorflow.preprocess import _Encoder
from alibi_detect.models.tensorflow import PixelCNN, TransformerEmbedding
from alibi_detect.models.tensorflow.autoencoder import (AE, AEGMM, VAE, VAEGMM,
DecoderLSTM,
EncoderLSTM, Seq2Seq)
from alibi_detect.od import (LLR, IForest, Mahalanobis, OutlierAE,
OutlierAEGMM, OutlierProphet, OutlierSeq2Seq,
OutlierVAE, OutlierVAEGMM, SpectralResidual)
from alibi_detect.od.llr import build_model
from alibi_detect.utils.tensorflow.kernels import DeepKernel
from alibi_detect.utils.frameworks import Framework
# Below imports are used for legacy loading, and will be removed (or moved to utils/loading.py) in the future
from alibi_detect.version import __version__
from alibi_detect.base import Detector
from alibi_detect.saving._typing import VALID_DETECTORS
logger = logging.getLogger(__name__)
def load_model(filepath: Union[str, os.PathLike],
               filename: str = 'model',
               custom_objects: dict = None,
               layer: Optional[int] = None,
               ) -> tf.keras.Model:
    """
    Load TensorFlow model.

    Parameters
    ----------
    filepath
        Saved model directory.
    filename
        Name of saved model within the filepath directory.
    custom_objects
        Optional custom objects when loading the TensorFlow model.
    layer
        Optional index of a hidden layer to extract. If not `None`, a
        :py:class:`~alibi_detect.cd.tensorflow.HiddenOutput` model is returned.

    Returns
    -------
    Loaded model.
    """
    # TODO - update this to accept tf format - later PR.
    model_dir = Path(filepath)
    model_name = filename + '.h5'
    # Verify a matching (non-hidden) .h5 file exists in the directory
    available = {f.name for f in model_dir.glob('[!.]*.h5')}
    if model_name not in available:
        raise FileNotFoundError(f'{model_name} not found in {model_dir.resolve()}.')
    model = tf.keras.models.load_model(model_dir.joinpath(model_name), custom_objects=custom_objects)
    if isinstance(layer, int):
        # Wrap the model so that the output of the requested hidden layer is returned
        model = HiddenOutput(model, layer=layer)
    return model
def prep_model_and_emb(model: Callable, emb: Optional[TransformerEmbedding]) -> Callable:
    """
    Perform the final preprocessing of a model (and optional text embedding) before it
    is handed to `preprocess_drift`.

    Parameters
    ----------
    model
        A compatible model.
    emb
        An optional text embedding model.

    Returns
    -------
    The final model, ready to be passed to `preprocess_drift`.
    """
    # Unwrap an existing UAE to its encoder so UAE's are never nested.
    if isinstance(model, UAE):
        model = model.encoder
    # With a text embedding present, chain it in front of the model and wrap in a UAE.
    if emb is not None:
        model = UAE(encoder_net=_Encoder(emb, mlp=model))
    return model
def load_kernel_config(cfg: dict) -> Callable:
    """
    Load a kernel from a kernel config dict.

    Parameters
    ----------
    cfg
        A kernel config dict (see the pydantic schemas).

    Returns
    -------
    The kernel.
    """
    if 'src' in cfg:
        # Standard kernel config: `src` holds the kernel object/class itself.
        kernel = cfg.pop('src')
        if hasattr(kernel, 'from_config'):
            kernel = kernel.from_config(cfg)
        return kernel
    if 'proj' in cfg:
        # DeepKernel config: recursively load the two component kernels first
        # ('rbf' is a string sentinel handled downstream by DeepKernel itself).
        for key in ('kernel_a', 'kernel_b'):
            if cfg[key] != 'rbf':
                cfg[key] = load_kernel_config(cfg[key])
        return DeepKernel.from_config(cfg)
    raise ValueError('Unable to process kernel. The kernel config dict must either be a `KernelConfig` with a '
                     '`src` field, or a `DeepkernelConfig` with a `proj` field.)')
def load_optimizer(cfg: dict) -> Union[Type[tf.keras.optimizers.Optimizer], tf.keras.optimizers.Optimizer]:
    """
    Load a TensorFlow optimizer from an optimizer config dict.

    Parameters
    ----------
    cfg
        The optimizer config dict.

    Returns
    -------
    The loaded optimizer, either as an instantiated object (if `cfg` is a tensorflow optimizer config dict), otherwise \
    as an uninstantiated class.
    """
    class_name = cfg.get('class_name')
    # A 'config' entry means `cfg` is a full tensorflow optimizer config dict,
    # which can be deserialized into an optimizer instance directly.
    if cfg.get('config') is not None:
        return tf.keras.optimizers.deserialize(cfg)
    # Otherwise only a class name was given: return the uninstantiated class.
    try:
        return getattr(import_module('tensorflow.keras.optimizers'), class_name)
    except AttributeError:
        raise ValueError(f"{class_name} is not a recognised optimizer in `tensorflow.keras.optimizers`.")
def load_embedding(src: str, embedding_type, layers) -> TransformerEmbedding:
    """
    Load a pre-trained tensorflow text embedding from a directory.

    See the `:py:class:~alibi_detect.models.tensorflow.TransformerEmbedding` documentation
    for a full description of the `embedding_type` and `layers` kwargs.

    Parameters
    ----------
    src
        Name of or path to the model.
    embedding_type
        Type of embedding to extract. Needs to be one of pooler_output,
        last_hidden_state, hidden_state or hidden_state_cls.
    layers
        A list with int's referring to the hidden layers used to extract the embedding.

    Returns
    -------
    The loaded embedding.
    """
    return TransformerEmbedding(src, embedding_type=embedding_type, layers=layers)
#######################################################################################################
# TODO: Everything below here is legacy loading code, and will be removed in the future
#######################################################################################################
def load_detector_legacy(filepath: Union[str, os.PathLike], suffix: str, **kwargs) -> Detector:
    """
    Legacy function to load outlier, drift or adversarial detectors stored dill or pickle files.

    Warning
    -------
    This function will be removed in a future version.

    Parameters
    ----------
    filepath
        Load directory.
    suffix
        File suffix for meta and state files. Either `'.dill'` or `'.pickle'`.
    kwargs
        Optional extra arguments, e.g. `custom_objects` for model loading, or
        `preprocess_fn`/`preprocess_kwargs` to override stored preprocessing.

    Returns
    -------
    Loaded outlier or adversarial detector object.

    Raises
    ------
    FileNotFoundError
        If `filepath` is not an existing directory.
    NotImplementedError
        If the stored detector has a PyTorch backend or is not a supported detector type.
    """
    warnings.warn('Loading of meta.dill and meta.pickle files will be removed in a future version.', DeprecationWarning)
    if kwargs:
        k = list(kwargs.keys())
    else:
        k = []
    # check if path exists
    filepath = Path(filepath)
    if not filepath.is_dir():
        raise FileNotFoundError(f'{filepath} does not exist.')
    # load metadata
    meta_dict = dill.load(open(filepath.joinpath('meta' + suffix), 'rb'))
    # check version - a mismatch only warns, it does not abort loading
    try:
        if meta_dict['version'] != __version__:
            warnings.warn(f'Trying to load detector from version {meta_dict["version"]} when using version '
                          f'{__version__}. This may lead to breaking code or invalid results.')
    except KeyError:
        warnings.warn('Trying to load detector from an older version.'
                      'This may lead to breaking code or invalid results.')
    if 'backend' in list(meta_dict.keys()) and meta_dict['backend'] == Framework.PYTORCH:
        raise NotImplementedError('Detectors with PyTorch backend are not yet supported.')
    detector_name = meta_dict['name']
    if detector_name not in [detector for detector in VALID_DETECTORS]:
        raise NotImplementedError(f'{detector_name} is not supported by `load_detector`.')
    # load outlier detector specific parameters
    state_dict = dill.load(open(filepath.joinpath(detector_name + suffix), 'rb'))
    # Update the drift detector preprocess kwargs if state_dict is from an old alibi-detect version (<v0.10).
    # See https://github.com/SeldonIO/alibi-detect/pull/732
    if 'kwargs' in state_dict and 'other' in state_dict:  # A drift detector if both of these exist
        if 'x_ref_preprocessed' not in state_dict['kwargs']:  # if already exists then must have been saved w/ >=v0.10
            # Set x_ref_preprocessed to True
            state_dict['kwargs']['x_ref_preprocessed'] = True
            # Move `preprocess_x_ref` from `other` to `kwargs`
            state_dict['kwargs']['preprocess_x_ref'] = state_dict['other']['preprocess_x_ref']
    # initialize detector - dispatch on the stored detector name
    model_dir = filepath.joinpath('model')
    detector: Optional[Detector] = None  # to avoid mypy errors
    if detector_name == 'OutlierAE':
        ae = load_tf_ae(filepath)
        detector = init_od_ae(state_dict, ae)
    elif detector_name == 'OutlierVAE':
        vae = load_tf_vae(filepath, state_dict)
        detector = init_od_vae(state_dict, vae)
    elif detector_name == 'Mahalanobis':
        detector = init_od_mahalanobis(state_dict)  # type: ignore[assignment]
    elif detector_name == 'IForest':
        detector = init_od_iforest(state_dict)  # type: ignore[assignment]
    elif detector_name == 'OutlierAEGMM':
        aegmm = load_tf_aegmm(filepath, state_dict)
        detector = init_od_aegmm(state_dict, aegmm)
    elif detector_name == 'OutlierVAEGMM':
        vaegmm = load_tf_vaegmm(filepath, state_dict)
        detector = init_od_vaegmm(state_dict, vaegmm)
    elif detector_name == 'AdversarialAE':
        ae = load_tf_ae(filepath)
        custom_objects = kwargs['custom_objects'] if 'custom_objects' in k else None
        model = load_model(model_dir, custom_objects=custom_objects)
        model_hl = load_tf_hl(filepath, model, state_dict)
        detector = init_ad_ae(state_dict, ae, model, model_hl)
    elif detector_name == 'ModelDistillation':
        md = load_model(model_dir, filename='distilled_model')
        custom_objects = kwargs['custom_objects'] if 'custom_objects' in k else None
        model = load_model(model_dir, custom_objects=custom_objects)
        detector = init_ad_md(state_dict, md, model)
    elif detector_name == 'OutlierProphet':
        detector = init_od_prophet(state_dict)
    elif detector_name == 'SpectralResidual':
        detector = init_od_sr(state_dict)  # type: ignore[assignment]
    elif detector_name == 'OutlierSeq2Seq':
        seq2seq = load_tf_s2s(filepath, state_dict)
        detector = init_od_s2s(state_dict, seq2seq)
    elif detector_name in ['ChiSquareDrift', 'ClassifierDriftTF', 'KSDrift', 'MMDDriftTF', 'TabularDrift']:
        # Drift detectors share a common loading path: optional text embedding +
        # optional preprocessing model + detector-specific init function.
        emb, tokenizer = None, None
        if state_dict['other']['load_text_embedding']:
            emb, tokenizer = load_text_embed(filepath)
        try:  # legacy load_model behaviour was to return None if not found. Now it raises error, hence need try-except.
            model = load_model(model_dir, filename='encoder')
        except FileNotFoundError:
            logger.warning('No model found in {}, setting `model` to `None`.'.format(model_dir))
            model = None
        if detector_name == 'KSDrift':
            load_fn = init_cd_ksdrift
        elif detector_name == 'MMDDriftTF':
            load_fn = init_cd_mmddrift  # type: ignore[assignment]
        elif detector_name == 'ChiSquareDrift':
            load_fn = init_cd_chisquaredrift  # type: ignore[assignment]
        elif detector_name == 'TabularDrift':
            load_fn = init_cd_tabulardrift  # type: ignore[assignment]
        elif detector_name == 'ClassifierDriftTF':
            # Don't need try-except here since model is not optional for ClassifierDrift
            clf_drift = load_model(model_dir, filename='clf_drift')
            load_fn = partial(init_cd_classifierdrift, clf_drift)  # type: ignore[assignment]
        else:
            raise NotImplementedError
        detector = load_fn(state_dict, model, emb, tokenizer, **kwargs)  # type: ignore[assignment]
    elif detector_name == 'LLR':
        models = load_tf_llr(filepath, **kwargs)
        detector = init_od_llr(state_dict, models)
    else:
        raise NotImplementedError
    # TODO - add tests back in!
    detector.meta = meta_dict
    logger.info('Finished loading detector.')
    return detector
def load_tf_hl(filepath: Union[str, os.PathLike], model: tf.keras.Model, state_dict: dict) -> List[tf.keras.Model]:
    """
    Load hidden layer models for AdversarialAE.

    Parameters
    ----------
    filepath
        Saved model directory.
    model
        tf.keras classification model.
    state_dict
        Dictionary containing the detector's parameters.

    Returns
    -------
    List with loaded tf.keras models (empty if no hidden layer KLD config was stored).
    """
    model_dir = Path(filepath).joinpath('model')
    hidden_layer_kld = state_dict['hidden_layer_kld']
    if not hidden_layer_kld:
        return []
    models = []
    # One DenseHidden model per (hidden_layer, output_dim) pair; weights are stored
    # as separate checkpoints named model_hl_<i>.ckpt.
    for idx, (hidden_layer, output_dim) in enumerate(hidden_layer_kld.items()):
        hl_model = DenseHidden(model, hidden_layer, output_dim)
        hl_model.load_weights(model_dir.joinpath(f'model_hl_{idx}.ckpt'))
        models.append(hl_model)
    return models
def load_tf_ae(filepath: Union[str, os.PathLike]) -> Optional[tf.keras.Model]:
    """
    Load AE.

    Parameters
    ----------
    filepath
        Saved model directory.

    Returns
    -------
    Loaded AE, or `None` if no saved `.h5` model files are found in `filepath`/model.
    """
    model_dir = Path(filepath).joinpath('model')
    # Bail out (with a warning) if the model directory contains no .h5 files.
    if not [f.name for f in model_dir.glob('[!.]*.h5')]:
        logger.warning('No encoder, decoder or ae found in {}.'.format(model_dir))
        return None
    encoder_net = tf.keras.models.load_model(model_dir.joinpath('encoder_net.h5'))
    decoder_net = tf.keras.models.load_model(model_dir.joinpath('decoder_net.h5'))
    ae = AE(encoder_net, decoder_net)
    # Weights of the composed AE are stored separately as a checkpoint.
    ae.load_weights(model_dir.joinpath('ae.ckpt'))
    return ae
def load_tf_vae(filepath: Union[str, os.PathLike],
                state_dict: Dict) -> Optional[tf.keras.Model]:
    """
    Load VAE.

    Parameters
    ----------
    filepath
        Saved model directory.
    state_dict
        Dictionary containing the latent dimension and beta parameters.

    Returns
    -------
    Loaded VAE, or `None` if no saved `.h5` model files are found in `filepath`/model.
    """
    model_dir = Path(filepath).joinpath('model')
    # Bail out (with a warning) if the model directory contains no .h5 files.
    if not [f.name for f in model_dir.glob('[!.]*.h5')]:
        logger.warning('No encoder, decoder or vae found in {}.'.format(model_dir))
        return None
    encoder_net = tf.keras.models.load_model(model_dir.joinpath('encoder_net.h5'))
    decoder_net = tf.keras.models.load_model(model_dir.joinpath('decoder_net.h5'))
    vae = VAE(encoder_net, decoder_net, state_dict['latent_dim'], beta=state_dict['beta'])
    # Weights of the composed VAE are stored separately as a checkpoint.
    vae.load_weights(model_dir.joinpath('vae.ckpt'))
    return vae
def load_tf_aegmm(filepath: Union[str, os.PathLike],
                  state_dict: Dict) -> Optional[tf.keras.Model]:
    """
    Load AEGMM.

    Parameters
    ----------
    filepath
        Saved model directory.
    state_dict
        Dictionary containing the `n_gmm` and `recon_features` parameters.

    Returns
    -------
    Loaded AEGMM, or `None` if no saved `.h5` model files are found in `filepath`/model.
    """
    model_dir = Path(filepath).joinpath('model')
    # Bail out (with a warning) if the model directory contains no .h5 files.
    if not [f.name for f in model_dir.glob('[!.]*.h5')]:
        logger.warning('No encoder, decoder, gmm density net or aegmm found in {}.'.format(model_dir))
        return None
    encoder_net = tf.keras.models.load_model(model_dir.joinpath('encoder_net.h5'))
    decoder_net = tf.keras.models.load_model(model_dir.joinpath('decoder_net.h5'))
    gmm_density_net = tf.keras.models.load_model(model_dir.joinpath('gmm_density_net.h5'))
    aegmm = AEGMM(encoder_net, decoder_net, gmm_density_net, state_dict['n_gmm'], state_dict['recon_features'])
    # Weights of the composed AEGMM are stored separately as a checkpoint.
    aegmm.load_weights(model_dir.joinpath('aegmm.ckpt'))
    return aegmm
def load_tf_vaegmm(filepath: Union[str, os.PathLike],
                   state_dict: Dict) -> Optional[tf.keras.Model]:
    """
    Load VAEGMM.

    Parameters
    ----------
    filepath
        Saved model directory.
    state_dict
        Dictionary containing the `n_gmm`, `latent_dim`, `recon_features` and `beta` parameters.

    Returns
    -------
    Loaded VAEGMM, or `None` if no saved `.h5` model files are found in `filepath`/model.
    """
    model_dir = Path(filepath).joinpath('model')
    # Bail out (with a warning) if the model directory contains no .h5 files.
    if not [f.name for f in model_dir.glob('[!.]*.h5')]:
        logger.warning('No encoder, decoder, gmm density net or vaegmm found in {}.'.format(model_dir))
        return None
    encoder_net = tf.keras.models.load_model(model_dir.joinpath('encoder_net.h5'))
    decoder_net = tf.keras.models.load_model(model_dir.joinpath('decoder_net.h5'))
    gmm_density_net = tf.keras.models.load_model(model_dir.joinpath('gmm_density_net.h5'))
    vaegmm = VAEGMM(encoder_net, decoder_net, gmm_density_net, state_dict['n_gmm'],
                    state_dict['latent_dim'], state_dict['recon_features'], state_dict['beta'])
    # Weights of the composed VAEGMM are stored separately as a checkpoint.
    vaegmm.load_weights(model_dir.joinpath('vaegmm.ckpt'))
    return vaegmm
def load_tf_s2s(filepath: Union[str, os.PathLike],
                state_dict: Dict) -> Optional[tf.keras.Model]:
    """
    Load seq2seq TensorFlow model.

    Parameters
    ----------
    filepath
        Saved model directory.
    state_dict
        Dictionary containing the `latent_dim`, `shape`, `output_activation` and `beta` parameters.

    Returns
    -------
    Loaded seq2seq model, or `None` if no saved `.h5` model files are found in `filepath`/model.
    """
    model_dir = Path(filepath).joinpath('model')
    # Bail out (with a warning) if the model directory contains no .h5 files.
    if not [f.name for f in model_dir.glob('[!.]*.h5')]:
        logger.warning('No seq2seq or threshold estimation net found in {}.'.format(model_dir))
        return None
    # load threshold estimator net, initialize encoder and decoder and load seq2seq weights
    threshold_net = tf.keras.models.load_model(model_dir.joinpath('threshold_net.h5'), compile=False)
    latent_dim = state_dict['latent_dim']
    n_features = state_dict['shape'][-1]  # shape is (batch, seq_len, n_features)
    encoder_net = EncoderLSTM(latent_dim)
    decoder_net = DecoderLSTM(latent_dim, n_features, state_dict['output_activation'])
    seq2seq = Seq2Seq(encoder_net, decoder_net, threshold_net, n_features, beta=state_dict['beta'])
    seq2seq.load_weights(model_dir.joinpath('seq2seq.ckpt'))
    return seq2seq
def load_tf_llr(filepath: Union[str, os.PathLike], dist_s: Optional[Union[Distribution, PixelCNN]] = None,
                dist_b: Optional[Union[Distribution, PixelCNN]] = None, input_shape: Optional[tuple] = None):
    """
    Load LLR TensorFlow models or distributions.

    Parameters
    ----------
    filepath
        Saved model directory.
    dist_s
        TensorFlow distribution for semantic model.
    dist_b
        TensorFlow distribution for background model.
    input_shape
        Input shape of the model.

    Returns
    -------
    Tuple of (semantic distribution, background distribution, semantic model, background model).
    The two model entries are `None` when weights were loaded from full saved models rather
    than rebuilt from distributions.
    """
    model_dir = Path(filepath).joinpath('model')
    h5files = [f.name for f in model_dir.glob('[!.]*.h5')]
    # If separate semantic/background weight files exist, rebuild both models
    # from the supplied distributions and load the weights into them.
    if 'model_s.h5' in h5files and 'model_b.h5' in h5files:
        model_s, dist_s = build_model(dist_s, input_shape, str(model_dir.joinpath('model_s.h5').resolve()))
        model_b, dist_b = build_model(dist_b, input_shape, str(model_dir.joinpath('model_b.h5').resolve()))
        return dist_s, dist_b, model_s, model_b
    else:
        # Otherwise load full saved models directly; the background model is optional.
        dist_s = tf.keras.models.load_model(model_dir.joinpath('model.h5'), compile=False)
        if 'model_background.h5' in h5files:
            dist_b = tf.keras.models.load_model(model_dir.joinpath('model_background.h5'), compile=False)
        else:
            dist_b = None
        return dist_s, dist_b, None, None
def init_od_ae(state_dict: Dict,
               ae: tf.keras.Model) -> OutlierAE:
    """
    Initialize OutlierAE.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    ae
        Loaded AE.

    Returns
    -------
    Initialized OutlierAE instance.
    """
    od = OutlierAE(threshold=state_dict['threshold'], ae=ae)
    return od
def init_od_vae(state_dict: Dict,
                vae: tf.keras.Model) -> OutlierVAE:
    """
    Initialize OutlierVAE from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    vae
        Loaded VAE.

    Returns
    -------
    Initialized OutlierVAE instance.
    """
    return OutlierVAE(threshold=state_dict['threshold'],
                      score_type=state_dict['score_type'],
                      vae=vae,
                      samples=state_dict['samples'])
def init_ad_ae(state_dict: Dict,
               ae: tf.keras.Model,
               model: tf.keras.Model,
               model_hl: List[tf.keras.Model]) -> AdversarialAE:
    """
    Initialize AdversarialAE.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    ae
        Loaded AE.
    model
        Loaded classification model.
    model_hl
        List of tf.keras models of the classifier's hidden layers.

    Returns
    -------
    Initialized AdversarialAE instance.
    """
    ad = AdversarialAE(threshold=state_dict['threshold'],
                       ae=ae,
                       model=model,
                       model_hl=model_hl,
                       w_model_hl=state_dict['w_model_hl'],
                       temperature=state_dict['temperature'])
    return ad
def init_ad_md(state_dict: Dict,
               distilled_model: tf.keras.Model,
               model: tf.keras.Model) -> ModelDistillation:
    """
    Initialize a ModelDistillation adversarial detector from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    distilled_model
        Loaded distilled model.
    model
        Loaded classification model.

    Returns
    -------
    Initialized ModelDistillation instance.
    """
    return ModelDistillation(threshold=state_dict['threshold'],
                             distilled_model=distilled_model,
                             model=model,
                             temperature=state_dict['temperature'],
                             loss_type=state_dict['loss_type'])
def init_od_aegmm(state_dict: Dict,
                  aegmm: tf.keras.Model) -> OutlierAEGMM:
    """
    Initialize OutlierAEGMM from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    aegmm
        Loaded AEGMM.

    Returns
    -------
    Initialized OutlierAEGMM instance.
    """
    od = OutlierAEGMM(threshold=state_dict['threshold'],
                      aegmm=aegmm)
    # Restore the fitted GMM parameters (tensors if the detector was fit).
    gmm_params = ('phi', 'mu', 'cov', 'L', 'log_det_cov')
    for name in gmm_params:
        setattr(od, name, state_dict[name])
    if not all(tf.is_tensor(getattr(od, name)) for name in gmm_params):
        logger.warning('Loaded AEGMM detector has not been fit.')
    return od
def init_od_vaegmm(state_dict: Dict,
                   vaegmm: tf.keras.Model) -> OutlierVAEGMM:
    """
    Initialize OutlierVAEGMM from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    vaegmm
        Loaded VAEGMM.

    Returns
    -------
    Initialized OutlierVAEGMM instance.
    """
    od = OutlierVAEGMM(threshold=state_dict['threshold'],
                       vaegmm=vaegmm,
                       samples=state_dict['samples'])
    # Restore the fitted GMM parameters (tensors if the detector was fit).
    gmm_params = ('phi', 'mu', 'cov', 'L', 'log_det_cov')
    for name in gmm_params:
        setattr(od, name, state_dict[name])
    if not all(tf.is_tensor(getattr(od, name)) for name in gmm_params):
        logger.warning('Loaded VAEGMM detector has not been fit.')
    return od
def init_od_s2s(state_dict: Dict,
                seq2seq: tf.keras.Model) -> OutlierSeq2Seq:
    """
    Initialize OutlierSeq2Seq from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    seq2seq
        Loaded seq2seq model.

    Returns
    -------
    Initialized OutlierSeq2Seq instance.
    """
    # Stored shape is (batch, seq_len, n_features); only the last two are needed.
    _, seq_len, n_features = state_dict['shape']
    return OutlierSeq2Seq(n_features,
                          seq_len,
                          threshold=state_dict['threshold'],
                          seq2seq=seq2seq,
                          latent_dim=state_dict['latent_dim'],
                          output_activation=state_dict['output_activation'])
def load_text_embed(filepath: Union[str, os.PathLike], load_dir: str = 'model') \
        -> Tuple[TransformerEmbedding, Callable]:
    """
    Legacy function to load a text embedding model and its tokenizer.

    Parameters
    ----------
    filepath
        Base directory containing the saved embedding.
    load_dir
        Subdirectory of `filepath` the embedding was saved to.

    Returns
    -------
    Tuple of the loaded TransformerEmbedding and its tokenizer.
    """
    model_dir = Path(filepath).joinpath(load_dir)
    tokenizer = AutoTokenizer.from_pretrained(str(model_dir.resolve()))
    # Use a context manager so the file handle is closed (the previous
    # `dill.load(open(...))` form leaked the handle).
    with open(model_dir.joinpath('embedding.dill'), 'rb') as f:
        args = dill.load(f)
    emb = TransformerEmbedding(
        str(model_dir.resolve()), embedding_type=args['embedding_type'], layers=args['layers']
    )
    return emb, tokenizer
def init_preprocess(state_dict: Dict, model: Optional[Union[tf.keras.Model, tf.keras.Sequential]],
                    emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
        -> Tuple[Optional[Callable], Optional[dict]]:
    """
    Return the preprocessing function and its kwargs for a legacy drift detector.

    Explicit `preprocess_fn`/`preprocess_kwargs` passed via `kwargs` take priority over
    anything stored in `state_dict`. Returns `(None, None)` when no usable preprocessing
    configuration is available.
    """
    if kwargs:  # override defaults
        keys = list(kwargs.keys())
        preprocess_fn = kwargs['preprocess_fn'] if 'preprocess_fn' in keys else None
        preprocess_kwargs = kwargs['preprocess_kwargs'] if 'preprocess_kwargs' in keys else None
        return preprocess_fn, preprocess_kwargs
    elif model is not None and callable(state_dict['preprocess_fn']) \
            and isinstance(state_dict['preprocess_kwargs'], dict):
        preprocess_fn = state_dict['preprocess_fn']
        preprocess_kwargs = state_dict['preprocess_kwargs']
    else:
        return None, None
    keys = list(preprocess_kwargs.keys())
    if 'model' not in keys:
        raise ValueError('No model found for the preprocessing step.')
    # Legacy state stores 'model' as a string tag identifying how to rebuild it.
    if preprocess_kwargs['model'] == 'UAE':
        if emb is not None:
            # Chain the text embedding in front of the model before wrapping in a UAE.
            model = _Encoder(emb, mlp=model)
            preprocess_kwargs['tokenizer'] = tokenizer
        preprocess_kwargs['model'] = UAE(encoder_net=model)
    else:  # incl. preprocess_kwargs['model'] == 'HiddenOutput'
        preprocess_kwargs['model'] = model
    return preprocess_fn, preprocess_kwargs
def init_cd_classifierdrift(clf_drift: tf.keras.Model, state_dict: Dict, model: Optional[tf.keras.Model],
                            emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
        -> ClassifierDrift:
    """
    Initialize a ClassifierDrift detector from saved state.

    Parameters
    ----------
    clf_drift
        Model used for drift classification.
    state_dict
        Dictionary containing the parameter values.
    model
        Optional preprocessing model.
    emb
        Optional text embedding model.
    tokenizer
        Optional tokenizer for text drift.
    kwargs
        Kwargs optionally containing preprocess_fn and preprocess_kwargs.

    Returns
    -------
    Initialized ClassifierDrift instance.
    """
    # Rebuild the preprocessing step and inject it into the stored kwargs.
    preprocess_fn, preprocess_kwargs = init_preprocess(state_dict['other'], model, emb, tokenizer, **kwargs)
    if callable(preprocess_fn) and isinstance(preprocess_kwargs, dict):
        state_dict['kwargs']['preprocess_fn'] = partial(preprocess_fn, **preprocess_kwargs)
    # The optimizer was stored as a config/identifier and must be rehydrated.
    train_kwargs = state_dict['kwargs']['train_kwargs']
    train_kwargs['optimizer'] = tf.keras.optimizers.get(train_kwargs['optimizer'])
    cd = ClassifierDrift(*state_dict['args'].values(), clf_drift, **state_dict['kwargs'])
    # Restore fitted state on the backend detector.
    attrs = state_dict['other']
    cd._detector.n = attrs['n']
    cd._detector.skf = attrs['skf']
    return cd
def init_cd_chisquaredrift(state_dict: Dict, model: Optional[Union[tf.keras.Model, tf.keras.Sequential]],
                           emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
        -> ChiSquareDrift:
    """
    Initialize a ChiSquareDrift detector from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    model
        Optional preprocessing model.
    emb
        Optional text embedding model.
    tokenizer
        Optional tokenizer for text drift.
    kwargs
        Kwargs optionally containing preprocess_fn and preprocess_kwargs.

    Returns
    -------
    Initialized ChiSquareDrift instance.
    """
    # Rebuild the preprocessing step and inject it into the stored kwargs.
    preprocess_fn, preprocess_kwargs = init_preprocess(state_dict['other'], model, emb, tokenizer, **kwargs)
    if callable(preprocess_fn) and isinstance(preprocess_kwargs, dict):
        state_dict['kwargs']['preprocess_fn'] = partial(preprocess_fn, **preprocess_kwargs)
    cd = ChiSquareDrift(*state_dict['args'].values(), **state_dict['kwargs'])
    cd.n = state_dict['other']['n']  # restore number of instances seen
    return cd
def init_cd_tabulardrift(state_dict: Dict, model: Optional[Union[tf.keras.Model, tf.keras.Sequential]],
                         emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
        -> TabularDrift:
    """
    Initialize a TabularDrift detector from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    model
        Optional preprocessing model.
    emb
        Optional text embedding model.
    tokenizer
        Optional tokenizer for text drift.
    kwargs
        Kwargs optionally containing preprocess_fn and preprocess_kwargs.

    Returns
    -------
    Initialized TabularDrift instance.
    """
    # Rebuild the preprocessing step and inject it into the stored kwargs.
    preprocess_fn, preprocess_kwargs = init_preprocess(state_dict['other'], model, emb, tokenizer, **kwargs)
    if callable(preprocess_fn) and isinstance(preprocess_kwargs, dict):
        state_dict['kwargs']['preprocess_fn'] = partial(preprocess_fn, **preprocess_kwargs)
    cd = TabularDrift(*state_dict['args'].values(), **state_dict['kwargs'])
    cd.n = state_dict['other']['n']  # restore number of instances seen
    return cd
def init_cd_ksdrift(state_dict: Dict, model: Optional[Union[tf.keras.Model, tf.keras.Sequential]],
                    emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
        -> KSDrift:
    """
    Initialize a KSDrift detector from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    model
        Optional preprocessing model.
    emb
        Optional text embedding model.
    tokenizer
        Optional tokenizer for text drift.
    kwargs
        Kwargs optionally containing preprocess_fn and preprocess_kwargs.

    Returns
    -------
    Initialized KSDrift instance.
    """
    # Rebuild the preprocessing step and inject it into the stored kwargs.
    preprocess_fn, preprocess_kwargs = init_preprocess(state_dict['other'], model, emb, tokenizer, **kwargs)
    if callable(preprocess_fn) and isinstance(preprocess_kwargs, dict):
        state_dict['kwargs']['preprocess_fn'] = partial(preprocess_fn, **preprocess_kwargs)
    cd = KSDrift(*state_dict['args'].values(), **state_dict['kwargs'])
    cd.n = state_dict['other']['n']  # restore number of instances seen
    return cd
def init_cd_mmddrift(state_dict: Dict, model: Optional[Union[tf.keras.Model, tf.keras.Sequential]],
                     emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
        -> MMDDrift:
    """
    Initialize an MMDDrift detector from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    model
        Optional preprocessing model.
    emb
        Optional text embedding model.
    tokenizer
        Optional tokenizer for text drift.
    kwargs
        Kwargs optionally containing preprocess_fn and preprocess_kwargs.

    Returns
    -------
    Initialized MMDDrift instance.
    """
    # Rebuild the preprocessing step and inject it into the stored kwargs.
    preprocess_fn, preprocess_kwargs = init_preprocess(state_dict['other'], model, emb, tokenizer, **kwargs)
    if callable(preprocess_fn) and isinstance(preprocess_kwargs, dict):
        state_dict['kwargs']['preprocess_fn'] = partial(preprocess_fn, **preprocess_kwargs)
    cd = MMDDrift(*state_dict['args'].values(), **state_dict['kwargs'])
    # MMDDrift wraps a backend-specific detector; restore its instance count.
    cd._detector.n = state_dict['other']['n']
    return cd
def init_od_mahalanobis(state_dict: Dict) -> Mahalanobis:
    """
    Initialize a Mahalanobis outlier detector from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.

    Returns
    -------
    Initialized Mahalanobis instance.
    """
    od = Mahalanobis(threshold=state_dict['threshold'],
                     n_components=state_dict['n_components'],
                     std_clip=state_dict['std_clip'],
                     start_clip=state_dict['start_clip'],
                     max_n=state_dict['max_n'],
                     cat_vars=state_dict['cat_vars'],
                     ohe=state_dict['ohe'])
    # Restore the online state accumulated during fitting.
    for attr in ('d_abs', 'clip', 'mean', 'C', 'n'):
        setattr(od, attr, state_dict[attr])
    return od
def init_od_iforest(state_dict: Dict) -> IForest:
    """
    Initialize an isolation forest outlier detector from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.

    Returns
    -------
    Initialized IForest instance.
    """
    od = IForest(threshold=state_dict['threshold'])
    # Restore the fitted sklearn isolation forest.
    od.isolationforest = state_dict['isolationforest']
    return od
def init_od_prophet(state_dict: Dict) -> OutlierProphet:
    """
    Initialize an OutlierProphet detector from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.

    Returns
    -------
    Initialized OutlierProphet instance.
    """
    od = OutlierProphet(cap=state_dict['cap'])
    # Restore the fitted Prophet model.
    od.model = state_dict['model']
    return od
def init_od_sr(state_dict: Dict) -> SpectralResidual:
    """
    Initialize a spectral residual outlier detector from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.

    Returns
    -------
    Initialized SpectralResidual instance.
    """
    # All constructor arguments map 1:1 onto stored state entries.
    keys = ('threshold', 'window_amp', 'window_local', 'n_est_points', 'n_grad_points')
    return SpectralResidual(**{key: state_dict[key] for key in keys})
def init_od_llr(state_dict: Dict, models: tuple) -> LLR:
    """
    Initialize an LLR (likelihood ratio) outlier detector from saved state.

    Parameters
    ----------
    state_dict
        Dictionary containing the parameter values.
    models
        Tuple of (model, background model, semantic model, background build model),
        where the last two may be `None`.

    Returns
    -------
    Initialized LLR instance.
    """
    dist_s, dist_b, model_s, model_b = models
    od = LLR(threshold=state_dict['threshold'],
             model=dist_s,
             model_background=dist_b,
             log_prob=state_dict['log_prob'],
             sequential=state_dict['sequential'])
    # Attach the rebuilt semantic/background models when both are available.
    if model_s is not None and model_b is not None:
        od.model_s = model_s
        od.model_b = model_b
    return od
| 35,509 | 33.34236 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/_tensorflow/saving.py | import logging
import os
from functools import partial
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import dill # dispatch table setting not done here as done in top-level saving.py file
import tensorflow as tf
from tensorflow.keras.layers import Input, InputLayer
# Below imports are used for legacy saving, and will be removed (or moved to utils/loading.py) in the future
from alibi_detect.ad import AdversarialAE, ModelDistillation
from alibi_detect.cd import (ChiSquareDrift, ClassifierDrift, KSDrift,
MMDDrift, TabularDrift)
from alibi_detect.cd.tensorflow import UAE, HiddenOutput
from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF
from alibi_detect.cd.tensorflow.mmd import MMDDriftTF
from alibi_detect.models.tensorflow import TransformerEmbedding
from alibi_detect.od import (LLR, IForest, Mahalanobis, OutlierAE,
OutlierAEGMM, OutlierProphet, OutlierSeq2Seq,
OutlierVAE, OutlierVAEGMM, SpectralResidual)
from alibi_detect.utils._types import Literal
from alibi_detect.utils.tensorflow.kernels import GaussianRBF
from alibi_detect.utils.missing_optional_dependency import MissingDependency
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
def save_model_config(model: Callable,
                      base_path: Path,
                      input_shape: Optional[tuple],
                      local_path: Path = Path('.')) -> Tuple[dict, Optional[dict]]:
    """
    Save a TensorFlow model to a config dictionary. When a model has a text embedding model contained within it,
    this is extracted and saved separately.

    Parameters
    ----------
    model
        The model to save.
    base_path
        Base filepath to save to (the location of the `config.toml` file).
    input_shape
        The input dimensions of the model (after the optional embedding has been applied).
    local_path
        A local (relative) filepath to append to base_path.

    Returns
    -------
    A tuple containing the model and embedding config dicts (either may be `None`).

    Raises
    ------
    ValueError
        If `input_shape` is None for a combined embedding+model, or the model type is not recognised.
    """
    cfg_model: Optional[Dict[str, Any]] = None
    cfg_embed: Optional[Dict[str, Any]] = None
    if isinstance(model, UAE):
        if isinstance(model.encoder.layers[0], TransformerEmbedding):  # if UAE contains embedding and encoder
            if input_shape is None:
                raise ValueError('Cannot save combined embedding and model when `input_shape` is None.')
            # embedding - extracted and saved separately from the encoder
            embed = model.encoder.layers[0]
            cfg_embed = save_embedding_config(embed, base_path, local_path.joinpath('embedding'))
            # preprocessing encoder - rebuild as a plain Sequential taking the
            # embedding's output shape as input, so it can be saved standalone
            inputs = Input(shape=input_shape, dtype=tf.int64)
            model.encoder.call(inputs)
            shape_enc = (model.encoder.layers[0].output.shape[-1],)
            layers = [InputLayer(input_shape=shape_enc)] + model.encoder.layers[1:]
            model = tf.keras.Sequential(layers)
            _ = model(tf.zeros((1,) + shape_enc))  # build the model by calling it once
        else:  # If UAE is simply an encoder
            model = model.encoder
    elif isinstance(model, TransformerEmbedding):
        # Pure embedding: saved via its own config; no keras model to save.
        cfg_embed = save_embedding_config(model, base_path, local_path.joinpath('embedding'))
        model = None
    elif isinstance(model, HiddenOutput):
        model = model.model
    elif isinstance(model, tf.keras.Model):  # Last as TransformerEmbedding and UAE are tf.keras.Model's
        model = model
    else:
        raise ValueError('Model not recognised, cannot save.')
    if model is not None:
        filepath = base_path.joinpath(local_path)
        save_model(model, filepath=filepath.joinpath('model'))
        cfg_model = {
            'flavour': Framework.TENSORFLOW.value,
            'src': local_path.joinpath('model')
        }
    return cfg_model, cfg_embed
def save_model(model: tf.keras.Model,
               filepath: Union[str, os.PathLike],
               filename: str = 'model',
               save_format: Literal['tf', 'h5'] = 'h5') -> None:  # TODO - change to tf, later PR
    """
    Save a TensorFlow model to disk.

    Parameters
    ----------
    model
        The tf.keras.Model to save.
    filepath
        Save directory.
    filename
        Name of file to save to within the filepath directory.
    save_format
        The format to save to. 'tf' to save to the newer SavedModel format, 'h5' to save to the lighter-weight
        legacy hdf5 format.
    """
    # Make sure the target directory exists before saving.
    target_dir = Path(filepath)
    if not target_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(target_dir))
        target_dir.mkdir(parents=True, exist_ok=True)
    # For hdf5 the filename becomes part of the path; SavedModel uses the directory itself.
    save_path = target_dir.joinpath(filename + '.h5') if save_format == 'h5' else target_dir
    if not isinstance(model, tf.keras.Model):
        raise ValueError('The extracted model to save is not a `tf.keras.Model`. Cannot save.')
    model.save(save_path, save_format=save_format)
def save_embedding_config(embed: TransformerEmbedding,
                          base_path: Path,
                          local_path: Path = Path('.')) -> dict:
    """
    Save the transformer embedding used by text drift models and return its config.

    Parameters
    ----------
    embed
        Embedding model.
    base_path
        Base filepath to save to (the location of the `config.toml` file).
    local_path
        A local (relative) filepath to append to base_path.

    Returns
    -------
    Config dictionary describing how to reload the embedding.
    """
    # ensure the save directory exists
    save_dir = base_path.joinpath(local_path)
    if not save_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(save_dir))
        save_dir.mkdir(parents=True, exist_ok=True)
    # config needed to rebuild the embedding at load time
    cfg_embed: Dict[str, Any] = {
        'type': embed.emb_type,
        'layers': embed.hs_emb.keywords['layers'],
        'src': local_path,
        'flavour': Framework.TENSORFLOW.value,
    }
    logger.info('Saving embedding model to {}.'.format(save_dir))
    embed.model.save_pretrained(save_dir)
    return cfg_embed
def save_optimizer_config(optimizer: Union[tf.keras.optimizers.Optimizer, tf.keras.optimizers.legacy.Optimizer]):
    """
    Serialize a TensorFlow optimizer to its config dictionary.

    Parameters
    ----------
    optimizer
        The tensorflow optimizer to serialize.

    Returns
    -------
    The tensorflow optimizer's config dictionary.
    """
    cfg = tf.keras.optimizers.serialize(optimizer)
    return cfg
#######################################################################################################
# TODO: Everything below here is legacy saving code, and will be removed in the future
#######################################################################################################
def save_embedding_legacy(embed: TransformerEmbedding,
                          embed_args: dict,
                          filepath: Path) -> None:
    """
    Save the embedding model and its constructor kwargs for text drift models (legacy format).

    Parameters
    ----------
    embed
        Embedding model.
    embed_args
        Arguments for TransformerEmbedding module.
    filepath
        The save directory.
    """
    # ensure the save directory exists
    if not filepath.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(filepath))
        filepath.mkdir(parents=True, exist_ok=True)
    # the TransformerEmbedding kwargs are pickled next to the pretrained model
    emb_file = filepath.joinpath('embedding.dill')
    logger.info('Saving embedding model to {}.'.format(emb_file))
    embed.save_pretrained(filepath)
    with open(emb_file, 'wb') as f:
        dill.dump(embed_args, f)
def save_detector_legacy(detector, filepath):
    """
    Save a detector in the legacy dill-based format: metadata, detector-specific state and any
    associated TensorFlow models are written to `filepath`.

    Parameters
    ----------
    detector
        The detector instance to save. Must be one of the detector types handled below.
    filepath
        Directory (pathlib.Path) to save the detector state to.

    Raises
    ------
    NotImplementedError
        If the detector type has no legacy save method.
    """
    detector_name = detector.meta['name']
    # save metadata
    logger.info('Saving metadata and detector to {}'.format(filepath))
    with open(filepath.joinpath('meta.dill'), 'wb') as f:
        dill.dump(detector.meta, f)
    # save detector specific parameters
    # NOTE: the drift-detector branches additionally bind model/embed/embed_args/tokenizer
    # (and clf_drift for ClassifierDrift), which are consumed further below.
    if isinstance(detector, OutlierAE):
        state_dict = state_ae(detector)
    elif isinstance(detector, OutlierVAE):
        state_dict = state_vae(detector)
    elif isinstance(detector, Mahalanobis):
        state_dict = state_mahalanobis(detector)
    elif isinstance(detector, IForest):
        state_dict = state_iforest(detector)
    elif isinstance(detector, ChiSquareDrift):
        state_dict, model, embed, embed_args, tokenizer = state_chisquaredrift(detector)
    elif isinstance(detector, ClassifierDrift):
        state_dict, clf_drift, model, embed, embed_args, tokenizer = state_classifierdrift(detector)
    elif isinstance(detector, TabularDrift):
        state_dict, model, embed, embed_args, tokenizer = state_tabulardrift(detector)
    elif isinstance(detector, KSDrift):
        state_dict, model, embed, embed_args, tokenizer = state_ksdrift(detector)
    elif isinstance(detector, MMDDrift):
        state_dict, model, embed, embed_args, tokenizer = state_mmddrift(detector)
    elif isinstance(detector, OutlierAEGMM):
        state_dict = state_aegmm(detector)
    elif isinstance(detector, OutlierVAEGMM):
        state_dict = state_vaegmm(detector)
    elif isinstance(detector, AdversarialAE):
        state_dict = state_adv_ae(detector)
    elif isinstance(detector, ModelDistillation):
        state_dict = state_adv_md(detector)
    # OutlierProphet is an optional dependency: only isinstance-check it when it imported cleanly
    elif not isinstance(OutlierProphet, MissingDependency) and isinstance(detector, OutlierProphet):
        state_dict = state_prophet(detector)
    elif isinstance(detector, SpectralResidual):
        state_dict = state_sr(detector)
    elif isinstance(detector, OutlierSeq2Seq):
        state_dict = state_s2s(detector)
    elif isinstance(detector, LLR):
        state_dict = state_llr(detector)
    else:
        raise NotImplementedError('The %s detector does not have a legacy save method.' % detector_name)
    with open(filepath.joinpath(detector_name + '.dill'), 'wb') as f:
        dill.dump(state_dict, f)
    # save detector specific TensorFlow models
    model_dir = filepath.joinpath('model')
    if isinstance(detector, OutlierAE):
        save_tf_ae(detector, filepath)
    elif isinstance(detector, OutlierVAE):
        save_tf_vae(detector, filepath)
    elif isinstance(detector, (ChiSquareDrift, ClassifierDrift, KSDrift, MMDDrift, TabularDrift)):
        # model/embed/tokenizer were bound in the drift-detector branches above
        if model is not None:
            save_model(model, model_dir, filename='encoder')
        if embed is not None:
            save_embedding_legacy(embed, embed_args, filepath)
        if tokenizer is not None:
            tokenizer.save_pretrained(filepath.joinpath('model'))
        if detector_name == 'ClassifierDriftTF':
            save_model(clf_drift, model_dir, filename='clf_drift')
    elif isinstance(detector, OutlierAEGMM):
        save_tf_aegmm(detector, filepath)
    elif isinstance(detector, OutlierVAEGMM):
        save_tf_vaegmm(detector, filepath)
    elif isinstance(detector, AdversarialAE):
        save_tf_ae(detector, filepath)
        save_model(detector.model, model_dir)
        save_tf_hl(detector.model_hl, filepath)
    elif isinstance(detector, ModelDistillation):
        save_model(detector.distilled_model, model_dir, filename='distilled_model')
        save_model(detector.model, model_dir, filename='model')
    elif isinstance(detector, OutlierSeq2Seq):
        save_tf_s2s(detector, filepath)
    elif isinstance(detector, LLR):
        save_tf_llr(detector, filepath)
def preprocess_step_drift(cd: Union[ChiSquareDrift, ClassifierDriftTF, KSDrift, MMDDriftTF, TabularDrift]) \
        -> Tuple[
            Optional[Callable], Dict, Optional[tf.keras.Model],
            Optional[TransformerEmbedding], Dict, Optional[Callable], bool
        ]:
    """
    Decompose a drift detector's `preprocess_fn` into separately-savable parts.

    Inspects the detector's preprocessing function (if it is a `functools.partial`) and pulls
    out the underlying callable, its serializable keyword arguments, any TensorFlow
    preprocessing model, transformer embedding, embedding kwargs and tokenizer.

    Parameters
    ----------
    cd
        Drift detection object whose `preprocess_fn` is inspected.

    Returns
    -------
    Tuple of (preprocess function, preprocess kwargs, optional tf model, optional embedding,
    embedding kwargs, optional tokenizer, flag indicating a text embedding must be reloaded).
    """
    # note: need to be able to dill tokenizers other than transformers
    preprocess_fn, preprocess_kwargs = None, {}
    model, embed, embed_args, tokenizer, load_emb = None, None, {}, None, False
    if isinstance(cd.preprocess_fn, partial):
        preprocess_fn = cd.preprocess_fn.func
        for k, v in cd.preprocess_fn.keywords.items():
            if isinstance(v, UAE):
                if isinstance(v.encoder.layers[0], TransformerEmbedding):  # text drift
                    # embedding
                    embed = v.encoder.layers[0].model
                    embed_args = dict(
                        embedding_type=v.encoder.layers[0].emb_type,
                        layers=v.encoder.layers[0].hs_emb.keywords['layers']
                    )
                    load_emb = True
                    # preprocessing encoder: rebuild the encoder without the embedding layer,
                    # taking the embedding output shape as its new input shape
                    inputs = Input(shape=cd.input_shape, dtype=tf.int64)
                    v.encoder.call(inputs)
                    shape_enc = (v.encoder.layers[0].output.shape[-1],)
                    layers = [InputLayer(input_shape=shape_enc)] + v.encoder.layers[1:]
                    model = tf.keras.Sequential(layers)
                    _ = model(tf.zeros((1,) + shape_enc))  # build the model by calling it once
                else:
                    model = v.encoder
                preprocess_kwargs['model'] = 'UAE'
            elif isinstance(v, HiddenOutput):
                model = v.model
                preprocess_kwargs['model'] = 'HiddenOutput'
            elif isinstance(v, tf.keras.Model):
                model = v
                preprocess_kwargs['model'] = 'custom'
            elif hasattr(v, '__module__'):
                if 'transformers' in v.__module__:  # transformers tokenizer
                    tokenizer = v
                preprocess_kwargs[k] = v.__module__
            else:
                preprocess_kwargs[k] = v
    elif callable(cd.preprocess_fn):
        # plain callable: keep it as-is, nothing further to decompose
        preprocess_fn = cd.preprocess_fn
    return preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb
def state_chisquaredrift(cd: ChiSquareDrift) -> Tuple[
        Dict, Optional[tf.keras.Model],
        Optional[TransformerEmbedding], Optional[Dict], Optional[Callable]
]:
    """
    Collect the savable state of a Chi-Squared drift detector.

    Parameters
    ----------
    cd
        Drift detection object.

    Returns
    -------
    Tuple of state dict, optional preprocessing model, optional embedding, embedding kwargs
    and optional tokenizer.
    """
    preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb = \
        preprocess_step_drift(cd)
    args = {'x_ref': cd.x_ref}
    kwargs = {
        'p_val': cd.p_val,
        'categories_per_feature': cd.x_ref_categories,
        'x_ref_preprocessed': True,
        'preprocess_at_init': cd.preprocess_at_init,
        'update_x_ref': cd.update_x_ref,
        'correction': cd.correction,
        'n_features': cd.n_features,
        'input_shape': cd.input_shape,
    }
    other = {
        'n': cd.n,
        'load_text_embedding': load_emb,
        'preprocess_fn': preprocess_fn,
        'preprocess_kwargs': preprocess_kwargs,
    }
    state_dict = {'args': args, 'kwargs': kwargs, 'other': other}
    return state_dict, model, embed, embed_args, tokenizer
def state_classifierdrift(cd: ClassifierDrift) -> Tuple[
        Dict, tf.keras.Model,
        Optional[tf.keras.Model],
        Optional[TransformerEmbedding], Optional[Dict], Optional[Callable]
]:
    """
    Collect the savable state of a classifier-based drift detector.

    Parameters
    ----------
    cd
        Drift detection object.

    Returns
    -------
    Tuple of state dict, the drift classifier model, optional preprocessing model, optional
    embedding, embedding kwargs and optional tokenizer.
    """
    preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb = \
        preprocess_step_drift(cd._detector)
    # Serialize the optimizer into a *copy* of train_kwargs rather than mutating the detector
    # in-place. The previous in-place assignment left the live detector with a serialized
    # (dict) optimizer after saving, breaking further training and any subsequent save.
    train_kwargs = dict(cd._detector.train_kwargs)
    train_kwargs['optimizer'] = tf.keras.optimizers.serialize(train_kwargs['optimizer'])
    state_dict = {
        'args':
            {
                'x_ref': cd._detector.x_ref,
            },
        'kwargs':
            {
                'p_val': cd._detector.p_val,
                'x_ref_preprocessed': True,
                'preprocess_at_init': cd._detector.preprocess_at_init,
                'update_x_ref': cd._detector.update_x_ref,
                'preds_type': cd._detector.preds_type,
                'binarize_preds': cd._detector.binarize_preds,
                'train_size': cd._detector.train_size,
                'train_kwargs': train_kwargs,
            },
        'other':
            {
                'n': cd._detector.n,
                'skf': cd._detector.skf,
                'load_text_embedding': load_emb,
                'preprocess_fn': preprocess_fn,
                'preprocess_kwargs': preprocess_kwargs
            }
    }
    return state_dict, cd._detector.model, model, embed, embed_args, tokenizer
def state_tabulardrift(cd: TabularDrift) -> Tuple[
        Dict, Optional[tf.keras.Model],
        Optional[TransformerEmbedding], Optional[Dict], Optional[Callable]
]:
    """
    Collect the savable state of a tabular drift detector.

    Parameters
    ----------
    cd
        Drift detection object.

    Returns
    -------
    Tuple of state dict, optional preprocessing model, optional embedding, embedding kwargs
    and optional tokenizer.
    """
    preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb = \
        preprocess_step_drift(cd)
    args = {'x_ref': cd.x_ref}
    kwargs = {
        'p_val': cd.p_val,
        'categories_per_feature': cd.x_ref_categories,
        'x_ref_preprocessed': True,
        'preprocess_at_init': cd.preprocess_at_init,
        'update_x_ref': cd.update_x_ref,
        'correction': cd.correction,
        'alternative': cd.alternative,
        'n_features': cd.n_features,
        'input_shape': cd.input_shape,
    }
    other = {
        'n': cd.n,
        'load_text_embedding': load_emb,
        'preprocess_fn': preprocess_fn,
        'preprocess_kwargs': preprocess_kwargs,
    }
    state_dict = {'args': args, 'kwargs': kwargs, 'other': other}
    return state_dict, model, embed, embed_args, tokenizer
def state_ksdrift(cd: KSDrift) -> Tuple[
        Dict, Optional[tf.keras.Model],
        Optional[TransformerEmbedding], Optional[Dict], Optional[Callable]
]:
    """
    Collect the savable state of a Kolmogorov-Smirnov drift detector.

    Parameters
    ----------
    cd
        Drift detection object.

    Returns
    -------
    Tuple of state dict, optional preprocessing model, optional embedding, embedding kwargs
    and optional tokenizer.
    """
    preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb = \
        preprocess_step_drift(cd)
    args = {'x_ref': cd.x_ref}
    kwargs = {
        'p_val': cd.p_val,
        'x_ref_preprocessed': True,
        'preprocess_at_init': cd.preprocess_at_init,
        'update_x_ref': cd.update_x_ref,
        'correction': cd.correction,
        'alternative': cd.alternative,
        'n_features': cd.n_features,
        'input_shape': cd.input_shape,
    }
    other = {
        'n': cd.n,
        'load_text_embedding': load_emb,
        'preprocess_fn': preprocess_fn,
        'preprocess_kwargs': preprocess_kwargs,
    }
    state_dict = {'args': args, 'kwargs': kwargs, 'other': other}
    return state_dict, model, embed, embed_args, tokenizer
def state_mmddrift(cd: MMDDrift) -> Tuple[
        Dict, Optional[tf.keras.Model],
        Optional[TransformerEmbedding], Optional[Dict], Optional[Callable]
]:
    """
    Collect the savable state of an MMD drift detector.
    Note: only GaussianRBF kernel supported.

    Parameters
    ----------
    cd
        Drift detection object.

    Returns
    -------
    Tuple of state dict, optional preprocessing model, optional embedding, embedding kwargs
    and optional tokenizer.
    """
    preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb = \
        preprocess_step_drift(cd._detector)
    detector = cd._detector
    if not isinstance(detector.kernel, GaussianRBF):
        logger.warning('Currently only the default GaussianRBF kernel is supported.')
    # if sigma was inferred from x_ref it is not stored so it is re-inferred at load time
    sigma = None if detector.infer_sigma else detector.kernel.sigma.numpy()
    args = {'x_ref': detector.x_ref}
    kwargs = {
        'p_val': detector.p_val,
        'x_ref_preprocessed': True,
        'preprocess_at_init': detector.preprocess_at_init,
        'update_x_ref': detector.update_x_ref,
        'sigma': sigma,
        'configure_kernel_from_x_ref': not detector.infer_sigma,
        'n_permutations': detector.n_permutations,
        'input_shape': detector.input_shape,
    }
    other = {
        'n': detector.n,
        'load_text_embedding': load_emb,
        'preprocess_fn': preprocess_fn,
        'preprocess_kwargs': preprocess_kwargs,
    }
    state_dict = {'args': args, 'kwargs': kwargs, 'other': other}
    return state_dict, model, embed, embed_args, tokenizer
def state_iforest(od: IForest) -> Dict:
    """
    Collect the savable parameters of an isolation forest outlier detector.

    Parameters
    ----------
    od
        Outlier detector object.

    Returns
    -------
    Dict with the detector threshold and the fitted sklearn estimator.
    """
    return {
        'threshold': od.threshold,
        'isolationforest': od.isolationforest,
    }
def state_mahalanobis(od: Mahalanobis) -> Dict:
    """
    Collect the savable parameters of a Mahalanobis outlier detector.

    Parameters
    ----------
    od
        Outlier detector object.

    Returns
    -------
    Dict with the detector's configuration and fitted statistics.
    """
    # the saved state is exactly these attributes, read straight off the detector
    attrs = ['threshold', 'n_components', 'std_clip', 'start_clip', 'max_n',
             'cat_vars', 'ohe', 'd_abs', 'clip', 'mean', 'C', 'n']
    return {attr: getattr(od, attr) for attr in attrs}
def state_ae(od: OutlierAE) -> Dict:
    """
    Collect the savable parameters of an OutlierAE detector (threshold only).

    Parameters
    ----------
    od
        Outlier detector object.

    Returns
    -------
    Dict containing the outlier threshold.
    """
    return {'threshold': od.threshold}
def state_vae(od: OutlierVAE) -> Dict:
    """
    Collect the savable parameters of an OutlierVAE detector.

    Parameters
    ----------
    od
        Outlier detector object.

    Returns
    -------
    Dict with the detector threshold, scoring configuration and VAE hyperparameters.
    """
    return {
        'threshold': od.threshold,
        'score_type': od.score_type,
        'samples': od.samples,
        'latent_dim': od.vae.latent_dim,
        'beta': od.vae.beta,
    }
def state_aegmm(od: OutlierAEGMM) -> Dict:
    """
    Collect the savable parameters of an OutlierAEGMM detector.

    Parameters
    ----------
    od
        Outlier detector object.

    Returns
    -------
    Dict with the detector threshold, AEGMM hyperparameters and fitted GMM statistics.
    """
    # the GMM statistics are only tensors once `fit` has been called
    fitted_params = [od.phi, od.mu, od.cov, od.L, od.log_det_cov]
    if not all(tf.is_tensor(p) for p in fitted_params):
        logger.warning('Saving AEGMM detector that has not been fit.')
    return {
        'threshold': od.threshold,
        'n_gmm': od.aegmm.n_gmm,
        'recon_features': od.aegmm.recon_features,
        'phi': od.phi,
        'mu': od.mu,
        'cov': od.cov,
        'L': od.L,
        'log_det_cov': od.log_det_cov,
    }
def state_vaegmm(od: OutlierVAEGMM) -> Dict:
    """
    Collect the savable parameters of an OutlierVAEGMM detector.

    Parameters
    ----------
    od
        Outlier detector object.

    Returns
    -------
    Dict with the detector threshold, VAEGMM hyperparameters and fitted GMM statistics.
    """
    # the GMM statistics are only tensors once `fit` has been called
    fitted_params = [od.phi, od.mu, od.cov, od.L, od.log_det_cov]
    if not all(tf.is_tensor(p) for p in fitted_params):
        logger.warning('Saving VAEGMM detector that has not been fit.')
    return {
        'threshold': od.threshold,
        'samples': od.samples,
        'n_gmm': od.vaegmm.n_gmm,
        'latent_dim': od.vaegmm.latent_dim,
        'beta': od.vaegmm.beta,
        'recon_features': od.vaegmm.recon_features,
        'phi': od.phi,
        'mu': od.mu,
        'cov': od.cov,
        'L': od.L,
        'log_det_cov': od.log_det_cov,
    }
def state_adv_ae(ad: AdversarialAE) -> Dict:
    """
    Collect the savable parameters of an AdversarialAE detector.

    Parameters
    ----------
    ad
        Adversarial detector object.

    Returns
    -------
    Dict with the detector threshold and adversarial scoring configuration.
    """
    return {
        'threshold': ad.threshold,
        'w_model_hl': ad.w_model_hl,
        'temperature': ad.temperature,
        'hidden_layer_kld': ad.hidden_layer_kld,
    }
def state_adv_md(md: ModelDistillation) -> Dict:
    """
    Collect the savable parameters of a ModelDistillation detector.

    Parameters
    ----------
    md
        ModelDistillation detector object.

    Returns
    -------
    Dict with the detector threshold and distillation configuration.
    """
    return {
        'threshold': md.threshold,
        'temperature': md.temperature,
        'loss_type': md.loss_type,
    }
def state_prophet(od: OutlierProphet) -> Dict:
    """
    Collect the savable parameters of an OutlierProphet detector.

    Parameters
    ----------
    od
        Outlier detector object.

    Returns
    -------
    Dict with the fitted prophet model and its `cap` setting.
    """
    return {
        'model': od.model,
        'cap': od.cap,
    }
def state_sr(od: SpectralResidual) -> Dict:
    """
    Collect the savable parameters of a SpectralResidual detector.

    Parameters
    ----------
    od
        Outlier detector object.

    Returns
    -------
    Dict with the detector threshold and spectral residual window settings.
    """
    return {
        'threshold': od.threshold,
        'window_amp': od.window_amp,
        'window_local': od.window_local,
        'n_est_points': od.n_est_points,
        'n_grad_points': od.n_grad_points,
    }
def state_s2s(od: OutlierSeq2Seq) -> Dict:
    """
    Collect the savable parameters of an OutlierSeq2Seq detector.

    Parameters
    ----------
    od
        Outlier detector object.

    Returns
    -------
    Dict with the detector threshold and seq2seq model configuration.
    """
    return {
        'threshold': od.threshold,
        'beta': od.seq2seq.beta,
        'shape': od.shape,
        'latent_dim': od.latent_dim,
        'output_activation': od.output_activation,
    }
def state_llr(od: LLR) -> Dict:
    """
    Collect the savable parameters of an LLR detector.

    Parameters
    ----------
    od
        Outlier detector object.

    Returns
    -------
    Dict with the detector threshold and likelihood-ratio configuration.
    """
    return {
        'threshold': od.threshold,
        'has_log_prob': od.has_log_prob,
        'sequential': od.sequential,
        'log_prob': od.log_prob,
    }
def save_tf_ae(detector: Union[OutlierAE, AdversarialAE],
               filepath: Union[str, os.PathLike]) -> None:
    """
    Save the TensorFlow components of an (adversarial) auto-encoder detector.

    Parameters
    ----------
    detector
        Outlier or adversarial detector object.
    filepath
        Save directory.
    """
    # ensure the model directory exists
    model_dir = Path(filepath).joinpath('model')
    if not model_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
        model_dir.mkdir(parents=True, exist_ok=True)
    # encoder/decoder are only saved when they are plain Sequential models
    encoder = detector.ae.encoder.encoder_net
    if isinstance(encoder, tf.keras.Sequential):
        encoder.save(model_dir.joinpath('encoder_net.h5'))
    else:
        logger.warning('No `tf.keras.Sequential` encoder detected. No encoder saved.')
    decoder = detector.ae.decoder.decoder_net
    if isinstance(decoder, tf.keras.Sequential):
        decoder.save(model_dir.joinpath('decoder_net.h5'))
    else:
        logger.warning('No `tf.keras.Sequential` decoder detected. No decoder saved.')
    # full AE weights are stored as a checkpoint
    if isinstance(detector.ae, tf.keras.Model):
        detector.ae.save_weights(model_dir.joinpath('ae.ckpt'))
    else:
        logger.warning('No `tf.keras.Model` ae detected. No ae saved.')
def save_tf_vae(detector: OutlierVAE,
                filepath: Union[str, os.PathLike]) -> None:
    """
    Save the TensorFlow components of an OutlierVAE detector.

    Parameters
    ----------
    detector
        Outlier detector object.
    filepath
        Save directory.
    """
    # ensure the model directory exists
    model_dir = Path(filepath).joinpath('model')
    if not model_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
        model_dir.mkdir(parents=True, exist_ok=True)
    # encoder/decoder are only saved when they are plain Sequential models
    encoder = detector.vae.encoder.encoder_net
    if isinstance(encoder, tf.keras.Sequential):
        encoder.save(model_dir.joinpath('encoder_net.h5'))
    else:
        logger.warning('No `tf.keras.Sequential` encoder detected. No encoder saved.')
    decoder = detector.vae.decoder.decoder_net
    if isinstance(decoder, tf.keras.Sequential):
        decoder.save(model_dir.joinpath('decoder_net.h5'))
    else:
        logger.warning('No `tf.keras.Sequential` decoder detected. No decoder saved.')
    # full VAE weights are stored as a checkpoint
    if isinstance(detector.vae, tf.keras.Model):
        detector.vae.save_weights(model_dir.joinpath('vae.ckpt'))
    else:
        logger.warning('No `tf.keras.Model` vae detected. No vae saved.')
def save_tf_llr(detector: LLR, filepath: Union[str, os.PathLike]) -> None:
    """
    Save the TensorFlow models or distributions of an LLR detector.

    Parameters
    ----------
    detector
        Outlier detector object.
    filepath
        Save directory.
    """
    # ensure the model directory exists
    model_dir = Path(filepath).joinpath('model')
    if not model_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
        model_dir.mkdir(parents=True, exist_ok=True)
    # detectors fit with explicit models expose model_s/model_b; otherwise save the
    # semantic (and optional background) distributions directly
    if hasattr(detector, 'model_s') and hasattr(detector, 'model_b'):
        detector.model_s.save_weights(model_dir.joinpath('model_s.h5'))
        detector.model_b.save_weights(model_dir.joinpath('model_b.h5'))
    else:
        detector.dist_s.save(model_dir.joinpath('model.h5'))
        if detector.dist_b is not None:
            detector.dist_b.save(model_dir.joinpath('model_background.h5'))
def save_tf_hl(models: List[tf.keras.Model],
               filepath: Union[str, os.PathLike]) -> None:
    """
    Save TensorFlow hidden-layer model weights, one checkpoint per model.

    Parameters
    ----------
    models
        List with tf.keras models.
    filepath
        Save directory.
    """
    # nothing to do unless a list of models was provided
    if not isinstance(models, list):
        return
    # ensure the model directory exists
    model_dir = Path(filepath).joinpath('model')
    if not model_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
        model_dir.mkdir(parents=True, exist_ok=True)
    # one weight checkpoint per hidden-layer model
    for i, m in enumerate(models):
        m.save_weights(model_dir.joinpath('model_hl_' + str(i) + '.ckpt'))
def save_tf_aegmm(od: OutlierAEGMM,
                  filepath: Union[str, os.PathLike]) -> None:
    """
    Save the TensorFlow components of an OutlierAEGMM detector.

    Parameters
    ----------
    od
        Outlier detector object.
    filepath
        Save directory.
    """
    # ensure the model directory exists
    model_dir = Path(filepath).joinpath('model')
    if not model_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
        model_dir.mkdir(parents=True, exist_ok=True)
    # sub-networks are only saved when they are plain Sequential models
    if isinstance(od.aegmm.encoder, tf.keras.Sequential):
        od.aegmm.encoder.save(model_dir.joinpath('encoder_net.h5'))
    else:
        logger.warning('No `tf.keras.Sequential` encoder detected. No encoder saved.')
    if isinstance(od.aegmm.decoder, tf.keras.Sequential):
        od.aegmm.decoder.save(model_dir.joinpath('decoder_net.h5'))
    else:
        logger.warning('No `tf.keras.Sequential` decoder detected. No decoder saved.')
    if isinstance(od.aegmm.gmm_density, tf.keras.Sequential):
        od.aegmm.gmm_density.save(model_dir.joinpath('gmm_density_net.h5'))
    else:
        logger.warning('No `tf.keras.Sequential` GMM density net detected. No GMM density net saved.')
    # full AEGMM weights are stored as a checkpoint
    if isinstance(od.aegmm, tf.keras.Model):
        od.aegmm.save_weights(model_dir.joinpath('aegmm.ckpt'))
    else:
        logger.warning('No `tf.keras.Model` AEGMM detected. No AEGMM saved.')
def save_tf_vaegmm(od: OutlierVAEGMM,
                   filepath: Union[str, os.PathLike]) -> None:
    """
    Save the TensorFlow components of an OutlierVAEGMM detector.

    Parameters
    ----------
    od
        Outlier detector object.
    filepath
        Save directory.
    """
    # ensure the model directory exists
    model_dir = Path(filepath).joinpath('model')
    if not model_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
        model_dir.mkdir(parents=True, exist_ok=True)
    # sub-networks are only saved when they are plain Sequential models
    if isinstance(od.vaegmm.encoder.encoder_net, tf.keras.Sequential):
        od.vaegmm.encoder.encoder_net.save(model_dir.joinpath('encoder_net.h5'))
    else:
        logger.warning('No `tf.keras.Sequential` encoder detected. No encoder saved.')
    if isinstance(od.vaegmm.decoder, tf.keras.Sequential):
        od.vaegmm.decoder.save(model_dir.joinpath('decoder_net.h5'))
    else:
        logger.warning('No `tf.keras.Sequential` decoder detected. No decoder saved.')
    if isinstance(od.vaegmm.gmm_density, tf.keras.Sequential):
        od.vaegmm.gmm_density.save(model_dir.joinpath('gmm_density_net.h5'))
    else:
        logger.warning('No `tf.keras.Sequential` GMM density net detected. No GMM density net saved.')
    # full VAEGMM weights are stored as a checkpoint
    if isinstance(od.vaegmm, tf.keras.Model):
        od.vaegmm.save_weights(model_dir.joinpath('vaegmm.ckpt'))
    else:
        logger.warning('No `tf.keras.Model` VAEGMM detected. No VAEGMM saved.')
def save_tf_s2s(od: OutlierSeq2Seq,
                filepath: Union[str, os.PathLike]) -> None:
    """
    Save the TensorFlow components of an OutlierSeq2Seq detector.

    Parameters
    ----------
    od
        Outlier detector object.
    filepath
        Save directory.
    """
    # ensure the model directory exists
    model_dir = Path(filepath).joinpath('model')
    if not model_dir.is_dir():
        logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
        model_dir.mkdir(parents=True, exist_ok=True)
    # the threshold estimation net is only saved when it is a plain Sequential model
    threshold_net = od.seq2seq.threshold_net
    if isinstance(threshold_net, tf.keras.Sequential):
        threshold_net.save(model_dir.joinpath('threshold_net.h5'))
    else:
        logger.warning('No `tf.keras.Sequential` threshold estimation net detected. No threshold net saved.')
    # full seq2seq weights are stored as a checkpoint
    if isinstance(od.seq2seq, tf.keras.Model):
        od.seq2seq.save_weights(model_dir.joinpath('seq2seq.ckpt'))
    else:
        logger.warning('No `tf.keras.Model` Seq2Seq detected. No Seq2Seq model saved.')
| 34,567 | 34.237513 | 114 | py |
alibi-detect | alibi-detect-master/alibi_detect/saving/_tensorflow/tests/test_saving_tf.py | from pytest_cases import param_fixture, parametrize, parametrize_with_cases
import pytest
from alibi_detect.saving.tests.datasets import ContinuousData
from alibi_detect.saving.tests.models import encoder_model
from alibi_detect.cd.tensorflow import HiddenOutput as HiddenOutput_tf
from alibi_detect.saving.loading import _load_model_config, _load_optimizer_config
from alibi_detect.saving.saving import _path2str, _save_model_config, _save_optimizer_config
from alibi_detect.saving.schemas import ModelConfig, SupportedOptimizer
import tensorflow as tf
import numpy as np
from packaging import version
# Fixture: every test in this module runs with the 'tensorflow' backend.
backend = param_fixture("backend", ['tensorflow'])
# Note: The full save/load functionality of optimizers (inc. validation) is tested in test_save_classifierdrift.
@pytest.mark.skipif(version.parse(tf.__version__) < version.parse('2.11.0'),
                    reason="Skipping since tensorflow < 2.11.0")
@parametrize('legacy', [True, False])
def test_load_optimizer_object_tf2pt11(legacy, backend):
    """
    Test the _load_optimizer_config with a tensorflow optimizer config. Only run if tensorflow>=2.11.
    Here we test that "new" and legacy optimizers can be saved/loaded. We expect the returned optimizer to be an
    instantiated `tf.keras.optimizers.Optimizer` object. Also test that the loaded optimizer can be saved.
    """
    class_name = 'Adam'
    class_str = class_name if legacy else 'Custom>' + class_name  # Note: see discussion in #739 re 'Custom>'
    expected_cfg = {
        'name': class_name,
        'learning_rate': np.float32(0.01),  # float32 since this is what _save_optimizer_config returns
        'epsilon': np.float32(1e-7),
        'amsgrad': False
    }
    cfg_opt = {'class_name': class_str, 'config': dict(expected_cfg)}
    # Load and validate
    optimizer = _load_optimizer_config(cfg_opt, backend=backend)
    SupportedOptimizer.validate_optimizer(optimizer, {'backend': 'tensorflow'})
    expected_base = tf.keras.optimizers.legacy.Optimizer if legacy else tf.keras.optimizers.Optimizer
    assert isinstance(optimizer, expected_base)
    assert type(optimizer).__name__ == class_name
    assert optimizer.learning_rate == expected_cfg['learning_rate']
    assert optimizer.epsilon == expected_cfg['epsilon']
    assert optimizer.amsgrad == expected_cfg['amsgrad']
    # Round-trip: saving the loaded optimizer should reproduce the original config values
    cfg_saved = _save_optimizer_config(optimizer)
    for key, value in expected_cfg.items():
        assert cfg_saved['config'][key] == value
@pytest.mark.skipif(version.parse(tf.__version__) >= version.parse('2.11.0'),
                    reason="Skipping since tensorflow >= 2.11.0")
def test_load_optimizer_object_tf_old(backend):
    """
    Test the _load_optimizer_config with a tensorflow optimizer config. Only run if tensorflow<2.11.
    We expect the returned optimizer to be an instantiated `tf.keras.optimizers.Optimizer` object.
    Also test that the loaded optimizer can be saved.
    """
    class_name = 'Adam'
    expected_cfg = {
        'name': class_name,
        'learning_rate': np.float32(0.01),  # float32 since this is what _save_optimizer_config returns
        'epsilon': np.float32(1e-7),
        'amsgrad': False
    }
    cfg_opt = {'class_name': class_name, 'config': dict(expected_cfg)}
    # Load and validate
    optimizer = _load_optimizer_config(cfg_opt, backend=backend)
    SupportedOptimizer.validate_optimizer(optimizer, {'backend': 'tensorflow'})
    assert isinstance(optimizer, tf.keras.optimizers.Optimizer)
    assert type(optimizer).__name__ == class_name
    assert optimizer.learning_rate == expected_cfg['learning_rate']
    assert optimizer.epsilon == expected_cfg['epsilon']
    assert optimizer.amsgrad == expected_cfg['amsgrad']
    # Round-trip: saving the loaded optimizer should reproduce the original config values
    cfg_saved = _save_optimizer_config(optimizer)
    for key, value in expected_cfg.items():
        assert cfg_saved['config'][key] == value
def test_load_optimizer_type(backend):
    """
    Test the _load_optimizer_config with just the `class_name` specified. In this case we expect a
    `tf.keras.optimizers.Optimizer` class (not an instance) to be returned.
    """
    optimizer = _load_optimizer_config({'class_name': 'Adam'}, backend=backend)
    # an uninstantiated optimizer class should be returned
    assert isinstance(optimizer, type)
    assert optimizer.__name__ == 'Adam'
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd, prefix='data_')
@parametrize('model', [encoder_model])
@parametrize('layer', [None, -1])
def test_save_model_tf(data, model, layer, tmp_path):
    """
    Unit test for _save_model_config and _load_model_config with a tensorflow model.
    """
    # Save the model and normalise the resulting config
    input_shape = (data[0].shape[1],)
    cfg_model, _ = _save_model_config(model, base_path=tmp_path, input_shape=input_shape)
    cfg_model = ModelConfig(**_path2str(cfg_model)).dict()
    model_dir = tmp_path.joinpath('model')
    assert model_dir.is_dir()
    assert model_dir.joinpath('model.h5').is_file()
    # Adjust config: src needs to be an absolute path when loading outside the config machinery
    cfg_model['src'] = model_dir
    if layer is not None:
        cfg_model['layer'] = layer
    # Load the model back and check its type
    model_load = _load_model_config(cfg_model)
    expected_type = type(model) if layer is None else HiddenOutput_tf
    assert isinstance(model_load, expected_type)
| 5,554 | 37.846154 | 112 | py |
alibi-detect | alibi-detect-master/alibi_detect/tests/test_dep_management.py | """
Test optional dependencies.
These tests import all the named objects from the public API of alibi-detect and test that they throw the correct errors
if the relevant optional dependencies are not installed. If these tests fail, it is likely that:
1. The optional dependency relation hasn't been added to the test script. In this case, this test assumes that the
functionality should work for the default alibi-detect install. If this is not the case the exported object name
should be added to the dependency_map in the relevant test.
2. The relevant export in the public API hasn't been imported using `optional_import` from
`alibi_detect.utils.missing_optional_dependency`.
Notes
-----
1. These tests will be skipped in the normal test suite. To run correctly use tox.
2. If you need to configure a new optional dependency you will need to update the setup.cfg file and add a testenv
environment.
3. Backend functionality may be unique to specific explainers/functions and so there may be multiple such modules
that need to be tested separately.
"""
from types import ModuleType
from collections import defaultdict
def check_correct_dependencies(
        module: ModuleType,
        dependencies: defaultdict,
        opt_dep: str):
    """Checks that imported modules that depend on optional dependencies throw correct errors on use.

    Parameters
    ----------
    module
        The module to check. Each of the public objects within this module will be checked.
    dependencies
        A dictionary mapping the name of the object to the list of optional dependencies that it depends on. If a name
        is not in the dictionary, the named object is assumed to be independent of optional dependencies. Therefore it
        should pass for the default alibi-detect install.
    opt_dep
        The name of the optional dependency that is being tested.
    """
    public_names = [name for name in dir(module) if not name.startswith('_')]
    for item_name in public_names:
        item = getattr(module, item_name)
        if isinstance(item, ModuleType):
            continue  # submodules are exercised by their own tests
        pass_contexts = dependencies[item_name]
        # Touching any attribute on a MissingDependency placeholder raises ImportError,
        # while a genuinely imported object raises AttributeError for the dummy `.test`.
        try:
            item.test  # noqa
        except AttributeError:
            assert opt_dep in pass_contexts or 'default' in pass_contexts or opt_dep == 'all', \
                (f'{item_name} was imported instead of an instance of MissingDependency. '
                 f'Are your sure {item} is dependent on {opt_dep}?')
        except ImportError:
            assert opt_dep not in pass_contexts and 'default' not in pass_contexts and opt_dep != 'all', \
                (f'{item_name} has been imported as an instance of MissingDependency. '
                 f'Are you sure the dependency buckets, {pass_contexts} are correct?')
def test_cd_dependencies(opt_dep):
    """Tests that the cd module correctly protects against uninstalled optional dependencies."""
    from alibi_detect import cd
    # nothing in the top-level cd namespace depends on an optional install
    dependency_map = defaultdict(lambda: ['default'])
    check_correct_dependencies(cd, dependency_map, opt_dep)
def test_cd_torch_dependencies(opt_dep):
    """Tests that the cd module correctly protects against uninstalled optional dependencies."""
    dependency_map = defaultdict(lambda: ['default'])
    dependency_map.update({
        "HiddenOutput": ['torch', 'keops'],
        "UAE": ['torch', 'keops'],
        "preprocess_drift": ['torch', 'keops'],
    })
    from alibi_detect.cd import pytorch as cd_pytorch
    check_correct_dependencies(cd_pytorch, dependency_map, opt_dep)
def test_cd_tensorflow_dependencies(opt_dep):
    """Tests that the cd.tensorflow module correctly protects against uninstalled optional dependencies."""
    # Objects not listed below fall back to the 'default' bucket via the factory.
    dependency_map = defaultdict(lambda: ['default'], {
        "HiddenOutput": ['tensorflow'],
        "UAE": ['tensorflow'],
        "preprocess_drift": ['tensorflow'],
    })
    from alibi_detect.cd import tensorflow as tensorflow_cd
    check_correct_dependencies(tensorflow_cd, dependency_map, opt_dep)
def test_ad_dependencies(opt_dep):
    """Tests that the ad module correctly protects against uninstalled optional dependencies."""
    # Objects not listed below fall back to the 'default' bucket via the factory.
    dependency_map = defaultdict(lambda: ['default'], {
        'AdversarialAE': ['tensorflow'],
        'ModelDistillation': ['tensorflow'],
    })
    from alibi_detect import ad
    check_correct_dependencies(ad, dependency_map, opt_dep)
def test_od_dependencies(opt_dep):
    """Tests that the od module correctly protects against uninstalled optional dependencies."""
    # Objects not listed below fall back to the 'default' bucket via the factory.
    dependency_map = defaultdict(lambda: ['default'], {
        'LLR': ['tensorflow'],
        'OutlierVAE': ['tensorflow'],
        'OutlierVAEGMM': ['tensorflow'],
        'OutlierAE': ['tensorflow'],
        'OutlierAEGMM': ['tensorflow'],
        'OutlierSeq2Seq': ['tensorflow'],
        "OutlierProphet": ['prophet'],
        'PValNormalizer': ['torch', 'keops'],
        'ShiftAndScaleNormalizer': ['torch', 'keops'],
        'TopKAggregator': ['torch', 'keops'],
        'AverageAggregator': ['torch', 'keops'],
        'MaxAggregator': ['torch', 'keops'],
        'MinAggregator': ['torch', 'keops'],
    })
    from alibi_detect import od
    check_correct_dependencies(od, dependency_map, opt_dep)
def test_od_backend_dependencies(opt_dep):
    """Tests that the od.pytorch backend module correctly protects against uninstalled optional dependencies."""
    # Every pytorch-backend object requires torch (or the keops superset).
    dependency_map = defaultdict(lambda: ['default'], {
        'Ensembler': ['torch', 'keops'],
        'KNNTorch': ['torch', 'keops'],
        'MahalanobisTorch': ['torch', 'keops'],
        'KernelPCATorch': ['torch', 'keops'],
        'LinearPCATorch': ['torch', 'keops'],
        'GMMTorch': ['torch', 'keops'],
        'LOFTorch': ['torch', 'keops'],
        'SgdSVMTorch': ['torch', 'keops'],
        'BgdSVMTorch': ['torch', 'keops'],
    })
    from alibi_detect.od import pytorch as od_pt_backend
    check_correct_dependencies(od_pt_backend, dependency_map, opt_dep)
def test_tensorflow_model_dependencies(opt_dep):
    """Tests that the tensorflow models module correctly protects against uninstalled optional dependencies."""
    # Every object in models.tensorflow requires the tensorflow extra.
    dependency_map = defaultdict(lambda: ['default'], {
        "AE": ['tensorflow'],
        "AEGMM": ['tensorflow'],
        "Seq2Seq": ['tensorflow'],
        "VAE": ['tensorflow'],
        "VAEGMM": ['tensorflow'],
        "resnet": ['tensorflow'],
        "PixelCNN": ['tensorflow'],
        "TransformerEmbedding": ['tensorflow'],
        "trainer": ['tensorflow'],
        "eucl_cosim_features": ['tensorflow'],
        "elbo": ['tensorflow'],
        "loss_vaegmm": ['tensorflow'],
        "loss_aegmm": ['tensorflow'],
        "loss_adv_ae": ['tensorflow'],
        "loss_distillation": ['tensorflow'],
        "scale_by_instance": ['tensorflow'],
    })
    from alibi_detect.models import tensorflow as tf_models
    check_correct_dependencies(tf_models, dependency_map, opt_dep)
def test_torch_model_dependencies(opt_dep):
    """Tests that the torch models module correctly protects against uninstalled optional dependencies."""
    dependency_map = defaultdict(lambda: ['default'], {
        "TransformerEmbedding": ['torch', 'keops'],
        "trainer": ['torch', 'keops'],
    })
    from alibi_detect.models import pytorch as torch_models
    check_correct_dependencies(torch_models, dependency_map, opt_dep)
def test_dataset_dependencies(opt_dep):
    """Tests that the datasets module correctly protects against uninstalled optional dependencies."""
    # The datasets module has no optional-dependency-gated objects; everything
    # should work under the default install.
    dependency_map = defaultdict(lambda: ['default'])
    from alibi_detect import datasets
    check_correct_dependencies(datasets, dependency_map, opt_dep)
def test_fetching_utils_dependencies(opt_dep):
    """Tests that the fetching utils module correctly protects against uninstalled optional dependencies."""
    dependency_map = defaultdict(lambda: ['default'], {
        'fetch_detector': ['tensorflow'],
        'fetch_tf_model': ['tensorflow'],
    })
    from alibi_detect.utils import fetching
    check_correct_dependencies(fetching, dependency_map, opt_dep)
def test_saving_tf_dependencies(opt_dep):
    """Tests that the alibi_detect.saving._tensorflow module correctly protects against uninstalled optional
    dependencies.
    """
    dependency_map = defaultdict(lambda: ['default'], {
        'Detector': ['tensorflow'],
        'load_detector_legacy': ['tensorflow'],
        'load_embedding_tf': ['tensorflow'],
        'load_kernel_config_tf': ['tensorflow'],
        'load_model_tf': ['tensorflow'],
        'load_optimizer_tf': ['tensorflow'],
        'prep_model_and_emb_tf': ['tensorflow'],
        'save_detector_legacy': ['tensorflow'],
        'save_model_config_tf': ['tensorflow'],
        'save_optimizer_config_tf': ['tensorflow'],
        'get_tf_dtype': ['tensorflow'],
    })
    from alibi_detect.saving import _tensorflow as tf_saving
    check_correct_dependencies(tf_saving, dependency_map, opt_dep)
def test_saving_torch_dependencies(opt_dep):
    """Tests that the alibi_detect.saving._pytorch module correctly protects against uninstalled optional
    dependencies.
    """
    dependency_map = defaultdict(lambda: ['default'], {
        'load_embedding_pt': ['torch', 'keops'],
        'load_kernel_config_pt': ['torch', 'keops'],
        'load_model_pt': ['torch', 'keops'],
        'load_optimizer_pt': ['torch', 'keops'],
        'prep_model_and_emb_pt': ['torch', 'keops'],
        'save_model_config_pt': ['torch', 'keops'],
        'get_pt_dtype': ['torch', 'keops'],
    })
    from alibi_detect.saving import _pytorch as pt_saving
    check_correct_dependencies(pt_saving, dependency_map, opt_dep)
def test_saving_dependencies(opt_dep):
    """Tests that the alibi_detect.saving module correctly protects against uninstalled optional dependencies."""
    # The top-level saving module exposes no optional-dependency-gated objects.
    dependency_map = defaultdict(lambda: ['default'])
    from alibi_detect import saving
    check_correct_dependencies(saving, dependency_map, opt_dep)
def test_tensorflow_utils_dependencies(opt_dep):
    """Tests that the tensorflow utils module correctly protects against uninstalled optional dependencies."""
    dependency_map = defaultdict(lambda: ['default'], {
        "batch_compute_kernel_matrix": ['tensorflow'],
        "mmd2": ['tensorflow'],
        "mmd2_from_kernel_matrix": ['tensorflow'],
        "relative_euclidean_distance": ['tensorflow'],
        "squared_pairwise_distance": ['tensorflow'],
        "GaussianRBF": ['tensorflow'],
        "DeepKernel": ['tensorflow'],
        "permed_lsdds": ['tensorflow'],
        "predict_batch": ['tensorflow'],
        "predict_batch_transformer": ['tensorflow'],
        "quantile": ['tensorflow'],
        "subset_matrix": ['tensorflow'],
        "zero_diag": ['tensorflow'],
        "mutate_categorical": ['tensorflow'],
        "TFDataset": ['tensorflow'],
    })
    from alibi_detect.utils import tensorflow as tensorflow_utils
    check_correct_dependencies(tensorflow_utils, dependency_map, opt_dep)
def test_torch_utils_dependencies(opt_dep):
    """Tests that the pytorch utils module correctly protects against uninstalled optional dependencies."""
    dependency_map = defaultdict(lambda: ['default'], {
        "batch_compute_kernel_matrix": ['torch', 'keops'],
        "mmd2": ['torch', 'keops'],
        "mmd2_from_kernel_matrix": ['torch', 'keops'],
        "squared_pairwise_distance": ['torch', 'keops'],
        "GaussianRBF": ['torch', 'keops'],
        "DeepKernel": ['torch', 'keops'],
        "permed_lsdds": ['torch', 'keops'],
        "predict_batch": ['torch', 'keops'],
        "predict_batch_transformer": ['torch', 'keops'],
        "quantile": ['torch', 'keops'],
        "zero_diag": ['torch', 'keops'],
        "TorchDataset": ['torch', 'keops'],
        "get_device": ['torch', 'keops'],
        "_save_state_dict": ['torch', 'keops'],
        "_load_state_dict": ['torch', 'keops'],
    })
    from alibi_detect.utils import pytorch as pytorch_utils
    check_correct_dependencies(pytorch_utils, dependency_map, opt_dep)
def test_keops_utils_dependencies(opt_dep):
    """Tests that the keops utils module correctly protects against uninstalled optional dependencies."""
    # KeOps kernels are only available with the keops extra (not plain torch).
    dependency_map = defaultdict(lambda: ['default'], {
        "GaussianRBF": ['keops'],
        "DeepKernel": ['keops'],
    })
    from alibi_detect.utils import keops as keops_utils
    check_correct_dependencies(keops_utils, dependency_map, opt_dep)
import logging
from abc import abstractmethod
from typing import Callable, Dict, List, Optional, Tuple, Union, Any
import numpy as np
from alibi_detect.base import BaseDetector, concept_drift_dict, DriftConfigMixin
from alibi_detect.cd.utils import get_input_shape, update_reference
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow
from alibi_detect.utils.statstest import fdr
from scipy.stats import binom_test, ks_2samp
from sklearn.model_selection import StratifiedKFold
if has_pytorch:
import torch
if has_tensorflow:
import tensorflow as tf
logger = logging.getLogger(__name__)
class BaseClassifierDrift(BaseDetector):
    # Classifier trained to discriminate reference (label 0) from test (label 1)
    # instances; set by the backend-specific subclasses.
    model: Union['tf.keras.Model', 'torch.nn.Module']

    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            preds_type: str = 'probs',
            binarize_preds: bool = False,
            train_size: Optional[float] = .75,
            n_folds: Optional[int] = None,
            retrain_from_scratch: bool = True,
            seed: int = 0,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None,
    ) -> None:
        """
        Base class for the classifier-based drift detector. A classifier is trained on a fraction of the
        combined reference and test data and drift is detected on the remaining data. Drift is flagged when
        the classifier separates the two samples better than expected under the no-drift null hypothesis.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        preds_type
            Whether the model outputs probabilities or logits
        binarize_preds
            Whether to test for discrepency on soft (e.g. probs/logits) model predictions directly
            with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
            The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
        n_folds
            Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold predictions. This allows to leverage all the reference and test data
            for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
            are specified, `n_folds` is prioritized.
        retrain_from_scratch
            Whether the classifier should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        seed
            Optional random seed for fold selection.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        if p_val is None:
            logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')
        if isinstance(train_size, float) and isinstance(n_folds, int):
            logger.warning('Both `n_folds` and `train_size` specified. By default `n_folds` is used.')
        if n_folds is not None and n_folds > 1 and not retrain_from_scratch:
            raise ValueError("If using multiple folds the model must be retrained from scratch for each fold.")
        # x_ref preprocessing
        self.preprocess_at_init = preprocess_at_init
        self.x_ref_preprocessed = x_ref_preprocessed
        if preprocess_fn is not None and not isinstance(preprocess_fn, Callable):  # type: ignore[arg-type]
            raise ValueError("`preprocess_fn` is not a valid Callable.")
        if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
            self.x_ref = preprocess_fn(x_ref)
        else:
            self.x_ref = x_ref
        # Other attributes
        self.p_val = p_val
        self.update_x_ref = update_x_ref
        self.preprocess_fn = preprocess_fn
        self.n = len(x_ref)
        # define whether soft preds and optionally the stratified k-fold split
        self.preds_type = preds_type
        self.binarize_preds = binarize_preds
        if isinstance(n_folds, int):
            self.train_size = None
            self.skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=seed)
        else:
            self.train_size, self.skf = train_size, None
        self.retrain_from_scratch = retrain_from_scratch
        # store input shape for save and load functionality
        self.input_shape = get_input_shape(input_shape, x_ref)
        # set metadata
        self.meta['online'] = False
        self.meta['data_type'] = data_type
        self.meta['detector_type'] = 'drift'
        # Fix: the 'binarize_preds' key previously contained a trailing space
        # ('binarize_preds '), making the metadata entry unreachable under its intended name.
        self.meta['params'] = {'binarize_preds': binarize_preds, 'preds_type': preds_type}

    def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[Union[np.ndarray, list], Union[np.ndarray, list]]:
        """
        Data preprocessing before computing the drift scores.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        Preprocessed reference data and new instances.
        """
        if self.preprocess_fn is not None:
            x = self.preprocess_fn(x)
            # Only preprocess x_ref on the fly if it wasn't already done at init time.
            if not self.preprocess_at_init and not self.x_ref_preprocessed:
                x_ref = self.preprocess_fn(self.x_ref)
            else:
                x_ref = self.x_ref
            return x_ref, x
        else:
            return self.x_ref, x

    def get_splits(
            self,
            x_ref: Union[np.ndarray, list],
            x: Union[np.ndarray, list],
            return_splits: bool = True
    ) -> Union[Tuple[Union[np.ndarray, list], np.ndarray],
               Tuple[Union[np.ndarray, list], np.ndarray, Optional[List[Tuple[np.ndarray, np.ndarray]]]]]:
        """
        Split reference and test data in train and test folds used by the classifier.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        x
            Batch of instances.
        return_splits
            Whether to return the splits.

        Returns
        -------
        Combined reference and test instances with labels and optionally a list with tuples of \
        train and test indices for optionally different folds.
        """
        # create dataset and labels: 0 for reference instances, 1 for test instances
        y = np.concatenate([np.zeros(len(x_ref)), np.ones(len(x))], axis=0).astype(np.int64)  # Fix #411
        if isinstance(x_ref, np.ndarray) and isinstance(x, np.ndarray):
            x = np.concatenate([x_ref, x], axis=0)
        else:  # add 2 lists
            x = x_ref + x
        if not return_splits:
            return x, y
        # random shuffle if stratified folds are not used
        n_tot = len(x)
        if self.skf is None:
            idx_shuffle = np.random.choice(np.arange(n_tot), size=n_tot, replace=False)
            n_tr = int(n_tot * self.train_size)
            idx_tr, idx_te = idx_shuffle[:n_tr], idx_shuffle[n_tr:]
            splits = [(idx_tr, idx_te)]
        else:  # use stratified folds
            splits = self.skf.split(np.zeros(n_tot), y)
        return x, y, splits

    def test_probs(
            self, y_oof: np.ndarray, probs_oof: np.ndarray, n_ref: int, n_cur: int
    ) -> Tuple[float, float]:
        """
        Perform a statistical test of the probabilities predicted by the model against
        what we'd expect under the no-change null.

        Parameters
        ----------
        y_oof
            Out of fold targets (0 ref, 1 cur)
        probs_oof
            Probabilities predicted by the model
        n_ref
            Size of reference window used in training model
        n_cur
            Size of current window used in training model

        Returns
        -------
        p-value and notion of performance of classifier relative to expectation under null
        """
        probs_oof = probs_oof[:, 1]  # [1-p, p]
        if self.binarize_preds:
            baseline_accuracy = max(n_ref, n_cur) / (n_ref + n_cur)  # exp under null
            n_oof = y_oof.shape[0]
            n_correct = (y_oof == probs_oof.round()).sum()
            # NOTE(review): `scipy.stats.binom_test` is deprecated (removed in SciPy>=1.12);
            # migrating to `scipy.stats.binomtest` requires changing the module-level import.
            p_val = binom_test(n_correct, n_oof, baseline_accuracy, alternative='greater')
            accuracy = n_correct / n_oof
            # relative error reduction, in [0,1]
            # e.g. (90% acc -> 99% acc) = 0.9, (50% acc -> 59% acc) = 0.18
            dist = 1 - (1 - accuracy) / (1 - baseline_accuracy)
            dist = max(0, dist)  # below 0 = no evidence for drift
        else:
            probs_ref = probs_oof[y_oof == 0]
            probs_cur = probs_oof[y_oof == 1]
            dist, p_val = ks_2samp(probs_ref, probs_cur, alternative='greater')
        return p_val, dist

    @abstractmethod
    def score(self, x: Union[np.ndarray, list]) \
            -> Tuple[float, float, np.ndarray, np.ndarray, Union[np.ndarray, list], Union[np.ndarray, list]]:
        pass

    def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
                return_distance: bool = True, return_probs: bool = True, return_model: bool = True) \
            -> Dict[str, Dict[str, Union[str, int, float, Callable]]]:
        """
        Predict whether a batch of data has drifted from the reference data.

        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the test.
        return_distance
            Whether to return a notion of strength of the drift.
            K-S test stat if binarize_preds=False, otherwise relative error reduction.
        return_probs
            Whether to return the instance level classifier probabilities for the reference and test data
            (0=reference data, 1=test data). The reference and test instances of the associated
            probabilities are also returned.
        return_model
            Whether to return the updated model trained to discriminate reference and test instances.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the drift prediction and optionally the p-value, performance of the classifier \
        relative to its expectation under the no-change null, the out-of-fold classifier model \
        prediction probabilities on the reference and test data as well as the associated reference \
        and test instances of the out-of-fold predictions, and the trained model.
        """
        # compute drift scores
        p_val, dist, probs_ref, probs_test, x_ref_oof, x_test_oof = self.score(x)
        drift_pred = int(p_val < self.p_val)
        # update reference dataset
        if isinstance(self.update_x_ref, dict) and self.preprocess_fn is not None and self.preprocess_at_init:
            x = self.preprocess_fn(x)
            # TODO: TBD: can `x` ever be a `list` after pre-processing? update_references and downstream functions
            #  don't support list inputs and without the type: ignore[arg-type] mypy complains
        self.x_ref = update_reference(self.x_ref, x, self.n, self.update_x_ref)  # type: ignore[arg-type]
        # used for reservoir sampling
        self.n += len(x)
        # populate drift dict
        cd = concept_drift_dict()
        cd['meta'] = self.meta
        cd['data']['is_drift'] = drift_pred
        if return_p_val:
            cd['data']['p_val'] = p_val
            cd['data']['threshold'] = self.p_val
        if return_distance:
            cd['data']['distance'] = dist
        if return_probs:
            cd['data']['probs_ref'] = probs_ref
            cd['data']['probs_test'] = probs_test
            cd['data']['x_ref_oof'] = x_ref_oof
            cd['data']['x_test_oof'] = x_test_oof
        if return_model:
            cd['data']['model'] = self.model
        return cd
class BaseLearnedKernelDrift(BaseDetector):
    # Trained kernel used in the MMD permutation test; set by the backend-specific subclasses.
    kernel: Union['tf.keras.Model', 'torch.nn.Module']

    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            n_permutations: int = 100,
            train_size: Optional[float] = .75,
            retrain_from_scratch: bool = True,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Base class for the learned kernel-based drift detector.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        n_permutations
            The number of permutations to use in the permutation test once the MMD has been computed.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the kernel.
            The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
        retrain_from_scratch
            Whether the kernel should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        if p_val is None:
            logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')
        # x_ref preprocessing
        self.preprocess_at_init = preprocess_at_init
        self.x_ref_preprocessed = x_ref_preprocessed
        if preprocess_fn is not None and not isinstance(preprocess_fn, Callable):  # type: ignore[arg-type]
            raise ValueError("`preprocess_fn` is not a valid Callable.")
        if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
            self.x_ref = preprocess_fn(x_ref)
        else:
            self.x_ref = x_ref
        # Other attributes
        self.p_val = p_val
        self.update_x_ref = update_x_ref
        self.preprocess_fn = preprocess_fn
        self.n = len(x_ref)
        self.n_permutations = n_permutations
        self.train_size = train_size
        self.retrain_from_scratch = retrain_from_scratch
        # store input shape for save and load functionality
        self.input_shape = get_input_shape(input_shape, x_ref)
        # set metadata
        self.meta['detector_type'] = 'drift'
        self.meta['data_type'] = data_type
        self.meta['online'] = False

    def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[Union[np.ndarray, list], Union[np.ndarray, list]]:
        """
        Data preprocessing before computing the drift scores.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        Preprocessed reference data and new instances.
        """
        if self.preprocess_fn is not None:
            x = self.preprocess_fn(x)
            # Only preprocess x_ref on the fly if it wasn't already done at init time.
            if not self.preprocess_at_init and not self.x_ref_preprocessed:
                x_ref = self.preprocess_fn(self.x_ref)
            else:
                x_ref = self.x_ref
            return x_ref, x
        else:
            return self.x_ref, x

    def get_splits(self, x_ref: Union[np.ndarray, list], x: Union[np.ndarray, list]) \
            -> Tuple[Tuple[Union[np.ndarray, list], Union[np.ndarray, list]],
                     Tuple[Union[np.ndarray, list], Union[np.ndarray, list]]]:
        """
        Split reference and test data into two splits -- one of which to learn test locations
        and parameters and one to use for tests.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        x
            Batch of instances.

        Returns
        -------
        Tuple containing split train data and tuple containing split test data.
        """
        # Shuffle each sample independently, then take the first `train_size` fraction
        # for kernel training and the remainder for the actual test.
        n_ref, n_cur = len(x_ref), len(x)
        perm_ref, perm_cur = np.random.permutation(n_ref), np.random.permutation(n_cur)
        idx_ref_tr, idx_ref_te = perm_ref[:int(n_ref * self.train_size)], perm_ref[int(n_ref * self.train_size):]
        idx_cur_tr, idx_cur_te = perm_cur[:int(n_cur * self.train_size)], perm_cur[int(n_cur * self.train_size):]
        if isinstance(x_ref, np.ndarray):
            x_ref_tr, x_ref_te = x_ref[idx_ref_tr], x_ref[idx_ref_te]
            x_cur_tr, x_cur_te = x[idx_cur_tr], x[idx_cur_te]
        elif isinstance(x, list):
            x_ref_tr, x_ref_te = [x_ref[_] for _ in idx_ref_tr], [x_ref[_] for _ in idx_ref_te]
            x_cur_tr, x_cur_te = [x[_] for _ in idx_cur_tr], [x[_] for _ in idx_cur_te]
        else:
            raise TypeError(f'x needs to be of type np.ndarray or list and not {type(x)}.')
        return (x_ref_tr, x_cur_tr), (x_ref_te, x_cur_te)

    @abstractmethod
    def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
        pass

    def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
                return_distance: bool = True, return_kernel: bool = True) \
            -> Dict[str, Dict[str, Union[int, float, Callable]]]:
        # NOTE: return annotation corrected from `Dict[Dict[str, str], ...]` -- a Dict is
        # not a valid key type; the returned dict maps 'meta'/'data' strings to sub-dicts.
        """
        Predict whether a batch of data has drifted from the reference data.

        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the permutation test.
        return_distance
            Whether to return the MMD metric between the new batch and reference data.
        return_kernel
            Whether to return the updated kernel trained to discriminate reference and test instances.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the detector's metadata.
        - ``'data'`` contains the drift prediction and optionally the p-value, threshold, MMD metric and \
        trained kernel.
        """
        # compute drift scores
        p_val, dist, distance_threshold = self.score(x)
        drift_pred = int(p_val < self.p_val)
        # update reference dataset
        if isinstance(self.update_x_ref, dict) and self.preprocess_fn is not None and self.preprocess_at_init:
            x = self.preprocess_fn(x)
        self.x_ref = update_reference(self.x_ref, x, self.n, self.update_x_ref)  # type: ignore[arg-type]
        # used for reservoir sampling
        self.n += len(x)
        # populate drift dict
        cd = concept_drift_dict()
        cd['meta'] = self.meta
        cd['data']['is_drift'] = drift_pred
        if return_p_val:
            cd['data']['p_val'] = p_val
            cd['data']['threshold'] = self.p_val
        if return_distance:
            cd['data']['distance'] = dist
            cd['data']['distance_threshold'] = distance_threshold
        if return_kernel:
            cd['data']['kernel'] = self.kernel
        return cd
class BaseMMDDrift(BaseDetector):
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            sigma: Optional[np.ndarray] = None,
            configure_kernel_from_x_ref: bool = True,
            n_permutations: int = 100,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Maximum Mean Discrepancy (MMD) base data drift detector using a permutation test.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the permutation test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        sigma
            Optionally set the Gaussian RBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
            The kernel evaluation is then averaged over those bandwidths.
        configure_kernel_from_x_ref
            Whether to already configure the kernel bandwidth from the reference data.
        n_permutations
            Number of permutations used in the permutation test.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        if p_val is None:
            logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')
        # An explicit `sigma` takes priority over inferring the bandwidth from x_ref.
        self.infer_sigma = configure_kernel_from_x_ref
        if configure_kernel_from_x_ref and isinstance(sigma, np.ndarray):
            self.infer_sigma = False
            logger.warning('`sigma` is specified for the kernel and `configure_kernel_from_x_ref` '
                           'is set to True. `sigma` argument takes priority over '
                           '`configure_kernel_from_x_ref` (set to False).')
        # x_ref preprocessing
        self.preprocess_at_init = preprocess_at_init
        self.x_ref_preprocessed = x_ref_preprocessed
        if preprocess_fn is not None and not isinstance(preprocess_fn, Callable):  # type: ignore[arg-type]
            raise ValueError("`preprocess_fn` is not a valid Callable.")
        if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
            self.x_ref = preprocess_fn(x_ref)
        else:
            self.x_ref = x_ref
        # Other attributes
        self.p_val = p_val
        self.update_x_ref = update_x_ref
        self.preprocess_fn = preprocess_fn
        self.n = len(x_ref)
        self.n_permutations = n_permutations  # nb of iterations through permutation test
        # store input shape for save and load functionality
        self.input_shape = get_input_shape(input_shape, x_ref)
        # set metadata
        self.meta.update({'detector_type': 'drift', 'online': False, 'data_type': data_type})

    def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[np.ndarray, np.ndarray]:
        """
        Data preprocessing before computing the drift scores.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        Preprocessed reference data and new instances.
        """
        if self.preprocess_fn is not None:
            x = self.preprocess_fn(x)
            # Only preprocess x_ref on the fly if it wasn't already done at init time.
            if not self.preprocess_at_init and not self.x_ref_preprocessed:
                x_ref = self.preprocess_fn(self.x_ref)
                # TODO: TBD: similar to above, can x be a list here? x_ref is also revealed to be Any,
                #  can we tighten the type up (e.g. by typing Callable with stricter inputs/outputs?
            else:
                x_ref = self.x_ref
            return x_ref, x  # type: ignore[return-value]
        else:
            return self.x_ref, x  # type: ignore[return-value]

    @abstractmethod
    def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
        pass

    def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_distance: bool = True) \
            -> Dict[str, Dict[str, Union[int, float]]]:
        # NOTE: return annotation corrected from `Dict[Dict[str, str], ...]` -- a Dict is
        # not a valid key type; the returned dict maps 'meta'/'data' strings to sub-dicts.
        """
        Predict whether a batch of data has drifted from the reference data.

        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the permutation test.
        return_distance
            Whether to return the MMD metric between the new batch and reference data.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the drift prediction and optionally the p-value, threshold and MMD metric.
        """
        # compute drift scores
        p_val, dist, distance_threshold = self.score(x)
        drift_pred = int(p_val < self.p_val)
        # update reference dataset
        if isinstance(self.update_x_ref, dict) and self.preprocess_fn is not None and self.preprocess_at_init:
            x = self.preprocess_fn(x)
        self.x_ref = update_reference(self.x_ref, x, self.n, self.update_x_ref)  # type: ignore[arg-type]
        # used for reservoir sampling
        self.n += len(x)
        # populate drift dict
        cd = concept_drift_dict()
        cd['meta'] = self.meta
        cd['data']['is_drift'] = drift_pred
        if return_p_val:
            cd['data']['p_val'] = p_val
            cd['data']['threshold'] = self.p_val
        if return_distance:
            cd['data']['distance'] = dist
            cd['data']['distance_threshold'] = distance_threshold
        return cd
class BaseLSDDDrift(BaseDetector):
    """
    Base class for Least-Squares Density Difference (LSDD) drift detectors.
    Backend-specific subclasses implement :meth:`score`; this class manages the
    reference data, preprocessing and assembly of the prediction dictionary.
    """
    # TODO: TBD: this is only created when _configure_normalization is called from backend-specific classes,
    # is declaring it here the right thing to do?
    _normalize: Callable
    _unnormalize: Callable

    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            sigma: Optional[np.ndarray] = None,
            n_permutations: int = 100,
            n_kernel_centers: Optional[int] = None,
            lambda_rd_max: float = 0.2,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Least-squares Density Difference (LSDD) base data drift detector using a permutation test.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the permutation test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        sigma
            Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
            bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
            is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
            between reference samples.
        n_permutations
            Number of permutations used in the permutation test.
        n_kernel_centers
            The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
            Defaults to 1/20th of the reference data.
        lambda_rd_max
            The maximum relative difference between two estimates of LSDD that the regularization parameter
            lambda is allowed to cause. Defaults to 0.2 as in the paper.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        if p_val is None:
            logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')

        # x_ref preprocessing: either done once here, or deferred to prediction time
        self.preprocess_at_init = preprocess_at_init
        self.x_ref_preprocessed = x_ref_preprocessed
        if preprocess_fn is not None and not isinstance(preprocess_fn, Callable):  # type: ignore[arg-type]
            raise ValueError("`preprocess_fn` is not a valid Callable.")
        if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
            self.x_ref = preprocess_fn(x_ref)
        else:
            self.x_ref = x_ref

        # Other attributes
        self.p_val = p_val
        self.sigma = sigma
        self.update_x_ref = update_x_ref
        self.preprocess_fn = preprocess_fn
        self.n = len(x_ref)
        self.n_permutations = n_permutations  # nb of iterations through permutation test
        # default number of kernel centres: 1/20th of the reference data, at least one
        self.n_kernel_centers = n_kernel_centers or max(self.n // 20, 1)
        self.lambda_rd_max = lambda_rd_max

        # store input shape for save and load functionality
        self.input_shape = get_input_shape(input_shape, x_ref)

        # set metadata
        self.meta.update({'detector_type': 'drift', 'online': False, 'data_type': data_type})

    def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[np.ndarray, np.ndarray]:
        """
        Data preprocessing before computing the drift scores.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        Preprocessed reference data and new instances.
        """
        if self.preprocess_fn is not None:
            x = self.preprocess_fn(x)
            # reference data may still need preprocessing if it was deferred at init
            if not self.preprocess_at_init and not self.x_ref_preprocessed:
                x_ref = self.preprocess_fn(self.x_ref)
            else:
                x_ref = self.x_ref
            return x_ref, x  # type: ignore[return-value]
        else:
            return self.x_ref, x  # type: ignore[return-value]

    @abstractmethod
    def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
        """
        Backend-specific drift score: returns the p-value, LSDD estimate and the
        LSDD threshold above which drift is flagged.
        """
        pass

    def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_distance: bool = True) \
            -> Dict[Dict[str, str], Dict[str, Union[int, float]]]:
        """
        Predict whether a batch of data has drifted from the reference data.

        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the permutation test.
        return_distance
            Whether to return the LSDD metric between the new batch and reference data.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.

         - ``'meta'`` has the model's metadata.

         - ``'data'`` contains the drift prediction and optionally the p-value, threshold and LSDD metric.
        """
        # compute drift scores
        p_val, dist, distance_threshold = self.score(x)
        drift_pred = int(p_val < self.p_val)

        # update reference dataset; the stored reference is normalized, so new
        # instances are normalized consistently before they are merged in
        if isinstance(self.update_x_ref, dict):
            if self.preprocess_fn is not None and self.preprocess_at_init:
                x = self.preprocess_fn(x)
                x = self._normalize(x)
            elif self.preprocess_fn is None:
                x = self._normalize(x)
            else:
                # NOTE(review): x is stored as-is here (preprocessing deferred) — confirm intended
                pass
        self.x_ref = update_reference(self.x_ref, x, self.n, self.update_x_ref)  # type: ignore[arg-type]
        # used for reservoir sampling
        self.n += len(x)

        # populate drift dict
        cd = concept_drift_dict()
        cd['meta'] = self.meta
        cd['data']['is_drift'] = drift_pred
        if return_p_val:
            cd['data']['p_val'] = p_val
            cd['data']['threshold'] = self.p_val
        if return_distance:
            cd['data']['distance'] = dist
            cd['data']['distance_threshold'] = distance_threshold
        return cd
class BaseUnivariateDrift(BaseDetector, DriftConfigMixin):
    """
    Base class for drift detectors built from feature-wise univariate statistical
    tests. Subclasses implement :meth:`feature_score`; this class handles multivariate
    correction (Bonferroni / FDR), reference-data management and preprocessing.
    """

    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            correction: str = 'bonferroni',
            n_features: Optional[int] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Generic drift detector component which serves as a base class for methods using
        univariate tests. If n_features > 1, a multivariate correction is applied such that
        the false positive rate is upper bounded by the specified p-value, with equality in
        the case of independent features.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for significance of the statistical test for each feature. If the FDR correction method
            is used, this corresponds to the acceptable q-value.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
            Typically a dimensionality reduction technique.
        correction
            Correction type for multivariate data. Either 'bonferroni' or 'fdr' (False Discovery Rate).
        n_features
            Number of features used in the statistical test. No need to pass it if no preprocessing takes place.
            In case of a preprocessing step, this can also be inferred automatically but could be more
            expensive to compute.
        input_shape
            Shape of input data. Needs to be provided for text data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        if p_val is None:
            logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')

        # x_ref preprocessing: either done once here, or deferred to prediction time
        self.preprocess_at_init = preprocess_at_init
        self.x_ref_preprocessed = x_ref_preprocessed
        if preprocess_fn is not None and not isinstance(preprocess_fn, Callable):  # type: ignore[arg-type]
            raise ValueError("`preprocess_fn` is not a valid Callable.")
        if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
            self.x_ref = preprocess_fn(x_ref)
        else:
            self.x_ref = x_ref

        # Other attributes
        self.p_val = p_val
        self.update_x_ref = update_x_ref
        self.preprocess_fn = preprocess_fn
        self.correction = correction
        self.n = len(x_ref)

        # store input shape for save and load functionality
        self.input_shape = get_input_shape(input_shape, x_ref)

        # compute number of features for the univariate tests
        if isinstance(n_features, int):
            self.n_features = n_features
        elif not isinstance(preprocess_fn, Callable) or preprocess_at_init or x_ref_preprocessed:
            # infer features from preprocessed reference data
            self.n_features = self.x_ref.reshape(self.x_ref.shape[0], -1).shape[-1]
        else:  # infer number of features after applying preprocessing step
            x = self.preprocess_fn(x_ref[0:1])
            self.n_features = x.reshape(x.shape[0], -1).shape[-1]

        if correction not in ['bonferroni', 'fdr'] and self.n_features > 1:
            raise ValueError('Only `bonferroni` and `fdr` are acceptable for multivariate correction.')

        # set metadata
        self.meta['online'] = False  # offline refers to fitting the CDF for K-S
        self.meta['data_type'] = data_type
        self.meta['detector_type'] = 'drift'

    def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[np.ndarray, np.ndarray]:
        """
        Data preprocessing before computing the drift scores.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        Preprocessed reference data and new instances.
        """
        if self.preprocess_fn is not None:
            x = self.preprocess_fn(x)
            # reference data may still need preprocessing if it was deferred at init
            if not self.preprocess_at_init and not self.x_ref_preprocessed:
                x_ref = self.preprocess_fn(self.x_ref)
            else:
                x_ref = self.x_ref
            return x_ref, x  # type: ignore[return-value]
        else:
            return self.x_ref, x  # type: ignore[return-value]

    @abstractmethod
    def feature_score(self, x_ref: np.ndarray, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Backend/test-specific feature-wise score: returns per-feature p-values and
        test statistics. Implemented by subclasses.
        """
        pass

    def score(self, x: Union[np.ndarray, list]) -> Tuple[np.ndarray, np.ndarray]:
        """
        Compute the feature-wise drift score which is the p-value of the
        statistical test and the test statistic.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        Feature level p-values and test statistics.
        """
        x_ref, x = self.preprocess(x)
        score, dist = self.feature_score(x_ref, x)  # feature-wise univariate test
        return score, dist

    def predict(self, x: Union[np.ndarray, list], drift_type: str = 'batch',
                return_p_val: bool = True, return_distance: bool = True) \
            -> Dict[Dict[str, str], Dict[str, Union[np.ndarray, int, float]]]:
        """
        Predict whether a batch of data has drifted from the reference data.

        Parameters
        ----------
        x
            Batch of instances.
        drift_type
            Predict drift at the 'feature' or 'batch' level. For 'batch', the test statistics for
            each feature are aggregated using the Bonferroni or False Discovery Rate correction (if n_features>1).
        return_p_val
            Whether to return feature level p-values.
        return_distance
            Whether to return the test statistic between the features of the new batch and reference data.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.

         - ``'meta'`` has the model's metadata.

         - ``'data'`` contains the drift prediction and optionally the feature level p-values, threshold after \
        multivariate correction if needed and test statistics.
        """
        # compute drift scores
        p_vals, dist = self.score(x)

        # TODO: return both feature-level and batch-level drift predictions by default
        # values below p-value threshold are drift
        if drift_type == 'feature':
            drift_pred = (p_vals < self.p_val).astype(int)
        elif drift_type == 'batch' and self.correction == 'bonferroni':
            # Bonferroni: divide the p-value threshold by the number of features
            threshold = self.p_val / self.n_features
            drift_pred = int((p_vals < threshold).any())  # type: ignore[assignment]
        elif drift_type == 'batch' and self.correction == 'fdr':
            drift_pred, threshold = fdr(p_vals, q_val=self.p_val)  # type: ignore[assignment]
        else:
            raise ValueError('`drift_type` needs to be either `feature` or `batch`.')

        # update reference dataset
        if isinstance(self.update_x_ref, dict) and self.preprocess_fn is not None and self.preprocess_at_init:
            x = self.preprocess_fn(x)
        self.x_ref = update_reference(self.x_ref, x, self.n, self.update_x_ref)  # type: ignore[arg-type]
        # used for reservoir sampling
        self.n += len(x)

        # populate drift dict
        cd = concept_drift_dict()
        cd['meta'] = self.meta
        cd['data']['is_drift'] = drift_pred
        if return_p_val:
            cd['data']['p_val'] = p_vals
            # feature-level predictions use the raw p-value; batch-level the corrected threshold
            cd['data']['threshold'] = self.p_val if drift_type == 'feature' else threshold
        if return_distance:
            cd['data']['distance'] = dist
        return cd
class BaseContextMMDDrift(BaseDetector):
    """
    Base class for context-aware Maximum Mean Discrepancy (MMD) drift detectors.
    Backend-specific subclasses implement :meth:`score`; this class manages the
    reference data/context pair, preprocessing and the prediction dictionary.
    """
    # tuned regularisation parameters; set lazily by the backend implementations
    lams: Optional[Tuple[Any, Any]] = None

    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            c_ref: np.ndarray,
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            x_kernel: Optional[Callable] = None,
            c_kernel: Optional[Callable] = None,
            n_permutations: int = 1000,
            prop_c_held: float = 0.25,
            n_folds: int = 5,
            batch_size: Optional[int] = 256,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None,
            verbose: bool = False,
    ) -> None:
        """
        Maximum Mean Discrepancy (MMD) based context aware drift detector.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        c_ref
            Context for the reference distribution.
        p_val
            p-value used for the significance of the permutation test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_ref
            Reference data can optionally be updated to the last N instances seen by the detector.
            The parameter should be passed as a dictionary *{'last': N}*.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        x_kernel
            Kernel defined on the input data, defaults to Gaussian RBF kernel.
        c_kernel
            Kernel defined on the context data, defaults to Gaussian RBF kernel.
        n_permutations
            Number of permutations used in the permutation test.
        prop_c_held
            Proportion of contexts held out to condition on.
        n_folds
            Number of cross-validation folds used when tuning the regularisation parameters.
        batch_size
            If not None, then compute batches of MMDs at a time (rather than all at once).
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        verbose
            Whether or not to print progress during configuration.
        """
        super().__init__()
        if p_val is None:
            logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')

        # x_ref preprocessing: either done once here, or deferred to prediction time
        self.preprocess_at_init = preprocess_at_init
        self.x_ref_preprocessed = x_ref_preprocessed
        if preprocess_fn is not None and not isinstance(preprocess_fn, Callable):  # type: ignore[arg-type]
            raise ValueError("`preprocess_fn` is not a valid Callable.")
        if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
            self.x_ref = preprocess_fn(x_ref)
        else:
            self.x_ref = x_ref

        # Other attributes
        self.p_val = p_val
        self.preprocess_fn = preprocess_fn
        self.n = len(x_ref)
        self.n_permutations = n_permutations  # nb of iterations through permutation test
        self.x_kernel = x_kernel
        self.c_kernel = c_kernel

        # each reference instance must have a matching context
        if len(c_ref) == self.n:
            self.c_ref = c_ref
        else:
            raise ValueError('x_ref and c_ref should contain the same number of instances.')

        # store input shape for save and load functionality
        self.input_shape = get_input_shape(input_shape, x_ref)

        # Regularisation parameter tuning settings
        if n_folds > 1:
            self.n_folds = n_folds
        else:
            raise ValueError('The `n_folds` parameter must be > 1.')
        self.lams = None

        # Update ref attribute. Disallow reservoir sampling for this detector.
        self.update_ref = update_ref
        if update_ref is not None:
            if 'reservoir_sampling' in update_ref.keys():
                raise ValueError("The BaseContextMMDDrift detector doesn't currently support the `reservoir_sampling` "
                                 "option in `update_ref`.")

        # Other attributes
        self.prop_c_held = prop_c_held
        self.batch_size = batch_size
        self.verbose = verbose

        # set metadata
        self.meta.update({'detector_type': 'drift', 'online': False, 'data_type': data_type})

    def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[np.ndarray, np.ndarray]:
        """
        Data preprocessing before computing the drift scores.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        Preprocessed reference data and new instances.
        """
        if self.preprocess_fn is not None:
            x = self.preprocess_fn(x)
            # reference data may still need preprocessing if it was deferred at init
            if not self.preprocess_at_init and not self.x_ref_preprocessed:
                x_ref = self.preprocess_fn(self.x_ref)
            else:
                x_ref = self.x_ref
            return x_ref, x  # type: ignore[return-value]
        else:
            return self.x_ref, x  # type: ignore[return-value]

    @abstractmethod
    def score(self,  # type: ignore[override]
              x: Union[np.ndarray, list], c: np.ndarray) -> Tuple[float, float, float, Tuple]:
        """
        Backend-specific conditional MMD score: returns the p-value, the test statistic,
        the statistic threshold and the coupling matrices. Implemented by subclasses.
        """
        pass

    def predict(self,  # type: ignore[override]
                x: Union[np.ndarray, list], c: np.ndarray,
                return_p_val: bool = True, return_distance: bool = True, return_coupling: bool = False) \
            -> Dict[Dict[str, str], Dict[str, Union[int, float]]]:
        """
        Predict whether a batch of data has drifted from the reference data, given the provided context.

        Parameters
        ----------
        x
            Batch of instances.
        c
            Context associated with batch of instances.
        return_p_val
            Whether to return the p-value of the permutation test.
        return_distance
            Whether to return the conditional MMD test statistic between the new batch and reference data.
        return_coupling
            Whether to return the coupling matrices.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.

         - ``'meta'`` has the model's metadata.

         - ``'data'`` contains the drift prediction and optionally the p-value, threshold, conditional MMD test \
        statistic and coupling matrices.
        """
        # compute drift scores
        p_val, dist, distance_threshold, coupling = self.score(x, c)
        drift_pred = int(p_val < self.p_val)

        # update reference dataset; both data and contexts are kept in sync
        if isinstance(self.update_ref, dict) and self.preprocess_fn is not None and self.preprocess_at_init:
            x = self.preprocess_fn(x)
        self.x_ref = update_reference(self.x_ref, x, self.n, self.update_ref)  # type: ignore[arg-type]
        self.c_ref = update_reference(self.c_ref, c, self.n, self.update_ref)
        # used for reservoir sampling
        self.n += len(x)

        # populate drift dict
        cd = concept_drift_dict()
        cd['meta'] = self.meta
        cd['data']['is_drift'] = drift_pred
        if return_p_val:
            cd['data']['p_val'] = p_val
            cd['data']['threshold'] = self.p_val
        if return_distance:
            cd['data']['distance'] = dist
            cd['data']['distance_threshold'] = distance_threshold
        if return_coupling:
            cd['data']['coupling_xx'] = coupling[0]
            cd['data']['coupling_yy'] = coupling[1]
            cd['data']['coupling_xy'] = coupling[2]
        return cd
| 52,690 | 41.94295 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/learned_kernel.py | import numpy as np
from typing import Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, has_keops, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from torch.utils.data import DataLoader
from alibi_detect.cd.pytorch.learned_kernel import LearnedKernelDriftTorch
from alibi_detect.utils.pytorch.data import TorchDataset
if has_tensorflow:
from alibi_detect.cd.tensorflow.learned_kernel import LearnedKernelDriftTF
from alibi_detect.utils.tensorflow.data import TFDataset
if has_keops:
from alibi_detect.cd.keops.learned_kernel import LearnedKernelDriftKeops
class LearnedKernelDrift(DriftConfigMixin):
    """
    Dispatching wrapper around the backend-specific learned-kernel MMD drift
    detectors. Selects the TensorFlow, PyTorch or KeOps implementation based on
    `backend` and forwards all calls to it.
    """

    @deprecated_alias(preprocess_x_ref='preprocess_at_init')
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            kernel: Callable,
            backend: str = 'tensorflow',
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            n_permutations: int = 100,
            batch_size_permutations: int = 1000000,
            var_reg: float = 1e-5,
            reg_loss_fn: Callable = (lambda kernel: 0),
            train_size: Optional[float] = .75,
            retrain_from_scratch: bool = True,
            optimizer: Optional[Callable] = None,
            learning_rate: float = 1e-3,
            batch_size: int = 32,
            batch_size_predict: int = 32,
            preprocess_batch_fn: Optional[Callable] = None,
            epochs: int = 3,
            num_workers: int = 0,
            verbose: int = 0,
            train_kwargs: Optional[dict] = None,
            device: Optional[str] = None,
            dataset: Optional[Callable] = None,
            dataloader: Optional[Callable] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Maximum Mean Discrepancy (MMD) data drift detector where the kernel is trained to maximise an
        estimate of the test power. The kernel is trained on a split of the reference and test instances
        and then the MMD is evaluated on held out instances and a permutation test is performed.

        For details see Liu et al (2020): Learning Deep Kernels for Non-Parametric Two-Sample Tests
        (https://arxiv.org/abs/2002.09116)

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        kernel
            Trainable PyTorch or TensorFlow module that returns a similarity between two instances.
        backend
            Backend used by the kernel and training loop.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before applying the kernel.
        n_permutations
            The number of permutations to use in the permutation test once the MMD has been computed.
        batch_size_permutations
            KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations.
            Only relevant for 'keops' backend.
        var_reg
            Constant added to the estimated variance of the MMD for stability.
        reg_loss_fn
            The regularisation term reg_loss_fn(kernel) is added to the loss function being optimized.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the kernel.
            The drift is detected on `1 - train_size`.
        retrain_from_scratch
            Whether the kernel should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        optimizer
            Optimizer used during training of the kernel.
        learning_rate
            Learning rate used by optimizer.
        batch_size
            Batch size used during training of the kernel.
        batch_size_predict
            Batch size used for the trained drift detector predictions.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the kernel.
        epochs
            Number of training epochs for the kernel. Corresponds to the smaller of the reference and test sets.
        num_workers
            Number of workers for the dataloader. The default (`num_workers=0`) means multi-process data loading
            is disabled. Setting `num_workers>0` may be unreliable on Windows.
        verbose
            Verbosity level during the training of the kernel. 0 is silent, 1 a progress bar.
        train_kwargs
            Optional additional kwargs when training the kernel.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Relevant for 'pytorch' and 'keops' backends.
        dataset
            Dataset object used during training.
        dataloader
            Dataloader object used during training. Relevant for 'pytorch' and 'keops' backends.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        # Set config
        self._set_config(locals())

        backend = backend.lower()
        BackendValidator(
            backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
                             Framework.PYTORCH: [Framework.PYTORCH],
                             Framework.KEOPS: [Framework.KEOPS]},
            construct_name=self.__class__.__name__
        ).verify_backend(backend)

        # Build the backend constructor kwargs from this frame's locals, removing
        # arguments that are either positional or not accepted by the backend.
        kwargs = locals()
        args = [kwargs['x_ref'], kwargs['kernel']]
        pop_kwargs = ['self', 'x_ref', 'kernel', 'backend', '__class__']
        if kwargs['optimizer'] is None:
            # let the backend fall back to its own default optimizer
            pop_kwargs += ['optimizer']
        for k in pop_kwargs:
            kwargs.pop(k, None)

        if backend == Framework.TENSORFLOW:
            # TF backend takes neither device/dataloader nor KeOps-specific options
            for k in ['device', 'dataloader', 'batch_size_permutations', 'num_workers']:
                kwargs.pop(k, None)
            if dataset is None:
                kwargs.update({'dataset': TFDataset})
            detector = LearnedKernelDriftTF
        else:
            if dataset is None:
                kwargs.update({'dataset': TorchDataset})
            if dataloader is None:
                kwargs.update({'dataloader': DataLoader})
            if backend == Framework.PYTORCH:
                # batch_size_permutations is KeOps-only
                kwargs.pop('batch_size_permutations', None)
                detector = LearnedKernelDriftTorch
            else:
                detector = LearnedKernelDriftKeops

        self._detector = detector(*args, **kwargs)
        self.meta = self._detector.meta

    def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
                return_distance: bool = True, return_kernel: bool = True) \
            -> Dict[Dict[str, str], Dict[str, Union[int, float, Callable]]]:
        """
        Predict whether a batch of data has drifted from the reference data.

        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the permutation test.
        return_distance
            Whether to return the MMD metric between the new batch and reference data.
        return_kernel
            Whether to return the updated kernel trained to discriminate reference and test instances.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.

         - ``'meta'`` has the detector's metadata.

         - ``'data'`` contains the drift prediction and optionally the p-value, threshold, MMD metric and \
        trained kernel.
        """
        return self._detector.predict(x, return_p_val, return_distance, return_kernel)
| 9,166 | 45.532995 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/mmd.py | import logging
import numpy as np
from typing import Callable, Dict, Optional, Union, Tuple
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, has_keops, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.mmd import MMDDriftTorch
if has_tensorflow:
from alibi_detect.cd.tensorflow.mmd import MMDDriftTF
if has_keops and has_pytorch:
from alibi_detect.cd.keops.mmd import MMDDriftKeops
logger = logging.getLogger(__name__)
class MMDDrift(DriftConfigMixin):
    """
    Dispatching wrapper around the backend-specific MMD drift detectors. Selects
    the TensorFlow, PyTorch or KeOps implementation based on `backend` and
    forwards all calls to it.
    """

    @deprecated_alias(preprocess_x_ref='preprocess_at_init')
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            backend: str = 'tensorflow',
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            kernel: Optional[Callable] = None,
            sigma: Optional[np.ndarray] = None,
            configure_kernel_from_x_ref: bool = True,
            n_permutations: int = 100,
            batch_size_permutations: int = 1000000,
            device: Optional[str] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Maximum Mean Discrepancy (MMD) data drift detector using a permutation test.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        backend
            Backend used for the MMD implementation.
        p_val
            p-value used for the significance of the permutation test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        kernel
            Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
        sigma
            Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
            The kernel evaluation is then averaged over those bandwidths.
        configure_kernel_from_x_ref
            Whether to already configure the kernel bandwidth from the reference data.
        n_permutations
            Number of permutations used in the permutation test.
        batch_size_permutations
            KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations.
            Only relevant for 'keops' backend.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        # Set config
        self._set_config(locals())

        backend = backend.lower()
        BackendValidator(
            backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
                             Framework.PYTORCH: [Framework.PYTORCH],
                             Framework.KEOPS: [Framework.KEOPS]},
            construct_name=self.__class__.__name__
        ).verify_backend(backend)

        # Build the backend constructor kwargs from this frame's locals, removing
        # arguments that are either positional or not accepted by the backend.
        kwargs = locals()
        args = [kwargs['x_ref']]
        pop_kwargs = ['self', 'x_ref', 'backend', '__class__']
        if backend == Framework.TENSORFLOW:
            # TF backend takes neither a device nor the KeOps-specific batching option
            pop_kwargs += ['device', 'batch_size_permutations']
            detector = MMDDriftTF
        elif backend == Framework.PYTORCH:
            pop_kwargs += ['batch_size_permutations']
            detector = MMDDriftTorch
        else:
            detector = MMDDriftKeops
        for k in pop_kwargs:
            kwargs.pop(k, None)

        if kernel is None:
            # default to the backend's Gaussian RBF kernel
            if backend == Framework.TENSORFLOW:
                from alibi_detect.utils.tensorflow.kernels import GaussianRBF
            elif backend == Framework.PYTORCH:
                from alibi_detect.utils.pytorch.kernels import GaussianRBF  # type: ignore
            else:
                from alibi_detect.utils.keops.kernels import GaussianRBF  # type: ignore
            kwargs.update({'kernel': GaussianRBF})

        self._detector = detector(*args, **kwargs)
        self.meta = self._detector.meta

    def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_distance: bool = True) \
            -> Dict[Dict[str, str], Dict[str, Union[int, float]]]:
        """
        Predict whether a batch of data has drifted from the reference data.

        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the permutation test.
        return_distance
            Whether to return the MMD metric between the new batch and reference data.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.

         - ``'meta'`` has the model's metadata.

         - ``'data'`` contains the drift prediction and optionally the p-value, threshold and MMD metric.
        """
        return self._detector.predict(x, return_p_val, return_distance)

    def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
        """
        Compute the p-value resulting from a permutation test using the maximum mean discrepancy
        as a distance measure between the reference data and the data to be tested.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
        and the MMD^2 threshold above which drift is flagged.
        """
        return self._detector.score(x)
| 6,795 | 41.475 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/base_online.py | import logging
import warnings
from abc import abstractmethod
from typing import Any, Callable, Dict, List, Optional, Union, Tuple, TYPE_CHECKING
import numpy as np
from alibi_detect.base import BaseDetector, concept_drift_dict
from alibi_detect.cd.utils import get_input_shape
from alibi_detect.utils.state import StateMixin
from alibi_detect.utils._types import Literal
if TYPE_CHECKING:
import torch
import tensorflow as tf
logger = logging.getLogger(__name__)
class BaseMultiDriftOnline(BaseDetector, StateMixin):
    """Shared scaffolding for multivariate online drift detectors: threshold lookup,
    online state management and the `predict` loop. Backend-specific subclasses
    implement scoring, threshold configuration and reference-subset selection."""
    # Number of test instances processed so far (online state, reset by `reset_state`).
    t: int = 0
    # Per-timestep drift thresholds, configured via bootstrap simulation in subclasses.
    thresholds: np.ndarray
    backend: Literal['pytorch', 'tensorflow']
    # Names of the stateful attributes persisted by the StateMixin save/load machinery.
    online_state_keys: Tuple[str, ...]
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            ert: float,
            window_size: int,
            preprocess_fn: Optional[Callable] = None,
            x_ref_preprocessed: bool = False,
            n_bootstraps: int = 1000,
            verbose: bool = True,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None,
    ) -> None:
        """
        Base class for multivariate online drift detectors.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        ert
            The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
            as the expected run-time from t=0.
        window_size
            The size of the sliding test-window used to compute the test-statistic.
            Smaller windows focus on responding quickly to severe drift, larger windows focus on
            ability to detect slight drift.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        n_bootstraps
            The number of bootstrap simulations used to configure the thresholds. The larger this is the
            more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
            larger than the ert.
        verbose
            Whether or not to print progress during configuration.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        if ert is None:
            logger.warning('No expected run-time set for the drift threshold. Need to set it to detect data drift.')
        self.ert = ert
        # The ERT is targeted by controlling the per-timestep false positive rate.
        self.fpr = 1 / ert
        self.window_size = window_size
        # x_ref preprocessing
        self.x_ref_preprocessed = x_ref_preprocessed
        if preprocess_fn is not None and not isinstance(preprocess_fn, Callable):  # type: ignore[arg-type]
            raise ValueError("`preprocess_fn` is not a valid Callable.")
        if not self.x_ref_preprocessed and preprocess_fn is not None:
            self.x_ref = preprocess_fn(x_ref)
        else:
            self.x_ref = x_ref
        # Other attributes
        self.preprocess_fn = preprocess_fn
        self.n = len(x_ref)
        self.n_bootstraps = n_bootstraps  # nb of samples used to estimate thresholds
        self.verbose = verbose
        # store input shape for save and load functionality
        self.input_shape = get_input_shape(input_shape, x_ref)
        # set metadata
        self.meta['detector_type'] = 'drift'
        self.meta['data_type'] = data_type
        self.meta['online'] = True
    @abstractmethod
    def _configure_thresholds(self):
        # Backend-specific: simulate the no-drift distribution to populate `self.thresholds`.
        pass
    @abstractmethod
    def _configure_ref_subset(self):
        # Backend-specific: select the reference subset used during online scoring.
        pass
    @abstractmethod
    def _update_state(self, x_t: Union[np.ndarray, 'tf.Tensor', 'torch.Tensor']):
        # Backend-specific: push a single instance into the sliding test-window.
        pass
    def _preprocess_xt(self, x_t: Union[np.ndarray, Any]) -> np.ndarray:
        """
        Private method to preprocess a single test instance ready for _update_state.

        Parameters
        ----------
        x_t
            A single test instance to be preprocessed.

        Returns
        -------
        The preprocessed test instance `x_t`, with a leading batch dimension of 1.
        """
        # preprocess if necessary
        if self.preprocess_fn is not None:
            # Wrap the single instance into a batch of one for the preprocessing function.
            x_t = x_t[None, :] if isinstance(x_t, np.ndarray) else [x_t]
            x_t = self.preprocess_fn(x_t)[0]
        return x_t[None, :]
    def get_threshold(self, t: int) -> float:
        """
        Return the threshold for timestep `t`.

        Parameters
        ----------
        t
            The timestep to return a threshold for.

        Returns
        -------
        The threshold at timestep `t`.
        """
        # Once the test-window is full the threshold no longer depends on t.
        return self.thresholds[t] if t < self.window_size else self.thresholds[-1]
    def _initialise_state(self) -> None:
        """
        Initialise online state (the stateful attributes updated by `score` and `predict`).
        If a subclassed detector has additional online state, an additional `_initialise_state` should be defined,
        with a call to `super()._initialise_state()` included (see `LSDDDriftOnlineTorch._initialise_state()` for
        an example).
        """
        self.t = 0  # corresponds to a test set of ref data
        self.test_stats = np.array([])
        self.drift_preds = np.array([])
    def reset(self) -> None:
        """
        Deprecated reset method. This method will be repurposed or removed in the future. To reset the detector to
        its initial state (`t=0`) use :meth:`reset_state`.
        """
        self.reset_state()
        warnings.warn('This method is deprecated and will be removed/repurposed in the future. To reset the detector '
                      'to its initial state use `reset_state`.', DeprecationWarning)
    def reset_state(self) -> None:
        """
        Resets the detector to its initial state (`t=0`). This does not include reconfiguring thresholds.
        """
        self._initialise_state()
    def predict(self, x_t: Union[np.ndarray, Any], return_test_stat: bool = True,
                ) -> Dict[Dict[str, str], Dict[str, Union[int, float]]]:
        """
        Predict whether the most recent window of data has drifted from the reference data.

        Parameters
        ----------
        x_t
            A single instance to be added to the test-window.
        return_test_stat
            Whether to return the test statistic and threshold.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the drift prediction and optionally the test-statistic and threshold.
        """
        # Compute test stat and check for drift
        test_stat = self.score(x_t)
        threshold = self.get_threshold(self.t)
        drift_pred = int(test_stat > threshold)
        # Append to the running history of statistics and predictions (online state).
        self.test_stats = np.concatenate([self.test_stats, np.array([test_stat])])
        self.drift_preds = np.concatenate([self.drift_preds, np.array([drift_pred])])
        # populate drift dict
        cd = concept_drift_dict()
        cd['meta'] = self.meta
        cd['data']['is_drift'] = drift_pred
        cd['data']['time'] = self.t
        cd['data']['ert'] = self.ert
        if return_test_stat:
            cd['data']['test_stat'] = test_stat
            cd['data']['threshold'] = threshold
        return cd
class BaseUniDriftOnline(BaseDetector, StateMixin):
    """Shared scaffolding for univariate (feature-wise) online drift detectors:
    input checking, threshold lookup, online state management and the `predict`
    loop. Backend-specific subclasses implement scoring and configuration."""
    # Number of test instances processed so far (online state, reset by `reset_state`).
    t: int = 0
    # Per-timestep drift thresholds, configured via bootstrap simulation in subclasses.
    thresholds: np.ndarray
    # Names of the stateful attributes persisted by the StateMixin save/load machinery.
    online_state_keys: Tuple[str, ...]
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            ert: float,
            window_sizes: List[int],
            preprocess_fn: Optional[Callable] = None,
            x_ref_preprocessed: bool = False,
            n_bootstraps: int = 1000,
            n_features: Optional[int] = None,
            verbose: bool = True,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None,
    ) -> None:
        """
        Base class for univariate online drift detectors. If n_features > 1, a multivariate correction is
        used to aggregate p-values during threshold configuration, thus allowing the requested expected run
        time (ERT) to be targeted. The multivariate correction assumes independence between the features.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        ert
            The expected run-time (ERT) in the absence of drift. For the univariate detectors, the ERT is defined
            as the expected run-time after the smallest window is full i.e. the run-time from t=min(windows_sizes)-1.
        window_sizes
            The sizes of the sliding test-windows used to compute the test-statistic.
            Smaller windows focus on responding quickly to severe drift, larger windows focus on
            ability to detect slight drift.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        n_bootstraps
            The number of bootstrap simulations used to configure the thresholds. The larger this is the
            more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
            larger than the ert.
        n_features
            Number of features used in the statistical test. No need to pass it if no preprocessing takes place.
            In case of a preprocessing step, this can also be inferred automatically but could be more
            expensive to compute.
        verbose
            Whether or not to print progress during configuration.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        if ert is None:
            logger.warning('No expected run-time set for the drift threshold. Need to set it to detect data drift.')
        self.ert = ert
        # The ERT is targeted by controlling the per-timestep false positive rate.
        self.fpr = 1 / ert
        # Window sizes
        self.window_sizes = window_sizes
        self.max_ws = np.max(self.window_sizes)
        self.min_ws = np.min(self.window_sizes)
        # x_ref preprocessing
        self.x_ref_preprocessed = x_ref_preprocessed
        if preprocess_fn is not None and not isinstance(preprocess_fn, Callable):  # type: ignore[arg-type]
            raise ValueError("`preprocess_fn` is not a valid Callable.")
        if not self.x_ref_preprocessed and preprocess_fn is not None:
            self.x_ref = preprocess_fn(x_ref)
        else:
            self.x_ref = x_ref
        # Check the (optionally preprocessed) x_ref data is a 2D ndarray
        self.x_ref = self._check_x(self.x_ref, x_ref=True)
        # Other attributes
        self.preprocess_fn = preprocess_fn
        self.n = len(x_ref)
        self.n_bootstraps = n_bootstraps  # nb of samples used to estimate thresholds
        self.verbose = verbose
        # compute number of features for the univariate tests
        if isinstance(n_features, int):
            self.n_features = n_features
        elif not isinstance(preprocess_fn, Callable) or x_ref_preprocessed:
            # infer features from preprocessed reference data
            self.n_features = self.x_ref.reshape(self.x_ref.shape[0], -1).shape[-1]
        else:  # infer number of features after applying preprocessing step
            x = self.preprocess_fn(x_ref[0:1])
            self.n_features = x.reshape(x.shape[0], -1).shape[-1]
        # store input shape for save and load functionality
        self.input_shape = get_input_shape(input_shape, x_ref)
        # set metadata
        self.meta['detector_type'] = 'drift'
        self.meta['data_type'] = data_type
        self.meta['online'] = True
    @abstractmethod
    def _configure_thresholds(self):
        # Backend-specific: simulate the no-drift distribution to populate `self.thresholds`.
        pass
    @abstractmethod
    def _configure_ref(self):
        # Backend-specific: prepare the reference data for online scoring.
        pass
    @abstractmethod
    def _update_state(self, x_t: np.ndarray):
        # Backend-specific: push a single instance into the sliding test-window(s).
        pass
    def _check_x(self, x: Any, x_ref: bool = False) -> np.ndarray:
        """
        Check the type and shape of the data `x`, and coerces it to the correct shape if possible.

        Parameters
        ----------
        x
            The data to be checked.
        x_ref
            Whether `x` is a batch of reference data instances (if `True`), or a single test data instance (if `False`).

        Returns
        -------
        The checked data, coerced to be a np.ndarray of the correct shape.

        Raises
        ------
        TypeError
            If `x` is not a np.ndarray, int or float.
        ValueError
            If a test instance's dimension does not match the reference data's.
        """
        # Check the type of x
        if isinstance(x, np.ndarray):
            pass
        elif isinstance(x, (int, float)):
            x = np.array([x])
        else:
            raise TypeError("Detectors expect data to be 2D np.ndarray's. If data is passed as another type, a "
                            "`preprocess_fn` should be given to convert this data to 2D np.ndarray's.")
        # Check the shape of x
        if x_ref:
            x = x.reshape(x.shape[0], -1)
        else:
            # A test instance is a single row; its feature dimension must match x_ref.
            x = x.reshape(1, -1)
            if x.shape[1] != self.x_ref.shape[1]:
                raise ValueError("Dimensions do not match. `x` has shape (%d,%d) and `x_ref` has shape (%d,%d)."
                                 % (x.shape[0], x.shape[1], self.x_ref.shape[0], self.x_ref.shape[1]))
        return x
    def _preprocess_xt(self, x_t: Union[np.ndarray, Any]) -> np.ndarray:
        """
        Private method to preprocess a single test instance ready for _update_state.

        Parameters
        ----------
        x_t
            A single test instance to be preprocessed.

        Returns
        -------
        The preprocessed test instance `x_t` as a 2D np.ndarray with one row.
        """
        # preprocess if necessary
        if self.preprocess_fn is not None:
            # Wrap the single instance into a batch of one for the preprocessing function.
            x_t = x_t[None, :] if isinstance(x_t, np.ndarray) else [x_t]
            x_t = self.preprocess_fn(x_t)[0]
        # Now check the final data is a 2D ndarray
        x_t = self._check_x(x_t)
        return x_t
    def get_threshold(self, t: int) -> np.ndarray:
        """
        Return the threshold for timestep `t`.

        Parameters
        ----------
        t
            The timestep to return a threshold for.

        Returns
        -------
        The threshold at timestep `t`.
        """
        # Beyond the configured range the last threshold is reused.
        return self.thresholds[t] if t < len(self.thresholds) else self.thresholds[-1]
    def _initialise_state(self) -> None:
        """
        Initialise online state (the stateful attributes updated by `score` and `predict`).
        If a subclassed detector has additional online state, an additional `_initialise_state` should be defined,
        with a call to `super()._initialise_state()` included (see `CVMDriftOnlineTorch._initialise_state()` for
        an example).
        """
        self.t = 0
        self.xs = np.array([])
        # History of test statistics: one entry per timestep, per window size, per feature.
        self.test_stats = np.empty([0, len(self.window_sizes), self.n_features])
        self.drift_preds = np.array([])
    @abstractmethod
    def _check_drift(self, test_stats: np.ndarray, thresholds: np.ndarray) -> int:
        # Backend-specific: aggregate per-window/per-feature statistics into a 0/1 drift call.
        pass
    def reset(self) -> None:
        """
        Deprecated reset method. This method will be repurposed or removed in the future. To reset the detector to
        its initial state (`t=0`) use :meth:`reset_state`.
        """
        self.reset_state()
        warnings.warn('This method is deprecated and will be removed/repurposed in the future. To reset the detector '
                      'to its initial state use `reset_state`.', DeprecationWarning)
    def reset_state(self) -> None:
        """
        Resets the detector to its initial state (`t=0`). This does not include reconfiguring thresholds.
        """
        self._initialise_state()
    def predict(self, x_t: Union[np.ndarray, Any], return_test_stat: bool = True,
                ) -> Dict[Dict[str, str], Dict[str, Union[int, float]]]:
        """
        Predict whether the most recent window(s) of data have drifted from the reference data.

        Parameters
        ----------
        x_t
            A single instance to be added to the test-window(s).
        return_test_stat
            Whether to return the test statistic and threshold.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the drift prediction and optionally the test-statistic and threshold.
        """
        # Compute test stat and check for drift
        test_stats = self.score(x_t)
        thresholds = self.get_threshold(self.t - 1)  # Note t-1 here, as we wish to use the unconditional thresholds
        drift_pred = self._check_drift(test_stats, thresholds)
        # Update results attributes
        self.test_stats = np.concatenate([self.test_stats, test_stats[None, :, :]])
        self.drift_preds = np.concatenate([self.drift_preds, np.array([drift_pred])])
        # populate drift dict
        cd = concept_drift_dict()
        cd['meta'] = self.meta
        cd['data']['is_drift'] = drift_pred
        cd['data']['time'] = self.t
        cd['data']['ert'] = self.ert
        if return_test_stat:
            cd['data']['test_stat'] = test_stats
            cd['data']['threshold'] = thresholds
        return cd
| 17,889 | 37.308351 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/utils.py | import logging
import random
from typing import Callable, Dict, Optional, Tuple, Union
import numpy as np
from alibi_detect.utils.sampling import reservoir_sampling
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
def update_reference(X_ref: np.ndarray,
                     X: np.ndarray,
                     n: int,
                     update_method: Optional[Dict[str, int]] = None,
                     ) -> np.ndarray:
    """
    Update reference dataset for drift detectors.

    Parameters
    ----------
    X_ref
        Current reference dataset.
    X
        New data.
    n
        Count of the total number of instances that have been used so far.
    update_method
        Dict with as key `reservoir_sampling` or `last` and as value n. `reservoir_sampling` will apply
        reservoir sampling with reservoir of size n while `last` will return (at most) the last n instances.
        If `None` (or any non-dict value), the reference dataset is returned unchanged.

    Returns
    -------
    Updated reference dataset.

    Raises
    ------
    KeyError
        If `update_method` contains a key other than `reservoir_sampling` or `last`.
    """
    if isinstance(update_method, dict):
        # Only the first key is considered; the documented contract is a single-entry dict.
        update_type = next(iter(update_method))
        size = update_method[update_type]
        if update_type == 'reservoir_sampling':
            return reservoir_sampling(X_ref, X, size, n)
        elif update_type == 'last':
            X_update = np.concatenate([X_ref, X], axis=0)
            return X_update[-size:]
        else:
            raise KeyError('Only `reservoir_sampling` and `last` are valid update options for X_ref.')
    else:
        # No update requested: keep the current reference data.
        return X_ref
def encompass_batching(
        model: Callable,
        backend: str,
        batch_size: int,
        device: Optional[str] = None,
        preprocess_batch_fn: Optional[Callable] = None,
        tokenizer: Optional[Callable] = None,
        max_len: Optional[int] = None,
) -> Callable:
    """
    Wrap a model that must be evaluated batch-wise (on tokenized input) into a single
    callable that takes care of batching (and tokenization) itself.
    """
    backend = backend.lower()
    call_kwargs = {
        'batch_size': batch_size,
        'tokenizer': tokenizer,
        'max_len': max_len,
        'preprocess_batch_fn': preprocess_batch_fn,
    }
    # Pick the backend-specific batched-prediction helper.
    if backend == Framework.TENSORFLOW:
        from alibi_detect.cd.tensorflow.preprocess import preprocess_drift
    elif backend == Framework.PYTORCH:
        from alibi_detect.cd.pytorch.preprocess import preprocess_drift  # type: ignore[assignment]
        call_kwargs['device'] = device
    else:
        raise NotImplementedError(f'{backend} not implemented. Use tensorflow or pytorch instead.')
    def model_fn(x: Union[np.ndarray, list]) -> np.ndarray:
        return preprocess_drift(x, model, **call_kwargs)  # type: ignore[arg-type]
    return model_fn
def encompass_shuffling_and_batch_filling(
        model_fn: Callable,
        batch_size: int
) -> Callable:
    """
    Wrap a function that already handles batching so that its input is additionally
    shuffled and padded out to a whole number of full batches before evaluation.
    The padding is discarded and the original instance order restored afterwards.
    """
    def new_model_fn(x: Union[np.ndarray, list]) -> np.ndarray:
        as_array = isinstance(x, np.ndarray)
        n_orig = len(x)
        # Shuffle so that any padding duplicates are spread across batches.
        order = np.random.permutation(n_orig)
        if as_array:
            shuffled = x[order]
        else:
            shuffled = [x[i] for i in order]
        # Pad with randomly repeated instances so the final batch is full.
        remainder = n_orig % batch_size
        if remainder != 0:
            pad_inds = random.choices(range(n_orig), k=batch_size - remainder)
            if as_array:
                shuffled = np.concatenate([shuffled, shuffled[pad_inds]], axis=0)
            else:
                shuffled = shuffled + [shuffled[i] for i in pad_inds]
        # Evaluate, drop the padding, then undo the shuffle.
        preds = np.asarray(model_fn(shuffled))[:n_orig]
        return preds[np.argsort(order)]
    return new_model_fn
def get_input_shape(shape: Optional[Tuple], x_ref: Union[np.ndarray, list]) -> Optional[Tuple]:
    """Return `shape` if explicitly given, otherwise infer the per-instance shape from `x_ref`."""
    if isinstance(shape, tuple):
        return shape
    if hasattr(x_ref, 'shape'):
        # Drop the leading batch dimension to obtain the per-instance shape.
        return x_ref.shape[1:]
    logger.warning('Input shape could not be inferred. '
                   'If alibi_detect.models.tensorflow.embedding.TransformerEmbedding '
                   'is used as preprocessing step, a saved detector cannot be reinitialized.')
    return None
| 4,341 | 34.300813 | 108 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/classifier.py | import numpy as np
from typing import Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, \
BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
from sklearn.base import ClassifierMixin
from alibi_detect.cd.sklearn.classifier import ClassifierDriftSklearn
if has_pytorch:
from torch.utils.data import DataLoader
from alibi_detect.cd.pytorch.classifier import ClassifierDriftTorch
from alibi_detect.utils.pytorch.data import TorchDataset
if has_tensorflow:
from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF
from alibi_detect.utils.tensorflow.data import TFDataset
class ClassifierDrift(DriftConfigMixin):
    """Backend-agnostic facade for the classifier-based drift detector: validates the
    requested backend and forwards the constructor arguments to the matching
    tensorflow / pytorch / sklearn implementation."""
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            model: Union[ClassifierMixin, Callable],
            backend: str = 'tensorflow',
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            preds_type: str = 'probs',
            binarize_preds: bool = False,
            reg_loss_fn: Callable = (lambda model: 0),
            train_size: Optional[float] = .75,
            n_folds: Optional[int] = None,
            retrain_from_scratch: bool = True,
            seed: int = 0,
            optimizer: Optional[Callable] = None,
            learning_rate: float = 1e-3,
            batch_size: int = 32,
            preprocess_batch_fn: Optional[Callable] = None,
            epochs: int = 3,
            verbose: int = 0,
            train_kwargs: Optional[dict] = None,
            device: Optional[str] = None,
            dataset: Optional[Callable] = None,
            dataloader: Optional[Callable] = None,
            input_shape: Optional[tuple] = None,
            use_calibration: bool = False,
            calibration_kwargs: Optional[dict] = None,
            use_oob: bool = False,
            data_type: Optional[str] = None
    ) -> None:
        """
        Classifier-based drift detector. The classifier is trained on a fraction of the combined
        reference and test data and drift is detected on the remaining data. To use all the data
        to detect drift, a stratified cross-validation scheme can be chosen.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        model
            PyTorch, TensorFlow or Sklearn classification model used for drift detection.
        backend
            Backend used for the training loop implementation. Supported: 'tensorflow' | 'pytorch' | 'sklearn'.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last `n` instances seen by the detector
            or via reservoir sampling with size `n`. For the former, the parameter equals `{'last': n}` while
            for reservoir sampling `{'reservoir_sampling': n}` is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        preds_type
            Whether the model outputs 'probs' (probabilities - for 'tensorflow', 'pytorch', 'sklearn' models),
            'logits' (for 'pytorch', 'tensorflow' models), 'scores' (for 'sklearn' models if `decision_function`
            is supported).
        binarize_preds
            Whether to test for discrepancy on soft (e.g. probs/logits/scores) model predictions directly
            with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
        reg_loss_fn
            The regularisation term `reg_loss_fn(model)` is added to the loss function being optimized.
            Only relevant for 'tensorflow` and 'pytorch' backends.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
            The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
        n_folds
            Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold instances. This allows to leverage all the reference and test data
            for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
            are specified, `n_folds` is prioritized.
        retrain_from_scratch
            Whether the classifier should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        seed
            Optional random seed for fold selection.
        optimizer
            Optimizer used during training of the classifier. Only relevant for 'tensorflow' and 'pytorch' backends.
        learning_rate
            Learning rate used by optimizer. Only relevant for 'tensorflow' and 'pytorch' backends.
        batch_size
            Batch size used during training of the classifier. Only relevant for 'tensorflow' and 'pytorch' backends.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the model. Only relevant for 'tensorflow' and 'pytorch' backends.
        epochs
            Number of training epochs for the classifier for each (optional) fold. Only relevant for 'tensorflow'
            and 'pytorch' backends.
        verbose
            Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar. Only relevant for
            'tensorflow' and 'pytorch' backends.
        train_kwargs
            Optional additional kwargs when fitting the classifier. Only relevant for 'tensorflow' and
            'pytorch' backends.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        dataset
            Dataset object used during training. Only relevant for 'tensorflow' and 'pytorch' backends.
        dataloader
            Dataloader object used during training. Only relevant for 'pytorch' backend.
        input_shape
            Shape of input data.
        use_calibration
            Whether to use calibration. Calibration can be used on top of any model.
            Only relevant for 'sklearn' backend.
        calibration_kwargs
            Optional additional kwargs for calibration. Only relevant for 'sklearn' backend.
            See https://scikit-learn.org/stable/modules/generated/sklearn.calibration.CalibratedClassifierCV.html
            for more details.
        use_oob
            Whether to use out-of-bag(OOB) predictions. Supported only for `RandomForestClassifier`.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        # Set config
        self._set_config(locals())
        backend = backend.lower()
        BackendValidator(
            backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
                             Framework.PYTORCH: [Framework.PYTORCH],
                             Framework.SKLEARN: [Framework.SKLEARN]},
            construct_name=self.__class__.__name__
        ).verify_backend(backend)
        # `locals()` captures every constructor argument so they can be forwarded
        # verbatim to the backend-specific detector. Do not rename locals here.
        kwargs = locals()
        args = [kwargs['x_ref'], kwargs['model']]
        # Drop names that are positional args or not accepted by the backend detectors.
        pop_kwargs = ['self', 'x_ref', 'model', 'backend', '__class__']
        if kwargs['optimizer'] is None:
            # Let the backend detector fall back to its own default optimizer.
            pop_kwargs += ['optimizer']
        [kwargs.pop(k, None) for k in pop_kwargs]
        if backend == Framework.TENSORFLOW:
            # These kwargs are pytorch/sklearn-only.
            pop_kwargs = ['device', 'dataloader', 'use_calibration', 'calibration_kwargs', 'use_oob']
            [kwargs.pop(k, None) for k in pop_kwargs]
            if dataset is None:
                kwargs.update({'dataset': TFDataset})
            self._detector = ClassifierDriftTF(*args, **kwargs)
        elif backend == Framework.PYTORCH:
            # These kwargs are sklearn-only.
            pop_kwargs = ['use_calibration', 'calibration_kwargs', 'use_oob']
            [kwargs.pop(k, None) for k in pop_kwargs]
            if dataset is None:
                kwargs.update({'dataset': TorchDataset})
            if dataloader is None:
                kwargs.update({'dataloader': DataLoader})
            self._detector = ClassifierDriftTorch(*args, **kwargs)
        else:
            # Sklearn backend: strip all training-loop kwargs used by tf/pytorch.
            pop_kwargs = ['reg_loss_fn', 'optimizer', 'learning_rate', 'batch_size', 'preprocess_batch_fn',
                          'epochs', 'train_kwargs', 'device', 'dataset', 'dataloader', 'verbose']
            [kwargs.pop(k, None) for k in pop_kwargs]
            self._detector = ClassifierDriftSklearn(*args, **kwargs)
        self.meta = self._detector.meta
    def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
                return_distance: bool = True, return_probs: bool = True, return_model: bool = True) \
            -> Dict[str, Dict[str, Union[str, int, float, Callable]]]:
        """
        Predict whether a batch of data has drifted from the reference data.

        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the test.
        return_distance
            Whether to return a notion of strength of the drift.
            K-S test stat if binarize_preds=False, otherwise relative error reduction.
        return_probs
            Whether to return the instance level classifier probabilities for the reference and test data
            (0=reference data, 1=test data).
        return_model
            Whether to return the updated model trained to discriminate reference and test instances.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries
        - ``'meta'`` - has the model's metadata.
        - ``'data'`` - contains the drift prediction and optionally the p-value, performance of the classifier \
        relative to its expectation under the no-change null, the out-of-fold classifier model \
        prediction probabilities on the reference and test data, and the trained model. \
        """
        return self._detector.predict(x, return_p_val, return_distance, return_probs, return_model)
| 11,053 | 50.175926 | 117 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/lsdd_online.py | import os
import numpy as np
from typing import Any, Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.lsdd_online import LSDDDriftOnlineTorch
if has_tensorflow:
from alibi_detect.cd.tensorflow.lsdd_online import LSDDDriftOnlineTF
class LSDDDriftOnline(DriftConfigMixin):
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            ert: float,
            window_size: int,
            backend: str = 'tensorflow',
            preprocess_fn: Optional[Callable] = None,
            x_ref_preprocessed: bool = False,
            sigma: Optional[np.ndarray] = None,
            n_bootstraps: int = 1000,
            n_kernel_centers: Optional[int] = None,
            lambda_rd_max: float = 0.2,
            device: Optional[str] = None,
            verbose: bool = True,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Online least squares density difference (LSDD) data drift detector using preconfigured thresholds.
        Motivated by Bu et al. (2017): https://ieeexplore.ieee.org/abstract/document/7890493
        We have made modifications such that a desired ERT can be accurately targeted however.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        ert
            The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
            as the expected run-time from t=0.
        window_size
            The size of the sliding test-window used to compute the test-statistic.
            Smaller windows focus on responding quickly to severe drift, larger windows focus on
            ability to detect slight drift.
        backend
            Backend used for the LSDD implementation and configuration.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        sigma
            Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
            bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
            is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
            between reference samples.
        n_bootstraps
            The number of bootstrap simulations used to configure the thresholds. The larger this is the
            more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
            larger than the ert.
        n_kernel_centers
            The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
            Defaults to 2*window_size.
        lambda_rd_max
            The maximum relative difference between two estimates of LSDD that the regularization parameter
            lambda is allowed to cause. Defaults to 0.2 as in the paper.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        verbose
            Whether or not to print progress during configuration.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        # Set config
        self._set_config(locals())
        backend = backend.lower()
        BackendValidator(
            backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
                             Framework.PYTORCH: [Framework.PYTORCH]},
            construct_name=self.__class__.__name__
        ).verify_backend(backend)
        # `locals()` captures every constructor argument so they can be forwarded
        # verbatim to the backend-specific detector. Do not rename locals here.
        kwargs = locals()
        args = [kwargs['x_ref'], kwargs['ert'], kwargs['window_size']]
        # Drop names that are positional args or not accepted by the backend detectors.
        pop_kwargs = ['self', 'x_ref', 'ert', 'window_size', 'backend', '__class__']
        [kwargs.pop(k, None) for k in pop_kwargs]
        if backend == Framework.TENSORFLOW:
            kwargs.pop('device', None)  # `device` is only relevant for the pytorch backend
            self._detector = LSDDDriftOnlineTF(*args, **kwargs)
        else:
            self._detector = LSDDDriftOnlineTorch(*args, **kwargs)  # type: ignore
        self.meta = self._detector.meta
    @property
    def t(self):
        """Number of test instances processed since the detector was (re)set."""
        return self._detector.t
    @property
    def test_stats(self):
        """Test statistics (LSDD estimates) recorded at each timestep so far."""
        return self._detector.test_stats
@property
def thresholds(self):
return [self._detector.thresholds[min(s, self._detector.window_size-1)] for s in range(self.t)]
def reset_state(self):
"""
Resets the detector to its initial state (`t=0`). This does not include reconfiguring thresholds.
"""
self._detector.reset_state()
def predict(self, x_t: Union[np.ndarray, Any], return_test_stat: bool = True) \
-> Dict[Dict[str, str], Dict[str, Union[int, float]]]:
"""
Predict whether the most recent window of data has drifted from the reference data.
Parameters
----------
x_t
A single instance to be added to the test-window.
return_test_stat
Whether to return the test statistic (LSDD) and threshold.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the test-statistic and threshold.
"""
return self._detector.predict(x_t, return_test_stat)
def score(self, x_t: Union[np.ndarray, Any]) -> float:
"""
Compute the test-statistic (LSDD) between the reference window and test window.
Parameters
----------
x_t
A single instance to be added to the test-window.
Returns
-------
LSDD estimate between reference window and test window.
"""
return self._detector.score(x_t)
def get_config(self) -> dict: # Needed due to need to unnormalize x_ref
"""
Get the detector's configuration dictionary.
Returns
-------
The detector's configuration dictionary.
"""
cfg = super().get_config()
# Unnormalize x_ref
cfg['x_ref'] = self._detector._unnormalize(cfg['x_ref'])
return cfg
def save_state(self, filepath: Union[str, os.PathLike]):
"""
Save a detector's state to disk in order to generate a checkpoint.
Parameters
----------
filepath
The directory to save state to.
"""
self._detector.save_state(filepath)
def load_state(self, filepath: Union[str, os.PathLike]):
"""
Load the detector's state from disk, in order to restart from a checkpoint previously generated with
:meth:`~save_state`.
Parameters
----------
filepath
The directory to load state from.
"""
self._detector.load_state(filepath)
| 7,617 | 38.677083 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/spot_the_diff.py | import numpy as np
from typing import Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.spot_the_diff import SpotTheDiffDriftTorch
from alibi_detect.utils.pytorch.data import TorchDataset
from torch.utils.data import DataLoader
if has_tensorflow:
from alibi_detect.cd.tensorflow.spot_the_diff import SpotTheDiffDriftTF
from alibi_detect.utils.tensorflow.data import TFDataset
class SpotTheDiffDrift(DriftConfigMixin):
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            backend: str = 'tensorflow',
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_fn: Optional[Callable] = None,
            kernel: Optional[Callable] = None,
            n_diffs: int = 1,
            initial_diffs: Optional[np.ndarray] = None,
            l1_reg: float = 0.01,
            binarize_preds: bool = False,
            train_size: Optional[float] = .75,
            n_folds: Optional[int] = None,
            retrain_from_scratch: bool = True,
            seed: int = 0,
            optimizer: Optional[Callable] = None,
            learning_rate: float = 1e-3,
            batch_size: int = 32,
            preprocess_batch_fn: Optional[Callable] = None,
            epochs: int = 3,
            verbose: int = 0,
            train_kwargs: Optional[dict] = None,
            device: Optional[str] = None,
            dataset: Optional[Callable] = None,
            dataloader: Optional[Callable] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Classifier-based drift detector with a classifier of form y = a + b_1*k(x,w_1) + ... + b_J*k(x,w_J),
        where k is a kernel and w_1,...,w_J are learnable test locations. If drift has occurred the test locations
        learn to be more/less (given by sign of b_i) similar to test instances than reference instances.
        The test locations are regularised to be close to the average reference instance such that the **difference**
        is then interpretable as the transformation required for each feature to make the average instance more/less
        like a test instance than a reference instance.
        The classifier is trained on a fraction of the combined reference and test data and drift is detected on
        the remaining data. To use all the data to detect drift, a stratified cross-validation scheme can be chosen.
        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        backend
            Backend used for the training loop implementation.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        kernel
            Kernel used to define similarity between instances, defaults to Gaussian RBF.
        n_diffs
            The number of test locations to use, each corresponding to an interpretable difference.
        initial_diffs
            Array used to initialise the diffs that will be learned. Defaults to Gaussian
            for each feature with equal variance to that of reference data.
        l1_reg
            Strength of l1 regularisation to apply to the differences.
        binarize_preds
            Whether to test for discrepancy on soft (e.g. probs/logits) model predictions directly
            with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
            The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
        n_folds
            Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold instances. This allows to leverage all the reference and test data
            for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
            are specified, `n_folds` is prioritized.
        retrain_from_scratch
            Whether the classifier should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        seed
            Optional random seed for fold selection.
        optimizer
            Optimizer used during training of the classifier.
        learning_rate
            Learning rate used by optimizer.
        batch_size
            Batch size used during training of the classifier.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the model.
        epochs
            Number of training epochs for the classifier for each (optional) fold.
        verbose
            Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar.
        train_kwargs
            Optional additional kwargs when fitting the classifier.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        dataset
            Dataset object used during training.
        dataloader
            Dataloader object used during training. Only relevant for 'pytorch' backend.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        # Set config
        self._set_config(locals())
        backend = backend.lower()
        BackendValidator(
            backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
                             Framework.PYTORCH: [Framework.PYTORCH]},
            construct_name=self.__class__.__name__
        ).verify_backend(backend)
        # Gather all constructor arguments via locals() to forward to the backend detector.
        # NOTE: introducing new local variables above this line would leak them into kwargs.
        kwargs = locals()
        args = [kwargs['x_ref']]
        # Drop arguments handled here (and `optimizer` when unset, so the backend default applies).
        pop_kwargs = ['self', 'x_ref', 'backend', '__class__']
        if kwargs['optimizer'] is None:
            pop_kwargs += ['optimizer']
        [kwargs.pop(k, None) for k in pop_kwargs]
        if backend == Framework.TENSORFLOW:
            # TF backend takes neither a device nor a dataloader argument.
            pop_kwargs = ['device', 'dataloader']
            [kwargs.pop(k, None) for k in pop_kwargs]
            if dataset is None:
                kwargs.update({'dataset': TFDataset})
            self._detector = SpotTheDiffDriftTF(*args, **kwargs)
        else:
            if dataset is None:
                kwargs.update({'dataset': TorchDataset})
            if dataloader is None:
                kwargs.update({'dataloader': DataLoader})
            self._detector = SpotTheDiffDriftTorch(*args, **kwargs)  # type: ignore
        self.meta = self._detector.meta
    def predict(
        self, x: np.ndarray, return_p_val: bool = True, return_distance: bool = True,
        return_probs: bool = True, return_model: bool = True
    ) -> Dict[str, Dict[str, Union[int, str, float, Callable]]]:
        """
        Predict whether a batch of data has drifted from the reference data.
        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the test.
        return_distance
            Whether to return a notion of strength of the drift.
            K-S test stat if binarize_preds=False, otherwise relative error reduction.
        return_probs
            Whether to return the instance level classifier probabilities for the reference and test data
            (0=reference data, 1=test data).
        return_model
            Whether to return the updated model trained to discriminate reference and test instances.
        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the detector's metadata.
        - ``'data'`` contains the drift prediction, the diffs used to distinguish reference from test instances, \
        and optionally the p-value, performance of the classifier relative to its expectation under the \
        no-change null, the out-of-fold classifier model prediction probabilities on the reference and test \
        data as well as the associated reference and test instances of the out-of-fold predictions, \
        and the trained model.
        """
        return self._detector.predict(x, return_p_val, return_distance, return_probs, return_model)
| 9,141 | 47.62766 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/mmd_online.py | import os
import numpy as np
from typing import Any, Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.mmd_online import MMDDriftOnlineTorch
if has_tensorflow:
from alibi_detect.cd.tensorflow.mmd_online import MMDDriftOnlineTF
class MMDDriftOnline(DriftConfigMixin):
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            ert: float,
            window_size: int,
            backend: str = 'tensorflow',
            preprocess_fn: Optional[Callable] = None,
            x_ref_preprocessed: bool = False,
            kernel: Optional[Callable] = None,
            sigma: Optional[np.ndarray] = None,
            n_bootstraps: int = 1000,
            device: Optional[str] = None,
            verbose: bool = True,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Online maximum Mean Discrepancy (MMD) data drift detector using preconfigured thresholds.
        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        ert
            The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
            as the expected run-time from t=0.
        window_size
            The size of the sliding test-window used to compute the test-statistic.
            Smaller windows focus on responding quickly to severe drift, larger windows focus on
            ability to detect slight drift.
        backend
            Backend used for the MMD implementation and configuration.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        kernel
            Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
        sigma
            Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
            The kernel evaluation is then averaged over those bandwidths. If `sigma` is not specified, the 'median
            heuristic' is adopted whereby `sigma` is set as the median pairwise distance between reference samples.
        n_bootstraps
            The number of bootstrap simulations used to configure the thresholds. The larger this is the
            more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
            larger than the ERT.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        verbose
            Whether or not to print progress during configuration.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        # Set config
        self._set_config(locals())
        backend = backend.lower()
        BackendValidator(
            backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
                             Framework.PYTORCH: [Framework.PYTORCH]},
            construct_name=self.__class__.__name__
        ).verify_backend(backend)
        # Gather all constructor arguments via locals() to forward to the backend detector.
        # NOTE: introducing new local variables above this line would leak them into kwargs.
        kwargs = locals()
        args = [kwargs['x_ref'], kwargs['ert'], kwargs['window_size']]
        pop_kwargs = ['self', 'x_ref', 'ert', 'window_size', 'backend', '__class__']
        [kwargs.pop(k, None) for k in pop_kwargs]
        if kernel is None:
            # Default to the backend-specific Gaussian RBF kernel.
            if backend == Framework.TENSORFLOW:
                from alibi_detect.utils.tensorflow.kernels import GaussianRBF
            else:
                from alibi_detect.utils.pytorch.kernels import GaussianRBF  # type: ignore
            kwargs.update({'kernel': GaussianRBF})
        if backend == Framework.TENSORFLOW:
            kwargs.pop('device', None)
            self._detector = MMDDriftOnlineTF(*args, **kwargs)
        else:
            self._detector = MMDDriftOnlineTorch(*args, **kwargs)  # type: ignore
        self.meta = self._detector.meta
    @property
    def t(self):
        # Current timestep of the online detector (delegated to the backend).
        return self._detector.t
    @property
    def test_stats(self):
        # Test statistics recorded so far (delegated to the backend).
        return self._detector.test_stats
    @property
    def thresholds(self):
        # One threshold per timestep; the final (window_size - 1) threshold is repeated thereafter.
        return [self._detector.thresholds[min(s, self._detector.window_size-1)] for s in range(self.t)]
    def reset_state(self):
        """
        Resets the detector to its initial state (`t=0`). This does not include reconfiguring thresholds.
        """
        self._detector.reset_state()
    def predict(self, x_t: Union[np.ndarray, Any], return_test_stat: bool = True) \
            -> Dict[str, Dict[str, Union[int, float]]]:
        """
        Predict whether the most recent window of data has drifted from the reference data.
        Parameters
        ----------
        x_t
            A single instance to be added to the test-window.
        return_test_stat
            Whether to return the test statistic (squared MMD) and threshold.
        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the drift prediction and optionally the test-statistic and threshold.
        """
        return self._detector.predict(x_t, return_test_stat)
    def score(self, x_t: Union[np.ndarray, Any]) -> float:
        """
        Compute the test-statistic (squared MMD) between the reference window and test window.
        Parameters
        ----------
        x_t
            A single instance to be added to the test-window.
        Returns
        -------
        Squared MMD estimate between reference window and test window.
        """
        return self._detector.score(x_t)
    def save_state(self, filepath: Union[str, os.PathLike]):
        """
        Save a detector's state to disk in order to generate a checkpoint.
        Parameters
        ----------
        filepath
            The directory to save state to.
        """
        self._detector.save_state(filepath)
    def load_state(self, filepath: Union[str, os.PathLike]):
        """
        Load the detector's state from disk, in order to restart from a checkpoint previously generated with
        `save_state`.
        Parameters
        ----------
        filepath
            The directory to load state from.
        """
        self._detector.load_state(filepath)
    def get_config(self) -> dict:  # Needed due to self.x_ref being a torch.Tensor when backend='pytorch'
        """
        Get the detector's configuration dictionary.
        Returns
        -------
        The detector's configuration dictionary.
        """
        cfg = super().get_config()
        if cfg.get('backend') == 'pytorch':
            # The pytorch backend stores x_ref as a torch.Tensor; serialize it as a numpy array.
            cfg['x_ref'] = cfg['x_ref'].detach().cpu().numpy()
        return cfg
| 7,411 | 37.604167 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/model_uncertainty.py | import logging
import numpy as np
from typing import Callable, Dict, Optional, Union
from functools import partial
from alibi_detect.cd.ks import KSDrift
from alibi_detect.cd.chisquare import ChiSquareDrift
from alibi_detect.cd.preprocess import classifier_uncertainty, regressor_uncertainty
from alibi_detect.cd.utils import encompass_batching, encompass_shuffling_and_batch_filling
from alibi_detect.utils.frameworks import BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
logger = logging.getLogger(__name__)
class ClassifierUncertaintyDrift(DriftConfigMixin):
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            model: Callable,
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            backend: Optional[str] = None,
            update_x_ref: Optional[Dict[str, int]] = None,
            preds_type: str = 'probs',
            uncertainty_type: str = 'entropy',
            margin_width: float = 0.1,
            batch_size: int = 32,
            preprocess_batch_fn: Optional[Callable] = None,
            device: Optional[str] = None,
            tokenizer: Optional[Callable] = None,
            max_len: Optional[int] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None,
    ) -> None:
        """
        Test for a change in the number of instances falling into regions on which the model is uncertain.
        Performs either a K-S test on prediction entropies or Chi-squared test on 0-1 indicators of predictions
        falling into a margin of uncertainty (e.g. probs falling into [0.45, 0.55] in binary case).
        Parameters
        ----------
        x_ref
            Data used as reference distribution. Should be disjoint from the data the model was trained on
            for accurate p-values.
        model
            Classification model outputting class probabilities (or logits)
        backend
            Backend to use if model requires batch prediction. Options are 'tensorflow' or 'pytorch'.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preds_type
            Type of prediction output by the model. Options are 'probs' (in [0,1]) or 'logits' (in [-inf,inf]).
        uncertainty_type
            Method for determining the model's uncertainty for a given instance. Options are 'entropy' or 'margin'.
        margin_width
            Width of the margin if uncertainty_type = 'margin'. The model is considered uncertain on an instance
            if the highest two class probabilities it assigns to the instance differ by less than margin_width.
        batch_size
            Batch size used to evaluate model. Only relevant when backend has been specified for batch prediction.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the model.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        tokenizer
            Optional tokenizer for NLP models.
        max_len
            Optional max token length for NLP models.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        # Set config
        self._set_config(locals())
        if backend:
            backend = backend.lower()
        BackendValidator(backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
                                          Framework.PYTORCH: [Framework.PYTORCH],
                                          None: []},
                         construct_name=self.__class__.__name__).verify_backend(backend)
        if backend is None:
            if device not in [None, 'cpu']:
                raise NotImplementedError('Non-pytorch/tensorflow models must run on cpu')
            model_fn = model
        else:
            # Wrap the model for batched prediction on the chosen backend/device.
            model_fn = encompass_batching(
                model=model,
                backend=backend,
                batch_size=batch_size,
                device=device,
                preprocess_batch_fn=preprocess_batch_fn,
                tokenizer=tokenizer,
                max_len=max_len
            )
        # The uncertainty transform is applied as the preprocessing step of the underlying detector.
        preprocess_fn = partial(
            classifier_uncertainty,
            model_fn=model_fn,
            preds_type=preds_type,
            uncertainty_type=uncertainty_type,
            margin_width=margin_width,
        )
        self._detector: Union[KSDrift, ChiSquareDrift]
        if uncertainty_type == 'entropy':
            # Continuous entropies -> Kolmogorov-Smirnov test.
            self._detector = KSDrift(
                x_ref=x_ref,
                p_val=p_val,
                x_ref_preprocessed=x_ref_preprocessed,
                preprocess_at_init=True,
                update_x_ref=update_x_ref,
                preprocess_fn=preprocess_fn,
                input_shape=input_shape,
                data_type=data_type
            )
        elif uncertainty_type == 'margin':
            # 0-1 margin indicators -> Chi-squared test.
            self._detector = ChiSquareDrift(
                x_ref=x_ref,
                p_val=p_val,
                x_ref_preprocessed=x_ref_preprocessed,
                preprocess_at_init=True,
                update_x_ref=update_x_ref,
                preprocess_fn=preprocess_fn,
                input_shape=input_shape,
                data_type=data_type
            )
        else:
            raise NotImplementedError("Only uncertainty types 'entropy' or 'margin' supported.")
        self.meta = self._detector.meta
        # Overwrite the underlying KS/Chi2 detector's name with this detector's name.
        self.meta['name'] = 'ClassifierUncertaintyDrift'
    def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
                return_distance: bool = True) -> Dict[str, Dict[str, Union[np.ndarray, int, float]]]:
        """
        Predict whether a batch of data has drifted from the reference data.
        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the test.
        return_distance
            Whether to return the corresponding test statistic (K-S for 'entropy', Chi2 for 'margin').
        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the drift prediction and optionally the p-value, threshold and test statistic.
        """
        return self._detector.predict(x, return_p_val=return_p_val, return_distance=return_distance)
class RegressorUncertaintyDrift(DriftConfigMixin):
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            model: Callable,
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            backend: Optional[str] = None,
            update_x_ref: Optional[Dict[str, int]] = None,
            uncertainty_type: str = 'mc_dropout',
            n_evals: int = 25,
            batch_size: int = 32,
            preprocess_batch_fn: Optional[Callable] = None,
            device: Optional[str] = None,
            tokenizer: Optional[Callable] = None,
            max_len: Optional[int] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None,
    ) -> None:
        """
        Test for a change in the number of instances falling into regions on which the model is uncertain.
        Performs either a K-S test on uncertainties estimated from a predictive ensemble given either
        explicitly or implicitly as a model with dropout layers.
        Parameters
        ----------
        x_ref
            Data used as reference distribution. Should be disjoint from the data the model was trained on
            for accurate p-values.
        model
            Regression model outputting class probabilities (or logits)
        backend
            Backend to use if model requires batch prediction. Options are 'tensorflow' or 'pytorch'.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        uncertainty_type
            Method for determining the model's uncertainty for a given instance. Options are 'mc_dropout' or 'ensemble'.
            The former should output a scalar per instance. The latter should output a vector of predictions
            per instance.
        n_evals:
            The number of times to evaluate the model under different dropout configurations. Only relevant when using
            the 'mc_dropout' uncertainty type.
        batch_size
            Batch size used to evaluate model. Only relevant when backend has been specified for batch prediction.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the model.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        tokenizer
            Optional tokenizer for NLP models.
        max_len
            Optional max token length for NLP models.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        # Set config
        self._set_config(locals())
        if backend:
            backend = backend.lower()
        BackendValidator(backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
                                          Framework.PYTORCH: [Framework.PYTORCH],
                                          None: []},
                         construct_name=self.__class__.__name__).verify_backend(backend)
        if backend is None:
            model_fn = model
        else:
            if uncertainty_type == 'mc_dropout':
                if backend == Framework.PYTORCH:
                    # Keep dropout layers stochastic at prediction time for MC dropout.
                    from alibi_detect.cd.pytorch.utils import activate_train_mode_for_dropout_layers
                    model = activate_train_mode_for_dropout_layers(model)
                elif backend == Framework.TENSORFLOW:
                    # NOTE(review): implicit string concatenation drops the space between
                    # "contains" and "non-dropout" in this warning message.
                    logger.warning(
                        "MC dropout being applied to tensorflow model. May not be suitable if model contains"
                        "non-dropout layers with different train and inference time behaviour"
                    )
                    from alibi_detect.cd.tensorflow.utils import activate_train_mode_for_all_layers
                    model = activate_train_mode_for_all_layers(model)
            model_fn = encompass_batching(
                model=model,
                backend=backend,
                batch_size=batch_size,
                device=device,
                preprocess_batch_fn=preprocess_batch_fn,
                tokenizer=tokenizer,
                max_len=max_len
            )
            if uncertainty_type == 'mc_dropout' and backend == Framework.TENSORFLOW:
                # To average over possible batchnorm effects as all layers evaluated in training mode.
                model_fn = encompass_shuffling_and_batch_filling(model_fn, batch_size=batch_size)
        # The uncertainty transform is applied as the preprocessing step of the underlying K-S detector.
        preprocess_fn = partial(
            regressor_uncertainty,
            model_fn=model_fn,
            uncertainty_type=uncertainty_type,
            n_evals=n_evals
        )
        self._detector = KSDrift(
            x_ref=x_ref,
            p_val=p_val,
            x_ref_preprocessed=x_ref_preprocessed,
            preprocess_at_init=True,
            update_x_ref=update_x_ref,
            preprocess_fn=preprocess_fn,
            input_shape=input_shape,
            data_type=data_type
        )
        self.meta = self._detector.meta
        # Overwrite the underlying K-S detector's name with this detector's name.
        self.meta['name'] = 'RegressorUncertaintyDrift'
    def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
                return_distance: bool = True) -> Dict[str, Dict[str, Union[np.ndarray, int, float]]]:
        """
        Predict whether a batch of data has drifted from the reference data.
        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the test.
        return_distance
            Whether to return the K-S test statistic
        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the drift prediction and optionally the p-value, threshold and test statistic.
        """
        return self._detector.predict(x, return_p_val=return_p_val, return_distance=return_distance)
| 14,152 | 43.646688 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/context_aware.py | import logging
import numpy as np
from typing import Callable, Dict, Optional, Union, Tuple
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.context_aware import ContextMMDDriftTorch
if has_tensorflow:
from alibi_detect.cd.tensorflow.context_aware import ContextMMDDriftTF
logger = logging.getLogger(__name__)
class ContextMMDDrift(DriftConfigMixin):
    @deprecated_alias(preprocess_x_ref='preprocess_at_init')
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            c_ref: np.ndarray,
            backend: str = 'tensorflow',
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            x_kernel: Optional[Callable] = None,
            c_kernel: Optional[Callable] = None,
            n_permutations: int = 1000,
            prop_c_held: float = 0.25,
            n_folds: int = 5,
            batch_size: Optional[int] = 256,
            device: Optional[str] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None,
            verbose: bool = False
    ) -> None:
        """
        A context-aware drift detector based on a conditional analogue of the maximum mean discrepancy (MMD).
        Only detects differences between samples that can not be attributed to differences between associated
        sets of contexts. p-values are computed using a conditional permutation test.
        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        c_ref
            Context for the reference distribution.
        backend
            Backend used for the MMD implementation.
        p_val
            p-value used for the significance of the permutation test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_ref
            Reference data can optionally be updated to the last N instances seen by the detector.
            The parameter should be passed as a dictionary *{'last': N}*.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        x_kernel
            Kernel defined on the input data, defaults to Gaussian RBF kernel.
        c_kernel
            Kernel defined on the context data, defaults to Gaussian RBF kernel.
        n_permutations
            Number of permutations used in the permutation test.
        prop_c_held
            Proportion of contexts held out to condition on.
        n_folds
            Number of cross-validation folds used when tuning the regularisation parameters.
        batch_size
            If not None, then compute batches of MMDs at a time (rather than all at once).
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        verbose
            Whether to print progress messages.
        """
        super().__init__()
        # Set config
        self._set_config(locals())
        backend = backend.lower()
        BackendValidator(
            backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
                             Framework.PYTORCH: [Framework.PYTORCH]},
            construct_name=self.__class__.__name__
        ).verify_backend(backend)
        # Gather all constructor arguments via locals() to forward to the backend detector.
        # NOTE: introducing new local variables above this line would leak them into kwargs.
        kwargs = locals()
        args = [kwargs['x_ref'], kwargs['c_ref']]
        pop_kwargs = ['self', 'x_ref', 'c_ref', 'backend', '__class__']
        [kwargs.pop(k, None) for k in pop_kwargs]
        if x_kernel is None or c_kernel is None:
            # Default either missing kernel to the backend-specific Gaussian RBF kernel.
            if backend == Framework.TENSORFLOW:
                from alibi_detect.utils.tensorflow.kernels import GaussianRBF
            else:
                from alibi_detect.utils.pytorch.kernels import GaussianRBF  # type: ignore[assignment]
            if x_kernel is None:
                kwargs.update({'x_kernel': GaussianRBF})
            if c_kernel is None:
                kwargs.update({'c_kernel': GaussianRBF})
        if backend == Framework.TENSORFLOW:
            kwargs.pop('device', None)
            self._detector = ContextMMDDriftTF(*args, **kwargs)
        else:
            self._detector = ContextMMDDriftTorch(*args, **kwargs)
        self.meta = self._detector.meta
    def predict(self, x: Union[np.ndarray, list], c: np.ndarray,
                return_p_val: bool = True, return_distance: bool = True, return_coupling: bool = False) \
            -> Dict[str, Dict[str, Union[int, float]]]:
        """
        Predict whether a batch of data has drifted from the reference data, given the provided context.
        Parameters
        ----------
        x
            Batch of instances.
        c
            Context associated with batch of instances.
        return_p_val
            Whether to return the p-value of the permutation test.
        return_distance
            Whether to return the conditional MMD test statistic between the new batch and reference data.
        return_coupling
            Whether to return the coupling matrices.
        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the drift prediction and optionally the p-value, threshold, conditional MMD test \
        statistic and coupling matrices.
        """
        return self._detector.predict(x, c, return_p_val, return_distance, return_coupling)
    def score(self, x: Union[np.ndarray, list], c: np.ndarray) -> Tuple[float, float, float, Tuple]:
        """
        Compute the MMD based conditional test statistic, and perform a conditional permutation test to obtain a
        p-value representing the test statistic's extremity under the null hypothesis.
        Parameters
        ----------
        x
            Batch of instances.
        c
            Context associated with batch of instances.
        Returns
        -------
        p-value obtained from the conditional permutation test, the conditional MMD test statistic, the test \
        statistic threshold above which drift is flagged, and a tuple containing the coupling matrices \
        :math:`(W_{ref,ref}, W_{test,test}, W_{ref,test})`.
        """
        return self._detector.score(x, c)
| 7,323 | 42.082353 | 116 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/lsdd.py | import numpy as np
from typing import Callable, Dict, Optional, Union, Tuple
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.lsdd import LSDDDriftTorch
if has_tensorflow:
from alibi_detect.cd.tensorflow.lsdd import LSDDDriftTF
class LSDDDrift(DriftConfigMixin):
    """Least-squares density difference (LSDD) drift detector.

    Thin dispatcher that validates the requested backend and delegates all computation
    to the TensorFlow (`LSDDDriftTF`) or PyTorch (`LSDDDriftTorch`) implementation.
    """
    @deprecated_alias(preprocess_x_ref='preprocess_at_init')
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            backend: str = 'tensorflow',
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            sigma: Optional[np.ndarray] = None,
            n_permutations: int = 100,
            n_kernel_centers: Optional[int] = None,
            lambda_rd_max: float = 0.2,
            device: Optional[str] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Least-squares density difference (LSDD) data drift detector using a permutation test.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        backend
            Backend used for the LSDD implementation.
        p_val
            p-value used for the significance of the permutation test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        sigma
            Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
            bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
            is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
            between reference samples.
        n_permutations
            Number of permutations used in the permutation test.
        n_kernel_centers
            The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
            Defaults to 1/20th of the reference data.
        lambda_rd_max
            The maximum relative difference between two estimates of LSDD that the regularization parameter
            lambda is allowed to cause. Defaults to 0.2 as in the paper.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        # Set config. NOTE: locals() must be captured before any extra local variables
        # are introduced, since the snapshot becomes the detector config.
        self._set_config(locals())
        backend = backend.lower()
        BackendValidator(
            backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
                             Framework.PYTORCH: [Framework.PYTORCH]},
            construct_name=self.__class__.__name__
        ).verify_backend(backend)
        # Snapshot the init arguments; x_ref is passed positionally, the rest as kwargs.
        kwargs = locals()
        args = [kwargs['x_ref']]
        pop_kwargs = ['self', 'x_ref', 'backend', '__class__']
        # Plain loop (not a comprehension): executed purely for its side effect.
        for key in pop_kwargs:
            kwargs.pop(key, None)
        if backend == Framework.TENSORFLOW:
            # `device` is only meaningful for the pytorch backend.
            kwargs.pop('device', None)
            self._detector = LSDDDriftTF(*args, **kwargs)
        else:
            self._detector = LSDDDriftTorch(*args, **kwargs)
        self.meta = self._detector.meta

    def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_distance: bool = True) \
            -> Dict[str, Dict[str, Union[int, float]]]:
        """
        Predict whether a batch of data has drifted from the reference data.

        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the permutation test.
        return_distance
            Whether to return the LSDD metric between the new batch and reference data.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.

        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the drift prediction and optionally the p-value, threshold and LSDD metric.
        """
        # Outer keys are 'meta' and 'data' (strings), hence Dict[str, ...] above;
        # the previous annotation used an unhashable dict as the key type.
        return self._detector.predict(x, return_p_val, return_distance)

    def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
        """
        Compute the p-value resulting from a permutation test using the least-squares density
        difference as a distance measure between the reference data and the data to be tested.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        p-value obtained from the permutation test, the LSDD between the reference and test set, \
        and the LSDD threshold above which drift is flagged.
        """
        return self._detector.score(x)

    def get_config(self) -> dict:  # Needed due to need to unnormalize x_ref
        """
        Get the detector's configuration dictionary.

        Returns
        -------
        The detector's configuration dictionary.
        """
        cfg = super().get_config()
        # Unnormalize x_ref: the backend detectors store a normalized copy, but the
        # config must hold the data as the user originally supplied it.
        if self._detector.preprocess_at_init or self._detector.preprocess_fn is None \
                or self._detector.x_ref_preprocessed:
            cfg['x_ref'] = self._detector._unnormalize(cfg['x_ref'])
        return cfg
| 6,658 | 41.96129 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_ks.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable
from alibi_detect.cd import KSDrift
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
# Test dimensions: number of instances, hidden width and class count of the toy model.
n, n_hidden, n_classes = 750, 10, 5
def mymodel(shape):
    # Small softmax classifier; its hidden layer is reused by HiddenOutput preprocessing.
    x_in = Input(shape=shape)
    x = Dense(n_hidden)(x_in)
    x_out = Dense(n_classes, activation='softmax')(x)
    return tf.keras.models.Model(inputs=x_in, outputs=x_out)
# Parameter grid for test_ksdrift below.
n_features = [1, 10]
n_enc = [None, 3]
# (preprocess_fn, kwargs) pairs: no preprocessing, hidden-layer output, untrained autoencoder.
preprocess = [
    (None, None),
    (preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
    (preprocess_drift, {'model': UAE})
]
alternative = ['two-sided', 'less', 'greater']
correction = ['bonferroni', 'fdr']
update_x_ref = [{'last': 1000}, {'reservoir_sampling': 1000}]
preprocess_at_init = [True, False]
tests_ksdrift = list(product(n_features, n_enc, preprocess, alternative,
                             correction, update_x_ref, preprocess_at_init))
n_tests = len(tests_ksdrift)
@pytest.fixture
def ksdrift_params(request):
    # Indirect parametrization: request.param is an index into tests_ksdrift.
    return tests_ksdrift[request.param]
@pytest.mark.parametrize('ksdrift_params', list(range(n_tests)), indirect=True)
def test_ksdrift(ksdrift_params):
    """Exercise KSDrift over the full parameter grid: no drift on a copy of the
    reference data, drift on shifted data, and consistency of the per-feature
    predictions, thresholds and reference-update bookkeeping."""
    n_features, n_enc, preprocess, alternative, correction, \
        update_x_ref, preprocess_at_init = ksdrift_params
    np.random.seed(0)
    x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    # Build the preprocessing function for this grid point; combinations that do not
    # apply (e.g. UAE with a single feature) fall back to no preprocessing.
    preprocess_fn, preprocess_kwargs = preprocess
    if isinstance(preprocess_fn, Callable):
        if 'layer' in list(preprocess_kwargs.keys()) \
                and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
            model = mymodel((n_features,))
            layer = preprocess_kwargs['layer']
            preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
        elif preprocess_kwargs['model'].__name__ == 'UAE' \
                and n_features > 1 and isinstance(n_enc, int):
            tf.random.set_seed(0)
            encoder_net = tf.keras.Sequential(
                [
                    InputLayer(input_shape=(n_features,)),
                    Dense(n_enc)
                ]
            )
            preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
        else:
            preprocess_fn = None
    else:
        preprocess_fn = None
    cd = KSDrift(
        x_ref=x_ref,
        p_val=.05,
        preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
        update_x_ref=update_x_ref,
        preprocess_fn=preprocess_fn,
        correction=correction,
        alternative=alternative,
    )
    # An exact copy of the reference data must not be flagged as drift.
    x = x_ref.copy()
    preds_batch = cd.predict(x, drift_type='batch', return_p_val=True)
    assert preds_batch['data']['is_drift'] == 0
    # Reference-update bookkeeping: total seen vs. capped stored reference size.
    k = list(update_x_ref.keys())[0]
    assert cd.n == x.shape[0] + x_ref.shape[0]
    assert cd.x_ref.shape[0] == min(update_x_ref[k], x.shape[0] + x_ref.shape[0])
    # Feature-level predictions must agree with thresholding the per-feature p-values.
    preds_feature = cd.predict(x, drift_type='feature', return_p_val=True)
    assert preds_feature['data']['is_drift'].shape[0] == cd.n_features
    preds_by_feature = (preds_feature['data']['p_val'] < cd.p_val).astype(int)
    assert (preds_feature['data']['is_drift'] == preds_by_feature).all()
    # Shifted/scaled data should be flagged, direction depending on `alternative`.
    np.random.seed(0)
    X_randn = np.random.randn(n * n_features).reshape(n, n_features).astype('float32')
    mu, sigma = 5, 5
    X_low = sigma * X_randn - mu
    X_high = sigma * X_randn + mu
    preds_batch = cd.predict(X_high, drift_type='batch')
    if alternative != 'less':
        assert preds_batch['data']['is_drift'] == 1
    preds_batch = cd.predict(X_low, drift_type='batch')
    if alternative != 'greater':
        assert preds_batch['data']['is_drift'] == 1
    assert preds_batch['data']['distance'].min() >= 0.
    assert preds_feature['data']['threshold'] == cd.p_val
    if correction == 'bonferroni':
        assert preds_batch['data']['threshold'] == cd.p_val / cd.n_features
| 4,062 | 37.330189 | 96 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_cvm_online.py | import numpy as np
import pytest
from alibi_detect.cd import CVMDriftOnline
from alibi_detect.utils._random import fixed_seed
# Reference size, test-stream length and bootstrap/ERT settings for the online CVM tests.
n, n_test = 200, 500
n_bootstraps = 1000
ert = 50
np.random.seed(0)
# Parameter grid: single vs. multiple window sizes, batched vs. unbatched bootstraps, 1D vs. 3D data.
window_sizes = [[10], [10, 20]]
batch_size = [None, int(n_bootstraps/4)]
n_features = [1, 3]
@pytest.mark.parametrize('window_sizes', window_sizes)
@pytest.mark.parametrize('batch_size', batch_size)
@pytest.mark.parametrize('n_feat', n_features)
def test_cvmdriftonline(window_sizes, batch_size, n_feat, seed):
    """Check the online CVM detector's average run time on in-distribution data is of
    the order of the configured ERT, and that drifted data is detected much faster."""
    with fixed_seed(seed):
        # Reference data
        x_ref = np.random.normal(0, 1, size=(n, n_feat)).squeeze()  # squeeze to test vec input in 1D case
        # Instantiate detector
        cd = CVMDriftOnline(x_ref=x_ref, ert=ert, window_sizes=window_sizes,
                            n_bootstraps=n_bootstraps, batch_size=batch_size)
        # Test predict
        x_h0 = np.random.normal(0, 1, size=(n_test, n_feat))
        x_h1 = np.random.normal(1, 1, size=(n_test, n_feat))
    # Reference data
    detection_times_h0 = []
    test_stats_h0 = []
    for x_t in x_h0:  # x_t is np.int64 in 1D, np.ndarray in multi-D
        t0 = cd.t
        pred_t = cd.predict(x_t, return_test_stat=True)
        assert cd.t - t0 == 1  # This checks state updated (self.t at least)
        test_stats_h0.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h0.append(pred_t['data']['time'])
            cd.reset_state()
    # Average run time (ART): subtract the warm-up of the smallest window.
    art = np.array(detection_times_h0).mean() - np.min(window_sizes) + 1
    test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
    assert ert/3 < art < 3*ert
    # Drifted data
    cd.reset_state()
    detection_times_h1 = []
    test_stats_h1 = []
    for x_t in x_h1:
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h1.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h1.append(pred_t['data']['time'])
            cd.reset_state()
    # Average detection delay (ADD) should be well below the ERT under drift.
    add = np.array(detection_times_h1).mean() - np.min(window_sizes)
    test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
    assert add < ert/2
    assert np.nanmean(test_stats_h1) > np.nanmean(test_stats_h0)
@pytest.mark.parametrize('n_feat', n_features)
def test_cvm_online_state_online(n_feat, tmp_path, seed):
    """
    Test save/load/reset state methods for CVMDriftOnline. State is saved, reset, and loaded, with
    prediction results and stateful attributes compared to original.
    """
    window_sizes = [10]
    with fixed_seed(seed):
        x_ref = np.random.normal(0, 1, (n, n_feat)).squeeze()
        x = np.random.normal(0.1, 1, (n, n_feat))
        dd = CVMDriftOnline(x_ref, window_sizes=window_sizes, ert=20)
    # Store the freshly-initialised state for comparison after reset_state().
    state_dict_t0 = {}
    for key in dd.online_state_keys:
        state_dict_t0[key] = getattr(dd, key)
    # Feed the stream through the detector, snapshotting state to disk at t=5.
    test_stats_1 = []
    for t, x_t in enumerate(x):
        if t == 5:
            dd.save_state(tmp_path)
            # Store state for comparison
            state_dict_t5 = {}
            for key in dd.online_state_keys:
                state_dict_t5[key] = getattr(dd, key)
        preds = dd.predict(x_t)
        test_stats_1.append(preds['data']['test_stat'])
    # Reset and check state cleared
    dd.reset_state()
    for key, orig_val in state_dict_t0.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc
    # Repeat, check that same test_stats both times
    test_stats_2 = []
    for t, x_t in enumerate(x):
        preds = dd.predict(x_t)
        test_stats_2.append(preds['data']['test_stat'])
    np.testing.assert_array_equal(test_stats_1, test_stats_2)
    # Load state from t=5 timestep
    dd.load_state(tmp_path)
    # Compare stateful attributes to original at t=5
    for key, orig_val in state_dict_t5.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc
    # Compare predictions to original at t=5
    new_pred = dd.predict(x[5])
    np.testing.assert_array_equal(new_pred['data']['test_stat'], test_stats_1[5])
| 4,225 | 35.747826 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_lsdd.py | import numpy as np
import pytest
from alibi_detect.cd import LSDDDrift
from alibi_detect.cd.pytorch.lsdd import LSDDDriftTorch
from alibi_detect.cd.tensorflow.lsdd import LSDDDriftTF
# Data dimensions and the backends to exercise: two valid, one valid-but-oddly-cased
# ('PyToRcH' checks case-insensitivity) and one unsupported ('mxnet').
n, n_features = 100, 5
tests_lsdddrift = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet']
n_tests = len(tests_lsdddrift)
@pytest.fixture
def lsdddrift_params(request):
    # Indirect parametrization: request.param indexes into tests_lsdddrift.
    return tests_lsdddrift[request.param]
@pytest.mark.parametrize('lsdddrift_params', list(range(n_tests)), indirect=True)
def test_lsdddrift(lsdddrift_params):
    """LSDDDrift should wrap the backend-specific detector matching `backend`
    (case-insensitively) and raise NotImplementedError for unsupported backends."""
    requested = lsdddrift_params
    x_ref = np.random.randn(n, n_features)
    try:
        detector = LSDDDrift(x_ref=x_ref, backend=requested)
    except NotImplementedError:
        detector = None
    name = requested.lower()
    if name == 'pytorch':
        assert isinstance(detector._detector, LSDDDriftTorch)
    elif name == 'tensorflow':
        assert isinstance(detector._detector, LSDDDriftTF)
    else:
        assert detector is None
| 931 | 26.411765 | 81 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_spot_the_diff.py | import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense
import torch
import torch.nn as nn
from alibi_detect.cd import SpotTheDiffDrift
from alibi_detect.cd.pytorch.spot_the_diff import SpotTheDiffDriftTorch
from alibi_detect.cd.tensorflow.spot_the_diff import SpotTheDiffDriftTF
n, n_features = 100, 5
class MyKernelTF(tf.keras.Model):  # TODO: Support then test models using keras functional API
    # Toy learnable kernel: shared dense projection, pairwise dot products between projections.
    def __init__(self, n_features: int):
        super().__init__()
        # Stored so get_config/from_config can round-trip the model (keras serialization).
        self.config = {'n_features': n_features}
        self.dense = Dense(20)
    def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
        # k[j, k] = <dense(x)[j], dense(y)[k]>
        return tf.einsum('ji,ki->jk', self.dense(x), self.dense(y))
    def get_config(self) -> dict:
        return self.config
    @classmethod
    def from_config(cls, config):
        return cls(**config)
class MyKernelTorch(nn.Module):
    """Small two-layer MLP used as the learnable projection in the
    spot-the-diff backend-dispatch tests."""
    def __init__(self, n_features: int):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, 2)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = torch.relu(self.dense1(x))
        return self.dense2(hidden)
# Backends to exercise: two valid, one oddly-cased (case-insensitivity) and one unsupported.
tests_stddrift = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet']
n_tests = len(tests_stddrift)
@pytest.fixture
def stddrift_params(request):
    # Indirect parametrization: request.param indexes into tests_stddrift.
    return tests_stddrift[request.param]
@pytest.mark.parametrize('stddrift_params', list(range(n_tests)), indirect=True)
def test_stddrift(stddrift_params):
    # SpotTheDiffDrift should wrap the matching backend detector; unsupported
    # backends raise NotImplementedError, leaving cd as None.
    backend = stddrift_params
    if backend.lower() == 'pytorch':
        kernel = MyKernelTorch(n_features)
    elif backend.lower() == 'tensorflow':
        kernel = MyKernelTF((n_features,))
    else:
        kernel = None
    x_ref = np.random.randn(*(n, n_features))
    try:
        cd = SpotTheDiffDrift(x_ref=x_ref, kernel=kernel, backend=backend)
    except NotImplementedError:
        cd = None
    if backend.lower() == 'pytorch':
        assert isinstance(cd._detector, SpotTheDiffDriftTorch)
    elif backend.lower() == 'tensorflow':
        assert isinstance(cd._detector, SpotTheDiffDriftTF)
    else:
        assert cd is None
| 2,129 | 28.178082 | 94 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_mmd_online.py | import numpy as np
import pytest
from alibi_detect.cd import MMDDriftOnline
from alibi_detect.cd.pytorch.mmd_online import MMDDriftOnlineTorch
from alibi_detect.cd.tensorflow.mmd_online import MMDDriftOnlineTF
# Data dimensions and backends to exercise (valid, oddly-cased and unsupported).
n, n_features = 100, 5
tests_mmddriftonline = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet']
n_tests = len(tests_mmddriftonline)
@pytest.fixture
def mmddriftonline_params(request):
    # Indirect parametrization: request.param indexes into tests_mmddriftonline.
    return tests_mmddriftonline[request.param]
@pytest.mark.parametrize('mmddriftonline_params', list(range(n_tests)), indirect=True)
def test_mmddriftonline(mmddriftonline_params):
    """Backend dispatch plus a minimal online-API check for MMDDriftOnline."""
    requested = mmddriftonline_params
    x_ref = np.random.randn(n, n_features)
    # Construction only succeeds for supported backends.
    try:
        detector = MMDDriftOnline(x_ref=x_ref, ert=25, window_size=5, backend=requested, n_bootstraps=100)
    except NotImplementedError:
        detector = None
    name = requested.lower()
    if name == 'pytorch':
        assert isinstance(detector._detector, MMDDriftOnlineTorch)
    elif name == 'tensorflow':
        assert isinstance(detector._detector, MMDDriftOnlineTF)
    else:
        assert detector is None
        return
    # predict() and score() should each advance the detector clock by one step.
    x_t = np.random.randn(n_features)
    before = detector.t
    detector.predict(x_t)
    assert detector.t - before == 1
    before = detector.t
    detector.score(x_t)
    assert detector.t - before == 1
| 1,353 | 27.808511 | 98 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_lsdd_online.py | import numpy as np
import pytest
from alibi_detect.cd import LSDDDriftOnline
from alibi_detect.cd.pytorch.lsdd_online import LSDDDriftOnlineTorch
from alibi_detect.cd.tensorflow.lsdd_online import LSDDDriftOnlineTF
# Data dimensions and backends to exercise (valid, oddly-cased and unsupported).
n, n_features = 100, 5
tests_lsdddriftonline = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet']
n_tests = len(tests_lsdddriftonline)
@pytest.fixture
def lsdddriftonline_params(request):
    # Indirect parametrization: request.param indexes into tests_lsdddriftonline.
    return tests_lsdddriftonline[request.param]
@pytest.mark.parametrize('lsdddriftonline_params', list(range(n_tests)), indirect=True)
def test_lsdddriftonline(lsdddriftonline_params):
    # LSDDDriftOnline should wrap the matching backend detector; unsupported
    # backends raise NotImplementedError, leaving cd as None.
    backend = lsdddriftonline_params
    x_ref = np.random.randn(*(n, n_features))
    try:
        cd = LSDDDriftOnline(x_ref=x_ref, ert=25, window_size=5, backend=backend, n_bootstraps=100)
    except NotImplementedError:
        cd = None
    if backend.lower() == 'pytorch':
        assert isinstance(cd._detector, LSDDDriftOnlineTorch)
    elif backend.lower() == 'tensorflow':
        assert isinstance(cd._detector, LSDDDriftOnlineTF)
    else:
        assert cd is None
        return None
    # Test predict: each call should advance the detector clock by one step.
    x_t = np.random.randn(n_features)
    t0 = cd.t
    cd.predict(x_t)
    assert cd.t - t0 == 1  # This checks state updated (self.t at least)
    # Test score: likewise advances the clock.
    t0 = cd.t
    cd.score(x_t)
    assert cd.t - t0 == 1
| 1,331 | 27.956522 | 99 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_fet_online.py | import numpy as np
import pytest
from functools import partial
from alibi_detect.cd import FETDriftOnline
from alibi_detect.utils._random import fixed_seed
# Reference size, repetition counts and bootstrap/ERT settings for the online FET tests.
n = 250
n_inits, n_reps = 3, 100
n_bootstraps = 1000
ert = 150
window_sizes = [40]
alternatives = ['less', 'greater']
n_features = [1, 3]
@pytest.mark.parametrize('alternative', alternatives)
@pytest.mark.parametrize('n_feat', n_features)
def test_fetdriftonline(alternative, n_feat, seed):
    """Check the online FET detector's average run time on in-distribution Bernoulli data
    is of the order of the configured ERT, and drift in the tested direction is caught fast."""
    # Reference data
    p_h0 = 0.5
    with fixed_seed(seed):
        # squeeze to test vector input in 1D case
        x_ref = np.random.choice((0, 1), (n, n_feat), p=[1 - p_h0, p_h0]).squeeze()
        x_h0 = partial(np.random.choice, (0, 1), size=n_feat, p=[1-p_h0, p_h0])
    detection_times_h0 = []
    detection_times_h1 = []
    for _ in range(n_inits):
        # Instantiate detector
        with fixed_seed(seed+1):
            cd = FETDriftOnline(x_ref=x_ref, ert=ert, window_sizes=window_sizes,
                                n_bootstraps=n_bootstraps, alternative=alternative)
        # Reference data
        count = 0
        while len(detection_times_h0) < n_reps and count < int(1e6):
            count += 1
            x_t = int(x_h0()) if n_feat == 1 else x_h0()  # x_t is int in 1D case, otherwise ndarray with shape (n_feat)
            t0 = cd.t
            pred_t = cd.predict(x_t)
            assert cd.t - t0 == 1  # This checks state updated (self.t at least)
            if pred_t['data']['is_drift']:
                detection_times_h0.append(pred_t['data']['time'])
                cd.reset_state()
        # Drifted data: shift the Bernoulli rate in the direction `alternative` tests for.
        if alternative == 'less':
            p_h1 = 0.1
            x_h1 = partial(np.random.choice, (0, 1), size=n_feat, p=[1-p_h1, p_h1])
        else:
            p_h1 = 0.9
            x_h1 = partial(np.random.choice, (0, 1), size=n_feat, p=[1-p_h1, p_h1])
        cd.reset_state()
        count = 0
        while len(detection_times_h1) < n_reps and count < int(1e6):
            count += 1
            x_t = x_h1().reshape(1, 1) if n_feat == 1 else x_h1()  # test shape (1,1) in 1D case here
            pred_t = cd.predict(x_t)
            if pred_t['data']['is_drift']:
                detection_times_h1.append(pred_t['data']['time'])
                cd.reset_state()
    # Average run time (no drift) vs. average detection delay (drift), window warm-up removed.
    art = np.array(detection_times_h0).mean() - np.min(window_sizes) + 1
    add = np.array(detection_times_h1).mean() - np.min(window_sizes)
    assert ert / 3 < art < 3 * ert
    assert add + 1 < ert/2
@pytest.mark.parametrize('n_feat', n_features)
def test_fet_online_state_online(n_feat, tmp_path, seed):
    """
    Test save/load/reset state methods for FETDriftOnline. State is saved, reset, and loaded, with
    prediction results and stateful attributes compared to original.
    """
    p_h0 = 0.5
    p_h1 = 0.3
    with fixed_seed(seed):
        # squeeze to test vector input in 1D case
        x_ref = np.random.choice((0, 1), (n, n_feat), p=[1 - p_h0, p_h0]).squeeze()
        x = np.random.choice((0, 1), (n, n_feat), p=[1 - p_h1, p_h1])
        dd = FETDriftOnline(x_ref, window_sizes=window_sizes, ert=20)
    # Store the freshly-initialised state for comparison after reset_state().
    state_dict_t0 = {}
    for key in dd.online_state_keys:
        state_dict_t0[key] = getattr(dd, key)
    # Feed the stream through the detector, snapshotting state to disk at t=5.
    test_stats_1 = []
    for t, x_t in enumerate(x):
        if t == 5:
            dd.save_state(tmp_path)
            # Store state for comparison
            state_dict_t5 = {}
            for key in dd.online_state_keys:
                state_dict_t5[key] = getattr(dd, key)
        preds = dd.predict(x_t)
        test_stats_1.append(preds['data']['test_stat'])
    # Reset and check state cleared
    dd.reset_state()
    for key, orig_val in state_dict_t0.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc
    # Repeat, check that same test_stats both times
    test_stats_2 = []
    for t, x_t in enumerate(x):
        preds = dd.predict(x_t)
        test_stats_2.append(preds['data']['test_stat'])
    np.testing.assert_array_equal(test_stats_1, test_stats_2)
    # Load state from t=5 timestep
    dd.load_state(tmp_path)
    # Compare stateful attributes to original at t=5
    for key, orig_val in state_dict_t5.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc
    # Compare predictions to original at t=5
    new_pred = dd.predict(x[5])
    np.testing.assert_array_equal(new_pred['data']['test_stat'], test_stats_1[5])
| 4,578 | 35.927419 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_mmd.py | import numpy as np
import pytest
from alibi_detect.cd import MMDDrift
from alibi_detect.cd.pytorch.mmd import MMDDriftTorch
from alibi_detect.cd.tensorflow.mmd import MMDDriftTF
from alibi_detect.utils.frameworks import has_keops
if has_keops:
from alibi_detect.cd.keops.mmd import MMDDriftKeops
# Data dimensions and backends to exercise; 'keops' is skipped at runtime when unavailable.
n, n_features = 100, 5
tests_mmddrift = ['tensorflow', 'pytorch', 'keops', 'PyToRcH', 'mxnet']
n_tests = len(tests_mmddrift)
@pytest.fixture
def mmddrift_params(request):
    # Indirect parametrization: request.param indexes into tests_mmddrift.
    return tests_mmddrift[request.param]
@pytest.mark.parametrize('mmddrift_params', list(range(n_tests)), indirect=True)
def test_mmddrift(mmddrift_params):
    """MMDDrift should wrap the backend detector matching `backend` (case-insensitively);
    unsupported or unavailable backends (mxnet, keops without pykeops) leave cd as None."""
    requested = mmddrift_params
    x_ref = np.random.randn(n, n_features).astype('float32')
    try:
        detector = MMDDrift(x_ref=x_ref, backend=requested)
    except (NotImplementedError, ImportError):
        detector = None
    name = requested.lower()
    if name == 'pytorch':
        assert isinstance(detector._detector, MMDDriftTorch)
    elif name == 'tensorflow':
        assert isinstance(detector._detector, MMDDriftTF)
    elif name == 'keops' and has_keops:
        assert isinstance(detector._detector, MMDDriftKeops)
    else:
        assert detector is None
| 1,185 | 29.410256 | 80 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_learned_kernel.py | import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense
import torch
import torch.nn as nn
from alibi_detect.cd import LearnedKernelDrift
from alibi_detect.cd.pytorch.learned_kernel import LearnedKernelDriftTorch
from alibi_detect.cd.tensorflow.learned_kernel import LearnedKernelDriftTF
from alibi_detect.utils.frameworks import has_keops
if has_keops:
from alibi_detect.cd.keops.learned_kernel import LearnedKernelDriftKeops
from pykeops.torch import LazyTensor
n, n_features = 100, 5
class MyKernelTF(tf.keras.Model):  # TODO: Support then test models using keras functional API
    # Toy learnable kernel: shared dense projection, pairwise dot products between projections.
    def __init__(self, n_features: int):
        super().__init__()
        # Stored so get_config/from_config can round-trip the model (keras serialization).
        self.config = {'n_features': n_features}
        self.dense = Dense(20)
    def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
        # k[j, k] = <dense(x)[j], dense(y)[k]>
        return tf.einsum('ji,ki->jk', self.dense(x), self.dense(y))
    def get_config(self) -> dict:
        return self.config
    @classmethod
    def from_config(cls, config):
        return cls(**config)
class MyKernelTorch(nn.Module):
    """Toy learnable kernel: both inputs pass through a shared linear projection and
    the kernel matrix holds the pairwise dot products between the projections."""
    def __init__(self, n_features: int):
        super().__init__()
        # Shared projection applied to both kernel arguments.
        self.dense = nn.Linear(n_features, 20)
    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        proj_x = self.dense(x)
        proj_y = self.dense(y)
        # out[j, k] = <proj_x[j], proj_y[k]>
        return torch.einsum('ji,ki->jk', proj_x, proj_y)
if has_keops:
    # Gaussian RBF kernel on keops LazyTensors; only defined when pykeops is installed.
    class MyKernelKeops(nn.Module):
        def __init__(self):
            super().__init__()
        def forward(self, x: LazyTensor, y: LazyTensor) -> LazyTensor:
            return (- ((x - y) ** 2).sum(-1)).exp()
# Backends to exercise: three valid (keops only if available), one oddly-cased, one unsupported.
tests_lkdrift = ['tensorflow', 'pytorch', 'keops', 'PyToRcH', 'mxnet']
n_tests = len(tests_lkdrift)
@pytest.fixture
def lkdrift_params(request):
    # Indirect parametrization: request.param indexes into tests_lkdrift.
    return tests_lkdrift[request.param]
@pytest.mark.parametrize('lkdrift_params', list(range(n_tests)), indirect=True)
def test_lkdrift(lkdrift_params):
    # LearnedKernelDrift should wrap the matching backend detector; unsupported
    # backends raise NotImplementedError and a missing pykeops raises ImportError.
    backend = lkdrift_params
    if backend.lower() == 'pytorch':
        kernel = MyKernelTorch(n_features)
    elif backend.lower() == 'tensorflow':
        kernel = MyKernelTF(n_features)
    elif has_keops and backend.lower() == 'keops':
        kernel = MyKernelKeops()
    else:
        kernel = None
    x_ref = np.random.randn(*(n, n_features))
    try:
        cd = LearnedKernelDrift(x_ref=x_ref, kernel=kernel, backend=backend)
    except NotImplementedError:
        cd = None
    except ImportError:
        # Only the keops backend can fail with ImportError (pykeops not installed).
        assert not has_keops
        cd = None
    if backend.lower() == 'pytorch':
        assert isinstance(cd._detector, LearnedKernelDriftTorch)
    elif backend.lower() == 'tensorflow':
        assert isinstance(cd._detector, LearnedKernelDriftTF)
    elif has_keops and backend.lower() == 'keops':
        assert isinstance(cd._detector, LearnedKernelDriftKeops)
    else:
        assert cd is None
| 2,811 | 29.901099 | 94 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_classifier.py | import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
import torch
import torch.nn as nn
from alibi_detect.cd import ClassifierDrift
from alibi_detect.cd.pytorch.classifier import ClassifierDriftTorch
from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF
from alibi_detect.cd.sklearn.classifier import ClassifierDriftSklearn
from sklearn.neural_network import MLPClassifier
from typing import Tuple
n, n_features = 100, 5
def tensorflow_model(input_shape: Tuple[int]):
    """Two-class softmax classifier used for the 'tensorflow' backend."""
    inputs = Input(shape=input_shape)
    hidden = Dense(20, activation=tf.nn.relu)(inputs)
    outputs = Dense(2, activation='softmax')(hidden)
    return tf.keras.models.Model(inputs=inputs, outputs=outputs)
def pytorch_model(input_shape: int):
    """Two-class (logit-output) classifier used for the 'pytorch' backend."""
    layers = [
        nn.Linear(input_shape, 20),
        nn.ReLU(),
        nn.Linear(20, 2),
    ]
    return torch.nn.Sequential(*layers)
def sklearn_model():
    """Small MLP classifier used for the 'sklearn' backend."""
    return MLPClassifier(hidden_layer_sizes=(20, ))
# Backends to exercise: three valid, one oddly-cased (case-insensitivity) and one unsupported.
tests_clfdrift = ['tensorflow', 'pytorch', 'PyToRcH', 'sklearn', 'mxnet']
n_tests = len(tests_clfdrift)
@pytest.fixture
def clfdrift_params(request):
    # Indirect parametrization: request.param indexes into tests_clfdrift.
    return tests_clfdrift[request.param]
@pytest.mark.parametrize('clfdrift_params', list(range(n_tests)), indirect=True)
def test_clfdrift(clfdrift_params):
    # ClassifierDrift should wrap the matching backend detector; unsupported
    # backends raise NotImplementedError, leaving cd as None.
    backend = clfdrift_params
    if backend.lower() == 'pytorch':
        model = pytorch_model(n_features)
    elif backend.lower() == 'tensorflow':
        model = tensorflow_model((n_features,))
    elif backend.lower() == 'sklearn':
        model = sklearn_model()
    else:
        model = None
    x_ref = np.random.randn(*(n, n_features))
    try:
        cd = ClassifierDrift(x_ref=x_ref, model=model, backend=backend)
    except NotImplementedError:
        cd = None
    if backend.lower() == 'pytorch':
        assert isinstance(cd._detector, ClassifierDriftTorch)
    elif backend.lower() == 'tensorflow':
        assert isinstance(cd._detector, ClassifierDriftTF)
    elif backend.lower() == 'sklearn':
        assert isinstance(cd._detector, ClassifierDriftSklearn)
    else:
        assert cd is None
| 2,093 | 28.492958 | 80 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_preprocess.py | from itertools import product
import numpy as np
import pytest
import torch.nn as nn
import torch
from sklearn.linear_model import LogisticRegression
from alibi_detect.cd.preprocess import classifier_uncertainty, regressor_uncertainty
# Shared fixtures: random train/test data for the uncertainty-preprocessing tests.
n, n_features = 100, 10
shape = (n_features,)
X_train = np.random.rand(n * n_features).reshape(n, n_features).astype('float32')
y_train_reg = np.random.rand(n).astype('float32')
y_train_clf = np.random.choice(2, n)
X_test = np.random.rand(n * n_features).reshape(n, n_features).astype('float32')
# Grid: model output type x uncertainty measure.
preds_type = ['probs', 'logits']
uncertainty_type = ['entropy', 'margin']
tests_cu = list(product(preds_type, uncertainty_type))
n_tests_cu = len(tests_cu)
@pytest.fixture
def cu_params(request):
    # Indirect parametrization: request.param indexes into tests_cu.
    return tests_cu[request.param]
@pytest.mark.parametrize('cu_params', list(range(n_tests_cu)), indirect=True)
def test_classifier_uncertainty(cu_params):
    # classifier_uncertainty should return one uncertainty score per test instance.
    preds_type, uncertainty_type = cu_params
    clf = LogisticRegression().fit(X_train, y_train_clf)
    # Log-probabilities stand in for logits when preds_type == 'logits'.
    model_fn = clf.predict_log_proba if preds_type == 'logits' else clf.predict_proba
    uncertainties = classifier_uncertainty(
        X_test, model_fn, preds_type=preds_type, uncertainty_type=uncertainty_type
    )
    assert uncertainties.shape == (X_test.shape[0], 1)
# Uncertainty types accepted by regressor_uncertainty.
tests_ru = ['mc_dropout', 'ensemble']
n_tests_ru = len(tests_ru)
@pytest.fixture
def ru_params(request):
    # Indirect parametrization: request.param indexes into tests_ru.
    return tests_ru[request.param]
@pytest.mark.parametrize('ru_params', list(range(n_tests_ru)), indirect=True)
def test_regressor_uncertainty(ru_params):
    """regressor_uncertainty should return one uncertainty score per instance for
    both the 'mc_dropout' and 'ensemble' uncertainty types."""
    uncertainty_type = ru_params
    # BUG FIX: the parametrized value is 'mc_dropout' (see tests_ru above), not
    # 'dropout'. The old comparison `uncertainty_type == 'dropout'` never matched,
    # so the dropout branch was dead code and both cases ran the ensemble model.
    if uncertainty_type == 'mc_dropout':
        # Single-output regressor with a dropout layer for MC-dropout sampling.
        model = nn.Sequential(
            nn.Linear(n_features, 10),
            nn.Dropout(0.5),
            nn.Linear(10, 1)
        )
    else:
        # 42 outputs act as an ensemble of point predictions.
        model = nn.Linear(n_features, 42)
    def model_fn(x):
        with torch.no_grad():
            return np.array(model(torch.as_tensor(x)))
    uncertainties = regressor_uncertainty(
        X_test, model_fn, uncertainty_type=uncertainty_type
    )
    assert uncertainties.shape == (X_test.shape[0], 1)
| 2,067 | 29.411765 | 85 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_contextmmd.py | import numpy as np
import pytest
from alibi_detect.cd import ContextMMDDrift
from alibi_detect.cd.pytorch.context_aware import ContextMMDDriftTorch
from alibi_detect.cd.tensorflow.context_aware import ContextMMDDriftTF
# Data dimensions and backends to exercise (valid, oddly-cased and unsupported).
n, n_features = 100, 5
tests_context_mmddrift = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet']
n_tests = len(tests_context_mmddrift)
@pytest.fixture
def context_mmddrift_params(request):
    # Indirect parametrization: request.param indexes into tests_context_mmddrift.
    return tests_context_mmddrift[request.param]
@pytest.mark.parametrize('context_mmddrift_params', list(range(n_tests)), indirect=True)
def test_context_mmddrift(context_mmddrift_params):
    """ContextMMDDrift should wrap the backend detector matching `backend`
    (case-insensitively) and raise NotImplementedError for unsupported backends."""
    requested = context_mmddrift_params
    # Context variable and context-dependent reference data.
    c_ref = np.random.randn(n, 1)
    x_ref = c_ref + np.random.randn(n, n_features)
    try:
        detector = ContextMMDDrift(x_ref=x_ref, c_ref=c_ref, backend=requested)
    except NotImplementedError:
        detector = None
    name = requested.lower()
    if name == 'pytorch':
        assert isinstance(detector._detector, ContextMMDDriftTorch)
    elif name == 'tensorflow':
        assert isinstance(detector._detector, ContextMMDDriftTF)
    else:
        assert detector is None
| 1,099 | 30.428571 | 88 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tests/test_model_uncertainty.py | import numpy as np
import pytest
from functools import partial
from itertools import product
import scipy
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, Softmax, Dropout
import torch
import torch.nn as nn
from typing import Union
from alibi_detect.cd import ClassifierUncertaintyDrift, RegressorUncertaintyDrift
n = 500
def tf_model(n_features, n_labels, softmax=False, dropout=False):
    """Build a small Keras model: Dense(20, relu) -> optional Dropout(0.5)
    -> Dense(n_labels) -> optional Softmax."""
    inputs = Input(shape=(n_features,))
    hidden = Dense(20, activation=tf.nn.relu)(inputs)
    if dropout:
        hidden = Dropout(0.5)(hidden)
    logits = Dense(n_labels)(hidden)
    outputs = Softmax()(logits) if softmax else logits
    return tf.keras.models.Model(inputs=inputs, outputs=outputs)
class PtModel(nn.Module):
    """Small torch classifier: Linear(n_features, 20) -> ReLU -> optional
    Dropout(0.5) -> Linear(20, n_labels) -> optional Softmax."""

    def __init__(self, n_features, n_labels, softmax=False, dropout=False):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, n_labels)
        # fall back to identity callables when the optional layers are disabled
        self.dropout = nn.Dropout(0.5) if dropout else lambda x: x
        self.softmax = nn.Softmax() if softmax else lambda x: x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = nn.ReLU()(self.dense1(x))
        hidden = self.dropout(hidden)
        logits = self.dense2(hidden)
        return self.softmax(logits)
def dumb_model(x, n_labels, softmax=False):
    """Deterministic stand-in model: logit i is the mean of x * (i + 1) over the
    last axis; optionally softmax-normalised. Lists are concatenated first."""
    if isinstance(x, list):
        x = np.concatenate(x, axis=0)
    logits = [np.mean(x * (label + 1), axis=-1) for label in range(n_labels)]
    out = np.stack(logits, axis=-1)
    if softmax:
        out = scipy.special.softmax(out, axis=-1)
    return out
def gen_model(n_features, n_labels, backend, softmax=False, dropout=False):
    """Instantiate the test model for the requested backend; `None` yields a
    plain-numpy callable; unknown backends yield None."""
    builders = {
        'tensorflow': lambda: tf_model(n_features, n_labels, softmax, dropout),
        'pytorch': lambda: PtModel(n_features, n_labels, softmax, dropout),
        None: lambda: partial(dumb_model, n_labels=n_labels, softmax=softmax),
    }
    builder = builders.get(backend)
    return builder() if builder is not None else None
def id_fn(x: list, to_pt: bool = False) -> Union[np.ndarray, torch.Tensor]:
    """Concatenate a list of arrays along axis 0; optionally return a torch tensor."""
    batch = np.concatenate(x, axis=0)
    return torch.from_numpy(batch) if to_pt else batch
# Parameter grid for the classifier-uncertainty drift tests.
p_val = [.05]
backend = ['tensorflow', 'pytorch', None]  # None -> plain numpy model_fn
n_features = [16]
n_labels = [3]
preds_type = ['probs', 'logits']
uncertainty_type = ['entropy', 'margin']
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
to_list = [True, False]  # also exercise List[np.ndarray] inputs
tests_clfuncdrift = list(product(p_val, backend, n_features, n_labels, preds_type,
                                 uncertainty_type, update_x_ref, to_list))
n_tests = len(tests_clfuncdrift)
@pytest.fixture
def clfuncdrift_params(request):
    """Indirect fixture: map a parametrized index onto one grid combination."""
    return tests_clfuncdrift[request.param]
@pytest.mark.parametrize('clfuncdrift_params', list(range(n_tests)), indirect=True)
def test_clfuncdrift(clfuncdrift_params):
    """Classifier-uncertainty drift: an i.i.d. copy of the reference data must not
    flag drift, constant data must, and the distances must be ordered accordingly."""
    p_val, backend, n_features, n_labels, preds_type, uncertainty_type, update_x_ref, to_list = clfuncdrift_params
    np.random.seed(0)
    tf.random.set_seed(0)
    model = gen_model(n_features, n_labels, backend, preds_type == 'probs')
    x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
    x_test0 = x_ref.copy()  # same distribution -> no drift expected
    x_test1 = np.ones_like(x_ref)  # degenerate data -> drift expected
    if to_list:
        # exercise List[np.ndarray] inputs: one instance per list element
        x_ref = [x[None, :] for x in x_ref]
        x_test0 = [x[None, :] for x in x_test0]
        x_test1 = [x[None, :] for x in x_test1]
    cd = ClassifierUncertaintyDrift(
        x_ref=x_ref,
        model=model,
        p_val=p_val,
        backend=backend,
        update_x_ref=update_x_ref,
        preds_type=preds_type,
        uncertainty_type=uncertainty_type,
        margin_width=0.1,
        batch_size=10,
        preprocess_batch_fn=partial(id_fn, to_pt=backend == 'pytorch') if to_list else None
    )
    preds_0 = cd.predict(x_test0)
    # internal counter grows by each predicted batch
    assert cd._detector.n == len(x_test0) + len(x_ref)
    assert preds_0['data']['is_drift'] == 0
    assert preds_0['data']['distance'] >= 0
    preds_1 = cd.predict(x_test1)
    assert cd._detector.n == len(x_test1) + len(x_test0) + len(x_ref)
    assert preds_1['data']['is_drift'] == 1
    assert preds_1['data']['distance'] >= 0
    # drifted data should be further from the reference than the i.i.d. copy
    assert preds_0['data']['distance'] < preds_1['data']['distance']
# Parameter grid for the regressor-uncertainty drift tests.
p_val = [.05]
backend = ['tensorflow', 'pytorch']
n_features = [16]
uncertainty_type = ['mc_dropout', 'ensemble']
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
to_list = [True, False]  # also exercise List[np.ndarray] inputs
tests_reguncdrift = list(product(p_val, backend, n_features, uncertainty_type, update_x_ref, to_list))
n_tests = len(tests_reguncdrift)
@pytest.fixture
def reguncdrift_params(request):
    """Indirect fixture: map a parametrized index onto one grid combination."""
    return tests_reguncdrift[request.param]
@pytest.mark.parametrize('reguncdrift_params', list(range(n_tests)), indirect=True)
def test_reguncdrift(reguncdrift_params):
    """Regressor-uncertainty drift: no drift on an i.i.d. copy of the reference
    data, drift on constant data, and distances ordered accordingly."""
    p_val, backend, n_features, uncertainty_type, update_x_ref, to_list = reguncdrift_params
    np.random.seed(0)
    tf.random.set_seed(0)
    if uncertainty_type == 'mc_dropout':
        n_labels = 1  # single-output regressor; model built with a dropout layer
        dropout = True
    elif uncertainty_type == 'ensemble':
        n_labels = 5  # multi-output regressor; outputs act as ensemble members
        dropout = False
    model = gen_model(n_features, n_labels, backend, dropout=dropout)
    x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
    x_test0 = x_ref.copy()  # same distribution -> no drift expected
    x_test1 = np.ones_like(x_ref)  # degenerate data -> drift expected
    if to_list:
        # exercise List[np.ndarray] inputs: one instance per list element
        x_ref = [x[None, :] for x in x_ref]
        x_test0 = [x[None, :] for x in x_test0]
        x_test1 = [x[None, :] for x in x_test1]
    cd = RegressorUncertaintyDrift(
        x_ref=x_ref,
        model=model,
        p_val=p_val,
        backend=backend,
        update_x_ref=update_x_ref,
        uncertainty_type=uncertainty_type,
        n_evals=5,
        batch_size=10,
        preprocess_batch_fn=partial(id_fn, to_pt=backend == 'pytorch') if to_list else None
    )
    preds_0 = cd.predict(x_test0)
    # internal counter grows by each predicted batch
    assert cd._detector.n == len(x_test0) + len(x_ref)
    assert preds_0['data']['is_drift'] == 0
    assert preds_0['data']['distance'] >= 0
    preds_1 = cd.predict(x_test1)
    assert cd._detector.n == len(x_test1) + len(x_test0) + len(x_ref)
    assert preds_1['data']['is_drift'] == 1
    assert preds_1['data']['distance'] >= 0
    assert preds_0['data']['distance'] < preds_1['data']['distance']
| 6,110 | 31.163158 | 114 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/keops/learned_kernel.py | from copy import deepcopy
from functools import partial
from tqdm import tqdm
import numpy as np
from pykeops.torch import LazyTensor
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Callable, Dict, List, Optional, Union, Tuple
from alibi_detect.cd.base import BaseLearnedKernelDrift
from alibi_detect.utils.pytorch import get_device, predict_batch
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.utils.frameworks import Framework
class LearnedKernelDriftKeops(BaseLearnedKernelDrift):
    def __init__(
        self,
        x_ref: Union[np.ndarray, list],
        kernel: Union[nn.Module, nn.Sequential],
        p_val: float = .05,
        x_ref_preprocessed: bool = False,
        preprocess_at_init: bool = True,
        update_x_ref: Optional[Dict[str, int]] = None,
        preprocess_fn: Optional[Callable] = None,
        n_permutations: int = 100,
        batch_size_permutations: int = 1000000,
        var_reg: float = 1e-5,
        reg_loss_fn: Callable = (lambda kernel: 0),
        train_size: Optional[float] = .75,
        retrain_from_scratch: bool = True,
        optimizer: torch.optim.Optimizer = torch.optim.Adam,  # type: ignore
        learning_rate: float = 1e-3,
        batch_size: int = 32,
        batch_size_predict: int = 1000000,
        preprocess_batch_fn: Optional[Callable] = None,
        epochs: int = 3,
        num_workers: int = 0,
        verbose: int = 0,
        train_kwargs: Optional[dict] = None,
        device: Optional[str] = None,
        dataset: Callable = TorchDataset,
        dataloader: Callable = DataLoader,
        input_shape: Optional[tuple] = None,
        data_type: Optional[str] = None
    ) -> None:
        """
        Maximum Mean Discrepancy (MMD) data drift detector where the kernel is trained to maximise an
        estimate of the test power. The kernel is trained on a split of the reference and test instances
        and then the MMD is evaluated on held out instances and a permutation test is performed.

        For details see Liu et al (2020): Learning Deep Kernels for Non-Parametric Two-Sample Tests
        (https://arxiv.org/abs/2002.09116)

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        kernel
            Trainable PyTorch module that returns a similarity between two instances.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before applying the kernel.
        n_permutations
            The number of permutations to use in the permutation test once the MMD has been computed.
        batch_size_permutations
            KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations.
        var_reg
            Constant added to the estimated variance of the MMD for stability.
        reg_loss_fn
            The regularisation term reg_loss_fn(kernel) is added to the loss function being optimized.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the kernel.
            The drift is detected on `1 - train_size`.
        retrain_from_scratch
            Whether the kernel should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        optimizer
            Optimizer used during training of the kernel.
        learning_rate
            Learning rate used by optimizer.
        batch_size
            Batch size used during training of the kernel.
        batch_size_predict
            Batch size used for the trained drift detector predictions.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the kernel.
        epochs
            Number of training epochs for the kernel. Corresponds to the smaller of the reference and test sets.
        num_workers
            Number of workers for the dataloader. The default (`num_workers=0`) means multi-process data loading
            is disabled. Setting `num_workers>0` may be unreliable on Windows.
        verbose
            Verbosity level during the training of the kernel. 0 is silent, 1 a progress bar.
        train_kwargs
            Optional additional kwargs when training the kernel.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Relevant for 'pytorch' and 'keops' backends.
        dataset
            Dataset object used during training.
        dataloader
            Dataloader object used during training. Only relevant for 'pytorch' backend.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            p_val=p_val,
            x_ref_preprocessed=x_ref_preprocessed,
            preprocess_at_init=preprocess_at_init,
            update_x_ref=update_x_ref,
            preprocess_fn=preprocess_fn,
            n_permutations=n_permutations,
            train_size=train_size,
            retrain_from_scratch=retrain_from_scratch,
            input_shape=input_shape,
            data_type=data_type
        )
        self.meta.update({'backend': Framework.KEOPS.value})
        # Set device, define model and training kwargs
        self.device = get_device(device)
        # keep the untrained kernel around so `score` can retrain from scratch
        self.original_kernel = kernel
        self.kernel = deepcopy(kernel)
        # Check kernel format: optional projection `proj` and optional second kernel `kernel_b`
        self.has_proj = hasattr(self.kernel, 'proj') and isinstance(self.kernel.proj, nn.Module)
        self.has_kernel_b = hasattr(self.kernel, 'kernel_b') and isinstance(self.kernel.kernel_b, nn.Module)
        # Define kwargs for dataloader and trainer
        self.dataset = dataset
        self.dataloader = partial(dataloader, batch_size=batch_size, shuffle=True,
                                  drop_last=True, num_workers=num_workers)
        self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs, 'preprocess_fn': preprocess_batch_fn,
                             'reg_loss_fn': reg_loss_fn, 'learning_rate': learning_rate, 'verbose': verbose}
        if isinstance(train_kwargs, dict):
            self.train_kwargs.update(train_kwargs)
        self.j_hat = LearnedKernelDriftKeops.JHat(
            self.kernel, var_reg, self.has_proj, self.has_kernel_b).to(self.device)
        # Set prediction and permutation batch sizes
        self.batch_size_predict = batch_size_predict
        self.batch_size_perms = batch_size_permutations
        # number of chunks needed to cover all permutations at batch_size_perms each
        self.n_batches = 1 + (n_permutations - 1) // batch_size_permutations
    class JHat(nn.Module):
        """
        A module that wraps around the kernel. When passed a batch of reference and batch of test
        instances it returns an estimate of a correlate of test power.
        Equation 4 of https://arxiv.org/abs/2002.09116
        """
        def __init__(self, kernel: nn.Module, var_reg: float, has_proj: bool, has_kernel_b: bool):
            super().__init__()
            self.kernel = kernel
            self.has_proj = has_proj
            self.has_kernel_b = has_kernel_b
            self.var_reg = var_reg  # stabiliser added to the variance estimate
        def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
            n = len(x)
            # optionally project the inputs before the main kernel
            if self.has_proj and isinstance(self.kernel.proj, nn.Module):
                x_proj, y_proj = self.kernel.proj(x), self.kernel.proj(y)
            else:
                x_proj, y_proj = x, y
            # wrap in KeOps LazyTensors so kernel matrices are computed symbolically
            x2_proj, x_proj = LazyTensor(x_proj[None, :, :]), LazyTensor(x_proj[:, None, :])
            y2_proj, y_proj = LazyTensor(y_proj[None, :, :]), LazyTensor(y_proj[:, None, :])
            if self.has_kernel_b:
                # raw (unprojected) inputs for the secondary kernel
                x2, x = LazyTensor(x[None, :, :]), LazyTensor(x[:, None, :])
                y2, y = LazyTensor(y[None, :, :]), LazyTensor(y[:, None, :])
            else:
                x, x2, y, y2 = None, None, None, None
            k_xy = self.kernel(x_proj, y2_proj, x, y2)
            k_xx = self.kernel(x_proj, x2_proj, x, x2)
            k_yy = self.kernel(y_proj, y2_proj, y, y2)
            h_mat = k_xx + k_yy - k_xy - k_xy.t()
            h_i = h_mat.sum(1).squeeze(-1)
            h = h_i.sum()
            # the `- n` corrects for the diagonal, assumed to equal 1 per entry
            mmd2_est = (h - n) / (n * (n - 1))
            var_est = 4 * h_i.square().sum() / (n ** 3) - 4 * h.square() / (n ** 4)
            reg_var_est = var_est + self.var_reg
            # studentised statistic: a correlate of the test power
            return mmd2_est/reg_var_est.sqrt()
    def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
        """
        Compute the p-value resulting from a permutation test using the maximum mean discrepancy
        as a distance measure between the reference data and the data to be tested. The kernel
        used within the MMD is first trained to maximise an estimate of the resulting test power.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
        and the MMD^2 threshold above which drift is flagged.
        """
        x_ref, x_cur = self.preprocess(x)
        # split into a train portion (kernel training) and a held-out test portion
        (x_ref_tr, x_cur_tr), (x_ref_te, x_cur_te) = self.get_splits(x_ref, x_cur)
        dl_ref_tr, dl_cur_tr = self.dataloader(self.dataset(x_ref_tr)), self.dataloader(self.dataset(x_cur_tr))
        # optionally reset the kernel to its untrained state before training
        self.kernel = deepcopy(self.original_kernel) if self.retrain_from_scratch else self.kernel
        self.kernel = self.kernel.to(self.device)
        train_args = [self.j_hat, (dl_ref_tr, dl_cur_tr), self.device]
        LearnedKernelDriftKeops.trainer(*train_args, **self.train_kwargs)  # type: ignore
        m, n = len(x_ref_te), len(x_cur_te)
        if isinstance(x_ref_te, np.ndarray) and isinstance(x_cur_te, np.ndarray):
            x_all = torch.from_numpy(np.concatenate([x_ref_te, x_cur_te], axis=0)).float()
        else:
            # list inputs: simple list concatenation; batching is handled downstream
            x_all = x_ref_te + x_cur_te  # type: ignore[assignment]
        # permutation test on the held-out split only
        perms = [torch.randperm(m + n) for _ in range(self.n_permutations)]
        mmd2, mmd2_permuted = self._mmd2(x_all, perms, m, n)
        if self.device.type == 'cuda':
            mmd2, mmd2_permuted = mmd2.cpu(), mmd2_permuted.cpu()
        p_val = (mmd2 <= mmd2_permuted).float().mean()
        idx_threshold = int(self.p_val * len(mmd2_permuted))
        distance_threshold = torch.sort(mmd2_permuted, descending=True).values[idx_threshold]
        return p_val.numpy().item(), mmd2.numpy().item(), distance_threshold.numpy()
    def _mmd2(self, x_all: Union[list, torch.Tensor], perms: List[torch.Tensor], m: int, n: int) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Batched (across the permutations) MMD^2 computation for the original test statistic and the permutations.

        Parameters
        ----------
        x_all
            Concatenated reference and test instances.
        perms
            List with permutation vectors.
        m
            Number of reference instances.
        n
            Number of test instances.

        Returns
        -------
        MMD^2 statistic for the original and permuted reference and test sets.
        """
        preprocess_batch_fn = self.train_kwargs['preprocess_fn']
        if isinstance(preprocess_batch_fn, Callable):  # type: ignore[arg-type]
            x_all = preprocess_batch_fn(x_all)  # type: ignore[operator]
        if self.has_proj:
            # project all instances once, in batches, before forming permutations
            x_all_proj = predict_batch(x_all, self.kernel.proj, device=self.device, batch_size=self.batch_size_predict,
                                       dtype=x_all.dtype if isinstance(x_all, torch.Tensor) else torch.float32)
        else:
            x_all_proj = x_all
        x, x2, y, y2 = None, None, None, None
        k_xx, k_yy, k_xy = [], [], []
        for batch in range(self.n_batches):
            i, j = batch * self.batch_size_perms, (batch + 1) * self.batch_size_perms
            # Stack a batch of permuted reference and test tensors and their projections
            x_proj = torch.cat([x_all_proj[perm[:m]][None, :, :] for perm in perms[i:j]], 0)
            y_proj = torch.cat([x_all_proj[perm[m:]][None, :, :] for perm in perms[i:j]], 0)
            if self.has_kernel_b:
                x = torch.cat([x_all[perm[:m]][None, :, :] for perm in perms[i:j]], 0)
                y = torch.cat([x_all[perm[m:]][None, :, :] for perm in perms[i:j]], 0)
            if batch == 0:
                # prepend the unpermuted split so stats[0] is the original MMD^2
                x_proj = torch.cat([x_all_proj[None, :m, :], x_proj], 0)
                y_proj = torch.cat([x_all_proj[None, m:, :], y_proj], 0)
                if self.has_kernel_b:
                    x = torch.cat([x_all[None, :m, :], x], 0)  # type: ignore[call-overload]
                    y = torch.cat([x_all[None, m:, :], y], 0)  # type: ignore[call-overload]
            x_proj, y_proj = x_proj.to(self.device), y_proj.to(self.device)
            if self.has_kernel_b:
                x, y = x.to(self.device), y.to(self.device)
            # Batch-wise kernel matrix computation over the permutations
            with torch.no_grad():
                x2_proj, x_proj = LazyTensor(x_proj[:, None, :, :]), LazyTensor(x_proj[:, :, None, :])
                y2_proj, y_proj = LazyTensor(y_proj[:, None, :, :]), LazyTensor(y_proj[:, :, None, :])
                if self.has_kernel_b:
                    x2, x = LazyTensor(x[:, None, :, :]), LazyTensor(x[:, :, None, :])
                    y2, y = LazyTensor(y[:, None, :, :]), LazyTensor(y[:, :, None, :])
                k_xy.append(self.kernel(x_proj, y2_proj, x, y2).sum(1).sum(1).squeeze(-1))
                k_xx.append(self.kernel(x_proj, x2_proj, x, x2).sum(1).sum(1).squeeze(-1))
                k_yy.append(self.kernel(y_proj, y2_proj, y, y2).sum(1).sum(1).squeeze(-1))
        c_xx, c_yy, c_xy = 1 / (m * (m - 1)), 1 / (n * (n - 1)), 2. / (m * n)
        # Note that the MMD^2 estimates assume that the diagonal of the kernel matrix consists of 1's
        stats = c_xx * (torch.cat(k_xx) - m) + c_yy * (torch.cat(k_yy) - n) - c_xy * torch.cat(k_xy)
        return stats[0], stats[1:]
    @staticmethod
    def trainer(
        j_hat: JHat,
        dataloaders: Tuple[DataLoader, DataLoader],
        device: torch.device,
        optimizer: Callable = torch.optim.Adam,
        learning_rate: float = 1e-3,
        preprocess_fn: Callable = None,
        epochs: int = 20,
        reg_loss_fn: Callable = (lambda kernel: 0),
        verbose: int = 1,
    ) -> None:
        """
        Train the kernel to maximise an estimate of test power using minibatch gradient descent.

        Minimises `-JHat(x_ref, x_cur) + reg_loss_fn(kernel)`, i.e. gradient ascent
        on the test-power estimate plus an optional regularisation term.
        """
        optimizer = optimizer(j_hat.parameters(), lr=learning_rate)
        j_hat.train()
        loss_ma = 0.  # running-average loss, used only for the progress bar
        for epoch in range(epochs):
            dl_ref, dl_cur = dataloaders
            dl = tqdm(enumerate(zip(dl_ref, dl_cur)), total=min(len(dl_ref), len(dl_cur))) if verbose == 1 else \
                enumerate(zip(dl_ref, dl_cur))
            for step, (x_ref, x_cur) in dl:
                if isinstance(preprocess_fn, Callable):  # type: ignore
                    x_ref, x_cur = preprocess_fn(x_ref), preprocess_fn(x_cur)
                x_ref, x_cur = x_ref.to(device), x_cur.to(device)
                optimizer.zero_grad()  # type: ignore
                estimate = j_hat(x_ref, x_cur)
                loss = -estimate + reg_loss_fn(j_hat.kernel)  # ascent
                loss.backward()
                optimizer.step()  # type: ignore
                if verbose == 1:
                    loss_ma = loss_ma + (loss.item() - loss_ma) / (step + 1)
                    dl.set_description(f'Epoch {epoch + 1}/{epochs}')
                    dl.set_postfix(dict(loss=loss_ma))
| 16,939 | 48.244186 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/keops/mmd.py | import logging
import numpy as np
from pykeops.torch import LazyTensor
import torch
from typing import Callable, Dict, List, Optional, Tuple, Union
from alibi_detect.cd.base import BaseMMDDrift
from alibi_detect.utils.keops.kernels import GaussianRBF
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
class MMDDriftKeops(BaseMMDDrift):
    def __init__(
        self,
        x_ref: Union[np.ndarray, list],
        p_val: float = .05,
        x_ref_preprocessed: bool = False,
        preprocess_at_init: bool = True,
        update_x_ref: Optional[Dict[str, int]] = None,
        preprocess_fn: Optional[Callable] = None,
        kernel: Callable = GaussianRBF,
        sigma: Optional[np.ndarray] = None,
        configure_kernel_from_x_ref: bool = True,
        n_permutations: int = 100,
        batch_size_permutations: int = 1000000,
        device: Optional[str] = None,
        input_shape: Optional[tuple] = None,
        data_type: Optional[str] = None
    ) -> None:
        """
        Maximum Mean Discrepancy (MMD) data drift detector using a permutation test.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the permutation test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        kernel
            Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
        sigma
            Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
            The kernel evaluation is then averaged over those bandwidths.
        configure_kernel_from_x_ref
            Whether to already configure the kernel bandwidth from the reference data.
        n_permutations
            Number of permutations used in the permutation test.
        batch_size_permutations
            KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            p_val=p_val,
            x_ref_preprocessed=x_ref_preprocessed,
            preprocess_at_init=preprocess_at_init,
            update_x_ref=update_x_ref,
            preprocess_fn=preprocess_fn,
            sigma=sigma,
            configure_kernel_from_x_ref=configure_kernel_from_x_ref,
            n_permutations=n_permutations,
            input_shape=input_shape,
            data_type=data_type
        )
        self.meta.update({'backend': Framework.KEOPS.value})
        # set device
        self.device = get_device(device)
        # initialize kernel
        sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma,  # type: ignore[assignment]
                                                                      np.ndarray) else None
        # only instantiate-and-move when the default GaussianRBF class was passed;
        # otherwise the caller-provided kernel object is used as-is
        self.kernel = kernel(sigma).to(self.device) if kernel == GaussianRBF else kernel
        # set the correct MMD^2 function based on the batch size for the permutations
        self.batch_size = batch_size_permutations
        self.n_batches = 1 + (n_permutations - 1) // batch_size_permutations
        # infer the kernel bandwidth from the reference data
        # NOTE(review): self.infer_sigma is presumably initialised by the base class
        # from configure_kernel_from_x_ref — confirm against BaseMMDDrift
        if isinstance(sigma, torch.Tensor):
            self.infer_sigma = False
        elif self.infer_sigma:
            x = torch.from_numpy(self.x_ref).to(self.device)
            _ = self.kernel(LazyTensor(x[:, None, :]), LazyTensor(x[None, :, :]), infer_sigma=self.infer_sigma)
            self.infer_sigma = False
        else:
            self.infer_sigma = True
    def _mmd2(self, x_all: torch.Tensor, perms: List[torch.Tensor], m: int, n: int) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Batched (across the permutations) MMD^2 computation for the original test statistic and the permutations.

        Parameters
        ----------
        x_all
            Concatenated reference and test instances.
        perms
            List with permutation vectors.
        m
            Number of reference instances.
        n
            Number of test instances.

        Returns
        -------
        MMD^2 statistic for the original and permuted reference and test sets.
        """
        k_xx, k_yy, k_xy = [], [], []
        for batch in range(self.n_batches):
            i, j = batch * self.batch_size, (batch + 1) * self.batch_size
            # construct stacked tensors with a batch of permutations for the reference set x and test set y
            x = torch.cat([x_all[perm[:m]][None, :, :] for perm in perms[i:j]], 0)
            y = torch.cat([x_all[perm[m:]][None, :, :] for perm in perms[i:j]], 0)
            if batch == 0:
                # prepend the unpermuted split so stats[0] is the original MMD^2
                x = torch.cat([x_all[None, :m, :], x], 0)
                y = torch.cat([x_all[None, m:, :], y], 0)
            x, y = x.to(self.device), y.to(self.device)
            # batch-wise kernel matrix computation over the permutations
            k_xy.append(self.kernel(
                LazyTensor(x[:, :, None, :]), LazyTensor(y[:, None, :, :]), self.infer_sigma).sum(1).sum(1).squeeze(-1))
            k_xx.append(self.kernel(
                LazyTensor(x[:, :, None, :]), LazyTensor(x[:, None, :, :])).sum(1).sum(1).squeeze(-1))
            k_yy.append(self.kernel(
                LazyTensor(y[:, :, None, :]), LazyTensor(y[:, None, :, :])).sum(1).sum(1).squeeze(-1))
        c_xx, c_yy, c_xy = 1 / (m * (m - 1)), 1 / (n * (n - 1)), 2. / (m * n)
        # Note that the MMD^2 estimates assume that the diagonal of the kernel matrix consists of 1's
        stats = c_xx * (torch.cat(k_xx) - m) + c_yy * (torch.cat(k_yy) - n) - c_xy * torch.cat(k_xy)
        return stats[0], stats[1:]
    def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
        """
        Compute the p-value resulting from a permutation test using the maximum mean discrepancy
        as a distance measure between the reference data and the data to be tested.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
        and the MMD^2 threshold above which drift is flagged.
        """
        x_ref, x = self.preprocess(x)
        x_ref = torch.from_numpy(x_ref).float()  # type: ignore[assignment]
        x = torch.from_numpy(x).float()  # type: ignore[assignment]
        # compute kernel matrix, MMD^2 and apply permutation test
        m, n = x_ref.shape[0], x.shape[0]
        perms = [torch.randperm(m + n) for _ in range(self.n_permutations)]
        # TODO - Rethink typings (related to https://github.com/SeldonIO/alibi-detect/issues/540)
        x_all = torch.cat([x_ref, x], 0)  # type: ignore[list-item]
        mmd2, mmd2_permuted = self._mmd2(x_all, perms, m, n)
        if self.device.type == 'cuda':
            mmd2, mmd2_permuted = mmd2.cpu(), mmd2_permuted.cpu()
        # p-value: fraction of permuted statistics at least as large as the observed one
        p_val = (mmd2 <= mmd2_permuted).float().mean()
        # compute distance threshold
        idx_threshold = int(self.p_val * len(mmd2_permuted))
        distance_threshold = torch.sort(mmd2_permuted, descending=True).values[idx_threshold]
        return p_val.numpy().item(), mmd2.numpy().item(), distance_threshold.numpy()
| 8,642 | 45.972826 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/keops/tests/test_mmd_keops.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.utils.frameworks import has_keops
from alibi_detect.utils.pytorch import GaussianRBF, mmd2_from_kernel_matrix
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
if has_keops:
from alibi_detect.cd.keops.mmd import MMDDriftKeops
n, n_hidden, n_classes = 500, 10, 5
class MyModel(nn.Module):
    """Tiny two-layer MLP (n_features -> 20 -> 2) used as a feature extractor."""

    def __init__(self, n_features: int):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.dense2(self.dense1(x).relu())
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
    """Stack a list of per-instance arrays into a single batch array."""
    batch = [np.asarray(item) for item in x]
    return np.concatenate(batch, axis=0)
# Parameter grid for the keops MMD drift tests.
n_features = [10]
n_enc = [None, 3]
# (preprocess_fn, kwargs) pairs: no preprocessing, hidden-layer extraction, list input
preprocess = [
    (None, None),
    (preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
    (preprocess_list, None)
]
update_x_ref = [{'last': 750}, {'reservoir_sampling': 750}, None]
preprocess_at_init = [True, False]
n_permutations = [10]
batch_size_permutations = [10, 1000000]  # forces multi-batch and single-batch paths
configure_kernel_from_x_ref = [True, False]
tests_mmddrift = list(product(n_features, n_enc, preprocess, n_permutations, preprocess_at_init, update_x_ref,
                              batch_size_permutations, configure_kernel_from_x_ref))
n_tests = len(tests_mmddrift)
@pytest.fixture
def mmd_params(request):
    """Indirect fixture: map a parametrized index onto one grid combination."""
    return tests_mmddrift[request.param]
@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.')
@pytest.mark.parametrize('mmd_params', list(range(n_tests)), indirect=True)
def test_mmd(mmd_params):
    """MMDDriftKeops: no drift on an i.i.d. copy of the reference data, consistent
    drift/no-drift bookkeeping on fresh data, and agreement of the keops MMD^2
    estimate with the pure pytorch implementation."""
    n_features, n_enc, preprocess, n_permutations, preprocess_at_init, update_x_ref, \
        batch_size_permutations, configure_kernel_from_x_ref = mmd_params
    np.random.seed(0)
    torch.manual_seed(0)
    x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        if not preprocess_at_init:
            return
        to_list = True
        # one instance per list element
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
            and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
        # extract features from a hidden layer of a small torch model
        model = MyModel(n_features)
        layer = preprocess_kwargs['layer']
        preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
    else:
        preprocess_fn = None
    cd = MMDDriftKeops(
        x_ref=x_ref,
        p_val=.05,
        preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
        update_x_ref=update_x_ref,
        preprocess_fn=preprocess_fn,
        configure_kernel_from_x_ref=configure_kernel_from_x_ref,
        n_permutations=n_permutations,
        batch_size_permutations=batch_size_permutations
    )
    x = x_ref.copy()
    preds = cd.predict(x, return_p_val=True)
    assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
    if isinstance(update_x_ref, dict):
        k = list(update_x_ref.keys())[0]
        assert cd.n == len(x) + len(x_ref)
        # reference set is capped at the configured update size
        assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref))
    x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    if to_list:
        x_h1 = [_[None, :] for _ in x_h1]
    preds = cd.predict(x_h1, return_p_val=True)
    # whatever the drift verdict, p-value and distance must be consistent with it
    if preds['data']['is_drift'] == 1:
        assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] > preds['data']['distance_threshold']
    else:
        assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] <= preds['data']['distance_threshold']
    # ensure the keops MMD^2 estimate matches the pytorch implementation for the same kernel
    if not isinstance(x_ref, list) and update_x_ref is None:
        p_val, mmd2, distance_threshold = cd.score(x_h1)
        kernel = GaussianRBF(sigma=cd.kernel.sigma)
        if isinstance(preprocess_fn, Callable):
            x_ref, x_h1 = cd.preprocess(x_h1)
        x_ref = torch.from_numpy(x_ref).float()
        x_h1 = torch.from_numpy(x_h1).float()
        x_all = torch.cat([x_ref, x_h1], 0)
        kernel_mat = kernel(x_all, x_all)
        mmd2_torch = mmd2_from_kernel_matrix(kernel_mat, x_h1.shape[0])
        np.testing.assert_almost_equal(mmd2, mmd2_torch, decimal=6)
| 4,726 | 38.066116 | 110 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/keops/tests/test_learned_kernel_keops.py | from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, Optional, Union
from alibi_detect.utils.frameworks import has_keops
from alibi_detect.utils.pytorch import GaussianRBF as GaussianRBFTorch
from alibi_detect.utils.pytorch import mmd2_from_kernel_matrix
if has_keops:
from alibi_detect.cd.keops.learned_kernel import LearnedKernelDriftKeops
from alibi_detect.utils.keops import GaussianRBF
from pykeops.torch import LazyTensor
n = 50  # number of instances used for the reference and test data samples in the tests
if has_keops:
    class MyKernel(nn.Module):
        """Deep kernel for the learned-kernel tests: a trainable GaussianRBF on the
        (optionally projected) inputs, plus a second RBF on the raw inputs when a
        projection is used."""
        def __init__(self, n_features: int, proj: bool):
            super().__init__()
            sigma = .1
            self.kernel = GaussianRBF(trainable=True, sigma=torch.Tensor([sigma]))
            self.has_proj = proj
            if proj:
                # project to 2 dims; kernel_b operates on the unprojected inputs
                self.proj = nn.Linear(n_features, 2)
                self.kernel_b = GaussianRBF(trainable=True, sigma=torch.Tensor([sigma]))
        def forward(self, x_proj: LazyTensor, y_proj: LazyTensor, x: Optional[LazyTensor] = None,
                    y: Optional[LazyTensor] = None) -> LazyTensor:
            similarity = self.kernel(x_proj, y_proj)
            if self.has_proj:
                similarity = similarity + self.kernel_b(x, y)
            return similarity
# test List[Any] inputs to the detector
def identity_fn(x: Union[torch.Tensor, list]) -> torch.Tensor:
    """Return tensors unchanged; convert list inputs to a tensor.

    Stand-in preprocessing function so the detector tests can feed
    ``list`` data through the same code path as tensors.
    """
    if not isinstance(x, list):
        return x
    return torch.from_numpy(np.array(x))
# hyperparameter grid for the detector tests below; `tests_lkdrift` holds the
# full cartesian product and each test case indexes into it via the fixture
p_val = [.05]
n_features = [4]
preprocess_at_init = [True, False]
update_x_ref = [None, {'reservoir_sampling': 1000}]
preprocess_fn = [None, identity_fn]
n_permutations = [10]
batch_size_permutations = [5, 1000000]
train_size = [.5]
retrain_from_scratch = [True]
batch_size_predict = [1000000]
preprocess_batch = [None, identity_fn]
has_proj = [True, False]
tests_lkdrift = list(product(p_val, n_features, preprocess_at_init, update_x_ref, preprocess_fn,
                             n_permutations, batch_size_permutations, train_size, retrain_from_scratch,
                             batch_size_predict, preprocess_batch, has_proj))
n_tests = len(tests_lkdrift)
@pytest.fixture
def lkdrift_params(request):
    """Return the parameter tuple for the indirectly parametrized test case."""
    return tests_lkdrift[request.param]
@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.')
@pytest.mark.parametrize('lkdrift_params', list(range(n_tests)), indirect=True)
def test_lkdrift(lkdrift_params):
    """End-to-end check of LearnedKernelDriftKeops: no drift flagged on a copy of
    the reference data, drift flagged on a constant-shifted sample, and the keops
    MMD^2 estimate matching the pytorch reference implementation."""
    p_val, n_features, preprocess_at_init, update_x_ref, preprocess_fn, \
        n_permutations, batch_size_permutations, train_size, retrain_from_scratch, \
        batch_size_predict, preprocess_batch, has_proj = lkdrift_params
    # fix seeds so the permutation test and kernel training are reproducible
    np.random.seed(0)
    torch.manual_seed(0)
    kernel = MyKernel(n_features, has_proj)
    x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
    x_test1 = np.ones_like(x_ref)
    to_list = False
    if preprocess_batch is not None and preprocess_fn is None:
        # exercise the List[Any] input path; reference updates are unsupported there
        to_list = True
        x_ref = [_ for _ in x_ref]
        update_x_ref = None
    cd = LearnedKernelDriftKeops(
        x_ref=x_ref,
        kernel=kernel,
        p_val=p_val,
        preprocess_at_init=preprocess_at_init,
        update_x_ref=update_x_ref,
        preprocess_fn=preprocess_fn,
        n_permutations=n_permutations,
        batch_size_permutations=batch_size_permutations,
        train_size=train_size,
        retrain_from_scratch=retrain_from_scratch,
        batch_size_predict=batch_size_predict,
        preprocess_batch_fn=preprocess_batch,
        batch_size=32,
        epochs=1
    )
    # same-distribution data: detector should see no drift
    x_test0 = x_ref.copy()
    preds_0 = cd.predict(x_test0)
    assert cd.n == len(x_test0) + len(x_ref)
    assert preds_0['data']['is_drift'] == 0
    if to_list:
        x_test1 = [_ for _ in x_test1]
    # shifted data: detector should flag drift with a larger distance
    preds_1 = cd.predict(x_test1)
    assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
    assert preds_1['data']['is_drift'] == 1
    assert preds_0['data']['distance'] < preds_1['data']['distance']
    # ensure the keops MMD^2 estimate matches the pytorch implementation for the same kernel
    if not isinstance(x_ref, list) and update_x_ref is None and not has_proj:
        if isinstance(preprocess_fn, Callable):
            x_ref, x_test1 = cd.preprocess(x_test1)
        n_ref, n_test = x_ref.shape[0], x_test1.shape[0]
        x_all = torch.from_numpy(np.concatenate([x_ref, x_test1], axis=0)).float()
        perms = [torch.randperm(n_ref + n_test) for _ in range(n_permutations)]
        mmd2 = cd._mmd2(x_all, perms, n_ref, n_test)[0]
        if isinstance(preprocess_batch, Callable):
            x_all = preprocess_batch(x_all)
        kernel = GaussianRBFTorch(sigma=cd.kernel.kernel.sigma)
        kernel_mat = kernel(x_all, x_all)
        mmd2_torch = mmd2_from_kernel_matrix(kernel_mat, n_test)
        np.testing.assert_almost_equal(mmd2, mmd2_torch, decimal=6)
| 4,968 | 36.931298 | 103 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/learned_kernel.py | from copy import deepcopy
from functools import partial
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Callable, Dict, Optional, Union, Tuple
from alibi_detect.cd.base import BaseLearnedKernelDrift
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.distance import mmd2_from_kernel_matrix, batch_compute_kernel_matrix
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
class LearnedKernelDriftTorch(BaseLearnedKernelDrift):
    @deprecated_alias(preprocess_x_ref='preprocess_at_init')
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            kernel: Union[nn.Module, nn.Sequential],
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            n_permutations: int = 100,
            var_reg: float = 1e-5,
            reg_loss_fn: Callable = (lambda kernel: 0),
            train_size: Optional[float] = .75,
            retrain_from_scratch: bool = True,
            optimizer: torch.optim.Optimizer = torch.optim.Adam,  # type: ignore
            learning_rate: float = 1e-3,
            batch_size: int = 32,
            batch_size_predict: int = 32,
            preprocess_batch_fn: Optional[Callable] = None,
            epochs: int = 3,
            num_workers: int = 0,
            verbose: int = 0,
            train_kwargs: Optional[dict] = None,
            device: Optional[str] = None,
            dataset: Callable = TorchDataset,
            dataloader: Callable = DataLoader,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Maximum Mean Discrepancy (MMD) data drift detector where the kernel is trained to maximise an
        estimate of the test power. The kernel is trained on a split of the reference and test instances
        and then the MMD is evaluated on held out instances and a permutation test is performed.

        For details see Liu et al (2020): Learning Deep Kernels for Non-Parametric Two-Sample Tests
        (https://arxiv.org/abs/2002.09116)

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        kernel
            Trainable PyTorch module that returns a similarity between two instances.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before applying the kernel.
        n_permutations
            The number of permutations to use in the permutation test once the MMD has been computed.
        var_reg
            Constant added to the estimated variance of the MMD for stability.
        reg_loss_fn
            The regularisation term reg_loss_fn(kernel) is added to the loss function being optimized.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the kernel.
            The drift is detected on `1 - train_size`.
        retrain_from_scratch
            Whether the kernel should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        optimizer
            Optimizer used during training of the kernel.
        learning_rate
            Learning rate used by optimizer.
        batch_size
            Batch size used during training of the kernel.
        batch_size_predict
            Batch size used for the trained drift detector predictions.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the kernel.
        epochs
            Number of training epochs for the kernel. Corresponds to the smaller of the reference and test sets.
        num_workers
            Number of workers for the dataloader. The default (`num_workers=0`) means multi-process data loading
            is disabled. Setting `num_workers>0` may be unreliable on Windows.
        verbose
            Verbosity level during the training of the kernel. 0 is silent, 1 a progress bar.
        train_kwargs
            Optional additional kwargs when training the kernel.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        dataset
            Dataset object used during training.
        dataloader
            Dataloader object used during training. Only relevant for 'pytorch' backend.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            p_val=p_val,
            x_ref_preprocessed=x_ref_preprocessed,
            preprocess_at_init=preprocess_at_init,
            update_x_ref=update_x_ref,
            preprocess_fn=preprocess_fn,
            n_permutations=n_permutations,
            train_size=train_size,
            retrain_from_scratch=retrain_from_scratch,
            input_shape=input_shape,
            data_type=data_type
        )
        self.meta.update({'backend': Framework.PYTORCH.value})

        # set device, define model and training kwargs
        self.device = get_device(device)
        # keep a pristine copy of the kernel so it can be re-initialised for each
        # test set when `retrain_from_scratch` is True
        self.original_kernel = kernel
        self.kernel = deepcopy(kernel)

        # define kwargs for dataloader and trainer
        self.dataset = dataset
        # drop_last=True keeps reference/test batches the same size during training
        self.dataloader = partial(dataloader, batch_size=batch_size, shuffle=True,
                                  drop_last=True, num_workers=num_workers)
        self.kernel_mat_fn = partial(
            batch_compute_kernel_matrix, device=self.device, preprocess_fn=preprocess_batch_fn,
            batch_size=batch_size_predict
        )
        self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs, 'preprocess_fn': preprocess_batch_fn,
                             'reg_loss_fn': reg_loss_fn, 'learning_rate': learning_rate, 'verbose': verbose}
        if isinstance(train_kwargs, dict):
            self.train_kwargs.update(train_kwargs)

        # J-hat wraps the kernel and estimates a correlate of test power (Eq. 4 of Liu et al., 2020)
        self.j_hat = LearnedKernelDriftTorch.JHat(self.kernel, var_reg).to(self.device)
    class JHat(nn.Module):
        """
        A module that wraps around the kernel. When passed a batch of reference and batch of test
        instances it returns an estimate of a correlate of test power.
        Equation 4 of https://arxiv.org/abs/2002.09116
        """
        def __init__(self, kernel: nn.Module, var_reg: float):
            """
            Parameters
            ----------
            kernel
                Trainable kernel module returning similarities between two batches.
            var_reg
                Constant added to the estimated variance of the MMD for numerical stability.
            """
            super().__init__()
            self.kernel = kernel
            self.var_reg = var_reg

        def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
            # kernel matrices within and across the reference (x) and test (y) batches
            # NOTE(review): assumes the two batches have equal size n — the trainer's
            # drop_last dataloaders appear to guarantee this; confirm for other callers
            k_xx, k_yy, k_xy = self.kernel(x, x), self.kernel(y, y), self.kernel(x, y)
            h_mat = k_xx + k_yy - k_xy - k_xy.t()
            n = len(x)
            # unbiased MMD^2 estimate: mean of the off-diagonal entries of h_mat
            mmd2_est = (h_mat.sum()-h_mat.trace())/(n*(n-1))
            # estimate of the variance of the MMD^2 estimator
            var_est = 4*h_mat.sum(-1).square().sum()/(n**3) - 4*h_mat.sum().square()/(n**4)
            reg_var_est = var_est + self.var_reg
            # test-power correlate: MMD^2 normalised by its (regularised) standard deviation
            return mmd2_est/reg_var_est.sqrt()
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the maximum mean discrepancy
as a distance measure between the reference data and the data to be tested. The kernel
used within the MMD is first trained to maximise an estimate of the resulting test power.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
and the MMD^2 threshold above which drift is flagged.
"""
x_ref, x_cur = self.preprocess(x)
(x_ref_tr, x_cur_tr), (x_ref_te, x_cur_te) = self.get_splits(x_ref, x_cur)
dl_ref_tr, dl_cur_tr = self.dataloader(self.dataset(x_ref_tr)), self.dataloader(self.dataset(x_cur_tr))
self.kernel = deepcopy(self.original_kernel) if self.retrain_from_scratch else self.kernel
self.kernel = self.kernel.to(self.device)
train_args = [self.j_hat, (dl_ref_tr, dl_cur_tr), self.device]
LearnedKernelDriftTorch.trainer(*train_args, **self.train_kwargs)
if isinstance(x_ref_te, np.ndarray) and isinstance(x_cur_te, np.ndarray):
x_all = np.concatenate([x_ref_te, x_cur_te], axis=0)
else:
x_all = x_ref_te + x_cur_te
kernel_mat = self.kernel_mat_fn(x_all, x_all, self.kernel)
kernel_mat = kernel_mat - torch.diag(kernel_mat.diag()) # zero diagonal
mmd2 = mmd2_from_kernel_matrix(kernel_mat, len(x_cur_te), permute=False, zero_diag=False)
mmd2_permuted = torch.Tensor(
[mmd2_from_kernel_matrix(kernel_mat, len(x_cur_te), permute=True, zero_diag=False)
for _ in range(self.n_permutations)]
)
if self.device.type == 'cuda':
mmd2, mmd2_permuted = mmd2.cpu(), mmd2_permuted.cpu()
p_val = (mmd2 <= mmd2_permuted).float().mean()
idx_threshold = int(self.p_val * len(mmd2_permuted))
distance_threshold = torch.sort(mmd2_permuted, descending=True).values[idx_threshold]
return p_val.numpy().item(), mmd2.numpy().item(), distance_threshold.numpy()
    @staticmethod
    def trainer(
        j_hat: JHat,
        dataloaders: Tuple[DataLoader, DataLoader],
        device: torch.device,
        optimizer: Callable = torch.optim.Adam,
        learning_rate: float = 1e-3,
        preprocess_fn: Callable = None,
        epochs: int = 20,
        reg_loss_fn: Callable = (lambda kernel: 0),
        verbose: int = 1,
    ) -> None:
        """
        Train the kernel to maximise an estimate of test power using minibatch gradient descent.

        Parameters
        ----------
        j_hat
            Wrapped kernel returning the estimated test-power correlate for a batch pair.
        dataloaders
            Dataloaders yielding batches of reference and test instances respectively.
        device
            Device to run training on.
        optimizer
            Optimizer class instantiated with the kernel parameters.
        learning_rate
            Learning rate used by the optimizer.
        preprocess_fn
            Optional batch preprocessing function applied to both batches.
        epochs
            Number of passes over the zipped dataloaders (so over the smaller of the two sets).
        reg_loss_fn
            Regularisation term reg_loss_fn(kernel) added to the loss.
        verbose
            1 shows a progress bar with a running loss average, 0 is silent.
        """
        optimizer = optimizer(j_hat.parameters(), lr=learning_rate)
        j_hat.train()
        loss_ma = 0.
        for epoch in range(epochs):
            dl_ref, dl_cur = dataloaders
            dl = tqdm(enumerate(zip(dl_ref, dl_cur)), total=min(len(dl_ref), len(dl_cur))) if verbose == 1 else \
                enumerate(zip(dl_ref, dl_cur))
            for step, (x_ref, x_cur) in dl:
                if isinstance(preprocess_fn, Callable):  # type: ignore
                    x_ref, x_cur = preprocess_fn(x_ref), preprocess_fn(x_cur)
                x_ref, x_cur = x_ref.to(device), x_cur.to(device)
                optimizer.zero_grad()  # type: ignore
                estimate = j_hat(x_ref, x_cur)
                # maximise the power estimate: gradient ascent == descent on its negation
                loss = -estimate + reg_loss_fn(j_hat.kernel)  # ascent
                loss.backward()
                optimizer.step()  # type: ignore
                if verbose == 1:
                    # running mean of the loss for the progress bar display
                    loss_ma = loss_ma + (loss.item() - loss_ma) / (step + 1)
                    dl.set_description(f'Epoch {epoch + 1}/{epochs}')
                    dl.set_postfix(dict(loss=loss_ma))
| 12,289 | 46.451737 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/mmd.py | import logging
import numpy as np
import torch
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseMMDDrift
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.distance import mmd2_from_kernel_matrix
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
class MMDDriftTorch(BaseMMDDrift):
    @deprecated_alias(preprocess_x_ref='preprocess_at_init')
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            kernel: Callable = GaussianRBF,
            sigma: Optional[np.ndarray] = None,
            configure_kernel_from_x_ref: bool = True,
            n_permutations: int = 100,
            device: Optional[str] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Maximum Mean Discrepancy (MMD) data drift detector using a permutation test.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the permutation test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        kernel
            Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
        sigma
            Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
            The kernel evaluation is then averaged over those bandwidths.
        configure_kernel_from_x_ref
            Whether to already configure the kernel bandwidth from the reference data.
        n_permutations
            Number of permutations used in the permutation test.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            p_val=p_val,
            x_ref_preprocessed=x_ref_preprocessed,
            preprocess_at_init=preprocess_at_init,
            update_x_ref=update_x_ref,
            preprocess_fn=preprocess_fn,
            sigma=sigma,
            configure_kernel_from_x_ref=configure_kernel_from_x_ref,
            n_permutations=n_permutations,
            input_shape=input_shape,
            data_type=data_type
        )
        self.meta.update({'backend': Framework.PYTORCH.value})

        # set device
        self.device = get_device(device)

        # initialize kernel; a custom (non-GaussianRBF) kernel is used as passed in
        sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma,  # type: ignore[assignment]
                                                                      np.ndarray) else None
        self.kernel = kernel(sigma).to(self.device) if kernel == GaussianRBF else kernel

        # compute kernel matrix for the reference data once up-front; otherwise the
        # bandwidth is inferred lazily on the first call to `kernel_matrix`
        if self.infer_sigma or isinstance(sigma, torch.Tensor):
            x = torch.from_numpy(self.x_ref).to(self.device)
            self.k_xx = self.kernel(x, x, infer_sigma=self.infer_sigma)
            self.infer_sigma = False
        else:
            self.k_xx, self.infer_sigma = None, True
def kernel_matrix(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
""" Compute and return full kernel matrix between arrays x and y. """
k_xy = self.kernel(x, y, self.infer_sigma)
k_xx = self.k_xx if self.k_xx is not None and self.update_x_ref is None else self.kernel(x, x)
k_yy = self.kernel(y, y)
kernel_mat = torch.cat([torch.cat([k_xx, k_xy], 1), torch.cat([k_xy.T, k_yy], 1)], 0)
return kernel_mat
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the maximum mean discrepancy
as a distance measure between the reference data and the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
and the MMD^2 threshold above which drift is flagged.
"""
x_ref, x = self.preprocess(x)
x_ref = torch.from_numpy(x_ref).to(self.device) # type: ignore[assignment]
x = torch.from_numpy(x).to(self.device) # type: ignore[assignment]
# compute kernel matrix, MMD^2 and apply permutation test using the kernel matrix
# TODO: (See https://github.com/SeldonIO/alibi-detect/issues/540)
n = x.shape[0]
kernel_mat = self.kernel_matrix(x_ref, x) # type: ignore[arg-type]
kernel_mat = kernel_mat - torch.diag(kernel_mat.diag()) # zero diagonal
mmd2 = mmd2_from_kernel_matrix(kernel_mat, n, permute=False, zero_diag=False)
mmd2_permuted = torch.Tensor(
[mmd2_from_kernel_matrix(kernel_mat, n, permute=True, zero_diag=False) for _ in range(self.n_permutations)]
)
if self.device.type == 'cuda':
mmd2, mmd2_permuted = mmd2.cpu(), mmd2_permuted.cpu()
p_val = (mmd2 <= mmd2_permuted).float().mean()
# compute distance threshold
idx_threshold = int(self.p_val * len(mmd2_permuted))
distance_threshold = torch.sort(mmd2_permuted, descending=True).values[idx_threshold]
return p_val.numpy().item(), mmd2.numpy().item(), distance_threshold.numpy()
| 6,868 | 46.372414 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/utils.py | from torch import nn
from typing import Callable
def activate_train_mode_for_dropout_layers(model: Callable) -> Callable:
    """Put `model` in eval mode while re-enabling train mode on every dropout layer.

    Used for Monte Carlo dropout: all layers behave deterministically at
    prediction time except the `nn.Dropout` layers, which keep sampling.
    Raises ValueError if the model contains no dropout layers at all (MC
    dropout would be a no-op). The model is modified in place and returned.
    """
    model.eval()  # type: ignore
    dropout_layers = [m for m in model.modules() if isinstance(m, nn.Dropout)]  # type: ignore
    if not dropout_layers:
        raise ValueError("No dropout layers identified.")
    for layer in dropout_layers:
        layer.train()
    return model
| 444 | 25.176471 | 72 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/classifier.py | from copy import deepcopy
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from scipy.special import softmax
from typing import Callable, Dict, Optional, Union, Tuple
from alibi_detect.cd.base import BaseClassifierDrift
from alibi_detect.models.pytorch.trainer import trainer
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.utils.pytorch.prediction import predict_batch
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
class ClassifierDriftTorch(BaseClassifierDrift):
    @deprecated_alias(preprocess_x_ref='preprocess_at_init')
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            model: Union[nn.Module, nn.Sequential],
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            preds_type: str = 'probs',
            binarize_preds: bool = False,
            reg_loss_fn: Callable = (lambda model: 0),
            train_size: Optional[float] = .75,
            n_folds: Optional[int] = None,
            retrain_from_scratch: bool = True,
            seed: int = 0,
            optimizer: Callable = torch.optim.Adam,
            learning_rate: float = 1e-3,
            batch_size: int = 32,
            preprocess_batch_fn: Optional[Callable] = None,
            epochs: int = 3,
            verbose: int = 0,
            train_kwargs: Optional[dict] = None,
            device: Optional[str] = None,
            dataset: Callable = TorchDataset,
            dataloader: Callable = DataLoader,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Classifier-based drift detector. The classifier is trained on a fraction of the combined
        reference and test data and drift is detected on the remaining data. To use all the data
        to detect drift, a stratified cross-validation scheme can be chosen.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        model
            PyTorch classification model used for drift detection.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        preds_type
            Whether the model outputs 'probs' or 'logits'
        binarize_preds
            Whether to test for discrepency on soft (e.g. probs/logits) model predictions directly
            with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
        reg_loss_fn
            The regularisation term reg_loss_fn(model) is added to the loss function being optimized.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
            The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
        n_folds
            Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold predictions. This allows to leverage all the reference and test data
            for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
            are specified, `n_folds` is prioritized.
        retrain_from_scratch
            Whether the classifier should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        seed
            Optional random seed for fold selection.
        optimizer
            Optimizer used during training of the classifier.
        learning_rate
            Learning rate used by optimizer.
        batch_size
            Batch size used during training of the classifier.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the model.
        epochs
            Number of training epochs for the classifier for each (optional) fold.
        verbose
            Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar.
        train_kwargs
            Optional additional kwargs when fitting the classifier.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
        dataset
            Dataset object used during training.
        dataloader
            Dataloader object used during training.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            p_val=p_val,
            x_ref_preprocessed=x_ref_preprocessed,
            preprocess_at_init=preprocess_at_init,
            update_x_ref=update_x_ref,
            preprocess_fn=preprocess_fn,
            preds_type=preds_type,
            binarize_preds=binarize_preds,
            train_size=train_size,
            n_folds=n_folds,
            retrain_from_scratch=retrain_from_scratch,
            seed=seed,
            input_shape=input_shape,
            data_type=data_type
        )
        if preds_type not in ['probs', 'logits']:
            raise ValueError("'preds_type' should be 'probs' or 'logits'")

        self.meta.update({'backend': Framework.PYTORCH.value})

        # set device, define model and training kwargs
        self.device = get_device(device)
        # keep a pristine copy so the classifier can be re-initialised per test set
        # when `retrain_from_scratch` is True
        self.original_model = model
        self.model = deepcopy(model)

        # define kwargs for dataloader and trainer
        # NLLLoss expects log-probabilities; CrossEntropyLoss expects raw logits
        self.loss_fn = nn.CrossEntropyLoss() if (self.preds_type == 'logits') else nn.NLLLoss()
        self.dataset = dataset
        self.dataloader = partial(dataloader, batch_size=batch_size, shuffle=True)
        self.predict_fn = partial(predict_batch, device=self.device,
                                  preprocess_fn=preprocess_batch_fn, batch_size=batch_size)
        self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs, 'preprocess_fn': preprocess_batch_fn,
                             'reg_loss_fn': reg_loss_fn, 'learning_rate': learning_rate, 'verbose': verbose}
        if isinstance(train_kwargs, dict):
            self.train_kwargs.update(train_kwargs)
    def score(self, x: Union[np.ndarray, list]) \
            -> Tuple[float, float, np.ndarray, np.ndarray, Union[np.ndarray, list], Union[np.ndarray, list]]:
        """
        Compute the out-of-fold drift metric such as the accuracy from a classifier
        trained to distinguish the reference data from the data to be tested.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        p-value, a notion of distance between the trained classifier's out-of-fold performance \
        and that which we'd expect under the null assumption of no drift, \
        and the out-of-fold classifier model prediction probabilities on the reference and test data \
        as well as the associated reference and test instances of the out-of-fold predictions.
        """
        x_ref, x = self.preprocess(x)
        # x = combined reference + test instances, y = 0/1 labels (1 marks test data),
        # splits = (train_idx, test_idx) pairs for the (cross-)validation scheme
        x, y, splits = self.get_splits(x_ref, x)  # type: ignore

        # iterate over folds: train a new model for each fold and make out-of-fold (oof) predictions
        preds_oof_list, idx_oof_list = [], []
        for idx_tr, idx_te in splits:
            y_tr = y[idx_tr]
            if isinstance(x, np.ndarray):
                x_tr, x_te = x[idx_tr], x[idx_te]
            elif isinstance(x, list):
                x_tr, x_te = [x[_] for _ in idx_tr], [x[_] for _ in idx_te]
            else:
                raise TypeError(f'x needs to be of type np.ndarray or list and not {type(x)}.')
            ds_tr = self.dataset(x_tr, y_tr)
            dl_tr = self.dataloader(ds_tr)
            self.model = deepcopy(self.original_model) if self.retrain_from_scratch else self.model
            self.model = self.model.to(self.device)
            train_args = [self.model, self.loss_fn, dl_tr, self.device]
            trainer(*train_args, **self.train_kwargs)  # type: ignore
            preds = self.predict_fn(x_te, self.model.eval())
            preds_oof_list.append(preds)
            idx_oof_list.append(idx_te)
        preds_oof = np.concatenate(preds_oof_list, axis=0)
        # convert logits to probabilities before applying the statistical test
        probs_oof = softmax(preds_oof, axis=-1) if self.preds_type == 'logits' else preds_oof
        idx_oof = np.concatenate(idx_oof_list, axis=0)
        y_oof = y[idx_oof]
        # number of test (y=1) and reference (y=0) instances among the oof predictions
        n_cur = y_oof.sum()
        n_ref = len(y_oof) - n_cur
        p_val, dist = self.test_probs(y_oof, probs_oof, n_ref, n_cur)
        # restore original instance order so the first n_ref entries correspond to reference data
        idx_sort = np.argsort(idx_oof)
        probs_sort = probs_oof[idx_sort]
        if isinstance(x, np.ndarray):
            x_oof = x[idx_oof]
            x_sort = x_oof[idx_sort]
        else:
            x_oof = [x[_] for _ in idx_oof]
            x_sort = [x_oof[_] for _ in idx_sort]
        return p_val, dist, probs_sort[:n_ref, 1], probs_sort[n_ref:, 1], x_sort[:n_ref], x_sort[n_ref:]
| 10,365 | 46.990741 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/lsdd_online.py | from tqdm import tqdm
import numpy as np
import torch
from typing import Any, Callable, Optional, Union
from alibi_detect.cd.base_online import BaseMultiDriftOnline
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch import GaussianRBF, permed_lsdds, quantile
from alibi_detect.utils.frameworks import Framework
class LSDDDriftOnlineTorch(BaseMultiDriftOnline):
    # detector state that is saved/restored by the online state-management machinery
    online_state_keys: tuple = ('t', 'test_stats', 'drift_preds', 'test_window', 'k_xtc')

    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            ert: float,
            window_size: int,
            preprocess_fn: Optional[Callable] = None,
            x_ref_preprocessed: bool = False,
            sigma: Optional[np.ndarray] = None,
            n_bootstraps: int = 1000,
            n_kernel_centers: Optional[int] = None,
            lambda_rd_max: float = 0.2,
            device: Optional[str] = None,
            verbose: bool = True,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Online least squares density difference (LSDD) data drift detector using preconfigured thresholds.
        Motivated by Bu et al. (2017): https://ieeexplore.ieee.org/abstract/document/7890493
        We have made modifications such that a desired ERT can be accurately targeted however.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        ert
            The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
            as the expected run-time from t=0.
        window_size
            The size of the sliding test-window used to compute the test-statistic.
            Smaller windows focus on responding quickly to severe drift, larger windows focus on
            ability to detect slight drift.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        sigma
            Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
            bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
            is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
            between reference samples.
        n_bootstraps
            The number of bootstrap simulations used to configure the thresholds. The larger this is the
            more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
            larger than the ert.
        n_kernel_centers
            The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
            Defaults to 2*window_size.
        lambda_rd_max
            The maximum relative difference between two estimates of LSDD that the regularization parameter
            lambda is allowed to cause. Defaults to 0.2 as in the paper.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        verbose
            Whether or not to print progress during configuration.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            ert=ert,
            window_size=window_size,
            preprocess_fn=preprocess_fn,
            x_ref_preprocessed=x_ref_preprocessed,
            n_bootstraps=n_bootstraps,
            verbose=verbose,
            input_shape=input_shape,
            data_type=data_type
        )
        self.backend = Framework.PYTORCH.value
        self.meta.update({'backend': self.backend})
        self.n_kernel_centers = n_kernel_centers
        self.lambda_rd_max = lambda_rd_max

        # set device
        self.device = get_device(device)

        # normalization must be configured first: later steps operate on normalized x_ref
        self._configure_normalization()

        # initialize kernel
        if sigma is None:
            # no bandwidth given: infer it from the reference data (median heuristic)
            x_ref = torch.from_numpy(self.x_ref).to(self.device)  # type: ignore[assignment]
            self.kernel = GaussianRBF()
            _ = self.kernel(x_ref, x_ref, infer_sigma=True)
        else:
            sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma,  # type: ignore[assignment]
                                                                          np.ndarray) else None
            self.kernel = GaussianRBF(sigma)

        if self.n_kernel_centers is None:
            self.n_kernel_centers = 2 * window_size

        self._configure_kernel_centers()
        self._configure_thresholds()
        self._configure_ref_subset()  # self.initialise_state() called inside here
def _configure_normalization(self, eps: float = 1e-12):
"""
Configure the normalization functions used to normalize reference and test data to zero mean and unit variance.
The reference data `x_ref` is also normalized here.
"""
x_ref = torch.from_numpy(self.x_ref).to(self.device)
x_ref_means = x_ref.mean(0)
x_ref_stds = x_ref.std(0)
self._normalize = lambda x: (x - x_ref_means) / (x_ref_stds + eps)
self._unnormalize = lambda x: (torch.as_tensor(x) * (x_ref_stds + eps) + x_ref_means).cpu().numpy()
self.x_ref = self._normalize(x_ref).cpu().numpy()
    def _configure_kernel_centers(self):
        """
        Set aside `n_kernel_centers` reference samples to act as centers of the Gaussian
        kernel model, and precompute the kernel matrix between the remaining (effective)
        reference samples and those centers.
        """
        # Random split of reference indices into centers vs. the effective reference set.
        perm = torch.randperm(self.n)
        self.c_inds, self.non_c_inds = perm[:self.n_kernel_centers], perm[self.n_kernel_centers:]
        self.kernel_centers = torch.from_numpy(self.x_ref[self.c_inds]).to(self.device)
        # If duplicate centers were drawn, jitter them slightly so the centers are distinct
        # (duplicate centers would make downstream linear systems singular).
        if np.unique(self.kernel_centers.cpu().numpy(), axis=0).shape[0] < self.n_kernel_centers:
            perturbation = (torch.randn(self.kernel_centers.shape) * 1e-6).to(self.device)
            self.kernel_centers = self.kernel_centers + perturbation
        self.x_ref_eff = torch.from_numpy(self.x_ref[self.non_c_inds]).to(self.device)  # the effective reference set
        # Kernel evaluations of effective reference samples against the centers.
        self.k_xc = self.kernel(self.x_ref_eff, self.kernel_centers)
    def _configure_thresholds(self):
        """
        Configure the test statistic thresholds via bootstrapping.

        A sequence of W thresholds is produced (one per overlapping test window).
        After each window, bootstrap samples that already exceeded the threshold are
        discarded, so later thresholds are conditional on no earlier detection —
        this is what targets the desired expected run-time (ERT).
        """
        # Each bootstrap sample splits the reference samples into a sub-reference sample (x)
        # and an extended test window (y). The extended test window will be treated as W overlapping
        # test windows of size W (so 2W-1 test samples in total)
        w_size = self.window_size
        etw_size = 2 * w_size - 1  # etw = extended test window
        nkc_size = self.n - self.n_kernel_centers  # nkc = non-kernel-centers
        rw_size = nkc_size - etw_size  # rw = ref-window
        perms = [torch.randperm(nkc_size) for _ in range(self.n_bootstraps)]
        x_inds_all = [perm[:rw_size] for perm in perms]
        y_inds_all = [perm[rw_size:] for perm in perms]
        # For stability in high dimensions we don't divide H by (pi*sigma^2)^(d/2)
        # Results in an alternative test-stat of LSDD*(pi*sigma^2)^(d/2). Same p-vals etc.
        H = GaussianRBF(np.sqrt(2.) * self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
        # Compute lsdds for first test-window. We infer regularisation constant lambda here.
        y_inds_all_0 = [y_inds[:w_size] for y_inds in y_inds_all]
        lsdds_0, H_lam_inv = permed_lsdds(
            self.k_xc, x_inds_all, y_inds_all_0, H, lam_rd_max=self.lambda_rd_max,
        )
        # Can compute threshold for first window
        thresholds = [quantile(lsdds_0, 1 - self.fpr)]
        # And now to iterate through the other W-1 overlapping windows
        p_bar = tqdm(range(1, w_size), "Computing thresholds") if self.verbose else range(1, w_size)
        for w in p_bar:
            y_inds_all_w = [y_inds[w:(w + w_size)] for y_inds in y_inds_all]
            lsdds_w, _ = permed_lsdds(self.k_xc, x_inds_all, y_inds_all_w, H, H_lam_inv=H_lam_inv)
            thresholds.append(quantile(lsdds_w, 1 - self.fpr))
            # Keep only bootstrap samples that did not exceed this window's threshold,
            # so the next threshold is conditional on survival so far.
            x_inds_all = [x_inds_all[i] for i in range(len(x_inds_all)) if lsdds_w[i] < thresholds[-1]]
            y_inds_all = [y_inds_all[i] for i in range(len(y_inds_all)) if lsdds_w[i] < thresholds[-1]]
        self.thresholds = thresholds
        self.H_lam_inv = H_lam_inv
    def _initialise_state(self) -> None:
        """
        Initialise online state (the stateful attributes updated by `score` and `predict`). This method relies on
        attributes defined by `_configure_ref_subset`, hence must be called afterwards.
        """
        super()._initialise_state()
        # Seed the sliding test window with held-out reference instances.
        self.test_window = self.x_ref_eff[self.init_test_inds]
        # Cached kernel evaluations of the test window against the kernel centers.
        self.k_xtc = self.kernel(self.test_window, self.kernel_centers)
    def _configure_ref_subset(self):
        """
        Configure the reference data split. If the randomly selected split causes an initial detection, further splits
        are attempted.
        """
        etw_size = 2 * self.window_size - 1  # etw = extended test window
        nkc_size = self.n - self.n_kernel_centers  # nkc = non-kernel-centers
        rw_size = nkc_size - etw_size  # rw = ref-window
        # Make split and ensure it doesn't cause an initial detection
        lsdd_init = None
        while lsdd_init is None or lsdd_init >= self.get_threshold(0):
            # Make split
            perm = torch.randperm(nkc_size)
            self.ref_inds, self.init_test_inds = perm[:rw_size], perm[-self.window_size:]
            # Compute initial lsdd to check for initial detection
            self._initialise_state()  # to set self.test_window and self.k_xtc
            # Mean kernel column over the reference window; reused by `score`.
            self.c2s = self.k_xc[self.ref_inds].mean(0)  # (below Eqn 21)
            h_init = self.c2s - self.k_xtc.mean(0)  # (Eqn 21)
            lsdd_init = h_init[None, :] @ self.H_lam_inv @ h_init[:, None]  # (Eqn 11)
def _update_state(self, x_t: torch.Tensor): # type: ignore[override]
"""
Update online state based on the provided test instance.
Parameters
----------
x_t
The test instance.
"""
self.t += 1
k_xtc = self.kernel(x_t, self.kernel_centers)
self.test_window = torch.cat([self.test_window[(1 - self.window_size):], x_t], 0)
self.k_xtc = torch.cat([self.k_xtc[(1 - self.window_size):], k_xtc], 0)
    def score(self, x_t: Union[np.ndarray, Any]) -> float:
        """
        Compute the test-statistic (LSDD) between the reference window and test window.

        Note: this mutates online state via `_update_state` (increments `t` and rolls
        the test window) before computing the statistic.

        Parameters
        ----------
        x_t
            A single instance to be added to the test-window.
        Returns
        -------
        LSDD estimate between reference window and test window.
        """
        x_t = super()._preprocess_xt(x_t)
        x_t = torch.from_numpy(x_t).to(self.device)
        # Apply the same normalization as was applied to the reference data.
        x_t = self._normalize(x_t)
        self._update_state(x_t)
        h = self.c2s - self.k_xtc.mean(0)  # (Eqn 21)
        lsdd = h[None, :] @ self.H_lam_inv @ h[:, None]  # (Eqn 11)
        return float(lsdd.detach().cpu())
| 11,629 | 47.057851 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/spot_the_diff.py | import logging
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Callable, Dict, Optional, Union
from alibi_detect.cd.pytorch.classifier import ClassifierDriftTorch
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.utils.pytorch import GaussianRBF
from alibi_detect.utils.pytorch.prediction import predict_batch
logger = logging.getLogger(__name__)
class SpotTheDiffDriftTorch:
    """
    Interpretable "spot-the-diff" drift detector (PyTorch backend).

    Wraps a `ClassifierDriftTorch` detector around an interpretable kernel classifier
    whose learnable test locations ("diffs") indicate how the test distribution
    differs from the reference distribution.
    """
    def __init__(
        self,
        x_ref: np.ndarray,
        p_val: float = .05,
        x_ref_preprocessed: bool = False,
        preprocess_fn: Optional[Callable] = None,
        kernel: Optional[nn.Module] = None,
        n_diffs: int = 1,
        initial_diffs: Optional[np.ndarray] = None,
        l1_reg: float = 0.01,
        binarize_preds: bool = False,
        train_size: Optional[float] = .75,
        n_folds: Optional[int] = None,
        retrain_from_scratch: bool = True,
        seed: int = 0,
        optimizer: Callable = torch.optim.Adam,
        learning_rate: float = 1e-3,
        batch_size: int = 32,
        preprocess_batch_fn: Optional[Callable] = None,
        epochs: int = 3,
        verbose: int = 0,
        train_kwargs: Optional[dict] = None,
        device: Optional[str] = None,
        dataset: Callable = TorchDataset,
        dataloader: Callable = DataLoader,
        input_shape: Optional[tuple] = None,
        data_type: Optional[str] = None
    ) -> None:
        """
        Classifier-based drift detector with a classifier of form y = a + b_1*k(x,w_1) + ... + b_J*k(x,w_J),
        where k is a kernel and w_1,...,w_J are learnable test locations. If drift has occured the test locations
        learn to be more/less (given by sign of b_i) similar to test instances than reference instances.
        The test locations are regularised to be close to the average reference instance such that the **difference**
        is then interpretable as the transformation required for each feature to make the average instance more/less
        like a test instance than a reference instance.
        The classifier is trained on a fraction of the combined reference and test data and drift is detected on
        the remaining data. To use all the data to detect drift, a stratified cross-validation scheme can be chosen.
        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        kernel
            Differentiable Pytorch model used to define similarity between instances, defaults to Gaussian RBF.
        n_diffs
            The number of test locations to use, each corresponding to an interpretable difference.
        initial_diffs
            Array used to initialise the diffs that will be learned. Defaults to Gaussian
            for each feature with equal variance to that of reference data.
        l1_reg
            Strength of l1 regularisation to apply to the differences.
        binarize_preds
            Whether to test for discrepency on soft (e.g. probs/logits) model predictions directly
            with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
            The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
        n_folds
            Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold instances. This allows to leverage all the reference and test data
            for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
            are specified, `n_folds` is prioritized.
        retrain_from_scratch
            Whether the classifier should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        seed
            Optional random seed for fold selection.
        optimizer
            Optimizer used during training of the classifier.
        learning_rate
            Learning rate used by optimizer.
        batch_size
            Batch size used during training of the classifier.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the model.
        epochs
            Number of training epochs for the classifier for each (optional) fold.
        verbose
            Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar.
        train_kwargs
            Optional additional kwargs when fitting the classifier.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
        dataset
            Dataset object used during training.
        dataloader
            Dataloader object used during training.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        if preprocess_fn is not None and preprocess_batch_fn is not None:
            raise ValueError("SpotTheDiffDrift detector only supports preprocess_fn or preprocess_batch_fn, not both.")
        if n_folds is not None and n_folds > 1:
            logger.warning("When using multiple folds the returned diffs will correspond to the final fold only.")
        # Preprocess the reference data (if not already done) so that diffs can be
        # initialised in the *preprocessed* feature space the classifier operates in.
        if not x_ref_preprocessed and preprocess_fn is not None:
            x_ref_proc = preprocess_fn(x_ref)
        elif not x_ref_preprocessed and preprocess_batch_fn is not None:
            x_ref_proc = predict_batch(
                x_ref, lambda x: x, preprocess_fn=preprocess_batch_fn,
                device=torch.device('cpu'), batch_size=batch_size
            )
        else:
            x_ref_proc = x_ref
        if kernel is None:
            kernel = GaussianRBF(trainable=True)
        if initial_diffs is None:
            # Default: one Gaussian draw per diff, scaled by per-feature reference std.
            initial_diffs = np.random.normal(size=(n_diffs,) + x_ref_proc.shape[1:]) * x_ref_proc.std(0)
        else:
            if len(initial_diffs) != n_diffs:
                raise ValueError("Should have initial_diffs.shape[0] == n_diffs")
        model = SpotTheDiffDriftTorch.InterpretableClf(kernel, x_ref_proc, initial_diffs)
        # L1 penalty on the diffs keeps them close to zero, i.e. the test locations
        # close to the average reference instance (for interpretability).
        reg_loss_fn = (lambda model: model.diffs.abs().mean() * l1_reg)
        self._detector = ClassifierDriftTorch(
            x_ref=x_ref,
            model=model,
            p_val=p_val,
            x_ref_preprocessed=x_ref_preprocessed,
            preprocess_at_init=True,
            update_x_ref=None,
            preprocess_fn=preprocess_fn,
            preds_type='logits',
            binarize_preds=binarize_preds,
            reg_loss_fn=reg_loss_fn,
            train_size=train_size,
            n_folds=n_folds,
            retrain_from_scratch=retrain_from_scratch,
            seed=seed,
            optimizer=optimizer,
            learning_rate=learning_rate,
            batch_size=batch_size,
            preprocess_batch_fn=preprocess_batch_fn,
            epochs=epochs,
            verbose=verbose,
            train_kwargs=train_kwargs,
            device=device,
            dataset=dataset,
            dataloader=dataloader,
            input_shape=input_shape,
            data_type=data_type
        )
        self.meta = self._detector.meta
        self.meta['params']['name'] = 'SpotTheDiffDrift'
        self.meta['params']['n_diffs'] = n_diffs
        self.meta['params']['l1_reg'] = l1_reg
        self.meta['params']['initial_diffs'] = initial_diffs
    class InterpretableClf(nn.Module):
        """
        Binary classifier of form y = bias + sum_j coeffs_j * k(x, mean + diffs_j),
        whose learned `diffs` are the interpretable output of the detector.
        """
        def __init__(self, kernel: nn.Module, x_ref: np.ndarray, initial_diffs: np.ndarray):
            super().__init__()
            self.kernel = kernel
            # Average reference instance; fixed (not trained).
            self.mean = nn.Parameter(torch.as_tensor(x_ref.mean(0)), requires_grad=False)
            self.diffs = nn.Parameter(torch.as_tensor(initial_diffs, dtype=torch.float32))
            self.bias = nn.Parameter(torch.zeros((1,)))
            self.coeffs = nn.Parameter(torch.zeros((len(initial_diffs),)))
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # Similarity of each instance to each test location (mean + diff_j).
            k_xtl = self.kernel(x, self.mean + self.diffs)
            logits = self.bias + k_xtl @ self.coeffs[:, None]
            # Return two-class logits (class 0 = reference, class 1 = test).
            return torch.cat([-logits, logits], 1)
    def predict(
        self, x: np.ndarray, return_p_val: bool = True, return_distance: bool = True,
        return_probs: bool = True, return_model: bool = False
    ) -> Dict[str, Dict[str, Union[str, int, float, Callable]]]:
        """
        Predict whether a batch of data has drifted from the reference data.
        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the test.
        return_distance
            Whether to return a notion of strength of the drift.
            K-S test stat if binarize_preds=False, otherwise relative error reduction.
        return_probs
            Whether to return the instance level classifier probabilities for the reference and test data
            (0=reference data, 1=test data).
        return_model
            Whether to return the updated model trained to discriminate reference and test instances.
        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the detector's metadata.
        - ``'data'`` contains the drift prediction, the diffs used to distinguish reference from test instances, \
        and optionally the p-value, performance of the classifier relative to its expectation under the \
        no-change null, the out-of-fold classifier model prediction probabilities on the reference and test \
        data as well as well as the associated reference and test instances of the out-of-fold predictions, \
        and the trained model.
        """
        # Always request the model from the inner detector so the learned diffs
        # can be extracted; drop it afterwards unless the caller asked for it.
        preds = self._detector.predict(x, return_p_val, return_distance, return_probs, return_model=True)
        preds['data']['diffs'] = preds['data']['model'].diffs.detach().cpu().numpy()
        preds['data']['diff_coeffs'] = preds['data']['model'].coeffs.detach().cpu().numpy()
        if not return_model:
            del preds['data']['model']
        return preds
| 11,086 | 46.995671 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/mmd_online.py | from tqdm import tqdm
import numpy as np
import torch
from typing import Any, Callable, Optional, Union
from alibi_detect.cd.base_online import BaseMultiDriftOnline
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.pytorch import zero_diag, quantile
from alibi_detect.utils.frameworks import Framework
class MMDDriftOnlineTorch(BaseMultiDriftOnline):
    """
    Online MMD drift detector (PyTorch backend). Thresholds are preconfigured via
    bootstrap simulation to target a desired expected run-time (ERT) in the absence
    of drift; at test time a sliding window statistic is compared to those thresholds.
    """
    # Attributes persisted/restored as online state by the base class.
    online_state_keys: tuple = ('t', 'test_stats', 'drift_preds', 'test_window', 'k_xy')
    def __init__(
        self,
        x_ref: Union[np.ndarray, list],
        ert: float,
        window_size: int,
        preprocess_fn: Optional[Callable] = None,
        x_ref_preprocessed: bool = False,
        kernel: Callable = GaussianRBF,
        sigma: Optional[np.ndarray] = None,
        n_bootstraps: int = 1000,
        device: Optional[str] = None,
        verbose: bool = True,
        input_shape: Optional[tuple] = None,
        data_type: Optional[str] = None
    ) -> None:
        """
        Online maximum Mean Discrepancy (MMD) data drift detector using preconfigured thresholds.
        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        ert
            The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
            as the expected run-time from t=0.
        window_size
            The size of the sliding test-window used to compute the test-statistic.
            Smaller windows focus on responding quickly to severe drift, larger windows focus on
            ability to detect slight drift.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        kernel
            Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
        sigma
            Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
            The kernel evaluation is then averaged over those bandwidths. If `sigma` is not specified, the 'median
            heuristic' is adopted whereby `sigma` is set as the median pairwise distance between reference samples.
        n_bootstraps
            The number of bootstrap simulations used to configure the thresholds. The larger this is the
            more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
            larger than the ERT.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        verbose
            Whether or not to print progress during configuration.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            ert=ert,
            window_size=window_size,
            preprocess_fn=preprocess_fn,
            x_ref_preprocessed=x_ref_preprocessed,
            n_bootstraps=n_bootstraps,
            verbose=verbose,
            input_shape=input_shape,
            data_type=data_type
        )
        self.backend = Framework.PYTORCH.value
        self.meta.update({'backend': self.backend})
        # set device
        self.device = get_device(device)
        # initialize kernel
        sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma,  # type: ignore[assignment]
                                                                      np.ndarray) else None
        self.kernel = kernel(sigma) if kernel == GaussianRBF else kernel
        # compute kernel matrix for the reference data
        self.x_ref = torch.from_numpy(self.x_ref).to(self.device)
        # Bandwidth inferred via the median heuristic here if sigma was not given.
        self.k_xx = self.kernel(self.x_ref, self.x_ref, infer_sigma=(sigma is None))
        self._configure_thresholds()
        self._configure_ref_subset()  # self.initialise_state() called inside here
    def _initialise_state(self) -> None:
        """
        Initialise online state (the stateful attributes updated by `score` and `predict`). This method relies on
        attributes defined by `_configure_ref_subset`, hence must be called afterwards.
        """
        super()._initialise_state()
        # Seed the sliding test window with held-out reference instances.
        self.test_window = self.x_ref[self.init_test_inds]
        # Cross kernel matrix between the reference window and the test window.
        self.k_xy = self.kernel(self.x_ref[self.ref_inds], self.test_window)
    def _configure_ref_subset(self):
        """
        Configure the reference data split. If the randomly selected split causes an initial detection, further splits
        are attempted.
        """
        etw_size = 2 * self.window_size - 1  # etw = extended test window
        rw_size = self.n - etw_size  # rw = ref-window
        # Make split and ensure it doesn't cause an initial detection
        mmd_init = None
        while mmd_init is None or mmd_init >= self.get_threshold(0):
            # Make split
            perm = torch.randperm(self.n)
            self.ref_inds, self.init_test_inds = perm[:rw_size], perm[-self.window_size:]
            # Compute initial mmd to check for initial detection
            self._initialise_state()  # to set self.test_window and self.k_xy
            self.k_xx_sub = self.k_xx[self.ref_inds][:, self.ref_inds]
            # Unbiased within-reference term of the MMD estimate (diagonal excluded).
            self.k_xx_sub_sum = zero_diag(self.k_xx_sub).sum() / (rw_size * (rw_size - 1))
            k_yy = self.kernel(self.test_window, self.test_window)
            mmd_init = (
                self.k_xx_sub_sum +
                zero_diag(k_yy).sum() / (self.window_size * (self.window_size - 1)) -
                2 * self.k_xy.mean()
            )
    def _configure_thresholds(self):
        """
        Configure the test statistic thresholds via bootstrapping.

        Produces one threshold per overlapping test window; bootstrap samples that
        exceed a threshold are discarded before computing the next one, so later
        thresholds are conditional on no earlier detection (targets the ERT).
        """
        # Each bootstrap sample splits the reference samples into a sub-reference sample (x)
        # and an extended test window (y). The extended test window will be treated as W overlapping
        # test windows of size W (so 2W-1 test samples in total)
        w_size = self.window_size
        etw_size = 2 * w_size - 1  # etw = extended test window
        rw_size = self.n - etw_size  # rw = sub-ref window
        perms = [torch.randperm(self.n) for _ in range(self.n_bootstraps)]
        x_inds_all = [perm[:-etw_size] for perm in perms]
        y_inds_all = [perm[-etw_size:] for perm in perms]
        if self.verbose:
            print("Generating permutations of kernel matrix..")
        # Need to compute mmd for each bs for each of W overlapping windows
        # Most of the computation can be done once however
        # We avoid summing the rw_size^2 submatrix for each bootstrap sample by instead computing the full
        # sum once and then subtracting the relavent parts (k_xx_sum = k_full_sum - 2*k_xy_sum - k_yy_sum).
        # We also reduce computation of k_xy_sum from O(nW) to O(W) by caching column sums
        k_full_sum = zero_diag(self.k_xx).sum()
        k_xy_col_sums_all = [
            self.k_xx[x_inds][:, y_inds].sum(0) for x_inds, y_inds in
            (tqdm(zip(x_inds_all, y_inds_all), total=self.n_bootstraps) if self.verbose else
             zip(x_inds_all, y_inds_all))
        ]
        k_xx_sums_all = [(
            k_full_sum - zero_diag(self.k_xx[y_inds][:, y_inds]).sum() - 2 * k_xy_col_sums.sum()
        ) / (rw_size * (rw_size - 1)) for y_inds, k_xy_col_sums in zip(y_inds_all, k_xy_col_sums_all)]
        k_xy_col_sums_all = [k_xy_col_sums / (rw_size * w_size) for k_xy_col_sums in k_xy_col_sums_all]
        # Now to iterate through the W overlapping windows
        thresholds = []
        p_bar = tqdm(range(w_size), "Computing thresholds") if self.verbose else range(w_size)
        for w in p_bar:
            y_inds_all_w = [y_inds[w:w + w_size] for y_inds in y_inds_all]  # test windows of size w_size
            mmds = [(
                k_xx_sum +
                zero_diag(self.k_xx[y_inds_w][:, y_inds_w]).sum() / (w_size * (w_size - 1)) -
                2 * k_xy_col_sums[w:w + w_size].sum())
                for k_xx_sum, y_inds_w, k_xy_col_sums in zip(k_xx_sums_all, y_inds_all_w, k_xy_col_sums_all)
            ]
            mmds = torch.tensor(mmds)  # an mmd for each bootstrap sample
            # Now we discard all bootstrap samples for which mmd is in top (1/ert)% and record the thresholds
            thresholds.append(quantile(mmds, 1 - self.fpr))
            y_inds_all = [y_inds_all[i] for i in range(len(y_inds_all)) if mmds[i] < thresholds[-1]]
            k_xx_sums_all = [
                k_xx_sums_all[i] for i in range(len(k_xx_sums_all)) if mmds[i] < thresholds[-1]
            ]
            k_xy_col_sums_all = [
                k_xy_col_sums_all[i] for i in range(len(k_xy_col_sums_all)) if mmds[i] < thresholds[-1]
            ]
        self.thresholds = thresholds
    def _update_state(self, x_t: torch.Tensor):  # type: ignore[override]
        """
        Update online state based on the provided test instance.
        Parameters
        ----------
        x_t
            The test instance.
        """
        self.t += 1
        # Kernel column of the new instance vs. the reference window.
        kernel_col = self.kernel(self.x_ref[self.ref_inds], x_t)
        # Roll the sliding window and its cached cross kernel matrix forward.
        self.test_window = torch.cat([self.test_window[(1 - self.window_size):], x_t], 0)
        self.k_xy = torch.cat([self.k_xy[:, (1 - self.window_size):], kernel_col], 1)
    def score(self, x_t: Union[np.ndarray, Any]) -> float:
        """
        Compute the test-statistic (squared MMD) between the reference window and test window.

        Note: this mutates online state via `_update_state` before computing the statistic.

        Parameters
        ----------
        x_t
            A single instance to be added to the test-window.
        Returns
        -------
        Squared MMD estimate between reference window and test window.
        """
        x_t = super()._preprocess_xt(x_t)
        x_t = torch.from_numpy(x_t).to(self.device)
        self._update_state(x_t)
        k_yy = self.kernel(self.test_window, self.test_window)
        # Unbiased MMD^2 estimate: within-ref + within-test - 2 * cross term.
        mmd = (
            self.k_xx_sub_sum +
            zero_diag(k_yy).sum() / (self.window_size * (self.window_size - 1)) -
            2 * self.k_xy.mean()
        )
        return float(mmd.detach().cpu())
| 10,720 | 45.816594 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/context_aware.py | import logging
import numpy as np
import torch
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseContextMMDDrift
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
from alibi_detect.cd._domain_clf import _SVCDomainClf
from tqdm import tqdm
logger = logging.getLogger(__name__)
class ContextMMDDriftTorch(BaseContextMMDDrift):
lams: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
c_ref: np.ndarray,
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
x_kernel: Callable = GaussianRBF,
c_kernel: Callable = GaussianRBF,
n_permutations: int = 1000,
prop_c_held: float = 0.25,
n_folds: int = 5,
batch_size: Optional[int] = 256,
device: Optional[str] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
verbose: bool = False,
) -> None:
"""
A context-aware drift detector based on a conditional analogue of the maximum mean discrepancy (MMD).
Only detects differences between samples that can not be attributed to differences between associated
sets of contexts. p-values are computed using a conditional permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
c_ref
Context for the reference distribution.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_ref
Reference data can optionally be updated to the last N instances seen by the detector.
The parameter should be passed as a dictionary *{'last': N}*.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_kernel
Kernel defined on the input data, defaults to Gaussian RBF kernel.
c_kernel
Kernel defined on the context data, defaults to Gaussian RBF kernel.
n_permutations
Number of permutations used in the permutation test.
prop_c_held
Proportion of contexts held out to condition on.
n_folds
Number of cross-validation folds used when tuning the regularisation parameters.
batch_size
If not None, then compute batches of MMDs at a time (rather than all at once).
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
verbose
Whether or not to print progress during configuration.
"""
super().__init__(
x_ref=x_ref,
c_ref=c_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_ref=update_ref,
preprocess_fn=preprocess_fn,
x_kernel=x_kernel,
c_kernel=c_kernel,
n_permutations=n_permutations,
prop_c_held=prop_c_held,
n_folds=n_folds,
batch_size=batch_size,
input_shape=input_shape,
data_type=data_type,
verbose=verbose,
)
self.meta.update({'backend': Framework.PYTORCH.value})
# set device
self.device = get_device(device)
# initialize kernel
self.x_kernel = x_kernel(init_sigma_fn=_sigma_median_diag) if x_kernel == GaussianRBF else x_kernel
self.c_kernel = c_kernel(init_sigma_fn=_sigma_median_diag) if c_kernel == GaussianRBF else c_kernel
# Initialize classifier (hardcoded for now)
self.clf = _SVCDomainClf(self.c_kernel)
def score(self, # type: ignore[override]
x: Union[np.ndarray, list], c: np.ndarray) -> Tuple[float, float, float, Tuple]:
"""
Compute the MMD based conditional test statistic, and perform a conditional permutation test to obtain a
p-value representing the test statistic's extremity under the null hypothesis.
Parameters
----------
x
Batch of instances.
c
Context associated with batch of instances.
Returns
-------
p-value obtained from the conditional permutation test, the conditional MMD test statistic, the test \
statistic threshold above which drift is flagged, and a tuple containing the coupling matrices \
(W_{ref,ref}, W_{test,test}, W_{ref,test}).
"""
x_ref, x = self.preprocess(x)
x_ref = torch.from_numpy(x_ref).to(self.device) # type: ignore[assignment]
c_ref = torch.from_numpy(self.c_ref).to(self.device)
# Hold out a portion of contexts for conditioning on
n, n_held = len(c), int(len(c)*self.prop_c_held)
inds_held = np.random.choice(n, n_held, replace=False)
inds_test = np.setdiff1d(np.arange(n), inds_held)
c_held = torch.as_tensor(c[inds_held]).to(self.device)
c = torch.as_tensor(c[inds_test]).to(self.device) # type: ignore[assignment]
x = torch.as_tensor(x[inds_test]).to(self.device) # type: ignore[assignment]
n_ref, n_test = len(x_ref), len(x)
bools = torch.cat([torch.zeros(n_ref), torch.ones(n_test)]).to(self.device)
# Compute kernel matrices
x_all = torch.cat([x_ref, x], dim=0) # type: ignore[list-item]
c_all = torch.cat([c_ref, c], dim=0) # type: ignore[list-item]
K = self.x_kernel(x_all, x_all)
L = self.c_kernel(c_all, c_all)
L_held = self.c_kernel(c_held, c_all)
# Fit and calibrate the domain classifier
c_all_np, bools_np = c_all.cpu().numpy(), bools.cpu().numpy()
self.clf.fit(c_all_np, bools_np)
self.clf.calibrate(c_all_np, bools_np)
# Obtain n_permutations conditional reassignments
prop_scores = torch.as_tensor(self.clf.predict(c_all_np))
self.redrawn_bools = [torch.bernoulli(prop_scores) for _ in range(self.n_permutations)]
iters = tqdm(self.redrawn_bools, total=self.n_permutations) if self.verbose else self.redrawn_bools
# Compute test stat on original and reassigned data
stat, coupling_xx, coupling_yy, coupling_xy = self._cmmd(K, L, bools, L_held=L_held)
permuted_stats = torch.stack([self._cmmd(K, L, perm_bools, L_held=L_held)[0] for perm_bools in iters])
# Compute p-value
p_val = (stat <= permuted_stats).float().mean()
coupling = (coupling_xx.numpy(), coupling_yy.numpy(), coupling_xy.numpy())
# compute distance threshold
idx_threshold = int(self.p_val * len(permuted_stats))
distance_threshold = torch.sort(permuted_stats, descending=True).values[idx_threshold]
return p_val.numpy().item(), stat.numpy().item(), distance_threshold.numpy(), coupling
def _cmmd(self, K: torch.Tensor, L: torch.Tensor, bools: torch.Tensor, L_held: torch.Tensor = None) \
-> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Private method to compute the MMD-ADiTT test statistic.
"""
# Get ref/test indices
idx_0, idx_1 = torch.where(bools == 0)[0], torch.where(bools == 1)[0]
n_ref, n_test = len(idx_0), len(idx_1)
# Form kernel matrices
L_0, L_1 = L[idx_0][:, idx_0], L[idx_1][:, idx_1]
K_0, K_1 = K[idx_0][:, idx_0], K[idx_1][:, idx_1]
# Initialise regularisation parameters
# Implemented only for first _cmmd call which corresponds to original window assignment
if self.lams is None:
possible_lams = torch.tensor([2**(-i) for i in range(20)]).to(K.device)
lam_0 = self._pick_lam(possible_lams, K_0, L_0, n_folds=self.n_folds)
lam_1 = self._pick_lam(possible_lams, K_1, L_1, n_folds=self.n_folds)
self.lams = (lam_0, lam_1)
# Compute stat
L_0_inv = torch.linalg.inv(L_0 + n_ref*self.lams[0]*torch.eye(int(n_ref)).to(L_0.device))
L_1_inv = torch.linalg.inv(L_1 + n_test*self.lams[1]*torch.eye(int(n_test)).to(L_1.device))
A_0 = L_held[:, idx_0] @ L_0_inv
A_1 = L_held[:, idx_1] @ L_1_inv
# Allow batches of MMDs to be computed at a time (rather than all)
if self.batch_size is not None:
bs = self.batch_size
coupling_xx = torch.stack([torch.einsum('ij,ik->ijk', A_0_i, A_0_i).mean(0)
for A_0_i in A_0.split(bs)]).mean(0)
coupling_yy = torch.stack([torch.einsum('ij,ik->ijk', A_1_i, A_1_i).mean(0)
for A_1_i in A_1.split(bs)]).mean(0)
coupling_xy = torch.stack([
torch.einsum('ij,ik->ijk', A_0_i, A_1_i).mean(0) for A_0_i, A_1_i in zip(A_0.split(bs), A_1.split(bs))
]).mean(0)
else:
coupling_xx = torch.einsum('ij,ik->ijk', A_0, A_0).mean(0)
coupling_yy = torch.einsum('ij,ik->ijk', A_1, A_1).mean(0)
coupling_xy = torch.einsum('ij,ik->ijk', A_0, A_1).mean(0)
sim_xx = (K[idx_0][:, idx_0]*coupling_xx).sum()
sim_yy = (K[idx_1][:, idx_1]*coupling_yy).sum()
sim_xy = (K[idx_0][:, idx_1]*coupling_xy).sum()
stat = sim_xx + sim_yy - 2*sim_xy
return stat.cpu(), coupling_xx.cpu(), coupling_yy.cpu(), coupling_xy.cpu()
    def _pick_lam(self, lams: torch.Tensor, K: torch.Tensor, L: torch.Tensor, n_folds: int = 5) -> torch.Tensor:
        """
        The conditional mean embedding is estimated as the solution of a regularised regression problem.
        This private method function uses cross validation to select the regularisation parameter that
        minimises squared error on the out-of-fold instances. The error is a distance in the RKHS and is
        therefore an MMD-like quantity itself.

        Parameters
        ----------
        lams
            Candidate regularisation parameters.
        K
            Kernel matrix over the instances.
        L
            Kernel matrix over the conditioning (context) variables.
        n_folds
            Number of cross-validation folds.

        Returns
        -------
        The candidate from `lams` with the smallest accumulated out-of-fold loss.
        """
        n = len(L)
        fold_size = n // n_folds
        # float64 for numerical stability of the matrix inversions below
        K, L = K.type(torch.float64), L.type(torch.float64)
        # Shuffle instances so folds are random
        perm = torch.randperm(n)
        K, L = K[perm][:, perm], L[perm][:, perm]
        losses = torch.zeros_like(lams, dtype=torch.float).to(K.device)
        for fold in range(n_folds):
            # Out-of-fold (held out) vs in-fold (training) indices
            inds_oof = list(np.arange(n)[(fold*fold_size):((fold+1)*fold_size)])
            inds_if = list(np.setdiff1d(np.arange(n), inds_oof))
            K_if, L_if = K[inds_if][:, inds_if], L[inds_if][:, inds_if]
            n_if = len(K_if)
            # One regularised inverse per candidate lambda
            L_inv_lams = torch.stack(
                [torch.linalg.inv(L_if + n_if*lam*torch.eye(n_if).to(L.device)) for lam in lams])  # n_lam x n_if x n_if
            KW = torch.einsum('ij,ljk->lik', K_if, L_inv_lams)
            lW = torch.einsum('ij,ljk->lik', L[inds_oof][:, inds_if], L_inv_lams)
            lWKW = torch.einsum('lij,ljk->lik', lW, KW)
            # Quadratic and cross terms of the RKHS squared-error for each oof instance
            lWKWl = torch.einsum('lkj,jk->lk', lWKW, L[inds_if][:, inds_oof])  # n_lam x n_oof
            lWk = torch.einsum('lij,ji->li', lW, K[inds_if][:, inds_oof])  # n_lam x n_oof
            # k(x,x) term approximated by max(K); identical across lams so it does not
            # change the argmin (exact for kernels whose diagonal attains the max, e.g. RBF).
            kxx = torch.ones_like(lWk).to(lWk.device) * torch.max(K)
            losses += (lWKWl + kxx - 2*lWk).sum(-1)
        return lams[torch.argmin(losses)]
def _sigma_median_diag(x: torch.Tensor, y: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
    """
    Private version of the bandwidth estimation function
    :py:func:`~alibi_detect.utils.pytorch.kernels.sigma_median`, with the +n (and -1) term
    excluded to account for the diagonal of the kernel matrix.

    Parameters
    ----------
    x
        Tensor of instances with dimension [Nx, features].
    y
        Tensor of instances with dimension [Ny, features].
    dist
        Tensor with dimensions [Nx, Ny], containing the pairwise distances between `x` and `y`.

    Returns
    -------
    The computed bandwidth, `sigma`.
    """
    # Median (upper-median index) of all pairwise distances, no diagonal correction.
    flat = dist.flatten()
    median_idx = int(flat.numel() // 2)
    median_dist = flat.sort().values[median_idx]
    # sigma = sqrt(median / 2), kept as a tensor with a trailing singleton dim.
    return (.5 * median_dist.unsqueeze(dim=-1)) ** .5
| 13,224 | 46.232143 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/__init__.py | from alibi_detect.utils.missing_optional_dependency import import_optional
# Lazily resolve the torch-dependent preprocessing utilities via `import_optional`
# (presumably so this subpackage imports even when torch is absent; accessing a
# missing name then raises an informative error — see `import_optional`).
UAE, HiddenOutput, preprocess_drift = import_optional(
    'alibi_detect.cd.pytorch.preprocess',
    names=['UAE', 'HiddenOutput', 'preprocess_drift'])
# Explicit public API of this subpackage.
__all__ = [
    "UAE",
    "HiddenOutput",
    "preprocess_drift"
]
| 297 | 23.833333 | 74 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/lsdd.py | import numpy as np
import torch
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseLSDDDrift
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.pytorch.distance import permed_lsdds
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
class LSDDDriftTorch(BaseLSDDDrift):
    # PyTorch backend of the LSDD drift detector. The statistic is a (rescaled)
    # least-squares density difference estimated with a Gaussian kernel model
    # centred on a subset of the reference data; significance comes from a
    # permutation test.
    @deprecated_alias(preprocess_x_ref='preprocess_at_init')
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            sigma: Optional[np.ndarray] = None,
            n_permutations: int = 100,
            n_kernel_centers: Optional[int] = None,
            lambda_rd_max: float = 0.2,
            device: Optional[str] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Least-squares density difference (LSDD) data drift detector using a permutation test.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the permutation test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        sigma
            Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
            bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
            is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
            between reference samples.
        n_permutations
            Number of permutations used in the permutation test.
        n_kernel_centers
            The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
            Defaults to 1/20th of the reference data.
        lambda_rd_max
            The maximum relative difference between two estimates of LSDD that the regularization parameter
            lambda is allowed to cause. Defaults to 0.2 as in the paper.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            p_val=p_val,
            x_ref_preprocessed=x_ref_preprocessed,
            preprocess_at_init=preprocess_at_init,
            update_x_ref=update_x_ref,
            preprocess_fn=preprocess_fn,
            sigma=sigma,
            n_permutations=n_permutations,
            n_kernel_centers=n_kernel_centers,
            lambda_rd_max=lambda_rd_max,
            input_shape=input_shape,
            data_type=data_type
        )
        self.meta.update({'backend': Framework.PYTORCH.value})
        # set device
        self.device = get_device(device)
        # TODO: TBD: the several type:ignore's below are because x_ref is typed as an np.ndarray
        # in the method signature, so we can't cast it to torch.Tensor unless we change the signature
        # to also accept torch.Tensor. We also can't redefine it's type as that would involve enabling
        # --allow-redefinitions in mypy settings (which we might do eventually).
        if self.preprocess_at_init or self.preprocess_fn is None or self.x_ref_preprocessed:
            # Reference data is final at init time: fit normalization, kernel and
            # kernel centers once here rather than at every predict call.
            x_ref = torch.as_tensor(self.x_ref).to(self.device)  # type: ignore[assignment]
            self._configure_normalization(x_ref)  # type: ignore[arg-type]
            x_ref = self._normalize(x_ref)
            self._initialize_kernel(x_ref)  # type: ignore[arg-type]
            self._configure_kernel_centers(x_ref)  # type: ignore[arg-type]
            self.x_ref = x_ref.cpu().numpy()  # type: ignore[union-attr]
            # For stability in high dimensions we don't divide H by (pi*sigma^2)^(d/2)
            # Results in an alternative test-stat of LSDD*(pi*sigma^2)^(d/2). Same p-vals etc.
            self.H = GaussianRBF(np.sqrt(2.) * self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
    def _initialize_kernel(self, x_ref: torch.Tensor):
        """Set up the Gaussian kernel, inferring the bandwidth from `x_ref` if `sigma` was not given."""
        if self.sigma is None:
            self.kernel = GaussianRBF()
            # Kernel call with infer_sigma=True sets the bandwidth via the median heuristic.
            _ = self.kernel(x_ref, x_ref, infer_sigma=True)
        else:
            sigma = torch.from_numpy(self.sigma)
            self.kernel = GaussianRBF(sigma)
    def _configure_normalization(self, x_ref: torch.Tensor, eps: float = 1e-12):
        """Create feature-wise (de)normalization closures from the reference statistics."""
        x_ref_means = x_ref.mean(0)
        x_ref_stds = x_ref.std(0)
        # eps guards against division by zero for (near-)constant features
        self._normalize = lambda x: (torch.as_tensor(x) - x_ref_means) / (x_ref_stds + eps)
        self._unnormalize = lambda x: (torch.as_tensor(x) * (x_ref_stds + eps)
                                       + x_ref_means).cpu().numpy()
    def _configure_kernel_centers(self, x_ref: torch.Tensor):
        "Set aside reference samples to act as kernel centers"
        perm = torch.randperm(self.x_ref.shape[0])
        c_inds, non_c_inds = perm[:self.n_kernel_centers], perm[self.n_kernel_centers:]
        self.kernel_centers = x_ref[c_inds]
        # Duplicate centers would make H singular; jitter them slightly if present.
        if np.unique(self.kernel_centers.cpu().numpy(), axis=0).shape[0] < self.n_kernel_centers:
            perturbation = (torch.randn(self.kernel_centers.shape) * 1e-6).to(self.device)
            self.kernel_centers = self.kernel_centers + perturbation
        x_ref_eff = x_ref[non_c_inds]  # the effective reference set
        self.k_xc = self.kernel(x_ref_eff, self.kernel_centers)
    def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
        """
        Compute the p-value resulting from a permutation test using the least-squares density
        difference as a distance measure between the reference data and the data to be tested.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        p-value obtained from the permutation test, the LSDD between the reference and test set, \
        and the LSDD threshold above which drift is flagged.
        """
        x_ref, x = self.preprocess(x)
        x_ref = torch.from_numpy(x_ref).to(self.device)  # type: ignore[assignment]
        x = torch.from_numpy(x).to(self.device)  # type: ignore[assignment]
        if self.preprocess_fn is not None and self.preprocess_at_init is False and not self.x_ref_preprocessed:
            # Reference representation only known now: (re)fit everything on the fly.
            self._configure_normalization(x_ref)  # type: ignore[arg-type]
            x_ref = self._normalize(x_ref)
            self._initialize_kernel(x_ref)  # type: ignore[arg-type]
            self._configure_kernel_centers(x_ref)  # type: ignore[arg-type]
            self.H = GaussianRBF(np.sqrt(2.) * self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
        x = self._normalize(x)
        k_yc = self.kernel(x, self.kernel_centers)
        k_all_c = torch.cat([self.k_xc, k_yc], 0)
        # Effective reference size excludes the samples reserved as kernel centers.
        n_x = x_ref.shape[0] - self.n_kernel_centers
        n_all = k_all_c.shape[0]
        # Null distribution via random re-assignments of instances to the two windows.
        perms = [torch.randperm(n_all) for _ in range(self.n_permutations)]
        x_perms = [perm[:n_x] for perm in perms]
        y_perms = [perm[n_x:] for perm in perms]
        lsdd_permuted, _, lsdd = permed_lsdds(  # type: ignore
            k_all_c, x_perms, y_perms, self.H, lam_rd_max=self.lambda_rd_max, return_unpermed=True
        )
        # p-value: fraction of permuted statistics at least as large as the observed one.
        p_val = (lsdd <= lsdd_permuted).float().mean()
        idx_threshold = int(self.p_val * len(lsdd_permuted))
        distance_threshold = torch.sort(lsdd_permuted, descending=True).values[idx_threshold]
        return float(p_val.cpu()), float(lsdd.cpu().numpy()), distance_threshold.cpu().numpy()
| 8,982 | 49.466292 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/preprocess.py | from typing import Callable, Dict, Optional, Type, Union
import numpy as np
import torch
import torch.nn as nn
from alibi_detect.utils.pytorch.prediction import (predict_batch,
predict_batch_transformer)
class _Encoder(nn.Module):
    """Encoder used by :class:`UAE`: optional input layer followed by a projection head.

    The head is either a user-supplied ``mlp`` or a default flatten + 3-layer MLP
    that steps the width from ``input_dim`` down to ``enc_dim``.
    """

    def __init__(
            self,
            input_layer: Optional[nn.Module],
            mlp: Optional[nn.Module] = None,
            input_dim: Optional[int] = None,
            enc_dim: Optional[int] = None,
            step_dim: Optional[int] = None,
    ) -> None:
        super().__init__()
        self.input_layer = input_layer
        if isinstance(mlp, nn.Module):
            # A ready-made projection head was supplied; use it as-is.
            self.mlp = mlp
            return
        if not (isinstance(enc_dim, int) and isinstance(step_dim, int)):
            raise ValueError('Need to provide either `enc_dim` and `step_dim` or a '
                             'nn.Module `mlp`')
        # Default head: flatten, then two ReLU hidden layers shrinking towards enc_dim.
        hidden_widths = [enc_dim + 2 * step_dim, enc_dim + step_dim]
        layers: list = [nn.Flatten()]
        prev_width = input_dim
        for width in hidden_widths:
            layers += [nn.Linear(prev_width, width), nn.ReLU()]
            prev_width = width
        layers.append(nn.Linear(prev_width, enc_dim))
        self.mlp = nn.Sequential(*layers)

    def forward(self, x: Union[np.ndarray, torch.Tensor, Dict[str, torch.Tensor]]) -> torch.Tensor:
        h = x if self.input_layer is None else self.input_layer(x)
        return self.mlp(h)
class UAE(nn.Module):
    """Untrained AutoEncoder feature map for drift preprocessing.

    Either wraps a user-supplied ``encoder_net`` or, given ``shape`` and ``enc_dim``,
    builds a default :class:`_Encoder` projecting flattened inputs to ``enc_dim``.
    """

    def __init__(
            self,
            encoder_net: Optional[nn.Module] = None,
            input_layer: Optional[nn.Module] = None,
            shape: Optional[tuple] = None,
            enc_dim: Optional[int] = None
    ) -> None:
        super().__init__()
        if isinstance(encoder_net, nn.Module):
            # A user-supplied encoder takes precedence over the default construction.
            self.encoder = encoder_net
        elif isinstance(enc_dim, int):
            # Build the default encoder, stepping the width down in thirds of the gap.
            input_dim = np.prod(shape)
            step_dim = int((input_dim - enc_dim) / 3)
            self.encoder = _Encoder(input_layer, input_dim=input_dim, enc_dim=enc_dim, step_dim=step_dim)
        else:
            raise ValueError('Need to provide either `enc_dim` or a nn.Module'
                             ' `encoder_net`.')

    def forward(self, x: Union[np.ndarray, torch.Tensor, Dict[str, torch.Tensor]]) -> torch.Tensor:
        return self.encoder(x)
class HiddenOutput(nn.Module):
    """Truncate a model so the forward pass returns the output of an intermediate layer.

    Keeps the children of ``model`` up to (but excluding) index ``layer``; optionally
    appends a ``Flatten`` so the features are 2-D.
    """

    def __init__(
            self,
            model: Union[nn.Module, nn.Sequential],
            layer: int = -1,
            flatten: bool = False
    ) -> None:
        super().__init__()
        truncated = list(model.children())[:layer]
        if flatten:
            truncated.append(nn.Flatten())
        self.model = nn.Sequential(*truncated)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
def preprocess_drift(x: Union[np.ndarray, list], model: Union[nn.Module, nn.Sequential],
                     device: Optional[torch.device] = None, preprocess_batch_fn: Callable = None,
                     tokenizer: Optional[Callable] = None, max_len: Optional[int] = None,
                     batch_size: int = int(1e10), dtype: Union[Type[np.generic], torch.dtype] = np.float32) \
        -> Union[np.ndarray, torch.Tensor, tuple]:
    """
    Prediction function used for preprocessing step of drift detector.

    Parameters
    ----------
    x
        Batch of instances.
    model
        Model used for preprocessing.
    device
        Device type used. The default None tries to use the GPU and falls back on CPU if needed.
        Can be specified by passing either torch.device('cuda') or torch.device('cpu').
    preprocess_batch_fn
        Optional batch preprocessing function. For example to convert a list of objects to a batch
        which can be processed by the PyTorch model.
    tokenizer
        Optional tokenizer for text drift.
    max_len
        Optional max token length for text drift.
    batch_size
        Batch size used during prediction.
    dtype
        Model output type, e.g. np.float32 or torch.float32.

    Returns
    -------
    Numpy array or torch tensor with predictions.
    """
    if tokenizer is not None:
        # Text data: tokenize first, then batch through the transformer model.
        return predict_batch_transformer(x, model, tokenizer, max_len, device=device,
                                         batch_size=batch_size, dtype=dtype)
    # Non-text data: batch directly through the model.
    return predict_batch(x, model, device=device, batch_size=batch_size,
                         preprocess_fn=preprocess_batch_fn, dtype=dtype)
| 4,599 | 36.398374 | 111 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_mmd_pt.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.cd.pytorch.mmd import MMDDriftTorch
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
# Sample size per window; n_hidden/n_classes appear unused in the visible tests.
n, n_hidden, n_classes = 500, 10, 5
class MyModel(nn.Module):
    """Tiny two-layer MLP used as a feature extractor in the MMD drift tests."""

    def __init__(self, n_features: int):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = torch.relu(self.dense1(x))
        return self.dense2(hidden)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
    """Stack a list of arrays into one batch along the first axis."""
    return np.concatenate(tuple(x), axis=0)
# Parameter grid for the MMD drift tests; every combination below is exercised.
n_features = [10]
n_enc = [None, 3]  # encoding dim option (not used directly in the test body)
preprocess = [
    (None, None),  # no preprocessing
    (preprocess_drift, {'model': HiddenOutput, 'layer': -1}),  # hidden-layer features
    (preprocess_list, None)  # list inputs concatenated into a batch
]
update_x_ref = [{'last': 750}, {'reservoir_sampling': 750}, None]
preprocess_at_init = [True, False]
n_permutations = [10]
tests_mmddrift = list(product(n_features, n_enc, preprocess,
                              n_permutations, update_x_ref, preprocess_at_init))
n_tests = len(tests_mmddrift)
@pytest.fixture
def mmd_params(request):
    # Indirectly parametrized fixture: `request.param` indexes the grid above.
    return tests_mmddrift[request.param]
@pytest.mark.parametrize('mmd_params', list(range(n_tests)), indirect=True)
def test_mmd(mmd_params):
    """End-to-end check of MMDDriftTorch: no drift on identical data, and
    consistent p-value/distance bookkeeping on shifted data."""
    n_features, n_enc, preprocess, n_permutations, update_x_ref, preprocess_at_init = mmd_params
    np.random.seed(0)
    torch.manual_seed(0)
    x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        # List inputs are only supported when the reference is preprocessed at init.
        if not preprocess_at_init:
            return
        to_list = True
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
            and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
        # Bind the hidden-layer feature extractor into the preprocessing function.
        model = MyModel(n_features)
        layer = preprocess_kwargs['layer']
        preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
    else:
        preprocess_fn = None
    cd = MMDDriftTorch(
        x_ref=x_ref,
        p_val=.05,
        preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
        update_x_ref=update_x_ref,
        preprocess_fn=preprocess_fn,
        n_permutations=n_permutations
    )
    x = x_ref.copy()
    # Same data as the reference: no drift expected.
    preds = cd.predict(x, return_p_val=True)
    assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
    if isinstance(update_x_ref, dict):
        # Reference-update bookkeeping: total seen and capped reference size.
        k = list(update_x_ref.keys())[0]
        assert cd.n == len(x) + len(x_ref)
        assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref))
    # Fresh sample from the same distribution; drift may or may not be flagged,
    # but the p-value/distance relations must be internally consistent.
    x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    if to_list:
        x_h1 = [_[None, :] for _ in x_h1]
    preds = cd.predict(x_h1, return_p_val=True)
    if preds['data']['is_drift'] == 1:
        assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] > preds['data']['distance_threshold']
    else:
        assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] <= preds['data']['distance_threshold']
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_learned_kernel_pt.py | from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Union
from alibi_detect.cd.pytorch.learned_kernel import LearnedKernelDriftTorch
# Number of reference (and test) instances per drift check.
n = 100
class MyKernel(nn.Module):
    """Learnable kernel: project both inputs with a shared linear map and
    return the Gram matrix of the projections."""

    def __init__(self, n_features: int):
        super().__init__()
        self.dense = nn.Linear(n_features, 20)

    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        fx, fy = self.dense(x), self.dense(y)
        # Equivalent to einsum('ji,ki->jk', fx, fy), i.e. fx @ fy^T.
        return torch.mm(fx, fy.t())
# test List[Any] inputs to the detector
def identity_fn(x: Union[torch.Tensor, list]) -> torch.Tensor:
    """Return `x` unchanged, converting from a list of arrays to a tensor if needed."""
    if not isinstance(x, list):
        return x
    return torch.from_numpy(np.array(x))
# Parameter grid for the learned-kernel drift tests.
p_val = [.05]
n_features = [4]
train_size = [.5]
preprocess_batch = [None, identity_fn]
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
tests_lkdrift = list(product(p_val, n_features, train_size, preprocess_batch, update_x_ref))
n_tests = len(tests_lkdrift)
@pytest.fixture
def lkdrift_params(request):
    # Indirectly parametrized fixture: `request.param` indexes the grid above.
    return tests_lkdrift[request.param]
@pytest.mark.parametrize('lkdrift_params', list(range(n_tests)), indirect=True)
def test_lkdrift(lkdrift_params):
    """End-to-end check of LearnedKernelDriftTorch: no drift on identical data,
    drift on constant data, and sensible distance ordering."""
    p_val, n_features, train_size, preprocess_batch, update_x_ref = lkdrift_params
    np.random.seed(0)
    torch.manual_seed(0)
    kernel = MyKernel(n_features)
    x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
    x_test1 = np.ones_like(x_ref)  # constant data: clearly drifted
    to_list = False
    if preprocess_batch is not None:
        # Exercise list inputs; reference updates are disabled in that mode.
        to_list = True
        x_ref = [_ for _ in x_ref]
        update_x_ref = None
    cd = LearnedKernelDriftTorch(
        x_ref=x_ref,
        kernel=kernel,
        p_val=p_val,
        update_x_ref=update_x_ref,
        train_size=train_size,
        preprocess_batch_fn=preprocess_batch,
        batch_size=3,
        epochs=1
    )
    x_test0 = x_ref.copy()
    preds_0 = cd.predict(x_test0)
    assert cd.n == len(x_test0) + len(x_ref)
    assert preds_0['data']['is_drift'] == 0
    if to_list:
        x_test1 = [_ for _ in x_test1]
    preds_1 = cd.predict(x_test1)
    assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
    assert preds_1['data']['is_drift'] == 1
    # Drifted data should be further from the reference than identical data.
    assert preds_0['data']['distance'] < preds_1['data']['distance']
| 2,264 | 26.621951 | 92 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_classifier_pt.py | from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Union
from alibi_detect.cd.pytorch.classifier import ClassifierDriftTorch
# Number of reference (and test) instances per drift check.
n = 100
class MyModel(nn.Module):
    """Tiny binary classifier used as the drift-detection model in these tests.

    Parameters
    ----------
    n_features
        Input dimensionality.
    softmax
        If True, apply a softmax over the two output logits so the forward pass
        returns probabilities; otherwise raw logits are returned.
    """

    def __init__(self, n_features: int, softmax: bool = False):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, 2)
        self.softmax = softmax

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = nn.ReLU()(self.dense1(x))
        x = self.dense2(x)
        if self.softmax:
            # Specify `dim` explicitly: nn.Softmax() without it is deprecated and
            # warns at runtime. For the (batch, classes) inputs used here the
            # implicit legacy choice was dim=1, so dim=-1 preserves behavior.
            x = nn.Softmax(dim=-1)(x)
        return x
# test List[Any] inputs to the detector
def identity_fn(x: Union[torch.Tensor, list]) -> torch.Tensor:
    """Return `x` unchanged, converting from a list of arrays to a tensor if needed."""
    if not isinstance(x, list):
        return x
    return torch.from_numpy(np.array(x))
# Parameter grid for the classifier drift tests.
p_val = [.05]
n_features = [4]
preds_type = ['probs', 'logits']
binarize_preds = [True, False]
n_folds = [None, 2]
train_size = [.5]
preprocess_batch = [None, identity_fn]
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
tests_clfdrift = list(product(p_val, n_features, preds_type, binarize_preds, n_folds,
                              train_size, preprocess_batch, update_x_ref))
n_tests = len(tests_clfdrift)
@pytest.fixture
def clfdrift_params(request):
    # Indirectly parametrized fixture: `request.param` indexes the grid above.
    return tests_clfdrift[request.param]
@pytest.mark.parametrize('clfdrift_params', list(range(n_tests)), indirect=True)
def test_clfdrift(clfdrift_params):
    """End-to-end check of ClassifierDriftTorch: no drift on identical data,
    drift on constant data, and correct metadata bookkeeping."""
    p_val, n_features, preds_type, binarize_preds, n_folds, \
        train_size, preprocess_batch, update_x_ref = clfdrift_params
    np.random.seed(0)
    torch.manual_seed(0)
    # Softmax output when the detector consumes probabilities, raw logits otherwise.
    model = MyModel(n_features, softmax=(preds_type == 'probs'))
    x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
    x_test1 = np.ones_like(x_ref)  # constant data: clearly drifted
    to_list = False
    if preprocess_batch is not None:
        # Exercise list inputs; reference updates are disabled in that mode.
        to_list = True
        x_ref = [_ for _ in x_ref]
        update_x_ref = None
    cd = ClassifierDriftTorch(
        x_ref=x_ref,
        model=model,
        p_val=p_val,
        update_x_ref=update_x_ref,
        train_size=train_size,
        n_folds=n_folds,
        preds_type=preds_type,
        binarize_preds=binarize_preds,
        preprocess_batch_fn=preprocess_batch,
        batch_size=1
    )
    x_test0 = x_ref.copy()
    preds_0 = cd.predict(x_test0)
    assert cd.n == len(x_test0) + len(x_ref)
    assert preds_0['data']['is_drift'] == 0
    assert preds_0['data']['distance'] >= 0
    if to_list:
        x_test1 = [_ for _ in x_test1]
    preds_1 = cd.predict(x_test1)
    assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
    assert preds_1['data']['is_drift'] == 1
    assert preds_1['data']['distance'] >= 0
    assert preds_0['data']['distance'] < preds_1['data']['distance']
    assert cd.meta['params']['preds_type'] == preds_type
    # NOTE(review): the key 'binarize_preds ' (trailing space) mirrors the key
    # stored in the detector's metadata — presumably an upstream typo; confirm
    # against the base detector before changing either side.
    assert cd.meta['params']['binarize_preds '] == binarize_preds
| 2,921 | 28.515152 | 85 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_spot_the_diff_pt.py | from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Union
from alibi_detect.cd.pytorch.spot_the_diff import SpotTheDiffDriftTorch
# Number of reference (and test) instances per drift check.
n = 100
class MyKernel(nn.Module):
    """Learnable kernel: project both inputs with a shared linear map and
    return the Gram matrix of the projections."""

    def __init__(self, n_features: int):
        super().__init__()
        self.dense = nn.Linear(n_features, 20)

    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        fx, fy = self.dense(x), self.dense(y)
        # Equivalent to einsum('ji,ki->jk', fx, fy), i.e. fx @ fy^T.
        return torch.mm(fx, fy.t())
# test List[Any] inputs to the detector
def identity_fn(x: Union[torch.Tensor, list]) -> torch.Tensor:
    """Return `x` unchanged, converting from a list of arrays to a tensor if needed."""
    if not isinstance(x, list):
        return x
    return torch.from_numpy(np.array(x))
# Parameter grid for the spot-the-diff drift tests.
p_val = [.05]
n_features = [4]
train_size = [.5]
preprocess_batch = [None, identity_fn]
kernel = [None, MyKernel]
n_diffs = [1, 5]
tests_stddrift = list(product(p_val, n_features, train_size, preprocess_batch, kernel, n_diffs))
n_tests = len(tests_stddrift)
@pytest.fixture
def stddrift_params(request):
    # Indirectly parametrized fixture: `request.param` indexes the grid above.
    return tests_stddrift[request.param]
@pytest.mark.parametrize('stddrift_params', list(range(n_tests)), indirect=True)
def test_stddrift(stddrift_params):
    """End-to-end check of SpotTheDiffDriftTorch: no drift on identical data,
    drift on constant data, and correctly shaped diff outputs."""
    p_val, n_features, train_size, preprocess_batch, kernel, n_diffs = stddrift_params
    np.random.seed(0)
    torch.manual_seed(0)
    if kernel is not None:
        kernel = kernel(n_features)  # instantiate the kernel class from the grid
    x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
    x_test1 = np.ones_like(x_ref)  # constant data: clearly drifted
    to_list = False
    if preprocess_batch is not None:
        # Exercise list inputs to the detector.
        to_list = True
        x_ref = [_ for _ in x_ref]
    cd = SpotTheDiffDriftTorch(
        x_ref=x_ref,
        kernel=kernel,
        p_val=p_val,
        n_diffs=n_diffs,
        train_size=train_size,
        preprocess_batch_fn=preprocess_batch,
        batch_size=3,
        epochs=1
    )
    x_test0 = x_ref.copy()
    preds_0 = cd.predict(x_test0)
    assert cd._detector.n == len(x_test0) + len(x_ref)
    assert preds_0['data']['is_drift'] == 0
    # One interpretable diff per requested component, with a coefficient each.
    assert preds_0['data']['diffs'].shape == (n_diffs, n_features)
    assert preds_0['data']['diff_coeffs'].shape == (n_diffs,)
    if to_list:
        x_test1 = [_ for _ in x_test1]
    preds_1 = cd.predict(x_test1)
    assert cd._detector.n == len(x_test1) + len(x_test0) + len(x_ref)
    assert preds_1['data']['is_drift'] == 1
    assert preds_0['data']['distance'] < preds_1['data']['distance']
    assert cd.meta['params']['n_diffs'] == n_diffs
| 2,440 | 27.057471 | 96 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.