repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_lsdd_online_pt.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.cd.pytorch.lsdd_online import LSDDDriftOnlineTorch
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
from alibi_detect.utils._random import fixed_seed
n, n_hidden, n_classes = 400, 10, 5


class MyModel(nn.Module):
    """Tiny two-layer MLP used as a feature extractor in these tests."""

    def __init__(self, n_features: int):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # ReLU between the two dense layers; identical to nn.ReLU()(...)
        return self.dense2(torch.relu(self.dense1(x)))
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
    """Handle list-valued detector inputs.

    A multi-element list is treated as reference data (one array per
    instance) and concatenated into a single batch; a single-element list
    is treated as one streamed instance and unwrapped.
    """
    multiple = len(x) > 1
    return np.concatenate(x, axis=0) if multiple else np.array(x)[0]
n_features = [10]
ert = [25]  # expected run-time (in time steps) before a false detection under H0
window_size = [5]
# (preprocess_fn, kwargs) pairs: no preprocessing, hidden-layer extraction,
# and list-input handling respectively
preprocess = [
    (None, None),
    (preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
    (preprocess_list, None)
]
n_bootstraps = [200]
# cartesian product of all settings -> one parametrized test case per combination
tests_lsdddriftonline = list(product(n_features, ert, window_size, preprocess, n_bootstraps))
n_tests = len(tests_lsdddriftonline)
@pytest.fixture
def lsdd_online_params(request):
    # Map the indirect integer parameter to the corresponding settings tuple.
    return tests_lsdddriftonline[request.param]
@pytest.mark.parametrize('lsdd_online_params', list(range(n_tests)), indirect=True)
def test_lsdd_online(lsdd_online_params, seed):
    """Check detection delays of LSDDDriftOnlineTorch on a stream without drift
    (H0, delay should be near the configured ERT) and with mean-shifted data
    (H1, drift should be flagged quickly)."""
    n_features, ert, window_size, preprocess, n_bootstraps = lsdd_online_params

    with fixed_seed(seed):
        x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        to_list = True  # instances are fed to predict() wrapped in a list
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
            and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
        # build the actual preprocessing function from the (class, layer) spec
        model = MyModel(n_features)
        layer = preprocess_kwargs['layer']
        preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
    else:
        preprocess_fn = None

    with fixed_seed(seed):
        cd = LSDDDriftOnlineTorch(
            x_ref=x_ref,
            ert=ert,
            window_size=window_size,
            preprocess_fn=preprocess_fn,
            n_bootstraps=n_bootstraps
        )
        x_h0 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
        x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32) + 1

    # H0 stream: collect (false) detection times; average should be close to ERT
    detection_times_h0 = []
    test_stats_h0 = []
    for x_t in x_h0:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h0.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h0.append(pred_t['data']['time'])
            cd.reset_state()  # restart the detector after each detection
    average_delay_h0 = np.array(detection_times_h0).mean()
    test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
    assert ert/3 < average_delay_h0 < 3*ert

    cd.reset_state()

    # H1 stream: drift should be detected well before the ERT
    detection_times_h1 = []
    test_stats_h1 = []
    for x_t in x_h1:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h1.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h1.append(pred_t['data']['time'])
            cd.reset_state()
    average_delay_h1 = np.array(detection_times_h1).mean()
    test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
    assert np.abs(average_delay_h1) < ert/2
    # drifted data should yield larger test statistics on average
    assert np.mean(test_stats_h1) > np.mean(test_stats_h0)
def test_lsdd_online_state_online(tmp_path, seed):
    """
    Test save/load/reset state methods for LSDDDriftOnlineTorch. State is saved, reset, and loaded, with
    prediction results and stateful attributes compared to original.
    """
    n = 100
    with fixed_seed(seed):
        x_ref = np.random.normal(0, 1, (n, n_classes))
        x = np.random.normal(0.1, 1, (n, n_classes))
        dd = LSDDDriftOnlineTorch(x_ref, window_size=10, ert=20)

    # Store state for comparison
    state_dict_t0 = {}
    for key in dd.online_state_keys:
        state_dict_t0[key] = getattr(dd, key)

    # Run for 10 time steps, snapshotting state to disk and memory at t=5
    test_stats_1 = []
    for t, x_t in enumerate(x):
        if t == 5:
            dd.save_state(tmp_path)
            # Store state for comparison
            state_dict_t5 = {}
            for key in dd.online_state_keys:
                state_dict_t5[key] = getattr(dd, key)
        preds = dd.predict(x_t)
        test_stats_1.append(preds['data']['test_stat'])

    # Reset and check state cleared back to the t=0 snapshot
    dd.reset_state()
    for key, orig_val in state_dict_t0.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc

    # Repeat, check that same test_stats both times
    test_stats_2 = []
    for t, x_t in enumerate(x):
        preds = dd.predict(x_t)
        test_stats_2.append(preds['data']['test_stat'])
    np.testing.assert_array_equal(test_stats_1, test_stats_2)

    # Load state from t=5 timestep
    dd.load_state(tmp_path)
    # Compare stateful attributes to original at t=5
    for key, orig_val in state_dict_t5.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc
    # Compare predictions to original at t=5
    new_pred = dd.predict(x[5])
    assert new_pred['data']['test_stat'] == test_stats_1[5]
| 5,861 | 34.96319 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_preprocess_pt.py | from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from alibi_detect.cd.pytorch import HiddenOutput
n, dim1, dim2, n_classes, latent_dim, n_hidden = 100, 2, 3, 5, 2, 7
n_features = dim1 * dim2
shape = (n, dim1, dim2)
X = np.random.rand(n * n_features).reshape(shape).astype('float32')


class Model1(nn.Module):
    """Two stacked dense layers applied along the last input dimension."""

    def __init__(self):
        super(Model1, self).__init__()
        self.dense1 = nn.Linear(dim2, n_hidden)
        self.dense2 = nn.Linear(n_hidden, n_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # no nonlinearity between the layers (mirrors the Sequential variant)
        return self.dense2(self.dense1(x))
# Functionally equivalent sequential model, tested alongside Model1
model2 = nn.Sequential(
    nn.Linear(dim2, n_hidden),
    nn.Linear(n_hidden, n_classes)
)

model = [1, 2]  # 1 -> Model1 instance, 2 -> model2 (Sequential)
layer = [0, 1, 2]  # which (sub)module output HiddenOutput should extract
flatten = [True, False]
tests_hidden_output = list(product(model, layer, flatten))
n_tests_hidden_output = len(tests_hidden_output)


@pytest.fixture
def hidden_output_params(request):
    # Map the indirect integer parameter to the corresponding settings tuple.
    return tests_hidden_output[request.param]
@pytest.mark.parametrize('hidden_output_params', list(range(n_tests_hidden_output)), indirect=True)
def test_hidden_output(hidden_output_params):
    """Extract the output of layer `layer` via HiddenOutput and check its shape."""
    model, layer, flatten = hidden_output_params
    model = Model1() if model == 1 else model2
    X_hidden = HiddenOutput(model=model, layer=layer, flatten=flatten)(torch.from_numpy(X))
    if layer == 0:
        assert_shape = (n, dim1, dim2)  # layer 0 returns the (unchanged) input
    elif layer == 1:
        assert_shape = (n, dim1, n_hidden)
    elif layer == 2:
        assert_shape = (n, dim1, n_classes)
    if flatten:
        # everything beyond the batch dimension is collapsed into one axis
        assert_shape = (assert_shape[0],) + (np.prod(assert_shape[1:]),)
    assert X_hidden.shape == assert_shape
| 1,651 | 28.5 | 99 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_contextmmd_pt.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.cd.pytorch.context_aware import ContextMMDDriftTorch
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
class MyModel(nn.Module):
    """Minimal MLP whose penultimate layer is extracted via HiddenOutput."""

    def __init__(self, n_features: int):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # torch.relu is functionally identical to nn.ReLU()(...)
        hidden = torch.relu(self.dense1(x))
        return self.dense2(hidden)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
    """Stack a list of per-instance arrays into a single batch array."""
    return np.concatenate(x, axis=0)
n = 250  # number of reference instances
n_features = [10]
n_enc = [None, 3]
# (preprocess_fn, kwargs) pairs: no preprocessing, hidden-layer extraction,
# and list-input handling respectively
preprocess = [
    (None, None),
    (preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
    (preprocess_list, None)
]
update_ref = [{'last': 750}, None]
preprocess_at_init = [True, False]
n_permutations = [10]
# cartesian product of all settings -> one parametrized test case per combination
tests_context_mmddrift = list(product(n_features, n_enc, preprocess,
                                      n_permutations, update_ref, preprocess_at_init))
n_tests = len(tests_context_mmddrift)


@pytest.fixture
def context_mmd_params(request):
    # Map the indirect integer parameter to the corresponding settings tuple.
    return tests_context_mmddrift[request.param]
@pytest.mark.parametrize('context_mmd_params', list(range(n_tests)), indirect=True)
def test_context_mmd(context_mmd_params):
    """Check ContextMMDDriftTorch on context-conditioned data with and without drift."""
    n_features, n_enc, preprocess, n_permutations, update_ref, preprocess_at_init = context_mmd_params
    np.random.seed(0)
    torch.manual_seed(0)

    # reference data depends on the context variable c_ref
    c_ref = np.random.randn(*(n, 1)).astype(np.float32)
    x_ref = c_ref + np.random.randn(*(n, n_features)).astype(np.float32)

    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        if not preprocess_at_init:
            pytest.skip("Skip tests where preprocess_at_init=False and x_ref is list.")
        to_list = True  # feed the detector a list of per-instance arrays
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
            and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
        # build the actual preprocessing function from the (class, layer) spec
        model = MyModel(n_features)
        layer = preprocess_kwargs['layer']
        preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
    else:
        preprocess_fn = None

    cd = ContextMMDDriftTorch(
        x_ref=x_ref,
        c_ref=c_ref,
        p_val=.05,
        preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
        update_ref=update_ref,
        preprocess_fn=preprocess_fn,
        n_permutations=n_permutations
    )

    # identical data + context -> no drift expected
    c = c_ref.copy()
    x = x_ref.copy()
    preds = cd.predict(x, c, return_p_val=True, return_distance=False, return_coupling=True)
    assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
    assert preds['data']['distance'] is None  # distance not requested
    assert isinstance(preds['data']['coupling_xy'], np.ndarray)

    if isinstance(update_ref, dict):
        # reference data/context should have been capped at the update size
        k = list(update_ref.keys())[0]
        assert cd.n == len(x) + len(x_ref)
        assert cd.x_ref.shape[0] == min(update_ref[k], len(x) + len(x_ref))
        assert cd.c_ref.shape[0] == min(update_ref[k], len(x) + len(c_ref))

    # fresh context + data drawn from the same conditional -> outcome may go either way,
    # but the reported p-value and distance must be consistent with the drift flag
    c_h1 = np.random.randn(*(n, 1)).astype(np.float32)
    x_h1 = c_h1 + np.random.randn(*(n, n_features)).astype(np.float32)
    if to_list:
        x_h1 = [_[None, :] for _ in x_h1]
    preds = cd.predict(x_h1, c_h1, return_p_val=True, return_distance=True, return_coupling=False)
    if preds['data']['is_drift'] == 1:
        assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] > preds['data']['distance_threshold']
    else:
        assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] <= preds['data']['distance_threshold']
    assert 'coupling_xy' not in preds['data']  # coupling not requested this time
| 4,034 | 36.71028 | 102 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_lsdd_pt.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.cd.pytorch.lsdd import LSDDDriftTorch
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
n, n_hidden, n_classes = 500, 10, 5


class MyModel(nn.Module):
    """Small MLP used as the backbone for HiddenOutput preprocessing tests."""

    def __init__(self, n_features: int):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        activation = nn.ReLU()
        return self.dense2(activation(self.dense1(x)))
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
    """Stack a list of per-instance arrays into a single batch array."""
    return np.concatenate(x, axis=0)
n_features = [10]
n_enc = [None, 3]
# (preprocess_fn, kwargs) pairs: no preprocessing, hidden-layer extraction,
# and list-input handling respectively
preprocess = [
    (None, None),
    (preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
    (preprocess_list, None)
]
update_x_ref = [None]
preprocess_at_init = [True, False]
n_permutations = [10]
# cartesian product of all settings -> one parametrized test case per combination
tests_lsdddrift = list(product(n_features, n_enc, preprocess,
                               n_permutations, update_x_ref, preprocess_at_init))
n_tests = len(tests_lsdddrift)


@pytest.fixture
def lsdd_params(request):
    # Map the indirect integer parameter to the corresponding settings tuple.
    return tests_lsdddrift[request.param]
@pytest.mark.parametrize('lsdd_params', list(range(n_tests)), indirect=True)
def test_lsdd(lsdd_params):
    """Check LSDDDriftTorch on near-identical data (no drift expected) and on
    fresh data (reported p-value/distance must be consistent with the flag)."""
    n_features, n_enc, preprocess, n_permutations, update_x_ref, preprocess_at_init = lsdd_params
    np.random.seed(0)
    torch.manual_seed(0)

    x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        if not preprocess_at_init:
            # Report as skipped (rather than silently passing via a bare `return`)
            # for consistency with test_context_mmd in test_contextmmd_pt.py.
            pytest.skip("Skip tests where preprocess_at_init=False and x_ref is list.")
        to_list = True  # feed the detector a list of per-instance arrays
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
            and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
        # build the actual preprocessing function from the (class, layer) spec
        model = MyModel(n_features)
        layer = preprocess_kwargs['layer']
        preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
    else:
        preprocess_fn = None

    cd = LSDDDriftTorch(
        x_ref=x_ref,
        p_val=.05,
        preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
        update_x_ref=update_x_ref,
        preprocess_fn=preprocess_fn,
        n_permutations=n_permutations
    )
    perturbation = np.random.normal(size=(n, n_features)) / 100  # LSDD struggles with copies/repeats
    x = x_ref.copy() + perturbation.astype(np.float32)
    preds = cd.predict(x, return_p_val=True)
    assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
    if isinstance(update_x_ref, dict):
        # reference data should have been capped at the update size
        k = list(update_x_ref.keys())[0]
        assert cd.n == len(x) + len(x_ref)
        assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref))

    # fresh data from the same distribution: outcome may go either way, but the
    # reported p-value and distance must be consistent with the drift flag
    x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    if to_list:
        x_h1 = [_[None, :] for _ in x_h1]
    preds = cd.predict(x_h1, return_p_val=True)
    if preds['data']['is_drift'] == 1:
        assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] > preds['data']['distance_threshold']
    else:
        assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] <= preds['data']['distance_threshold']
| 3,599 | 34.643564 | 101 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_mmd_online_pt.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.cd.pytorch.mmd_online import MMDDriftOnlineTorch
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
from alibi_detect.utils._random import fixed_seed
n, n_hidden, n_classes = 400, 10, 5


class MyModel(nn.Module):
    """Tiny feed-forward network used to test hidden-layer preprocessing."""

    def __init__(self, n_features: int):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        relu = nn.ReLU()
        return self.dense2(relu(self.dense1(x)))
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
    """Concatenate list-valued reference data or unwrap a single streamed instance."""
    if len(x) > 1:  # test List[Any] reference data inputs to the detector with Any=np.ndarray
        return np.concatenate(x, axis=0)
    else:  # test Any inputs to the prediction function of the detector with Any=List[np.ndarray]
        return np.array(x)[0]
n_features = [10]
ert = [25]  # expected run-time (in time steps) before a false detection under H0
window_size = [5]
# (preprocess_fn, kwargs) pairs: no preprocessing, hidden-layer extraction,
# and list-input handling respectively
preprocess = [
    (None, None),
    (preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
    (preprocess_list, None)
]
n_bootstraps = [200]
# cartesian product of all settings -> one parametrized test case per combination
tests_mmddriftonline = list(product(n_features, ert, window_size, preprocess, n_bootstraps))
n_tests = len(tests_mmddriftonline)


@pytest.fixture
def mmd_online_params(request):
    # Map the indirect integer parameter to the corresponding settings tuple.
    return tests_mmddriftonline[request.param]
@pytest.mark.parametrize('mmd_online_params', list(range(n_tests)), indirect=True)
def test_mmd_online(mmd_online_params, seed):
    """Check detection delays of MMDDriftOnlineTorch on a stream without drift
    (H0, delay should be near the configured ERT) and with mean-shifted data
    (H1, drift should be flagged quickly)."""
    n_features, ert, window_size, preprocess, n_bootstraps = mmd_online_params

    with fixed_seed(seed):
        x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        to_list = True  # instances are fed to predict() wrapped in a list
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
            and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
        # build the actual preprocessing function from the (class, layer) spec
        model = MyModel(n_features)
        layer = preprocess_kwargs['layer']
        preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
    else:
        preprocess_fn = None

    with fixed_seed(seed):
        cd = MMDDriftOnlineTorch(
            x_ref=x_ref,
            ert=ert,
            window_size=window_size,
            preprocess_fn=preprocess_fn,
            n_bootstraps=n_bootstraps
        )
        x_h0 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
        x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32) + 1

    # H0 stream: collect (false) detection times; average should be close to ERT
    detection_times_h0 = []
    test_stats_h0 = []
    for x_t in x_h0:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h0.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h0.append(pred_t['data']['time'])
            cd.reset_state()  # restart the detector after each detection
    average_delay_h0 = np.array(detection_times_h0).mean()
    test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
    assert ert/3 < average_delay_h0 < 3*ert

    cd.reset_state()

    # H1 stream: drift should be detected well before the ERT
    detection_times_h1 = []
    test_stats_h1 = []
    for x_t in x_h1:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h1.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h1.append(pred_t['data']['time'])
            cd.reset_state()
    average_delay_h1 = np.array(detection_times_h1).mean()
    test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
    assert np.abs(average_delay_h1) < ert/2
    # drifted data should yield larger test statistics on average
    assert np.mean(test_stats_h1) > np.mean(test_stats_h0)
def test_mmd_online_state_online(tmp_path, seed):
    """
    Test save/load/reset state methods for MMDDriftOnlineTorch. State is saved, reset, and loaded, with
    prediction results and stateful attributes compared to original.
    """
    n = 100
    with fixed_seed(seed):
        x_ref = np.random.normal(0, 1, (n, n_classes))
        x = np.random.normal(0.1, 1, (n, n_classes))
        dd = MMDDriftOnlineTorch(x_ref, window_size=10, ert=20)

    # Store state for comparison
    state_dict_t0 = {}
    for key in dd.online_state_keys:
        state_dict_t0[key] = getattr(dd, key)

    # Run for 10 time steps, snapshotting state to disk and memory at t=5
    test_stats_1 = []
    for t, x_t in enumerate(x):
        if t == 5:
            dd.save_state(tmp_path)
            # Store state for comparison
            state_dict_t5 = {}
            for key in dd.online_state_keys:
                state_dict_t5[key] = getattr(dd, key)
        preds = dd.predict(x_t)
        test_stats_1.append(preds['data']['test_stat'])

    # Reset and check state cleared back to the t=0 snapshot
    dd.reset_state()
    for key, orig_val in state_dict_t0.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc

    # Repeat, check that same test_stats both times
    test_stats_2 = []
    for t, x_t in enumerate(x):
        preds = dd.predict(x_t)
        test_stats_2.append(preds['data']['test_stat'])
    np.testing.assert_array_equal(test_stats_1, test_stats_2)

    # Load state from t=5 timestep
    dd.load_state(tmp_path)
    # Compare stateful attributes to original at t=5
    for key, orig_val in state_dict_t5.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc
    # Compare predictions to original at t=5
    new_pred = dd.predict(x[5])
    assert new_pred['data']['test_stat'] == test_stats_1[5]
| 5,847 | 34.877301 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/learned_kernel.py | from functools import partial
import numpy as np
import tensorflow as tf
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseLearnedKernelDrift
from alibi_detect.utils.tensorflow.data import TFDataset
from alibi_detect.utils.tensorflow.misc import clone_model
from alibi_detect.utils.tensorflow.distance import mmd2_from_kernel_matrix, batch_compute_kernel_matrix
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import OptimizerTF
class LearnedKernelDriftTF(BaseLearnedKernelDrift):
    @deprecated_alias(preprocess_x_ref='preprocess_at_init')
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            kernel: tf.keras.Model,
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            n_permutations: int = 100,
            var_reg: float = 1e-5,
            reg_loss_fn: Callable = (lambda kernel: 0),
            train_size: Optional[float] = .75,
            retrain_from_scratch: bool = True,
            optimizer: OptimizerTF = tf.keras.optimizers.Adam,
            learning_rate: float = 1e-3,
            batch_size: int = 32,
            batch_size_predict: int = 32,
            preprocess_batch_fn: Optional[Callable] = None,
            epochs: int = 3,
            verbose: int = 0,
            train_kwargs: Optional[dict] = None,
            dataset: Callable = TFDataset,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Maximum Mean Discrepancy (MMD) data drift detector where the kernel is trained to maximise an
        estimate of the test power. The kernel is trained on a split of the reference and test instances
        and then the MMD is evaluated on held out instances and a permutation test is performed.

        For details see Liu et al (2020): Learning Deep Kernels for Non-Parametric Two-Sample Tests
        (https://arxiv.org/abs/2002.09116)

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        kernel
            Trainable TensorFlow model that returns a similarity between two instances.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before applying the kernel.
        n_permutations
            The number of permutations to use in the permutation test once the MMD has been computed.
        var_reg
            Constant added to the estimated variance of the MMD for stability.
        reg_loss_fn
            The regularisation term reg_loss_fn(kernel) is added to the loss function being optimized.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the kernel.
            The drift is detected on `1 - train_size`.
        retrain_from_scratch
            Whether the kernel should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        optimizer
            Optimizer used during training of the kernel.
        learning_rate
            Learning rate used by optimizer.
        batch_size
            Batch size used during training of the kernel.
        batch_size_predict
            Batch size used for the trained drift detector predictions.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the kernel.
        epochs
            Number of training epochs for the kernel. Corresponds to the smaller of the reference and test sets.
        verbose
            Verbosity level during the training of the kernel. 0 is silent, 1 a progress bar.
        train_kwargs
            Optional additional kwargs when training the kernel.
        dataset
            Dataset object used during training.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            p_val=p_val,
            x_ref_preprocessed=x_ref_preprocessed,
            preprocess_at_init=preprocess_at_init,
            update_x_ref=update_x_ref,
            preprocess_fn=preprocess_fn,
            n_permutations=n_permutations,
            train_size=train_size,
            retrain_from_scratch=retrain_from_scratch,
            input_shape=input_shape,
            data_type=data_type
        )
        self.meta.update({'backend': Framework.TENSORFLOW.value})

        # define and compile kernel; keep the untrained original so the kernel
        # can be re-initialised from scratch for each test set if requested
        self.original_kernel = kernel
        self.kernel = clone_model(kernel)

        self.dataset = partial(dataset, batch_size=batch_size, shuffle=True)
        self.kernel_mat_fn = partial(
            batch_compute_kernel_matrix, preprocess_fn=preprocess_batch_fn, batch_size=batch_size_predict
        )
        self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs, 'learning_rate': learning_rate,
                             'reg_loss_fn': reg_loss_fn, 'preprocess_fn': preprocess_batch_fn, 'verbose': verbose}
        if isinstance(train_kwargs, dict):
            self.train_kwargs.update(train_kwargs)

        self.j_hat = LearnedKernelDriftTF.JHat(self.kernel, var_reg)

    class JHat(tf.keras.Model):
        """
        A module that wraps around the kernel. When passed a batch of reference and batch of test
        instances it returns an estimate of a correlate of test power.
        Equation 4 of https://arxiv.org/abs/2002.09116
        """
        def __init__(self, kernel: tf.keras.Model, var_reg: float):
            super().__init__()
            self.config = {'kernel': kernel, 'var_reg': var_reg}
            self.kernel = kernel
            # var_reg stabilises the variance estimate in the denominator
            self.var_reg = var_reg

        def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
            # pairwise kernel matrices between/within the two batches
            k_xx, k_yy, k_xy = self.kernel(x, x), self.kernel(y, y), self.kernel(x, y)
            h_mat = k_xx + k_yy - k_xy - tf.transpose(k_xy)

            n = len(x)
            # unbiased MMD^2 estimate (diagonal excluded) and its variance estimate
            mmd2_est = (tf.reduce_sum(h_mat)-tf.linalg.trace(h_mat))/(n*(n-1))
            var_est = (4*tf.reduce_sum(tf.reduce_sum(h_mat, axis=-1)**2)/(n**3) -
                       4*tf.reduce_sum(h_mat)**2/(n**4))
            reg_var_est = var_est + self.var_reg

            return mmd2_est/tf.math.sqrt(reg_var_est)

    def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
        """
        Compute the p-value resulting from a permutation test using the maximum mean discrepancy
        as a distance measure between the reference data and the data to be tested. The kernel
        used within the MMD is first trained to maximise an estimate of the resulting test power.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
        and the MMD^2 threshold above which drift is flagged.
        """
        x_ref, x_cur = self.preprocess(x)
        (x_ref_tr, x_cur_tr), (x_ref_te, x_cur_te) = self.get_splits(x_ref, x_cur)
        ds_ref_tr, ds_cur_tr = self.dataset(x_ref_tr), self.dataset(x_cur_tr)

        # optionally re-initialise the kernel, then train it on the training split
        self.kernel = clone_model(self.original_kernel) if self.retrain_from_scratch else self.kernel
        train_args = [self.j_hat, (ds_ref_tr, ds_cur_tr)]
        LearnedKernelDriftTF.trainer(*train_args, **self.train_kwargs)

        # evaluate the MMD on the held-out split
        if isinstance(x_ref_te, np.ndarray) and isinstance(x_cur_te, np.ndarray):
            x_all = np.concatenate([x_ref_te, x_cur_te], axis=0)
        else:
            x_all = x_ref_te + x_cur_te
        kernel_mat = self.kernel_mat_fn(x_all, x_all, self.kernel)
        kernel_mat = kernel_mat - tf.linalg.diag(tf.linalg.diag_part(kernel_mat))  # zero diagonal
        mmd2 = mmd2_from_kernel_matrix(kernel_mat, len(x_cur_te), permute=False, zero_diag=False).numpy()
        mmd2_permuted = np.array(
            [mmd2_from_kernel_matrix(kernel_mat, len(x_cur_te), permute=True, zero_diag=False).numpy()
                for _ in range(self.n_permutations)]
        )
        # fraction of permuted MMDs at least as large as the observed one
        p_val = (mmd2 <= mmd2_permuted).mean()

        idx_threshold = int(self.p_val * len(mmd2_permuted))
        distance_threshold = np.sort(mmd2_permuted)[::-1][idx_threshold]
        return p_val, mmd2, distance_threshold

    @staticmethod
    def trainer(
        j_hat: JHat,
        datasets: Tuple[tf.keras.utils.Sequence, tf.keras.utils.Sequence],
        optimizer: OptimizerTF = tf.keras.optimizers.Adam,
        learning_rate: float = 1e-3,
        preprocess_fn: Callable = None,
        epochs: int = 20,
        reg_loss_fn: Callable = (lambda kernel: 0),
        verbose: int = 1,
    ) -> None:
        """
        Train the kernel to maximise an estimate of test power using minibatch gradient descent.
        """
        ds_ref, ds_cur = datasets
        # instantiate the optimizer if a class (rather than an instance) was passed
        optimizer = optimizer(learning_rate=learning_rate) if isinstance(optimizer, type) else optimizer
        n_minibatch = min(len(ds_ref), len(ds_cur))
        # iterate over epochs
        loss_ma = 0.
        for epoch in range(epochs):
            if verbose:
                pbar = tf.keras.utils.Progbar(n_minibatch, 1)
            for step, (x_ref, x_cur) in enumerate(zip(ds_ref, ds_cur)):
                if isinstance(preprocess_fn, Callable):  # type: ignore
                    x_ref, x_cur = preprocess_fn(x_ref), preprocess_fn(x_cur)
                with tf.GradientTape() as tape:
                    estimate = j_hat(x_ref, x_cur)
                    loss = -estimate + reg_loss_fn(j_hat.kernel)  # ascent
                grads = tape.gradient(loss, j_hat.trainable_weights)
                optimizer.apply_gradients(zip(grads, j_hat.trainable_weights))
                if verbose == 1:
                    # running mean of the loss for the progress bar
                    loss_ma = loss_ma + (loss - loss_ma) / (step + 1)
                    pbar_values = [('loss', loss_ma)]
                    pbar.add(1, values=pbar_values)
| 11,206 | 46.28692 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/classifier.py | from functools import partial
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import BinaryCrossentropy
from scipy.special import softmax
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseClassifierDrift
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.utils.tensorflow.data import TFDataset
from alibi_detect.utils.tensorflow.misc import clone_model
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import OptimizerTF
class ClassifierDriftTF(BaseClassifierDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: np.ndarray,
model: tf.keras.Model,
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
preds_type: str = 'probs',
binarize_preds: bool = False,
reg_loss_fn: Callable = (lambda model: 0),
train_size: Optional[float] = .75,
n_folds: Optional[int] = None,
retrain_from_scratch: bool = True,
seed: int = 0,
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
learning_rate: float = 1e-3,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
dataset: Callable = TFDataset,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Classifier-based drift detector. The classifier is trained on a fraction of the combined
reference and test data and drift is detected on the remaining data. To use all the data
to detect drift, a stratified cross-validation scheme can be chosen.
Parameters
----------
x_ref
Data used as reference distribution.
model
TensorFlow classification model used for drift detection.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
preds_type
Whether the model outputs 'probs' or 'logits'.
binarize_preds
Whether to test for discrepency on soft (e.g. prob/log-prob) model predictions directly
with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
reg_loss_fn
The regularisation term reg_loss_fn(model) is added to the loss function being optimized.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
n_folds
Optional number of stratified folds used for training. The model preds are then calculated
on all the out-of-fold predictions. This allows to leverage all the reference and test data
for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
are specified, `n_folds` is prioritized.
retrain_from_scratch
Whether the classifier should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
seed
Optional random seed for fold selection.
optimizer
Optimizer used during training of the classifier.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the classifier.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model.
epochs
Number of training epochs for the classifier for each (optional) fold.
verbose
Verbosity level during the training of the classifier.
0 is silent, 1 a progress bar and 2 prints the statistics after each epoch.
train_kwargs
Optional additional kwargs when fitting the classifier.
dataset
Dataset object used during training.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
preds_type=preds_type,
binarize_preds=binarize_preds,
train_size=train_size,
n_folds=n_folds,
retrain_from_scratch=retrain_from_scratch,
seed=seed,
input_shape=input_shape,
data_type=data_type
)
if preds_type not in ['probs', 'logits']:
raise ValueError("'preds_type' should be 'probs' or 'logits'")
self.meta.update({'backend': Framework.TENSORFLOW.value})
# define and compile classifier model
self.original_model = model
self.model = clone_model(model)
self.loss_fn = BinaryCrossentropy(from_logits=(self.preds_type == 'logits'))
self.dataset = partial(dataset, batch_size=batch_size, shuffle=True)
self.predict_fn = partial(predict_batch, preprocess_fn=preprocess_batch_fn, batch_size=batch_size)
optimizer = optimizer(learning_rate=learning_rate) if isinstance(optimizer, type) else optimizer
self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs,
'reg_loss_fn': reg_loss_fn, 'preprocess_fn': preprocess_batch_fn, 'verbose': verbose}
if isinstance(train_kwargs, dict):
self.train_kwargs.update(train_kwargs)
    def score(self, x: np.ndarray) -> Tuple[float, float, np.ndarray, np.ndarray,  # type: ignore[override]
                                            Union[np.ndarray, list], Union[np.ndarray, list]]:
        """
        Compute the out-of-fold drift metric such as the accuracy from a classifier
        trained to distinguish the reference data from the data to be tested.
        Parameters
        ----------
        x
            Batch of instances.
        Returns
        -------
        p-value, a notion of distance between the trained classifier's out-of-fold performance \
        and that which we'd expect under the null assumption of no drift, \
        and the out-of-fold classifier model prediction probabilities on the reference and test data \
        as well as the associated reference and test instances of the out-of-fold predictions.
        """
        x_ref, x = self.preprocess(x)  # type: ignore[assignment]
        # combine reference and test data with 0/1 labels and obtain the train/test splits
        x, y, splits = self.get_splits(x_ref, x)  # type: ignore
        # iterate over folds: train a new model for each fold and make out-of-fold (oof) predictions
        preds_oof_list, idx_oof_list = [], []
        for idx_tr, idx_te in splits:
            y_tr = np.eye(2)[y[idx_tr]]  # one-hot encode the binary labels for training
            if isinstance(x, np.ndarray):
                x_tr, x_te = x[idx_tr], x[idx_te]
            elif isinstance(x, list):
                x_tr, x_te = [x[_] for _ in idx_tr], [x[_] for _ in idx_te]
            else:
                raise TypeError(f'x needs to be of type np.ndarray or list and not {type(x)}.')
            ds_tr = self.dataset(x_tr, y_tr)
            if self.retrain_from_scratch:
                # clone model to re-initialise
                self.model = clone_model(self.original_model)
                # Clone optimizer to prevent error due to cloned model (with new tf>=2.11 optimizers)
                optimizer = self.train_kwargs['optimizer']
                self.train_kwargs['optimizer'] = optimizer.__class__.from_config(optimizer.get_config())
            train_args = [self.model, self.loss_fn, None]
            self.train_kwargs.update({'dataset': ds_tr})
            trainer(*train_args, **self.train_kwargs)
            preds = self.predict_fn(x_te, self.model)
            preds_oof_list.append(preds)
            idx_oof_list.append(idx_te)
        preds_oof = np.concatenate(preds_oof_list, axis=0)
        # convert logits to probabilities if needed so the downstream test operates on probs
        probs_oof = softmax(preds_oof, axis=-1) if self.preds_type == 'logits' else preds_oof
        idx_oof = np.concatenate(idx_oof_list, axis=0)
        y_oof = y[idx_oof]
        n_cur = y_oof.sum()  # label 1 marks test instances, label 0 reference instances
        n_ref = len(y_oof) - n_cur
        p_val, dist = self.test_probs(y_oof, probs_oof, n_ref, n_cur)
        # sort the oof predictions back into original order (reference first, then test)
        idx_sort = np.argsort(idx_oof)
        probs_sort = probs_oof[idx_sort]
        if isinstance(x, np.ndarray):
            x_oof = x[idx_oof]
            x_sort = x_oof[idx_sort]
        else:
            x_oof = [x[_] for _ in idx_oof]
            x_sort = [x_oof[_] for _ in idx_sort]
        return p_val, dist, probs_sort[:n_ref, 1], probs_sort[n_ref:, 1], x_sort[:n_ref], x_sort[n_ref:]
| 10,298 | 48.514423 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/spot_the_diff.py | import logging
import numpy as np
import tensorflow as tf
from typing import Callable, Dict, Optional, Union
from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF
from alibi_detect.utils.tensorflow.data import TFDataset
from alibi_detect.utils.tensorflow import GaussianRBF
from alibi_detect.utils.tensorflow.prediction import predict_batch
logger = logging.getLogger(__name__)
class SpotTheDiffDriftTF:
    """Interpretable classifier-based drift detector (TensorFlow backend).

    Wraps a :class:`ClassifierDriftTF` whose classifier is a kernel classifier with
    learnable test locations; the learned differences between the test locations and
    the average reference instance are returned as interpretable 'diffs'.
    """
    def __init__(
        self,
        x_ref: np.ndarray,
        p_val: float = .05,
        x_ref_preprocessed: bool = False,
        preprocess_fn: Optional[Callable] = None,
        kernel: Optional[tf.keras.Model] = None,
        n_diffs: int = 1,
        initial_diffs: Optional[np.ndarray] = None,
        l1_reg: float = 0.01,
        binarize_preds: bool = False,
        train_size: Optional[float] = .75,
        n_folds: Optional[int] = None,
        retrain_from_scratch: bool = True,
        seed: int = 0,
        optimizer: tf.keras.optimizers = tf.keras.optimizers.Adam,
        learning_rate: float = 1e-3,
        batch_size: int = 32,
        preprocess_batch_fn: Optional[Callable] = None,
        epochs: int = 3,
        verbose: int = 0,
        train_kwargs: Optional[dict] = None,
        dataset: Callable = TFDataset,
        input_shape: Optional[tuple] = None,
        data_type: Optional[str] = None
    ) -> None:
        """
        Classifier-based drift detector with a classifier of form y = a + b_1*k(x,w_1) + ... + b_J*k(x,w_J),
        where k is a kernel and w_1,...,w_J are learnable test locations. If drift has occured the test locations
        learn to be more/less (given by sign of b_i) similar to test instances than reference instances.
        The test locations are regularised to be close to the average reference instance such that the **difference**
        is then interpretable as the transformation required for each feature to make the average instance more/less
        like a test instance than a reference instance.
        The classifier is trained on a fraction of the combined reference and test data and drift is detected on
        the remaining data. To use all the data to detect drift, a stratified cross-validation scheme can be chosen.
        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the test.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        kernel
            Differentiable TensorFlow model used to define similarity between instances, defaults to Gaussian RBF.
        n_diffs
            The number of test locations to use, each corresponding to an interpretable difference.
        initial_diffs
            Array used to initialise the diffs that will be learned. Defaults to Gaussian
            for each feature with equal variance to that of reference data.
        l1_reg
            Strength of l1 regularisation to apply to the differences.
        binarize_preds
            Whether to test for discrepency on soft (e.g. probs/logits) model predictions directly
            with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
            The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
        n_folds
            Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold instances. This allows to leverage all the reference and test data
            for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
            are specified, `n_folds` is prioritized.
        retrain_from_scratch
            Whether the classifier should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        seed
            Optional random seed for fold selection.
        optimizer
            Optimizer used during training of the classifier.
        learning_rate
            Learning rate used by optimizer.
        batch_size
            Batch size used during training of the classifier.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the model.
        epochs
            Number of training epochs for the classifier for each (optional) fold.
        verbose
            Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar.
        train_kwargs
            Optional additional kwargs when fitting the classifier.
        dataset
            Dataset object used during training.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        if preprocess_fn is not None and preprocess_batch_fn is not None:
            raise ValueError("SpotTheDiffDrift detector only supports preprocess_fn or preprocess_batch_fn, not both.")
        if n_folds is not None and n_folds > 1:
            logger.warning("When using multiple folds the returned diffs will correspond to the final fold only.")
        # preprocess the reference data once up front so the kernel and initial diffs
        # can be defined in the preprocessed feature space
        if not x_ref_preprocessed and preprocess_fn is not None:
            x_ref_proc = preprocess_fn(x_ref)
        elif not x_ref_preprocessed and preprocess_batch_fn is not None:
            x_ref_proc = predict_batch(
                x_ref, lambda x: x, preprocess_fn=preprocess_batch_fn, batch_size=batch_size
            )
        else:
            x_ref_proc = x_ref
        if kernel is None:
            kernel = GaussianRBF(trainable=True)  # default similarity kernel
        if initial_diffs is None:
            # initialise diffs with noise scaled by the feature-wise std of the reference data
            initial_diffs = np.random.normal(size=(n_diffs,) + x_ref_proc.shape[1:]) * x_ref_proc.std(0)
        else:
            if len(initial_diffs) != n_diffs:
                raise ValueError("Should have initial_diffs.shape[0] == n_diffs")
        model = SpotTheDiffDriftTF.InterpretableClf(kernel, x_ref_proc, initial_diffs)
        # l1 penalty keeps the learned diffs sparse and hence interpretable
        reg_loss_fn = (lambda model: tf.reduce_mean(tf.abs(model.diffs)) * l1_reg)
        self._detector = ClassifierDriftTF(
            x_ref=x_ref,
            model=model,
            p_val=p_val,
            x_ref_preprocessed=x_ref_preprocessed,
            preprocess_at_init=True,
            update_x_ref=None,
            preprocess_fn=preprocess_fn,
            preds_type='logits',
            binarize_preds=binarize_preds,
            reg_loss_fn=reg_loss_fn,
            train_size=train_size,
            n_folds=n_folds,
            retrain_from_scratch=retrain_from_scratch,
            seed=seed,
            optimizer=optimizer,
            learning_rate=learning_rate,
            batch_size=batch_size,
            preprocess_batch_fn=preprocess_batch_fn,
            epochs=epochs,
            verbose=verbose,
            train_kwargs=train_kwargs,
            dataset=dataset,
            input_shape=input_shape,
            data_type=data_type
        )
        # expose the wrapped detector's metadata under this detector's name
        self.meta = self._detector.meta
        self.meta['params']['name'] = 'SpotTheDiffDrift'
        self.meta['params']['n_diffs'] = n_diffs
        self.meta['params']['l1_reg'] = l1_reg
        self.meta['params']['initial_diffs'] = initial_diffs
    class InterpretableClf(tf.keras.Model):
        """Kernel classifier y = a + sum_i b_i * k(x, mean + d_i) with learnable diffs d_i."""
        def __init__(self, kernel: tf.keras.Model, x_ref: np.ndarray, initial_diffs: np.ndarray):
            super().__init__()
            self.config = {'kernel': kernel, 'x_ref': x_ref, 'initial_diffs': initial_diffs}
            self.kernel = kernel
            self.mean = tf.convert_to_tensor(x_ref.mean(0))  # average reference instance
            self.diffs = tf.Variable(initial_diffs, dtype=np.float32)
            self.bias = tf.Variable(tf.zeros((1,)))
            self.coeffs = tf.Variable(tf.zeros((len(initial_diffs),)))
        def call(self, x: tf.Tensor) -> tf.Tensor:
            """Return two-column logits [-z, z] so the model acts as a binary classifier."""
            k_xtl = self.kernel(x, self.mean + self.diffs)  # similarity to each test location
            logits = self.bias + k_xtl @ self.coeffs[:, None]
            return tf.concat([-logits, logits], axis=-1)
        def get_config(self) -> dict:
            """Return the kwargs needed to reconstruct the classifier."""
            return self.config
        @classmethod
        def from_config(cls, config):
            """Rebuild the classifier from a config produced by `get_config`."""
            return cls(**config)
    def predict(
        self, x: np.ndarray, return_p_val: bool = True, return_distance: bool = True,
        return_probs: bool = True, return_model: bool = False
    ) -> Dict[str, Dict[str, Union[str, int, float, Callable]]]:
        """
        Predict whether a batch of data has drifted from the reference data.
        Parameters
        ----------
        x
            Batch of instances.
        return_p_val
            Whether to return the p-value of the test.
        return_distance
            Whether to return a notion of strength of the drift.
            K-S test stat if binarize_preds=False, otherwise relative error reduction.
        return_probs
            Whether to return the instance level classifier probabilities for the reference and test data
            (0=reference data, 1=test data).
        return_model
            Whether to return the updated model trained to discriminate reference and test instances.
        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the detector's metadata.
        - ``'data'`` contains the drift prediction, the diffs used to distinguish reference from test instances, \
        and optionally the p-value, performance of the classifier relative to its expectation under the \
        no-change null, the out-of-fold classifier model prediction probabilities on the reference and test \
        data as well as well as the associated reference and test instances of the out-of-fold predictions, \
        and the trained model.
        """
        # always request the model internally so the learned diffs can be extracted
        preds = self._detector.predict(x, return_p_val, return_distance, return_probs, return_model=True)
        preds['data']['diffs'] = preds['data']['model'].diffs.numpy()
        preds['data']['diff_coeffs'] = preds['data']['model'].coeffs.numpy()
        if not return_model:
            del preds['data']['model']
        return preds
| 10,788 | 46.528634 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/preprocess.py | from typing import Callable, Dict, Optional, Type, Union
import numpy as np
import tensorflow as tf
from alibi_detect.utils.tensorflow.prediction import (
predict_batch, predict_batch_transformer)
from tensorflow.keras.layers import Dense, Flatten, Input, InputLayer
from tensorflow.keras.models import Model
class _Encoder(tf.keras.Model):
    """Feature encoder: an input layer followed by an MLP projection head.

    The head is either supplied directly via `mlp`, or built as a default
    three-layer funnel down to `enc_dim` from `enc_dim` and `step_dim`.
    """
    def __init__(
        self,
        input_layer: Union[tf.keras.layers.Layer, tf.keras.Model],
        mlp: Optional[tf.keras.Model] = None,
        enc_dim: Optional[int] = None,
        step_dim: Optional[int] = None
    ) -> None:
        super().__init__()
        self.input_layer = input_layer
        if isinstance(mlp, tf.keras.Model):
            # caller supplied a ready-made projection head
            self.mlp = mlp
        elif isinstance(enc_dim, int) and isinstance(step_dim, int):
            # default head: funnel down to enc_dim over three dense layers
            head_layers = [
                Flatten(),
                Dense(enc_dim + 2 * step_dim, activation=tf.nn.relu),
                Dense(enc_dim + step_dim, activation=tf.nn.relu),
                Dense(enc_dim, activation=None)
            ]
            self.mlp = tf.keras.Sequential(head_layers)
        else:
            raise ValueError('Need to provide either `enc_dim` and `step_dim` or a '
                             'tf.keras.Sequential or tf.keras.Model `mlp`')
    def call(self, x: Union[np.ndarray, tf.Tensor, Dict[str, tf.Tensor]]) -> tf.Tensor:
        return self.mlp(self.input_layer(x))
class UAE(tf.keras.Model):
    """Untrained AutoEncoder wrapper: exposes a (possibly default) encoder network."""
    def __init__(
        self,
        encoder_net: Optional[tf.keras.Model] = None,
        input_layer: Optional[Union[tf.keras.layers.Layer, tf.keras.Model]] = None,
        shape: Optional[tuple] = None,
        enc_dim: Optional[int] = None
    ) -> None:
        super().__init__()
        if isinstance(encoder_net, tf.keras.Model):
            # custom encoder supplied by the caller
            self.encoder = encoder_net
        elif isinstance(enc_dim, int):
            # build the default encoder from the input shape and encoding dimension
            if input_layer is None:
                input_layer = InputLayer(input_shape=shape)
            input_dim = np.prod(shape)
            step_dim = int((input_dim - enc_dim) / 3)
            self.encoder = _Encoder(input_layer, enc_dim=enc_dim, step_dim=step_dim)
        else:
            raise ValueError('Need to provide either `enc_dim` or a tf.keras.Sequential'
                             ' or tf.keras.Model `encoder_net`.')
    def call(self, x: Union[np.ndarray, tf.Tensor, Dict[str, tf.Tensor]]) -> tf.Tensor:
        return self.encoder(x)
class HiddenOutput(tf.keras.Model):
    """Model wrapper exposing the output of a hidden layer of `model`.

    Parameters
    ----------
    model
        tf.keras model whose hidden layer output is returned.
    layer
        Index into `model.layers` of the layer whose output is exposed.
    input_shape
        Optional input shape, used to build the model if it has no inputs yet.
    flatten
        Whether to flatten the hidden layer output.
    """
    def __init__(
        self,
        model: tf.keras.Model,
        layer: int = -1,
        input_shape: Optional[tuple] = None,
        flatten: bool = False
    ) -> None:
        super().__init__()
        if input_shape and not model.inputs:
            # model not built yet: trace it once with a symbolic input
            inputs = Input(shape=input_shape)
            model.call(inputs)
        else:
            inputs = model.inputs
        self.model = Model(inputs=inputs, outputs=model.layers[layer].output)
        self.flatten = Flatten() if flatten else tf.identity
    def call(self, x: Union[np.ndarray, tf.Tensor]) -> tf.Tensor:
        return self.flatten(self.model(x))
def preprocess_drift(x: Union[np.ndarray, list], model: tf.keras.Model,
                     preprocess_batch_fn: Optional[Callable] = None, tokenizer: Optional[Callable] = None,
                     max_len: Optional[int] = None, batch_size: int = int(1e10),
                     dtype: Type[np.generic] = np.float32) \
        -> Union[np.ndarray, tf.Tensor]:
    """
    Prediction function used for preprocessing step of drift detector.
    Parameters
    ----------
    x
        Batch of instances.
    model
        Model used for preprocessing.
    preprocess_batch_fn
        Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
        processed by the TensorFlow model.
    tokenizer
        Optional tokenizer for text drift.
    max_len
        Optional max token length for text drift.
    batch_size
        Batch size.
    dtype
        Model output type, e.g. np.float32 or tf.float32.
    Returns
    -------
    Numpy array with predictions.
    """
    if tokenizer is None:
        return predict_batch(x, model, batch_size=batch_size, preprocess_fn=preprocess_batch_fn, dtype=dtype)
    else:
        # text data: tokenize the batches before feeding them to the transformer model
        return predict_batch_transformer(x, model, tokenizer, max_len, batch_size=batch_size, dtype=dtype)
| 4,509 | 36.272727 | 111 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_contextmmd_tf.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable, List
from alibi_detect.cd.tensorflow.context_aware import ContextMMDDriftTF
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
# data size and model dimensions for the tests below; fix the tf seed for reproducibility
n, n_hidden, n_classes = 250, 10, 3
tf.random.set_seed(0)
def mymodel(shape):
    """Small softmax classifier whose hidden layer is used for preprocessing tests."""
    inp = Input(shape=shape)
    hidden = Dense(n_hidden)(inp)
    out = Dense(n_classes, activation='softmax')(hidden)
    return tf.keras.models.Model(inputs=inp, outputs=out)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
    """Concatenate a list of arrays into a single array along the first axis."""
    stacked = np.concatenate(x, axis=0)
    return stacked
# parameter grid over which the context-aware MMD tests are run
n_features = [10]
n_enc = [None, 3]
preprocess = [
    (None, None),
    (preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
    (preprocess_drift, {'model': UAE}),
    (preprocess_list, None)
]
update_ref = [{'last': 750}, None]
preprocess_at_init = [True, False]
n_permutations = [10]
tests_context_mmddrift = list(product(n_features, n_enc, preprocess,
                                      n_permutations, update_ref, preprocess_at_init))
n_tests = len(tests_context_mmddrift)
@pytest.fixture
def context_mmd_params(request):
    """Return one parameter combination from the context-MMD test grid."""
    return tests_context_mmddrift[request.param]
@pytest.mark.parametrize('context_mmd_params', list(range(n_tests)), indirect=True)
def test_context_mmd(context_mmd_params):
    """Check ContextMMDDriftTF across the parameter grid: no drift on data drawn
    from the reference distribution, consistent p-value/distance thresholds on
    data from a shifted distribution, and correct reference-update behaviour."""
    n_features, n_enc, preprocess, n_permutations, update_ref, preprocess_at_init = context_mmd_params
    np.random.seed(0)
    # context variable c and features x correlated with the context
    c_ref = np.random.randn(*(n, 1)).astype(np.float32)
    x_ref = c_ref + np.random.randn(*(n, n_features)).astype(np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        if not preprocess_at_init:
            pytest.skip("Skip tests where preprocess_at_init=False and x_ref is list.")
        to_list = True
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable):
        # instantiate the preprocessing model (hidden-layer output or untrained AE)
        if 'layer' in list(preprocess_kwargs.keys()) \
                and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
            model = mymodel((n_features,))
            layer = preprocess_kwargs['layer']
            preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
        elif preprocess_kwargs['model'].__name__ == 'UAE' \
                and n_features > 1 and isinstance(n_enc, int):
            tf.random.set_seed(0)
            encoder_net = tf.keras.Sequential(
                [
                    InputLayer(input_shape=(n_features,)),
                    Dense(n_enc)
                ]
            )
            preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
        else:
            preprocess_fn = None
    cd = ContextMMDDriftTF(
        x_ref=x_ref,
        c_ref=c_ref,
        p_val=.05,
        preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
        update_ref=update_ref,
        preprocess_fn=preprocess_fn,
        n_permutations=n_permutations
    )
    c = c_ref.copy()
    x = x_ref.copy()
    # identical data -> no drift expected
    preds = cd.predict(x, c, return_p_val=True, return_distance=False, return_coupling=True)
    assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
    assert preds['data']['distance'] is None
    assert isinstance(preds['data']['coupling_xy'], np.ndarray)
    if isinstance(update_ref, dict):
        k = list(update_ref.keys())[0]
        assert cd.n == len(x) + len(x_ref)
        assert cd.x_ref.shape[0] == min(update_ref[k], len(x) + len(x_ref))
        assert cd.c_ref.shape[0] == min(update_ref[k], len(x) + len(c_ref))
    # fresh context/features -> drift may or may not fire; check threshold consistency
    c_h1 = np.random.randn(*(n, 1)).astype(np.float32)
    x_h1 = c_h1 + np.random.randn(*(n, n_features)).astype(np.float32)
    if to_list:
        x_h1 = [_[None, :] for _ in x_h1]
    preds = cd.predict(x_h1, c_h1, return_p_val=True, return_distance=True, return_coupling=False)
    if preds['data']['is_drift'] == 1:
        assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] > preds['data']['distance_threshold']
    else:
        assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] <= preds['data']['distance_threshold']
    assert 'coupling_xy' not in preds['data']
| 4,514 | 37.262712 | 102 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_classifier_tf.py | from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from typing import Union
from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF
# number of instances in the reference set
n = 100
def mymodel(shape, softmax: bool = True):
    """Two-layer binary classifier; returns softmax probs or raw logits."""
    inp = Input(shape=shape)
    hidden = Dense(20, activation=tf.nn.relu)(inp)
    logits = Dense(2)(hidden)
    out = tf.nn.softmax(logits) if softmax else logits
    return tf.keras.models.Model(inputs=inp, outputs=out)
# test List[Any] inputs to the detector
def identity_fn(x: Union[np.ndarray, list]) -> np.ndarray:
    """Return `x` as a numpy array; lists are converted, arrays pass through unchanged."""
    return np.array(x) if isinstance(x, list) else x
# parameter grid over which the classifier drift tests are run
p_val = [.05]
n_features = [4]
preds_type = ['probs', 'logits']
binarize_preds = [True, False]
n_folds = [None, 2]
train_size = [.5]
preprocess_batch = [None, identity_fn]
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
tests_clfdrift = list(product(p_val, n_features, preds_type, binarize_preds, n_folds,
                              train_size, preprocess_batch, update_x_ref))
n_tests = len(tests_clfdrift)
@pytest.fixture
def clfdrift_params(request):
    """Return one parameter combination from the classifier drift test grid."""
    return tests_clfdrift[request.param]
@pytest.mark.parametrize('clfdrift_params', list(range(n_tests)), indirect=True)
def test_clfdrift(clfdrift_params):
    """Check ClassifierDriftTF: identical data should not be flagged as drift,
    constant (all-ones) data should be, with a strictly larger distance."""
    p_val, n_features, preds_type, binarize_preds, n_folds, \
        train_size, preprocess_batch, update_x_ref = clfdrift_params
    np.random.seed(0)
    tf.random.set_seed(0)
    model = mymodel((n_features,), softmax=(preds_type == 'probs'))
    x_ref = np.random.randn(*(n, n_features))
    x_test1 = np.ones_like(x_ref)
    to_list = False
    if preprocess_batch is not None:
        # exercise List[Any] inputs; reference updating is disabled in that case
        to_list = True
        x_ref = [_ for _ in x_ref]
        update_x_ref = None
    cd = ClassifierDriftTF(
        x_ref=x_ref,
        model=model,
        p_val=p_val,
        update_x_ref=update_x_ref,
        train_size=train_size,
        n_folds=n_folds,
        preds_type=preds_type,
        binarize_preds=binarize_preds,
        preprocess_batch_fn=preprocess_batch,
        batch_size=1
    )
    x_test0 = x_ref.copy()
    preds_0 = cd.predict(x_test0)
    assert cd.n == len(x_test0) + len(x_ref)
    assert preds_0['data']['is_drift'] == 0
    assert preds_0['data']['distance'] >= 0
    if to_list:
        x_test1 = [_ for _ in x_test1]
    preds_1 = cd.predict(x_test1)
    assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
    assert preds_1['data']['is_drift'] == 1
    assert preds_1['data']['distance'] >= 0
    assert preds_0['data']['distance'] < preds_1['data']['distance']
    assert cd.meta['params']['preds_type'] == preds_type
    assert cd.meta['params']['binarize_preds '] == binarize_preds  # NOTE(review): trailing space in key appears to match the detector's meta key — confirm upstream
| 2,729 | 28.354839 | 85 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_lsdd_tf.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable, List
from alibi_detect.cd.tensorflow.lsdd import LSDDDriftTF
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
# data size and model dimensions for the LSDD tests; fix the tf seed for reproducibility
n, n_hidden, n_classes = 500, 10, 5
tf.random.set_seed(0)
def mymodel(shape):
    """Small softmax classifier whose hidden layer is used for preprocessing tests."""
    inp = Input(shape=shape)
    hidden = Dense(n_hidden)(inp)
    out = Dense(n_classes, activation='softmax')(hidden)
    return tf.keras.models.Model(inputs=inp, outputs=out)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
    """Concatenate a list of arrays into a single array along the first axis."""
    stacked = np.concatenate(x, axis=0)
    return stacked
# parameter grid over which the LSDD drift tests are run
n_features = [10]
n_enc = [None, 3]
preprocess = [
    (None, None),
    (preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
    (preprocess_drift, {'model': UAE}),
    (preprocess_list, None)
]
update_x_ref = [None]
preprocess_at_init = [True, False]
n_permutations = [10]
tests_lsdddrift = list(product(n_features, n_enc, preprocess,
                               n_permutations, update_x_ref, preprocess_at_init))
n_tests = len(tests_lsdddrift)
@pytest.fixture
def lsdd_params(request):
    """Return one parameter combination from the LSDD test grid."""
    return tests_lsdddrift[request.param]
@pytest.mark.parametrize('lsdd_params', list(range(n_tests)), indirect=True)
def test_lsdd(lsdd_params):
    """Check LSDDDriftTF: lightly perturbed reference data should not be flagged
    as drift, while data from a fresh distribution should yield consistent
    p-value/distance thresholds, across the preprocessing parameter grid."""
    n_features, n_enc, preprocess, n_permutations, update_x_ref, preprocess_at_init = lsdd_params
    np.random.seed(0)
    x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        if not preprocess_at_init:
            # skip explicitly (rather than silently returning) so the unsupported
            # combination shows up as skipped in the test report; mirrors the
            # equivalent check in test_contextmmd_tf.py
            pytest.skip("Skip tests where preprocess_at_init=False and x_ref is list.")
        to_list = True
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable):
        # instantiate the preprocessing model (hidden-layer output or untrained AE)
        if 'layer' in list(preprocess_kwargs.keys()) \
                and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
            model = mymodel((n_features,))
            layer = preprocess_kwargs['layer']
            preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
        elif preprocess_kwargs['model'].__name__ == 'UAE' \
                and n_features > 1 and isinstance(n_enc, int):
            tf.random.set_seed(0)
            encoder_net = tf.keras.Sequential(
                [
                    InputLayer(input_shape=(n_features,)),
                    Dense(n_enc)
                ]
            )
            preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
        else:
            preprocess_fn = None
    else:
        preprocess_fn = None
    cd = LSDDDriftTF(
        x_ref=x_ref,
        p_val=.05,
        preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
        update_x_ref=update_x_ref,
        preprocess_fn=preprocess_fn,
        n_permutations=n_permutations
    )
    perturbation = np.random.normal(size=(n, n_features)) / 100  # LSDD struggles with copies/repeats
    x = x_ref.copy() + perturbation.astype(np.float32)
    preds = cd.predict(x, return_p_val=True)
    assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
    if isinstance(update_x_ref, dict):
        k = list(update_x_ref.keys())[0]
        assert cd.n == len(x) + len(x_ref)
        assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref))
    # fresh draw from the same distribution -> drift may or may not fire; check consistency
    x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    if to_list:
        x_h1 = [_[None, :] for _ in x_h1]
    preds = cd.predict(x_h1, return_p_val=True)
    if preds['data']['is_drift'] == 1:
        assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] > preds['data']['distance_threshold']
    else:
        assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] <= preds['data']['distance_threshold']
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_lsdd_online_tf.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable, List
from alibi_detect.cd.tensorflow.lsdd_online import LSDDDriftOnlineTF
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
from alibi_detect.utils._random import fixed_seed
# data size and model dimensions for the online LSDD tests
n, n_hidden, n_classes = 400, 10, 5
def mymodel(shape):
    """Small softmax classifier whose hidden layer is used for preprocessing tests."""
    inp = Input(shape=shape)
    hidden = Dense(n_hidden)(inp)
    out = Dense(n_classes, activation='softmax')(hidden)
    return tf.keras.models.Model(inputs=inp, outputs=out)
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
    """Turn a list of arrays into a single array.

    Multi-element lists (reference data, Any=np.ndarray) are concatenated along
    axis 0; a single-element list (one prediction input, Any=List[np.ndarray])
    is unwrapped instead.
    """
    if len(x) > 1:
        return np.concatenate(x, axis=0)
    return np.array(x)[0]
# parameter grid over which the online LSDD drift tests are run
n_features = [10]
n_enc = [None, 3]
ert = [25]
window_size = [5]
preprocess = [
    (None, None),
    (preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
    (preprocess_drift, {'model': UAE}),
    (preprocess_list, None)
]
n_bootstraps = [200]
tests_lsdddriftonline = list(product(n_features, n_enc, ert, window_size, preprocess, n_bootstraps))
n_tests = len(tests_lsdddriftonline)
@pytest.fixture
def lsdd_online_params(request):
    """Return one parameter combination from the online LSDD test grid."""
    return tests_lsdddriftonline[request.param]
@pytest.mark.parametrize('lsdd_online_params', list(range(n_tests)), indirect=True)
def test_lsdd_online(lsdd_online_params, seed):
    """Check LSDDDriftOnlineTF: the average run length on in-distribution data
    should be of the order of the configured ERT, while shifted data should be
    detected quickly and yield larger test statistics."""
    n_features, n_enc, ert, window_size, preprocess, n_bootstraps = lsdd_online_params
    with fixed_seed(seed):
        x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        to_list = True
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable):
        # instantiate the preprocessing model (hidden-layer output or untrained AE)
        if 'layer' in list(preprocess_kwargs.keys()) \
                and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
            model = mymodel((n_features,))
            layer = preprocess_kwargs['layer']
            preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
        elif preprocess_kwargs['model'].__name__ == 'UAE' \
                and n_features > 1 and isinstance(n_enc, int):
            with fixed_seed(0):
                encoder_net = tf.keras.Sequential(
                    [
                        InputLayer(input_shape=(n_features,)),
                        Dense(n_enc)
                    ]
                )
                preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
        else:
            preprocess_fn = None
    else:
        preprocess_fn = None
    with fixed_seed(seed):
        cd = LSDDDriftOnlineTF(
            x_ref=x_ref,
            ert=ert,
            window_size=window_size,
            preprocess_fn=preprocess_fn,
            n_bootstraps=n_bootstraps
        )
        # H0 stream from the reference distribution, H1 stream shifted by +1
        x_h0 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
        x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32) + 1
    detection_times_h0 = []
    test_stats_h0 = []
    for x_t in x_h0:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h0.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h0.append(pred_t['data']['time'])
            cd.reset_state()
    average_delay_h0 = np.array(detection_times_h0).mean()
    test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
    # false positives under H0 should occur roughly once every `ert` steps
    assert ert/3 < average_delay_h0 < 3*ert
    cd.reset_state()
    detection_times_h1 = []
    test_stats_h1 = []
    for x_t in x_h1:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h1.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h1.append(pred_t['data']['time'])
            cd.reset_state()
    average_delay_h1 = np.array(detection_times_h1).mean()
    test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
    # under drift, detections should come well within the expected run time
    assert np.abs(average_delay_h1) < ert/2
    assert np.mean(test_stats_h1) > np.mean(test_stats_h0)
def test_lsdd_online_state_online(tmp_path, seed):
    """
    Test save/load/reset state methods for LSDDDriftOnlineTF. State is saved, reset, and loaded, with
    prediction results and stateful attributes compared to original.
    """
    n = 100
    with fixed_seed(seed):
        # n_classes is reused here as the feature dimension of the synthetic data
        x_ref = np.random.normal(0, 1, (n, n_classes))
        x = np.random.normal(0.1, 1, (n, n_classes))
        dd = LSDDDriftOnlineTF(x_ref, window_size=10, ert=20)
    # Store state for comparison
    state_dict_t0 = {}
    for key in dd.online_state_keys:
        state_dict_t0[key] = getattr(dd, key)
    # Run over the stream, saving detector state at t=5
    test_stats_1 = []
    for t, x_t in enumerate(x):
        if t == 5:
            dd.save_state(tmp_path)
            # Store state for comparison
            state_dict_t5 = {}
            for key in dd.online_state_keys:
                state_dict_t5[key] = getattr(dd, key)
        preds = dd.predict(x_t)
        test_stats_1.append(preds['data']['test_stat'])
    # Reset and check state cleared
    dd.reset_state()
    for key, orig_val in state_dict_t0.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc
    # Repeat, check that same test_stats both times
    test_stats_2 = []
    for t, x_t in enumerate(x):
        preds = dd.predict(x_t)
        test_stats_2.append(preds['data']['test_stat'])
    np.testing.assert_array_equal(test_stats_1, test_stats_2)
    # Load state from t=5 timestep
    dd.load_state(tmp_path)
    # Compare stateful attributes to original at t=5
    for key, orig_val in state_dict_t5.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc
    # Compare predictions to original at t=5
    new_pred = dd.predict(x[5])
    assert new_pred['data']['test_stat'] == test_stats_1[5]
| 6,400 | 35.787356 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_spot_the_diff_tf.py | from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense
from typing import Union
from alibi_detect.cd.tensorflow.spot_the_diff import SpotTheDiffDriftTF
# number of instances in the reference set
n = 100
class MyKernel(tf.keras.Model):  # TODO: Support then test models using keras functional API
    """Trainable linear-projection kernel: k(x, y) = <Wx, Wy> via a shared Dense layer."""
    def __init__(self, n_features: int):
        super().__init__()
        self.config = {'n_features': n_features}
        self.dense = Dense(20)
    def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
        # project both inputs, then take pairwise inner products -> Gram matrix
        x_proj = self.dense(x)
        y_proj = self.dense(y)
        return tf.einsum('ji,ki->jk', x_proj, y_proj)
    def get_config(self) -> dict:
        """Return the kwargs needed to reconstruct the kernel."""
        return self.config
    @classmethod
    def from_config(cls, config):
        """Rebuild the kernel from a config produced by `get_config`."""
        return cls(**config)
# test List[Any] inputs to the detector
def identity_fn(x: Union[np.ndarray, list]) -> np.ndarray:
    """Return `x` as a numpy array; lists are converted, arrays pass through unchanged."""
    return np.array(x) if isinstance(x, list) else x
# parameter grid over which the spot-the-diff drift tests are run
p_val = [.05]
n_features = [4]
train_size = [.5]
preprocess_batch = [None, identity_fn]
kernel = [None, MyKernel]
n_diffs = [1, 5]
tests_stddrift = list(product(p_val, n_features, train_size, preprocess_batch, kernel, n_diffs))
n_tests = len(tests_stddrift)
@pytest.fixture
def stddrift_params(request):
    # Indirectly-parametrized fixture: maps a test index to its parameter tuple.
    return tests_stddrift[request.param]
@pytest.mark.parametrize('stddrift_params', list(range(n_tests)), indirect=True)
def test_stddrift(stddrift_params):
    """End-to-end test of SpotTheDiffDriftTF.

    Predicting on a copy of the reference data should not flag drift and should
    return diffs/coefficients of the expected shapes; an all-ones batch should
    flag drift with a larger test distance.
    """
    p_val, n_features, train_size, preprocess_batch, kernel, n_diffs = stddrift_params
    np.random.seed(0)
    tf.random.set_seed(0)
    if kernel is not None:
        # The parametrization passes the kernel class; instantiate it here.
        kernel = kernel(n_features)
    x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
    x_test1 = np.ones_like(x_ref)
    to_list = False
    if preprocess_batch is not None:
        # Exercise List[np.ndarray] inputs when a batch-preprocessing fn is given.
        to_list = True
        x_ref = [_ for _ in x_ref]
    cd = SpotTheDiffDriftTF(
        x_ref=x_ref,
        kernel=kernel,
        p_val=p_val,
        n_diffs=n_diffs,
        train_size=train_size,
        preprocess_batch_fn=preprocess_batch,
        batch_size=3,
        epochs=1
    )
    x_test0 = x_ref.copy()
    # Same data as the reference set: no drift expected.
    preds_0 = cd.predict(x_test0)
    assert cd._detector.n == len(x_test0) + len(x_ref)
    assert preds_0['data']['is_drift'] == 0
    assert preds_0['data']['diffs'].shape == (n_diffs, n_features)
    assert preds_0['data']['diff_coeffs'].shape == (n_diffs,)
    if to_list:
        x_test1 = [_ for _ in x_test1]
    # Constant batch differs strongly from N(0, 1) reference: drift expected.
    preds_1 = cd.predict(x_test1)
    assert cd._detector.n == len(x_test1) + len(x_test0) + len(x_ref)
    assert preds_1['data']['is_drift'] == 1
    assert preds_0['data']['distance'] < preds_1['data']['distance']
    assert cd.meta['params']['n_diffs'] == n_diffs
| 2,674 | 27.157895 | 96 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_mmd_tf.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable, List
from alibi_detect.cd.tensorflow.mmd import MMDDriftTF
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
n, n_hidden, n_classes = 500, 10, 5
tf.random.set_seed(0)
def mymodel(shape):
    """Build a small functional-API softmax classifier; its hidden layers are
    extracted via ``HiddenOutput`` in the tests below."""
    x_in = Input(shape=shape)
    x = Dense(n_hidden)(x_in)
    x_out = Dense(n_classes, activation='softmax')(x)
    return tf.keras.models.Model(inputs=x_in, outputs=x_out)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
    """Stack a list of arrays along the first axis into a single array."""
    arrays = list(x)
    return np.concatenate(arrays, axis=0)
n_features = [10]
n_enc = [None, 3]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_drift, {'model': UAE}),
(preprocess_list, None)
]
update_x_ref = [{'last': 750}, {'reservoir_sampling': 750}, None]
preprocess_at_init = [True, False]
n_permutations = [10]
tests_mmddrift = list(product(n_features, n_enc, preprocess,
n_permutations, update_x_ref, preprocess_at_init))
n_tests = len(tests_mmddrift)
@pytest.fixture
def mmd_params(request):
    # Indirectly-parametrized fixture: maps a test index to its parameter tuple.
    return tests_mmddrift[request.param]
@pytest.mark.parametrize('mmd_params', list(range(n_tests)), indirect=True)
def test_mmd(mmd_params):
    """End-to-end test of MMDDriftTF across preprocessing configurations.

    Checks that no drift is flagged on the reference data itself, that the
    reference set is updated when `update_x_ref` is configured, and that the
    p-value/distance outputs are internally consistent for shifted data.
    """
    n_features, n_enc, preprocess, n_permutations, update_x_ref, preprocess_at_init = mmd_params
    np.random.seed(0)
    x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        # List inputs are only exercised with preprocessing applied at init.
        if not preprocess_at_init:
            return
        to_list = True
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable):
        if 'layer' in list(preprocess_kwargs.keys()) \
                and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
            # Extract features from a hidden layer of the small classifier.
            model = mymodel((n_features,))
            layer = preprocess_kwargs['layer']
            preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
        elif preprocess_kwargs['model'].__name__ == 'UAE' \
                and n_features > 1 and isinstance(n_enc, int):
            # Untrained AutoEncoder projection down to n_enc dimensions.
            tf.random.set_seed(0)
            encoder_net = tf.keras.Sequential(
                [
                    InputLayer(input_shape=(n_features,)),
                    Dense(n_enc)
                ]
            )
            preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
        else:
            preprocess_fn = None
    else:
        preprocess_fn = None
    cd = MMDDriftTF(
        x_ref=x_ref,
        p_val=.05,
        preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
        update_x_ref=update_x_ref,
        preprocess_fn=preprocess_fn,
        n_permutations=n_permutations
    )
    x = x_ref.copy()
    # Same data as the reference set: no drift expected.
    preds = cd.predict(x, return_p_val=True)
    assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
    if isinstance(update_x_ref, dict):
        k = list(update_x_ref.keys())[0]
        assert cd.n == len(x) + len(x_ref)
        assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref))
    x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    if to_list:
        x_h1 = [_[None, :] for _ in x_h1]
    preds = cd.predict(x_h1, return_p_val=True)
    # Whatever the drift decision, p-value and distance must agree with it.
    if preds['data']['is_drift'] == 1:
        assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] > preds['data']['distance_threshold']
    else:
        assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] <= preds['data']['distance_threshold']
| 3,983 | 35.218182 | 96 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_mmd_online_tf.py | from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable, List
from alibi_detect.cd.tensorflow.mmd_online import MMDDriftOnlineTF
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
from alibi_detect.utils._random import fixed_seed
n, n_hidden, n_classes = 400, 10, 5
def mymodel(shape):
    """Build a small functional-API softmax classifier; its hidden layers are
    extracted via ``HiddenOutput`` in the tests below."""
    x_in = Input(shape=shape)
    x = Dense(n_hidden)(x_in)
    x_out = Dense(n_classes, activation='softmax')(x)
    return tf.keras.models.Model(inputs=x_in, outputs=x_out)
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
    """Concatenate a multi-element list along axis 0; unwrap a single-element list."""
    multiple = len(x) > 1
    if multiple:  # reference data passed as List[np.ndarray]
        return np.concatenate(x, axis=0)
    # prediction-time input: a one-element list wrapping the actual array
    return np.array(x)[0]
n_features = [10]
n_enc = [None, 3]
ert = [25]
window_size = [5]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_drift, {'model': UAE}),
(preprocess_list, None)
]
n_bootstraps = [200]
tests_mmddriftonline = list(product(n_features, n_enc, ert, window_size, preprocess, n_bootstraps))
n_tests = len(tests_mmddriftonline)
@pytest.fixture
def mmd_online_params(request):
    # Indirectly-parametrized fixture: maps a test index to its parameter tuple.
    return tests_mmddriftonline[request.param]
@pytest.mark.parametrize('mmd_online_params', list(range(n_tests)), indirect=True)
def test_mmd_online(mmd_online_params, seed):
    """End-to-end test of MMDDriftOnlineTF.

    Streams no-drift (H0) data and checks the average detection time is of the
    order of the configured expected run time (ERT); then streams shifted (H1)
    data and checks drift is detected quickly, with larger test statistics.
    """
    n_features, n_enc, ert, window_size, preprocess, n_bootstraps = mmd_online_params
    with fixed_seed(seed):
        x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        # Exercise List[np.ndarray] reference data and list-wrapped stream items.
        to_list = True
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable):
        if 'layer' in list(preprocess_kwargs.keys()) \
                and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
            # Extract features from a hidden layer of the small classifier.
            model = mymodel((n_features,))
            layer = preprocess_kwargs['layer']
            preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
        elif preprocess_kwargs['model'].__name__ == 'UAE' \
                and n_features > 1 and isinstance(n_enc, int):
            # Untrained AutoEncoder projection down to n_enc dimensions.
            with fixed_seed(0):
                encoder_net = tf.keras.Sequential(
                    [
                        InputLayer(input_shape=(n_features,)),
                        Dense(n_enc)
                    ]
                )
            preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
        else:
            preprocess_fn = None
    else:
        preprocess_fn = None
    with fixed_seed(seed):
        cd = MMDDriftOnlineTF(
            x_ref=x_ref,
            ert=ert,
            window_size=window_size,
            preprocess_fn=preprocess_fn,
            n_bootstraps=n_bootstraps
        )
        x_h0 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
        x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32) + 1
    # H0 stream: same distribution as the reference data.
    detection_times_h0 = []
    test_stats_h0 = []
    for x_t in x_h0:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h0.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h0.append(pred_t['data']['time'])
            cd.reset_state()
    average_delay_h0 = np.array(detection_times_h0).mean()
    test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
    # False positives should occur roughly once per ERT time steps.
    assert ert/3 < average_delay_h0 < 3*ert
    cd.reset_state()
    # H1 stream: mean-shifted data, drift should be detected quickly.
    detection_times_h1 = []
    test_stats_h1 = []
    for x_t in x_h1:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h1.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h1.append(pred_t['data']['time'])
            cd.reset_state()
    average_delay_h1 = np.array(detection_times_h1).mean()
    # NOTE: a leftover debug `print(detection_times_h0, average_delay_h0)` was removed here.
    test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
    assert np.abs(average_delay_h1) < ert/2
    assert np.mean(test_stats_h1) > np.mean(test_stats_h0)
def test_mmd_online_state_online(tmp_path, seed):
    """
    Test save/load/reset state methods for MMDDriftOnlineTF. State is saved, reset, and loaded, with
    prediction results and stateful attributes compared to original.
    """
    n = 100
    with fixed_seed(seed):
        x_ref = np.random.normal(0, 1, (n, n_classes))
        x = np.random.normal(0.1, 1, (n, n_classes))
        dd = MMDDriftOnlineTF(x_ref, window_size=10, ert=20)
    # Store state for comparison
    state_dict_t0 = {}
    for key in dd.online_state_keys:
        state_dict_t0[key] = getattr(dd, key)
    # Run for 10 time steps
    test_stats_1 = []
    for t, x_t in enumerate(x):
        if t == 5:
            # Snapshot the detector state mid-stream so it can be restored later.
            dd.save_state(tmp_path)
            # Store state for comparison
            state_dict_t5 = {}
            for key in dd.online_state_keys:
                state_dict_t5[key] = getattr(dd, key)
        preds = dd.predict(x_t)
        test_stats_1.append(preds['data']['test_stat'])
    # Reset and check state cleared
    dd.reset_state()
    for key, orig_val in state_dict_t0.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc
    # Repeat, check that same test_stats both times
    test_stats_2 = []
    for t, x_t in enumerate(x):
        preds = dd.predict(x_t)
        test_stats_2.append(preds['data']['test_stat'])
    np.testing.assert_array_equal(test_stats_1, test_stats_2)
    # Load state from t=5 timestep
    dd.load_state(tmp_path)
    # Compare stateful attributes to original at t=5
    for key, orig_val in state_dict_t5.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles torch.Tensor etc
    # Compare predictions to original at t=5
    new_pred = dd.predict(x[5])
    assert new_pred['data']['test_stat'] == test_stats_1[5]
| 6,434 | 35.771429 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_preprocess_tf.py | import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from alibi_detect.cd.tensorflow import UAE, HiddenOutput
n, n_features, n_classes, latent_dim = 100, 10, 5, 2
X_uae = np.random.rand(n * n_features).reshape(n, n_features).astype('float32')
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(n_features,)),
Dense(latent_dim)
]
)
tests_uae = [encoder_net, latent_dim]
n_tests_uae = len(tests_uae)
@pytest.fixture
def uae_params(request):
    # Indirectly-parametrized fixture: maps a test index to its parameter value.
    return tests_uae[request.param]
@pytest.mark.parametrize('uae_params', list(range(n_tests_uae)), indirect=True)
def test_uae(uae_params):
    """Check that UAE encodes inputs to the expected latent shape, whether it is
    given an explicit encoder network or just a latent dimension."""
    enc = uae_params
    # tests_uae contains exactly a Sequential encoder and an int latent dim,
    # so one of these branches always binds encoder_net/enc_dim.
    if isinstance(enc, tf.keras.Sequential):
        encoder_net, enc_dim = enc, None
    elif isinstance(enc, int):
        encoder_net, enc_dim = None, enc
    X_enc = UAE(encoder_net=encoder_net, shape=X_uae.shape[1:], enc_dim=enc_dim)(X_uae)
    assert X_enc.shape == (n, latent_dim)
dim1, dim2, n_hidden = 2, 3, 7
n_features = dim1 * dim2
shape = (dim1, dim2)
X_h = np.random.rand(n * n_features).reshape((n,) + shape).astype('float32')
class Model1(tf.keras.Model):
    """Subclassed Keras model: a Dense hidden layer followed by a softmax head."""
    def __init__(self):
        super(Model1, self).__init__()
        self.dense1 = Dense(n_hidden)
        self.dense2 = Dense(n_classes, activation='softmax')
    def call(self, x: np.ndarray) -> tf.Tensor:
        x = self.dense1(x)
        return self.dense2(x)
def model2():
    """Functional-API counterpart of Model1: Dense hidden layer + softmax head."""
    inp = Input(shape=(dim1, dim2))
    hidden = Dense(n_hidden)(inp)
    out = Dense(n_classes, activation='softmax')(hidden)
    return tf.keras.models.Model(inputs=inp, outputs=out)
tests_hidden_output = [
(1, -2, shape, True), (1, -2, shape, False),
(1, -1, shape, True), (1, -1, shape, False),
(2, -2, None, True), (2, -2, None, False),
(2, -1, None, True), (2, -1, None, False),
(2, -1, shape, True), (2, -1, shape, False)
]
n_tests_hidden_output = len(tests_hidden_output)
@pytest.fixture
def hidden_output_params(request):
    # Indirectly-parametrized fixture: maps a test index to its parameter tuple.
    return tests_hidden_output[request.param]
@pytest.mark.parametrize('hidden_output_params', list(range(n_tests_hidden_output)), indirect=True)
def test_hidden_output(hidden_output_params):
    """Check that HiddenOutput extracts the selected hidden layer with the
    correct output shape, optionally flattened to 2-D.

    `layer=-2` selects the Dense(n_hidden) layer and `layer=-1` the final
    Dense(n_classes) layer; `flatten` collapses all non-batch dimensions.
    """
    model, layer, input_shape, flatten = hidden_output_params
    # Removed leftover debug print of the parameters here.
    # Model1 is a subclassed model; model2 uses the functional API.
    model = Model1() if model == 1 else model2()
    X_hidden = HiddenOutput(model=model, layer=layer, input_shape=input_shape, flatten=flatten)(X_h)
    if layer == -2:
        assert_shape = (n, dim1, n_hidden)
    elif layer == -1:
        assert_shape = (n, dim1, n_classes)
    if flatten:
        # Flattened output keeps the batch axis and multiplies out the rest.
        assert_shape = (assert_shape[0],) + (np.prod(assert_shape[1:]),)
    assert X_hidden.shape == assert_shape
| 2,732 | 29.707865 | 100 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_learned_kernel_tf.py | from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense
from typing import Union
from alibi_detect.cd.tensorflow.learned_kernel import LearnedKernelDriftTF
n = 100
class MyKernel(tf.keras.Model):  # TODO: Support then test models using keras functional API
    """Learnable kernel: a shared Dense projection followed by a pairwise
    dot-product (Gram matrix) between the projected input batches."""
    def __init__(self, n_features: int):
        super().__init__()
        # Stored so the kernel can be re-created via get_config/from_config.
        self.config = {'n_features': n_features}
        self.dense = Dense(20)
    def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
        proj_x = self.dense(x)
        proj_y = self.dense(y)
        # out[j, k] = <proj_x[j], proj_y[k]>
        return tf.einsum('ji,ki->jk', proj_x, proj_y)
    def get_config(self) -> dict:
        return self.config
    @classmethod
    def from_config(cls, config):
        return cls(**config)
# test List[Any] inputs to the detector
def identity_fn(x: Union[np.ndarray, list]) -> np.ndarray:
    """Return ``x`` unchanged, converting list inputs to numpy arrays first."""
    if not isinstance(x, list):
        return x
    return np.array(x)
p_val = [.05]
n_features = [4]
train_size = [.5]
preprocess_batch = [None, identity_fn]
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
tests_lkdrift = list(product(p_val, n_features, train_size, preprocess_batch, update_x_ref))
n_tests = len(tests_lkdrift)
@pytest.fixture
def lkdrift_params(request):
    # Indirectly-parametrized fixture: maps a test index to its parameter tuple.
    return tests_lkdrift[request.param]
@pytest.mark.parametrize('lkdrift_params', list(range(n_tests)), indirect=True)
def test_lkdrift(lkdrift_params):
    """End-to-end test of LearnedKernelDriftTF.

    Predicting on a copy of the reference data should not flag drift, while an
    all-ones batch should, with a larger test distance.
    """
    p_val, n_features, train_size, preprocess_batch, update_x_ref = lkdrift_params
    np.random.seed(0)
    tf.random.set_seed(0)
    kernel = MyKernel(n_features)
    x_ref = np.random.randn(*(n, n_features))
    x_test1 = np.ones_like(x_ref)
    to_list = False
    if preprocess_batch is not None:
        # Exercise List[np.ndarray] inputs; reference updating is disabled then.
        to_list = True
        x_ref = [_ for _ in x_ref]
        update_x_ref = None
    cd = LearnedKernelDriftTF(
        x_ref=x_ref,
        kernel=kernel,
        p_val=p_val,
        update_x_ref=update_x_ref,
        train_size=train_size,
        preprocess_batch_fn=preprocess_batch,
        batch_size=3,
        epochs=1
    )
    x_test0 = x_ref.copy()
    # Same data as the reference set: no drift expected.
    preds_0 = cd.predict(x_test0)
    assert cd.n == len(x_test0) + len(x_ref)
    assert preds_0['data']['is_drift'] == 0
    if to_list:
        x_test1 = [_ for _ in x_test1]
    # Constant batch differs strongly from N(0, 1) reference: drift expected.
    preds_1 = cd.predict(x_test1)
    assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
    assert preds_1['data']['is_drift'] == 1
    assert preds_0['data']['distance'] < preds_1['data']['distance']
| 2,479 | 26.555556 | 92 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/missing_optional_dependency.py | """Functionality for optional importing
This module provides a way to import optional dependencies. In the case that the user imports some functionality from
alibi-detect that is not usable due to missing optional dependencies this code is used to allow the import but replace
it with an object that throws an error on use. This way we avoid errors at import time that prevent the user using
functionality independent of the missing dependency.
"""
from typing import Union, List, Optional, Any
from string import Template
from importlib import import_module
err_msg_template = Template((
"Attempted to use $object_name without the correct optional dependencies installed. To install "
+ "the correct optional dependencies, run `pip install alibi-detect[$missing_dependency]` "
+ "from the command line. For more information, check the Installation documentation "
+ "at https://docs.seldon.io/projects/alibi-detect/en/stable/overview/getting_started.html."
))
"""Mapping used to ensure correct pip install message is generated if a missing optional dependency is detected. This
dict is used to control two behaviours:
1. When we import objects from missing dependencies we check that any `ModuleNotFoundError` or `ImportError`
corresponds to a missing optional dependency by checking the name of the missing dependency is in `ERROR_TYPES`. We
then map this name to the corresponding optional dependency bucket that will resolve the issue.
2. Some optional dependencies have multiple names such as `torch` and `pytorch`, instead of enforcing a single
naming convention across the whole code base we instead use `ERROR_TYPES` to capture both cases. This is done right
before the pip install message is issued as this is the most robust place to capture these differences.
"""
ERROR_TYPES = {
"prophet": 'prophet',
"tensorflow_probability": 'tensorflow',
"tensorflow": 'tensorflow',
"torch": 'torch',
"pytorch": 'torch',
"keops": 'keops',
"pykeops": 'keops',
}
class MissingDependency:
    """Placeholder for objects with unmet optional dependencies.

    An instance stands in for any object that could not be imported because its
    optional dependencies are missing. Accessing any attribute or calling the
    instance raises an ``ImportError`` with install instructions.
    """
    def __init__(self,
                 object_name: str,
                 err: Union[ModuleNotFoundError, ImportError],
                 missing_dependency: str = 'all',):
        """Create a placeholder for an object that failed to import.

        Parameters
        ----------
        object_name
            Name of object we are replacing
        err
            Error to be raised when the class is initialized or used
        missing_dependency
            Name of missing dependency required for object
        """
        self.object_name = object_name
        self.err = err
        self.missing_dependency = missing_dependency
    @property
    def err_msg(self) -> str:
        """Error message telling the user which optional dependency bucket to install."""
        return err_msg_template.substitute(object_name=self.object_name,
                                           missing_dependency=self.missing_dependency)
    def __getattr__(self, key):
        """Any attribute access fails with the install-instruction error."""
        raise ImportError(self.err_msg) from self.err
    def __call__(self, *args, **kwargs):
        """Any call fails with the install-instruction error."""
        raise ImportError(self.err_msg) from self.err
def import_optional(module_name: str, names: Optional[List[str]] = None) -> Any:
    """Import a module that depends on optional dependencies
    Note: This function is used to import modules that depend on optional dependencies. Because it mirrors the python
    import functionality its return type has to be `Any`. Using objects imported with this function can lead to
    misspecification of types as `Any` when the developer intended to be more restrictive.
    Parameters
    ----------
    module_name
        The module to import
    names
        The names to import from the module. If None, all names are imported.
    Returns
    -------
    The module or named objects within the modules if names is not None. If the import fails due to a \
    `ModuleNotFoundError` or `ImportError` then the requested module or named objects are replaced with instances of \
    the MissingDependency class above.
    """
    try:
        module = import_module(module_name)
        if names is not None:
            objs = tuple(getattr(module, name) for name in names)
            # Unwrap single-element tuples so callers get the object directly.
            return objs if len(objs) > 1 else objs[0]
        return module
    except (ImportError, ModuleNotFoundError) as err:
        if err.name is None:
            # No module name attached to the error, so it cannot be mapped to
            # an optional-dependency bucket: propagate unchanged.
            raise err
        # Only the top-level package name matters for the ERROR_TYPES mapping.
        dep_name, *_ = err.name.split('.')
        if str(dep_name) not in ERROR_TYPES:
            # Unrelated import failure (not a known optional dep): re-raise.
            raise err
        missing_dependency = ERROR_TYPES[dep_name]
        if names is not None:
            # Replace each requested name with a MissingDependency placeholder.
            missing_dependencies = \
                tuple(MissingDependency(
                    missing_dependency=missing_dependency,
                    object_name=name,
                    err=err) for name in names)
            return missing_dependencies if len(missing_dependencies) > 1 else missing_dependencies[0]
        return MissingDependency(
            missing_dependency=missing_dependency,
            object_name=module_name,
            err=err)
| 5,347 | 41.110236 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/_types.py | """
Defining types compatible with different Python versions and defining custom types.
"""
import sys
from sklearn.base import BaseEstimator # import here (instead of later) since sklearn currently a core dep
from alibi_detect.utils.frameworks import has_tensorflow, has_pytorch
from typing import Union, Type
# Literal for typing
if sys.version_info >= (3, 8):
from typing import Literal # noqa
else:
from typing_extensions import Literal # noqa
from typing_extensions import TypeAlias
# Optional dep dependent tuples of types, for isinstance checks and pydantic
supported_models_tf: tuple = ()
supported_models_torch: tuple = ()
supported_optimizers_tf: tuple = ()
supported_optimizers_torch: tuple = ()
if has_tensorflow:
import tensorflow as tf
supported_models_tf = (tf.keras.Model, )
if hasattr(tf.keras.optimizers, 'legacy'):
supported_optimizers_tf = (tf.keras.optimizers.Optimizer, tf.keras.optimizers.legacy.Optimizer, type)
else:
supported_optimizers_tf = (tf.keras.optimizers.Optimizer, type)
if has_pytorch:
import torch
supported_models_torch = (torch.nn.Module, )
supported_optimizers_torch = (type, ) # Note type not object!
supported_models_sklearn = (BaseEstimator, )
supported_models_all = supported_models_tf + supported_models_torch + supported_models_sklearn
supported_optimizers_all = supported_optimizers_tf + supported_optimizers_torch
# type aliases, for use with mypy (must be FwdRef's if involving opt. deps.)
OptimizerTF: TypeAlias = Union['tf.keras.optimizers.Optimizer', 'tf.keras.optimizers.legacy.Optimizer',
Type['tf.keras.optimizers.Optimizer'], Type['tf.keras.optimizers.legacy.Optimizer']]
| 1,719 | 42 | 115 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/_random.py | """
This submodule contains utility functions to manage random number generator (RNG) seeds. It may change
depending on how we decide to handle randomisation in tests (and elsewhere) going forwards. See
https://github.com/SeldonIO/alibi-detect/issues/250.
"""
from contextlib import contextmanager
import random
import numpy as np
import os
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow
if has_tensorflow:
import tensorflow as tf
if has_pytorch:
import torch
# Init global seed
_ALIBI_SEED = None
def set_seed(seed: int):
    """
    Sets the Python, NumPy, TensorFlow and PyTorch random seeds, and the PYTHONHASHSEED env variable.
    Parameters
    ----------
    seed
        Value of the random seed to set.
    """
    global _ALIBI_SEED
    # Clamp negatives to zero. TODO: fix to allow --randomly-seed=0 in setup.cfg; to be removed in future.
    clamped = max(seed, 0)
    _ALIBI_SEED = clamped
    os.environ['PYTHONHASHSEED'] = str(clamped)
    random.seed(clamped)
    np.random.seed(clamped)
    # Only seed the frameworks that are actually installed.
    if has_tensorflow:
        tf.random.set_seed(clamped)
    if has_pytorch:
        torch.manual_seed(clamped)
def get_seed() -> int:
    """
    Return the seed most recently set by :func:`set_seed`.
    Raises a RuntimeError if no seed has been set yet.
    Example
    -------
    >>> from alibi_detect.utils._random import set_seed, get_seed
    >>> set_seed(42)
    >>> get_seed()
    42
    """
    if _ALIBI_SEED is None:
        raise RuntimeError('`set_seed` must be called before `get_seed` can be called.')
    return _ALIBI_SEED
@contextmanager
def fixed_seed(seed: int):
    """
    A context manager to run with a requested random seed (applied to all the RNG's set by :func:`set_seed`).
    Parameters
    ----------
    seed
        Value of the random seed to set in the isolated context.
    Example
    -------
    .. code-block :: python
        set_seed(0)
        with fixed_seed(42):
            dd = cd.LSDDDrift(X_ref)  # seeds equal 42 here
            p_val = dd.predict(X_h0)['data']['p_val']
        # seeds equal 0 here
    """
    orig_seed = get_seed()  # raises RuntimeError if set_seed was never called
    set_seed(seed)
    try:
        yield
    finally:
        # Restore the previous global seed even if the body raises.
        set_seed(orig_seed)
| 2,127 | 24.035294 | 111 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/frameworks.py | from .missing_optional_dependency import ERROR_TYPES
from typing import Optional, List, Dict, Iterable
from enum import Enum
class Framework(str, Enum):
    # Canonical names of the computational backends supported by alibi-detect.
    PYTORCH = 'pytorch'
    TENSORFLOW = 'tensorflow'
    KEOPS = 'keops'
    SKLEARN = 'sklearn'
try:
import tensorflow as tf # noqa
import tensorflow_probability as tfp # noqa
has_tensorflow = True
except ImportError:
has_tensorflow = False
try:
import torch # noqa
has_pytorch = True
except ImportError:
has_pytorch = False
try:
import pykeops # noqa
import torch # noqa
has_keops = True
except ImportError:
has_keops = False
# Map from backend name to boolean value indicating its presence
HAS_BACKEND = {
'tensorflow': has_tensorflow,
'pytorch': has_pytorch,
'sklearn': True,
'keops': has_keops,
}
def _iter_to_str(iterable: Iterable[str]) -> str:
""" Correctly format iterable of items to comma seperated sentence string."""
items = [f'`{option}`' for option in iterable]
last_item_str = f'{items[-1]}' if not items[:-1] else f' and {items[-1]}'
return ', '.join(items[:-1]) + last_item_str
class BackendValidator:
    def __init__(self, backend_options: Dict[Optional[str], List[str]], construct_name: str):
        """Checks for required sets of backend options.
        Takes a dictionary of backends plus extra dependencies and generates correct error messages if they are unmet.
        Parameters
        ----------
        backend_options
            Dictionary from backend to list of dependencies that must be satisfied. The keys are the available options
            for the user and the values should be a list of dependencies that are checked via the `HAS_BACKEND` map
            defined in this module. An example of `backend_options` would be `{'tensorflow': ['tensorflow'], 'pytorch':
            ['pytorch'], None: []}`.This would mean `'tensorflow'`, `'pytorch'` or `None` are available backend options.
            If the user passes a different backend they will receive and error listing the correct backends. In
            addition, if one of the dependencies in the `backend_option` values is missing for the specified backend
            the validator will issue an error message telling the user what dependency bucket to install.
        construct_name
            Name of the object that has a set of backends we need to verify.
        """
        self.backend_options = backend_options
        self.construct_name = construct_name
    def verify_backend(self, backend: str):
        """Verifies backend choice.
        Verifies backend is implemented and that the correct dependencies are installed for the requested backend. If
        the backend is not implemented or a dependency is missing then an error is issued.
        Parameters
        ----------
        backend
            Choice of backend the user wishes to initialize the alibi-detect construct with. Must be one of the keys
            in the `self.backend_options` dictionary.
        Raises
        ------
        NotImplementedError
            If backend is not a member of `self.backend_options.keys()` a `NotImplementedError` is raised. Note `None`
            is a valid choice of backend if it is set as a key on `self.backend_options.keys()`. If a backend is not
            implemented for an alibi-detect object then it should not have a key on `self.backend_options`.
        ImportError
            If one of the dependencies in `self.backend_options[backend]` is missing then an ImportError will be thrown
            including a message informing the user how to install.
        """
        if backend not in self.backend_options:
            self._raise_implementation_error(backend)
        dependencies = self.backend_options[backend]
        # Collect every unmet dependency so the error can list them all at once.
        missing_deps = []
        for dependency in dependencies:
            if not HAS_BACKEND[dependency]:
                missing_deps.append(dependency)
        if missing_deps:
            self._raise_import_error(missing_deps, backend)
    def _raise_import_error(self, missing_deps: List[str], backend: str):
        """Raises import error if backend choice has missing dependency."""
        # Map each missing module name to its pip extras bucket via ERROR_TYPES
        # (e.g. both 'torch' and 'pytorch' map to the 'torch' bucket).
        optional_dependencies = list(ERROR_TYPES[missing_dep] for missing_dep in missing_deps)
        optional_dependencies.sort()
        missing_deps_str = _iter_to_str(missing_deps)
        error_msg = (f'{missing_deps_str} not installed. Cannot initialize and run {self.construct_name} '
                     f'with {backend} backend.')
        pip_msg = '' if not optional_dependencies else \
            (f'The necessary missing dependencies can be installed using '
             f'`pip install alibi-detect[{" ".join(optional_dependencies)}]`.')
        raise ImportError(f'{error_msg} {pip_msg}')
    def _raise_implementation_error(self, backend: str):
        """Raises NotImplementedError error if backend choice is not implemented."""
        backend_list = _iter_to_str(self.backend_options.keys())
        raise NotImplementedError(f"{backend} backend not implemented. Use one of {backend_list} instead.")
| 5,128 | 40.699187 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/tests/test_saving_legacy.py | """
Tests for saving/loading of detectors with legacy .dill state_dict. As legacy save/load functionality becomes
deprecated, these tests will be removed, and more tests will be added to test_saving.py.
"""
from alibi_detect.utils.missing_optional_dependency import MissingDependency
from functools import partial
import numpy as np
import pytest
from sklearn.model_selection import StratifiedKFold
from tempfile import TemporaryDirectory
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from typing import Callable
from alibi_detect.ad import AdversarialAE, ModelDistillation
from alibi_detect.cd import ChiSquareDrift, ClassifierDrift, KSDrift, MMDDrift, TabularDrift
from alibi_detect.cd.tensorflow import UAE, preprocess_drift
from alibi_detect.models.tensorflow.autoencoder import DecoderLSTM, EncoderLSTM
from alibi_detect.od import (IForest, LLR, Mahalanobis, OutlierAEGMM, OutlierVAE, OutlierVAEGMM,
OutlierProphet, SpectralResidual, OutlierSeq2Seq, OutlierAE)
from alibi_detect.saving import save_detector, load_detector
input_dim = 4
latent_dim = 2
n_gmm = 2
threshold = 10.
threshold_drift = .55
n_folds_drift = 5
samples = 6
seq_len = 10
p_val = .05
X_ref = np.random.rand(samples * input_dim).reshape(samples, input_dim)
X_ref_cat = np.tile(np.array([np.arange(samples)] * input_dim).T, (2, 1))
X_ref_mix = X_ref.copy()
X_ref_mix[:, 0] = np.tile(np.array(np.arange(samples // 2)), (1, 2)).T[:, 0]
n_permutations = 10
# define encoder and decoder
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(input_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(latent_dim, activation=None)
]
)
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(input_dim, activation=tf.nn.sigmoid)
]
)
# Shared encoder/decoder networks passed to all autoencoder-based detectors below.
kwargs = {'encoder_net': encoder_net,
          'decoder_net': decoder_net}
# Preprocessing for the drift detectors: untrained AE projection of the input.
preprocess_fn = partial(preprocess_drift, model=UAE(encoder_net=encoder_net))
# Density network for the (V)AEGMM detectors: maps concatenated latent + reconstruction
# features (latent_dim + 2) to n_gmm mixture probabilities.
gmm_density_net = tf.keras.Sequential(
    [
        InputLayer(input_shape=(latent_dim + 2,)),
        Dense(10, activation=tf.nn.relu),
        Dense(n_gmm, activation=tf.nn.softmax)
    ]
)
# Threshold estimation network for the Seq2Seq outlier detector.
threshold_net = tf.keras.Sequential(
    [
        InputLayer(input_shape=(seq_len, latent_dim)),
        Dense(5, activation=tf.nn.relu)
    ]
)
# define model
inputs = tf.keras.Input(shape=(input_dim,))
outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
# One instance of every detector type covered by the legacy save/load round-trip test.
detector = [
    AdversarialAE(threshold=threshold,
                  model=model,
                  **kwargs),
    ModelDistillation(threshold=threshold,
                      model=model,
                      distilled_model=model),
    IForest(threshold=threshold),
    LLR(threshold=threshold, model=model),
    Mahalanobis(threshold=threshold),
    OutlierAEGMM(threshold=threshold,
                 gmm_density_net=gmm_density_net,
                 n_gmm=n_gmm,
                 **kwargs),
    OutlierVAE(threshold=threshold,
               latent_dim=latent_dim,
               samples=samples,
               **kwargs),
    OutlierAE(threshold=threshold,
              **kwargs),
    OutlierVAEGMM(threshold=threshold,
                  gmm_density_net=gmm_density_net,
                  n_gmm=n_gmm,
                  latent_dim=latent_dim,
                  samples=samples,
                  **kwargs),
    SpectralResidual(threshold=threshold,
                     window_amp=10,
                     window_local=10),
    OutlierSeq2Seq(input_dim,
                   seq_len,
                   threshold=threshold,
                   threshold_net=threshold_net,
                   latent_dim=latent_dim),
    KSDrift(X_ref,
            p_val=p_val,
            preprocess_x_ref=False,
            preprocess_fn=preprocess_fn),
    MMDDrift(X_ref,
             p_val=p_val,
             preprocess_x_ref=False,
             preprocess_fn=preprocess_fn,
             configure_kernel_from_x_ref=True,
             n_permutations=n_permutations),
    ChiSquareDrift(X_ref_cat,
                   p_val=p_val,
                   preprocess_x_ref=True),
    TabularDrift(X_ref_mix,
                 p_val=p_val,
                 categories_per_feature={0: None},
                 preprocess_x_ref=True),
    ClassifierDrift(X_ref,
                    model=model,
                    p_val=p_val,
                    n_folds=n_folds_drift,
                    train_size=None)
]
# OutlierProphet is only appended when its optional dependency is installed
# (otherwise the name is bound to a MissingDependency placeholder).
if not isinstance(OutlierProphet, MissingDependency):
    detector.append(
        OutlierProphet(threshold=.7,
                       growth='logistic')
    )
n_tests = len(detector)
@pytest.fixture
def select_detector(request):
    # Indirect parametrization: `request.param` is an index into the detector list.
    return detector[request.param]
@pytest.mark.parametrize('select_detector', list(range(n_tests)), indirect=True)
def test_save_load(select_detector):
    """
    Round-trip a detector through the legacy ``save_detector``/``load_detector``
    path and check that the reloaded detector preserves its metadata, sub-models
    and hyperparameters.
    """
    det = select_detector
    det_name = det.meta['name']
    with TemporaryDirectory() as temp_dir:
        temp_dir += '/'
        save_detector(det, temp_dir, legacy=True)
        det_load = load_detector(temp_dir)
        det_load_name = det_load.meta['name']
        assert det_load_name == det_name
        # Threshold check skipped for detector types that don't carry the shared
        # `threshold` attribute in the same way.
        if not type(det_load) in [
            OutlierProphet, ChiSquareDrift, ClassifierDrift, KSDrift, MMDDrift, TabularDrift
        ]:
            assert det_load.threshold == det.threshold == threshold
        if type(det_load) in [OutlierVAE, OutlierVAEGMM]:
            assert det_load.samples == det.samples == samples
        # Reloaded adversarial/distillation models should have all layers frozen.
        if type(det_load) == AdversarialAE or type(det_load) == ModelDistillation:
            for layer in det_load.model.layers:
                assert not layer.trainable
        # Per-detector checks on the restored sub-models and hyperparameters.
        # Exact type comparisons (rather than isinstance) are deliberate here so
        # that each branch only matches its own detector class.
        if type(det_load) == OutlierAEGMM:
            assert isinstance(det_load.aegmm.encoder, tf.keras.Sequential)
            assert isinstance(det_load.aegmm.decoder, tf.keras.Sequential)
            assert isinstance(det_load.aegmm.gmm_density, tf.keras.Sequential)
            assert isinstance(det_load.aegmm, tf.keras.Model)
            assert det_load.aegmm.n_gmm == n_gmm
        elif type(det_load) == OutlierVAEGMM:
            assert isinstance(det_load.vaegmm.encoder.encoder_net, tf.keras.Sequential)
            assert isinstance(det_load.vaegmm.decoder, tf.keras.Sequential)
            assert isinstance(det_load.vaegmm.gmm_density, tf.keras.Sequential)
            assert isinstance(det_load.vaegmm, tf.keras.Model)
            assert det_load.vaegmm.latent_dim == latent_dim
            assert det_load.vaegmm.n_gmm == n_gmm
        elif type(det_load) in [AdversarialAE, OutlierAE]:
            assert isinstance(det_load.ae.encoder.encoder_net, tf.keras.Sequential)
            assert isinstance(det_load.ae.decoder.decoder_net, tf.keras.Sequential)
            assert isinstance(det_load.ae, tf.keras.Model)
        elif type(det_load) == ModelDistillation:
            assert isinstance(det_load.model, tf.keras.Sequential) or isinstance(det_load.model, tf.keras.Model)
            assert (isinstance(det_load.distilled_model, tf.keras.Sequential) or
                    isinstance(det_load.distilled_model, tf.keras.Model))
        elif type(det_load) == OutlierVAE:
            assert isinstance(det_load.vae.encoder.encoder_net, tf.keras.Sequential)
            assert isinstance(det_load.vae.decoder.decoder_net, tf.keras.Sequential)
            assert isinstance(det_load.vae, tf.keras.Model)
            assert det_load.vae.latent_dim == latent_dim
        elif type(det_load) == Mahalanobis:
            assert det_load.clip is None
            assert det_load.mean == det_load.C == det_load.n == 0
            assert det_load.meta['detector_type'] == 'outlier'
            assert det_load.meta['online']
        elif type(det_load) == OutlierProphet:
            assert det_load.model.interval_width == .7
            assert det_load.model.growth == 'logistic'
            assert det_load.meta['data_type'] == 'time-series'
        elif type(det_load) == SpectralResidual:
            assert det_load.window_amp == 10
            assert det_load.window_local == 10
        elif type(det_load) == OutlierSeq2Seq:
            assert isinstance(det_load.seq2seq, tf.keras.Model)
            assert isinstance(det_load.seq2seq.threshold_net, tf.keras.Sequential)
            assert isinstance(det_load.seq2seq.encoder, EncoderLSTM)
            assert isinstance(det_load.seq2seq.decoder, DecoderLSTM)
            assert det_load.latent_dim == latent_dim
            assert det_load.threshold == threshold
            assert det_load.shape == (-1, seq_len, input_dim)
        elif type(det_load) == KSDrift:
            assert det_load.n_features == latent_dim
            assert det_load.p_val == p_val
            assert (det_load.x_ref == X_ref).all()
            assert isinstance(det_load.preprocess_fn, Callable)
            assert det_load.preprocess_fn.func.__name__ == 'preprocess_drift'
        elif type(det_load) in [ChiSquareDrift, TabularDrift]:
            assert isinstance(det_load.x_ref_categories, dict)
            assert det_load.p_val == p_val
            x = X_ref_cat.copy() if isinstance(det_load, ChiSquareDrift) else X_ref_mix.copy()
            assert (det_load.x_ref == x).all()
        elif type(det_load) == MMDDrift:
            assert not det_load._detector.infer_sigma
            assert det_load._detector.n_permutations == n_permutations
            assert det_load._detector.p_val == p_val
            assert (det_load._detector.x_ref == X_ref).all()
            assert isinstance(det_load._detector.preprocess_fn, Callable)
            assert det_load._detector.preprocess_fn.func.__name__ == 'preprocess_drift'
        elif type(det_load) == ClassifierDrift:
            assert det_load._detector.p_val == p_val
            assert (det_load._detector.x_ref == X_ref).all()
            assert isinstance(det_load._detector.skf, StratifiedKFold)
            assert isinstance(det_load._detector.train_kwargs, dict)
            assert isinstance(det_load._detector.model, tf.keras.Model)
        elif type(det_load) == LLR:
            assert isinstance(det_load.dist_s, tf.keras.Model)
            assert isinstance(det_load.dist_b, tf.keras.Model)
            assert not det_load.sequential
            assert not det_load.has_log_prob
| 10,367 | 40.806452 | 112 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/tests/test_random.py | from alibi_detect.utils._random import set_seed, get_seed, fixed_seed
import numpy as np
import tensorflow as tf
import torch
def test_set_get_seed(seed):
    """
    Check that `get_seed` reflects the seed set by pytest-randomly and that
    `set_seed` updates the value returned by subsequent `get_seed` calls.
    """
    # pytest-randomly seeds the RNGs before the test body runs
    assert get_seed() == seed
    # Re-seed with a different value and confirm get_seed tracks it
    updated = seed + 42
    set_seed(updated)
    assert get_seed() == updated
def test_fixed_seed(seed):
    """
    Check the fixed_seed context manager: identical seeds reproduce identical
    random sequences, different seeds produce different ones, and the global
    seed is restored upon exit.
    """
    length = 5  # number of draws per RNG backend

    def draw_sequence(ctx_seed, check_inside=False):
        # Draw interleaved numbers from numpy, tensorflow and pytorch under a fixed seed.
        draws = []
        with fixed_seed(ctx_seed):
            for _ in range(length):
                draws.append(np.random.normal([1]))
                draws.append(tf.random.normal([1]))
                draws.append(torch.normal(torch.tensor([1.0])))
            if check_inside:
                # Seed should be unchanged by the RNG calls while inside the context.
                assert get_seed() == ctx_seed
        return draws

    nums0 = draw_sequence(seed + 42, check_inside=True)
    # Same seed -> identical sequence
    nums1 = draw_sequence(seed + 42)
    assert nums0 == nums1
    # Different seed -> different sequence
    nums2 = draw_sequence(seed + 99)
    assert nums1 != nums2
    # Seed is reset upon exit of the context managers
    assert get_seed() == seed
| 1,848 | 28.822581 | 90 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/state/state.py | import os
from pathlib import Path
import logging
from abc import ABC
from typing import Union, Tuple
import numpy as np
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils.state._pytorch import save_state_dict as _save_state_dict_pt,\
load_state_dict as _load_state_dict_pt
logger = logging.getLogger(__name__)
class StateMixin(ABC):
    """
    Mixin providing save/load of a detector's stateful attributes to/from disk.
    """
    t: int
    online_state_keys: Tuple[str, ...]

    def _suffix(self) -> str:
        # PyTorch backends persist state via torch ('.pt'); everything else via numpy ('.npz').
        return '.pt' if getattr(self, 'backend', None) == Framework.PYTORCH else '.npz'

    def _set_state_dir(self, dirpath: Union[str, os.PathLike]):
        """
        Record the directory path to store state in, creating it (with parents)
        if it doesn't already exist.

        Parameters
        ----------
        dirpath
            The directory to save state file inside.
        """
        self.state_dir = Path(dirpath)
        self.state_dir.mkdir(parents=True, exist_ok=True)

    def save_state(self, filepath: Union[str, os.PathLike]):
        """
        Save the detector's state to disk in order to generate a checkpoint.

        Parameters
        ----------
        filepath
            The directory to save state to.
        """
        self._set_state_dir(filepath)
        target = self.state_dir.joinpath('state' + self._suffix())
        _save_state_dict(self, self.online_state_keys, target)
        logger.info('Saved state for t={} to {}'.format(self.t, self.state_dir))

    def load_state(self, filepath: Union[str, os.PathLike]):
        """
        Restore the detector's state from disk, from a checkpoint previously
        generated with `save_state`.

        Parameters
        ----------
        filepath
            The directory to load state from.
        """
        self._set_state_dir(filepath)
        source = self.state_dir.joinpath('state' + self._suffix())
        _load_state_dict(self, source, raise_error=True)
        logger.info('State loaded for t={} from {}'.format(self.t, self.state_dir))
def _save_state_dict(detector: StateMixin, keys: tuple, filepath: Path):
    """
    Write the named attributes of `detector` to `filepath` as a state dictionary.

    Parameters
    ----------
    detector
        The detector to extract state attributes from.
    keys
        Tuple of state dict keys to populate dictionary with.
    filepath
        The file to save state dictionary to.
    """
    # Attributes missing on the detector are stored as None rather than raising.
    state_dict = {}
    for key in keys:
        state_dict[key] = getattr(detector, key, None)
    # Torch-backed state goes through torch.save; everything else through np.savez.
    if filepath.suffix == '.pt':
        _save_state_dict_pt(state_dict, filepath)
    else:
        np.savez(filepath, **state_dict)
def _load_state_dict(detector: StateMixin, filepath: Path, raise_error: bool = True):
    """
    Load a detector's state dictionary from `filepath` and update the detector's
    attributes in-place with the values found there.

    Parameters
    ----------
    detector
        The detector to update.
    filepath
        File to load state dictionary from.
    raise_error
        Whether to raise an error if a file is not found at `filepath`. Otherwise, raise a warning and skip loading.

    Returns
    -------
    None. The detector is updated inplace.
    """
    # Guard clause: bail out (or raise) when the state file is absent.
    if not filepath.is_file():
        if raise_error:
            raise FileNotFoundError('State file not found at {}.'.format(filepath))
        logger.warning('State file not found at {}. Skipping loading of state.'.format(filepath))
        return
    # '.pt' files were written by the torch backend; '.npz' by numpy.
    state_dict = _load_state_dict_pt(filepath) if filepath.suffix == '.pt' else np.load(str(filepath))
    for key, value in state_dict.items():
        setattr(detector, key, value)
| 3,881 | 32.756522 | 116 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/state/_pytorch/state.py | """
Submodule to handle saving and loading of detector state dictionaries when the dictionaries contain `torch.Tensor`'s.
"""
from pathlib import Path
import torch
def save_state_dict(state_dict: dict, filepath: Path):
    """
    Utility function to persist a detector's state dictionary to disk using `torch.save`.

    Parameters
    ----------
    state_dict
        The state dictionary to save.
    filepath
        The file to write the state dictionary to.
    """
    # Serialization is delegated entirely to torch; tensors and plain containers round-trip.
    torch.save(obj=state_dict, f=filepath)
def load_state_dict(filepath: Path) -> dict:
    """
    Utility function to read a detector's state dictionary back from disk with `torch.load`.

    Parameters
    ----------
    filepath
        The file to read the state dictionary from.

    Returns
    -------
    The loaded state dictionary.
    """
    state_dict = torch.load(filepath)
    return state_dict
| 870 | 22.540541 | 117 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/state/_pytorch/__init__.py | from alibi_detect.utils.missing_optional_dependency import import_optional
# Resolve the torch-backed state utilities through import_optional so that torch
# remains an optional dependency (behaviour when torch is missing is defined by
# import_optional — presumably placeholder objects; see
# alibi_detect.utils.missing_optional_dependency).
save_state_dict, load_state_dict = import_optional(
    'alibi_detect.utils.state._pytorch.state',
    names=['save_state_dict', 'load_state_dict']
)
# Public API of this subpackage.
__all__ = [
    "save_state_dict",
    "load_state_dict",
]
| 288 | 21.230769 | 74 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/keops/kernels.py | from pykeops.torch import LazyTensor
import torch
import torch.nn as nn
from typing import Callable, Optional, Union
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import Literal
from copy import deepcopy
def sigma_mean(x: LazyTensor, y: LazyTensor, dist: LazyTensor, n_min: int = 100) -> torch.Tensor:
    """
    Set bandwidth to the mean distance between instances x and y.

    Parameters
    ----------
    x
        LazyTensor of instances with dimension [Nx, 1, features] or [batch_size, Nx, 1, features].
        The singleton dimension is necessary for broadcasting.
    y
        LazyTensor of instances with dimension [1, Ny, features] or [batch_size, 1, Ny, features].
        The singleton dimension is necessary for broadcasting.
    dist
        LazyTensor with dimensions [Nx, Ny] or [batch_size, Nx, Ny] containing the
        pairwise distances between `x` and `y`.
    n_min
        In order to check whether x equals y after squeezing the singleton dimensions, we check if the
        diagonal of the distance matrix (which is a lazy tensor from which the diagonal cannot be directly extracted)
        consists of all zeros. We do this by computing the k-min distances and k-argmin indices over the
        columns of the distance matrix. We then check if the distances on the diagonal of the distance matrix
        are all zero or not. If they are all zero, then we do not use these distances (zeros) when computing
        the mean pairwise distance as bandwidth. If Nx becomes very large, it is advised to set `n_min`
        to a low enough value to avoid OOM issues. By default we set it to 100 instances.

    Returns
    -------
    The computed bandwidth, `sigma`.
    """
    # dist is [batch_size, Nx, Ny] when batched, [Nx, Ny] otherwise; `axis` is the
    # column axis used for the k-min reduction below.
    batched = len(dist.shape) == 3
    if not batched:
        nx, ny = dist.shape
        axis = 1
    else:
        batch_size, nx, ny = dist.shape
        axis = 2
    # Number of entries contributing to the mean; reduced below if the diagonal is excluded.
    n_mean = nx * ny
    if nx == ny:
        # Possible x == y case: probe the diagonal via a k-min reduction (the lazy
        # tensor does not allow direct diagonal extraction).
        n_min = min(n_min, nx) if isinstance(n_min, int) else nx
        d_min, id_min = dist.Kmin_argKmin(n_min, axis=axis)
        if batched:
            d_min, id_min = d_min[0], id_min[0]  # first instance in permutation test contains the original data
        # Locate positions where the argmin index equals the row index, i.e. the diagonal.
        rows, cols = torch.where(id_min.cpu() == torch.arange(nx)[:, None])
        if (d_min[rows, cols] == 0.).all():
            # Diagonal is all zeros -> x equals y; exclude the Nx zero self-distances from the mean.
            n_mean = nx * (nx - 1)
    # Mean over (half of) the summed pairwise distances, then square-root to get sigma.
    dist_sum = dist.sum(1).sum(1)[0] if batched else dist.sum(1).sum().unsqueeze(-1)
    sigma = (.5 * dist_sum / n_mean) ** .5
    return sigma
class GaussianRBF(nn.Module):
    def __init__(
        self,
        sigma: Optional[torch.Tensor] = None,
        init_sigma_fn: Optional[Callable] = None,
        trainable: bool = False
    ) -> None:
        """
        Gaussian RBF kernel: k(x,y) = exp(-(1/(2*sigma^2)||x-y||^2). A forward pass takes
        a batch of instances x and y and returns the kernel matrix.
        x can be of shape [Nx, 1, features] or [batch_size, Nx, 1, features].
        y can be of shape [1, Ny, features] or [batch_size, 1, Ny, features].
        The returned kernel matrix can be of shape [Nx, Ny] or [batch_size, Nx, Ny].
        x, y and the returned kernel matrix are all lazy tensors.

        Parameters
        ----------
        sigma
            Bandwidth used for the kernel. Needn't be specified if being inferred or trained.
            Can pass multiple values to eval kernel with and then average.
        init_sigma_fn
            Function used to compute the bandwidth `sigma`. Used when `sigma` is to be inferred.
            The function's signature should match :py:func:`~alibi_detect.utils.keops.kernels.sigma_mean`,
            meaning that it should take in the lazy tensors `x`, `y` and `dist` and return a tensor `sigma`.
        trainable
            Whether or not to track gradients w.r.t. `sigma` to allow it to be trained.
        """
        super().__init__()
        init_sigma_fn = sigma_mean if init_sigma_fn is None else init_sigma_fn
        # Raw constructor arguments kept for serialization via get_config().
        self.config = {'sigma': sigma, 'trainable': trainable, 'init_sigma_fn': init_sigma_fn}
        # sigma is stored on log-scale so the (optionally trainable) parameter is unconstrained.
        if sigma is None:
            # Placeholder parameter; the actual value is filled in on the first forward pass.
            self.log_sigma = nn.Parameter(torch.empty(1), requires_grad=trainable)
            self.init_required = True
        else:
            sigma = sigma.reshape(-1)  # [Ns,]
            self.log_sigma = nn.Parameter(sigma.log(), requires_grad=trainable)
            self.init_required = False
        self.init_sigma_fn = init_sigma_fn
        self.trainable = trainable

    @property
    def sigma(self) -> torch.Tensor:
        # Recover the bandwidth(s) from the log-scale parameter.
        return self.log_sigma.exp()

    def forward(self, x: LazyTensor, y: LazyTensor, infer_sigma: bool = False) -> LazyTensor:
        # Squared euclidean distance; [Nx, Ny] or [batch_size, Nx, Ny] lazy tensor.
        dist = ((x - y) ** 2).sum(-1)
        if infer_sigma or self.init_required:
            if self.trainable and infer_sigma:
                raise ValueError("Gradients cannot be computed w.r.t. an inferred sigma value")
            sigma = self.init_sigma_fn(x, y, dist)
            with torch.no_grad():
                # In-place copy keeps the nn.Parameter object intact.
                self.log_sigma.copy_(sigma.log().clone())
            self.init_required = False
        gamma = 1. / (2. * self.sigma ** 2)
        # Add singleton dims so gamma broadcasts against dist, with a trailing
        # bandwidth dimension to allow evaluating multiple sigmas at once.
        gamma = LazyTensor(gamma[None, None, :]) if len(dist.shape) == 2 else LazyTensor(gamma[None, None, None, :])
        kernel_mat = (- gamma * dist).exp()
        if len(dist.shape) < len(gamma.shape):
            # Multiple bandwidths were evaluated: average the resulting kernel matrices.
            kernel_mat = kernel_mat.sum(-1) / len(self.sigma)
        return kernel_mat

    def get_config(self) -> dict:
        """
        Returns a serializable config dict (excluding the input_sigma_fn, which is serialized in alibi_detect.saving).
        """
        cfg = deepcopy(self.config)
        if isinstance(cfg['sigma'], torch.Tensor):
            cfg['sigma'] = cfg['sigma'].detach().cpu().numpy().tolist()
        cfg.update({'flavour': Framework.KEOPS.value})
        return cfg

    @classmethod
    def from_config(cls, config):
        """
        Instantiates a kernel from a config dictionary.

        Parameters
        ----------
        config
            A kernel config dictionary.
        """
        config.pop('flavour')
        return cls(**config)
class DeepKernel(nn.Module):
    def __init__(
        self,
        proj: nn.Module,
        kernel_a: Union[nn.Module, Literal['rbf']] = 'rbf',
        kernel_b: Optional[Union[nn.Module, Literal['rbf']]] = 'rbf',
        eps: Union[float, Literal['trainable']] = 'trainable'
    ) -> None:
        """
        Deep kernel computing similarities as
        k(x,y) = (1-eps)*k_a(proj(x), proj(y)) + eps*k_b(x,y).

        A forward pass takes an already projected batch of instances x_proj and y_proj and,
        when k_b is present, the corresponding raw batches x and y, and returns the kernel
        matrix. x_proj/x can be of shape [Nx, 1, features(_proj)] or
        [batch_size, Nx, 1, features(_proj)]; y_proj/y of shape [1, Ny, features(_proj)] or
        [batch_size, 1, Ny, features(_proj)]. The returned kernel matrix is of shape
        [Nx, Ny] or [batch_size, Nx, Ny]. Inputs and output are all keops lazy tensors.

        Parameters
        ----------
        proj
            The projection to be applied to the inputs before applying kernel_a
        kernel_a
            The kernel to apply to the projected inputs. Defaults to a Gaussian RBF with trainable bandwidth.
        kernel_b
            The kernel to apply to the raw inputs. Defaults to a Gaussian RBF with trainable bandwidth.
            Set to None in order to use only the deep component (i.e. eps=0).
        eps
            The proportion (in [0,1]) of weight to assign to the kernel applied to raw inputs. This can be
            either specified or set to 'trainable'. Only relevant if kernel_b is not None.
        """
        super().__init__()
        # Raw constructor arguments kept for serialization via get_config().
        self.config = {'proj': proj, 'kernel_a': kernel_a, 'kernel_b': kernel_b, 'eps': eps}
        self.kernel_a: Callable = GaussianRBF(trainable=True) if kernel_a == 'rbf' else kernel_a
        self.kernel_b: Callable = GaussianRBF(trainable=True) if kernel_b == 'rbf' else kernel_b
        self.proj = proj
        if kernel_b is not None:
            self._init_eps(eps)

    def _init_eps(self, eps: Union[float, Literal['trainable']]) -> None:
        # eps is stored as a logit so a trainable eps stays within (0,1).
        if eps == 'trainable':
            self.logit_eps = nn.Parameter(torch.tensor(0.))
            return
        if not isinstance(eps, float):
            raise NotImplementedError("eps should be 'trainable' or a float in (0,1)")
        if not 0 < eps < 1:
            raise ValueError("eps should be in (0,1)")
        self.logit_eps = nn.Parameter(torch.tensor(eps).logit(), requires_grad=False)

    @property
    def eps(self) -> torch.Tensor:
        # With no raw-input kernel the mixing weight is fixed at zero.
        if self.kernel_b is None:
            return torch.tensor(0.)
        return self.logit_eps.sigmoid()

    def forward(self, x_proj: LazyTensor, y_proj: LazyTensor, x: Optional[LazyTensor] = None,
                y: Optional[LazyTensor] = None) -> LazyTensor:
        deep_component = self.kernel_a(x_proj, y_proj)
        if self.kernel_b is None:
            return deep_component
        # Convex combination of the deep kernel and the raw-input kernel.
        return (1 - self.eps) * deep_component + self.eps * self.kernel_b(x, y)

    def get_config(self) -> dict:
        """Return a copy of the constructor arguments for serialization."""
        return deepcopy(self.config)

    @classmethod
    def from_config(cls, config):
        """Instantiate a deep kernel from a config dictionary."""
        return cls(**config)
| 9,448 | 42.543779 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/keops/tests/test_kernels_keops.py | from itertools import product
import numpy as np
from alibi_detect.utils.frameworks import has_keops
import pytest
import torch
import torch.nn as nn
if has_keops:
from pykeops.torch import LazyTensor
from alibi_detect.utils.keops import DeepKernel, GaussianRBF
# Parameter grid for the GaussianRBF tests: bandwidth(s), feature dimension,
# (Nx, Ny) instance counts, optional batch dimension and trainability.
sigma = [None, np.array([1.]), np.array([1., 2.])]
n_features = [5, 10]
n_instances = [(100, 100), (100, 75)]
batch_size = [None, 5]
trainable = [True, False]
tests_gk = list(product(sigma, n_features, n_instances, batch_size, trainable))
n_tests_gk = len(tests_gk)
@pytest.fixture
def gaussian_kernel_params(request):
    # Indirect parametrization: `request.param` is an index into tests_gk.
    return tests_gk[request.param]
@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.')
@pytest.mark.parametrize('gaussian_kernel_params', list(range(n_tests_gk)), indirect=True)
def test_gaussian_kernel(gaussian_kernel_params):
    """Check GaussianRBF output shapes and basic kernel properties on keops lazy tensors."""
    sigma, n_features, n_instances, batch_size, trainable = gaussian_kernel_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    if batch_size:
        xshape = (batch_size, ) + xshape
        yshape = (batch_size, ) + yshape
    sigma = sigma if sigma is None else torch.from_numpy(sigma).float()
    x = torch.from_numpy(np.random.random(xshape)).float()
    y = torch.from_numpy(np.random.random(yshape)).float()
    # Singleton dims set up keops broadcasting: x varies over rows, y over columns.
    if batch_size:
        x_lazy, y_lazy = LazyTensor(x[:, :, None, :]), LazyTensor(y[:, None, :, :])
        x_lazy2 = LazyTensor(x[:, None, :, :])
    else:
        x_lazy, y_lazy = LazyTensor(x[:, None, :]), LazyTensor(y[None, :, :])
        x_lazy2 = LazyTensor(x[None, :, :])
    kernel = GaussianRBF(sigma=sigma, trainable=trainable)
    infer_sigma = True if sigma is None else False
    if trainable and infer_sigma:
        # Inferring sigma while tracking gradients is disallowed by the kernel.
        with pytest.raises(ValueError):
            kernel(x_lazy, y_lazy, infer_sigma=infer_sigma)
    else:
        k_xy = kernel(x_lazy, y_lazy, infer_sigma=infer_sigma)
        k_xx = kernel(x_lazy, x_lazy2, infer_sigma=infer_sigma)
        k_xy_shape = n_instances
        k_xx_shape = (n_instances[0], n_instances[0])
        axis = 1
        if batch_size:
            k_xy_shape = (batch_size, ) + k_xy_shape
            k_xx_shape = (batch_size, ) + k_xx_shape
            axis = 2
        assert k_xy.shape == k_xy_shape and k_xx.shape == k_xx_shape
        # Each instance should be most similar to itself, and kernel values non-negative.
        k_xx_argmax = k_xx.argmax(axis=axis)
        k_xx_min, k_xy_min = k_xx.min(axis=axis), k_xy.min(axis=axis)
        if batch_size:
            k_xx_argmax, k_xx_min, k_xy_min = k_xx_argmax[0], k_xx_min[0], k_xy_min[0]
        assert (torch.arange(n_instances[0]) == k_xx_argmax.cpu().view(-1)).all()
        assert (k_xx_min >= 0.).all() and (k_xy_min >= 0.).all()
if has_keops:
    class MyKernel(nn.Module):
        """Minimal custom kernel, exp(-||x - y||^2), used to exercise non-RBF components."""
        def __init__(self):
            super().__init__()
        def forward(self, x: LazyTensor, y: LazyTensor) -> LazyTensor:
            return (- ((x - y) ** 2).sum(-1)).exp()
# Parameter grid for the DeepKernel tests: feature dim, (Nx, Ny) instance counts,
# kernel component types and eps mixing weight.
n_features = [5]
n_instances = [(100, 100), (100, 75)]
kernel_a = ['GaussianRBF', 'MyKernel']
kernel_b = ['GaussianRBF', 'MyKernel', None]
eps = [0.5, 'trainable']
tests_dk = list(product(n_features, n_instances, kernel_a, kernel_b, eps))
n_tests_dk = len(tests_dk)
@pytest.fixture
def deep_kernel_params(request):
    # Indirect parametrization: `request.param` is an index into tests_dk.
    return tests_dk[request.param]
@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.')
@pytest.mark.parametrize('deep_kernel_params', list(range(n_tests_dk)), indirect=True)
def test_deep_kernel(deep_kernel_params):
    """Check DeepKernel output shapes, positivity of self-similarities and symmetry."""
    n_features, n_instances, kernel_a, kernel_b, eps = deep_kernel_params
    proj = nn.Linear(n_features, n_features)
    kernel_a = MyKernel() if kernel_a == 'MyKernel' else GaussianRBF(trainable=True)
    if kernel_b == 'MyKernel':
        kernel_b = MyKernel()
    elif kernel_b == 'GaussianRBF':
        kernel_b = GaussianRBF(trainable=True)
    kernel = DeepKernel(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    x = torch.as_tensor(np.random.random(xshape).astype('float32'))
    y = torch.as_tensor(np.random.random(yshape).astype('float32'))
    # Build lazy tensors with singleton dims for keops broadcasting;
    # the raw inputs are only needed when a kernel_b component is present.
    x_proj, y_proj = kernel.proj(x), kernel.proj(y)
    x2_proj, x_proj = LazyTensor(x_proj[None, :, :]), LazyTensor(x_proj[:, None, :])
    y2_proj, y_proj = LazyTensor(y_proj[None, :, :]), LazyTensor(y_proj[:, None, :])
    if kernel_b:
        x2, x = LazyTensor(x[None, :, :]), LazyTensor(x[:, None, :])
        y2, y = LazyTensor(y[None, :, :]), LazyTensor(y[:, None, :])
    else:
        x, x2, y, y2 = None, None, None, None
    k_xy = kernel(x_proj, y2_proj, x, y2)
    k_yx = kernel(y_proj, x2_proj, y, x2)
    k_xx = kernel(x_proj, x2_proj, x, x2)
    assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
    assert (k_xx.Kmin_argKmin(1, axis=1)[0] > 0.).all()
    # k(x,y) should equal k(y,x) transposed (up to numerical tolerance).
    assert (torch.abs(k_xy.sum(1).sum(1) - k_yx.t().sum(1).sum(1)) < 1e-5).all()
| 4,889 | 39.081967 | 90 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/fetching/fetching.py | import logging
import os
from io import BytesIO
from pathlib import Path
from typing import Union, TYPE_CHECKING, Tuple
import dill
import requests
from requests import RequestException
import tensorflow as tf
from tensorflow.python.keras import backend
from alibi_detect.models.tensorflow import PixelCNN
from alibi_detect.saving import load_detector
if TYPE_CHECKING:
# Import the true objects directly for typechecking. (See note in CONTRIBUTING.md in Optional Dependencies section)
from alibi_detect.ad.adversarialae import AdversarialAE # noqa
from alibi_detect.ad.model_distillation import ModelDistillation # noqa
from alibi_detect.base import BaseDetector # noqa
from alibi_detect.od.llr import LLR # noqa
from alibi_detect.od.isolationforest import IForest # noqa
from alibi_detect.od.mahalanobis import Mahalanobis # noqa
from alibi_detect.od.aegmm import OutlierAEGMM # noqa
from alibi_detect.od.ae import OutlierAE # noqa
from alibi_detect.od.prophet import OutlierProphet # noqa
from alibi_detect.od.seq2seq import OutlierSeq2Seq # noqa
from alibi_detect.od.vae import OutlierVAE # noqa
from alibi_detect.od.vaegmm import OutlierVAEGMM # noqa
from alibi_detect.od.sr import SpectralResidual # noqa
from alibi_detect.utils.url import _join_url
# do not extend pickle dispatch table so as not to change pickle behaviour
dill.extend(use_dill=False)
logger = logging.getLogger(__name__)
# Union of the detector classes handled by this module. The members are string
# forward-references, resolved only under TYPE_CHECKING to avoid hard imports.
Data = Union[
    'BaseDetector',
    'AdversarialAE',
    'ModelDistillation',
    'IForest',
    'LLR',
    'Mahalanobis',
    'OutlierAEGMM',
    'OutlierAE',
    'OutlierProphet',
    'OutlierSeq2Seq',
    'OutlierVAE',
    'OutlierVAEGMM',
    'SpectralResidual'
]
"""Number of seconds to wait for URL requests before raising an error."""
# NOTE(review): the bare string above precedes the assignment; attribute docstrings
# conventionally follow the assignment, so doc tools may not associate it with
# TIMEOUT — consider moving it below the line.
TIMEOUT = 10
def get_pixelcnn_default_kwargs():
    """
    Build the default keyword arguments for an LLR detector based on PixelCNN.

    Returns
    -------
    Dict with the distribution `dist_s`, a copy of it as the background
    distribution `dist_b`, and the expected `input_shape`.
    """
    dist = PixelCNN(
        image_shape=(28, 28, 1),
        num_resnet=5,
        num_hierarchies=2,
        num_filters=32,
        num_logistic_mix=1,
        receptive_field_dims=(3, 3),
        dropout_p=.3,
        l2_weight=0.
    )
    return {
        'dist_s': dist,
        'dist_b': dist.copy(),
        'input_shape': (28, 28, 1)
    }
def fetch_tf_model(dataset: str, model: str) -> tf.keras.Model:
    """
    Fetch pretrained tensorflow models from the google cloud bucket.

    Parameters
    ----------
    dataset
        Dataset trained on.
    model
        Model name.

    Returns
    -------
    Pretrained tensorflow model.
    """
    url = 'https://storage.googleapis.com/seldon-models/alibi-detect/classifier/'
    save_path = tf.keras.utils.get_file(
        Path(model + '.h5').resolve(),
        _join_url(url, [dataset, model, 'model.h5'])
    )
    # The cifar10 resnet56 model needs the keras backend module resolved on load.
    needs_backend = dataset == 'cifar10' and model == 'resnet56'
    custom_objects = {'backend': backend} if needs_backend else None
    return tf.keras.models.load_model(save_path, custom_objects=custom_objects)
def fetch_enc_dec(url: str, filepath: Union[str, os.PathLike]) -> None:
    """
    Download encoder and decoder networks.

    Parameters
    ----------
    url
        URL to fetch detector from.
    filepath
        Local directory to save detector to.
    """
    url_models = _join_url(url, 'model')
    model_path = Path(filepath).joinpath('model').resolve()
    if not model_path.is_dir():
        model_path.mkdir(parents=True, exist_ok=True)
    # Fetch the encoder and decoder keras models.
    for fname in ('encoder_net.h5', 'decoder_net.h5'):
        tf.keras.utils.get_file(
            model_path.joinpath(fname),
            _join_url(url_models, fname)
        )
def fetch_ae(url: str, filepath: Union[str, os.PathLike]) -> None:
    """
    Download AE outlier detector.

    Parameters
    ----------
    url
        URL to fetch detector from.
    filepath
        Local directory to save detector to.
    """
    # Encoder/decoder networks first, then the AE checkpoint files.
    fetch_enc_dec(url, filepath)
    url_models = _join_url(url, 'model')
    model_path = Path(filepath).joinpath('model').resolve()
    for fname in ('checkpoint', 'ae.ckpt.index', 'ae.ckpt.data-00000-of-00001'):
        tf.keras.utils.get_file(
            model_path.joinpath(fname),
            _join_url(url_models, fname)
        )
def fetch_ad_ae(url: str, filepath: Union[str, os.PathLike], state_dict: dict) -> None:
    """
    Download AE adversarial detector.

    Parameters
    ----------
    url
        URL to fetch detector from.
    filepath
        Local directory to save detector to.
    state_dict
        Dictionary containing the detector's parameters.
    """
    # Encoder/decoder networks first, then the classifier and AE checkpoint files.
    fetch_enc_dec(url, filepath)
    url_models = _join_url(url, 'model')
    model_path = Path(filepath).joinpath('model').resolve()
    for fname in ('model.h5', 'checkpoint', 'ae.ckpt.index',
                  'ae.ckpt.data-00000-of-00002', 'ae.ckpt.data-00001-of-00002'):
        tf.keras.utils.get_file(
            model_path.joinpath(fname),
            _join_url(url_models, fname)
        )
    # One set of checkpoint files per hidden-layer KL-divergence model.
    hidden_layer_kld = state_dict['hidden_layer_kld']
    if hidden_layer_kld:
        for i in range(len(hidden_layer_kld)):
            hl = 'model_hl_' + str(i)
            for ext in ('.ckpt.index', '.ckpt.data-00000-of-00002', '.ckpt.data-00001-of-00002'):
                tf.keras.utils.get_file(
                    model_path.joinpath(hl + ext),
                    _join_url(url_models, hl + ext)
                )
def fetch_ad_md(url: str, filepath: Union[str, os.PathLike]) -> None:
    """
    Download model and distilled model.

    Parameters
    ----------
    url
        URL to fetch detector from.
    filepath
        Local directory to save detector to.
    """
    url_models = _join_url(url, 'model')
    model_path = Path(filepath).joinpath('model').resolve()
    if not model_path.is_dir():
        model_path.mkdir(parents=True, exist_ok=True)
    # Fetch the classifier and its distilled counterpart.
    for fname in ('model.h5', 'distilled_model.h5'):
        tf.keras.utils.get_file(
            model_path.joinpath(fname),
            _join_url(url_models, fname)
        )
def fetch_aegmm(url: str, filepath: Union[str, os.PathLike]) -> None:
    """
    Download AEGMM outlier detector.

    Parameters
    ----------
    url
        URL to fetch detector from.
    filepath
        Local directory to save detector to.
    """
    # Encoder/decoder networks first, then the GMM density net and checkpoint files.
    fetch_enc_dec(url, filepath)
    url_models = _join_url(url, 'model')
    model_path = Path(filepath).joinpath('model').resolve()
    for fname in ('gmm_density_net.h5', 'checkpoint',
                  'aegmm.ckpt.index', 'aegmm.ckpt.data-00000-of-00001'):
        tf.keras.utils.get_file(
            model_path.joinpath(fname),
            _join_url(url_models, fname)
        )
def fetch_vae(url: str, filepath: Union[str, os.PathLike]) -> None:
    """
    Download VAE outlier detector.

    Parameters
    ----------
    url
        URL to fetch detector from.
    filepath
        Local directory to save detector to.
    """
    # Encoder/decoder networks first, then the VAE checkpoint files.
    fetch_enc_dec(url, filepath)
    url_models = _join_url(url, 'model')
    model_path = Path(filepath).joinpath('model').resolve()
    for fname in ('checkpoint', 'vae.ckpt.index', 'vae.ckpt.data-00000-of-00001'):
        tf.keras.utils.get_file(
            model_path.joinpath(fname),
            _join_url(url_models, fname)
        )
def fetch_vaegmm(url: str, filepath: Union[str, os.PathLike]) -> None:
    """
    Download VAEGMM outlier detector.

    Parameters
    ----------
    url
        URL to fetch detector from.
    filepath
        Local directory to save detector to.
    """
    # Encoder/decoder networks first, then the GMM density net and checkpoint files.
    fetch_enc_dec(url, filepath)
    url_models = _join_url(url, 'model')
    model_path = Path(filepath).joinpath('model').resolve()
    for fname in ('gmm_density_net.h5', 'checkpoint',
                  'vaegmm.ckpt.index', 'vaegmm.ckpt.data-00000-of-00001'):
        tf.keras.utils.get_file(
            model_path.joinpath(fname),
            _join_url(url_models, fname)
        )
def fetch_seq2seq(url: str, filepath: Union[str, os.PathLike]) -> None:
    """
    Download sequence-to-sequence outlier detector.

    Parameters
    ----------
    url
        URL to fetch detector from.
    filepath
        Local directory to save detector to.
    """
    url_models = _join_url(url, 'model')
    model_path = Path(filepath).joinpath('model').resolve()
    if not model_path.is_dir():
        model_path.mkdir(parents=True, exist_ok=True)
    # seq2seq checkpoint files followed by the threshold network
    artefacts = [
        'checkpoint',
        'seq2seq.ckpt.index',
        'seq2seq.ckpt.data-00000-of-00001',
        'threshold_net.h5',
    ]
    for fname in artefacts:
        tf.keras.utils.get_file(
            model_path.joinpath(fname),
            _join_url(url_models, fname)
        )
def fetch_llr(url: str, filepath: Union[str, os.PathLike]) -> str:
    """
    Download Likelihood Ratio outlier detector.

    Parameters
    ----------
    url
        URL to fetch detector from.
    filepath
        Local directory to save detector to.

    Returns
    -------
    The kind of artefact that was fetched: 'weights' when separate weight files
    were available in the bucket, otherwise 'model' for full serialized models.
    """
    url_models = _join_url(url, 'model')
    model_path = Path(filepath).joinpath('model').resolve()
    if not model_path.is_dir():
        model_path.mkdir(parents=True, exist_ok=True)
    try:
        # preferred layout: separate semantic/background weight files
        for fname in ['model_s.h5', 'model_b.h5']:
            tf.keras.utils.get_file(
                model_path.joinpath(fname),
                _join_url(url_models, fname)
            )
        return 'weights'
    except Exception:
        # fall back to full serialized model + background model artefacts
        for fname in ['model.h5', 'model_background.h5']:
            tf.keras.utils.get_file(
                model_path.joinpath(fname),
                _join_url(url_models, fname)
            )
        return 'model'
def fetch_state_dict(url: str, filepath: Union[str, os.PathLike],
                     save_state_dict: bool = True) -> Tuple[dict, dict]:
    """
    Fetch the metadata and state/hyperparameter values of pre-trained detectors.

    Parameters
    ----------
    url
        URL to fetch detector from.
    filepath
        Local directory to save detector to.
    save_state_dict
        Whether to save the state dict locally.

    Returns
    -------
    Detector metadata and state.

    Raises
    ------
    RequestException
        If neither `meta.dill` nor `meta.pickle` (or the corresponding state file)
        could be fetched from `url`.
    """
    # Check if metadata stored as dill or pickle
    try:
        url_meta = _join_url(url, 'meta.dill')
        resp = requests.get(url_meta, timeout=TIMEOUT)
        resp.raise_for_status()
        suffix = '.dill'
    except RequestException:
        try:
            url_meta = _join_url(url, 'meta.pickle')
            resp = requests.get(url_meta, timeout=TIMEOUT)
            resp.raise_for_status()
            suffix = '.pickle'
        except RequestException:
            logger.exception('Timed out while searching for meta.dill or meta.pickle files at %s.', url)
            raise
    # Load metadata and state_dict
    meta = dill.load(BytesIO(resp.content))
    try:
        url_state = _join_url(url, meta['name'] + suffix)
        # timeout added for consistency with the meta fetches above; previously this
        # request had no timeout and could hang indefinitely
        resp = requests.get(url_state, timeout=TIMEOUT)
        resp.raise_for_status()
    except RequestException:
        logger.exception('Timed out while searching for corresponding state file at %s.', url)
        raise
    # Save state locally (always serialized with dill, regardless of the remote suffix)
    if save_state_dict:
        filepath = Path(filepath)
        with open(filepath.joinpath('meta.dill'), 'wb') as f:
            dill.dump(meta, f)
        with open(filepath.joinpath(meta['name'] + '.dill'), 'wb') as f:
            dill.dump(state_dict, f)
    return meta, state_dict
def fetch_detector(filepath: Union[str, os.PathLike],
                   detector_type: str,
                   dataset: str,
                   detector_name: str,
                   model: str = None) -> Data:
    """
    Fetch an outlier or adversarial detector from a google bucket, save it locally and return
    the initialised detector.

    Parameters
    ----------
    filepath
        Local directory to save detector to.
    detector_type
        `outlier` or `adversarial`.
    dataset
        Dataset of pre-trained detector. E.g. `kddcup`, `cifar10` or `ecg`.
    detector_name
        Name of the detector in the bucket.
    model
        Classification model used for adversarial detection.

    Returns
    -------
    Initialised pre-trained detector.
    """
    # create path (if needed)
    filepath = Path(filepath)
    if not filepath.is_dir():
        filepath.mkdir(parents=True, exist_ok=True)
        logger.warning('Directory {} does not exist and is now created.'.format(filepath))
    # create url of detector; the bucket layout differs between adversarial ('ad')
    # and outlier ('od') detectors
    url = 'https://storage.googleapis.com/seldon-models/alibi-detect/'
    if detector_type == 'adversarial':
        url = _join_url(url, ['ad', dataset, model, detector_name])
    elif detector_type == 'outlier':
        url = _join_url(url, ['od', detector_name, dataset])
    # fetch the metadata and state dict (also saved locally so load_detector can find them)
    meta, state_dict = fetch_state_dict(url, filepath, save_state_dict=True)
    # load detector: dispatch on the detector class name stored in the metadata and
    # download the corresponding model artefacts
    name = meta['name']
    kwargs: dict = {}
    if name == 'OutlierAE':
        fetch_ae(url, filepath)
    elif name == 'OutlierAEGMM':
        fetch_aegmm(url, filepath)
    elif name == 'OutlierVAE':
        fetch_vae(url, filepath)
    elif name == 'OutlierVAEGMM':
        fetch_vaegmm(url, filepath)
    elif name == 'OutlierSeq2Seq':
        fetch_seq2seq(url, filepath)
    elif name == 'AdversarialAE':
        fetch_ad_ae(url, filepath, state_dict)
        # resnet56 models need the custom `backend` object to deserialize
        if model == 'resnet56':
            kwargs = {'custom_objects': {'backend': backend}}
    elif name == 'ModelDistillation':
        fetch_ad_md(url, filepath)
        if model == 'resnet56':
            kwargs = {'custom_objects': {'backend': backend}}
    elif name == 'LLR':
        # LLR artefacts may be stored as weights or as full models; weights need
        # the default PixelCNN kwargs to rebuild the architecture
        model_type = fetch_llr(url, filepath)
        if model_type == 'weights':
            kwargs = get_pixelcnn_default_kwargs()
    detector = load_detector(filepath, **kwargs)
    return detector  # type: ignore[return-value] # load_detector returns drift detectors but `Data` doesn't inc. them
    # TODO - above type ignore can be removed once all detectors use the config based approach.
| 16,646 | 30.115888 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/losses.py | import torch
def hinge_loss(preds: torch.Tensor) -> torch.Tensor:
    """
    Hinge loss: L(pred) = max(0, 1 - pred), averaged over the first dimension.

    Parameters
    ----------
    preds
        Tensor of raw (unthresholded) predictions, shape [N, ...].

    Returns
    -------
    The hinge loss averaged over the N predictions (shape equals `preds.shape[1:]`).
    """
    # clamp_min replaces the manual (1 - preds) * (preds < 1) bool-mask multiply;
    # it is equivalent for finite preds and additionally avoids NaN for preds == +inf
    return (1 - preds).clamp_min(0.).sum(0) / len(preds)
| 213 | 25.75 | 59 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/kernels.py | import numpy as np
import torch
from torch import nn
from . import distance
from typing import Optional, Union, Callable
from alibi_detect.utils.frameworks import Framework
def sigma_median(x: torch.Tensor, y: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
    """
    Bandwidth estimation using the median heuristic :cite:t:`Gretton2012`.

    Parameters
    ----------
    x
        Tensor of instances with dimension [Nx, features].
    y
        Tensor of instances with dimension [Ny, features].
    dist
        Tensor with dimensions [Nx, Ny], containing the pairwise distances between `x` and `y`.

    Returns
    -------
    The computed bandwidth, `sigma`.
    """
    # When x and y are the same sample, the first min(Nx, Ny) entries of the
    # distance matrix are guaranteed zeros (self-distances); skip them when
    # locating the median of the remaining entries.
    n_overlap = min(x.shape[0], y.shape[0])
    same_sample = (x[:n_overlap] == y[:n_overlap]).all() and x.shape == y.shape
    if not same_sample:
        n_overlap = 0
    idx_median = n_overlap + (np.prod(dist.shape) - n_overlap) // 2 - 1
    dist_sorted = dist.flatten().sort().values
    sigma = (.5 * dist_sorted[int(idx_median)].unsqueeze(dim=-1)) ** .5
    return sigma
class GaussianRBF(nn.Module):
    def __init__(
        self,
        sigma: Optional[torch.Tensor] = None,
        init_sigma_fn: Optional[Callable] = None,
        trainable: bool = False
    ) -> None:
        """
        Gaussian RBF kernel: k(x,y) = exp(-(1/(2*sigma^2)||x-y||^2). A forward pass takes
        a batch of instances x [Nx, features] and y [Ny, features] and returns the kernel
        matrix [Nx, Ny].

        Parameters
        ----------
        sigma
            Bandwidth used for the kernel. Needn't be specified if being inferred or trained.
            Can pass multiple values to eval kernel with and then average.
        init_sigma_fn
            Function used to compute the bandwidth `sigma`. Used when `sigma` is to be inferred.
            The function's signature should match :py:func:`~alibi_detect.utils.pytorch.kernels.sigma_median`,
            meaning that it should take in the tensors `x`, `y` and `dist` and return `sigma`. If `None`, it is set to
            :func:`~alibi_detect.utils.pytorch.kernels.sigma_median`.
        trainable
            Whether or not to track gradients w.r.t. `sigma` to allow it to be trained.
        """
        super().__init__()
        init_sigma_fn = sigma_median if init_sigma_fn is None else init_sigma_fn
        self.config = {'sigma': sigma, 'trainable': trainable, 'init_sigma_fn': init_sigma_fn}
        # sigma is stored on a log scale so a trainable bandwidth stays positive
        if sigma is None:
            # bandwidth deferred: inferred via init_sigma_fn on the first forward pass
            self.log_sigma = nn.Parameter(torch.empty(1), requires_grad=trainable)
            self.init_required = True
        else:
            sigma = sigma.reshape(-1)  # [Ns,]
            self.log_sigma = nn.Parameter(sigma.log(), requires_grad=trainable)
            self.init_required = False
        self.init_sigma_fn = init_sigma_fn
        self.trainable = trainable

    @property
    def sigma(self) -> torch.Tensor:
        # Bandwidth(s) recovered from the log-scale parameter.
        return self.log_sigma.exp()

    def forward(self, x: Union[np.ndarray, torch.Tensor], y: Union[np.ndarray, torch.Tensor],
                infer_sigma: bool = False) -> torch.Tensor:
        """
        Compute the kernel matrix between `x` [Nx, features] and `y` [Ny, features],
        inferring the bandwidth first if required. When multiple bandwidths are
        configured the resulting kernel matrices are averaged.
        """
        x, y = torch.as_tensor(x), torch.as_tensor(y)
        dist = distance.squared_pairwise_distance(x.flatten(1), y.flatten(1))  # [Nx, Ny]
        if infer_sigma or self.init_required:
            if self.trainable and infer_sigma:
                raise ValueError("Gradients cannot be computed w.r.t. an inferred sigma value")
            sigma = self.init_sigma_fn(x, y, dist)
            with torch.no_grad():
                # write the inferred value into the parameter without tracking gradients
                self.log_sigma.copy_(sigma.log().clone())
            self.init_required = False
        gamma = 1. / (2. * self.sigma ** 2)  # [Ns,]
        # TODO: do matrix multiplication after all?
        kernel_mat = torch.exp(- torch.cat([(g * dist)[None, :, :] for g in gamma], dim=0))  # [Ns, Nx, Ny]
        return kernel_mat.mean(dim=0)  # [Nx, Ny]

    def get_config(self) -> dict:
        """
        Returns a serializable config dict (excluding the input_sigma_fn, which is serialized in alibi_detect.saving).
        """
        cfg = self.config.copy()
        if isinstance(cfg['sigma'], torch.Tensor):
            # tensors are not JSON/TOML-serializable; store as a plain list
            cfg['sigma'] = cfg['sigma'].detach().cpu().numpy().tolist()
        cfg.update({'flavour': Framework.PYTORCH.value})
        return cfg

    @classmethod
    def from_config(cls, config):
        """
        Instantiates a kernel from a config dictionary.

        Parameters
        ----------
        config
            A kernel config dictionary.
        """
        config.pop('flavour')
        return cls(**config)
class DeepKernel(nn.Module):
    """
    Computes similarities as k(x,y) = (1-eps)*k_a(proj(x), proj(y)) + eps*k_b(x,y).
    A forward pass takes a batch of instances x [Nx, features] and y [Ny, features] and returns
    the kernel matrix [Nx, Ny].

    Parameters
    ----------
    proj
        The projection to be applied to the inputs before applying kernel_a
    kernel_a
        The kernel to apply to the projected inputs. Defaults to a Gaussian RBF with trainable bandwidth.
    kernel_b
        The kernel to apply to the raw inputs. Defaults to a Gaussian RBF with trainable bandwidth.
        Set to None in order to use only the deep component (i.e. eps=0).
    eps
        The proportion (in [0,1]) of weight to assign to the kernel applied to raw inputs. This can be
        either specified or set to 'trainable'. Only relevant if kernel_b is not None.
    """
    def __init__(
        self,
        proj: nn.Module,
        kernel_a: Union[nn.Module, str] = 'rbf',
        kernel_b: Optional[Union[nn.Module, str]] = 'rbf',
        eps: Union[float, str] = 'trainable'
    ) -> None:
        super().__init__()
        # config stores the *original* arguments (before 'rbf' expansion) for serialization
        self.config = {'proj': proj, 'kernel_a': kernel_a, 'kernel_b': kernel_b, 'eps': eps}
        # 'rbf' shorthand expands to a trainable Gaussian RBF kernel
        self.kernel_a = GaussianRBF(trainable=True) if kernel_a == 'rbf' else kernel_a
        self.kernel_b = GaussianRBF(trainable=True) if kernel_b == 'rbf' else kernel_b
        self.proj = proj
        if self.kernel_b is not None:
            self._init_eps(eps)

    def _init_eps(self, eps: Union[float, str]) -> None:
        # eps is stored as a logit so that sigmoid keeps it inside (0, 1)
        if eps == 'trainable':
            self.logit_eps = nn.Parameter(torch.tensor(0.))
            return
        if not isinstance(eps, float):
            raise NotImplementedError("eps should be 'trainable' or a float in (0,1)")
        if not 0 < eps < 1:
            raise ValueError("eps should be in (0,1)")
        self.logit_eps = nn.Parameter(torch.tensor(eps).logit(), requires_grad=False)

    @property
    def eps(self) -> torch.Tensor:
        # Weight of the raw-input kernel; fixed at 0 when no kernel_b is configured.
        if self.kernel_b is None:
            return torch.tensor(0.)
        return self.logit_eps.sigmoid()

    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        x_proj, y_proj = self.proj(x), self.proj(y)
        similarity = self.kernel_a(x_proj, y_proj)  # type: ignore[operator]
        if self.kernel_b is None:
            return similarity
        return (1 - self.eps) * similarity + self.eps * self.kernel_b(x, y)  # type: ignore[operator]

    def get_config(self) -> dict:
        return dict(self.config)

    @classmethod
    def from_config(cls, config):
        return cls(**config)
| 7,088 | 37.737705 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/prediction.py | from functools import partial
from typing import Callable, Optional, Type, Union
import numpy as np
import torch
import torch.nn as nn
from alibi_detect.utils.pytorch.misc import get_device
from alibi_detect.utils.prediction import tokenize_transformer
def predict_batch(x: Union[list, np.ndarray, torch.Tensor], model: Union[Callable, nn.Module, nn.Sequential],
                  device: Optional[torch.device] = None, batch_size: int = int(1e10), preprocess_fn: Callable = None,
                  dtype: Union[Type[np.generic], torch.dtype] = np.float32) -> Union[np.ndarray, torch.Tensor, tuple]:
    """
    Make batch predictions on a model.

    Parameters
    ----------
    x
        Batch of instances.
    model
        PyTorch model.
    device
        Device type used. The default None tries to use the GPU and falls back on CPU if needed.
        Can be specified by passing either torch.device('cuda') or torch.device('cpu').
    batch_size
        Batch size used during prediction.
    preprocess_fn
        Optional preprocessing function for each batch.
    dtype
        Model output type, e.g. np.float32 or torch.float32.

    Returns
    -------
    Numpy array, torch tensor or tuples of those with model outputs.
    """
    device = get_device(device)
    if isinstance(x, np.ndarray):
        x = torch.from_numpy(x)
    n = len(x)
    n_minibatch = int(np.ceil(n / batch_size))
    # a torch dtype requests torch outputs; any numpy dtype requests numpy outputs
    return_np = not isinstance(dtype, torch.dtype)
    return_list = False
    preds: Union[list, tuple] = []
    with torch.no_grad():
        for i in range(n_minibatch):
            istart, istop = i * batch_size, min((i + 1) * batch_size, n)
            x_batch = x[istart:istop]
            if isinstance(preprocess_fn, Callable):  # type: ignore
                x_batch = preprocess_fn(x_batch)
            preds_tmp = model(x_batch.to(device))  # type: ignore
            if isinstance(preds_tmp, (list, tuple)):
                # multi-output model: collect each output in its own list
                if len(preds) == 0:  # init tuple with lists to store predictions
                    preds = tuple([] for _ in range(len(preds_tmp)))
                    return_list = isinstance(preds_tmp, list)
                for j, p in enumerate(preds_tmp):
                    # move GPU tensors to CPU before accumulating / converting to numpy
                    if device.type == 'cuda' and isinstance(p, torch.Tensor):
                        p = p.cpu()
                    preds[j].append(p if not return_np or isinstance(p, np.ndarray) else p.numpy())
            elif isinstance(preds_tmp, (np.ndarray, torch.Tensor)):
                if device.type == 'cuda' and isinstance(preds_tmp, torch.Tensor):
                    preds_tmp = preds_tmp.cpu()
                preds.append(preds_tmp if not return_np or isinstance(preds_tmp, np.ndarray)  # type: ignore
                             else preds_tmp.numpy())
            else:
                raise TypeError(f'Model output type {type(preds_tmp)} not supported. The model output '
                                f'type needs to be one of list, tuple, np.ndarray or torch.Tensor.')
    # concatenate the per-batch results back into full-size outputs
    concat = partial(np.concatenate, axis=0) if return_np else partial(torch.cat, dim=0)  # type: ignore[arg-type]
    out: Union[tuple, np.ndarray, torch.Tensor] = tuple(concat(p) for p in preds) if isinstance(preds, tuple) \
        else concat(preds)
    if return_list:
        # mirror the model's own output container type (list rather than tuple)
        out = list(out)  # type: ignore[assignment]
    return out  # TODO: update return type with list
def predict_batch_transformer(x: Union[list, np.ndarray], model: Union[nn.Module, nn.Sequential],
                              tokenizer: Callable, max_len: int, device: Optional[torch.device] = None,
                              batch_size: int = int(1e10), dtype: Union[Type[np.generic], torch.dtype] = np.float32) \
        -> Union[np.ndarray, torch.Tensor, tuple]:
    """
    Make batch predictions using a transformers tokenizer and model.

    Parameters
    ----------
    x
        Batch of instances.
    model
        PyTorch model.
    tokenizer
        Tokenizer for model.
    max_len
        Max sequence length for tokens.
    device
        Device type used. The default None tries to use the GPU and falls back on CPU if needed.
        Can be specified by passing either torch.device('cuda') or torch.device('cpu').
    batch_size
        Batch size used during prediction.
    dtype
        Model output type, e.g. np.float32 or torch.float32.

    Returns
    -------
    Numpy array or torch tensor with model outputs.
    """
    # tokenize each batch before it is handed to the model
    tokenize = partial(tokenize_transformer, tokenizer=tokenizer, max_len=max_len, backend='pt')
    return predict_batch(
        x, model, device=device, preprocess_fn=tokenize, batch_size=batch_size, dtype=dtype
    )
| 4,639 | 42.364486 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/misc.py | import logging
from typing import Optional, Union, Type
import torch
logger = logging.getLogger(__name__)
def zero_diag(mat: torch.Tensor) -> torch.Tensor:
    """
    Set the diagonal of a matrix to 0

    Parameters
    ----------
    mat
        A 2D square matrix

    Returns
    -------
    A 2D square matrix with zeros along the diagonal
    """
    # subtract a diagonal matrix built from mat's own diagonal; mat is not modified
    diagonal_part = torch.diag(torch.diagonal(mat))
    return mat - diagonal_part
def quantile(sample: torch.Tensor, p: float, type: int = 7, sorted: bool = False) -> float:
    """
    Estimate a desired quantile of a univariate distribution from a vector of samples

    Parameters
    ----------
    sample
        A 1D vector of values
    p
        The desired quantile in (0,1)
    type
        The method for computing the quantile. One of {6, 7, 8}.
        See https://wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample
    sorted
        Whether or not the vector is already sorted into ascending order

    Returns
    -------
    An estimate of the quantile

    Raises
    ------
    ValueError
        If `sample` is not 1D, if the `p`-quantile cannot be reliably estimated
        from `len(sample)` samples, or if `type` is not one of {6, 7, 8}.
    """
    # NOTE: `type` and `sorted` shadow builtins but are kept for backwards
    # compatibility with keyword callers.
    N = len(sample)
    if len(sample.shape) != 1:
        raise ValueError("Quantile estimation only supports vectors of univariate samples.")
    if not 1/N <= p <= (N-1)/N:
        raise ValueError(f"The {p}-quantile should not be estimated using only {N} samples.")
    sorted_sample = sample if sorted else sample.sort().values
    # Plotting-position `h` for quantile types 6/7/8 (Hyndman & Fan, 1996).
    if type == 6:
        h = (N+1)*p
    elif type == 7:
        h = (N-1)*p + 1
    elif type == 8:
        h = (N+1/3)*p + 1/3
    else:
        # Previously an unsupported `type` fell through to an UnboundLocalError on `h`.
        raise ValueError(f"type {type} is not supported; expected one of 6, 7 or 8.")
    h_floor = int(h)
    # Linear interpolation between the two order statistics bracketing h.
    quantile = sorted_sample[h_floor-1]
    if h_floor != h:
        quantile += (h - h_floor)*(sorted_sample[h_floor]-sorted_sample[h_floor-1])
    return float(quantile)
def get_device(device: Optional[Union[str, torch.device]] = None) -> torch.device:
    """
    Instantiates a PyTorch device object.

    Parameters
    ----------
    device
        Either `None`, a str ('gpu' or 'cpu') indicating the device to choose, or an already instantiated device
        object. If `None`, the GPU is selected if it is detected, otherwise the CPU is used as a fallback.

    Returns
    -------
    The instantiated device object.
    """
    # Pass through an already-instantiated device unchanged.
    if isinstance(device, torch.device):
        return device
    # `None` or an explicit GPU request: prefer CUDA, falling back on CPU.
    if device is None or device.lower() in ('gpu', 'cuda'):
        torch_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if torch_device.type == 'cpu':
            logger.warning('No GPU detected, fall back on CPU.')
        return torch_device
    # Anything else resolves to CPU; warn on unrecognised requests.
    if device.lower() != 'cpu':
        logger.warning('Requested device not recognised, fall back on CPU.')
    return torch.device('cpu')
def get_optimizer(name: str = 'Adam') -> Type[torch.optim.Optimizer]:
    """
    Get an optimizer class from its name.

    Parameters
    ----------
    name
        Name of the optimizer.

    Returns
    -------
    The optimizer class.
    """
    # look the class up on torch.optim by attribute name
    optimizer_cls = getattr(torch.optim, name, None)
    if optimizer_cls is None:
        raise NotImplementedError(f"Optimizer {name} not implemented.")
    return optimizer_cls
| 3,133 | 26.017241 | 112 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/data.py | import numpy as np
import torch
from typing import Tuple, Union
Indexable = Union[np.ndarray, torch.Tensor, list]
class TorchDataset(torch.utils.data.Dataset):
    """Dataset wrapping one or more equal-length indexable collections (arrays, tensors or lists)."""

    def __init__(self, *indexables: Union[Tuple[Indexable, ...], Indexable]) -> None:
        self.indexables = indexables

    def __getitem__(self, idx: int) -> Union[Tuple[Indexable, ...], Indexable]:
        # Return a tuple of per-collection items, unwrapped when only one collection was given.
        items = tuple(ind[idx] for ind in self.indexables)
        if len(items) == 1:
            return items[0]
        return items

    def __len__(self) -> int:
        # Length is taken from the first collection.
        return len(self.indexables[0])
| 565 | 30.444444 | 85 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/__init__.py | from alibi_detect.utils.missing_optional_dependency import import_optional
TorchDataset = import_optional(
'alibi_detect.utils.pytorch.data',
names=['TorchDataset']
)
mmd2, mmd2_from_kernel_matrix, squared_pairwise_distance, permed_lsdds, batch_compute_kernel_matrix = import_optional(
'alibi_detect.utils.pytorch.distance',
names=['mmd2', 'mmd2_from_kernel_matrix', 'squared_pairwise_distance',
'permed_lsdds', 'batch_compute_kernel_matrix']
)
GaussianRBF, DeepKernel = import_optional(
'alibi_detect.utils.pytorch.kernels',
names=['GaussianRBF', 'DeepKernel']
)
predict_batch, predict_batch_transformer = import_optional(
'alibi_detect.utils.pytorch.prediction',
names=['predict_batch', 'predict_batch_transformer']
)
get_device, quantile, zero_diag = import_optional(
'alibi_detect.utils.pytorch.misc',
names=['get_device', 'quantile', 'zero_diag']
)
__all__ = [
"batch_compute_kernel_matrix",
"mmd2",
"mmd2_from_kernel_matrix",
"squared_pairwise_distance",
"GaussianRBF",
"DeepKernel",
"permed_lsdds",
"predict_batch",
"predict_batch_transformer",
"get_device",
"quantile",
"zero_diag",
"TorchDataset"
]
| 1,218 | 26.088889 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/distance.py | import logging
import torch
from torch import nn
import numpy as np
from typing import Callable, List, Tuple, Optional, Union
logger = logging.getLogger(__name__)
@torch.jit.script
def squared_pairwise_distance(x: torch.Tensor, y: torch.Tensor, a_min: float = 1e-30) -> torch.Tensor:
    """
    PyTorch pairwise squared Euclidean distance between samples x and y.

    Parameters
    ----------
    x
        Batch of instances of shape [Nx, features].
    y
        Batch of instances of shape [Ny, features].
    a_min
        Lower bound to clip distance values.

    Returns
    -------
    Pairwise squared Euclidean distance [Nx, Ny].
    """
    # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y, computed with a single addmm call
    x_norm2 = x.pow(2).sum(dim=-1, keepdim=True)
    y_norm2 = y.pow(2).sum(dim=-1, keepdim=True)
    result = torch.addmm(y_norm2.transpose(-2, -1), x, y.transpose(-2, -1), alpha=-2).add_(x_norm2)
    # clamp guards against tiny negative values from floating point error
    return result.clamp_min_(a_min)
def batch_compute_kernel_matrix(
        x: Union[list, np.ndarray, torch.Tensor],
        y: Union[list, np.ndarray, torch.Tensor],
        kernel: Union[nn.Module, nn.Sequential],
        device: Optional[torch.device] = None,
        batch_size: int = int(1e10),
        preprocess_fn: Optional[Callable[..., torch.Tensor]] = None,
) -> torch.Tensor:
    """
    Compute the kernel matrix between x and y by filling in blocks of size
    batch_size x batch_size at a time.

    Parameters
    ----------
    x
        Reference set.
    y
        Test set.
    kernel
        PyTorch module.
    device
        Device type used. The default None tries to use the GPU and falls back on CPU if needed.
        Can be specified by passing either torch.device('cuda') or torch.device('cpu').
    batch_size
        Batch size used during prediction.
    preprocess_fn
        Optional preprocessing function for each batch.

    Returns
    -------
    Kernel matrix in the form of a torch tensor

    Raises
    ------
    ValueError
        If `x` and `y` are not of the same type.
    """
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if type(x) is not type(y):  # identity comparison is the idiomatic exact-type check
        raise ValueError("x and y should be of the same type")
    if isinstance(x, np.ndarray):
        x, y = torch.from_numpy(x), torch.from_numpy(y)
    n_x, n_y = len(x), len(y)
    n_batch_x, n_batch_y = int(np.ceil(n_x / batch_size)), int(np.ceil(n_y / batch_size))
    with torch.no_grad():  # kernel evaluation only; no gradients needed
        k_is: List[torch.Tensor] = []
        for i in range(n_batch_x):
            istart, istop = i * batch_size, min((i + 1) * batch_size, n_x)
            x_batch = x[istart:istop]
            if preprocess_fn is not None:
                x_batch = preprocess_fn(x_batch)
            x_batch = x_batch.to(device)  # type: ignore
            k_ijs: List[torch.Tensor] = []
            for j in range(n_batch_y):
                jstart, jstop = j * batch_size, min((j + 1) * batch_size, n_y)
                y_batch = y[jstart:jstop]
                if preprocess_fn is not None:
                    y_batch = preprocess_fn(y_batch)
                y_batch = y_batch.to(device)  # type: ignore
                # blocks are moved to CPU so the full matrix is assembled host-side
                k_ijs.append(kernel(x_batch, y_batch).cpu())
            k_is.append(torch.cat(k_ijs, 1))
        k_mat = torch.cat(k_is, 0)
    return k_mat
def mmd2_from_kernel_matrix(kernel_mat: torch.Tensor, m: int, permute: bool = False,
                            zero_diag: bool = True) -> torch.Tensor:
    """
    Compute maximum mean discrepancy (MMD^2) between 2 samples x and y from the
    full kernel matrix between the samples.

    Parameters
    ----------
    kernel_mat
        Kernel matrix between samples x and y.
    m
        Number of instances in y.
    permute
        Whether to permute the row indices. Used for permutation tests.
    zero_diag
        Whether to zero out the diagonal of the kernel matrix.

    Returns
    -------
    MMD^2 between the samples from the kernel matrix.
    """
    n = kernel_mat.shape[0] - m
    if zero_diag:
        # remove self-similarities so the unbiased estimator can be used
        kernel_mat = kernel_mat - torch.diag(kernel_mat.diag())
    if permute:
        perm = torch.randperm(kernel_mat.shape[0])
        kernel_mat = kernel_mat[perm][:, perm]
    # block structure: x occupies the first n rows/cols, y the last m
    k_xx = kernel_mat[:-m, :-m]
    k_yy = kernel_mat[-m:, -m:]
    k_xy = kernel_mat[-m:, :-m]
    c_xx, c_yy = 1 / (n * (n - 1)), 1 / (m * (m - 1))
    return c_xx * k_xx.sum() + c_yy * k_yy.sum() - 2. * k_xy.mean()
def mmd2(x: torch.Tensor, y: torch.Tensor, kernel: Callable) -> torch.Tensor:
    """
    Compute MMD^2 between 2 samples.

    Parameters
    ----------
    x
        Batch of instances of shape [Nx, features].
    y
        Batch of instances of shape [Ny, features].
    kernel
        Kernel function.

    Returns
    -------
    MMD^2 between the samples x and y as a scalar tensor.
    """
    # NOTE: return annotation corrected from `float` — the expression below is
    # composed of tensor ops and yields a 0-dim torch.Tensor, not a Python float.
    n, m = x.shape[0], y.shape[0]
    c_xx, c_yy = 1 / (n * (n - 1)), 1 / (m * (m - 1))
    k_xx, k_yy, k_xy = kernel(x, x), kernel(y, y), kernel(x, y)
    # subtracting the trace removes self-similarities (unbiased estimator)
    return c_xx * (k_xx.sum() - k_xx.trace()) + c_yy * (k_yy.sum() - k_yy.trace()) - 2. * k_xy.mean()
def permed_lsdds(
    k_all_c: torch.Tensor,
    x_perms: List[torch.Tensor],
    y_perms: List[torch.Tensor],
    H: torch.Tensor,
    H_lam_inv: Optional[torch.Tensor] = None,
    lam_rd_max: float = 0.2,
    return_unpermed: bool = False,
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
    """
    Compute LSDD estimates from kernel matrix across various ref and test window samples

    Parameters
    ----------
    k_all_c
        Kernel matrix of similarities between all samples and the kernel centers.
    x_perms
        List of B reference window index vectors
    y_perms
        List of B test window index vectors
    H
        Special (scaled) kernel matrix of similarities between kernel centers
    H_lam_inv
        Function of H corresponding to a particular regulariation parameter lambda.
        See Eqn 11 of Bu et al. (2017)
    lam_rd_max
        The maximum relative difference between two estimates of LSDD that the regularization parameter
        lambda is allowed to cause. Defaults to 0.2. Only relavent if H_lam_inv is not supplied.
    return_unpermed
        Whether or not to return value corresponding to unpermed order defined by k_all_c

    Returns
    -------
    Vector of B LSDD estimates for each permutation, H_lam_inv which may have been inferred, and optionally \
    the unpermed LSDD estimate.
    """
    # Compute (for each bootstrap) the average distance to each kernel center (Eqn 7)
    k_xc_perms = torch.stack([k_all_c[x_inds] for x_inds in x_perms], 0)
    k_yc_perms = torch.stack([k_all_c[y_inds] for y_inds in y_perms], 0)
    h_perms = k_xc_perms.mean(1) - k_yc_perms.mean(1)
    if H_lam_inv is None:
        # We perform the initialisation for multiple candidate lambda values and pick the largest
        # one for which the relative difference (RD) between two difference estimates is below lambda_rd_max.
        # See Appendix A
        candidate_lambdas = [1/(4**i) for i in range(10)]  # TODO: More principled selection
        # (H + lambda*I) for every candidate lambda, inverted in a single batched call
        H_plus_lams = torch.stack(
            [H+torch.eye(H.shape[0], device=H.device)*can_lam for can_lam in candidate_lambdas], 0
        )
        H_plus_lam_invs = torch.inverse(H_plus_lams)
        H_plus_lam_invs = H_plus_lam_invs.permute(1, 2, 0)  # put lambdas in final axis
        omegas = torch.einsum('jkl,bk->bjl', H_plus_lam_invs, h_perms)  # (Eqn 8)
        # two alternative LSDD estimates whose agreement measures over-regularisation
        h_omegas = torch.einsum('bj,bjl->bl', h_perms, omegas)
        omega_H_omegas = torch.einsum('bkl,bkl->bl', torch.einsum('bjl,jk->bkl', omegas, H), omegas)
        rds = (1 - (omega_H_omegas/h_omegas)).mean(0)
        less_than_rd_inds = (rds < lam_rd_max).nonzero()
        if len(less_than_rd_inds) == 0:
            # no lambda satisfies the RD criterion; diagnose the likely cause
            repeats = k_all_c.shape[0] - torch.unique(k_all_c, dim=0).shape[0]
            if repeats > 0:
                msg = "Too many repeat instances for LSDD-based detection. \
Try using MMD-based detection instead"
            else:
                msg = "Unknown error. Try using MMD-based detection instead"
            raise ValueError(msg)
        # largest acceptable lambda (candidates are in decreasing order)
        lam_index = less_than_rd_inds[0]
        lam = candidate_lambdas[lam_index]
        logger.info(f"Using lambda value of {lam:.2g} with RD of {float(rds[lam_index]):.2g}")
        H_plus_lam_inv = H_plus_lam_invs[:, :, lam_index.item()]
        H_lam_inv = 2*H_plus_lam_inv - (H_plus_lam_inv.transpose(0, 1) @ H @ H_plus_lam_inv)  # (below Eqn 11)
    # Now to compute an LSDD estimate for each permutation
    lsdd_perms = (h_perms * (H_lam_inv @ h_perms.transpose(0, 1)).transpose(0, 1)).sum(-1)  # (Eqn 11)
    if return_unpermed:
        # same estimate for the original (unshuffled) split of k_all_c
        n_x = x_perms[0].shape[0]
        h = k_all_c[:n_x].mean(0) - k_all_c[n_x:].mean(0)
        lsdd_unpermed = (h[None, :] * (H_lam_inv @ h[:, None]).transpose(0, 1)).sum()
        return lsdd_perms, H_lam_inv, lsdd_unpermed
    else:
        return lsdd_perms, H_lam_inv
| 8,767 | 36.470085 | 110 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/tests/test_data_pt.py | import numpy as np
import pytest
from alibi_detect.utils.pytorch.data import TorchDataset
# test on numpy array and list
n, f = 100, 5  # number of instances, number of features
shape = (n, f)
tests_ds = [list, np.ndarray]  # container types to wrap the inputs in
n_tests_ds = len(tests_ds)


@pytest.fixture
def ds_params(request):
    # indirect parametrization: map the test index onto a container type
    return tests_ds[request.param]


@pytest.mark.parametrize('ds_params', list(range(n_tests_ds)), indirect=True)
def test_torchdataset(ds_params):
    # TorchDataset should yield per-instance (x, y) pairs for both list and array inputs
    xtype = ds_params
    x = np.random.randn(*shape)
    y = np.random.randn(*(n,))
    if xtype == list:
        x = list(x)
    ds = TorchDataset(x, y)
    for step, data in enumerate(ds):
        pass
    # final item has the expected per-instance shapes and length matches the dataset
    assert data[0].shape == (f,) and data[1].shape == ()
    assert step == len(ds) - 1
| 692 | 22.896552 | 77 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/tests/test_distance_pt.py | import numpy as np
from itertools import product
import pytest
import torch
from alibi_detect.utils.pytorch import GaussianRBF, mmd2, mmd2_from_kernel_matrix, permed_lsdds
from alibi_detect.utils.pytorch import squared_pairwise_distance, batch_compute_kernel_matrix
n_features = [2, 5]
n_instances = [(100, 100), (100, 75)]
tests_pairwise = list(product(n_features, n_instances))
n_tests_pairwise = len(tests_pairwise)


@pytest.fixture
def pairwise_params(request):
    # indirect parametrization over (n_features, n_instances) combinations
    return tests_pairwise[request.param]


@pytest.mark.parametrize('pairwise_params', list(range(n_tests_pairwise)), indirect=True)
def test_pairwise(pairwise_params):
    # squared_pairwise_distance: check output shapes and near-zero self-distances
    n_features, n_instances = pairwise_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    np.random.seed(0)
    x = torch.from_numpy(np.random.random(xshape).astype('float32'))
    y = torch.from_numpy(np.random.random(yshape).astype('float32'))
    dist_xx = squared_pairwise_distance(x, x).numpy()
    dist_xy = squared_pairwise_distance(x, y).numpy()
    assert dist_xx.shape == (xshape[0], xshape[0])
    assert dist_xy.shape == n_instances
    # d(x, x) diagonal should be (numerically) zero
    np.testing.assert_almost_equal(dist_xx.trace(), 0., decimal=5)
tests_mmd = tests_pairwise  # reuse the pairwise parameter grid
n_tests_mmd = n_tests_pairwise


@pytest.fixture
def mmd_params(request):
    return tests_mmd[request.param]


@pytest.mark.parametrize('mmd_params', list(range(n_tests_mmd)), indirect=True)
def test_mmd(mmd_params):
    # MMD^2 between two different samples should exceed MMD^2 of a sample with itself
    n_features, n_instances = mmd_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    np.random.seed(0)
    x = torch.from_numpy(np.random.random(xshape).astype('float32'))
    y = torch.from_numpy(np.random.random(yshape).astype('float32'))
    mmd_xx = mmd2(x, x, kernel=GaussianRBF(sigma=torch.ones(1)))
    mmd_xy = mmd2(x, y, kernel=GaussianRBF(sigma=torch.ones(1)))
    assert mmd_xy > mmd_xx
n_features = [2, 5]
n_instances = [(100, 100), (100, 75)]
batch_size = [1, 5]
tests_bckm = list(product(n_features, n_instances, batch_size))
n_tests_bckm = len(tests_bckm)


@pytest.fixture
def bckm_params(request):
    return tests_bckm[request.param]


@pytest.mark.parametrize('bckm_params', list(range(n_tests_bckm)), indirect=True)
def test_bckm(bckm_params):
    # batched kernel-matrix computation should match the single-shot kernel call
    n_features, n_instances, batch_size = bckm_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    np.random.seed(0)
    x = torch.from_numpy(np.random.random(xshape).astype('float32'))
    y = torch.from_numpy(np.random.random(yshape).astype('float32'))
    kernel = GaussianRBF(sigma=torch.tensor(1.))
    kernel_mat = kernel(x, y).detach().numpy()
    bc_kernel_mat = batch_compute_kernel_matrix(x, y, kernel, batch_size=batch_size).detach().numpy()
    np.testing.assert_almost_equal(kernel_mat, bc_kernel_mat, decimal=6)
n = [10, 100]
m = [10, 100]
permute = [True, False]
zero_diag = [True, False]
tests_mmd_from_kernel_matrix = list(product(n, m, permute, zero_diag))
n_tests_mmd_from_kernel_matrix = len(tests_mmd_from_kernel_matrix)


@pytest.fixture
def mmd_from_kernel_matrix_params(request):
    return tests_mmd_from_kernel_matrix[request.param]


@pytest.mark.parametrize('mmd_from_kernel_matrix_params',
                         list(range(n_tests_mmd_from_kernel_matrix)), indirect=True)
def test_mmd_from_kernel_matrix(mmd_from_kernel_matrix_params):
    # forcing maximal cross-similarity (k_xy = 1) should reduce the MMD^2 estimate
    n, m, permute, zero_diag = mmd_from_kernel_matrix_params
    n_tot = n + m
    shape = (n_tot, n_tot)
    kernel_mat = np.random.uniform(0, 1, size=shape)
    kernel_mat_2 = kernel_mat.copy()
    kernel_mat_2[-m:, :-m] = 1.
    kernel_mat_2[:-m, -m:] = 1.
    kernel_mat = torch.from_numpy(kernel_mat)
    kernel_mat_2 = torch.from_numpy(kernel_mat_2)
    if not zero_diag:
        # pre-zero the diagonals so both configurations compare like for like
        kernel_mat -= torch.diag(kernel_mat.diag())
        kernel_mat_2 -= torch.diag(kernel_mat_2.diag())
    mmd = mmd2_from_kernel_matrix(kernel_mat, m, permute=permute, zero_diag=zero_diag)
    mmd_2 = mmd2_from_kernel_matrix(kernel_mat_2, m, permute=permute, zero_diag=zero_diag)
    if not permute:
        # deterministic check only applies when rows are not shuffled
        assert mmd_2.numpy() < mmd.numpy()
# Parameter grid for permed_lsdds: window sizes, dimension, #permutations, #kernel centers.
n = [10]
m = [10]
d = [3]
B = [20]
n_kcs = [5]
tests_permed_lsdds = list(product(n, m, d, B, n_kcs))
n_tests_permed_lsdds = len(tests_permed_lsdds)


@pytest.fixture
def permed_lsdds_params(request):
    # Indirect parametrization: request.param indexes into tests_permed_lsdds.
    return tests_permed_lsdds[request.param]


@pytest.mark.parametrize('permed_lsdds_params',
                         list(range(n_tests_permed_lsdds)), indirect=True)
def test_permed_lsdds(permed_lsdds_params):
    """Under extreme drift the unpermuted LSDD should dominate every permuted estimate."""
    n, m, d, B, n_kcs = permed_lsdds_params
    kcs = torch.randn(n_kcs, d)  # kernel centers
    x_ref = torch.randn(n, d)
    x_cur = 10 + 0.2*torch.randn(m, d)  # strongly shifted test window
    x_full = torch.cat([x_ref, x_cur], axis=0)
    sigma = torch.tensor((1.,))
    k_all_c = GaussianRBF(sigma)(x_full, kcs)
    H = GaussianRBF(np.sqrt(2.)*sigma)(kcs, kcs)
    perms = [torch.randperm(n+m) for _ in range(B)]
    x_perms = [perm[:n] for perm in perms]
    y_perms = [perm[n:] for perm in perms]
    lsdd_perms, H_lam_inv, lsdd_unpermed = permed_lsdds(
        k_all_c, x_perms, y_perms, H, return_unpermed=True
    )
    # No permutation should produce a larger LSDD than the true reference/test split.
    assert int((lsdd_perms > lsdd_unpermed).sum()) == 0
    assert H_lam_inv.shape == (n_kcs, n_kcs)
| 5,181 | 32.869281 | 101 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/tests/test_misc_pt.py | from itertools import product
import pytest
import torch
import numpy as np
from alibi_detect.utils.pytorch import zero_diag, quantile
def test_zero_diag():
    """zero_diag should null the diagonal and leave all other entries intact."""
    ones = torch.ones(10, 10)
    ones_zd = zero_diag(ones)
    assert ones_zd.shape == (10, 10)
    assert float(ones_zd.trace()) == 0
    assert float(ones_zd.sum()) == 90  # 100 ones minus the 10 diagonal entries


# Quantile estimator types (Hyndman & Fan) and whether the sample is pre-sorted.
type = [6, 7, 8]
sorted = [True, False]
tests_quantile = list(product(type, sorted))
n_tests_quantile = len(tests_quantile)


@pytest.fixture
def quantile_params(request):
    # Indirect parametrization: request.param indexes into tests_quantile.
    return tests_quantile[request.param]


@pytest.mark.parametrize('quantile_params', list(range(n_tests_quantile)), indirect=True)
def test_quantile(quantile_params):
    """Quantile estimates on a uniform grid should recover the nominal quantile levels."""
    type, sorted = quantile_params
    sample = (0.5+torch.arange(1e6))/1e6
    if not sorted:
        sample = sample[torch.randperm(len(sample))]
    np.testing.assert_almost_equal(quantile(sample, 0.001, type=type, sorted=sorted), 0.001, decimal=6)
    np.testing.assert_almost_equal(quantile(sample, 0.999, type=type, sorted=sorted), 0.999, decimal=6)
    assert quantile(torch.ones(100), 0.42, type=type, sorted=sorted) == 1
    with pytest.raises(ValueError):
        quantile(torch.ones(10), 0.999, type=type, sorted=sorted)  # too few samples for such extreme p
    with pytest.raises(ValueError):
        quantile(torch.ones(100, 100), 0.5, type=type, sorted=sorted)  # only 1D samples supported
| 1,317 | 29.651163 | 103 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/tests/test_prediction_pt.py | import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Tuple, Union
from alibi_detect.utils.pytorch import predict_batch
# Shared test dimensions and a dummy all-zeros input batch.
n, n_features, n_classes, latent_dim = 100, 10, 5, 2
x = np.zeros((n, n_features), dtype=np.float32)


class MyModel(nn.Module):
    """Minimal linear classifier; optionally returns its output twice (multi-output case)."""

    def __init__(self, multi_out: bool = False):
        super(MyModel, self).__init__()
        self.dense = nn.Linear(n_features, n_classes)
        self.multi_out = multi_out

    def forward(self, x: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        out = self.dense(x)
        if not self.multi_out:
            return out
        else:
            return out, out


# Simple linear autoencoder: output shape matches the input shape.
AutoEncoder = nn.Sequential(
    nn.Linear(n_features, latent_dim),
    nn.Linear(latent_dim, n_features)
)
def id_fn(x: Union[np.ndarray, torch.Tensor, list]) -> Union[np.ndarray, torch.Tensor]:
    """Identity preprocessing: arrays/tensors pass through unchanged; a list of
    arrays is concatenated along the first axis and converted to a tensor."""
    if not isinstance(x, list):
        return x
    stacked = np.concatenate(x, axis=0)
    return torch.from_numpy(stacked)
# model, batch size, dtype, preprocessing function, list as input
tests_predict = [
    (MyModel(multi_out=False), 2, np.float32, None, False),
    (MyModel(multi_out=False), int(1e10), np.float32, None, False),
    (MyModel(multi_out=False), int(1e10), torch.float32, None, False),
    (MyModel(multi_out=True), int(1e10), torch.float32, None, False),
    (MyModel(multi_out=False), int(1e10), np.float32, id_fn, False),
    (AutoEncoder, 2, np.float32, None, False),
    (AutoEncoder, int(1e10), np.float32, None, False),
    (AutoEncoder, int(1e10), torch.float32, None, False),
    (id_fn, 2, np.float32, None, False),
    (id_fn, 2, torch.float32, None, False),
    (id_fn, 2, np.float32, id_fn, True),
]
n_tests = len(tests_predict)


@pytest.fixture
def predict_batch_params(request):
    # Indirect parametrization: request.param indexes into tests_predict.
    return tests_predict[request.param]


@pytest.mark.parametrize('predict_batch_params', list(range(n_tests)), indirect=True)
def test_predict_batch(predict_batch_params):
    """predict_batch should honour the requested dtype and preserve output shapes."""
    model, batch_size, dtype, preprocess_fn, to_list = predict_batch_params
    x_batch = [x] if to_list else x
    preds = predict_batch(x_batch, model, batch_size=batch_size, preprocess_fn=preprocess_fn, dtype=dtype)
    if isinstance(preds, tuple):
        preds = preds[0]  # multi-output models: check the first head only
    assert preds.dtype == dtype
    if isinstance(model, nn.Sequential) or hasattr(model, '__name__') and model.__name__ == 'id_fn':
        # Autoencoder / identity function: output shape equals input shape.
        assert preds.shape == x.shape
    elif isinstance(model, nn.Module):
        # Classifier: one score per class.
        assert preds.shape == (n, n_classes)
| 2,498 | 33.232877 | 106 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/pytorch/tests/test_kernels_pt.py | from itertools import product
import numpy as np
import pytest
import torch
from torch import nn
from alibi_detect.utils.pytorch import GaussianRBF, DeepKernel
# Parameter grid for GaussianRBF: bandwidth(s), feature dims, sample sizes, trainability.
sigma = [None, np.array([1.]), np.array([1., 2.])]
n_features = [5, 10]
n_instances = [(100, 100), (100, 75)]
trainable = [True, False]
tests_gk = list(product(sigma, n_features, n_instances, trainable))
n_tests_gk = len(tests_gk)


@pytest.fixture
def gaussian_kernel_params(request):
    # Indirect parametrization: request.param indexes into tests_gk.
    return tests_gk[request.param]


@pytest.mark.parametrize('gaussian_kernel_params', list(range(n_tests_gk)), indirect=True)
def test_gaussian_kernel(gaussian_kernel_params):
    """Kernel values should be positive with unit self-similarity on the diagonal;
    inferring sigma on a trainable kernel must raise."""
    sigma, n_features, n_instances, trainable = gaussian_kernel_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    sigma = sigma if sigma is None else torch.from_numpy(sigma)
    x = torch.from_numpy(np.random.random(xshape)).float()
    y = torch.from_numpy(np.random.random(yshape)).float()
    kernel = GaussianRBF(sigma=sigma, trainable=trainable)
    infer_sigma = True if sigma is None else False
    if trainable and infer_sigma:
        # Gradients w.r.t. an inferred sigma are undefined, so this combination must fail.
        with pytest.raises(Exception):
            kernel(x, y, infer_sigma=infer_sigma)
    else:
        k_xy = kernel(x, y, infer_sigma=infer_sigma).detach().numpy()
        k_xx = kernel(x, x, infer_sigma=infer_sigma).detach().numpy()
        assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
        # k(x, x) = 1 on the diagonal, so the trace equals the number of instances.
        np.testing.assert_almost_equal(k_xx.trace(), xshape[0], decimal=4)
        assert (k_xx > 0.).all() and (k_xy > 0.).all()
class MyKernel(nn.Module):  # TODO: Support then test models using keras functional API
    """Learnable dot-product kernel on a linear projection: k(x, y) = <Wx, Wy>."""

    def __init__(self, n_features: int):
        super().__init__()
        self.linear = nn.Linear(n_features, 20)

    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return torch.einsum('ji,ki->jk', self.linear(x), self.linear(y))


# Parameter grid for DeepKernel: feature dims, sample sizes, component kernels and eps weighting.
n_features = [5, 10]
n_instances = [(100, 100), (100, 75)]
kernel_a = [GaussianRBF(trainable=True), MyKernel]
kernel_b = [GaussianRBF(trainable=True), MyKernel, None]
eps = [0.5, 'trainable']
tests_dk = list(product(n_features, n_instances, kernel_a, kernel_b, eps))
n_tests_dk = len(tests_dk)


@pytest.fixture
def deep_kernel_params(request):
    # Indirect parametrization: request.param indexes into tests_dk.
    return tests_dk[request.param]


@pytest.mark.parametrize('deep_kernel_params', list(range(n_tests_dk)), indirect=True)
def test_deep_kernel(deep_kernel_params):
    """DeepKernel output should be symmetric with positive self-similarities."""
    n_features, n_instances, kernel_a, kernel_b, eps = deep_kernel_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    x = torch.as_tensor(np.random.random(xshape).astype('float32'))
    y = torch.as_tensor(np.random.random(yshape).astype('float32'))
    proj = nn.Linear(n_features, n_features)
    # MyKernel is passed as a class and needs instantiating; GaussianRBF is already an instance.
    kernel_a = kernel_a(n_features) if kernel_a == MyKernel else kernel_a
    kernel_b = kernel_b(n_features) if kernel_b == MyKernel else kernel_b
    kernel = DeepKernel(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
    k_xy = kernel(x, y).detach().numpy()
    k_yx = kernel(y, x).detach().numpy()
    k_xx = kernel(x, x).detach().numpy()
    assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
    assert (np.diag(k_xx) > 0.).all()
    # Symmetry: k(x, y) should equal k(y, x) transposed.
    np.testing.assert_almost_equal(k_xy, np.transpose(k_yx), decimal=5)
| 3,294 | 38.22619 | 90 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/tensorflow/kernels.py | import tensorflow as tf
import numpy as np
from . import distance
from typing import Optional, Union, Callable
from scipy.special import logit
from alibi_detect.utils.frameworks import Framework
def sigma_median(x: tf.Tensor, y: tf.Tensor, dist: tf.Tensor) -> tf.Tensor:
    """
    Bandwidth estimation using the median heuristic :cite:t:`Gretton2012`.

    Parameters
    ----------
    x
        Tensor of instances with dimension [Nx, features].
    y
        Tensor of instances with dimension [Ny, features].
    dist
        Tensor with dimensions [Nx, Ny], containing the pairwise distances between `x` and `y`.

    Returns
    -------
    The computed bandwidth, `sigma`.
    """
    # If x and y are the same sample, their n self-distances are zero and must be
    # excluded from the median; otherwise n = 0 and all pairwise distances count.
    n = min(x.shape[0], y.shape[0])
    n = n if tf.reduce_all(x[:n] == y[:n]) and x.shape == y.shape else 0
    # Index of the median of the remaining (non-self) distances in the sorted flat vector.
    n_median = n + (tf.math.reduce_prod(dist.shape) - n) // 2 - 1
    # sigma = sqrt(median_distance / 2), returned with shape [1].
    sigma = tf.expand_dims((.5 * tf.sort(tf.reshape(dist, (-1,)))[n_median]) ** .5, axis=0)
    return sigma
class GaussianRBF(tf.keras.Model):
    def __init__(
        self,
        sigma: Optional[tf.Tensor] = None,
        init_sigma_fn: Optional[Callable] = None,
        trainable: bool = False
    ) -> None:
        """
        Gaussian RBF kernel: k(x,y) = exp(-(1/(2*sigma^2)||x-y||^2). A forward pass takes
        a batch of instances x [Nx, features] and y [Ny, features] and returns the kernel
        matrix [Nx, Ny].

        Parameters
        ----------
        sigma
            Bandwidth used for the kernel. Needn't be specified if being inferred or trained.
            Can pass multiple values to eval kernel with and then average.
        init_sigma_fn
            Function used to compute the bandwidth `sigma`. Used when `sigma` is to be inferred.
            The function's signature should match :py:func:`~alibi_detect.utils.tensorflow.kernels.sigma_median`,
            meaning that it should take in the tensors `x`, `y` and `dist` and return `sigma`. If `None`, it is set to
            :func:`~alibi_detect.utils.tensorflow.kernels.sigma_median`.
        trainable
            Whether or not to track gradients w.r.t. sigma to allow it to be trained.
        """
        super().__init__()
        init_sigma_fn = sigma_median if init_sigma_fn is None else init_sigma_fn
        # Raw constructor args kept for serialization via get_config().
        self.config = {'sigma': sigma, 'trainable': trainable, 'init_sigma_fn': init_sigma_fn}
        if sigma is None:
            # Placeholder variable; the actual value is inferred on the first forward pass.
            self.log_sigma = tf.Variable(np.empty(1), dtype=tf.keras.backend.floatx(), trainable=trainable)
            self.init_required = True
        else:
            # sigma is stored on log-scale so the bandwidth stays positive during training.
            sigma = tf.cast(tf.reshape(sigma, (-1,)), dtype=tf.keras.backend.floatx())  # [Ns,]
            self.log_sigma = tf.Variable(tf.math.log(sigma), trainable=trainable)
            self.init_required = False
        self.init_sigma_fn = init_sigma_fn
        self.trainable = trainable

    @property
    def sigma(self) -> tf.Tensor:
        # Bandwidth(s) recovered from the log-scale variable.
        return tf.math.exp(self.log_sigma)

    def call(self, x: tf.Tensor, y: tf.Tensor, infer_sigma: bool = False) -> tf.Tensor:
        y = tf.cast(y, x.dtype)
        x, y = tf.reshape(x, (x.shape[0], -1)), tf.reshape(y, (y.shape[0], -1))  # flatten
        dist = distance.squared_pairwise_distance(x, y)  # [Nx, Ny]
        if infer_sigma or self.init_required:
            if self.trainable and infer_sigma:
                raise ValueError("Gradients cannot be computed w.r.t. an inferred sigma value")
            sigma = self.init_sigma_fn(x, y, dist)
            self.log_sigma.assign(tf.math.log(sigma))
            self.init_required = False
        gamma = tf.constant(1. / (2. * self.sigma ** 2), dtype=x.dtype)  # [Ns,]
        # TODO: do matrix multiplication after all?
        kernel_mat = tf.exp(- tf.concat([(g * dist)[None, :, :] for g in gamma], axis=0))  # [Ns, Nx, Ny]
        # Average over the (possibly multiple) bandwidths.
        return tf.reduce_mean(kernel_mat, axis=0)  # [Nx, Ny]

    def get_config(self) -> dict:
        """
        Returns a serializable config dict (excluding the input_sigma_fn, which is serialized in alibi_detect.saving).
        """
        cfg = self.config.copy()
        # Tensors are not JSON-serializable; store sigma as a plain list.
        if isinstance(cfg['sigma'], tf.Tensor):
            cfg['sigma'] = cfg['sigma'].numpy().tolist()
        cfg.update({'flavour': Framework.TENSORFLOW.value})
        return cfg

    @classmethod
    def from_config(cls, config):
        """
        Instantiates a kernel from a config dictionary.

        Parameters
        ----------
        config
            A kernel config dictionary.
        """
        config.pop('flavour')
        return cls(**config)
class DeepKernel(tf.keras.Model):
    """
    Computes similarities as k(x,y) = (1-eps)*k_a(proj(x), proj(y)) + eps*k_b(x,y).
    A forward pass takes a batch of instances x [Nx, features] and y [Ny, features] and returns
    the kernel matrix [Nx, Ny].

    Parameters
    ----------
    proj
        The projection to be applied to the inputs before applying kernel_a
    kernel_a
        The kernel to apply to the projected inputs. Defaults to a Gaussian RBF with trainable bandwidth.
    kernel_b
        The kernel to apply to the raw inputs. Defaults to a Gaussian RBF with trainable bandwidth.
        Set to None in order to use only the deep component (i.e. eps=0).
    eps
        The proportion (in [0,1]) of weight to assign to the kernel applied to raw inputs. This can be
        either specified or set to 'trainable'. Only relevant if kernel_b is not None.
    """
    def __init__(
        self,
        proj: tf.keras.Model,
        kernel_a: Union[tf.keras.Model, str] = 'rbf',
        kernel_b: Optional[Union[tf.keras.Model, str]] = 'rbf',
        eps: Union[float, str] = 'trainable'
    ) -> None:
        super().__init__()
        # Raw constructor args kept for serialization via get_config().
        self.config = {'proj': proj, 'kernel_a': kernel_a, 'kernel_b': kernel_b, 'eps': eps}
        if kernel_a == 'rbf':
            kernel_a = GaussianRBF(trainable=True)
        if kernel_b == 'rbf':
            kernel_b = GaussianRBF(trainable=True)
        self.kernel_a = kernel_a
        self.kernel_b = kernel_b
        self.proj = proj
        if kernel_b is not None:
            self._init_eps(eps)

    def _init_eps(self, eps: Union[float, str]) -> None:
        # eps is stored as a logit so that sigmoid(logit_eps) stays in (0,1) during training.
        if isinstance(eps, float):
            if not 0 < eps < 1:
                raise ValueError("eps should be in (0,1)")
            eps = tf.constant(eps)
            self.logit_eps = tf.Variable(tf.constant(logit(eps)), trainable=False)
        elif eps == 'trainable':
            self.logit_eps = tf.Variable(tf.constant(0.))  # sigmoid(0) = 0.5 initial weighting
        else:
            raise NotImplementedError("eps should be 'trainable' or a float in (0,1)")

    @property
    def eps(self) -> tf.Tensor:
        # Zero when there is no kernel_b, i.e. only the deep component is used.
        return tf.math.sigmoid(self.logit_eps) if self.kernel_b is not None else tf.constant(0.)

    def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
        similarity = self.kernel_a(self.proj(x), self.proj(y))  # type: ignore[operator]
        if self.kernel_b is not None:
            # Convex combination of the deep and the raw-input kernel.
            similarity = (1-self.eps)*similarity + self.eps*self.kernel_b(x, y)  # type: ignore[operator]
        return similarity

    def get_config(self) -> dict:
        # Shallow copy of the raw constructor arguments.
        return self.config.copy()

    @classmethod
    def from_config(cls, config):
        # Instantiate a DeepKernel from a config dictionary.
        return cls(**config)
| 7,226 | 38.708791 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/tensorflow/prediction.py | from functools import partial
from typing import Callable, Type, Union
import numpy as np
import tensorflow as tf
from alibi_detect.utils.prediction import tokenize_transformer
def predict_batch(x: Union[list, np.ndarray, tf.Tensor], model: Union[Callable, tf.keras.Model],
                  batch_size: int = int(1e10), preprocess_fn: Callable = None,
                  dtype: Union[Type[np.generic], tf.DType] = np.float32) -> Union[np.ndarray, tf.Tensor, tuple]:
    """
    Make batch predictions on a model.

    Parameters
    ----------
    x
        Batch of instances.
    model
        tf.keras model or one of the other permitted types defined in Data.
    batch_size
        Batch size used during prediction.
    preprocess_fn
        Optional preprocessing function for each batch.
    dtype
        Model output type, e.g. np.float32 or tf.float32.

    Returns
    -------
    Numpy array, tensorflow tensor or tuples of those with model outputs.

    Raises
    ------
    TypeError
        If the model output is not a list, tuple, np.ndarray or tf.Tensor.
    """
    n = len(x)
    n_minibatch = int(np.ceil(n / batch_size))
    # Outputs are returned as numpy unless a tf dtype was explicitly requested.
    return_np = not isinstance(dtype, tf.DType)
    return_list = False
    preds: Union[list, tuple] = []
    for i in range(n_minibatch):
        istart, istop = i * batch_size, min((i + 1) * batch_size, n)
        x_batch = x[istart:istop]
        # `callable(...)` instead of isinstance(..., typing.Callable): same semantics
        # (both are False for None), but the idiomatic runtime check.
        if callable(preprocess_fn):
            x_batch = preprocess_fn(x_batch)
        preds_tmp = model(x_batch)
        if isinstance(preds_tmp, (list, tuple)):
            if len(preds) == 0:  # init tuple with lists to store predictions
                preds = tuple([] for _ in range(len(preds_tmp)))
                return_list = isinstance(preds_tmp, list)
            for j, p in enumerate(preds_tmp):
                preds[j].append(p if not return_np or isinstance(p, np.ndarray) else p.numpy())
        elif isinstance(preds_tmp, (np.ndarray, tf.Tensor)):
            preds.append(preds_tmp if not return_np or isinstance(preds_tmp, np.ndarray)  # type: ignore
                         else preds_tmp.numpy())
        else:
            raise TypeError(f'Model output type {type(preds_tmp)} not supported. The model output '
                            f'type needs to be one of list, tuple, np.ndarray or tf.Tensor.')
    concat = np.concatenate if return_np else tf.concat
    out = tuple(concat(p, axis=0) for p in preds) if isinstance(preds, tuple) else concat(preds, axis=0)
    if return_list:
        out = list(out)
    return out
def predict_batch_transformer(x: Union[list, np.ndarray], model: tf.keras.Model, tokenizer: Callable,
                              max_len: int, batch_size: int = int(1e10),
                              dtype: Union[Type[np.generic], tf.DType] = np.float32) \
        -> Union[np.ndarray, tf.Tensor]:
    """
    Make batch predictions using a transformers tokenizer and model.

    Parameters
    ----------
    x
        Batch of instances.
    model
        Transformer model.
    tokenizer
        Tokenizer for model.
    max_len
        Max token length.
    batch_size
        Batch size.
    dtype
        Model output type, e.g. np.float32 or tf.float32.

    Returns
    -------
    Numpy array or tensorflow tensor with model outputs.
    """
    # Tokenize each batch just before the forward pass; delegate batching to predict_batch.
    preprocess_fn = partial(tokenize_transformer, tokenizer=tokenizer, max_len=max_len, backend='tf')
    return predict_batch(x, model, preprocess_fn=preprocess_fn, batch_size=batch_size, dtype=dtype)
| 3,418 | 36.988889 | 112 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/tensorflow/misc.py | import tensorflow as tf
def zero_diag(mat: tf.Tensor) -> tf.Tensor:
    """
    Return a copy of a square matrix with its diagonal entries replaced by zeros.

    Parameters
    ----------
    mat
        A 2D square matrix

    Returns
    -------
    A 2D square matrix with zeros along the diagonal
    """
    diagonal = tf.linalg.diag_part(mat)
    return mat - tf.linalg.diag(diagonal)
def quantile(sample: tf.Tensor, p: float, type: int = 7, sorted: bool = False) -> float:
    """
    Estimate a desired quantile of a univariate distribution from a vector of samples

    Parameters
    ----------
    sample
        A 1D vector of values
    p
        The desired quantile in (0,1)
    type
        The method for computing the quantile.
        See https://wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample
        Only types 6, 7 and 8 are supported.
    sorted
        Whether or not the vector is already sorted into ascending order

    Returns
    -------
    An estimate of the quantile

    Raises
    ------
    ValueError
        If `sample` is not 1D, if `p` is too extreme for the number of samples,
        or if `type` is not one of 6, 7 or 8.
    """
    N = len(sample)
    if len(sample.shape) != 1:
        raise ValueError("Quantile estimation only supports vectors of univariate samples.")
    if not 1/N <= p <= (N-1)/N:
        raise ValueError(f"The {p}-quantile should not be estimated using only {N} samples.")
    sorted_sample = sample if sorted else tf.sort(sample)
    if type == 6:
        h = (N+1)*p
    elif type == 7:
        h = (N-1)*p + 1
    elif type == 8:
        h = (N+1/3)*p + 1/3
    else:
        # Previously an unsupported type fell through all branches and raised an
        # opaque UnboundLocalError on `h`; fail fast with a clear message instead.
        raise ValueError(f"type={type} is not supported. Use one of 6, 7 or 8.")
    h_floor = int(h)
    quantile = sorted_sample[h_floor-1]
    if h_floor != h:
        # Linear interpolation between the two neighbouring order statistics.
        quantile += (h - h_floor)*(sorted_sample[h_floor]-sorted_sample[h_floor-1])
    return float(quantile)
def subset_matrix(mat: tf.Tensor, inds_0: tf.Tensor, inds_1: tf.Tensor) -> tf.Tensor:
    """
    Take a matrix and return the submatrix correspond to provided row and column indices

    Parameters
    ----------
    mat
        A 2D matrix
    inds_0
        A vector of row indices
    inds_1
        A vector of column indices

    Returns
    -------
    A submatrix of shape (len(inds_0), len(inds_1))
    """
    if len(mat.shape) != 2:
        raise ValueError("Subsetting only supported for matrices (2D)")
    # Gather the requested rows first, then the requested columns from the result.
    subbed_rows = tf.gather(mat, inds_0, axis=0)
    subbed_rows_cols = tf.gather(subbed_rows, inds_1, axis=1)
    return subbed_rows_cols
def clone_model(model: tf.keras.Model) -> tf.keras.Model:
    """ Clone a sequential, functional or subclassed tf.keras.Model. """
    try:  # sequential or functional model
        return tf.keras.models.clone_model(model)
    except ValueError:  # subclassed model
        try:
            config = model.get_config()
        except NotImplementedError:
            # Subclassed models without a custom get_config: rebuild with no args.
            config = {}
        return model.__class__.from_config(config)
| 2,661 | 26.163265 | 93 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/tensorflow/data.py | import numpy as np
import tensorflow as tf
from typing import Tuple, Union
Indexable = Union[np.ndarray, tf.Tensor, list]
class TFDataset(tf.keras.utils.Sequence):
    """Batch iterator over one or more aligned indexable collections (arrays/tensors/lists)."""

    def __init__(
        self, *indexables: Indexable, batch_size: int = int(1e10), shuffle: bool = True,
    ) -> None:
        self.indexables = indexables
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __getitem__(self, idx: int) -> Union[Tuple[Indexable, ...], Indexable]:
        # Slice each indexable with the same batch window; unwrap single-element tuples.
        istart, istop = idx * self.batch_size, (idx + 1) * self.batch_size
        output = tuple(indexable[istart:istop] for indexable in self.indexables)
        return output if len(output) > 1 else output[0]

    def __len__(self) -> int:
        # Number of complete batches; a trailing partial batch is dropped.
        return len(self.indexables[0]) // self.batch_size

    def on_epoch_end(self) -> None:
        # Jointly shuffle all indexables with a single permutation so rows stay aligned.
        if self.shuffle:
            perm = np.random.permutation(len(self.indexables[0]))
            self.indexables = tuple(
                [indexable[i] for i in perm] if isinstance(indexable, list) else indexable[perm]
                for indexable in self.indexables
            )
| 1,105 | 34.677419 | 96 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/tensorflow/distance.py | import logging
import numpy as np
import tensorflow as tf
from typing import Callable, Tuple, List, Optional, Union
logger = logging.getLogger(__name__)
def squared_pairwise_distance(x: tf.Tensor, y: tf.Tensor, a_min: float = 1e-30, a_max: float = 1e30) -> tf.Tensor:
    """
    TensorFlow pairwise squared Euclidean distance between samples x and y.

    Parameters
    ----------
    x
        Batch of instances of shape [Nx, features].
    y
        Batch of instances of shape [Ny, features].
    a_min
        Lower bound to clip distance values.
    a_max
        Upper bound to clip distance values.

    Returns
    -------
    Pairwise squared Euclidean distance [Nx, Ny].
    """
    # ||x - y||^2 = ||x||^2 + ||y||^2 - 2<x, y>, computed batch-wise.
    x2 = tf.reduce_sum(x ** 2, axis=-1, keepdims=True)
    y2 = tf.reduce_sum(y ** 2, axis=-1, keepdims=True)
    dist = x2 + tf.transpose(y2, (1, 0)) - 2. * x @ tf.transpose(y, (1, 0))
    # Clip to guard against small negative values caused by floating-point error.
    return tf.clip_by_value(dist, a_min, a_max)
def batch_compute_kernel_matrix(
    x: Union[list, np.ndarray, tf.Tensor],
    y: Union[list, np.ndarray, tf.Tensor],
    kernel: Union[Callable, tf.keras.Model],
    batch_size: int = int(1e10),
    preprocess_fn: Callable = None,
) -> tf.Tensor:
    """
    Compute the kernel matrix between x and y by filling in blocks of size
    batch_size x batch_size at a time.

    Parameters
    ----------
    x
        Reference set.
    y
        Test set.
    kernel
        tf.keras model
    batch_size
        Batch size used during prediction.
    preprocess_fn
        Optional preprocessing function for each batch.

    Returns
    -------
    Kernel matrix in the form of a tensorflow tensor

    Raises
    ------
    ValueError
        If `x` and `y` are not of the same type.
    """
    # Identity comparison of types rather than `type(x) != type(y)` (flake8 E721).
    if type(x) is not type(y):
        raise ValueError("x and y should be of the same type")
    n_x, n_y = len(x), len(y)
    n_batch_x, n_batch_y = int(np.ceil(n_x / batch_size)), int(np.ceil(n_y / batch_size))
    k_is = []  # one entry per row of blocks
    for i in range(n_batch_x):
        istart, istop = i * batch_size, min((i + 1) * batch_size, n_x)
        x_batch = x[istart:istop]
        # `callable(...)` instead of isinstance(..., typing.Callable): same semantics,
        # idiomatic runtime check (both are False for the default None).
        if callable(preprocess_fn):
            x_batch = preprocess_fn(x_batch)
        k_ijs = []  # blocks within row i
        for j in range(n_batch_y):
            jstart, jstop = j * batch_size, min((j + 1) * batch_size, n_y)
            y_batch = y[jstart:jstop]
            if callable(preprocess_fn):
                y_batch = preprocess_fn(y_batch)
            k_ijs.append(kernel(x_batch, y_batch))  # block (i, j) of the kernel matrix
        k_is.append(tf.concat(k_ijs, axis=1))
    k_mat = tf.concat(k_is, axis=0)
    return k_mat
def mmd2_from_kernel_matrix(kernel_mat: tf.Tensor, m: int, permute: bool = False,
                            zero_diag: bool = True) -> tf.Tensor:
    """
    Compute maximum mean discrepancy (MMD^2) between 2 samples x and y from the
    full kernel matrix between the samples.

    Parameters
    ----------
    kernel_mat
        Kernel matrix between samples x and y.
    m
        Number of instances in y.
    permute
        Whether to permute the row indices. Used for permutation tests.
    zero_diag
        Whether to zero out the diagonal of the kernel matrix.

    Returns
    -------
    MMD^2 between the samples from the kernel matrix.
    """
    n = kernel_mat.shape[0] - m  # number of instances in x
    if zero_diag:
        # Remove self-similarities so the unbiased estimator can be used below.
        kernel_mat = kernel_mat - tf.linalg.diag(tf.linalg.diag_part(kernel_mat))
    if permute:
        # Jointly permute rows and columns to produce a permutation-test sample.
        idx = np.random.permutation(kernel_mat.shape[0])
        kernel_mat = tf.gather(tf.gather(kernel_mat, indices=idx, axis=0), indices=idx, axis=1)
    k_xx, k_yy, k_xy = kernel_mat[:-m, :-m], kernel_mat[-m:, -m:], kernel_mat[-m:, :-m]
    c_xx, c_yy = 1 / (n * (n - 1)), 1 / (m * (m - 1))
    mmd2 = c_xx * tf.reduce_sum(k_xx) + c_yy * tf.reduce_sum(k_yy) - 2. * tf.reduce_mean(k_xy)
    return mmd2
def mmd2(x: tf.Tensor, y: tf.Tensor, kernel: Callable) -> float:
    """
    Compute MMD^2 between 2 samples.

    Parameters
    ----------
    x
        Batch of instances of shape [Nx, features].
    y
        Batch of instances of shape [Ny, features].
    kernel
        Kernel function.

    Returns
    -------
    MMD^2 between the samples x and y.
    """
    n, m = x.shape[0], y.shape[0]
    c_xx, c_yy = 1 / (n * (n - 1)), 1 / (m * (m - 1))
    k_xx, k_yy, k_xy = kernel(x, x), kernel(y, y), kernel(x, y)
    # Unbiased estimator: traces are subtracted to exclude self-similarity terms.
    return (c_xx * (tf.reduce_sum(k_xx) - tf.linalg.trace(k_xx)) +
            c_yy * (tf.reduce_sum(k_yy) - tf.linalg.trace(k_yy)) - 2. * tf.reduce_mean(k_xy))
def relative_euclidean_distance(x: tf.Tensor, y: tf.Tensor, eps: float = 1e-12, axis: int = -1) -> tf.Tensor:
    """
    Relative Euclidean distance.

    Parameters
    ----------
    x
        Tensor used in distance computation.
    y
        Tensor used in distance computation.
    eps
        Epsilon added to denominator for numerical stability.
    axis
        Axis used to compute distance.

    Returns
    -------
    Tensor with relative Euclidean distance across specified axis.
    """
    denom = tf.concat([tf.reshape(tf.norm(x, ord=2, axis=axis), (-1, 1)),
                       tf.reshape(tf.norm(y, ord=2, axis=axis), (-1, 1))], axis=1)
    # Normalise by the smaller of the two norms; eps guards against division by zero.
    dist = tf.norm(x - y, ord=2, axis=axis) / (tf.reduce_min(denom, axis=axis) + eps)
    return dist
def permed_lsdds(
    k_all_c: tf.Tensor,
    x_perms: List[tf.Tensor],
    y_perms: List[tf.Tensor],
    H: tf.Tensor,
    H_lam_inv: Optional[tf.Tensor] = None,
    lam_rd_max: float = 0.2,
    return_unpermed: bool = False,
) -> Union[Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor]]:
    """
    Compute LSDD estimates from kernel matrix across various ref and test window samples

    Parameters
    ----------
    k_all_c
        Kernel matrix of similarities between all samples and the kernel centers.
    x_perms
        List of B reference window index vectors
    y_perms
        List of B test window index vectors
    H
        Special (scaled) kernel matrix of similarities between kernel centers
    H_lam_inv
        Function of H corresponding to a particular regularization parameter lambda.
        See Eqn 11 of Bu et al. (2017)
    lam_rd_max
        The maximum relative difference between two estimates of LSDD that the regularization parameter
        lambda is allowed to cause. Defaults to 0.2. Only relevant if H_lam_inv is not supplied.
    return_unpermed
        Whether or not to return value corresponding to unpermed order defined by k_all_c

    Returns
    -------
    Vector of B LSDD estimates for each permutation, H_lam_inv which may have been inferred, and optionally \
    the unpermed LSDD estimate.
    """
    # Compute (for each bootstrap) the average distance to each kernel center (Eqn 7)
    k_xc_perms = tf.stack([tf.gather(k_all_c, x_inds) for x_inds in x_perms], axis=0)
    k_yc_perms = tf.stack([tf.gather(k_all_c, y_inds) for y_inds in y_perms], axis=0)
    h_perms = tf.reduce_mean(k_xc_perms, axis=1) - tf.reduce_mean(k_yc_perms, axis=1)
    if H_lam_inv is None:
        # We perform the initialisation for multiple candidate lambda values and pick the largest
        # one for which the relative difference (RD) between two difference estimates is below lambda_rd_max.
        # See Appendix A
        candidate_lambdas = [1/(4**i) for i in range(10)]  # TODO: More principled selection
        H_plus_lams = tf.stack([H+tf.eye(H.shape[0], dtype=H.dtype)*can_lam for can_lam in candidate_lambdas], axis=0)
        H_plus_lam_invs = tf.transpose(tf.linalg.inv(H_plus_lams), [1, 2, 0])  # lambdas last
        omegas = tf.einsum('jkl,bk->bjl', H_plus_lam_invs, h_perms)  # (Eqn 8)
        h_omegas = tf.einsum('bj,bjl->bl', h_perms, omegas)
        omega_H_omegas = tf.einsum('bkl,bkl->bl', tf.einsum('bjl,jk->bkl', omegas, H), omegas)
        # Relative difference between the two LSDD estimates, averaged over bootstraps.
        rds = tf.reduce_mean(1 - (omega_H_omegas/h_omegas), axis=0)
        less_than_rd_inds = tf.where(rds < lam_rd_max)
        if len(less_than_rd_inds) == 0:
            # No acceptable lambda found; duplicate instances are the usual cause of an
            # ill-conditioned H, so distinguish that case in the error message.
            repeats = k_all_c.shape[0] - np.unique(k_all_c, axis=0).shape[0]
            if repeats > 0:
                msg = "Too many repeat instances for LSDD-based detection. \
                Try using MMD-based detection instead"
            else:
                msg = "Unknown error. Try using MMD-based detection instead"
            raise ValueError(msg)
        # Largest acceptable lambda (candidates are in decreasing order).
        lambda_index = int(less_than_rd_inds[0])
        lam = candidate_lambdas[lambda_index]
        logger.info(f"Using lambda value of {lam:.2g} with RD of {float(rds[lambda_index]):.2g}")
        H_plus_lam_inv = tf.linalg.inv(H+lam*tf.eye(H.shape[0], dtype=H.dtype))
        H_lam_inv = 2*H_plus_lam_inv - (tf.transpose(H_plus_lam_inv, [1, 0]) @ H @ H_plus_lam_inv)  # (blw Eqn 11)
    # Now to compute an LSDD estimate for each permutation
    lsdd_perms = tf.reduce_sum(
        h_perms * tf.transpose(H_lam_inv @ tf.transpose(h_perms, [1, 0]), [1, 0]), axis=1
    )  # (Eqn 11)
    if return_unpermed:
        n_x = x_perms[0].shape[0]
        h = tf.reduce_mean(k_all_c[:n_x], axis=0) - tf.reduce_mean(k_all_c[n_x:], axis=0)
        lsdd_unpermed = tf.reduce_sum(h[None, :] * tf.transpose(H_lam_inv @ h[:, None], [1, 0]))
        return lsdd_perms, H_lam_inv, lsdd_unpermed
    else:
        return lsdd_perms, H_lam_inv
| 9,181 | 36.024194 | 118 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/tensorflow/tests/test_data_tf.py | from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.tensorflow.data import TFDataset
# test on numpy array and list
n, f = 100, 5
shape = (n, f)
# Input container type, batch size and shuffling options for TFDataset.
xtype = [list, np.ndarray]
shuffle = [True, False]
batch_size = [2, 10]
tests_ds = list(product(xtype, batch_size, shuffle))
n_tests_ds = len(tests_ds)


@pytest.fixture
def ds_params(request):
    # Indirect parametrization: request.param indexes into tests_ds.
    return tests_ds[request.param]


@pytest.mark.parametrize('ds_params', list(range(n_tests_ds)), indirect=True)
def test_torchdataset(ds_params):
    """TFDataset should yield complete batches of the right shapes, dropping the partial tail."""
    # NOTE(review): name looks copy-pasted from the torch tests — this exercises TFDataset.
    xtype, batch_size, shuffle = ds_params
    x = np.random.randn(*shape)
    y = np.random.randn(*(n,))
    if xtype == list:
        x = list(x)
    ds = TFDataset(x, y, batch_size=batch_size, shuffle=shuffle)
    for step, data in enumerate(ds):
        pass
    if xtype == list:
        assert len(data[0]) == batch_size and data[0][0].shape == (f,)
    else:
        assert data[0].shape == (batch_size, f)
    assert data[1].shape == (batch_size,)
    assert step == len(ds) - 1  # the trailing partial batch is dropped by __len__
    if not shuffle:
        # Without shuffling the last yielded row is determined by the batch layout.
        assert (data[0][-1] == x[-1 - (n % batch_size)]).all()
| 1,094 | 27.076923 | 77 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/tensorflow/tests/test_prediction_tf.py | import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from typing import Tuple, Union
from alibi_detect.utils.tensorflow import predict_batch
# Shared test dimensions and a dummy all-zeros input batch.
n, n_features, n_classes, latent_dim = 100, 10, 5, 2
x = np.zeros((n, n_features), dtype=np.float32)


class MyModel(tf.keras.Model):
    """Minimal softmax classifier; optionally returns its output twice (multi-output case)."""

    def __init__(self, multi_out: bool = False):
        super(MyModel, self).__init__()
        self.dense = Dense(n_classes, activation='softmax')
        self.multi_out = multi_out

    def call(self, x: np.ndarray) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]:
        out = self.dense(x)
        if not self.multi_out:
            return out
        else:
            return out, out


# Simple linear autoencoder: output shape matches the input shape.
AutoEncoder = tf.keras.Sequential(
    [
        InputLayer(input_shape=(n_features,)),
        Dense(latent_dim),
        Dense(n_features)
    ]
)


def id_fn(x: Union[np.ndarray, tf.Tensor, list]) -> Union[np.ndarray, tf.Tensor]:
    """Identity preprocessing; a list of arrays is concatenated along the first axis."""
    if isinstance(x, list):
        return np.concatenate(x, axis=0)
    else:
        return x
# model, batch size, dtype, preprocessing function, list as input
# Each tuple parametrizes one call of test_predict_batch below; huge batch
# sizes (1e10) force a single batch covering the whole dataset.
tests_predict = [
    (MyModel(multi_out=False), 2, np.float32, None, False),
    (MyModel(multi_out=False), int(1e10), np.float32, None, False),
    (MyModel(multi_out=False), int(1e10), tf.float32, None, False),
    (MyModel(multi_out=True), int(1e10), tf.float32, None, False),
    (MyModel(multi_out=False), int(1e10), np.float32, id_fn, False),
    (AutoEncoder, 2, np.float32, None, False),
    (AutoEncoder, int(1e10), np.float32, None, False),
    (AutoEncoder, int(1e10), tf.float32, None, False),
    (id_fn, 2, np.float32, None, False),
    (id_fn, 2, tf.float32, None, False),
    (id_fn, 2, np.float32, id_fn, True),
]
n_tests = len(tests_predict)
@pytest.fixture
def predict_batch_params(request):
    # indirect parametrization: request.param is an index into tests_predict
    return tests_predict[request.param]
@pytest.mark.parametrize('predict_batch_params', list(range(n_tests)), indirect=True)
def test_predict_batch(predict_batch_params):
    """Check dtype and output shape of batched predictions for classifier,
    autoencoder and plain-function "models", with/without preprocessing."""
    model, batch_size, dtype, preprocess_fn, to_list = predict_batch_params
    x_batch = [x] if to_list else x
    preds = predict_batch(x_batch, model, batch_size=batch_size, preprocess_fn=preprocess_fn, dtype=dtype)
    if isinstance(preds, tuple):
        # multi-output model: both outputs are identical, inspect the first
        preds = preds[0]
    assert preds.dtype == dtype
    # NOTE: `and` binds tighter than `or`, so this reads as
    # Sequential OR (hasattr AND __name__ == 'id_fn') — both preserve the input shape
    if isinstance(model, tf.keras.Sequential) or hasattr(model, '__name__') and model.__name__ == 'id_fn':
        assert preds.shape == x.shape
    elif isinstance(model, tf.keras.Model):
        # classifier maps to class probabilities
        assert preds.shape == (n, n_classes)
| 2,557 | 32.657895 | 106 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/tensorflow/tests/test_misc_tf.py | from itertools import product
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
import numpy as np
from alibi_detect.utils.tensorflow import zero_diag, quantile, subset_matrix
from alibi_detect.utils.tensorflow.misc import clone_model
def test_zero_diag():
    """Zeroing the diagonal of an all-ones 10x10 matrix keeps the shape,
    kills the trace and removes exactly the 10 diagonal entries."""
    zeroed = zero_diag(tf.ones((10, 10)))
    assert zeroed.shape == (10, 10)
    assert float(tf.linalg.trace(zeroed)) == 0
    assert float(tf.reduce_sum(zeroed)) == 90
# quantile estimator variants (see scipy/R quantile "type" convention) and
# whether the input sample is pre-sorted.
# NOTE(review): `type` and `sorted` shadow builtins; kept for API consistency
# with the `quantile` helper's keyword names.
type = [6, 7, 8]
sorted = [True, False]
tests_quantile = list(product(type, sorted))
n_tests_quantile = len(tests_quantile)
@pytest.fixture
def quantile_params(request):
    # indirect parametrization: request.param indexes tests_quantile
    return tests_quantile[request.param]
@pytest.mark.parametrize('quantile_params', list(range(n_tests_quantile)), indirect=True)
def test_quantile(quantile_params):
    """Quantile estimates on a uniform grid should be accurate, constant
    samples should return the constant, and invalid inputs should raise."""
    type, sorted = quantile_params
    # evenly spaced sample in (0, 1); its q-quantile is ~q
    sample = (0.5+tf.range(1e6))/1e6
    if not sorted:
        sample = tf.random.shuffle(sample)
    np.testing.assert_almost_equal(quantile(sample, 0.001, type=type, sorted=sorted), 0.001, decimal=6)
    np.testing.assert_almost_equal(quantile(sample, 0.999, type=type, sorted=sorted), 0.999, decimal=6)
    assert quantile(tf.ones((100,)), 0.42, type=type, sorted=sorted) == 1
    # too few samples to estimate an extreme quantile
    with pytest.raises(ValueError):
        quantile(tf.ones((10,)), 0.999, type=type, sorted=sorted)
    # only 1-D samples are supported
    with pytest.raises(ValueError):
        quantile(tf.ones((100, 100)), 0.5, type=type, sorted=sorted)
def test_subset_matrix():
    """subset_matrix should select the sub-matrix given row/column index
    vectors, and reject tensors that are not 2-D."""
    # mat[i, j] == i * j, so the expected sub-matrix entries are products
    mat = tf.range(5)[None, :] * tf.range(5)[:, None]
    inds_0 = [2, 3]
    inds_1 = [2, 1, 4]
    sub_mat = subset_matrix(mat, tf.constant(inds_0), tf.constant(inds_1))
    assert sub_mat.shape == (2, 3)
    for i, ind_0 in enumerate(inds_0):
        for j, ind_1 in enumerate(inds_1):
            assert sub_mat[i, j] == ind_0 * ind_1
    with pytest.raises(ValueError):
        subset_matrix(tf.ones((10, 10, 10)), inds_0, inds_1)
    with pytest.raises(ValueError):
        subset_matrix(tf.ones((10,)), inds_0, inds_1)
n_in, n_out = 10, 5
# three flavours of Keras model to exercise clone_model:
# sequential model
model_seq = tf.keras.Sequential([InputLayer(n_in, ), Dense(n_out)])
# functional model
inputs = Input(n_in, )
outputs = Dense(n_out)(inputs)
model_func = tf.keras.Model(inputs=inputs, outputs=outputs)
# subclassed model
class Model(tf.keras.Model):
    # Minimal subclassed model; weights are only built after a forward pass.
    def __init__(self):
        super().__init__()
        self.dense = Dense(5)
    def call(self, x):
        return self.dense(x)
    @classmethod
    def from_config(cls, config):
        # required so tf.keras.models.clone_model can reconstruct the subclass
        return cls(**config)
model_sub = Model()
def test_clone_model():
    """Cloning should re-initialize weights: no clone may share (equal)
    weight values with its source model."""
    model_seq_clone = clone_model(model_seq)
    assert not (model_seq_clone.weights[0] == model_seq.weights[0]).numpy().any()
    model_func_clone = clone_model(model_func)
    assert not (model_func_clone.weights[0] == model_func.weights[0]).numpy().any()
    model_sub_clone = clone_model(model_sub)
    # subclassed models build their weights lazily — run a forward pass first
    _ = model_sub(tf.zeros((1, 10)))
    _ = model_sub_clone(tf.zeros((1, 10)))
    assert not (model_sub_clone.weights[0] == model_sub.weights[0]).numpy().any()
| 3,046 | 29.168317 | 103 | py |
alibi-detect | alibi-detect-master/alibi_detect/utils/tensorflow/tests/test_kernels_tf.py | from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from alibi_detect.utils.tensorflow import GaussianRBF, DeepKernel
# kernel bandwidth(s) (None -> infer from data), feature dims, sample sizes,
# and whether the bandwidth is a trainable parameter
sigma = [None, np.array([1.]), np.array([1., 2.])]
n_features = [5, 10]
n_instances = [(100, 100), (100, 75)]
trainable = [True, False]
tests_gk = list(product(sigma, n_features, n_instances, trainable))
n_tests_gk = len(tests_gk)
@pytest.fixture
def gaussian_kernel_params(request):
    # indirect parametrization: request.param indexes tests_gk
    return tests_gk[request.param]
@pytest.mark.parametrize('gaussian_kernel_params', list(range(n_tests_gk)), indirect=True)
def test_gaussian_kernel(gaussian_kernel_params):
    """GaussianRBF kernel values must be positive, have the right shape and
    a unit diagonal on k(x, x); inferring sigma on a trainable kernel raises."""
    sigma, n_features, n_instances, trainable = gaussian_kernel_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    x = tf.convert_to_tensor(np.random.random(xshape).astype('float32'))
    y = tf.convert_to_tensor(np.random.random(yshape).astype('float32'))
    kernel = GaussianRBF(sigma=sigma, trainable=trainable)
    infer_sigma = True if sigma is None else False
    if trainable and infer_sigma:
        # inferring sigma from data conflicts with learning it via gradients
        with pytest.raises(Exception):
            kernel(x, y, infer_sigma=infer_sigma)
    else:
        k_xy = kernel(x, y, infer_sigma=infer_sigma).numpy()
        k_xx = kernel(x, x, infer_sigma=infer_sigma).numpy()
        assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
        # k(x_i, x_i) == 1, so the trace equals the number of instances
        np.testing.assert_almost_equal(k_xx.trace(), xshape[0], decimal=4)
        assert (k_xx > 0.).all() and (k_xy > 0.).all()
class MyKernel(tf.keras.Model):  # TODO: Support then test models using keras functional API
    # Simple learnable linear kernel: k(x, y) = <Wx, Wy>.
    # NOTE(review): `n_features` is accepted but unused — the projection width
    # is hard-coded to 20; the parameter exists for signature symmetry with
    # the torch variant. Confirm before removing.
    def __init__(self, n_features: int):
        super().__init__()
        self.dense = Dense(20)
    def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
        # pairwise inner products between projected x and projected y
        return tf.einsum('ji,ki->jk', self.dense(x), self.dense(y))
# DeepKernel test grid: feature dims, sample sizes, component kernels and
# how the component-mixing coefficient eps is handled
n_features = [5, 10]
n_instances = [(100, 100), (100, 75)]
kernel_a = [GaussianRBF(trainable=True), MyKernel]
kernel_b = [GaussianRBF(trainable=True), MyKernel, None]
eps = [0.5, 'trainable']
tests_dk = list(product(n_features, n_instances, kernel_a, kernel_b, eps))
n_tests_dk = len(tests_dk)
@pytest.fixture
def deep_kernel_params(request):
    # indirect parametrization: request.param indexes tests_dk
    return tests_dk[request.param]
@pytest.mark.parametrize('deep_kernel_params', list(range(n_tests_dk)), indirect=True)
def test_deep_kernel(deep_kernel_params):
    """DeepKernel output must have the right shape, a positive diagonal on
    k(x, x), and be symmetric: k(x, y) == k(y, x)^T."""
    n_features, n_instances, kernel_a, kernel_b, eps = deep_kernel_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    x = tf.convert_to_tensor(np.random.random(xshape).astype('float32'))
    y = tf.convert_to_tensor(np.random.random(yshape).astype('float32'))
    # learnable projection applied before kernel_a
    proj = tf.keras.Sequential([Input(shape=(n_features,)), Dense(n_features)])
    # MyKernel is passed as a class in the grid and instantiated here
    kernel_a = kernel_a(n_features) if kernel_a == MyKernel else kernel_a
    kernel_b = kernel_b(n_features) if kernel_b == MyKernel else kernel_b
    kernel = DeepKernel(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
    k_xy = kernel(x, y).numpy()
    k_yx = kernel(y, x).numpy()
    k_xx = kernel(x, x).numpy()
    assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
    assert (np.diag(k_xx) > 0.).all()
    np.testing.assert_almost_equal(k_xy, np.transpose(k_yx), decimal=5)
| 3,271 | 38.421687 | 92 | py |
alibi-detect | alibi-detect-master/alibi_detect/ad/model_distillation.py | import logging
from typing import Callable, Dict, Tuple, Union, cast
import numpy as np
import tensorflow as tf
from alibi_detect.base import (BaseDetector, FitMixin, ThresholdMixin,
adversarial_prediction_dict)
from alibi_detect.models.tensorflow.losses import loss_distillation
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
from tensorflow.keras.losses import categorical_crossentropy, kld
logger = logging.getLogger(__name__)
class ModelDistillation(BaseDetector, FitMixin, ThresholdMixin):
    """Model distillation concept drift and adversarial detector.

    A distilled (student) model is trained to match the predictions of the
    original classifier (the teacher); at detection time the divergence between
    teacher and student predictions is used as the adversarial score.
    """
    def __init__(self,
                 threshold: float = None,
                 distilled_model: tf.keras.Model = None,
                 model: tf.keras.Model = None,
                 loss_type: str = 'kld',
                 temperature: float = 1.,
                 data_type: str = None
                 ) -> None:
        """
        Model distillation concept drift and adversarial detector.
        Parameters
        ----------
        threshold
            Threshold used for score to determine adversarial instances.
        distilled_model
            A tf.keras model to distill.
        model
            A trained tf.keras classification model.
        loss_type
            Loss for distillation. Supported: 'kld', 'xent'
        temperature
            Temperature used for model prediction scaling.
            Temperature <1 sharpens the prediction probability distribution.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        if threshold is None:
            logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
        self.threshold = threshold
        self.model = model
        # NOTE(review): `model` defaults to None but is dereferenced below;
        # omitting it raises AttributeError rather than an explicit TypeError.
        for layer in self.model.layers:  # freeze model layers
            layer.trainable = False
        if isinstance(distilled_model, tf.keras.Model):
            self.distilled_model = distilled_model
        else:
            raise TypeError('No valid format detected for `distilled_model` (tf.keras.Model) ')
        self.loss_type = loss_type
        self.temperature = temperature
        # set metadata
        self.meta['detector_type'] = 'adversarial'
        self.meta['data_type'] = data_type
        self.meta['online'] = False
    def fit(self,
            X: np.ndarray,
            loss_fn: tf.keras.losses = loss_distillation,
            optimizer: OptimizerTF = tf.keras.optimizers.Adam,
            epochs: int = 20,
            batch_size: int = 128,
            verbose: bool = True,
            log_metric: Tuple[str, "tf.keras.metrics"] = None,
            callbacks: tf.keras.callbacks = None,
            preprocess_fn: Callable = None
            ) -> None:
        """
        Train ModelDistillation detector.
        Parameters
        ----------
        X
            Training batch.
        loss_fn
            Loss function used for training.
        optimizer
            Optimizer used for training.
        epochs
            Number of training epochs.
        batch_size
            Batch size used for training.
        verbose
            Whether to print training progress.
        log_metric
            Additional metrics whose progress will be displayed if verbose equals True.
        callbacks
            Callbacks used during training.
        preprocess_fn
            Preprocessing function applied to each training batch.
        """
        # train arguments
        args = [self.distilled_model, loss_fn, X]
        # allow passing either an optimizer class or an instance
        optimizer = optimizer() if isinstance(optimizer, type) else optimizer
        kwargs = {
            'optimizer': optimizer,
            'epochs': epochs,
            'batch_size': batch_size,
            'verbose': verbose,
            'log_metric': log_metric,
            'callbacks': callbacks,
            'preprocess_fn': preprocess_fn,
            # the distillation loss needs the frozen teacher model and the
            # loss/temperature settings chosen at construction time
            'loss_fn_kwargs': {
                'model': self.model,
                'loss_type': self.loss_type,
                'temperature': self.temperature
            }
        }
        # train
        trainer(*args, **kwargs)
    def infer_threshold(self,
                        X: np.ndarray,
                        threshold_perc: float = 99.,
                        margin: float = 0.,
                        batch_size: int = int(1e10)
                        ) -> None:
        """
        Update threshold by a value inferred from the percentage of instances considered to be
        adversarial in a sample of the dataset.
        Parameters
        ----------
        X
            Batch of instances.
        threshold_perc
            Percentage of X considered to be normal based on the adversarial score.
        margin
            Add margin to threshold. Useful if adversarial instances have significantly higher scores and there
            is no adversarial instance in X.
        batch_size
            Batch size used when computing scores.
        """
        # compute adversarial scores
        adv_score = self.score(X, batch_size=batch_size)
        # update threshold
        self.threshold = np.percentile(adv_score, threshold_perc) + margin
    def score(self, X: np.ndarray, batch_size: int = int(1e10), return_predictions: bool = False) \
            -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray, np.ndarray]]:
        """
        Compute adversarial scores.
        Parameters
        ----------
        X
            Batch of instances to analyze.
        batch_size
            Batch size used when computing scores.
        return_predictions
            Whether to return the predictions of the classifier on the original and reconstructed instances.
        Returns
        -------
        Array with adversarial scores for each instance in the batch.
        """
        # model predictions
        y = predict_batch(X, self.model, batch_size=batch_size)
        y_distilled = predict_batch(X, self.distilled_model, batch_size=batch_size)
        y = cast(np.ndarray, y)  # help mypy out
        y_distilled = cast(np.ndarray, y_distilled)  # help mypy out
        # scale predictions
        if self.temperature != 1.:
            # temperature scaling followed by renormalization to probabilities
            y = y ** (1 / self.temperature)
            y = (y / tf.reshape(tf.reduce_sum(y, axis=-1), (-1, 1))).numpy()
        # score = divergence between teacher and student predictions
        if self.loss_type == 'kld':
            score = kld(y, y_distilled).numpy()
        elif self.loss_type == 'xent':
            score = categorical_crossentropy(y, y_distilled).numpy()
        else:
            raise NotImplementedError
        if return_predictions:
            return score, y, y_distilled
        else:
            return score
    def predict(self, X: np.ndarray, batch_size: int = int(1e10), return_instance_score: bool = True) \
            -> Dict[str, dict]:
        """
        Predict whether instances are adversarial instances or not.
        Parameters
        ----------
        X
            Batch of instances.
        batch_size
            Batch size used when computing scores.
        return_instance_score
            Whether to return instance level adversarial scores.
        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the adversarial predictions and instance level adversarial scores.
        """
        score = self.score(X, batch_size=batch_size)
        # values above threshold are adversarial
        pred = (score > self.threshold).astype(int)  # type: ignore
        # populate output dict
        ad = adversarial_prediction_dict()
        ad['meta'] = self.meta
        ad['data']['is_adversarial'] = pred
        if return_instance_score:
            ad['data']['instance_score'] = score
        return ad
| 7,963 | 34.238938 | 111 | py |
alibi-detect | alibi-detect-master/alibi_detect/ad/adversarialae.py | import logging
from typing import Callable, Dict, List, Tuple, Union, cast
import numpy as np
import tensorflow as tf
from alibi_detect.base import (BaseDetector, FitMixin, ThresholdMixin,
adversarial_correction_dict,
adversarial_prediction_dict)
from alibi_detect.models.tensorflow.autoencoder import AE
from alibi_detect.models.tensorflow.losses import loss_adv_ae
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.losses import kld
from tensorflow.keras.models import Model
logger = logging.getLogger(__name__)
class DenseHidden(tf.keras.Model):
    def __init__(self, model: tf.keras.Model, hidden_layer: int, output_dim: int, hidden_dim: int = None) -> None:
        """
        Dense layer that extracts the feature map of a hidden layer in a model and computes
        output probabilities over that layer.
        Parameters
        ----------
        model
            tf.keras classification model.
        hidden_layer
            Hidden layer from model where feature map is extracted from.
        output_dim
            Output dimension for softmax layer.
        hidden_dim
            Dimension of optional additional dense layer.
        """
        super(DenseHidden, self).__init__()
        # expose the requested hidden layer's output and freeze those weights
        self.partial_model = Model(inputs=model.inputs, outputs=model.layers[hidden_layer].output)
        for layer in self.partial_model.layers:
            layer.trainable = False
        self.hidden_dim = hidden_dim
        if hidden_dim is not None:
            self.dense_layer = Dense(hidden_dim, activation=tf.nn.relu)
        self.output_layer = Dense(output_dim, activation=tf.nn.softmax)

    def call(self, x: tf.Tensor) -> tf.Tensor:
        # feature map -> flatten -> (optional dense) -> softmax probabilities
        h = Flatten()(self.partial_model(x))
        if self.hidden_dim is not None:
            h = self.dense_layer(h)
        return self.output_layer(h)
class AdversarialAE(BaseDetector, FitMixin, ThresholdMixin):
    """Autoencoder (AE) based adversarial detector.

    The AE is trained so the classifier's predictions on reconstructions match
    its predictions on the originals; at detection time the K-L divergence
    between the two prediction distributions is the adversarial score.
    """
    def __init__(self,
                 threshold: float = None,
                 ae: tf.keras.Model = None,
                 model: tf.keras.Model = None,
                 encoder_net: tf.keras.Sequential = None,
                 decoder_net: tf.keras.Sequential = None,
                 model_hl: List[tf.keras.Model] = None,
                 hidden_layer_kld: dict = None,
                 w_model_hl: list = None,
                 temperature: float = 1.,
                 data_type: str = None
                 ) -> None:
        """
        Autoencoder (AE) based adversarial detector.
        Parameters
        ----------
        threshold
            Threshold used for adversarial score to determine adversarial instances.
        ae
            A trained tf.keras autoencoder model if available.
        model
            A trained tf.keras classification model.
        encoder_net
            Layers for the encoder wrapped in a tf.keras.Sequential class if no 'ae' is specified.
        decoder_net
            Layers for the decoder wrapped in a tf.keras.Sequential class if no 'ae' is specified.
        model_hl
            List with tf.keras models for the hidden layer K-L divergence computation.
        hidden_layer_kld
            Dictionary with as keys the hidden layer(s) of the model which are extracted and used
            during training of the AE, and as values the output dimension for the hidden layer.
        w_model_hl
            Weights assigned to the loss of each model in model_hl.
        temperature
            Temperature used for model prediction scaling.
            Temperature <1 sharpens the prediction probability distribution.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        if threshold is None:
            logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
        self.threshold = threshold
        self.model = model
        # NOTE(review): `model` defaults to None but is dereferenced below;
        # omitting it raises AttributeError rather than an explicit TypeError.
        for layer in self.model.layers:  # freeze model layers
            layer.trainable = False
        # check if model can be loaded, otherwise initialize AE model
        if isinstance(ae, tf.keras.Model):
            self.ae = ae
        elif isinstance(encoder_net, tf.keras.Sequential) and isinstance(decoder_net, tf.keras.Sequential):
            self.ae = AE(encoder_net, decoder_net)  # define AE model
        else:
            raise TypeError('No valid format detected for `ae` (tf.keras.Model) '
                            'or `encoder_net` and `decoder_net` (tf.keras.Sequential).')
        # intermediate feature map outputs for KLD and loss weights
        self.hidden_layer_kld = hidden_layer_kld
        if isinstance(model_hl, list):
            self.model_hl = model_hl
        elif isinstance(hidden_layer_kld, dict):
            # build a DenseHidden head per requested hidden layer
            self.model_hl = []
            for hidden_layer, output_dim in hidden_layer_kld.items():
                self.model_hl.append(DenseHidden(self.model, hidden_layer, output_dim))
        else:
            self.model_hl = None
        self.w_model_hl = w_model_hl
        if self.w_model_hl is None and isinstance(self.model_hl, list):
            # default: equal weight for every hidden-layer K-L term
            self.w_model_hl = list(np.ones(len(self.model_hl)))
        self.temperature = temperature
        # set metadata
        self.meta['detector_type'] = 'adversarial'
        self.meta['data_type'] = data_type
        self.meta['online'] = False
    def fit(self,
            X: np.ndarray,
            loss_fn: tf.keras.losses = loss_adv_ae,
            w_model: float = 1.,
            w_recon: float = 0.,
            optimizer: OptimizerTF = tf.keras.optimizers.Adam,
            epochs: int = 20,
            batch_size: int = 128,
            verbose: bool = True,
            log_metric: Tuple[str, "tf.keras.metrics"] = None,
            callbacks: tf.keras.callbacks = None,
            preprocess_fn: Callable = None
            ) -> None:
        """
        Train Adversarial AE model.
        Parameters
        ----------
        X
            Training batch.
        loss_fn
            Loss function used for training.
        w_model
            Weight on model prediction loss term.
        w_recon
            Weight on MSE reconstruction error loss term.
        optimizer
            Optimizer used for training.
        epochs
            Number of training epochs.
        batch_size
            Batch size used for training.
        verbose
            Whether to print training progress.
        log_metric
            Additional metrics whose progress will be displayed if verbose equals True.
        callbacks
            Callbacks used during training.
        preprocess_fn
            Preprocessing function applied to each training batch.
        """
        # train arguments
        args = [self.ae, loss_fn, X]
        # allow passing either an optimizer class or an instance
        optimizer = optimizer() if isinstance(optimizer, type) else optimizer
        kwargs = {
            'optimizer': optimizer,
            'epochs': epochs,
            'batch_size': batch_size,
            'verbose': verbose,
            'log_metric': log_metric,
            'callbacks': callbacks,
            'preprocess_fn': preprocess_fn,
            # the loss needs the frozen classifier, optional hidden-layer heads
            # and the weights on each loss term
            'loss_fn_kwargs': {
                'model': self.model,
                'model_hl': self.model_hl,
                'w_model': w_model,
                'w_recon': w_recon,
                'w_model_hl': self.w_model_hl,
                'temperature': self.temperature
            }
        }
        # train
        trainer(*args, **kwargs)
    def infer_threshold(self,
                        X: np.ndarray,
                        threshold_perc: float = 99.,
                        margin: float = 0.,
                        batch_size: int = int(1e10)
                        ) -> None:
        """
        Update threshold by a value inferred from the percentage of instances considered to be
        adversarial in a sample of the dataset.
        Parameters
        ----------
        X
            Batch of instances.
        threshold_perc
            Percentage of X considered to be normal based on the adversarial score.
        margin
            Add margin to threshold. Useful if adversarial instances have significantly higher scores and there
            is no adversarial instance in X.
        batch_size
            Batch size used when computing scores.
        """
        # compute adversarial scores
        adv_score = self.score(X, batch_size=batch_size)
        # update threshold
        self.threshold = np.percentile(adv_score, threshold_perc) + margin
    def score(self, X: np.ndarray, batch_size: int = int(1e10), return_predictions: bool = False) \
            -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray, np.ndarray]]:
        """
        Compute adversarial scores.
        Parameters
        ----------
        X
            Batch of instances to analyze.
        batch_size
            Batch size used when computing scores.
        return_predictions
            Whether to return the predictions of the classifier on the original and reconstructed instances.
        Returns
        -------
        Array with adversarial scores for each instance in the batch.
        """
        # reconstructed instances
        X_recon = predict_batch(X, self.ae, batch_size=batch_size)
        # model predictions
        y = predict_batch(X, self.model, batch_size=batch_size)
        y_recon = predict_batch(X_recon, self.model, batch_size=batch_size)
        y = cast(np.ndarray, y)  # help mypy out
        y_recon = cast(np.ndarray, y_recon)  # help mypy out
        # scale predictions
        if self.temperature != 1.:
            # temperature scaling followed by renormalization to probabilities
            y = y ** (1 / self.temperature)
            y = (y / tf.reshape(tf.reduce_sum(y, axis=-1), (-1, 1))).numpy()
        # score = divergence between predictions on originals and reconstructions
        adv_score = kld(y, y_recon).numpy()
        # hidden layer predictions
        if isinstance(self.model_hl, list):
            for m, w in zip(self.model_hl, self.w_model_hl):
                h = predict_batch(X, m, batch_size=batch_size)
                h_recon = predict_batch(X_recon, m, batch_size=batch_size)
                adv_score += w * kld(h, h_recon).numpy()
        if return_predictions:
            return adv_score, y, y_recon
        else:
            return adv_score
    def predict(self, X: np.ndarray, batch_size: int = int(1e10), return_instance_score: bool = True) \
            -> Dict[str, dict]:
        """
        Predict whether instances are adversarial instances or not.
        Parameters
        ----------
        X
            Batch of instances.
        batch_size
            Batch size used when computing scores.
        return_instance_score
            Whether to return instance level adversarial scores.
        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the adversarial predictions and instance level adversarial scores.
        """
        adv_score = self.score(X, batch_size=batch_size)
        # values above threshold are adversarial
        adv_pred = (adv_score > self.threshold).astype(int)  # type: ignore
        # populate output dict
        ad = adversarial_prediction_dict()
        ad['meta'] = self.meta
        ad['data']['is_adversarial'] = adv_pred
        if return_instance_score:
            ad['data']['instance_score'] = adv_score
        return ad
    def correct(self, X: np.ndarray, batch_size: int = int(1e10),
                return_instance_score: bool = True, return_all_predictions: bool = True) \
            -> Dict[str, dict]:
        """
        Correct adversarial instances if the adversarial score is above the threshold.
        Parameters
        ----------
        X
            Batch of instances.
        batch_size
            Batch size used when computing scores.
        return_instance_score
            Whether to return instance level adversarial scores.
        return_all_predictions
            Whether to return the predictions on the original and the reconstructed data.
        Returns
        -------
        Dict with corrected predictions and information whether an instance is adversarial or not.
        """
        adv_score, y, y_recon = self.score(X, batch_size=batch_size, return_predictions=True)
        # values above threshold are adversarial
        adv_pred = (adv_score > self.threshold).astype(int)
        idx_adv = np.where(adv_pred == 1)[0]
        # correct predictions on adversarial instances
        # flagged instances take the class predicted on the reconstruction
        y = y.argmax(axis=-1)
        y_recon = y_recon.argmax(axis=-1)
        y_correct = y.copy()
        y_correct[idx_adv] = y_recon[idx_adv]
        # populate output dict
        ad = adversarial_correction_dict()
        ad['meta'] = self.meta
        ad['data']['is_adversarial'] = adv_pred
        if return_instance_score:
            ad['data']['instance_score'] = adv_score
        ad['data']['corrected'] = y_correct
        if return_all_predictions:
            ad['data']['no_defense'] = y
            ad['data']['defense'] = y_recon
        return ad
| 13,401 | 36.858757 | 114 | py |
alibi-detect | alibi-detect-master/alibi_detect/ad/tests/test_admd.py | from itertools import product
import numpy as np
import pytest
from sklearn.datasets import load_iris
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from alibi_detect.ad import ModelDistillation
from alibi_detect.version import __version__
# ModelDistillation test grid: preset threshold, distillation loss,
# threshold percentile and whether instance scores are returned
threshold = [None, 5.]
loss_type = ['kld', 'xent']
threshold_perc = [90.]
return_instance_score = [True, False]
tests = list(product(threshold, loss_type, threshold_perc, return_instance_score))
n_tests = len(tests)
# load iris data
X, y = load_iris(return_X_y=True)
X = X.astype(np.float32)
y = to_categorical(y)
input_dim = X.shape[1]
latent_dim = 2  # NOTE(review): unused in this module — likely copied from the AE test
# define and train model
inputs = tf.keras.Input(shape=(input_dim,))
outputs = tf.keras.layers.Dense(y.shape[1], activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(X, y, batch_size=150, epochs=10)
@pytest.fixture
def adv_md_params(request):
    # indirect parametrization: request.param indexes tests
    return tests[request.param]
@pytest.mark.parametrize('adv_md_params', list(range(n_tests)), indirect=True)
def test_adv_md(adv_md_params):
    """End-to-end check of the ModelDistillation detector: init metadata,
    frozen teacher, threshold inference and prediction consistency."""
    # ModelDistillation parameters
    threshold, loss_type, threshold_perc, return_instance_score = adv_md_params
    # define ancillary model
    layers = [tf.keras.layers.InputLayer(input_shape=(input_dim)),
              tf.keras.layers.Dense(y.shape[1], activation=tf.nn.softmax)]
    distilled_model = tf.keras.Sequential(layers)
    # init ModelDistillation detector
    admd = ModelDistillation(
        threshold=threshold,
        model=model,
        distilled_model=distilled_model,
        loss_type=loss_type
    )
    assert admd.threshold == threshold
    assert admd.meta == {'name': 'ModelDistillation', 'detector_type': 'adversarial', 'data_type': None,
                         'online': False, 'version': __version__}
    # the teacher model's layers must be frozen by the detector
    for layer in admd.model.layers:
        assert not layer.trainable
    # fit ModelDistillation detector, infer threshold and compute scores
    admd.fit(X, epochs=5, verbose=False)
    admd.infer_threshold(X, threshold_perc=threshold_perc)
    iscore = admd.score(X)
    # roughly threshold_perc % of instances should fall below the threshold
    perc_score = 100 * (iscore < admd.threshold).astype(int).sum() / iscore.shape[0]
    assert threshold_perc + 1 > perc_score > threshold_perc - 1
    # make and check predictions
    ad_preds = admd.predict(X, return_instance_score=return_instance_score)
    assert ad_preds['meta'] == admd.meta
    if return_instance_score:
        assert ad_preds['data']['is_adversarial'].sum() == (ad_preds['data']['instance_score']
                                                            > admd.threshold).astype(int).sum()
    else:
        assert ad_preds['data']['instance_score'] is None
| 2,717 | 33.405063 | 104 | py |
alibi-detect | alibi-detect-master/alibi_detect/ad/tests/test_adae.py | from itertools import product
import numpy as np
import pytest
from sklearn.datasets import load_iris
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from tensorflow.keras.utils import to_categorical
from alibi_detect.ad import AdversarialAE
from alibi_detect.version import __version__
# AdversarialAE test grid: preset threshold, loss-term weights, threshold
# percentile and whether instance scores are returned
threshold = [None, 5.]
w_model = [1., .5]
w_recon = [0., 1e-5]
threshold_perc = [90.]
return_instance_score = [True, False]
tests = list(product(threshold, w_model, w_recon, threshold_perc, return_instance_score))
n_tests = len(tests)
# load iris data
X, y = load_iris(return_X_y=True)
X = X.astype(np.float32)
y = to_categorical(y)
input_dim = X.shape[1]
latent_dim = 2  # AE bottleneck dimension
# define and train model
inputs = tf.keras.Input(shape=(input_dim,))
outputs = tf.keras.layers.Dense(y.shape[1], activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(X, y, batch_size=150, epochs=10)
@pytest.fixture
def adv_ae_params(request):
    # indirect parametrization: request.param indexes tests
    return tests[request.param]
@pytest.mark.parametrize('adv_ae_params', list(range(n_tests)), indirect=True)
def test_adv_vae(adv_ae_params):
    """End-to-end check of the AdversarialAE detector: init metadata, frozen
    classifier, threshold inference and prediction consistency.

    NOTE(review): the function name says 'vae' but the detector under test is
    the (plain) AdversarialAE.
    """
    # AdversarialAE parameters
    threshold, w_model, w_recon, threshold_perc, return_instance_score = adv_ae_params
    # define encoder and decoder
    encoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(input_dim,)),
            Dense(5, activation=tf.nn.relu),
            Dense(latent_dim, activation=None)
        ]
    )
    decoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(latent_dim,)),
            Dense(5, activation=tf.nn.relu),
            Dense(input_dim, activation=tf.nn.sigmoid)
        ]
    )
    # init OutlierVAE
    advae = AdversarialAE(
        threshold=threshold,
        model=model,
        encoder_net=encoder_net,
        decoder_net=decoder_net
    )
    assert advae.threshold == threshold
    assert advae.meta == {'name': 'AdversarialAE', 'detector_type': 'adversarial', 'data_type': None,
                          'online': False, 'version': __version__}
    # the classifier's layers must be frozen by the detector
    for layer in advae.model.layers:
        assert not layer.trainable
    # fit AdversarialVAE, infer threshold and compute scores
    advae.fit(X, w_model=w_model, w_recon=w_recon, epochs=5, verbose=False)
    advae.infer_threshold(X, threshold_perc=threshold_perc)
    iscore = advae.score(X)
    # roughly threshold_perc % of instances should fall below the threshold
    perc_score = 100 * (iscore < advae.threshold).astype(int).sum() / iscore.shape[0]
    assert threshold_perc + 1 > perc_score > threshold_perc - 1
    # make and check predictions
    ad_preds = advae.predict(X, return_instance_score=return_instance_score)
    assert ad_preds['meta'] == advae.meta
    if return_instance_score:
        assert ad_preds['data']['is_adversarial'].sum() == (ad_preds['data']['instance_score']
                                                            > advae.threshold).astype(int).sum()
    else:
        assert ad_preds['data']['instance_score'] is None
| 3,044 | 31.741935 | 101 | py |
alibi-detect | alibi-detect-master/doc/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Hide RemovedInSphinx40Warning. Can remove once upgraded to sphinx>=4.0
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
project = "alibi-detect"
copyright = "2019, Seldon Technologies Ltd"
author = "Seldon Technologies Ltd"
# The short X.Y version
# import alibi_detect
# Executing version.py (rather than importing the package) avoids pulling in
# the package's heavy dependencies at docs-build time; it defines __version__.
exec(open("../../alibi_detect/version.py").read())
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.ifconfig",
    "sphinx.ext.viewcode",
    "sphinx.ext.napoleon",
    "sphinx_autodoc_typehints",
    "sphinxcontrib.apidoc",  # automatically generate API docs, see https://github.com/rtfd/readthedocs.org/issues/1139
    "sphinxcontrib.bibtex",
    "nbsphinx",
    "myst_parser",
    "sphinx_design",
]
# -- nbsphinx settings -------------------------------------------------------
# "auto": only execute notebooks that have no stored outputs
nbsphinx_execute = "auto"
# Create symlinks for example notebooks so they are also reachable from the
# top-level examples/ directory (scratch files prefixed 'temp_' are skipped).
import glob
nb_files = [os.path.basename(f) for f in glob.glob(os.path.join('examples', '*.ipynb'))
            if not os.path.basename(f).startswith('temp_')]
for nb_file in nb_files:
    target = os.path.join('../../examples', nb_file)
    # Use lexists, not exists: exists() follows symlinks and returns False for
    # a dangling link, so a stale symlink would survive and os.symlink below
    # would then fail with FileExistsError.
    if os.path.lexists(target):
        os.remove(target)
    os.symlink(os.path.join('../doc/source/examples', nb_file), target)
# -- Bibliography ------------------------------------------------------------
bibtex_bibfiles = ['refs.bib']
bibtex_default_style = 'unsrtalpha'
# apidoc settings
apidoc_module_dir = "../../alibi_detect"
apidoc_output_dir = "api"
apidoc_excluded_paths = ["**/*test*"]
apidoc_module_first = True
apidoc_separate_modules = True
apidoc_extra_args = ["-d 6"]
# mock imports
# numpy, pandas and matplotlib are not included as these are installed on
# ReadTheDocs PYTHON_VERSION_39 docker image (https://hub.docker.com/r/readthedocs/build/dockerfile/)
# Heavy/optional dependencies are mocked so autodoc can import the package
# without them being installed in the docs build environment.
autodoc_mock_imports = [
    "sklearn",
    "skimage",
    "requests",
    "cv2",
    "bs4",
    "keras",
    "seaborn",
    "PIL",
    "tensorflow",
    "spacy",
    "tensorflow_probability",
    "scipy",
    "prophet",
    "torch",
    "transformers",
    "tqdm",
    "dill",
    "joblib",
    "numba",
    "pydantic",
    "toml",
    "catalogue",
    "pykeops"
]
# Napoleon settings
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_logo = '_static/Alibi_Detect_Logo_white.png'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"logo_only": True}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# override default theme width
html_css_files = ['theme_overrides.css', 'custom_docs.css']  # override wide tables in RTD theme
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "alibi-detectdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
#
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
#
# Additional stuff for the LaTeX preamble.
# - Replace unicode characters with utf8.
# (U+2588 and U+258E are used in tqdm progress bars)
# - Use enumitem for lists to prevent "too deeply nested" latex error
'preamble': r'''
\DeclareUnicodeCharacter{2588}{=}
\DeclareUnicodeCharacter{258E}{|}
\DeclareUnicodeCharacter{274C}{$\times$}
\DeclareUnicodeCharacter{2705}{$\checkmark$}
\usepackage{enumitem}
\setlistdepth{99}
''',
#
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, "alibi-detect.tex", "alibi-detect Documentation", "Seldon Technologies Ltd", "manual")]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "alibi-detect", "alibi-detect Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, "alibi-detect", "alibi-detect Documentation", author, "alibi-detect", "One line description of project.", "Miscellaneous")
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'sklearn': ('https://scikit-learn.org/stable/', None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- nbsphinx prolog ---------------------------------------------------------
# from https://github.com/vidartf/nbsphinx-link/blob/master/docs/source/conf.py for custom tags
import subprocess
try:
git_rev = subprocess.check_output(["git", "describe", "--exact-match", "HEAD"], universal_newlines=True)
except subprocess.CalledProcessError:
try:
git_rev = subprocess.check_output(["git", "rev-parse", "HEAD"], universal_newlines=True)
except subprocess.CalledProcessError:
git_rev = ""
if git_rev:
git_rev = git_rev.splitlines()[0] + "/"
nbsphinx_prolog = (
r"""
{% set docname = env.doc2path(env.docname, base=False) %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. nbinfo::
This page was generated from `{{ docname }}`__.
__ https://github.com/SeldonIO/alibi-detect/blob/
"""
+ git_rev
+ "doc/source/"
+ r"{{ docname }}"
)
# -- Override order of preference for image formats --------------------------
# Need to set gif above png so that it is chosen over png if present
from sphinx.builders.html import StandaloneHTMLBuilder
StandaloneHTMLBuilder.supported_image_types = [
'image/svg+xml',
'image/gif',
'image/png',
'image/jpeg'
]
# -- myst-parser configuration -----------------------------------------------
# See https://myst-parser.readthedocs.io/en/stable/syntax/optional.html for
# details of available extensions.
myst_enable_extensions = [
"dollarmath",
"amsmath",
"colon_fence",
"smartquotes",
"tasklist",
"html_image",
]
# Create heading anchors for h1 to h3 (useful for local toc's)
myst_heading_anchors = 3
# Below code fixes a problem with sphinx>=3.2.0 processing functions with
# torch.jit.script decorator. Probably occuring because torch is being mocked
# (see https://github.com/sphinx-doc/sphinx/issues/6709).
def call_mock(self, *args, **kw):
    """Replacement ``__call__`` for autodoc's ``_MockObject``.

    When a mocked object is invoked as a decorator (i.e. called with a class,
    function or method as its first argument), return the decorated object
    unchanged instead of another mock, so decorated functions (e.g. those using
    ``torch.jit.script``) keep their real signatures in the generated docs.
    """
    from types import FunctionType, MethodType
    if args and type(args[0]) in [type, FunctionType, MethodType]:
        # Appears to be a decorator, pass through unchanged
        return args[0]
    return self
# Patch the mock class so every mocked attribute uses the decorator-aware call.
from sphinx.ext.autodoc.mock import _MockObject
_MockObject.__call__ = call_mock
| 11,119 | 31.138728 | 139 | py |
StarGANv2-VC | StarGANv2-VC-main/losses.py | #coding:utf-8
import os
import torch
from torch import nn
from munch import Munch
from transforms import build_transforms
import torch.nn.functional as F
import numpy as np
def compute_d_loss(nets, args, x_real, y_org, y_trg, z_trg=None, x_ref=None, use_r1_reg=True, use_adv_cls=False, use_con_reg=False):
    """Compute the discriminator loss for one batch.

    Combines real/fake adversarial BCE, an optional R1 gradient penalty on real
    samples, optional consistency regularization (bCR), and an optional
    adversarial source-speaker classification term.

    Exactly one of ``z_trg`` (latent code) or ``x_ref`` (reference mel) must be
    given; it selects whether the target style comes from the mapping network
    or the style encoder. Returns ``(total_loss, Munch of scalar sub-losses)``.
    """
    args = Munch(args)
    assert (z_trg is None) != (x_ref is None)
    # with real audios
    x_real.requires_grad_()  # needed so r1_reg can differentiate w.r.t. the input
    out = nets.discriminator(x_real, y_org)
    loss_real = adv_loss(out, 1)
    # R1 regularization (https://arxiv.org/abs/1801.04406v4)
    if use_r1_reg:
        loss_reg = r1_reg(out, x_real)
    else:
        loss_reg = torch.FloatTensor([0]).to(x_real.device)
    # consistency regularization (bCR-GAN: https://arxiv.org/abs/2002.04724)
    loss_con_reg = torch.FloatTensor([0]).to(x_real.device)
    if use_con_reg:
        t = build_transforms()
        out_aug = nets.discriminator(t(x_real).detach(), y_org)
        loss_con_reg += F.smooth_l1_loss(out, out_aug)
    # with fake audios (generated without tracking generator gradients)
    with torch.no_grad():
        if z_trg is not None:
            s_trg = nets.mapping_network(z_trg, y_trg)
        else:  # x_ref is not None
            s_trg = nets.style_encoder(x_ref, y_trg)
        F0 = nets.f0_model.get_feature_GAN(x_real)
        x_fake = nets.generator(x_real, s_trg, masks=None, F0=F0)
    out = nets.discriminator(x_fake, y_trg)
    loss_fake = adv_loss(out, 0)
    if use_con_reg:
        out_aug = nets.discriminator(t(x_fake).detach(), y_trg)
        loss_con_reg += F.smooth_l1_loss(out, out_aug)
    # adversarial classifier loss: classify the fake as its SOURCE speaker,
    # only over pairs whose source and target domains differ
    if use_adv_cls:
        out_de = nets.discriminator.classifier(x_fake)
        loss_real_adv_cls = F.cross_entropy(out_de[y_org != y_trg], y_org[y_org != y_trg])
        if use_con_reg:
            out_de_aug = nets.discriminator.classifier(t(x_fake).detach())
            loss_con_reg += F.smooth_l1_loss(out_de, out_de_aug)
    else:
        loss_real_adv_cls = torch.zeros(1).mean()
    loss = loss_real + loss_fake + args.lambda_reg * loss_reg + \
        args.lambda_adv_cls * loss_real_adv_cls + \
        args.lambda_con_reg * loss_con_reg
    return loss, Munch(real=loss_real.item(),
                       fake=loss_fake.item(),
                       reg=loss_reg.item(),
                       real_adv_cls=loss_real_adv_cls.item(),
                       con_reg=loss_con_reg.item())
def compute_g_loss(nets, args, x_real, y_org, y_trg, z_trgs=None, x_refs=None, use_adv_cls=False):
    """Compute the generator loss for one batch.

    Sums adversarial, style-reconstruction, diversity (negated), cycle,
    norm-consistency, ASR-feature, F0-consistency, style-F0 and optional
    adversarial-classifier terms, each weighted by its ``lambda_*`` in ``args``.

    Exactly one of ``z_trgs`` (pair of latent codes) or ``x_refs`` (pair of
    reference mels) must be given. Returns ``(total_loss, Munch of scalars)``.
    """
    args = Munch(args)
    assert (z_trgs is None) != (x_refs is None)
    if z_trgs is not None:
        z_trg, z_trg2 = z_trgs
    if x_refs is not None:
        x_ref, x_ref2 = x_refs
    # compute style vectors
    if z_trgs is not None:
        s_trg = nets.mapping_network(z_trg, y_trg)
    else:
        s_trg = nets.style_encoder(x_ref, y_trg)
    # compute ASR/F0 features (real); frozen teacher nets, so no grad needed
    with torch.no_grad():
        F0_real, GAN_F0_real, cyc_F0_real = nets.f0_model(x_real)
        ASR_real = nets.asr_model.get_feature(x_real)
    # adversarial loss
    x_fake = nets.generator(x_real, s_trg, masks=None, F0=GAN_F0_real)
    out = nets.discriminator(x_fake, y_trg)
    loss_adv = adv_loss(out, 1)
    # compute ASR/F0 features (fake)
    F0_fake, GAN_F0_fake, _ = nets.f0_model(x_fake)
    ASR_fake = nets.asr_model.get_feature(x_fake)
    # norm consistency loss: keep frame energy close, with a tolerance band
    x_fake_norm = log_norm(x_fake)
    x_real_norm = log_norm(x_real)
    loss_norm = ((torch.nn.ReLU()(torch.abs(x_fake_norm - x_real_norm) - args.norm_bias))**2).mean()
    # F0 loss
    loss_f0 = f0_loss(F0_fake, F0_real)
    # style F0 loss (style initialization)
    if x_refs is not None and args.lambda_f0_sty > 0 and not use_adv_cls:
        F0_sty, _, _ = nets.f0_model(x_ref)
        loss_f0_sty = F.l1_loss(compute_mean_f0(F0_fake), compute_mean_f0(F0_sty))
    else:
        loss_f0_sty = torch.zeros(1).mean()
    # ASR loss: preserve linguistic content
    loss_asr = F.smooth_l1_loss(ASR_fake, ASR_real)
    # style reconstruction loss
    s_pred = nets.style_encoder(x_fake, y_trg)
    loss_sty = torch.mean(torch.abs(s_pred - s_trg))
    # diversity sensitive loss: different styles should yield different outputs
    if z_trgs is not None:
        s_trg2 = nets.mapping_network(z_trg2, y_trg)
    else:
        s_trg2 = nets.style_encoder(x_ref2, y_trg)
    x_fake2 = nets.generator(x_real, s_trg2, masks=None, F0=GAN_F0_real)
    x_fake2 = x_fake2.detach()
    _, GAN_F0_fake2, _ = nets.f0_model(x_fake2)
    loss_ds = torch.mean(torch.abs(x_fake - x_fake2))
    loss_ds += F.smooth_l1_loss(GAN_F0_fake, GAN_F0_fake2.detach())
    # cycle-consistency loss: converting back to the source style recovers input
    s_org = nets.style_encoder(x_real, y_org)
    x_rec = nets.generator(x_fake, s_org, masks=None, F0=GAN_F0_fake)
    loss_cyc = torch.mean(torch.abs(x_rec - x_real))
    # F0 loss in cycle-consistency loss
    if args.lambda_f0 > 0:
        _, _, cyc_F0_rec = nets.f0_model(x_rec)
        loss_cyc += F.smooth_l1_loss(cyc_F0_rec, cyc_F0_real)
    if args.lambda_asr > 0:
        ASR_recon = nets.asr_model.get_feature(x_rec)
        loss_cyc += F.smooth_l1_loss(ASR_recon, ASR_real)
    # adversarial classifier loss: fool the classifier into the TARGET speaker
    if use_adv_cls:
        out_de = nets.discriminator.classifier(x_fake)
        loss_adv_cls = F.cross_entropy(out_de[y_org != y_trg], y_trg[y_org != y_trg])
    else:
        loss_adv_cls = torch.zeros(1).mean()
    loss = args.lambda_adv * loss_adv + args.lambda_sty * loss_sty \
        - args.lambda_ds * loss_ds + args.lambda_cyc * loss_cyc\
        + args.lambda_norm * loss_norm \
        + args.lambda_asr * loss_asr \
        + args.lambda_f0 * loss_f0 \
        + args.lambda_f0_sty * loss_f0_sty \
        + args.lambda_adv_cls * loss_adv_cls
    return loss, Munch(adv=loss_adv.item(),
                       sty=loss_sty.item(),
                       ds=loss_ds.item(),
                       cyc=loss_cyc.item(),
                       norm=loss_norm.item(),
                       asr=loss_asr.item(),
                       f0=loss_f0.item(),
                       adv_cls=loss_adv_cls.item())
# for norm consistency loss
def log_norm(x, mean=-4, std=4, dim=2):
    """Undo the log-mel normalization, take the vector norm along ``dim``,
    and return its log: normalized log mel -> mel -> norm -> log(norm)."""
    denormalized = torch.exp(x * std + mean)
    return torch.log(denormalized.norm(dim=dim))
# for adversarial loss
def adv_loss(logits, target):
    """Binary adversarial loss: BCE of ``logits`` against a constant 0/1 target."""
    assert target in [1, 0]
    if logits.dim() > 1:
        logits = logits.reshape(-1)
    labels = torch.full_like(logits, fill_value=target)
    # Clamp extreme logits so the BCE stays numerically finite.
    clamped = logits.clamp(min=-10, max=10)
    return F.binary_cross_entropy_with_logits(clamped, labels)
# for R1 regularization loss
def r1_reg(d_out, x_in):
    """Zero-centered R1 gradient penalty on real samples (arXiv:1801.04406)."""
    batch = x_in.size(0)
    (grad_real,) = torch.autograd.grad(
        outputs=d_out.sum(), inputs=x_in,
        create_graph=True, retain_graph=True, only_inputs=True
    )
    grad_sq = grad_real.pow(2)
    assert grad_sq.size() == x_in.size()
    # 0.5 * E[ ||grad D(x)||^2 ] over the batch
    return 0.5 * grad_sq.view(batch, -1).sum(1).mean(0)
# for F0 consistency loss
def compute_mean_f0(f0):
    """Per-sample mean F0, tiled back to the input's last-dim length -> (B, M)."""
    per_sample = f0.mean(-1)
    tiled = per_sample.expand(f0.shape[-1], per_sample.shape[0])
    return tiled.transpose(0, 1)  # (B, M)
def f0_loss(x_f0, y_f0):
    """L1 distance between mean-normalized F0 curves.

    x_f0: predicted F0, shape (B, 1, M, L); y_f0: target F0, same shape.
    Dividing each curve by its per-sample mean makes the loss pitch-scale
    invariant.
    """
    x_normed = x_f0 / compute_mean_f0(x_f0)
    y_normed = y_f0 / compute_mean_f0(y_f0)
    return F.l1_loss(x_normed, y_normed)
StarGANv2-VC | StarGANv2-VC-main/optimizers.py | #coding:utf-8
import os, sys
import os.path as osp
import numpy as np
import torch
from torch import nn
from torch.optim import Optimizer
from functools import reduce
from torch.optim import AdamW
class MultiOptimizer:
    """Bundle several named optimizers and schedulers behind one optimizer-like API.

    Each network gets its own entry keyed by name; ``step``/``zero_grad``/
    ``scheduler`` may target a single key or all of them.
    """
    def __init__(self, optimizers=None, schedulers=None):
        # Avoid mutable default arguments: a shared dict would leak across instances.
        self.optimizers = {} if optimizers is None else optimizers
        self.schedulers = {} if schedulers is None else schedulers
        self.keys = list(self.optimizers.keys())
        # Seed reduce with [] so an empty optimizer dict yields [] instead of raising.
        self.param_groups = reduce(lambda x, y: x + y,
                                   [v.param_groups for v in self.optimizers.values()], [])
    def state_dict(self):
        """Return a list of (key, optimizer state_dict) pairs."""
        state_dicts = [(key, self.optimizers[key].state_dict())\
                       for key in self.keys]
        return state_dicts
    def load_state_dict(self, state_dict):
        """Restore optimizer states; skip (and report) keys that fail to load."""
        for key, val in state_dict:
            try:
                self.optimizers[key].load_state_dict(val)
            except Exception:  # narrow from bare except: don't swallow SystemExit etc.
                print("Unloaded %s" % key)
    def step(self, key=None, scaler=None):
        """Step one optimizer (by key) or all of them, optionally via a GradScaler."""
        keys = [key] if key is not None else self.keys
        _ = [self._step(key, scaler) for key in keys]
    def _step(self, key, scaler=None):
        # With AMP, the scaler unscales gradients and skips steps on inf/nan.
        if scaler is not None:
            scaler.step(self.optimizers[key])
            scaler.update()
        else:
            self.optimizers[key].step()
    def zero_grad(self, key=None):
        """Zero gradients of one optimizer (by key) or all of them."""
        if key is not None:
            self.optimizers[key].zero_grad()
        else:
            _ = [self.optimizers[key].zero_grad() for key in self.keys]
    def scheduler(self, *args, key=None):
        """Advance one scheduler (by key) or all of them."""
        if key is not None:
            self.schedulers[key].step(*args)
        else:
            _ = [self.schedulers[key].step(*args) for key in self.keys]
def define_scheduler(optimizer, params):
    """Build a OneCycleLR schedule from a params dict.

    Recognized keys (with defaults): ``max_lr`` (2e-4), ``epochs`` (200),
    ``steps_per_epoch`` (1000), ``pct_start`` (0.0). ``div_factor`` and
    ``final_div_factor`` are pinned to 1 so the LR starts and ends at
    ``max_lr`` rather than ramping from ``max_lr/25``.
    """
    # NOTE: removed a leftover debug `print(params)` that spammed the log.
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=params.get('max_lr', 2e-4),
        epochs=params.get('epochs', 200),
        steps_per_epoch=params.get('steps_per_epoch', 1000),
        pct_start=params.get('pct_start', 0.0),
        div_factor=1,
        final_div_factor=1)
    return scheduler
def build_optimizer(parameters_dict, scheduler_params_dict):
    """Create one AdamW optimizer and OneCycle scheduler per key, wrapped in a
    MultiOptimizer.

    parameters_dict maps key -> iterable of parameters; scheduler_params_dict
    maps the same keys -> scheduler parameter dicts for define_scheduler.
    """
    optimizers = {
        key: AdamW(params, lr=1e-4, weight_decay=1e-4, betas=(0.0, 0.99), eps=1e-9)
        for key, params in parameters_dict.items()
    }
    schedulers = {
        key: define_scheduler(opt, scheduler_params_dict[key])
        for key, opt in optimizers.items()
    }
    return MultiOptimizer(optimizers, schedulers)
StarGANv2-VC | StarGANv2-VC-main/meldataset.py | #coding: utf-8
import os
import time
import random
import random
import torch
import torchaudio
import numpy as np
import soundfile as sf
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
np.random.seed(1)
random.seed(1)
SPECT_PARAMS = {
"n_fft": 2048,
"win_length": 1200,
"hop_length": 300
}
MEL_PARAMS = {
"n_mels": 80,
"n_fft": 2048,
"win_length": 1200,
"hop_length": 300
}
class MelDataset(torch.utils.data.Dataset):
    """Dataset of (mel, label, ref_mel, ref2_mel, ref_label) tuples for StarGANv2-VC.

    Each entry of ``data_list`` is a "path|label" line; ``__getitem__`` also
    samples a random reference utterance plus a second reference from the same
    speaker class as the first reference.
    """
    def __init__(self,
                 data_list,
                 sr=24000,
                 validation=False,
                 ):
        # Lines look like "wav_path|label\n"; strip the trailing newline and split.
        _data_list = [l[:-1].split('|') for l in data_list]
        self.data_list = [(path, int(label)) for path, label in _data_list]
        # Index entries by speaker label so a same-class second reference can be drawn.
        self.data_list_per_class = {
            target: [(path, label) for path, label in self.data_list if label == target] \
            for target in list(set([label for _, label in self.data_list]))}
        self.sr = sr  # expected sample rate of the wav files — TODO confirm files match
        self.to_melspec = torchaudio.transforms.MelSpectrogram(**MEL_PARAMS)
        # Normalization constants for the log-mel features (must match log_norm users).
        self.mean, self.std = -4, 4
        self.validation = validation
        self.max_mel_length = 192  # crop length in mel frames
    def __len__(self):
        return len(self.data_list)
    def __getitem__(self, idx):
        """Return (mel, label, ref_mel, ref2_mel, ref_label) for training."""
        data = self.data_list[idx]
        mel_tensor, label = self._load_data(data)
        ref_data = random.choice(self.data_list)
        ref_mel_tensor, ref_label = self._load_data(ref_data)
        # Second reference drawn from the SAME class as the first reference.
        ref2_data = random.choice(self.data_list_per_class[ref_label])
        ref2_mel_tensor, _ = self._load_data(ref2_data)
        return mel_tensor, label, ref_mel_tensor, ref2_mel_tensor, ref_label
    def _load_data(self, path):
        """Load one (path, label) entry and return a normalized, cropped log-mel."""
        wave_tensor, label = self._load_tensor(path)
        if not self.validation: # random scale for robustness
            random_scale = 0.5 + 0.5 * np.random.random()
            wave_tensor = random_scale * wave_tensor
        mel_tensor = self.to_melspec(wave_tensor)
        # log with a floor of 1e-5 to avoid log(0), then mean/std normalize
        mel_tensor = (torch.log(1e-5 + mel_tensor) - self.mean) / self.std
        mel_length = mel_tensor.size(1)
        if mel_length > self.max_mel_length:
            # Random temporal crop to a fixed training length.
            random_start = np.random.randint(0, mel_length - self.max_mel_length)
            mel_tensor = mel_tensor[:, random_start:random_start + self.max_mel_length]
        return mel_tensor, label
    def _preprocess(self, wave_tensor, ):
        """Waveform -> normalized log-mel (no cropping, no random scaling)."""
        mel_tensor = self.to_melspec(wave_tensor)
        mel_tensor = (torch.log(1e-5 + mel_tensor) - self.mean) / self.std
        return mel_tensor
    def _load_tensor(self, data):
        """Read a wav from disk and return (float waveform tensor, int label)."""
        wave_path, label = data
        label = int(label)
        wave, sr = sf.read(wave_path)
        wave_tensor = torch.from_numpy(wave).float()
        return wave_tensor, label
class Collater(object):
    """Pad a batch of (mel, label, ref_mel, ref2_mel, ref_label) samples to a
    fixed mel length and draw two latent codes per sample for the mapping
    network.
    """
    def __init__(self, return_wave=False):
        self.text_pad_index = 0
        self.return_wave = return_wave
        self.max_mel_length = 192
        self.mel_length_step = 16
        self.latent_dim = 16

    def __call__(self, batch):
        n_samples = len(batch)
        n_mels = batch[0][0].size(0)
        padded_shape = (n_samples, n_mels, self.max_mel_length)
        # Zero-pad every mel up to max_mel_length on the time axis.
        mels = torch.zeros(padded_shape, dtype=torch.float)
        ref_mels = torch.zeros(padded_shape, dtype=torch.float)
        ref2_mels = torch.zeros(padded_shape, dtype=torch.float)
        labels = torch.zeros(n_samples, dtype=torch.long)
        ref_labels = torch.zeros(n_samples, dtype=torch.long)
        for i, (mel, label, ref_mel, ref2_mel, ref_label) in enumerate(batch):
            mels[i, :, :mel.size(1)] = mel
            ref_mels[i, :, :ref_mel.size(1)] = ref_mel
            ref2_mels[i, :, :ref2_mel.size(1)] = ref2_mel
            labels[i] = label
            ref_labels[i] = ref_label
        # Two independent latent codes per sample (for the diversity loss).
        z_trg = torch.randn(n_samples, self.latent_dim)
        z_trg2 = torch.randn(n_samples, self.latent_dim)
        # Add the singleton channel dim expected by the 2-D conv networks.
        return (mels.unsqueeze(1), labels, ref_mels.unsqueeze(1),
                ref2_mels.unsqueeze(1), ref_labels, z_trg, z_trg2)
def build_dataloader(path_list,
                     validation=False,
                     batch_size=4,
                     num_workers=1,
                     device='cpu',
                     collate_config={},
                     dataset_config={}):
    """Wrap a MelDataset in a DataLoader: shuffled and padded for training,
    ordered for validation; memory is pinned when targeting a non-CPU device.
    """
    dataset = MelDataset(path_list, validation=validation)
    collater = Collater(**collate_config)
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=(not validation),
                      num_workers=num_workers,
                      drop_last=True,
                      collate_fn=collater,
                      pin_memory=(device != 'cpu'))
StarGANv2-VC | StarGANv2-VC-main/models.py | """
StarGAN v2
Copyright (c) 2020-present NAVER Corp.
This work is licensed under the Creative Commons Attribution-NonCommercial
4.0 International License. To view a copy of this license, visit
http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""
import os
import os.path as osp
import copy
import math
from munch import Munch
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class DownSample(nn.Module):
    """Down-sampling selected by name: 'none', 'timepreserve' (halve the
    frequency axis only), or 'half' (halve both axes), via average pooling."""
    def __init__(self, layer_type):
        super().__init__()
        self.layer_type = layer_type

    def forward(self, x):
        kind = self.layer_type
        if kind == 'none':
            return x
        if kind == 'timepreserve':
            return F.avg_pool2d(x, (2, 1))
        if kind == 'half':
            return F.avg_pool2d(x, 2)
        raise RuntimeError('Got unexpected donwsampletype %s, expected is [none, timepreserve, half]' % kind)
class UpSample(nn.Module):
    """Nearest-neighbour up-sampling selected by name: 'none', 'timepreserve'
    (double the frequency axis only), or 'half' (double both axes)."""
    def __init__(self, layer_type):
        super().__init__()
        self.layer_type = layer_type

    def forward(self, x):
        kind = self.layer_type
        if kind == 'none':
            return x
        if kind == 'timepreserve':
            return F.interpolate(x, scale_factor=(2, 1), mode='nearest')
        if kind == 'half':
            return F.interpolate(x, scale_factor=2, mode='nearest')
        raise RuntimeError('Got unexpected upsampletype %s, expected is [none, timepreserve, half]' % kind)
class ResBlk(nn.Module):
    """Pre-activation residual block with optional instance norm and
    down-sampling; uses a learned 1x1 shortcut when channel counts differ."""
    def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2),
                 normalize=False, downsample='none'):
        super().__init__()
        self.actv = actv
        self.normalize = normalize
        self.downsample = DownSample(downsample)
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out)

    def _build_weights(self, dim_in, dim_out):
        self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        if self.normalize:
            self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
            self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)

    def _shortcut(self, x):
        out = self.conv1x1(x) if self.learned_sc else x
        return self.downsample(out)

    def _residual(self, x):
        out = x
        if self.normalize:
            out = self.norm1(out)
        out = self.conv1(self.actv(out))
        out = self.downsample(out)
        if self.normalize:
            out = self.norm2(out)
        out = self.conv2(self.actv(out))
        return out

    def forward(self, x):
        combined = self._shortcut(x) + self._residual(x)
        # Rescale so the sum of two unit-variance branches stays unit variance.
        return combined / math.sqrt(2)
class AdaIN(nn.Module):
    """Adaptive instance normalization: a style code predicts the per-channel
    scale and shift applied to instance-normalized features."""
    def __init__(self, style_dim, num_features):
        super().__init__()
        self.norm = nn.InstanceNorm2d(num_features, affine=False)
        self.fc = nn.Linear(style_dim, num_features*2)

    def forward(self, x, s):
        # (B, 2C) -> (B, 2C, 1, 1), then split into scale and shift halves.
        stats = self.fc(s).view(s.size(0), -1, 1, 1)
        gamma, beta = stats.chunk(2, dim=1)
        return (1 + gamma) * self.norm(x) + beta
class AdainResBlk(nn.Module):
    """Residual block whose normalization layers are AdaIN, conditioned on a
    style code; optionally up-samples. When ``w_hpf`` is non-zero the skip
    connection is omitted (the Generator adds a high-pass branch instead)."""
    def __init__(self, dim_in, dim_out, style_dim=64, w_hpf=0,
                 actv=nn.LeakyReLU(0.2), upsample='none'):
        super().__init__()
        self.w_hpf = w_hpf
        self.actv = actv
        self.upsample = UpSample(upsample)
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out, style_dim)

    def _build_weights(self, dim_in, dim_out, style_dim=64):
        self.conv1 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_out, dim_out, 3, 1, 1)
        self.norm1 = AdaIN(style_dim, dim_in)
        self.norm2 = AdaIN(style_dim, dim_out)
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)

    def _shortcut(self, x):
        out = self.upsample(x)
        if self.learned_sc:
            out = self.conv1x1(out)
        return out

    def _residual(self, x, s):
        out = self.actv(self.norm1(x, s))
        out = self.conv1(self.upsample(out))
        out = self.actv(self.norm2(out, s))
        return self.conv2(out)

    def forward(self, x, s):
        out = self._residual(x, s)
        if self.w_hpf == 0:
            # Average with the skip path to keep unit variance.
            out = (out + self._shortcut(x)) / math.sqrt(2)
        return out
class HighPass(nn.Module):
    """Fixed 3x3 Laplacian high-pass filter, applied depthwise (per channel)
    and scaled by 1/w_hpf."""
    def __init__(self, w_hpf, device):
        super(HighPass, self).__init__()
        kernel = torch.tensor([[-1, -1, -1],
                               [-1, 8., -1],
                               [-1, -1, -1]])
        self.filter = kernel.to(device) / w_hpf

    def forward(self, x):
        n_ch = x.size(1)
        # Replicate the kernel per channel and use grouped conv => depthwise.
        weight = self.filter.unsqueeze(0).unsqueeze(0).repeat(n_ch, 1, 1, 1)
        return F.conv2d(x, weight, padding=1, groups=n_ch)
class Generator(nn.Module):
    """StarGANv2-VC generator: encoder/decoder of residual blocks with AdaIN
    style conditioning, an optional F0-feature side input concatenated at the
    bottleneck, and an optional high-pass skip path gated by masks.
    """
    def __init__(self, dim_in=48, style_dim=48, max_conv_dim=48*8, w_hpf=1, F0_channel=0):
        super().__init__()
        self.stem = nn.Conv2d(1, dim_in, 3, 1, 1)
        self.encode = nn.ModuleList()
        self.decode = nn.ModuleList()
        self.to_out = nn.Sequential(
            nn.InstanceNorm2d(dim_in, affine=True),
            nn.LeakyReLU(0.2),
            nn.Conv2d(dim_in, 1, 1, 1, 0))
        self.F0_channel = F0_channel
        # down/up-sampling blocks
        repeat_num = 4 #int(np.log2(img_size)) - 4
        if w_hpf > 0:
            repeat_num += 1
        for lid in range(repeat_num):
            # Stages 1 and 3 only halve the frequency axis; others halve both.
            if lid in [1, 3]:
                _downtype = 'timepreserve'
            else:
                _downtype = 'half'
            dim_out = min(dim_in*2, max_conv_dim)
            self.encode.append(
                ResBlk(dim_in, dim_out, normalize=True, downsample=_downtype))
            # Decoder blocks are inserted in reverse so decode mirrors encode.
            self.decode.insert(
                0, AdainResBlk(dim_out, dim_in, style_dim,
                               w_hpf=w_hpf, upsample=_downtype))  # stack-like
            dim_in = dim_out
        # bottleneck blocks (encoder)
        for _ in range(2):
            self.encode.append(
                ResBlk(dim_out, dim_out, normalize=True))
        # F0 blocks: first decoder stage absorbs the concatenated F0 channels
        if F0_channel != 0:
            self.decode.insert(
                0, AdainResBlk(dim_out + int(F0_channel / 2), dim_out, style_dim, w_hpf=w_hpf))
        # bottleneck blocks (decoder)
        for _ in range(2):
            self.decode.insert(
                0, AdainResBlk(dim_out + int(F0_channel / 2), dim_out + int(F0_channel / 2), style_dim, w_hpf=w_hpf))
        if F0_channel != 0:
            # Halves the F0 channel count (and its spatial size) before concat.
            self.F0_conv = nn.Sequential(
                ResBlk(F0_channel, int(F0_channel / 2), normalize=True, downsample="half"),
            )
        if w_hpf > 0:
            device = torch.device(
                'cuda' if torch.cuda.is_available() else 'cpu')
            self.hpf = HighPass(w_hpf, device)
    def forward(self, x, s, masks=None, F0=None):
        """Translate mel ``x`` (B, 1, M, L) using style code ``s``.

        ``masks``: optional pair of masks gating the high-pass skip path —
        presumably (low-res, high-res); verify against callers. ``F0``:
        optional F0 feature map concatenated at the bottleneck.
        """
        x = self.stem(x)
        cache = {}
        for block in self.encode:
            # Cache pre-block activations at these resolutions for the skip path.
            if (masks is not None) and (x.size(2) in [32, 64, 128]):
                cache[x.size(2)] = x
            x = block(x)
        if F0 is not None:
            F0 = self.F0_conv(F0)
            # Match the F0 map to the bottleneck's spatial size before concat.
            F0 = F.adaptive_avg_pool2d(F0, [x.shape[-2], x.shape[-1]])
            x = torch.cat([x, F0], axis=1)
        for block in self.decode:
            x = block(x, s)
            if (masks is not None) and (x.size(2) in [32, 64, 128]):
                mask = masks[0] if x.size(2) in [32] else masks[1]
                mask = F.interpolate(mask, size=x.size(2), mode='bilinear')
                # Re-inject high-frequency detail from the cached encoder features.
                x = x + self.hpf(mask * cache[x.size(2)])
        return self.to_out(x)
class MappingNetwork(nn.Module):
    """Map a latent code z to a style vector via a shared MLP plus one output
    head per domain; the domain label y selects which head's output to return."""
    def __init__(self, latent_dim=16, style_dim=48, num_domains=2, hidden_dim=384):
        super().__init__()
        shared_layers = [nn.Linear(latent_dim, hidden_dim), nn.ReLU()]
        for _ in range(3):
            shared_layers.extend([nn.Linear(hidden_dim, hidden_dim), nn.ReLU()])
        self.shared = nn.Sequential(*shared_layers)
        heads = [
            nn.Sequential(nn.Linear(hidden_dim, hidden_dim),
                          nn.ReLU(),
                          nn.Linear(hidden_dim, hidden_dim),
                          nn.ReLU(),
                          nn.Linear(hidden_dim, hidden_dim),
                          nn.ReLU(),
                          nn.Linear(hidden_dim, style_dim))
            for _ in range(num_domains)
        ]
        self.unshared = nn.ModuleList(heads)

    def forward(self, z, y):
        h = self.shared(z)
        per_domain = torch.stack([head(h) for head in self.unshared],
                                 dim=1)  # (batch, num_domains, style_dim)
        rows = torch.LongTensor(range(y.size(0))).to(y.device)
        return per_domain[rows, y]  # (batch, style_dim)
class StyleEncoder(nn.Module):
    """Encode a mel spectrogram into a style vector through a shared conv
    trunk plus one linear head per domain; y selects which head to use."""
    def __init__(self, dim_in=48, style_dim=48, num_domains=2, max_conv_dim=384):
        super().__init__()
        layers = [nn.Conv2d(1, dim_in, 3, 1, 1)]
        repeat_num = 4
        for _ in range(repeat_num):
            dim_out = min(dim_in*2, max_conv_dim)
            layers.append(ResBlk(dim_in, dim_out, downsample='half'))
            dim_in = dim_out
        layers.extend([
            nn.LeakyReLU(0.2),
            nn.Conv2d(dim_out, dim_out, 5, 1, 0),
            nn.AdaptiveAvgPool2d(1),
            nn.LeakyReLU(0.2),
        ])
        self.shared = nn.Sequential(*layers)
        self.unshared = nn.ModuleList(
            [nn.Linear(dim_out, style_dim) for _ in range(num_domains)])

    def forward(self, x, y):
        h = self.shared(x)
        h = h.view(h.size(0), -1)
        per_domain = torch.stack([head(h) for head in self.unshared],
                                 dim=1)  # (batch, num_domains, style_dim)
        rows = torch.LongTensor(range(y.size(0))).to(y.device)
        return per_domain[rows, y]  # (batch, style_dim)
class Discriminator(nn.Module):
    """Pairs a per-domain real/fake discriminator with a separate network used
    as the adversarial domain classifier."""
    def __init__(self, dim_in=48, num_domains=2, max_conv_dim=384, repeat_num=4):
        super().__init__()
        # real/fake discriminator
        self.dis = Discriminator2d(dim_in=dim_in, num_domains=num_domains,
                                   max_conv_dim=max_conv_dim, repeat_num=repeat_num)
        # adversarial classifier
        self.cls = Discriminator2d(dim_in=dim_in, num_domains=num_domains,
                                   max_conv_dim=max_conv_dim, repeat_num=repeat_num)
        self.num_domains = num_domains

    def forward(self, x, y):
        """Real/fake logit for each sample, taken at its domain y."""
        return self.dis(x, y)

    def classifier(self, x):
        """Domain logits (batch, num_domains) from the classifier branch."""
        return self.cls.get_feature(x)
class LinearNorm(torch.nn.Module):
    """Linear layer whose weight is Xavier-uniform initialized with the gain
    appropriate for the given activation name."""
    def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
        super(LinearNorm, self).__init__()
        self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
        gain = torch.nn.init.calculate_gain(w_init_gain)
        torch.nn.init.xavier_uniform_(self.linear_layer.weight, gain=gain)

    def forward(self, x):
        return self.linear_layer(x)
class Discriminator2d(nn.Module):
    """Conv discriminator producing one logit per domain; ``forward`` returns
    each sample's logit at its domain label y."""
    def __init__(self, dim_in=48, num_domains=2, max_conv_dim=384, repeat_num=4):
        super().__init__()
        layers = [nn.Conv2d(1, dim_in, 3, 1, 1)]
        for _ in range(repeat_num):
            dim_out = min(dim_in*2, max_conv_dim)
            layers.append(ResBlk(dim_in, dim_out, downsample='half'))
            dim_in = dim_out
        layers.extend([
            nn.LeakyReLU(0.2),
            nn.Conv2d(dim_out, dim_out, 5, 1, 0),
            nn.LeakyReLU(0.2),
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(dim_out, num_domains, 1, 1, 0),
        ])
        self.main = nn.Sequential(*layers)

    def get_feature(self, x):
        feat = self.main(x)
        return feat.view(feat.size(0), -1)  # (batch, num_domains)

    def forward(self, x, y):
        logits = self.get_feature(x)
        rows = torch.LongTensor(range(y.size(0))).to(y.device)
        return logits[rows, y]  # (batch)
def build_model(args, F0_model, ASR_model):
    """Instantiate all StarGANv2-VC networks plus EMA copies.

    Returns (nets, nets_ema): ``nets`` holds the trainable generator, mapping
    network, style encoder and discriminator together with the frozen F0 and
    ASR teacher models; ``nets_ema`` holds deep copies of the three inference
    networks for exponential moving averaging.
    """
    generator = Generator(args.dim_in, args.style_dim, args.max_conv_dim,
                          w_hpf=args.w_hpf, F0_channel=args.F0_channel)
    mapping_network = MappingNetwork(args.latent_dim, args.style_dim,
                                     args.num_domains, hidden_dim=args.max_conv_dim)
    style_encoder = StyleEncoder(args.dim_in, args.style_dim,
                                 args.num_domains, args.max_conv_dim)
    discriminator = Discriminator(args.dim_in, args.num_domains,
                                  args.max_conv_dim, args.n_repeat)
    nets = Munch(generator=generator,
                 mapping_network=mapping_network,
                 style_encoder=style_encoder,
                 discriminator=discriminator,
                 f0_model=F0_model,
                 asr_model=ASR_model)
    nets_ema = Munch(generator=copy.deepcopy(generator),
                     mapping_network=copy.deepcopy(mapping_network),
                     style_encoder=copy.deepcopy(style_encoder))
    return nets, nets_ema
StarGANv2-VC | StarGANv2-VC-main/train.py | #!/usr/bin/env python3
#coding:utf-8
import os
import os.path as osp
import re
import sys
import yaml
import shutil
import numpy as np
import torch
import click
import warnings
warnings.simplefilter('ignore')
from functools import reduce
from munch import Munch
from meldataset import build_dataloader
from optimizers import build_optimizer
from models import build_model
from trainer import Trainer
from torch.utils.tensorboard import SummaryWriter
from Utils.ASR.models import ASRCNN
from Utils.JDC.model import JDCNet
import logging
from logging import StreamHandler
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
torch.backends.cudnn.benchmark = True #
@click.command()
@click.option('-p', '--config_path', default='Configs/config.yml', type=str)
def main(config_path):
    """Training entry point: load the YAML config, build dataloaders, models and
    optimizer, then run the epoch loop with TensorBoard and file logging."""
    config = yaml.safe_load(open(config_path))  # NOTE(review): file handle never closed

    log_dir = config['log_dir']
    if not osp.exists(log_dir): os.makedirs(log_dir, exist_ok=True)
    # Keep a copy of the config next to the run artifacts for reproducibility.
    shutil.copy(config_path, osp.join(log_dir, osp.basename(config_path)))
    writer = SummaryWriter(log_dir + "/tensorboard")

    # Mirror all log output into <log_dir>/train.log.
    file_handler = logging.FileHandler(osp.join(log_dir, 'train.log'))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter('%(levelname)s:%(asctime)s: %(message)s'))
    logger.addHandler(file_handler)

    # Pull training hyperparameters out of the config (with defaults).
    batch_size = config.get('batch_size', 10)
    device = config.get('device', 'cpu')
    epochs = config.get('epochs', 1000)
    save_freq = config.get('save_freq', 20)
    train_path = config.get('train_data', None)
    val_path = config.get('val_data', None)
    stage = config.get('stage', 'star')  # NOTE(review): read but never used below
    fp16_run = config.get('fp16_run', False)

    # load data
    train_list, val_list = get_data_path_list(train_path, val_path)

    train_dataloader = build_dataloader(train_list,
                                        batch_size=batch_size,
                                        num_workers=4,
                                        device=device)
    val_dataloader = build_dataloader(val_list,
                                      batch_size=batch_size,
                                      validation=True,
                                      num_workers=2,
                                      device=device)

    # Load the pretrained ASR model (kept frozen; used inside the losses).
    ASR_config = config.get('ASR_config', False)
    ASR_path = config.get('ASR_path', False)
    with open(ASR_config) as f:
            ASR_config = yaml.safe_load(f)
    ASR_model_config = ASR_config['model_params']
    ASR_model = ASRCNN(**ASR_model_config)
    params = torch.load(ASR_path, map_location='cpu')['model']
    ASR_model.load_state_dict(params)
    _ = ASR_model.eval()

    # Load the pretrained F0 (pitch) model.
    F0_path = config.get('F0_path', False)
    F0_model = JDCNet(num_class=1, seq_len=192)
    params = torch.load(F0_path, map_location='cpu')['net']
    F0_model.load_state_dict(params)

    # build model
    model, model_ema = build_model(Munch(config['model_params']), F0_model, ASR_model)

    # One-cycle scheduler settings shared by all networks.
    scheduler_params = {
        "max_lr": float(config['optimizer_params'].get('lr', 2e-4)),
        "pct_start": float(config['optimizer_params'].get('pct_start', 0.0)),
        "epochs": epochs,
        "steps_per_epoch": len(train_dataloader),
    }

    _ = [model[key].to(device) for key in model]
    _ = [model_ema[key].to(device) for key in model_ema]
    scheduler_params_dict = {key: scheduler_params.copy() for key in model}
    # The mapping network is trained with a much smaller learning rate.
    scheduler_params_dict['mapping_network']['max_lr'] = 2e-6

    optimizer = build_optimizer({key: model[key].parameters() for key in model},
                                scheduler_params_dict=scheduler_params_dict)

    trainer = Trainer(args=Munch(config['loss_params']), model=model,
                            model_ema=model_ema,
                            optimizer=optimizer,
                            device=device,
                            train_dataloader=train_dataloader,
                            val_dataloader=val_dataloader,
                            logger=logger,
                            fp16_run=fp16_run)

    # Optionally resume from a checkpoint (weights only, unless configured otherwise).
    if config.get('pretrained_model', '') != '':
        trainer.load_checkpoint(config['pretrained_model'],
                                load_only_params=config.get('load_only_params', True))

    # Main epoch loop: train, evaluate, log scalars/figures, checkpoint periodically.
    for _ in range(1, epochs+1):
        epoch = trainer.epochs
        train_results = trainer._train_epoch()
        eval_results = trainer._eval_epoch()
        results = train_results.copy()
        results.update(eval_results)
        logger.info('--- epoch %d ---' % epoch)
        for key, value in results.items():
            if isinstance(value, float):
                logger.info('%-15s: %.4f' % (key, value))
                writer.add_scalar(key, value, epoch)
            else:
                for v in value:
                    writer.add_figure('eval_spec', v, epoch)
        if (epoch % save_freq) == 0:
            trainer.save_checkpoint(osp.join(log_dir, 'epoch_%05d.pth' % epoch))

    return 0
def get_data_path_list(train_path=None, val_path=None):
    """Return (train_lines, val_lines) read from the given list files.

    A None argument falls back to the default Data/ path. Lines are returned
    exactly as stored in the files, trailing newlines included.
    """
    def _read_lines(path, default):
        # Fall back to the default only when no explicit path was supplied.
        if path is None:
            path = default
        with open(path, 'r') as fp:
            return fp.readlines()

    return (_read_lines(train_path, "Data/train_list.txt"),
            _read_lines(val_path, "Data/val_list.txt"))
# Script entry point: click parses -p/--config_path from the command line.
if __name__=="__main__":
    main()
| 5,523 | 34.184713 | 90 | py |
StarGANv2-VC | StarGANv2-VC-main/trainer.py | # -*- coding: utf-8 -*-
import os
import os.path as osp
import sys
import time
from collections import defaultdict
import numpy as np
import torch
from torch import nn
from PIL import Image
from tqdm import tqdm
from losses import compute_d_loss, compute_g_loss
import logging
# Module-level logger; Trainer takes a `logger` argument and defaults to this one.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Trainer(object):
    """Training driver for StarGANv2-VC: runs the adversarial train/eval epochs.

    Owns the model / EMA-model Munches, the multi-network optimizer and the
    dataloaders. Note: the original class defined placeholder
    ``_train_epoch``/``_eval_epoch`` stubs that were immediately shadowed by
    the real implementations further down; the dead stubs have been removed.
    """

    def __init__(self,
                 args,
                 model=None,
                 model_ema=None,
                 optimizer=None,
                 scheduler=None,
                 config=None,
                 device=torch.device("cpu"),
                 logger=logger,
                 train_dataloader=None,
                 val_dataloader=None,
                 initial_steps=0,
                 initial_epochs=0,
                 fp16_run=False
                 ):
        """Store training state.

        Args:
            args: Munch of loss/schedule hyperparameters (d_loss, g_loss,
                adv_cls_epoch, con_reg_epoch, ...).
            model / model_ema: Munches of networks; model_ema holds the
                exponential-moving-average copies used at inference time.
            optimizer: project optimizer wrapper exposing zero_grad(),
                step(key, scaler=...), scheduler() and (load_)state_dict().
            config: optional free-form config dict (default: fresh empty dict;
                avoids the shared mutable-default pitfall of ``config={}``).
            initial_steps / initial_epochs: counters to resume from.
            fp16_run: enable CUDA AMP mixed precision.
        """
        self.args = args
        self.steps = initial_steps
        self.epochs = initial_epochs
        self.model = model
        self.model_ema = model_ema
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.config = {} if config is None else config
        self.device = device
        self.finish_train = False
        self.logger = logger
        self.fp16_run = fp16_run

    def save_checkpoint(self, checkpoint_path):
        """Save optimizer, counters and all network (and EMA) weights.

        Args:
            checkpoint_path (str): checkpoint path to be saved.
        """
        state_dict = {
            "optimizer": self.optimizer.state_dict(),
            "steps": self.steps,
            "epochs": self.epochs,
            "model": {key: self.model[key].state_dict() for key in self.model}
        }
        if self.model_ema is not None:
            state_dict['model_ema'] = {key: self.model_ema[key].state_dict() for key in self.model_ema}

        if not os.path.exists(os.path.dirname(checkpoint_path)):
            os.makedirs(os.path.dirname(checkpoint_path))
        torch.save(state_dict, checkpoint_path)

    def load_checkpoint(self, checkpoint_path, load_only_params=False):
        """Load a checkpoint produced by :meth:`save_checkpoint`.

        Args:
            checkpoint_path (str): checkpoint path to be loaded.
            load_only_params (bool): if True, restore network weights only and
                keep current optimizer state and step/epoch counters.
        """
        state_dict = torch.load(checkpoint_path, map_location="cpu")
        for key in self.model:
            self._load(state_dict["model"][key], self.model[key])

        if self.model_ema is not None:
            for key in self.model_ema:
                self._load(state_dict["model_ema"][key], self.model_ema[key])

        if not load_only_params:
            self.steps = state_dict["steps"]
            self.epochs = state_dict["epochs"]
            self.optimizer.load_state_dict(state_dict["optimizer"])

    def _load(self, states, model, force_load=True):
        """Best-effort copy of a state dict into ``model``.

        Keys absent from the model are skipped. On a shape mismatch, when
        force_load is True the overlapping slice is copied, otherwise the
        tensor is skipped entirely.
        """
        model_states = model.state_dict()
        for key, val in states.items():
            try:
                if key not in model_states:
                    continue
                if isinstance(val, nn.Parameter):
                    val = val.data
                if val.shape != model_states[key].shape:
                    self.logger.info("%s does not have same shape" % key)
                    print(val.shape, model_states[key].shape)
                    if not force_load:
                        continue
                    # Copy the overlapping hyper-rectangle of the two tensors.
                    min_shape = np.minimum(np.array(val.shape), np.array(model_states[key].shape))
                    slices = [slice(0, min_index) for min_index in min_shape]
                    model_states[key][slices].copy_(val[slices])
                else:
                    model_states[key].copy_(val)
            except Exception:
                # Narrowed from a bare `except:`; loading stays best-effort.
                self.logger.info("not exist :%s" % key)
                print("not exist ", key)

    @staticmethod
    def get_gradient_norm(model):
        """Return the global L2 norm over all parameter gradients of ``model``.

        Parameters whose ``.grad`` is None (frozen / unused in the last
        backward) are skipped; the original implementation crashed on them.
        """
        total_norm = 0
        for p in model.parameters():
            if p.grad is None:
                continue
            param_norm = p.grad.data.norm(2)
            total_norm += param_norm.item() ** 2

        total_norm = np.sqrt(total_norm)
        return total_norm

    @staticmethod
    def length_to_mask(lengths):
        """Boolean padding mask: True at positions beyond each sequence length."""
        mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
        mask = torch.gt(mask+1, lengths.unsqueeze(1))
        return mask

    def _get_lr(self):
        """Return the learning rate of the optimizer's first param group."""
        for param_group in self.optimizer.param_groups:
            return param_group['lr']

    @staticmethod
    def moving_average(model, model_test, beta=0.999):
        """EMA update: param_test <- (1 - beta) * param + beta * param_test."""
        for param, param_test in zip(model.parameters(), model_test.parameters()):
            param_test.data = torch.lerp(param.data, param_test.data, beta)

    def _train_epoch(self):
        """Run one training epoch; return dict of mean 'train/<loss>' values."""
        self.epochs += 1

        train_losses = defaultdict(list)
        _ = [self.model[k].train() for k in self.model]
        scaler = torch.cuda.amp.GradScaler() if (('cuda' in str(self.device)) and self.fp16_run) else None

        # Consistency regularization / adversarial classifier loss kick in
        # only after their configured warm-up epochs.
        use_con_reg = (self.epochs >= self.args.con_reg_epoch)
        use_adv_cls = (self.epochs >= self.args.adv_cls_epoch)

        for train_steps_per_epoch, batch in enumerate(tqdm(self.train_dataloader, desc="[train]"), 1):

            ### load data
            batch = [b.to(self.device) for b in batch]
            x_real, y_org, x_ref, x_ref2, y_trg, z_trg, z_trg2 = batch

            # train the discriminator (style sampled from a random latent code)
            self.optimizer.zero_grad()
            if scaler is not None:
                with torch.cuda.amp.autocast():
                    d_loss, d_losses_latent = compute_d_loss(self.model, self.args.d_loss, x_real, y_org, y_trg, z_trg=z_trg, use_adv_cls=use_adv_cls, use_con_reg=use_con_reg)
                scaler.scale(d_loss).backward()
            else:
                d_loss, d_losses_latent = compute_d_loss(self.model, self.args.d_loss, x_real, y_org, y_trg, z_trg=z_trg, use_adv_cls=use_adv_cls, use_con_reg=use_con_reg)
                d_loss.backward()
            self.optimizer.step('discriminator', scaler=scaler)

            # train the discriminator (style encoded from a target reference mel)
            self.optimizer.zero_grad()
            if scaler is not None:
                with torch.cuda.amp.autocast():
                    d_loss, d_losses_ref = compute_d_loss(self.model, self.args.d_loss, x_real, y_org, y_trg, x_ref=x_ref, use_adv_cls=use_adv_cls, use_con_reg=use_con_reg)
                scaler.scale(d_loss).backward()
            else:
                d_loss, d_losses_ref = compute_d_loss(self.model, self.args.d_loss, x_real, y_org, y_trg, x_ref=x_ref, use_adv_cls=use_adv_cls, use_con_reg=use_con_reg)
                d_loss.backward()
            self.optimizer.step('discriminator', scaler=scaler)

            # train the generator (by random reference); also updates the
            # mapping network and style encoder.
            self.optimizer.zero_grad()
            if scaler is not None:
                with torch.cuda.amp.autocast():
                    g_loss, g_losses_latent = compute_g_loss(
                        self.model, self.args.g_loss, x_real, y_org, y_trg, z_trgs=[z_trg, z_trg2], use_adv_cls=use_adv_cls)
                scaler.scale(g_loss).backward()
            else:
                g_loss, g_losses_latent = compute_g_loss(
                    self.model, self.args.g_loss, x_real, y_org, y_trg, z_trgs=[z_trg, z_trg2], use_adv_cls=use_adv_cls)
                g_loss.backward()
            self.optimizer.step('generator', scaler=scaler)
            self.optimizer.step('mapping_network', scaler=scaler)
            self.optimizer.step('style_encoder', scaler=scaler)

            # train the generator (by target reference); generator only.
            self.optimizer.zero_grad()
            if scaler is not None:
                with torch.cuda.amp.autocast():
                    g_loss, g_losses_ref = compute_g_loss(
                        self.model, self.args.g_loss, x_real, y_org, y_trg, x_refs=[x_ref, x_ref2], use_adv_cls=use_adv_cls)
                scaler.scale(g_loss).backward()
            else:
                g_loss, g_losses_ref = compute_g_loss(
                    self.model, self.args.g_loss, x_real, y_org, y_trg, x_refs=[x_ref, x_ref2], use_adv_cls=use_adv_cls)
                g_loss.backward()
            self.optimizer.step('generator', scaler=scaler)

            # compute moving average of network parameters
            self.moving_average(self.model.generator, self.model_ema.generator, beta=0.999)
            self.moving_average(self.model.mapping_network, self.model_ema.mapping_network, beta=0.999)
            self.moving_average(self.model.style_encoder, self.model_ema.style_encoder, beta=0.999)
            self.optimizer.scheduler()

            for key in d_losses_latent:
                train_losses["train/%s" % key].append(d_losses_latent[key])
            for key in g_losses_latent:
                train_losses["train/%s" % key].append(g_losses_latent[key])

        train_losses = {key: np.mean(value) for key, value in train_losses.items()}
        return train_losses

    @torch.no_grad()
    def _eval_epoch(self):
        """Run one validation epoch; return dict of mean 'eval/<loss>' values."""
        use_adv_cls = (self.epochs >= self.args.adv_cls_epoch)

        eval_losses = defaultdict(list)
        eval_images = defaultdict(list)
        _ = [self.model[k].eval() for k in self.model]
        for eval_steps_per_epoch, batch in enumerate(tqdm(self.val_dataloader, desc="[eval]"), 1):

            ### load data
            batch = [b.to(self.device) for b in batch]
            x_real, y_org, x_ref, x_ref2, y_trg, z_trg, z_trg2 = batch

            # discriminator losses (R1 regularization disabled at eval time)
            d_loss, d_losses_latent = compute_d_loss(
                self.model, self.args.d_loss, x_real, y_org, y_trg, z_trg=z_trg, use_r1_reg=False, use_adv_cls=use_adv_cls)
            d_loss, d_losses_ref = compute_d_loss(
                self.model, self.args.d_loss, x_real, y_org, y_trg, x_ref=x_ref, use_r1_reg=False, use_adv_cls=use_adv_cls)

            # generator losses
            g_loss, g_losses_latent = compute_g_loss(
                self.model, self.args.g_loss, x_real, y_org, y_trg, z_trgs=[z_trg, z_trg2], use_adv_cls=use_adv_cls)
            g_loss, g_losses_ref = compute_g_loss(
                self.model, self.args.g_loss, x_real, y_org, y_trg, x_refs=[x_ref, x_ref2], use_adv_cls=use_adv_cls)

            for key in d_losses_latent:
                eval_losses["eval/%s" % key].append(d_losses_latent[key])
            for key in g_losses_latent:
                eval_losses["eval/%s" % key].append(g_losses_latent[key])

            # (disabled) spectrogram visualization of real / fake / reconstructed samples:
            # if eval_steps_per_epoch % 10 == 0:
            #     # generate x_fake
            #     s_trg = self.model_ema.style_encoder(x_ref, y_trg)
            #     F0 = self.model.f0_model.get_feature_GAN(x_real)
            #     x_fake = self.model_ema.generator(x_real, s_trg, masks=None, F0=F0)
            #     # generate x_recon
            #     s_real = self.model_ema.style_encoder(x_real, y_org)
            #     F0_fake = self.model.f0_model.get_feature_GAN(x_fake)
            #     x_recon = self.model_ema.generator(x_fake, s_real, masks=None, F0=F0_fake)
            #     eval_images['eval/image'].append(
            #         ([x_real[0, 0].cpu().numpy(),
            #         x_fake[0, 0].cpu().numpy(),
            #         x_recon[0, 0].cpu().numpy()]))

        eval_losses = {key: np.mean(value) for key, value in eval_losses.items()}
        eval_losses.update(eval_images)
        return eval_losses
| 11,715 | 40.399293 | 175 | py |
StarGANv2-VC | StarGANv2-VC-main/transforms.py | # -*- coding: utf-8 -*-
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torchaudio
import torchaudio.functional as audio_F
import random
## 1. RandomTimeStrech
class TimeStrech(nn.Module):
    """Time-stretch augmentation: resample the time axis by a fixed factor.

    When stretching shortens the signal, the output is padded back to the
    original length with a segment copied from a random position; when it
    lengthens, the output is truncated.
    """

    def __init__(self, scale):
        super(TimeStrech, self).__init__()
        self.scale = scale  # time-axis resampling factor

    def forward(self, x):
        orig_len = x.size(-1)
        stretched = F.interpolate(x, scale_factor=(1, self.scale), align_corners=False,
                                  recompute_scale_factor=True, mode='bilinear').squeeze()
        new_len = stretched.size(-1)
        if new_len >= orig_len:
            out = stretched[..., :orig_len]
        else:
            pad_len = orig_len - new_len
            # Copy a random chunk of the stretched signal as filler.
            start = random.randint(0, new_len) - pad_len
            if start < 0:
                start = 0
            filler = stretched[..., start:start + pad_len]
            out = torch.cat([stretched, filler], dim=-1)
        return out.unsqueeze(1)
## 2. PitchShift
class PitchShift(nn.Module):
    """Pitch-shift augmentation: rescale the mel-frequency axis by `shift` bins.

    The frequency axis is interpolated from mel_size to mel_size + shift,
    then truncated (shift > 0) or zero-padded at the high end (shift < 0)
    back to mel_size bins.
    """

    def __init__(self, shift):
        super(PitchShift, self).__init__()
        self.shift = shift  # number of mel bins to shift by (may be negative)

    def forward(self, x):
        # NOTE(review): the squeeze()/unsqueeze(1) dance assumes a batch size
        # > 1 (a leading size-1 batch dim would be squeezed away) — confirm
        # with callers.
        if len(x.shape) == 2:
            x = x.unsqueeze(0)
        x = x.squeeze()
        mel_size = x.shape[1]
        shift_scale = (mel_size + self.shift) / mel_size
        # Stretch only the frequency axis; the time-axis factor stays 1.
        x = F.interpolate(x.unsqueeze(1), scale_factor=(shift_scale, 1.), align_corners=False,
                          recompute_scale_factor=True, mode='bilinear').squeeze(1)

        x = x[:, :mel_size]
        if x.size(1) < mel_size:
            # Shift down: pad the high-frequency end with zeros back to mel_size.
            pad_size = mel_size - x.size(1)
            x = torch.cat([x, torch.zeros(x.size(0), pad_size, x.size(2)).to(x.device)], dim=1)
        x = x.squeeze()
        return x.unsqueeze(1)
## 3. ShiftBias
class ShiftBias(nn.Module):
    """Additive-bias augmentation: add a constant offset to every value."""

    def __init__(self, bias):
        super(ShiftBias, self).__init__()
        self.bias = bias  # constant added to the input

    def forward(self, x):
        shifted = x + self.bias
        return shifted
## 4. Scaling
class SpectScaling(nn.Module):
    """Amplitude-scaling augmentation: multiply the input by a constant factor."""

    def __init__(self, scale):
        super(SpectScaling, self).__init__()
        self.scale = scale  # multiplicative gain

    def forward(self, x):
        scaled = x * self.scale
        return scaled
## 5. Time Flip
class TimeFlip(nn.Module):
    """Time-flip augmentation: reverse a random window of `length` frames."""

    def __init__(self, length):
        super(TimeFlip, self).__init__()
        self.length = round(length)  # window length in frames

    def forward(self, x):
        if self.length <= 1:
            # Zero/one-frame windows: nothing to flip.
            return x
        start = np.random.randint(0, x.shape[-1] - self.length)
        stop = start + self.length
        out = x.clone()
        out[..., start:stop] = torch.flip(x[..., start:stop], dims=[-1])
        return out
class PhaseShuffle2d(nn.Module):
    """Circularly rotate the last (time) axis by a random offset in [-n, n]."""

    def __init__(self, n=2):
        super(PhaseShuffle2d, self).__init__()
        self.n = n
        self.random = random.Random(1)  # dedicated RNG -> reproducible shifts

    def forward(self, x, move=None):
        # x: (B, C, M, L)
        if move is None:
            move = self.random.randint(-self.n, self.n)
        if move == 0:
            return x
        head, tail = x[:, :, :, :move], x[:, :, :, move:]
        return torch.cat([tail, head], dim=3)
def build_transforms():
    """Build a random augmentation pipeline as an nn.Sequential.

    Draws a global strength M in [0, 1), then samples len(factories)
    transforms (np.random.choice samples WITH replacement, so the same
    transform type can appear more than once) and instantiates each with
    randomized parameters scaled by M.
    """
    factories = [
        lambda M: TimeStrech(1+ (np.random.random()-0.5)*M*0.2),
        lambda M: SpectScaling(1 + (np.random.random()-1)*M*0.1),
        lambda M: PhaseShuffle2d(192),
    ]
    count = len(factories)
    strength = np.random.random()
    chosen = np.random.choice(factories, count)
    modules = [factory(strength) for factory in chosen]
    return nn.Sequential(*modules)
| 3,552 | 28.363636 | 106 | py |
StarGANv2-VC | StarGANv2-VC-main/Utils/JDC/model.py | """
Implementation of model from:
Kum et al. - "Joint Detection and Classification of Singing Voice Melody Using
Convolutional Recurrent Neural Networks" (2019)
Link: https://www.semanticscholar.org/paper/Joint-Detection-and-Classification-of-Singing-Voice-Kum-Nam/60a2ad4c7db43bace75805054603747fcd062c0d
"""
import torch
from torch import nn
class JDCNet(nn.Module):
    """
    Joint Detection and Classification Network model for singing voice melody.

    A shared CNN trunk feeds two BiLSTM heads: a pitch-class classifier and a
    voiced/unvoiced detector. Shape comments below assume the paper's default
    input (b, 1, 31, 513).
    """
    def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01):
        """
        Args:
            num_class: number of pitch classes predicted per frame.
            seq_len: number of time frames per input window.
            leaky_relu_slope: negative slope shared by all LeakyReLU layers.
        """
        super().__init__()
        self.seq_len = seq_len  # 31
        self.num_class = num_class

        # input = (b, 1, 31, 513), b = batch size
        self.conv_block = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False),  # out: (b, 64, 31, 513)
            nn.BatchNorm2d(num_features=64),
            nn.LeakyReLU(leaky_relu_slope, inplace=True),
            nn.Conv2d(64, 64, 3, padding=1, bias=False),  # (b, 64, 31, 513)
        )

        # Residual blocks; each halves the frequency axis via its internal MaxPool.
        self.res_block1 = ResBlock(in_channels=64, out_channels=128)  # (b, 128, 31, 128)
        self.res_block2 = ResBlock(in_channels=128, out_channels=192)  # (b, 192, 31, 32)
        self.res_block3 = ResBlock(in_channels=192, out_channels=256)  # (b, 256, 31, 8)

        # pool block: BN + activation (indices 0-1 reused by get_feature_GAN),
        # then pooling + dropout (index 2 onward) for the classifier path.
        self.pool_block = nn.Sequential(
            nn.BatchNorm2d(num_features=256),
            nn.LeakyReLU(leaky_relu_slope, inplace=True),
            nn.MaxPool2d(kernel_size=(1, 4)),  # (b, 256, 31, 2)
            nn.Dropout(p=0.5),
        )

        # maxpool layers (for auxiliary network inputs)
        # in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2)
        self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40))
        # in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2)
        self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20))
        # in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2)
        self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10))

        # in = (b, 640, 31, 2), out = (b, 256, 31, 2)
        self.detector_conv = nn.Sequential(
            nn.Conv2d(640, 256, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(leaky_relu_slope, inplace=True),
            nn.Dropout(p=0.5),
        )

        # input: (b, 31, 512) - resized from (b, 256, 31, 2)
        self.bilstm_classifier = nn.LSTM(
            input_size=512, hidden_size=256,
            batch_first=True, bidirectional=True)  # (b, 31, 512)

        # input: (b, 31, 512) - resized from (b, 256, 31, 2)
        # NOTE(review): constructed but not used in forward() below.
        self.bilstm_detector = nn.LSTM(
            input_size=512, hidden_size=256,
            batch_first=True, bidirectional=True)  # (b, 31, 512)

        # input: (b * 31, 512)
        self.classifier = nn.Linear(in_features=512, out_features=self.num_class)  # (b * 31, num_class)

        # input: (b * 31, 512)
        self.detector = nn.Linear(in_features=512, out_features=2)  # (b * 31, 2) - binary classifier

        # initialize weights
        self.apply(self.init_weights)

    def get_feature_GAN(self, x):
        """Return BN+LeakyReLU trunk features (no pooling/dropout), time axis last-but-one."""
        seq_len = x.shape[-2]  # NOTE(review): unused — kept for parity with forward()
        x = x.float().transpose(-1, -2)

        convblock_out = self.conv_block(x)

        resblock1_out = self.res_block1(convblock_out)
        resblock2_out = self.res_block2(resblock1_out)
        resblock3_out = self.res_block3(resblock2_out)
        # Apply only BN (index 0) and LeakyReLU (index 1) of pool_block.
        poolblock_out = self.pool_block[0](resblock3_out)
        poolblock_out = self.pool_block[1](poolblock_out)

        return poolblock_out.transpose(-1, -2)

    def forward(self, x):
        """
        Returns:
            (|classifier_out|, GAN_feature, poolblock_out) — abs of per-frame
            pitch-class logits, the pre-pool trunk features, and the pooled
            features.
            sizes: (b, 31, 722), (b, 31, 2)
        """
        ###############################
        # forward pass for classifier #
        ###############################
        x = x.float().transpose(-1, -2)

        convblock_out = self.conv_block(x)

        resblock1_out = self.res_block1(convblock_out)
        resblock2_out = self.res_block2(resblock1_out)
        resblock3_out = self.res_block3(resblock2_out)

        poolblock_out = self.pool_block[0](resblock3_out)
        poolblock_out = self.pool_block[1](poolblock_out)
        GAN_feature = poolblock_out.transpose(-1, -2)
        poolblock_out = self.pool_block[2](poolblock_out)

        # (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512)
        classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, self.seq_len, 512))
        classifier_out, _ = self.bilstm_classifier(classifier_out)  # ignore the hidden states

        classifier_out = classifier_out.contiguous().view((-1, 512))  # (b * 31, 512)
        classifier_out = self.classifier(classifier_out)
        classifier_out = classifier_out.view((-1, self.seq_len, self.num_class))  # (b, 31, num_class)

        # sizes: (b, 31, 722), (b, 31, 2)
        # classifier output consists of predicted pitch classes per frame
        # detector output consists of: (isvoice, notvoice) estimates per frame
        # NOTE(review): abs() of logits is unusual — presumably intentional for
        # the F0 regression use (num_class=1); confirm against the training code.
        return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out

    @staticmethod
    def init_weights(m):
        """Per-module init: Kaiming for Linear, Xavier for Conv2d, orthogonal/normal for LSTMs."""
        if isinstance(m, nn.Linear):
            nn.init.kaiming_uniform_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Conv2d):
            nn.init.xavier_normal_(m.weight)
        elif isinstance(m, nn.LSTM) or isinstance(m, nn.LSTMCell):
            for p in m.parameters():
                if p.data is None:  # NOTE(review): Parameter.data is never None; branch appears dead
                    continue

                if len(p.shape) >= 2:
                    nn.init.orthogonal_(p.data)
                else:
                    nn.init.normal_(p.data)
class ResBlock(nn.Module):
    """Pre-activation residual block (Figure 1b of the JDC paper).

    BN + LeakyReLU + MaxPool(1, 2) first (halving the frequency axis), then a
    two-conv branch added to the (optionally 1x1-projected) shortcut.
    """

    def __init__(self, in_channels: int, out_channels: int, leaky_relu_slope=0.01):
        super().__init__()
        # Shortcut needs a 1x1 projection whenever the channel count changes.
        self.downsample = in_channels != out_channels

        # BN / LReLU / MaxPool layer before the conv layer.
        self.pre_conv = nn.Sequential(
            nn.BatchNorm2d(num_features=in_channels),
            nn.LeakyReLU(leaky_relu_slope, inplace=True),
            nn.MaxPool2d(kernel_size=(1, 2)),  # downsample the frequency axis only
        )

        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(leaky_relu_slope, inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False),
        )

        # 1x1 convolution to match channel dimensions on the shortcut path.
        self.conv1by1 = nn.Conv2d(in_channels, out_channels, 1, bias=False) if self.downsample else None

    def forward(self, x):
        x = self.pre_conv(x)
        shortcut = self.conv1by1(x) if self.downsample else x
        return self.conv(x) + shortcut
StarGANv2-VC | StarGANv2-VC-main/Utils/ASR/layers.py | import math
import torch
from torch import nn
from typing import Optional, Any
from torch import Tensor
import torch.nn.functional as F
import torchaudio
import torchaudio.functional as audio_F
import random
# Seed the global RNG at import time so module-level randomness is reproducible.
random.seed(0)
def _get_activation_fn(activ):
if activ == 'relu':
return nn.ReLU()
elif activ == 'lrelu':
return nn.LeakyReLU(0.2)
elif activ == 'swish':
return lambda x: x*torch.sigmoid(x)
else:
raise RuntimeError('Unexpected activ type %s, expected [relu, lrelu, swish]' % activ)
class LinearNorm(torch.nn.Module):
    """Linear layer whose weight is Xavier-uniform initialized with a
    gain chosen from the given nonlinearity name."""

    def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
        super(LinearNorm, self).__init__()
        self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
        gain = torch.nn.init.calculate_gain(w_init_gain)
        torch.nn.init.xavier_uniform_(self.linear_layer.weight, gain=gain)

    def forward(self, x):
        return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
    """1-D convolution with Xavier init and 'same'-style default padding.

    When padding is None the kernel size must be odd and padding is derived
    so the output length matches the input (for stride 1).
    """

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=None, dilation=1, bias=True, w_init_gain='linear', param=None):
        super(ConvNorm, self).__init__()
        if padding is None:
            assert(kernel_size % 2 == 1)
            padding = int(dilation * (kernel_size - 1) / 2)

        self.conv = torch.nn.Conv1d(in_channels, out_channels,
                                    kernel_size=kernel_size, stride=stride,
                                    padding=padding, dilation=dilation,
                                    bias=bias)
        gain = torch.nn.init.calculate_gain(w_init_gain, param=param)
        torch.nn.init.xavier_uniform_(self.conv.weight, gain=gain)

    def forward(self, signal):
        return self.conv(signal)
class CausualConv(nn.Module):
    """Causal 1-D convolution: pads both sides by 2x the nominal padding and
    slices the trailing frames off so no output depends on future inputs.

    Fixes two defects in the original: ``self.padding`` was never assigned
    when ``padding=None`` was passed (the Conv1d construction then raised
    AttributeError), and ``x[:, :, :-0]`` returned an empty tensor whenever
    the padding worked out to zero.
    """

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=1, dilation=1, bias=True, w_init_gain='linear', param=None):
        super(CausualConv, self).__init__()
        if padding is None:
            # Derive 'same'-style padding (odd kernel required), doubled for causality.
            assert(kernel_size % 2 == 1)
            self.padding = int(dilation * (kernel_size - 1) / 2) * 2
        else:
            self.padding = padding * 2
        self.conv = nn.Conv1d(in_channels, out_channels,
                              kernel_size=kernel_size, stride=stride,
                              padding=self.padding,
                              dilation=dilation,
                              bias=bias)

        torch.nn.init.xavier_uniform_(
            self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain, param=param))

    def forward(self, x):
        x = self.conv(x)
        if self.padding > 0:
            # Drop trailing frames so the convolution stays causal.
            x = x[:, :, :-self.padding]
        return x
class CausualBlock(nn.Module):
    """Stack of n_conv residual causal sub-blocks with dilation 3**i."""

    def __init__(self, hidden_dim, n_conv=3, dropout_p=0.2, activ='lrelu'):
        super(CausualBlock, self).__init__()
        self.blocks = nn.ModuleList([
            self._get_conv(hidden_dim, dilation=3 ** i, activ=activ, dropout_p=dropout_p)
            for i in range(n_conv)])

    def forward(self, x):
        for block in self.blocks:
            # Residual connection around every sub-block.
            x = block(x) + x
        return x

    def _get_conv(self, hidden_dim, dilation, activ='lrelu', dropout_p=0.2):
        """One sub-block: two causal convs with activation, BN and dropout."""
        return nn.Sequential(
            CausualConv(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, dilation=dilation),
            _get_activation_fn(activ),
            nn.BatchNorm1d(hidden_dim),
            nn.Dropout(p=dropout_p),
            CausualConv(hidden_dim, hidden_dim, kernel_size=3, padding=1, dilation=1),
            _get_activation_fn(activ),
            nn.Dropout(p=dropout_p),
        )
class ConvBlock(nn.Module):
    """Stack of n_conv residual dilated-conv sub-blocks (dilation 3**i) with GroupNorm."""

    def __init__(self, hidden_dim, n_conv=3, dropout_p=0.2, activ='relu'):
        super().__init__()
        self._n_groups = 8
        self.blocks = nn.ModuleList([
            self._get_conv(hidden_dim, dilation=3 ** i, activ=activ, dropout_p=dropout_p)
            for i in range(n_conv)])

    def forward(self, x):
        for block in self.blocks:
            # Residual connection around every sub-block.
            x = block(x) + x
        return x

    def _get_conv(self, hidden_dim, dilation, activ='relu', dropout_p=0.2):
        """One sub-block: two convs with activation, GroupNorm and dropout."""
        return nn.Sequential(
            ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, dilation=dilation),
            _get_activation_fn(activ),
            nn.GroupNorm(num_groups=self._n_groups, num_channels=hidden_dim),
            nn.Dropout(p=dropout_p),
            ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=1, dilation=1),
            _get_activation_fn(activ),
            nn.Dropout(p=dropout_p),
        )
class LocationLayer(nn.Module):
    """Turn stacked (previous + cumulative) attention weights into location features."""

    def __init__(self, attention_n_filters, attention_kernel_size,
                 attention_dim):
        super(LocationLayer, self).__init__()
        pad = int((attention_kernel_size - 1) / 2)
        self.location_conv = ConvNorm(2, attention_n_filters,
                                      kernel_size=attention_kernel_size,
                                      padding=pad, bias=False, stride=1,
                                      dilation=1)
        self.location_dense = LinearNorm(attention_n_filters, attention_dim,
                                         bias=False, w_init_gain='tanh')

    def forward(self, attention_weights_cat):
        # (B, 2, T) -> conv -> (B, F, T) -> (B, T, F) -> dense -> (B, T, attention_dim)
        feats = self.location_conv(attention_weights_cat)
        feats = feats.transpose(1, 2)
        return self.location_dense(feats)
class Attention(nn.Module):
    """Location-sensitive content attention (Tacotron2-style)."""

    def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
                 attention_location_n_filters, attention_location_kernel_size):
        super(Attention, self).__init__()
        self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
                                      bias=False, w_init_gain='tanh')
        self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
                                       w_init_gain='tanh')
        self.v = LinearNorm(attention_dim, 1, bias=False)
        self.location_layer = LocationLayer(attention_location_n_filters,
                                            attention_location_kernel_size,
                                            attention_dim)
        # Masked (padded) positions get -inf so softmax assigns them zero weight.
        self.score_mask_value = -float("inf")

    def get_alignment_energies(self, query, processed_memory,
                               attention_weights_cat):
        """
        PARAMS
        ------
        query: decoder output (batch, n_mel_channels * n_frames_per_step)
        processed_memory: processed encoder outputs (B, T_in, attention_dim)
        attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)

        RETURNS
        -------
        alignment (batch, max_time)
        """
        q = self.query_layer(query.unsqueeze(1))
        loc = self.location_layer(attention_weights_cat)
        energies = self.v(torch.tanh(q + loc + processed_memory))
        return energies.squeeze(-1)

    def forward(self, attention_hidden_state, memory, processed_memory,
                attention_weights_cat, mask):
        """Return (attention_context, attention_weights).

        PARAMS
        ------
        attention_hidden_state: attention rnn last output
        memory: encoder outputs
        processed_memory: processed encoder outputs
        attention_weights_cat: previous and cumulative attention weights
        mask: binary mask for padded data
        """
        alignment = self.get_alignment_energies(
            attention_hidden_state, processed_memory, attention_weights_cat)

        if mask is not None:
            alignment.data.masked_fill_(mask, self.score_mask_value)

        attention_weights = F.softmax(alignment, dim=1)
        context = torch.bmm(attention_weights.unsqueeze(1), memory).squeeze(1)
        return context, attention_weights
class ForwardAttentionV2(nn.Module):
    """Forward attention: like location-sensitive attention, but alignments are
    constrained to move monotonically by recursing on log_alpha (each step may
    stay or advance by one position)."""

    def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
                 attention_location_n_filters, attention_location_kernel_size):
        super(ForwardAttentionV2, self).__init__()
        self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
                                      bias=False, w_init_gain='tanh')
        self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
                                       w_init_gain='tanh')
        self.v = LinearNorm(attention_dim, 1, bias=False)
        self.location_layer = LocationLayer(attention_location_n_filters,
                                            attention_location_kernel_size,
                                            attention_dim)
        # Large negative value (rather than -inf) keeps logsumexp numerically safe.
        self.score_mask_value = -float(1e20)

    def get_alignment_energies(self, query, processed_memory,
                               attention_weights_cat):
        """
        PARAMS
        ------
        query: decoder output (batch, n_mel_channels * n_frames_per_step)
        processed_memory: processed encoder outputs (B, T_in, attention_dim)
        attention_weights_cat: prev. and cumulative att weights (B, 2, max_time)

        RETURNS
        -------
        alignment (batch, max_time)
        """

        processed_query = self.query_layer(query.unsqueeze(1))
        processed_attention_weights = self.location_layer(attention_weights_cat)
        energies = self.v(torch.tanh(
            processed_query + processed_attention_weights + processed_memory))

        energies = energies.squeeze(-1)
        return energies

    def forward(self, attention_hidden_state, memory, processed_memory,
                attention_weights_cat, mask, log_alpha):
        """Return (attention_context, attention_weights, log_alpha_new).

        PARAMS
        ------
        attention_hidden_state: attention rnn last output
        memory: encoder outputs
        processed_memory: processed encoder outputs
        attention_weights_cat: previous and cumulative attention weights
        mask: binary mask for padded data
        log_alpha: running log-probabilities of the forward-attention recursion
        """
        log_energy = self.get_alignment_energies(
            attention_hidden_state, processed_memory, attention_weights_cat)

        #log_energy =

        if mask is not None:
            log_energy.data.masked_fill_(mask, self.score_mask_value)

        #attention_weights = F.softmax(alignment, dim=1)

        #content_score = log_energy.unsqueeze(1) #[B, MAX_TIME] -> [B, 1, MAX_TIME]
        #log_alpha = log_alpha.unsqueeze(2) #[B, MAX_TIME] -> [B, MAX_TIME, 1]

        #log_total_score = log_alpha + content_score

        #previous_attention_weights = attention_weights_cat[:,0,:]

        # Forward recursion: each position's new log-alpha combines staying
        # (shift 0) and advancing by one (shift 1), via logsumexp over the two
        # shifted-and-padded copies of the previous log_alpha.
        log_alpha_shift_padded = []
        max_time = log_energy.size(1)
        for sft in range(2):
            shifted = log_alpha[:,:max_time-sft]
            shift_padded = F.pad(shifted, (sft,0), 'constant', self.score_mask_value)
            log_alpha_shift_padded.append(shift_padded.unsqueeze(2))

        biased = torch.logsumexp(torch.cat(log_alpha_shift_padded,2), 2)

        log_alpha_new = biased +  log_energy

        attention_weights =  F.softmax(log_alpha_new, dim=1)

        attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
        attention_context = attention_context.squeeze(1)

        return attention_context, attention_weights, log_alpha_new
class PhaseShuffle2d(nn.Module):
    """Circularly shift the last (time) axis of a 4-D tensor by a random
    offset drawn from [-n, n]."""

    def __init__(self, n=2):
        super(PhaseShuffle2d, self).__init__()
        self.n = n
        self.random = random.Random(1)  # dedicated RNG for reproducible shifts

    def forward(self, x, move=None):
        # x.size = (B, C, M, L)
        if move is None:
            move = self.random.randint(-self.n, self.n)
        if move == 0:
            return x
        front = x[:, :, :, :move]
        back = x[:, :, :, move:]
        return torch.cat([back, front], dim=3)
class PhaseShuffle1d(nn.Module):
    """Circularly shift the last axis of a 3-D tensor by a random offset
    drawn from [-n, n]."""

    def __init__(self, n=2):
        super(PhaseShuffle1d, self).__init__()
        self.n = n
        self.random = random.Random(1)  # dedicated RNG for reproducible shifts

    def forward(self, x, move=None):
        # x.size = (B, C, L)
        if move is None:
            move = self.random.randint(-self.n, self.n)
        if move == 0:
            return x
        front = x[:, :, :move]
        back = x[:, :, move:]
        return torch.cat([back, front], dim=2)
class MFCC(nn.Module):
    """Convert a mel spectrogram into MFCCs via a precomputed DCT matrix."""

    def __init__(self, n_mfcc=40, n_mels=80):
        super(MFCC, self).__init__()
        self.n_mfcc = n_mfcc
        self.n_mels = n_mels
        self.norm = 'ortho'
        # Build the DCT basis once; a buffer follows .to(device) / state_dict.
        self.register_buffer('dct_mat', audio_F.create_dct(self.n_mfcc, self.n_mels, self.norm))

    def forward(self, mel_specgram):
        # Accept both (n_mels, time) and (batch, n_mels, time) inputs.
        unsqueezed = len(mel_specgram.shape) == 2
        if unsqueezed:
            mel_specgram = mel_specgram.unsqueeze(0)
        # (channel, n_mels, time).T dot (n_mels, n_mfcc) -> (channel, n_mfcc, time)
        mfcc = torch.matmul(mel_specgram.transpose(1, 2), self.dct_mat).transpose(1, 2)
        if unsqueezed:
            mfcc = mfcc.squeeze(0)
        return mfcc
| 13,454 | 36.901408 | 143 | py |
StarGANv2-VC | StarGANv2-VC-main/Utils/ASR/models.py | import math
import torch
from torch import nn
from torch.nn import TransformerEncoder
import torch.nn.functional as F
from .layers import MFCC, Attention, LinearNorm, ConvNorm, ConvBlock
class ASRCNN(nn.Module):
    """Convolutional ASR encoder with a CTC head and an attention seq2seq head."""

    def __init__(self,
                 input_dim=80,
                 hidden_dim=256,
                 n_token=35,
                 n_layers=6,
                 token_embedding_dim=256,
    ):
        super().__init__()
        self.n_token = n_token
        self.n_down = 1  # temporal downsampling factor (stride-2 initial conv)
        self.to_mfcc = MFCC()
        self.init_cnn = ConvNorm(input_dim//2, hidden_dim, kernel_size=7, padding=3, stride=2)
        conv_stack = [
            nn.Sequential(
                ConvBlock(hidden_dim),
                nn.GroupNorm(num_groups=1, num_channels=hidden_dim),
            )
            for _ in range(n_layers)
        ]
        self.cnns = nn.Sequential(*conv_stack)
        self.projection = ConvNorm(hidden_dim, hidden_dim // 2)
        self.ctc_linear = nn.Sequential(
            LinearNorm(hidden_dim//2, hidden_dim),
            nn.ReLU(),
            LinearNorm(hidden_dim, n_token))
        self.asr_s2s = ASRS2S(
            embedding_dim=token_embedding_dim,
            hidden_dim=hidden_dim//2,
            n_token=n_token)

    def forward(self, x, src_key_padding_mask=None, text_input=None):
        """Return CTC logits; with text_input also return s2s logits/alignments."""
        feats = self.to_mfcc(x)
        feats = self.init_cnn(feats)
        feats = self.cnns(feats)
        feats = self.projection(feats)
        feats = feats.transpose(1, 2)  # (B, C, T) -> (B, T, C)
        ctc_logit = self.ctc_linear(feats)
        if text_input is None:
            return ctc_logit
        _, s2s_logit, s2s_attn = self.asr_s2s(feats, src_key_padding_mask, text_input)
        return ctc_logit, s2s_logit, s2s_attn

    def get_feature(self, x):
        """Encoder features before the CTC/s2s heads, shape (B, C, T)."""
        feats = self.to_mfcc(x.squeeze(1))
        feats = self.init_cnn(feats)
        feats = self.cnns(feats)
        return self.projection(feats)

    def length_to_mask(self, lengths):
        """Boolean padding mask: True where the position index >= sequence length."""
        positions = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
        return torch.gt(positions + 1, lengths.unsqueeze(1)).to(lengths.device)

    def get_future_mask(self, out_length, unmask_future_steps=0):
        """
        Args:
            out_length (int): returned mask shape is (out_length, out_length).
            unmask_future_steps (int): number of future steps left unmasked.
        Return:
            mask (torch.BoolTensor): mask[i, j] is True when j > i + unmask_future_steps,
                i.e. position j lies in the (masked) future of position i.
        """
        cols = torch.arange(out_length).unsqueeze(0).expand(out_length, -1)
        return torch.gt(cols, cols.T + unmask_future_steps)
class ASRS2S(nn.Module):
    """Location-sensitive attention decoder used as the seq2seq ASR head.

    Given encoder memory and a teacher-forced token sequence, runs an LSTMCell
    decoder one step per input token and returns per-step hidden vectors,
    token logits and attention alignments.
    """

    def __init__(self,
                 embedding_dim=256,
                 hidden_dim=512,
                 n_location_filters=32,
                 location_kernel_size=63,
                 n_token=40):
        super(ASRS2S, self).__init__()
        self.embedding = nn.Embedding(n_token, embedding_dim)
        # Uniform embedding init in +-sqrt(6/hidden_dim).
        val_range = math.sqrt(6 / hidden_dim)
        self.embedding.weight.data.uniform_(-val_range, val_range)

        self.decoder_rnn_dim = hidden_dim
        self.project_to_n_symbols = nn.Linear(self.decoder_rnn_dim, n_token)
        self.attention_layer = Attention(
            self.decoder_rnn_dim,
            hidden_dim,
            hidden_dim,
            n_location_filters,
            location_kernel_size
        )
        self.decoder_rnn = nn.LSTMCell(self.decoder_rnn_dim + embedding_dim, self.decoder_rnn_dim)
        self.project_to_hidden = nn.Sequential(
            LinearNorm(self.decoder_rnn_dim * 2, hidden_dim),
            nn.Tanh())
        self.sos = 1  # start-of-sequence token id
        self.eos = 2  # end-of-sequence token id

    def initialize_decoder_states(self, memory, mask):
        """Reset all per-sequence decoder state.

        memory.shape = (B, L, H) = (Batchsize, Maxtimestep, Hiddendim)
        """
        B, L, H = memory.shape
        self.decoder_hidden = torch.zeros((B, self.decoder_rnn_dim)).type_as(memory)
        self.decoder_cell = torch.zeros((B, self.decoder_rnn_dim)).type_as(memory)
        self.attention_weights = torch.zeros((B, L)).type_as(memory)
        self.attention_weights_cum = torch.zeros((B, L)).type_as(memory)
        self.attention_context = torch.zeros((B, H)).type_as(memory)
        self.memory = memory
        self.processed_memory = self.attention_layer.memory_layer(memory)
        self.mask = mask
        self.unk_index = 3      # token id substituted for randomly masked input tokens
        self.random_mask = 0.1  # probability of replacing an input token with unk

    def forward(self, memory, memory_mask, text_input):
        """
        memory.shape = (B, L, H) = (Batchsize, Maxtimestep, Hiddendim)
        memory_mask.shape = (B, L, )
        text_input.shape = (B, T)
        """
        self.initialize_decoder_states(memory, memory_mask)
        # Text random mask: replace a fraction of teacher-forcing tokens with unk.
        random_mask = (torch.rand(text_input.shape) < self.random_mask).to(text_input.device)
        _text_input = text_input.clone()
        _text_input.masked_fill_(random_mask, self.unk_index)
        decoder_inputs = self.embedding(_text_input).transpose(0, 1) # -> [T, B, channel]
        # Prepend the <sos> embedding so step t predicts token t.
        start_embedding = self.embedding(
            torch.LongTensor([self.sos]*decoder_inputs.size(1)).to(decoder_inputs.device))
        decoder_inputs = torch.cat((start_embedding.unsqueeze(0), decoder_inputs), dim=0)

        hidden_outputs, logit_outputs, alignments = [], [], []
        while len(hidden_outputs) < decoder_inputs.size(0):
            decoder_input = decoder_inputs[len(hidden_outputs)]
            hidden, logit, attention_weights = self.decode(decoder_input)
            hidden_outputs += [hidden]
            logit_outputs += [logit]
            alignments += [attention_weights]

        hidden_outputs, logit_outputs, alignments = \
            self.parse_decoder_outputs(
                hidden_outputs, logit_outputs, alignments)

        return hidden_outputs, logit_outputs, alignments

    def decode(self, decoder_input):
        """One decoder step: LSTMCell -> location-sensitive attention -> projections.

        Mutates the per-sequence state set up in initialize_decoder_states.
        """
        cell_input = torch.cat((decoder_input, self.attention_context), -1)
        self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
            cell_input,
            (self.decoder_hidden, self.decoder_cell))

        # Current and cumulative attention weights feed the location layer.
        attention_weights_cat = torch.cat(
            (self.attention_weights.unsqueeze(1),
            self.attention_weights_cum.unsqueeze(1)),dim=1)

        self.attention_context, self.attention_weights = self.attention_layer(
            self.decoder_hidden,
            self.memory,
            self.processed_memory,
            attention_weights_cat,
            self.mask)

        self.attention_weights_cum += self.attention_weights
        hidden_and_context = torch.cat((self.decoder_hidden, self.attention_context), -1)
        hidden = self.project_to_hidden(hidden_and_context)

        # Dropout before the output projection (regularization; active only in training).
        logit = self.project_to_n_symbols(F.dropout(hidden, 0.5, self.training))

        return hidden, logit, self.attention_weights

    def parse_decoder_outputs(self, hidden, logit, alignments):
        """Stack per-step lists into batch-first tensors."""
        # -> [B, T_out + 1, max_time]
        alignments = torch.stack(alignments).transpose(0,1)
        # [T_out + 1, B, n_symbols] -> [B, T_out + 1, n_symbols]
        logit = torch.stack(logit).transpose(0, 1).contiguous()
        hidden = torch.stack(hidden).transpose(0, 1).contiguous()
        return hidden, logit, alignments
| 7,272 | 37.893048 | 118 | py |
Signal-is-Harder | Signal-is-Harder-main/train_vanilla.py | import os
import yaml
import argparse
import wandb
import time
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from data.util import get_dataset, IdxDataset
from module.util import get_model
from util import set_seed, get_optimizer, evaluate
def main():
    """Train and evaluate the vanilla (no-debiasing) baseline end to end.

    Reads config.yaml, applies CLI overrides, builds dataset/model/optimizer,
    trains for the configured epochs, evaluates on the test split, appends the
    result to the results file and saves a timestamped checkpoint.
    """
    # configuration
    with open("config.yaml", "r") as f:
        config = yaml.safe_load(f)

    # manual overwriting of configuration for scripts
    # initialize parser
    parser = argparse.ArgumentParser()
    parser.add_argument("--name", default=None, help = "Name of experiment")
    parser.add_argument("--bias_conflicting_perc", default=None, type=float, help = "Percentage of bias conflicting samples in dataset")
    parser.add_argument("--severity", default=None, type=int, help = "Severity of bias")
    parser.add_argument("--dataset", default=None, help = "Choice of dataset")
    parser.add_argument("--model_tag", default=None, help = "Choice of model")
    parser.add_argument("--q", default=None, type=float, help = "q for GCE loss")
    parser.add_argument("--random_state", default=None, type=int, help="Random state for seed")
    parser.add_argument("--results_filename", default=None, help="Name of file to store results")
    parser.add_argument("--epochs", default=None, type=int, help="Number of training epochs")
    args = parser.parse_args()

    # Replace all specified arguments
    # NOTE(review): this relies on vars(args) iterating in the same order as
    # the `updateable` list below — keep both lists in sync when adding flags.
    updateable = [config["name"],config["data"]["bias_conflicting_perc"],config["data"]["severity"],config["data"]["dataset"],config["model"]["tag"],config["loss"]["GCE_q"],config["random_state"],config["results_filename"],config["train"]["epochs"]]
    values = []
    for i,v in enumerate(vars(args).values()):
        if v != None:
            values.append(v)
            print("Overwriting configuration")
        else: values.append(updateable[i])
    config["name"],config["data"]["bias_conflicting_perc"],config["data"]["severity"],config["data"]["dataset"],config["model"]["tag"],config["loss"]["GCE_q"],config["random_state"],config["results_filename"],config["train"]["epochs"] = values

    # configuration sanity check: warn on unusual dataset/model pairings
    if not (
        (config["data"]["dataset"] == "colored_mnist" and config["model"]["tag"] == "MLP") or
        (config["data"]["dataset"] == "colored_mnist" and config["model"]["tag"] == "MLP_VAE") or
        (config["data"]["dataset"] == "cifar10_type0" and config["model"]["tag"] == "ResNet20") or
        (config["data"]["dataset"] == "cifar10_type1" and config["model"]["tag"] == "ResNet20")):
        print("Are you sure you want to use the dataset "+config["data"]["dataset"]+" with the model "+ config["model"]["tag"]+"?")

    # define variables from config
    batch_size = config["train"]["batch_size"]
    epochs = config["train"]["epochs"]
    random_state = config["random_state"]

    # wandb support (disabled entirely when wandb_logging is off)
    mode = "online" if config['wandb_logging'] else "disabled"
    wandb.init(
        project="Interpretable Debiasing",
        entity="interpretable-debiasing",
        config=config,
        mode=mode
    )
    print("Running experiment: {}".format(config["name"]))

    # set seed
    set_seed(random_state)

    # set device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"\nUsing device: {device}")

    # load dataset (IdxDataset wraps samples so loaders also yield indices)
    train_dataset = get_dataset(
        config,
        dataset_split="train"
    )
    test_dataset = get_dataset(
        config,
        dataset_split="eval"
    )
    train_dataset = IdxDataset(train_dataset)
    test_dataset = IdxDataset(test_dataset)

    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        #num_workers=16,
        pin_memory=True,
        drop_last=True
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=256,
        shuffle=False,
        #num_workers=16,
        pin_memory=True,
    )

    # define model
    model = get_model(config).to(device)

    # define optimizer
    optimizer = get_optimizer(model, config)

    # define loss function
    criterion = torch.nn.CrossEntropyLoss()

    # training & validation & test (test_acc keeps the last epoch's accuracy)
    for epoch in range(epochs):
        train(model, train_loader, optimizer, criterion, epoch, epochs, device, config)
        #validate()
        test_acc = test(model, test_loader, device)

    # append final accuracy to the shared results file
    with open(config["results_filename"]+'.txt', 'a') as f:
        f.writelines((['{} vanilla: {:8.4f}\n'.format(config["name"], test_acc)]))

    # save a timestamped checkpoint
    timestamp = time.strftime(' %d-%b-%Y_%H:%M', time.localtime())
    os.makedirs("./saved_models/vanilla/", exist_ok=True)
    torch.save(model.state_dict(), "./saved_models/vanilla/" + config["name"] + timestamp + ".pth")
    wandb.finish(quiet=True)
def train(
    model,
    train_loader,
    optimizer,
    criterion,
    epoch,
    epochs,
    device,
    config
):
    """Run one training epoch of the vanilla baseline.

    Args:
        model: baseline model (VAE-style encoder + classifier head)
        train_loader: loader with the training data
        optimizer: optimizer for backpropagation
        criterion: loss function
        epoch: current epoch
        epochs: max number of epochs
        device: current device (cpu or gpu)
        config: experiment configuration (kept for a uniform call signature)
    """
    progress = tqdm(train_loader, position=0, leave=False)
    progress.set_description(f"Epoch [{epoch}/{epochs}]")

    running_correct, running_total = 0, 0
    model.train()
    for data_index, data, attr in progress:
        data, attr = data.to(device), attr.to(device)
        label = attr[:, 0]
        # bias = attr[:, 1]

        optimizer.zero_grad()
        mean, logvar = model.encoder(data)
        logit = model.predict(mean)
        loss = criterion(logit, label)
        loss.backward()
        optimizer.step()

        batch_correct = (logit.argmax(1) == label).sum().item()
        batch_count = label.size(0)
        running_correct += batch_correct
        running_total += batch_count

        progress.set_postfix(loss=loss.item(), acc=batch_correct / batch_count)
        wandb.log({"train_loss": loss})

    wandb.log({"train_accuracy": running_correct / running_total, "epoch": epoch})
    print(
        "| epoch {:3d} | training accuracy {:8.3f}".format(
            epoch, running_correct / running_total
        )
    )
def test(model, test_loader, device):
    """Evaluate the trained baseline on the test split and log accuracies.

    Args:
        model: our pytorch model
        test_loader: loader with the validation data
        device: current device (cpu or gpu)

    Returns:
        Overall test accuracy.
    """
    model.eval()
    acc_aligned, acc_conflicting, acc_overall = evaluate(model, test_loader, device)
    wandb.log({"conflicting_test_accuracy_vanilla": acc_conflicting})
    wandb.log({"aligned_test_accuracy_vanilla": acc_aligned})
    wandb.log({"test_accuracy_vanilla": acc_overall})
    print("test accuracy {:8.3f}".format(acc_overall))
    return acc_overall
# Script entry point: run training + evaluation when executed directly.
if __name__ == "__main__":
    main()
Signal-is-Harder | Signal-is-Harder-main/make_dataset.py | '''Modified from https://github.com/alinlab/LfF/blob/master/util.py'''
import os
from re import A
from xmlrpc.client import Boolean
from tqdm import tqdm
import pickle
import numpy as np
import torch
from torchvision.datasets import CIFAR10, MNIST
import torchvision.transforms as T
from data.corrupted_cifar10_protocol import CORRUPTED_CIFAR10_PROTOCOL
from data.colored_mnist_protocol import COLORED_MNIST_PROTOCOL
from data.rotated_mnist_protocol import ROTATED_MNIST_PROTOCOL
from data.shifted_mnist_protocol import SHIFTED_MNIST_PROTOCOL
import yaml
import argparse
from util import set_seed
def make_attr_labels(target_labels, bias_aligned_ratio):
    """Assign a bias-attribute label to every sample.

    Within each target class, a `bias_aligned_ratio` fraction of samples gets
    the attribute equal to the class itself (bias-aligned); the remainder is
    spread uniformly over the other attributes.

    Args:
        target_labels (torch.LongTensor): per-sample class labels.
        bias_aligned_ratio (float): fraction of bias-aligned samples per class.

    Returns:
        torch.Tensor: per-sample attribute labels, same shape as target_labels.
    """
    num_classes = target_labels.max().item() + 1
    eye = np.eye(num_classes)
    class_counts = np.array(
        [torch.sum(target_labels == c).item() for c in range(num_classes)]
    )
    # Row c is the distribution over attributes for samples of class c.
    attr_ratios = (
        bias_aligned_ratio * eye
        + (1 - bias_aligned_ratio) / (num_classes - 1) * (1 - eye)
    )
    # Cumulative count thresholds: a sample's rank within its class decides
    # which attribute bucket it falls into.
    milestones = (class_counts[:, np.newaxis] * np.cumsum(attr_ratios, axis=1)).round()

    attr_labels = torch.zeros_like(target_labels)
    for c in range(num_classes):
        members = (target_labels == c).nonzero().squeeze()
        thresholds = milestones[c]
        for rank, sample_idx in enumerate(members):
            attr_labels[sample_idx] = np.min(
                np.nonzero(thresholds > rank)[0]
            ).item()
    return attr_labels
def make_corrupted_cifar10(
    data_dir, skewed_ratio, corruption_names, severity, config, postfix="0"
):
    """Build a biased CorruptedCIFAR10 dataset and save it as .npy files.

    Each class is predominantly paired with one corruption type (bias-aligned);
    `skewed_ratio` controls the fraction of bias-conflicting samples. Optionally
    also exports the data in the directory layout expected by the DFA baseline.
    """
    cifar10_dir = os.path.join(data_dir, "CIFAR10")
    corrupted_cifar10_dir = os.path.join(
        data_dir, f"CorruptedCIFAR10-Type{postfix}-Skewed{skewed_ratio}-Severity{severity}"
    )
    os.makedirs(corrupted_cifar10_dir, exist_ok=True)
    print(corrupted_cifar10_dir)
    protocol = CORRUPTED_CIFAR10_PROTOCOL
    convert_img = T.Compose([T.ToTensor(), T.ToPILImage()])
    attr_names = ["object", "corruption"]
    attr_names_path = os.path.join(corrupted_cifar10_dir, "attr_names.pkl")
    with open(attr_names_path, "wb") as f:
        pickle.dump(attr_names, f)

    for split in ["train", "test"]:
        dataset = CIFAR10(cifar10_dir, train=(split == "train"), download=True)
        os.makedirs(os.path.join(corrupted_cifar10_dir, split), exist_ok=True)
        # Train set is skewed; test set is (near-)uniform over corruptions.
        if split == "train":
            bias_aligned_ratio = 1-skewed_ratio
        else:
            bias_aligned_ratio = 0.1
        corruption_labels = make_attr_labels(
            torch.LongTensor(dataset.targets), bias_aligned_ratio
        )
        images, attrs = [], []
        for img, target_label, corruption_label in tqdm(
            zip(dataset.data, dataset.targets, corruption_labels),
            total=len(corruption_labels),
        ):
            method_name = corruption_names[corruption_label]
            # severity+1: protocol severities are 1-based here.
            corrupted_img = protocol[method_name](convert_img(img), severity+1)
            images.append(np.array(corrupted_img).astype(np.uint8))
            attrs.append([target_label, corruption_label])
        # For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
        if config["Dfa"]["dataset"] and severity == 4:
            import imageio
            from distutils.dir_util import copy_tree
            dfa_ratio = skewed_ratio * 100
            if postfix == '0':
                path = config["Dfa"]["data_dir"]+f'/cifar10c/{dfa_ratio:g}pct'
            elif postfix == '1': path = config["Dfa"]["data_dir"]+f'/cifar10ct1/{dfa_ratio:g}pct'
            else: raise NotImplementedError
            attr = np.array(attrs)
            imgs = np.array(images)
            if split == "train":
                # Split images into align/conflict folders per class.
                for j in range(len(np.unique(attr))):
                    ind = np.nonzero((attr[:,0] == j) & (attr[:,0] == attr[:,1]))[0]
                    os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
                    os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
                    for i in range(len(ind)):
                        path_img = os.path.join(path, "align", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
                        imageio.imwrite(path_img, imgs[ind[i],:,:,:])
                    ind = np.nonzero((attr[:,0] == j) & (attr[:,0] != attr[:,1]))[0]
                    for i in range(len(ind)):
                        path_img = os.path.join(path, "conflict", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
                        imageio.imwrite(path_img, imgs[ind[i],:,:,:])
            elif split == "test":
                for j in range(len(np.unique(attr))):
                    os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
                for i in range(len(attr[:,0])):
                    path_img = os.path.join(path, "test", f'{attr[i,0]}', f"{i}_{attr[i,0]}_{attr[i,1]}.png")
                    imageio.imwrite(path_img, imgs[i,:,:,:])
                #Create Pseudovalidation set as it's never used
                os.makedirs(os.path.join(path, "valid"), exist_ok=True)
                copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
            else: raise NotImplementedError
        image_path = os.path.join(corrupted_cifar10_dir, split, "images.npy")
        np.save(image_path, np.array(images).astype(np.uint8))
        attr_path = os.path.join(corrupted_cifar10_dir, split, "attrs.npy")
        np.save(attr_path, np.array(attrs).astype(np.uint8))
def make_colored_mnist(data_dir, skewed_ratio, severity, config):
    """Build a biased ColoredMNIST dataset and save it as .npy files.

    Each digit class is predominantly colored with one color (bias-aligned);
    `skewed_ratio` controls the fraction of bias-conflicting samples. Optionally
    also exports the data in the directory layout expected by the DFA baseline.
    """
    mnist_dir = os.path.join(data_dir, "MNIST")
    colored_mnist_dir = os.path.join(
        data_dir, f"ColoredMNIST-Skewed{skewed_ratio}-Severity{severity}"
    )
    os.makedirs(colored_mnist_dir, exist_ok=True)
    print(colored_mnist_dir)
    protocol = COLORED_MNIST_PROTOCOL
    attr_names = ["digit", "color"]
    attr_names_path = os.path.join(colored_mnist_dir, "attr_names.pkl")
    with open(attr_names_path, "wb") as f:
        pickle.dump(attr_names, f)

    for split in ["train", "test"]:
        dataset = MNIST(mnist_dir, train=(split == "train"), download=True)
        os.makedirs(os.path.join(colored_mnist_dir, split), exist_ok=True)
        # Train set is skewed; test set is (near-)uniform over colors.
        if split == "train":
            bias_aligned_ratio = 1. - skewed_ratio
        else:
            bias_aligned_ratio = 0.1
        color_labels = make_attr_labels(
            torch.LongTensor(dataset.targets), bias_aligned_ratio
        )
        images, attrs = [], []
        for img, target_label, color_label in tqdm(
            zip(dataset.data, dataset.targets, color_labels),
            total=len(color_labels),
        ):
            colored_img = protocol[color_label.item()](img, severity)
            # Change RBG from first to last dimension
            colored_img = np.moveaxis(np.uint8(colored_img), 0, 2)
            images.append(colored_img)
            attrs.append([target_label, color_label])
        # For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
        if config["Dfa"]["dataset"] and severity == 4:
            import imageio
            print("Creating dataset for Dfa too!")
            from distutils.dir_util import copy_tree
            dfa_ratio = skewed_ratio * 100
            path = config["Dfa"]["data_dir"]+f'/cmnist/{dfa_ratio:g}pct'
            attr = np.array(attrs)
            imgs = np.array(images)
            if split == "train":
                # Split images into align/conflict folders per class.
                for j in range(len(np.unique(attr))):
                    ind = np.nonzero((attr[:,0] == j) & (attr[:,0] == attr[:,1]))[0]
                    os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
                    os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
                    for i in range(len(ind)):
                        path_img = os.path.join(path, "align", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
                        imageio.imwrite(path_img, imgs[ind[i],:,:,:])
                    ind = np.nonzero((attr[:,0] == j) & (attr[:,0] != attr[:,1]))[0]
                    for i in range(len(ind)):
                        path_img = os.path.join(path, "conflict", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
                        imageio.imwrite(path_img, imgs[ind[i],:,:,:])
            elif split == "test":
                for j in range(len(np.unique(attr))):
                    os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
                for i in range(len(attr[:,0])):
                    path_img = os.path.join(path, "test", f'{attr[i,0]}', f"{i}_{attr[i,0]}_{attr[i,1]}.png")
                    imageio.imwrite(path_img, imgs[i,:,:,:])
                #Create Pseudovalidation set as it's never used
                os.makedirs(os.path.join(path, "valid"), exist_ok=True)
                copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
            else: raise NotImplementedError
        image_path = os.path.join(colored_mnist_dir, split, "images.npy")
        np.save(image_path, np.array(images).astype(np.uint8))
        attr_path = os.path.join(colored_mnist_dir, split, "attrs.npy")
        np.save(attr_path, np.array(attrs).astype(np.uint8))
def make_rotated_mnist(data_dir, skewed_ratio, severity, config):
    """Build a biased RotatedMNIST dataset (binary 3-vs-8) and save .npy files.

    Keeps only digits 3 and 8 (relabeled 0/1); class is predominantly paired
    with one rotation (bias-aligned). Optionally also exports the data in the
    directory layout expected by the DFA baseline.
    """
    mnist_dir = os.path.join(data_dir, "MNIST")
    rotated_mnist_dir = os.path.join(
        data_dir, f"RotatedMNIST-Skewed{skewed_ratio}-Severity{severity}"
    )
    os.makedirs(rotated_mnist_dir, exist_ok=True)
    print(rotated_mnist_dir)
    protocol = ROTATED_MNIST_PROTOCOL
    attr_names = ["digit", "rotation"]
    attr_names_path = os.path.join(rotated_mnist_dir, "attr_names.pkl")
    with open(attr_names_path, "wb") as f:
        pickle.dump(attr_names, f)

    for split in ["train", "test"]:
        dataset = MNIST(mnist_dir, train=(split == "train"), download=True)
        os.makedirs(os.path.join(rotated_mnist_dir, split), exist_ok=True)
        # Train set is skewed; test set is balanced over rotations.
        if split == "train":
            bias_aligned_ratio = 1. - skewed_ratio
        else:
            bias_aligned_ratio = 0.5
        #Keep only 3 and 8 and change their classes to 0,1
        targets = ((dataset.targets==3) | (dataset.targets==8)).nonzero()
        data = dataset.data[targets].squeeze(1)
        data_labels = dataset.targets[targets].squeeze(1)
        data_labels[(data_labels == 3).nonzero()] = 0
        data_labels[(data_labels == 8).nonzero()] = 1
        rotation_labels = make_attr_labels(
            torch.LongTensor(data_labels), bias_aligned_ratio
        )
        images, attrs = [], []
        for img, target_label, rotation_label in tqdm(
            zip(data, data_labels, rotation_labels),
            total=len(rotation_labels),
        ):
            rotated_img = protocol[rotation_label.item()](img, severity)
            images.append(rotated_img)
            attrs.append([target_label, rotation_label])
        # For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
        if config["Dfa"]["dataset"] and severity == 4:
            import imageio
            from distutils.dir_util import copy_tree
            dfa_ratio = skewed_ratio * 100
            path = config["Dfa"]["data_dir"]+f'/rmnist/{dfa_ratio:g}pct'
            attr = np.array(attrs)
            imgs = [np.array(image).astype(np.uint8) for image in images]
            if split == "train":
                # Split images into align/conflict folders per class.
                for j in range(len(np.unique(attr))):
                    ind = np.nonzero((attr[:,0] == j) & (attr[:,0] == attr[:,1]))[0]
                    os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
                    os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
                    for i in range(len(ind)):
                        path_img = os.path.join(path, "align", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
                        imageio.imwrite(path_img, imgs[ind[i]])
                    ind = np.nonzero((attr[:,0] == j) & (attr[:,0] != attr[:,1]))[0]
                    for i in range(len(ind)):
                        path_img = os.path.join(path, "conflict", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
                        imageio.imwrite(path_img, imgs[ind[i]])
            elif split == "test":
                for j in range(len(np.unique(attr))):
                    os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
                for i in range(len(attr[:,0])):
                    path_img = os.path.join(path, "test", f'{attr[i,0]}', f"{i}_{attr[i,0]}_{attr[i,1]}.png")
                    imageio.imwrite(path_img, imgs[i])
                #Create Pseudovalidation set as it's never used
                os.makedirs(os.path.join(path, "valid"), exist_ok=True)
                copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
            else: raise NotImplementedError
        image_path = os.path.join(rotated_mnist_dir, split, "images.npy")
        np.save(image_path, [np.array(image).astype(np.uint8) for image in images])
        attr_path = os.path.join(rotated_mnist_dir, split, "attrs.npy")
        np.save(attr_path, np.array(attrs).astype(np.uint8))
def make_shifted_mnist(data_dir, skewed_ratio, severity, config):
    """Build a biased ShiftedMNIST dataset (binary 3-vs-8) and save .npy files.

    Keeps only digits 3 and 8 (relabeled 0/1); class is predominantly paired
    with one spatial shift (bias-aligned). Optionally also exports the data in
    the directory layout expected by the DFA baseline.
    """
    mnist_dir = os.path.join(data_dir, "MNIST")
    shifted_mnist_dir = os.path.join(
        data_dir, f"ShiftedMNIST-Skewed{skewed_ratio}-Severity{severity}"
    )
    os.makedirs(shifted_mnist_dir, exist_ok=True)
    print(shifted_mnist_dir)
    protocol = SHIFTED_MNIST_PROTOCOL
    # NOTE(review): attr_names says "rotation" although the bias here is the
    # shift — looks copied from make_rotated_mnist; confirm nothing keys on it.
    attr_names = ["digit", "rotation"]
    attr_names_path = os.path.join(shifted_mnist_dir, "attr_names.pkl")
    with open(attr_names_path, "wb") as f:
        pickle.dump(attr_names, f)

    for split in ["train", "test"]:
        dataset = MNIST(mnist_dir, train=(split == "train"), download=True)
        os.makedirs(os.path.join(shifted_mnist_dir, split), exist_ok=True)
        # Train set is skewed; test set is balanced over shifts.
        if split == "train":
            bias_aligned_ratio = 1. - skewed_ratio
        else:
            bias_aligned_ratio = 0.5
        #Keep only 3 and 8 and change their classes to 0,1
        targets = ((dataset.targets==3) | (dataset.targets==8)).nonzero()
        data = dataset.data[targets].squeeze(1)
        data_labels = dataset.targets[targets].squeeze(1)
        data_labels[(data_labels == 3).nonzero()] = 0
        data_labels[(data_labels == 8).nonzero()] = 1
        shifted_labels = make_attr_labels(
            torch.LongTensor(data_labels), bias_aligned_ratio
        )
        images, attrs = [], []
        for img, target_label, shifted_label in tqdm(
            zip(data, data_labels, shifted_labels),
            total=len(shifted_labels),
        ):
            shifted_img = protocol[shifted_label.item()](img, severity)
            images.append(shifted_img)
            attrs.append([target_label, shifted_label])
        # For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
        if config["Dfa"]["dataset"] and severity == 4:
            import imageio
            from distutils.dir_util import copy_tree
            dfa_ratio = skewed_ratio * 100
            path = config["Dfa"]["data_dir"]+f'/smnist/{dfa_ratio:g}pct'
            attr = np.array(attrs)
            imgs = [np.array(image).astype(np.uint8) for image in images]
            if split == "train":
                # Split images into align/conflict folders per class.
                for j in range(len(np.unique(attr))):
                    ind = np.nonzero((attr[:,0] == j) & (attr[:,0] == attr[:,1]))[0]
                    os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
                    os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
                    for i in range(len(ind)):
                        path_img = os.path.join(path, "align", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
                        imageio.imwrite(path_img, imgs[ind[i]])
                    ind = np.nonzero((attr[:,0] == j) & (attr[:,0] != attr[:,1]))[0]
                    for i in range(len(ind)):
                        path_img = os.path.join(path, "conflict", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
                        imageio.imwrite(path_img, imgs[ind[i]])
            elif split == "test":
                for j in range(len(np.unique(attr))):
                    os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
                for i in range(len(attr[:,0])):
                    path_img = os.path.join(path, "test", f'{attr[i,0]}', f"{i}_{attr[i,0]}_{attr[i,1]}.png")
                    imageio.imwrite(path_img, imgs[i])
                #Create Pseudovalidation set as it's never used
                os.makedirs(os.path.join(path, "valid"), exist_ok=True)
                copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
            else: raise NotImplementedError
        image_path = os.path.join(shifted_mnist_dir, split, "images.npy")
        np.save(image_path, [np.array(image).astype(np.uint8) for image in images])
        attr_path = os.path.join(shifted_mnist_dir, split, "attrs.npy")
        np.save(attr_path, np.array(attrs).astype(np.uint8))
def make_camelyon17_type0(data_dir,skewed_ratio,config):
    # Type0: Using data from all 4 training hospitals in testset with 50-50 positive and negative ratio
    """Build a biased Camelyon17 (Type0) dataset from the WILDS release.

    Two randomly picked training hospitals are made the "positive" bias group;
    within the training pool, samples are kept with probability depending on
    whether label and hospital group agree (bias-aligned) or not. The first
    1250 images of every (hospital, label) combination form a balanced test set.
    """
    from wilds import get_dataset
    from wilds.common.data_loaders import get_train_loader
    import torchvision.transforms as transforms
    from torch.utils.data import DataLoader, ConcatDataset
    from torch.utils.data.dataset import Dataset

    # Load the full dataset, and download it if necessary
    dataset = get_dataset(dataset="camelyon17", download=True, unlabeled=False, root_dir=data_dir)

    # Get the training set
    train_data = dataset.get_subset(
        "train",
        transform=transforms.Compose([transforms.ToTensor()])
    )
    idval_data = dataset.get_subset(
        "id_val",
        transform=transforms.Compose([transforms.ToTensor()])
    )
    oodval_data = dataset.get_subset(
        "val",
        transform=transforms.Compose([transforms.ToTensor()])
    )
    full_train_data = ConcatDataset([train_data,idval_data,oodval_data]) # NOT test_data
    data_loader = DataLoader(
        full_train_data,
        shuffle=True,
        batch_size=1) # By shuffle all inputs from all datasets get randomly shuffled

    # Draw two distinct hospitals (from the 4 training hospitals) as the
    # positive bias group; hospital index 2 is the held-out test hospital.
    pos = np.zeros(2)
    while len(np.unique(pos))==1:
        pos = np.random.randint(0,4,2)
    pos[pos>=2]+=1 # 2 is test hospital
    #bias_label = np.zeros(4)
    #bias_label[pos] += 1
    #assert np.median(bias_label)==0.5

    camelyon_dir = os.path.join(
        data_dir, f"Camelyon17-Type0-Skewed{skewed_ratio}"
    )
    os.makedirs(camelyon_dir, exist_ok=True)
    print(camelyon_dir)
    attr_names = ["tumor", "hospital"]
    attr_names_path = os.path.join(camelyon_dir, "attr_names.pkl")
    with open(attr_names_path, "wb") as f:
        pickle.dump(attr_names, f)
    for split in ["train", "test"]:
        os.makedirs(os.path.join(camelyon_dir, split), exist_ok=True)

    test_images, test_attrs = [], []
    images, attrs = [], []
    bias_aligned_ratio = 1-skewed_ratio
    test_count = np.zeros((5,2)) # Count images in testset of all combinations
    for idx, (x, y, metadata) in enumerate(tqdm(data_loader)):
        # First 1250 images per (hospital, label) pair go to the test set.
        if test_count[metadata[:,0].item(),y.item()]<1250: #10'000 testset images
            x = np.moveaxis(np.array(x), 1, 3)
            x *= 255
            test_images.append(np.array(x.squeeze(0)).astype(np.uint8))
            test_attrs.append([y.squeeze(), metadata[:,0].squeeze()])
            test_count[metadata[:,0].item(),y.item()]+=1
        else:
            # Keep bias-aligned samples with prob bias_aligned_ratio,
            # bias-conflicting samples with the complementary prob.
            include_align = np.random.binomial(1,bias_aligned_ratio,size=len(x))
            include_confl = 1-include_align
            pos_domains = np.isin(metadata[:,0],pos)
            aligned = np.zeros_like(include_align)
            aligned[pos_domains] = (y == 1)[pos_domains]
            aligned[~pos_domains] = (y == 0)[~pos_domains]
            aligned = aligned.astype(bool)
            include_imgs = np.zeros_like(include_align)
            include_imgs[aligned] = include_align[aligned]
            include_imgs[~aligned] = include_confl[~aligned]
            include_imgs = include_imgs.astype(bool)
            if include_imgs==0:
                continue
            x = np.moveaxis(np.array(x), 1, 3)
            x *= 255
            images.append(np.array(x[include_imgs].squeeze(0)).astype(np.uint8))
            attrs.append([y[include_imgs].squeeze(), metadata[:,0][include_imgs].squeeze()])
    assert ((test_count==0) | (test_count==1250)).all()

    # For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
    if config["Dfa"]["dataset"]:
        import imageio
        from distutils.dir_util import copy_tree
        dfa_ratio = skewed_ratio * 100
        path = config["Dfa"]["data_dir"]+f'/camelyon17_type0/{dfa_ratio:g}pct'
        attr = np.array(attrs)
        imgs = [image for image in images]
        for j in range(2):
            os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
            os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
        # Aligned: positive hospitals with tumor, other hospitals without.
        pos_domains = np.isin(attr[:,1],pos)
        ind = np.zeros_like(attr[:,1])
        ind[pos_domains] = (attr[:,0] == 1)[pos_domains]
        ind[~pos_domains] = (attr[:,0] == 0)[~pos_domains]
        ind = np.nonzero(ind)[0]
        for i in tqdm(range(len(ind))):
            path_img = os.path.join(path, "align", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
            imageio.imwrite(path_img, imgs[ind[i]])
        ind = np.zeros_like(attr[:,1])
        ind[pos_domains] = (attr[:,0] != 1)[pos_domains]
        ind[~pos_domains] = (attr[:,0] != 0)[~pos_domains]
        ind = np.nonzero(ind)[0]
        for i in tqdm(range(len(ind))):
            path_img = os.path.join(path, "conflict", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
            imageio.imwrite(path_img, imgs[ind[i]])
        #Testset
        test_attr = np.array(test_attrs)
        test_imgs = [image for image in test_images]
        for j in range(2):
            os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
        for i in tqdm(range(len(test_attr[:,0]))):
            path_img = os.path.join(path, "test", f'{test_attr[i,0]}', f"{i}_{test_attr[i,0]}_{test_attr[i,1]}.png")
            imageio.imwrite(path_img, test_imgs[i])
        #Create Pseudovalidation set as it's never used
        os.makedirs(os.path.join(path, "valid"), exist_ok=True)
        copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))

    image_path = os.path.join(camelyon_dir, "train", "images.npy")
    np.save(image_path, [np.array(image).astype(np.uint8) for image in images])
    attr_path = os.path.join(camelyon_dir, "train", "attrs.npy")
    np.save(attr_path, np.array(attrs).astype(np.uint8))
    image_path = os.path.join(camelyon_dir, "test", "images.npy")
    np.save(image_path, [np.array(image).astype(np.uint8) for image in test_images])
    attr_path = os.path.join(camelyon_dir, "test", "attrs.npy")
    np.save(attr_path, np.array(test_attrs).astype(np.uint8))
def make_camelyon17_type1(data_dir,skewed_ratio, config):
    """Build the "Type1" biased Camelyon17 dataset on disk.

    Keeps the WILDS test split (the held-out hospital) as the test set and
    pools the train/id_val/val splits as the training source.  Two distinct
    "positive" hospitals are drawn at random; in those, tumor label 1 counts
    as bias-aligned, elsewhere label 0 does.  A sample is kept with
    probability ``1-skewed_ratio`` when bias-aligned and ``skewed_ratio``
    otherwise, producing a spuriously hospital-correlated training set.

    Args:
        data_dir: root dir for the WILDS download and the generated dataset.
        skewed_ratio: fraction of bias-conflicting samples kept (e.g. 0.01).
        config: global config dict; ``config["Dfa"]`` optionally triggers an
            additional on-disk PNG export in the layout the DFA code expects.

    Side effects:
        Writes ``Camelyon17-Type1-Skewed{ratio}/{train,test}/{images,attrs}.npy``
        and ``attr_names.pkl``; optionally a DFA-style align/conflict/test tree.
    """
    # Type1: Using data hospital 5 as testset as in original wilds
    from wilds import get_dataset
    from wilds.common.data_loaders import get_train_loader
    import torchvision.transforms as transforms
    from torch.utils.data import DataLoader, ConcatDataset
    from torch.utils.data.dataset import Dataset
    # Load the full dataset, and download it if necessary
    dataset = get_dataset(dataset="camelyon17", download=True, unlabeled=False, root_dir=data_dir)
    # Get the training set
    train_data = dataset.get_subset(
        "train",
        transform=transforms.Compose([transforms.ToTensor()])
    )
    idval_data = dataset.get_subset(
        "id_val",
        transform=transforms.Compose([transforms.ToTensor()])
    )
    oodval_data = dataset.get_subset(
        "val",
        transform=transforms.Compose([transforms.ToTensor()])
    )
    test_data = dataset.get_subset(
        "test",
        transform=transforms.Compose([transforms.ToTensor()])
    )
    full_train_data = ConcatDataset([train_data,idval_data,oodval_data]) # NOT test_data bcs this stays testset
    data_loader = DataLoader(
        full_train_data,
        shuffle=True,
        batch_size=1) # By shuffle all inputs from all datasets get randomly shuffled
    test_loader = DataLoader(
        test_data,
        shuffle=True,
        batch_size=1)
    # Draw two *distinct* "positive" hospitals; the loop rerolls until the
    # two random draws differ.  The remap below skips index 2.
    pos = np.zeros(2)
    while len(np.unique(pos))==1:
        pos = np.random.randint(0,4,2)
    pos[pos>=2]+=1 # 2 is test hospital
    #bias_label = np.zeros(4)
    #bias_label[pos] += 1
    #assert np.median(bias_label)==0.5
    camelyon_dir = os.path.join(
        data_dir, f"Camelyon17-Type1-Skewed{skewed_ratio}"
    )
    os.makedirs(camelyon_dir, exist_ok=True)
    print(camelyon_dir)
    # attrs columns: [:,0] = tumor label, [:,1] = hospital id (see attr_names).
    attr_names = ["tumor", "hospital"]
    attr_names_path = os.path.join(camelyon_dir, "attr_names.pkl")
    with open(attr_names_path, "wb") as f:
        pickle.dump(attr_names, f)
    for split in ["train", "test"]:
        os.makedirs(os.path.join(camelyon_dir, split), exist_ok=True)
        bias_aligned_ratio = 1-skewed_ratio
        if split == "train":
            images, attrs = [], []
            for idx, (x, y, metadata) in enumerate(tqdm(data_loader)):
                # Keep aligned samples w.p. bias_aligned_ratio, conflicting
                # ones w.p. 1-bias_aligned_ratio (the skew).
                include_align = np.random.binomial(1,bias_aligned_ratio,size=len(x))
                include_confl = 1-include_align
                pos_domains = np.isin(metadata[:,0],pos)
                aligned = np.zeros_like(include_align)
                aligned[pos_domains] = (y == 1)[pos_domains]
                aligned[~pos_domains] = (y == 0)[~pos_domains]
                aligned = aligned.astype(bool)
                include_imgs = np.zeros_like(include_align)
                include_imgs[aligned] = include_align[aligned]
                include_imgs[~aligned] = include_confl[~aligned]
                include_imgs = include_imgs.astype(bool)
                # batch_size=1, so include_imgs holds a single flag here.
                if include_imgs==0:
                    continue
                # NCHW -> NHWC and rescale to 0..255 for uint8 storage.
                x = np.moveaxis(np.array(x), 1, 3)
                x *= 255
                images.append(np.array(x[include_imgs].squeeze(0)).astype(np.uint8))
                attrs.append([y[include_imgs].squeeze(), metadata[:,0][include_imgs].squeeze()])
        else:
            # Test split: keep everything, no skewing.
            images, attrs = [], []
            for idx, (x, y, metadata) in enumerate(tqdm(test_loader)):
                x = np.moveaxis(np.array(x), 1, 3)
                x *= 255
                images.append(np.array(x.squeeze(0)).astype(np.uint8))
                attrs.append([y.squeeze(), metadata[:,0].squeeze()])
        # For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
        if config["Dfa"]["dataset"]:
            import imageio
            from distutils.dir_util import copy_tree
            dfa_ratio = skewed_ratio * 100
            path = config["Dfa"]["data_dir"]+f'/camelyon17_type1/{dfa_ratio:g}pct'
            attr = np.array(attrs)
            imgs = [image for image in images]
            if split == "train":
                for j in range(2):
                    os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
                    os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
                # Bias-aligned indices: label 1 in positive hospitals, 0 elsewhere.
                pos_domains = np.isin(attr[:,1],pos)
                ind = np.zeros_like(attr[:,1])
                ind[pos_domains] = (attr[:,0] == 1)[pos_domains]
                ind[~pos_domains] = (attr[:,0] == 0)[~pos_domains]
                ind = np.nonzero(ind)[0]
                for i in tqdm(range(len(ind))):
                    path_img = os.path.join(path, "align", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
                    imageio.imwrite(path_img, imgs[ind[i]])
                # Bias-conflicting indices: the complementary condition.
                ind = np.zeros_like(attr[:,1])
                ind[pos_domains] = (attr[:,0] != 1)[pos_domains]
                ind[~pos_domains] = (attr[:,0] != 0)[~pos_domains]
                ind = np.nonzero(ind)[0]
                for i in tqdm(range(len(ind))):
                    path_img = os.path.join(path, "conflict", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
                    imageio.imwrite(path_img, imgs[ind[i]])
            elif split == "test":
                for j in range(2):
                    os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
                for i in tqdm(range(len(attr[:,0]))):
                    path_img = os.path.join(path, "test", f'{attr[i,0]}', f"{i}_{attr[i,0]}_{attr[i,1]}.png")
                    imageio.imwrite(path_img, imgs[i])
                #Create Pseudovalidation set as it's never used
                os.makedirs(os.path.join(path, "valid"), exist_ok=True)
                copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
            else: raise NotImplementedError
        # Arrays consumed by the training pipeline.
        image_path = os.path.join(camelyon_dir, split, "images.npy")
        np.save(image_path, [np.array(image).astype(np.uint8) for image in images])
        attr_path = os.path.join(camelyon_dir, split, "attrs.npy")
        np.save(attr_path, np.array(attrs).astype(np.uint8))
def make_camelyon17_type2(data_dir,skewed_ratio,config):
    """Build the "Type2" biased Camelyon17 dataset on disk.

    Pools *all* WILDS splits, then partitions by hospital id: hospitals 0 and
    2 become the (skewed) training set, with hospital 2 relabeled to bias
    label 1; the remaining hospitals supply a balanced test set of 1250
    images per (hospital, label) combination.  In the training set, label 1
    counts as bias-aligned in the positive hospital (``pos = [1]`` after
    relabeling) and label 0 elsewhere; aligned samples are kept with
    probability ``1-skewed_ratio``, conflicting ones with ``skewed_ratio``.

    Args:
        data_dir: root dir for the WILDS download and the generated dataset.
        skewed_ratio: fraction of bias-conflicting samples kept.
        config: global config dict; ``config["Dfa"]`` optionally triggers an
            additional on-disk PNG export in the layout the DFA code expects.

    Side effects:
        Writes ``Camelyon17-Type2-Skewed{ratio}/{train,test}/{images,attrs}.npy``
        and ``attr_names.pkl``; optionally a DFA-style align/conflict/test tree.
    """
    # Type2: Same as type0 but using first and testset hospital. hospital 1 is mostly positive, hospital 0 is mostly negative
    from wilds import get_dataset
    from wilds.common.data_loaders import get_train_loader
    import torchvision.transforms as transforms
    from torch.utils.data import DataLoader, ConcatDataset
    from torch.utils.data.dataset import Dataset
    # Load the full dataset, and download it if necessary
    dataset = get_dataset(dataset="camelyon17", download=True, unlabeled=False, root_dir=data_dir)
    # Get the training set
    train_data = dataset.get_subset(
        "train",
        transform=transforms.Compose([transforms.ToTensor()])
    )
    idval_data = dataset.get_subset(
        "id_val",
        transform=transforms.Compose([transforms.ToTensor()])
    )
    oodval_data = dataset.get_subset(
        "val",
        transform=transforms.Compose([transforms.ToTensor()])
    )
    test_data = dataset.get_subset(
        "test",
        transform=transforms.Compose([transforms.ToTensor()])
    )
    full_train_data = ConcatDataset([train_data,idval_data,oodval_data,test_data])
    data_loader = DataLoader(
        full_train_data,
        shuffle=True,
        batch_size=1) # By shuffle all inputs from all datasets get randomly shuffled
    # Single "positive" bias domain (hospital 2 is relabeled to 1 below).
    pos = np.array([1])
    camelyon_dir = os.path.join(
        data_dir, f"Camelyon17-Type2-Skewed{skewed_ratio}"
    )
    os.makedirs(camelyon_dir, exist_ok=True)
    print(camelyon_dir)
    # attrs columns: [:,0] = tumor label, [:,1] = hospital id (see attr_names).
    attr_names = ["tumor", "hospital"]
    attr_names_path = os.path.join(camelyon_dir, "attr_names.pkl")
    with open(attr_names_path, "wb") as f:
        pickle.dump(attr_names, f)
    for split in ["train", "test"]:
        os.makedirs(os.path.join(camelyon_dir, split), exist_ok=True)
    test_images, test_attrs = [], []
    images, attrs = [], []
    bias_aligned_ratio = 1-skewed_ratio
    test_count = np.zeros((5,2)) # Count images in testset of all combinations
    for idx, (x, y, metadata) in enumerate(tqdm(data_loader)):
        # if not (metadata[:,0].item() in [0,2]): continue
        # elif test_count[metadata[:,0].item(),y.item()]<1250: # 5'000 testset images
        # x = np.moveaxis(np.array(x), 1, 3)
        # x *= 255
        # test_count[metadata[:,0].item(),y.item()]+=1
        # if metadata[:,0].squeeze() == 2: # Changing bias label s.t. it's 0&1. Only for this setting!
        # metadata[:,0] = 1
        # test_images.append(np.array(x.squeeze(0)).astype(np.uint8))
        # test_attrs.append([y.squeeze(), metadata[:,0].squeeze()])
        # Hospitals outside {0,2} fill the test set, capped per combination.
        if test_count[metadata[:,0].item(),y.item()]<1250 and (not metadata[:,0].item() in [0,2]): # 7'500 testset images
            x = np.moveaxis(np.array(x), 1, 3)
            x *= 255
            test_count[metadata[:,0].item(),y.item()]+=1
            test_images.append(np.array(x.squeeze(0)).astype(np.uint8))
            test_attrs.append([y.squeeze(), metadata[:,0].squeeze()])
        elif (not metadata[:,0].item() in [0,2]): continue
        else:
            if metadata[:,0].squeeze() == 2: # Changing bias label s.t. it's 0&1. Only for this setting!
                metadata[:,0] = 1
            # Keep aligned samples w.p. bias_aligned_ratio, conflicting
            # ones w.p. 1-bias_aligned_ratio (the skew).
            include_align = np.random.binomial(1,bias_aligned_ratio,size=len(x))
            include_confl = 1-include_align
            pos_domains = np.isin(metadata[:,0],pos)
            aligned = np.zeros_like(include_align)
            aligned[pos_domains] = (y == 1)[pos_domains]
            aligned[~pos_domains] = (y == 0)[~pos_domains]
            aligned = aligned.astype(bool)
            include_imgs = np.zeros_like(include_align)
            include_imgs[aligned] = include_align[aligned]
            include_imgs[~aligned] = include_confl[~aligned]
            include_imgs = include_imgs.astype(bool)
            # batch_size=1, so include_imgs holds a single flag here.
            if include_imgs==0:
                continue
            x = np.moveaxis(np.array(x), 1, 3)
            x *= 255
            images.append(np.array(x[include_imgs].squeeze(0)).astype(np.uint8))
            attrs.append([y[include_imgs].squeeze(), metadata[:,0][include_imgs].squeeze()])
    # Every test combination is either unused (hospitals 0/2) or exactly full.
    assert ((test_count==0) | (test_count==1250)).all()
    # For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
    if config["Dfa"]["dataset"]:
        import imageio
        from distutils.dir_util import copy_tree
        dfa_ratio = skewed_ratio * 100
        path = config["Dfa"]["data_dir"]+f'/camelyon17_type2/{dfa_ratio:g}pct'
        attr = np.array(attrs)
        imgs = [image for image in images]
        for j in range(2):
            os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
            for f in os.listdir(os.path.join(path, "align", f'{j}')): # Remove already existing files
                os.remove(os.path.join(os.path.join(path, "align", f'{j}'), f))
            os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
            for f in os.listdir(os.path.join(path, "conflict", f'{j}')): # Remove already existing files
                os.remove(os.path.join(os.path.join(path, "conflict", f'{j}'), f))
        # Bias-aligned indices: label 1 in the positive hospital, 0 elsewhere.
        pos_domains = np.isin(attr[:,1],pos)
        ind = np.zeros_like(attr[:,1])
        ind[pos_domains] = (attr[:,0] == 1)[pos_domains]
        ind[~pos_domains] = (attr[:,0] == 0)[~pos_domains]
        ind = np.nonzero(ind)[0]
        for i in tqdm(range(len(ind))):
            path_img = os.path.join(path, "align", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
            imageio.imwrite(path_img, imgs[ind[i]])
        # Bias-conflicting indices: the complementary condition.
        ind = np.zeros_like(attr[:,1])
        ind[pos_domains] = (attr[:,0] != 1)[pos_domains]
        ind[~pos_domains] = (attr[:,0] != 0)[~pos_domains]
        ind = np.nonzero(ind)[0]
        for i in tqdm(range(len(ind))):
            path_img = os.path.join(path, "conflict", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
            imageio.imwrite(path_img, imgs[ind[i]])
        #Testset
        test_attr = np.array(test_attrs)
        test_imgs = [image for image in test_images]
        for j in range(2):
            os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
            for f in os.listdir(os.path.join(path, "test", f'{j}')): # Remove already existing files
                os.remove(os.path.join(os.path.join(path, "test", f'{j}'), f))
        for i in tqdm(range(len(test_attr[:,0]))):
            path_img = os.path.join(path, "test", f'{test_attr[i,0]}', f"{i}_{test_attr[i,0]}_{test_attr[i,1]}.png")
            imageio.imwrite(path_img, test_imgs[i])
        #Create Pseudovalidation set as it's never used
        os.makedirs(os.path.join(path, "valid"), exist_ok=True)
        copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
    # Arrays consumed by the training pipeline.
    image_path = os.path.join(camelyon_dir, "train", "images.npy")
    np.save(image_path, [np.array(image).astype(np.uint8) for image in images])
    attr_path = os.path.join(camelyon_dir, "train", "attrs.npy")
    np.save(attr_path, np.array(attrs).astype(np.uint8))
    image_path = os.path.join(camelyon_dir, "test", "images.npy")
    np.save(image_path, [np.array(image).astype(np.uint8) for image in test_images])
    attr_path = os.path.join(camelyon_dir, "test", "attrs.npy")
    np.save(attr_path, np.array(test_attrs).astype(np.uint8))
def make(make_target):
    """Entry point: read config.yaml, apply CLI overrides, build the datasets.

    For every skew ratio in a fixed list (and severity 4 only) the maker
    function matching ``make_target`` is invoked.

    Args:
        make_target: dataset key (e.g. "colored_mnist", "camelyon17_type1");
            when None it is taken from ``config["data"]["dataset"]``.

    Side effects:
        Reads ``config.yaml`` and ``sys.argv``; seeds all RNGs; writes the
        generated datasets to ``config["user"]["data_dir"]``.
    """
    # configuration
    with open("config.yaml", "r") as f:
        config = yaml.safe_load(f)
    # manual overwriting of configuration for scripts
    # initialize parser
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", default=None, help="Choice of dataset")
    parser.add_argument("--random_state", default=None, type=int, help="Random Seed")
    # NOTE(review): argparse's type=bool treats any non-empty string as True
    # ("--Dfa_dataset False" is truthy); kept as-is for CLI compatibility.
    parser.add_argument("--Dfa_dataset", default=None, type=bool, help="Create dataset for Dfa too?")
    args = parser.parse_args()
    # Replace config entries with any explicitly passed arguments.  This
    # relies on vars(args) preserving the add_argument order above.
    updateable = [config["data"]["dataset"], config["random_state"], config["Dfa"]["dataset"]]
    values = []
    for i, v in enumerate(vars(args).values()):
        if v is not None:  # identity test, not "!= None" (PEP 8)
            values.append(v)
            print("Overwriting configuration")
        else:
            values.append(updateable[i])
    config["data"]["dataset"], config["random_state"], config["Dfa"]["dataset"] = values
    if make_target is None:
        make_target = config["data"]["dataset"]
    data_dir = config["user"]["data_dir"]
    random_state = config["random_state"]
    # Reproducibility
    set_seed(random_state)
    for skewed_ratio in [2e-1, 1e-1, 5e-2, 2e-2, 1e-2, 5e-3]:
        #for severity in [1, 2, 3, 4]: This if from LfF but we only look at severity 4 here!
        for severity in [4]:
            if make_target == "colored_mnist":
                make_colored_mnist(data_dir=data_dir, skewed_ratio=skewed_ratio, severity=severity, config=config)
            if make_target == "cifar10_type0":
                make_corrupted_cifar10(
                    data_dir=data_dir,
                    corruption_names=[
                        "Snow",
                        "Frost",
                        "Fog",
                        "Brightness",
                        "Contrast",
                        "Spatter",
                        "Elastic",
                        "JPEG",
                        "Pixelate",
                        "Saturate",
                    ],
                    skewed_ratio=skewed_ratio,
                    severity=severity,
                    config=config,
                    postfix="0"
                )
            if make_target == "cifar10_type1":
                make_corrupted_cifar10(
                    data_dir=data_dir,
                    corruption_names=[
                        "Gaussian Noise",
                        "Shot Noise",
                        "Impulse Noise",
                        "Speckle Noise",
                        "Gaussian Blur",
                        "Defocus Blur",
                        "Glass Blur",
                        "Motion Blur",
                        "Zoom Blur",
                        "Original",
                    ],
                    skewed_ratio=skewed_ratio,
                    severity=severity,
                    config=config,
                    postfix="1"
                )
            if make_target == "rotated_mnist":
                make_rotated_mnist(data_dir=data_dir, skewed_ratio=skewed_ratio, severity=severity, config=config)
            if make_target == "shifted_mnist":
                make_shifted_mnist(data_dir=data_dir, skewed_ratio=skewed_ratio, severity=severity, config=config)
            if make_target == "camelyon17_type0":
                make_camelyon17_type0(data_dir=data_dir, skewed_ratio=skewed_ratio, config=config)
            if make_target == "camelyon17_type1":
                make_camelyon17_type1(data_dir=data_dir, skewed_ratio=skewed_ratio, config=config)
            if make_target == "camelyon17_type2":
                make_camelyon17_type2(data_dir=data_dir, skewed_ratio=skewed_ratio, config=config)
make(make_target=None)
import os
import random
import numpy as np
import torch
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import wandb
from torchvision.transforms.functional import normalize
def set_seed(seed):
    """Seed every random number generator used in the project.

    Covers Python's ``random``, the hash seed, NumPy, and torch (plus all
    CUDA devices, with deterministic cuDNN, when a GPU is present).

    Args:
        seed (int): integer for reproducible experiments
    """
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
def get_optimizer(model, config):
    """Resolve the optimizer according to the configs

    Args:
        model: model on which the optimizer is applied on
        config: configuration dict

    Returns:
        A ``torch.optim`` optimizer over ``model.parameters()``.

    Raises:
        NotImplementedError: for an unknown optimizer name.
    """
    opt_cfg = config["optimizer"]
    name = opt_cfg["name"]
    if name == "SGD":
        return torch.optim.SGD(
            model.parameters(),
            lr=opt_cfg["lr"],
            momentum=opt_cfg["momentum"],
            weight_decay=opt_cfg["weight_decay"],
        )
    if name == "Adam":
        return torch.optim.Adam(
            model.parameters(),
            lr=opt_cfg["lr"],
            betas=opt_cfg["betas"],
            weight_decay=opt_cfg["weight_decay"],
        )
    raise NotImplementedError("Optimizer not implemented.")
class GeneralizedCELoss(nn.Module):
    """Generalized cross-entropy loss.

    Per-sample CE weighted by ``p_target**q`` (weight detached), so confident
    samples dominate the gradient; ``q`` comes from ``config["loss"]["GCE_q"]``.
    Returns the loss per datum, not the batch average.
    """

    def __init__(self, config):
        super(GeneralizedCELoss, self).__init__()
        self.q = config["loss"]["GCE_q"]

    def forward(self, logits, targets):
        probs = F.softmax(logits, dim=1)
        if np.isnan(probs.mean().item()):
            raise NameError("GCE_p")
        target_prob = torch.gather(probs, 1, torch.unsqueeze(targets, 1))
        if np.isnan(target_prob.mean().item()):
            raise NameError("GCE_Yg")
        # Detached: the weight reshapes the CE gradient but receives none itself.
        weight = target_prob.squeeze().detach() ** self.q
        return F.cross_entropy(logits, targets, reduction="none") * weight
# evaluation code for testset
def evaluate(model, test_loader, device):
    """Compute test accuracy, split into bias-aligned and conflicting samples.

    Args:
        model: model exposing ``encoder`` (returning a (mean, logvar) pair)
            and ``predict``; switched to eval mode during the loop and back
            to train mode afterwards.
        test_loader: yields (index, images, attr) with attr[:, 0] the class
            label and attr[:, 1] the bias label.
        device: torch device to run on.

    Returns:
        (acc_aligned, acc_conflicting, acc) — accuracies on samples where
        label == bias, where label != bias, and overall.
    """
    model.eval()
    loader = tqdm(test_loader, position=0, leave=False)
    hits_aligned = hits_conflicting = 0
    n_aligned = n_conflicting = 0
    for _, data, attr in loader:
        data = data.to(device)
        label = attr[:, 0].to(device)
        bias = attr[:, 1].to(device)
        with torch.no_grad():
            # Use the encoder mean directly: no sampling noise at eval time.
            params = model.encoder(data)
            assert len(params) == 2  # No new outputs of encoders
            pred = model.predict(params[0]).argmax(1)
        hit = (pred == label).long()
        aligned = (label == bias)
        hits_aligned += hit[aligned].sum()
        hits_conflicting += hit[~aligned].sum()
        n_aligned += hit[aligned].size(0)
        n_conflicting += hit[~aligned].size(0)
    acc_aligned = hits_aligned / float(n_aligned)
    acc_conflicting = hits_conflicting / float(n_conflicting)
    acc = (hits_aligned + hits_conflicting) / (float(n_aligned) + float(n_conflicting))
    model.train()
    return acc_aligned, acc_conflicting, acc
def evaluate_batch(logit, attr, loss):
    """Summarize one batch: correct counts and mean losses by bias alignment.

    Args:
        logit: (B, C) classification logits.
        attr: (B, >=2) attributes; column 0 = class label, column 1 = bias label.
        loss: (B,) per-sample losses.

    Returns:
        (corr_aligned, corr_conflicting, corr, loss_aligned, loss_conflicting,
        loss, aligned_len, conflicting_len, batch_len) where "aligned" means
        class label equals bias label.
    """
    label, bias = attr[:, 0], attr[:, 1]
    hit = (logit.data.argmax(1) == label).long()
    aligned = (label == bias).long()
    mask = aligned.bool()

    aligned_len = aligned.sum()
    conflicting_len = (1 - aligned).sum()
    batch_len = label.size(0)
    assert batch_len == aligned_len + conflicting_len

    corr_aligned = hit[mask].sum()
    corr_conflicting = hit[~mask].sum()
    corr = hit.sum()
    assert corr == corr_aligned + corr_conflicting

    loss_aligned = loss[mask].mean()
    loss_conflicting = loss[~mask].mean()
    loss = loss.mean()
    return corr_aligned, corr_conflicting, corr, loss_aligned, loss_conflicting, loss, aligned_len, conflicting_len, batch_len
def save_img(model_s, model_b, data_loader, config, device):
    """Log a latent-swap visualization to wandb.

    Reconstructs two random images, then re-decodes them with their *bias*
    latents exchanged (signal latents kept), illustrating what each latent
    encodes.

    Args:
        model_s: signal VAE; forward returns (z, logits, mean, logvar).
        model_b: bias VAE with the same forward signature.
        data_loader: loader whose items carry the image batch at index 1.
        config: config dict; the dataset name selects reshaping/unnormalizing.
        device: torch device to run on.
    """
    # Logging image
    set_seed(config["random_state"])
    model_s.eval()
    model_b.eval()
    sample1, sample2 = random.sample(list(data_loader), 2)
    data = torch.stack((sample1[1][0],sample2[1][0]))
    data = data.to(device)
    z_s, logits_s, mean_s, logvar_s = model_s(data)
    z_b, logits_b, mean_b, logvar_b = model_b(data)
    mean = torch.cat((mean_s, mean_b), dim=1)
    # Cross the two samples' bias latents while keeping their signal latents.
    swap1 = torch.cat((mean_s[0],mean_b[1]))
    swap2 = torch.cat((mean_s[1],mean_b[0]))
    x_reconstructed = model_s.reconstruct(mean)
    mean[0] = swap1
    mean[1] = swap2
    swap_reconstr = model_s.reconstruct(mean)
    if config["data"]["dataset"] == "colored_mnist":
        data = data.view(2,3,28,28)
        x_reconstructed = x_reconstructed.view(2,3,28,28)
        swap_reconstr = swap_reconstr.view(2,3,28,28)
    elif (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
        data = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
    import torchvision
    save_img = torchvision.utils.make_grid([data[0],x_reconstructed[0],swap_reconstr[0],
                                            data[1],x_reconstructed[1],swap_reconstr[1]],nrow=3)
    save_img = wandb.Image(save_img, caption="Left: Original image, Middle: Reconstructed image, Right: Keeping signal, swapping bias")
    wandb.log({"Visualization": save_img})
    # BUG FIX: restore training mode (as save_img_adv does); previously the
    # models were left in eval mode after logging.
    model_s.train()
    model_b.train()
def save_img_adv(model_s, model_b, data_loader, epoch, config, device, training=False):
    """Log an adversarial-reconstruction visualization to wandb.

    Picks 5 samples, attacks their *bias* latents with DeepFool against the
    bias classifier, and logs original / plain reconstruction / adversarial
    reconstruction as one grid.

    Args:
        model_s: signal VAE; forward returns (z, logits, mean, logvar),
            also provides ``reconstruct``.
        model_b: bias VAE with the same forward signature plus ``classifier``.
        data_loader: train loaders yield (subset_idx, full_idx, data, attr);
            eval loaders yield (full_idx, data, attr).
        epoch: current epoch, attached to the wandb log entry.
        config: config dict; ``config["perturb"]["overshoot"]`` tunes DeepFool.
        device: torch device to run on.
        training: selects the loader tuple layout and the wandb panel name.
    """
    # Logging image
    model_s.eval()
    model_b.eval()
    rand_batches = random.sample(list(data_loader), 5)
    if training == True:
        data_batches = [item[2] for item in rand_batches] # 1 higher index because we also have subsetindex in trainset
        attr = [item[3] for item in rand_batches]
        data = torch.stack([item[0] for item in data_batches])
        label = torch.stack([item[0,0] for item in attr])
    else:
        data_batches = [item[1] for item in rand_batches]
        attr = [item[2] for item in rand_batches]
        data_unpacked = list()
        attr_unpacked = list()
        for index, item in enumerate(attr):
            # First bias-aligned sample (label == bias) of each batch.
            idx = torch.where(item[:,0] == item[:,1])[0][0]
            data_unpacked.append(data_batches[index][idx])
            attr_unpacked.append(item[idx])
        data = torch.stack(data_unpacked)
        label = torch.stack(attr_unpacked)[:,0]
    data = data.to(device)
    label = label.to(device)
    assert data.shape[0:2] ==torch.Size([5, 3])
    z_s, logits_s, mean_s, logvar_s = model_s(data)
    z_b, logits_b, mean_b, logvar_b = model_b(data)
    # Perturb only the bias latent so the decoded change shows what the
    # bias classifier relies on.
    attack = DeepFool(model_b.classifier,device,steps=10,overshoot=config["perturb"]["overshoot"])
    mean_b_adv, label_adv = attack.forward(mean_b, label)
    mean = torch.cat((mean_s, mean_b), dim=1)
    mean_adv = torch.cat((mean_s, mean_b_adv), dim=1)
    x_reconstructed = model_s.reconstruct(mean)
    x_adv_reconstr = model_s.reconstruct(mean_adv)
    if config["data"]["dataset"] == "colored_mnist":
        data = data.view(5,3,28,28)
        x_reconstructed = x_reconstructed.view(5,3,28,28)
        x_adv_reconstr = x_adv_reconstr.view(5,3,28,28)
    elif (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
        data = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
    import torchvision
    imgs = torch.cat((data, x_reconstructed,x_adv_reconstr))
    save_img = torchvision.utils.make_grid(imgs,nrow=5)
    save_img = wandb.Image(save_img, caption="Top: Original image, Middle: Reconstructed image, Bottom: Reconstructed adv. perturbation")
    if training == True:
        wandb.log({"Adversarial Visualization Training": save_img, "epoch": epoch})
    else:
        wandb.log({"Adversarial Visualization": save_img, "epoch": epoch})
    model_s.train()
    model_b.train()
## DeepFool is adapted from https://adversarial-attacks-pytorch.readthedocs.io/en/latest/_modules/torchattacks/attacks/deepfool.html
class DeepFool:
    r"""
    'DeepFool: A Simple and Accurate Method to Fool Deep Neural Networks'
    [https://arxiv.org/abs/1511.04599]
    Distance Measure : L2
    Arguments:
        model (nn.Module): model to attack.
        steps (int): number of steps. (Default: 50)
        overshoot (float): parameter for enhancing the noise. (Default: 0.02)
    Adaptation Note: This algorithm is designed for the purpose of this work.
    Therefore it does not work with bounded images but unbounded latent dimensions.
    We still call the input "image" for consistency with the original algorithm.
    Additionally the algorithm is optimized by approximating the closest hyperplane by the second highest predicted class.
    This is in order to reduce computational complexity 50fold as it allows parallelizing.
    In my tests the approximation gave the optimal(Oracle being original DeepFool) solution in 4/5 of cases.
    Examples::
        >>> attack = DeepFool(model, steps=50, overshoot=0.02)
        >>> adv_images = attack(images, labels)
    """
    def __init__(self, model, device, steps=50, overshoot=0.02):
        self.model = model
        self.steps = steps
        self.overshoot = overshoot
        self.device = device
        # model must expose num_classes (used for target sampling below)
        self.num_classes = model.num_classes
    def forward(self, images, labels, target_true_label=False):
        """Perturb `images` until the model predicts `target_label` (or steps run out).

        Args:
            images: input batch (here: latent vectors) to perturb.
            labels: true class labels of the batch.
            target_true_label: if True, push predictions towards the true
                label; otherwise towards a random *other* class per sample.

        Returns:
            (adv_images, target_label): perturbed inputs and targets used.
        """
        adv_images = images.clone().detach().to(self.device)
        adv_images.requires_grad = True
        labels = labels.clone().detach().to(self.device)
        batch_size = len(adv_images)
        # "correct" tracks which samples still need perturbing.
        correct = torch.tensor([True]*batch_size).to(self.device)
        if target_true_label:
            target_label = labels
        else:
            # Uniform distribution over all classes except the true one,
            # then sample one target class per item.
            target_labels = torch.ones([batch_size,self.num_classes])/(self.num_classes-1)
            target_labels = target_labels.to(self.device)
            for i in range(self.num_classes):
                target_labels[labels == i,i] = 0
            target_label = torch.multinomial(target_labels,1).squeeze(-1)
        curr_steps = 0
        # Note that with this implementation it's possible that the target label switches between iterations
        while (True in correct) and (curr_steps < self.steps):
            if adv_images.grad is not None:
                adv_images.grad.zero_()
            logits = self.model(adv_images[correct]) # Forward pass only for correct classifications
            values, predicted = torch.topk(logits, 2, dim=1) # Predicted label
            #target_label[correct] = predicted[:,0]
            correct_new = (predicted[:,0]!=target_label[correct])
            #correct_new = (predicted[:,0]==labels[correct])
            # Some indexing to backprop only wrt correctly classified labels
            #diff = values[:,1] - logits.gather(1,labels[correct].unsqueeze(1)).squeeze(-1) # second highest label as target
            diff = logits.gather(1,target_label[correct].unsqueeze(1)).squeeze(-1) - values[:,0] # target label as target
            diff[~correct_new] = 0
            diff_backprop = diff.sum() # "Trick" to backprop wrt all inputs: Summing individual differences
            diff_backprop.backward()
            # Linearized step to the approximated decision boundary:
            # |diff| / ||grad||^2 * grad, reshaped to broadcast per sample.
            delta = (torch.abs(diff[correct_new])/(torch.norm(adv_images.grad[correct][correct_new], p=2,dim=tuple(range(adv_images.ndim)[1:]))**2+1e-8)).view(-1,*(1,)*(adv_images.dim()-1)) * adv_images.grad[correct][correct_new]
            assert not torch.isnan(delta).any()
            # Shrink the active set to samples not yet flipped.
            correct[correct.clone()] = correct_new
            with torch.no_grad():
                adv_images[correct] = (adv_images[correct] + (1+self.overshoot)*delta).detach()
            curr_steps += 1
        return adv_images, target_label
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience.

    Tracks the best (lowest) validation loss; on each improvement by more
    than ``delta`` both models are checkpointed, and after ``patience``
    consecutive non-improving calls ``early_stop`` is set.
    """
    def __init__(self, patience=3, verbose=False, delta=0, path_s = 'checkpoint.pt', path_b = None, trace_func=print, saveEveryEpoch=False):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 3
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
            path_s (str): Checkpoint path for the signal model. Default: 'checkpoint.pt'
            path_b (str): Checkpoint path for the bias model. Default: None
            trace_func (function): trace print function. Default: print
            saveEveryEpoch (bool): additionally checkpoint on every call. Default: False
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0          # calls since the last improvement
        self.best_score = None    # negated best validation loss seen
        self.early_stop = False
        # float("inf") instead of np.Inf: the np.Inf alias was removed in NumPy 2.0.
        self.val_loss_min = float("inf")
        self.delta = delta
        self.path_s = path_s
        self.path_b = path_b
        self.trace_func = trace_func
        self.saveEveryEpoch = saveEveryEpoch
    def __call__(self, val_loss, model_s, model_b, epoch):
        score = -val_loss
        if self.saveEveryEpoch:
            # Derive per-epoch filenames from the base paths ("x.pt" -> "x_epoch_N.pt").
            path_s = self.path_s[:-3] + "_epoch_" + str(epoch) + ".pt"
            path_b = self.path_b[:-3] + "_epoch_" + str(epoch) + ".pt"
            self.save_checkpoint(val_loss, model_s, model_b, path_s, path_b)
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model_s, model_b, self.path_s, self.path_b)
        elif score < self.best_score + self.delta:
            self.counter += 1
            self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
                # BUG FIX: report the epoch, not the patience counter.
                self.trace_func(f'Early Stopping biased model at epoch {epoch}')
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model_s, model_b, self.path_s, self.path_b)
            self.counter = 0
    def save_checkpoint(self, val_loss, model_s, model_b, path_s, path_b):
        """Saves both models when validation loss decreases."""
        if self.verbose:
            self.trace_func(f'Biased val loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving models...\n')
        torch.save(model_s.state_dict(), path_s)
        torch.save(model_b.state_dict(), path_b)
        self.val_loss_min = val_loss
def _reconst_loss_sum(x_reconst, data, config):
    """Batch-summed reconstruction loss, normalized per pixel.

    colored_mnist uses per-pixel BCE; the CIFAR variants first undo the input
    standardization (the comparison happens in image space) and use MSE; the
    Camelyon variants use plain MSE.
    """
    dataset = config["data"]["dataset"]
    if dataset == "colored_mnist":
        per_image = F.binary_cross_entropy(x_reconst, data, reduction='none').sum(dim=(1,2,3))
    elif dataset in ("cifar10_type0", "cifar10_type1"):
        # Backtransform preprocessing standardization before comparing.
        data = normalize(data, -np.divide([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
                         np.divide([1, 1, 1], [0.2023, 0.1994, 0.2010]))
        per_image = F.mse_loss(x_reconst, data, reduction='none').sum(dim=(1,2,3))
    elif dataset in ("camelyon17_type0", "camelyon17_type1", "camelyon17_type2"):
        per_image = F.mse_loss(x_reconst, data, reduction='none').sum(dim=(1,2,3))
    else:
        raise NotImplementedError("reconst_loss")
    # Normalize by values-per-image so the scale is resolution independent.
    per_image = per_image / x_reconst.view(len(x_reconst), -1).shape[1]
    return per_image.sum()


def get_reconst_loss(model_s, model_b, data_loader, device, config, mode="train"):
    """Mean per-pixel VAE reconstruction loss over a whole loader.

    The signal and bias encoder means are concatenated and decoded by the
    signal model's decoder; the reconstruction is scored against the input.

    Args:
        model_s: signal model exposing ``encoder`` and ``decoder``.
        model_b: bias model exposing ``encoder``.
        data_loader: for mode "train" yields (subset_idx, full_idx, data, attr);
            for mode "test" yields (full_idx, data, attr).
        device: torch device to run on.
        config: config dict; the dataset name selects the loss form.
        mode: "train" or "test" — selects the loader tuple layout.

    Returns:
        Scalar tensor with the dataset-wide mean reconstruction loss.
    """
    if mode not in ("train", "test"):
        # Previously an unknown mode silently produced a 0 loss; fail loudly.
        raise NotImplementedError(f"get_reconst_loss mode: {mode}")
    model_s.eval()
    model_b.eval()
    data_loader = tqdm(data_loader, position=0, leave=False)
    total_loss = 0
    for batch in data_loader:
        # The two layouts differ only in the number of leading index fields.
        if mode == "train":
            _, _, data, attr = batch
        else:
            _, data, attr = batch
        data = data.to(device)
        with torch.no_grad():
            # For evaluation take mean directly to not unnecessarily introduce variance
            parameters = model_s.encoder(data)
            parameters_b = model_b.encoder(data)
            assert len(parameters) == 2  # No new outputs of encoders
            x_reconst = model_s.decoder(torch.cat((parameters[0], parameters_b[0]), dim=1))
            total_loss += _reconst_loss_sum(x_reconst, data, config)
    total_loss /= len(data_loader.iterable.dataset)
    print("total reconstruction loss of VAE: {:8.6f}".format(total_loss))
    return total_loss
def early_stop(model_s, model_b, val_loader_biased, stopping_criteria, scheduler_s, scheduler_b, epoch, device, config):
    """Evaluate the full training objective on the biased validation set and
    drive LR scheduling and early stopping with it.

    The loss mirrors training: GCE-style weighted CE for the bias model,
    (1-weight)-weighted CE for the signal model, plus the rescaled VAE
    reconstruction and KL terms over the concatenated latents.

    Args:
        model_s: signal model (``encoder``, ``predict``, ``reconstruct``).
        model_b: bias model (``encoder``, ``predict``).
        val_loader_biased: loader yielding (subset_idx, full_idx, data, attr).
        stopping_criteria: EarlyStopping instance, called with the total loss.
        scheduler_s / scheduler_b: LR schedulers stepped with the total loss
            when ``config["optimizer"]["lr_decay"]`` is set.
        epoch: current epoch (attached to the wandb log entry).
        device: torch device to run on.
        config: config dict (GCE_q, reconst_weight, dataset name, lr_decay).

    Returns:
        bool: ``stopping_criteria.early_stop`` after this evaluation.
    """
    # testing the model
    model_b.eval()
    model_s.eval()
    val_loader_biased = tqdm(val_loader_biased, position=0, leave=False)
    total_loss = 0
    total_reconst_loss = 0
    for idx, (subset_idx, full_idx, data, attr) in enumerate(val_loader_biased):
        label = attr[:, 0]
        data = data.to(device)
        label = label.to(device)
        with torch.no_grad():
            # For evaluation take mean directly to not unnecessarily introduce variance
            parameters_b = model_b.encoder(data)
            assert len(parameters_b) == 2 # No new outputs of encoders
            logits_b = model_b.predict(parameters_b[0])
            parameters_s = model_s.encoder(data)
            assert len(parameters_s) == 2 # No new outputs of encoders
            logits_s = model_s.predict(parameters_s[0])
            loss_s = F.cross_entropy(logits_s, label,reduction="none")
            loss_b = F.cross_entropy(logits_b, label,reduction="none")
            mean = torch.cat((parameters_s[0], parameters_b[0]), dim=1)
            logvar = torch.cat((parameters_s[1], parameters_b[1]), dim=1)
            x_reconst = model_s.reconstruct(mean)
            # GCE weighting: p of the true class under the bias model.
            p = F.softmax(logits_b, dim=1)
            if np.isnan(p.mean().item()):
                raise NameError("GCE_p")
            Yg = torch.gather(p, 1, torch.unsqueeze(label, 1))
            # modify gradient of cross entropy
            loss_weight = (Yg.squeeze().detach()) # Do we really need *self.q? I think like now is correct.
            if np.isnan(Yg.mean().item()):
                raise NameError("GCE_Yg")
            # note that we don't return the average but the loss for each datum separately
            loss_b = (loss_b * loss_weight**config["loss"]["GCE_q"])
            loss_s = (loss_s * (1-loss_weight)**config["loss"]["GCE_q"])
            # VAE losses
            # Compute reconstruction loss and kl divergence for both encoders together
            # Sum over dimensions, average over batch to have loss weighting hyperparameters being independent of batch size
            if (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
                data_backtransformed = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
                reconst_loss = F.mse_loss(x_reconst, data_backtransformed, reduction='none').sum(dim=(1,2,3))
            elif config["data"]["dataset"] == "colored_mnist":
                reconst_loss = F.binary_cross_entropy(x_reconst, data, reduction='none').sum(dim=(1,2,3))
            elif (config["data"]["dataset"] == "camelyon17_type0" or config["data"]["dataset"] == "camelyon17_type1"or config["data"]["dataset"] == "camelyon17_type2"):
                reconst_loss = F.mse_loss(x_reconst, data, reduction='none').sum(dim=(1,2,3))
            else: raise NotImplementedError("reconst_loss")
            kl_div = - 0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp(),dim=1)
            # Rescaling both VAE losses in order to be invariant to image resolution in hyperparametertuning.
            reconst_loss /= x_reconst.view(len(x_reconst),-1).shape[1]
            kl_div /= x_reconst.view(len(x_reconst),-1).shape[1]
            reconst_loss *=config["loss"]["reconst_weight"]
            total_loss = total_loss + loss_s.sum() + loss_b.sum() + reconst_loss.sum() + kl_div.sum()
            total_reconst_loss += reconst_loss.sum()
    total_loss /= len(val_loader_biased.iterable.dataset)
    total_reconst_loss /= len(val_loader_biased.iterable.dataset)
    # scheduling
    if config["optimizer"]["lr_decay"]:
        scheduler_s.step(total_loss)
        scheduler_b.step(total_loss)
    wandb.log({"loss_biasedval": total_loss, 'reconstruction_loss_biasedval': total_reconst_loss, "epoch": epoch})
    print("total validation loss {:8.3f}".format(total_loss))
    stopping_criteria(total_loss, model_s, model_b, epoch)
    model_s.train()
    model_b.train()
    return stopping_criteria.early_stop
def capture_dataset(data_loader, config):
    """Log a small grid of bias-aligned samples to wandb.

    Takes the first batch of ``data_loader``, keeps the samples whose class
    label equals the bias label, and logs three rows of one image per class
    under the key "Dataset". Only one batch is consumed.
    """
    import torchvision

    for _, _, data, attr in data_loader:
        if config["data"]["dataset"] in ("cifar10_type0", "cifar10_type1"):
            # Undo the CIFAR standardization so the logged images are viewable.
            data = normalize(data,
                             -np.divide([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
                             np.divide([1, 1, 1], [0.2023, 0.1994, 0.2010]))
        labels = attr[:, 0]
        biases = attr[:, 1]
        keep = labels == biases
        data_aligned = data[keep]
        label_aligned = labels[keep]
        classes = np.unique(label_aligned)
        # Three example rows, one column per class. Assumes at least three
        # bias-aligned samples per class in the batch — TODO confirm.
        tiles = [data_aligned[label_aligned == cls][row]
                 for row in range(3) for cls in classes]
        grid = torchvision.utils.make_grid(tiles, nrow=len(classes))
        wandb.log({"Dataset": wandb.Image(grid)})
        break
def bias_visualization(model_s, model_b, data_loader, config, device):
    """Log wandb image grids contrasting originals with reconstructions whose
    bias latent was flipped, either adversarially (DeepFool on the biased
    classifier) or by swapping in another sample's bias code.

    Assumes the loader yields (subset_idx, full_idx, data, attr) with
    attr[:, 0] the class label — TODO confirm against IdxDataset2.
    """
    # Visualizing Bias.
    model_s.eval()
    model_b.eval()
    # Five random batches; one image is picked from each.
    rand_batches = random.sample(list(data_loader), 5)
    data_batches = [item[2] for item in rand_batches]
    attr = [item[3] for item in rand_batches]
    data_unpacked = list()
    attr_unpacked = list()
    for index, item in enumerate(attr):
        # Extract 5 bias-conflicting images (w/o using bias label as it's theoretically unknown)
        batch = data_batches[index].to(device)
        item = item.to(device)
        parameters_b = model_b.encoder(batch)
        assert len(parameters_b) == 2 # No new outputs of encoders
        pred_b = model_b.predict(parameters_b[0]).argmax(1)
        correct_b = (pred_b == item[:,0]).long()
        parameters_s = model_s.encoder(batch)
        assert len(parameters_s) == 2 # No new outputs of encoders
        pred_s = model_s.predict(parameters_s[0]).argmax(1)
        correct_s = (pred_s == item[:,0]).long()
        # Keep samples on which BOTH classifiers are correct.
        bias_aligned = (correct_s*correct_b).bool()
        # First qualifying sample of the batch; IndexError if none qualifies.
        data_unpacked.append(data_batches[index][bias_aligned.cpu()][0])
        attr_unpacked.append(item[bias_aligned.cpu()][0])
    data = torch.stack(data_unpacked)
    label = torch.stack(attr_unpacked)[:,0]
    data = data.to(device)
    label = label.to(device)
    assert data.shape[0:2] ==torch.Size([5, 3])
    z_s, logits_s, mean_s, logvar_s = model_s(data)
    z_b, logits_b, mean_b, logvar_b = model_b(data)
    # Adversarially perturb the bias latent so the biased classifier flips.
    attack = DeepFool(model_b.classifier,device,steps=20,overshoot=config["perturb"]["overshoot"])
    mean_b_adv, label_adv = attack.forward(mean_b, label)
    mean = torch.cat((mean_s, mean_b), dim=1)
    mean_adv = torch.cat((mean_s, mean_b_adv), dim=1)
    x_reconstructed = model_s.reconstruct(mean)
    x_adv_reconstr = model_s.reconstruct(mean_adv)
    ##### FOR DFA THEN FIND SAMPLES WHERE BIAS PREDICTS LABEL OF DATA#####
    # Create bias-aligned samples by finding samples whose bias dimensions makes biased classifier predict correct label.
    j = 0
    mean_b_swap = torch.zeros_like(mean_b)
    while j<5:
        # Rejection-sample random batches until a donor bias code is found
        # whose biased prediction matches the adversarial target label.
        rand_batch = random.sample(list(data_loader), 1)
        batch_data = rand_batch[0][2].to(device)
        #batch_label = rand_batch[0][2][:,0].to(device)
        parameters_b = model_b.encoder(batch_data)
        assert len(parameters_b) == 2 # No new outputs of encoders
        pred_b = model_b.predict(parameters_b[0]).argmax(1)
        corr_bias = (pred_b == label_adv[j])
        if corr_bias.sum()>0:
            mean_b_swap[j] = parameters_b[0][corr_bias][0]
            # NOTE(review): j advances only on success, so this loops forever
            # if no batch ever yields a match — confirm acceptable.
            j+=1
    mean_swap = torch.cat((mean_s, mean_b_swap), dim=1)
    x_swap_reconstr = model_s.reconstruct(mean_swap)
    if config["data"]["dataset"] == "colored_mnist":
        data = data.view(5,3,28,28)
        x_reconstructed = x_reconstructed.view(5,3,28,28)
        x_adv_reconstr = x_adv_reconstr.view(5,3,28,28)
        x_swap_reconstr = x_swap_reconstr.view(5,3,28,28)
    elif (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
        data = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
    import torchvision
    imgs = torch.cat((data, x_reconstructed,x_adv_reconstr))
    save_img = torchvision.utils.make_grid(imgs,nrow=5)
    save_img = wandb.Image(save_img, caption="Top: Original image, Middle: Reconstructed bias-conflicting image, Bottom: Reconstructed bias-aligned image by adv. perturbation")
    wandb.log({"Adversarial Visualization Ours": save_img})
    imgs = torch.cat((data, x_reconstructed,x_swap_reconstr))
    save_img = torchvision.utils.make_grid(imgs,nrow=5)
    save_img = wandb.Image(save_img, caption="Top: Original image, Middle: Reconstructed bias-conflicting image, Bottom: Reconstructed bias-aligned image by swapping")
    wandb.log({"Adversarial Visualization DisEnt": save_img})
    model_s.train()
    model_b.train()
| 28,920 | 45.646774 | 229 | py |
Signal-is-Harder | Signal-is-Harder-main/train.py | import os
import yaml
import argparse
import wandb
import time
import sys
from tqdm import tqdm
import numpy as np
from uuid import uuid4
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.transforms.functional import normalize
from data.util import get_dataset, IdxDataset, IdxDataset2
from module.util import get_model
from util import set_seed, get_optimizer, evaluate, \
GeneralizedCELoss, evaluate_batch, save_img, save_img_adv, \
EarlyStopping, early_stop, capture_dataset, bias_visualization
def main():
    """Run one full debiasing experiment.

    Loads ``config.yaml``, applies command-line overrides, trains the
    signal/bias model pair with early stopping, evaluates on the test set,
    appends the signal accuracy to a results file and logs artifacts to wandb.
    """
    # configuration
    with open("config.yaml", "r") as f:
        config = yaml.safe_load(f)
    # manual overwriting of configuration for scripts
    print(sys.argv)
    parser = argparse.ArgumentParser()
    parser.add_argument("--name", default=None, help="Name of experiment")
    parser.add_argument("--bias_conflicting_perc", default=None, type=float, help="Percentage of bias conflicting samples in dataset")
    parser.add_argument("--severity", default=None, type=int, help="Severity of bias")
    parser.add_argument("--dataset", default=None, help="Choice of dataset")
    parser.add_argument("--model_tag", default=None, help="Choice of model")
    parser.add_argument("--q", default=None, type=float, help="q for GCE loss")
    parser.add_argument("--random_state", default=None, type=int, help="Random state for seed")
    parser.add_argument("--results_filename", default=None, help="Name of file to store results")
    parser.add_argument("--VAE_weight", default=None, type=float, help="Weight of KL&Reconstruction loss")
    parser.add_argument("--reconst_weight", default=None, type=float, help="Weight of Reconstruction loss")
    args = parser.parse_args()
    # Replace config entries for which a CLI value was supplied. Relies on
    # vars(args) preserving the declaration order above, which must match
    # the order of `updateable`.
    updateable = [config["name"], config["data"]["bias_conflicting_perc"], config["data"]["severity"],
                  config["data"]["dataset"], config["model"]["tag"], config["loss"]["GCE_q"],
                  config["random_state"], config["results_filename"],
                  config["loss"]["VAE_weight"], config["loss"]["reconst_weight"]]
    values = []
    for i, v in enumerate(vars(args).values()):
        if v is not None:
            values.append(v)
            print("Overwriting configuration")
        else:
            values.append(updateable[i])
    (config["name"], config["data"]["bias_conflicting_perc"], config["data"]["severity"],
     config["data"]["dataset"], config["model"]["tag"], config["loss"]["GCE_q"],
     config["random_state"], config["results_filename"],
     config["loss"]["VAE_weight"], config["loss"]["reconst_weight"]) = values
    # Configuration sanity check: warn about untested dataset/model pairings.
    # BUG FIX: the (cifar10_type1, ResNet20) clause was listed twice; the
    # duplicate has been removed.
    # NOTE(review): camelyon17_type2 is handled by the reconstruction-loss code
    # but is not listed here, so it always triggers this warning — confirm.
    if not (
        (config["data"]["dataset"] == "colored_mnist" and config["model"]["tag"] == "MLP") or
        (config["data"]["dataset"] == "colored_mnist" and config["model"]["tag"] == "MLP_VAE") or
        (config["data"]["dataset"] == "cifar10_type0" and config["model"]["tag"] == "ResNet20") or
        (config["data"]["dataset"] == "cifar10_type1" and config["model"]["tag"] == "ResNet20") or
        (config["data"]["dataset"] == "cifar10_type0" and config["model"]["tag"] == "ResNet_VAE") or
        (config["data"]["dataset"] == "cifar10_type1" and config["model"]["tag"] == "ResNet_VAE") or
        (config["data"]["dataset"] == "camelyon17_type0" and config["model"]["tag"] == "ResNet_VAE") or
        (config["data"]["dataset"] == "camelyon17_type1" and config["model"]["tag"] == "ResNet_VAE")
    ):
        print("Are you sure you want to use the dataset "+config["data"]["dataset"]+" with the model "+ config["model"]["tag"]+"?")
    # define variables from config
    batch_size = config["train"]["batch_size"]
    epochs = config["train"]["epochs"]
    random_state = config["random_state"]
    # wandb support ("disabled" still lets wandb.* calls run as no-ops)
    mode = "online" if config['wandb_logging'] else "disabled"
    wandb.init(
        project="Signalisharder",
        entity="username",
        config=config,
        mode=mode
    )
    wandb.run.name = wandb.run.name.split("-")[-1] + "-" + config['name']
    print("Running experiment: {}".format(config["name"]))
    # set seed
    set_seed(random_state)
    # set device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"\nUsing device: {device}")
    # load dataset
    train_dataset = get_dataset(
        config,
        dataset_split="train"
    )
    test_dataset = get_dataset(
        config,
        dataset_split="eval"
    )
    # Adapt train dataset twice to get indices of subset as well as full dataset
    train_dataset = IdxDataset(train_dataset)
    test_dataset = IdxDataset(test_dataset)
    train_dataset_splitted = IdxDataset2(train_dataset)
    train_loader = DataLoader(
        train_dataset_splitted,
        batch_size=batch_size,
        shuffle=True,
        num_workers=1,
        pin_memory=True,
        drop_last=True
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=256,
        shuffle=False,
        num_workers=1,
        pin_memory=True,
    )
    # define signal and bias model
    model_s = get_model(config).to(device)
    model_b = get_model(config).to(device)
    # The bias network's decoder is never used (the signal model's decoder
    # reconstructs from the concatenated latents), so freeze it.
    for p in model_b.decoder.parameters():
        p.requires_grad = False
    # define optimizer
    optimizer_s = get_optimizer(model_s, config)
    optimizer_b = get_optimizer(model_b, config)
    # define scheduler (patience depends on the architecture)
    if config["model"]["tag"] == "MLP_VAE":
        patience = config["early_stop"]["patience_MLP"]
    elif config["model"]["tag"] == "ResNet_VAE":
        patience = config["early_stop"]["patience_ResNet"]
    else:
        raise NotImplementedError("Patience")
    scheduler_s = optim.lr_scheduler.ReduceLROnPlateau(optimizer_s, verbose=True, patience=int(patience/2)-1, factor=config["optimizer"]["lr_gamma"], threshold=0.00001)
    scheduler_b = optim.lr_scheduler.ReduceLROnPlateau(optimizer_b, verbose=True, patience=int(patience/2)-1, factor=config["optimizer"]["lr_gamma"], threshold=0.00001)
    # define loss function
    criterion_s = nn.CrossEntropyLoss(reduction='none')
    criterion_b = GeneralizedCELoss(config)
    # early stopping: unique checkpoint paths per run (renamed from `id`,
    # which shadowed the builtin)
    os.makedirs("./saved_models/ours_s/", exist_ok=True)
    os.makedirs("./saved_models/ours_b/", exist_ok=True)
    timestamp = time.strftime(' %d-%b-%Y_%H:%M', time.localtime())
    run_id = str(uuid4())
    save_path_s = "./saved_models/ours_s/" + config["name"] + timestamp + run_id + ".pt"
    save_path_b = "./saved_models/ours_b/" + config["name"] + timestamp + run_id + ".pt"
    stopping_criteria_vae = EarlyStopping(patience=patience, verbose=True, path_s=save_path_s, path_b=save_path_b, delta=0.00001) # Same as scheduler
    early_stop_v = False
    # training & validation & test
    for epoch in range(epochs):
        if not early_stop_v:
            train_dataset_splitted.make_train()
            train(model_s, model_b, train_loader, early_stop_v, optimizer_s, optimizer_b, criterion_s, criterion_b, epoch, epochs, device, config)
            train_dataset_splitted.make_biased_val() # Biased validation set to determine early stopping of vae
            early_stop_v = early_stop(model_s, model_b, train_loader, stopping_criteria_vae, scheduler_s, scheduler_b, epoch, device, config)
            if early_stop_v: # Revert models to their early-stopping checkpoints
                model_s.load_state_dict(torch.load(save_path_s, map_location=device))
                model_b.load_state_dict(torch.load(save_path_b, map_location=device))
                for p in model_s.encoder.parameters(): # Freeze signal part of VAE.
                    p.requires_grad = False
                for p in model_s.decoder.parameters():
                    p.requires_grad = False
                for p in model_b.parameters():
                    p.requires_grad = False
                train_dataset_splitted.make_biased_val()
        else:
            break
        validate(model_s, model_b, test_loader, epoch, device, config) # Important: Until having made a decision we use the test set as validation set for model analysis!
    test_acc_s, test_acc_b = test(model_s, model_b, test_loader, epochs, device, config)
    # Saving result & Checkpoint
    with open(config["results_filename"]+'.txt', 'a') as f:
        f.writelines((['{} signal: {:8.4f}\n'.format(config["name"], test_acc_s)]))
    # Save images to wandb
    save_img(model_s, model_b, test_loader, config, device)
    for i in range(5):
        save_img_adv(model_s, model_b, test_loader, epoch, config, device, training=False)
    bias_visualization(model_s, model_b, train_loader, config, device) # Using biased validation set to have (enough) bias-aligned images
    wandb.finish(quiet=True)
    # Checkpoints are only needed during the run; clean them up.
    os.remove(save_path_s)
    os.remove(save_path_b)
def train(
    model_s,
    model_b,
    train_loader,
    early_stop_v,
    optimizer_s,
    optimizer_b,
    criterion_s,
    criterion_b,
    epoch,
    epochs,
    device,
    config
):
    """Run one training epoch of the signal (s) / bias (b) model pair.

    Args:
        model_s: signal VAE (its decoder reconstructs from both latents).
        model_b: bias VAE; trained with the generalized CE loss.
        train_loader: yields (subset_idx, full_idx, data, attr).
        early_stop_v: when True the bias model is switched to eval mode.
        optimizer_s, optimizer_b: optimizers for the two models.
        criterion_s: per-sample CE loss for the signal model.
        criterion_b: GeneralizedCELoss for the bias model.
        epoch: current epoch (progress bar / logging).
        epochs: max number of epochs (progress bar).
        device: current device (cpu or gpu).
        config: experiment configuration dict.
    """
    train_loader = tqdm(train_loader, position=0, leave=False)
    train_loader.set_description(f"Epoch [{epoch}/{epochs}]")
    # Epoch totals, split by bias-aligned / bias-conflicting samples.
    total_corr_aligned_s, total_corr_conflicting_s, total_corr_s, total_count_aligned, total_count_conflicting, total_count = 0, 0, 0, 0, 0, 0
    total_corr_aligned_b, total_corr_conflicting_b, total_corr_b, = 0, 0, 0
    # NOTE(review): these *_adv counters are never updated in this loop, so the
    # "acc_s_train*_adv" metrics logged below are always 0 — confirm intent.
    total_corr_aligned_s_adv, total_corr_conflicting_s_adv, total_corr_s_adv, = 0, 0, 0
    # training loop
    model_s.train()
    model_b.train()
    if early_stop_v:
        model_b.eval()
    for idx, (subset_idx, full_idx, data, attr) in enumerate(train_loader):
        data, attr = data.to(device), attr.to(device)
        label = attr[:, 0] # Assuming label is in first column and bias in second of variable attr!
        # bias = attr[:, 1]
        # Getting predictions
        z_s, logits_s, mean_s, logvar_s = model_s(data)
        z_b, logits_b, mean_b, logvar_b = model_b(data)
        # z_s_avging_pos = z_s[label.bool()][torch.randperm(sum(label==1))]
        # z_s_avging_neg = z_s[~label.bool()][torch.randperm(sum(label==0))]
        # z_s[label.bool()] = (z_s[label.bool()] + z_s_avging_pos.detach())/2 ###Detach yes/no?
        # z_s[~label.bool()] = (z_s[~label.bool()] + z_s_avging_neg.detach())/2 ###Detach yes/no?
        # logits_s = model_s.predict(z_s)
        # Concatenate signal and bias latents; one decoder reconstructs both.
        z = torch.cat((z_s, z_b), dim=1)
        mean = torch.cat((mean_s, mean_b), dim=1)
        logvar = torch.cat((logvar_s, logvar_b), dim=1)
        x_reconst = model_s.reconstruct(z)
        # VAE losses
        # Compute reconstruction loss and kl divergence for both encoders together
        # Sum over dimensions, average over batch to have loss weighting hyperparameters being independent of batch size
        if (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
            data_backtransformed = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
            reconst_loss = F.mse_loss(x_reconst, data_backtransformed, reduction='none').sum(dim=(1,2,3))
        elif config["data"]["dataset"] == "colored_mnist":
            reconst_loss = F.binary_cross_entropy(x_reconst, data, reduction='none').sum(dim=(1,2,3))
        elif (config["data"]["dataset"] == "camelyon17_type0" or config["data"]["dataset"] == "camelyon17_type1" or config["data"]["dataset"] == "camelyon17_type2"):
            reconst_loss = F.mse_loss(x_reconst, data, reduction='none').sum(dim=(1,2,3))
        else: raise NotImplementedError("reconst_loss")
        kl_div = - 0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp(),dim=1)
        # Rescaling both VAE losses in order to be invariant to image resolution in hyperparametertuning.
        reconst_loss /= x_reconst.view(len(x_reconst),-1).shape[1]
        kl_div /= x_reconst.view(len(x_reconst),-1).shape[1]
        reconst_loss *=config["loss"]["reconst_weight"]
        # 1-yhat_b instead of RDS: upweight samples the biased model finds hard.
        prob_b = F.softmax(logits_b, dim=1)
        if np.isnan(prob_b.mean().item()):
            raise NameError("prob_b")
        # Probability the bias model assigns to the true class.
        y_hat_b = torch.gather(prob_b, 1, torch.unsqueeze(label, 1)).squeeze().detach().cpu()
        if np.isnan(y_hat_b.mean().item()):
            raise NameError("y_hat_b")
        loss_weight = (1-y_hat_b)**config["loss"]["GCE_q"] # 1-yhat for hard-to-learn samples.
        rel_diff_score = loss_weight.detach().to(device)
        # Calculate and weigh classifier losses
        loss_indiv_s = criterion_s(logits_s,label)* rel_diff_score
        loss_indiv_b = criterion_b(logits_b,label)
        # Evaluate metrics for logging and backpropagating
        corr_aligned_s, corr_conflicting_s, corr_s, loss_aligned_s, loss_conflicting_s, loss_s, aligned_len, conflicting_len, batch_len = evaluate_batch(logits_s,attr,loss_indiv_s)
        corr_aligned_b, corr_conflicting_b, corr_b, loss_aligned_b, loss_conflicting_b, loss_b = evaluate_batch(logits_b,attr,loss_indiv_b)[0:6]
        if torch.isnan(loss_s):
            raise NameError('loss_s_update')
        if torch.isnan(loss_b):
            raise NameError('loss_b_update')
        # Backprop model: both classifiers plus the shared VAE terms in one step.
        optimizer_s.zero_grad()
        optimizer_b.zero_grad()
        loss = loss_s + loss_b + reconst_loss.mean() + kl_div.mean()
        loss.backward()
        optimizer_s.step()
        optimizer_b.step()
        # Calculate metrics for logging
        total_corr_aligned_s += corr_aligned_s
        total_corr_conflicting_s += corr_conflicting_s
        total_corr_s += corr_s
        total_corr_aligned_b += corr_aligned_b
        total_corr_conflicting_b += corr_conflicting_b
        total_corr_b += corr_b
        total_count_aligned += aligned_len
        total_count_conflicting += conflicting_len
        total_count += batch_len
        train_loader.set_postfix({"loss_s": "{:.3f}".format(loss_s.item()), "loss_b": "{:.3f}".format(loss_b.item()),
            "acc_s": "{:.3f}".format(corr_s.item() / batch_len), "acc_b": "{:.3f}".format(corr_b.item() / batch_len)})
        wandb.log({"loss_s": loss_s, "loss_s_align": loss_aligned_s, "loss_s_conflict": loss_conflicting_s, "reconstruction_loss": reconst_loss.mean()})
        wandb.log({"loss_b": loss_b, "loss_b_align": loss_aligned_b, "loss_b_conflict": loss_conflicting_b, "loss": loss})
    # Once per epoch: adversarial-example visualization and aggregate accuracies.
    if config["wandb_logging"]:
        save_img_adv(model_s, model_b, train_loader, epoch, config, device, training=True)
    wandb.log({"acc_s_train": total_corr_s / total_count, "acc_s_train_align": total_corr_aligned_s / total_count_aligned,
        "acc_s_train_conflict": total_corr_conflicting_s / total_count_conflicting, "epoch": epoch})
    wandb.log({"acc_b_train": total_corr_b / total_count, "acc_b_train_align": total_corr_aligned_b / total_count_aligned,
        "acc_b_train_conflict": total_corr_conflicting_b / total_count_conflicting, "epoch": epoch})
    wandb.log({"acc_s_train_adv": total_corr_s_adv / total_count, "acc_s_train_align_adv": total_corr_aligned_s_adv / total_count_aligned,
        "acc_s_train_conflict_adv": total_corr_conflicting_s_adv / total_count_conflicting, "epoch": epoch})
    print(
        "| epoch {:3d} | training accuracy_biased {:8.3f}".format(
            epoch, total_corr_b / total_count
        )
    )
def validate(model_s, model_b, val_loader, epoch, device, config):
    """Evaluate both models on the validation loader and log the accuracies.

    Args:
        model_s: signal model.
        model_b: bias model.
        val_loader: loader with the validation data.
        epoch: current epoch, attached to the wandb logs.
        device: current device (cpu or gpu).
        config: experiment configuration dict.
    """
    # Switch to eval mode for the measurements.
    model_s.eval()
    model_b.eval()
    acc_align_s, acc_confl_s, acc_s = evaluate(model_s, val_loader, device)
    acc_align_b, acc_confl_b, acc_b = evaluate(model_b, val_loader, device)
    # Optionally log adversarial-perturbation visualizations as well.
    if config["loss"]["perturbation"]:
        save_img_adv(model_s, model_b, val_loader, epoch, config, device)
    wandb.log({"acc_s_val": acc_s, "acc_s_val_align": acc_align_s, "acc_s_val_conflict": acc_confl_s, "epoch": epoch})
    wandb.log({"acc_b_val": acc_b, "acc_b_val_align": acc_align_b, "acc_b_val_conflict": acc_confl_b, "epoch": epoch})
    print("validation accuracy of unbiased model {:8.3f}".format(acc_s))
    print("validation accuracy of biased model {:8.3f}".format(acc_b))
def test(model_s, model_b, test_loader, epochs, device, config):
    """Final evaluation of both models on the test loader.

    Logs the accuracies to wandb (tagged with the final epoch index) and
    returns them.

    Returns:
        Tuple ``(test_acc_s, test_acc_b)`` — overall test accuracy of the
        signal and bias model respectively.
    """
    # Switch to eval mode for the measurements.
    model_s.eval()
    model_b.eval()
    test_acc_aligned_s, test_acc_conflicting_s, test_acc_s = evaluate(model_s, test_loader, device)
    test_acc_aligned_b, test_acc_conflicting_b, test_acc_b = evaluate(model_b, test_loader, device)
    wandb.log({"acc_s_test": test_acc_s, "acc_s_test_align": test_acc_aligned_s, "acc_s_test_conflict": test_acc_conflicting_s, "epoch": epochs})
    wandb.log({"acc_b_test": test_acc_b, "acc_b_test_align": test_acc_aligned_b, "acc_b_test_conflict": test_acc_conflicting_b, "epoch": epochs})
    print("test accuracy of unbiased model {:8.3f}".format(test_acc_s))
    print("test accuracy of biased model {:8.3f}".format(test_acc_b))
    return test_acc_s, test_acc_b
if __name__ == "__main__":
main() | 18,917 | 48.010363 | 285 | py |
Signal-is-Harder | Signal-is-Harder-main/module/resnet_vae.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torchvision.models import resnet18
from module.resnet import resnet20
class ResNet_VAE(nn.Module):
    """VAE with a ResNet18-based encoder/decoder and a linear classifier head.

    ``forward`` returns ``(z, logits, mean, logvar)``; ``reconstruct`` expects
    the concatenation of a signal and a bias latent (see ResNet18Dec).
    """

    def __init__(self, num_classes = 10, bottleneck=512):
        super(ResNet_VAE, self).__init__()
        self.encoder = ResNet_Encoder(bottleneck)
        self.decoder = ResNet18Dec(bottleneck=bottleneck)
        self.classifier = ResNet_Classifier(num_classes = num_classes, bottleneck=bottleneck)

    def forward(self, x):
        """Encode, draw a latent sample and classify it."""
        mean, logvar = self.encoder(x)
        latent = self.reparameterization(mean, logvar)
        return latent, self.predict(latent), mean, logvar

    def reconstruct(self, z):
        """Decode a latent code back to image space."""
        return self.decoder(z)

    def predict(self, z):
        """Class logits for a latent code."""
        return self.classifier(z)

    def reparameterization(self, mu, logvar):
        """Differentiable sample z ~ N(mu, sigma); 1e-8 guards sigma == 0."""
        std = logvar.mul(0.5).exp_()
        return torch.distributions.Normal(mu, std+1e-8).rsample()
class ResNet_Classifier(nn.Module):
    """Classification head on the latent code: ReLU followed by one Linear."""

    def __init__(self, num_classes = 10, bottleneck = 64):
        super(ResNet_Classifier, self).__init__()
        # num_classes is kept as an attribute (used e.g. by attack code).
        self.num_classes = num_classes
        self.classifier = nn.Sequential(nn.ReLU(), nn.Linear(bottleneck, num_classes))

    def forward(self, z):
        """Return class logits for latent batch ``z``."""
        return self.classifier(z)
class ResNet_Encoder(nn.Module):
    """ResNet18-based encoder producing the (mean, logvar) of a latent Gaussian.

    The torchvision ResNet18 trunk is reused with its first conv replaced by a
    4x4 stride-2 conv and the avgpool/fc head removed; two 2x2 convs map the
    final feature map to mean and logvar.
    """

    def __init__(self, bottleneck=512):
        super(ResNet_Encoder, self).__init__()
        resnet = resnet18() # Make sure to put bottleneck = 512
        self.conv1 = nn.Conv2d(3, 64, kernel_size=4,
                               stride=2, padding=1, bias=False)
        # Splice the replacement conv into the ResNet18 trunk, dropping the
        # original conv1 (index 0), maxpool (index 3) and the avgpool/fc tail.
        self.encoder = torch.nn.Sequential(self.conv1, *(list(resnet.children())[1:3]), *(list(resnet.children())[4:-2]))
        self.conv_mean = nn.Conv2d(bottleneck, bottleneck, kernel_size=2, stride=1, padding=0)
        self.conv_logvar = nn.Conv2d(bottleneck, bottleneck, kernel_size=2, stride=1, padding=0)
        # NOTE(review): fc_mean/fc_logvar are never used in forward(); kept so
        # existing checkpoints still load — confirm before removing.
        self.fc_mean = nn.Linear(bottleneck, bottleneck)
        self.fc_logvar = nn.Linear(bottleneck, bottleneck)

    def forward(self, x):
        x = self.encoder(x)
        mean = self.conv_mean(x)
        logvar = self.conv_logvar(x)
        # BUG FIX: squeeze only the trailing spatial dims. The previous bare
        # .squeeze() also dropped the batch dimension when batch size was 1,
        # returning a 1-D latent instead of (1, bottleneck).
        mean = mean.squeeze(-1).squeeze(-1)
        logvar = logvar.squeeze(-1).squeeze(-1)
        logvar = logvar.clamp(max=5) # Numerical stability. Equals max(std)==12.1825
        return mean, logvar
class ResizeConv2d(nn.Module):
    """Upsample by ``scale_factor`` via interpolation, then apply a padded conv.

    Interpolate-then-convolve is used here in place of a transposed
    convolution. The conv always uses stride 1 and padding 1.
    """

    def __init__(self, in_channels, out_channels, kernel_size, scale_factor, mode='nearest'):
        super().__init__()
        self.scale_factor = scale_factor
        self.mode = mode
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=1)

    def forward(self, x):
        upsampled = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
        return self.conv(upsampled)
class BasicBlockDec(nn.Module):
    """Decoder counterpart of a ResNet BasicBlock.

    Applies conv2/bn2/ReLU then conv1/bn1, adds a shortcut and applies a final
    ReLU. With ``stride > 1`` the block upsamples spatially via ResizeConv2d
    and reduces the channel count to ``in_planes / stride``.
    """

    def __init__(self, in_planes, stride=1):
        super().__init__()
        planes = int(in_planes / stride)
        self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(in_planes)
        if stride == 1:
            # Identity-shaped path: channels and resolution unchanged.
            self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(planes)
            self.shortcut = nn.Sequential()
        else:
            # Upsampling path: both main branch and shortcut resize/re-channel.
            self.conv1 = ResizeConv2d(in_planes, planes, kernel_size=3, scale_factor=stride)
            self.bn1 = nn.BatchNorm2d(planes)
            self.shortcut = nn.Sequential(
                ResizeConv2d(in_planes, planes, kernel_size=3, scale_factor=stride),
                nn.BatchNorm2d(planes),
            )

    def forward(self, x):
        hidden = torch.relu(self.bn2(self.conv2(x)))
        hidden = self.bn1(self.conv1(hidden))
        hidden = hidden + self.shortcut(x)
        return torch.relu(hidden)
class ResNet18Dec(nn.Module):
    """ResNet18-style decoder: latent vector -> sigmoid image with nc channels.

    The input latent has 2*bottleneck features (signal and bias latents
    concatenated); five upsampling stages each double the spatial size,
    so a 1x1 start yields a 32x32 output.
    """

    def __init__(self, num_Blocks=[2,2,2,2], bottleneck=512, nc=3):
        super().__init__()
        self.in_planes = 2 * bottleneck  # concatenated signal + bias latents
        # NOTE: self.linear is unused in forward (kept for state-dict parity).
        self.linear = nn.Linear(2 * bottleneck, 2 * bottleneck)
        self.layer4 = self._make_layer(BasicBlockDec, int(bottleneck), num_Blocks[3], stride=2)
        self.layer3 = self._make_layer(BasicBlockDec, int(bottleneck / 2), num_Blocks[2], stride=2)
        self.layer2 = self._make_layer(BasicBlockDec, int(bottleneck / 4), num_Blocks[1], stride=2)
        self.layer1 = self._make_layer(BasicBlockDec, int(bottleneck / 8), num_Blocks[0], stride=2)
        self.conv1 = ResizeConv2d(int(bottleneck / 8), nc, kernel_size=3, scale_factor=2)

    def _make_layer(self, BasicBlockDec, planes, num_Blocks, stride):
        # Mirror image of the encoder's layer: the strided (upsampling) block
        # comes LAST, so all blocks are built at the current channel width and
        # the bookkeeping variable is updated only after the whole layer.
        strides = [stride] + [1] * (num_Blocks - 1)
        blocks = [BasicBlockDec(self.in_planes, s) for s in reversed(strides)]
        self.in_planes = planes
        return nn.Sequential(*blocks)

    def forward(self, z):
        # Treat the latent vector as a 1x1 feature map and upsample stage by stage.
        feat = z.view(z.size(0), z.size(1), 1, 1)
        feat = self.layer4(feat)
        feat = self.layer3(feat)
        feat = self.layer2(feat)
        feat = self.layer1(feat)
        return torch.sigmoid(self.conv1(feat))
| 5,542 | 33.216049 | 119 | py |
Signal-is-Harder | Signal-is-Harder-main/module/mlp_vae.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP_VAE(nn.Module):
    """VAE with MLP encoder/decoder and a linear classifier head.

    ``forward`` returns ``(z, logits, mean, logvar)``; ``reconstruct`` expects
    the concatenation of a signal and a bias latent (see MLP_Decoder).
    """

    def __init__(self, num_classes = 10, bottleneck = 16):
        super(MLP_VAE, self).__init__()
        self.encoder = MLP_Encoder(bottleneck = bottleneck)
        self.decoder = MLP_Decoder(bottleneck = bottleneck)
        self.classifier = MLP_Classifier(num_classes = num_classes, bottleneck = bottleneck)

    def forward(self, x):
        """Encode, draw a latent sample and classify it."""
        mean, logvar = self.encoder(x)
        latent = self.reparameterization(mean, logvar)
        return latent, self.predict(latent), mean, logvar

    def reconstruct(self, z):
        """Decode a latent code back to image space."""
        return self.decoder(z)

    def predict(self, z):
        """Class logits for a latent code."""
        return self.classifier(z)

    def reparameterization(self, mu, logvar):
        """Differentiable sample z ~ N(mu, sigma); 1e-8 guards sigma == 0."""
        std = logvar.mul(0.5).exp_()
        return torch.distributions.Normal(mu, std+1e-8).rsample()
class MLP_Encoder(nn.Module):
    """MLP encoder: flattened 3x28x28 image -> (mean, logvar) of the latent."""

    def __init__(self, bottleneck = 16):
        super(MLP_Encoder, self).__init__()
        # Shared trunk; two separate linear heads produce mean and logvar.
        self.encoder = nn.Sequential(
            nn.Linear(3 * 28 * 28, 100),
            nn.ReLU(),
            nn.Linear(100, 100),
            nn.ReLU(),
        )
        self.fc_mean = nn.Linear(100, bottleneck)
        self.fc_logvar = nn.Linear(100, bottleneck)

    def forward(self, x):
        hidden = self.encoder(x.view(x.size(0), -1))
        return self.fc_mean(hidden), self.fc_logvar(hidden)
class MLP_Decoder(nn.Module):
    """MLP decoder mapping concatenated (signal, bias) latents to a 3x28x28 image."""

    def __init__(self, bottleneck = 16):
        super(MLP_Decoder, self).__init__()
        self.decoder = nn.Sequential(
            nn.Linear(bottleneck * 2, 512), # Combined representations of signal and bias encoder
            nn.ReLU(),
            nn.Linear(512, 1024),
            nn.ReLU(),
            nn.Linear(1024, 3 * 28 * 28),
        )

    def forward(self, z):
        # Sigmoid keeps pixels in [0, 1] (matches the BCE reconstruction loss).
        flat = torch.sigmoid(self.decoder(z))
        return flat.view(flat.size(0), 3, 28, 28)
class MLP_Classifier(nn.Module):
    """Classification head on the latent code: ReLU followed by one Linear."""

    def __init__(self, num_classes = 10, bottleneck = 16):
        super(MLP_Classifier, self).__init__()
        self.classifier = nn.Sequential(nn.ReLU(), nn.Linear(bottleneck, num_classes))
        self.num_classes = num_classes # Necessary for DeepFool2

    def forward(self, z):
        """Return class logits for latent batch ``z``."""
        return self.classifier(z)
| 2,499 | 25.041667 | 96 | py |
Signal-is-Harder | Signal-is-Harder-main/module/resnet.py | ''' From https://github.com/alinlab/LfF/blob/master/module/resnet.py '''
"""
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
__all__ = [
"ResNet",
"resnet20",
"resnet32",
"resnet44",
"resnet56",
"resnet110",
"resnet1202",
]
def _weights_init(m):
classname = m.__class__.__name__
# print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an nn.Module (used for shortcut padding)."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        """Apply the wrapped callable to ``x``."""
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Two-conv CIFAR ResNet block with identity (A) or projection (B) shortcut."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option="A"):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(planes)
        # Default shortcut is the identity; replaced when shapes change.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == "A":
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Parameter-free shortcut: subsample spatially (::2) and
                # zero-pad the channel dimension on both sides.
                self.shortcut = LambdaLayer(
                    lambda x: F.pad(
                        x[:, :, ::2, ::2],
                        (0, 0, 0, 0, planes // 4, planes // 4),
                        "constant",
                        0,
                    )
                )
            elif option == "B":
                # Learned 1x1 projection shortcut.
                self.shortcut = nn.Sequential(
                    nn.Conv2d(
                        in_planes,
                        self.expansion * planes,
                        kernel_size=1,
                        stride=stride,
                        bias=False,
                    ),
                    nn.BatchNorm2d(self.expansion * planes),
                )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + self.shortcut(x)
        return F.relu(out)
class ResNet(nn.Module):
    """CIFAR-style ResNet used as a feature extractor (conv trunk only).

    NOTE(review): the fc head is commented out in __init__, so forward()
    returns the final conv feature map rather than logits, and predict()
    would raise AttributeError (self.fc is never created) — confirm callers
    only use forward()/extract().
    """
    def __init__(self, block, num_blocks):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(
            3, 16, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        #self.fc = nn.Linear(64, num_classes)
        self.apply(_weights_init)

    def _make_layer(self, block, planes, num_blocks, stride):
        # The first block of a stage may downsample; the rest use stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def extract(self, x):
        # Same computation as forward(): returns the conv feature map.
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        #out = F.avg_pool2d(out, out.size()[3])
        #feat = out.view(out.size(0), -1)
        return out

    def predict(self, x):
        # NOTE(review): relies on self.fc, which __init__ no longer defines.
        prediction = self.fc(x)
        return prediction

    def forward(self, x, mode=None):
        # `mode` is accepted but unused here.
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # out = F.avg_pool2d(out, out.size()[3])
        # out = out.view(out.size(0), -1)
        #out = self.avgpool(out)
        #out = out.view(out.size(0), -1)
        #final_out = self.fc(out)
        return out
def resnet20():
    """ResNet-20: three stages of 3 basic blocks each."""
    return ResNet(BasicBlock, num_blocks=[3, 3, 3])
def resnet32():
    """ResNet-32: three stages of 5 basic blocks each."""
    return ResNet(BasicBlock, num_blocks=[5, 5, 5])
def resnet44():
    """ResNet-44: three stages of 7 basic blocks each."""
    return ResNet(BasicBlock, num_blocks=[7, 7, 7])
def resnet56():
    """ResNet-56: three stages of 9 basic blocks each."""
    return ResNet(BasicBlock, num_blocks=[9, 9, 9])
def resnet110():
    """ResNet-110: three stages of 18 basic blocks each."""
    return ResNet(BasicBlock, num_blocks=[18, 18, 18])
def resnet1202():
    """ResNet-1202: three stages of 200 basic blocks each."""
    return ResNet(BasicBlock, num_blocks=[200, 200, 200])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print(
"Total layers",
len(
list(
filter(
lambda p: p.requires_grad and len(p.data.size()) > 1,
net.parameters(),
)
)
),
)
if __name__ == "__main__":
    # Smoke-test every resnet factory exported by this module.
    resnet_names = [name for name in __all__ if name.startswith("resnet")]
    for name in resnet_names:
        print(name)
        test(globals()[name]())
        print()
Signal-is-Harder | Signal-is-Harder-main/module/mlp.py | ''' From https://github.com/alinlab/LfF/blob/master/module/MLP.py '''
import torch
import torch.nn as nn
class MLP(nn.Module):
    """Three-hidden-layer MLP over flattened 3x28x28 inputs.

    ``forward`` optionally returns the 100-d penultimate features alongside
    the class logits.
    """

    def __init__(self, num_classes = 10):
        super(MLP, self).__init__()
        hidden = 100
        # Same module layout as the reference implementation so that
        # state_dict keys (feature.0 ... feature.5, classifier) are unchanged.
        layers = [
            nn.Linear(3 * 28 * 28, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
        ]
        self.feature = nn.Sequential(*layers)
        self.classifier = nn.Linear(hidden, num_classes)

    def forward(self, x, return_feat=False):
        flat = x.view(x.size(0), -1)
        feat = self.feature(flat)
        logits = self.classifier(feat)
        return (logits, feat) if return_feat else logits
Signal-is-Harder | Signal-is-Harder-main/module/util.py | ''' Modified from https://github.com/alinlab/LfF/blob/master/module/util.py '''
import torch.nn as nn
from module.resnet import resnet20
from module.mlp import MLP
from module.mlp_vae import MLP_VAE
from module.resnet_vae import ResNet_VAE
from torchvision.models import resnet18, resnet50
def get_model(config):
    """Instantiate the network selected by ``config["model"]["tag"]``.

    The number of output classes is derived from ``config["data"]["dataset"]``
    (10 for MNIST/CIFAR variants, 2 for Camelyon17 variants). The torchvision
    ResNet18/50 variants are returned pretrained and frozen except for a fresh
    final ``fc`` head.
    """
    model_tag = config["model"]["tag"]
    dataset = config["data"]["dataset"]

    ten_class_sets = {"colored_mnist", "cifar10_type0", "cifar10_type1"}
    two_class_sets = {"camelyon17_type0", "camelyon17_type1", "camelyon17_type2"}
    if dataset in ten_class_sets:
        num_classes = 10
    elif dataset in two_class_sets:
        num_classes = 2
    else:
        raise NotImplementedError("Dataset is not integrated.")

    if model_tag == "ResNet20":
        return resnet20(num_classes)
    if model_tag in ("ResNet18", "ResNet50"):
        # Pretrained, fully frozen backbone; only a fresh fc head is trained.
        if model_tag == "ResNet18":
            model = resnet18(pretrained=True)
            feat_dim = 512
        else:
            model = resnet50(pretrained=True)
            feat_dim = 2048
        print("Pretrained&frozen " + model_tag)
        for param in model.parameters():
            param.requires_grad = False
        model.fc = nn.Linear(feat_dim, num_classes)
        model.fc.weight.requires_grad = True
        model.fc.bias.requires_grad = True
        return model
    if model_tag == "MLP":
        return MLP(num_classes=num_classes)
    if model_tag == "MLP_VAE":
        return MLP_VAE(num_classes=num_classes, bottleneck=config["model"]["bottleneck_MLP"])
    if model_tag == "ResNet_VAE":
        return ResNet_VAE(num_classes=num_classes, bottleneck=config["model"]["bottleneck_ResNet"])
    raise NotImplementedError("Model not implemented.")
# def get_disentangler(config):
# model_tag = config["model"]["tag"]
# if model_tag == "MLP":
# return FFVAE_Disentangler()
# elif model_tag == "MLP_VAE":
# return FFVAE_Disentangler()
# else:
# raise NotImplementedError("Model not implemented.") | 2,150 | 36.736842 | 100 | py |
Signal-is-Harder | Signal-is-Harder-main/data/rotated_mnist_protocol.py | import os
from functools import partial
import torchvision.transforms.functional as F
# Directory containing this module.
# NOTE(review): unused in the visible code of this module — kept for parity
# with the other *_protocol.py files.
dir_path = os.path.dirname(os.path.realpath(__file__))
def rotate(raw_image, severity, attribute_label):
if severity==0:
raise NotImplementedError("Need severity != 0")
rotation = 90/(5-severity)
if attribute_label == 0:
return raw_image
elif attribute_label == 1:
image = F.rotate(raw_image.unsqueeze(0).float(),rotation).squeeze(0)
return image
else: raise NotImplementedError("Only 2class-dataset")
# One rotation transform per (binary) attribute value, with the label pre-bound.
ROTATED_MNIST_PROTOCOL = {
    label: partial(rotate, attribute_label=label) for label in range(2)
}
Signal-is-Harder | Signal-is-Harder-main/data/attr_dataset.py | '''Modified from https://github.com/alinlab/LfF/blob/master/data/attr_dataset.py'''
import os
import pickle
import torch
import numpy as np
from torch.utils.data import Dataset
class AttributeDataset(Dataset):
    """Dataset backed by ``images.npy`` / ``attrs.npy`` files on disk.

    Each sample is ``(image, attrs)`` where ``attrs`` is restricted to the
    attribute columns selected via :meth:`set_query_attr_idx`.
    """

    def __init__(self, root, split, query_attr_idx=None, transform=None):
        super(AttributeDataset, self).__init__()
        split_dir = os.path.join(root, split)
        self.data = np.load(os.path.join(split_dir, "images.npy"))
        self.attr = torch.LongTensor(np.load(os.path.join(split_dir, "attrs.npy")))
        with open(os.path.join(root, "attr_names.pkl"), "rb") as f:
            self.attr_names = pickle.load(f)
        self.num_attrs = self.attr.size(1)
        self.set_query_attr_idx(query_attr_idx)
        self.transform = transform

    def set_query_attr_idx(self, query_attr_idx):
        """Select which attribute columns __getitem__ returns (all when None)."""
        if query_attr_idx is None:
            query_attr_idx = torch.arange(self.num_attrs)
        self.query_attr = self.attr[:, query_attr_idx]

    def __len__(self):
        return self.attr.size(0)

    def __getitem__(self, index):
        image = self.data[index]
        attr = self.query_attr[index]
        if self.transform is not None:
            image = self.transform(image)
        return image, attr
Signal-is-Harder | Signal-is-Harder-main/data/shifted_mnist_protocol.py | import os
from functools import partial
import torchvision.transforms.functional as F
# Directory containing this module.
# NOTE(review): unused in the visible code of this module — kept for parity
# with the other *_protocol.py files.
dir_path = os.path.dirname(os.path.realpath(__file__))
def shift(raw_image, severity, attribute_label):
    """Translate *raw_image* in opposite directions per binary attribute.

    ``severity`` 1..4 maps to 8 / (5 - severity) pixels, i.e. 2, 2.67, 4, 8;
    label 0 shifts by (+t, +t/2), label 1 by (-t, -t/2). Raises
    NotImplementedError for severity 0 or labels outside {0, 1}, and
    ValueError for severity >= 5 (the original formula divided by zero at
    severity 5 and flipped sign beyond it).
    """
    if severity == 0:
        raise NotImplementedError("Need severity != 0")
    if severity >= 5:
        # 8 / (5 - severity) is a ZeroDivisionError at 5 and negative past 5;
        # fail loudly with a clear message instead.
        raise ValueError("severity must be at most 4 for the shift protocol")
    translation = 8 / (5 - severity)
    if attribute_label == 0:
        return F.affine(
            raw_image.unsqueeze(0).float(),
            scale=1, shear=0, angle=0,
            translate=(translation, translation / 2),
        ).squeeze(0)
    if attribute_label == 1:
        return F.affine(
            raw_image.unsqueeze(0).float(),
            scale=1, shear=0, angle=0,
            translate=(-translation, -translation / 2),
        ).squeeze(0)
    raise NotImplementedError("Only 2class-dataset")
# One shift transform per (binary) attribute value, with the label pre-bound.
SHIFTED_MNIST_PROTOCOL = {
    label: partial(shift, attribute_label=label) for label in range(2)
}
Signal-is-Harder | Signal-is-Harder-main/data/util.py | '''Modified from https://github.com/alinlab/LfF/blob/master/data/util.py'''
import os
import numpy as np
import torch
from torch.utils.data.dataset import Dataset, Subset
from torch.utils.data import Sampler, random_split
from torchvision import transforms as T
from data.attr_dataset import AttributeDataset
from functools import reduce
class IdxDataset(Dataset):
    """Wrap a dataset so each sample is prefixed with its integer index."""

    def __init__(self, dataset):
        self.dataset = dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        sample = self.dataset[idx]
        return (idx, *sample)
class IdxDataset2(Dataset):
    """Index-prefixing wrapper with a random 90/10 train/validation split.

    The active view is switched with ``make_train`` / ``make_biased_val`` /
    ``make_fulltrain``; indexing always prefixes the sample with its index in
    the active view.
    """

    def __init__(self, dataset):
        self.full_dataset = dataset
        n_train = int(len(self.full_dataset) * 0.9)
        n_valid = len(self.full_dataset) - n_train
        self.train_set, self.valid_set = random_split(self.full_dataset, [n_train, n_valid])
        # Default to the training split.
        self.dataset = self.train_set

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        return (idx, *self.dataset[idx])

    def make_train(self):
        self.dataset = self.train_set

    def make_biased_val(self):
        self.dataset = self.valid_set

    def make_fulltrain(self):
        self.dataset = self.full_dataset
class ZippedDataset(Dataset):
    """Zip several datasets together; shorter ones wrap around via modulo.

    Each item stacks the corresponding tensor fields of the zipped samples
    along a new leading dimension.
    """

    def __init__(self, datasets):
        super(ZippedDataset, self).__init__()
        self.datasets = datasets
        self.dataset_sizes = [len(d) for d in datasets]

    def __len__(self):
        # Length of the longest member; shorter members repeat.
        return max(self.dataset_sizes)

    def __getitem__(self, idx):
        samples = [
            ds[idx % size] for ds, size in zip(self.datasets, self.dataset_sizes)
        ]
        return [torch.stack(field, dim=0) for field in zip(*samples)]
# Per-dataset train/eval preprocessing pipelines, keyed by the tag prefix
# produced by get_dataset_tag() (e.g. "ColoredMNIST", "CorruptedCIFAR10").
# NOTE: the Normalize constants below are the widely used CIFAR-10 channel
# mean/std values.
transforms = {
    "ColoredMNIST": {
        # No augmentation for MNIST variants — tensor conversion only.
        "train": T.Compose([T.ToTensor()]),
        "eval": T.Compose([T.ToTensor()])
    },
    "CorruptedCIFAR10": {
        "train": T.Compose(
            [
                T.ToPILImage(),
                T.RandomResizedCrop(32,scale=(0.5, 1.0)), #Scale of randomcrop+padding=4 would equal 0.765625
                T.RandomHorizontalFlip(),
                T.ToTensor(),
                T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
            ]
        ),
        "eval": T.Compose(
            [
                T.ToTensor(),
                T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
            ]
        ),
    },
    "Camelyon17": {
        # Camelyon patches are center-cropped to 32x32; no normalization.
        "train": T.Compose(
            [
                T.ToPILImage(),
                T.CenterCrop(32),
                T.RandomResizedCrop(32,scale=(0.5, 1.0)), #Scale of randomcrop+padding=4 would equal 0.765625
                T.RandomHorizontalFlip(),
                T.ToTensor(),
            ]
        ),
        "eval": T.Compose(
            [
                T.ToPILImage(),
                T.CenterCrop(32),
                T.ToTensor(),
            ]
        ),
    },
}
def get_dataset_tag(config):
    """Build the canonical directory tag for the configured dataset.

    MNIST/CIFAR tags embed both the bias-conflicting percentage and the
    severity; Camelyon17 tags embed only the percentage. Raises
    NotImplementedError for unknown datasets.
    """
    perc = config["data"]["bias_conflicting_perc"]
    severity = config["data"]["severity"]
    dataset = config["data"]["dataset"]
    tag_patterns = {
        "colored_mnist": "ColoredMNIST-Skewed{p}-Severity{s}",
        "cifar10_type0": "CorruptedCIFAR10-Type0-Skewed{p}-Severity{s}",
        "cifar10_type1": "CorruptedCIFAR10-Type1-Skewed{p}-Severity{s}",
        "camelyon17_type0": "Camelyon17-Type0-Skewed{p}",
        "camelyon17_type1": "Camelyon17-Type1-Skewed{p}",
        "camelyon17_type2": "Camelyon17-Type2-Skewed{p}",
    }
    if dataset not in tag_patterns:
        raise NotImplementedError("Dataset not implemented.")
    return tag_patterns[dataset].format(p=perc, s=severity)
def get_dataset(config, dataset_split):
    """Load the AttributeDataset for the configured dataset and split.

    ``dataset_split`` selects the transform ("train" or "eval"); the on-disk
    split for "eval" is named "test".
    """
    tag = get_dataset_tag(config)
    category = tag.split("-")[0]
    root = os.path.join(config["user"]["data_dir"], tag)
    transform = transforms[category][dataset_split]
    # Only the directory name changes; the "eval" transform is kept.
    split = "test" if dataset_split == "eval" else dataset_split
    return AttributeDataset(root=root, split=split, transform=transform)
Signal-is-Harder | Signal-is-Harder-main/data/colored_mnist_protocol.py | '''Modified from https://github.com/alinlab/LfF/blob/master/data/colored_mnist_protocol.py'''
import os
import torch
import numpy as np
from functools import partial
# Load the per-class mean colors shipped next to this module.
dir_path = os.path.dirname(os.path.realpath(__file__))
colors_path = os.path.join(dir_path, "resource", "colors.th")
# Indexed by attribute_label in colorize(); presumably one RGB mean per class
# — confirm against resource/colors.th.
mean_color = torch.load(colors_path)
def colorize(raw_image, severity, attribute_label):
std_color = [0.05, 0.02, 0.01, 0.005, 0.002][severity-1]
image = (
torch.clamp(mean_color[attribute_label]
+ torch.randn((3, 1, 1)) * std_color, 0.0, 1.0)
) * raw_image.unsqueeze(0).float()
return image
# One colorizer per digit class, with the target color index pre-bound.
COLORED_MNIST_PROTOCOL = {
    label: partial(colorize, attribute_label=label) for label in range(10)
}
Multi2WOZ | Multi2WOZ-main/specialization/trainer_self.py | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
"""
import collections
import gc
import inspect
import math
import os
import re
import shutil
import sys
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from transformers.integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from transformers.file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_tpu_available,
is_training_run_on_sagemaker,
)
from transformers.modeling_utils import PreTrainedModel, unwrap_model
from transformers.optimization import Adafactor, AdamW, get_scheduler
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
EarlyStoppingCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from transformers.training_args import ParallelMode, TrainingArguments
from transformers.utils import logging
from transformers.utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
# Optional-dependency setup: each guard imports a backend only when its
# availability check passes, so the module imports cleanly without them.
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
    from .utils.notebook import NotebookProgressCallback
    DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
    from apex import amp
# Native AMP (torch.cuda.amp) exists from torch 1.6 onward.
if version.parse(torch.__version__) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
if is_datasets_available():
    import datasets
if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
    import fairscale
    from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
    from fairscale.optim import OSS
    from fairscale.optim.grad_scaler import ShardedGradScaler
    # Fully-sharded DDP needs fairscale >= 0.3.
    if version.parse(fairscale.__version__) >= version.parse("0.3"):
        from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
        from fairscale.nn.wrap import auto_wrap
    else:
        FullyShardedDDP = None
# `dist` resolves to SageMaker's data-parallel backend when enabled,
# otherwise to plain torch.distributed.
if is_sagemaker_dp_enabled():
    import smdistributed.dataparallel.torch.distributed as dist
    from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
    import torch.distributed as dist
if is_sagemaker_mp_enabled():
    import smdistributed.modelparallel.torch as smp
    from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
if is_training_run_on_sagemaker():
    logging.add_handler(StreamHandler(sys.stdout))
if TYPE_CHECKING:
    import optuna
logger = logging.get_logger(__name__)
class TrainerSelf:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False` .
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
"""
from transformers.trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
    def __init__(
        self,
        model: Union[PreTrainedModel, torch.nn.Module] = None,
        args: TrainingArguments = None,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Dataset] = None,
        tokenizer: Optional[PreTrainedTokenizerBase] = None,
        model_init: Callable[[], PreTrainedModel] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        callbacks: Optional[List[TrainerCallback]] = None,
        do_save_full_model: bool = True,
        do_save_adapters: bool = False,
        do_save_adapter_fusion: bool = False,
        adapter_names: Optional[List[List[str]]] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
    ):
        """Set up the trainer: model placement, sharded-DDP mode, collator,
        callbacks, mixed-precision backend and bookkeeping state.

        Mirrors ``transformers.Trainer.__init__`` with extra adapter-saving
        flags (``do_save_full_model``, ``do_save_adapters``,
        ``do_save_adapter_fusion``, ``adapter_names``). Exactly one of
        ``model`` / ``model_init`` must be provided.
        """
        if args is None:
            output_dir = "tmp_trainer"
            logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
            args = TrainingArguments(output_dir=output_dir)
        self.args = args
        # Seed must be set before instantiating the model when using model
        set_seed(self.args.seed)
        self.hp_name = None
        self.deepspeed = None
        self.is_in_train = False
        # memory metrics - must set up as early as possible
        self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
        self._memory_tracker.start()
        # force device and distributed setup init explicitly
        args._setup_devices
        if model is None:
            if model_init is not None:
                self.model_init = model_init
                model = self.call_model_init()
            else:
                raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
        else:
            if model_init is not None:
                warnings.warn(
                    "`Trainer` requires either a `model` or `model_init` argument, but not both. "
                    "`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
                    FutureWarning,
                )
            self.model_init = model_init
        # Model parallelism: layers spread over several GPUs (distinct from
        # data parallelism).
        if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
            self.is_model_parallel = True
        else:
            self.is_model_parallel = False
        # Setup Sharded DDP training
        self.sharded_ddp = None
        if len(args.sharded_ddp) > 0:
            if args.deepspeed:
                raise ValueError(
                    "Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
                )
            if args.local_rank == -1:
                raise ValueError("Using sharded DDP only works in distributed training.")
            elif not is_fairscale_available():
                raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
            elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
                raise ImportError(
                    "Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
                    f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
                )
            elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
                self.sharded_ddp = ShardedDDPOption.SIMPLE
            elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
                self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
            elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
                self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
        # one place to sort out whether to place the model on device or not
        # postpone switching model to cuda when:
        # 1. MP - since we are trying to fit a much bigger than 1 gpu model
        # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
        #    and we only use deepspeed for training at the moment
        # 3. full fp16 eval - since the model needs to be half'ed first
        # 4. Sharded DDP - same as MP
        self.place_model_on_device = args.place_model_on_device
        if (
            self.is_model_parallel
            or (args.deepspeed and args.do_train)
            or (args.fp16_full_eval and not args.do_train)
            or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
        ):
            self.place_model_on_device = False
        # Collator: pad dynamically when a tokenizer is available, otherwise
        # use the simple default collator.
        default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
        self.data_collator = data_collator if data_collator is not None else default_collator
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.tokenizer = tokenizer
        if self.place_model_on_device:
            model = model.to(args.device)
        # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
        if self.is_model_parallel:
            self.args._n_gpu = 1
        # later use `self.model is self.model_wrapped` to check if it's wrapped or not
        self.model_wrapped = model
        self.model = model
        self.compute_metrics = compute_metrics
        self.optimizer, self.lr_scheduler = optimizers
        if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
            raise RuntimeError(
                "Passing a `model_init` is incompatible with providing the `optimizers` argument."
                "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
            )
        # Callback plumbing: defaults + reporting integrations + user callbacks.
        default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
        callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
        self.callback_handler = CallbackHandler(
            callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
        )
        self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
        # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
        self._loggers_initialized = False
        # Create output directory if needed
        if self.is_world_process_zero():
            os.makedirs(self.args.output_dir, exist_ok=True)
        # adapters used
        self.do_save_full_model = do_save_full_model
        self.do_save_adapters = do_save_adapters
        self.do_save_adapter_fusion = do_save_adapter_fusion
        if adapter_names is not None:
            self.model.set_active_adapters(adapter_names)
        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
            raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
        if args.max_steps > 0:
            logger.info("max_steps is given, it will override any value given in num_train_epochs")
        # Enforce rules on using datasets with no __len__
        if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
            raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
        if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")
        self._signature_columns = None
        # Drop dataset columns the model's forward() does not accept.
        if is_datasets_available():
            if isinstance(train_dataset, datasets.Dataset):
                self._remove_unused_columns(self.train_dataset, description="training")
            if isinstance(eval_dataset, datasets.Dataset):
                self._remove_unused_columns(self.eval_dataset, description="evaluation")
        # Mixed precision setup
        self.use_apex = False
        self.use_amp = False
        self.fp16_backend = None
        if args.fp16:
            if args.fp16_backend == "auto":
                self.fp16_backend = "amp" if _is_native_amp_available else "apex"
            else:
                self.fp16_backend = args.fp16_backend
            logger.info(f"Using {self.fp16_backend} fp16 backend")
        if args.fp16 and not args.deepspeed:  # deepspeed manages its own fp16
            if self.fp16_backend == "amp":
                self.use_amp = True
                self.scaler = ShardedGradScaler() if self.sharded_ddp is not None else torch.cuda.amp.GradScaler()
            else:
                if not is_apex_available():
                    raise ImportError(
                        "Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
                    )
                self.use_apex = True
        # Label smoothing
        if self.args.label_smoothing_factor != 0:
            self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
        else:
            self.label_smoother = None
        self.state = TrainerState()
        self.control = TrainerControl()
        # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
        # state at each call to self.log.
        self._total_flos = None
        self.hp_search_backend = None
        self.use_tune_checkpoints = False
        default_label_names = (
            ["start_positions", "end_positions"]
            if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
            else ["labels"]
        )
        self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
        self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
        # very last
        self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformer.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
    def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
        """Restrict *dataset* (in place, via ``set_format``) to the columns the
        model's ``forward`` accepts, plus "label"/"label_ids"; ignored columns
        are logged. No-op when ``args.remove_unused_columns`` is False.
        """
        if not self.args.remove_unused_columns:
            return
        if self._signature_columns is None:
            # Inspect model forward signature to keep only the arguments it accepts.
            # Cached on first use; assumes the model does not change afterwards.
            signature = inspect.signature(self.model.forward)
            self._signature_columns = list(signature.parameters.keys())
            # Labels may be named label or label_ids, the default data collator handles that.
            self._signature_columns += ["label", "label_ids"]
        columns = [k for k in self._signature_columns if k in dataset.column_names]
        ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
        if len(ignored_columns) > 0:
            dset_description = "" if description is None else f"in the {description} set "
            logger.info(
                f"The following columns {dset_description} don't have a corresponding argument in "
                f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
            )
        dataset.set_format(type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"])
    def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
        """Choose the training sampler for the current configuration.

        Returns None for iterable/unsized datasets, a (distributed)
        length-grouped sampler when ``args.group_by_length`` is set, and
        otherwise a random/sequential sampler in the single-process case or a
        DistributedSampler variant for multi-process training.
        """
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
            self.train_dataset, collections.abc.Sized
        ):
            return None
        # Build the sampler.
        if self.args.group_by_length:
            if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
                lengths = (
                    self.train_dataset[self.args.length_column_name]
                    if self.args.length_column_name in self.train_dataset.column_names
                    else None
                )
            else:
                lengths = None
            model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
            if self.args.world_size <= 1:
                return LengthGroupedSampler(
                    self.train_dataset, self.args.train_batch_size, lengths=lengths, model_input_name=model_input_name
                )
            else:
                return DistributedLengthGroupedSampler(
                    self.train_dataset,
                    self.args.train_batch_size,
                    num_replicas=self.args.world_size,
                    rank=self.args.process_index,
                    lengths=lengths,
                    model_input_name=model_input_name,
                )
        else:
            if self.args.world_size <= 1:
                # NOTE(review): project-specific deviation from the upstream
                # Trainer — datasets with >= 200000 examples are iterated
                # sequentially instead of shuffled, and the prints below look
                # like leftover debug output. The hard-coded threshold's
                # rationale is not documented here; confirm before changing.
                if len(self.train_dataset)<200000:
                    #return RandomSampler(self.train_dataset, replacement=True, num_samples=200000)
                    return RandomSampler(self.train_dataset)
                else:
                    print("SequentialSampler")
                    print(len(self.train_dataset))
                    return SequentialSampler(self.train_dataset)
                #print("SequentialSampler")
                #return SequentialSampler(self.train_dataset)
            elif (
                self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
                and not self.args.dataloader_drop_last
            ):
                # Use a loop for TPUs when drop_last is False to have all batches have the same size.
                return DistributedSamplerWithLoop(
                    self.train_dataset,
                    batch_size=self.args.per_device_train_batch_size,
                    num_replicas=self.args.world_size,
                    rank=self.args.process_index,
                )
            else:
                return DistributedSampler(
                    self.train_dataset, num_replicas=self.args.world_size, rank=self.args.process_index
                )
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif is_sagemaker_mp_enabled():
return SequentialDistributedSampler(
eval_dataset,
num_replicas=smp.dp_size(),
rank=smp.dp_rank(),
batch_size=self.args.per_device_eval_batch_size,
)
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method (or :obj:`create_optimizer`
and/or :obj:`create_scheduler`) in a subclass.
"""
self.create_optimizer()
self.create_scheduler(num_training_steps)
def create_optimizer(self):
"""
Setup the optimizer.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
if hasattr(self.model, "config") and hasattr(self.model.config, "adapter_fusion_models"):
no_decay = [f"adapter_fusion_layer.{n}.value" for n in self.model.config.adapter_fusion_models]
decay_parameters = [name for name in decay_parameters if name not in no_decay]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
optimizer_cls = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if is_sagemaker_mp_enabled():
self.optimizer = smp.DistributedOptimizer(self.optimizer)
def create_scheduler(self, num_training_steps: int):
"""
Setup the scheduler. The optimizer of the trainer must have been set up before this method is called.
Args:
num_training_steps (int): The number of training steps to do.
"""
if self.lr_scheduler is None:
warmup_steps = (
self.args.warmup_steps
if self.args.warmup_steps > 0
else math.ceil(num_training_steps * self.args.warmup_ratio)
)
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_training_steps,
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
if self.hp_search_backend == HPSearchBackend.OPTUNA:
params = self.hp_space(trial)
elif self.hp_search_backend == HPSearchBackend.RAY:
params = trial
params.pop("wandb", None)
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
    def _wrap_model(self, model, training=True):
        """Wrap ``model`` for the configured distributed / mixed-precision setup.

        The order of the wrapping steps below is significant (e.g. apex initialization
        must happen before DataParallel/DDP wrapping). Returns the wrapped model; for
        evaluation-only calls (``training=False``) distributed wrappers are skipped.
        """
        # SageMaker model parallelism replaces all other wrapping strategies.
        if is_sagemaker_mp_enabled():
            # Wrapping the base model twice in a DistributedModel will raise an error.
            if isinstance(self.model_wrapped, smp.model.DistributedModel):
                return self.model_wrapped
            return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
        # already initialized its own DDP and AMP
        if self.deepspeed:
            return self.deepspeed
        # train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
        if unwrap_model(model) is not model:
            return model
        # Mixed precision training with apex (torch < 1.6)
        if self.use_apex and training:
            model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
        # Multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
        if not training:
            return model
        # Distributed training (should be after apex fp16 initialization)
        if self.sharded_ddp is not None:
            # Sharded DDP!
            if self.sharded_ddp == ShardedDDPOption.SIMPLE:
                model = ShardedDDP(model, self.optimizer)
            else:
                mixed_precision = self.args.fp16
                cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
                zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
                # XXX: Breaking the self.model convention but I see no way around it for now.
                if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
                    model = auto_wrap(model)
                self.model = model = FullyShardedDDP(
                    model,
                    mixed_precision=mixed_precision,
                    reshard_after_forward=zero_3,
                    cpu_offload=cpu_offload,
                ).to(self.args.device)
        elif is_sagemaker_dp_enabled():
            model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
        elif self.args.local_rank != -1:
            if self.args.ddp_find_unused_parameters is not None:
                find_unused_parameters = self.args.ddp_find_unused_parameters
            elif isinstance(model, PreTrainedModel):
                # find_unused_parameters breaks checkpointing as per
                # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
                find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
            else:
                find_unused_parameters = True
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                find_unused_parameters=find_unused_parameters,
            )
        return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
self.is_in_train = True
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(self.args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({self.args.output_dir})")
adapter_reloaded = False
if resume_from_checkpoint is not None:
if os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
logger.info(f"Loading model from {resume_from_checkpoint}).")
if self.deepspeed:
# will be resumed in init_deepspeed
pass
elif isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(resume_from_checkpoint)
model_reloaded = True
else:
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if os.path.isdir(resume_from_checkpoint):
for file_name in os.listdir(resume_from_checkpoint):
if os.path.isdir(os.path.join(resume_from_checkpoint, file_name)):
if "," in file_name:
self.model.load_adapter_fusion(os.path.join(resume_from_checkpoint, file_name))
adapter_reloaded = True
else:
self.model.load_adapter(os.path.join(os.path.join(resume_from_checkpoint, file_name)))
adapter_reloaded = True
if not (os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)) or adapter_reloaded):
raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
self.model_wrapped = self.model
# Keeping track whether we can can len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
print("len_train_dataloader", len(train_dataloader))
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(
self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
)
self.model = model.module
self.model_wrapped = model
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
else:
world_size = 1
total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
# _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator)
if train_dataset_is_sized
else self.args.max_steps * self.args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if step % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if (
((step + 1) % self.args.gradient_accumulation_steps != 0)
and self.args.local_rank != -1
and self.args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# apply adapter fusion weight regularization on the value matrix
if (
hasattr(self.model.config, "adapter_fusion")
and self.model.config.adapter_fusion["regularization"]
):
fusion_reg_loss = self.model.base_model.get_fusion_regularization_loss()
fusion_reg_loss.backward()
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
if not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
if self.do_save_adapters:
logger.info("\n\nTraining completed. Do not forget to share your adapters on https://adapterhub.ml =)\n\n")
else:
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
# Wait for everyone to get here so we are sur the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif self.args.local_rank != -1:
dist.barrier()
if self.do_save_full_model:
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(model, PreTrainedModel):
self.model = model.from_pretrained(self.state.best_model_checkpoint)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.do_save_adapters:
logger.info(
f"Loading best adapter(s) from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
# attempt to re-load all adapters from checkpoint
for adapter in self.model.config.adapters.adapters:
adapter_dir = os.path.join(self.state.best_model_checkpoint, adapter)
if os.path.exists(adapter_dir):
self.model.load_adapter(adapter_dir)
if self.do_save_adapter_fusion:
logger.info(
f"Loading best adapter fusion(s) from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
# attempt to re-load all adapter fusions from checkpoint
for fusion in self.model.config.adapter_fusion_models:
fusion_dir = os.path.join(self.state.best_model_checkpoint, fusion)
if os.path.exists(fusion_dir):
self.model.load_adapter_fusion(fusion_dir)
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
if self.deepspeed:
# free up any memory that might be useful for eval
self.deepspeed = None
self.optimizer = None
self.lr_scheduler = None
self.model_wrapped = self.model
gc.collect() # force memory release
# to restore normal behavior outside of train replay the place_model_on_device logic w/o deepspeed
self.place_model_on_device = self.args.place_model_on_device
if self.is_model_parallel:
self.place_model_on_device = False
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
        """Log, evaluate, and/or save a checkpoint depending on the flags on ``self.control``."""
        if self.control.should_log:
            logs: Dict[str, float] = {}
            tr_loss_scalar = tr_loss.item()
            # reset tr_loss to zero
            # NOTE: `-=` is an in-place tensor op, so the caller's accumulator (the same
            # tensor object used by the training loop) is zeroed as well.
            tr_loss -= tr_loss
            # Average loss over the steps since the last log.
            logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
            logs["learning_rate"] = self._get_learning_rate()
            self._total_loss_scalar += tr_loss_scalar
            self._globalstep_last_logged = self.state.global_step
            self.log(logs)
        metrics = None
        if self.control.should_evaluate:
            metrics = self.evaluate()
            self._report_to_hp_search(trial, epoch, metrics)
        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)
    def _save_checkpoint(self, model, trial, metrics=None):
        """Save model, optimizer, scheduler and trainer state for the current global step,
        update the best-model bookkeeping, and rotate old checkpoints.

        During a hyperparameter search the checkpoint goes into a per-run subdirectory of
        ``args.output_dir``; otherwise directly into ``args.output_dir``.
        """
        # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
        # want to save except FullyShardedDDP.
        # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
        # Save model checkpoint
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
        if self.hp_search_backend is not None and trial is not None:
            if self.hp_search_backend == HPSearchBackend.OPTUNA:
                run_id = trial.number
            else:
                from ray import tune

                run_id = tune.get_trial_id()
            run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
            run_dir = os.path.join(self.args.output_dir, run_name)
        else:
            run_dir = self.args.output_dir
            self.store_flos()
        output_dir = os.path.join(run_dir, checkpoint_folder)
        self.save_model(output_dir)
        if self.deepspeed:
            self.deepspeed.save_checkpoint(output_dir)
        # Save optimizer and scheduler
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            # Gather the sharded optimizer state onto the main process before saving.
            self.optimizer.consolidate_state_dict()
        if is_torch_tpu_available():
            xm.rendezvous("saving_optimizer_states")
            xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)
        elif is_sagemaker_mp_enabled():
            # Consolidate the state dict on all processed of dp_rank 0
            opt_state_dict = self.optimizer.state_dict()
            # Save it and the scheduler on the main process
            if self.is_world_process_zero():
                torch.save(opt_state_dict, os.path.join(output_dir, "optimizer.pt"))
                with warnings.catch_warnings(record=True) as caught_warnings:
                    torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)
        elif self.is_world_process_zero() and not self.deepspeed:
            # deepspeed.save_checkpoint above saves model/optim/sched
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            reissue_pt_warnings(caught_warnings)
        # Determine the new best metric / best model checkpoint
        if metrics is not None and self.args.metric_for_best_model is not None:
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]
            operator = np.greater if self.args.greater_is_better else np.less
            if (
                self.state.best_metric is None
                or self.state.best_model_checkpoint is None
                or operator(metric_value, self.state.best_metric)
            ):
                self.state.best_metric = metric_value
                self.state.best_model_checkpoint = output_dir
        # Save the Trainer state
        if self.is_world_process_zero():
            self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
        # Maybe delete some older checkpoints.
        if self.is_world_process_zero():
            self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if self.deepspeed:
# deepspeed loads optimizer/lr_scheduler together with the model in init_deepspeed
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=map_location)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
Launch an hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
n_trials (:obj:`int`, `optional`, defaults to 100):
The number of trial runs to test.
direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
Whether to optimize greater or lower objects. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
return loss_mb.reduce_mean().detach().to(self.args.device)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
elif is_sagemaker_mp_enabled():
return smp.local_rank() == 0
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
elif is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.args.process_index == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the main process.
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif is_sagemaker_mp_enabled():
# Calling the state_dict needs to be done on the wrapped model and on all processes.
state_dict = self.model_wrapped.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
):
state_dict = self.model.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif self.is_world_process_zero():
self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info(f"Saving model checkpoint to {output_dir}")
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
save_config=self.is_world_process_zero(),
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
if self.do_save_adapters:
self.model.save_all_adapters(output_dir)
if self.do_save_adapter_fusion:
self.model.save_all_adapter_fusions(output_dir)
if self.do_save_full_model:
self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
if self.do_save_adapters:
self.model.save_all_adapters(output_dir)
if self.do_save_adapter_fusion:
self.model.save_all_adapter_fusions(output_dir)
if self.do_save_full_model:
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`Lst[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
ignore_keys (:obj:`Lst[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"test"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"test_bleu" if the prefix is "test" (default)
.. note::
If your predictions or labels have different sequence length (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
if self.args.deepspeed and not self.args.do_train:
# no harm, but flagging to the user that deepspeed config is ignored for eval
# flagging only for when --do_train wasn't passed as only then it's redundant
logger.info("Detected the deepspeed argument but it will not be used for evaluation")
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, half it first and then put on device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info(f"***** Running {description} *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Batch size = {batch_size}")
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, self.args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
# The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
# a batch size to the sampler)
make_multiple_of = None
if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
make_multiple_of = dataloader.sampler.batch_size
preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
ignore_keys (:obj:`Lst[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if is_sagemaker_mp_enabled():
raw_outputs = smp_forward_only(model, inputs)
if has_labels:
if isinstance(raw_outputs, dict):
loss_mb = raw_outputs["loss"]
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
else:
loss_mb = raw_outputs[0]
logits_mb = raw_outputs[1:]
loss = loss_mb.reduce_mean().detach().cpu()
logits = smp_nested_concat(logits_mb)
else:
loss = None
if isinstance(raw_outputs, dict):
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
else:
logits_mb = raw_outputs
logits = smp_nested_concat(logits_mb)
else:
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0 | 102,647 | 46.765472 | 146 | py |
Multi2WOZ | Multi2WOZ-main/downstream/main_cl.py | from tqdm import tqdm
import torch.nn as nn
import ast
import glob
import numpy as np
import copy
# utils
from utils.config import *
from utils.utils_general import *
from utils.utils_multiwoz_cl import *
from utils.utils_oos_intent import *
from utils.utils_universal_act import *
# models
from models.BERT_DST_Picklist import *
from models.dual_encoder_ranking import *
# Huggingface models
from transformers import *
import logging
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
## Model selection registry.
## Each entry maps a lowercase model_type string to a
## (model class, tokenizer class, config class) triple.
## "todbert" / "todgpt2" are the TOD-pretrained variants and therefore
## reuse the plain BERT / GPT-2 class triples.
_BERT_TRIPLE = (BertModel, BertTokenizer, BertConfig)
_GPT2_TRIPLE = (GPT2Model, GPT2Tokenizer, GPT2Config)
MODELS = {
    "bert": _BERT_TRIPLE,
    "todbert": _BERT_TRIPLE,
    "gpt2": _GPT2_TRIPLE,
    "todgpt2": _GPT2_TRIPLE,
    "dialogpt": (AutoModelWithLMHead, AutoTokenizer, GPT2Config),
    "albert": (AlbertModel, AlbertTokenizer, AlbertConfig),
    "roberta": (RobertaModel, RobertaTokenizer, RobertaConfig),
    "distilbert": (DistilBertModel, DistilBertTokenizer, DistilBertConfig),
    "electra": (ElectraModel, ElectraTokenizer, ElectraConfig),
    "xlmroberta": (XLMRobertaModel, XLMRobertaTokenizer, XLMRobertaConfig),
}
## Fix torch random seed up front so results are reproducible when requested.
## NOTE(review): when multiple runs are requested, a per-run seed from SEEDS
## is applied again inside the training loop below.
if args["fix_rand_seed"]:
    torch.manual_seed(args["rand_seed"])
#logging.info("Running Tgt Language: {}".format(args["tgt_lang"]))
## Reading data and create data loaders.
## args["dataset"] is a string literal of a Python list (e.g. "['multiwoz']"),
## and each name is dispatched to its prepare_data_<name>(args) loader.
datasets = {}
for dataset_name in ast.literal_eval(args["dataset"]):
    prepare_fn = globals()["prepare_data_{}".format(dataset_name)]
    data_trn, data_dev, data_tst, data_meta = prepare_fn(args)
    datasets[dataset_name] = dict(
        train=data_trn, dev=data_dev, test=data_tst, meta=data_meta
    )
# Merge per-dataset meta information and make sure the response-candidate
# entry exists even for tasks that do not populate it.
unified_meta = get_unified_meta(datasets)
unified_meta.setdefault("resp_cand_trn", {})
args["unified_meta"] = unified_meta
## Create vocab and model class.
## Resolve the (model, tokenizer, config) triple from the registry, then
## stash everything the downstream model constructors need into args.
args["model_type"] = args["model_type"].lower()
model_class, tokenizer_class, config_class = MODELS[args["model_type"]]
pretrained_path = args["model_name_or_path"]
tokenizer = tokenizer_class.from_pretrained(pretrained_path, cache_dir=args["cache_dir"])
args["model_class"] = model_class
args["tokenizer"] = tokenizer
# Load a pretrained config when a checkpoint path/name is given,
# otherwise fall back to the library's default configuration.
config = (config_class.from_pretrained(pretrained_path, cache_dir=args["cache_dir"])
          if pretrained_path else config_class())
args["config"] = config
args["num_labels"] = unified_meta["num_labels"]
## Training and Testing Loop.
## One full train-from-scratch per random seed; each run early-stops on the
## dev set, reloads its best checkpoint, then evaluates on the test set.
if args["do_train"]:
    result_runs = []
    # Remember the base output dir; each run writes into a "run{i}" subfolder.
    output_dir_origin = str(args["output_dir"])
    ## Setup logger: DEBUG-level to train.log, INFO-level mirrored to console.
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                        datefmt='%m-%d %H:%M',
                        filename=os.path.join(args["output_dir"], "train.log"),
                        filemode='w')
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    ## training loop
    for run in range(args["nb_runs"]):
        ## Setup random seed and output dir
        rand_seed = SEEDS[run]
        if args["fix_rand_seed"]:
            torch.manual_seed(rand_seed)
        args["rand_seed"] = rand_seed
        args["output_dir"] = os.path.join(output_dir_origin, "run{}".format(run))
        os.makedirs(args["output_dir"], exist_ok=False)
        logging.info("Running Random Seed: {}".format(rand_seed))
        ## Loading model (class resolved dynamically from args['my_model'])
        model = globals()[args['my_model']](args)
        if torch.cuda.is_available(): model = model.cuda()
        ## Create Dataloader (dev/test are shuffled only when task_name == "rs")
        trn_loader = get_loader(args, "train", tokenizer, datasets, unified_meta)
        dev_loader = get_loader(args, "dev" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
        tst_loader = get_loader(args, "test" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
        ## Create TF Writer
        tb_writer = SummaryWriter(comment=args["output_dir"].replace("/", "-"))
        # Start training process with early stopping
        loss_best, acc_best, cnt, train_step = 1e10, -1, 0, 0
        try:
            for epoch in range(args["epoch"]):
                logging.info("Epoch:{}".format(epoch+1))
                train_loss = 0
                pbar = tqdm(trn_loader)
                for i, d in enumerate(pbar):
                    model.train()
                    outputs = model(d)
                    train_loss += outputs["loss"]
                    train_step += 1
                    pbar.set_description("Training Loss: {:.4f}".format(train_loss/(i+1)))
                    ## Dev Evaluation: every eval_by_step steps, or once per
                    ## epoch (on the last batch) when eval_by_step == -1.
                    if (train_step % args["eval_by_step"] == 0 and args["eval_by_step"] != -1) or \
                       (i == len(pbar)-1 and args["eval_by_step"] == -1):
                        model.eval()
                        dev_loss = 0
                        preds, labels = [], []
                        ppbar = tqdm(dev_loader)
                        for d in ppbar:
                            with torch.no_grad():
                                outputs = model(d)
                            #print(outputs)
                            dev_loss += outputs["loss"]
                            preds += [item for item in outputs["pred"]]
                            labels += [item for item in outputs["label"]]
                        dev_loss = dev_loss / len(dev_loader)
                        results = model.evaluation(preds, labels)
                        # Early-stop criterion is either the dev loss itself or
                        # a task metric reported by model.evaluation().
                        dev_acc = results[args["earlystop"]] if args["earlystop"] != "loss" else dev_loss
                        ## write to tensorboard
                        tb_writer.add_scalar("train_loss", train_loss/(i+1), train_step)
                        tb_writer.add_scalar("eval_loss", dev_loss, train_step)
                        tb_writer.add_scalar("eval_{}".format(args["earlystop"]), dev_acc, train_step)
                        if (dev_loss < loss_best and args["earlystop"] == "loss") or \
                           (dev_acc > acc_best and args["earlystop"] != "loss"):
                            loss_best = dev_loss
                            acc_best = dev_acc
                            cnt = 0 # reset
                            if args["not_save_model"]:
                                # Keep the best weights in memory instead of on disk.
                                model_clone = globals()[args['my_model']](args)
                                model_clone.load_state_dict(copy.deepcopy(model.state_dict()))
                            else:
                                output_model_file = os.path.join(args["output_dir"], "pytorch_model.bin")
                                if args["n_gpu"] == 1:
                                    torch.save(model.state_dict(), output_model_file)
                                else:
                                    # Multi-GPU: the model is wrapped, so save
                                    # the inner module's state dict.
                                    torch.save(model.module.state_dict(), output_model_file)
                                logging.info("[Info] Model saved at epoch {} step {}".format(epoch, train_step))
                        else:
                            cnt += 1
                            logging.info("[Info] Early stop count: {}/{}...".format(cnt, args["patience"]))
                        if cnt > args["patience"]:
                            logging.info("Ran out of patient, early stop...")
                            break
                        logging.info("Trn loss {:.4f}, Dev loss {:.4f}, Dev {} {:.4f}".format(train_loss/(i+1),
                                                                                              dev_loss,
                                                                                              args["earlystop"],
                                                                                              dev_acc))
                # Propagate early stop from the batch loop to the epoch loop.
                if cnt > args["patience"]:
                    tb_writer.close()
                    break
        except KeyboardInterrupt:
            logging.info("[Warning] Earlystop by KeyboardInterrupt")
        ## Load the best model
        if args["not_save_model"]:
            model.load_state_dict(copy.deepcopy(model_clone.state_dict()))
        else:
            # Start evaluating on the test set
            if torch.cuda.is_available():
                model.load_state_dict(torch.load(output_model_file))
            else:
                # Map CUDA-saved tensors onto CPU when no GPU is available.
                model.load_state_dict(torch.load(output_model_file, lambda storage, loc: storage))
        ## Run test set evaluation
        pbar = tqdm(tst_loader)
        for nb_eval in range(args["nb_evals"]):
            test_loss = 0
            preds, labels = [], []
for d in pbar:
with torch.no_grad():
outputs = model(d)
test_loss += outputs["loss"]
preds += [item for item in outputs["pred"]]
labels += [item for item in outputs["label"]]
test_loss = test_loss / len(tst_loader)
results = model.evaluation(preds, labels)
result_runs.append(results)
logging.info("[{}] Test Results: ".format(nb_eval) + str(results))
## Average results over runs
if args["nb_runs"] > 1:
f_out = open(os.path.join(output_dir_origin, "eval_results_multi-runs.txt"), "w")
f_out.write("Average over {} runs and {} evals \n".format(args["nb_runs"], args["nb_evals"]))
for key in results.keys():
mean = np.mean([r[key] for r in result_runs])
std = np.std([r[key] for r in result_runs])
f_out.write("{}: mean {} std {} \n".format(key, mean, std))
f_out.close()
else:
## Load Model
print("[Info] Loading model from {}".format(args['my_model']))
model = globals()[args['my_model']](args)
if args["load_path"]:
print("MODEL {} LOADED".format(args["load_path"]))
if torch.cuda.is_available():
model.load_state_dict(torch.load(args["load_path"]))
else:
model.load_state_dict(torch.load(args["load_path"], lambda storage, loc: storage))
else:
print("[WARNING] No trained model is loaded...")
if torch.cuda.is_available():
model = model.cuda()
print("[Info] Start Evaluation on dev and test set...")
dev_loader = get_loader(args, "dev" , tokenizer, datasets, unified_meta)
tst_loader = get_loader(args, "test" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
model.eval()
for d_eval in ["tst"]: #["dev", "tst"]:
f_w = open(os.path.join(args["output_dir"], "{}_results.txt".format(d_eval)), "w")
## Start evaluating on the test set
test_loss = 0
preds, labels = [], []
pbar = tqdm(locals()["{}_loader".format(d_eval)])
for d in pbar:
with torch.no_grad():
outputs = model(d)
test_loss += outputs["loss"]
preds += [item for item in outputs["pred"]]
labels += [item for item in outputs["label"]]
test_loss = test_loss / len(tst_loader)
results = model.evaluation(preds, labels)
print("{} Results: {}".format(d_eval, str(results)))
f_w.write(str(results))
f_w.close()
| 11,553 | 42.931559 | 114 | py |
Multi2WOZ | Multi2WOZ-main/downstream/tod_xlmr_pretraining.py | import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil
from typing import Tuple
import gzip
import shelve
import json
import math
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from concurrent.futures import ThreadPoolExecutor
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Sampler
from utils.utils_general import *
from utils.utils_multiwoz import *
from utils.utils_camrest676 import *
from utils.utils_woz import *
from utils.utils_smd import *
from utils.utils_frames import *
from utils.utils_msre2e import *
from utils.utils_taskmaster import *
from utils.utils_metalwoz import *
from utils.utils_schema import *
import gc
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertModel,
BertForMaskedLM,
BertTokenizer,
CamembertConfig,
CamembertForMaskedLM,
CamembertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPT2Config,
GPT2LMHeadModel,
GPT2Tokenizer,
OpenAIGPTConfig,
OpenAIGPTLMHeadModel,
OpenAIGPTTokenizer,
PreTrainedTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
XLMRobertaConfig,
XLMRobertaForMaskedLM,
XLMRobertaTokenizer,
get_linear_schedule_with_warmup,
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# Registry mapping the --model_type CLI value to its (config, model, tokenizer)
# classes. Note "bert-seq" exposes the bare encoder (BertModel) rather than a
# masked-LM head, and "xlmr" is the XLM-RoBERTa masked-LM used in this script.
MODEL_CLASSES = {
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
    "openai-gpt": (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "bert-seq": (BertConfig, BertModel, BertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "camembert": (CamembertConfig, CamembertForMaskedLM, CamembertTokenizer),
    "xlmr": (XLMRobertaConfig, XLMRobertaForMaskedLM, XLMRobertaTokenizer),
}
def set_seed(args):
    """Seed python, numpy and torch RNGs (plus CUDA when GPUs are in use) from args.seed."""
    seed_value = args.seed
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed_value)
    # CUDA keeps per-device generators; only seed them when GPUs are configured.
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed_value)
def _rotate_checkpoints(args, checkpoint_prefix, use_mtime=False):
if not args.save_total_limit:
return
if args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
if len(glob_checkpoints) <= args.save_total_limit:
return
ordering_and_checkpoint_path = []
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def mask_tokens(inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, args) -> Tuple[torch.Tensor, torch.Tensor]:
    """Prepare masked tokens inputs/labels for masked language modeling.

    Of the tokens chosen for masking (probability ``args.mlm_probability``):
    80% are replaced with the [MASK] token, 10% with a random vocab token,
    and 10% are left unchanged.  Special tokens and padding (token id 0) are
    never masked.  Returns ``(inputs, labels)`` where ``labels`` is -100
    everywhere except at masked positions (so only those contribute to loss).
    """
    # Work on CPU copies; ``labels`` keeps the original token ids.
    inputs = inputs.to("cpu")
    labels = inputs.clone()
    # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
    probability_matrix = torch.full(labels.shape, args.mlm_probability)
    special_tokens_mask = [
        tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
    ]
    # Zero out the masking probability at special-token positions ...
    probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
    # ... and at padding positions (padding token id = 0).
    inputs_pad_pos = (inputs == 0).cpu()
    probability_matrix.masked_fill_(inputs_pad_pos, value=0.0)
    masked_indices = torch.bernoulli(probability_matrix).bool()
    # NOTE(review): the try/except pairs below fall back to byte tensors,
    # presumably for older torch versions without bool-tensor indexing — confirm.
    try:
        labels[~masked_indices] = -100  # We only compute loss on masked tokens
    except:
        masked_indices = masked_indices.byte()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens
    # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
    try:
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    except:
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool().byte() & masked_indices
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    # 10% of the time, we replace masked input tokens with random word
    # (0.5 of the remaining 20% of masked positions = 10% overall).
    try:
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    except:
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool().byte() & masked_indices & ~indices_replaced
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    # NOTE(review): ``inputs`` was moved to CPU above, so this branch appears
    # unreachable (is_cuda is always False here) — confirm before relying on it.
    if inputs.is_cuda:
        indices_random = indices_random.to(args.device)
        random_words = random_words.to(args.device)
    inputs[indices_random] = random_words[indices_random]
    # The rest of the time (10% of the time) we keep the masked input tokens unchanged
    return inputs, labels
def mask_for_response_selection(batch, tokenizer, args, cand_uttr_sys_dict, others, set_max_resp_len=150):
    """Prepare (context, response) pairs for response contrastive learning (RCL).

    Each dialogue in ``batch["context"]`` is split at a randomly chosen system
    turn: tokens before the [SYS] marker become the context, tokens between
    [SYS] and the following [USR] marker become the gold response.  Extra
    negative responses (random, or k-means-cluster-based when
    ``args.negative_sampling_by_kmeans`` is set) are appended to the response
    pool.  Returns ``(input_contexts, input_responses, output_labels)`` where
    row i of the contexts matches row i of the responses (labels = arange).
    """
    inputs = batch["context"]
    inputs = inputs.to("cpu")
    batch_size = inputs.size(0)
    # NOTE(review): ``probability_matrix`` is never used below — looks like
    # leftover from mask_tokens; confirm before removing.
    probability_matrix = torch.full(inputs.shape, 1.0)
    # Token ids of the speaker markers (first sub-token of each marker).
    usr_token_idx = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(args.usr_token))[0]
    sys_token_idx = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(args.sys_token))[0]
    cand_uttr_sys = list(cand_uttr_sys_dict.keys())
    cand_uttr_sys_tokens = list(cand_uttr_sys_dict.values())
    ## Find positions of user and system tokens and split them into (context, repsonse) pairs
    last_sys_position, last_usr_position = [], []
    for bsz_i, batch_sample in enumerate(inputs):
        nb_sys_token = len((batch_sample == sys_token_idx).nonzero())
        nb_usr_token = len((batch_sample == usr_token_idx).nonzero())
        ## Randomly select a turn to split
        if nb_sys_token == 0 or nb_usr_token == 0:
            # No marker found: fall back to splitting the sequence in half.
            last_sys_position.append(len(batch_sample)//2)
            last_usr_position.append(len(batch_sample))
        else:
            # With >2 turns of each speaker, pick a random turn; otherwise use
            # the last occurrence (index -1).
            if nb_sys_token > 2 and nb_usr_token > 2:
                rand_pos = random.randint(1, min(nb_sys_token, nb_usr_token)-1)
            else:
                rand_pos = -1
            temp1 = (batch_sample == sys_token_idx).nonzero()[rand_pos][0].item()
            last_sys_position.append(temp1)
            temp2 = (batch_sample == usr_token_idx).nonzero()[rand_pos][0].item()
            if temp2 > temp1:
                last_usr_position.append(temp2)
            else:
                # [USR] precedes [SYS] at this turn index: take a short window
                # (10 tokens) after the [SYS] marker instead, clipped to length.
                if temp1 + 10 < len(batch_sample):
                    last_usr_position.append(temp1 + 10)
                else:
                    last_usr_position.append(len(batch_sample))
    last_usr_position = np.array(last_usr_position)
    last_sys_position = np.array(last_sys_position)
    max_last_sys_position = max(last_sys_position)
    # Longest response in the batch, capped at ``set_max_resp_len``.
    max_response_len = max(last_usr_position-last_sys_position) + 1
    max_response_len = max_response_len if max_response_len < set_max_resp_len else set_max_resp_len
    ## placeholders
    input_contexts = torch.zeros(batch_size, max_last_sys_position).long() #.to(args.device)
    input_responses = torch.zeros(batch_size, max_response_len).long() #.to(args.device)
    output_labels = torch.tensor(np.arange(batch_size)).long() #.to(args.device)
    ## assign response indexs by start and end position
    responses = []
    for bsz_i, (sys_pos, usr_pos) in enumerate(zip(last_sys_position, last_usr_position)):
        input_contexts[bsz_i, :sys_pos] = inputs[bsz_i, :sys_pos]
        input_responses[bsz_i, 0] = inputs[bsz_i, 0] ## CLS token
        # Whitespace-stripped response text, used as a key into the k-means maps.
        responses.append(tokenizer.decode(inputs[bsz_i, sys_pos+1:usr_pos]).replace(" ", ""))
        # Truncate responses that exceed the padded response length.
        s, e = (sys_pos, usr_pos) if usr_pos-sys_pos < max_response_len else (sys_pos, sys_pos+max_response_len-1)
        input_responses[bsz_i, 1:e-s+1] = inputs[bsz_i, s:e]
    ## Add additional negative samples. Either randomly select from candidate pool or choose by Kmeans.
    candidates_tokens = []
    if args.negative_sampling_by_kmeans:
        for ri, resp in enumerate(responses):
            if resp in others["ToD_BERT_SYS_UTTR_KMEANS"].keys():
                # Hard negatives: utterances from the same k-means cluster.
                cur_cluster = others["ToD_BERT_SYS_UTTR_KMEANS"][resp]
                candidates = others["KMEANS_to_SENTS"][cur_cluster]
                nb_selected = min(args.nb_addition_negresponse_per_sample, len(candidates)-1)
                start_pos = random.randint(0, len(candidates)-nb_selected-1)
                sampled_neg_resps = candidates[start_pos:start_pos+nb_selected]
                candidates_tokens += [cand_uttr_sys_dict[r] for r in sampled_neg_resps]
            else:
                # Response not in the cluster map: sample a random slice of candidates.
                start_pos = random.randint(0, len(cand_uttr_sys)-args.nb_addition_negresponse_per_sample-1)
                candidates_tokens += cand_uttr_sys_tokens[start_pos:start_pos+args.nb_addition_negresponse_per_sample]
    else:
        # Purely random negatives (with replacement) from the candidate pool.
        for i in range(args.nb_addition_negresponse_per_sample * batch_size):
            pos = random.randint(0, len(cand_uttr_sys_tokens)-1)
            candidates_tokens.append(cand_uttr_sys_tokens[pos])
    ## Padding
    input_responses_neg = torch.zeros(len(candidates_tokens), max_response_len).long()
    for i in range(len(candidates_tokens)):
        if len(candidates_tokens[i]) > max_response_len:
            input_responses_neg[i] = candidates_tokens[i][:max_response_len]
        else:
            input_responses_neg[i, :len(candidates_tokens[i])] = candidates_tokens[i]
    ## Add those negative responses to response selection pool
    input_responses = torch.cat([input_responses, input_responses_neg], 0)
    return input_contexts, input_responses, output_labels
def get_candidate_embeddings(uttr_sys_dict, tokenizer, model):
    """Encode every candidate system utterance with the model's encoder.

    ``uttr_sys_dict`` maps sentence text to its token-id tensor.  Returns a
    dict keyed by the whitespace-stripped sentence, each value holding the
    raw sentence ("sent") and its [CLS] embedding as a numpy array ("emb").
    (``tokenizer`` is accepted for interface symmetry but not used here.)
    """
    print("Start obtaining representations from model...")
    sent_to_emb = {}
    sentences = list(uttr_sys_dict.keys())
    token_tensors = list(uttr_sys_dict.values())
    chunk = 100
    for lo in tqdm(range(0, len(sentences), chunk)):
        hi = lo + chunk
        batch_sents = sentences[lo:hi]
        # Pad the batch to a rectangular id tensor (pad id 0).
        batch_ids = pad_sequence(token_tensors[lo:hi], batch_first=True, padding_value=0)
        if torch.cuda.is_available():
            batch_ids = batch_ids.cuda()
        with torch.no_grad():
            enc_out = model.roberta(input_ids=batch_ids, attention_mask=batch_ids > 0)
            cls_rep = enc_out[0][:, 0, :]  # [CLS] vector per sentence
        for row, sent in enumerate(batch_sents):
            sent_to_emb[sent.replace(" ", "")] = {
                "sent": sent,
                "emb": cls_rep[row, :].cpu().numpy(),
            }
    return sent_to_emb
def get_candidate_kmeans(args, uttr_sys_dict, tokenizer, model):
    """Cluster candidate system utterances with faiss k-means.

    Embeds every candidate via ``get_candidate_embeddings`` and trains a
    k-means model with ``args.nb_kmeans`` centroids.  Returns a pair:
    (stripped sentence -> cluster id, cluster id -> list of raw sentences).
    """
    import faiss
    print("Start computing kmeans with faiss...")
    sent_embs = get_candidate_embeddings(uttr_sys_dict, tokenizer, model)
    sent_to_cluster = {}
    cluster_to_sents = {c: [] for c in range(args.nb_kmeans)}
    matrix = np.array([entry["emb"] for entry in sent_embs.values()])
    km = faiss.Kmeans(matrix.shape[1], args.nb_kmeans, niter=20, nredo=5, verbose=True)
    km.train(matrix)
    # Nearest centroid per embedding (k=1 search against the trained index).
    _, assignments = km.index.search(matrix, 1)
    for row, stripped_sent in enumerate(sent_embs.keys()):
        cid = assignments[row][0]
        sent_to_cluster[stripped_sent] = cid
        cluster_to_sents[cid].append(sent_embs[stripped_sent]["sent"])
    return sent_to_cluster, cluster_to_sents
def train(args, trn_loader, dev_loader, model, tokenizer, cand_uttr_sys_dict, others):
    """Train the model.

    Runs MLM pretraining (optionally plus a response-contrastive loss when
    ``args.add_rs_loss`` is set) with gradient accumulation, optional fp16
    via GradScaler, TensorBoard logging, periodic dev evaluation,
    checkpoint rotation, and loss-based early stopping.  Returns
    ``(global_step, tr_loss / global_step)``.
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter("runs/"+args.output_dir.replace("/","-"))
    # Total optimizer steps: fixed by --max_steps, or derived from
    # epochs * (batches // accumulation).
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(trn_loader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(trn_loader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay);
    # bias and LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist (resume support)
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        scaler = torch.cuda.amp.GradScaler()
    print('n_gpu', args.n_gpu)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        #os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
        print('multi-gpu training:', args.n_gpu)
        model = torch.nn.DataParallel(model) #, device_ids=[0,1]
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    # Train!
    # print('BATCH', args.train_batch_size
    #         * args.gradient_accumulation_steps
    #         * (torch.distributed.get_world_size()))
    logger.info("***** Running training *****")
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Num batches = %d", len(trn_loader))
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    tr_loss, logging_loss = 0.0, 0.0
    loss_mlm, loss_rs = 0, 0
    patience, best_loss = 0, 1e10
    xeloss = torch.nn.CrossEntropyLoss()
    # NOTE(review): this unwraps the DataParallel/DDP wrapper set up above,
    # presumably so model.roberta / model.lm_head are reachable — confirm intent.
    model = model.module if hasattr(model, "module") else model
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args)  # Added here for reproducibility
    for _ in train_iterator:
        ## Calculate kmeans results in the beginning of each epoch
        if args.negative_sampling_by_kmeans:
            ToD_BERT_SYS_UTTR_KMEANS, KMEANS_to_SENTS = get_candidate_kmeans(args, cand_uttr_sys_dict, tokenizer, model)
            # Rebuild the train loader each epoch in the k-means setting.
            trn_loader = get_loader(vars(args), "train", tokenizer, others["datasets"], others["unified_meta"], "train")
        loss_arr, loss_mlm_arr, loss_rs_arr = [], [], []
        epoch_iterator = tqdm(trn_loader, disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            ## add response selection into pretraining
            if args.add_rs_loss:
                kmeans_others = {"ToD_BERT_SYS_UTTR_KMEANS":ToD_BERT_SYS_UTTR_KMEANS,
                                 "KMEANS_to_SENTS":KMEANS_to_SENTS} if args.negative_sampling_by_kmeans else {}
                ## Split dialogue into (context, response) pairs
                input_cont, input_resp, resp_label = mask_for_response_selection(batch,
                                                                                 tokenizer,
                                                                                 args,
                                                                                 cand_uttr_sys_dict,
                                                                                 kmeans_others)
                ## Mask context part for MLM loss
                input_cont, labels = mask_tokens(input_cont, tokenizer, args) if args.mlm else (input_cont, input_cont)
                ## Allocate tensors to (gpu) devices
                input_cont = input_cont.to(args.device)
                input_resp = input_resp.to(args.device)
                resp_label = resp_label.to(args.device)
                labels = labels.to(args.device)
                ## Encode the context part with BERT
                with torch.cuda.amp.autocast(enabled=True):
                    outputs = model.roberta(
                        input_cont,
                        attention_mask=input_cont>0,
                    )
                    sequence_output = outputs[0]
                    hid_cont = sequence_output[:, 0, :] ## CLS token
                    ## Calculate MLM loss for the context
                    prediction_scores = model.lm_head(sequence_output)
                    loss = xeloss(prediction_scores.view(-1, model.config.vocab_size), labels.view(-1))
                    loss_mlm = loss.item()
                    # Free context tensors early to reduce GPU memory pressure.
                    del input_cont, labels, sequence_output, prediction_scores, outputs
                    ## Encode the response part with BERT
                    outputs = model.roberta(
                        input_resp,
                        attention_mask=input_resp>0,
                    )
                    sequence_output = outputs[0]
                    hid_resp = sequence_output[:, 0, :]
                    ## Calculate RCL loss: in-batch dot-product scores between
                    ## context and response [CLS] vectors, diagonal is correct.
                    scores = torch.matmul(hid_cont, hid_resp.transpose(1, 0))
                    loss_rs = xeloss(scores, resp_label)
                    loss += loss_rs
                    loss_rs = loss_rs.item()
                    #print('loss_rs:', loss_rs)
                    del input_resp, resp_label, scores, sequence_output, hid_resp, outputs
            ## with only MLM loss
            else:
                inputs = batch["context"].clone()
                if args.mlm:
                    inputs, labels = mask_tokens(inputs, tokenizer, args)
                    inputs = inputs.to(args.device)
                    labels = labels.to(args.device)
                    outputs = model(inputs,
                                    labels=labels, #masked_lm_labels
                                    attention_mask=inputs>0)
                else:
                    # Plain LM: ignore padding (token id 0) in the loss.
                    labels = inputs.clone()
                    masked_indices = (labels == 0)
                    labels[masked_indices] = -100
                    outputs = model(inputs, labels=labels)
                loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
                loss_mlm = loss.item()
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss_arr.append(loss.item())
            loss_mlm_arr.append(loss_mlm)
            loss_rs_arr.append(loss_rs)
            #break
            if args.fp16:
                scaler.scale(loss).backward()
            else:
                loss.backward()
            ## Print loss
            epoch_iterator.set_description("Loss:{:.4f} MLM:{:.4f} RS:{:.4f}".format(np.mean(loss_arr),
                                                                                     np.mean(loss_mlm_arr),
                                                                                     np.mean(loss_rs_arr)))
            #break
            tr_loss += loss.item()
            del loss
            # Optimizer step only every ``gradient_accumulation_steps`` batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                print(step, args.gradient_accumulation_steps)
                if args.fp16:
                    scaler.unscale_(optimizer)
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                    scaler.step(optimizer)
                    scaler.update()
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                    optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # NOTE(review): scheduler.get_lr() is deprecated in newer
                    # torch in favor of get_last_lr() — confirm torch version.
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    if args.evaluate_during_training and args.n_gpu == 1:
                        results = evaluate(args, model, dev_loader, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    else:
                        # Without dev evaluation, fake an ever-improving loss
                        # so a checkpoint is always saved.
                        results = {}
                        results["loss"] = best_loss - 0.1  # always saving
                    if results["loss"] < best_loss:
                        patience = 0
                        best_loss = results["loss"]
                        checkpoint_prefix = "checkpoint"
                        # Save model checkpoint
                        output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        model_to_save = (
                            model.module if hasattr(model, "module") else model
                        )  # Take care of distributed/parallel training
                        model_to_save.save_pretrained(output_dir)
                        tokenizer.save_pretrained(output_dir)
                        torch.save(args, os.path.join(output_dir, "training_args.bin"))
                        logger.info("Saving model checkpoint to %s", output_dir)
                        _rotate_checkpoints(args, checkpoint_prefix)
                        torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                        torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                        logger.info("Saving optimizer and scheduler states to %s", output_dir)
                    else:
                        patience += 1
                        logger.info("Current patience: patience {}".format(patience))
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
            if patience > args.patience:
                logger.info("Ran out of patience...")
                break
        if (args.max_steps > 0 and global_step > args.max_steps) or patience > args.patience:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    #torch.cuda.empty_cache()
    return global_step, tr_loss / global_step
def evaluate(args, model, dev_loader, tokenizer, prefix=""):
    """Evaluate LM loss/perplexity on ``dev_loader``.

    Writes the metrics to ``<output_dir>/<prefix>/eval_results.txt`` and
    returns them as a dict with keys "perplexity" and "loss".
    """
    eval_output_dir = args.output_dir
    if args.local_rank in [-1, 0] and not os.path.exists(eval_output_dir):
        os.makedirs(eval_output_dir)
    eval_dataloader = dev_loader
    # Wrap for multi-gpu evaluation.
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataloader))
    logger.info(" Batch size = %d", args.eval_batch_size)
    total_loss = 0.0
    n_steps = 0
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        inputs = batch["context"].clone()
        if args.mlm:
            # BERT-style masking: labels are -100 except at masked positions.
            inputs, labels = mask_tokens(inputs, tokenizer, args)
        else:
            # Plain LM: predict every token, ignoring padding (id 0).
            labels = inputs.clone()
            labels[labels == 0] = -100
        inputs = inputs.to(args.device)
        labels = labels.to(args.device)
        with torch.no_grad():
            if args.mlm:
                outputs = model(inputs,
                                labels=labels, #masked_lm_labels
                                attention_mask=inputs>0)
            else:
                outputs = model(inputs, labels=labels)
            total_loss += outputs[0].mean().item()
        n_steps += 1
    eval_loss = total_loss / n_steps
    perplexity = torch.exp(torch.tensor(eval_loss))
    result = {"perplexity": perplexity, "loss": eval_loss}
    output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results {} *****".format(prefix))
        for key in sorted(result.keys()):
            logger.info(" %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))
    return result
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--model_type", default="bert", type=str, help="The model architecture to be fine-tuned.")
parser.add_argument(
"--model_name_or_path",
default="bert-base-uncased",
type=str,
help="The model checkpoint for weights initialization.",
)
parser.add_argument(
"--mlm", action="store_true", help="Train with masked-language modeling loss instead of language modeling."
)
parser.add_argument(
"--mlm_probability", type=float, default=0.15, help="Ratio of tokens to mask for masked language modeling loss"
)
parser.add_argument(
"--config_name",
default="",
type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",
)
parser.add_argument(
"--block_size",
default=-1,
type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--per_gpu_train_batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=4, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=300, type=int, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=100, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=100, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--save_total_limit",
type=int,
default=1,
help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
## My add
parser.add_argument(
'-dpath','--data_path',
help='path to dataset folder',
required=False,
default='/export/home/dialog_datasets',
type=str)
parser.add_argument(
'-ds','--dataset',
help='which dataset to be used for training.',
required=False,
default='["multiwoz", "camrest676", "woz", "smd", "frames", "msre2e", "taskmaster", "metalwoz", "schema"]',
type=str)
parser.add_argument(
'-hds','--holdout_dataset',
help='which dataset to be held out as dev and test set.',
required=False,
default='["multiwoz"]',
type=str)
parser.add_argument(
'-task','--task',
help='task in ["nlu", "dst", "dm", "nlg", "e2e"] to decide which dataloader to use',
default="usdl",
required=False)
parser.add_argument(
'--usr_token',
help='use to identify user utterance',
required=False,
default="[USR]",
type=str)
parser.add_argument(
'--sys_token',
help='use to identify system response',
required=False,
default="[SYS]",
type=str)
parser.add_argument(
"--add_rs_loss",
action="store_true",
help="whether to add RCL loss during training")
parser.add_argument(
"--nb_addition_negresponse_per_sample",
default=0,
type=int,
help="number of negative responses per sample added to the in-batch negative samples")
parser.add_argument(
"--negative_sampling_by_kmeans",
action="store_true",
help="whether use kmeans to select negative samples or select randomly",)
parser.add_argument(
"--nb_kmeans",
default=500,
type=int,
help="number of kmeans clusters")
parser.add_argument(
"--patience",
type=int,
default=15,
help="waiting to earlystop")
## data reading related setting (can be ignored here)
parser.add_argument(
'--max_line', help='maximum line for reading data (for quick testing)', required=False, default=None, type=int)
parser.add_argument(
'--example_type', help='type in ["turn", "dial"] for data reading', required=False, default="turn")
parser.add_argument(
"--train_data_ratio", default=1.0, type=float, help="")
parser.add_argument(
"--ratio_by_random", action="store_true", help="read data by random with a defined ratio")
parser.add_argument(
"--domain_act", action="store_true", help="use domain_act for mwoz")
parser.add_argument(
'-task_name', '--task_name', help='', required=False, default="")
parser.add_argument(
"--only_last_turn", action="store_true", help="")
parser.add_argument(
"--oracle_domain", action="store_true", help="")
parser.add_argument(
"--ontology_version", default="", type=str, help="['', '1.0']")
parser.add_argument(
"--dstlm", action="store_true", help="")
parser.add_argument(
"--max_seq_length", default=512, type=int, help="")
parser.add_argument(
"--nb_shots", default=-1, type=int, help="")
parser.add_argument(
"--domain", default="all", type=str, help="")
args = parser.parse_args()
args_dict = vars(args)
if args.model_type in ["bert", "roberta", "distilbert", "camembert"] and not args.mlm:
raise ValueError(
"BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling)."
)
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
)
#config.output_hidden_states = True
config.gradient_checkpointing=True
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.block_size <= 0:
args.block_size = (
tokenizer.max_len_single_sentence
) # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model.to(args.device)
# Add new tokens to the vocabulary and embeddings of our model
tokenizer.add_tokens([args.sys_token, args.usr_token])
model.resize_token_embeddings(len(tokenizer))
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
print('Start training...')
if args.do_train:
# Barrier to make sure only the first process in distributed training process the dataset,
# and the others will use the cache
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
## Read datasets and create global set of candidate responses
datasets = {}
cand_uttr_sys = set()
for ds_name in ast.literal_eval(args.dataset):
data_trn, data_dev, data_tst, data_meta = globals()["prepare_data_{}".format(ds_name)](args_dict)
# held-out mwoz for now
if ds_name in ast.literal_eval(args.holdout_dataset):
datasets[ds_name] = {"train": data_trn, "dev":data_dev, "test": data_tst, "meta":data_meta}
else:
datasets[ds_name] = {"train": data_trn + data_dev + data_tst, "dev":[], "test": [], "meta":data_meta}
for d in datasets[ds_name]["train"]:
cand_uttr_sys.add(d["turn_sys"])
cand_uttr_sys.update(set([sent for si, sent in enumerate(d["dialog_history"]) if si%2==0]))
unified_meta = get_unified_meta(datasets)
## process candidate responses
if args.nb_addition_negresponse_per_sample > 0:
cand_uttr_sys = list(cand_uttr_sys)
cand_uttr_sys = [s.lower() for s in cand_uttr_sys if len(s.split(" ")) <= 100] # remove too long responses
cand_uttr_sys_tokens = []
for cand in tqdm(cand_uttr_sys):
cand_ids = tokenizer.tokenize("[CLS] [SYS]") + tokenizer.tokenize(cand)
cand_ids = torch.tensor(tokenizer.convert_tokens_to_ids(cand_ids))
cand_uttr_sys_tokens.append(cand_ids)
cand_uttr_sys_dict = {a:b for a, b in zip(cand_uttr_sys, cand_uttr_sys_tokens)}
else:
cand_uttr_sys_dict = {}
## batch size
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
args_dict["batch_size"] = args.train_batch_size
args_dict["eval_batch_size"] = args.eval_batch_size
print(os.environ["CUDA_VISIBLE_DEVICES"])
print(args.train_batch_size, args.eval_batch_size, args.n_gpu, args.device)
## Create Dataloader
trn_loader = get_loader(args_dict, "train", tokenizer, datasets, unified_meta, "train")
dev_loader = get_loader(args_dict, "dev" , tokenizer, datasets, unified_meta, "dev")
## additional information for negative sampling
others = {}
if args.negative_sampling_by_kmeans:
others["datasets"] = datasets
others["unified_meta"] = unified_meta
if args.local_rank == 0:
torch.distributed.barrier()
global_step, tr_loss = train(args, trn_loader, dev_loader, model, tokenizer, cand_uttr_sys_dict, others)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, dev_loader, tokenizer, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
print(results)
# Script entry point: parse CLI arguments, set up (distributed) training, then
# run the training and/or evaluation pipeline defined in main().
if __name__ == "__main__":
    main()
| 44,684 | 42.595122 | 141 | py |
Multi2WOZ | Multi2WOZ-main/downstream/main_cl-continue.py | from tqdm import tqdm
import torch.nn as nn
import ast
import glob
import numpy as np
import copy
# utils
from utils.config import *
from utils.utils_general import *
from utils.utils_multiwoz_cl import *
from utils.utils_oos_intent import *
from utils.utils_universal_act import *
# models
from models.BERT_DST_Picklist import *
from models.dual_encoder_ranking import *
# Huggingface models
from transformers import *
import logging
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
## model selection
# Map each supported --model_type string to its Huggingface
# (model class, tokenizer class, config class) triple.
# NOTE: "todbert"/"todgpt2" reuse the plain BERT/GPT2 classes — only the
# pretrained checkpoint passed via model_name_or_path differs.
MODELS = {"bert": (BertModel, BertTokenizer, BertConfig),
          "todbert": (BertModel, BertTokenizer, BertConfig),
          "gpt2": (GPT2Model, GPT2Tokenizer, GPT2Config),
          "todgpt2": (GPT2Model, GPT2Tokenizer, GPT2Config),
          "dialogpt": (AutoModelWithLMHead, AutoTokenizer, GPT2Config),
          "albert": (AlbertModel, AlbertTokenizer, AlbertConfig),
          "roberta": (RobertaModel, RobertaTokenizer, RobertaConfig),
          "distilbert": (DistilBertModel, DistilBertTokenizer, DistilBertConfig),
          "electra": (ElectraModel, ElectraTokenizer, ElectraConfig),
          "xlmroberta": (XLMRobertaModel, XLMRobertaTokenizer, XLMRobertaConfig)}
## Fix torch random seed
if args["fix_rand_seed"]:
    torch.manual_seed(args["rand_seed"])
#logging.info("Running Tgt Language: {}".format(args["tgt_lang"]))
## Reading data and create data loaders
# Each dataset name listed in --dataset resolves (via globals()) to a
# prepare_data_<name>() helper returning (train, dev, test, meta) splits.
datasets = {}
for ds_name in ast.literal_eval(args["dataset"]):
    data_trn, data_dev, data_tst, data_meta = globals()["prepare_data_{}".format(ds_name)](args)
    datasets[ds_name] = {"train": data_trn, "dev":data_dev, "test": data_tst, "meta":data_meta}
unified_meta = get_unified_meta(datasets)
if "resp_cand_trn" not in unified_meta.keys(): unified_meta["resp_cand_trn"] = {}
args["unified_meta"] = unified_meta
## Create vocab and model class
# Look up the encoder/tokenizer/config classes for the requested model type and
# stash them (plus the instantiated tokenizer/config) in the shared args dict,
# which the model classes read in their constructors.
args["model_type"] = args["model_type"].lower()
model_class, tokenizer_class, config_class = MODELS[args["model_type"]]
tokenizer = tokenizer_class.from_pretrained(args["model_name_or_path"], cache_dir=args["cache_dir"])
args["model_class"] = model_class
args["tokenizer"] = tokenizer
if args["model_name_or_path"]:
    config = config_class.from_pretrained(args["model_name_or_path"], cache_dir=args["cache_dir"])
else:
    config = config_class()
args["config"] = config
args["num_labels"] = unified_meta["num_labels"]
# NOTE(review): ds_name / data_trn / data_dev / data_tst here are leftovers
# from the last iteration of the loop above, so this rewiring is only
# well-defined when --dataset names a single dataset — confirm with callers.
if args["continue_ft"]:
    datasets[ds_name] = {"train": data_dev, "dev":data_dev, "test": data_tst, "meta":data_meta} #Our training becomes dev set, ignore dev, and eval on test set
else:
    datasets[ds_name] = {"train": data_trn, "dev":data_dev, "test": data_tst, "meta":data_meta}
print(args["continue_model_path"])
## Training and Testing Loop
# Training branch: run args["nb_runs"] independent runs (one fixed seed each),
# each with periodic dev evaluation and early stopping, then evaluate the best
# (or last, in continue_ft mode) checkpoint on the test set args["nb_evals"]
# times. The non-training branch only loads a checkpoint and evaluates it.
if args["do_train"]:
    result_runs = []
    output_dir_origin = str(args["output_dir"])
    ## Setup logger
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                        datefmt='%m-%d %H:%M',
                        filename=os.path.join(args["output_dir"], "train.log"),
                        filemode='w')
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    logging.info("Num_labels: {}".format(args["num_labels"]))
    ## training loop
    for run in range(args["nb_runs"]):
        ## Setup random seed and output dir
        rand_seed = SEEDS[run]
        if args["fix_rand_seed"]:
            torch.manual_seed(rand_seed)
            args["rand_seed"] = rand_seed
        args["output_dir"] = os.path.join(output_dir_origin, "run{}".format(run))
        os.makedirs(args["output_dir"], exist_ok=False)
        logging.info("Running Random Seed: {}".format(rand_seed))
        ## Loading model
        # args['my_model'] is the class name of the task model (resolved via
        # globals()); its constructor builds encoder + optimizer from args.
        model = globals()[args['my_model']](args)
        print(args["continue_model_path"])
        if args['continue_ft']:
            print("MODEL {} LOADED".format(args["continue_model_path"]))
            if torch.cuda.is_available():
                model.load_state_dict(torch.load(args["continue_model_path"]))
            else:
                model.load_state_dict(torch.load(args["continue_model_path"], lambda storage, loc: storage))
        if torch.cuda.is_available(): model = model.cuda()
        logging.info("Done loading model...")
        ## Create Dataloader
        trn_loader = get_loader(args, "train", tokenizer, datasets, unified_meta) # here will be dev set
        dev_loader = get_loader(args, "dev" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
        tst_loader = get_loader(args, "test" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
        logging.info("Train: {}, Dev: {}, Test: {}".format(len(trn_loader), len(dev_loader), len(tst_loader)))
        logging.info("Epochs: {}".format(args["epoch"]))
        logging.info("Continue model path: {}".format(args["continue_model_path"]))
        ## Create TF Writer
        tb_writer = SummaryWriter(comment=args["output_dir"].replace("/", "-"))
        # Start training process with early stopping
        loss_best, acc_best, cnt, train_step = 1e10, -1, 0, 0
        try:
            for epoch in range(args["epoch"]):
                logging.info("Epoch:{}".format(epoch+1))
                train_loss = 0
                pbar = tqdm(trn_loader)
                for i, d in enumerate(pbar):
                    model.train()
                    # The models in this repo run backward()/optimizer.step()
                    # inside forward() and return outputs["loss"] as a float.
                    outputs = model(d)
                    train_loss += outputs["loss"]
                    train_step += 1
                    pbar.set_description("Training Loss: {:.4f}".format(train_loss/(i+1)))
                    ## Dev Evaluation
                    # Evaluate every eval_by_step steps, or once per epoch
                    # (at the last batch) when eval_by_step == -1.
                    if (train_step % args["eval_by_step"] == 0 and args["eval_by_step"] != -1) or \
                        (i == len(pbar)-1 and args["eval_by_step"] == -1):
                        model.eval()
                        dev_loss = 0
                        preds, labels = [], []
                        ppbar = tqdm(dev_loader)
                        for d in ppbar:
                            with torch.no_grad():
                                outputs = model(d)
                            #print(outputs)
                            dev_loss += outputs["loss"]
                            preds += [item for item in outputs["pred"]]
                            labels += [item for item in outputs["label"]]
                        dev_loss = dev_loss / len(dev_loader)
                        results = model.evaluation(preds, labels)
                        dev_acc = results[args["earlystop"]] if args["earlystop"] != "loss" else dev_loss #joint_accuracy
                        ## write to tensorboard
                        tb_writer.add_scalar("train_loss", train_loss/(i+1), train_step)
                        tb_writer.add_scalar("eval_loss", dev_loss, train_step)
                        tb_writer.add_scalar("eval_{}".format(args["earlystop"]), dev_acc, train_step)
                        # In continue-finetuning mode every dev evaluation
                        # unconditionally refreshes the saved checkpoint — no
                        # early-stopping criterion is applied.
                        if args["continue_ft"]:
                            loss_best = dev_loss
                            acc_best = dev_acc
                            cnt = 0 # reset
                            if args["not_save_model"]:
                                model_clone = globals()[args['my_model']](args)
                                model_clone.load_state_dict(copy.deepcopy(model.state_dict()))
                            else:
                                output_model_file = os.path.join(args["output_dir"], "pytorch_model.bin")
                                if args["n_gpu"] == 1:
                                    torch.save(model.state_dict(), output_model_file)
                                else:
                                    torch.save(model.module.state_dict(), output_model_file)
                                logging.info("[Info] Model saved at epoch {} step {}".format(epoch, train_step))
                        else:
                            # Standard early stopping: keep the checkpoint that
                            # minimizes dev loss (earlystop == "loss") or
                            # maximizes the chosen dev metric.
                            if (dev_loss < loss_best and args["earlystop"] == "loss") or \
                                (dev_acc > acc_best and args["earlystop"] != "loss"):
                                loss_best = dev_loss
                                acc_best = dev_acc
                                cnt = 0 # reset
                                if args["not_save_model"]:
                                    model_clone = globals()[args['my_model']](args)
                                    model_clone.load_state_dict(copy.deepcopy(model.state_dict()))
                                else:
                                    output_model_file = os.path.join(args["output_dir"], "pytorch_model.bin")
                                    if args["n_gpu"] == 1:
                                        torch.save(model.state_dict(), output_model_file)
                                    else:
                                        torch.save(model.module.state_dict(), output_model_file)
                                    logging.info("[Info] Model saved at epoch {} step {}".format(epoch, train_step))
                            else:
                                cnt += 1
                                logging.info("[Info] Early stop count: {}/{}...".format(cnt, args["patience"]))
                            if cnt > args["patience"]:
                                logging.info("Ran out of patient, early stop...")
                                break
                            logging.info("Trn loss {:.4f}, Dev loss {:.4f}, Dev {} {:.4f}".format(train_loss/(i+1),
                                                                                                  dev_loss,
                                                                                                  args["earlystop"],
                                                                                                  dev_acc))
                # Propagate the early stop out of the epoch loop as well.
                if cnt > args["patience"]:
                    tb_writer.close()
                    break
                # continue_ft also snapshots the model at the end of each epoch.
                if args["continue_ft"]:
                    if args["not_save_model"]:
                        model_clone = globals()[args['my_model']](args)
                        model_clone.load_state_dict(copy.deepcopy(model.state_dict()))
                    else:
                        output_model_file = os.path.join(args["output_dir"], "pytorch_model.bin")
                        if args["n_gpu"] == 1:
                            torch.save(model.state_dict(), output_model_file)
                        else:
                            torch.save(model.module.state_dict(), output_model_file)
                        logging.info("[Info] Model saved at epoch {} step {}".format(epoch, train_step))
        except KeyboardInterrupt:
            logging.info("[Warning] Earlystop by KeyboardInterrupt")
        ## Load the best model
        # Reload the best weights either from the in-memory clone
        # (not_save_model) or from the checkpoint on disk.
        model = globals()[args['my_model']](args)
        if args["not_save_model"]:
            model.load_state_dict(copy.deepcopy(model_clone.state_dict()))
        else:
            # Start evaluating on the test set
            if torch.cuda.is_available():
                model.load_state_dict(torch.load(output_model_file))
            else:
                model.load_state_dict(torch.load(output_model_file, lambda storage, loc: storage))
        if torch.cuda.is_available():
            model = model.cuda()
        model.eval()
        ## Run test set evaluation
        # nb_evals > 1 is meaningful when the test loader shuffles (e.g. the
        # response-selection task) so each pass draws different batches.
        pbar = tqdm(tst_loader)
        for nb_eval in range(args["nb_evals"]):
            test_loss = 0
            preds, labels = [], []
            for d in pbar:
                with torch.no_grad():
                    outputs = model(d)
                test_loss += outputs["loss"]
                preds += [item for item in outputs["pred"]]
                labels += [item for item in outputs["label"]]
            test_loss = test_loss / len(tst_loader)
            results = model.evaluation(preds, labels)
            result_runs.append(results)
            logging.info("[{}] Test Results: ".format(nb_eval) + str(results))
    ## Average results over runs
    if args["nb_runs"] > 1:
        f_out = open(os.path.join(output_dir_origin, "eval_results_multi-runs.txt"), "w")
        f_out.write("Average over {} runs and {} evals \n".format(args["nb_runs"], args["nb_evals"]))
        for key in results.keys():
            mean = np.mean([r[key] for r in result_runs])
            std = np.std([r[key] for r in result_runs])
            f_out.write("{}: mean {} std {} \n".format(key, mean, std))
        f_out.close()
else:
    ## Load Model
    # Evaluation-only branch: optionally restore weights from --load_path,
    # then score the chosen split(s) and dump the metrics to a text file.
    print("[Info] Loading model from {}".format(args['my_model']))
    model = globals()[args['my_model']](args)
    if args["load_path"]:
        print("MODEL {} LOADED".format(args["load_path"]))
        if torch.cuda.is_available():
            model.load_state_dict(torch.load(args["load_path"]))
        else:
            model.load_state_dict(torch.load(args["load_path"], lambda storage, loc: storage))
    else:
        print("[WARNING] No trained model is loaded...")
    if torch.cuda.is_available():
        model = model.cuda()
    print("[Info] Start Evaluation on dev and test set...")
    dev_loader = get_loader(args, "dev" , tokenizer, datasets, unified_meta)
    tst_loader = get_loader(args, "test" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
    model.eval()
    for d_eval in ["tst"]: #["dev", "tst"]:
        f_w = open(os.path.join(args["output_dir"], "{}_results.txt".format(d_eval)), "w")
        ## Start evaluating on the test set
        test_loss = 0
        preds, labels = [], []
        # d_eval selects the matching *_loader variable by name.
        pbar = tqdm(locals()["{}_loader".format(d_eval)])
        for d in pbar:
            with torch.no_grad():
                outputs = model(d)
            test_loss += outputs["loss"]
            preds += [item for item in outputs["pred"]]
            labels += [item for item in outputs["label"]]
        # NOTE(review): loss is always normalized by len(tst_loader), which is
        # only correct while d_eval == "tst" — verify before re-enabling "dev".
        test_loss = test_loss / len(tst_loader)
        results = model.evaluation(preds, labels)
        print("{} Results: {}".format(d_eval, str(results)))
        f_w.write(str(results))
        f_w.close()
| 14,594 | 46.386364 | 159 | py |
Multi2WOZ | Multi2WOZ-main/downstream/models/dual_encoder_ranking.py | import os.path
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.nn import CrossEntropyLoss
from torch.nn import CosineEmbeddingLoss
from sklearn.metrics import f1_score #, average_precision_score
import numpy as np
from transformers import *
import logging
class dual_encoder_ranking(nn.Module):
    """Bi-encoder response-selection model.

    Encodes the dialogue context and each candidate response separately with
    the same pretrained encoder, scores every context/response pair in the
    batch by dot product, and trains with in-batch negatives: the i-th
    response is the positive for the i-th context (cross-entropy over the
    batch-sized score matrix). The optimizer lives inside the module and the
    backward/step happens in forward() while self.training is True.
    """
    def __init__(self, args): #, num_labels, device):
        """Build the shared encoder and its AdamW optimizer from the args dict."""
        super(dual_encoder_ranking, self).__init__()
        self.args = args
        self.xeloss = nn.CrossEntropyLoss()
        self.n_gpu = args["n_gpu"]
        ### Utterance Encoder
        self.utterance_encoder = args["model_class"].from_pretrained(self.args["model_name_or_path"], cache_dir = args["cache_dir"])
        if self.args["fix_encoder"]:
            # Freeze the encoder entirely; only non-frozen params reach AdamW below.
            for p in self.utterance_encoder.parameters():
                p.requires_grad = False
        ## Prepare Optimizer
        def get_optimizer_grouped_parameters(model):
            # Standard BERT recipe: no weight decay on biases and LayerNorm weights.
            param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01,
                 'lr': args["learning_rate"]},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,
                 'lr': args["learning_rate"]},
            ]
            return optimizer_grouped_parameters
        if self.n_gpu == 1:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(self)
        else:
            # Under DataParallel the parameters live on self.module.
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(self.module)
        self.optimizer = AdamW(optimizer_grouped_parameters,
                               lr=args["learning_rate"],)
                               #warmup=args["warmup_proportion"],
                               #t_total=t_total)
    def optimize(self):
        """Backprop the loss stored by forward(), clip gradients, and step."""
        self.loss_grad.backward()
        clip_norm = torch.nn.utils.clip_grad_norm_(self.parameters(), self.args["grad_clip"])
        self.optimizer.step()
    def forward(self, data):
        #input_ids, input_len, labels=None, n_gpu=1, target_slot=None):
        """Score all context/response pairs in the batch.

        data["context"] and data["response"] are aligned (batch, seq) id
        tensors; token id 0 is treated as padding for the attention mask.
        Returns {"loss": float, "pred": ascending argsort of scores per
        context, "label": arange(batch)} — the gold index of row i is i.
        """
        self.optimizer.zero_grad()
        batch_size = data["context"].size(0)
        #max_seq_len = 256
        # Encode in chunks of `interval` rows, presumably to bound encoder
        # memory; chunk outputs are parked on CPU and concatenated below.
        interval = 25
        start_list = list(np.arange(0, batch_size, interval))
        end_list = start_list[1:] + [None]
        context_outputs, response_outputs = [], []
        #logging.info("start: {}".format(start_list))
        #logging.info("end: {}".format(end_list))
        for start, end in zip(start_list, end_list):
            #logging.info("{}:{}".format(start, end))
            inputs_con = {"input_ids": data["context"][start:end],
                          "attention_mask": (data["context"][start:end] > 0).long()}
            inputs_res = {"input_ids": data["response"][start:end],
                          "attention_mask": (data["response"][start:end] > 0).long()}
            #print(inputs_con, inputs_res)
            if "bert" in self.args["model_type"]:
                context_output = self.utterance_encoder(**inputs_con)[1] #hidden_state, pooler_output
                response_output = self.utterance_encoder(**inputs_res)[1]#hidden_state, pooler_output
            elif self.args["model_type"] == "gpt2":
                # GPT2 has no pooler; mean-pool the last hidden states instead.
                context_output = self.utterance_encoder(**inputs_con)[0].mean(1)
                response_output = self.utterance_encoder(**inputs_res)[0].mean(1)
            elif self.args["model_type"] == "dialogpt":
                transformer_outputs = self.utterance_encoder.transformer(**inputs_con)
                context_output = transformer_outputs[0].mean(1)
                transformer_outputs = self.utterance_encoder.transformer(**inputs_res)
                response_output = transformer_outputs[0].mean(1)
            # print(self.utterance_encoder(**inputs_con))
            # print(self.utterance_encoder(**inputs_res))
            context_outputs.append(context_output.cpu())
            response_outputs.append(response_output.cpu())
        # evaluation for k-to-100
        # On a short final eval batch, pad the candidate pool back up to
        # eval_batch_size with responses cached from the previous eval batch.
        # NOTE(review): assumes a full-size eval batch ran earlier in the same
        # evaluation pass so self.final_response_output exists — confirm.
        if (not self.training) and (batch_size < self.args["eval_batch_size"]):
            response_outputs.append(self.final_response_output[:self.args["eval_batch_size"]-batch_size, :])
        final_context_output = torch.cat(context_outputs, 0)
        final_response_output = torch.cat(response_outputs, 0)
        if torch.cuda.is_available():
            final_context_output = final_context_output.cuda()
            final_response_output = final_response_output.cuda()
        if (not self.training):
            self.final_response_output = final_response_output.cpu()
        # mat
        logits = torch.matmul(final_context_output, final_response_output.transpose(1, 0))
        # loss
        labels = torch.tensor(np.arange(batch_size))
        if torch.cuda.is_available(): labels = labels.cuda()
        loss = self.xeloss(logits, labels)
        if self.training:
            self.loss_grad = loss
            self.optimize()
        predictions = np.argsort(logits.detach().cpu().numpy(), axis=1) #torch.argmax(logits, -1)
        outputs = {"loss":loss.item(),
                   "pred":predictions,
                   "label":np.arange(batch_size)}
        return outputs
    def evaluation(self, preds, labels):
        """Compute recall@k (k in 1/3/5/10) from ascending score argsorts.

        Each row of `preds` is an ascending argsort of candidate scores, so
        the top-k candidates are its last k columns.
        """
        assert len(preds) == len(labels)
        preds = np.array(preds)
        labels = np.array(labels)
        def _recall_topk(preds_top10, labels, k):
            # Fraction of rows whose gold index appears among the k best-scored.
            preds = preds_top10[:, -k:]
            acc = 0
            for li, label in enumerate(labels):
                if label in preds[li]: acc += 1
            acc = acc / len(labels)
            return acc
        results = {"top-1": _recall_topk(preds, labels, 1),
                   "top-3": _recall_topk(preds, labels, 3),
                   "top-5": _recall_topk(preds, labels, 5),
                   "top-10": _recall_topk(preds, labels, 10)}
        print(results)
        return results
| 6,321 | 40.320261 | 132 | py |
Multi2WOZ | Multi2WOZ-main/downstream/models/BERT_DST_Picklist.py | import os.path
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn import CosineEmbeddingLoss
import numpy as np
from transformers import *
def _gelu(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initialy created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BeliefTracker(nn.Module):
    """Picklist-style dialogue state tracker.

    For every slot, the tracker scores the (frozen, pre-encoded) candidate
    values of that slot against the encoded dialogue context and predicts the
    best value id via per-slot cross-entropy. As elsewhere in this repo, the
    optimizer is owned by the module and backward/step runs inside forward()
    during training.
    """
    def __init__(self, args):
        """Build the trainable context encoder, the frozen slot-value encoder,
        the per-slot scoring heads, and the AdamW optimizer from the args dict."""
        super(BeliefTracker, self).__init__()
        self.args = args
        self.n_gpu = args["n_gpu"]
        self.hidden_dim = args["hdd_size"]
        self.rnn_num_layers = args["num_rnn_layers"]
        self.zero_init_rnn = args["zero_init_rnn"]
        self.num_direct = 2 if self.args["bidirect"] else 1
        # unified_meta["slots"] maps slot name -> {value string: value id}.
        self.num_labels = [len(v) for k, v in args["unified_meta"]["slots"].items()]
        self.num_slots = len(self.num_labels)
        self.tokenizer = args["tokenizer"]
        self.slots = [k for k, v in self.args["unified_meta"]["slots"].items()]
        self.slot_value2id_dict = self.args["unified_meta"]["slots"]
        # Inverse mapping per slot: value id -> value string (used in eval).
        self.slot_id2value_dict = {}
        for k, v in self.slot_value2id_dict.items():
            self.slot_id2value_dict[k] = {vv: kk for kk, vv in v.items()}
        #print("self.num_slots", self.num_slots)
        ### Utterance Encoder
        self.utterance_encoder = args["model_class"].from_pretrained(self.args["model_name_or_path"], cache_dir = self.args["cache_dir"])
        self.bert_output_dim = args["config"].hidden_size
        #self.hidden_dropout_prob = self.utterance_encoder.config.hidden_dropout_prob
        if self.args["fix_encoder"]:
            print("[Info] Utterance Encoder does not requires grad...")
            for p in self.utterance_encoder.parameters():
                p.requires_grad = False
        ### slot, slot-value Encoder (not trainable)
        # Second copy of the encoder, always frozen: only used once in
        # initialize_slot_value_lookup() to embed candidate value strings.
        self.sv_encoder = args["model_class"].from_pretrained(self.args["model_name_or_path"], cache_dir = self.args["cache_dir"])
        print("[Info] SV Encoder does not requires grad...")
        for p in self.sv_encoder.parameters():
            p.requires_grad = False
        #self.slot_lookup = nn.Embedding(self.num_slots, self.bert_output_dim)
        # Placeholder embeddings; overwritten with encoded values in
        # initialize_slot_value_lookup().
        self.value_lookup = nn.ModuleList([nn.Embedding(num_label, self.bert_output_dim) for num_label in self.num_labels])
        ### RNN Belief Tracker
        #self.nbt = None
        #self.linear = nn.Linear(self.hidden_dim, self.bert_output_dim)
        #self.layer_norm = nn.LayerNorm(self.bert_output_dim)
        ### Classifier
        self.nll = CrossEntropyLoss(ignore_index=-1)
        ### Etc.
        #self.dropout = nn.Dropout(self.hidden_dropout_prob)
        ### My Add
        # Per-slot scoring MLP: project context (W_1), concatenate with each
        # value embedding, mix (W_2), and reduce to a scalar score (W_3).
        self.project_W_1 = nn.ModuleList([nn.Linear(self.bert_output_dim, self.bert_output_dim) \
                                          for _ in range(self.num_slots)])
        self.project_W_2 = nn.ModuleList([nn.Linear(2*self.bert_output_dim, self.bert_output_dim) \
                                          for _ in range(self.num_slots)])
        self.project_W_3 = nn.ModuleList([nn.Linear(self.bert_output_dim, 1) \
                                          for _ in range(self.num_slots)])
        if self.args["gate_supervision_for_dst"]:
            self.gate_classifier = nn.Linear(self.bert_output_dim, 2)
        # BERT-style tokenizers use [CLS]/[SEP]; GPT-style use bos/eos.
        self.start_token = self.tokenizer.cls_token if "bert" in self.args["model_type"] else self.tokenizer.bos_token
        self.sep_token = self.tokenizer.sep_token if "bert" in self.args["model_type"] else self.tokenizer.eos_token
        ## Prepare Optimizer
        def get_optimizer_grouped_parameters(model):
            # Standard BERT recipe: no weight decay on biases and LayerNorm weights.
            param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01,
                 'lr': args["learning_rate"]},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,
                 'lr': args["learning_rate"]},
            ]
            return optimizer_grouped_parameters
        if self.n_gpu == 1:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(self)
        else:
            # Under DataParallel the parameters live on self.module.
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(self.module)
        self.optimizer = AdamW(optimizer_grouped_parameters,
                               lr=args["learning_rate"],)
                               #warmup=args["warmup_proportion"])
                               #t_total=t_total)
        self.initialize_slot_value_lookup()
    def optimize(self):
        """Backprop the loss stored by forward(), clip gradients, and step."""
        self.loss_grad.backward()
        clip_norm = torch.nn.utils.clip_grad_norm_(self.parameters(), self.args["grad_clip"])
        self.optimizer.step()
    def initialize_slot_value_lookup(self, max_seq_length=32):
        """Encode every candidate value string with the frozen sv_encoder and
        freeze the results into self.value_lookup embeddings.

        Each value is tokenized as "<start> value <sep>", padded with id 0 to
        max_seq_length; values longer than that would fail the assert below.
        """
        self.sv_encoder.eval()
        label_ids = []
        for dslot, value_dict in self.args["unified_meta"]["slots"].items():
            label_id = []
            value_dict_rev = {v:k for k, v in value_dict.items()}
            # Iterate ids 0..n-1 so row i of the embedding is value id i.
            for i in range(len(value_dict)):
                label = value_dict_rev[i]
                label = " ".join([i for i in label.split(" ") if i != ""])
                label_tokens = [self.start_token] + self.tokenizer.tokenize(label) + [self.sep_token]
                label_token_ids = self.tokenizer.convert_tokens_to_ids(label_tokens)
                label_len = len(label_token_ids)
                label_padding = [0] * (max_seq_length - len(label_token_ids))
                label_token_ids += label_padding
                assert len(label_token_ids) == max_seq_length
                label_id.append(label_token_ids)
            label_id = torch.tensor(label_id).long()
            label_ids.append(label_id)
        for s, label_id in enumerate(label_ids):
            inputs = {"input_ids":label_id, "attention_mask":(label_id > 0).long()}
            if self.args["sum_token_emb_for_value"]:
                # Cheap alternative: sum of (trainable) input token embeddings.
                hid_label = self.utterance_encoder.embeddings(input_ids=label_id).sum(1)
            else:
                if "bert" in self.args["model_type"]:
                    hid_label = self.sv_encoder(**inputs)[0]
                    # [CLS] vector as the value representation.
                    hid_label = hid_label[:, 0, :]
                elif self.args["model_type"] == "gpt2":
                    hid_label = self.sv_encoder(**inputs)[0]
                    hid_label = hid_label.mean(1)
                elif self.args["model_type"] == "dialogpt":
                    transformer_outputs = self.sv_encoder.transformer(**inputs)[0]
                    hid_label = transformer_outputs.mean(1)
            hid_label = hid_label.detach()
            self.value_lookup[s] = nn.Embedding.from_pretrained(hid_label, freeze=True)
            self.value_lookup[s].padding_idx = -1
        print("Complete initialization of slot and value lookup")
    def forward(self, data):#input_ids, input_len, labels, gate_label, n_gpu=1, target_slot=None):
        """Score every candidate value of every slot for each dialogue context.

        data["context"] is a (batch, seq) id tensor (0 = padding);
        data["belief_ontology"] holds the gold value id per slot.
        Returns {"loss": float, "pred": (batch, num_slots) int array,
        "label": (batch, num_slots) int array}.
        """
        batch_size = data["context"].size(0)
        labels = data["belief_ontology"]
        # Utterance encoding
        inputs = {"input_ids": data["context"], "attention_mask":(data["context"] > 0).long()}
        if "bert" in self.args["model_type"]:
            hidden = self.utterance_encoder(**inputs)[0]
            hidden_rep = hidden[:, 0, :]
        elif self.args["model_type"] == "gpt2":
            hidden = self.utterance_encoder(**inputs)[0]
            hidden_rep = hidden.mean(1)
        elif self.args["model_type"] == "dialogpt":
            #outputs = self.utterance_encoder(**inputs)[2] # 0 is vocab logits, 1 is a tuple of attn head
            transformer_outputs = self.utterance_encoder.transformer(
                data["context"],
                attention_mask=(data["context"] > 0).long()
            )
            hidden = transformer_outputs[0]
            hidden_rep = hidden.mean(1)
        # Label (slot-value) encoding
        loss = 0
        pred_slot = []
        for slot_id in range(self.num_slots): ## note: target_slots are successive
            # loss calculation
            hid_label = self.value_lookup[slot_id].weight # v * d
            num_slot_labels = hid_label.size(0)
            # Pair every value embedding with the projected context and score.
            _hidden = _gelu(self.project_W_1[slot_id](hidden_rep))
            _hidden = torch.cat([hid_label.unsqueeze(0).repeat(batch_size, 1, 1), _hidden.unsqueeze(1).repeat(1, num_slot_labels, 1)], dim=2)
            _hidden = _gelu(self.project_W_2[slot_id](_hidden))
            _hidden = self.project_W_3[slot_id](_hidden)
            _dist = _hidden.squeeze(2) # (batch, num_slot_labels) after dropping the scalar score dim
            _, pred = torch.max(_dist, -1)
            pred_slot.append(pred.unsqueeze(1))
            #output.append(_dist)
            if labels is not None:
                _loss = self.nll(_dist, labels[:, slot_id])
                #loss_slot.append(_loss.item())
                loss += _loss
        predictions = torch.cat(pred_slot, 1).detach().cpu().numpy()
        # NOTE(review): this line (and loss.item() below) assumes labels is
        # never None despite the guard above — confirm the loader always
        # provides belief_ontology.
        labels = labels.detach().cpu().numpy()
        if self.training:
            self.loss_grad = loss
            self.optimize()
        if self.args["error_analysis"]:
            for bsz_i, (pred, label) in enumerate(zip(np.array(predictions), np.array(labels))):
                assert len(pred) == len(label)
                joint = 0
                pred_arr, gold_arr = [], []
                for i, p in enumerate(pred):
                    pred_str = self.slot_id2value_dict[self.slots[i]][p]
                    gold_str = self.slot_id2value_dict[self.slots[i]][label[i]]
                    pred_arr.append(self.slots[i]+"-"+pred_str)
                    gold_arr.append(self.slots[i]+"-"+gold_str)
                    if pred_str == gold_str or pred_str in gold_str.split("|"):
                        joint += 1
                #if joint == len(pred):
                # NOTE(review): split("-")[2] assumes "domain-slot-value"
                # shaped names — verify against the ontology format.
                print(data["context_plain"][bsz_i])
                print("Gold:", [s for s in gold_arr if s.split("-")[2] != "none"])
                print("Pred:", [s for s in pred_arr if s.split("-")[2] != "none"])
                print()
        outputs = {"loss":loss.item(), "pred":predictions, "label":labels}
        return outputs
    def evaluation(self, preds, labels):
        """Compute joint and per-slot accuracy over accumulated predictions.

        A slot counts as correct when the predicted value string equals the
        gold string or matches one of its "|"-separated alternatives; a turn
        counts towards joint accuracy only if every slot is correct.
        """
        preds = np.array(preds)
        labels = np.array(labels)
        slot_acc, joint_acc, slot_acc_total, joint_acc_total = 0, 0, 0, 0
        for pred, label in zip(preds, labels):
            joint = 0
            assert len(pred) == len(label)
            for i, p in enumerate(pred):
                pred_str = self.slot_id2value_dict[self.slots[i]][p]
                gold_str = self.slot_id2value_dict[self.slots[i]][label[i]]
                if pred_str == gold_str or pred_str in gold_str.split("|"):
                    slot_acc += 1
                    joint += 1
                slot_acc_total += 1
            if joint == len(pred):
                joint_acc += 1
            joint_acc_total += 1
        joint_acc = joint_acc / joint_acc_total
        slot_acc = slot_acc / slot_acc_total
        results = {"joint_acc":joint_acc, "slot_acc":slot_acc}
        print("Results 1: ", results)
        return results
| 11,918 | 42.5 | 141 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/dataloader_nlg.py | import torch
import torch.utils.data as data
import random
from .utils_function import to_cuda, merge
# from .config import *
class Dataset_nlg(torch.utils.data.Dataset):
    """Turn-level dataset for the NLG / response-selection task.

    Each item pairs the (plain and delexicalized) dialogue context with the
    current system response and user utterance. During training, when
    ``args["nb_neg_sample_rs"] > 0``, each item additionally carries sampled
    negative system responses to make response selection harder.
    """

    def __init__(self, data_info, tokenizer, args, unified_meta, mode, max_length=512, max_sys_resp_len=50):
        """Store the column-oriented examples and model-specific tokens.

        Args:
            data_info: dict of parallel lists, one entry per example.
            tokenizer: HuggingFace-style tokenizer.
            args: experiment configuration dict.
            unified_meta: shared label vocabularies; must contain
                ``resp_cand_trn`` (training response candidates) and ``others``.
            mode: split name; negative sampling only happens for "train".
            max_length: max sub-tokens kept per context.
            max_sys_resp_len: truncation length for system responses.
        """
        self.data = data_info
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.num_total_seqs = len(data_info["ID"])
        self.usr_token = args["usr_token"]
        self.sys_token = args["sys_token"]
        self.unified_meta = unified_meta
        self.args = args
        self.mode = mode
        # BERT/ELECTRA use [CLS]/[SEP]; GPT-style models use bos/eos instead.
        if "bert" in self.args["model_type"] or "electra" in self.args["model_type"]:
            self.start_token = self.tokenizer.cls_token
            self.sep_token = self.tokenizer.sep_token
        else:
            self.start_token = self.tokenizer.bos_token
            self.sep_token = self.tokenizer.eos_token
        # Candidate pool of training system responses for negative sampling.
        self.resp_cand_trn = list(self.unified_meta["resp_cand_trn"])
        random.shuffle(self.resp_cand_trn)
        self.max_sys_resp_len = max_sys_resp_len
        self.others = unified_meta["others"]

    def __getitem__(self, index):
        """Return one encoded example (plain + delexicalized variants)."""
        if self.args["example_type"] == "turn":
            context_plain = self.get_concat_context(self.data["dialog_history"][index])
            context_plain_delex = self.get_concat_context(self.data["dialog_history_delex"][index])
            context = self.preprocess(context_plain)
            context_delex = self.preprocess(context_plain_delex)
            response_plain = "{} ".format(self.sys_token) + self.data["turn_sys"][index]
            # Responses are truncated to max_sys_resp_len token ids.
            response = self.preprocess(response_plain)[:self.max_sys_resp_len]
            response_plain_delex = "{} ".format(self.sys_token) + self.data["turn_sys_delex"][index]
            response_delex = self.preprocess(response_plain_delex)
            utterance_plain = "{} ".format(self.usr_token) + self.data["turn_usr"][index]
            utterance = self.preprocess(utterance_plain)
            utterance_plain_delex = "{} ".format(self.usr_token) + self.data["turn_usr_delex"][index]
            utterance_delex = self.preprocess(utterance_plain_delex)
        else:
            raise NotImplementedError

        item_info = {
            "ID":self.data["ID"][index],
            "turn_id":self.data["turn_id"][index],
            "context":context,
            "context_plain":context_plain,
            "context_delex":context_delex,
            "context_plain_delex":context_plain_delex,
            "response":response,
            "response_plain":response_plain,
            "response_delex":response_delex,
            "response_plain_delex":response_plain_delex,
            "utterance":utterance,
            "utterance_plain":utterance_plain,
            "utterance_delex":utterance_delex,
            "utterance_plain_delex":utterance_plain_delex}

        '''
        Add additional negative samples per training samples to make the selection harder,
        we found that by adding this we can slightly improve the response selection performance
        '''
        if self.args["nb_neg_sample_rs"] != 0 and self.mode == "train":
            if self.args["sample_negative_by_kmeans"]:
                # Prefer negatives from the same k-means cluster as the gold
                # system response; bare excepts fall back to random sampling
                # from the whole candidate pool on any lookup failure.
                try:
                    cur_cluster = self.others["ToD_BERT_SYS_UTTR_KMEANS"][self.data["turn_sys"][index]]
                    candidates = self.others["KMEANS_to_SENTS"][cur_cluster]
                    nb_selected = min(self.args["nb_neg_sample_rs"], len(candidates))
                    try:
                        start_pos = random.randint(0, len(candidates)-nb_selected-1)
                    except:
                        # randint fails when the candidate list is too small.
                        start_pos = 0
                    sampled_neg_resps = candidates[start_pos:start_pos+nb_selected]
                except:
                    start_pos = random.randint(0, len(self.resp_cand_trn)-self.args["nb_neg_sample_rs"]-1)
                    sampled_neg_resps = self.resp_cand_trn[start_pos:start_pos+self.args["nb_neg_sample_rs"]]
            else:
                # Uniform random window over the shuffled candidate pool.
                start_pos = random.randint(0, len(self.resp_cand_trn)-self.args["nb_neg_sample_rs"]-1)
                sampled_neg_resps = self.resp_cand_trn[start_pos:start_pos+self.args["nb_neg_sample_rs"]]

            neg_resp_arr, neg_resp_idx_arr = [], []
            for neg_resp in sampled_neg_resps:
                neg_resp_plain = "{} ".format(self.sys_token) + neg_resp
                neg_resp_idx = self.preprocess(neg_resp_plain)[:self.max_sys_resp_len]
                neg_resp_idx_arr.append(neg_resp_idx)
                neg_resp_arr.append(neg_resp_plain)

            item_info["neg_resp_idx_arr"] = neg_resp_idx_arr
            item_info["neg_resp_arr"] = neg_resp_arr

        return item_info

    def __len__(self):
        return self.num_total_seqs

    def preprocess(self, sequence):
        """Converts words to ids.

        Prepends the start token and keeps only the last (max_length - 1)
        sub-tokens of the sequence.
        """
        tokens = self.tokenizer.tokenize(self.start_token) + self.tokenizer.tokenize(sequence)[-self.max_length+1:]
        story = torch.Tensor(self.tokenizer.convert_tokens_to_ids(tokens))
        return story

    def get_concat_context(self, dialog_history):
        """Flatten the history, tagging alternating [SYS]/[USR] speakers."""
        dialog_history_str = ""
        for ui, uttr in enumerate(dialog_history):
            if ui%2 == 0:
                dialog_history_str += "{} {} ".format(self.sys_token, uttr)
            else:
                dialog_history_str += "{} {} ".format(self.usr_token, uttr)
        dialog_history_str = dialog_history_str.strip()
        return dialog_history_str
def collate_fn_nlg_turn(data):
    """Collate turn-level NLG / response-selection examples into a batch.

    Pads the context/response/utterance id sequences (plain and delex),
    appends the filtered negative responses to the gold responses when
    present, and moves the padded tensors to GPU when available.
    """
    # sort a list by sequence length (descending order) to use pack_padded_sequence
    data.sort(key=lambda x: len(x['context']), reverse=True)
    item_info = {}
    for key in data[0].keys():
        item_info[key] = [d[key] for d in data]

    # augment negative samples
    if "neg_resp_idx_arr" in item_info.keys():
        neg_resp_idx_arr = []
        for arr in item_info['neg_resp_idx_arr']:
            neg_resp_idx_arr += arr

        # remove neg samples that are the same as one of the gold responses
        for bi, arr in enumerate(item_info['neg_resp_arr']):
            for ri, neg_resp in enumerate(arr):
                if neg_resp not in item_info["response_plain"]:
                    item_info["response"] += [item_info['neg_resp_idx_arr'][bi][ri]]

    # merge sequences
    context, context_lengths = merge(item_info['context'])
    context_delex, context_delex_lengths = merge(item_info['context_delex'])
    response, response_lengths = merge(item_info["response"])
    response_delex, response_delex_lengths = merge(item_info["response_delex"])
    utterance, utterance_lengths = merge(item_info["utterance"])
    utterance_delex, utterance_delex_lengths = merge(item_info["utterance_delex"])

    item_info["context"] = to_cuda(context)
    item_info["context_lengths"] = context_lengths
    item_info["response"] = to_cuda(response)
    item_info["response_lengths"] = response_lengths
    item_info["utterance"] = to_cuda(utterance)
    # BUG FIX: this previously assigned response_lengths, mislabeling the
    # utterance lengths in the returned batch.
    item_info["utterance_lengths"] = utterance_lengths
    return item_info
| 7,624 | 43.590643 | 115 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/dataloader_nlu.py | import torch
import torch.utils.data as data
# from .config import *
from .utils_function import to_cuda, merge, merge_multi_response, merge_sent_and_word
class Dataset_nlu(torch.utils.data.Dataset):
    """Turn-level dataset for NLU (intent / domain / turn-slot prediction).

    Each item encodes "[CLS] [SYS] <system turn> [USR] <user turn>" together
    with the intent index, the turn-domain index, and a multi-hot vector over
    the turn slots. Labels missing from the unified vocabulary map to -100
    so downstream losses can ignore them.
    """

    def __init__(self, data_info, tokenizer, args, unified_meta, mode, max_length=512):
        """Store the raw example columns and pick model-specific tokens.

        Args:
            data_info: dict of parallel lists, one entry per example.
            tokenizer: HuggingFace-style tokenizer (``tokenize`` /
                ``convert_tokens_to_ids`` plus special-token attributes).
            args: experiment configuration dict.
            unified_meta: label vocabularies shared across datasets.
            mode: split name (kept for interface parity with other datasets).
            max_length: maximum number of sub-tokens kept per context.
        """
        self.data = data_info
        self.tokenizer = tokenizer
        self.num_total_seqs = len(data_info["ID"])
        self.usr_token = args["usr_token"]
        self.sys_token = args["sys_token"]
        self.max_length = max_length
        self.args = args
        self.unified_meta = unified_meta
        # BERT/ELECTRA use [CLS]/[SEP]; GPT-style models use bos/eos instead.
        if "bert" in self.args["model_type"] or "electra" in self.args["model_type"]:
            self.start_token = self.tokenizer.cls_token
            self.sep_token = self.tokenizer.sep_token
        else:
            self.start_token = self.tokenizer.bos_token
            self.sep_token = self.tokenizer.eos_token

    def __getitem__(self, index):
        """Return one encoded example (see class docstring for the fields)."""
        if self.args["example_type"] == "turn":
            context_plain = "{} {} {} {} {}".format(self.start_token,
                                                    self.sys_token,
                                                    self.data["turn_sys"][index],
                                                    self.usr_token,
                                                    self.data["turn_usr"][index])
            context = self.preprocess(context_plain)
            intent_plain = self.data["intent"][index]
            turn_sys_plain = "{} {}".format(self.sys_token, self.data["turn_sys"][index])
            turn_sys = self.preprocess(turn_sys_plain)

            # Only lookup failures are caught (the previous bare `except:`
            # also silenced unrelated errors); unknown labels become -100.
            try:
                intent_idx = self.unified_meta["intent"][intent_plain]
            except (KeyError, TypeError):
                intent_idx = -100

            try:
                domain_idx = self.unified_meta["turn_domain"][self.data["turn_domain"][index]]
            except (KeyError, TypeError):
                domain_idx = -100

            try:
                turn_slot_one_hot = [0] * len(self.unified_meta["turn_slot"])
                for ts in self.data["turn_slot"][index]:
                    turn_slot_one_hot[self.unified_meta["turn_slot"][ts]] = 1
            except (KeyError, TypeError):
                turn_slot_one_hot = -100
        elif self.args["example_type"] == "dial":
            # Fail loudly (consistent with Dataset_nlg) instead of printing
            # and then crashing on undefined locals below.
            raise NotImplementedError("Dialogue-level NLU examples are not implemented.")

        item_info = {
            "ID":self.data["ID"][index],
            "turn_id":self.data["turn_id"][index],
            # NOTE: the original dict listed "turn_domain" twice (raw string
            # then index); the duplicate is removed — the raw string is kept
            # in "domain_plain" and "turn_domain" holds the index.
            "context":context,
            "context_plain":context_plain,
            "intent":intent_idx,
            "intent_plain":intent_plain,
            "domain_plain":self.data["turn_domain"][index],
            "turn_domain": domain_idx,
            "turn_sys":turn_sys,
            "turn_slot":turn_slot_one_hot,
            "turn_sys_plain":turn_sys_plain
        }

        return item_info

    def __len__(self):
        return self.num_total_seqs

    def preprocess(self, sequence):
        """Converts words to ids.

        Prepends the start token and keeps only the last (max_length - 1)
        sub-tokens of the sequence.
        """
        tokens = self.tokenizer.tokenize(self.start_token) + self.tokenizer.tokenize(sequence)[-self.max_length+1:]
        story = torch.Tensor(self.tokenizer.convert_tokens_to_ids(tokens))
        return story
def collate_fn_nlu_turn(data):
    """Collate turn-level NLU examples: pad the token-id sequences,
    tensorize the labels, and move everything to GPU when available."""
    # Longest context first, as expected by pack_padded_sequence downstream.
    data.sort(key=lambda example: len(example["context"]), reverse=True)

    # Transpose the list of example dicts into a dict of per-key lists.
    item_info = {key: [example[key] for example in data] for key in data[0].keys()}

    padded_context, context_lengths = merge(item_info["context"])
    padded_turn_sys, _ = merge(item_info["turn_sys"])
    intent_tensor = torch.tensor(item_info["intent"])
    domain_tensor = torch.tensor(item_info["turn_domain"])
    slot_tensor = torch.tensor(item_info["turn_slot"]).float()

    item_info["context"] = to_cuda(padded_context)
    item_info["context_len"] = context_lengths
    item_info["intent"] = to_cuda(intent_tensor)
    item_info["turn_domain"] = to_cuda(domain_tensor)
    item_info["turn_sys"] = to_cuda(padded_turn_sys)
    item_info["turn_slot"] = to_cuda(slot_tensor)
    return item_info
def collate_fn_nlu_dial(data):
    """Dialogue-level NLU batching — not implemented yet (returns None)."""
    # TODO: implement dialogue-level batching.
    return
| 4,513 | 37.254237 | 115 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/dataloader_dm.py | import torch
import torch.utils.data as data
# from .config import *
from .utils_function import to_cuda, merge, merge_multi_response, merge_sent_and_word
class Dataset_dm(torch.utils.data.Dataset):
    """Turn-level dataset for dialogue management (system-act prediction).

    Each item encodes the full dialogue history plus the current system and
    user turns, together with a multi-hot vector over the system-act labels.
    """

    def __init__(self, data_info, tokenizer, args, unified_meta, mode, max_length=512):
        """Store the raw example columns and pick model-specific tokens.

        Args:
            data_info: dict of parallel lists, one entry per example.
            tokenizer: HuggingFace-style tokenizer.
            args: experiment configuration dict.
            unified_meta: label vocabularies; must contain "sysact".
            mode: split name (kept for interface parity).
            max_length: maximum number of sub-tokens kept per sequence.
        """
        self.data = data_info
        self.tokenizer = tokenizer
        self.num_total_seqs = len(data_info["ID"])
        self.usr_token = args["usr_token"]
        self.sys_token = args["sys_token"]
        self.max_length = max_length
        self.args = args
        self.unified_meta = unified_meta
        # BERT/ELECTRA use [CLS]/[SEP]; GPT-style models use bos/eos instead.
        if "bert" in self.args["model_type"] or "electra" in self.args["model_type"]:
            self.start_token = self.tokenizer.cls_token
            self.sep_token = self.tokenizer.sep_token
        else:
            self.start_token = self.tokenizer.bos_token
            self.sep_token = self.tokenizer.eos_token

    def __getitem__(self, index):
        """Return one encoded example with its multi-hot system-act label."""
        if self.args["example_type"] == "turn":
            dialog_history_str = self.get_concat_context(self.data["dialog_history"][index])
            context_plain = self.concat_dh_sys_usr(dialog_history_str, self.data["turn_sys"][index], self.data["turn_usr"][index])
            context = self.preprocess(context_plain)
            act_plain = self.data["sys_act"][index]

            turn_sys_plain = "{} {}".format(self.sys_token, self.data["turn_sys"][index])
            turn_sys = self.preprocess(turn_sys_plain)

            # Multi-hot encoding of this turn's system acts.
            act_one_hot = [0] * len(self.unified_meta["sysact"])
            for act in act_plain:
                act_one_hot[self.unified_meta["sysact"][act]] = 1
        elif self.args["example_type"] == "dial":
            #TODO
            # NOTE(review): this branch only prints; building item_info below
            # would then fail on undefined locals — confirm "dial" is unused.
            print("Not Implemented dial for nlu yet...")

        item_info = {
            "ID":self.data["ID"][index],
            "turn_id":self.data["turn_id"][index],
            "context":context,
            "context_plain":context_plain,
            "sysact":act_one_hot,
            "sysact_plain":act_plain,
            "turn_sys":turn_sys}

        return item_info

    def __len__(self):
        return self.num_total_seqs

    def preprocess(self, sequence):
        """Converts words to ids.

        Prepends the start token and keeps only the last (max_length - 1)
        sub-tokens of the sequence.
        """
        tokens = self.tokenizer.tokenize(self.start_token) + self.tokenizer.tokenize(sequence)[-self.max_length+1:]
        story = torch.Tensor(self.tokenizer.convert_tokens_to_ids(tokens))
        return story

    def concat_dh_sys_usr(self, dialog_history, sys, usr):
        """Append "[SYS] [SEP] <sys> [USR] <usr>" to the flattened history."""
        return dialog_history + " {} ".format(self.sys_token) + " {} ".format(self.sep_token) + sys + " {} ".format(self.usr_token) + usr

    def get_concat_context(self, dialog_history):
        """Flatten the history, tagging alternating [SYS]/[USR] speakers."""
        dialog_history_str = ""
        for ui, uttr in enumerate(dialog_history):
            if ui%2 == 0:
                dialog_history_str += "{} {} ".format(self.sys_token, uttr)
            else:
                dialog_history_str += "{} {} ".format(self.usr_token, uttr)
        dialog_history_str = dialog_history_str.strip()
        return dialog_history_str
def collate_fn_dm_turn(data):
    """Collate turn-level DM examples: pad contexts and system-turn ids,
    tensorize the multi-hot act labels, and move to GPU when available."""
    # Longest context first, as expected by pack_padded_sequence downstream.
    data.sort(key=lambda example: len(example["context"]), reverse=True)

    # Transpose the list of example dicts into a dict of per-key lists.
    item_info = {key: [example[key] for example in data] for key in data[0].keys()}

    padded_context, context_lengths = merge(item_info["context"])
    padded_turn_sys, _ = merge(item_info["turn_sys"])
    sysact_tensor = torch.tensor(item_info["sysact"]).float()

    item_info["context"] = to_cuda(padded_context)
    item_info["context_len"] = context_lengths
    item_info["sysact"] = to_cuda(sysact_tensor)
    item_info["turn_sys"] = to_cuda(padded_turn_sys)
    return item_info
def collate_fn_dm_dial(data):
    """Dialogue-level DM batching — not implemented yet (returns None).

    Renamed from the copy-pasted ``collate_fn_nlu_dial`` so the name-based
    lookup ``collate_fn_{task}_{example_type}`` in get_loader can resolve it
    for task="dm", example_type="dial"; the NLU variant is already defined
    in dataloader_nlu.py.
    """
    # TODO: implement dialogue-level batching for dialogue management.
    return
| 4,048 | 37.561905 | 137 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/utils_function.py | import torch
import numpy as np
PAD_token = 0
def to_cuda(x):
    """Move *x* onto the GPU when CUDA is available; otherwise return it unchanged."""
    return x.cuda() if torch.cuda.is_available() else x
def merge(sequences, ignore_idx=None):
    '''
    merge from batch * sent_len to batch * max_len

    Pads each 1-D sequence with `ignore_idx` (module-level PAD_token when
    None) up to the longest sequence in the batch.

    Returns:
        (padded LongTensor of shape (batch, max_len), list of true lengths).
        An empty batch yields a (0, 1) tensor and [] instead of raising.
    '''
    pad_token = PAD_token if type(ignore_idx)==type(None) else ignore_idx
    lengths = [len(seq) for seq in sequences]
    # `default=0` keeps an empty batch from raising ValueError on max();
    # a batch of all-empty sequences still gets one padded column.
    max_len = max(lengths, default=0)
    if max_len == 0:
        max_len = 1
    padded_seqs = torch.ones(len(sequences), max_len).long() * pad_token
    for i, seq in enumerate(sequences):
        end = lengths[i]
        padded_seqs[i, :end] = seq[:end]
    padded_seqs = padded_seqs.detach()
    return padded_seqs, lengths
def merge_multi_response(sequences, ignore_idx=None):
    '''
    merge from batch * nb_slot * slot_len to batch * nb_slot * max_slot_len

    Pads every slot-value id list with `ignore_idx` (module-level PAD_token
    when None) up to the longest value across the whole batch. Returns a
    (batch, nb_slot, max_slot_len) LongTensor and a tensor of true lengths.
    '''
    pad_token = PAD_token if type(ignore_idx)==type(None) else ignore_idx
    lengths = [[len(value) for value in slot_values] for slot_values in sequences]
    max_len = max(max(per_example) for per_example in lengths)
    padded_seqs = [
        [value + [pad_token] * (max_len - len(value)) for value in slot_values]
        for slot_values in sequences
    ]
    return torch.tensor(padded_seqs).long(), torch.tensor(lengths)
def merge_sent_and_word(sequences, ignore_idx=None):
    '''
    merge from batch * nb_sent * nb_word to batch * max_nb_sent * max_nb_word

    Pads a batch of dialogues (lists of tokenized sentences) with
    `ignore_idx` (module-level PAD_token when None) into one dense
    LongTensor; also returns the per-sentence true lengths as nested lists.
    '''
    pad_token = PAD_token if type(ignore_idx)==type(None) else ignore_idx
    lengths = [[len(sent) for sent in dialogue] for dialogue in sequences]
    max_nb_sent = max(len(dialogue) for dialogue in sequences)
    max_nb_word = max(n for per_dialogue in lengths for n in per_dialogue)

    padded_seqs = np.full((len(sequences), max_nb_sent, max_nb_word), pad_token)
    for bi, dialogue in enumerate(sequences):
        for si, sent in enumerate(dialogue):
            padded_seqs[bi, si, :len(sent)] = np.array(sent)
    padded_seqs = torch.LongTensor(padded_seqs).detach()
    return padded_seqs, lengths
def get_input_example(example_type):
    """Return an empty example skeleton for one data point.

    ``"turn"`` holds a single value per field (strings/ints/dicts where
    appropriate); ``"dial"`` accumulates a list of per-turn values for every
    field except the dialogue ``ID``. Every call returns fresh containers.
    """
    keys = ["ID", "turn_id", "domains", "turn_domain", "turn_usr", "turn_sys",
            "turn_usr_delex", "turn_sys_delex", "belief_state_vec", "db_pointer",
            "dialog_history", "dialog_history_delex", "belief", "del_belief",
            "slot_gate", "slot_values", "slots", "sys_act", "usr_act",
            "intent", "turn_slot"]
    if example_type == "turn":
        data_detail = {key: [] for key in keys}
        # Scalar / mapping fields for a single turn.
        data_detail.update({"ID": "", "turn_id": 0,
                            "turn_usr": "", "turn_sys": "",
                            "turn_usr_delex": "", "turn_sys_delex": "",
                            "belief": {}, "del_belief": {}, "intent": ""})
    elif example_type == "dial":
        data_detail = {key: [] for key in keys}
        data_detail["ID"] = ""
    return data_detail
| 3,659 | 28.28 | 82 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/config.py | import os
import logging
import argparse
from tqdm import tqdm
import torch
import numpy as np
parser = argparse.ArgumentParser(description='Task-oriented Dialogue System Benchmarking')
## Training Setting
parser.add_argument(
'--do_train', action='store_true', help="do training")
parser.add_argument(
'-epoch','--epoch', help='number of epochs to train', required=False, default=300, type=int)
parser.add_argument(
'-patience','--patience', help='patience for early stopping', required=False, default=10, type=int)
parser.add_argument(
'-earlystop','--earlystop', help='metric for early stopping', required=False, default="loss", type=str)
parser.add_argument(
'--my_model', help='my cutomized model', required=False, default="")
parser.add_argument(
'-dr','--dropout', help='Dropout ratio', required=False, type=float, default=0.2)
parser.add_argument(
'-lr','--learning_rate', help='Learning Rate', required=False, type=float, default=5e-5)
parser.add_argument(
'-bsz','--batch_size', help='Batch_size', required=False, type=int, default=16)
parser.add_argument(
'-ebsz','--eval_batch_size', help='Batch_size', required=False, type=int, default=16)
parser.add_argument(
'-hdd','--hdd_size', help='Hidden size', required=False, type=int, default=400)
parser.add_argument(
'-emb','--emb_size', help='Embedding size', required=False, type=int, default=400)
parser.add_argument(
'-clip','--grad_clip', help='gradient clipping', required=False, default=1, type=int)
parser.add_argument(
'-tfr','--teacher_forcing_ratio', help='teacher_forcing_ratio', type=float, required=False, default=0.5)
parser.add_argument(
'-loadEmb','--load_embedding', help='Load Pretrained Glove and Char Embeddings', required=False, default=False, type=bool)
parser.add_argument(
'-fixEmb','--fix_embedding', help='', required=False, default=False, type=bool)
parser.add_argument(
'--n_gpu', help='', required=False, default=1, type=int)
parser.add_argument(
'--eval_by_step', help='', required=False, default=-1, type=int)
parser.add_argument(
'--fix_encoder', action='store_true', help="")
parser.add_argument(
'--model_type', help='', required=False, default="bert", type=str)
parser.add_argument(
'--model_name_or_path', help='', required=False, default="bert", type=str)
parser.add_argument(
'--usr_token', help='', required=False, default="[USR]", type=str)
parser.add_argument(
'--sys_token', help='', required=False, default="[SYS]", type=str)
parser.add_argument(
'--warmup_proportion', help='warm up training in the begining', required=False, default=0.1, type=float)
parser.add_argument(
"--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--gradient_accumulation_steps", type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",)
parser.add_argument(
"--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument(
"--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument(
"--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument(
"--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",)
parser.add_argument(
"--fp16_opt_level", type=str, default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",)
parser.add_argument(
"--output_mode", default="classification", type=str, help="")
parser.add_argument(
"--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.",)
parser.add_argument(
"--rand_seed", default=0, type=int, help="")
parser.add_argument(
"--fix_rand_seed", action="store_true", help="fix random seed for training",)
parser.add_argument(
"--nb_runs", default=1, type=int, help="number of runs to conduct during training")
parser.add_argument(
"--nb_evals", default=1, type=int, help="number of runs to conduct during inference")
parser.add_argument(
"--max_seq_length", default=512, type=int, help="")
parser.add_argument(
"--input_name", default="context", type=str, help="")
## Dataset or Input/Output Setting
parser.add_argument(
'-dpath','--data_path', help='path to dataset folder, need to change to your local folder',
required=False, default='./dialog_datasets', type=str)
parser.add_argument(
'-task','--task', help='task in ["nlu", "dst", "dm", "nlg", "usdl"] to decide which dataloader to use', required=True)
parser.add_argument(
'-task_name','--task_name', help='task in ["intent", "sysact","rs"]', required=False, default="")
parser.add_argument(
'--example_type', help='type in ["turn", "dial"]', required=False, default="turn")
parser.add_argument(
'-ds','--dataset', help='which dataset to be used.', required=False, default='["multiwoz"]', type=str)
parser.add_argument(
'-load_path','--load_path', help='path of the saved model to load from', required=False)
parser.add_argument(
'-an','--add_name', help='An added name for the save folder', required=False, default='')
parser.add_argument(
'--max_line', help='maximum line for reading data (for quick testing)', required=False, default=None, type=int)
parser.add_argument(
'--output_dir', help='', required=False, default="save/temp/", type=str)
parser.add_argument(
'--overwrite', action='store_true', help="")
parser.add_argument(
"--cache_dir", default=None, type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",)
parser.add_argument(
"--logging_steps", default=500, type=int, help="")
parser.add_argument(
"--save_steps", default=1000, type=int, help="")
parser.add_argument(
"--save_total_limit", type=int, default=1,
help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir",)
parser.add_argument(
"--train_data_ratio", default=1.0, type=float, help="")
parser.add_argument(
"--domain_act", action="store_true", help="",)
parser.add_argument(
"--only_last_turn", action="store_true", help="",)
parser.add_argument(
"--error_analysis", action="store_true", help="",)
parser.add_argument(
"--not_save_model", action="store_true", help="")
parser.add_argument(
"--nb_shots", default=-1, type=int, help="")
parser.add_argument(
"--continue_ft", action="store_true", help="",)
## Others (May be able to delete or not used in this repo)
parser.add_argument(
'--do_embeddings', action='store_true')
parser.add_argument(
'--create_own_vocab', action='store_true', help="")
parser.add_argument(
'-um','--unk_mask', help='mask out input token to UNK', type=bool, required=False, default=True)
parser.add_argument(
'-paral','--parallel_decode', help='', required=False, default=True, type=bool)
parser.add_argument(
'--self_supervised', help='', required=False, default="generative", type=str)
parser.add_argument(
"--oracle_domain", action="store_true", help="",)
parser.add_argument(
"--more_linear_mapping", action="store_true", help="",)
parser.add_argument(
"--gate_supervision_for_dst", action="store_true", help="",)
parser.add_argument(
"--sum_token_emb_for_value", action="store_true", help="",)
parser.add_argument(
"--nb_neg_sample_rs", default=0, type=int, help="")
parser.add_argument(
"--sample_negative_by_kmeans", action="store_true", help="",)
parser.add_argument(
"--nb_kmeans", default=1000, type=int, help="")
parser.add_argument(
"--bidirect", action="store_true", help="",)
parser.add_argument(
'--rnn_type', help='rnn type ["gru", "lstm"]', required=False, type=str, default="gru")
parser.add_argument(
'--num_rnn_layers', help='rnn layers size', required=False, type=int, default=1)
parser.add_argument(
'--zero_init_rnn',action='store_true', help="set initial hidden of rnns zero")
parser.add_argument(
"--do_zeroshot", action="store_true", help="",)
parser.add_argument(
"--oos_threshold", action="store_true", help="",)
parser.add_argument(
"--ontology_version", default="", type=str, help="1.0 is the cleaned version but not used")
parser.add_argument(
"--dstlm", action="store_true", help="",)
parser.add_argument(
"--domain", default="all", type=str, help="select one of the following domains for multiwoz: taxi, restaurant, attraction, hotel, train, all")
parser.add_argument(
"--domain_option", default="multi", type=str, help="select one of the following domain options for multiwoz: single, multi")
parser.add_argument(
"--tgt_lang", default="de", type=str, help="select one of the following languages: de, cn, ar, ru")
parser.add_argument(
"--adapter_name_or_path", default="", type=str, help="load pretrained adapter from adapter_name_or_path")
parser.add_argument(
"--adapter_name_or_path_2", default="", type=str, help="load pretrained adapter from adapter_name_or_path")
parser.add_argument(
"--adapter_name_or_path_3", default="", type=str, help="load pretrained adapter from adapter_name_or_path")
parser.add_argument(
"--continue_model_path", default="", type=str, help="load previous pretrained model from continue_model_path")
parser.add_argument(
"--save_adapter_path", default="", type=str, help="path to save fine-tuned adapter")
parser.add_argument(
'-viz','--vizualization', help='vizualization', type=int, required=False, default=0)
# Parse the CLI flags into a plain dict (the rest of the code indexes args
# with string keys, e.g. args["task"]).
args = vars(parser.parse_args())
# args = parser.parse_args()
print(str(args))

# check output_dir
# Refuse to train into an existing, non-empty output directory unless
# --overwrite was passed; always ensure the directory exists afterwards.
if os.path.exists(args["output_dir"]) and os.listdir(args["output_dir"]) and args["do_train"] and (not args["overwrite"]):
    raise ValueError("Output directory ({}) already exists and is not empty.".format(args["output_dir"]))
os.makedirs(args["output_dir"], exist_ok=True)

# Dictionary Predefined
# Fixed random seeds — presumably one per run when --nb_runs > 1; confirm
# against the trainer that consumes SEEDS.
SEEDS = [10, 5, 0] # np.arange(0, 100, 5)
| 10,339 | 47.093023 | 150 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/dataloader_dst.py | import torch
import numpy as np
import torch.utils.data as data
from .utils_function import to_cuda, merge, merge_multi_response, merge_sent_and_word
# SLOT_GATE = {"ptr":0, "dontcare":1, "none":2}
class Dataset_dst(torch.utils.data.Dataset):
    """Turn-level dataset for dialogue state tracking (DST).

    Each item couples the tokenized dialogue context with per-slot gate
    labels, gold value token ids, ontology indices, and a mask over the
    domain-slots triggered in the current turn.
    """

    def __init__(self, data_info, tokenizer, args, unified_meta, mode, max_length=512):
        """Store the raw example columns and DST-specific token ids.

        Args:
            data_info: dict of parallel lists, one entry per example.
            tokenizer: HuggingFace-style tokenizer.
            args: experiment configuration dict.
            unified_meta: must contain "slots", a mapping
                domain-slot -> {value: ontology index}.
            mode: split name (kept for interface parity).
            max_length: maximum number of sub-tokens kept per context.
        """
        self.data = data_info
        self.tokenizer = tokenizer
        self.num_total_seqs = len(data_info["ID"])
        self.usr_token = args["usr_token"]
        self.sys_token = args["sys_token"]
        self.max_length = max_length
        self.args = args
        self.unified_meta = unified_meta
        # Ordered "domain-slot" names; fixes the slot dimension everywhere.
        self.slots = list(unified_meta["slots"].keys())
        self.mask_token_idx = tokenizer.convert_tokens_to_ids("[MASK]")
        self.sep_token_idx = tokenizer.convert_tokens_to_ids("[SEP]")
        self.start_token = self.tokenizer.cls_token if "bert" in self.args["model_type"] else self.tokenizer.bos_token
        self.sep_token = self.tokenizer.sep_token if "bert" in self.args["model_type"] else self.tokenizer.eos_token

    def __getitem__(self, index):
        """Return one encoded DST example (see class docstring)."""
        if self.args["example_type"] == "turn":
            dialog_history_str = self.get_concat_context(self.data["dialog_history"][index])
            gate_label = self.data["slot_gate"][index]
            context_plain = self.concat_dh_sys_usr(dialog_history_str,
                                                   self.data["turn_sys"][index],
                                                   self.data["turn_usr"][index])
            slot_values_plain = self.data["slot_values"][index]
            slot_values = self.preprocess_slot(slot_values_plain)

            # Domains mentioned in the belief state plus the current turn
            # domain; used to mask out untriggered domain-slots.
            triggered_domains = set([domain_slot.split("-")[0] for domain_slot in self.data["belief"][index].keys()])
            triggered_domains.add(self.data["turn_domain"][index])
            assert len(triggered_domains) != 0
            triggered_ds_mask = [1 if s.split("-")[0] in triggered_domains else 0 for s in self.slots]
            triggered_ds_idx = []
            triggered_ds_pos = []

            context = self.preprocess(context_plain)

            # Map each gold value to its index in the slot's ontology; -1
            # (with a diagnostic print) when the value is not in the ontology.
            ontology_idx = []
            for si, sv in enumerate(slot_values_plain):
                try:
                    ontology_idx.append(self.unified_meta["slots"][self.slots[si]][sv])
                except Exception as e:
                    print("Not In Ontology")
                    print(e)
                    print(self.slots[si], sv)
                    ontology_idx.append(-1)
        elif self.args["example_type"] == "dial":
            # BUG FIX: was `raise NotImplemented()`, which raises TypeError
            # because NotImplemented is a constant, not callable.
            raise NotImplementedError()

        item_info = {
            "ID":self.data["ID"][index],
            "turn_id":self.data["turn_id"][index],
            "del_belief":self.data["del_belief"][index],
            "slot_gate":gate_label,
            "context":context,
            "context_plain":context_plain,
            "slot_values":slot_values,
            "belief":self.data["belief"][index],
            "slots":self.data["slots"][index],
            "belief_ontology":ontology_idx,
            "triggered_ds_mask":triggered_ds_mask,
            "triggered_ds_idx":triggered_ds_idx,
            "triggered_ds_pos":triggered_ds_pos}

        return item_info

    def __len__(self):
        return self.num_total_seqs

    def concat_dh_sys_usr(self, dialog_history, sys, usr):
        """Append "[SEP] [SYS] <sys> [USR] <usr>" to the flattened history."""
        return dialog_history + " {} ".format(self.sep_token) + " {} ".format(self.sys_token) + sys + " {} ".format(self.usr_token) + usr

    def preprocess(self, sequence):
        """Converts words to ids.

        Prepends the start token and keeps only the last (max_length - 1)
        sub-tokens of the sequence.
        """
        #story = torch.Tensor(self.tokenizer.encode(sequence))
        tokens = self.tokenizer.tokenize(self.start_token) + self.tokenizer.tokenize(sequence)[-self.max_length+1:]
        story = torch.Tensor(self.tokenizer.convert_tokens_to_ids(tokens))
        return story

    def preprocess_slot(self, sequence):
        """Encode each slot value string (suffixed with the sep token) to ids."""
        story = []
        for value in sequence:
            v = list(self.tokenizer.encode(value + " {}".format(self.sep_token)))
            story.append(v)
        return story

    def get_concat_context(self, dialog_history):
        """Flatten the history, tagging alternating [SYS]/[USR] speakers."""
        dialog_history_str = ""
        for ui, uttr in enumerate(dialog_history):
            if ui%2 == 0:
                dialog_history_str += "{} {} ".format(self.sys_token, uttr)
            else:
                dialog_history_str += "{} {} ".format(self.usr_token, uttr)
        dialog_history_str = dialog_history_str.strip()
        return dialog_history_str
def collate_fn_dst_turn(data):
    """Collate turn-level DST examples: pad contexts and slot-value ids,
    tensorize the per-slot labels, and move everything to GPU when available."""
    # Longest context first, as expected by pack_padded_sequence downstream.
    data.sort(key=lambda example: len(example["context"]), reverse=True)

    # Transpose the list of example dicts into a dict of per-key lists.
    item_info = {key: [example[key] for example in data] for key in data[0].keys()}

    padded_context, context_lengths = merge(item_info["context"])
    padded_values, value_lengths = merge_multi_response(item_info["slot_values"])
    gate_tensor = torch.tensor(item_info["slot_gate"])
    ontology_tensor = torch.tensor(item_info["belief_ontology"])
    mask_tensor = torch.tensor(item_info["triggered_ds_mask"])

    item_info["context"] = to_cuda(padded_context)
    item_info["context_len"] = context_lengths
    item_info["slot_gate"] = to_cuda(gate_tensor)
    item_info["slot_values"] = to_cuda(padded_values)
    item_info["slot_values_len"] = value_lengths
    item_info["belief_ontology"] = to_cuda(ontology_tensor)
    item_info["triggered_ds_mask"] = to_cuda(mask_tensor)
    return item_info
def collate_fn_dst_dial(data):
    """Batch dialogue-level DST examples (partially implemented; see TODOs)."""
    # sort a list by sequence length (descending order) to use pack_padded_sequence
    data.sort(key=lambda x: len(x['context']), reverse=True)
    item_info = {}
    for key in data[0].keys():
        item_info[key] = [d[key] for d in data]

    # merge sequences
    src_seqs, src_lengths = merge_sent_and_word(item_info['context'])
    # One (padded values, lengths) pair per dialogue in the batch.
    y = [merge_multi_response(sv) for sv in item_info["slot_values"]]
    y_seqs = [_y[0] for _y in y]
    y_lengths = [_y[1] for _y in y]
    gates, gate_lengths = merge_sent_and_word(item_info['slot_gate'], ignore_idx=-1)
    # NOTE(review): belief_ontology is built but never written back into
    # item_info nor moved to GPU — confirm whether it is needed downstream.
    belief_ontology = torch.tensor(item_info["belief_ontology"])

    item_info["context"] = to_cuda(src_seqs)
    item_info["context_len"] = src_lengths
    item_info["slot_gate"] = to_cuda(gates)
    item_info["slot_values"] = [to_cuda(y) for y in y_seqs] # TODO
    item_info["slot_values_len"] = y_lengths # TODO
    return item_info
| 6,765 | 41.2875 | 137 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/utils_general.py | import torch
import torch.utils.data as data
import random
import math
from .dataloader_dst import *
from .dataloader_nlg import *
from .dataloader_nlu import *
from .dataloader_dm import *
from .dataloader_usdl import *
def get_loader(args, mode, tokenizer, datasets, unified_meta, shuffle=False):
    """Build a DataLoader over the combined datasets for one task and split.

    The task-specific Dataset_* class and collate_fn_* function are resolved
    by name from this module's globals (wildcard-imported from the
    dataloader_* modules), so the available tasks are "nlu", "dst", "dm",
    "nlg" and "usdl".

    Args:
        args: experiment configuration dict (task, batch sizes, ratios, ...).
        mode: split name, e.g. "train" / "dev" / "test".
        tokenizer: tokenizer forwarded to the Dataset_* constructor.
        datasets: mapping dataset name -> {split: list of examples, ...}.
        unified_meta: merged label vocabularies (see get_unified_meta).
        shuffle: force shuffling even for non-train splits.
    """
    task = args["task"]
    batch_size = args["batch_size"] if mode == "train" else args["eval_batch_size"]

    # Concatenate the examples of all requested datasets for this split.
    combined_ds = []
    for ds in datasets:
        combined_ds += datasets[ds][mode]

    # do not consider empty system responses
    if (args["task_name"] == "rs") or (args["task"] == "dm"):
        print("[Info] Remove turns with empty system response...")
        combined_ds = [d for d in combined_ds if d["turn_sys"]!=""]

    ## Ignore the first system utterance for response selection task
    if (args["task_name"] == "rs"):
        print("[Info] Remove turn=0 system response...")
        combined_ds = [d for d in combined_ds if d["turn_id"]!=0]

    # control data ratio
    if (args["train_data_ratio"] != 1 or args["nb_shots"] != -1) and (mode == "train"):
        original_len = len(combined_ds)
        if ("oos_intent" in args["dataset"]):
            # oos_intent: sample a fixed number of examples per intent class.
            nb_train_sample_per_class = int(100 * args["train_data_ratio"])
            class_count = {k: 0 for k in unified_meta["intent"]}
            random.Random(args["rand_seed"]).shuffle(combined_ds)
            pair_trn_new = []
            for d in combined_ds:
                if class_count[d["intent"]] < nb_train_sample_per_class:
                    pair_trn_new.append(d)
                    class_count[d["intent"]] += 1
            combined_ds = pair_trn_new
        else:
            # Otherwise subsample by ratio, or take the first nb_shots
            # examples after a seeded shuffle.
            if args["train_data_ratio"] != 1:
                random.Random(args["rand_seed"]).shuffle(combined_ds)
                combined_ds = combined_ds[:int(len(combined_ds)*args["train_data_ratio"])]
            else:
                random.Random(args["rand_seed"]).shuffle(combined_ds)
                combined_ds = combined_ds[:args["nb_shots"]]
        print("[INFO] Use Training Data: from {} to {}".format(original_len, len(combined_ds)))

    # Transpose the example dicts into column-oriented lists for Dataset_*.
    data_info = {k: [] for k in combined_ds[0].keys()}
    for d in combined_ds:
        for k in combined_ds[0].keys():
            data_info[k].append(d[k])

    dataset = globals()["Dataset_"+task](data_info, tokenizer, args, unified_meta, mode, args["max_seq_length"])
    bool_shuffle = (mode=="train" or shuffle)

    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch_size,
                                              shuffle=bool_shuffle,
                                              collate_fn=globals()["collate_fn_{}_{}".format(task, args["example_type"])])
    return data_loader
def get_unified_meta(datasets):
    """Merge every dataset's "meta" dict into one unified label vocabulary.

    List-valued entries become value -> index mappings, with indices assigned
    in first-seen order across all datasets; entries of any other type simply
    overwrite whatever was stored before. The result always carries an
    "others": None placeholder entry.
    """
    merged = {"others": None}
    for name in datasets:
        for key, value in datasets[name]["meta"].items():
            if key not in merged:
                merged[key] = {}
            if type(value) is list:
                vocab = merged[key]
                for item in value:
                    if item not in vocab:
                        vocab[item] = len(vocab)
            else:
                merged[key] = value
    return merged
| 3,348 | 39.349398 | 122 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/dataloader_usdl.py | import torch
import torch.utils.data as data
from .utils_function import to_cuda, merge, merge_multi_response, merge_sent_and_word
class Dataset_usdl(torch.utils.data.Dataset):
    """Unsupervised dialogue-level dataset: tokenized dialogue contexts only
    (no labels), compatible with torch data.DataLoader."""
    def __init__(self, data_info, tokenizer, args, unified_meta, mode, max_length=512):
        """Store the transposed example dict and tokenizer configuration.

        Args:
            data_info: dict of parallel lists, one per example field
                (must include "ID", "turn_id", "dialog_history", and for
                turn-level examples "turn_sys"/"turn_usr").
            tokenizer: HuggingFace-style tokenizer (needs tokenize /
                convert_tokens_to_ids and cls/sep or bos/eos tokens).
            args: configuration dict; reads "usr_token", "sys_token",
                "model_type", "example_type".
            unified_meta: merged label metadata (kept for interface parity;
                not used for this unsupervised task).
            mode: split name, e.g. "train".
            max_length: maximum token length of an encoded context.
        """
        self.data = data_info
        self.tokenizer = tokenizer
        # number of examples == length of any of the parallel lists
        self.num_total_seqs = len(data_info["ID"])
        # special speaker-marker tokens and their vocabulary ids
        self.usr_token = args["usr_token"]
        self.sys_token = args["sys_token"]
        self.usr_token_id = self.tokenizer.convert_tokens_to_ids(args["usr_token"])
        self.sys_token_id = self.tokenizer.convert_tokens_to_ids(args["sys_token"])
        self.max_length = max_length
        self.args = args
        self.unified_meta = unified_meta
        # BERT-style models use [CLS]/[SEP]; GPT-style models use bos/eos
        self.start_token = self.tokenizer.cls_token if "bert" in self.args["model_type"] else self.tokenizer.bos_token
        self.sep_token = self.tokenizer.sep_token if "bert" in self.args["model_type"] else self.tokenizer.eos_token
        self.mode = mode
    def __getitem__(self, index):
        """Return one encoded example: ID, turn_id, token-id context, and the
        plain-text context it was built from."""
        item_info = {}
        if self.args["example_type"] == "turn":
            # turn-level: flatten history + current sys/usr turn into one string
            dialog_history_str = self.get_concat_context(self.data["dialog_history"][index])
            context_plain = self.concat_dh_sys_usr(dialog_history_str,
                                                   self.data["turn_sys"][index],
                                                   self.data["turn_usr"][index])
            context = self.preprocess(context_plain)
        elif self.args["example_type"] == "dial":
            # dialogue-level: keep per-utterance structure (list of id-lists)
            context_plain = self.data["dialog_history"][index]
            context = self.preprocess_slot(context_plain)
        item_info["ID"] = self.data["ID"][index]
        item_info["turn_id"] = self.data["turn_id"][index]
        item_info["context"] = context
        item_info["context_plain"] = context_plain
        return item_info
    def __len__(self):
        return self.num_total_seqs
    def concat_dh_sys_usr(self, dialog_history, sys, usr):
        """Append the current system and user turns (with speaker markers) to
        the flattened dialogue history string."""
        return dialog_history + " {} ".format(self.sys_token) + sys + " {} ".format(self.usr_token) + usr
    def preprocess(self, sequence):
        """Converts words to ids, keeping the start token and truncating the
        history from the left so the most recent turns survive."""
        tokens = self.tokenizer.tokenize(self.start_token) + self.tokenizer.tokenize(sequence)[-self.max_length+1:]
        story = torch.Tensor(self.tokenizer.convert_tokens_to_ids(tokens))
        return story
    def preprocess_slot(self, sequence):
        """Convert a list of utterance strings to a list of id-lists
        (no start/sep tokens, no truncation)."""
        story = []
        for value in sequence:
            #v = list(self.tokenizer.encode(value))# + self.tokenizer.encode("[SEP]"))
            v = list(self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(value)))
            story.append(v)
        return story
    def get_concat_context(self, dialog_history):
        """Flatten a list of alternating utterances into one string, prefixing
        each utterance with its speaker marker (even index = system)."""
        # NOTE(review): candidate_sys_responses is never used in this method.
        candidate_sys_responses = []
        dialog_history_str = ""
        for ui, uttr in enumerate(dialog_history):
            if ui%2 == 0:
                dialog_history_str += "{} {} ".format(self.sys_token, uttr)
            else:
                dialog_history_str += "{} {} ".format(self.usr_token, uttr)
        dialog_history_str = dialog_history_str.strip()
        return dialog_history_str
def collate_fn_usdl_turn(data):
    """Collate turn-level USDL examples into a padded batch.

    The batch is sorted by context length, longest first, so the padded
    tensor can later be fed to pack_padded_sequence.
    """
    data.sort(key=lambda example: len(example['context']), reverse=True)
    # transpose list-of-dicts -> dict-of-lists
    batch = {key: [example[key] for example in data] for key in data[0]}
    # pad the token-id sequences and move them to the GPU if available
    padded, lengths = merge(batch['context'])
    batch["context"] = to_cuda(padded)
    batch["context_len"] = lengths
    return batch
def collate_fn_usdl_dial(data):
    """Collate dialogue-level USDL examples (per-utterance id-lists) into a
    padded batch, sorted longest-context-first for pack_padded_sequence.
    """
    data.sort(key=lambda example: len(example['context']), reverse=True)
    # transpose list-of-dicts -> dict-of-lists
    batch = {key: [example[key] for example in data] for key in data[0]}
    # pad both the utterance and word dimensions, then move to GPU if available
    padded, lengths = merge_sent_and_word(batch['context'])
    batch["context"] = to_cuda(padded)
    batch["context_len"] = lengths
    return batch
def collate_fn_usdl_dial_flat(data):
    """Collate dialogue-level examples that carry both a flattened context and
    the nested per-utterance context, sorted by flattened length (descending).
    """
    data.sort(key=lambda example: len(example['context_flat']), reverse=True)
    # transpose list-of-dicts -> dict-of-lists
    batch = {key: [example[key] for example in data] for key in data[0]}
    flat_padded, flat_lengths = merge(batch['context_flat'])
    nested_padded, nested_lengths = merge_sent_and_word(batch['context'])
    pos_padded, _pos_lengths = merge(batch["sys_usr_id_positions"])
    batch["context"] = to_cuda(nested_padded)
    batch["context_len"] = nested_lengths
    batch["context_flat"] = to_cuda(flat_padded)
    batch["context_flat_len"] = flat_lengths
    batch["sys_usr_id_positions"] = to_cuda(pos_padded)
    return batch
| 5,245 | 38.742424 | 118 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/utils_domain.py | import torch
import torch.utils.data as data
import random
import math
from .dataloader_dst import *
from .dataloader_nlg import *
from .dataloader_nlu import *
from .dataloader_dm import *
from .dataloader_usdl import *
def get_loader(args, mode, tokenizer, datasets, unified_meta, shuffle=False):
    """Build a torch DataLoader over the combined `mode` split of all `datasets`.

    Args:
        args: run-configuration dict; keys read here include "task",
            "task_name", "batch_size", "eval_batch_size", "dataset",
            "train_data_ratio", "nb_shots", "rand_seed", "example_type",
            "max_seq_length".
        mode: split name (e.g. "train"); controls batch size, shuffling and
            the few-shot/ratio subsampling below.
        tokenizer: tokenizer forwarded to the task Dataset class.
        datasets: mapping dataset-name -> {split-name -> list of example dicts}.
        unified_meta: merged label metadata (see get_unified_meta).
        shuffle: force shuffling even when mode != "train".

    Returns:
        torch.utils.data.DataLoader. The Dataset class ("Dataset_<task>") and
        collate function ("collate_fn_<task>_<example_type>") are resolved
        dynamically from this module's globals(), so they must be imported
        into this namespace.
    """
    task = args["task"]
    # Evaluation may use a different (typically larger) batch size.
    batch_size = args["batch_size"] if mode == "train" else args["eval_batch_size"]
    combined_ds = []
    for ds in datasets:
        combined_ds += datasets[ds][mode]
    # do not consider empty system responses
    if (args["task_name"] == "rs") or (args["task"] == "dm"):
        print("[Info] Remove turns with empty system response...")
        combined_ds = [d for d in combined_ds if d["turn_sys"]!=""]
    ## Ignore the first system utterance for response selection task
    if (args["task_name"] == "rs"):
        print("[Info] Remove turn=0 system response...")
        combined_ds = [d for d in combined_ds if d["turn_id"]!=0]
    # control data ratio: subsample the training split for few-shot /
    # low-resource experiments (seeded so runs are reproducible)
    if (args["train_data_ratio"] != 1 or args["nb_shots"] != -1) and (mode == "train"):
        original_len = len(combined_ds)
        if ("oos_intent" in args["dataset"]):
            # oos_intent is class-balanced: keep a fixed number of examples
            # per intent class (100 examples per class in the full data).
            nb_train_sample_per_class = int(100 * args["train_data_ratio"])
            class_count = {k: 0 for k in unified_meta["intent"]}
            random.Random(args["rand_seed"]).shuffle(combined_ds)
            pair_trn_new = []
            for d in combined_ds:
                if class_count[d["intent"]] < nb_train_sample_per_class:
                    pair_trn_new.append(d)
                    class_count[d["intent"]] += 1
            combined_ds = pair_trn_new
        else:
            if args["train_data_ratio"] != 1:
                # keep a fraction of the shuffled data
                random.Random(args["rand_seed"]).shuffle(combined_ds)
                combined_ds = combined_ds[:int(len(combined_ds)*args["train_data_ratio"])]
            else:
                # keep a fixed number of shots
                random.Random(args["rand_seed"]).shuffle(combined_ds)
                combined_ds = combined_ds[:args["nb_shots"]]
        print("[INFO] Use Training Data: from {} to {}".format(original_len, len(combined_ds)))
    # Transpose list-of-dicts into dict-of-lists, keyed by the first example's
    # fields (assumes all examples share the same keys).
    data_info = {k: [] for k in combined_ds[0].keys()}
    for d in combined_ds:
        for k in combined_ds[0].keys():
            data_info[k].append(d[k])
    dataset = globals()["Dataset_"+task](data_info, tokenizer, args, unified_meta, mode, args["max_seq_length"])
    bool_shuffle = (mode=="train" or shuffle)
    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch_size,
                                              shuffle=bool_shuffle,
                                              collate_fn=globals()["collate_fn_{}_{}".format(task, args["example_type"])])
    return data_loader
def get_unified_meta(datasets):
    """Merge every dataset's "meta" dict into one unified label vocabulary.

    List-valued entries become value -> index mappings, with indices assigned
    in first-seen order across all datasets; entries of any other type simply
    overwrite whatever was stored before. The result always carries an
    "others": None placeholder entry.
    """
    merged = {"others": None}
    for name in datasets:
        for key, value in datasets[name]["meta"].items():
            if key not in merged:
                merged[key] = {}
            if type(value) is list:
                vocab = merged[key]
                for item in value:
                    if item not in vocab:
                        vocab[item] = len(vocab)
            else:
                merged[key] = value
    return merged
| 3,348 | 39.349398 | 122 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/multiwoz/fix_label.py |
def fix_general_label_error(labels, type, slots, ontology_version=""):
    """Normalize noisy MultiWOZ slot-value annotations into canonical form.

    Args:
        labels: turn-label annotations; when `type` is truthy a list of
            (slot, value) pairs, otherwise a list of dicts whose "slots"
            entry holds [[slot, value], ...].
        type: selects which of the two label formats above to parse.
            NOTE(review): this parameter name shadows the builtin `type`.
        slots: iterable of slot names to normalize (others are left as-is).
        ontology_version: extra venue/time/entity-name canonicalization is
            applied only when this is "1.0".

    Returns:
        dict mapping slot name -> normalized value string.
    """
    label_dict = dict([ (l[0], l[1]) for l in labels]) if type else dict([ (l["slots"][0][0], l["slots"][0][1]) for l in labels])

    # Direct value -> canonical-value substitutions applied to every slot.
    GENERAL_TYPO = {
        # type
        "guesthouse":"guest house","guesthouses":"guest house","guest":"guest house","mutiple sports":"multiple sports",
        "mutliple sports":"multiple sports","sports":"multiple sports","swimmingpool":"swimming pool",
        "concerthall":"concert hall", "concert":"concert hall", "pool":"swimming pool", "night club":"nightclub", "mus":"museum",
        "colleges":"college", "coll":"college","architectural":"architecture", "musuem":"museum", "churches":"church",
        # area
        "center":"centre", "center of town":"centre", "near city center":"centre", "in the north":"north",
        "cen":"centre", "east side":"east","east area":"east", "west part of town":"west", "ce":"centre",
        "town center":"centre", "centre of cambridge":"centre",
        "city center":"centre", "the south":"south", "scentre":"centre", "town centre":"centre", "in town":"centre",
        "north part of town":"north", "centre of town":"centre", "cb30aq": "none",
        # price
        "mode":"moderate", "moderate -ly": "moderate", "mo":"moderate",
        # day
        "monda": "monday",
        # parking
        "free parking":"free",
        # internet
        "free internet":"yes",
        # star
        "4 star":"4", "4 stars":"4", "0 star rarting":"none",
        # others
        "y":"yes", "any":"do n't care", "does not care":"do n't care", "not men":"none", "not":"none",
        "not mentioned":"none", '':"none", "not mendtioned":"none", "3 .":"3", "does not":"no", "fun":"none",
    }

    for slot in slots:
        if slot in label_dict.keys():
            # general typos
            if label_dict[slot] in GENERAL_TYPO.keys():
                label_dict[slot] = label_dict[slot].replace(label_dict[slot], GENERAL_TYPO[label_dict[slot]])

            # do not care
            if label_dict[slot] in ["doesn't care", "don't care", "dont care", "does not care", "do not care", "dontcare"]:
                label_dict[slot] = "do n't care"

            # miss match slot and value
            if  slot == "hotel-type" and label_dict[slot] in ["nigh", "moderate -ly priced", "bed and breakfast", "centre", "venetian", "intern", "a cheap -er hotel"]:
                label_dict[slot] = "none"
            if  slot == "hotel-internet" and label_dict[slot] == "4":
                label_dict[slot] = "none"
            # NOTE(review): the hotel-internet == "4" check above is duplicated.
            if  slot == "hotel-internet" and label_dict[slot] == "4":
                label_dict[slot] = "none"
            if  slot == "hotel-pricerange" and label_dict[slot] == "2":
                label_dict[slot] = "none"
            if  "area" in slot and label_dict[slot] in ["moderate"]:
                label_dict[slot] = "none"
            if  "day" in slot and label_dict[slot] == "t":
                label_dict[slot] = "none"
            if  slot == "hotel-type" and label_dict[slot] in ["hotel with free parking and free wifi", "4", "3 star hotel"]:
                label_dict[slot] = "hotel"
            if  slot == "hotel-star" and label_dict[slot] == "3 star hotel":
                label_dict[slot] = "3"

            # expand truncated / abbreviated area, day, price and internet values
            if  "area" in slot:
                if label_dict[slot] == "no": label_dict[slot] = "north"
                elif label_dict[slot] == "we": label_dict[slot] = "west"
                elif label_dict[slot] == "cent": label_dict[slot] = "centre"
            if  "day" in slot:
                if label_dict[slot] == "we": label_dict[slot] = "wednesday"
                elif label_dict[slot] == "no": label_dict[slot] = "none"
            if  "price" in slot and label_dict[slot] == "ch":
                label_dict[slot] = "cheap"
            if  "internet" in slot and label_dict[slot] == "free":
                label_dict[slot] = "yes"

            # Add on May, 2020: aggressive entity-name / time canonicalization,
            # only for ontology version "1.0".
            if ontology_version in ["1.0"]:
                label_dict[slot] = label_dict[slot].replace("theater", "theatre").replace("guesthouse", "guest house")

                # Typo or naming
                if label_dict[slot] == "cafe uno":
                    label_dict[slot] = "caffe uno"
                if label_dict[slot] == "alpha milton guest house":
                    label_dict[slot] = "alpha-milton guest house"
                if label_dict[slot] in ["churchills college", "churchhill college", "churchill", "the churchill college"]:
                    label_dict[slot] = "churchill college"
                if label_dict[slot] == "portugese":
                    label_dict[slot] = "portuguese"
                if label_dict[slot] == "pizza hut fenditton":
                    label_dict[slot] = "pizza hut fen ditton"
                if label_dict[slot] == "restaurant 17":
                    label_dict[slot] = "restaurant one seven"
                if label_dict[slot] == "restaurant 2 two":
                    label_dict[slot] = "restaurant two two"
                if label_dict[slot] == "gallery at 12 a high street":
                    label_dict[slot] = "gallery at twelve a high street"
                if label_dict[slot] == "museum of archaelogy":
                    label_dict[slot] = "museum of archaelogy and anthropology"
                if label_dict[slot] in ["huntingdon marriot hotel", "marriot hotel"]:
                    label_dict[slot] = "huntingdon marriott hotel"
                if label_dict[slot] in ["sheeps green and lammas land park fen causeway", "sheeps green and lammas land park"]:
                    label_dict[slot] = "sheep's green and lammas land park fen causeway"
                if label_dict[slot] in ["cambridge and country folk museum", "county folk museum"]:
                    label_dict[slot] = "cambridge and county folk museum"
                if label_dict[slot] == "ambridge":
                    label_dict[slot] = "cambridge"
                if label_dict[slot] == "cambridge contemporary art museum":
                    label_dict[slot] = "cambridge contemporary art"
                if label_dict[slot] == "molecular gastonomy":
                    label_dict[slot] = "molecular gastronomy"
                if label_dict[slot] == "2 two and cote":
                    label_dict[slot] = "two two and cote"
                if label_dict[slot] == "caribbeanindian":
                    label_dict[slot] = "caribbean|indian"
                if label_dict[slot] == "whipple museum":
                    label_dict[slot] = "whipple museum of the history of science"
                if label_dict[slot] == "ian hong":
                    label_dict[slot] = "ian hong house"
                if label_dict[slot] == "sundaymonday":
                    label_dict[slot] = "sunday|monday"
                if label_dict[slot] == "mondaythursday":
                    label_dict[slot] = "monday|thursday"
                if label_dict[slot] == "fridaytuesday":
                    label_dict[slot] = "friday|tuesday"
                if label_dict[slot] == "cheapmoderate":
                    label_dict[slot] = "cheap|moderate"
                if label_dict[slot] == "golden house                            golden house":
                    label_dict[slot] = "the golden house"
                if label_dict[slot] == "golden house":
                    label_dict[slot] = "the golden house"
                if label_dict[slot] == "sleeperz":
                    label_dict[slot] = "sleeperz hotel"
                if label_dict[slot] == "jamaicanchinese":
                    label_dict[slot] = "jamaican|chinese"
                if label_dict[slot] == "shiraz":
                    label_dict[slot] = "shiraz restaurant"
                if label_dict[slot] == "museum of archaelogy and anthropogy":
                    label_dict[slot] = "museum of archaelogy and anthropology"
                if label_dict[slot] == "yipee noodle bar":
                    label_dict[slot] = "yippee noodle bar"
                if label_dict[slot] == "abc theatre":
                    label_dict[slot] = "adc theatre"
                if label_dict[slot] == "wankworth house":
                    label_dict[slot] = "warkworth house"
                if label_dict[slot] in ["cherry hinton water play park", "cherry hinton water park"]:
                    label_dict[slot] = "cherry hinton water play"
                if label_dict[slot] == "the gallery at 12":
                    label_dict[slot] = "the gallery at twelve"
                if label_dict[slot] == "barbequemodern european":
                    label_dict[slot] = "barbeque|modern european"
                if label_dict[slot] == "north americanindian":
                    label_dict[slot] = "north american|indian"
                if label_dict[slot] == "chiquito":
                    label_dict[slot] = "chiquito restaurant bar"

                # Abbreviation
                if label_dict[slot] == "city centre north bed and breakfast":
                    label_dict[slot] = "city centre north b and b"
                if label_dict[slot] == "north bed and breakfast":
                    label_dict[slot] = "north b and b"

                # Article and 's
                if label_dict[slot] == "christ college":
                    label_dict[slot] = "christ's college"
                if label_dict[slot] == "kings college":
                    label_dict[slot] = "king's college"
                if label_dict[slot] == "saint johns college":
                    label_dict[slot] = "saint john's college"
                if label_dict[slot] == "kettles yard":
                    label_dict[slot] = "kettle's yard"
                if label_dict[slot] == "rosas bed and breakfast":
                    label_dict[slot] = "rosa's bed and breakfast"
                if label_dict[slot] == "saint catharines college":
                    label_dict[slot] = "saint catharine's college"
                if label_dict[slot] == "little saint marys church":
                    label_dict[slot] = "little saint mary's church"
                if label_dict[slot] == "great saint marys church":
                    label_dict[slot] = "great saint mary's church"
                if label_dict[slot] in ["queens college", "queens' college"]:
                    label_dict[slot] = "queen's college"
                if label_dict[slot] == "peoples portraits exhibition at girton college":
                    label_dict[slot] = "people's portraits exhibition at girton college"
                if label_dict[slot] == "st johns college":
                    label_dict[slot] = "saint john's college"
                if label_dict[slot] == "whale of time":
                    label_dict[slot] = "whale of a time"
                if label_dict[slot] in ["st catharines college", "saint catharines college"]:
                    label_dict[slot] = "saint catharine's college"

                # Time: normalize to HH:MM
                if label_dict[slot] == "16,15":
                    label_dict[slot] = "16:15"
                if label_dict[slot] == "1330":
                    label_dict[slot] = "13:30"
                if label_dict[slot] == "1430":
                    label_dict[slot] = "14:30"
                if label_dict[slot] == "1532":
                    label_dict[slot] = "15:32"
                if label_dict[slot] == "845":
                    label_dict[slot] = "08:45"
                if label_dict[slot] == "1145":
                    label_dict[slot] = "11:45"
                if label_dict[slot] == "1545":
                    label_dict[slot] = "15:45"
                if label_dict[slot] == "1329":
                    label_dict[slot] = "13:29"
                if label_dict[slot] == "1345":
                    label_dict[slot] = "13:45"
                if label_dict[slot] == "1715":
                    label_dict[slot] = "17:15"
                if label_dict[slot] == "929":
                    label_dict[slot] = "09:29"

                # restaurant
                if slot == "restaurant-name" and "meze bar" in label_dict[slot]:
                    label_dict[slot] = "meze bar restaurant"
                if slot == "restaurant-name" and label_dict[slot] == "alimentum":
                    label_dict[slot] = "restaurant alimentum"
                if slot == "restaurant-name" and label_dict[slot] == "good luck":
                    label_dict[slot] = "the good luck chinese food takeaway"
                if slot == "restaurant-name" and label_dict[slot] == "grafton hotel":
                    label_dict[slot] = "grafton hotel restaurant"
                if slot == "restaurant-name" and label_dict[slot] == "2 two":
                    label_dict[slot] = "restaurant two two"
                if slot == "restaurant-name" and label_dict[slot] == "hotpot":
                    label_dict[slot] = "the hotpot"
                if slot == "restaurant-name" and label_dict[slot] == "hobsons house":
                    label_dict[slot] = "hobson house"
                if slot == "restaurant-name" and label_dict[slot] == "shanghai":
                    label_dict[slot] = "shanghai family restaurant"
                if slot == "restaurant-name" and label_dict[slot] == "17":
                    label_dict[slot] = "restaurant one seven"
                if slot == "restaurant-name" and label_dict[slot] in ["22", "restaurant 22"]:
                    label_dict[slot] = "restaurant two two"
                if slot == "restaurant-name" and label_dict[slot] == "the maharajah tandoor":
                    label_dict[slot] = "maharajah tandoori restaurant"
                if slot == "restaurant-name" and label_dict[slot] == "the grafton hotel":
                    label_dict[slot] = "grafton hotel restaurant"
                if slot == "restaurant-name" and label_dict[slot] == "gardenia":
                    label_dict[slot] = "the gardenia"
                if slot == "restaurant-name" and label_dict[slot] == "el shaddia guest house":
                    label_dict[slot] = "el shaddai"
                if slot == "restaurant-name" and label_dict[slot] == "the bedouin":
                    label_dict[slot] = "bedouin"
                if slot == "restaurant-name" and label_dict[slot] == "the kohinoor":
                    label_dict[slot] = "kohinoor"
                if slot == "restaurant-name" and label_dict[slot] == "the peking":
                    label_dict[slot] = "peking restaurant"
                if slot == "restaurant-book time" and label_dict[slot] == "7pm":
                    label_dict[slot] = "19:00"
                if slot == "restaurant-book time" and label_dict[slot] == "4pm":
                    label_dict[slot] = "16:00"
                if slot == "restaurant-book time" and label_dict[slot] == "8pm":
                    label_dict[slot] = "20:00"
                if slot == "restaurant-name" and label_dict[slot] == "sitar":
                    label_dict[slot] = "sitar tandoori"
                if slot == "restaurant-name" and label_dict[slot] == "binh":
                    label_dict[slot] = "thanh binh"
                if slot == "restaurant-name" and label_dict[slot] == "mahal":
                    label_dict[slot] = "mahal of cambridge"

                # attraction
                if slot == "attraction-name" and label_dict[slot] == "scudamore":
                    label_dict[slot] = "scudamores punting co"
                if slot == "attraction-name" and label_dict[slot] == "salsa":
                    label_dict[slot] = "club salsa"
                if slot == "attraction-name" and label_dict[slot] in ["abbey pool", "abbey pool and astroturf"]:
                    label_dict[slot] = "abbey pool and astroturf pitch"
                if slot == "attraction-name" and label_dict[slot] == "cherry hinton hall":
                    label_dict[slot] = "cherry hinton hall and grounds"
                if slot == "attraction-name" and label_dict[slot] == "trinity street college":
                    label_dict[slot] = "trinity college"
                if slot == "attraction-name" and label_dict[slot] == "the wandlebury":
                    label_dict[slot] = "wandlebury country park"
                if slot == "attraction-name" and label_dict[slot] == "king hedges learner pool":
                    label_dict[slot] = "kings hedges learner pool"
                if slot == "attraction-name" and label_dict[slot] in ["botanic gardens", "cambridge botanic gardens"]:
                    label_dict[slot] = "cambridge university botanic gardens"
                if slot == "attraction-name" and label_dict[slot] == "soultree":
                    label_dict[slot] = "soul tree nightclub"
                if slot == "attraction-name" and label_dict[slot] == "queens":
                    label_dict[slot] = "queen's college"
                if slot == "attraction-name" and label_dict[slot] == "sheeps green":
                    label_dict[slot] = "sheep's green and lammas land park fen causeway"
                if slot == "attraction-name" and label_dict[slot] == "jesus green":
                    label_dict[slot] = "jesus green outdoor pool"
                if slot == "attraction-name" and label_dict[slot] == "adc":
                    label_dict[slot] = "adc theatre"
                if slot == "attraction-name" and label_dict[slot] == "hobsons house":
                    label_dict[slot] = "hobson house"
                if slot == "attraction-name" and label_dict[slot] == "cafe jello museum":
                    label_dict[slot] = "cafe jello gallery"
                if slot == "attraction-name" and label_dict[slot] == "whippple museum":
                    label_dict[slot] = "whipple museum of the history of science"
                if slot == "attraction-type" and label_dict[slot] == "boating":
                    label_dict[slot] = "boat"
                if slot == "attraction-name" and label_dict[slot] == "peoples portraits exhibition":
                    label_dict[slot] = "people's portraits exhibition at girton college"
                if slot == "attraction-name" and label_dict[slot] == "lammas land park":
                    label_dict[slot] = "sheep's green and lammas land park fen causeway"

                # taxi
                if slot in ["taxi-destination", "taxi-departure"] and label_dict[slot] == "meze bar":
                    label_dict[slot] = "meze bar restaurant"
                if slot in ["taxi-destination", "taxi-departure"] and label_dict[slot] == "el shaddia guest house":
                    label_dict[slot] = "el shaddai"
                if slot == "taxi-departure" and label_dict[slot] == "centre of town at my hotel":
                    label_dict[slot] = "hotel"

                # train
                if slot == "train-departure" and label_dict[slot] in ["liverpool", "london liverpool"]:
                    label_dict[slot] = "london liverpool street"
                if slot == "train-destination" and label_dict[slot] == "liverpool street":
                    label_dict[slot] = "london liverpool street"
                if slot == "train-departure" and label_dict[slot] == "alpha milton":
                    label_dict[slot] = "alpha-milton"

                # hotel
                if slot == "hotel-name" and label_dict[slot] == "el shaddia guest house":
                    label_dict[slot] = "el shaddai"
                if slot == "hotel-name" and label_dict[slot] == "alesbray lodge guest house":
                    label_dict[slot] = "aylesbray lodge guest house"
                if slot == "hotel-name" and label_dict[slot] == "the gonvile hotel":
                    label_dict[slot] = "the gonville hotel"
                if slot == "hotel-name" and label_dict[slot] == "no":
                    label_dict[slot] = "none"
                if slot == "hotel-name" and label_dict[slot] in ["holiday inn", "holiday inn cambridge"]:
                    label_dict[slot] = "express by holiday inn cambridge"
                if slot == "hotel-name" and label_dict[slot] == "wartworth":
                    label_dict[slot] = "warkworth house"

                # Suppose to be a wrong annotation
                if slot == "restaurant-name" and label_dict[slot] == "south":
                    label_dict[slot] = "none"
                if slot == "attraction-type" and label_dict[slot] == "churchill college":
                    label_dict[slot] = "none"
                if slot == "attraction-name" and label_dict[slot] == "boat":
                    label_dict[slot] = "none"
                if slot == "attraction-type" and label_dict[slot] == "museum kettles yard":
                    label_dict[slot] = "none"
                if slot == "attraction-type" and label_dict[slot] == "hotel":
                    label_dict[slot] = "none"
                if slot == "attraction-type" and label_dict[slot] == "camboats":
                    label_dict[slot] = "boat"

                # TODO: Need to check with dialogue data to deal with strange labels before
                # if slot == "restaurant-name" and label_dict[slot] == "eraina and michaelhouse cafe":
                #     label_dict[slot] = "eraina|michaelhouse cafe"
                # if slot == "attraction-name" and label_dict[slot] == "gonville hotel":
                #     label_dict[slot] = "none"
                # if label_dict[slot] == "good luck":
                #     label_dict[slot] = "the good luck chinese food takeaway"
                # if slot == "restaurant-book time" and label_dict[slot] == "9":
                #     label_dict[slot] = "21:00"
                # if slot == "taxi-departure" and label_dict[slot] == "girton college":
                #     label_dict[slot] = "people's portraits exhibition at girton college"
                # if slot == "restaurant-name" and label_dict[slot] == "molecular gastronomy":
                #     label_dict[slot] = "none"

                # [Info] Adding Slot: restaurant-name with value: primavera
                # [Info] Adding Slot: train-departure with value: huntingdon
                # [Info] Adding Slot: attraction-name with value: aylesbray lodge guest house
                # [Info] Adding Slot: attraction-name with value: gallery
                # [Info] Adding Slot: hotel-name with value: eraina
                # [Info] Adding Slot: restaurant-name with value: india west
                # [Info] Adding Slot: restaurant-name with value: autumn house
                # [Info] Adding Slot: train-destination with value: norway
                # [Info] Adding Slot: attraction-name with value: cinema cinema
                # [Info] Adding Slot: hotel-name with value: lan hon
                # [Info] Adding Slot: restaurant-food with value: sushi
                # [Info] Adding Slot: attraction-name with value: university arms hotel
                # [Info] Adding Slot: train-departure with value: stratford
                # [Info] Adding Slot: attraction-name with value: history of science museum
                # [Info] Adding Slot: restaurant-name with value: nil
                # [Info] Adding Slot: train-leaveat with value: 9
                # [Info] Adding Slot: restaurant-name with value: ashley hotel
                # [Info] Adding Slot: taxi-destination with value: the cambridge shop
                # [Info] Adding Slot: hotel-name with value: acorn place
                # [Info] Adding Slot: restaurant-name with value: de luca cucina and bar riverside brasserie
                # [Info] Adding Slot: hotel-name with value: super 5
                # [Info] Adding Slot: attraction-name with value: archway house
                # [Info] Adding Slot: train-arriveby with value: 8
                # [Info] Adding Slot: train-leaveat with value: 10
                # [Info] Adding Slot: restaurant-book time with value: 9
                # [Info] Adding Slot: hotel-name with value: nothamilton lodge
                # [Info] Adding Slot: attraction-name with value: st christs college

    return label_dict
| 24,598 | 59.439803 | 167 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/loss_function/masked_cross_entropy.py | import torch
from torch.nn import functional
from torch.autograd import Variable
from utils.config import *
import torch.nn as nn
import numpy as np
def sequence_mask(sequence_length, max_len=None):
    """Boolean mask of shape (batch, max_len): True where position < length.

    Args:
        sequence_length: LongTensor (batch,) of valid lengths.
        max_len: width of the mask; defaults to the largest length.
    """
    if max_len is None:
        max_len = sequence_length.data.max()
    batch_size = sequence_length.size(0)
    # one row of positions [0, 1, ..., max_len-1], broadcast over the batch
    positions = Variable(torch.arange(0, max_len).long().unsqueeze(0).expand(batch_size, max_len))
    if sequence_length.is_cuda:
        positions = positions.cuda()
    limits = sequence_length.unsqueeze(1).expand_as(positions)
    return positions < limits
def cross_entropy(logits, target):
    """Mean negative log-likelihood of `target` under softmax(`logits`).

    Args:
        logits: FloatTensor (batch, num_classes) of unnormalized scores.
        target: LongTensor (batch, 1) holding the gold class index per row
            (the trailing dim of 1 is required by torch.gather).

    Returns:
        Scalar tensor: sum of per-example NLL divided by the batch size.
    """
    batch_size = logits.size(0)
    # Explicit dim=1: calling log_softmax without a dim is deprecated and its
    # implicit choice is ambiguous for non-2D inputs.
    log_probs_flat = functional.log_softmax(logits, dim=1)
    losses_flat = -torch.gather(log_probs_flat, dim=1, index=target)
    loss = losses_flat.sum() / batch_size
    return loss
def masked_cross_entropy(logits, target, length):
    """
    Args:
        logits: A Variable containing a FloatTensor of size
            (batch, max_len, num_classes) which contains the
            unnormalized probability for each class.
        target: A Variable containing a LongTensor of size
            (batch, max_len) which contains the index of the true
            class for each corresponding step.
        length: A Variable containing a LongTensor of size (batch,)
            which contains the length of each data in a batch.

    Returns:
        loss: An average loss value masked by the length.
    """
    length = Variable(torch.LongTensor(length))
    if USE_CUDA:
        length = length.cuda()
    # flatten to (batch * max_len, num_classes) and take log-probabilities
    num_classes = logits.size(-1)
    log_probs_flat = functional.log_softmax(logits.view(-1, num_classes), dim=1)
    # gather the log-prob of each gold class: (batch * max_len, 1)
    nll_flat = -torch.gather(log_probs_flat, dim=1, index=target.view(-1, 1))
    # back to (batch, max_len), then zero out padded positions
    nll = nll_flat.view(*target.size())
    valid = sequence_mask(sequence_length=length, max_len=target.size(1))
    nll = nll * valid.float()
    # normalize by the number of real (unpadded) tokens
    return nll.sum() / length.float().sum()
def masked_binary_cross_entropy(logits, target, length):
    '''
    Binary cross entropy with logits, averaged over valid time steps only.
    logits: (batch, max_len, num_class) -- unnormalized scores
    target: (batch, max_len, num_class) -- 0/1 labels
    length: list of valid lengths per batch element; steps at index >= length
            contribute no loss.
    Returns a scalar: sum of per-step mean BCE over valid steps, divided by
    the total number of valid steps.
    '''
    if USE_CUDA:
        length = Variable(torch.LongTensor(length)).cuda()
    else:
        length = Variable(torch.LongTensor(length))
    bce_criterion = nn.BCEWithLogitsLoss()
    loss = 0
    # Python-level double loop: accumulates one mean-BCE term per valid
    # (batch, step) cell. NOTE(review): slow for long sequences; kept as-is.
    for bi in range(logits.size(0)):
        for i in range(logits.size(1)):
            if i < length[bi]:
                loss += bce_criterion(logits[bi][i], target[bi][i])
    loss = loss / length.float().sum()
    return loss
def masked_cross_entropy_(logits, target, length, take_log=False):
    """Length-masked NLL where `logits` already hold (log-)probabilities.

    Like masked_cross_entropy, but no softmax is applied: pass
    take_log=True when `logits` are probabilities, False when they are
    already log-probabilities.
    logits: (batch, max_len, num_classes); target: (batch, max_len);
    length: list of valid lengths per batch element.
    """
    if USE_CUDA:
        length = Variable(torch.LongTensor(length)).cuda()
    else:
        length = Variable(torch.LongTensor(length))

    # logits_flat: (batch * max_len, num_classes)
    logits_flat = logits.view(-1, logits.size(-1)) ## -1 means infered from other dimentions
    if take_log:
        logits_flat = torch.log(logits_flat)
    # target_flat: (batch * max_len, 1)
    target_flat = target.view(-1, 1)
    # losses_flat: (batch * max_len, 1) -- NLL of each gold class
    losses_flat = -torch.gather(logits_flat, dim=1, index=target_flat)
    # losses: (batch, max_len)
    losses = losses_flat.view(*target.size())
    # mask: (batch, max_len) -- True inside each sequence's real length
    mask = sequence_mask(sequence_length=length, max_len=target.size(1))
    losses = losses * mask.float()
    # average over the number of real (unpadded) tokens
    loss = losses.sum() / length.float().sum()
    return loss
def masked_coverage_loss(coverage, attention, length):
    """Pointer-network style coverage penalty, masked by sequence length.

    Sums min(coverage, attention) elementwise over the valid positions and
    averages over the batch. coverage/attention: (batch, max_len, extra-dim);
    length: list of valid lengths per batch element.
    """
    if USE_CUDA:
        length = Variable(torch.LongTensor(length)).cuda()
    else:
        length = Variable(torch.LongTensor(length))
    mask = sequence_mask(sequence_length=length)
    # penalize overlap between cumulative coverage and the current attention
    min_ = torch.min(coverage, attention)
    mask = mask.unsqueeze(2).expand_as(min_)
    min_ = min_ * mask.float()
    # normalize by batch size (not by token count)
    loss = min_.sum() / (len(length)*1.0)
    return loss
def masked_cross_entropy_for_slot(logits, target, mask, use_softmax=True):
    """Slot-level cross entropy averaged over all (batch, slot) cells.

    Args:
        logits: FloatTensor (batch, num_slots, num_classes); raw scores when
            use_softmax is True, otherwise already log-probabilities.
        target: LongTensor (batch, num_slots) of gold class indices.
        mask: (batch, num_slots) tensor; zero/False cells contribute no loss.
        use_softmax: whether to apply log_softmax before gathering.

    Returns:
        Scalar loss: masked NLL summed over all cells, divided by
        batch * num_slots (the denominator counts masked cells too).
    """
    flat_scores = logits.view(-1, logits.size(-1))
    if use_softmax:
        flat_scores = functional.log_softmax(flat_scores, dim=1)
    # NLL of each gold class, reshaped back to (batch, num_slots)
    picked = -torch.gather(flat_scores, dim=1, index=target.view(-1, 1))
    per_cell = picked.view(*target.size()) * mask.float()
    return per_cell.sum() / (per_cell.size(0) * per_cell.size(1))
def masked_cross_entropy_for_value(logits, target, mask):
    """Value-generation NLL, masked per (batch, slot) by value length.

    logits: b * |s| * m * |v| -- probabilities (NOT logits; torch.log is
        applied directly, so inputs must already be normalized)
    target: b * |s| * m -- gold token ids of each slot value
    mask: b * |s| -- valid value length for every (batch, slot) pair
    Returns a scalar via masking() (sum over valid positions / their count).
    """
    # logits_flat: (b*|s|*m, |v|)
    logits_flat = logits.view(-1, logits.size(-1)) ## -1 means infered from other dimentions
    # print(logits_flat.size())
    log_probs_flat = torch.log(logits_flat)
    # print("log_probs_flat", log_probs_flat)
    # target_flat: (b*|s|*m, 1)
    target_flat = target.view(-1, 1)
    # print("target_flat", target_flat)
    losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
    losses = losses_flat.view(*target.size()) # b * |s| * m
    loss = masking(losses, mask)
    return loss
def masking(losses, mask):
    """Average ``losses`` over the positions selected by per-slot lengths.

    losses: (batch, |slots|, max_len) per-token losses.
    mask:   (batch, |slots|) integer lengths; position j of slot s in
            example b is valid iff j < mask[b, s].

    Returns the sum of losses over valid positions divided by the number
    of valid positions. (If mask sums to zero this divides by zero,
    exactly like the original loop-based implementation.)
    """
    max_len = losses.size(2)
    # Broadcast compare (1, 1, max_len) < (batch, |slots|, 1) to get the
    # boolean validity mask of shape (batch, |slots|, max_len) in one shot.
    # This replaces the original per-slot Python loop + stack + transpose
    # with a single vectorized comparison; the result is identical.
    seq_range = torch.arange(0, max_len, device=mask.device).long()
    mask_ = seq_range.view(1, 1, -1) < mask.unsqueeze(2)
    if losses.is_cuda:
        mask_ = mask_.cuda()
    losses = losses * mask_.float()
    loss = losses.sum() / (mask_.sum().float())
    return loss
| 6,501 | 36.802326 | 92 | py |
custom-diffusion | custom-diffusion-main/sample.py | # This code is built from the Stable Diffusion repository: https://github.com/CompVis/stable-diffusion.
# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors.
# CreativeML Open RAIL-M
#
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
#
# CreativeML Open RAIL-M License
#
# Section I: PREAMBLE
# Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.
# Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
# In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.
# Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.
# This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
# NOW THEREFORE, You and Licensor agree as follows:
# 1. Definitions
# - "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
# - "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
# - "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
# - "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
# - "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
# - "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
# - "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
# - "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
# - "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
# - "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
# - "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
# - "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
# Section II: INTELLECTUAL PROPERTY RIGHTS
# Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
# 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
# 3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
# Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
# 4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
# Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
# You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
# You must cause any modified files to carry prominent notices stating that You changed the files;
# You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
# You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
# 5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
# 6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
# Section IV: OTHER PROVISIONS
# 7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model.
# 8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
# 9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
# 10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
# 11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
# 12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
# END OF TERMS AND CONDITIONS
# Attachment A
# Use Restrictions
# You agree not to use the Model or Derivatives of the Model:
# - In any way that violates any applicable national, federal, state, local or international law or regulation;
# - For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
# - To generate or disseminate verifiably false information and/or content with the purpose of harming others;
# - To generate or disseminate personal identifiable information that can be used to harm an individual;
# - To defame, disparage or otherwise harass others;
# - For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
# - For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
# - To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
# - For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
# - To provide medical advice and medical results interpretation;
# - To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
import argparse, os, sys, glob
sys.path.append('stable-diffusion')
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from einops import rearrange
from torchvision.utils import make_grid
from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import contextmanager, nullcontext
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
import wandb
def load_model_from_config(config, ckpt, verbose=False):
    """Instantiate the model described by ``config`` and load ``ckpt``.

    The text-encoder token embedding is loaded separately so a checkpoint
    with fewer token rows than the instantiated model can still be used
    (extra rows keep their freshly-initialized values). Returns the model
    on GPU in eval mode.
    """
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    state = pl_sd["state_dict"]
    model = instantiate_from_config(config.model)

    # Pull the token embedding out of the state dict and copy it in by hand:
    # the instantiated model may have more rows (e.g. modifier tokens) than
    # the checkpoint provides.
    embed_key = "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight"
    token_weights = state.pop(embed_key)
    missing, unexpected = model.load_state_dict(state, strict=False)
    model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[:token_weights.shape[0]] = token_weights

    if verbose and len(missing) > 0:
        print("missing keys:")
        print(missing)
    if verbose and len(unexpected) > 0:
        print("unexpected keys:")
        print(unexpected)

    model.cuda()
    model.eval()
    return model
def main():
    """CLI entry point: sample images from a Stable Diffusion model,
    optionally patched with a Custom Diffusion delta checkpoint, and save
    individual samples plus an image grid to disk (and optionally wandb).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--prompt",
        type=str,
        nargs="?",
        default="a painting of a virus monster playing guitar",
        help="the prompt to render"
    )
    parser.add_argument(
        "--outdir",
        type=str,
        nargs="?",
        help="dir to write results to",
        default="outputs/txt2img-samples"
    )
    parser.add_argument(
        "--skip_grid",
        action='store_true',
        help="do not save a grid, only individual samples. Helpful when evaluating lots of samples",
    )
    parser.add_argument(
        "--skip_save",
        action='store_true',
        help="do not save individual samples. For speed measurements.",
    )
    parser.add_argument(
        "--ddim_steps",
        type=int,
        default=200,
        help="number of ddim sampling steps",
    )
    parser.add_argument(
        "--plms",
        action='store_true',
        help="use plms sampling",
    )
    parser.add_argument(
        "--laion400m",
        action='store_true',
        help="uses the LAION400M model",
    )
    parser.add_argument(
        "--fixed_code",
        action='store_true',
        help="if enabled, uses the same starting code across samples ",
    )
    parser.add_argument(
        "--ddim_eta",
        type=float,
        default=1.0,
        help="ddim eta (eta=0.0 corresponds to deterministic sampling",
    )
    parser.add_argument(
        "--n_iter",
        type=int,
        default=1,
        help="sample this often",
    )
    parser.add_argument(
        "--H",
        type=int,
        default=512,
        help="image height, in pixel space",
    )
    parser.add_argument(
        "--W",
        type=int,
        default=512,
        help="image width, in pixel space",
    )
    parser.add_argument(
        "--C",
        type=int,
        default=4,
        help="latent channels",
    )
    parser.add_argument(
        "--f",
        type=int,
        default=8,
        help="downsampling factor",
    )
    parser.add_argument(
        "--n_samples",
        type=int,
        default=6,
        help="how many samples to produce for each given prompt. A.k.a. batch size",
    )
    parser.add_argument(
        "--n_rows",
        type=int,
        default=6,
        help="rows in the grid (default: n_samples)",
    )
    parser.add_argument(
        "--scale",
        type=float,
        default=6.,
        help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
    )
    parser.add_argument(
        "--from-file",
        type=str,
        help="if specified, load prompts from this file",
    )
    parser.add_argument(
        "--config",
        type=str,
        default="configs/custom-diffusion/finetune.yaml",
        help="path to config which constructs model",
    )
    parser.add_argument(
        "--ckpt",
        type=str,
        required=True,
        help="path to checkpoint of the pre-trained model",
    )
    parser.add_argument(
        "--delta_ckpt",
        type=str,
        default=None,
        help="path to delta checkpoint of fine-tuned custom diffusion block",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="the seed (for reproducible sampling)",
    )
    parser.add_argument(
        "--precision",
        type=str,
        help="evaluate at this precision",
        choices=["full", "autocast"],
        default="autocast"
    )
    parser.add_argument(
        "--wandb_log",
        action='store_true',
        help="save grid images to wandb.",
    )
    parser.add_argument(
        "--compress",
        action='store_true',
        help="delta path provided is a compressed checkpoint.",
    )
    parser.add_argument(
        "--modifier_token",
        type=str,
        default=None,
        help="A token to use as a modifier for the concept.",
    )
    opt = parser.parse_args()
    # Optional experiment logging; the run name is derived from the
    # checkpoint path (third-from-last path component when available).
    if opt.wandb_log:
        if opt.delta_ckpt is not None:
            name = opt.delta_ckpt.split('/')[-3]
        elif 'checkpoints' in opt.ckpt:
            name = opt.ckpt.split('/')[-3]
        else:
            name = opt.ckpt.split('/')[-1]
        wandb.init(project="custom-diffusion", entity="cmu-gil", name=name )
    # Prefer the config YAML saved next to the checkpoint, if one exists
    # (overrides --config with the newest matching file).
    if opt.delta_ckpt is not None:
        if len(glob.glob(os.path.join(opt.delta_ckpt.split('checkpoints')[0], "configs/*.yaml"))) > 0:
            opt.config = sorted(glob.glob(os.path.join(opt.delta_ckpt.split('checkpoints')[0], "configs/*.yaml")))[-1]
    else:
        if len(glob.glob(os.path.join(opt.ckpt.split('checkpoints')[0], "configs/*.yaml"))) > 0:
            opt.config = sorted(glob.glob(os.path.join(opt.ckpt.split('checkpoints')[0], "configs/*.yaml")))[-1]
    seed_everything(opt.seed)
    config = OmegaConf.load(f"{opt.config}")
    # Swap in the wrapper text encoder when a modifier token is requested.
    if opt.modifier_token is not None:
        config.model.params.cond_stage_config.target = 'src.custom_modules.FrozenCLIPEmbedderWrapper'
        config.model.params.cond_stage_config.params = {}
        config.model.params.cond_stage_config.params.modifier_token = opt.modifier_token
    model = load_model_from_config(config, f"{opt.ckpt}")
    # Apply the fine-tuned Custom Diffusion delta on top of the pretrained
    # weights (optionally reconstructing compressed low-rank factors).
    if opt.delta_ckpt is not None:
        delta_st = torch.load(opt.delta_ckpt)
        embed = None
        if 'embed' in delta_st['state_dict']:
            # assumes text-embedding width 768 (CLIP ViT-L) — TODO confirm
            embed = delta_st['state_dict']['embed'].reshape(-1,768)
            del delta_st['state_dict']['embed']
            print(embed.shape)
        delta_st = delta_st['state_dict']
        if opt.compress:
            # Compressed deltas store low-rank factors u, v for the cross
            # attention k/v projections; reconstruct W_pretrained + u @ v.
            for name in delta_st.keys():
                if 'to_k' in name or 'to_v' in name:
                    delta_st[name] = model.state_dict()[name] + delta_st[name]['u']@delta_st[name]['v']
            model.load_state_dict(delta_st, strict=False)
        else:
            model.load_state_dict(delta_st, strict=False)
        if embed is not None:
            print("loading new embedding")
            print(model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data.shape)
            # New token embeddings occupy the last rows of the table.
            model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[-embed.shape[0]:] = embed
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = model.to(device)
    if opt.plms:
        sampler = PLMSSampler(model)
    else:
        sampler = DDIMSampler(model)
    # Outputs go next to the delta checkpoint when one is given, else --outdir.
    if opt.delta_ckpt is not None:
        outpath = os.path.dirname(os.path.dirname(opt.delta_ckpt))
    else:
        os.makedirs(opt.outdir, exist_ok=True)
        outpath = opt.outdir
    batch_size = opt.n_samples
    n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
    # Prompts: a single --prompt repeated batch_size times, or one batch
    # per line of --from-file.
    if not opt.from_file:
        prompt = opt.prompt
        assert prompt is not None
        data = [batch_size * [prompt]]
    else:
        print(f"reading prompts from {opt.from_file}")
        with open(opt.from_file, "r") as f:
            data = f.read().splitlines()
            data = [batch_size * [prompt] for prompt in data]
    sample_path = os.path.join(outpath, "samples")
    os.makedirs(sample_path, exist_ok=True)
    # Continue numbering after any files already present.
    base_count = len(os.listdir(sample_path))
    grid_count = len(os.listdir(outpath)) - 1
    start_code = None
    if opt.fixed_code:
        start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device)
    precision_scope = autocast if opt.precision == "autocast" else nullcontext
    # Sampling loop: classifier-free guidance uses an empty-string
    # unconditional embedding whenever scale != 1.0.
    with torch.no_grad():
        with precision_scope("cuda"):
            with model.ema_scope():
                for prompts in tqdm(data, desc="data"):
                    all_samples = list()
                    for n in trange(opt.n_iter, desc="Sampling"):
                        print(prompts[0])
                        uc = None
                        if opt.scale != 1.0:
                            uc = model.get_learned_conditioning(batch_size * [""])
                        if isinstance(prompts, tuple):
                            prompts = list(prompts)
                        c = model.get_learned_conditioning(prompts)
                        shape = [opt.C, opt.H // opt.f, opt.W // opt.f]
                        samples_ddim, _ = sampler.sample(S=opt.ddim_steps,
                                                         conditioning=c,
                                                         batch_size=opt.n_samples,
                                                         shape=shape,
                                                         verbose=False,
                                                         unconditional_guidance_scale=opt.scale,
                                                         unconditional_conditioning=uc,
                                                         eta=opt.ddim_eta,
                                                         x_T=start_code)
                        # print(samples_ddim.size())
                        # Decode latents to images and map from [-1, 1] to [0, 1].
                        x_samples_ddim = model.decode_first_stage(samples_ddim)
                        x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
                        x_samples_ddim = x_samples_ddim.cpu()
                        if not opt.skip_save:
                            for x_sample in x_samples_ddim:
                                x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                                img = Image.fromarray(x_sample.astype(np.uint8))
                                img.save(os.path.join(sample_path, f"{base_count:05}.png"))
                                base_count += 1
                        if not opt.skip_grid:
                            all_samples.append(x_samples_ddim)
                    if not opt.skip_grid:
                        # additionally, save as grid
                        grid = torch.stack(all_samples, 0)
                        grid = rearrange(grid, 'n b c h w -> (n b) c h w')
                        grid = make_grid(grid, nrow=n_rows)
                        # to image
                        grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
                        img = Image.fromarray(grid.astype(np.uint8))
                        sampling_method = 'plms' if opt.plms else 'ddim'
                        img.save(os.path.join(outpath, f'{prompts[0].replace(" ", "-")}_{opt.scale}_{sampling_method}_{opt.ddim_steps}_{opt.ddim_eta}.png'))
                        if opt.wandb_log:
                            wandb.log({ f'{prompts[0].replace(" ", "-")}_{opt.scale}_{sampling_method}_{opt.ddim_steps}_{opt.ddim_eta}.png' : [wandb.Image(img)]})
                        grid_count += 1
    print(f"Your samples are ready and waiting for you here: \n{outpath} \n"
          f" \nEnjoy.")
# Script entry point.
if __name__ == "__main__":
    main()
| 27,272 | 62.131944 | 1,097 | py |
custom-diffusion | custom-diffusion-main/train.py | # This code is built from the Stable Diffusion repository: https://github.com/CompVis/stable-diffusion.
# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors.
# CreativeML Open RAIL-M
#
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
#
# CreativeML Open RAIL-M License
#
# Section I: PREAMBLE
# Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.
# Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
# In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.
# Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.
# This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
# NOW THEREFORE, You and Licensor agree as follows:
# 1. Definitions
# - "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
# - "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
# - "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
# - "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
# - "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
# - "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
# - "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
# - "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
# - "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
# - "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
# - "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
# - "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
# Section II: INTELLECTUAL PROPERTY RIGHTS
# Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
# 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
# 3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
# Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
# 4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
# Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
# You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
# You must cause any modified files to carry prominent notices stating that You changed the files;
# You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
# You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
# 5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
# 6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
# Section IV: OTHER PROVISIONS
# 7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model.
# 8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
# 9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
# 10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
# 11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
# 12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
# END OF TERMS AND CONDITIONS
# Attachment A
# Use Restrictions
# You agree not to use the Model or Derivatives of the Model:
# - In any way that violates any applicable national, federal, state, local or international law or regulation;
# - For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
# - To generate or disseminate verifiably false information and/or content with the purpose of harming others;
# - To generate or disseminate personal identifiable information that can be used to harm an individual;
# - To defame, disparage or otherwise harass others;
# - For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
# - For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
# - To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
# - For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
# - To provide medical advice and medical results interpretation;
# - To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
import argparse, os, sys, datetime, glob
sys.path.append('stable-diffusion')
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, Dataset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
def get_parser(**parser_kwargs):
    """Build the command-line argument parser for training.

    ``parser_kwargs`` are forwarded to ``argparse.ArgumentParser``.  The
    caller augments the returned parser with PyTorch Lightning ``Trainer``
    arguments via ``Trainer.add_argparse_args``.

    Returns:
        argparse.ArgumentParser: parser with all training options registered.
    """
    def str2bool(v):
        # Accept common textual spellings of booleans on the command line.
        if isinstance(v, bool):
            return v
        if v.lower() in ("yes", "true", "t", "y", "1"):
            return True
        elif v.lower() in ("no", "false", "f", "n", "0"):
            return False
        else:
            raise argparse.ArgumentTypeError("Boolean value expected.")
    parser = argparse.ArgumentParser(**parser_kwargs)
    parser.add_argument(
        "-n",
        "--name",
        type=str,
        const=True,
        default="",
        nargs="?",
        help="postfix for logdir",
    )
    parser.add_argument(
        "-r",
        "--resume",
        type=str,
        const=True,
        default="",
        nargs="?",
        help="resume from logdir or checkpoint in logdir",
    )
    parser.add_argument(
        "-rc",
        "--resume-from-checkpoint-custom",
        type=str,
        const=True,
        default="",
        nargs="?",
        help="path to a checkpoint used to initialize the model weights",
    )
    parser.add_argument(
        "--delta-ckpt",
        type=str,
        const=True,
        default=None,
        nargs="?",
        help="path to a delta checkpoint containing fine-tuned weights",
    )
    parser.add_argument(
        "-b",
        "--base",
        nargs="*",
        metavar="base_config.yaml",
        help="paths to base configs. Loaded from left-to-right. "
             "Parameters can be overwritten or added with command-line options of the form `--key value`.",
        default=list(),
    )
    parser.add_argument(
        "-t",
        "--train",
        type=str2bool,
        const=True,
        default=False,
        nargs="?",
        help="train",
    )
    parser.add_argument(
        "--no-test",
        type=str2bool,
        const=True,
        default=False,
        nargs="?",
        help="disable test",
    )
    parser.add_argument(
        "-p",
        "--project",
        help="name of new or path to existing project"
    )
    parser.add_argument(
        "-d",
        "--debug",
        type=str2bool,
        nargs="?",
        const=True,
        default=False,
        help="enable post-mortem debugging",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=23,
        help="seed for seed_everything",
    )
    parser.add_argument(
        "-f",
        "--postfix",
        type=str,
        default="",
        help="post-postfix for default name",
    )
    parser.add_argument(
        "-l",
        "--logdir",
        type=str,
        default="logs",
        help="directory for logging",
    )
    parser.add_argument(
        "--scale_lr",
        type=str2bool,
        nargs="?",
        const=True,
        default=True,
        help="scale base-lr by ngpu * batch_size * n_accumulate",
    )
    parser.add_argument(
        "--datapath",
        type=str,
        default="",
        help="path to target images",
    )
    parser.add_argument(
        "--reg_datapath",
        type=str,
        default=None,
        help="path to regularization images",
    )
    parser.add_argument(
        "--caption",
        type=str,
        default="",
        help="caption for the target images",
    )
    parser.add_argument(
        "--reg_caption",
        type=str,
        default="",
        help="caption for the regularization images",
    )
    parser.add_argument(
        "--datapath2",
        type=str,
        default="",
        help="path to the second concept's target images",
    )
    parser.add_argument(
        "--reg_datapath2",
        type=str,
        default=None,
        help="path to the second concept's regularization images",
    )
    parser.add_argument(
        "--caption2",
        type=str,
        default="",
        help="caption for the second concept's target images",
    )
    parser.add_argument(
        "--reg_caption2",
        type=str,
        default="",
        help="caption for the second concept's regularization images",
    )
    parser.add_argument(
        "--modifier_token",
        type=str,
        default=None,
        help="token added before category word for personalization use case",
    )
    parser.add_argument(
        "--freeze_model",
        type=str,
        default=None,
        help="crossattn to enable fine-tuning of all key, value, query matrices",
    )
    parser.add_argument(
        "--repeat",
        type=int,
        default=0,
        help="repeat the target dataset by how many times. Used when training without regularization",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=None,
        help="overwrite batch size",
    )
    return parser
def nondefault_trainer_args(opt):
    """Return the sorted names of Trainer options in *opt* that differ from defaults."""
    default_parser = Trainer.add_argparse_args(argparse.ArgumentParser())
    defaults = default_parser.parse_args([])
    changed = (name for name in vars(defaults) if getattr(opt, name) != getattr(defaults, name))
    return sorted(changed)
class WrappedDataset(Dataset):
    """Adapt any object supporting ``__len__`` and ``__getitem__`` into a torch Dataset."""

    def __init__(self, dataset):
        # Keep a reference only; no copying or validation is performed.
        self.data = dataset

    def __getitem__(self, idx):
        return self.data[idx]

    def __len__(self):
        return len(self.data)
def worker_init_fn(_):
    """DataLoader worker initializer: give each worker its own shard and RNG seed.

    For iterable text-to-image datasets, assigns the worker a contiguous
    slice of the valid sample ids so workers do not emit duplicates; in all
    cases numpy's global RNG is re-seeded per worker so that random
    augmentations differ across workers.
    """
    worker_info = torch.utils.data.get_worker_info()
    dataset = worker_info.dataset
    worker_id = worker_info.id
    if isinstance(dataset, Txt2ImgIterableBaseDataset):
        split_size = dataset.num_records // worker_info.num_workers
        # reset num_records to the true number to retain reliable length information
        dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size]
        # NOTE(review): seeds from a randomly-chosen word of numpy's current RNG
        # state plus the worker id — presumably to decorrelate workers; confirm
        # this is intentional rather than a fixed-index seed as in the else branch.
        current_id = np.random.choice(len(np.random.get_state()[1]), 1)
        return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
    else:
        # Map-style datasets: offset a fixed RNG state word by the worker id.
        return np.random.seed(np.random.get_state()[1][0] + worker_id)
class ConcatDataset(Dataset):
    """Zip several datasets: item *idx* is a tuple of each dataset's idx-th element.

    The length is the minimum of the member lengths, so indexing never runs
    past the shortest dataset.
    """

    def __init__(self, *datasets):
        self.datasets = datasets

    def __len__(self):
        return min(len(member) for member in self.datasets)

    def __getitem__(self, idx):
        return tuple(member[idx] for member in self.datasets)
class DataModuleFromConfig(pl.LightningDataModule):
    """LightningDataModule assembled from ``instantiate_from_config``-style dicts.

    Each of ``train``/``train2``/``validation``/``test``/``predict`` is a
    config dict with ``target`` and ``params`` keys.  A second training
    dataset (``train2``) is only registered when its caption is non-empty;
    in that case the train loader zips both datasets and halves the batch
    size so the combined batch keeps its configured size.
    """
    def __init__(self, batch_size, train=None, train2=None, validation=None, test=None, predict=None,
                 wrap=False, num_workers=None, shuffle_test_loader=False, use_worker_init_fn=False,
                 shuffle_val_dataloader=False):
        super().__init__()
        self.batch_size = batch_size
        self.dataset_configs = dict()
        # Default worker count: 2x batch size heuristic.
        self.num_workers = num_workers if num_workers is not None else batch_size * 2
        self.use_worker_init_fn = use_worker_init_fn
        # The second training dataset is only used when it has a caption.
        if train2 is not None and train2['params']['caption'] != '':
            self.dataset_configs["train2"] = train2
        # The *_dataloader hooks are only bound when the matching split is
        # configured, so Lightning skips the absent splits entirely.
        if train is not None:
            self.dataset_configs["train"] = train
            self.train_dataloader = self._train_dataloader
        if validation is not None:
            self.dataset_configs["validation"] = validation
            self.val_dataloader = partial(self._val_dataloader, shuffle=shuffle_val_dataloader)
        if test is not None:
            self.dataset_configs["test"] = test
            self.test_dataloader = partial(self._test_dataloader, shuffle=shuffle_test_loader)
        if predict is not None:
            self.dataset_configs["predict"] = predict
            self.predict_dataloader = self._predict_dataloader
        self.wrap = wrap

    def prepare_data(self):
        # Instantiate once for any download/caching side effects; instances
        # are discarded and rebuilt per-process in setup().
        for data_cfg in self.dataset_configs.values():
            instantiate_from_config(data_cfg)

    def setup(self, stage=None):
        # Build the actual dataset objects for every configured split.
        self.datasets = dict(
            (k, instantiate_from_config(self.dataset_configs[k]))
            for k in self.dataset_configs)
        if self.wrap:
            for k in self.datasets:
                self.datasets[k] = WrappedDataset(self.datasets[k])

    def _train_dataloader(self):
        is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
        if is_iterable_dataset or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None
        if "train2" in self.dataset_configs and self.dataset_configs["train2"]['params']["caption"] != '':
            # Two-concept training: zip both datasets and halve the batch
            # size so each batch holds batch_size items in total.
            train_set = self.datasets["train"]
            train2_set = self.datasets["train2"]
            concat_dataset = ConcatDataset(train_set, train2_set)
            return DataLoader(concat_dataset, batch_size=self.batch_size // 2,
                              num_workers=self.num_workers, shuffle=False if is_iterable_dataset else True,
                              worker_init_fn=init_fn)
        else:
            return DataLoader(self.datasets["train"], batch_size=self.batch_size,
                              num_workers=self.num_workers, shuffle=False if is_iterable_dataset else True,
                              worker_init_fn=init_fn)

    def _val_dataloader(self, shuffle=False):
        if isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(self.datasets["validation"],
                          batch_size=self.batch_size,
                          num_workers=self.num_workers,
                          worker_init_fn=init_fn,
                          shuffle=shuffle)

    def _test_dataloader(self, shuffle=False):
        # NOTE(review): checks the 'train' dataset (not 'test') for
        # iterability — presumably assumes both splits share a type; confirm.
        is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
        if is_iterable_dataset or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None
        # do not shuffle dataloader for iterable dataset
        shuffle = shuffle and (not is_iterable_dataset)
        return DataLoader(self.datasets["test"], batch_size=self.batch_size,
                          num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle)

    def _predict_dataloader(self, shuffle=False):
        if isinstance(self.datasets['predict'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(self.datasets["predict"], batch_size=self.batch_size,
                          num_workers=self.num_workers, worker_init_fn=init_fn)
class SetupCallback(Callback):
    """Creates the run's log/checkpoint/config directories and persists configs.

    On rank zero it materializes the directory tree and saves both the
    project and the lightning config as YAML; on other ranks it moves a
    log directory prematurely created by the ModelCheckpoint callback out
    of the way (unless the run is a resume).
    """

    def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
        super().__init__()
        self.resume = resume
        self.now = now
        self.logdir = logdir
        self.ckptdir = ckptdir
        self.cfgdir = cfgdir
        self.config = config
        self.lightning_config = lightning_config

    def on_keyboard_interrupt(self, trainer, pl_module):
        # Persist a final checkpoint when the user interrupts training.
        if trainer.global_rank == 0:
            print("Summoning checkpoint.")
            ckpt_path = os.path.join(self.ckptdir, "last.ckpt")
            trainer.save_checkpoint(ckpt_path)

    def on_pretrain_routine_start(self, trainer, pl_module):
        if trainer.global_rank != 0:
            # ModelCheckpoint callback created log directory --- remove it
            if not self.resume and os.path.exists(self.logdir):
                parent, run_name = os.path.split(self.logdir)
                target = os.path.join(parent, "child_runs", run_name)
                os.makedirs(os.path.split(target)[0], exist_ok=True)
                try:
                    os.rename(self.logdir, target)
                except FileNotFoundError:
                    pass
            return
        # Rank zero: create logdirs and save configs.
        for directory in (self.logdir, self.ckptdir, self.cfgdir):
            os.makedirs(directory, exist_ok=True)
        if "callbacks" in self.lightning_config:
            if 'metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']:
                os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True)
        print("Project config")
        print(OmegaConf.to_yaml(self.config))
        OmegaConf.save(self.config,
                       os.path.join(self.cfgdir, "{}-project.yaml".format(self.now)))
        print("Lightning config")
        print(OmegaConf.to_yaml(self.lightning_config))
        OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}),
                       os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now)))
class ImageLogger(Callback):
    """Periodically renders the model's ``log_images`` output to disk and to the logger.

    Logging fires every ``batch_frequency`` steps, plus (optionally) at the
    power-of-two steps below that frequency early in training so the first
    iterations are well covered.
    """
    def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True,
                 rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False,
                 log_images_kwargs=None):
        super().__init__()
        self.rescale = rescale
        self.batch_freq = batch_frequency
        self.max_images = max_images
        # NOTE(review): save_freq is set but only referenced by the
        # commented-out checkpointing code in on_train_batch_end — confirm
        # whether it is still needed.
        self.save_freq = 250
        # Maps logger class -> logger-specific image-logging method.
        self.logger_log_images = {
            pl.loggers.TestTubeLogger: self._testtube,
        }
        # Power-of-two steps up to batch_freq for denser early logging.
        self.log_steps = [2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)]
        if not increase_log_steps:
            self.log_steps = [self.batch_freq]
        self.clamp = clamp
        self.disabled = disabled
        self.log_on_batch_idx = log_on_batch_idx
        self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}
        self.log_first_step = log_first_step

    @rank_zero_only
    def _testtube(self, pl_module, images, batch_idx, split):
        # Push each image grid to the TestTube (tensorboard-style) logger.
        for k in images:
            grid = torchvision.utils.make_grid(images[k])
            grid = (grid + 1.0) / 2.0  # -1,1 -> 0,1; c,h,w
            tag = f"{split}/{k}"
            pl_module.logger.experiment.add_image(
                tag, grid,
                global_step=pl_module.global_step)

    @rank_zero_only
    def log_local(self, save_dir, split, images,
                  global_step, current_epoch, batch_idx):
        # Write each image grid as a PNG under <save_dir>/images/<split>/.
        root = os.path.join(save_dir, "images", split)
        for k in images:
            grid = torchvision.utils.make_grid(images[k], nrow=4)
            if self.rescale:
                grid = (grid + 1.0) / 2.0  # -1,1 -> 0,1; c,h,w
            grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
            grid = grid.numpy()
            grid = (grid * 255).astype(np.uint8)
            filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
                k,
                global_step,
                current_epoch,
                batch_idx)
            path = os.path.join(root, filename)
            os.makedirs(os.path.split(path)[0], exist_ok=True)
            Image.fromarray(grid).save(path)

    def log_img(self, pl_module, batch, batch_idx, split="train"):
        # Render images if the schedule says so and the module supports it.
        check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step
        if (self.check_frequency(check_idx) and  # batch_idx % self.batch_freq == 0
                hasattr(pl_module, "log_images") and
                callable(pl_module.log_images) and
                self.max_images > 0):
            logger = type(pl_module.logger)
            # Temporarily switch to eval mode for deterministic sampling.
            is_train = pl_module.training
            if is_train:
                pl_module.eval()
            with torch.no_grad():
                images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)
            for k in images:
                N = min(images[k].shape[0], self.max_images)
                images[k] = images[k][:N]
                if isinstance(images[k], torch.Tensor):
                    images[k] = images[k].detach().cpu()
                    if self.clamp:
                        images[k] = torch.clamp(images[k], -1., 1.)
            self.log_local(pl_module.logger.save_dir, split, images,
                           pl_module.global_step, pl_module.current_epoch, batch_idx)
            # Fall back to a no-op for logger classes without a handler.
            logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None)
            logger_log_images(pl_module, images, pl_module.global_step, split)
            if is_train:
                pl_module.train()

    def check_frequency(self, check_idx):
        # True at every batch_freq multiple and at the queued early
        # power-of-two steps; consumes one queued step per hit.
        if ((check_idx % self.batch_freq) == 0 or (check_idx in self.log_steps)) and (
                check_idx > 0 or self.log_first_step):
            try:
                self.log_steps.pop(0)
            except IndexError as e:
                print(e)
                pass
            return True
        return False

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        if not self.disabled and (pl_module.global_step > 0 or self.log_first_step):
            self.log_img(pl_module, batch, batch_idx, split="train")
        # if self.save_freq is not None:
        #     epoch = trainer.current_epoch
        #     global_step = trainer.global_step
        #     if global_step % self.save_freq == 0:
        #         filename = f'{epoch}_{global_step}.ckpt'
        #         ckpt_path = os.path.join(trainer.checkpoint_callback.dirpath, filename)
        #         trainer.save_checkpoint(ckpt_path)
class CUDACallback(Callback):
    # see https://github.com/SeanNaren/minGPT/blob/master/mingpt/callback.py
    """Reports per-epoch wall time and peak CUDA memory on the root GPU."""

    def on_train_epoch_start(self, trainer, pl_module):
        # Reset the memory use counter
        torch.cuda.reset_peak_memory_stats(trainer.root_gpu)
        torch.cuda.synchronize(trainer.root_gpu)
        self.start_time = time.time()

    def on_train_epoch_end(self, trainer, pl_module, outputs):
        torch.cuda.synchronize(trainer.root_gpu)
        peak_mib = torch.cuda.max_memory_allocated(trainer.root_gpu) / 2 ** 20
        elapsed = time.time() - self.start_time
        try:
            # Average the measurements across processes before reporting.
            peak_mib = trainer.training_type_plugin.reduce(peak_mib)
            elapsed = trainer.training_type_plugin.reduce(elapsed)
            rank_zero_info(f"Average Epoch time: {elapsed:.2f} seconds")
            rank_zero_info(f"Average Peak memory {peak_mib:.2f}MiB")
        except AttributeError:
            pass
if __name__ == "__main__":
    # custom parser to specify config files, train, test and debug mode,
    # postfix, resume.
    # `--key value` arguments are interpreted as arguments to the trainer.
    # `nested.key=value` arguments are interpreted as config parameters.
    # configs are merged from left-to-right followed by command line parameters.
    # model:
    #   base_learning_rate: float
    #   target: path to lightning module
    #   params:
    #       key: value
    # data:
    #   target: main.DataModuleFromConfig
    #   params:
    #      batch_size: int
    #      wrap: bool
    #      train:
    #          target: path to train dataset
    #          params:
    #              key: value
    #      validation:
    #          target: path to validation dataset
    #          params:
    #              key: value
    #      test:
    #          target: path to test dataset
    #          params:
    #              key: value
    # lightning: (optional, has sane defaults and can be specified on cmdline)
    #   trainer:
    #       additional arguments to trainer
    #   logger:
    #       logger to instantiate
    #   modelcheckpoint:
    #       modelcheckpoint to instantiate
    #   callbacks:
    #       callback1:
    #           target: importpath
    #           params:
    #               key: value
    now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    # add cwd for convenience and to make classes in this file available when
    # running as `python main.py`
    # (in particular `main.DataModuleFromConfig`)
    sys.path.append(os.getcwd())
    parser = get_parser()
    parser = Trainer.add_argparse_args(parser)
    opt, unknown = parser.parse_known_args()
    if opt.name and opt.resume:
        raise ValueError(
            "-n/--name and -r/--resume cannot be specified both."
            "If you want to resume training in a new log folder, "
            "use -n/--name in combination with --resume_from_checkpoint"
        )
    if opt.resume:
        # Resuming: derive logdir/checkpoint from the given file or directory
        # and prepend the run's saved configs to the config list.
        if not os.path.exists(opt.resume):
            raise ValueError("Cannot find {}".format(opt.resume))
        if os.path.isfile(opt.resume):
            paths = opt.resume.split("/")
            # idx = len(paths)-paths[::-1].index("logs")+1
            # logdir = "/".join(paths[:idx])
            logdir = "/".join(paths[:-2])
            ckpt = opt.resume
        else:
            assert os.path.isdir(opt.resume), opt.resume
            logdir = opt.resume.rstrip("/")
            ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
        opt.resume_from_checkpoint = ckpt
        base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
        opt.base = base_configs + opt.base
        _tmp = logdir.split("/")
        nowname = _tmp[-1]
    else:
        # Fresh run: name it after the explicit -n flag or the first config file.
        if opt.name:
            name = "_" + opt.name
        elif opt.base:
            cfg_fname = os.path.split(opt.base[0])[-1]
            cfg_name = os.path.splitext(cfg_fname)[0]
            name = "_" + cfg_name
        else:
            name = ""
        nowname = now + name + opt.postfix
        logdir = os.path.join(opt.logdir, nowname)
    ckptdir = os.path.join(logdir, "checkpoints")
    cfgdir = os.path.join(logdir, "configs")
    seed_everything(opt.seed)
    try:
        # init and save configs
        configs = [OmegaConf.load(cfg) for cfg in opt.base]
        cli = OmegaConf.from_dotlist(unknown)
        config = OmegaConf.merge(*configs, cli)
        lightning_config = config.pop("lightning", OmegaConf.create())
        # merge trainer cli with config
        trainer_config = lightning_config.get("trainer", OmegaConf.create())
        # default to ddp
        trainer_config["accelerator"] = "ddp"
        for k in nondefault_trainer_args(opt):
            trainer_config[k] = getattr(opt, k)
        if not ("gpus" in trainer_config):
            del trainer_config["accelerator"]
            cpu = True
        else:
            gpuinfo = trainer_config["gpus"]
            print(f"Running on GPUs {gpuinfo}")
            cpu = False
        trainer_opt = argparse.Namespace(**trainer_config)
        lightning_config.trainer = trainer_config
        # model
        # Push the CLI dataset options into the data config.
        config.data.params.train.params.caption = opt.caption
        config.data.params.train.params.reg_caption = opt.reg_caption
        config.data.params.train.params.datapath = opt.datapath
        config.data.params.train.params.reg_datapath = opt.reg_datapath
        # NOTE(review): opt.caption2 defaults to "" (not None), so this branch
        # always runs — confirm whether `!= ''` was intended.
        if opt.caption2 is not None:
            config.data.params.train2.params.caption = opt.caption2
            config.data.params.train2.params.reg_caption = opt.reg_caption2
            config.data.params.train2.params.datapath = opt.datapath2
            config.data.params.train2.params.reg_datapath = opt.reg_datapath2
        # Validation reuses the training dataset config.
        config.data.params.validation = config.data.params.train
        if opt.batch_size is not None:
            config.data.params.batch_size = opt.batch_size
        if opt.modifier_token is not None:
            config.model.params.cond_stage_config.params.modifier_token = opt.modifier_token
        if opt.repeat > 0:
            config.data.params.train.params.repeat = opt.repeat
        if opt.resume_from_checkpoint_custom:
            # Skip ckpt_path loading in the model; weights are restored
            # manually from the custom checkpoint below.
            config.model.params.ckpt_path = None
        if opt.freeze_model is not None:
            config.model.params.freeze_model = opt.freeze_model
        model = instantiate_from_config(config.model)
        if opt.resume_from_checkpoint_custom:
            # Restore weights, copying the token-embedding table separately so
            # a vocabulary extended with modifier tokens keeps its new rows.
            st = torch.load(opt.resume_from_checkpoint_custom, map_location='cpu')["state_dict"]
            token_weights = st["cond_stage_model.transformer.text_model.embeddings.token_embedding.weight"]
            del st["cond_stage_model.transformer.text_model.embeddings.token_embedding.weight"]
            model.load_state_dict(st, strict=False)
            model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[:token_weights.shape[0]] = token_weights
        if opt.delta_ckpt is not None:
            # Overlay fine-tuned delta weights (and optional learned embeddings).
            st = torch.load(opt.delta_ckpt)
            embed = None
            if 'embed' in st:
                embed = st['embed'].reshape(-1, 768)
            if 'state_dict' in st:
                st = st['state_dict']
                print("restroting from delta model from previous version")
            st1 = model.state_dict()
            for each in st1.keys():
                if each in st.keys():
                    print("found common", each)
            model.load_state_dict(st, strict=False)
            if embed is not None:
                print("restoring embedding")
                # NOTE(review): token_weights is only bound in the
                # --resume-from-checkpoint-custom branch above; using
                # --delta-ckpt with an 'embed' entry but without that flag
                # would raise NameError here — confirm intended usage.
                model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[token_weights.shape[0]: token_weights.shape[0] + embed.shape[0]] = embed
        # trainer and callbacks
        trainer_kwargs = dict()
        # default logger configs
        default_logger_cfgs = {
            "wandb": {
                "target": "pytorch_lightning.loggers.WandbLogger",
                "params": {
                    "name": nowname,
                    "save_dir": logdir,
                    "offline": opt.debug,
                    "id": nowname,
                }
            },
            "testtube": {
                "target": "pytorch_lightning.loggers.TestTubeLogger",
                "params": {
                    "name": "testtube",
                    "save_dir": logdir,
                }
            },
        }
        default_logger_cfg = default_logger_cfgs["testtube"]
        if "logger" in lightning_config:
            logger_cfg = lightning_config.logger
        else:
            logger_cfg = OmegaConf.create()
        logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg)
        trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)
        # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to
        # specify which metric is used to determine best models
        default_modelckpt_cfg = {
            "target": "pytorch_lightning.callbacks.ModelCheckpoint",
            "params": {
                "dirpath": ckptdir,
                "filename": "{epoch:06}",
                "verbose": True,
                "save_last": True,
            }
        }
        if hasattr(model, "monitor"):
            print(f"Monitoring {model.monitor} as checkpoint metric.")
            default_modelckpt_cfg["params"]["monitor"] = model.monitor
            default_modelckpt_cfg["params"]["save_top_k"] = -1
            default_modelckpt_cfg["params"]["every_n_epochs"] = 1
        if "modelcheckpoint" in lightning_config:
            modelckpt_cfg = lightning_config.modelcheckpoint
        else:
            modelckpt_cfg = OmegaConf.create()
        modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
        print(f"Merged modelckpt-cfg: \n{modelckpt_cfg}")
        # Pre-1.4 Lightning takes the checkpoint callback as a dedicated kwarg;
        # 1.4+ receives it through the callbacks list below.
        if version.parse(pl.__version__) < version.parse('1.4.0'):
            trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg)
        # add callback which sets up log directory
        default_callbacks_cfg = {
            "setup_callback": {
                "target": "train.SetupCallback",
                "params": {
                    "resume": opt.resume,
                    "now": now,
                    "logdir": logdir,
                    "ckptdir": ckptdir,
                    "cfgdir": cfgdir,
                    "config": config,
                    "lightning_config": lightning_config,
                }
            },
            "image_logger": {
                "target": "train.ImageLogger",
                "params": {
                    "batch_frequency": 750,
                    "max_images": 4,
                    "clamp": True
                }
            },
            "learning_rate_logger": {
                "target": "train.LearningRateMonitor",
                "params": {
                    "logging_interval": "step",
                    # "log_momentum": True
                }
            },
            "cuda_callback": {
                "target": "train.CUDACallback"
            },
        }
        if version.parse(pl.__version__) >= version.parse('1.4.0'):
            default_callbacks_cfg.update({'checkpoint_callback': modelckpt_cfg})
        if "callbacks" in lightning_config:
            callbacks_cfg = lightning_config.callbacks
        else:
            callbacks_cfg = OmegaConf.create()
        if 'metrics_over_trainsteps_checkpoint' in callbacks_cfg:
            print(
                'Caution: Saving checkpoints every n train steps without deleting. This might require some free space.')
            default_metrics_over_trainsteps_ckpt_dict = {
                'metrics_over_trainsteps_checkpoint':
                    {"target": 'pytorch_lightning.callbacks.ModelCheckpoint',
                     'params': {
                         "dirpath": os.path.join(ckptdir, 'trainstep_checkpoints'),
                         "filename": "{epoch:06}-{step:09}",
                         "verbose": True,
                         'save_top_k': -1,
                         'every_n_train_steps': 50,
                         'save_weights_only': True
                     }
                     }
            }
            default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict)
        callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
        if 'ignore_keys_callback' in callbacks_cfg and hasattr(trainer_opt, 'resume_from_checkpoint'):
            callbacks_cfg.ignore_keys_callback.params['ckpt_path'] = trainer_opt.resume_from_checkpoint
        elif 'ignore_keys_callback' in callbacks_cfg:
            del callbacks_cfg['ignore_keys_callback']
        trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg]
        trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs)
        trainer.logdir = logdir
        # data
        data = instantiate_from_config(config.data)
        # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
        # calling these ourselves should not be necessary but it is.
        # lightning still takes care of proper multiprocessing though
        data.prepare_data()
        data.setup()
        print("#### Data #####")
        for k in data.datasets:
            print(f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}")
        # configure learning rate
        bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
        if not cpu:
            # "gpus" is a comma-separated id string, e.g. "0,1,".
            ngpu = len(lightning_config.trainer.gpus.strip(",").split(','))
        else:
            ngpu = 1
        if 'accumulate_grad_batches' in lightning_config.trainer:
            accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches
        else:
            accumulate_grad_batches = 1
        print(f"accumulate_grad_batches = {accumulate_grad_batches}")
        lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
        if opt.scale_lr:
            # Linear LR scaling with effective batch size (accumulation x gpus x batch).
            model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
            print(
                "Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format(
                    model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr))
        else:
            model.learning_rate = base_lr
            print("++++ NOT USING LR SCALING ++++")
            print(f"Setting learning rate to {model.learning_rate:.2e}")
        # allow checkpointing via USR1
        def melk(*args, **kwargs):
            # run all checkpoint hooks
            if trainer.global_rank == 0:
                print("Summoning checkpoint.")
                ckpt_path = os.path.join(ckptdir, "last.ckpt")
                trainer.save_checkpoint(ckpt_path)
        def divein(*args, **kwargs):
            # Drop into an interactive debugger on demand (USR2).
            if trainer.global_rank == 0:
                import pudb
                pudb.set_trace()
        import signal
        signal.signal(signal.SIGUSR1, melk)
        signal.signal(signal.SIGUSR2, divein)
        # run
        if opt.train:
            try:
                trainer.fit(model, data)
            except Exception:
                # Save a checkpoint before propagating the failure.
                melk()
                raise
        if not opt.no_test and not trainer.interrupted:
            trainer.test(model, data)
    except Exception:
        if opt.debug and trainer.global_rank == 0:
            try:
                import pudb as debugger
            except ImportError:
                import pdb as debugger
            debugger.post_mortem()
        raise
    finally:
        # move newly created debug project to debug_runs
        if opt.debug and not opt.resume and trainer.global_rank == 0:
            dst, name = os.path.split(logdir)
            dst = os.path.join(dst, "debug_runs", name)
            os.makedirs(os.path.split(dst)[0], exist_ok=True)
            os.rename(logdir, dst)
        if trainer.global_rank == 0:
            print(trainer.profiler.summary())
| 48,845 | 48.389282 | 1,097 | py |
custom-diffusion | custom-diffusion-main/src/diffusers_model_pipeline.py | # This code is built from the Huggingface repository: https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py, and
# https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py
# Copyright 2022- The Hugging Face team. All rights reserved.
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# ==========================================================================================
#
# modifications are MIT License. To view a copy of the license, visit MIT_LICENSE.md.
#
# ==========================================================================================
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
# 1. Definitions.
# "License" shall mean the terms and conditions for use, reproduction,
# and distribution as defined by Sections 1 through 9 of this document.
# "Licensor" shall mean the copyright owner or entity authorized by
# the copyright owner that is granting the License.
# "Legal Entity" shall mean the union of the acting entity and all
# other entities that control, are controlled by, or are under common
# control with that entity. For the purposes of this definition,
# "control" means (i) the power, direct or indirect, to cause the
# direction or management of such entity, whether by contract or
# otherwise, or (ii) ownership of fifty percent (50%) or more of the
# outstanding shares, or (iii) beneficial ownership of such entity.
# "You" (or "Your") shall mean an individual or Legal Entity
# exercising permissions granted by this License.
# "Source" form shall mean the preferred form for making modifications,
# including but not limited to software source code, documentation
# source, and configuration files.
# "Object" form shall mean any form resulting from mechanical
# transformation or translation of a Source form, including but
# not limited to compiled object code, generated documentation,
# and conversions to other media types.
# "Work" shall mean the work of authorship, whether in Source or
# Object form, made available under the License, as indicated by a
# copyright notice that is included in or attached to the work
# (an example is provided in the Appendix below).
# "Derivative Works" shall mean any work, whether in Source or Object
# form, that is based on (or derived from) the Work and for which the
# editorial revisions, annotations, elaborations, or other modifications
# represent, as a whole, an original work of authorship. For the purposes
# of this License, Derivative Works shall not include works that remain
# separable from, or merely link (or bind by name) to the interfaces of,
# the Work and Derivative Works thereof.
# "Contribution" shall mean any work of authorship, including
# the original version of the Work and any modifications or additions
# to that Work or Derivative Works thereof, that is intentionally
# submitted to Licensor for inclusion in the Work by the copyright owner
# or by an individual or Legal Entity authorized to submit on behalf of
# the copyright owner. For the purposes of this definition, "submitted"
# means any form of electronic, verbal, or written communication sent
# to the Licensor or its representatives, including but not limited to
# communication on electronic mailing lists, source code control systems,
# and issue tracking systems that are managed by, or on behalf of, the
# Licensor for the purpose of discussing and improving the Work, but
# excluding communication that is conspicuously marked or otherwise
# designated in writing by the copyright owner as "Not a Contribution."
# "Contributor" shall mean Licensor and any individual or Legal Entity
# on behalf of whom a Contribution has been received by Licensor and
# subsequently incorporated within the Work.
# 2. Grant of Copyright License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# copyright license to reproduce, prepare Derivative Works of,
# publicly display, publicly perform, sublicense, and distribute the
# Work and such Derivative Works in Source or Object form.
# 3. Grant of Patent License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# (except as stated in this section) patent license to make, have made,
# use, offer to sell, sell, import, and otherwise transfer the Work,
# where such license applies only to those patent claims licensable
# by such Contributor that are necessarily infringed by their
# Contribution(s) alone or by combination of their Contribution(s)
# with the Work to which such Contribution(s) was submitted. If You
# institute patent litigation against any entity (including a
# cross-claim or counterclaim in a lawsuit) alleging that the Work
# or a Contribution incorporated within the Work constitutes direct
# or contributory patent infringement, then any patent licenses
# granted to You under this License for that Work shall terminate
# as of the date such litigation is filed.
# 4. Redistribution. You may reproduce and distribute copies of the
# Work or Derivative Works thereof in any medium, with or without
# modifications, and in Source or Object form, provided that You
# meet the following conditions:
# (a) You must give any other recipients of the Work or
# Derivative Works a copy of this License; and
# (b) You must cause any modified files to carry prominent notices
# stating that You changed the files; and
# (c) You must retain, in the Source form of any Derivative Works
# that You distribute, all copyright, patent, trademark, and
# attribution notices from the Source form of the Work,
# excluding those notices that do not pertain to any part of
# the Derivative Works; and
# (d) If the Work includes a "NOTICE" text file as part of its
# distribution, then any Derivative Works that You distribute must
# include a readable copy of the attribution notices contained
# within such NOTICE file, excluding those notices that do not
# pertain to any part of the Derivative Works, in at least one
# of the following places: within a NOTICE text file distributed
# as part of the Derivative Works; within the Source form or
# documentation, if provided along with the Derivative Works; or,
# within a display generated by the Derivative Works, if and
# wherever such third-party notices normally appear. The contents
# of the NOTICE file are for informational purposes only and
# do not modify the License. You may add Your own attribution
# notices within Derivative Works that You distribute, alongside
# or as an addendum to the NOTICE text from the Work, provided
# that such additional attribution notices cannot be construed
# as modifying the License.
# You may add Your own copyright statement to Your modifications and
# may provide additional or different license terms and conditions
# for use, reproduction, or distribution of Your modifications, or
# for any such Derivative Works as a whole, provided Your use,
# reproduction, and distribution of the Work otherwise complies with
# the conditions stated in this License.
# 5. Submission of Contributions. Unless You explicitly state otherwise,
# any Contribution intentionally submitted for inclusion in the Work
# by You to the Licensor shall be under the terms and conditions of
# this License, without any additional terms or conditions.
# Notwithstanding the above, nothing herein shall supersede or modify
# the terms of any separate license agreement you may have executed
# with Licensor regarding such Contributions.
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor,
# except as required for reasonable and customary use in describing the
# origin of the Work and reproducing the content of the NOTICE file.
# 7. Disclaimer of Warranty. Unless required by applicable law or
# agreed to in writing, Licensor provides the Work (and each
# Contributor provides its Contributions) on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied, including, without limitation, any warranties or conditions
# of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
# PARTICULAR PURPOSE. You are solely responsible for determining the
# appropriateness of using or redistributing the Work and assume any
# risks associated with Your exercise of permissions under this License.
# 8. Limitation of Liability. In no event and under no legal theory,
# whether in tort (including negligence), contract, or otherwise,
# unless required by applicable law (such as deliberate and grossly
# negligent acts) or agreed to in writing, shall any Contributor be
# liable to You for damages, including any direct, indirect, special,
# incidental, or consequential damages of any character arising as a
# result of this License or out of the use or inability to use the
# Work (including but not limited to damages for loss of goodwill,
# work stoppage, computer failure or malfunction, or any and all
# other commercial damages or losses), even if such Contributor
# has been advised of the possibility of such damages.
# 9. Accepting Warranty or Additional Liability. While redistributing
# the Work or Derivative Works thereof, You may choose to offer,
# and charge a fee for, acceptance of support, warranty, indemnity,
# or other liability obligations and/or rights consistent with this
# License. However, in accepting such obligations, You may act only
# on Your own behalf and on Your sole responsibility, not on behalf
# of any other Contributor, and only if You agree to indemnify,
# defend, and hold each Contributor harmless for any liability
# incurred by, or claims asserted against, such Contributor by reason
# of your accepting any such warranty or additional liability.
# END OF TERMS AND CONDITIONS
# APPENDIX: How to apply the Apache License to your work.
# To apply the Apache License to your work, attach the following
# boilerplate notice, with the fields enclosed by brackets "[]"
# replaced with your own identifying information. (Don't include
# the brackets!) The text should be enclosed in the appropriate
# comment syntax for the file format. We also recommend that a
# file or class name and description of purpose be included on the
# same "printed page" as the copyright notice for easier
# identification within third-party archives.
# Copyright [yyyy] [name of copyright owner]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
import torch
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from accelerate.logging import get_logger
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.models.cross_attention import CrossAttention
from diffusers.utils.import_utils import is_xformers_available
if is_xformers_available():
import xformers
import xformers.ops
else:
xformers = None
logger = get_logger(__name__)
def set_use_memory_efficient_attention_xformers(
    self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None
):
    """Install a Custom Diffusion attention processor on this attention module.

    Drop-in replacement for the stock ``set_use_memory_efficient_attention_xformers``:
    selects the xformers-backed processor when requested (after validating that
    xformers is actually usable), otherwise the plain PyTorch processor.

    Args:
        use_memory_efficient_attention_xformers: whether to use the xformers kernel.
        attention_op: optional xformers attention op forwarded to the processor.

    Raises:
        NotImplementedError: if the module uses added key/value projections
            (UnCLIP-style), which xformers does not support.
        ModuleNotFoundError: if xformers is not installed.
        ValueError: if no CUDA device is available.
    """
    if not use_memory_efficient_attention_xformers:
        self.set_processor(CustomDiffusionAttnProcessor())
        return

    if self.added_kv_proj_dim is not None:
        # TODO(Anton, Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP
        # which uses this type of cross attention ONLY because the attention mask of format
        # [0, ..., -10.000, ..., 0, ...,] is not supported
        raise NotImplementedError(
            "Memory efficient attention with `xformers` is currently not supported when"
            " `self.added_kv_proj_dim` is defined."
        )
    if not is_xformers_available():
        raise ModuleNotFoundError(
            (
                "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
                " xformers"
            ),
            name="xformers",
        )
    if not torch.cuda.is_available():
        raise ValueError(
            "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is"
            " only available for GPU "
        )

    # Smoke-test the kernel once so a broken install fails here rather than mid-forward.
    try:
        _ = xformers.ops.memory_efficient_attention(
            torch.randn((1, 2, 40), device="cuda"),
            torch.randn((1, 2, 40), device="cuda"),
            torch.randn((1, 2, 40), device="cuda"),
        )
    except Exception as e:
        raise e
    self.set_processor(CustomDiffusionXFormersAttnProcessor(attention_op=attention_op))
class CustomDiffusionAttnProcessor:
    """Plain-PyTorch attention processor for Custom Diffusion fine-tuning.

    Mirrors the stock ``CrossAttention`` computation except that, during
    cross-attention, the key/value projections at sequence position 0
    (presumably the BOS token -- TODO confirm) are detached so that position
    receives no gradient; all other token positions train normally.
    """

    def __call__(
        self,
        attn: CrossAttention,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
    ):
        batch, seq_len, _ = hidden_states.shape
        attention_mask = attn.prepare_attention_mask(attention_mask, seq_len, batch)
        query = attn.to_q(hidden_states)

        # Self-attention when no encoder states are supplied; otherwise
        # cross-attention (optionally normalising the encoder states first).
        is_cross = encoder_hidden_states is not None
        if not is_cross:
            encoder_hidden_states = hidden_states
        elif attn.cross_attention_norm:
            encoder_hidden_states = attn.norm_cross(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        if is_cross:
            # Gradient mask: ones everywhere except position 0, whose key/value
            # are routed through .detach() and therefore stay gradient-free.
            grad_mask = torch.ones_like(key)
            grad_mask[:, :1, :] = 0.0
            key = grad_mask * key + (1 - grad_mask) * key.detach()
            value = grad_mask * value + (1 - grad_mask) * value.detach()

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        scores = attn.get_attention_scores(query, key, attention_mask)
        out = torch.bmm(scores, value)
        out = attn.batch_to_head_dim(out)

        out = attn.to_out[0](out)  # linear projection
        out = attn.to_out[1](out)  # dropout
        return out
class CustomDiffusionXFormersAttnProcessor:
    """xformers-backed attention processor for Custom Diffusion fine-tuning.

    Same semantics as ``CustomDiffusionAttnProcessor`` (position-0 key/value are
    detached during cross-attention) but the attention itself is computed with
    ``xformers.ops.memory_efficient_attention``.
    """

    def __init__(self, attention_op: Optional[Callable] = None):
        # Specific xformers op to use; None lets xformers pick the best kernel.
        self.attention_op = attention_op

    def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None):
        batch, seq_len, _ = hidden_states.shape
        attention_mask = attn.prepare_attention_mask(attention_mask, seq_len, batch)
        query = attn.to_q(hidden_states)

        # Self-attention when no encoder states are supplied; otherwise
        # cross-attention (optionally normalising the encoder states first).
        is_cross = encoder_hidden_states is not None
        if not is_cross:
            encoder_hidden_states = hidden_states
        elif attn.cross_attention_norm:
            encoder_hidden_states = attn.norm_cross(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        if is_cross:
            # Gradient mask: ones everywhere except position 0, whose key/value
            # are routed through .detach() and therefore stay gradient-free.
            grad_mask = torch.ones_like(key)
            grad_mask[:, :1, :] = 0.0
            key = grad_mask * key + (1 - grad_mask) * key.detach()
            value = grad_mask * value + (1 - grad_mask) * value.detach()

        # xformers expects contiguous (batch * heads, seq, head_dim) tensors.
        query = attn.head_to_batch_dim(query).contiguous()
        key = attn.head_to_batch_dim(key).contiguous()
        value = attn.head_to_batch_dim(value).contiguous()

        out = xformers.ops.memory_efficient_attention(
            query, key, value, attn_bias=attention_mask, op=self.attention_op
        )
        out = out.to(query.dtype)
        out = attn.batch_to_head_dim(out)

        out = attn.to_out[0](out)  # linear projection
        out = attn.to_out[1](out)  # dropout
        return out
class CustomDiffusionPipeline(StableDiffusionPipeline):
    r"""
    Pipeline for custom diffusion model.
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.).
    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents.
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
        modifier_token: list of new modifier tokens added or to be added to text_encoder
            (defaults to a fresh empty list when omitted).
        modifier_token_id: list of id of new modifier tokens added or to be added to text_encoder
            (defaults to a fresh empty list when omitted).
    """
    _optional_components = ["safety_checker", "feature_extractor", "modifier_token"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: SchedulerMixin,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
        requires_safety_checker: bool = True,
        modifier_token: Optional[list] = None,
        modifier_token_id: Optional[list] = None,
    ):
        super().__init__(vae,
                         text_encoder,
                         tokenizer,
                         unet,
                         scheduler,
                         safety_checker,
                         feature_extractor,
                         requires_safety_checker)

        # BUG FIX: the previous signature used mutable defaults (`= []`), so every
        # pipeline constructed without explicit arguments shared ONE list object.
        # `add_token` appends to `self.modifier_token_id` in place, which would
        # have leaked token ids across pipeline instances. `None` sentinels give
        # each instance a fresh list and stay backward compatible for callers
        # that pass their own lists.
        self.modifier_token = [] if modifier_token is None else modifier_token
        self.modifier_token_id = [] if modifier_token_id is None else modifier_token_id

    def add_token(self, initializer_token):
        """Register every entry of `self.modifier_token` as a new tokenizer token.

        Each new modifier token's embedding is initialised from the embedding of
        the corresponding word in `initializer_token` (aligned element-wise with
        `self.modifier_token`). Appends the new token ids to
        `self.modifier_token_id` and resizes the text-encoder embedding table.

        Args:
            initializer_token: list of existing single-token words, one per
                modifier token.

        Raises:
            ValueError: if a modifier token already exists in the tokenizer, or
                an initializer word does not map to exactly one token.
        """
        initializer_token_id = []
        for modifier_token_, initializer_token_ in zip(self.modifier_token, initializer_token):
            # Add the placeholder token in tokenizer
            num_added_tokens = self.tokenizer.add_tokens(modifier_token_)
            if num_added_tokens == 0:
                raise ValueError(
                    f"The tokenizer already contains the token {modifier_token_}. Please pass a different"
                    " `modifier_token` that is not already in the tokenizer."
                )

            # Convert the initializer_token, placeholder_token to ids
            token_ids = self.tokenizer.encode([initializer_token_], add_special_tokens=False)
            # Check if initializer_token is a single token or a sequence of tokens
            if len(token_ids) > 1:
                raise ValueError("The initializer token must be a single token.")

            self.modifier_token_id.append(self.tokenizer.convert_tokens_to_ids(modifier_token_))
            initializer_token_id.append(token_ids[0])

        # Resize the token embeddings as we are adding new special tokens to the tokenizer
        self.text_encoder.resize_token_embeddings(len(self.tokenizer))

        # Initialise the newly added placeholder token with the embeddings of the initializer token
        token_embeds = self.text_encoder.get_input_embeddings().weight.data
        for (x, y) in zip(self.modifier_token_id, initializer_token_id):
            token_embeds[x] = token_embeds[y]

    def save_pretrained(self, save_path, freeze_model="crossattn_kv", save_text_encoder=False, all=False):
        """Save either the full pipeline or only the Custom Diffusion delta.

        Args:
            save_path: target directory (full save) or file path (delta save).
            freeze_model: which UNet weights to include in the delta:
                "crossattn_kv" (cross-attention key/value projections only) or
                "crossattn" (all cross-attention weights).
            save_text_encoder: also store the full text-encoder state dict.
            all: save the complete pipeline via the superclass instead of a
                delta. (The name shadows the builtin `all`; kept unchanged for
                interface compatibility.)

        Raises:
            ValueError: if `freeze_model` is not one of the supported modes.
        """
        if all:
            super().save_pretrained(save_path)
            return

        delta_dict = {'unet': {}, 'modifier_token': {}}
        if self.modifier_token is not None:
            for i in range(len(self.modifier_token_id)):
                # Detach learned embeddings to CPU so the checkpoint is device-independent.
                learned_embeds = self.text_encoder.get_input_embeddings().weight[self.modifier_token_id[i]]
                delta_dict['modifier_token'][self.modifier_token[i]] = learned_embeds.detach().cpu()
        if save_text_encoder:
            delta_dict['text_encoder'] = self.text_encoder.state_dict()
        for name, params in self.unet.named_parameters():
            if freeze_model == "crossattn":
                if 'attn2' in name:
                    delta_dict['unet'][name] = params.cpu().clone()
            elif freeze_model == "crossattn_kv":
                if 'attn2.to_k' in name or 'attn2.to_v' in name:
                    delta_dict['unet'][name] = params.cpu().clone()
            else:
                raise ValueError(
                    "freeze_model argument only supports crossattn_kv or crossattn"
                )
        torch.save(delta_dict, save_path)

    def load_model(self, save_path, compress=False):
        """Load a Custom Diffusion delta checkpoint saved by `save_pretrained`.

        Restores (when present) the text-encoder state, the learned modifier
        token embeddings, and the trained cross-attention UNet weights.

        Args:
            save_path: path to the delta checkpoint file.
            compress: if True, 'to_k'/'to_v' entries are stored as low-rank
                factors `u` and `v` and applied additively (`params += u @ v`);
                otherwise weights are copied in place.

        Raises:
            ValueError: if a stored modifier token already exists in the tokenizer.
        """
        # SECURITY NOTE: torch.load unpickles arbitrary Python objects --
        # only load checkpoints from trusted sources.
        st = torch.load(save_path)
        if 'text_encoder' in st:
            self.text_encoder.load_state_dict(st['text_encoder'])
        if 'modifier_token' in st:
            modifier_tokens = list(st['modifier_token'].keys())
            modifier_token_id = []
            for modifier_token in modifier_tokens:
                num_added_tokens = self.tokenizer.add_tokens(modifier_token)
                if num_added_tokens == 0:
                    raise ValueError(
                        f"The tokenizer already contains the token {modifier_token}. Please pass a different"
                        " `modifier_token` that is not already in the tokenizer."
                    )
                modifier_token_id.append(self.tokenizer.convert_tokens_to_ids(modifier_token))

            # Resize the token embeddings as we are adding new special tokens to the tokenizer
            self.text_encoder.resize_token_embeddings(len(self.tokenizer))
            token_embeds = self.text_encoder.get_input_embeddings().weight.data
            for i, id_ in enumerate(modifier_token_id):
                token_embeds[id_] = st['modifier_token'][modifier_tokens[i]]

        for name, params in self.unet.named_parameters():
            if 'attn2' in name:
                if compress and ('to_k' in name or 'to_v' in name):
                    params.data += st['unet'][name]['u']@st['unet'][name]['v']
                elif name in st['unet']:
                    params.data.copy_(st['unet'][f'{name}'])
| 25,547 | 50.198397 | 149 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.