code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def test_get_all_accuracy_metrics_returns(get_test_set):
    """Check that get_all_accuracy_metrics returns exactly the expected keys."""
    y_pred, y_std, y_true = get_test_set
    metrics = get_all_accuracy_metrics(y_pred, y_true)
    expected_keys = {"mae", "rmse", "mdae", "marpd", "r2", "corr"}
    assert len(metrics.keys()) == 6
    assert expected_keys.issubset(metrics.keys())
|
Test if correct accuracy metrics are returned.
|
test_get_all_accuracy_metrics_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_get_all_average_calibration_returns(get_test_set):
    """Check that get_all_average_calibration returns exactly the expected keys."""
    metrics = get_all_average_calibration(*get_test_set, 20)
    expected_keys = {"rms_cal", "ma_cal", "miscal_area"}
    assert len(metrics.keys()) == 3
    assert expected_keys.issubset(metrics.keys())
|
Test if correct average calibration metrics are returned.
|
test_get_all_average_calibration_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_get_all_adversarial_group_calibration_returns(get_test_set):
    """Check outer and inner keys returned by get_all_adversarial_group_calibration."""
    met_dict = get_all_adversarial_group_calibration(*get_test_set, 20)
    expected_outer = {"ma_adv_group_cal", "rms_adv_group_cal"}
    assert len(met_dict.keys()) == 2
    assert expected_outer.issubset(met_dict.keys())
    # Each outer entry holds a nested dict with the per-group statistics.
    expected_inner = {
        "group_sizes",
        "adv_group_cali_mean",
        "adv_group_cali_stderr",
    }
    for outer_key in expected_outer:
        inner_dict = met_dict[outer_key]
        assert len(inner_dict.keys()) == 3
        assert expected_inner.issubset(inner_dict.keys())
|
Test if correct adversarial group calibration metrics are returned.
|
test_get_all_adversarial_group_calibration_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_get_all_sharpness_metrics_returns(get_test_set):
    """Check that get_all_sharpness_metrics returns only the sharpness entry."""
    _, y_std, _ = get_test_set
    metrics = get_all_sharpness_metrics(y_std)
    assert len(metrics.keys()) == 1
    assert "sharp" in metrics.keys()
|
Test if correct sharpness metrics are returned.
|
test_get_all_sharpness_metrics_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_get_all_scoring_rule_metrics_returns(get_test_set):
    """Check that get_all_scoring_rule_metrics returns exactly the expected keys."""
    metrics = get_all_scoring_rule_metrics(*get_test_set, 99, True)
    expected_keys = {"nll", "crps", "check", "interval"}
    assert len(metrics.keys()) == 4
    assert expected_keys.issubset(metrics.keys())
|
Test if correct scoring rule metrics are returned.
|
test_get_all_scoring_rule_metrics_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_get_all_metrics_returns(get_test_set):
    """Check that get_all_metrics returns one entry per metric category."""
    met_dict = get_all_metrics(*get_test_set)
    expected_keys = {
        "accuracy",
        "avg_calibration",
        "adv_group_calibration",
        "sharpness",
        "scoring_rule",
    }
    assert len(met_dict.keys()) == 5
    assert expected_keys.issubset(met_dict.keys())
|
Test if correct metrics are returned by get_all_metrics function.
|
test_get_all_metrics_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_prediction_error_metric_fields(get_test_set):
    """Check that prediction_error_metrics returns exactly the expected fields."""
    y_pred, _, y_true = get_test_set
    metrics = prediction_error_metrics(y_pred, y_true)
    expected_fields = {"mae", "rmse", "mdae", "marpd", "r2", "corr"}
    assert len(metrics.keys()) == 6
    assert expected_fields.issubset(metrics.keys())
|
Test if prediction error metrics have correct fields.
|
test_prediction_error_metric_fields
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_accuracy.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_accuracy.py
|
MIT
|
def test_prediction_error_metric_values(get_test_set):
    """Test if prediction error metrics have correct values.

    The bounds are known-good ranges for the fixture data supplied by the
    ``get_test_set`` fixture.
    """
    y_pred, y_std, y_true = get_test_set
    met_dict = prediction_error_metrics(y_pred, y_true)
    # Removed leftover debug `print(met_dict)`; use chained comparisons
    # instead of `a > lo and a < hi`.
    assert 0.21 < met_dict["mae"] < 0.22
    assert 0.21 < met_dict["rmse"] < 0.22
    assert 0.20 <= met_dict["mdae"] < 0.21
    assert 12 < met_dict["marpd"] < 13
    assert 0.88 < met_dict["r2"] < 0.89
    assert 0.99 < met_dict["corr"] < 1.0
|
Test if prediction error metrics have correct values.
|
test_prediction_error_metric_values
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_accuracy.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_accuracy.py
|
MIT
|
def test_sharpness_on_test_set(supply_test_set):
    """Sharpness of the fixture's standard deviations matches a known value."""
    _, test_std, _ = supply_test_set
    expected_sharpness = 0.648074069840786
    assert np.abs(sharpness(test_std) - expected_sharpness) < 1e-6
|
Test sharpness on the test set for some dummy values.
|
test_sharpness_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_root_mean_squared_calibration_error_on_test_set(supply_test_set):
    """RMSCE: vectorized and non-vectorized paths agree and match known values."""
    # Expected vectorized RMSCE per proportion type (insertion order preserved,
    # so the calls happen in the same order as the original test).
    expected = {
        "interval": 0.4165757476562379,
        "quantile": 0.30362567774902066,
    }
    for prop_type, target in expected.items():
        loop_value = root_mean_squared_calibration_error(
            *supply_test_set,
            num_bins=100,
            vectorized=False,
            recal_model=None,
            prop_type=prop_type
        )
        vec_value = root_mean_squared_calibration_error(
            *supply_test_set,
            num_bins=100,
            vectorized=True,
            recal_model=None,
            prop_type=prop_type
        )
        assert np.abs(loop_value - vec_value) < 1e-6
        assert np.abs(vec_value - target) < 1e-6
|
Test root mean squared calibration error on some dummy values.
|
test_root_mean_squared_calibration_error_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_mean_absolute_calibration_error_on_test_set(supply_test_set):
    """MACE: vectorized and non-vectorized paths agree and match known values."""
    expected = {
        "interval": 0.3733333333333335,
        "quantile": 0.23757575757575758,
    }
    for prop_type, target in expected.items():
        loop_value = mean_absolute_calibration_error(
            *supply_test_set,
            num_bins=100,
            vectorized=False,
            recal_model=None,
            prop_type=prop_type
        )
        vec_value = mean_absolute_calibration_error(
            *supply_test_set,
            num_bins=100,
            vectorized=True,
            recal_model=None,
            prop_type=prop_type
        )
        assert np.abs(loop_value - vec_value) < 1e-6
        assert np.abs(vec_value - target) < 1e-6
|
Test mean absolute calibration error on some dummy values.
|
test_mean_absolute_calibration_error_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_adversarial_group_calibration_on_test_set(supply_test_set):
    """Sanity-check adversarial group calibration output for both prop types."""
    # Expected final mean score per proportion type; dict order matches the
    # original call order (interval first, then quantile).
    final_scores = {
        "interval": 0.3733333333333335,
        "quantile": 0.2375757575757576,
    }
    for prop_type, final_score in final_scores.items():
        out = adversarial_group_calibration(
            *supply_test_set,
            cali_type="mean_abs",
            prop_type=prop_type,
            num_bins=100,
            num_group_bins=10,
            draw_with_replacement=False,
            num_trials=10,
            num_group_draws=10,
            verbose=False
        )
        assert np.max(np.abs(out.group_size - np.linspace(0, 1, 10))) < 1e-6
        assert np.all(out.score_mean < 0.5)
        assert np.abs(out.score_mean[-1] - final_score) < 1e-6
        assert np.min(out.score_stderr) >= 0
|
Test adversarial group calibration on test set for some dummy values.
|
test_adversarial_group_calibration_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_miscalibration_area_on_test_set(supply_test_set):
    """Miscalibration area: vectorized and non-vectorized paths agree and match known values."""
    expected = {
        "interval": 0.37710437710437716,
        "quantile": 0.23916245791245785,
    }
    for prop_type, target in expected.items():
        loop_value = miscalibration_area(
            *supply_test_set,
            num_bins=100,
            vectorized=False,
            recal_model=None,
            prop_type=prop_type
        )
        vec_value = miscalibration_area(
            *supply_test_set,
            num_bins=100,
            vectorized=True,
            recal_model=None,
            prop_type=prop_type
        )
        assert np.abs(loop_value - vec_value) < 1e-6
        assert np.abs(vec_value - target) < 1e-6
|
Test miscalibration area on some dummy values.
|
test_miscalibration_area_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_vectorization_for_proportion_list_on_test_set(supply_test_set):
    """Non-vectorized and vectorized proportion lists must agree for both prop types."""
    for prop_type in ("interval", "quantile"):
        exp_nonvec, obs_nonvec = get_proportion_lists(
            *supply_test_set, num_bins=100, recal_model=None, prop_type=prop_type
        )
        exp_vec, obs_vec = get_proportion_lists_vectorized(
            *supply_test_set, num_bins=100, recal_model=None, prop_type=prop_type
        )
        assert np.max(np.abs(exp_nonvec - exp_vec)) < 1e-6
        assert np.max(np.abs(obs_nonvec - obs_vec)) < 1e-6
|
Test vectorization in get_proportion_lists on the test set for some dummy values.
|
test_vectorization_for_proportion_list_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_get_proportion_lists_vectorized_on_test_set(supply_test_set):
    """Check shapes and value sets from get_proportion_lists_vectorized."""
    # The observed proportions for this fixture only ever take four values.
    target_obs_values = np.array([0.0, 0.33333333, 0.66666667, 1.0])
    for prop_type in ("interval", "quantile"):
        exp_props, obs_props = get_proportion_lists_vectorized(
            *supply_test_set, num_bins=100, recal_model=None, prop_type=prop_type
        )
        assert exp_props.shape == obs_props.shape
        assert (
            np.max(np.abs(np.unique(exp_props) - np.linspace(0, 1, 100))) < 1e-6
        )
        assert (
            np.max(np.abs(np.sort(np.unique(obs_props)) - target_obs_values))
            < 1e-6
        )
|
Test get_proportion_lists_vectorized on the test set for some dummy values.
|
test_get_proportion_lists_vectorized_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_get_proportion_lists_on_test_set(supply_test_set):
    """Check lengths and value sets from get_proportion_lists for both prop types."""
    target_obs_values = np.array([0.0, 0.33333333, 0.66666667, 1.0])
    for prop_type in ("interval", "quantile"):
        exp_props, obs_props = get_proportion_lists(
            *supply_test_set, num_bins=100, recal_model=None, prop_type=prop_type
        )
        assert len(exp_props) == len(obs_props)
        assert (
            np.max(np.abs(np.unique(exp_props) - np.linspace(0, 1, 100))) < 1e-6
        )
        assert (
            np.max(np.abs(np.sort(np.unique(obs_props)) - target_obs_values))
            < 1e-6
        )
|
Test get_proportion_lists on the test set for some dummy values.
|
test_get_proportion_lists_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_get_proportion_in_interval_on_test_set(supply_test_set):
    """Spot-check get_proportion_in_interval at several quantiles."""
    cases = [
        (0.0, 0.0),
        (0.25, 0.0),
        (0.5, 0.0),
        (0.75, 0.3333333333333333),
        (1.0, 1.0),
    ]
    for quantile, expected in cases:
        observed = get_proportion_in_interval(*supply_test_set, quantile=quantile)
        assert np.abs(observed - expected) < 1e-6
|
Test get_proportion_in_interval on the test set for some dummy values.
|
test_get_proportion_in_interval_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_get_prediction_interval_on_test_set(supply_test_set):
    """Test get_prediction_interval on the test set for some dummy values.

    Each tuple is (quantile, expected upper bounds, expected lower bounds).
    Degenerate quantiles of 0.0 and 1.0 must raise.
    """
    test_quantile_value_list = [
        (
            0.01,
            np.array([1.00125335, 2.00626673, 3.01253347]),
            np.array([0.99874665, 1.99373327, 2.98746653]),
        ),
        (
            0.25,
            np.array([1.03186394, 2.15931968, 3.31863936]),
            np.array([0.96813606, 1.84068032, 2.68136064]),
        ),
        (
            0.50,
            np.array([1.06744898, 2.33724488, 3.67448975]),
            np.array([0.93255102, 1.66275512, 2.32551025]),
        ),
        (
            0.75,
            np.array([1.11503494, 2.57517469, 4.15034938]),
            np.array([0.88496506, 1.42482531, 1.84965062]),
        ),
        (
            0.99,
            np.array([1.25758293, 3.28791465, 5.5758293]),
            np.array([0.74241707, 0.71208535, 0.4241707]),
        ),
    ]
    y_pred, y_std, y_true = supply_test_set
    with pytest.raises(Exception):
        bounds = get_prediction_interval(y_pred, y_std, quantile=0.0, recal_model=None)
    with pytest.raises(Exception):
        bounds = get_prediction_interval(y_pred, y_std, quantile=1.0, recal_model=None)
    for test_q, test_upper, test_lower in test_quantile_value_list:
        bounds = get_prediction_interval(
            y_pred, y_std, quantile=test_q, recal_model=None
        )
        upper_bound = bounds.upper
        lower_bound = bounds.lower
        assert np.max(np.abs(upper_bound - test_upper)) < 1e-6
        # Bug fix: the original asserted the upper bound twice and never
        # compared the lower bound against test_lower.
        assert np.max(np.abs(lower_bound - test_lower)) < 1e-6
|
Test get_prediction_interval on the test set for some dummy values.
|
test_get_prediction_interval_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_nll_gaussian_on_one_pt():
    """A point at the mode of a Gaussian with density 1 there has NLL of zero.

    With std = 1/sqrt(2*pi), the normal pdf at the mean equals exactly 1,
    so -log(pdf) = 0.
    """
    prediction = np.array([0])
    observation = np.array([0])
    std = np.array([1 / np.sqrt(2 * np.pi)])
    assert np.abs(nll_gaussian(prediction, std, observation)) < 1e-6
|
Sanity check by testing one point at mean of gaussian.
|
test_nll_gaussian_on_one_pt
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_scoring_rule.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_scoring_rule.py
|
MIT
|
def test_check_score_on_one_pt():
    """Check score should (nearly) vanish when the datum sits exactly at the
    requested quantile of the predictive distribution."""
    # 0.841 is approximately the CDF value one standard deviation above the
    # mean, where the single observation (y_true = 1) lies.
    quantile = 0.5 + 0.341
    score = check_score(
        y_pred=np.array([0]),
        y_std=np.array([1]),
        y_true=np.array([1]),
        start_q=quantile,
        end_q=quantile,
        resolution=1,
    )
    assert np.abs(score) < 1e-2
|
Sanity check to show that check score is minimized (i.e. 0) if data
occurs at the exact requested quantile.
|
test_check_score_on_one_pt
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_scoring_rule.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_scoring_rule.py
|
MIT
|
def test_interval_score_on_one_pt():
    """Sanity check on interval score.

    For one point at the center of the distribution and intervals one and two
    standard deviations wide, the expected score is
    ((1 std) * 2 + (2 std) * 2) / 2 = 3.
    """
    score = interval_score(
        y_pred=np.array([0]),
        y_std=np.array([1]),
        y_true=np.array([0]),
        start_p=0.682,
        end_p=0.954,
        resolution=2,
    )
    assert np.abs(score - 3) < 1e-2
|
Sanity check on interval score. For one point in the center of the
distribution and intervals one standard deviation and two standard
deviations away, should return ((1 std) * 2 + (2 std) * 2) / 2 = 3.
|
test_interval_score_on_one_pt
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_scoring_rule.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_scoring_rule.py
|
MIT
|
def test_recal_model_mace_criterion_on_test_set(supply_test_set):
    """Isotonic recalibration should drive MACE from its baseline toward zero,
    and the recalibrated expected proportions must be non-decreasing."""
    baseline = mean_absolute_calibration_error(
        *supply_test_set, num_bins=100, vectorized=True, recal_model=None
    )
    exp_props, obs_props = get_proportion_lists_vectorized(
        *supply_test_set, num_bins=100, recal_model=None
    )
    recal_model = iso_recal(exp_props, obs_props)
    recalibrated = mean_absolute_calibration_error(
        *supply_test_set, num_bins=100, vectorized=True, recal_model=recal_model
    )
    recal_exp_props = recal_model.predict(obs_props)
    assert np.abs(baseline - 0.24206060606060598) < 1e-2
    assert np.abs(recalibrated - 0.003035353535353514) < 1e-2
    # Isotonic regression output must be monotonically non-decreasing.
    assert np.all(np.diff(recal_exp_props) >= 0)
|
Test recalibration on mean absolute calibration error on the test set
for some dummy values.
|
test_recal_model_mace_criterion_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_recal_model_rmce_criterion_on_test_set(supply_test_set):
    """Isotonic recalibration should drive RMSCE from its baseline toward zero,
    and the recalibrated expected proportions must be non-decreasing."""
    baseline = root_mean_squared_calibration_error(
        *supply_test_set, num_bins=100, vectorized=True, recal_model=None
    )
    exp_props, obs_props = get_proportion_lists_vectorized(
        *supply_test_set, num_bins=100, recal_model=None
    )
    recal_model = iso_recal(exp_props, obs_props)
    recalibrated = root_mean_squared_calibration_error(
        *supply_test_set, num_bins=100, vectorized=True, recal_model=recal_model
    )
    recal_exp_props = recal_model.predict(obs_props)
    assert np.abs(baseline - 0.28741418862839013) < 1e-2
    assert np.abs(recalibrated - 0.003981861230030349) < 1e-2
    # Isotonic regression output must be monotonically non-decreasing.
    assert np.all(np.diff(recal_exp_props) >= 0)
|
Test recalibration on root mean squared calibration error on the test set
for some dummy values.
|
test_recal_model_rmce_criterion_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_recal_model_miscal_area_criterion_on_test_set(supply_test_set):
    """Isotonic recalibration should shrink the miscalibration area toward zero,
    and the recalibrated expected proportions must be non-decreasing."""
    baseline = miscalibration_area(
        *supply_test_set, num_bins=100, vectorized=True, recal_model=None
    )
    exp_props, obs_props = get_proportion_lists_vectorized(
        *supply_test_set, num_bins=100, recal_model=None
    )
    recal_model = iso_recal(exp_props, obs_props)
    recalibrated = miscalibration_area(
        *supply_test_set, num_bins=100, vectorized=True, recal_model=recal_model
    )
    recal_exp_props = recal_model.predict(obs_props)
    assert np.abs(baseline - 0.24426139657444004) < 1e-2
    assert np.abs(recalibrated - 0.0029569160997732244) < 1e-2
    # Isotonic regression output must be monotonically non-decreasing.
    assert np.all(np.diff(recal_exp_props) >= 0)
|
Test recalibration on miscalibration area on the test set
for some dummy values.
|
test_recal_model_miscal_area_criterion_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_optimize_recalibration_ratio_mace_criterion(supply_test_set):
    """Std-ratio recalibration under the mean-absolute-calibration criterion."""
    random.seed(0)
    np.random.seed(seed=0)
    y_pred, y_std, y_true = supply_test_set
    ratio = optimize_recalibration_ratio(y_pred, y_std, y_true, criterion="ma_cal")
    scaled_std = ratio * y_std
    checks = [
        (ratio, 0.33215708813773176),
        (mean_absolute_calibration_error(y_pred, scaled_std, y_true), 0.06821616161616162),
        (root_mean_squared_calibration_error(y_pred, scaled_std, y_true), 0.08800130087804929),
        (miscalibration_area(y_pred, scaled_std, y_true), 0.06886262626262629),
    ]
    for actual, expected in checks:
        assert np.abs(actual - expected) < 1e-2
|
Test standard deviation recalibration on mean absolute calibration error
on the test set for some dummy values.
|
test_optimize_recalibration_ratio_mace_criterion
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_optimize_recalibration_ratio_rmce_criterion(supply_test_set):
    """Std-ratio recalibration under the root-mean-squared-calibration criterion."""
    random.seed(0)
    np.random.seed(seed=0)
    y_pred, y_std, y_true = supply_test_set
    ratio = optimize_recalibration_ratio(y_pred, y_std, y_true, criterion="rms_cal")
    scaled_std = ratio * y_std
    checks = [
        (ratio, 0.34900989073212507),
        (mean_absolute_calibration_error(y_pred, scaled_std, y_true), 0.06945555555555555),
        (root_mean_squared_calibration_error(y_pred, scaled_std, y_true), 0.08570902541177935),
        (miscalibration_area(y_pred, scaled_std, y_true), 0.07011706864564003),
    ]
    for actual, expected in checks:
        assert np.abs(actual - expected) < 1e-2
|
Test standard deviation recalibration on root mean squared calibration error
on the test set for some dummy values.
|
test_optimize_recalibration_ratio_rmce_criterion
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_optimize_recalibration_ratio_miscal_area_criterion(supply_test_set):
    """Std-ratio recalibration under the miscalibration-area criterion."""
    random.seed(0)
    np.random.seed(seed=0)
    y_pred, y_std, y_true = supply_test_set
    ratio = optimize_recalibration_ratio(y_pred, y_std, y_true, criterion="miscal")
    scaled_std = ratio * y_std
    checks = [
        (ratio, 0.3321912522557988),
        (mean_absolute_calibration_error(y_pred, scaled_std, y_true), 0.06821616161616162),
        (root_mean_squared_calibration_error(y_pred, scaled_std, y_true), 0.08800130087804929),
        (miscalibration_area(y_pred, scaled_std, y_true), 0.06886262626262629),
    ]
    for actual, expected in checks:
        assert np.abs(actual - expected) < 1e-2
|
Test standard deviation recalibration on miscalibration area
on the test set for some dummy values.
|
test_optimize_recalibration_ratio_miscal_area_criterion
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_get_std_recalibrator(supply_test_set):
    """
    Test get_std_recalibrator on the test set for some dummy values.

    Each tuple is (quantile, expected proportion inside the prediction
    interval, expected proportion below the quantile bound).
    """
    random.seed(0)
    np.random.seed(seed=0)
    y_pred, y_std, y_true = supply_test_set
    test_quantile_prop_list = [
        (0.01, 0.00, 0.00),
        (0.25, 0.06, 0.00),
        (0.50, 0.56, 0.00),
        (0.75, 0.74, 0.56),
        (0.99, 0.89, 0.88),
    ]
    std_recalibrator = get_std_recalibrator(y_pred, y_std, y_true)
    # The recalibrated stds do not depend on the quantile, so compute them
    # once instead of on every loop iteration (the original recomputed them
    # per quantile).
    y_std_recal = std_recalibrator(y_std)
    for q, test_prop_in_pi, test_prop_under_q in test_quantile_prop_list:
        pi = get_prediction_interval(y_pred, y_std_recal, q)
        prop_in_pi = ((pi.lower <= y_true) * (y_true <= pi.upper)).mean()
        quantile_bound = get_quantile(y_pred, y_std_recal, q)
        prop_under_q = (quantile_bound >= y_true).mean()
        assert np.max(np.abs(test_prop_in_pi - prop_in_pi)) < 5e-2
        assert np.max(np.abs(test_prop_under_q - prop_under_q)) < 5e-2
|
Test get_std_recalibration on the test set for some dummy values.
|
test_get_std_recalibrator
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_filter_subset(get_test_set):
    """filter_subset should return exactly n_subset elements per input array."""
    y_pred, y_std, y_true, _ = get_test_set
    n_subset = 2
    subsets = filter_subset([y_pred, y_std, y_true], n_subset)
    for subset in subsets:
        assert len(subset) == n_subset
|
Test if filter_subset returns correct number of subset elements.
|
test_filter_subset
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_viz.py
|
MIT
|
def synthetic_arange_random(
    num_points: int = 10,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Dataset of evenly spaced points and identity function (with some randomization).

    This function returns predictions and predictive uncertainties (given as
    standard deviations) from some hypothetical uncertainty model, along with
    true input x and output y data points.

    Args:
        num_points: The number of data points in the set.

    Returns:
        - The y predictions given by a hypothetical predictive uncertainty
          model: the true y values plus uniform noise.
        - The standard deviations given by a hypothetical predictive
          uncertainty model: the absolute prediction errors plus some uniform
          noise.
        - The true outputs y.
        - The true inputs x.
    """
    x = np.arange(num_points)
    y_true = np.arange(num_points)
    prediction_noise = np.random.random((num_points,))
    y_pred = y_true + prediction_noise
    std_jitter = 0.1 * np.random.random((num_points,))
    y_std = np.abs(y_pred - y_true) + std_jitter
    return y_pred, y_std, y_true, x
|
Dataset of evenly spaced points and identity function (with some randomization).
This function returns predictions and predictive uncertainties (given as standard
deviations) from some hypothetical uncertainty model, along with true input x and
output y data points.
Args:
num_points: The number of data points in the set.
Returns:
- The y predictions given by a hypothetical predictive uncertainty model. These
are the true values of y but with uniform noise added.
- The standard deviations given by a hypothetical predictive uncertainty model.
These are the errors between the predictions and the truth plus some uniform
noise.
- The true outputs y.
- The true inputs x.
|
synthetic_arange_random
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/data.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/data.py
|
MIT
|
def synthetic_sine_heteroscedastic(
    n_points: int = 10,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Sample the "synthetic sine" function under heteroscedastic noise.

    Produces a synthetic dataset suitable for training and assessing a
    predictive uncertainty model: a sine curve observed with Gaussian noise
    whose scale varies with the input location.

    Args:
        n_points: The number of data points in the set.

    Returns:
        - Predicted output points y (the noiseless sine values).
        - Predictive uncertainties, defined as the standard deviation of the
          added noise.
        - True (noisy) output points y.
        - True input points x.
    """
    lo, hi = 0, 15
    x = np.linspace(lo, hi, n_points)
    f = np.sin(x)
    # Noise scale grows linearly with distance from x = 5.
    std = 0.01 + np.abs(x - 5.0) / 10.0
    y = f + np.random.normal(scale=std)
    return f, std, y, x
|
Return samples from "synthetic sine" heteroscedastic noisy function.
This returns a synthetic dataset which can be used to train and assess a predictive
uncertainty model.
Args:
n_points: The number of data points in the set.
Returns:
- Predicted output points y.
- Predictive uncertainties, defined using standard deviation of added noise.
- True output points y.
- True input points x.
|
synthetic_sine_heteroscedastic
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/data.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/data.py
|
MIT
|
def get_all_accuracy_metrics(
    y_pred: np.ndarray,
    y_true: np.ndarray,
    verbose: bool = True,
) -> Dict[str, float]:
    """Compute every accuracy metric for a set of predictions.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        verbose: Activate verbose mode.

    Returns:
        The evaluations for all accuracy related metrics.
    """
    if verbose:
        print(" (1/n) Calculating accuracy metrics")
    # All accuracy metrics are delegated to the shared helper.
    return prediction_error_metrics(y_pred, y_true)
|
Compute all accuracy metrics.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
verbose: Activate verbose mode.
Returns:
The evaluations for all accuracy related metrics.
|
get_all_accuracy_metrics
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
def get_all_average_calibration(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    num_bins: int,
    verbose: bool = True,
) -> Dict[str, float]:
    """Compute all metrics for average calibration.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held
            out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        num_bins: The number of bins to use for discretization in some metrics.
        verbose: Activate verbose mode.

    Returns:
        The evaluations for all metrics relating to average calibration.
    """
    if verbose:
        print(" (2/n) Calculating average calibration metrics")
    # Map each output key to its metric function; all share the same signature.
    metric_fns = {
        "rms_cal": root_mean_squared_calibration_error,
        "ma_cal": mean_absolute_calibration_error,
        "miscal_area": miscalibration_area,
    }
    return {
        name: fn(y_pred, y_std, y_true, num_bins=num_bins)
        for name, fn in metric_fns.items()
    }
|
Compute all metrics for average calibration.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
    y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: The number of bins to use for discretization in some metrics.
verbose: Activate verbose mode.
Returns:
The evaluations for all metrics relating to average calibration.
|
get_all_average_calibration
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
def get_all_adversarial_group_calibration(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    num_bins: int,
    verbose: bool = True,
) -> Dict[str, Dict[str, np.ndarray]]:
    """Compute all metrics for adversarial group calibration.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held
            out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        num_bins: The number of bins to use for discretization in some metrics.
        verbose: Activate verbose mode.

    Returns:
        The evaluations for all metrics relating to adversarial group
        calibration. Each inner dictionary contains the size of each group
        and the metrics computed for each group.
    """
    if verbose:
        print(" (3/n) Calculating adversarial group calibration metrics")
    adv_group_cali_metrics = {}
    # One pass per calibration error type: (output key, cali_type, message).
    variants = [
        ("ma_adv_group_cal", "mean_abs", " [1/2] for mean absolute calibration error"),
        ("rms_adv_group_cal", "root_mean_sq", " [2/2] for root mean squared calibration error"),
    ]
    for out_key, cali_type, message in variants:
        if verbose:
            print(message)
        result = adversarial_group_calibration(
            y_pred,
            y_std,
            y_true,
            cali_type=cali_type,
            num_bins=num_bins,
            verbose=verbose,
        )
        adv_group_cali_metrics[out_key] = {
            "group_sizes": result.group_size,
            "adv_group_cali_mean": result.score_mean,
            "adv_group_cali_stderr": result.score_stderr,
        }
    return adv_group_cali_metrics
|
Compute all metrics for adversarial group calibration.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
    y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: The number of bins to use for discretization in some metrics.
verbose: Activate verbose mode.
Returns:
The evaluations for all metrics relating to adversarial group calibration.
Each inner dictionary contains the size of each group and the metrics
computed for each group.
|
get_all_adversarial_group_calibration
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
def get_all_sharpness_metrics(
    y_std: np.ndarray,
    verbose: bool = True,
) -> Dict[str, float]:
    """Compute all sharpness metrics.

    Args:
        y_std: 1D array of the predicted standard deviations for the held
            out dataset.
        verbose: Activate verbose mode.

    Returns:
        The evaluations for all sharpness metrics.
    """
    if verbose:
        print(" (4/n) Calculating sharpness metrics")
    # Sharpness is currently the only metric in this family.
    return {"sharp": sharpness(y_std)}
|
Compute all sharpness metrics
Args:
y_std: 1D array of he predicted standard deviations for the held out dataset.
verbose: Activate verbose mode.
Returns:
The evaluations for all sharpness metrics.
|
get_all_sharpness_metrics
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
def get_all_scoring_rule_metrics(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    resolution: int,
    scaled: bool,
    verbose: bool = True,
) -> Dict[str, float]:
    """Compute all proper scoring rule metrics.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held
            out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        resolution: The number of quantiles to use for computation.
        scaled: Whether to scale the score by size of held out set.
        verbose: Activate verbose mode.

    Returns:
        The computed scoring rule metrics.
    """
    if verbose:
        print(" (n/n) Calculating proper scoring rule metrics")
    # Dict literal preserves the evaluation/insertion order: nll, crps,
    # check, interval.
    return {
        "nll": nll_gaussian(y_pred, y_std, y_true, scaled=scaled),
        "crps": crps_gaussian(y_pred, y_std, y_true, scaled=scaled),
        "check": check_score(
            y_pred, y_std, y_true, scaled=scaled, resolution=resolution
        ),
        "interval": interval_score(
            y_pred, y_std, y_true, scaled=scaled, resolution=resolution
        ),
    }
|
Compute all scoring rule metrics
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of he predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
resolution: The number of quantiles to use for computation.
scaled: Whether to scale the score by size of held out set.
verbose: Activate verbose mode.
Returns:
The computed scoring rule metrics.
|
get_all_scoring_rule_metrics
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
def get_all_metrics(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    num_bins: int = 100,
    resolution: int = 99,
    scaled: bool = True,
    verbose: bool = True,
) -> Dict[str, Any]:
    """Compute all metrics.

    Runs every metric family (accuracy, average calibration, adversarial
    group calibration, sharpness, proper scoring rules) and, in verbose mode,
    prints a formatted summary of the results.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held
            out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        num_bins: The number of bins to use for discretization in some metrics.
        resolution: The number of quantiles to use for computation.
        scaled: Whether to scale the score by size of held out set.
        verbose: Activate verbose mode.

    Returns:
        Dictionary containing all metrics.
    """
    # The dict literal evaluates in order, so each family prints its own
    # progress message in sequence: accuracy, calibration, adversarial group
    # calibration, sharpness, scoring rules.
    all_scores = {
        "accuracy": get_all_accuracy_metrics(y_pred, y_true, verbose),
        "avg_calibration": get_all_average_calibration(
            y_pred, y_std, y_true, num_bins, verbose
        ),
        "adv_group_calibration": get_all_adversarial_group_calibration(
            y_pred, y_std, y_true, num_bins, verbose
        ),
        "sharpness": get_all_sharpness_metrics(y_std, verbose),
        "scoring_rule": get_all_scoring_rule_metrics(
            y_pred, y_std, y_true, resolution, scaled, verbose
        ),
    }
    # Print a human-readable summary of every family.
    if verbose:
        print("**Finished Calculating All Metrics**")
        print("\n")
        print(" Accuracy Metrics ".center(60, "="))
        for name, value in all_scores["accuracy"].items():
            print(" {:<13} {:.3f}".format(METRIC_NAMES[name], value))
        print(" Average Calibration Metrics ".center(60, "="))
        for name, value in all_scores["avg_calibration"].items():
            print(" {:<37} {:.3f}".format(METRIC_NAMES[name], value))
        print(" Adversarial Group Calibration Metrics ".center(60, "="))
        _print_adversarial_group_calibration(
            all_scores["adv_group_calibration"], print_group_num=3
        )
        print(" Sharpness Metrics ".center(60, "="))
        for name, value in all_scores["sharpness"].items():
            print(" {:} {:.3f}".format(METRIC_NAMES[name], value))
        print(" Scoring Rule Metrics ".center(60, "="))
        for name, value in all_scores["scoring_rule"].items():
            print(" {:<25} {:.3f}".format(METRIC_NAMES[name], value))
    return all_scores
|
Compute all metrics.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of he predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: The number of bins to use for discretization in some metrics.
resolution: The number of quantiles to use for computation.
scaled: Whether to scale the score by size of held out set.
verbose: Activate verbose mode.
Returns:
Dictionary containing all metrics.
|
get_all_metrics
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
def prediction_error_metrics(
    y_pred: np.ndarray,
    y_true: np.ndarray,
) -> Dict[str, float]:
    """Get all prediction error metrics.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.

    Returns:
        A dictionary with Mean average error ('mae'), Root mean squared
        error ('rmse'), Median absolute error ('mdae'), Mean absolute
        relative percent difference ('marpd'), r^2 ('r2'), and Pearson's
        correlation coefficient ('corr').
    """
    # Validate: both arrays must be flat and of equal length.
    assert_is_flat_same_shape(y_pred, y_true)
    residuals = y_true - y_pred
    # MARPD is symmetric in predictions and truth, expressed as a percentage.
    marpd = np.mean(np.abs(2 * residuals / (np.abs(y_pred) + np.abs(y_true)))) * 100
    return {
        "mae": mean_absolute_error(y_true, y_pred),
        "rmse": np.sqrt(mean_squared_error(y_true, y_pred)),
        "mdae": median_absolute_error(y_true, y_pred),
        "marpd": marpd,
        "r2": r2_score(y_true, y_pred),
        "corr": np.corrcoef(y_true, y_pred)[0, 1],
    }
|
Get all prediction error metrics.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
Returns:
A dictionary with Mean average error ('mae'), Root mean squared
error ('rmse'), Median absolute error ('mdae'), Mean absolute
relative percent difference ('marpd'), r^2 ('r2'), and Pearson's
correlation coefficient ('corr').
|
prediction_error_metrics
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_accuracy.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_accuracy.py
|
MIT
|
def sharpness(y_std: np.ndarray) -> float:
    """Return sharpness (a single measure of the overall confidence).

    Args:
        y_std: 1D array of the predicted standard deviations for the held
            out dataset.

    Returns:
        A single scalar which quantifies the average of the standard
        deviations.
    """
    # Validate: flat array of strictly positive standard deviations.
    assert_is_flat_same_shape(y_std)
    assert_is_positive(y_std)
    # Sharpness is the root-mean-square of the predicted standard deviations.
    return np.sqrt(np.square(y_std).mean())
|
Return sharpness (a single measure of the overall confidence).
Args:
y_std: 1D array of the predicted standard deviations for the held out dataset.
Returns:
A single scalar which quantifies the average of the standard deviations.
|
sharpness
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
def root_mean_squared_calibration_error(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    num_bins: int = 100,
    vectorized: bool = False,
    recal_model: IsotonicRegression = None,
    prop_type: str = "interval",
) -> float:
    """Root mean squared calibration error.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        num_bins: number of discretizations for the probability space [0, 1].
        vectorized: whether to vectorize computation for observed proportions.
            (while setting to True is faster, it has much higher memory requirements
            and may fail to run for larger datasets).
        recal_model: an sklearn isotonic regression model which recalibrates the predictions.
        prop_type: "interval" to measure observed proportions for centered prediction intervals,
            and "quantile" for observed proportions below a predicted quantile.

    Returns:
        A single scalar which calculates the root mean squared calibration error.
    """
    # Validate inputs: flat equal-shape arrays, positive stds, known prop_type.
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    assert_is_positive(y_std)
    assert prop_type in ["interval", "quantile"]
    # The vectorized variant trades memory for speed; both return the same
    # expected/observed proportion pairs over a range of quantiles.
    proportion_fn = (
        get_proportion_lists_vectorized if vectorized else get_proportion_lists
    )
    exp_proportions, obs_proportions = proportion_fn(
        y_pred, y_std, y_true, num_bins, recal_model, prop_type
    )
    # RMS of the per-quantile deviation between expected and observed.
    return np.sqrt(np.mean(np.square(exp_proportions - obs_proportions)))
|
Root mean squared calibration error.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: number of discretizations for the probability space [0, 1].
vectorized: whether to vectorize computation for observed proportions.
(while setting to True is faster, it has much higher memory requirements
and may fail to run for larger datasets).
recal_model: an sklearn isotonic regression model which recalibrates the predictions.
prop_type: "interval" to measure observed proportions for centered prediction intervals,
and "quantile" for observed proportions below a predicted quantile.
Returns:
A single scalar which calculates the root mean squared calibration error.
|
root_mean_squared_calibration_error
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
def mean_absolute_calibration_error(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    num_bins: int = 100,
    vectorized: bool = False,
    recal_model: IsotonicRegression = None,
    prop_type: str = "interval",
) -> float:
    """Mean absolute calibration error; identical to ECE.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        num_bins: number of discretizations for the probability space [0, 1].
        vectorized: whether to vectorize computation for observed proportions.
            (while setting to True is faster, it has much higher memory requirements
            and may fail to run for larger datasets).
        recal_model: an sklearn isotonic regression model which recalibrates the predictions.
        prop_type: "interval" to measure observed proportions for centered prediction intervals,
            and "quantile" for observed proportions below a predicted quantile.

    Returns:
        A single scalar which calculates the mean absolute calibration error.
    """
    # Validate inputs: flat equal-shape arrays, positive stds, known prop_type.
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    assert_is_positive(y_std)
    assert prop_type in ["interval", "quantile"]
    # The vectorized variant trades memory for speed; both return the same
    # expected/observed proportion pairs over a range of quantiles.
    proportion_fn = (
        get_proportion_lists_vectorized if vectorized else get_proportion_lists
    )
    exp_proportions, obs_proportions = proportion_fn(
        y_pred, y_std, y_true, num_bins, recal_model, prop_type
    )
    # Mean of the per-quantile absolute deviation (a.k.a. ECE).
    return np.mean(np.abs(exp_proportions - np.asarray(obs_proportions)))
|
Mean absolute calibration error; identical to ECE.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: number of discretizations for the probability space [0, 1].
vectorized: whether to vectorize computation for observed proportions.
(while setting to True is faster, it has much higher memory requirements
and may fail to run for larger datasets).
recal_model: an sklearn isotonic regression model which recalibrates the predictions.
prop_type: "interval" to measure observed proportions for centered prediction intervals,
and "quantile" for observed proportions below a predicted quantile.
Returns:
A single scalar which calculates the mean absolute calibration error.
|
mean_absolute_calibration_error
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
def adversarial_group_calibration(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    cali_type: str,
    prop_type: str = "interval",
    num_bins: int = 100,
    num_group_bins: int = 10,
    draw_with_replacement: bool = False,
    num_trials: int = 10,
    num_group_draws: int = 10,
    verbose: bool = False,
) -> Namespace:
    """Adversarial group calibration.

    For a range of group sizes, repeatedly draws random subgroups of the
    dataset and records the worst (largest) calibration error observed among
    the subgroups, estimating how poorly calibrated the model can be on
    subsets of the data.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        cali_type: type of calibration error to measure; one of ["mean_abs", "root_mean_sq"].
        prop_type: "interval" to measure observed proportions for centered prediction intervals,
            and "quantile" for observed proportions below a predicted quantile.
        num_bins: number of discretizations for the probability space [0, 1].
        num_group_bins: number of discretizations for group size proportions between 0 and 1.
        draw_with_replacement: True to draw subgroups that draw from the dataset with replacement.
        num_trials: number of trials to estimate the worst calibration error per group size.
        num_group_draws: number of subgroups to draw per given group size to measure calibration error on.
        verbose: True to print progress statements.

    Returns:
        A Namespace with an array of the group sizes (as proportions of the
        dataset), the mean of the worst calibration errors for each group
        size, and the standard error of the worst calibration error for each
        group size.

    Raises:
        ValueError: If `cali_type` is not a recognized calibration error type.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    # Check that input std is positive
    assert_is_positive(y_std)
    # Check that prop_type is one of 'interval' or 'quantile'
    assert prop_type in ["interval", "quantile"]
    # Fix: validate cali_type explicitly. Previously an unrecognized value
    # left cali_fn unbound, crashing later with UnboundLocalError; it also
    # redundantly recomputed num_pts from y_std after computing it from y_true.
    cali_fns = {
        "mean_abs": mean_absolute_calibration_error,
        "root_mean_sq": root_mean_squared_calibration_error,
    }
    if cali_type not in cali_fns:
        raise ValueError(
            "cali_type must be one of {}, got {}".format(sorted(cali_fns), cali_type)
        )
    cali_fn = cali_fns[cali_type]
    num_pts = y_true.shape[0]
    ratio_arr = np.linspace(0, 1, num_group_bins)
    score_mean_per_ratio = []
    score_stderr_per_ratio = []
    if verbose:
        print(
            (
                "Measuring adversarial group calibration by spanning group"
                " size between {} and {}, in {} intervals"
            ).format(np.min(ratio_arr), np.max(ratio_arr), num_group_bins)
        )
    progress = tqdm(ratio_arr) if verbose else ratio_arr
    for r in progress:
        # Use at least 2 points so a calibration error is well-defined.
        group_size = max([int(round(num_pts * r)), 2])
        score_per_trial = []  # list of worst miscalibrations encountered
        for _ in range(num_trials):
            group_miscal_scores = []
            for g_idx in range(num_group_draws):
                rand_idx = np.random.choice(
                    num_pts, group_size, replace=draw_with_replacement
                )
                group_y_pred = y_pred[rand_idx]
                group_y_true = y_true[rand_idx]
                group_y_std = y_std[rand_idx]
                group_miscal = cali_fn(
                    group_y_pred,
                    group_y_std,
                    group_y_true,
                    num_bins=num_bins,
                    vectorized=True,
                    prop_type=prop_type,
                )
                group_miscal_scores.append(group_miscal)
            # Adversarial: keep the worst miscalibration among the draws.
            max_miscal_score = np.max(group_miscal_scores)
            score_per_trial.append(max_miscal_score)
        # Mean and sample standard deviation (ddof=1) across trials.
        score_mean_across_trials = np.mean(score_per_trial)
        score_stderr_across_trials = np.std(score_per_trial, ddof=1)
        score_mean_per_ratio.append(score_mean_across_trials)
        score_stderr_per_ratio.append(score_stderr_across_trials)
    out = Namespace(
        group_size=ratio_arr,
        score_mean=np.array(score_mean_per_ratio),
        score_stderr=np.array(score_stderr_per_ratio),
    )
    return out
|
Adversarial group calibration.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
cali_type: type of calibration error to measure; one of ["mean_abs", "root_mean_sq"].
prop_type: "interval" to measure observed proportions for centered prediction intervals,
and "quantile" for observed proportions below a predicted quantile.
num_bins: number of discretizations for the probability space [0, 1].
num_group_bins: number of discretizations for group size proportions between 0 and 1.
draw_with_replacement: True to draw subgroups that draw from the dataset with replacement.
num_trials: number of trials to estimate the worst calibration error per group size.
num_group_draws: number of subgroups to draw per given group size to measure calibration error on.
verbose: True to print progress statements.
Returns:
A Namespace with an array of the group sizes, the mean of the worst
calibration errors for each group size, and the standard error of the
worst calibration error for each group size
|
adversarial_group_calibration
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
def miscalibration_area_from_proportions(
    exp_proportions: np.ndarray, obs_proportions: np.ndarray
) -> float:
    """Miscalibration area from expected and observed proportions lists.

    This function returns the same output as `miscalibration_area` directly
    from a list of expected proportions (the proportion of data that you
    expect to observe within prediction intervals) and a list of observed
    proportions (the proportion of data that you observe within prediction
    intervals).

    Args:
        exp_proportions: expected proportion of data within prediction intervals.
        obs_proportions: observed proportion of data within prediction intervals.

    Returns:
        A single scalar that contains the miscalibration area.
    """
    # Consecutive points along the reliability curve; the area between the
    # curve and the identity line is summed trapezoid by trapezoid.
    x0, y0 = exp_proportions[:-1], obs_proportions[:-1]
    x1, y1 = exp_proportions[1:], obs_proportions[1:]
    areas = trapezoid_area(x0, x0, y0, x1, x1, y1, absolute=True)
    return areas.sum()
|
Miscalibration area from expected and observed proportions lists.
This function returns the same output as `miscalibration_area` directly from a list
of expected proportions (the proportion of data that you expect to observe within
prediction intervals) and a list of observed proportions (the proportion data that
you observe within prediction intervals).
Args:
exp_proportions: expected proportion of data within prediction intervals.
obs_proportions: observed proportion of data within prediction intervals.
Returns:
A single scalar that contains the miscalibration area.
|
miscalibration_area_from_proportions
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
def get_proportion_lists_vectorized(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    num_bins: int = 100,
    recal_model: Any = None,
    prop_type: str = "interval",
) -> Tuple[np.ndarray, np.ndarray]:
    """Arrays of expected and observed proportions.

    Returns the expected proportions and observed proportion of points falling
    into intervals corresponding to a range of quantiles.
    Computations here are vectorized for faster execution, but this function
    is not suited when there are memory constraints.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        num_bins: number of discretizations for the probability space [0, 1].
        recal_model: an sklearn isotonic regression model which recalibrates the predictions.
        prop_type: "interval" to measure observed proportions for centered prediction intervals,
            and "quantile" for observed proportions below a predicted quantile.

    Returns:
        A tuple of two numpy arrays, expected proportions and observed proportions
    """
    # Validate inputs: flat equal-shape arrays, positive stds, known prop_type.
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    assert_is_positive(y_std)
    assert prop_type in ["interval", "quantile"]
    exp_proportions = np.linspace(0, 1, num_bins)
    # When recalibrating, the quantile levels fed to the Gaussian are the
    # recalibrated proportions; the returned x-axis stays the raw grid.
    in_exp_proportions = (
        recal_model.predict(exp_proportions)
        if recal_model is not None
        else exp_proportions
    )
    residuals = y_pred - y_true
    # Column vector of standardized residuals so comparisons against the
    # per-quantile bounds broadcast to shape (num_pts, num_bins).
    normalized_residuals = (residuals.flatten() / y_std.flatten()).reshape(-1, 1)
    norm = stats.norm(loc=0, scale=1)
    num_pts = len(residuals)
    if prop_type == "interval":
        lower = norm.ppf(0.5 - in_exp_proportions / 2.0)
        upper = norm.ppf(0.5 + in_exp_proportions / 2.0)
        within = (normalized_residuals >= lower) & (normalized_residuals <= upper)
        obs_proportions = within.sum(axis=0).flatten() / num_pts
    else:  # prop_type == "quantile", guaranteed by the assert above
        below = normalized_residuals <= norm.ppf(in_exp_proportions)
        obs_proportions = below.sum(axis=0).flatten() / num_pts
    return exp_proportions, obs_proportions
|
Arrays of expected and observed proportions
Returns the expected proportions and observed proportion of points falling into
intervals corresponding to a range of quantiles.
Computations here are vectorized for faster execution, but this function is
not suited when there are memory constraints.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: number of discretizations for the probability space [0, 1].
recal_model: an sklearn isotonic regression model which recalibrates the predictions.
prop_type: "interval" to measure observed proportions for centered prediction intervals,
and "quantile" for observed proportions below a predicted quantile.
Returns:
A tuple of two numpy arrays, expected proportions and observed proportions
|
get_proportion_lists_vectorized
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
def get_proportion_lists(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    num_bins: int = 100,
    recal_model: IsotonicRegression = None,
    prop_type: str = "interval",
) -> Tuple[np.ndarray, np.ndarray]:
    """Arrays of expected and observed proportions.

    Return arrays of expected and observed proportions of points falling into
    intervals corresponding to a range of quantiles. Computations here are not
    vectorized, in case there are memory constraints.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        num_bins: number of discretizations for the probability space [0, 1].
        recal_model: an sklearn isotonic regression model which recalibrates the predictions.
        prop_type: "interval" to measure observed proportions for centered prediction
            intervals, and "quantile" for observed proportions below a predicted quantile.

    Returns:
        A tuple of two numpy arrays, expected proportions and observed proportions
    """
    # Validate inputs: flat arrays of matching shape, positive stds, known prop_type.
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    assert_is_positive(y_std)
    assert prop_type in ["interval", "quantile"]

    exp_proportions = np.linspace(0, 1, num_bins)
    # When recalibrating, the model maps each expected proportion to the
    # recalibrated proportion at which the predictive distribution is queried.
    if recal_model is not None:
        in_exp_proportions = recal_model.predict(exp_proportions)
    else:
        in_exp_proportions = exp_proportions

    # Dispatch to the appropriate per-quantile measurement (one call per bin;
    # intentionally unvectorized to keep peak memory low).
    if prop_type == "interval":
        proportion_fn = get_proportion_in_interval
    else:  # "quantile" — guaranteed by the assert above
        proportion_fn = get_proportion_under_quantile
    obs_proportions = [
        proportion_fn(y_pred, y_std, y_true, level) for level in in_exp_proportions
    ]
    return exp_proportions, obs_proportions
|
Arrays of expected and observed proportions
Return arrays of expected and observed proportions of points falling into
intervals corresponding to a range of quantiles.
Computations here are not vectorized, in case there are memory constraints.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: number of discretizations for the probability space [0, 1].
recal_model: an sklearn isotonic regression model which recalibrates the predictions.
prop_type: "interval" to measure observed proportions for centered prediction intervals,
and "quantile" for observed proportions below a predicted quantile.
Returns:
A tuple of two numpy arrays, expected proportions and observed proportions
|
get_proportion_lists
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
def get_proportion_in_interval(
    y_pred: np.ndarray, y_std: np.ndarray, y_true: np.ndarray, quantile: float
) -> float:
    """For a specified quantile, return the proportion of points falling into
    an interval corresponding to that quantile.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        quantile: a specified quantile level

    Returns:
        A single scalar which is the proportion of the true labels falling into the
        prediction interval for the specified quantile.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    # Check that input std is positive
    assert_is_positive(y_std)
    # Compute lower and upper bound of the centered standard-normal interval
    norm = stats.norm(loc=0, scale=1)
    lower_bound = norm.ppf(0.5 - quantile / 2)
    upper_bound = norm.ppf(0.5 + quantile / 2)
    # Compute proportion of normalized residuals within lower to upper bound.
    # Vectorized count replaces the original O(n) Python-level loop; the bound
    # checks are inclusive on both ends, matching the original semantics.
    residuals = y_pred - y_true
    normalized_residuals = residuals.reshape(-1) / y_std.reshape(-1)
    within_quantile = (normalized_residuals >= lower_bound) & (
        normalized_residuals <= upper_bound
    )
    proportion = float(np.sum(within_quantile)) / len(residuals)
    return proportion
|
For a specified quantile, return the proportion of points falling into
an interval corresponding to that quantile.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
quantile: a specified quantile level
Returns:
A single scalar which is the proportion of the true labels falling into the
prediction interval for the specified quantile.
|
get_proportion_in_interval
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
def get_proportion_under_quantile(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    quantile: float,
) -> float:
    """Get the proportion of data that are below the predicted quantile.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        quantile: The quantile level to check.

    Returns:
        The proportion of data below the quantile level.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    # Check that input std is positive
    assert_is_positive(y_std)
    # Compute the standard-normal bound for the requested quantile
    norm = stats.norm(loc=0, scale=1)
    quantile_bound = norm.ppf(quantile)
    # Compute proportion of normalized residuals at or below the bound.
    # Vectorized count replaces the original O(n) Python-level loop; the
    # comparison is inclusive (<=), matching the original semantics.
    residuals = y_pred - y_true
    normalized_residuals = residuals / y_std
    proportion = float(np.sum(normalized_residuals <= quantile_bound)) / len(residuals)
    return proportion
|
Get the proportion of data that are below the predicted quantile.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
quantile: The quantile level to check.
Returns:
The proportion of data below the quantile level.
|
get_proportion_under_quantile
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
def get_prediction_interval(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    quantile: np.ndarray,
    recal_model: Optional[IsotonicRegression] = None,
) -> Namespace:
    """Return the centered prediction interval corresponding to a quantile.

    For a specified quantile level q (must be a float, or a singleton),
    return the centered prediction interval corresponding
    to the pair of quantiles at levels (0.5-q/2) and (0.5+q/2),
    i.e. interval that has nominal coverage equal to q.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        quantile: The quantile level to check.
        recal_model: A recalibration model to apply before computing the interval.

    Returns:
        Namespace containing the lower and upper bound corresponding to the
        centered interval.
    """
    # Accept a bare float for convenience; normalize to a singleton array.
    if isinstance(quantile, float):
        quantile = np.array([quantile])

    # Validate inputs: flat arrays, a singleton quantile, and positive stds.
    assert_is_flat_same_shape(y_pred, y_std)
    assert_is_flat_same_shape(quantile)
    assert quantile.size == 1
    assert_is_positive(y_std)
    if not (0.0 < quantile.item() < 1.0):
        raise ValueError("Quantile must be greater than 0.0 and less than 1.0")

    # Map through the recalibration model first, when one is provided.
    if recal_model is not None:
        quantile = recal_model.predict(quantile)

    # Per-point Gaussian predictive distribution; the centered interval is the
    # pair of quantiles symmetric about the median.
    predictive_dist = stats.norm(loc=y_pred, scale=y_std)
    return Namespace(
        upper=predictive_dist.ppf(0.5 + quantile / 2),
        lower=predictive_dist.ppf(0.5 - quantile / 2),
    )
|
Return the centered prediction interval corresponding to a quantile.
For a specified quantile level q (must be a float, or a singleton),
return the centered prediction interval corresponding
to the pair of quantiles at levels (0.5-q/2) and (0.5+q/2),
i.e. interval that has nominal coverage equal to q.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
quantile: The quantile level to check.
recal_model: A recalibration model to apply before computing the interval.
Returns:
Namespace containing the lower and upper bound corresponding to the
centered interval.
|
get_prediction_interval
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
def get_quantile(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    quantile: np.ndarray,
    recal_model: Optional[IsotonicRegression] = None,
) -> float:
    """Return the value corresponding with a quantile.

    For a specified quantile level q (must be a float, or a singleton),
    return the quantile prediction,
    i.e. bound that has nominal coverage below the bound equal to q.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        quantile: The quantile level to check.
        recal_model: A recalibration model to apply before computing the interval.

    Returns:
        The value at which the quantile is achieved.
    """
    # Accept a bare float for convenience; normalize to a singleton array.
    if isinstance(quantile, float):
        quantile = np.array([quantile])

    # Validate inputs: flat arrays, a singleton quantile, and positive stds.
    assert_is_flat_same_shape(y_pred, y_std)
    assert_is_flat_same_shape(quantile)
    assert quantile.size == 1
    assert_is_positive(y_std)
    if not (0.0 < quantile.item() < 1.0):
        raise ValueError("Quantile must be greater than 0.0 and less than 1.0")

    # Map through the recalibration model first, when one is provided.
    if recal_model is not None:
        quantile = recal_model.predict(quantile)

    # Per-point Gaussian predictive distribution evaluated at the quantile.
    predictive_dist = stats.norm(loc=y_pred, scale=y_std)
    return predictive_dist.ppf(quantile).flatten()
|
Return the value corresponding with a quantile.
For a specified quantile level q (must be a float, or a singleton),
return the quantile prediction,
i.e. bound that has nominal coverage below the bound equal to q.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
quantile: The quantile level to check.
recal_model: A recalibration model to apply before computing the interval.
Returns:
The value at which the quantile is achieved.
|
get_quantile
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
def nll_gaussian(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    scaled: bool = True,
) -> float:
    """Negative log likelihood for a gaussian.

    The negative log likelihood for held out data (y_true) given predictive
    uncertainty with mean (y_pred) and standard-deviation (y_std).

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        scaled: Whether to scale the negative log likelihood by size of held out set.

    Returns:
        The negative log likelihood for the heldout set.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    # Per-point Gaussian log density of the residuals under the predicted stds.
    residuals = y_pred - y_true
    log_likelihoods = stats.norm.logpdf(residuals, scale=y_std)
    total_nll = -np.sum(log_likelihoods)
    # Potentially scale so that sum becomes mean
    if scaled:
        total_nll = total_nll / len(log_likelihoods)
    return total_nll
|
Negative log likelihood for a gaussian.
The negative log likelihood for held out data (y_true) given predictive
uncertainty with mean (y_pred) and standard-deviation (y_std).
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
scaled: Whether to scale the negative log likelihood by size of held out set.
Returns:
The negative log likelihood for the heldout set.
|
nll_gaussian
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_scoring_rule.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_scoring_rule.py
|
MIT
|
def crps_gaussian(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    scaled: bool = True,
) -> float:
    """The negatively oriented continuous ranked probability score for Gaussians.

    Computes CRPS for held out data (y_true) given predictive uncertainty with mean
    (y_pred) and standard-deviation (y_std). Each test point is given equal weight
    in the overall score over the test set.

    Negatively oriented means a smaller value is more desirable.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        scaled: Whether to scale the score by size of held out set.

    Returns:
        The crps for the heldout set.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    # Closed-form CRPS of a Gaussian, evaluated per point on the standardized
    # residuals z = (y - mu) / sigma.
    z = (y_true - y_pred) / y_std
    pdf_term = 2 * stats.norm.pdf(z, loc=0, scale=1)
    cdf_term = z * (2 * stats.norm.cdf(z, loc=0, scale=1) - 1)
    per_point_crps = -1 * y_std * (1 / np.sqrt(np.pi) - pdf_term - cdf_term)
    crps = np.sum(per_point_crps)
    # Potentially scale so that sum becomes mean
    if scaled:
        crps = crps / len(per_point_crps)
    return crps
|
The negatively oriented continuous ranked probability score for Gaussians.
Computes CRPS for held out data (y_true) given predictive uncertainty with mean
(y_pred) and standard-deviation (y_std). Each test point is given equal weight
in the overall score over the test set.
Negatively oriented means a smaller value is more desirable.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
    y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
scaled: Whether to scale the score by size of held out set.
Returns:
The crps for the heldout set.
|
crps_gaussian
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_scoring_rule.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_scoring_rule.py
|
MIT
|
def check_score(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    scaled: bool = True,
    start_q: float = 0.01,
    end_q: float = 0.99,
    resolution: int = 99,
) -> float:
    """The negatively oriented check score.

    Computes the negatively oriented check score for held out data (y_true)
    given predictive uncertainty with mean (y_pred) and standard-deviation (y_std).
    Each test point and each quantile is given equal weight in the overall score
    over the test set and list of quantiles.

    The score is computed by scanning over a sequence of quantiles of the predicted
    distributions, starting at (start_q) and ending at (end_q).

    Negatively oriented means a smaller value is more desirable.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        scaled: Whether to scale the score by size of held out set.
        start_q: The lower bound of the quantiles to use for computation.
        end_q: The upper bound of the quantiles to use for computation.
        resolution: The number of quantiles to use for computation.

    Returns:
        The check score.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)

    quantile_levels = np.linspace(start_q, end_q, resolution)
    per_quantile_scores = []
    for q in quantile_levels:
        # Predicted q-quantile of each point's Gaussian predictive distribution.
        pred_quantile = stats.norm.ppf(q, loc=y_pred, scale=y_std)
        diff = pred_quantile - y_true
        # Pinball-loss weighting: (1 - q) on over-prediction, -q on under-prediction.
        weight = (diff >= 0).astype(float) - q
        per_quantile_scores.append(np.mean(weight * diff))

    # `total` avoids shadowing the function name, unlike the original local.
    total = np.sum(per_quantile_scores)
    if scaled:
        total = total / len(per_quantile_scores)
    return total
|
The negatively oriented check score.
Computes the negatively oriented check score for held out data (y_true)
given predictive uncertainty with mean (y_pred) and standard-deviation (y_std).
Each test point and each quantile is given equal weight in the overall score
over the test set and list of quantiles.
The score is computed by scanning over a sequence of quantiles of the predicted
distributions, starting at (start_q) and ending at (end_q).
Negatively oriented means a smaller value is more desirable.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
scaled: Whether to scale the score by size of held out set.
start_q: The lower bound of the quantiles to use for computation.
end_q: The upper bound of the quantiles to use for computation.
resolution: The number of quantiles to use for computation.
Returns:
The check score.
|
check_score
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_scoring_rule.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_scoring_rule.py
|
MIT
|
def interval_score(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    scaled: bool = True,
    start_p: float = 0.01,
    end_p: float = 0.99,
    resolution: int = 99,
) -> float:
    """The negatively oriented interval score.

    Compute the negatively oriented interval score for held out data (y_true)
    given predictive uncertainty with mean (y_pred) and standard-deviation
    (y_std). Each test point and each percentile is given equal weight in the
    overall score over the test set and list of quantiles.

    Negatively oriented means a smaller value is more desirable.

    This metric is computed by scanning over a sequence of prediction intervals. Where
    p is the amount of probability captured from a centered prediction interval,
    intervals are formed starting at p=(start_p) and ending at p=(end_p).

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        scaled: Whether to scale the score by size of held out set.
        start_p: The lower bound of probability to capture in a prediction interval.
        end_p: The upper bound of probability to capture in a prediction interval.
        resolution: The number of prediction intervals to use to compute the metric.

    Returns:
        The interval score.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)

    coverage_levels = np.linspace(start_p, end_p, resolution)
    per_level_scores = []
    for p in coverage_levels:
        # Centered p% prediction interval of each point's Gaussian predictive dist.
        pred_l = stats.norm.ppf(0.5 - p / 2.0, loc=y_pred, scale=y_std)
        pred_u = stats.norm.ppf(0.5 + p / 2.0, loc=y_pred, scale=y_std)
        # Indicator masks for truths falling outside the interval.
        below_l = ((pred_l - y_true) > 0).astype(float)
        above_u = ((y_true - pred_u) > 0).astype(float)
        # Interval width plus miss penalties weighted by 2 / (1 - p).
        penalty = 2.0 / (1 - p)
        score = (
            (pred_u - pred_l)
            + penalty * (pred_l - y_true) * below_l
            + penalty * (y_true - pred_u) * above_u
        )
        per_level_scores.append(np.mean(score))

    total = np.sum(per_level_scores)
    if scaled:
        total = total / len(per_level_scores)
    return total
|
The negatively oriented interval score.
Compute the negatively oriented interval score for held out data (y_true)
given predictive uncertainty with mean (y_pred) and standard-deviation
(y_std). Each test point and each percentile is given equal weight in the
overall score over the test set and list of quantiles.
Negatively oriented means a smaller value is more desirable.
This metric is computed by scanning over a sequence of prediction intervals. Where
p is the amount of probability captured from a centered prediction interval,
intervals are formed starting at p=(start_p) and ending at p=(end_p).
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
scaled: Whether to scale the score by size of held out set.
start_p: The lower bound of probability to capture in a prediction interval.
end_p: The upper bound of probability to capture in a prediction interval.
resolution: The number of prediction intervals to use to compute the metric.
Returns:
The interval score.
|
interval_score
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_scoring_rule.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_scoring_rule.py
|
MIT
|
def get_q_idx(exp_props: np.ndarray, q: float) -> int:
    """Utility function which outputs the array index of an element.

    Gets the (approximate) index of a specified probability value, q, in the
    expected proportions array. Used as a utility function in isotonic
    regression recalibration.

    Args:
        exp_props: 1D array of expected probabilities.
        q: a specified probability float.

    Returns:
        An index which specifies the (approximate) index of q in exp_props
    """
    num_pts = exp_props.shape[0]
    for idx in range(num_pts):
        if idx == num_pts - 1:
            # Last element: q matches only if it equals the final proportion
            # to two decimal places.
            if round(q, 2) == round(float(exp_props[-1]), 2):
                return num_pts - 1
            break
        # q belongs to the half-open bin [exp_props[idx], exp_props[idx + 1]).
        if exp_props[idx] <= q < exp_props[idx + 1]:
            return idx
    raise ValueError("q must be within exp_props")
|
Utility function which outputs the array index of an element.
Gets the (approximate) index of a specified probability value, q, in the expected proportions array.
Used as a utility function in isotonic regression recalibration.
Args:
exp_props: 1D array of expected probabilities.
q: a specified probability float.
Returns:
An index which specifies the (approximate) index of q in exp_props
|
get_q_idx
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
|
MIT
|
def iso_recal(
    exp_props: np.ndarray,
    obs_props: np.ndarray,
) -> IsotonicRegression:
    """Recalibration algorithm based on isotonic regression.

    Fits and outputs an isotonic recalibration model that maps observed
    probabilities to expected probabilities. This mapping provides
    the necessary adjustments to produce better calibrated outputs.

    Args:
        exp_props: 1D array of expected probabilities (values must span [0, 1]).
        obs_props: 1D array of observed probabilities.

    Returns:
        An sklearn IsotonicRegression recalibration model.
    """
    # Flatten
    exp_props = exp_props.flatten()
    obs_props = obs_props.flatten()
    min_obs = np.min(obs_props)
    max_obs = np.max(obs_props)

    iso_model = IsotonicRegression(increasing=True, out_of_bounds="clip")
    # just need observed prop values between 0 and 1
    # problematic if min_obs_p > 0 and max_obs_p < 1
    # Bug fix: the original `if not (min_obs == 0.0) and (max_obs == 1.0)` parsed
    # as `(not min_obs == 0.0) and (max_obs == 1.0)` due to precedence, so the
    # diagnostic only fired when max_obs was exactly 1. Per the comment above,
    # the intent is to warn whenever the observed span does not cover [0, 1].
    if not ((min_obs == 0.0) and (max_obs == 1.0)):
        print("Obs props not ideal: from {} to {}".format(min_obs, max_obs))

    # Restrict to the segment of exp_props spanning [0, 1].
    exp_0_idx = get_q_idx(exp_props, 0.0)
    exp_1_idx = get_q_idx(exp_props, 1.0)
    within_01 = obs_props[exp_0_idx : exp_1_idx + 1]

    beg_idx, end_idx = None, None
    # Handle beg_idx: locate where the kept (monotone) segment should start.
    if exp_0_idx != 0:
        min_obs_below = np.min(obs_props[:exp_0_idx])
        min_obs_within = np.min(within_01)
        if min_obs_below < min_obs_within:
            # A smaller value exists before exp_0_idx; walk back to it.
            i = exp_0_idx - 1
            while obs_props[i] > min_obs_below:
                i -= 1
            beg_idx = i
        elif np.sum((within_01 == min_obs_within).astype(float)) > 1:
            # multiple minima in within_01 ==> get last min idx
            i = exp_1_idx - 1
            while obs_props[i] > min_obs_within:
                i -= 1
            beg_idx = i
        elif np.sum((within_01 == min_obs_within).astype(float)) == 1:
            beg_idx = int(np.argmin(within_01) + exp_0_idx)
        else:
            raise RuntimeError("Inspect input arrays. Cannot set beginning index.")
    else:
        beg_idx = exp_0_idx

    # Handle end_idx: locate the (exclusive) end of the kept segment.
    if exp_1_idx < obs_props.shape[0] - 1:
        max_obs_above = np.max(obs_props[exp_1_idx + 1 :])
        max_obs_within = np.max(within_01)
        if max_obs_above > max_obs_within:
            # A larger value exists past exp_1_idx; walk forward to it.
            i = exp_1_idx + 1
            while obs_props[i] < max_obs_above:
                i += 1
            end_idx = i + 1
        elif np.sum((within_01 == max_obs_within).astype(float)) > 1:
            # multiple maxima in within_01 ==> get last max idx
            i = beg_idx
            while obs_props[i] < max_obs_within:
                i += 1
            end_idx = i + 1
        elif np.sum((within_01 == max_obs_within).astype(float)) == 1:
            end_idx = int(exp_0_idx + np.argmax(within_01) + 1)
        else:
            raise RuntimeError("Inspect input arrays. Cannot set ending index.")
    else:
        end_idx = exp_1_idx + 1

    if end_idx <= beg_idx:
        raise RuntimeError("Ending index before beginning index")

    filtered_obs_props = obs_props[beg_idx:end_idx]
    filtered_exp_props = exp_props[beg_idx:end_idx]

    try:
        iso_model = iso_model.fit(filtered_obs_props, filtered_exp_props)
    except Exception as e:
        # Chain the original exception so the root cause is not lost.
        raise RuntimeError("Failed to fit isotonic regression model") from e
    return iso_model
|
Recalibration algorithm based on isotonic regression.
Fits and outputs an isotonic recalibration model that maps observed
probabilities to expected probabilities. This mapping provides
the necessary adjustments to produce better calibrated outputs.
Args:
exp_props: 1D array of expected probabilities (values must span [0, 1]).
obs_props: 1D array of observed probabilities.
Returns:
An sklearn IsotonicRegression recalibration model.
|
iso_recal
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
|
MIT
|
def optimize_recalibration_ratio(
    y_mean: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    criterion: str = "ma_cal",
    optimizer_bounds: Tuple[float, float] = (1e-2, 1e2),
) -> float:
    """Scale factor which uniformly recalibrates predicted standard deviations.

    Searches via black-box optimization the standard deviation scale factor (opt_ratio)
    which produces the best recalibration, i.e. updated standard deviation
    can be written as opt_ratio * y_std.

    Args:
        y_mean: 1D array of the predicted means for the recalibration dataset.
        y_std: 1D array of the predicted standard deviations for the recalibration dataset.
        y_true: 1D array of the true means for the recalibration dataset.
        criterion: calibration metric to optimize for during recalibration; must be one of {"ma_cal", "rms_cal", "miscal"}.
        optimizer_bounds: The bounds for the ratio given to the optimizer.

    Returns:
        A single scalar which optimally recalibrates the predicted standard deviations.
    """
    # Local import keeps this fix self-contained (stdlib only).
    import warnings

    # Map the criterion name to its metric and its worst attainable value.
    if criterion == "ma_cal":
        cal_fn = uct.metrics.mean_absolute_calibration_error
        worst_cal = 0.5
    elif criterion == "rms_cal":
        cal_fn = uct.metrics.root_mean_squared_calibration_error
        worst_cal = np.sqrt(1.0 / 3.0)
    elif criterion == "miscal":
        cal_fn = uct.metrics.miscalibration_area
        worst_cal = 0.5
    else:
        raise RuntimeError("Wrong criterion option")

    def obj(ratio):
        # If ratio is 0, return worst-possible calibration metric
        if ratio == 0:
            return worst_cal
        return cal_fn(y_mean, ratio * y_std, y_true)

    result = minimize_scalar(fun=obj, bounds=optimizer_bounds)
    opt_ratio = result.x

    # Bug fix: the original `raise Warning(...)` aborted execution here instead
    # of warning, which also made the `opt_ratio = 1.0` fallback below
    # unreachable. Emit real warnings and continue, as clearly intended.
    if not result.success:
        warnings.warn("Optimization did not succeed")

    # Fall back to the identity ratio when the optimum is no better than the
    # original (uncalibrated) standard deviations.
    original_cal = cal_fn(y_mean, y_std, y_true)
    ratio_cal = cal_fn(y_mean, opt_ratio * y_std, y_true)
    if ratio_cal > original_cal:
        warnings.warn(
            "No better calibration found, no recalibration performed and returning original uncertainties"
        )
        opt_ratio = 1.0

    return opt_ratio
|
Scale factor which uniformly recalibrates predicted standard deviations.
Searches via black-box optimization the standard deviation scale factor (opt_ratio)
which produces the best recalibration, i.e. updated standard deviation
can be written as opt_ratio * y_std.
Args:
y_mean: 1D array of the predicted means for the recalibration dataset.
y_std: 1D array of the predicted standard deviations for the recalibration dataset.
y_true: 1D array of the true means for the recalibration dataset.
criterion: calibration metric to optimize for during recalibration; must be one of {"ma_cal", "rms_cal", "miscal"}.
optimizer_bounds: The bounds for the ratio given to the optimizer.
Returns:
A single scalar which optimally recalibrates the predicted standard deviations.
|
optimize_recalibration_ratio
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
|
MIT
|
def get_std_recalibrator(
    y_mean: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    criterion: str = "ma_cal",
    optimizer_bounds: Tuple[float, float] = (1e-2, 1e2),
) -> Callable[[np.ndarray], np.ndarray]:
    """Standard deviation recalibrator.

    Computes the standard deviation recalibration ratio and returns a function
    which takes in an array of uncalibrated standard deviations and returns
    an array of recalibrated standard deviations.

    Args:
        y_mean: 1D array of the predicted means for the recalibration dataset.
        y_std: 1D array of the predicted standard deviations for the recalibration dataset.
        y_true: 1D array of the true means for the recalibration dataset.
        criterion: calibration metric to optimize for during recalibration; must be one of {"ma_cal", "rms_cal", "miscal"}.
        optimizer_bounds: The bounds for the ratio given to the optimizer.

    Returns:
        A function which takes uncalibrated standard deviations as input and
        outputs the recalibrated standard deviations.
    """
    recal_ratio = optimize_recalibration_ratio(
        y_mean, y_std, y_true, criterion, optimizer_bounds=optimizer_bounds
    )

    def std_recalibrator(std_arr):
        # Recalibration is a uniform rescaling by the optimized ratio.
        return recal_ratio * std_arr

    return std_recalibrator
|
Standard deviation recalibrator.
Computes the standard deviation recalibration ratio and returns a function
which takes in an array of uncalibrated standard deviations and returns
an array of recalibrated standard deviations.
Args:
y_mean: 1D array of the predicted means for the recalibration dataset.
y_std: 1D array of the predicted standard deviations for the recalibration dataset.
y_true: 1D array of the true means for the recalibration dataset.
criterion: calibration metric to optimize for during recalibration; must be one of {"ma_cal", "rms_cal", "miscal"}.
optimizer_bounds: The bounds for the ratio given to the optimizer.
Returns:
A function which takes uncalibrated standard deviations as input and
outputs the recalibrated standard deviations.
|
get_std_recalibrator
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
|
MIT
|
def get_quantile_recalibrator(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
) -> Callable[[np.ndarray, np.ndarray, Union[float, np.ndarray]], np.ndarray]:
    """Quantile recalibrator.

    Fits an isotonic regression recalibration model and returns a function
    which takes in the mean and standard deviation predictions and a specified
    quantile level, and returns the recalibrated quantile.

    Args:
        y_pred: 1D array of the predicted means for the recalibration dataset.
        y_std: 1D array of the predicted standard deviations for the recalibration dataset.
        y_true: 1D array of the true means for the recalibration dataset.

    Returns:
        A function which outputs the recalibrated quantile prediction.
    """
    # Fit the isotonic model on (expected, observed) quantile proportions.
    exp_props, obs_props = uct.get_proportion_lists_vectorized(
        y_pred, y_std, y_true, prop_type="quantile"
    )
    recal_model = iso_recal(exp_props, obs_props)

    def quantile_recalibrator(y_pred, y_std, quantile):
        # Delegate to get_quantile with the fitted isotonic model applied.
        return uct.metrics_calibration.get_quantile(
            y_pred, y_std, quantile, recal_model=recal_model
        )

    return quantile_recalibrator
|
Quantile recalibrator.
Fits an isotonic regression recalibration model and returns a function
which takes in the mean and standard deviation predictions and a specified
quantile level, and returns the recalibrated quantile.
Args:
y_pred: 1D array of the predicted means for the recalibration dataset.
y_std: 1D array of the predicted standard deviations for the recalibration dataset.
y_true: 1D array of the true means for the recalibration dataset.
Returns:
A function which outputs the recalibrated quantile prediction.
|
get_quantile_recalibrator
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
|
MIT
|
def get_interval_recalibrator(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
) -> Callable[[np.ndarray, np.ndarray, Union[float, np.ndarray]], np.ndarray]:
    """Prediction interval recalibrator.

    Fits an isotonic regression recalibration model on the given recalibration
    dataset and returns a closure that maps mean/standard-deviation predictions
    plus a requested centered coverage level to the recalibrated interval.

    Args:
        y_pred: 1D array of the predicted means for the recalibration dataset.
        y_std: 1D array of the predicted standard deviations for the recalibration dataset.
        y_true: 1D array of the true means for the recalibration dataset.

    Returns:
        A function which outputs the recalibrated prediction interval.
    """
    # Fit isotonic regression on (expected, observed) proportion pairs under
    # the centered-"interval" notion of calibration.
    expected, observed = uct.get_proportion_lists_vectorized(
        y_pred, y_std, y_true, prop_type="interval"
    )
    recal_model = iso_recal(expected, observed)

    def interval_recalibrator(y_pred, y_std, quantile):
        # Delegate to the toolbox interval computation, remapping the
        # requested coverage through the fitted isotonic model.
        return uct.metrics_calibration.get_prediction_interval(
            y_pred, y_std, quantile, recal_model=recal_model
        )

    return interval_recalibrator
|
Prediction interval recalibrator.
Fits an isotonic regression recalibration model and returns a function
which takes in the mean and standard deviation predictions and a specified
centered interval coverage level, and returns the recalibrated interval.
Args:
y_pred: 1D array of the predicted means for the recalibration dataset.
y_std: 1D array of the predicted standard deviations for the recalibration dataset.
y_true: 1D array of the true means for the recalibration dataset.
Returns:
A function which outputs the recalibrated prediction interval.
|
get_interval_recalibrator
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
|
MIT
|
def assert_is_flat_same_shape(*args: Any) -> Union[bool, NoReturn]:
    """Assert that every input is a 1D numpy array of one common length.

    Args:
        args: the numpy arrays to check.

    Returns:
        True if all arrays are flat and the same shape, or else raises assertion error.
    """
    assert len(args) > 0
    assert isinstance(args[0], np.ndarray), "All inputs must be of type numpy.ndarray"
    # All subsequent arrays are compared against the shape of the first.
    reference_shape = args[0].shape
    for candidate in args:
        assert isinstance(
            candidate, np.ndarray
        ), "All inputs must be of type numpy.ndarray"
        assert len(candidate.shape) == 1, "All inputs must be 1d numpy.ndarray"
        assert candidate.shape == reference_shape, "All inputs must have the same length"
    return True
|
Check if inputs are all same-length 1d numpy.ndarray.
Args:
args: the numpy arrays to check.
Returns:
True if all arrays are flat and the same shape, or else raises assertion error.
|
assert_is_flat_same_shape
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/utils.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/utils.py
|
MIT
|
def assert_is_positive(*args: Any) -> Union[bool, NoReturn]:
    """Assert that all numpy arrays contain strictly positive values.

    Args:
        args: the numpy arrays to check.

    Returns:
        True if all elements in all arrays are positive values, or else raises assertion error.
    """
    assert len(args) > 0
    for candidate in args:
        assert isinstance(
            candidate, np.ndarray
        ), "All inputs must be of type numpy.ndarray"
        # Strictly greater than zero; a single zero or negative entry fails.
        assert np.all(candidate > 0.0)
    return True
|
Assert that all numpy arrays are positive.
Args:
args: the numpy arrays to check.
Returns:
True if all elements in all arrays are positive values, or else raises assertion error.
|
assert_is_positive
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/utils.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/utils.py
|
MIT
|
def trapezoid_area(
    xl: np.ndarray,
    al: np.ndarray,
    bl: np.ndarray,
    xr: np.ndarray,
    ar: np.ndarray,
    br: np.ndarray,
    absolute: bool = True,
) -> Numeric:
    """
    Calculate the area of a vertical-sided trapezoid formed by connecting
    (xl, al) - (xl, bl) - (xr, br) - (xr, ar) - (xl, al).

    The two non-vertical edges may cross; that "hourglass" case is detected
    and handled explicitly.

    Args:
        xl: The x coordinate of the left-hand points of the trapezoid
        al: The y coordinate of the first left-hand point of the trapezoid
        bl: The y coordinate of the second left-hand point of the trapezoid
        xr: The x coordinate of the right-hand points of the trapezoid
        ar: The y coordinate of the first right-hand point of the trapezoid
        br: The y coordinate of the second right-hand point of the trapezoid
        absolute: Whether to calculate the absolute area, or allow a negative area (e.g. if a and b are swapped)

    Returns: The area of the given trapezoid.
    """
    # Vertical gaps on each side.
    left_gap = bl - al
    right_gap = br - ar
    # Edges cross iff the gaps have opposite signs; the fully degenerate case
    # (both gaps zero) is treated as a non-crossing trapezoid.
    crossing = left_gap * right_gap < 0
    crossing = crossing * (1 - ((left_gap == 0) * (right_gap == 0)))
    # Plain trapezoid area for the non-crossing case.
    area_trapezoid = (xr - xl) * 0.5 * (left_gap + right_gap)
    if absolute:
        area_trapezoid = np.abs(area_trapezoid)
    # Hourglass area for crossing edges: two triangles on either side of the
    # intersection point. NaNs only arise in the degenerate and parallel
    # cases and are masked out by `crossing` in the final expression.
    with np.errstate(divide="ignore", invalid="ignore"):
        x_cross = intersection((xl, bl), (xr, br), (xl, al), (xr, ar))[0]
        left_tri = 0.5 * left_gap * (x_cross - xl)
        right_tri = 0.5 * right_gap * (xr - x_cross)
    if absolute:
        area_hourglass = np.abs(left_tri) + np.abs(right_tri)
    else:
        area_hourglass = left_tri + right_tri
    # nan_to_num lets 0 * nan evaluate to 0 for the masked-out entries.
    return (1 - crossing) * area_trapezoid + crossing * np.nan_to_num(area_hourglass)
|
Calculate the area of a vertical-sided trapezoid, formed connecting the following points:
(xl, al) - (xl, bl) - (xr, br) - (xr, ar) - (xl, al)
This function considers the case that the edges of the trapezoid might cross,
and explicitly accounts for this.
Args:
xl: The x coordinate of the left-hand points of the trapezoid
al: The y coordinate of the first left-hand point of the trapezoid
bl: The y coordinate of the second left-hand point of the trapezoid
xr: The x coordinate of the right-hand points of the trapezoid
ar: The y coordinate of the first right-hand point of the trapezoid
br: The y coordinate of the second right-hand point of the trapezoid
absolute: Whether to calculate the absolute area, or allow a negative area (e.g. if a and b are swapped)
Returns: The area of the given trapezoid.
|
trapezoid_area
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/utils.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/utils.py
|
MIT
|
def intersection(
    p1: Tuple[Numeric, Numeric],
    p2: Tuple[Numeric, Numeric],
    p3: Tuple[Numeric, Numeric],
    p4: Tuple[Numeric, Numeric],
) -> Tuple[Numeric, Numeric]:
    """
    Calculate the intersection of the two lines defined by four points, as in
    https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection.

    This works elementwise on arrays, so entire arrays of intersections can be
    computed in one call.

    Args:
        p1: The point (x1, y1), first point of Line 1
        p2: The point (x2, y2), second point of Line 1
        p3: The point (x3, y3), first point of Line 2
        p4: The point (x4, y4), second point of Line 2

    Returns: The point of intersection of the two lines, or (np.nan, np.nan) if the lines are parallel
    """
    x1, y1 = p1
    x2, y2 = p2
    x3, y3 = p3
    x4, y4 = p4
    # Shared 2x2 determinants of the standard closed-form solution.
    cross_12 = x1 * y2 - y1 * x2
    cross_34 = x3 * y4 - y3 * x4
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    x = (cross_12 * (x3 - x4) - (x1 - x2) * cross_34) / denom
    y = (cross_12 * (y3 - y4) - (y1 - y2) * cross_34) / denom
    return x, y
|
Calculate the intersection of two lines between four points, as defined in
https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection.
This is an array operation and can be used to calculate the intersections of
entire arrays of points at the same time.
Args:
p1: The point (x1, y1), first point of Line 1
p2: The point (x2, y2), second point of Line 1
p3: The point (x3, y3), first point of Line 2
p4: The point (x4, y4), second point of Line 2
Returns: The point of intersection of the two lines, or (np.nan, np.nan) if the lines are parallel
|
intersection
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/utils.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/utils.py
|
MIT
|
def plot_xy(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    x: np.ndarray,
    n_subset: Union[int, None] = None,
    ylims: Union[Tuple[float, float], None] = None,
    xlims: Union[Tuple[float, float], None] = None,
    num_stds_confidence_bound: int = 2,
    leg_loc: Union[int, str] = 3,
    ax: Union[matplotlib.axes.Axes, None] = None,
) -> matplotlib.axes.Axes:
    """Plot one-dimensional inputs with associated predicted values, predictive
    uncertainties, and true values.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        x: 1D array of input values for the held out dataset.
        n_subset: Number of points to plot after filtering.
        ylims: a tuple of y axis plotting bounds, given as (lower, upper).
        xlims: a tuple of x axis plotting bounds, given as (lower, upper).
        num_stds_confidence_bound: width of confidence band, in terms of number of
            standard deviations.
        leg_loc: location of legend as a str or legend code int.
        ax: matplotlib.axes.Axes object.

    Returns:
        matplotlib.axes.Axes object with plot added.
    """
    # Create ax if it doesn't exist
    if ax is None:
        fig, ax = plt.subplots(figsize=(5, 5))
    # Order points by increasing x so the line plot and the fill_between band
    # are drawn as contiguous curves rather than zig-zags.
    order = np.argsort(x)
    y_pred, y_std, y_true, x = (
        y_pred[order],
        y_std[order],
        y_true[order],
        x[order],
    )
    # Optionally select a random subset of n_subset points
    if n_subset is not None:
        [y_pred, y_std, y_true, x] = filter_subset([y_pred, y_std, y_true, x], n_subset)
    # Half-width of the confidence band, in predicted standard deviations.
    intervals = num_stds_confidence_bound * y_std
    h1 = ax.plot(x, y_true, ".", mec="#ff7f0e", mfc="None")
    h2 = ax.plot(x, y_pred, "-", c="#1f77b4", linewidth=2)
    h3 = ax.fill_between(
        x,
        y_pred - intervals,
        y_pred + intervals,
        color="lightsteelblue",
        alpha=0.4,
    )
    # NOTE(review): the legend label hardcodes "95%", which is only accurate
    # for the default num_stds_confidence_bound=2 — confirm intended.
    ax.legend(
        [h1[0], h2[0], h3],
        ["Observations", "Predictions", "$95\%$ Interval"],
        loc=leg_loc,
    )
    # Format plot
    if ylims is not None:
        ax.set_ylim(ylims)
    if xlims is not None:
        ax.set_xlim(xlims)
    ax.set_xlabel("$x$")
    ax.set_ylabel("$y$")
    ax.set_title("Confidence Band")
    # Force a square plot regardless of the data's aspect ratio.
    ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable="box")
    return ax
|
Plot one-dimensional inputs with associated predicted values, predictive
uncertainties, and true values.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
x: 1D array of input values for the held out dataset.
n_subset: Number of points to plot after filtering.
ylims: a tuple of y axis plotting bounds, given as (lower, upper).
xlims: a tuple of x axis plotting bounds, given as (lower, upper).
num_stds_confidence_bound: width of confidence band, in terms of number of
standard deviations.
leg_loc: location of legend as a str or legend code int.
ax: matplotlib.axes.Axes object.
Returns:
matplotlib.axes.Axes object with plot added.
|
plot_xy
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
|
MIT
|
def plot_intervals(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    n_subset: Union[int, None] = None,
    ylims: Union[Tuple[float, float], None] = None,
    num_stds_confidence_bound: int = 2,
    ax: Union[matplotlib.axes.Axes, None] = None,
) -> matplotlib.axes.Axes:
    """Plot predictions and predictive intervals versus true values.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        n_subset: Number of points to plot after filtering.
        ylims: a tuple of y axis plotting bounds, given as (lower, upper).
        num_stds_confidence_bound: width of intervals, in terms of number of standard
            deviations.
        ax: matplotlib.axes.Axes object.

    Returns:
        matplotlib.axes.Axes object with plot added.
    """
    # Fall back to a fresh square figure when no axes were supplied.
    if ax is None:
        fig, ax = plt.subplots(figsize=(5, 5))
    # Optionally thin the data down to a random subset.
    if n_subset is not None:
        y_pred, y_std, y_true = filter_subset([y_pred, y_std, y_true], n_subset)
    # Interval half-width in predicted standard deviations.
    half_width = num_stds_confidence_bound * y_std
    # Draw the error bars first so the prediction markers sit on top.
    ax.errorbar(
        y_true,
        y_pred,
        half_width,
        fmt="o",
        ls="none",
        linewidth=1.5,
        c="#1f77b4",
        alpha=0.5,
    )
    pred_handle = ax.plot(y_true, y_pred, "o", c="#1f77b4")
    # Axis limits: either user-supplied or derived from the interval extremes.
    if ylims is None:
        lower_bounds = y_pred - half_width
        upper_bounds = y_pred + half_width
        axis_lims = [
            int(np.floor(np.min(lower_bounds))),
            int(np.ceil(np.max(upper_bounds))),
        ]
    else:
        axis_lims = ylims
    # Identity line for reference.
    diag_handle = ax.plot(axis_lims, axis_lims, "--", linewidth=1.5, c="#ff7f0e")
    ax.legend([pred_handle[0], diag_handle[0]], ["Predictions", "$f(x) = x$"], loc=4)
    # Formatting: square plot with matching x/y ranges.
    ax.set_xlim(axis_lims)
    ax.set_ylim(axis_lims)
    ax.set_xlabel("Observed Values")
    ax.set_ylabel("Predicted Values and Intervals")
    ax.set_title("Prediction Intervals")
    ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable="box")
    return ax
|
Plot predictions and predictive intervals versus true values.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
n_subset: Number of points to plot after filtering.
ylims: a tuple of y axis plotting bounds, given as (lower, upper).
num_stds_confidence_bound: width of intervals, in terms of number of standard
deviations.
ax: matplotlib.axes.Axes object.
Returns:
matplotlib.axes.Axes object with plot added.
|
plot_intervals
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
|
MIT
|
def plot_intervals_ordered(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    n_subset: Union[int, None] = None,
    ylims: Union[Tuple[float, float], None] = None,
    num_stds_confidence_bound: int = 2,
    ax: Union[matplotlib.axes.Axes, None] = None,
) -> matplotlib.axes.Axes:
    """Plot predictions and predictive intervals versus true values, with points ordered
    by true value along x-axis.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        n_subset: Number of points to plot after filtering.
        ylims: a tuple of y axis plotting bounds, given as (lower, upper).
        num_stds_confidence_bound: width of intervals, in terms of number of standard
            deviations.
        ax: matplotlib.axes.Axes object.

    Returns:
        matplotlib.axes.Axes object with plot added.
    """
    # Fall back to a fresh square figure when no axes were supplied.
    if ax is None:
        fig, ax = plt.subplots(figsize=(5, 5))
    # Optionally thin the data down to a random subset (before ordering).
    if n_subset is not None:
        y_pred, y_std, y_true = filter_subset([y_pred, y_std, y_true], n_subset)
    # Sort everything by increasing observed value; the x-axis is the rank.
    order = np.argsort(y_true.flatten())
    y_pred, y_std, y_true = y_pred[order], y_std[order], y_true[order]
    ranks = np.arange(len(order))
    half_width = num_stds_confidence_bound * y_std
    # Error bars first so the prediction markers sit on top.
    ax.errorbar(
        ranks,
        y_pred,
        half_width,
        fmt="o",
        ls="none",
        linewidth=1.5,
        c="#1f77b4",
        alpha=0.5,
    )
    pred_handle = ax.plot(ranks, y_pred, "o", c="#1f77b4")
    true_handle = ax.plot(ranks, y_true, "--", linewidth=2.0, c="#ff7f0e")
    ax.legend(
        [pred_handle[0], true_handle[0]], ["Predicted Values", "Observed Values"], loc=4
    )
    # y limits: either user-supplied or derived from the interval extremes.
    if ylims is None:
        lower_bounds = y_pred - half_width
        upper_bounds = y_pred + half_width
        axis_lims = [
            int(np.floor(np.min(lower_bounds))),
            int(np.ceil(np.max(upper_bounds))),
        ]
    else:
        axis_lims = ylims
    # Formatting.
    ax.set_ylim(axis_lims)
    ax.set_xlabel("Index (Ordered by Observed Value)")
    ax.set_ylabel("Predicted Values and Intervals")
    ax.set_title("Ordered Prediction Intervals")
    ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable="box")
    return ax
|
Plot predictions and predictive intervals versus true values, with points ordered
by true value along x-axis.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
n_subset: Number of points to plot after filtering.
ylims: a tuple of y axis plotting bounds, given as (lower, upper).
num_stds_confidence_bound: width of intervals, in terms of number of standard
deviations.
ax: matplotlib.axes.Axes object.
Returns:
matplotlib.axes.Axes object with plot added.
|
plot_intervals_ordered
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
|
MIT
|
def plot_calibration(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    n_subset: Union[int, None] = None,
    curve_label: Union[str, None] = None,
    vectorized: bool = True,
    exp_props: Union[np.ndarray, None] = None,
    obs_props: Union[np.ndarray, None] = None,
    ax: Union[matplotlib.axes.Axes, None] = None,
    prop_type: str = "interval",
) -> matplotlib.axes.Axes:
    """Plot the observed proportion vs prediction proportion of outputs falling into a
    range of intervals, and display miscalibration area.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        n_subset: Number of points to plot after filtering.
        curve_label: legend label str for calibration curve.
        vectorized: plot using get_proportion_lists_vectorized.
        exp_props: plot using the given expected proportions.
        obs_props: plot using the given observed proportions.
        ax: matplotlib.axes.Axes object.
        prop_type: "interval" to measure observed proportions for centered prediction intervals,
            and "quantile" for observed proportions below a predicted quantile.
            Ignored if exp_props and obs_props are provided as inputs.

    Returns:
        matplotlib.axes.Axes object with plot added.
    """
    # Create ax if it doesn't exist
    if ax is None:
        fig, ax = plt.subplots(figsize=(5, 5))
    # Optionally select a random subset of n_subset points
    if n_subset is not None:
        [y_pred, y_std, y_true] = filter_subset([y_pred, y_std, y_true], n_subset)
    if (exp_props is None) or (obs_props is None):
        # Proportions not supplied: compute them from the predictions, using
        # the vectorized implementation unless explicitly disabled.
        if vectorized:
            (
                exp_proportions,
                obs_proportions,
            ) = get_proportion_lists_vectorized(
                y_pred, y_std, y_true, prop_type=prop_type
            )
        else:
            (exp_proportions, obs_proportions) = get_proportion_lists(
                y_pred, y_std, y_true, prop_type=prop_type
            )
    else:
        # Expected and observed proportions were given by the caller; flatten
        # and check that they pair up one-to-one.
        exp_proportions = np.array(exp_props).flatten()
        obs_proportions = np.array(obs_props).flatten()
        if exp_proportions.shape != obs_proportions.shape:
            raise RuntimeError("exp_props and obs_props shape mismatch")
    # Set label
    if curve_label is None:
        curve_label = "Predictor"
    # Plot: ideal diagonal, the calibration curve, and the shaded
    # miscalibration area between them.
    ax.plot([0, 1], [0, 1], "--", label="Ideal", c="#ff7f0e")
    ax.plot(exp_proportions, obs_proportions, label=curve_label, c="#1f77b4")
    ax.fill_between(exp_proportions, exp_proportions, obs_proportions, alpha=0.2)
    # Format plot
    ax.set_xlabel("Predicted Proportion in Interval")
    ax.set_ylabel("Observed Proportion in Interval")
    ax.axis("square")
    # Small margin so the curve is not clipped at the [0, 1] borders.
    buff = 0.01
    ax.set_xlim([0 - buff, 1 + buff])
    ax.set_ylim([0 - buff, 1 + buff])
    ax.set_title("Average Calibration")
    # Compute miscalibration area
    miscalibration_area = miscalibration_area_from_proportions(
        exp_proportions=exp_proportions, obs_proportions=obs_proportions
    )
    # Annotate plot with the miscalibration area in the lower-right corner
    ax.text(
        x=0.95,
        y=0.05,
        s="Miscalibration area = %.2f" % miscalibration_area,
        verticalalignment="bottom",
        horizontalalignment="right",
        fontsize="small",
    )
    return ax
|
Plot the observed proportion vs prediction proportion of outputs falling into a
range of intervals, and display miscalibration area.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
n_subset: Number of points to plot after filtering.
curve_label: legend label str for calibration curve.
vectorized: plot using get_proportion_lists_vectorized.
exp_props: plot using the given expected proportions.
obs_props: plot using the given observed proportions.
ax: matplotlib.axes.Axes object.
prop_type: "interval" to measure observed proportions for centered prediction intervals,
and "quantile" for observed proportions below a predicted quantile.
Ignored if exp_props and obs_props are provided as inputs.
Returns:
matplotlib.axes.Axes object with plot added.
|
plot_calibration
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
|
MIT
|
def plot_adversarial_group_calibration(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    n_subset: Union[int, None] = None,
    cali_type: str = "mean_abs",
    curve_label: Union[str, None] = None,
    group_size: Union[np.ndarray, None] = None,
    score_mean: Union[np.ndarray, None] = None,
    score_stderr: Union[np.ndarray, None] = None,
    ax: Union[matplotlib.axes.Axes, None] = None,
) -> matplotlib.axes.Axes:
    """Plot adversarial group calibration plots by varying group size from 0% to 100% of
    dataset size and recording the worst calibration occurred for each group size.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        n_subset: Number of points to plot after filtering.
        cali_type: Calibration type str.
        curve_label: legend label str for calibration curve.
        group_size: 1D array of group size ratios in [0, 1].
        score_mean: 1D array of metric means for group size ratios in group_size.
        score_stderr: 1D array of metric standard deviations for group size ratios in group_size.
        ax: matplotlib.axes.Axes object.

    Returns:
        matplotlib.axes.Axes object with plot added.
    """
    # Create ax if it doesn't exist
    if ax is None:
        fig, ax = plt.subplots(figsize=(7, 5))
    # Optionally select a random subset of n_subset points
    if n_subset is not None:
        [y_pred, y_std, y_true] = filter_subset([y_pred, y_std, y_true], n_subset)
    # Compute group_size, score_mean, score_stderr
    if (group_size is None) or (score_mean is None):
        # Curve data not supplied: compute adversarial group calibration from
        # the predictions.
        adv_group_cali_namespace = adversarial_group_calibration(
            y_pred, y_std, y_true, cali_type=cali_type
        )
        group_size = adv_group_cali_namespace.group_size
        score_mean = adv_group_cali_namespace.score_mean
        score_stderr = adv_group_cali_namespace.score_stderr
    else:
        # Curve data given by the caller; flatten and check that the three
        # arrays pair up one-to-one.
        group_size = np.array(group_size).flatten()
        score_mean = np.array(score_mean).flatten()
        score_stderr = np.array(score_stderr).flatten()
        if (group_size.shape != score_mean.shape) or (
            group_size.shape != score_stderr.shape
        ):
            raise RuntimeError(
                "Input arrays for adversarial group calibration shape mismatch"
            )
    # Set label
    if curve_label is None:
        curve_label = "Predictor"
    # Plot the mean curve with a +/- one-standard-error band.
    ax.plot(group_size, score_mean, "-o", label=curve_label, c="#1f77b4")
    ax.fill_between(
        group_size,
        score_mean - score_stderr,
        score_mean + score_stderr,
        alpha=0.2,
    )
    # Format plot; small margin so the curve is not clipped at the borders.
    buff = 0.02
    ax.set_xlim([0 - buff, 1 + buff])
    ax.set_ylim([0 - buff, 0.5 + buff])
    ax.set_xlabel("Group size")
    ax.set_ylabel("Calibration Error of Worst Group")
    ax.set_title("Adversarial Group Calibration")
    return ax
|
Plot adversarial group calibration plots by varying group size from 0% to 100% of
dataset size and recording the worst calibration occurred for each group size.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
n_subset: Number of points to plot after filtering.
cali_type: Calibration type str.
curve_label: legend label str for calibration curve.
group_size: 1D array of group size ratios in [0, 1].
score_mean: 1D array of metric means for group size ratios in group_size.
score_stderr: 1D array of metric standard deviations for group size ratios in group_size.
ax: matplotlib.axes.Axes object.
Returns:
matplotlib.axes.Axes object with plot added.
|
plot_adversarial_group_calibration
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
|
MIT
|
def plot_sharpness(
    y_std: np.ndarray,
    n_subset: Union[int, None] = None,
    ax: Union[matplotlib.axes.Axes, None] = None,
) -> matplotlib.axes.Axes:
    """Plot sharpness of the predictive uncertainties.

    Args:
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        n_subset: Number of points to plot after filtering.
        ax: matplotlib.axes.Axes object.

    Returns:
        matplotlib.axes.Axes object with plot added.
    """
    # Fall back to a fresh square figure when no axes were supplied.
    if ax is None:
        fig, ax = plt.subplots(figsize=(5, 5))
    # Optionally thin the data down to a random subset.
    if n_subset is not None:
        y_std = filter_subset([y_std], n_subset)[0]
    # Histogram of the predicted standard deviations.
    ax.hist(y_std, edgecolor="#1f77b4", color="#a5c8e1", density=True)
    # Formatting: tight x range, no y tick labels.
    x_bounds = (y_std.min(), y_std.max())
    ax.set_xlim(x_bounds)
    ax.set_xlabel("Predicted Standard Deviation")
    ax.set_ylabel("Normalized Frequency")
    ax.set_title("Sharpness")
    ax.set_yticklabels([])
    ax.set_yticks([])
    # Sharpness = root mean square of the predicted standard deviations.
    sharpness = np.sqrt(np.mean(y_std**2))
    ax.axvline(x=sharpness, label="sharpness", color="k", linewidth=2, ls="--")
    # Place the annotation on whichever side of the line has more room.
    if sharpness < (x_bounds[0] + x_bounds[1]) / 2:
        annotation = "\n Sharpness = %.2f" % sharpness
        h_align = "left"
    else:
        annotation = "\nSharpness = %.2f " % sharpness
        h_align = "right"
    ax.text(
        x=sharpness,
        y=ax.get_ylim()[1],
        s=annotation,
        verticalalignment="top",
        horizontalalignment=h_align,
        fontsize="small",
    )
    return ax
|
Plot sharpness of the predictive uncertainties.
Args:
y_std: 1D array of the predicted standard deviations for the held out dataset.
n_subset: Number of points to plot after filtering.
ax: matplotlib.axes.Axes object.
Returns:
matplotlib.axes.Axes object with plot added.
|
plot_sharpness
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
|
MIT
|
def plot_residuals_vs_stds(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    n_subset: Union[int, None] = None,
    ax: Union[matplotlib.axes.Axes, None] = None,
) -> matplotlib.axes.Axes:
    """Plot absolute value of the prediction residuals versus standard deviations of the
    predictive uncertainties.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        n_subset: Number of points to plot after filtering.
        ax: matplotlib.axes.Axes object.

    Returns:
        matplotlib.axes.Axes object with plot added.
    """
    # Fall back to a fresh square figure when no axes were supplied.
    if ax is None:
        fig, ax = plt.subplots(figsize=(5, 5))
    # Optionally thin the data down to a random subset.
    if n_subset is not None:
        y_pred, y_std, y_true = filter_subset([y_pred, y_std, y_true], n_subset)
    residuals = y_true - y_pred
    # Rescale the stds so their total matches the total absolute residual,
    # putting both axes on a comparable scale.
    abs_residual_total = np.sum(np.abs(residuals))
    scaled_stds = (y_std / np.sum(y_std)) * abs_residual_total
    # Scatter of |residual| against scaled std.
    scatter_handle = ax.plot(scaled_stds, np.abs(residuals), "o", c="#1f77b4")
    # Identity line spanning the union of both axis ranges.
    xlims = ax.get_xlim()
    ylims = ax.get_ylim()
    axis_lims = [np.min([xlims[0], ylims[0]]), np.max([xlims[1], ylims[1]])]
    diag_handle = ax.plot(axis_lims, axis_lims, "--", c="#ff7f0e")
    ax.legend([scatter_handle[0], diag_handle[0]], ["Predictions", "$f(x) = x$"], loc=4)
    # Formatting: square plot with matching x/y ranges.
    ax.set_xlabel("Standard Deviations (Scaled)")
    ax.set_ylabel("Residuals (Absolute Value)")
    ax.set_title("Residuals vs. Predictive Standard Deviations")
    ax.set_xlim(axis_lims)
    ax.set_ylim(axis_lims)
    ax.axis("square")
    return ax
|
Plot absolute value of the prediction residuals versus standard deviations of the
predictive uncertainties.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
n_subset: Number of points to plot after filtering.
ax: matplotlib.axes.Axes object.
Returns:
matplotlib.axes.Axes object with plot added.
|
plot_residuals_vs_stds
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
|
MIT
|
def filter_subset(input_list: List[List[Any]], n_subset: int) -> List[List[Any]]:
    """Keep the same n_subset random indices from every array in input_list.

    Note: despite the declared element type, each element of ``input_list``
    must support integer-array ("fancy") indexing, e.g. a 1D numpy array —
    plain Python lists would raise a TypeError.

    Args:
        input_list: list of equal-length indexable arrays (e.g. numpy arrays).
        n_subset: Number of points to keep; must not exceed the common length.

    Returns:
        List of the input arrays, each reduced to the same n_subset entries.
    """
    assert type(n_subset) is int
    n_total = len(input_list[0])
    # Sample indices without replacement, then sort so the surviving points
    # keep their original relative order (important for ordered plots).
    idx = np.sort(np.random.choice(range(n_total), n_subset, replace=False))
    return [inp[idx] for inp in input_list]
|
Keep only n_subset random indices from all lists given in input_list.
Args:
input_list: list of lists.
n_subset: Number of points to plot after filtering.
Returns:
List of all input lists with sizes reduced to n_subset.
|
filter_subset
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
|
MIT
|
def set_style(style_str: str = "default") -> NoReturn:
    """Set the matplotlib plotting style.

    Args:
        style_str: string for style file; only "default" (the bundled
            matplotlibrc next to this module) has an effect.
    """
    if style_str == "default":
        style_file = (pathlib.Path(__file__).parent / "matplotlibrc").resolve()
        plt.style.use(style_file)
|
Set the matplotlib plotting style.
Args:
style_str: string for style file.
|
set_style
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
|
MIT
|
def save_figure(
    file_name: str = "figure",
    ext_list: Union[list, str, None] = None,
    white_background: bool = True,
) -> NoReturn:
    """Save matplotlib figure for all extensions in ext_list.

    Args:
        file_name: name of saved image file (without extension).
        ext_list: list of strings (or single string) denoting file type.
        white_background: set background of image to white if True.
    """
    # Normalize ext_list to a list of extension strings.
    if ext_list is None:
        ext_list = ["pdf", "png"]
    elif isinstance(ext_list, str):
        ext_list = [ext_list]
    # "w" forces an opaque white canvas; "none" keeps transparency.
    if white_background:
        face_color = edge_color = "w"
    else:
        face_color = edge_color = "none"
    for ext in ext_list:
        save_str = file_name + "." + ext
        plt.savefig(save_str, bbox_inches="tight", facecolor=face_color, edgecolor=edge_color)
        print(f"Saved figure {save_str}")
|
Save matplotlib figure for all extensions in ext_list.
Args:
file_name: name of saved image file.
ext_list: list of strings (or single string) denoting file type.
white_background: set background of image to white if True.
|
save_figure
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
|
MIT
|
def image_transform(self, images, lm):
    """Align an image with its landmarks and convert both to tensors.

    Args:
        images: PIL image.
        lm: numpy array of 2D landmarks; all entries equal to -1 signal
            that no landmarks were detected.

    Returns:
        img: float32 tensor of shape (3, H', W') with values in [0, 1].
        trans_params: float32 tensor of the 5 alignment parameters
            produced by align_img.
    """
    W,H = images.size
    if np.mean(lm) == -1:
        # No detected landmarks: fall back to the standard 3D landmark
        # template, mapping its [-1, 1] coordinates to pixel coordinates.
        lm = (self.lm3d_std[:, :2]+1)/2.
        lm = np.concatenate(
            [lm[:, :1]*W, lm[:, 1:2]*H], 1
        )
    else:
        # Flip the vertical coordinate: image rows grow downward while the
        # landmark convention grows upward.
        lm[:, -1] = H - 1 - lm[:, -1]
    trans_params, img, lm, _ = align_img(images, lm, self.lm3d_std)
    # HWC uint8 image -> CHW float tensor scaled to [0, 1].
    img = torch.tensor(np.array(img)/255., dtype=torch.float32).permute(2, 0, 1)
    # align_img returns 5 scalar params; flatten them into a float vector.
    trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)])
    trans_params = torch.tensor(trans_params.astype(np.float32))
    return img, trans_params
|
param:
images: -- PIL image
lm: -- numpy array
|
image_transform
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/coeff_detector.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/coeff_detector.py
|
Apache-2.0
|
def __init__(self, opt):
    """Initialize the class; save the options in the class

    Parameters:
        opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
    """
    self.opt = opt
    # self.root = opt.dataroot
    # Epoch counter; starts at 0 and is advanced externally — TODO confirm caller.
    self.current_epoch = 0
|
Initialize the class; save the options in the class
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
|
__init__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/data/base_dataset.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/base_dataset.py
|
Apache-2.0
|
def default_flist_reader(flist):
    """Read an image-path list file into a list of stripped lines.

    flist format: impath label\nimpath label\n ...(same to caffe's filelist)

    Args:
        flist: path to a text file with one entry per line.

    Returns:
        List of stripped lines (image paths); empty lines become "".
    """
    with open(flist, 'r') as rf:
        return [entry.strip() for entry in rf]
|
flist format: impath label
impath label
...(same to caffe's filelist)
|
default_flist_reader
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/data/flist_dataset.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/flist_dataset.py
|
Apache-2.0
|
def __init__(self, opt):
    """Initialize this dataset class.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
    """
    BaseDataset.__init__(self, opt)
    # Standard 3D landmark template used later for face alignment.
    self.lm3d_std = load_lm3d(opt.bfm_folder)
    # opt.flist lists mask file names relative to opt.data_root.
    msk_names = default_flist_reader(opt.flist)
    self.msk_paths = [os.path.join(opt.data_root, i) for i in msk_names]
    self.size = len(self.msk_paths)
    self.opt = opt
    # Dataset tag ('train'/'val'), optionally suffixed with the flist
    # file name's prefix before the first '_'.
    self.name = 'train' if opt.isTrain else 'val'
    if '_' in opt.flist:
        self.name += '_' + opt.flist.split(os.sep)[-1].split('_')[0]
|
Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
|
__init__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/data/flist_dataset.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/flist_dataset.py
|
Apache-2.0
|
def __getitem__(self, index):
    """Return a data point and its metadata information.

    Parameters:
        index (int) -- a random integer for data indexing

    Returns a dictionary with:
        imgs (tensor)   -- an aligned face image
        lms (tensor)    -- its corresponding 3d landmarks
        msks (tensor)   -- its corresponding attention mask (single channel)
        M (tensor)      -- similarity transform estimated from the landmarks
        im_paths (str)  -- image path
        aug_flag (bool) -- whether the sample was augmented
        dataset (str)   -- dataset tag assigned in __init__
    """
    msk_path = self.msk_paths[index % self.size]  # make sure index is within the range
    # Image and landmark paths are derived from the mask path layout:
    # '<root>/mask/x.png' -> image '<root>/x.png',
    # landmarks '<root>/landmarks/x.txt'.
    img_path = msk_path.replace('mask/', '')
    lm_path = '.'.join(msk_path.replace('mask', 'landmarks').split('.')[:-1]) + '.txt'
    raw_img = Image.open(img_path).convert('RGB')
    raw_msk = Image.open(msk_path).convert('RGB')
    raw_lm = np.loadtxt(lm_path).astype(np.float32)
    # Align image, mask and landmarks to the standard landmark template.
    _, img, lm, msk = align_img(raw_img, raw_lm, self.lm3d_std, raw_msk)
    # Augment only during training and only when enabled via options.
    aug_flag = self.opt.use_aug and self.opt.isTrain
    if aug_flag:
        img, lm, msk = self._augmentation(img, lm, self.opt, msk)
    _, H = img.size
    M = estimate_norm(lm, H)
    transform = get_transform()
    img_tensor = transform(img)
    msk_tensor = transform(msk)[:1, ...]  # keep a single mask channel
    lm_tensor = parse_label(lm)
    M_tensor = parse_label(M)
    return {'imgs': img_tensor,
            'lms': lm_tensor,
            'msks': msk_tensor,
            'M': M_tensor,
            'im_paths': img_path,
            'aug_flag': aug_flag,
            'dataset': self.name}
|
Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
img (tensor) -- an image in the input domain
msk (tensor) -- its corresponding attention mask
lm (tensor) -- its corresponding 3d landmarks
im_paths (str) -- image paths
aug_flag (bool) -- a flag used to tell whether its raw or augmented
|
__getitem__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/data/flist_dataset.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/flist_dataset.py
|
Apache-2.0
|
def modify_commandline_options(parser, is_train):
    """Add new dataset-specific options, and rewrite default values for existing options.

    Parameters:
        parser -- original option parser
        is_train (bool) -- whether training phase or test phase. You can use
            this flag to add training-specific or test-specific options.

    Returns:
        the modified parser.
    """
    # Register the dataset-specific flag first, then let set_defaults
    # override both its default and a global option's default.
    parser.add_argument(
        '--new_dataset_option',
        type=float,
        default=1.0,
        help='new dataset option',
    )
    parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0)  # specify dataset-specific default values
    return parser
|
Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
|
modify_commandline_options
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/data/template_dataset.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/template_dataset.py
|
Apache-2.0
|
def __init__(self, opt):
    """Initialize this dataset class (template skeleton).

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions

    A few things can be done here.
    - save the options (have been done in BaseDataset)
    - get image paths and meta information of the dataset.
    - define the image transformation.
    """
    # save the option and dataset root
    BaseDataset.__init__(self, opt)
    # get the image paths of your dataset;
    self.image_paths = []  # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
    # define the default transform function. You can use <base_dataset.get_transform>; You can also define your custom transform function
    self.transform = get_transform(opt)
|
Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
A few things can be done here.
- save the options (have been done in BaseDataset)
- get image paths and meta information of the dataset.
- define the image transformation.
|
__init__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/data/template_dataset.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/template_dataset.py
|
Apache-2.0
|
def __getitem__(self, index):
    """Return a data point and its metadata information (template skeleton).

    Parameters:
        index -- a random integer for data indexing

    Returns:
        a dictionary of data with their names. It usually contains the data itself and its metadata information.

    Step 1: get a random image path: e.g., path = self.image_paths[index]
    Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
    Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
    Step 4: return a data point as a dictionary.
    """
    path = 'temp'    # needs to be a string
    data_A = None    # needs to be a tensor
    data_B = None    # needs to be a tensor
    return {'data_A': data_A, 'data_B': data_B, 'path': path}
|
Return a data point and its metadata information.
Parameters:
index -- a random integer for data indexing
Returns:
a dictionary of data with their names. It usually contains the data itself and its metadata information.
Step 1: get a random image path: e.g., path = self.image_paths[index]
Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
Step 4: return a data point as a dictionary.
|
__getitem__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/data/template_dataset.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/template_dataset.py
|
Apache-2.0
|
def find_dataset_using_name(dataset_name):
    """Import the module "data/[dataset_name]_dataset.py".

    In the file, the class called DatasetNameDataset() will be
    instantiated. It has to be a subclass of BaseDataset, and it is
    case-insensitive.
    """
    dataset_filename = "data." + dataset_name + "_dataset"
    datasetlib = importlib.import_module(dataset_filename)
    # Search the module namespace for a class whose lowercase name matches
    # '<datasetname>dataset' and that derives from BaseDataset; the last
    # matching attribute wins.
    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
    dataset = None
    for attr_name, attr in datasetlib.__dict__.items():
        if attr_name.lower() == target_dataset_name.lower() and issubclass(attr, BaseDataset):
            dataset = attr
    if dataset is None:
        raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
    return dataset
|
Import the module "data/[dataset_name]_dataset.py".
In the file, the class called DatasetNameDataset() will
be instantiated. It has to be a subclass of BaseDataset,
and it is case-insensitive.
|
find_dataset_using_name
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/data/__init__.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/__init__.py
|
Apache-2.0
|
def create_dataset(opt, rank=0):
    """Create a dataset given the option.

    This function wraps the class CustomDatasetDataLoader.
    This is the main interface between this package and 'train.py'/'test.py'

    Example:
        >>> from data import create_dataset
        >>> dataset = create_dataset(opt)
    """
    # Build the loader and immediately hand back its iterable dataset view.
    return CustomDatasetDataLoader(opt, rank=rank).load_data()
|
Create a dataset given the option.
This function wraps the class CustomDatasetDataLoader.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from data import create_dataset
>>> dataset = create_dataset(opt)
|
create_dataset
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/data/__init__.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/__init__.py
|
Apache-2.0
|
def __init__(self, opt, rank=0):
    """Initialize this class

    Step 1: create a dataset instance given the name [dataset_mode]
    Step 2: create a multi-threaded data loader.

    Parameters:
        opt (Option class) -- stores all the experiment flags
        rank (int)         -- process rank, used for distributed (DDP) sampling
    """
    self.opt = opt
    dataset_class = find_dataset_using_name(opt.dataset_mode)
    self.dataset = dataset_class(opt)
    self.sampler = None
    print("rank %d %s dataset [%s] was created" % (rank, self.dataset.name, type(self.dataset).__name__))
    if opt.use_ddp and opt.isTrain:
        # Distributed training: each process receives a disjoint shard of
        # the dataset; batch size and worker count are the global values
        # divided evenly across world_size processes.
        world_size = opt.world_size
        self.sampler = torch.utils.data.distributed.DistributedSampler(
            self.dataset,
            num_replicas=world_size,
            rank=rank,
            shuffle=not opt.serial_batches
        )
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            sampler=self.sampler,
            num_workers=int(opt.num_threads / world_size),
            batch_size=int(opt.batch_size / world_size),
            drop_last=True)
    else:
        # Single-process loading; shuffle only during training and only
        # when serial_batches is not requested.
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batch_size,
            shuffle=(not opt.serial_batches) and opt.isTrain,
            num_workers=int(opt.num_threads),
            drop_last=True
        )
|
Initialize this class
Step 1: create a dataset instance given the name [dataset_mode]
Step 2: create a multi-threaded data loader.
|
__init__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/data/__init__.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/__init__.py
|
Apache-2.0
|
def __init__(self, opt):
    """Initialize the BaseModel class.

    Parameters:
        opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions

    When creating your custom class, you need to implement your own initialization.
    In this function, you should first call <BaseModel.__init__(self, opt)>
    Then, you need to define four lists:
        -- self.loss_names (str list):          specify the training losses that you want to plot and save.
        -- self.model_names (str list):         define networks used in our training.
        -- self.visual_names (str list):        specify the images that you want to display and save.
        -- self.optimizers (optimizer list):    define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
    """
    self.opt = opt
    self.isTrain = opt.isTrain
    # NOTE(review): device is fixed to CPU here; GPU placement presumably
    # happens elsewhere — confirm against callers.
    self.device = torch.device('cpu')
    self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
    self.loss_names = []
    self.model_names = []
    self.visual_names = []
    self.parallel_names = []
    self.optimizers = []
    self.image_paths = []
    self.metric = 0  # used for learning rate policy 'plateau'
|
Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
When creating your custom class, you need to implement your own initialization.
In this function, you should first call <BaseModel.__init__(self, opt)>
Then, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): specify the images that you want to display and save.
-- self.visual_names (str list): define networks used in our training.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
|
__init__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/base_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
|
Apache-2.0
|
def setup(self, opt):
    """Load and print networks; create schedulers

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
    """
    if self.isTrain:
        # One learning-rate scheduler per optimizer, configured from opt.
        self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
    if not self.isTrain or opt.continue_train:
        # Resume training (or run inference) from the checkpoint of epoch opt.epoch.
        load_suffix = opt.epoch
        self.load_networks(load_suffix)
    # self.print_networks(opt.verbose)
|
Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
|
setup
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/base_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
|
Apache-2.0
|
def update_learning_rate(self):
    """Update learning rates for all the networks; called at the end of every epoch"""
    # 'plateau' schedulers need the tracked metric; every other policy
    # steps without arguments.
    needs_metric = self.opt.lr_policy == 'plateau'
    for sched in self.schedulers:
        if needs_metric:
            sched.step(self.metric)
        else:
            sched.step()
    lr = self.optimizers[0].param_groups[0]['lr']
    print('learning rate = %.7f' % lr)
|
Update learning rates for all the networks; called at the end of every epoch
|
update_learning_rate
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/base_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
|
Apache-2.0
|
def get_current_visuals(self):
    """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
    # Keep at most the first 3 channels of each tensor so it can be
    # rendered as an RGB image.
    return OrderedDict(
        (name, getattr(self, name)[:, :3, ...])
        for name in self.visual_names
        if isinstance(name, str)
    )
|
Return visualization images. train.py will display these images with visdom, and save the images to a HTML
|
get_current_visuals
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/base_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
|
Apache-2.0
|
def get_current_losses(self):
    """Return training losses / errors. train.py will print out these errors on console, and save them to a file"""
    # float(...) works for both scalar tensors and plain numbers.
    return OrderedDict(
        (name, float(getattr(self, 'loss_' + name)))
        for name in self.loss_names
        if isinstance(name, str)
    )
|
Return training losses / errors. train.py will print out these errors on console, and save them to a file
|
get_current_losses
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/base_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
|
Apache-2.0
|
def save_networks(self, epoch):
    """Save all the networks to the disk.

    Parameters:
        epoch (int) -- current epoch; used in the file name 'epoch_%s.pth' % epoch
    """
    os.makedirs(self.save_dir, exist_ok=True)
    save_path = os.path.join(self.save_dir, 'epoch_%s.pth' % (epoch))
    save_dict = {}
    # Store raw module weights, unwrapping any (Distributed)DataParallel shell.
    for name in self.model_names:
        if not isinstance(name, str):
            continue
        net = getattr(self, name)
        if isinstance(net, (torch.nn.DataParallel,
                            torch.nn.parallel.DistributedDataParallel)):
            net = net.module
        save_dict[name] = net.state_dict()
    # Optimizer / scheduler states allow exact training resumption.
    for i, optim in enumerate(self.optimizers):
        save_dict['opt_%02d' % i] = optim.state_dict()
    for i, sched in enumerate(self.schedulers):
        save_dict['sched_%02d' % i] = sched.state_dict()
    torch.save(save_dict, save_path)
|
Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
|
save_networks
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/base_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
|
Apache-2.0
|
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
    """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)

    Recursively follows the dotted key path `keys` through `module` and
    drops InstanceNorm buffer entries (running stats / batch counter)
    that old checkpoints contain but the current module does not track.

    Parameters:
        state_dict (dict)  -- checkpoint state dict, modified in place
        module (nn.Module) -- module the state dict is being loaded into
        keys (list of str) -- one parameter name split on '.'
        i (int)            -- index of the current path component
    """
    key = keys[i]
    if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
        # Drop running stats only when the module itself holds none
        # (track_running_stats=False), otherwise loading would fail.
        if module.__class__.__name__.startswith('InstanceNorm') and \
                (key == 'running_mean' or key == 'running_var'):
            if getattr(module, key) is None:
                state_dict.pop('.'.join(keys))
        # num_batches_tracked never existed for these InstanceNorm layers.
        if module.__class__.__name__.startswith('InstanceNorm') and \
                (key == 'num_batches_tracked'):
            state_dict.pop('.'.join(keys))
    else:
        # Descend one level into the child module named by this component.
        self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
|
Fix InstanceNorm checkpoints incompatibility (prior to 0.4)
|
__patch_instance_norm_state_dict
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/base_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
|
Apache-2.0
|
def load_networks(self, epoch):
    """Load all the networks from the disk.

    Restores the weights of every network in self.model_names and, when
    training is being continued, the optimizer and scheduler states too.

    Parameters:
        epoch (int) -- current epoch; used in the file name 'epoch_%s.pth' % epoch
    """
    # When fine-tuning from a pretrained run, load from its checkpoint dir.
    if self.opt.isTrain and self.opt.pretrained_name is not None:
        load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
    else:
        load_dir = self.save_dir
    load_filename = 'epoch_%s.pth' % (epoch)
    load_path = os.path.join(load_dir, load_filename)
    state_dict = torch.load(load_path, map_location=self.device)
    print('loading the model from %s' % load_path)
    for name in self.model_names:
        if isinstance(name, str):
            net = getattr(self, name)
            if isinstance(net, torch.nn.DataParallel):
                net = net.module
            net.load_state_dict(state_dict[name])
    if self.opt.phase != 'test':
        if self.opt.continue_train:
            print('loading the optim from %s' % load_path)
            for i, optim in enumerate(self.optimizers):
                optim.load_state_dict(state_dict['opt_%02d'%i])
            try:
                print('loading the sched from %s' % load_path)
                for i, sched in enumerate(self.schedulers):
                    sched.load_state_dict(state_dict['sched_%02d'%i])
            # Fix: a bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; only recover from real errors (e.g. checkpoints
            # without scheduler entries).
            except Exception:
                print('Failed to load schedulers, set schedulers according to epoch count manually')
                for i, sched in enumerate(self.schedulers):
                    sched.last_epoch = self.opt.epoch_count - 1
|
Load all the networks from the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
|
load_networks
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/base_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
|
Apache-2.0
|
def print_networks(self, verbose):
    """Print the total number of parameters in the network and (if verbose) network architecture

    Parameters:
        verbose (bool) -- if verbose: print the network architecture
    """
    print('---------- Networks initialized -------------')
    for name in self.model_names:
        if not isinstance(name, str):
            continue
        net = getattr(self, name)
        # Total element count across all parameter tensors.
        num_params = sum(param.numel() for param in net.parameters())
        if verbose:
            print(net)
        print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
    print('-----------------------------------------------')
|
Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
|
print_networks
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/base_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
|
Apache-2.0
|
def set_requires_grad(self, nets, requires_grad=False):
    """Set requires_grad=False for all the networks to avoid unnecessary computations

    Parameters:
        nets (network list) -- a list of networks (a single network is also accepted)
        requires_grad (bool) -- whether the networks require gradients or not
    """
    nets = nets if isinstance(nets, list) else [nets]
    for net in nets:
        if net is None:
            continue
        for param in net.parameters():
            param.requires_grad = requires_grad
|
Set requires_grad=False for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
|
set_requires_grad
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/base_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
|
Apache-2.0
|
def compute_shape(self, id_coeff, exp_coeff):
    """
    Return:
        face_shape      -- torch.tensor, size (B, N, 3)

    Parameters:
        id_coeff        -- torch.tensor, size (B, 80), identity coeffs
        exp_coeff       -- torch.tensor, size (B, 64), expression coeffs
    """
    n_batch = id_coeff.shape[0]
    # Linear morphable model: mean shape plus identity and expression offsets.
    offsets = torch.einsum('ij,aj->ai', self.id_base, id_coeff) \
        + torch.einsum('ij,aj->ai', self.exp_base, exp_coeff)
    face_shape = offsets + self.mean_shape.reshape([1, -1])
    return face_shape.reshape([n_batch, -1, 3])
|
Return:
face_shape -- torch.tensor, size (B, N, 3)
Parameters:
id_coeff -- torch.tensor, size (B, 80), identity coeffs
exp_coeff -- torch.tensor, size (B, 64), expression coeffs
|
compute_shape
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/bfm.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
|
Apache-2.0
|
def compute_texture(self, tex_coeff, normalize=True):
    """
    Return:
        face_texture     -- torch.tensor, size (B, N, 3), in RGB order, range (0, 1.)

    Parameters:
        tex_coeff        -- torch.tensor, size (B, 80)
    """
    n_batch = tex_coeff.shape[0]
    # Linear texture model around the mean texture (8-bit color values).
    face_texture = self.mean_tex + torch.einsum('ij,aj->ai', self.tex_base, tex_coeff)
    if normalize:
        # Map 0..255 color values into (0, 1).
        face_texture = face_texture / 255.
    return face_texture.reshape([n_batch, -1, 3])
|
Return:
face_texture -- torch.tensor, size (B, N, 3), in RGB order, range (0, 1.)
Parameters:
tex_coeff -- torch.tensor, size (B, 80)
|
compute_texture
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/bfm.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
|
Apache-2.0
|
def compute_norm(self, face_shape):
    """
    Return:
        vertex_norm      -- torch.tensor, size (B, N, 3)

    Parameters:
        face_shape       -- torch.tensor, size (B, N, 3)
    """
    # Gather the three corner vertices of every triangle.
    v1 = face_shape[:, self.face_buf[:, 0]]
    v2 = face_shape[:, self.face_buf[:, 1]]
    v3 = face_shape[:, self.face_buf[:, 2]]
    # Per-face normal from two triangle edges, unit length.
    face_norm = F.normalize(torch.cross(v1 - v2, v2 - v3, dim=-1), dim=-1, p=2)
    # Append one zero normal so padded face indices in point_buf
    # contribute nothing to the per-vertex sum.
    zero_pad = torch.zeros(face_norm.shape[0], 1, 3).to(self.device)
    face_norm = torch.cat([face_norm, zero_pad], dim=1)
    # Sum the normals of the faces around each vertex, then renormalize.
    vertex_norm = torch.sum(face_norm[:, self.point_buf], dim=2)
    return F.normalize(vertex_norm, dim=-1, p=2)
|
Return:
vertex_norm -- torch.tensor, size (B, N, 3)
Parameters:
face_shape -- torch.tensor, size (B, N, 3)
|
compute_norm
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/bfm.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
|
Apache-2.0
|
def compute_color(self, face_texture, face_norm, gamma):
    """
    Return:
        face_color       -- torch.tensor, size (B, N, 3), range (0, 1.)

    Parameters:
        face_texture     -- torch.tensor, size (B, N, 3), from texture model, range (0, 1.)
        face_norm        -- torch.tensor, size (B, N, 3), rotated face normal
        gamma            -- torch.tensor, size (B, 27), SH coeffs
    """
    batch_size = gamma.shape[0]
    v_num = face_texture.shape[1]  # NOTE(review): unused in this function
    a, c = self.SH.a, self.SH.c
    # 27 coefficients = 9 spherical-harmonics terms per RGB channel,
    # offset by the default lighting init_lit.
    gamma = gamma.reshape([batch_size, 3, 9])
    gamma = gamma + self.init_lit
    gamma = gamma.permute(0, 2, 1)
    # Evaluate the 9 SH basis functions at every vertex normal
    # (order/signs follow the a/c constants from self.SH).
    Y = torch.cat([
        a[0] * c[0] * torch.ones_like(face_norm[..., :1]).to(self.device),
        -a[1] * c[1] * face_norm[..., 1:2],
        a[1] * c[1] * face_norm[..., 2:],
        -a[1] * c[1] * face_norm[..., :1],
        a[2] * c[2] * face_norm[..., :1] * face_norm[..., 1:2],
        -a[2] * c[2] * face_norm[..., 1:2] * face_norm[..., 2:],
        0.5 * a[2] * c[2] / np.sqrt(3.) * (3 * face_norm[..., 2:] ** 2 - 1),
        -a[2] * c[2] * face_norm[..., :1] * face_norm[..., 2:],
        0.5 * a[2] * c[2] * (face_norm[..., :1] ** 2 - face_norm[..., 1:2] ** 2)
    ], dim=-1)
    # Per-channel shading = SH basis @ channel coefficients; the shading
    # then modulates the albedo texture.
    r = Y @ gamma[..., :1]
    g = Y @ gamma[..., 1:2]
    b = Y @ gamma[..., 2:]
    face_color = torch.cat([r, g, b], dim=-1) * face_texture
    return face_color
|
Return:
face_color -- torch.tensor, size (B, N, 3), range (0, 1.)
Parameters:
face_texture -- torch.tensor, size (B, N, 3), from texture model, range (0, 1.)
face_norm -- torch.tensor, size (B, N, 3), rotated face normal
gamma -- torch.tensor, size (B, 27), SH coeffs
|
compute_color
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/bfm.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
|
Apache-2.0
|
def compute_rotation(self, angles):
    """
    Return:
        rot              -- torch.tensor, size (B, 3, 3) pts @ trans_mat

    Parameters:
        angles           -- torch.tensor, size (B, 3), radian
    """
    n_batch = angles.shape[0]
    one = torch.ones([n_batch, 1]).to(self.device)
    zero = torch.zeros([n_batch, 1]).to(self.device)
    x = angles[:, :1]
    y = angles[:, 1:2]
    z = angles[:, 2:]
    cx, sx = torch.cos(x), torch.sin(x)
    cy, sy = torch.cos(y), torch.sin(y)
    cz, sz = torch.cos(z), torch.sin(z)
    # Elementary rotation matrices about the x, y and z axes.
    rot_x = torch.cat([
        one, zero, zero,
        zero, cx, -sx,
        zero, sx, cx
    ], dim=1).reshape([n_batch, 3, 3])
    rot_y = torch.cat([
        cy, zero, sy,
        zero, one, zero,
        -sy, zero, cy
    ], dim=1).reshape([n_batch, 3, 3])
    rot_z = torch.cat([
        cz, -sz, zero,
        sz, cz, zero,
        zero, zero, one
    ], dim=1).reshape([n_batch, 3, 3])
    rot = rot_z @ rot_y @ rot_x
    # Transpose so row-vector points can be transformed as pts @ rot.
    return rot.permute(0, 2, 1)
|
Return:
rot -- torch.tensor, size (B, 3, 3) pts @ trans_mat
Parameters:
angles -- torch.tensor, size (B, 3), radian
|
compute_rotation
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/bfm.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
|
Apache-2.0
|
def to_image(self, face_shape):
    """
    Return:
        face_proj        -- torch.tensor, size (B, N, 2), y direction is opposite to v direction

    Parameters:
        face_shape       -- torch.tensor, size (B, N, 3)
    """
    # Apply the perspective projection matrix, then divide by depth
    # to land on the image plane.
    projected = face_shape @ self.persc_proj
    return projected[..., :2] / projected[..., 2:]
|
Return:
face_proj -- torch.tensor, size (B, N, 2), y direction is opposite to v direction
Parameters:
face_shape -- torch.tensor, size (B, N, 3)
|
to_image
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/bfm.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.