repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
mn-bab-SABR_ready | mn-bab-SABR_ready/src/utilities/bilinear_interpolator.py | from typing import Dict, List, Tuple
import dill # type: ignore[import]
import torch
from torch import Tensor
class BilinearInterpol:
    """Bilinear interpolation over two precomputed regular-grid lookup tables.

    Values are stored on a regular (lb, ub) grid at two resolutions: a fine
    "inner" table covering [-inner_range, inner_range] and a coarser "outer"
    table covering [-outer_range, outer_range]. Queries outside both ranges
    fall back to the midpoint (lb + ub) / 2.
    """

    def __init__(
        self,
        inner: Dict[Tuple[float, float], float],
        outer: Dict[Tuple[float, float], float],
        inner_range: float,
        outer_range: float,
        inner_res: float,
        outer_res: float,
    ) -> None:
        self.inner = inner
        self.outer = outer
        self.inner_range = inner_range
        self.outer_range = outer_range
        self.inner_res = inner_res
        self.outer_res = outer_res

    def get_value(self, lb: Tensor, ub: Tensor) -> Tensor:
        """Return the bilinearly interpolated table value at (lb, ub).

        :param lb: scalar (0-dim) tensor, lower query coordinate
        :param ub: scalar (0-dim) tensor, upper query coordinate
        :returns: interpolated value; falls back to (lb + ub) / 2 when the
            query is out of range or both coordinates lie within one grid cell
        """
        # Pick the finest table that covers the query point.
        if abs(lb) < self.inner_range and abs(ub) < self.inner_range:
            res = self.inner_res
            tb = self.inner
        elif abs(lb) < self.outer_range and abs(ub) < self.outer_range:
            res = self.outer_res
            tb = self.outer
        else:  # At this point it is numerically irrelevant what we return
            return (lb + ub) / 2
        # Snap (lb, ub) down to the enclosing grid cell corners.
        lb_low = (float(lb) / res) // 1 * res
        ub_low = (float(ub) / res) // 1 * res
        lb_high = lb_low + res
        ub_high = ub_low + res
        if ub_low <= lb_high:  # In one resolution interval
            return (lb + ub) / 2
        else:
            ll = tb[(lb_low, ub_low)]
            lh = tb[(lb_low, ub_high)]
            hl = tb[(lb_high, ub_low)]
            hh = tb[(lb_high, ub_high)]
            # Interpolate along the lb axis first, then along the ub axis.
            r1 = (lb_high - lb) / res * ll + (lb - lb_low) / res * hl
            r2 = (lb_high - lb) / res * lh + (lb - lb_low) / res * hh
            y = (ub_high - ub) / res * r1 + (ub - ub_low) / res * r2
            return y

    @classmethod
    def create_from_data(
        cls,
        inner_values: List[Tuple[float, float, float]],
        outer_values: List[Tuple[float, float, float]],
        inner_range: float,
        outer_range: float,
        inner_res: float,
        outer_res: float,
    ) -> "BilinearInterpol":
        """Build an interpolator from (lb, ub, value) triples.

        Diagonal entries (v, v) -> v are added for both tables on top of the
        provided data points.
        """
        inner_dict: Dict[Tuple[float, float], float] = {}
        outer_dict: Dict[Tuple[float, float], float] = {}
        for lb, ub, c in inner_values:
            inner_dict[(float(lb), float(ub))] = float(c)
        for val in torch.linspace(
            -inner_range, inner_range, int(2 * inner_range / inner_res) + 1
        ):
            fv = float(val)
            inner_dict[(fv, fv)] = fv
        for lb, ub, c in outer_values:
            outer_dict[(float(lb), float(ub))] = float(c)
        # BUGFIX: the diagonal grid must be spaced by outer_res; the original
        # divided outer_range by itself, always yielding only 3 grid points.
        for val in torch.linspace(
            -outer_range, outer_range, int(2 * outer_range / outer_res) + 1
        ):
            fv = float(val)
            outer_dict[(fv, fv)] = fv
        return cls(
            inner_dict, outer_dict, inner_range, outer_range, inner_res, outer_res
        )

    @classmethod
    def load_from_path(cls, path: str) -> "BilinearInterpol":
        """Deserialize an interpolator from ``path`` (dill pickle).

        :raises RuntimeError: if loading fails for any reason
        """
        try:
            with open(path, "rb") as file:
                interpol: "BilinearInterpol" = dill.load(file)
            return interpol
        except BaseException as e:
            print(f"Encountered exception during load {str(e)}")
            # Preserve the original exception as the cause for debuggability.
            raise RuntimeError("Failed to load BilinearInterpol") from e

    def store_to_path(self, path: str) -> None:
        """Serialize this interpolator to ``path`` (best-effort; errors are
        logged but not raised, matching previous behavior)."""
        try:
            with open(path, "wb") as file:
                dill.dump(self, file)
        except BaseException as e:
            print(f"Encountered exception during store {str(e)}")
| 3,417 | 33.18 | 82 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/utilities/prima_interface.py | from typing import Callable, List, Sequence, Tuple
import numpy as np
import torch
from torch import Tensor
from src.utilities.config import PrimaHyperparameters
from src.utilities.prima_util import ActivationType, KAct, encode_kactivation_cons
def get_prima_constraints(
    input_lb: Tensor,
    input_ub: Tensor,
    activation_type: ActivationType,
    prima_hyperparameters: PrimaHyperparameters,
    intermediate_bounds_callback: Callable[[Tensor], Tuple[Tensor, Tensor]],
    batch_size: int,
    layer_shape: Tuple[int, ...],
) -> Tuple[Tensor, Tensor, Tensor]:
    """
    Compute PRIMA multi-neuron constraints of the form:
    output_var_coefs @ layer_output + input_var_coefs @ layer_input + const_coefs @ 1 <= 0

    :param input_lb: input lower bounds for neurons in current layer
    :param input_ub: input upper bounds for neurons in current layer
    :returns:
        output_var_coefs
        input_var_coefs
        const_coefs
    """
    hp = prima_hyperparameters
    batch_prima_constraints = encode_kactivation_cons(
        input_lb=input_lb,
        input_ub=input_ub,
        activation_type=activation_type,
        # TODO: just pass through prima_hyperparameters
        sparse_n=hp.sparse_n,
        intermediate_bounds_callback=intermediate_bounds_callback,
        K=hp.K,
        s=hp.s,
        approx=True,
        numproc=hp.num_proc_to_compute_constraints,
        max_number_of_parallel_input_constraint_queries=hp.max_number_of_parallel_input_constraint_queries,
        max_unstable_nodes_considered_per_layer=hp.max_unstable_nodes_considered_per_layer,
        min_relu_transformer_area_to_be_considered=hp.min_relu_transformer_area_to_be_considered,
        fraction_of_constraints_to_keep=hp.fraction_of_constraints_to_keep,
        random_prima_groups=hp.random_prima_groups,
        prima_sparsity_factor=hp.prima_sparsity_factor,
    )
    # The result is considered empty unless at least one constraint group of
    # some batch element carries a constraint row.
    has_any_constraint = batch_prima_constraints is not None and any(
        kact.cons.shape[0] > 0
        for batch_element in batch_prima_constraints
        for kact in batch_element
    )
    if not batch_prima_constraints or not has_any_constraint:
        # No constraints: return zero-width coefficient tensors.
        n_neurons = np.prod(layer_shape)
        output_coefs = torch.zeros(batch_size, n_neurons, 0)
        input_coefs = torch.zeros(batch_size, n_neurons, 0)
        const_coefs = torch.zeros(batch_size, 1, 0)
        return output_coefs, input_coefs, const_coefs
    return _build_sparse_prima_coefficient_matrix(batch_prima_constraints, layer_shape)
def _refine_bounds_for_candidate_unstable_neurons(
    lb: Tensor,
    ub: Tensor,
    batch_size: int,
    layer_shape: Tuple[int, ...],
    intermediate_bounds_callback: Callable[[Tensor], Tuple[Tensor, Tensor]],
) -> Tuple[Tensor, Tensor]:
    """Tighten bounds of potentially unstable neurons (lb < 0 < ub).

    For every neuron whose interval straddles zero, a one-hot query (with
    coefficient -1) is sent through ``intermediate_bounds_callback`` to
    recompute its bounds; the result is intersected with the old interval.

    :param lb: lower bounds, batch dimension leading
    :param ub: upper bounds, batch dimension leading
    :param batch_size: number of batch elements in lb/ub
    :param layer_shape: per-batch-element shape of the layer
    :param intermediate_bounds_callback: maps a query-coefficient tensor to a
        pair of bound tensors for the queried linear expressions
    :returns: (refined_lb, refined_ub), same shapes as the inputs
    """
    candidate_unstable_neuron_indices = torch.nonzero(
        (lb < 0) & (ub > 0), as_tuple=True
    )
    if candidate_unstable_neuron_indices[0].numel() == 0:
        # No unstable candidates anywhere in the batch: nothing to refine.
        return lb, ub
    n_unstable_neurons_per_batch_element = torch.bincount(
        candidate_unstable_neuron_indices[0]
    )
    # Enumerate each batch element's candidates 0..k-1 so they can be packed
    # into a dense (batch, max_k, *layer_shape) query tensor.
    indices_in_batch_element = [
        index
        for n_unstable_neurons in n_unstable_neurons_per_batch_element
        for index in np.arange(n_unstable_neurons)
    ]
    candidate_unstable_neuron_indices_in_batch = (
        candidate_unstable_neuron_indices[0],
        indices_in_batch_element,
        *candidate_unstable_neuron_indices[1:],
    )
    query_coef = torch.zeros(  # type: ignore[call-overload]
        batch_size,
        torch.max(n_unstable_neurons_per_batch_element),
        *layer_shape,
    )
    # One-hot queries with coefficient -1; the sign is undone further below.
    query_coef[candidate_unstable_neuron_indices_in_batch] = -1
    # only do number_of_nodes_in_starting_layer many queries at a time
    number_of_queries = query_coef.shape[1]
    number_of_nodes_in_starting_layer = np.prod(query_coef.shape[2:])
    lb_with_other_params = torch.zeros(batch_size, number_of_queries)
    ub_with_other_params = torch.zeros(batch_size, number_of_queries)
    offset = 0
    while offset < number_of_queries:
        query_coef_slice = query_coef[
            :, offset : offset + number_of_nodes_in_starting_layer, :
        ]
        # NOTE(review): the callback's return pair is unpacked as (ub, lb)
        # here -- confirm this ordering against its implementation.
        (
            intermediate_ub_with_other_params,
            intermediate_lb_with_other_params,
        ) = intermediate_bounds_callback(query_coef_slice)
        lb_with_other_params[
            :, offset : offset + number_of_nodes_in_starting_layer
        ] = intermediate_lb_with_other_params
        ub_with_other_params[
            :, offset : offset + number_of_nodes_in_starting_layer
        ] = intermediate_ub_with_other_params
        offset += number_of_nodes_in_starting_layer
    candidate_unstable_neuron_indices_in_resulting_bounds = (
        candidate_unstable_neuron_indices[0],
        indices_in_batch_element,
    )
    # Multiply by -1 to compensate for the -1 query coefficient above.
    lb_with_other_params = (
        -1
        * lb_with_other_params[candidate_unstable_neuron_indices_in_resulting_bounds]
        .detach()
        .cpu()
    )
    ub_with_other_params = (
        -1
        * ub_with_other_params[candidate_unstable_neuron_indices_in_resulting_bounds]
        .detach()
        .cpu()
    )
    refined_lb = lb.clone()
    refined_ub = ub.clone()
    # Keep the tighter of the old and the recomputed bounds.
    refined_lb[candidate_unstable_neuron_indices] = torch.maximum(
        lb[candidate_unstable_neuron_indices], lb_with_other_params
    )
    refined_ub[candidate_unstable_neuron_indices] = torch.minimum(
        ub[candidate_unstable_neuron_indices], ub_with_other_params
    )
    return refined_lb, refined_ub
def _build_sparse_prima_coefficient_matrix(
    batch_prima_constraints: Sequence[Sequence[KAct]],
    layer_shape: Tuple[int, ...],
) -> Tuple[Tensor, Tensor, Tensor]:
    """Assemble grouped PRIMA constraints into (sparse) coefficient tensors.

    Each ``KAct`` group carries a constraint matrix ``cons`` whose columns are
    laid out as: column 0 the constant term, columns 1..group_size the input
    coefficients, and columns group_size+1.. the output coefficients of the
    group's neurons (``varsid``).

    :returns: (output_coefs, input_coefs, const_coefs); the first two are
        sparse COO tensors of shape (batch, prod(layer_shape), n_constraints),
        the last is dense of shape (batch, 1, n_constraints). All signs are
        flipped so the constraints read "<= 0".
    """
    batch_indices: List[int] = []
    indices_within_batch_element: List[int] = []
    indices_within_layer: List[int] = []
    output_coef_values: List[float] = []
    input_coef_values: List[float] = []
    batch_size = len(batch_prima_constraints)
    # Pad every batch element to the largest constraint count in the batch.
    max_number_of_prima_constraints = max(
        sum(constraint_group.cons.shape[0] for constraint_group in prima_constraints)
        for prima_constraints in batch_prima_constraints
    )
    const_coefs = torch.zeros(batch_size, 1, max_number_of_prima_constraints)
    for batch_index, prima_constraints in enumerate(batch_prima_constraints):
        n_prima_coefficients_in_batch_element = sum(
            len(constraint_group.varsid) * constraint_group.cons.shape[0]  # type: ignore
            for constraint_group in prima_constraints
        )
        batch_indices += [
            batch_index for __ in range(n_prima_coefficients_in_batch_element)
        ]
        offset = 0
        for constraint_group in prima_constraints:
            n_prima_constraints_in_group = constraint_group.cons.shape[0]
            node_indices = constraint_group.varsid
            group_size = len(node_indices)  # type: ignore
            # Each constraint row contributes one sparse entry per group neuron.
            indices_within_batch_element += (
                np.arange(offset, offset + n_prima_constraints_in_group)
                .repeat(group_size)
                .tolist()
            )
            indices_within_layer += np.tile(
                node_indices, n_prima_constraints_in_group
            ).tolist()
            output_coef_values += (
                constraint_group.cons[:, group_size + 1 :].ravel(order="C").tolist()
            )
            input_coef_values += (
                constraint_group.cons[:, 1 : group_size + 1].ravel(order="C").tolist()
            )
            const_coefs_of_group = torch.tensor(constraint_group.cons[:, 0])
            const_coefs[
                batch_index, 0, offset : offset + n_prima_constraints_in_group
            ] = const_coefs_of_group
            offset += n_prima_constraints_in_group
    full_indices_of_non_zero_elements = [
        batch_indices,
        indices_within_layer,
        indices_within_batch_element,
    ]
    dense_coefs_shape = (
        batch_size,
        np.prod(layer_shape),
        max_number_of_prima_constraints,
    )
    output_coefs = torch.sparse_coo_tensor(
        full_indices_of_non_zero_elements, output_coef_values, size=dense_coefs_shape  # type: ignore[arg-type]
    )
    input_coefs = torch.sparse_coo_tensor(
        full_indices_of_non_zero_elements, input_coef_values, size=dense_coefs_shape  # type: ignore[arg-type]
    )

    def _eliminate_zeros(x: Tensor) -> Tensor:
        # Drop explicitly stored zero values so the sparse tensors stay minimal.
        assert x.is_sparse
        mask = x._values().nonzero()
        non_zero_values = x._values().index_select(0, mask.view(-1))
        indices_of_non_zero_values = x._indices().index_select(1, mask.view(-1))
        return torch.sparse_coo_tensor(
            indices_of_non_zero_values, non_zero_values, x.shape
        )

    output_coefs = _eliminate_zeros(output_coefs).coalesce()
    input_coefs = _eliminate_zeros(input_coefs).coalesce()
    # ELINA computes constraints s.t. coefs * vars >= 0, we want <= 0
    return (-1) * output_coefs, (-1) * input_coefs, (-1) * const_coefs
| 9,343 | 37.138776 | 126 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/utilities/general.py | import functools
import itertools
from typing import Callable, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from src.utilities.config import DomainSplittingConfig
def all_larger_equal(seq: Union[Sequence, Tensor], threshold: float) -> bool:
    """Return True iff every element of ``seq`` is >= ``threshold``."""
    for element in seq:
        if not (element >= threshold):
            return False
    return True
def any_smaller(seq: Union[Sequence, Tensor], threshold: float) -> bool:
    """Return True iff at least one element of ``seq`` is < ``threshold``."""
    for element in seq:
        if element < threshold:
            return True
    return False
def get_neg_pos_comp(x: Tensor) -> Tuple[Tensor, Tensor]:
    """Split ``x`` elementwise into its negative part and its non-negative part.

    :returns: (neg, pos) with neg + pos == x; zeros go to the positive part.
    """
    zeros = torch.zeros_like(x)
    neg_comp = torch.where(x < 0, x, zeros)
    pos_comp = torch.where(x >= 0, x, zeros)
    return neg_comp, pos_comp
def property_matrix_from_properties(
    properties_to_verify: List[List[Tuple[int, int, float]]],
    n_class: int,
    device: torch.device,
    dtype: torch.dtype,
) -> Tuple[Tensor, Tensor, Tensor]:
    """Encode a CNF-style property list as tensors.

    Each inner list is an OR-clause of (gt_index, lt_index, threshold)
    constraints; an index of -1 disables that side of the constraint (for
    gt_index == -1 the threshold is negated instead).

    :returns: (property_matrix, property_threshold, combination_matrix), each
        with a leading batch dimension of 1; combination_matrix marks which
        unique constraints belong to which clause.
    """
    unique_constraints = list(
        set(itertools.chain.from_iterable(properties_to_verify))
    )
    constraint_row = {c: i for i, c in enumerate(unique_constraints)}
    n_constraints = len(unique_constraints)
    property_matrix = torch.zeros((n_constraints, n_class), device=device, dtype=dtype)
    property_threshold = torch.zeros((n_constraints,), device=device, dtype=dtype)
    combination_matrix = torch.zeros(
        (len(properties_to_verify), n_constraints), device=device, dtype=dtype
    )
    for (gt_idx, lt_idx, thresh), row in constraint_row.items():
        if gt_idx != -1:
            property_matrix[row, gt_idx] = 1
            property_threshold[row] = torch.as_tensor(thresh)
        else:
            property_threshold[row] = -torch.as_tensor(thresh)
        if lt_idx != -1:
            property_matrix[row, lt_idx] = -1
    for clause_idx, clause in enumerate(properties_to_verify):
        for constraint in clause:
            combination_matrix[clause_idx, constraint_row[constraint]] = 1
    return (
        property_matrix.unsqueeze(0),
        property_threshold.unsqueeze(0),
        combination_matrix.unsqueeze(0),
    )
def update_propertiy_matrices(
    verified: Tensor,
    falsified: Tensor,
    property_matrix: Tensor,
    property_threshold: Tensor,
    combination_matrix: Tensor,
    true_ub: bool,
) -> Tuple[Tensor, Tensor]:
    """Mark verified/falsified constraints in-place and report clause status.

    ``property_matrix`` and ``property_threshold`` are mutated: verified
    constraints are zeroed out with threshold -1 (and, when ``true_ub`` holds,
    falsified constraints with threshold 1).

    :param true_ub: whether the falsification bounds come from actual points
        (only then can multi-constraint clauses be declared falsified)
    :returns: (all clauses verified per batch element,
               any clause falsified per batch element)
    """
    verified_f = verified.to(combination_matrix.dtype)
    falsified_f = falsified.to(combination_matrix.dtype)
    # A clause (OR of constraints) is verified if any member constraint is.
    clause_verified = (
        torch.einsum("bij,bj -> bi", combination_matrix, verified_f) >= 1
    )
    # A clause is falsified only if every member constraint is.
    clause_falsified = (
        torch.einsum("bij,bj -> bi", combination_matrix, falsified_f)
        == combination_matrix.sum(-1)
    )
    if not true_ub:
        # Different or clauses might have been falsified for different points
        clause_falsified = torch.where(
            combination_matrix.sum(-1) == 1,
            clause_falsified,
            torch.zeros_like(clause_falsified),
        )
    assert not (clause_falsified & clause_verified).any()
    property_matrix[verified] = 0
    property_threshold[verified] = -1
    if true_ub:
        property_matrix[falsified] = 0
        property_threshold[falsified] = 1
    return clause_verified.all(1), clause_falsified.any(1)
def compute_initial_splits(
    input_lb: Tensor,
    input_ub: Tensor,
    property_matrix: Tensor,
    property_threshold: Tensor,
    combination_matrix: Tensor,
    domain_splitting: DomainSplittingConfig,
) -> List[
    Tuple[
        Tensor,
        Tensor,
        Tuple[Tensor, Tensor, Tensor],
        int,
        Optional[Sequence[Sequence[Tuple[int, int, float]]]],
    ]
]:
    """Seed the branch-and-bound queue with initial input-domain splits.

    Splits the input box along the configured (or all non-degenerate)
    dimensions, capping the number of splits per dimension so that the total
    stays near the configured batch size.

    :returns: one queue entry (lb, ub, property tensors, max_depth, None) per
        generated sub-region
    """
    non_degenerate = (input_lb - input_ub).abs() > 1e-6
    # Cap splits per dimension so total regions ~ batch_size.
    n_splits = min(
        domain_splitting.initial_splits,
        int(domain_splitting.batch_size ** (1 / non_degenerate.sum()) + 0.5),
    )
    split_dims = domain_splitting.initial_split_dims.copy()
    if domain_splitting.initial_splits > 0:
        if not split_dims:
            # Default: split along every non-degenerate input dimension.
            split_dims = non_degenerate.flatten().nonzero().flatten().tolist()
        regions = split_input_regions(
            [(input_lb, input_ub)],
            dim=split_dims,
            splits=[n_splits] * (input_lb.shape[-1]),
        )
    else:
        regions = [(input_lb, input_ub)]
    constraints = (property_matrix, property_threshold, combination_matrix)
    return [
        (region_lb, region_ub, constraints, domain_splitting.max_depth, None)
        for region_lb, region_ub in regions
    ]
def batch_splits(
    queue: List[
        Tuple[
            Tensor,
            Tensor,
            Tuple[Tensor, Tensor, Tensor],
            int,
            Optional[Sequence[Sequence[Tuple[int, int, float]]]],
        ]
    ],
    batch_size: int,
) -> Tuple[
    Tensor,
    Tensor,
    Tensor,
    Tensor,
    Tensor,
    Tensor,
    List[Optional[Sequence[Sequence[Tuple[int, int, float]]]]],
]:
    """Pop up to ``batch_size`` compatible subproblems from ``queue`` and batch them.

    Only consecutive queue entries whose property tensors have the same number
    of queries as the head are batched (the loop stops at the first mismatch).
    ``queue`` is mutated: batched elements are removed from its front. Fully
    solved properties are pruned from the batched tensors before returning.

    :returns: (input_lb, input_ub, property_matrix, property_threshold,
        combination_matrix, max_depth, properties_to_verify_batch)
    """
    elements = []
    query_count = queue[0][2][1].shape[-1]
    for _ in range(batch_size):
        # Stop as soon as the head's query width differs from the first element's.
        if not queue[0][2][1].shape[-1] == query_count:
            break
        elements.append(queue.pop(0))
        if len(queue) == 0:
            break
    # elements = queue[:min(batch_size, len(queue))]
    # queue = queue[min(batch_size, len(queue)):]
    input_lb = torch.cat([element[0] for element in elements], 0)
    input_ub = torch.cat([element[1] for element in elements], 0)
    property_matrix = torch.cat([element[2][0] for element in elements], 0)
    property_threshold = torch.cat([element[2][1] for element in elements], 0)
    combination_matrix = torch.cat([element[2][2] for element in elements], 0)
    max_depth = torch.tensor(
        [element[3] for element in elements], dtype=torch.int, device=input_lb.device
    )
    properties_to_verify_batch = [element[4] for element in elements]
    # A constraint is "solved" when its coefficient row is zeroed out for
    # every batch element.
    solved_properties = (property_matrix == 0).all(2).all(0)
    # Zeroed rows with a negative threshold encode verified constraints.
    verified = (
        (property_matrix == 0)
        .all(2)
        .__and__(property_threshold < 0)
        .to(combination_matrix.dtype)
    )
    and_properties_verified = (
        torch.einsum(
            "bij,bj -> bi",
            combination_matrix,
            verified,
        )
        >= 1
    )
    # Drop solved constraints (and fully-verified clauses) from the tensors.
    property_matrix = property_matrix[:, ~solved_properties]
    property_threshold = property_threshold[:, ~solved_properties]
    combination_matrix = combination_matrix[:, :, ~solved_properties][
        :, ~and_properties_verified.all(0)
    ]
    assert (combination_matrix.sum(2) > 0).all()
    return (
        input_lb,
        input_ub,
        property_matrix,
        property_threshold,
        combination_matrix,
        max_depth,
        properties_to_verify_batch,
    )
def split_input_regions(
    input_regions: List[Tuple[Tensor, Tensor]],
    dim: Union[int, List[int]] = 0,
    splits: Union[int, List[int]] = 2,
) -> List[Tuple[Tensor, Tensor]]:
    """Recursively split each (lb, ub) input region along the given dimensions.

    ``dim`` and ``splits`` may be single values or lists; list arguments are
    consumed destructively (``pop(0)``) one recursion level at a time, so
    callers must pass copies if they need the lists afterwards.

    :param input_regions: list of (lower bound, upper bound) tensor pairs
    :param dim: flattened input dimension(s) to split along
    :param splits: number of equal parts per split dimension
    :returns: list with one (lb, ub) pair per generated sub-region
    """
    input_shape = input_regions[0][0].shape
    if isinstance(splits, int):
        di = splits
    else:
        di = splits.pop(0)  # NOTE: mutates the caller's list
    if isinstance(dim, int):
        d = dim
    else:
        d = dim.pop(0)  # NOTE: mutates the caller's list
    new_input_regions = []
    for specLB, specUB in input_regions:
        specLB = specLB.flatten(1)
        specUB = specUB.flatten(1)
        d_lb = specLB[:, d].clone()
        d_ub = specUB[:, d].clone()
        d_range = d_ub - d_lb
        d_step = d_range / di
        for i in range(di):
            # i-th equal slice of [d_lb, d_ub] along dimension d.
            specLB[:, d] = d_lb + i * d_step
            specUB[:, d] = d_lb + (i + 1) * d_step
            new_input_regions.append(
                (
                    specLB.clone().view(-1, *input_shape[1:]),
                    specUB.clone().view(-1, *input_shape[1:]),
                )
            )
            assert (specLB[:, d] >= d_lb - 1e-7).all()
            assert (specUB[:, d] <= d_ub + 1e-7).all()
    if isinstance(splits, list) and isinstance(dim, list):
        if len(splits) == 0 or len(dim) == 0:
            return new_input_regions
        return split_input_regions(new_input_regions, dim=dim, splits=splits)
    # NOTE(review): comparing the dimension index against len(splits) mixes an
    # index with a count -- confirm this is the intended stopping condition.
    elif isinstance(splits, list) and dim + 1 < len(splits):  # type: ignore # inference does not recognise that we have an int for dim here
        return split_input_regions(new_input_regions, dim=dim + 1, splits=splits)  # type: ignore # inference does not recognise that we have an int for dim here
    else:
        return new_input_regions
def consolidate_input_regions(
    input_regions: List[Tuple[Tensor, Tensor]]
) -> Tuple[Tensor, Tensor]:
    """Return the bounding box (lb, ub) enclosing all given input regions."""
    lower_bounds = [region_lb for region_lb, _ in input_regions]
    upper_bounds = [region_ub for _, region_ub in input_regions]
    consolidated_lb, _ = torch.stack(lower_bounds, 0).min(0)
    consolidated_ub, _ = torch.stack(upper_bounds, 0).max(0)
    return consolidated_lb, consolidated_ub
# def list_minimum(inputs: List[Tensor]) -> Tensor:
# if len(inputs) == 0:
# assert False, "List Minimum undefined for empty lists"
# elif len(inputs) == 1:
# return inputs[0]
# elif len(inputs) == 2:
# return torch.minimum(inputs[0], inputs[1])
# else:
# return list_minimum(
# [
# torch.minimum(inputs[2 * i], inputs[2 * i + 1])
# for i in range(len(inputs) // 2)
# ]
# + ([] if len(inputs) % 2 == 0 else [inputs[-1]])
# )
# def list_maximum(inputs: List[Tensor]) -> Tensor:
# if len(inputs) == 1:
# return inputs[0]
# if len(inputs) == 2:
# return torch.maximum(inputs[0], inputs[1])
# else:
# return list_maximum(
# [
# torch.maximum(inputs[2 * i], inputs[2 * i + 1])
# for i in range(len(inputs) // 2)
# ]
# + ([] if len(inputs) % 2 == 0 else [inputs[-1]])
# )
def tensor_reduce(
    fun: Callable[[Tensor, Tensor], Tensor], in_tens: Sequence[Tensor]
) -> Tensor:
    """Left-fold ``fun`` over ``in_tens`` (equivalent to functools.reduce)."""
    iterator = iter(in_tens)
    try:
        accumulator = next(iterator)
    except StopIteration:
        # Match functools.reduce's behavior on an empty sequence.
        raise TypeError("reduce() of empty iterable with no initial value") from None
    for tensor in iterator:
        accumulator = fun(accumulator, tensor)
    return accumulator
| 10,303 | 31.2 | 161 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/utilities/branching.py | from __future__ import annotations
import random
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from src.abstract_layers.abstract_bn2d import BatchNorm2d
from src.abstract_layers.abstract_conv2d import Conv2d
from src.abstract_layers.abstract_linear import Linear
from src.abstract_layers.abstract_module import AbstractModule
from src.abstract_layers.abstract_mulit_path_block import MultiPathBlock
from src.abstract_layers.abstract_network import AbstractNetwork
from src.abstract_layers.abstract_relu import ReLU
from src.abstract_layers.abstract_residual_block import ResidualBlock
from src.abstract_layers.abstract_sequential import Sequential
from src.abstract_layers.abstract_sigmoid import Sigmoid
from src.abstract_layers.abstract_split_block import SplitBlock
from src.abstract_layers.abstract_tanh import Tanh
from src.mn_bab_optimizer import MNBabOptimizer
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.parameters import ReadonlyParametersForQuery
from src.state.split_state import ReadonlySplitState
from src.state.subproblem_state import ReadonlySubproblemState, SubproblemState
from src.state.tags import (
LayerTag,
NodeTag,
QueryTag,
key_alpha_relu_lb,
key_alpha_relu_ub,
key_beta_lb,
key_beta_ub,
key_prima_lb,
key_prima_ub,
layer_from_query_tag,
layer_tag,
query_tag,
)
from src.utilities.batching import batch_subproblems
from src.utilities.config import (
BaBsrBranchingConfig,
BacksubstitutionConfig,
BranchingConfig,
BranchingMethod,
FilteredSmartBranchingConfig,
PropagationEffectMode,
ReduceOp,
make_branching_config,
)
from src.utilities.general import get_neg_pos_comp
from src.utilities.queries import get_output_bound_initial_query_coef
from src.verification_subproblem import (
ReadonlyVerificationSubproblem,
VerificationSubproblem,
)
class SplitIndexFinder:
    """Bundles the network, configuration and auxiliary data needed to pick
    the next node to branch on, so callers only pass the subproblem."""

    # NOTE(review): declared as camelCase here but assigned as
    # ``backsubstitution_config`` in __init__ -- this annotation is never set.
    network: AbstractNetwork
    backsubstitutionConfig: BacksubstitutionConfig
    query_coef: Tensor
    # Optional per-layer cost used to discount branching scores.
    split_cost_by_layer: Optional[Dict[LayerTag, float]]
    branching_config: BranchingConfig
    # (the following parameters are only used for filtered smart branching)
    input_lb: Tensor
    input_ub: Tensor
    batch_sizes: Sequence[int]
    recompute_intermediate_bounds_after_branching: bool
    optimizer: MNBabOptimizer

    def __init__(
        self,
        network: AbstractNetwork,
        backsubstitution_config: BacksubstitutionConfig,
        query_coef: Tensor,
        split_cost_by_layer: Optional[Dict[LayerTag, float]],
        branching_config: BranchingConfig,
        # (the following parameters are only used for filtered smart branching)
        input_lb: Tensor,
        input_ub: Tensor,
        batch_sizes: Sequence[int],
        recompute_intermediate_bounds_after_branching: bool,
        optimizer: MNBabOptimizer,
    ):
        self.network = network
        self.backsubstitution_config = backsubstitution_config
        self.query_coef = query_coef
        self.split_cost_by_layer = split_cost_by_layer
        self.branching_config = branching_config
        # (the following parameters are only used for filtered smart branching)
        self.input_lb = input_lb
        self.input_ub = input_ub
        self.batch_sizes = batch_sizes
        self.recompute_intermediate_bounds_after_branching = (
            recompute_intermediate_bounds_after_branching
        )
        self.optimizer = optimizer

    def find_node_to_split(self, subproblem: ReadonlyVerificationSubproblem) -> NodeTag:
        """Delegate to the module-level ``find_node_to_split`` with the stored state."""
        return find_node_to_split(
            subproblem,
            self.network,
            self.backsubstitution_config,
            self.query_coef,
            self.split_cost_by_layer,
            self.branching_config,
            # (the following options are only used for filtered smart branching)
            self.input_lb,
            self.input_ub,
            self.batch_sizes,
            self.recompute_intermediate_bounds_after_branching,
            self.optimizer,
        )
def make_split_index_finder(
    network: AbstractNetwork,
    backsubstitution_config: BacksubstitutionConfig,
    query_coef: Tensor,
    initial_subproblem: VerificationSubproblem,
    branching_config: BranchingConfig,
    # (the following parameters are only used for filtered smart branching)
    input_lb: Tensor,
    input_ub: Tensor,
    batch_sizes: Sequence[int],
    recompute_intermediate_bounds_after_branching: bool,
    optimizer: MNBabOptimizer,
) -> SplitIndexFinder:
    """Construct a ``SplitIndexFinder``, precomputing per-layer split costs.

    When ``branching_config.use_cost_adjusted_scores`` is set, the initial
    subproblem must carry PRIMA constraints; their coefficients are used to
    estimate the cost of splitting in each layer.
    """
    split_cost_by_layer = None
    if branching_config.use_cost_adjusted_scores:
        assert (
            initial_subproblem.subproblem_state.constraints.prima_constraints
        ), "prima constraints missing with use_cost_adjusted_scores"
        split_cost_by_layer = compute_split_cost_by_layer(
            network,
            initial_subproblem.subproblem_state.constraints.prima_constraints.prima_coefficients,  # TODO: ugly
            recompute_intermediate_bounds_after_branching,
        )
    return SplitIndexFinder(
        network,
        backsubstitution_config,
        query_coef,
        split_cost_by_layer,
        branching_config,
        # (the following parameters are only used for filtered smart branching)
        input_lb,
        input_ub,
        batch_sizes,
        recompute_intermediate_bounds_after_branching,
        optimizer,
    )
def find_node_to_split(
    subproblem: ReadonlyVerificationSubproblem,
    network: AbstractNetwork,
    backsubstitution_config: BacksubstitutionConfig,
    query_coef: Tensor,
    split_cost_by_layer: Optional[Dict[LayerTag, float]],
    branching_config: BranchingConfig,
    # (the following options are only used for filtered smart branching)
    input_lb: Tensor,
    input_ub: Tensor,
    batch_sizes: Sequence[int],
    recompute_intermediate_bounds_after_branching: bool,
    optimizer: MNBabOptimizer,
) -> NodeTag:
    """Dispatch to the branching strategy selected in ``branching_config``.

    :returns: tag of the node the chosen strategy wants to split next
    :raises RuntimeError: if the configured branching method is unknown
    """
    method = branching_config.method
    if method == BranchingMethod.babsr:
        return find_index_to_split_with_babsr(
            subproblem,
            network,
            backsubstitution_config,
            query_coef,
            split_cost_by_layer,
            branching_config.babsr(),
            False,
        )
    if method == BranchingMethod.active_constraint_score:
        # Active-constraint scoring reuses the BaBSR machinery with a fixed
        # configuration and the score flag enabled.
        active_constraint_config = make_branching_config(
            method=BranchingMethod.babsr,
            use_prima_contributions=True,
            use_optimized_slopes=True,
            use_beta_contributions=True,
            propagation_effect_mode=PropagationEffectMode.none,
            use_indirect_effect=False,
            reduce_op=ReduceOp.min,
            use_abs=False,
        ).babsr()
        return find_index_to_split_with_babsr(
            subproblem,
            network,
            backsubstitution_config,
            query_coef,
            split_cost_by_layer,
            active_constraint_config,
            True,
        )
    if method == BranchingMethod.filtered_smart_branching:
        return find_index_to_split_with_filtered_smart_branching(
            subproblem,
            network,
            backsubstitution_config,
            query_coef,
            split_cost_by_layer,
            branching_config.filtered_smart_branching(),
            input_lb,
            input_ub,
            batch_sizes,
            recompute_intermediate_bounds_after_branching,
            optimizer,
        )
    raise RuntimeError("Branching method misspecified.")
# adapted from: https://github.com/KaidiXu/Beta-CROWN/blob/master/src/babsr_score_conv.py
# commit hash: 77a055a39bd338367b9c335316004863681fb671
def find_index_to_split_with_babsr(
    subproblem: ReadonlyVerificationSubproblem,
    network: AbstractNetwork,
    backsubstitution_config: BacksubstitutionConfig,
    query_coef: Tensor,
    split_cost_by_layer: Optional[Dict[LayerTag, float]],
    babsr_config: BaBsrBranchingConfig,
    use_active_constraint_branching_scores: bool,
) -> NodeTag:
    """Pick the node to branch on using (a variant of) the BaBSR score.

    Scores are computed per activation layer; the highest-scoring node wins if
    its layer is not the sparsest one and its score clears a fixed decision
    threshold. Otherwise a backup "intercept" score is consulted, and as a
    last resort a random not-yet-split node is chosen.

    :param subproblem: subproblem to branch on; must not be fully split
    :param split_cost_by_layer: optional per-layer cost used to discount scores
    :param use_active_constraint_branching_scores: score nodes by their active
        PRIMA-constraint contributions instead of the BaBSR estimate
    :returns: tag of the node to split
    """
    assert (
        not subproblem.is_fully_split
    ), "Can't find a node to split for fully split subproblems."
    use_prima_contributions = babsr_config.use_prima_contributions
    assert (
        not use_active_constraint_branching_scores or use_prima_contributions
    ), "Must provide prima contributions for active constraint branching scores"
    use_optimized_slopes = babsr_config.use_optimized_slopes
    use_beta_contributions = babsr_config.use_beta_contributions
    propagation_effect_mode = babsr_config.propagation_effect_mode
    use_indirect_effect = babsr_config.use_indirect_effect
    lower_bound_reduce_op = _get_lower_bound_reduce_op(babsr_config.reduce_op)
    use_abs = babsr_config.use_abs
    # arguments in beta-crown implementation that always set to these values
    decision_threshold = 0.001
    activation_layer_ids = network.get_activation_layer_ids()
    sparsest_layer_id = activation_layer_ids[0]
    device = next(network.parameters()).device
    batch_size = 1
    subproblem_state = (
        subproblem.subproblem_state
        if use_prima_contributions
        else subproblem.subproblem_state.without_prima()
    ).deep_copy_to(
        device
    )  # TODO: avoid copying layer bounds for active constraint score?
    assert (
        not use_prima_contributions
        or subproblem_state.constraints.prima_constraints is not None
    ), "prima constraints missing with use_prima_contributions"
    if use_active_constraint_branching_scores:
        assert subproblem_state.constraints.prima_constraints is not None
        score, intercept_tb = _compute_active_constraint_scores(
            network,
            subproblem_state,
            batch_size,
            device,
        )
    else:
        # NOTE Not yet implemented for non-ReLU activations
        score, intercept_tb = _compute_split_scores(
            backsubstitution_config,
            query_coef,
            network,
            subproblem_state if use_prima_contributions else subproblem_state.without_prima(),
            batch_size,
            device,
            use_optimized_slopes=use_optimized_slopes,
            use_beta_contributions=use_beta_contributions,
            propagation_effect_mode=propagation_effect_mode,
            use_indirect_effect=use_indirect_effect,
            lower_bound_reduce_op=lower_bound_reduce_op,
            use_abs=use_abs,
        )
    assert all(layer_scores.shape[0] == batch_size for layer_scores in score.values())
    if split_cost_by_layer is not None:
        # Discount scores by the estimated cost of splitting in each layer.
        score, intercept_tb = _adjust_based_on_cost(
            score, intercept_tb, split_cost_by_layer
        )
    decision: List[NodeTag] = []
    for batch_index in range(batch_size):
        # Primary criterion: layer with the largest maximum score.
        new_score = {k: score[k][batch_index] for k in score.keys()}
        max_info = {k: torch.max(new_score[k]) for k in new_score.keys()}
        decision_layer_id = sorted(
            new_score.keys(), key=lambda x: float(torch.max(new_score[x])), reverse=True
        )[0]
        decision_index_flattened = torch.argmax(new_score[decision_layer_id])
        decision_index = np.unravel_index(
            decision_index_flattened.cpu(), new_score[decision_layer_id].shape
        )
        if (
            decision_layer_id != sparsest_layer_id
            and max_info[decision_layer_id].item() > decision_threshold
        ):
            decision.append(
                NodeTag(
                    layer=decision_layer_id, index=tuple(int(v) for v in decision_index)
                )
            )
        else:
            # Backup criterion: most negative intercept score, searched in the
            # last (deepest) activation layer that has any sufficiently
            # negative entry.
            new_intercept_tb = {
                k: intercept_tb[k][batch_index] for k in intercept_tb.keys()
            }
            min_info = {
                k: torch.min(new_intercept_tb[k])
                for k in new_intercept_tb.keys()
                if torch.min(new_intercept_tb[k]) < -1e-4
            }
            if len(min_info) != 0:  # and Icp_score_counter < 2:
                intercept_layer_id = [
                    idx for idx in activation_layer_ids if idx in min_info.keys()
                ][-1]
                intercept_index_flattened = torch.argmin(
                    new_intercept_tb[intercept_layer_id]
                )
                intercept_index = np.unravel_index(
                    intercept_index_flattened.cpu(),
                    new_intercept_tb[intercept_layer_id].shape,
                )
                decision.append(
                    NodeTag(
                        layer=intercept_layer_id,
                        index=tuple(int(v) for v in intercept_index),
                    )
                )
            else:
                # Last resort: pick a random not-yet-split node.
                assert subproblem_state.constraints.split_state is not None
                decision.append(
                    _find_random_node_to_split(
                        subproblem_state.constraints.split_state, batch_index
                    )
                )
    assert len(decision) == batch_size
    return decision[0]
def geo_mean(x: Tensor, y: Tensor) -> Tensor:
    """Elementwise geometric mean of x and y; negative products clamp to 0."""
    product = x * y
    return torch.sqrt(F.relu(product))
def _get_lower_bound_reduce_op(
    lower_bound_reduce_op_tag: ReduceOp,
) -> Callable[[Tensor, Tensor], Tensor]:
    """Map a ``ReduceOp`` tag to the corresponding elementwise torch op.

    :raises RuntimeError: for unrecognized tags
    """
    if lower_bound_reduce_op_tag == ReduceOp.min:
        return torch.minimum
    if lower_bound_reduce_op_tag == ReduceOp.max:
        return torch.maximum
    if lower_bound_reduce_op_tag == ReduceOp.geo_mean:
        return geo_mean
    raise RuntimeError("Unknown reduce operation for branching")
def _compute_active_constraint_scores(
    network: AbstractNetwork,
    subproblem_state: ReadonlySubproblemState,
    batch_size: int,
    device: torch.device,
) -> Tuple[Dict[LayerTag, Tensor], Dict[LayerTag, Tensor]]:
    """Compute per-activation-layer active-constraint branching scores.

    :returns: (score, backup_score) dictionaries keyed by activation layer
        tag; the backup score is the negated direct effect.
    """
    score: Dict[LayerTag, Tensor] = {}
    backup_score: Dict[LayerTag, Tensor] = {}
    return _compute_active_constraint_scores_sequential(
        score,
        backup_score,
        network,
        # Parameters associated with the network's overall output query.
        subproblem_state.parameters.parameters_by_query[query_tag(network)],
        subproblem_state,
        batch_size,
        device,
    )
def _compute_active_constraint_scores_sequential(
    score: Dict[LayerTag, Tensor],
    backup_score: Dict[LayerTag, Tensor],
    network: Sequential,
    optimizable_parameters: ReadonlyParametersForQuery,
    subproblem_state: ReadonlySubproblemState,
    batch_size: int,
    device: torch.device,
) -> Tuple[Dict[LayerTag, Tensor], Dict[LayerTag, Tensor]]:
    """Walk ``network.layers`` back-to-front, filling ``score``/``backup_score``
    with an active-constraint score per activation layer.

    Sequential sub-networks and both paths of a ResidualBlock are traversed
    recursively with the same accumulators, which are returned.
    Only ReLU activations are supported (asserted below).
    """
    for layer in reversed(network.layers):
        if isinstance(layer, (ReLU, Sigmoid, Tanh)):
            assert isinstance(
                layer, ReLU
            ), "active constraint score not supported with sigmoid or tanh layers."
            direct_effect = _compute_active_constraint_score(
                optimizable_parameters,
                subproblem_state,
                batch_size,
                device,
                layer,
            )
            squeezed_effect = direct_effect.squeeze(1)
            score[layer_tag(layer)] = squeezed_effect
            # Backup score is simply the negated direct effect.
            backup_score[layer_tag(layer)] = -1 * squeezed_effect
        elif isinstance(layer, Sequential):
            score, backup_score = _compute_active_constraint_scores_sequential(
                score,
                backup_score,
                layer,
                optimizable_parameters,
                subproblem_state,
                batch_size,
                device,
            )
        elif isinstance(layer, ResidualBlock):
            # Residual blocks contribute via both of their paths, in order.
            for residual_path in (layer.path_a, layer.path_b):
                score, backup_score = _compute_active_constraint_scores_sequential(
                    score,
                    backup_score,
                    residual_path,
                    optimizable_parameters,
                    subproblem_state,
                    batch_size,
                    device,
                )
    return score, backup_score
@torch.no_grad()
def _compute_split_scores(
    backsubstitution_config: BacksubstitutionConfig,
    query_coef: Tensor,
    network: AbstractNetwork,
    subproblem_state: SubproblemState,  # mutated below (bounds/slopes/betas), so it cannot be readonly
    batch_size: int,
    device: torch.device,
    use_optimized_slopes: bool,
    use_beta_contributions: bool,
    propagation_effect_mode: PropagationEffectMode,
    use_indirect_effect: bool,
    lower_bound_reduce_op: Callable[[Tensor, Tensor], Tensor],
    use_abs: bool,
) -> Tuple[Dict[LayerTag, Tensor], Dict[LayerTag, Tensor]]:
    """Compute per-layer branching scores via one backsubstitution pass.

    The query coefficients are repeated across the batch, wrapped into an
    MN_BaB_Shape and propagated backwards through ``network`` by
    ``_compute_split_scores_sequential``, which fills the score dicts.

    Gradients are disabled for the whole computation (@torch.no_grad()).
    Returns ``(score, backup_score)``, each keyed by layer tag.
    """
    # Repeat the query once per batch element along dim 0.
    batch_repeats = batch_size, *([1] * (len(query_coef.shape) - 1))
    batch_query_coef = query_coef.repeat(batch_repeats).to(device)
    abstract_shape = MN_BaB_Shape(
        query_id=query_tag(network),
        query_prev_layer=None,  # not tracked
        queries_to_compute=None,  # not tracked
        lb=AffineForm(batch_query_coef),
        ub=AffineForm(batch_query_coef),
        unstable_queries=None,  # not tracked
        subproblem_state=subproblem_state,
    )
    score: Dict[LayerTag, Tensor] = {}
    contribution_fractions: Dict[LayerTag, Dict[LayerTag, Tensor]] = {}
    backup_score: Dict[LayerTag, Tensor] = {}
    # Refresh the network's cached intermediate input bounds from the subproblem.
    network.reset_input_bounds()
    network.set_intermediate_input_bounds(
        subproblem_state.constraints.layer_bounds.intermediate_bounds
    )  # TODO: get rid of this method
    if not use_optimized_slopes:
        # Replace optimized alpha slopes by the fixed WK slopes.
        abstract_shape.change_alphas_to_WK_slopes()
    if not use_beta_contributions:
        # Ignore split (beta) contributions for the score computation.
        abstract_shape.set_beta_parameters_to_zero()
    score, backup_score, __ = _compute_split_scores_sequential(
        backsubstitution_config,
        abstract_shape,
        network,
        score,
        backup_score,
        contribution_fractions,
        propagation_effect_mode,
        use_indirect_effect,
        lower_bound_reduce_op,
        use_abs,
    )
    return score, backup_score
def _compute_split_scores_sequential(  # noqa C901
    backsubstitution_config: BacksubstitutionConfig,
    abstract_shape: MN_BaB_Shape,
    network: Sequential,
    score: Dict[LayerTag, Tensor],
    backup_score: Dict[LayerTag, Tensor],
    contribution_fractions: Dict[LayerTag, Dict[LayerTag, Tensor]],
    propagation_effect_mode: PropagationEffectMode,
    use_indirect_effect: bool,
    lower_bound_reduce_op: Callable[[Tensor, Tensor], Tensor],
    use_abs: bool,
) -> Tuple[
    Dict[LayerTag, Tensor],
    Dict[LayerTag, Tensor],
    Dict[LayerTag, Dict[LayerTag, Tensor]],
]:
    """Backsubstitute ``abstract_shape`` through ``network`` back-to-front,
    recording a branching score and a backup score per activation layer.

    The accumulator dicts are mutated and returned; ``abstract_shape`` is
    updated in place as the traversal proceeds, so statement order matters.
    Composite layers (Sequential, ResidualBlock, MultiPathBlock, SplitBlock)
    are handled by recursing into their sub-networks and merging the resulting
    affine forms; all other layers simply backsubstitute the shape.
    """
    assert abstract_shape.subproblem_state is not None
    assert abstract_shape.subproblem_state.constraints.split_state is not None
    for layer_idx, layer in reversed(list(enumerate(network.layers))):
        # NOTE @Robin Custom version for Sigmoid
        if (
            isinstance(layer, ReLU)
            or isinstance(layer, Sigmoid)
            or isinstance(layer, Tanh)
        ):
            # Activation layer: score it from direct + propagation (+ indirect)
            # effects on the lower bound, then backsubstitute through it.
            previous_layer = network.layers[layer_idx - 1]
            current_layer_lower_bounds = abstract_shape.subproblem_state.constraints.layer_bounds.intermediate_bounds[
                layer_tag(layer)
            ][
                0
            ]
            current_layer_upper_bounds = abstract_shape.subproblem_state.constraints.layer_bounds.intermediate_bounds[
                layer_tag(layer)
            ][
                1
            ]
            assert isinstance(layer, ReLU)  # TODO: get rid of this
            (
                direct_effect,
                propagation_effect,
            ) = _compute_direct_and_propagation_effect_on_lower_bound(
                abstract_shape,
                layer,
                previous_layer,
                current_layer_lower_bounds,
                current_layer_upper_bounds,
                propagation_effect_mode,
                lower_bound_reduce_op,
            )
            current_layer_score = direct_effect + propagation_effect
            if use_indirect_effect:
                # Estimate how splitting earlier layers would tighten this
                # layer's bounds, and fold those fractions into the score.
                contribution_fractions_to_current_layer = (
                    _compute_contribution_fractions_to_layer_bounds(
                        backsubstitution_config,
                        network,
                        query_tag(layer),
                        layer_idx,
                        abstract_shape.subproblem_state,
                        abstract_shape.batch_size,
                        propagation_effect_mode,
                        lower_bound_reduce_op,
                    )
                )
                for (
                    contributing_layer_id,
                    fractions,
                ) in contribution_fractions_to_current_layer.items():
                    if contributing_layer_id not in contribution_fractions:
                        contribution_fractions[contributing_layer_id] = {}
                    contribution_fractions[contributing_layer_id][
                        layer_tag(layer)
                    ] = fractions
                indirect_effect = _compute_indirect_effect(
                    contribution_fractions,
                    score,
                    layer_tag(layer),
                    current_layer_score.shape,
                    current_layer_score.device,
                )
                current_layer_score += indirect_effect
            if use_abs:
                current_layer_score = abs(current_layer_score)
            score[layer_tag(layer)] = current_layer_score.squeeze(1)
            backup_score[layer_tag(layer)] = -1 * direct_effect.squeeze(1)
            abstract_shape = layer.backsubstitute(
                backsubstitution_config, abstract_shape
            )
        elif isinstance(layer, Sequential):
            # Recurse into the sub-network with the same accumulators.
            (
                score,
                backup_score,
                contribution_fractions,
            ) = _compute_split_scores_sequential(
                backsubstitution_config,
                abstract_shape,
                layer,
                score,
                backup_score,
                contribution_fractions,
                propagation_effect_mode,
                use_indirect_effect,
                lower_bound_reduce_op,
                use_abs,
            )
        elif isinstance(layer, ResidualBlock):
            # Save the incoming forms, push them through each path separately,
            # then sum the two results (subtracting the doubly-counted bias).
            in_lb = abstract_shape.lb.clone()
            assert abstract_shape.ub is not None
            in_ub = abstract_shape.ub.clone()
            (
                score,
                backup_score,
                contribution_fractions,
            ) = _compute_split_scores_sequential(
                backsubstitution_config,
                abstract_shape,
                layer.path_a,
                score,
                backup_score,
                contribution_fractions,
                propagation_effect_mode,
                use_indirect_effect,
                lower_bound_reduce_op,
                use_abs,
            )
            a_lb = abstract_shape.lb.clone()
            a_ub = abstract_shape.ub.clone()
            abstract_shape.update_bounds(in_lb, in_ub)
            (
                score,
                backup_score,
                contribution_fractions,
            ) = _compute_split_scores_sequential(
                backsubstitution_config,
                abstract_shape,
                layer.path_b,
                score,
                backup_score,
                contribution_fractions,
                propagation_effect_mode,
                use_indirect_effect,
                lower_bound_reduce_op,
                use_abs,
            )
            new_lb_bias = (
                a_lb.bias + abstract_shape.lb.bias - in_lb.bias
            )  # Both the shape in a and in b contain the initial bias terms, so one has to be subtracted
            new_ub_bias = a_ub.bias + abstract_shape.ub.bias - in_ub.bias
            new_lb_coef = a_lb.coef + abstract_shape.lb.coef
            new_ub_coef = a_ub.coef + abstract_shape.ub.coef
            abstract_shape.update_bounds(
                AffineForm(new_lb_coef, new_lb_bias),
                AffineForm(new_ub_coef, new_ub_bias),
            )  # TODO look at merging of dependence sets
        elif isinstance(layer, MultiPathBlock):
            # Generalization of the residual case to n paths with an optional
            # merge/header pair.
            in_lb = abstract_shape.lb.clone()
            assert abstract_shape.ub is not None
            in_ub = abstract_shape.ub.clone()
            pre_merge_shapes = layer.merge.backsubstitute(
                backsubstitution_config, abstract_shape
            )
            post_path_shapes: List[MN_BaB_Shape] = []
            for path_shape, path in zip(pre_merge_shapes, layer.paths):
                (
                    score,
                    backup_score,
                    contribution_fractions,
                ) = _compute_split_scores_sequential(
                    backsubstitution_config,
                    path_shape,
                    path,
                    score,
                    backup_score,
                    contribution_fractions,
                    propagation_effect_mode,
                    use_indirect_effect,
                    lower_bound_reduce_op,
                    use_abs,
                )
                post_path_shapes.append(path_shape)
            if layer.header is not None:
                post_header_shape = layer.header.backsubstitute(
                    backsubstitution_config, post_path_shapes
                )
            else:  # All paths are from the same input we can add them up
                final_lb_form = post_path_shapes[0].lb
                final_ub_form: Optional[AffineForm] = None
                if post_path_shapes[0].ub is not None:
                    final_ub_form = post_path_shapes[0].ub
                for abs_shape in post_path_shapes[1:]:
                    final_lb_form.coef += abs_shape.lb.coef
                    final_lb_form.bias += abs_shape.lb.bias
                    if abs_shape.ub is not None:
                        assert final_ub_form is not None
                        final_ub_form.coef += abs_shape.ub.coef
                        final_ub_form.bias += abs_shape.ub.bias
                post_header_shape = abstract_shape.clone_with_new_bounds(
                    final_lb_form, final_ub_form
                )
            # Adjust bias: every path carried a copy of the input bias, so
            # n-1 copies must be subtracted out again.
            new_lower: AffineForm
            new_upper: Optional[AffineForm] = None
            new_lb_bias = (
                post_header_shape.lb.bias - (len(layer.paths) - 1) * in_lb.bias
            )  # Both the shape in a and in b contain the initial bias terms, so one has to be subtracted
            new_lb_coef = post_header_shape.lb.coef
            new_lower = AffineForm(new_lb_coef, new_lb_bias)
            if post_header_shape.ub is not None and in_ub is not None:
                new_ub_bias = (
                    post_header_shape.ub.bias - (len(layer.paths) - 1) * in_ub.bias
                )
                new_ub_coef = post_header_shape.ub.coef
                new_upper = AffineForm(new_ub_coef, new_ub_bias)
            abstract_shape.update_bounds(new_lower, new_upper)
        elif isinstance(layer, SplitBlock):
            # pass
            coef_split_dim = layer.split_dim + 2
            # Get the output bounds of the center path
            # center_path_out_lb, abstract_shape.subproblem_state.constraints.layer_bounds
            assert abstract_shape.subproblem_state is not None
            (
                center_path_out_lb,
                center_path_out_ub,
            ) = abstract_shape.subproblem_state.constraints.layer_bounds.intermediate_bounds[
                layer_tag(layer.abs_center_path.layers[-1])
            ]
            assert isinstance(center_path_out_lb, Tensor)
            assert isinstance(center_path_out_ub, Tensor)
            center_path_out_lb = F.relu(center_path_out_lb)
            center_path_out_ub = F.relu(center_path_out_ub)
            assert (center_path_out_lb <= center_path_out_ub + 1e-10).all()
            # Get the lower and upper-bound slopes and offsets for the multiplication
            assert layer.res_lower is not None
            assert layer.res_upper is not None
            res_lower = layer.res_lower
            res_upper = layer.res_upper
            mul_factors = (res_lower, res_upper)
            mul_convex_bounds = layer._get_multiplication_slopes_and_intercepts(
                mul_factors, (center_path_out_lb, center_path_out_ub)
            )
            # Get the input bounds for the dividend
            (
                div_input_lb_lb,
                div_input_lb_ub,
                div_input_ub_lb,
                div_input_ub_ub,
            ) = layer._get_mul_lbs_and_ubs(
                mul_factors, (center_path_out_lb, center_path_out_ub)
            )
            div_input_lb = torch.minimum(div_input_lb_lb, div_input_ub_lb).sum(
                dim=layer.outer_reduce_dim + 1
            )
            div_input_ub = torch.maximum(div_input_lb_ub, div_input_ub_ub).sum(
                dim=layer.outer_reduce_dim + 1
            )
            div_input_bounds = (
                div_input_lb,
                div_input_ub,
            )
            # Get the lower and upper-bound slopes and offsets for the division
            div_factors = (
                1 / res_lower.sum(dim=layer.outer_reduce_dim + 1),
                1 / res_upper.sum(dim=layer.outer_reduce_dim + 1),
            )
            div_convex_bounds = layer._get_multiplication_slopes_and_intercepts(
                div_factors, div_input_bounds
            )
            # Backpropagation Part 1 Div-Reshape
            lower_form = layer._backsub_affine_form_first(
                abstract_shape.lb, div_convex_bounds, False, abstract_shape
            )
            upper_form: Optional[AffineForm] = None
            if abstract_shape.ub is not None:
                upper_form = layer._backsub_affine_form_first(
                    abstract_shape.ub, div_convex_bounds, True, abstract_shape
                )
            # Update Abstract Shape so that we can go through mul layer
            abstract_shape.update_bounds(lower_form, upper_form)
            # Backprop Part 2 - Mul
            lower_form = layer._backsub_affine_form_given_convex_bounds(
                abstract_shape.lb, mul_convex_bounds, False, abstract_shape
            )
            if abstract_shape.ub is not None:
                upper_form = layer._backsub_affine_form_given_convex_bounds(
                    abstract_shape.ub, mul_convex_bounds, True, abstract_shape
                )
            # Update Abstract Shape
            abstract_shape.update_bounds(lower_form, upper_form)
            # Backprop center_path
            (
                score,
                backup_score,
                contribution_fractions,
            ) = _compute_split_scores_sequential(
                backsubstitution_config,
                abstract_shape,
                layer.abs_center_path,
                score,
                backup_score,
                contribution_fractions,
                propagation_effect_mode,
                use_indirect_effect,
                lower_bound_reduce_op,
                use_abs,
            )
            # Backprop through the split
            # As we concretized the second split, we simply append it with 0 sensitivity
            # NOTE: Not generalized for arbitrary splits (assumes only 2 splits)
            assert len(layer.split[1]) == 2
            assert isinstance(abstract_shape.lb.coef, Tensor)
            zero_append_shape = [
                abstract_shape.lb.coef.shape[0],
                abstract_shape.lb.coef.shape[1],
                *layer.input_dim,
            ]
            zero_append_shape[coef_split_dim] = layer.split[1][1]
            zero_append_matrix = torch.zeros(
                zero_append_shape, device=abstract_shape.device
            )
            zero_appended_lb = torch.cat(
                (abstract_shape.lb.coef, zero_append_matrix), dim=coef_split_dim
            )
            lower_form = AffineForm(zero_appended_lb, abstract_shape.lb.bias)
            if abstract_shape.ub is not None:
                assert isinstance(abstract_shape.ub.coef, Tensor)
                zero_appended_ub = torch.cat(
                    (abstract_shape.ub.coef, zero_append_matrix), dim=coef_split_dim
                )
                upper_form = AffineForm(zero_appended_ub, abstract_shape.ub.bias)
            abstract_shape.update_bounds(lower_form, upper_form)
        else:
            # Affine/other layer: plain backsubstitution step.
            abstract_shape = layer.backsubstitute(
                backsubstitution_config, abstract_shape
            )
    return score, backup_score, contribution_fractions
def babsr_ratio_computation(
    lower_bound: Tensor, upper_bound: Tensor
) -> Tuple[Tensor, Tensor]:
    """Return (slope, intercept) of the BaBSR-style upper ReLU relaxation.

    With neg = min(lower_bound, 0) and pos = max(upper_bound, 0):
    slope = pos / (pos - neg) and intercept = -neg * slope.
    A 0/0 for stable-inactive neurons is mapped to 0 via nan_to_num.
    """
    neg_part = torch.clamp(lower_bound, max=0)
    pos_part = torch.clamp(upper_bound, min=0)
    slope = pos_part / (pos_part - neg_part)
    offset = -1 * neg_part * slope
    return slope.nan_to_num(), offset.nan_to_num()
def _compute_active_constraint_score(
    optimizable_parameters: ReadonlyParametersForQuery,
    subproblem_state: ReadonlySubproblemState,
    batch_size: int,
    device: torch.device,
    layer: ReLU,
) -> Tensor:
    """Score the given ReLU layer by the magnitude of its PRIMA constraint
    contributions, masked to unstable (not-yet-split) neurons.

    Returns a zero tensor of shape (batch, 1, *layer.output_dim) when the
    layer has no PRIMA constraints.
    """
    assert subproblem_state.constraints.split_state is not None
    split_state = subproblem_state.constraints.split_state
    assert subproblem_state.constraints.prima_constraints is not None
    prima_constraints = subproblem_state.constraints.prima_constraints
    # NOTE(review): ``layer`` is annotated as ReLU, so the else branch below
    # looks unreachable; kept as-is pending confirmation.
    if isinstance(layer, ReLU):  # TODO: move this logic entirely into SplitState?
        unstable_nodes_mask = split_state.unstable_node_mask_in_layer(
            layer_tag(layer)
        ).unsqueeze(1)
    else:
        unstable_nodes_mask = torch.ones_like(
            split_state.split_constraints[layer_tag(layer)].unsqueeze(1),
            dtype=torch.bool,
            device=split_state.device,  # TODO: can we just use "device" here?
        )
    if (
        prima_constraints is None
        or layer_tag(layer) not in prima_constraints.prima_coefficients
        or prima_constraints.prima_coefficients[layer_tag(layer)][0].shape[2] == 0
    ):
        # No PRIMA constraints for this layer: nothing to score.
        return torch.zeros(batch_size, 1, *layer.output_dim, device=device)
    (
        current_layer_prima_output_coefficients,
        current_layer_prima_input_coefficients,
        __,
    ) = prima_constraints.prima_coefficients[layer_tag(layer)]
    prima_parameters = optimizable_parameters.parameters[key_prima_lb][layer_tag(layer)]
    # sqrt(square(.)) computes an element-wise absolute value, which is not
    # directly available for sparse tensors.
    prima_output_contribution = layer._multiply_prima_coefs_and_parameters(
        torch.sqrt(
            torch.square(current_layer_prima_output_coefficients)
        ),  # abs not available for sparse tensors
        prima_parameters,
    )
    prima_input_contribution = layer._multiply_prima_coefs_and_parameters(
        torch.sqrt(
            torch.square(current_layer_prima_input_coefficients)
        ),  # abs not available for sparse tensors
        prima_parameters,
    )
    prima_contribution = prima_input_contribution + prima_output_contribution
    return (prima_contribution) * unstable_nodes_mask
def _compute_direct_and_propagation_effect_on_lower_bound(
    abstract_shape: MN_BaB_Shape,
    layer: ReLU,
    previous_layer: Union[Linear, Conv2d],
    current_layer_lower_bounds: Tensor,
    current_layer_upper_bounds: Tensor,
    propagation_effect_mode: PropagationEffectMode,
    lower_bound_reduce_op: Callable[[Tensor, Tensor], Tensor],
) -> Tuple[Tensor, Tensor]:
    """Estimate how splitting each unstable neuron of ``layer`` would change
    the propagated *lower* bound.

    The direct effect is the removed relaxation intercept term; the
    propagation effect approximates the change in the contribution that flows
    through the preceding layer, reduced over the neg/pos split cases with
    ``lower_bound_reduce_op``. Both are zero for stable (already split)
    neurons. Returns (direct_effect, propagation_effect).
    """
    assert abstract_shape.subproblem_state is not None
    subproblem_state = abstract_shape.subproblem_state
    assert subproblem_state.constraints.split_state is not None
    if (  # TODO: move this logic into PrimaConstraints? ( seems to be duplicated)
        subproblem_state.constraints.prima_constraints
        and layer_tag(layer)
        in subproblem_state.constraints.prima_constraints.prima_coefficients
        and subproblem_state.constraints.prima_constraints.prima_coefficients[
            layer_tag(layer)
        ][0].shape[-1]
        > 0
    ):
        # PRIMA constraints present: include their output/input contributions.
        (
            current_layer_prima_output_coefficients,
            current_layer_prima_input_coefficients,
            __,
        ) = subproblem_state.constraints.prima_constraints.prima_coefficients[
            layer_tag(layer)
        ]
        prima_parameters = abstract_shape.get_existing_parameters(
            key_prima_lb, layer_tag(layer)
        )
        prima_output_contribution = layer._multiply_prima_coefs_and_parameters(
            current_layer_prima_output_coefficients, prima_parameters
        )
        prima_input_contribution = layer._multiply_prima_coefs_and_parameters(
            current_layer_prima_input_coefficients, prima_parameters
        )
    else:
        assert not abstract_shape.uses_dependence_sets()
        assert isinstance(abstract_shape.lb.coef, Tensor)
        prima_output_contribution = torch.zeros_like(abstract_shape.lb.coef)
        prima_input_contribution = torch.zeros_like(abstract_shape.lb.coef)
    lb_coef_before_relaxation = abstract_shape.lb.coef + prima_output_contribution
    # Optimized lower slope (alpha) and BaBSR upper slope/intercept.
    lb_slope = abstract_shape.get_existing_parameters(
        key_alpha_relu_lb, layer_tag(layer)
    )
    ub_slope, ub_intercept = babsr_ratio_computation(
        current_layer_lower_bounds, current_layer_upper_bounds
    )
    (
        neg_lb_coef_before_relaxation,
        pos_lb_coef_before_relaxation,
    ) = get_neg_pos_comp(lb_coef_before_relaxation)
    beta_parameters = abstract_shape.get_existing_parameters(
        key_beta_lb, layer_tag(layer)
    )
    beta_contribution_shape = (abstract_shape.batch_size, 1, *layer.output_dim)
    beta_contribution = (
        beta_parameters
        * subproblem_state.constraints.split_state.split_constraints[layer_tag(layer)]
    ).view(beta_contribution_shape)
    # Coefficient after applying the ReLU relaxation plus PRIMA/beta terms.
    lb_coef = (
        (
            pos_lb_coef_before_relaxation * lb_slope
            + neg_lb_coef_before_relaxation * ub_slope
        )
        + prima_input_contribution
        + beta_contribution
    )
    neg_lb_coef, pos_lb_coef = get_neg_pos_comp(lb_coef)
    previous_layer_bias = _get_layer_bias(previous_layer, lb_coef.dim())
    (
        negative_coef_multiplier_before,
        positive_coef_multiplier_before,
        negative_coef_multiplier_neg_split,
        positive_coef_multiplier_neg_split,
        negative_coef_multiplier_pos_split,
        positive_coef_multiplier_pos_split,
    ) = _get_coef_multipliers(
        current_layer_lower_bounds,
        current_layer_upper_bounds,
        previous_layer_bias,
        propagation_effect_mode,
    )
    # Contribution flowing through the previous layer before any split.
    propagation_contribution_before = (
        neg_lb_coef * negative_coef_multiplier_before
        + pos_lb_coef * positive_coef_multiplier_before
    )
    # Case: neuron split to the negative side (coef reduces to PRIMA+beta).
    neg_lb_coef_neg_split, pos_lb_coef_neg_split = get_neg_pos_comp(
        prima_input_contribution + beta_contribution
    )
    propagation_contribution_neg_split = (
        neg_lb_coef_neg_split * negative_coef_multiplier_neg_split
        + pos_lb_coef_neg_split * positive_coef_multiplier_neg_split
    )
    # Case: neuron split to the positive side (identity passes coef through).
    neg_lb_coef_pos_split, pos_lb_coef_pos_split = get_neg_pos_comp(
        lb_coef_before_relaxation + prima_input_contribution + beta_contribution
    )
    propagation_contribution_pos_split = (
        neg_lb_coef_pos_split * negative_coef_multiplier_pos_split
        + pos_lb_coef_pos_split * positive_coef_multiplier_pos_split
    )
    propagation_effect_neg_split = (
        propagation_contribution_neg_split - propagation_contribution_before
    )
    propagation_effect_pos_split = (
        propagation_contribution_pos_split - propagation_contribution_before
    )
    # Only unstable (not yet split) neurons can be branched on.
    unstable_nodes_mask = (
        subproblem_state.constraints.split_state.split_constraints[layer_tag(layer)]
        == 0
    ).unsqueeze(1)
    propagation_effect = lower_bound_reduce_op(
        propagation_effect_neg_split, propagation_effect_pos_split
    ) * (unstable_nodes_mask)
    # Splitting removes the relaxation intercept along negative coefficients.
    direct_effect = -1 * (
        neg_lb_coef_before_relaxation * ub_intercept * unstable_nodes_mask
    )
    assert (direct_effect >= 0).all()
    return direct_effect, propagation_effect
def _get_layer_bias(previous_layer: AbstractModule, coef_dim: int) -> Tensor:
    """Fetch the bias of ``previous_layer`` reshaped for broadcasting.

    Sequential sub-networks expose their effective bias via get_babsr_bias();
    plain layers carry a ``bias`` tensor. The bias is expanded to
    (batch, query, *bias_dims); when the coefficient tensor is 5-D (the
    convolutional case), two trailing spatial singleton dims are added too.
    """
    if isinstance(previous_layer, Sequential):
        bias = previous_layer.get_babsr_bias()
    else:
        assert isinstance(previous_layer.bias, Tensor)
        bias = previous_layer.bias
    # Prepend batch and query dimensions.
    bias = bias[None, None]
    conv_coef_dims = 5  # batch, query, channel, height, width
    if coef_dim == conv_coef_dims:
        # Broadcast the per-channel bias over height and width.
        bias = bias[..., None, None]
    return bias
def _compute_direct_and_propagation_effect_on_upper_bound(
    abstract_shape: MN_BaB_Shape,
    layer: ReLU,
    previous_layer: Union[Linear, Conv2d],
    current_layer_lower_bounds: Tensor,
    current_layer_upper_bounds: Tensor,
    propagation_effect_mode: PropagationEffectMode,
    lower_bound_reduce_op: Callable[[Tensor, Tensor], Tensor],
) -> Tuple[Tensor, Tensor]:
    """Mirror of ``_compute_direct_and_propagation_effect_on_lower_bound`` for
    the propagated *upper* bound.

    Signs flip relative to the lower-bound variant: PRIMA/beta terms are
    subtracted, the intercept attaches to positive coefficients, and the
    direct effect is non-positive. Returns (direct_effect, propagation_effect).
    """
    assert abstract_shape.subproblem_state is not None
    subproblem_state = abstract_shape.subproblem_state
    assert subproblem_state.constraints.split_state is not None
    assert abstract_shape.ub is not None
    if (  # TODO: move this logic into PrimaConstraints (seems to be duplicated)
        subproblem_state.constraints.prima_constraints
        and layer_tag(layer)
        in subproblem_state.constraints.prima_constraints.prima_coefficients
        and subproblem_state.constraints.prima_constraints.prima_coefficients[
            layer_tag(layer)
        ][0].shape[-1]
        > 0
    ):
        # PRIMA constraints present: include their output/input contributions.
        (
            current_layer_prima_output_coefficients,
            current_layer_prima_input_coefficients,
            __,
        ) = subproblem_state.constraints.prima_constraints.prima_coefficients[
            layer_tag(layer)
        ]
        prima_parameters = abstract_shape.get_existing_parameters(
            key_prima_ub, layer_tag(layer)
        )
        prima_output_contribution = layer._multiply_prima_coefs_and_parameters(
            current_layer_prima_output_coefficients, prima_parameters
        )
        prima_input_contribution = layer._multiply_prima_coefs_and_parameters(
            current_layer_prima_input_coefficients, prima_parameters
        )
    else:
        assert not abstract_shape.uses_dependence_sets()
        assert isinstance(abstract_shape.ub.coef, Tensor)
        prima_output_contribution = torch.zeros_like(abstract_shape.ub.coef)
        prima_input_contribution = torch.zeros_like(abstract_shape.ub.coef)
    ub_coef_before_relaxation = abstract_shape.ub.coef - prima_output_contribution
    # Optimized slope for the upper-bound pass, and BaBSR slope/intercept.
    lb_slope = abstract_shape.get_existing_parameters(
        key_alpha_relu_ub, layer_tag(layer)
    )  # TODO: should this be key_alpha_relu_ub?
    ub_slope, ub_intercept = babsr_ratio_computation(
        current_layer_lower_bounds, current_layer_upper_bounds
    )
    (
        neg_ub_coef_before_relaxation,
        pos_ub_coef_before_relaxation,
    ) = get_neg_pos_comp(ub_coef_before_relaxation)
    beta_parameters = abstract_shape.get_existing_parameters(
        key_beta_ub, layer_tag(layer)
    )
    beta_contribution_shape = (abstract_shape.batch_size, 1, *layer.output_dim)
    beta_contribution = (
        beta_parameters
        * subproblem_state.constraints.split_state.split_constraints[layer_tag(layer)]
    ).view(beta_contribution_shape)
    # Coefficient after the relaxation; PRIMA/beta enter with negative sign.
    ub_coef = (
        (
            pos_ub_coef_before_relaxation * ub_slope
            + neg_ub_coef_before_relaxation * lb_slope
        )
        - prima_input_contribution
        - beta_contribution
    )
    neg_ub_coef, pos_ub_coef = get_neg_pos_comp(ub_coef)
    previous_layer_bias = _get_layer_bias(previous_layer, ub_coef.dim())
    (
        negative_coef_multiplier_before,
        positive_coef_multiplier_before,
        negative_coef_multiplier_neg_split,
        positive_coef_multiplier_neg_split,
        negative_coef_multiplier_pos_split,
        positive_coef_multiplier_pos_split,
    ) = _get_coef_multipliers(
        current_layer_lower_bounds,
        current_layer_upper_bounds,
        previous_layer_bias,
        propagation_effect_mode,
        for_lower_bound=False,
    )
    # Contribution flowing through the previous layer before any split.
    propagation_contribution_before = (
        neg_ub_coef * negative_coef_multiplier_before
        + pos_ub_coef * positive_coef_multiplier_before
    )
    # Case: neuron split to the negative side (coef reduces to -PRIMA-beta).
    neg_ub_coef_neg_split, pos_ub_coef_neg_split = get_neg_pos_comp(
        -prima_input_contribution - beta_contribution
    )
    propagation_contribution_neg_split = (
        neg_ub_coef_neg_split * negative_coef_multiplier_neg_split
        + pos_ub_coef_neg_split * positive_coef_multiplier_neg_split
    )
    # Case: neuron split to the positive side (identity passes coef through).
    neg_ub_coef_pos_split, pos_ub_coef_pos_split = get_neg_pos_comp(
        ub_coef_before_relaxation - prima_input_contribution - beta_contribution
    )
    propagation_contribution_pos_split = (
        neg_ub_coef_pos_split * negative_coef_multiplier_pos_split
        + pos_ub_coef_pos_split * positive_coef_multiplier_pos_split
    )
    propagation_effect_neg_split = (
        propagation_contribution_neg_split - propagation_contribution_before
    )
    propagation_effect_pos_split = (
        propagation_contribution_pos_split - propagation_contribution_before
    )
    # Only unstable (not yet split) neurons can be branched on.
    unstable_nodes_mask = (
        subproblem_state.constraints.split_state.split_constraints[layer_tag(layer)]
        == 0
    ).unsqueeze(1)
    propagation_effect = lower_bound_reduce_op(
        propagation_effect_neg_split, propagation_effect_pos_split
    ) * (unstable_nodes_mask)
    # Removing the intercept can only decrease the upper bound.
    direct_effect = -1 * (
        pos_ub_coef_before_relaxation * ub_intercept * unstable_nodes_mask
    )
    assert (direct_effect <= 0).all()
    return direct_effect, propagation_effect
def _get_coef_multipliers(
    layer_lower_bounds: Tensor,
    layer_upper_bounds: Tensor,
    prev_layer_bias: Tensor,
    propagation_effect_mode: PropagationEffectMode,
    for_lower_bound: bool = True,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Return the six multipliers applied to negative/positive coefficients in
    the (before, neg-split, pos-split) propagation-effect cases.

    ``none`` yields all zeros, ``bias`` uses the previous layer's bias for
    every case, and ``intermediate_concretization`` concretizes with the
    layer's bounds (roles of neg/pos swap for upper bounds).
    """
    zero = torch.zeros_like(layer_lower_bounds)
    if propagation_effect_mode == PropagationEffectMode.none:
        return (zero,) * 6
    if propagation_effect_mode == PropagationEffectMode.bias:
        return (prev_layer_bias,) * 6
    if propagation_effect_mode == PropagationEffectMode.intermediate_concretization:
        neg_before, pos_before = layer_upper_bounds, layer_lower_bounds
        neg_on_neg_split, pos_on_neg_split = zero, layer_lower_bounds
        neg_on_pos_split, pos_on_pos_split = layer_upper_bounds, zero
        if for_lower_bound:
            return (
                neg_before,
                pos_before,
                neg_on_neg_split,
                pos_on_neg_split,
                neg_on_pos_split,
                pos_on_pos_split,
            )
        # For upper bounds the negative/positive roles are swapped.
        return (
            pos_before,
            neg_before,
            pos_on_neg_split,
            neg_on_neg_split,
            pos_on_pos_split,
            neg_on_pos_split,
        )
    raise RuntimeError(
        'Unexpected propagation effect mode option, allowed options are "none", "bias" and "intermediate_concretization".'
    )
def _compute_contribution_fractions_to_layer_bounds(
    backsubstitution_config: BacksubstitutionConfig,
    network: Sequential,
    query_id: QueryTag,
    starting_layer_index: int,
    subproblem_state: SubproblemState,  # TODO: can this be made readonly?
    batch_size: int,
    propagation_effect_mode: PropagationEffectMode,
    lower_bound_reduce_op: Callable[[Tensor, Tensor], Tensor],
) -> Dict[LayerTag, Tensor]:
    """For the ReLU layer at ``starting_layer_index``, estimate what fraction
    of its triangle-relaxation area each earlier ReLU layer's split would
    remove.

    A fresh identity query over the starting layer's input is backsubstituted
    towards the network input; at every earlier ReLU the direct+propagation
    effects on both bounds are converted into a relative area change.
    Returns a dict mapping each earlier layer's tag to those fractions.
    """
    assert query_tag(network.layers[starting_layer_index]) == query_id
    assert isinstance(network.layers[starting_layer_index], ReLU)
    layer_shape = network.layers[starting_layer_index - 1].output_dim
    device = subproblem_state.device
    intermediate_bounds_to_recompute = None  # (full set of queries)
    # Identity query: one row per neuron of the starting layer's input.
    query_coef = get_output_bound_initial_query_coef(
        dim=layer_shape,
        batch_size=batch_size,
        intermediate_bounds_to_recompute=intermediate_bounds_to_recompute,
        use_dependence_sets=False,  # TODO: why False? (does it even matter for branching score computations?)
        device=device,
        dtype=None,  # TODO: should this be something else?
    )
    # subproblem_state_for_bounds=subproblem_state
    # if use_dependence_sets_for_current_bounds:
    #    subproblem_state_for_bounds=subproblem_state.without_prima() # TODO: get rid of this?
    abstract_shape = MN_BaB_Shape(
        query_id=query_id,
        query_prev_layer=None,  # (not tracked)
        queries_to_compute=intermediate_bounds_to_recompute,
        lb=AffineForm(query_coef),
        ub=AffineForm(query_coef),
        unstable_queries=None,  # (not tracked)
        subproblem_state=subproblem_state,
    )
    contribution_fraction_to_starting_layer = {}
    starting_layer_lower_bounds = (
        subproblem_state.constraints.layer_bounds.intermediate_bounds[
            layer_from_query_tag(query_id)
        ][0]
    )
    starting_layer_upper_bounds = (
        subproblem_state.constraints.layer_bounds.intermediate_bounds[
            layer_from_query_tag(query_id)
        ][1]
    )
    assert abstract_shape.subproblem_state is subproblem_state
    assert subproblem_state.constraints.split_state is not None
    # Walk from just below the starting layer back to the network input.
    for layer_idx, layer in reversed(
        list(enumerate(network.layers[:starting_layer_index]))
    ):
        if isinstance(layer, ReLU):
            current_layer_lower_bounds = (
                subproblem_state.constraints.layer_bounds.intermediate_bounds[
                    layer_tag(layer)
                ][0]
            )
            current_layer_upper_bounds = (
                subproblem_state.constraints.layer_bounds.intermediate_bounds[
                    layer_tag(layer)
                ][1]
            )
            assert abstract_shape.ub is not None
            assert isinstance(abstract_shape.ub.coef, Tensor)
            # Mask: contributing neuron AND affected starting-layer neuron
            # must both still be splittable (split_constraints == 0).
            starting_and_affected_node_unstable_mask = (
                subproblem_state.constraints.split_state.split_constraints[
                    layer_tag(layer)
                ]
                == 0
            ).unsqueeze(1) * _reshape_layer_values(
                subproblem_state.constraints.split_state.split_constraints[
                    layer_from_query_tag(query_id)
                ]
                == 0,
                len(abstract_shape.ub.coef.shape),
            )
            previous_layer = network.layers[layer_idx - 1]
            (
                lb_direct_effect,
                lb_propagation_effect,
            ) = _compute_direct_and_propagation_effect_on_lower_bound(
                abstract_shape,
                layer,
                previous_layer,
                current_layer_lower_bounds,
                current_layer_upper_bounds,
                propagation_effect_mode,
                lower_bound_reduce_op,
            )
            lb_contribution = (
                -1
                * (lb_direct_effect + lb_propagation_effect)
                * starting_and_affected_node_unstable_mask
            )
            (
                ub_direct_effect,
                ub_propagation_effect,
            ) = _compute_direct_and_propagation_effect_on_upper_bound(
                abstract_shape,
                layer,
                previous_layer,
                current_layer_lower_bounds,
                current_layer_upper_bounds,
                propagation_effect_mode,
                _get_opposite_operation(lower_bound_reduce_op),
            )
            ub_contribution = (
                -1
                * (ub_direct_effect + ub_propagation_effect)
                * starting_and_affected_node_unstable_mask
            )
            assert lb_contribution.shape == ub_contribution.shape
            assert lb_contribution.shape[1] == np.prod(layer_shape)
            assert lb_contribution.shape[2:] == layer.output_dim
            contribution_fraction_to_starting_layer[
                layer_tag(layer)
            ] = _compute_triangle_relaxation_area_change(
                starting_layer_lower_bounds,
                starting_layer_upper_bounds,
                lb_contribution,
                ub_contribution,
            )
        abstract_shape = layer.backsubstitute(backsubstitution_config, abstract_shape)
    return contribution_fraction_to_starting_layer
def _reshape_layer_values(x: Tensor, number_of_dimensions_to_reshape_to: int) -> Tensor:
if number_of_dimensions_to_reshape_to == 5:
return x.flatten(start_dim=1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
elif number_of_dimensions_to_reshape_to == 3:
return x.flatten(start_dim=1).unsqueeze(-1)
else:
raise RuntimeError("Unexpected number of dimensions encountered.")
def _get_opposite_operation(
op: Callable[[Tensor, Tensor], Tensor]
) -> Callable[[Tensor, Tensor], Tensor]:
if op == torch.maximum:
return torch.minimum
elif op == torch.minimum:
return torch.maximum
else:
raise RuntimeError("Unknown reduce operation for branching")
def _compute_triangle_relaxation_area_change(
    layer_lower_bounds: Tensor,
    layer_upper_bounds: Tensor,
    lower_bound_contribution: Tensor,
    upper_bound_contribution: Tensor,
) -> Tensor:
    """Convert bound-contribution magnitudes into the fraction of the triangle
    relaxation area they remove.

    Each contribution is normalized by its bound (zero for stable nodes),
    clamped into [0, 1], and the two fractions are merged via
    inclusion-exclusion. Result is guaranteed to lie in [0, 1].
    """
    unstable_nodes_mask = _reshape_layer_values(
        (layer_lower_bounds < 0) & (layer_upper_bounds > 0),
        len(lower_bound_contribution.shape),
    )

    def _bound_fraction(contribution: Tensor, bounds: Tensor) -> Tensor:
        # Relative change of the corresponding bound; zero for stable nodes.
        raw = torch.where(
            unstable_nodes_mask,
            contribution / _reshape_layer_values(bounds, len(contribution.shape)),
            torch.zeros_like(lower_bound_contribution),
        )
        assert (raw >= 0).all()
        return raw.clamp(max=1)

    lb_contribution_fraction = _bound_fraction(
        lower_bound_contribution, layer_lower_bounds
    )
    ub_contribution_fraction = _bound_fraction(
        upper_bound_contribution, layer_upper_bounds
    )
    # Inclusion-exclusion over the two removed area fractions.
    contribution_to_triangle_relaxation_area = (
        lb_contribution_fraction
        + ub_contribution_fraction
        - lb_contribution_fraction * ub_contribution_fraction
    )
    assert (contribution_to_triangle_relaxation_area >= 0).all()
    assert (contribution_to_triangle_relaxation_area <= 1).all()
    return contribution_to_triangle_relaxation_area
def _compute_indirect_effect(
    contribution_fractions: Dict[LayerTag, Dict[LayerTag, Tensor]],
    score: Dict[LayerTag, Tensor],
    current_layer_id: LayerTag,
    expected_shape: Tuple[int, ...],
    device: torch.device,
) -> Tensor:
    """Sum, over source layers, their contribution fractions weighted by score.

    Returns zeros of expected_shape when the current layer receives no
    contributions.
    """
    indirect_score = torch.zeros(*expected_shape, device=device)
    fractions_into_layer = contribution_fractions.get(current_layer_id)
    if fractions_into_layer is not None:
        for source_layer_id, fractions in fractions_into_layer.items():
            weighted = fractions * _reshape_layer_values(
                score[source_layer_id], len(fractions.shape)
            )
            indirect_score += weighted.sum(1)
    return indirect_score
def _find_random_node_to_split(
    split_state: ReadonlySplitState, batch_index: int
) -> NodeTag:
    """Pick a uniformly random not-yet-split node, searching layers back to front."""
    # Iterate from the last layer towards the first; the first layer that still
    # has an unsplit node (split constraint == 0) provides the random pick.
    for layer_id in reversed(list(split_state.split_constraints.keys())):
        try:
            nodes_not_yet_split_in_layer = split_state.split_constraints[layer_id] == 0
            unstable_neuron_indices_in_layer = torch.nonzero(
                nodes_not_yet_split_in_layer[batch_index]
            )
            n_unstable_neurons = unstable_neuron_indices_in_layer.shape[0]
            # EAFP: if the layer has no candidates, randint(0, -1) raises
            # ValueError, which is caught below to advance to the next layer.
            random_unstable_neuron_index = random.randint(0, n_unstable_neurons - 1)
            random_unstable_neuron = tuple(
                unstable_neuron_indices_in_layer[random_unstable_neuron_index].tolist(),
            )
            break
        except ValueError:
            continue
    # NOTE(review): if every layer is exhausted the loop falls through without
    # binding random_unstable_neuron, and the return raises UnboundLocalError;
    # callers presumably guarantee a splittable node remains — verify.
    return NodeTag(layer=layer_id, index=random_unstable_neuron)
def _adjust_based_on_cost(
    scores: Dict[LayerTag, Tensor],
    backup_scores: Dict[LayerTag, Tensor],
    split_cost_by_layer: Dict[LayerTag, float],
) -> Tuple[Dict[LayerTag, Tensor], Dict[LayerTag, Tensor]]:
    """Divide both score maps in place by the per-layer split cost and return them."""
    for layer_id, layer_cost in split_cost_by_layer.items():
        assert layer_id in scores
        assert layer_id in backup_scores
        for score_map in (scores, backup_scores):
            score_map[layer_id] = score_map[layer_id] / layer_cost
    return scores, backup_scores
# adapted from: https://github.com/huanzhang12/alpha-beta-CROWN/blob/main/src/branching_heuristics.py
# commit hash: cdbcba0ea346ebd03d552023773829fe6e0822c7
def find_index_to_split_with_filtered_smart_branching(
    subproblem: ReadonlyVerificationSubproblem,
    network: AbstractNetwork,
    backsubstitution_config: BacksubstitutionConfig,
    query_coef: Tensor,
    split_cost_by_layer: Optional[Dict[LayerTag, float]],
    filtered_smart_branching_config: FilteredSmartBranchingConfig,
    input_lb: Tensor,
    input_ub: Tensor,
    batch_sizes: Sequence[int],
    recompute_intermediate_bounds_after_branching: bool,
    optimizer: MNBabOptimizer,
) -> NodeTag:
    """Pick the node to branch on using filtered smart branching (FSB).

    Cheap BaBSR-style scores and intercept scores preselect up to
    ``n_candidates`` nodes per layer; each candidate is then scored exactly by
    splitting it and bounding both resulting subproblems with DeepPoly. The
    best-scoring candidate (optionally cost-adjusted per layer) is returned.
    """
    assert (
        not subproblem.is_fully_split
    ), "Can't find a node to split for fully split subproblems."
    number_of_preselected_candidates_per_layer = (
        filtered_smart_branching_config.n_candidates
    )
    lower_bound_reduce_op = _get_lower_bound_reduce_op(
        filtered_smart_branching_config.reduce_op
    )
    device = next(network.parameters()).device
    dtype = input_lb.dtype
    # PRIMA constraints are dropped for the scoring passes.
    subproblem_state = subproblem.subproblem_state.without_prima().deep_copy_to(device)
    assert subproblem_state.constraints.split_state is not None
    batch_sizes_by_layer_id = {  # TODO: get rid of this hack, there must be a better way to get this information
        layer_id: batch_sizes[layer_index]
        for layer_index, layer_id in enumerate(
            subproblem_state.constraints.layer_bounds.intermediate_bounds.keys()
        )
    }
    batch_size = 1
    # A node is a split candidate iff it is unstable (lb < 0 < ub) and has not
    # been split yet (split constraint still 0).
    nodes_not_yet_split_mask = {  # TODO: make this a member function of Constraints?
        layer_id: (
            (
                subproblem_state.constraints.layer_bounds.intermediate_bounds[layer_id][
                    0
                ]
                < 0
            )
            & (
                subproblem_state.constraints.layer_bounds.intermediate_bounds[layer_id][
                    1
                ]
                > 0
            )
            & (layer_split_constraints == 0)
        ).to(dtype)
        for layer_id, layer_split_constraints in subproblem_state.constraints.split_state.split_constraints.items()
    }
    # Cheap per-node scores (plain slopes: no optimized slopes, no beta
    # contributions, no indirect effects).
    babsr_scores, intercept_tb = _compute_split_scores(
        backsubstitution_config=backsubstitution_config,
        query_coef=query_coef,
        network=network,
        subproblem_state=subproblem_state,
        batch_size=batch_size,
        device=device,
        use_optimized_slopes=False,
        use_beta_contributions=False,
        propagation_effect_mode=PropagationEffectMode.bias,
        use_indirect_effect=False,
        lower_bound_reduce_op=lower_bound_reduce_op,
        use_abs=True,
    )
    decision: List[NodeTag] = []
    for batch_index in range(batch_size):
        babsr_scores_of_batch_element = {
            k: babsr_scores[k][batch_index] for k in babsr_scores.keys()
        }
        intercept_tb_of_batch_element = {
            k: intercept_tb[k][batch_index] for k in intercept_tb.keys()
        }
        all_candidates: Dict[NodeTag, float] = {}
        for i, layer_id in enumerate(babsr_scores_of_batch_element.keys()):
            # Layers whose scores are numerically negligible are skipped.
            if (
                babsr_scores_of_batch_element[layer_id].max() <= 1e-4
                and intercept_tb_of_batch_element[layer_id].min() >= -1e-4
            ):
                print("{}th layer has no valid scores".format(i))
                continue
            # Preselect top-k candidates from both score types (union of both).
            topk_indices_from_babsr_scores = _get_indices_of_topk(
                babsr_scores_of_batch_element[layer_id],
                number_of_preselected_candidates_per_layer,
                largest=True,
            )
            topk_indices_from_intercept_tb = _get_indices_of_topk(
                intercept_tb_of_batch_element[layer_id],
                number_of_preselected_candidates_per_layer,
                largest=False,
            )
            unique_topk_indices = list(
                set(topk_indices_from_babsr_scores + topk_indices_from_intercept_tb)
            )
            layer_candidate_nodes_to_split = [
                NodeTag(layer=layer_id, index=candidate_index)
                for candidate_index in unique_topk_indices
                if nodes_not_yet_split_mask[layer_id][(batch_index, *candidate_index)]
            ]
            # Exact scores: actually split each candidate and bound both children.
            layer_candidate_scores = _compute_candidate_scores_for(
                layer_candidate_nodes_to_split,
                subproblem,
                optimizer,
                query_coef,
                network,
                input_lb,
                input_ub,
                lower_bound_reduce_op,
                batch_sizes_by_layer_id[layer_id],
                recompute_intermediate_bounds_after_branching,
            )
            for node_split, score in zip(
                layer_candidate_nodes_to_split, layer_candidate_scores
            ):
                all_candidates[node_split] = score
        if split_cost_by_layer is not None:
            all_candidates = _adjust_filtered_smart_branching_scores_based_on_cost(
                all_candidates, split_cost_by_layer
            )
        decision.append(max(all_candidates, key=lambda k: all_candidates[k]))
    assert len(decision) == batch_size
    return decision[0]
def _get_indices_of_topk(x: Tensor, k: int, largest: bool) -> List[Tuple[int, ...]]:
flattenend_indices = torch.topk(x.flatten(), k, largest=largest).indices.cpu()
indices_by_dimension = np.unravel_index(flattenend_indices, x.shape)
return [tuple(indices[i] for indices in indices_by_dimension) for i in range(k)]
def _compute_candidate_scores_for(
    candidate_nodes_to_split: Sequence[NodeTag],
    subproblem: ReadonlyVerificationSubproblem,
    optimizer: MNBabOptimizer,
    query_coef: Tensor,
    network: AbstractNetwork,
    input_lb: Tensor,
    input_ub: Tensor,
    lower_bound_reduce_op: Callable[[Tensor, Tensor], Tensor],
    batch_size_for_bounding: int,
    recompute_intermediate_bounds_after_branching: bool,
) -> Sequence[float]:
    """Score each candidate by bounding both children of its split with DeepPoly.

    Every candidate split yields two subproblems. They are bounded in batches
    of at most 2 * batch_size_for_bounding queries, and each pair of child
    lower bounds is reduced (via lower_bound_reduce_op) to one score per
    candidate, in the order of candidate_nodes_to_split.
    """
    device = input_lb.device
    # Two subproblems per candidate, stored consecutively.
    subproblems_to_bound = [
        split.deep_copy_to(device)
        for node_to_split in candidate_nodes_to_split
        for split in subproblem.split(
            node_to_split,
            recompute_intermediate_bounds_after_branching,
            network.layer_id_to_layer[node_to_split.layer],
        )
    ]
    max_queries = 2 * batch_size_for_bounding
    n_scores_to_compute = len(subproblems_to_bound)
    candidate_scores: List[float] = []
    offset = 0
    while offset < n_scores_to_compute:
        subproblem_batch = batch_subproblems(
            subproblems_to_bound[offset : offset + max_queries],
            reuse_single_subproblem=True,
        )
        # Repeat the query along the batch dimension to match this chunk.
        batch_repeats = min(offset + max_queries, n_scores_to_compute) - offset, *(
            [1] * (len(query_coef.shape) - 1)
        )
        # Unclear mypy behaviour
        (
            lower_bounds,
            __,
            __,
            __,
        ) = optimizer.bound_minimum_with_deep_poly(  # type:ignore [assignment]
            optimizer.backsubstitution_config,
            input_lb,
            input_ub,
            network,
            query_coef.to(device).repeat(batch_repeats),
            subproblem_state=subproblem_batch.subproblem_state,
            ibp_pass=False,
            reset_input_bounds=False,  # TODO check if this is the desired behaviour for FSB
        )
        assert isinstance(lower_bounds, Sequence)
        # Reduce consecutive child-bound pairs to one score per candidate.
        candidate_scores += _extract_candidate_scores(
            lower_bounds, reduce_op=lower_bound_reduce_op
        )
        offset += max_queries
    return candidate_scores
def _extract_candidate_scores(
subproblem_lower_bounds: Sequence[float],
reduce_op: Callable[[Tensor, Tensor], Tensor],
) -> Sequence[float]:
assert (
len(subproblem_lower_bounds) % 2 == 0
), "Expected an even number of lower bounds."
lower_bounds_in_pairs = [
(subproblem_lower_bounds[i], subproblem_lower_bounds[i + 1])
for i in range(0, len(subproblem_lower_bounds), 2)
]
return [
reduce_op(torch.tensor(score_pair[0]), torch.tensor(score_pair[1])).item()
for score_pair in lower_bounds_in_pairs
]
def _adjust_filtered_smart_branching_scores_based_on_cost(
    scores: Dict[NodeTag, float], split_cost_by_layer: Dict[LayerTag, float]
) -> Dict[NodeTag, float]:
    """Normalize each candidate's score in place by its layer's split cost."""
    for candidate in list(scores):
        candidate_layer = candidate.layer
        assert candidate_layer in split_cost_by_layer
        scores[candidate] = scores[candidate] / split_cost_by_layer[candidate_layer]
    return scores
def compute_split_cost_by_layer(
    network: AbstractNetwork,
    prima_coefficients: Dict[LayerTag, Tuple[Tensor, Tensor, Tensor]],
    recompute_intermediate_bounds_after_branching: bool,
) -> Dict[LayerTag, float]:
    """Estimate the relative cost of branching on each activation layer.

    Costs model the backsubstitution work a split triggers and are normalized
    so the most expensive layer has cost 1.0. Without intermediate-bound
    recomputation every activation layer costs the same.
    """
    if not recompute_intermediate_bounds_after_branching:
        return {layer_id: 1.0 for layer_id in network.get_activation_layer_ids()}
    cost_of_backsubstitution_operation_per_layer = (
        _estimated_cost_of_backsubstitution_operation_per_layer(
            network, prima_coefficients
        )
    )
    (
        cost_of_backsubstitution_pass_starting_at,
        __,
    ) = _estimated_cost_of_backsubstitution_pass_per_layer(
        network, cost_of_backsubstitution_operation_per_layer, 0.0
    )
    # The pass starting at the network output traverses every layer once.
    cost_of_backsubstitution_pass_starting_at[layer_tag(network)] = (
        sum(cost_of_backsubstitution_operation_per_layer.values()) * 1
    )
    cost_by_layer, __ = _estimated_cost_of_split_at_layer(
        network, cost_of_backsubstitution_pass_starting_at, 0.0
    )
    # Normalize so the most expensive layer has cost 1.0.
    largest_cost_by_layer = max(cost_by_layer.values())
    for layer_id, costs in cost_by_layer.items():
        cost_by_layer[layer_id] = costs / largest_cost_by_layer
    return cost_by_layer
def _estimated_cost_of_split_at_layer(
    network: Sequential,
    cost_of_backsubstitution_pass_per_layer: Dict[LayerTag, float],
    previously_accumulated_cost: float,
) -> Tuple[Dict[LayerTag, float], float]:
    """Accumulate per activation layer the pass costs a split there triggers.

    Layers are visited back to front; each activation layer's recorded cost is
    the running sum of the pass costs of all activation layers visited so far
    (those later in the network), including its own. Returns the per-layer map
    and the final accumulated cost.
    """
    cost_of_split_at_layer: Dict[LayerTag, float] = {}
    accumulated_cost = previously_accumulated_cost
    for layer in reversed(network.layers):
        if (
            isinstance(layer, ReLU)
            or isinstance(layer, Sigmoid)
            or isinstance(layer, Tanh)
        ):
            accumulated_cost += cost_of_backsubstitution_pass_per_layer[
                layer_tag(layer)
            ]
            cost_of_split_at_layer[layer_tag(layer)] = accumulated_cost
        elif isinstance(layer, Sequential):
            # Recurse into nested containers, threading the accumulator through.
            (
                nested_cost_of_split_at_layer,
                accumulated_cost,
            ) = _estimated_cost_of_split_at_layer(
                layer, cost_of_backsubstitution_pass_per_layer, accumulated_cost
            )
            cost_of_split_at_layer = {
                **cost_of_split_at_layer,
                **nested_cost_of_split_at_layer,
            }
        elif isinstance(layer, ResidualBlock):
            # Both paths start from the same accumulated cost; when recombining,
            # the shared baseline is subtracted once.
            (
                cost_of_split_at_layer_a,
                accumulated_cost_a,
            ) = _estimated_cost_of_split_at_layer(
                layer.path_a, cost_of_backsubstitution_pass_per_layer, accumulated_cost
            )
            (
                cost_of_split_at_layer_b,
                accumulated_cost_b,
            ) = _estimated_cost_of_split_at_layer(
                layer.path_b, cost_of_backsubstitution_pass_per_layer, accumulated_cost
            )
            cost_of_split_at_layer = {
                **cost_of_split_at_layer,
                **cost_of_split_at_layer_a,
                **cost_of_split_at_layer_b,
            }
            accumulated_cost = (
                accumulated_cost_a + accumulated_cost_b - accumulated_cost
            )
    return cost_of_split_at_layer, accumulated_cost
def _estimated_cost_of_backsubstitution_pass_per_layer(
    network: Sequential,
    cost_of_backsubstitution_operation_per_layer: Dict[LayerTag, float],
    previously_accumulated_cost: float,
) -> Tuple[Dict[LayerTag, float], float]:
    """Estimate per activation layer the cost of one backsubstitution pass
    starting at that layer.

    Traverses the network front to back while accumulating per-layer operation
    costs; a pass starting at an activation layer costs its query count (one
    per neuron) times the cost accumulated over all preceding layers. Returns
    the per-layer map and the accumulated cost after the last layer.
    """
    cost_of_backsubstitution_pass_per_layer: Dict[LayerTag, float] = {}
    accumulated_cost = previously_accumulated_cost
    for layer in network.layers:
        if (
            isinstance(layer, ReLU)
            or isinstance(layer, Sigmoid)
            or isinstance(layer, Tanh)
        ):
            # One backsubstitution query per neuron of the activation layer.
            number_of_queries = np.prod(layer.output_dim)
            cost_of_backsubstitution_pass_per_layer[layer_tag(layer)] = (
                number_of_queries * accumulated_cost
            )
            accumulated_cost += cost_of_backsubstitution_operation_per_layer[
                layer_tag(layer)
            ]
        elif isinstance(layer, Sequential):
            (
                nested_cost_of_backsubstitution_pass_per_layer,
                accumulated_cost,
            ) = _estimated_cost_of_backsubstitution_pass_per_layer(
                layer, cost_of_backsubstitution_operation_per_layer, accumulated_cost
            )
            cost_of_backsubstitution_pass_per_layer = {
                **cost_of_backsubstitution_pass_per_layer,
                **nested_cost_of_backsubstitution_pass_per_layer,
            }
        elif isinstance(layer, ResidualBlock):
            # Both residual paths continue from the same accumulated cost; the
            # shared baseline is subtracted once when recombining.
            (
                cost_of_backsubstitution_pass_per_layer_a,
                accumulated_cost_a,
            ) = _estimated_cost_of_backsubstitution_pass_per_layer(
                layer.path_a,
                cost_of_backsubstitution_operation_per_layer,
                accumulated_cost,
            )
            (
                cost_of_backsubstitution_pass_per_layer_b,
                accumulated_cost_b,
            ) = _estimated_cost_of_backsubstitution_pass_per_layer(
                layer.path_b,
                cost_of_backsubstitution_operation_per_layer,
                accumulated_cost,
            )
            cost_of_backsubstitution_pass_per_layer = {
                **cost_of_backsubstitution_pass_per_layer,
                **cost_of_backsubstitution_pass_per_layer_a,
                **cost_of_backsubstitution_pass_per_layer_b,
            }
            accumulated_cost = (
                accumulated_cost_a + accumulated_cost_b - accumulated_cost
            )
        else:
            accumulated_cost += cost_of_backsubstitution_operation_per_layer[
                layer_tag(layer)
            ]
    return cost_of_backsubstitution_pass_per_layer, accumulated_cost
def _estimated_cost_of_backsubstitution_operation_per_layer(
    network: Sequential,
    prima_coefficients: Dict[LayerTag, Tuple[Tensor, Tensor, Tensor]],
) -> Dict[LayerTag, float]:
    """Estimate the cost of a single backsubstitution step through each layer.

    Container layers (Sequential, ResidualBlock) are expanded recursively and
    only their children receive entries, mirroring the handling in the sibling
    ``_estimated_cost_of_*`` helpers.
    """
    cost_of_backsubstitution_operation_per_layer: Dict[LayerTag, float] = {}
    for layer in network.layers:
        if isinstance(layer, Sequential):
            nested_cost_of_backsubstitution_operation_per_layer = (
                _estimated_cost_of_backsubstitution_operation_per_layer(
                    layer, prima_coefficients
                )
            )
            cost_of_backsubstitution_operation_per_layer = {
                **cost_of_backsubstitution_operation_per_layer,
                **nested_cost_of_backsubstitution_operation_per_layer,
            }
        # BUGFIX: this was a plain `if`, so a Sequential layer also fell through
        # to the `else` branch below and additionally got an aggregate entry
        # under its own tag, double-counting its children in sum()-based totals
        # (see compute_split_cost_by_layer).
        elif isinstance(layer, ResidualBlock):
            cost_of_backsubstitution_operation_per_layer_a = (
                _estimated_cost_of_backsubstitution_operation_per_layer(
                    layer.path_a, prima_coefficients
                )
            )
            cost_of_backsubstitution_operation_per_layer_b = (
                _estimated_cost_of_backsubstitution_operation_per_layer(
                    layer.path_b, prima_coefficients
                )
            )
            cost_of_backsubstitution_operation_per_layer = {
                **cost_of_backsubstitution_operation_per_layer,
                **cost_of_backsubstitution_operation_per_layer_a,
                **cost_of_backsubstitution_operation_per_layer_b,
            }
        else:
            cost_of_backsubstitution_operation_per_layer[
                layer_tag(layer)
            ] = _estimated_cost_of_backsubstitution_operation(layer, prima_coefficients)
    return cost_of_backsubstitution_operation_per_layer
def _estimated_cost_of_backsubstitution_operation(
    layer: AbstractModule,
    prima_coefficients: Dict[LayerTag, Tuple[Tensor, Tensor, Tensor]],
) -> float:
    """Rough cost of backsubstituting a single query through one layer.

    Layer kinds without an explicit rule below are treated as free (0.0).
    """
    if isinstance(layer, ReLU):
        # One term per neuron, plus any PRIMA constraints attached to the layer.
        n_prima_constraints = 0
        if layer_tag(layer) in prima_coefficients:
            n_prima_constraints += prima_coefficients[layer_tag(layer)][0].shape[2]
        return np.prod(layer.output_dim) + n_prima_constraints
    if isinstance(layer, Conv2d):
        # Each output neuron touches kernel_size^2 inputs.
        kernel_extent = layer.kernel_size[0]
        return np.prod(layer.output_dim) * kernel_extent * kernel_extent
    if isinstance(layer, Linear):
        return np.prod(layer.weight.shape)
    if isinstance(layer, BatchNorm2d):
        return np.prod(layer.input_dim)
    if isinstance(layer, ResidualBlock):
        # Both paths plus the merge over the block's outputs.
        path_cost = _estimated_cost_of_backsubstitution_operation(
            layer.path_a, prima_coefficients
        ) + _estimated_cost_of_backsubstitution_operation(
            layer.path_b, prima_coefficients
        )
        return path_cost + np.prod(layer.output_dim)
    if isinstance(layer, Sequential):
        return sum(
            (
                _estimated_cost_of_backsubstitution_operation(sub, prima_coefficients)
                for sub in layer.layers
            ),
            0.0,
        )
    return 0.0
| 76,173 | 37.394153 | 126 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/utilities/build_tiny_conv_net.py | import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision # type: ignore[import]
from torchvision import transforms
from src.utilities.loading.network import mnist_conv_tiny
def seed_worker(worker_id: int) -> None:
    """DataLoader worker init: derive a seed from torch's initial seed and
    seed numpy and the stdlib random module with it."""
    derived_seed = torch.initial_seed() % 2**32
    np.random.seed(derived_seed)
    random.seed(derived_seed)
# roughly adapted from: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
if __name__ == "__main__":
    # Fix all RNG seeds for reproducible training.
    random.seed(0)
    np.random.seed(0)
    torch.manual_seed(0)
    batch_size = 4
    transform = transforms.Compose([transforms.ToTensor()])
    trainset = torchvision.datasets.MNIST(
        root="./data", train=True, download=True, transform=transform
    )
    trainloader = torch.utils.data.DataLoader(
        trainset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=2,
        worker_init_fn=seed_worker,
    )
    network = mnist_conv_tiny()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(network.parameters(), lr=0.003, momentum=0.9)
    for epoch in range(2):  # loop over the dataset multiple times
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = network(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
    print("Finished Training")
    # Persist the trained weights for later verification experiments.
    save_path = "./networks/mnist_convTiny.pyt"
    torch.save(network.state_dict(), save_path)
    # Evaluate plain top-1 accuracy on the MNIST test split.
    testset = torchvision.datasets.MNIST(
        root="./data", train=False, download=True, transform=transform
    )
    testloader = torch.utils.data.DataLoader(
        testset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=2,
        worker_init_fn=seed_worker,
    )
    correct = 0
    total = 0
    # since we're not training, we don't need to calculate the gradients for our outputs
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            # calculate outputs by running images through the network
            outputs = network(images)
            # the class with the highest energy is what we choose as prediction
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print(
        "Accuracy of the network on the 10000 test images: %f %%"
        % (100 * correct / total)
    )
mn-bab-SABR_ready | mn-bab-SABR_ready/src/utilities/attacks.py | from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
# import torch.nn as nn
import torch.optim as optim
from torch import Tensor
from torch.autograd import Variable
from src.abstract_layers.abstract_network import AbstractNetwork
# adapted from https://github.com/mnmueller/eran_vnncomp2021/blob/master/tf_verify/attacks.py
# commit hash: a8eae0e1e6e26081cdc9f57747c333630f04807a
def margin_loss(logits: Tensor, y: Tensor) -> Tensor:
    """Sum of (runner-up logit - true-class logit) margins over the batch.

    Previously hard-coded 10 classes and the "cuda" device; both are now
    derived from `logits`, so the loss works for any class count and on CPU.
    """
    num_classes = logits.shape[-1]
    logit_org = logits.gather(1, y.view(-1, 1))
    # Push the true class far down so argmax selects the strongest other class.
    true_class_mask = (
        torch.eye(num_classes, device=logits.device, dtype=logits.dtype)[y] * 9999
    )
    logit_target = logits.gather(1, (logits - true_class_mask).argmax(1, keepdim=True))
    loss = -logit_org + logit_target
    return torch.sum(loss)
def constraint_loss(
    logits: Tensor,
    constraints: Sequence[Sequence[Tuple[int, int, float]]],
    and_idx: Optional[Tensor] = None,
) -> Tensor:
    """Differentiable loss over a conjunction of disjunctive constraint clauses.

    Each term (i, j, _) of a clause contributes logits[:, i] - logits[:, j];
    an index of -1 drops the respective logit. With `and_idx` given, batch
    element b only accumulates the clause whose index equals and_idx[b];
    otherwise all clauses are summed. The negated accumulated value is
    returned.
    """
    total = torch.zeros_like(logits[:, 0])
    for clause_index, or_clause in enumerate(constraints):
        clause_value = torch.zeros_like(logits[:, 0])
        for left, right, _ in or_clause:
            if left == -1:
                clause_value = clause_value - logits[:, right]
            elif right == -1:
                clause_value = clause_value + logits[:, left]
            else:
                clause_value = clause_value + logits[:, left] - logits[:, right]
        if and_idx is None:
            total = total + clause_value
        else:
            total = total + torch.where(
                and_idx == clause_index, clause_value, torch.zeros_like(clause_value)
            )
    return -total
class step_lr_scheduler:
    """Step-decay schedule for the PGD step size.

    `interval` is either an int (decay by `gamma` every `interval` steps) or a
    sorted sequence of step boundaries (decay once per boundary passed).
    """

    def __init__(
        self,
        initial_step_size: float,
        gamma: float = 0.1,
        interval: Union[int, Sequence[int]] = 10,
    ):
        self.initial_step_size = initial_step_size
        self.gamma = gamma
        self.interval = interval
        self.current_step = 0

    def step(self, k: int = 1) -> None:
        """Advance the schedule by k steps."""
        self.current_step += k

    def get_lr(self) -> float:
        """Current step size: initial * gamma ** (number of decays so far)."""
        if isinstance(self.interval, int):
            num_decays = np.floor(self.current_step / self.interval)
        else:
            num_decays = len([b for b in self.interval if self.current_step >= b])
        return self.initial_step_size * self.gamma**num_decays
def torch_whitebox_attack(
    model: AbstractNetwork,
    device: torch.device,
    sample: Tensor,
    constraints: Sequence[Sequence[Tuple[int, int, float]]],
    specLB: Tensor,
    specUB: Tensor,
    input_nchw: bool = True,
    restarts: int = 1,
    stop_early: bool = True,
    ODI: bool = True,
) -> Tuple[Optional[List[np.ndarray]], Optional[np.ndarray]]:
    """Run PGD-based attacks against the constraint specification.

    Tries margin-loss PGD (optionally with output-diversified initialization),
    then a GAMA-loss pass, then a final single-restart margin pass without ODI.
    Returns (adversarial examples, worst input found); `adex` is None iff no
    attack succeeded.
    """
    if restarts == 0:
        return None, sample.detach().cpu().numpy()
    input_shape = list(sample.shape)
    # input_shape = ([1] if len(input_shape) in [3, 1] else []) + input_shape
    # Derive both layouts; bounds and sample are normalized to NCHW below.
    nhwc_shape = (
        input_shape[0:1] + input_shape[-2:] + input_shape[-3:-2]
        if input_nchw
        else input_shape
    )
    nchw_shape = (
        input_shape
        if input_nchw
        else input_shape[0:1] + input_shape[-1:] + input_shape[-3:-1]
    )
    specLB_t = specLB.reshape(nchw_shape if input_nchw else nhwc_shape).clone().detach()
    specUB_t = specUB.reshape(nchw_shape if input_nchw else nhwc_shape).clone().detach()
    sample = sample.reshape(input_shape if input_nchw else nhwc_shape)
    if len(input_shape) == 4:
        specLB_t = specLB_t.permute((0, 1, 2, 3) if input_nchw else (0, 3, 1, 2)).to(
            device
        )
        specUB_t = specUB_t.permute((0, 1, 2, 3) if input_nchw else (0, 3, 1, 2)).to(
            device
        )
        sample = sample.permute((0, 1, 2, 3) if input_nchw else (0, 3, 1, 2))
    X = Variable(sample, requires_grad=True).to(device)
    # Output diversification is skipped for very small input spaces.
    if np.prod(input_shape) < 10 or not ODI:
        ODI_num_steps = 0
    else:
        ODI_num_steps = 10
    adex, worst_x = _pgd_whitebox(
        model,
        X,
        constraints,
        specLB_t,
        specUB_t,
        device,
        lossFunc="margin",
        restarts=restarts,
        ODI_num_steps=ODI_num_steps,
        stop_early=stop_early,
    )
    # Fall back to the GAMA loss if the margin attack failed.
    if adex is None:
        adex, _ = _pgd_whitebox(
            model,
            X,
            constraints,
            specLB_t,
            specUB_t,
            device,
            lossFunc="GAMA",
            restarts=restarts,
            ODI_num_steps=ODI_num_steps,
            stop_early=stop_early,
        )
    # Last resort: one plain margin restart without ODI.
    if adex is None:
        adex, _ = _pgd_whitebox(
            model,
            X,
            constraints,
            specLB_t,
            specUB_t,
            device,
            lossFunc="margin",
            restarts=1,
            ODI_num_steps=0,
            stop_early=stop_early,
        )
    # Convert results back to the caller's channel layout.
    if len(input_shape) == 4:
        if adex is not None:
            adex = [adex[0][0].transpose((0, 1, 2) if input_nchw else (1, 2, 0))]
        if worst_x is not None:
            worst_x = worst_x.transpose((0, 1, 2) if input_nchw else (1, 2, 0))
    # Sanity: returned points must lie within the specification box.
    if adex is not None:
        assert (adex[0] >= specLB.cpu().numpy()).all() and (
            adex[0] <= specUB.cpu().numpy()
        ).all()
        print("Adex found via attack")
    else:
        assert (worst_x >= specLB.cpu().numpy()).all() and (
            worst_x <= specUB.cpu().numpy()
        ).all()
        print("No adex found via attack")
    return adex, worst_x
def _evaluate_cstr(
constraints: Sequence[Sequence[Tuple[int, int, float]]],
net_out: Tensor,
torch_input: bool = False,
) -> Union[Tensor, np.ndarray]:
if len(net_out.shape) <= 1:
net_out = net_out.reshape(1, -1)
n_samp = net_out.shape[0]
and_holds: Union[Tensor, np.ndarray] = (
torch.ones(n_samp, dtype=torch.bool, device=net_out.device)
if torch_input
else np.ones(n_samp, dtype=np.bool_)
)
for or_list in constraints:
or_holds: Union[Tensor, np.ndarray] = (
torch.zeros(n_samp, dtype=torch.bool, device=net_out.device)
if torch_input
else np.zeros(n_samp, dtype=np.bool_)
)
for cstr in or_list:
if cstr[0] == -1:
or_holds = or_holds.__or__(cstr[2] > net_out[:, cstr[1]])
elif cstr[1] == -1:
or_holds = or_holds.__or__(net_out[:, cstr[0]] > cstr[2])
else:
or_holds = or_holds.__or__(
net_out[:, cstr[0]] - net_out[:, cstr[1]] > cstr[2]
)
if or_holds.all():
break
and_holds = and_holds.__and__(or_holds)
if not and_holds.any():
break
return and_holds
def _translate_constraints_to_label(
GT_specs: Sequence[Sequence[Sequence[Tuple[int, int, float]]]]
) -> List[Optional[int]]:
labels = []
for and_list in GT_specs:
label = None
for or_list in and_list:
if len(or_list) > 1:
label = None
break
if label is None:
label = or_list[0][0]
elif label != or_list[0][0]:
label = None
break
labels.append(label)
return labels
def _pgd_whitebox(
    model: AbstractNetwork,
    X: Tensor,
    constraints: Sequence[Sequence[Tuple[int, int, float]]],
    specLB: Tensor,
    specUB: Tensor,
    device: torch.device,
    num_steps: int = 50,
    step_size: float = 0.2,
    ODI_num_steps: int = 10,
    ODI_step_size: float = 1.0,
    batch_size: int = 50,
    lossFunc: str = "margin",
    restarts: int = 1,
    stop_early: bool = True,
) -> Tuple[Optional[List[np.ndarray]], Optional[np.ndarray]]:
    """Batched projected-gradient attack on the constraints within [specLB, specUB].

    Runs `restarts` rounds of PGD with `batch_size` random starts each,
    optionally preceded by ODI_num_steps of output-diversified initialization.
    Returns ([adex], None) as soon as a constraint violation is found (with
    stop_early), otherwise (None, worst input seen by loss value).
    """
    out_X = model(X).detach()
    worst_x: Optional[np.ndarray] = None
    best_loss = -np.inf
    for _ in range(restarts):
        # Batch of identical samples, then randomly perturbed within the box.
        X_pgd: Tensor = Variable(
            X.data.repeat((batch_size,) + (1,) * (X.dim() - 1)), requires_grad=True
        ).to(device)
        randVector_: Tensor = torch.ones_like(model(X_pgd)).uniform_(
            -1, 1
        )  # torch.FloatTensor(*model(X_pgd).shape).uniform_(-1.,1.).to(device)
        random_noise: Tensor = torch.ones_like(X_pgd).uniform_(-0.5, 0.5) * (
            specUB - specLB
        )  # torch.FloatTensor(*X_pgd.shape).uniform_(-0.5, 0.5).to(device)*(specUB-specLB)
        X_pgd = Variable(
            torch.minimum(torch.maximum(X_pgd.data + random_noise, specLB), specUB),
            requires_grad=True,
        )
        # Per-pixel step scale: half the specification box width.
        lr_scale: Tensor = (specUB - specLB) / 2
        lr_scheduler = step_lr_scheduler(
            step_size,
            gamma=0.1,
            interval=[
                np.ceil(0.5 * num_steps),
                np.ceil(0.8 * num_steps),
                np.ceil(0.9 * num_steps),
            ],
        )
        gama_lambda = 10.0
        for i in range(ODI_num_steps + num_steps + 1):
            opt = optim.SGD([X_pgd], lr=1e-3)
            opt.zero_grad()
            with torch.enable_grad():
                out = model(X_pgd)
                cstrs_hold = _evaluate_cstr(constraints, out.detach(), torch_input=True)
                assert isinstance(cstrs_hold, Tensor)
                # Any sample violating the constraints is a finished adex.
                if stop_early and not cstrs_hold.all():
                    adv_idx = int((~cstrs_hold.cpu()).nonzero(as_tuple=False)[0].item())
                    adex_tensor: Tensor = X_pgd[adv_idx : adv_idx + 1]
                    assert not _evaluate_cstr(
                        constraints, model(adex_tensor), torch_input=True
                    )[0], f"{model(adex_tensor)},{constraints}"
                    # assert (specLB <= adex).all() and (specUB >= adex).all()
                    # print("Adex found via attack")
                    return [adex_tensor.detach().cpu().numpy()], None
                if i == ODI_num_steps + num_steps:
                    # print("No adex found via attack")
                    break
                if i < ODI_num_steps:
                    # ODI phase: maximize a random direction in output space.
                    loss = (out * randVector_).sum()
                elif lossFunc == "margin":
                    # Spread batch elements roughly evenly over the clauses.
                    and_idx = np.arange(len(constraints)).repeat(
                        np.floor(batch_size / len(constraints))
                    )
                    and_idx = torch.tensor(
                        np.concatenate(
                            [and_idx, np.arange(batch_size - len(and_idx))], axis=0
                        )
                    ).to(device)
                    loss = constraint_loss(out, constraints, and_idx=and_idx).sum()
                elif lossFunc == "GAMA":
                    and_idx = np.arange(len(constraints)).repeat(
                        np.floor(batch_size / len(constraints))
                    )
                    and_idx = torch.tensor(
                        np.concatenate(
                            [and_idx, np.arange(batch_size - len(and_idx))], axis=0
                        )
                    ).to(device)
                    out = torch.softmax(out, 1)
                    # GAMA: constraint loss plus a decaying quadratic term
                    # anchoring outputs to the clean prediction out_X.
                    loss = (
                        constraint_loss(out, constraints, and_idx=and_idx)
                        + (gama_lambda * (out_X - out) ** 2).sum(dim=1)
                    ).sum()
                    gama_lambda *= 0.9
            # Track the highest-loss input seen across all steps and restarts.
            max_loss = torch.max(loss).item()
            if max_loss > best_loss:
                best_loss = max_loss
                worst_x = X_pgd[torch.argmax(loss)].detach().cpu().numpy()
            loss.backward()
            # Signed-gradient ascent step, scaled per pixel by lr_scale.
            if i < ODI_num_steps:
                eta = ODI_step_size * lr_scale * X_pgd.grad.data.sign()
            else:
                eta = lr_scheduler.get_lr() * lr_scale * X_pgd.grad.data.sign()
            lr_scheduler.step()
            # Project back into the specification box.
            X_pgd = Variable(
                torch.minimum(torch.maximum(X_pgd.data + eta, specLB), specUB),
                requires_grad=True,
            )
    return None, worst_x
| 11,760 | 32.795977 | 93 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/utilities/loading/network.py | import gzip
import typing
from os.path import exists
from typing import List, Optional, Sequence, Tuple, Type
import onnx # type: ignore[import]
import torch
from bunch import Bunch # type: ignore[import]
import numpy as np
from torch import nn as nn
from src.concrete_layers.basic_block import BasicBlock
from src.concrete_layers.pad import Pad
from src.utilities.onnx_loader import ConvertModel
def lecture_network_small() -> nn.Sequential:
    """Two-layer toy network with hand-set deterministic weights."""
    hidden = nn.Linear(in_features=2, out_features=2)
    output = nn.Linear(in_features=2, out_features=1)
    net = nn.Sequential(hidden, nn.ReLU(), output)
    # First layer: (x0 + x1, x0 - x1), zero bias.
    hidden.weight.data = torch.ones_like(hidden.weight.data)
    hidden.weight.data[1, 1] = -1.0
    hidden.bias.data = torch.zeros_like(hidden.bias.data)
    # Output: sum of both activations, shifted by -0.5.
    output.weight.data = torch.ones_like(output.weight.data)
    output.bias.data[0] = -0.5
    return net
def lecture_network() -> nn.Sequential:
    """Three-layer toy network with hand-set deterministic weights."""
    first = nn.Linear(in_features=2, out_features=2)
    second = nn.Linear(in_features=2, out_features=2)
    third = nn.Linear(in_features=2, out_features=2)
    net = nn.Sequential(first, nn.ReLU(), second, nn.ReLU(), third)
    # Layer 1: (x0 + x1, x0 - x1), zero bias.
    first.weight.data = torch.ones_like(first.weight.data)
    first.weight.data[1, 1] = -1.0
    first.bias.data = torch.zeros_like(first.bias.data)
    # Layer 2: (h0 + h1 - 0.5, h0 - h1).
    second.weight.data = torch.ones_like(second.weight.data)
    second.weight.data[1, 1] = -1.0
    second.bias.data = torch.tensor([-0.5, 0])
    # Layer 3: (-g0 + g1 + 3, g1).
    third.weight.data = torch.ones_like(third.weight.data)
    third.weight.data[0, 0] = -1.0
    third.weight.data[1, 0] = 0
    third.bias.data = torch.tensor([3.0, 0])
    return net
def mnist_conv_tiny() -> nn.Sequential:
    """Tiny MNIST ConvNet: two strided conv+ReLU stages and a 50-unit FC head."""
    layers: List[nn.Module] = [
        nn.Conv2d(in_channels=1, out_channels=4, kernel_size=4, stride=4),
        nn.ReLU(),
        nn.Conv2d(in_channels=4, out_channels=8, kernel_size=4, stride=2),
        nn.ReLU(),
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(in_features=8 * 2 * 2, out_features=50),
        nn.ReLU(),
        nn.Linear(in_features=50, out_features=10),
    ]
    return nn.Sequential(*layers)
def mnist_conv_small() -> nn.Sequential:
    """Small MNIST ConvNet: two strided conv+ReLU stages and a 100-unit FC head."""
    feature_layers = [
        nn.Conv2d(in_channels=1, out_channels=16, kernel_size=4, stride=2),
        nn.ReLU(),
        nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4, stride=2),
        nn.ReLU(),
    ]
    head_layers = [
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(in_features=32 * 5 * 5, out_features=100),
        nn.ReLU(),
        nn.Linear(in_features=100, out_features=10),
    ]
    return nn.Sequential(*feature_layers, *head_layers)
def mnist_conv_sigmoid_small() -> nn.Sequential:
    """Small MNIST ConvNet with sigmoid activations throughout."""
    feature_layers = [
        nn.Conv2d(in_channels=1, out_channels=16, kernel_size=4, stride=2),
        nn.Sigmoid(),
        nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4, stride=2),
        nn.Sigmoid(),
    ]
    head_layers = [
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(in_features=32 * 5 * 5, out_features=100),
        nn.Sigmoid(),
        nn.Linear(in_features=100, out_features=10),
    ]
    return nn.Sequential(*feature_layers, *head_layers)
def mnist_conv_big() -> nn.Sequential:
    """Big MNIST ConvNet: four padded conv+ReLU stages and a 512-512-10 FC head."""
    layers: List[nn.Module] = []
    # (in_channels, out_channels, kernel_size, stride); all stages use padding=1.
    conv_specs = [
        (1, 32, 3, 1),
        (32, 32, 4, 2),
        (32, 64, 3, 1),
        (64, 64, 4, 2),
    ]
    for in_ch, out_ch, kernel, stride in conv_specs:
        layers.append(
            nn.Conv2d(
                in_channels=in_ch,
                out_channels=out_ch,
                kernel_size=kernel,
                stride=stride,
                padding=1,
            )
        )
        layers.append(nn.ReLU())
    layers += [
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(in_features=64 * 7 * 7, out_features=512),
        nn.ReLU(),
        nn.Linear(in_features=512, out_features=512),
        nn.ReLU(),
        nn.Linear(in_features=512, out_features=10),
    ]
    return nn.Sequential(*layers)
def mnist_conv_super() -> nn.Sequential:
    """Super-sized MNIST ConvNet: four unpadded stride-1 conv+ReLU stages, 512-512-10 head."""
    layers: List[nn.Module] = []
    # (in_channels, out_channels, kernel_size); all stages use stride=1, padding=0.
    conv_specs = [
        (1, 32, 3),
        (32, 32, 4),
        (32, 64, 3),
        (64, 64, 4),
    ]
    for in_ch, out_ch, kernel in conv_specs:
        layers.append(
            nn.Conv2d(
                in_channels=in_ch,
                out_channels=out_ch,
                kernel_size=kernel,
                stride=1,
                padding=0,
            )
        )
        layers.append(nn.ReLU())
    layers += [
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(in_features=64 * 18 * 18, out_features=512),
        nn.ReLU(),
        nn.Linear(in_features=512, out_features=512),
        nn.ReLU(),
        nn.Linear(in_features=512, out_features=10),
    ]
    return nn.Sequential(*layers)
def mnist_a_b(a: int, b: int) -> nn.Sequential:
    """MNIST MLP: `a` ReLU hidden layers of width `b`; note the trailing ReLU
    after the 10-way output layer (expects flattened 784-dim input)."""
    modules: List[nn.Module] = [nn.Linear(28 * 28, b), nn.ReLU()]
    for _ in range(a - 1):
        modules += [nn.Linear(b, b), nn.ReLU()]
    modules += [nn.Linear(b, 10), nn.ReLU()]
    return nn.Sequential(*modules)
def mnist_sig_a_b(a: int, b: int) -> nn.Sequential:
    """Sigmoid MNIST MLP: `a` hidden layers of width `b`, sigmoid after every
    layer including the 10-way output (expects flattened 784-dim input)."""
    modules: List[nn.Module] = [nn.Linear(28 * 28, b), nn.Sigmoid()]
    for _ in range(a - 1):
        modules += [nn.Linear(b, b), nn.Sigmoid()]
    modules += [nn.Linear(b, 10), nn.Sigmoid()]
    return nn.Sequential(*modules)
def mnist_vnncomp_a_b(a: int, b: int) -> nn.Sequential:
    """VNN-COMP-style MNIST MLP: Flatten, `a` ReLU hidden layers of width `b`,
    linear 10-way output (no final activation)."""
    modules: List[nn.Module] = [
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(28 * 28, b),
        nn.ReLU(),
    ]
    for _ in range(a - 1):
        modules += [nn.Linear(b, b), nn.ReLU()]
    modules.append(nn.Linear(b, 10))
    return nn.Sequential(*modules)
def cifar10_conv_small() -> nn.Sequential:
    """Small CIFAR10 ConvNet: two strided conv+ReLU stages and a 100-unit FC head."""
    layers: List[nn.Module] = [
        nn.Conv2d(in_channels=3, out_channels=16, kernel_size=4, stride=2),
        nn.ReLU(),
        nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4, stride=2),
        nn.ReLU(),
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(in_features=32 * 6 * 6, out_features=100),
        nn.ReLU(),
        nn.Linear(in_features=100, out_features=10),
    ]
    return nn.Sequential(*layers)
def cifar10_cnn_A() -> nn.Sequential:
    """CIFAR-10 'CNN-A': two stride-2 convs (16/32 channels) + 100-unit MLP head."""
    return nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 32, kernel_size=4, stride=2, padding=1),
        nn.ReLU(),
        nn.Flatten(start_dim=1, end_dim=-1),
        # Each strided conv halves the 32x32 input: final feature map is 8x8.
        nn.Linear(32 * 8 * 8, 100),
        nn.ReLU(),
        nn.Linear(100, 10),
    )
def cifar10_cnn_B() -> nn.Sequential:
    """CIFAR-10 'CNN-B': explicit padding, two strided convs, 250-unit MLP head.

    `Pad` is a project-local module; presumably its (1, 2, 1, 2) argument is
    (left, right, top, bottom) zero-padding as in `torch.nn.functional.pad`
    — TODO confirm against its definition.
    """
    return nn.Sequential(
        Pad((1, 2, 1, 2)),
        nn.Conv2d(3, 32, (5, 5), stride=2, padding=0),
        nn.ReLU(),
        nn.Conv2d(32, 128, (4, 4), stride=2, padding=1),
        nn.ReLU(),
        nn.Flatten(start_dim=1, end_dim=-1),
        # 128 channels on an 8x8 feature map.
        nn.Linear(8192, 250),
        nn.ReLU(),
        nn.Linear(250, 10),
    )
def mnist_cnn_A() -> nn.Sequential:
    """MNIST 'CNN-A': two stride-2 convs (16/32 channels) + 100-unit MLP head."""
    return nn.Sequential(
        nn.Conv2d(1, 16, (4, 4), stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 32, (4, 4), stride=2, padding=1),
        nn.ReLU(),
        nn.Flatten(start_dim=1, end_dim=-1),
        # 28x28 input halves twice to 7x7: 32 * 7 * 7 = 1568 features.
        nn.Linear(1568, 100),
        nn.ReLU(),
        nn.Linear(100, 10),
    )
def cifar10_base() -> nn.Sequential:
    """Small CIFAR-10 baseline: 8/16-channel strided convs + 100-unit MLP head."""
    feature_extractor = [
        nn.Conv2d(3, 8, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(8, 16, 4, stride=2, padding=1),
        nn.ReLU(),
    ]
    classifier = [
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(1024, 100),  # 16 channels * 8 * 8 spatial positions
        nn.ReLU(),
        nn.Linear(100, 10),
    ]
    return nn.Sequential(*feature_extractor, *classifier)
def cifar10_wide() -> nn.Sequential:
    """Wider CIFAR-10 baseline: 16/32-channel strided convs + 100-unit MLP head."""
    feature_extractor = [
        nn.Conv2d(3, 16, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 32, 4, stride=2, padding=1),
        nn.ReLU(),
    ]
    classifier = [
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(32 * 8 * 8, 100),
        nn.ReLU(),
        nn.Linear(100, 10),
    ]
    return nn.Sequential(*feature_extractor, *classifier)
def cifar10_deep() -> nn.Sequential:
    """Deeper CIFAR-10 baseline: four 8-channel convs (first/last strided) + MLP head."""
    modules: List[nn.Module] = []
    # (kernel_size, stride) per convolution; all use padding=1 and 8 output channels.
    for kernel, stride in ((4, 2), (3, 1), (3, 1), (4, 2)):
        in_channels = 3 if not modules else 8
        modules.append(nn.Conv2d(in_channels, 8, kernel, stride=stride, padding=1))
        modules.append(nn.ReLU())
    modules += [
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(8 * 8 * 8, 100),
        nn.ReLU(),
        nn.Linear(100, 10),
    ]
    return nn.Sequential(*modules)
def cifar10_2_255_simplified() -> nn.Sequential:
    """Simplified eps=2/255 CIFAR-10 net: three convs (last two strided) + 250-unit head."""
    modules: List[nn.Module] = []
    # (in_channels, out_channels, kernel_size, stride); all convs use padding=1.
    for c_in, c_out, k, s in ((3, 32, 3, 1), (32, 32, 4, 2), (32, 128, 4, 2)):
        modules.append(nn.Conv2d(c_in, c_out, k, stride=s, padding=1))
        modules.append(nn.ReLU())
    modules += [
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(128 * 8 * 8, 250),
        nn.ReLU(),
        nn.Linear(250, 10),
    ]
    return nn.Sequential(*modules)
def cifar10_8_255_simplified() -> nn.Sequential:
    """Simplified eps=8/255 CIFAR-10 net: two strided convs + 250-unit MLP head."""
    modules: List[nn.Module] = []
    # (in_channels, out_channels, kernel_size, stride, padding) per convolution.
    for c_in, c_out, k, s, p in ((3, 32, 5, 2, 2), (32, 128, 4, 2, 1)):
        modules.append(nn.Conv2d(c_in, c_out, k, stride=s, padding=p))
        modules.append(nn.ReLU())
    modules += [
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(128 * 8 * 8, 250),
        nn.ReLU(),
        nn.Linear(250, 10),
    ]
    return nn.Sequential(*modules)
def cifar10_conv_big() -> nn.Sequential:
    """CIFAR-10 'ConvBig': four convs alternating stride 1/2 + 3-layer MLP head."""
    # (in_channels, out_channels, kernel_size, stride); all convs use padding=1.
    conv_cfg = [(3, 32, 3, 1), (32, 32, 4, 2), (32, 64, 3, 1), (64, 64, 4, 2)]
    modules: List[nn.Module] = []
    for c_in, c_out, k, s in conv_cfg:
        modules.append(nn.Conv2d(c_in, c_out, kernel_size=k, stride=s, padding=1))
        modules.append(nn.ReLU())
    modules += [
        nn.Flatten(start_dim=1, end_dim=-1),
        # Two stride-2 convs reduce 32x32 to 8x8.
        nn.Linear(64 * 8 * 8, 512),
        nn.ReLU(),
        nn.Linear(512, 512),
        nn.ReLU(),
        nn.Linear(512, 10),
    ]
    return nn.Sequential(*modules)
def getShapeConv(
    in_shape: Tuple[int, int, int],
    conv_shape: Tuple[int, ...],
    stride: int = 1,
    padding: int = 0,
) -> Tuple[int, int, int]:
    """Return the (C, H, W) output shape of a convolution.

    `in_shape` is (channels, height, width); the first three entries of
    `conv_shape` are (out_channels, kernel_h, kernel_w). Stride and padding
    are assumed to be the same in both spatial dimensions.
    """
    _, height, width = in_shape
    out_channels, kernel_h, kernel_w = conv_shape[:3]
    out_h = 1 + int((height + 2 * padding - kernel_h) / stride)
    out_w = 1 + int((width + 2 * padding - kernel_w) / stride)
    return (out_channels, out_h, out_w)
class ResNet(nn.Sequential):
    """Sequential ResNet: stem conv, `num_stages` stages of `block`, classifier head.

    Args:
        block: residual block class (project-local `BasicBlock`).
        in_ch: input channels.
        num_stages: number of residual stages (each doubles the channel count).
        num_blocks: blocks per stage.
        num_classes: output dimension of the classifier.
        in_planes: channel count after the stem convolution.
        bn: insert BatchNorm after the stem (and pass through to the blocks).
        last_layer: classifier head type, "avg" (avg-pool + linear) or
            "dense" (flatten + 100-unit hidden layer + linear).
        in_dim: input spatial resolution (square).
        stride: per-layer strides; entry 0 is the stem, one entry per stage
            after that. Defaults to stride 2 everywhere.

    Raises:
        ValueError: if `last_layer` is not "avg" or "dense".
    """

    def __init__(
        self,
        block: Type[BasicBlock],
        in_ch: int = 3,
        num_stages: int = 1,
        num_blocks: int = 2,
        num_classes: int = 10,
        in_planes: int = 64,
        bn: bool = True,
        last_layer: str = "avg",
        in_dim: int = 32,
        stride: Optional[Sequence[int]] = None,
    ):
        layers: List[nn.Module] = []
        self.in_planes = in_planes
        if stride is None:
            # One stride entry for the stem plus one per stage.
            stride = (num_stages + 1) * [2]
        # Stem convolution; bias is redundant when batch norm follows.
        layers.append(
            nn.Conv2d(
                in_ch,
                self.in_planes,
                kernel_size=3,
                stride=stride[0],
                padding=1,
                bias=not bn,
            )
        )
        _, _, in_dim = getShapeConv(
            (in_ch, in_dim, in_dim), (self.in_planes, 3, 3), stride=stride[0], padding=1
        )
        if bn:
            layers.append(nn.BatchNorm2d(self.in_planes))
        layers.append(nn.ReLU())
        # Each stage doubles the channels and may downsample via its stride.
        for s in stride[1:]:
            block_layers, in_dim = self._make_layer(
                block,
                self.in_planes * 2,
                num_blocks,
                stride=s,
                bn=bn,
                kernel=3,
                in_dim=in_dim,
            )
            layers.append(block_layers)
        if last_layer == "avg":
            layers.append(nn.AvgPool2d(4))
            layers.append(nn.Flatten())
            layers.append(
                nn.Linear(
                    self.in_planes * (in_dim // 4) ** 2 * block.expansion, num_classes
                )
            )
        elif last_layer == "dense":
            layers.append(nn.Flatten())
            layers.append(
                nn.Linear(self.in_planes * block.expansion * in_dim**2, 100)
            )
            layers.append(nn.ReLU())
            layers.append(nn.Linear(100, num_classes))
        else:
            # Fail loudly instead of calling exit(), which would kill the
            # whole host process from library code.
            raise ValueError(f"last_layer type '{last_layer}' not supported!")
        super(ResNet, self).__init__(*layers)

    def _make_layer(
        self,
        block: Type[BasicBlock],
        planes: int,
        num_layers: int,
        stride: int,
        bn: bool,
        kernel: int,
        in_dim: int,
    ) -> Tuple[nn.Sequential, int]:
        """Build one stage of `num_layers` blocks; return (stage, output spatial dim)."""
        # Only the first block of a stage downsamples.
        strides = [stride] + [1] * (num_layers - 1)
        cur_dim: int = in_dim
        layers: List[nn.Module] = []
        for stride in strides:
            layer = block(self.in_planes, planes, stride, bn, kernel, in_dim=cur_dim)
            layers.append(layer)
            cur_dim = layer.out_dim
            layers.append(nn.ReLU())
        self.in_planes = planes * block.expansion
        return nn.Sequential(*layers), cur_dim
def resnet2b(bn: bool = False) -> nn.Sequential:
    """ResNet with one 2-block stage (8 stem planes) and a dense head."""
    return ResNet(
        BasicBlock,
        num_stages=1,
        num_blocks=2,
        in_planes=8,
        bn=bn,
        last_layer="dense",
    )
def resnet2b2(bn: bool = False, in_ch: int = 3, in_dim: int = 32) -> nn.Sequential:
    """ResNet with two 1-block stages (16 stem planes), stride 2 throughout."""
    return ResNet(
        BasicBlock,
        in_ch=in_ch,
        num_stages=2,
        num_blocks=1,
        in_planes=16,
        bn=bn,
        last_layer="dense",
        stride=[2, 2, 2],
    )
def resnet4b(bn: bool = False) -> nn.Sequential:
    """ResNet with two 2-block stages (8 stem planes) and a dense head."""
    return ResNet(
        BasicBlock,
        num_stages=2,
        num_blocks=2,
        in_planes=8,
        bn=bn,
        last_layer="dense",
    )
def resnet4b1(bn: bool = False) -> nn.Sequential:
    """Four 1-block stages; downsampling concentrated in the later layers."""
    return ResNet(
        BasicBlock,
        in_ch=3,
        num_stages=4,
        num_blocks=1,
        in_planes=16,
        bn=bn,
        last_layer="dense",
        stride=[1, 1, 2, 2, 2],
    )
def resnet4b2(bn: bool = False) -> nn.Sequential:
    """Four 1-block stages; downsampling concentrated in the early layers."""
    return ResNet(
        BasicBlock,
        in_ch=3,
        num_stages=4,
        num_blocks=1,
        in_planes=16,
        bn=bn,
        last_layer="dense",
        stride=[2, 2, 2, 1, 1],
    )
def resnet3b2(bn: bool = False) -> nn.Sequential:
    """Three 1-block stages (16 stem planes), stride 2 throughout."""
    return ResNet(
        BasicBlock,
        in_ch=3,
        num_stages=3,
        num_blocks=1,
        in_planes=16,
        bn=bn,
        last_layer="dense",
        stride=[2, 2, 2, 2],
    )
def resnet9b(bn: bool = False) -> nn.Sequential:
    """Three 3-block stages (16 stem planes) with a dense head."""
    return ResNet(
        BasicBlock,
        in_ch=3,
        num_stages=3,
        num_blocks=3,
        in_planes=16,
        bn=bn,
        last_layer="dense",
    )
def ConvMedBig(dataset, bn=False, bn2=False, device="cuda"):
    """'ConvMedBig' via myNet: three convs + one 250-unit hidden layer.

    NOTE: `myNet` multiplies `conv_widths` by 16 (scale_width default).
    """
    in_ch, in_dim, n_class = get_dataset_info(dataset)
    return myNet(
        device, dataset, n_class, in_dim, in_ch,
        conv_widths=[2, 2, 8],
        kernel_sizes=[3, 4, 4],
        linear_sizes=[250],
        strides=[1, 2, 2],
        paddings=[1, 1, 1],
        net_dim=None,
        bn=bn,
        bn2=bn2,
    )
def ConvMed(dataset, bn=False, bn2=False, device="cuda"):
    """'ConvMed' via myNet: two strided convs + one 100-unit hidden layer."""
    in_ch, in_dim, n_class = get_dataset_info(dataset)
    return myNet(
        device, dataset, n_class, in_dim, in_ch,
        conv_widths=[2, 4],
        kernel_sizes=[5, 4],
        linear_sizes=[100],
        strides=[2, 2],
        paddings=[2, 1],
        net_dim=None,
        bn=bn,
        bn2=bn2,
    )
def ConvMed2(dataset, bn=False, bn2=False, device="cuda"):
    """'ConvMed2' via myNet: like ConvMed but wider (250-unit hidden layer)."""
    in_ch, in_dim, n_class = get_dataset_info(dataset)
    return myNet(
        device, dataset, n_class, in_dim, in_ch,
        conv_widths=[2, 8],
        kernel_sizes=[5, 4],
        linear_sizes=[250],
        strides=[2, 2],
        paddings=[2, 1],
        net_dim=None,
        bn=bn,
        bn2=bn2,
    )
def ConvMed_tiny(dataset, bn=False, bn2=False, device="cuda"):
    """Tiny 'ConvMed' via myNet: two small convs + one 50-unit hidden layer."""
    in_ch, in_dim, n_class = get_dataset_info(dataset)
    return myNet(
        device, dataset, n_class, in_dim, in_ch,
        conv_widths=[1, 2],
        kernel_sizes=[5, 4],
        linear_sizes=[50],
        strides=[2, 2],
        paddings=[1, 1],
        net_dim=None,
        bn=bn,
        bn2=bn2,
    )
class myNet(nn.Module):
    """Configurable conv net: conv blocks, optional 1x1 conv / pooling, MLP head.

    The architecture is described by parallel per-layer lists
    (`conv_widths`, `kernel_sizes`, `paddings`, `strides`, `dilations`);
    length-mismatched lists are broadcast from their first entry. With
    `scale_width=True` (default) every conv width is multiplied by 16.
    `self.dims` records the (pre- and post-activation) shape after every
    layer, which downstream verification code relies on.
    """

    def __init__(self, device, dataset, n_class=10, input_size=32, input_channel=3, conv_widths=None,
                 kernel_sizes=None, linear_sizes=None, depth_conv=None, paddings=None, strides=None,
                 dilations=None, pool=False, net_dim=None, bn=False, bn2=False, max=False, scale_width=True):
        # NOTE(review): `max` shadows the builtin; kept for caller compatibility.
        super(myNet, self).__init__()
        # Fill in defaults for all per-layer hyperparameter lists.
        if kernel_sizes is None:
            kernel_sizes = [3]
        if conv_widths is None:
            conv_widths = [2]
        if linear_sizes is None:
            linear_sizes = [200]
        if paddings is None:
            paddings = [1]
        if strides is None:
            strides = [2]
        if dilations is None:
            dilations = [1]
        if net_dim is None:
            net_dim = input_size
        # Broadcast length-1 settings to one entry per conv layer.
        if len(conv_widths) != len(kernel_sizes):
            kernel_sizes = len(conv_widths) * [kernel_sizes[0]]
        if len(conv_widths) != len(paddings):
            paddings = len(conv_widths) * [paddings[0]]
        if len(conv_widths) != len(strides):
            strides = len(conv_widths) * [strides[0]]
        if len(conv_widths) != len(dilations):
            dilations = len(conv_widths) * [dilations[0]]
        self.n_class=n_class
        self.input_size=input_size
        self.input_channel=input_channel
        self.conv_widths=conv_widths
        self.kernel_sizes=kernel_sizes
        self.paddings=paddings
        self.strides=strides
        self.dilations = dilations
        self.linear_sizes=linear_sizes
        self.depth_conv=depth_conv
        self.net_dim = net_dim
        self.bn=bn
        self.bn2 = bn2
        self.max=max
        layers = []
        N = net_dim  # current spatial resolution
        n_channels = input_channel
        self.dims = [(n_channels,N,N)]
        for width, kernel_size, padding, stride, dilation in zip(conv_widths, kernel_sizes, paddings, strides, dilations):
            if scale_width:
                width *= 16
            # Standard conv output-size formula (with dilation).
            N = int(np.floor((N + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1))
            layers += [nn.Conv2d(n_channels, int(width), kernel_size, stride=stride, padding=padding, dilation=dilation)]
            if self.bn:
                layers += [nn.BatchNorm2d(int(width))]
            if self.max:
                layers += [nn.MaxPool2d(int(width))]
            # NOTE(review): nn.ReLU's first parameter is `inplace`; passing a
            # shape tuple makes it truthy (inplace ReLU). Presumably inherited
            # from an ERAN-style codebase — confirm this is intended.
            layers += [nn.ReLU((int(width), N, N))]
            n_channels = int(width)
            self.dims += 2*[(n_channels,N,N)]
        if depth_conv is not None:
            # Optional 1x1 "depth" convolution appended after the conv stack.
            layers += [nn.Conv2d(n_channels, depth_conv, 1, stride=1, padding=0),
                       nn.ReLU((n_channels, N, N))]
            n_channels = depth_conv
            self.dims += 2*[(n_channels,N,N)]
        if pool:
            # NOTE(review): nn.GlobalAvgPool2d is not a standard torch module;
            # presumably monkey-patched/provided elsewhere in the project.
            layers += [nn.GlobalAvgPool2d()]
            self.dims += 2 * [(n_channels, 1, 1)]
            N=1
        layers += [nn.Flatten()]
        N = n_channels * N ** 2  # flattened feature count
        self.dims += [(N,)]
        for width in linear_sizes:
            if width == 0:
                continue
            layers += [nn.Linear(int(N), int(width))]
            if self.bn2:
                layers += [nn.BatchNorm1d(int(width))]
            layers += [nn.ReLU(width)]
            N = width
            self.dims+=2*[(N,)]
        layers += [nn.Linear(N, n_class)]
        self.dims+=[(n_class,)]
        self.blocks = nn.Sequential(*layers)

    def forward(self, x):
        # The whole network lives in `self.blocks`.
        return self.blocks(x)
class CNN7(myNet):
    """7-layer CNN used for certified training: five 3x3 convs + two linear layers.

    Only the third convolution is strided; widths follow the (width1, width1,
    width2, width2, width2) pattern before myNet's x16 width scaling.
    """

    def __init__(self, device, dataset, n_class=10, input_size=32, input_channel=3, width1=4, width2=8, linear_size=512,
                 net_dim=None, bn=False, bn2=False):
        conv_widths = [width1, width1, width2, width2, width2]
        super(CNN7, self).__init__(
            device, dataset, n_class, input_size, input_channel,
            conv_widths=conv_widths,
            kernel_sizes=[3] * 5,
            linear_sizes=[linear_size],
            strides=[1, 1, 2, 1, 1],
            paddings=[1] * 5,
            net_dim=net_dim,
            bn=bn,
            bn2=bn2,
        )
def CNNA(dataset, bn, device="cuda"):
    """'CNN-A' via myNet: two strided 4x4 convs + one 100-unit hidden layer."""
    in_ch, in_dim, n_class = get_dataset_info(dataset)
    return myNet(
        device, dataset, n_class, in_dim, in_ch,
        conv_widths=[16, 32],
        kernel_sizes=[4, 4],
        linear_sizes=[100],
        strides=[2, 2],
        paddings=[1, 1],
        net_dim=None,
        bn=bn,
    )
def get_dataset_info(dataset):
    """Return (in_channels, spatial_dim, n_classes) for a known dataset name.

    Raises:
        ValueError: if `dataset` is not one of the supported names.
    """
    # Table lookup replaces a broken if/elif chain (the original restarted the
    # chain with a bare `if` at "svhn").
    dataset_info = {
        "mnist": (1, 28, 10),
        "emnist": (1, 28, 10),
        "fashionmnist": (1, 28, 10),
        "svhn": (3, 32, 10),
        "cifar10": (3, 32, 10),
        "tinyimagenet": (3, 56, 200),
    }
    try:
        return dataset_info[dataset]
    except KeyError:
        raise ValueError(f"Dataset {dataset} not available") from None
def freeze_network(network: nn.Module) -> None:
    """Disable gradient tracking for every parameter of *network* (in place)."""
    for parameter in network.parameters():
        parameter.requires_grad_(False)
def load_net_from(config: Bunch) -> nn.Module:
    """Load the network referenced by *config*, forwarding optional MLP sizing.

    `n_layers` / `n_neurons_per_layer` are only meaningful for the
    fully-connected architectures; if either is missing from the config,
    both fall back to None.
    """
    try:
        layer_count = config.n_layers
        layer_width = config.n_neurons_per_layer
    except AttributeError:
        layer_count = None
        layer_width = None
    return load_net(config.network_path, layer_count, layer_width)
def load_net(  # noqa: C901
    path: str, n_layers: Optional[int], n_neurons_per_layer: Optional[int]
) -> nn.Module:
    """Instantiate the architecture matching *path* and load its weights.

    ONNX checkpoints carry their own architecture and are returned directly;
    every other checkpoint is dispatched on substrings of its path. The order
    of the substring tests is load-bearing: more specific patterns
    ("resnet_2b2", "ConvMed_tiny", "mnist_sig") must be checked before their
    prefixes ("resnet_2b", "ConvMed", "mnist"). The returned network is
    frozen (no gradients on its parameters).
    """
    if path.split(".")[-1] in ["onnx", "gz"]:
        return load_onnx_model(path)[0]
    elif "mnist_sig" in path and "flattened" in path:
        assert n_layers is not None and n_neurons_per_layer is not None
        original_network = mnist_sig_a_b(n_layers, n_neurons_per_layer)
    elif "mnist" in path and "flattened" in path:
        assert n_layers is not None and n_neurons_per_layer is not None
        original_network = mnist_a_b(n_layers, n_neurons_per_layer)
    elif "mnist-net" in path:
        assert n_layers is not None and n_neurons_per_layer is not None
        original_network = mnist_vnncomp_a_b(n_layers, n_neurons_per_layer)
    elif "mnist_convSmallRELU__Point" in path:
        original_network = mnist_conv_small()
    elif "mnist_SIGMOID" in path:
        original_network = mnist_conv_sigmoid_small()
    elif "mnist_convBigRELU__DiffAI" in path:
        original_network = mnist_conv_big()
    elif "mnist_convSuperRELU__DiffAI" in path:
        original_network = mnist_conv_super()
    elif "cifar10_convSmallRELU__PGDK" in path:
        original_network = cifar10_conv_small()
    elif "cifar_cnn_a" in path:
        original_network = cifar10_cnn_A()
    elif "cifar_cnn_b" in path:
        original_network = cifar10_cnn_B()
    elif "mnist_cnn_a" in path:
        original_network = mnist_cnn_A()
    elif "cifar_base_kw" in path:
        original_network = cifar10_base()
    elif "cifar_wide_kw" in path:
        original_network = cifar10_wide()
    elif "cifar_deep_kw" in path:
        original_network = cifar10_deep()
    elif "cifar10_2_255_simplified" in path:
        original_network = cifar10_2_255_simplified()
    elif "cifar10_8_255_simplified" in path:
        original_network = cifar10_8_255_simplified()
    elif "cifar10_convBigRELU__PGD" in path:
        original_network = cifar10_conv_big()
    elif "resnet_2b2" in path:
        original_network = resnet2b2(bn="bn" in path)
    elif "resnet_2b" in path:
        original_network = resnet2b()
    elif "resnet_3b2" in path:
        original_network = resnet3b2(bn="bn" in path)
    elif "resnet_4b1" in path:
        original_network = resnet4b1(bn="bn" in path)
    elif "resnet_4b2" in path:
        original_network = resnet4b2(bn="bn" in path)
    elif "resnet_4b" in path:
        original_network = resnet4b()
    elif "resnet_9b_bn" in path:
        original_network = resnet9b(bn=True)
    elif "ConvMed_tiny" in path:
        if "cifar10" in path:
            original_network = ConvMed_tiny("cifar10", bn="bn" in path)
        elif "mnist" in path:
            original_network = ConvMed_tiny("mnist", bn="bn" in path)
    elif "ConvMedBig" in path:
        if "cifar10" in path:
            original_network = ConvMedBig("cifar10")
        elif "mnist" in path:
            original_network = ConvMedBig("mnist")
    elif "ConvMed2" in path:
        if "cifar10" in path:
            original_network = ConvMed2("cifar10")
        elif "mnist" in path:
            original_network = ConvMed2("mnist")
    elif "ConvMed" in path:
        if "cifar10" in path:
            original_network = ConvMed("cifar10")
        elif "mnist" in path:
            original_network = ConvMed("mnist")
    elif "SP1" in path:
        #original_network = CNNA("fashionmnist", False, "cuda")
        if "cifar10" in path:
            original_network = CNN7("cuda", "cifar10", input_size=32, input_channel=3, bn=True)
        elif "mnist" in path:
            original_network = CNN7("cuda", "mnist", input_size=28, input_channel=1, bn=True)
        elif "tiny" in path:
            original_network = CNN7("cuda", "tinyimagenet", input_size=56, input_channel=3, n_class=200, bn=True)
    elif "CNN7" in path:
        if "no_BN" in path:
            bn = False
        else:
            bn = True
        if "cifar10" in path:
            original_network = CNN7("cuda", "cifar10", input_size=32, input_channel=3, bn=bn)
        elif "mnist" in path:
            original_network = CNN7("cuda", "mnist", input_size=28, input_channel=1, bn=bn)
        elif "tiny" in path:
            original_network = CNN7("cuda", "tinyimagenet", input_size=56, input_channel=3, n_class=200, bn=bn)
        else:
            raise NotImplementedError(
                "The network specified in the configuration, could not be loaded."
            )
    else:
        raise NotImplementedError(
            "The network specified in the configuration, could not be loaded."
        )
    # Load the trained weights (optionally nested under "state_dict").
    state_dict = torch.load(path)
    if "state_dict" in state_dict.keys():
        state_dict = state_dict["state_dict"]
    original_network.load_state_dict(state_dict)
    # NOTE(review): this assumes the instantiated network wraps its layers in
    # a `.blocks` nn.Sequential (as myNet/CNN7 do) — confirm for the plain
    # nn.Sequential factories above.
    original_network = original_network.blocks
    freeze_network(original_network)
    return original_network
def load_onnx_model(path: str) -> Tuple[nn.Sequential, Tuple[int, ...], str]:
    """Load an ONNX network from *path*; return (torch net, input shape, input name)."""
    proto = load_onnx(path)
    return load_onnx_from_proto(proto, path)
def load_onnx_from_proto(
    onnx_model: onnx.ModelProto, path: Optional[str] = None
) -> Tuple[nn.Sequential, Tuple[int, ...], str]:
    """Convert an in-memory ONNX model to a torch nn.Sequential.

    Returns (network, input shape without batch dim, input tensor name).
    The optional *path* is only used to special-case a few benchmark networks
    whose ONNX files lack a usable input shape or contain extra layers.
    """
    # The last graph input is assumed to be the actual network input (earlier
    # entries can be initializers).
    onnx_input_dims = onnx_model.graph.input[-1].type.tensor_type.shape.dim
    inp_name = onnx_model.graph.input[-1].name
    onnx_shape = tuple(d.dim_value for d in onnx_input_dims[1:])
    pytorch_model = ConvertModel(onnx_model)
    if path is not None and "unet" in path:
        # UNet benchmark: drop everything from the Softmax on and flatten.
        pytorch_structured = pytorch_model.forward_trace_to_graph_unet()
        softmax_idx = [
            i for (i, layer) in enumerate(pytorch_structured) if "Softmax" in str(layer)
        ][0]
        pytorch_structured = pytorch_structured[: softmax_idx - 1]
        pytorch_structured.append(nn.Flatten())
    elif len(onnx_shape) == 0 and path is not None and "vgg16-7" in path:
        # This VGG16 export carries no shape information; hard-code ImageNet.
        onnx_shape = (3, 224, 224)
        pytorch_structured = pytorch_model.forward_trace_to_graph()
    elif len(onnx_shape) == 0 and path is not None and ("test_nano" in path or "test_tiny" in path or "test_small" in path):
        # Tiny unit-test networks take a single scalar input.
        onnx_shape = (1,)
        pytorch_structured = pytorch_model.forward_trace_to_graph()
    else:
        pytorch_structured = pytorch_model.forward_trace_to_graph()
    return pytorch_structured, onnx_shape, inp_name
@typing.no_type_check
def load_onnx(path: str):
    """Read an ONNX model from disk, transparently handling gzipped files."""
    # The official benchmark repo has all networks with the wrong ending
    if not path.endswith(".gz") and not exists(path):
        path = path + ".gz"
    if path.endswith(".gz"):
        return onnx.load(gzip.GzipFile(path))
    return onnx.load(path)
| 28,413 | 32.039535 | 124 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/utilities/loading/data.py | from typing import Sequence, Tuple
import torch
from bunch import Bunch # type: ignore[import]
from torch import Tensor
def transform_image(
    pixel_values: Sequence[str],
    input_dim: Tuple[int, ...],
) -> Tuple[Tensor, Tuple[int, ...]]:
    """Parse raw pixel strings into a [0, 1] tensor shaped like *input_dim*.

    Returns the image tensor together with the dimension tuple actually used
    (flattened for 2-d `input_dim`, CHW otherwise). The original annotation
    claimed `-> Tensor` although a 2-tuple is returned; fixed here.
    """
    if len(pixel_values)==9409:
        # NOTE(review): 9409-entry rows appear to hold 9408 = 3*56*56 values
        # that are already normalized, plus one trailing entry that is
        # dropped — confirm against the dataset files.
        normalized_pixel_values = torch.tensor([float(p) for p in pixel_values[0:-1]])
    else:
        normalized_pixel_values = torch.tensor([float(p) / 255.0 for p in pixel_values])
    if len(input_dim) >= 3:
        if len(pixel_values)==9409:
            # Values already stored in CHW order.
            input_dim_in_chw = (input_dim[0], input_dim[1], input_dim[2])
            image_in_chw = normalized_pixel_values.view(input_dim_in_chw)
            image_dim = input_dim_in_chw
        else:
            # Values stored in HWC order; reshape then permute to CHW.
            input_dim_in_hwc = (input_dim[1], input_dim[2], input_dim[0])
            image_in_hwc = normalized_pixel_values.view(input_dim_in_hwc)
            image_in_chw = image_in_hwc.permute(2, 0, 1)
            image_dim = input_dim
        image = image_in_chw
    elif len(input_dim) > 1:
        # 2-d inputs are reported with a flattened dimension tuple.
        image_dim = (int(torch.prod(torch.tensor(input_dim))),)
        image = normalized_pixel_values.view(input_dim)
    else:
        image = normalized_pixel_values
        image_dim = input_dim
    assert (image >= 0).all()
    assert (image <= 1).all()
    return image, image_dim
def transform_and_bound(
    pixel_values: Sequence[str],
    config: Bunch,
    device: torch.device = torch.device("cpu"),
) -> Tuple[Tensor, Tensor, Tensor]:
    """Build the (image, lower bound, upper bound) tensors for one sample.

    The l-infinity ball of radius `config.eps` is clamped to the valid pixel
    range [0, 1] *before* normalization, and all three tensors receive a
    leading batch dimension.
    """
    image, image_dim = transform_image(pixel_values, config.input_dim)
    input_lb = (image - config.eps).clamp(min=0)
    input_ub = (image + config.eps).clamp(max=1)
    try:
        # Per-channel normalization; missing config attributes mean "none".
        means = torch.tensor(config.normalization_means)
        stds = torch.tensor(config.normalization_stds)
        if len(image_dim) == 3:
            # Make (C,) stats broadcastable over a (C, H, W) image.
            means = means.unsqueeze(1).unsqueeze(2)
            stds = stds.unsqueeze(1).unsqueeze(2)
        else:
            assert len(means) == 1
            assert len(stds) == 1
        # Normalization is monotone per element, so bounds stay valid bounds.
        image = normalize(image, means, stds)
        input_lb = normalize(input_lb, means, stds)
        input_ub = normalize(input_ub, means, stds)
    except AttributeError:
        pass  # no normalization needed
    image = image.view(image_dim)
    input_lb = input_lb.view(image_dim)
    input_ub = input_ub.view(image_dim)
    return (
        image.unsqueeze(0).to(device),
        input_lb.unsqueeze(0).to(device),
        input_ub.unsqueeze(0).to(device),
    )
def normalize(image: Tensor, means: Tensor, stds: Tensor) -> Tensor:
    """Standardize *image*: subtract *means*, divide by *stds* (broadcasting)."""
    centered = image - means
    return centered / stds
| 2,542 | 33.835616 | 88 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/state/subproblem_state.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Dict, Optional, Tuple
import torch
from torch import Tensor
from src.state.constraints import Constraints, ReadonlyConstraints
from src.state.parameters import Parameters, ReadonlyParameters
from src.state.prima_constraints import PrimaConstraints
from src.state.split_state import SplitState
from src.state.tags import LayerTag, NodeTag
from src.utilities.custom_typing import implement_properties_as_fields
if TYPE_CHECKING:
from src.abstract_layers.abstract_container_module import ActivationLayer
class ReadonlySubproblemState(ABC):
    """Read-only view of a branch-and-bound subproblem (constraints + parameters).

    Concrete state lives in `SubproblemState`; the derived copies produced by
    the `without_*`/`with_*`/`split` helpers are returned as read-only because
    they may alias tensors of the original state.
    """

    @property
    @abstractmethod
    def constraints(self) -> ReadonlyConstraints:
        pass

    @property
    @abstractmethod
    def parameters(self) -> ReadonlyParameters:
        pass

    @property
    @abstractmethod
    def batch_size(self) -> int:
        pass

    @property
    @abstractmethod
    def device(self) -> torch.device:
        pass

    @property
    def is_infeasible(self) -> Tensor:
        # Feasibility is tracked entirely by the constraints.
        return self.constraints.is_infeasible

    def without_prima(self) -> ReadonlySubproblemState:
        """Return a view of this state with PRIMA constraints dropped."""
        return SubproblemState.create_readonly(
            self.constraints.without_prima(),
            self.parameters,
            self.batch_size,
            self.device,
        )

    def with_new_parameters(self) -> ReadonlySubproblemState:  # TODO: why necessary?
        """Return a view with fresh default parameters (use_params=True)."""
        return SubproblemState.create_readonly(
            self.constraints,
            Parameters.create_default(self.batch_size, self.device, use_params=True),
            self.batch_size,
            self.device,
        )

    def without_parameters(
        self,
    ) -> ReadonlySubproblemState:  # TODO: probably it would be better to put use_params into the backsubtitution config.
        """Return a view with fresh default parameters (use_params=False)."""
        return SubproblemState.create_readonly(
            self.constraints,
            Parameters.create_default(self.batch_size, self.device, use_params=False),
            self.batch_size,
            self.device,
        )

    def deep_copy_to(self, device: torch.device) -> SubproblemState:
        """Clone all underlying tensors onto *device*; result is mutable."""
        return SubproblemState(
            self.constraints.deep_copy_to(device),
            self.parameters.deep_copy_to(device),
            self.batch_size,
            device,
        )

    def deep_copy_to_no_clone(self, device: torch.device) -> ReadonlySubproblemState:
        """Copy to *device* without cloning; result may alias and is read-only."""
        return SubproblemState.create_readonly(
            self.constraints.deep_copy_to_no_clone(device),
            self.parameters.deep_copy_to_no_clone(device),
            self.batch_size,
            device,
        )

    def split(
        self,
        node_to_split: NodeTag,
        recompute_intermediate_bounds_after_branching: bool,
        layer: ActivationLayer,
        device: torch.device,
    ) -> Tuple[
        ReadonlySubproblemState, ReadonlySubproblemState
    ]:  # readonly because the resulting parameters alias the original ones
        """Branch on *node_to_split*, returning the (negative, positive) children."""
        (
            constraints_for_negative_split,
            constraints_for_positive_split,
            intermediate_layer_bounds_to_be_kept_fixed,
            split_point,
        ) = self.constraints.split(
            node_to_split, recompute_intermediate_bounds_after_branching, layer, device
        )
        # Both children share the subset of parameters that stays active.
        active_parameters = self.parameters.get_active_parameters_after_split(
            recompute_intermediate_bounds_after_branching,
            intermediate_layer_bounds_to_be_kept_fixed,
            device,
        )
        negative_split = SubproblemState.create_readonly(
            constraints_for_negative_split, active_parameters, self.batch_size, device
        )
        positive_split = SubproblemState.create_readonly(
            constraints_for_positive_split, active_parameters, self.batch_size, device
        )
        return (negative_split, positive_split)

    @property
    def is_fully_split(self) -> bool:
        return self.constraints.is_fully_split

    def get_layer_id_to_index(
        self,
    ) -> Dict[int, int]:  # TODO: this seems out of place here
        """Map each activation layer id to its position in the split state."""
        layer_id_to_index: Dict[int, int] = {}
        assert (
            self.constraints.split_state is not None
        ), "need valid split state for branch and bound"
        for index, layer_id in enumerate(
            self.constraints.split_state.split_constraints
        ):
            layer_id_to_index[layer_id] = index
        return layer_id_to_index

    # for VerificationSubproblemQueue. TODO: why needed?
    def get_prima_constraints(
        self,
    ) -> Optional[PrimaConstraints]:
        return self.constraints.get_prima_constraints()

    def set_prima_coefficients(
        self, prima_coefficients: Dict[LayerTag, Tuple[Tensor, Tensor, Tensor]]
    ) -> None:  # TODO: get rid of this?
        self.constraints.set_prima_coefficients(prima_coefficients)
@implement_properties_as_fields
class SubproblemState(ReadonlySubproblemState):
    """
    Represents an optimization problem min_x query_i*f(x) w.r.t constraints
    together with parameters that are used to bound it from below.
    (The coefficients of queries are currently stored and batched separately.)
    """

    # Mutable implementation of ReadonlySubproblemState; the decorator turns
    # these annotations into fields backing the abstract properties.
    constraints: Constraints
    parameters: Parameters
    batch_size: int
    device: torch.device

    def __init__(
        self,
        constraints: Constraints,
        parameters: Parameters,
        batch_size: int,
        device: torch.device,
    ):
        self.constraints = constraints
        self.parameters = parameters
        # Constraints, parameters and the state itself must agree on batch
        # size and device.
        assert self.constraints.batch_size == batch_size
        assert self.parameters.batch_size == batch_size
        self.batch_size = batch_size
        assert self.constraints.device == device
        assert self.parameters.device == device
        self.device = device

    @classmethod
    def create_readonly(
        cls,
        constraints: ReadonlyConstraints,
        parameters: ReadonlyParameters,
        batch_size: int,
        device: torch.device,
    ) -> ReadonlySubproblemState:
        """Wrap existing (mutable) constraints/parameters behind the read-only type."""
        assert isinstance(constraints, Constraints)
        assert isinstance(parameters, Parameters)
        return cls(
            constraints,
            parameters,
            batch_size,
            device,
        )

    @classmethod
    def create_default(
        cls,
        split_state: Optional[SplitState],
        optimize_prima: bool,
        batch_size: int,
        device: torch.device,
        use_params: bool,  # TODO: probably it would be better to put use_params into the backsubtitution config.
    ) -> SubproblemState:
        """Create a fresh root subproblem with default constraints/parameters."""
        constraints = Constraints.create_default(
            split_state, optimize_prima, batch_size, device
        )
        parameters = Parameters.create_default(batch_size, device, use_params)
        return cls(constraints, parameters, batch_size, device)

    def without_prima(self) -> SubproblemState:
        """Mutable counterpart of the read-only `without_prima`."""
        return SubproblemState(
            self.constraints.without_prima(),
            self.parameters,
            self.batch_size,
            self.device,
        )

    def with_new_parameters(self) -> SubproblemState:  # TODO: why necessary?
        """Mutable counterpart: same constraints, fresh parameters (use_params=True)."""
        return SubproblemState(
            self.constraints,
            Parameters.create_default(self.batch_size, self.device, use_params=True),
            self.batch_size,
            self.device,
        )

    def without_parameters(
        self,
    ) -> SubproblemState:  # TODO: probably it would be better to put use_params into the backsubtitution config.
        """Mutable counterpart: same constraints, fresh parameters (use_params=False)."""
        return SubproblemState(
            self.constraints,
            Parameters.create_default(self.batch_size, self.device, use_params=False),
            self.batch_size,
            self.device,
        )

    def move_to(self, device: torch.device) -> None:
        """Move all tensors to *device* in place (no-op if already there)."""
        if self.device == device:
            return
        self.constraints.move_to(device)
        self.parameters.move_to(device)
        self.device = device

    def update_feasibility(self) -> None:
        """Re-derive the infeasibility flags from the current constraints."""
        self.constraints.update_feasibility_from_constraints()
| 8,046 | 31.447581 | 121 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/state/split_state.py | from __future__ import annotations
from abc import ABC, abstractmethod
from copy import deepcopy
from typing import TYPE_CHECKING, Dict, Mapping, Optional, Sequence, Tuple
if TYPE_CHECKING:
from src.abstract_layers.abstract_container_module import ActivationLayer
import torch
from torch import Tensor
from src.state.layer_bounds import ReadonlyLayerBounds
from src.state.tags import LayerTag, NodeTag
from src.utilities.custom_typing import implement_properties_as_fields
from src.utilities.tensor_management import deep_copy_to, deep_copy_to_no_clone, move_to
class ReadonlySplitState(ABC):
    """Read-only view of per-layer branching decisions.

    `split_constraints` maps each activation layer to an int8 tensor with
    entries in {-1, 0, +1}: 0 means "not split", and the two signs select the
    two branches of a split node. `split_points` stores, for non-ReLU
    activations, the input value at which each node was split.
    """

    @property
    @abstractmethod
    def split_constraints(self) -> Mapping[LayerTag, Tensor]:  # TODO: int -> LayerTag
        pass

    @property
    @abstractmethod
    def split_points(self) -> Mapping[LayerTag, Tensor]:  # TODO: int -> LayerTag
        pass

    @property
    @abstractmethod
    def number_of_nodes_split(self) -> Sequence[int]:
        pass

    @property
    @abstractmethod
    def is_fully_split(self) -> bool:
        pass

    @property
    @abstractmethod
    def batch_size(self) -> int:
        pass

    @property
    @abstractmethod
    def device(self) -> torch.device:
        pass

    def unstable_node_mask_in_layer(
        self, layer_id: LayerTag
    ) -> Tensor:  # TODO: does this only work for ReLU?
        # A node is still "unstable" while its split constraint is 0.
        return (self.split_constraints[layer_id] == 0).detach()

    def deep_copy_to(self, device: torch.device) -> SplitState:
        """Clone all tensors onto *device*; result is mutable."""
        assert isinstance(self.split_constraints, dict)
        assert isinstance(self.split_points, dict)
        return SplitState(
            deep_copy_to(self.split_constraints, device),
            deep_copy_to(self.split_points, device),
            self.number_of_nodes_split,  # (immutable)
            self.batch_size,
            device,
        )

    def deep_copy_to_no_clone(self, device: torch.device) -> ReadonlySplitState:
        """Copy to *device* without cloning; result may alias and is read-only."""
        return SplitState.create_readonly(
            deep_copy_to_no_clone(self.split_constraints, device),
            deep_copy_to_no_clone(self.split_points, device),
            self.number_of_nodes_split,  # (immutable)
            self.batch_size,
            device,
        )

    def split(
        self,
        bounds: ReadonlyLayerBounds,
        node_to_split: NodeTag,
        layer: ActivationLayer,
        device: torch.device,
    ) -> Tuple[
        SplitState,  # negative_split
        SplitState,  # positive_split
        float,  # split_point
    ]:
        """Split one unsplit node, producing the two child split states.

        For ReLU layers the split point is fixed at 0; for other activations
        it is taken from `layer.get_split_points` on the node's current
        intermediate bounds and recorded in `split_points` of both children.
        """
        layer_id = node_to_split.layer
        # Prepend the (singleton) batch dimension to the node index.
        index_in_layer = 0, *node_to_split.index
        assert (
            self.split_constraints[layer_id][index_in_layer] == 0
        ), "Attempted to split a node that is already split."
        negative_split_constraints = deepcopy(self.split_constraints)
        assert isinstance(negative_split_constraints, dict)
        positive_split_constraints = deepcopy(self.split_constraints)
        assert isinstance(positive_split_constraints, dict)
        negative_split_points = deepcopy(self.split_points)
        assert isinstance(negative_split_points, dict)
        positive_split_points = deepcopy(self.split_points)
        assert isinstance(positive_split_points, dict)
        layer_split_constraints = self.split_constraints[layer_id]
        assert layer_split_constraints.shape[0] == 1
        # Mark the branched node: +1 in the negative child, -1 in the positive.
        neg_layer_split_constraints = layer_split_constraints.clone()
        neg_layer_split_constraints[index_in_layer] = 1
        negative_split_constraints[layer_id] = neg_layer_split_constraints
        pos_layer_split_constraints = layer_split_constraints.clone()
        pos_layer_split_constraints[index_in_layer] = -1
        positive_split_constraints[layer_id] = pos_layer_split_constraints
        from src.abstract_layers.abstract_relu import ReLU

        if isinstance(layer, ReLU):
            split_point = torch.Tensor([0.0])
        else:
            assert layer_id in self.split_points
            layer_split_points = self.split_points[layer_id]
            neg_layer_split_points = layer_split_points.clone()
            pos_layer_split_points = layer_split_points.clone()
            lbs, ubs = bounds.intermediate_bounds[layer_id]
            lb, ub = lbs[index_in_layer], ubs[index_in_layer]
            split_point = layer.get_split_points(lb, ub)  # type: ignore # "get_split_points" is a classmethod not Tensor
            neg_layer_split_points[index_in_layer] = split_point.item()
            pos_layer_split_points[index_in_layer] = split_point.item()
            negative_split_points[layer_id] = neg_layer_split_points
            positive_split_points[layer_id] = pos_layer_split_points
        number_of_nodes_split: Sequence[int] = [
            n + 1 for n in self.number_of_nodes_split
        ]
        negative_split = SplitState(
            negative_split_constraints,
            negative_split_points,
            number_of_nodes_split,
            self.batch_size,
            device,
        )
        positive_split = SplitState(
            positive_split_constraints,
            positive_split_points,
            number_of_nodes_split,
            self.batch_size,
            device,
        )
        return (
            negative_split,
            positive_split,
            split_point.item(),
        )
@implement_properties_as_fields
class SplitState(
    ReadonlySplitState
):  # TODO: probably this class should know which layers are ReLUs
    """Mutable branching state; see ReadonlySplitState for the encoding."""

    # The decorator turns these annotations into fields backing the abstract
    # properties of ReadonlySplitState.
    split_constraints: Dict[LayerTag, Tensor]
    split_points: Dict[LayerTag, Tensor]
    number_of_nodes_split: Sequence[int]  # (for multiple batches)
    is_fully_split: bool
    batch_size: int
    device: torch.device

    def __init__(
        self,
        split_constraints: Dict[LayerTag, Tensor],
        split_points: Dict[LayerTag, Tensor],
        number_of_nodes_split: Sequence[int],
        batch_size: int,
        device: torch.device,
    ):
        self.split_constraints = split_constraints
        self.split_points = split_points
        self.number_of_nodes_split = number_of_nodes_split
        # Fully split once no layer has a 0 (unsplit) entry left.
        self.is_fully_split = all(
            [(splits != 0).all() for splits in split_constraints.values()]
        )
        assert len(number_of_nodes_split) == batch_size
        self.batch_size = batch_size
        self.device = device

    @classmethod
    def create_readonly(
        cls,
        split_constraints: Mapping[int, Tensor],
        split_points: Mapping[int, Tensor],
        number_of_nodes_split: Sequence[int],
        batch_size: int,
        device: torch.device,
    ) -> ReadonlySplitState:
        """Wrap existing dictionaries behind the read-only type."""
        assert isinstance(split_constraints, dict)
        assert isinstance(split_points, dict)
        return cls(
            split_constraints,
            split_points,
            number_of_nodes_split,  # (immutable)
            batch_size,
            device,
        )

    @classmethod
    def create_default(
        cls,
        split_constraints: Optional[Dict[LayerTag, Tensor]],
        split_points: Optional[Dict[LayerTag, Tensor]],
        batch_size: int,
        device: torch.device,
    ) -> SplitState:
        """Create an initial state with no splits recorded."""
        if split_constraints is None:
            split_constraints = {}
        if split_points is None:
            split_points = {}
        number_of_nodes_split = [0]
        return cls(
            split_constraints, split_points, number_of_nodes_split, batch_size, device
        )

    def move_to(self, device: torch.device) -> None:
        """Move all tensors to *device* in place (no-op if already there)."""
        if self.device is device:
            return
        self.split_constraints = move_to(self.split_constraints, device)
        self.split_points = move_to(self.split_points, device)
        self.device = device

    def refine_split_constraints_for_relu(
        self, layer_id: LayerTag, bounds: Tuple[Tensor, Tensor]
    ) -> None:
        """Mark ReLU nodes that became stable under *bounds* as implicitly split.

        Stable-inactive nodes (ub <= 0) get +1, stable-active nodes (lb >= 0)
        get -1; nodes already split explicitly are left untouched.
        """
        assert layer_id in self.split_constraints
        assert layer_id not in self.split_points
        input_lb, input_ub = bounds
        not_already_split_nodes = self.split_constraints[layer_id] == 0
        stable_inactive_nodes = input_ub <= 0
        stable_active_nodes = input_lb >= 0
        self.split_constraints[layer_id] = torch.where(
            (stable_inactive_nodes & not_already_split_nodes),
            torch.tensor(1, dtype=torch.int8, device=self.device),
            self.split_constraints[layer_id],
        )
        self.split_constraints[layer_id] = torch.where(
            (stable_active_nodes & not_already_split_nodes),
            torch.tensor(-1, dtype=torch.int8, device=self.device),
            self.split_constraints[layer_id],
        )
# def refine_split_constraints_for_sig( # Sigmoid / Tanh
# self, layer_id: int, bounds: Tuple[Tensor, Tensor]
# ) -> None:
# assert layer_id in self.split_points.keys()
# input_lb, input_ub = bounds
# not_already_split_nodes = self.split_constraints[layer_id] == 0
# # Resets our splitting in case the bounds move across the split point
# split_points = self.split_points[layer_id]
# no_longer_split_nodes = (input_lb >= split_points) | (
# input_ub <= split_points
# )
# self.split_constraints[layer_id] = torch.where(
# (~not_already_split_nodes & no_longer_split_nodes),
# torch.tensor(0, dtype=torch.int8, device=self.device),
# self.split_constraints[layer_id],
# )
| 9,388 | 34.164794 | 122 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/state/parameters.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from src.state.layer_bounds import ReadonlyLayerBounds
from src.state.tags import LayerTag, ParameterTag, QueryTag
from src.utilities.custom_typing import implement_properties_as_fields
from src.utilities.tensor_management import (
deep_copy,
deep_copy_to,
deep_copy_to_no_clone,
move_to,
)
class ReadonlyParametersForQuery(ABC):
    """Read-only interface to the optimizable bounding parameters of a
    single query, stored per parameter kind (``ParameterTag``) and, within a
    kind, per layer (``LayerTag``)."""
    @property
    @abstractmethod
    def parameters(self) -> Mapping[ParameterTag, Mapping[LayerTag, Tensor]]:
        pass
    @property
    @abstractmethod
    def batch_size(self) -> int:
        pass
    @property
    @abstractmethod
    def device(self) -> torch.device:
        pass
    def deep_copy(self) -> ParametersForQuery:
        """Return a mutable deep copy on the current device."""
        return ParametersForQuery(
            deep_copy(self.parameters), self.batch_size, self.device
        )
    def deep_copy_to(self, device: torch.device) -> ParametersForQuery:
        """Return a mutable deep copy moved to ``device``."""
        return ParametersForQuery(
            deep_copy_to(self.parameters, device), self.batch_size, device
        )
    def deep_copy_to_no_clone(self, device: torch.device) -> ReadonlyParametersForQuery:
        """Return a read-only deep copy on ``device`` using the "no clone"
        tensor-management helper (presumably avoids cloning tensors already
        on ``device`` -- see ``deep_copy_to_no_clone`` in tensor_management)."""
        return ParametersForQuery.create_readonly(
            deep_copy_to_no_clone(self.parameters, device),
            self.batch_size,
            device,
        )
@implement_properties_as_fields
class ParametersForQuery(ReadonlyParametersForQuery):
    """Mutable container of the optimizable bounding parameters of a single
    query. Parameter keys are strings whose substrings encode the kind
    ("alpha", "alpha_relu", "beta", "prima") and bound side ("ub" marks
    upper-bound parameters, cf. ``get_optimizable``)."""
    parameters: Dict[ParameterTag, Dict[LayerTag, Tensor]]
    batch_size: int
    device: torch.device
    def __init__(
        self,
        parameters: Dict[ParameterTag, Dict[LayerTag, Tensor]],
        batch_size: int,
        device: torch.device,
    ):
        self.parameters = parameters
        self.batch_size = batch_size
        self.device = device
    @classmethod
    def create_readonly(
        cls,
        parameters: Mapping[ParameterTag, Mapping[LayerTag, Tensor]],
        batch_size: int,
        device: torch.device,
    ) -> ReadonlyParametersForQuery:
        """Wrap an existing dict-backed mapping as a read-only instance."""
        assert isinstance(parameters, dict)
        return ParametersForQuery(parameters, batch_size, device)
    @classmethod
    def create_default(
        cls, batch_size: int, device: torch.device
    ) -> ParametersForQuery:
        """Create an empty parameter container."""
        parameters: Dict[ParameterTag, Dict[LayerTag, Tensor]] = {}
        return cls(parameters, batch_size, device)
    def move_to(self, device: torch.device) -> None:
        """Move all parameter tensors to ``device`` (no-op if already there)."""
        if self.device == device:
            return
        self.parameters = move_to(self.parameters, device)
        self.device = device
    def get_optimizable(
        self, only_lb: bool
    ) -> Tuple[
        List[Tensor], List[Tensor], List[Tensor], List[Tensor]
    ]:  # TODO: clean this up
        """Collect all parameter tensors into (alpha, beta, prima, alpha_relu)
        lists for the optimizer. If ``only_lb`` is set, keys containing "ub"
        are skipped. Note: "alpha_relu" parameters are appended to BOTH the
        alpha_relu list and the alpha list (the substring checks overlap and
        the first ``if`` is deliberately not part of the elif chain)."""
        alpha_parameters_for_query: List[Tensor] = []
        beta_parameters_for_query: List[Tensor] = []
        prima_parameters_for_query: List[Tensor] = []
        alpha_relu_parameters_for_query: List[Tensor] = []
        for param_key, params_by_layer in self.parameters.items():
            if only_lb and "ub" in param_key:
                continue
            if "alpha_relu" in param_key:
                alpha_relu_parameters_for_query += list(params_by_layer.values())
            if "alpha" in param_key:
                alpha_parameters_for_query += list(params_by_layer.values())
            elif "beta" in param_key:
                beta_parameters_for_query += list(params_by_layer.values())
            elif "prima" in param_key:
                prima_parameters_for_query += list(params_by_layer.values())
            else:
                raise RuntimeError("Unknown optimizable parameter {}".format(param_key))
        return (
            alpha_parameters_for_query,
            beta_parameters_for_query,
            prima_parameters_for_query,
            alpha_relu_parameters_for_query,
        )
    def get_parameters(
        self,
        parameter_key: ParameterTag,
        layer_id: LayerTag,
        make_default_parameters: Union[
            Callable[[torch.device], Tensor],
            Tuple[int, ...],  # default is a zero tensor
        ],
    ) -> Tensor:
        """Fetch the parameters for ``(parameter_key, layer_id)``, creating
        them on first access. ``make_default_parameters`` is either a shape
        tuple (zero-initialized tensor) or a factory taking the device.
        The returned tensor is marked as requiring gradients so it can be
        optimized."""
        parameters_per_layer = self.parameters.setdefault(parameter_key, {})
        if layer_id not in parameters_per_layer:
            if isinstance(make_default_parameters, tuple):
                parameter_shape = make_default_parameters
                default_parameters = torch.zeros(*parameter_shape, device=self.device)
            else:
                default_parameters = make_default_parameters(self.device)
            requested_parameters = default_parameters
            parameters_per_layer[layer_id] = requested_parameters
        else:
            requested_parameters = parameters_per_layer[layer_id]
        if not requested_parameters.requires_grad:
            requested_parameters.requires_grad_()
        # print(f"Here: {parameter_key}: {layer_id}")
        return requested_parameters
    def get_existing_parameters(
        self,
        parameter_key: ParameterTag,
        layer_id: LayerTag,
    ) -> Tensor:
        """Fetch parameters that must already exist (raises KeyError otherwise)."""
        return self.parameters[parameter_key][layer_id]
    def modify_for_sharing(
        self,
    ) -> None:  # reduce query dimension to 1 so the parameters can be used without reduced parameter sharing
        for param_key, layer_parameters in self.parameters.items():
            if "alpha_relu" not in param_key:  # TODO: this is a bit hacky
                continue
            for layer_id, parameters in layer_parameters.items():
                if parameters.shape[1] != 1:
                    # just use the first parameter set
                    # select returns a view, intention of clone is to allow memory to be freed
                    layer_parameters[layer_id] = (
                        parameters.select(dim=1, index=0).unsqueeze(1).clone().detach()
                    )
    # (used for split score computations)
    def change_alphas_to_WK_slopes(self, layer_bounds: ReadonlyLayerBounds) -> None:
        """Overwrite every alpha parameter with the Wong-Kolter upper-bound
        slope derived from the layer's current intermediate bounds."""
        from src.utilities.branching import (  # to avoid circular imports
            babsr_ratio_computation,
        )
        for param_key, layer_parameters in self.parameters.items():
            if "alpha" in param_key:
                for layer_id, parameters in layer_parameters.items():
                    current_layer_lower_bounds = layer_bounds.intermediate_bounds[
                        layer_id
                    ][0].unsqueeze(
                        1
                    )  # add query dimension
                    current_layer_upper_bounds = layer_bounds.intermediate_bounds[
                        layer_id
                    ][1].unsqueeze(
                        1
                    )  # add query dimension
                    ub_slope, __ = babsr_ratio_computation(
                        current_layer_lower_bounds, current_layer_upper_bounds
                    )
                    WK_slopes = ub_slope
                    self.parameters[param_key][layer_id] = WK_slopes
    def set_beta_parameters_to_zero(self) -> None:
        """Replace every beta parameter tensor by a zero tensor of the same shape."""
        for param_key, layer_parameters in self.parameters.items():
            if "beta" in param_key:
                for layer_id, parameters in layer_parameters.items():
                    self.parameters[param_key][layer_id] = torch.zeros_like(parameters)
    def improve(
        self, new_parameters_for_query: ParametersForQuery, improvement_mask: Tensor
    ) -> None:
        """Selectively take over parameters from ``new_parameters_for_query``
        for the batch elements flagged in ``improvement_mask``; parameters
        for unknown (key, layer) pairs are deep-copied wholesale."""
        # if not any(improvement_mask): return # (it suffices if this is done once in Parameters.improve for now)
        for (
            param_key,
            new_parameters_per_layer,
        ) in new_parameters_for_query.parameters.items():
            if param_key not in self.parameters:
                self.parameters[param_key] = {}
            for layer_id, new_layer_parameters in new_parameters_per_layer.items():
                if layer_id in self.parameters[param_key]:
                    # Broadcast the per-batch-element mask over the remaining
                    # parameter dimensions.
                    improvement_mask_of_appropriate_shape = improvement_mask.view(
                        improvement_mask.shape[0],
                        *([1] * (len(new_layer_parameters.shape) - 1)),
                    )
                    self.parameters[param_key][layer_id] = torch.where(
                        improvement_mask_of_appropriate_shape,
                        new_layer_parameters,
                        self.parameters[param_key][layer_id],
                    ).detach()
                else:
                    self.parameters[param_key][layer_id] = deep_copy(
                        new_layer_parameters
                    )
class ReadonlyParameters(ABC):
    """Read-only interface to all bounding parameters of a (sub)problem,
    grouped by the query (``QueryTag``) they bound."""
    @property
    @abstractmethod
    def parameters_by_query(
        self,
    ) -> Mapping[QueryTag, ParametersForQuery]:
        pass
    @property
    @abstractmethod
    def batch_size(self) -> int:
        pass
    @property
    @abstractmethod
    def device(self) -> torch.device:
        pass
    @property
    @abstractmethod
    def use_params(
        self,
    ) -> bool:  # TODO: probably it would be better to move use_params into the backsubtitution config.
        pass
    def deep_copy(self) -> Parameters:
        """Return a mutable deep copy on the current device."""
        return Parameters(
            {
                tag: params.deep_copy()
                for tag, params in self.parameters_by_query.items()
            },
            self.batch_size,
            self.device,
            self.use_params,
        )
    def deep_copy_to(self, device: torch.device) -> Parameters:
        """Return a mutable deep copy moved to ``device``."""
        return Parameters(
            {
                tag: params.deep_copy_to(device)
                for tag, params in self.parameters_by_query.items()
            },
            self.batch_size,
            device,
            self.use_params,
        )
    def deep_copy_to_no_clone(self, device: torch.device) -> ReadonlyParameters:
        """Return a read-only deep copy on ``device`` using the "no clone"
        per-query copies."""
        return Parameters.create_readonly(
            {
                tag: params.deep_copy_to_no_clone(device)
                for tag, params in self.parameters_by_query.items()
            },
            self.batch_size,
            device,
            self.use_params,
        )
    def get_active_parameters_after_split(
        self,
        recompute_intermediate_bounds_after_branching: bool,
        intermediate_layer_bounds_to_be_kept_fixed: Sequence[int],
        device: torch.device,
    ) -> ReadonlyParameters:  # readonly because the resulting parameters alias the original ones
        """Select the per-query parameters that remain active after a branch:
        if intermediate bounds are NOT recomputed, queries whose bounds are
        kept fixed are dropped; otherwise all queries stay active."""
        active_parameters = (
            {
                query_id: parameters
                for query_id, parameters in self.parameters_by_query.items()
                if query_id not in intermediate_layer_bounds_to_be_kept_fixed
            }
            if not recompute_intermediate_bounds_after_branching
            else self.parameters_by_query
        )
        return Parameters.create_readonly(
            active_parameters, self.batch_size, device, self.use_params
        )
@implement_properties_as_fields
class Parameters(ReadonlyParameters):
    """Mutable collection of all bounding parameters, keyed by query.
    Mostly delegates to the per-query ``ParametersForQuery`` containers."""
    parameters_by_query: Dict[QueryTag, ParametersForQuery]
    batch_size: int
    device: torch.device
    use_params: bool  # only initialize parameters if this is True => false for DP pass # TODO: probably it's better to move use_params into the backsubstitution config
    def __init__(
        self,
        parameters_by_query: Dict[QueryTag, ParametersForQuery],
        batch_size: int,
        device: torch.device,
        use_params: bool,
    ):
        self.parameters_by_query = parameters_by_query
        self.batch_size = batch_size
        self.device = device
        self.use_params = use_params
    @classmethod
    def create_readonly(
        cls,
        parameters_by_query: Mapping[QueryTag, ReadonlyParametersForQuery],
        batch_size: int,
        device: torch.device,
        use_params: bool,
    ) -> ReadonlyParameters:
        """Wrap an existing dict-backed mapping as a read-only instance."""
        assert isinstance(parameters_by_query, dict)
        return Parameters(parameters_by_query, batch_size, device, use_params)
    @classmethod
    def create_default(
        cls, batch_size: int, device: torch.device, use_params: bool
    ) -> Parameters:
        """Create an instance with no per-query parameters yet."""
        parameters_by_query: Dict[QueryTag, ParametersForQuery] = {}
        return cls(parameters_by_query, batch_size, device, use_params)
    def move_to(self, device: torch.device) -> None:
        """Move all per-query parameters to ``device`` (no-op if already there)."""
        if self.device == device:
            return
        for parameters_for_query in self.parameters_by_query.values():
            parameters_for_query.move_to(device)
        self.device = device
    def get_parameters_for_query(self, query_id: QueryTag) -> ParametersForQuery:
        """Return the container for ``query_id``, creating an empty one on demand."""
        if query_id in self.parameters_by_query:
            return self.parameters_by_query[query_id]
        result = ParametersForQuery.create_default(self.batch_size, self.device)
        self.parameters_by_query[query_id] = result
        return result
    def get_parameters(
        self,
        query_id: QueryTag,
        parameter_key: ParameterTag,
        layer_id: LayerTag,
        make_default_parameters: Union[
            Callable[[torch.device], Tensor],
            Tuple[int, ...],  # default is a zero tensor
        ],
    ) -> Tensor:
        """Fetch (or lazily create) the parameters for
        ``(query_id, parameter_key, layer_id)``; see
        ``ParametersForQuery.get_parameters``."""
        parameters_for_query = self.get_parameters_for_query(query_id)
        return parameters_for_query.get_parameters(
            parameter_key=parameter_key,
            layer_id=layer_id,
            make_default_parameters=make_default_parameters,
        )
    def get_existing_parameters(
        self, query_id: QueryTag, parameter_key: ParameterTag, layer_id: LayerTag
    ) -> Tensor:
        """Fetch parameters that must already exist for the given query."""
        parameters_for_query = self.get_parameters_for_query(query_id)
        return parameters_for_query.get_existing_parameters(
            parameter_key=parameter_key,
            layer_id=layer_id,
        )
    def modify_for_sharing(self) -> None:
        """Reduce the query dimension of all alpha_relu parameters to 1 (see
        ``ParametersForQuery.modify_for_sharing``)."""
        for query_id, parameters_for_query in self.parameters_by_query.items():
            parameters_for_query.modify_for_sharing()
    def change_alphas_to_WK_slopes(
        self, query_id: QueryTag, layer_bounds: ReadonlyLayerBounds
    ) -> None:
        """Reset the alphas of ``query_id`` to Wong-Kolter slopes (used for
        split score computations)."""
        parameters_for_query = self.get_parameters_for_query(query_id)
        parameters_for_query.change_alphas_to_WK_slopes(layer_bounds)
    def set_beta_parameters_to_zero(self, query_id: QueryTag) -> None:
        """Zero out all beta parameters of ``query_id``."""
        parameters_for_query = self.get_parameters_for_query(query_id)
        parameters_for_query.set_beta_parameters_to_zero()
    def get_optimizable(
        self, selected_query_id: Optional[QueryTag], only_lb: bool
    ) -> Tuple[
        List[Tensor], List[Tensor], List[Tensor], List[Tensor]
    ]:  # TODO: clean this up
        """Collect (alpha, beta, prima, alpha_relu) parameter lists across all
        queries, or only for ``selected_query_id`` if given."""
        all_alpha_parameters: List[Tensor] = []
        all_beta_parameters: List[Tensor] = []
        all_prima_parameters: List[Tensor] = []
        all_alpha_relu_parameters: List[Tensor] = []
        # NOTE(review): the loop variable is named layer_id but iterates
        # query ids of parameters_by_query -- consider renaming to query_id.
        for (
            layer_id,
            parameters_for_query,
        ) in self.parameters_by_query.items():
            if selected_query_id is not None and layer_id != selected_query_id:
                continue
            (
                alpha_parameters_for_query,
                beta_parameters_for_query,
                prima_parameters_for_query,
                alpha_relu_parameters_for_query,
            ) = parameters_for_query.get_optimizable(only_lb)
            all_alpha_parameters += alpha_parameters_for_query
            all_beta_parameters += beta_parameters_for_query
            all_prima_parameters += prima_parameters_for_query
            all_alpha_relu_parameters += alpha_relu_parameters_for_query
        return (
            all_alpha_parameters,
            all_beta_parameters,
            all_prima_parameters,
            all_alpha_relu_parameters,
        )
    def improve(self, new_parameters: Parameters, improvement_mask: Tensor) -> None:
        """Take over parameters from ``new_parameters`` for the batch elements
        flagged in ``improvement_mask`` (no-op if the mask is all False)."""
        if not any(improvement_mask):
            return
        for (
            query_id,
            new_parameters_for_query,
        ) in new_parameters.parameters_by_query.items():
            if query_id not in self.parameters_by_query:
                self.parameters_by_query[query_id] = ParametersForQuery.create_default(
                    self.batch_size, self.device
                )
            self.parameters_by_query[query_id].improve(
                new_parameters_for_query, improvement_mask
            )
| 16,431 | 35.678571 | 168 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/state/constraints.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Dict, Mapping, Optional, OrderedDict, Sequence, Tuple
import torch
from torch import Tensor
from src.state.layer_bounds import LayerBounds, ReadonlyLayerBounds
from src.state.prima_constraints import PrimaConstraints
from src.state.split_state import ReadonlySplitState, SplitState
from src.state.tags import LayerTag, NodeTag
from src.utilities.custom_typing import implement_properties_as_fields
if TYPE_CHECKING:
from src.abstract_layers.abstract_container_module import ActivationLayer
INFEASIBILITY_CHECK_TOLERANCE = 1e-5
def _get_infeasibility_mask_from_intermediate_bounds(
    intermediate_bounds: Mapping[LayerTag, Tuple[Tensor, Tensor]],
    batch_size: int,
    device: torch.device,
) -> Tensor:
    """Return a boolean mask with one entry per batch element, marking
    elements whose lower bound exceeds the upper bound (beyond tolerance)
    anywhere in any layer, i.e. whose constraints are infeasible."""
    infeasible = torch.zeros(batch_size, dtype=torch.bool, device=device)
    for lower, upper in intermediate_bounds.values():
        violations = lower > upper + INFEASIBILITY_CHECK_TOLERANCE
        infeasible = infeasible | violations.flatten(start_dim=1).any(dim=1)
    return infeasible
class ReadonlyConstraints(ABC):
    """Read-only interface to the constraint state of a verification
    subproblem: branching decisions, intermediate layer bounds, optional
    PRIMA constraints and a per-batch-element infeasibility flag."""
    @property
    @abstractmethod
    def split_state(self) -> Optional[ReadonlySplitState]:
        pass
    @property
    @abstractmethod
    def layer_bounds(self) -> ReadonlyLayerBounds:
        pass
    @property
    @abstractmethod
    def is_infeasible(self) -> Tensor:
        pass  # one entry per batch element
    @property
    @abstractmethod
    def prima_constraints(self) -> Optional[PrimaConstraints]:
        pass  # it seems this is shared state (Readonly not applied)
    @property
    @abstractmethod
    def batch_size(self) -> int:
        pass
    @property
    @abstractmethod
    def device(self) -> torch.device:
        pass
    def without_prima(self) -> ReadonlyConstraints:  # TODO: why needed?
        """Return a read-only view with the PRIMA constraints dropped."""
        return Constraints.create_readonly(
            split_state=self.split_state,
            layer_bounds=self.layer_bounds,
            prima_constraints=None,
            is_infeasible=self.is_infeasible,
            batch_size=self.batch_size,
            device=self.device,
        )
    def deep_copy_to(self, device: torch.device) -> Constraints:
        """Return a mutable deep copy on ``device``."""
        return Constraints(
            split_state=self.split_state.deep_copy_to(device)
            if self.split_state is not None
            else None,
            layer_bounds=self.layer_bounds.deep_copy_to(device),
            prima_constraints=self.prima_constraints.deep_copy_to(device)
            if self.prima_constraints is not None
            else None,  # TODO: this is shared state, seems a bit weird
            is_infeasible=self.is_infeasible,  # (copied in constructor)
            batch_size=self.batch_size,
            device=device,
        )
    def deep_copy_to_no_clone(self, device: torch.device) -> ReadonlyConstraints:
        """Return a read-only copy on ``device``; moves (rather than copies)
        the shared PRIMA constraints as a side effect."""
        if self.prima_constraints is not None:
            self.prima_constraints.move_to(
                device
            )  # TODO: this is shared state, seems a bit weird
        return Constraints.create_readonly(
            split_state=self.split_state.deep_copy_to_no_clone(device)
            if self.split_state is not None
            else None,
            layer_bounds=self.layer_bounds.deep_copy_to_no_clone(device),
            prima_constraints=self.prima_constraints,
            is_infeasible=self.is_infeasible,  # (copied in constructor)
            batch_size=self.batch_size,
            device=device,
        )
    @property
    def is_fully_split(self) -> bool:
        # True iff a split state exists and every node is split.
        return self.split_state is not None and self.split_state.is_fully_split
    def split(
        self,
        node_to_split: NodeTag,
        recompute_intermediate_bounds_after_branching: bool,
        layer: ActivationLayer,
        device: torch.device,
    ) -> Tuple[
        Constraints, Constraints, Sequence[LayerTag], float
    ]:  # (negative_split, positive_split, intermediate_bounds_to_be_kept_fixed, split_point)
        """Branch on ``node_to_split``: split both the split state and the
        layer bounds, returning the two child constraint sets. The PRIMA
        constraints and infeasibility flags are shared/copied as-is."""
        assert (
            self.split_state is not None
        ), "can only split states with a valid SplitState"
        (
            split_state_for_negative_split,
            split_state_for_positive_split,
            split_point,
        ) = self.split_state.split(self.layer_bounds, node_to_split, layer, device)
        (
            layer_bounds_for_negative_split,
            layer_bounds_for_positive_split,
            intermediate_bounds_to_be_kept_fixed,
        ) = self.layer_bounds.split(
            node_to_split,
            split_point,
            recompute_intermediate_bounds_after_branching,
            device,
        )
        negative_split = Constraints(
            split_state=split_state_for_negative_split,
            layer_bounds=layer_bounds_for_negative_split,
            prima_constraints=self.prima_constraints,
            is_infeasible=self.is_infeasible,  # (copied in constructor)
            batch_size=self.batch_size,
            device=device,
        )
        positive_split = Constraints(
            split_state=split_state_for_positive_split,
            layer_bounds=layer_bounds_for_positive_split,
            prima_constraints=self.prima_constraints,
            is_infeasible=self.is_infeasible,  # (copied in constructor)
            batch_size=self.batch_size,
            device=device,
        )
        return (
            negative_split,
            positive_split,
            intermediate_bounds_to_be_kept_fixed,
            split_point,
        )
    # for VerificationSubproblemQueue. TODO: why needed?
    def get_prima_constraints(self) -> Optional[PrimaConstraints]:
        return self.prima_constraints
    @abstractmethod
    def set_prima_coefficients(
        self, prima_coefficients: Dict[LayerTag, Tuple[Tensor, Tensor, Tensor]]
    ) -> None:
        pass
@implement_properties_as_fields
class Constraints(ReadonlyConstraints):
    """Mutable constraint state of a verification subproblem. The
    constructor validates that all components agree on batch size and
    device, and always clones the infeasibility mask it is given."""
    split_state: Optional[SplitState]
    layer_bounds: LayerBounds
    prima_constraints: Optional[PrimaConstraints]
    is_infeasible: Tensor  # (one entry per batch element)
    batch_size: int
    device: torch.device
    def __init__(
        self,
        split_state: Optional[SplitState],
        layer_bounds: LayerBounds,
        prima_constraints: Optional[PrimaConstraints],
        is_infeasible: Optional[Tensor],
        batch_size: int,
        device: torch.device,
    ):
        self.split_state = split_state
        self.layer_bounds = layer_bounds
        self.prima_constraints = prima_constraints
        if is_infeasible is None:
            # Default: everything assumed feasible.
            self.is_infeasible = torch.zeros(
                batch_size, dtype=torch.bool, device=device
            )
        else:
            # Clone so callers' masks are not aliased by this state.
            self.is_infeasible = is_infeasible.clone().detach()
        assert self.split_state is None or self.split_state.batch_size == batch_size
        assert self.layer_bounds.batch_size == batch_size
        assert (
            self.prima_constraints is None
            or self.prima_constraints.batch_size == batch_size
        )
        assert self.is_infeasible.shape == (batch_size,)
        self.batch_size = batch_size
        assert self.split_state is None or self.split_state.device == device
        assert self.layer_bounds.device == device
        assert self.prima_constraints is None or self.prima_constraints.device == device
        self.device = device
    @classmethod
    def create_readonly(
        cls,
        split_state: Optional[ReadonlySplitState],
        layer_bounds: ReadonlyLayerBounds,
        prima_constraints: Optional[
            PrimaConstraints
        ],  # it seems this is shared state (Readonly not applied)
        is_infeasible: Optional[Tensor],
        batch_size: int,
        device: torch.device,
    ) -> ReadonlyConstraints:
        """Wrap existing (concrete) components as a read-only constraint state."""
        assert split_state is None or isinstance(split_state, SplitState)
        assert isinstance(layer_bounds, LayerBounds)
        assert prima_constraints is None or isinstance(
            prima_constraints, PrimaConstraints
        )
        return cls(
            split_state,
            layer_bounds,
            prima_constraints,
            is_infeasible,
            batch_size,
            device,
        )
    @classmethod
    def create_default(
        cls,
        split_state: Optional[SplitState],
        optimize_prima: bool,
        batch_size: int,
        device: torch.device,
    ) -> Constraints:
        """Create a fresh constraint state with empty layer bounds and, if
        ``optimize_prima`` is set, empty PRIMA constraints."""
        layer_bounds = LayerBounds.create_default(batch_size, device)
        prima_constraints = (
            PrimaConstraints.create_default(batch_size, device)
            if optimize_prima
            else None
        )
        return cls(
            split_state=split_state,
            layer_bounds=layer_bounds,
            prima_constraints=prima_constraints,
            is_infeasible=None,
            batch_size=batch_size,
            device=device,
        )
    def without_prima(self) -> Constraints:  # TODO: why needed?
        """Return a mutable variant with the PRIMA constraints dropped; the
        infeasibility mask is re-aliased (not cloned) afterwards."""
        result = Constraints(
            split_state=self.split_state,
            layer_bounds=self.layer_bounds,
            prima_constraints=None,
            batch_size=self.batch_size,
            is_infeasible=self.is_infeasible,  # TODO: a bit ugly (this is copied)
            device=self.device,
        )
        result.is_infeasible = self.is_infeasible  # TODO: a bit ugly
        return result
    def move_to(self, device: torch.device) -> None:
        """Move all components to ``device`` (no-op if already there)."""
        if self.device == device:
            return
        if self.split_state is not None:
            self.split_state.move_to(device)
        self.layer_bounds.move_to(device)
        if self.prima_constraints is not None:
            self.prima_constraints.move_to(device)
        self.device = device
    def update_is_infeasible(self, is_infeasible: Tensor) -> None:
        """Accumulate (logical OR) infeasibility flags into this state."""
        assert is_infeasible.shape == (self.batch_size,)
        self.is_infeasible |= is_infeasible
    def update_feasibility_from_constraints(self) -> None:
        """Flag batch elements whose intermediate bounds are contradictory
        (lower bound above upper bound beyond tolerance)."""
        is_infeasible = _get_infeasibility_mask_from_intermediate_bounds(
            self.layer_bounds.intermediate_bounds, self.batch_size, self.device
        )
        self.update_is_infeasible(is_infeasible)
    def update_split_constraints(
        self,
        relu_layers: Sequence[LayerTag],
        bounds: OrderedDict[LayerTag, Tuple[Tensor, Tensor]],
    ) -> None:
        """Mark newly stable ReLU nodes as split for all given layers that
        have both bounds and split constraints available."""
        if self.split_state is None or self.split_state.split_constraints is None:
            return
        for layer_id in relu_layers:
            if layer_id in bounds and layer_id in self.split_state.split_constraints:
                self.split_state.refine_split_constraints_for_relu(
                    layer_id, bounds[layer_id]
                )
    # for VerificationSubproblemQueue. TODO: why needed?
    def set_prima_coefficients(
        self, prima_coefficients: Dict[LayerTag, Tuple[Tensor, Tensor, Tensor]]
    ) -> None:
        assert self.prima_constraints is not None
        self.prima_constraints.set_prima_coefficients(prima_coefficients)
| 10,998 | 33.806962 | 93 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/state/prima_constraints.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Dict, Mapping, Tuple
import torch
from torch import Tensor
from src.state.tags import LayerTag
from src.utilities.custom_typing import implement_properties_as_fields
from src.utilities.tensor_management import deep_copy_to, deep_copy_to_no_clone, move_to
class ReadonlyPrimaConstraints(ABC):
    """
    The PRIMA constraints are of the form:
    output_var_coefs @ layer_output + input_var_coefs @ layer_input + const_coefs @ 1 <= 0
    """
    @property
    @abstractmethod
    def prima_coefficients(self) -> Mapping[LayerTag, Tuple[Tensor, Tensor, Tensor]]:
        # Per-layer triple (output_var_coefs, input_var_coefs, const_coefs).
        pass
    @property
    @abstractmethod
    def batch_size(self) -> int:
        pass
    @property
    @abstractmethod
    def device(self) -> torch.device:
        pass
    def deep_copy_to_no_clone(self, device: torch.device) -> ReadonlyPrimaConstraints:
        """Return a copy on ``device`` using the "no clone" tensor helper."""
        return PrimaConstraints(
            deep_copy_to_no_clone(self.prima_coefficients, device),
            self.batch_size,
            device,
        )
    def deep_copy_to(self, device: torch.device) -> PrimaConstraints:
        """Return a mutable deep copy moved to ``device``."""
        return PrimaConstraints(
            deep_copy_to(self.prima_coefficients, device), self.batch_size, device
        )
    @abstractmethod
    def set_prima_coefficients(
        self, prima_coefficients: Dict[LayerTag, Tuple[Tensor, Tensor, Tensor]]
    ) -> None:
        pass
@implement_properties_as_fields
class PrimaConstraints(ReadonlyPrimaConstraints):
    """
    The PRIMA constraints are of the form:
    output_var_coefs @ layer_output + input_var_coefs @ layer_input + const_coefs @ 1 <= 0
    """
    prima_coefficients: Dict[
        LayerTag, Tuple[Tensor, Tensor, Tensor]
    ]  # (output_var_coefs, input_var_coefs, const_coefs) (max 3 entries per row)
    batch_size: int
    device: torch.device
    def __init__(
        self,
        prima_coefficients: Dict[LayerTag, Tuple[Tensor, Tensor, Tensor]],
        batch_size: int,
        device: torch.device,
    ):
        self.prima_coefficients = prima_coefficients
        self.batch_size = batch_size
        self.device = device
    @classmethod
    def create_default(
        cls,
        batch_size: int,
        device: torch.device,
    ) -> PrimaConstraints:
        """Create an instance with no PRIMA coefficients yet."""
        prima_coefficients: Dict[LayerTag, Tuple[Tensor, Tensor, Tensor]] = {}
        return cls(prima_coefficients, batch_size, device)
    def move_to(self, device: torch.device) -> None:
        """Move all coefficient tensors to ``device`` (no-op if already there).

        Devices are compared with ``==`` rather than ``is``: equal
        ``torch.device`` objects need not be identical, so an identity check
        would trigger redundant moves. This also matches the convention used
        by ``Constraints.move_to``/``Parameters.move_to``.
        """
        if self.device == device:
            return
        self.prima_coefficients = move_to(self.prima_coefficients, device)
        self.device = device
    def set_prima_coefficients(
        self, prima_coefficients: Dict[LayerTag, Tuple[Tensor, Tensor, Tensor]]
    ) -> None:
        """Replace the stored coefficients, moving them to this state's device."""
        # assert prima_coefficients is self.prima_coefficients # TODO
        self.prima_coefficients = move_to(prima_coefficients, self.device)
| 2,936 | 28.666667 | 91 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/state/layer_bounds.py | from __future__ import annotations
from abc import ABC, abstractmethod
from collections import OrderedDict
from copy import deepcopy
from typing import List, Mapping, Optional, Sequence, Tuple
import torch
from torch import Tensor
from src.state.tags import LayerTag, NodeTag
from src.utilities.custom_typing import implement_properties_as_fields
from src.utilities.tensor_management import (
deep_copy,
deep_copy_to,
deep_copy_to_no_clone,
move_to,
)
class ReadonlyLayerBounds(ABC):
    """Read-only interface to per-layer intermediate bounds
    (lower/upper tensor pairs keyed by layer) plus the set of layers whose
    bounds are kept fixed after branching."""
    @property
    @abstractmethod
    def intermediate_layer_bounds_to_be_kept_fixed(
        self,
    ) -> Sequence[LayerTag]:  # uniform across batch
        pass
    @property
    @abstractmethod
    def intermediate_bounds(self) -> Mapping[LayerTag, Tuple[Tensor, Tensor]]:
        pass
    @property
    @abstractmethod
    def batch_size(self) -> int:
        pass
    @property
    @abstractmethod
    def device(self) -> torch.device:
        pass
    @property
    def fixed_intermediate_bounds(
        self,
    ) -> OrderedDict[LayerTag, Tuple[Tensor, Tensor]]:  # TODO: make this a Mapping?
        """The subset of intermediate bounds that are kept fixed."""
        # TODO: cache?
        return OrderedDict(
            (layer_id, bounds)
            for layer_id, bounds in self.intermediate_bounds.items()
            if layer_id in self.intermediate_layer_bounds_to_be_kept_fixed
        )
    def deep_copy_to(self, device: torch.device) -> LayerBounds:
        """Return a mutable deep copy moved to ``device``."""
        assert isinstance(self.intermediate_bounds, OrderedDict)
        return LayerBounds(
            self.intermediate_layer_bounds_to_be_kept_fixed,  # (immutable)
            deep_copy_to(self.intermediate_bounds, device),
            self.batch_size,
            device,
        )
    def deep_copy_to_no_clone(self, device: torch.device) -> ReadonlyLayerBounds:
        """Return a read-only copy on ``device`` using the "no clone" helper."""
        return LayerBounds.create_readonly(
            self.intermediate_layer_bounds_to_be_kept_fixed,  # (immutable)
            deep_copy_to_no_clone(self.intermediate_bounds, device),
            self.batch_size,
            device,
        )
    def split(
        self,
        node_to_split: NodeTag,
        split_point: Optional[float],
        recompute_intermediate_bounds_after_branching: bool,
        device: torch.device,
    ) -> Tuple[LayerBounds, LayerBounds, Sequence[LayerTag]]:
        """Branch the bounds at ``node_to_split``: the "negative" child caps
        the node's upper bound at ``split_point`` (0.0 for ReLU), the
        "positive" child raises its lower bound to ``split_point``. Also
        returns which layers' bounds stay fixed (all layers up to and
        including the split layer, or all layers if intermediate bounds are
        not recomputed)."""
        intermediate_layer_bounds_to_be_kept_fixed: List[LayerTag] = []
        # Note if we dont do this sharing we apparently run out of memory during the splitting process
        if recompute_intermediate_bounds_after_branching:
            for layer_id in self.intermediate_bounds:
                intermediate_layer_bounds_to_be_kept_fixed.append(layer_id)
                if layer_id == node_to_split.layer:
                    break
        else:
            intermediate_layer_bounds_to_be_kept_fixed = list(
                self.intermediate_bounds.keys()
            )
        layer_id = node_to_split.layer
        # Prepend the batch index (0) to the node's index within the layer.
        index_in_layer = 0, *node_to_split.index
        # assert (
        #     self.intermediate_bounds[layer_id][0][index_in_layer] < 0
        #     and self.intermediate_bounds[layer_id][1][index_in_layer] > 0
        # ), "Attempted to split a stable node."
        if split_point is None:  # ReLU
            split_point = 0.0
        intermediate_bounds_for_negative_split = deepcopy(self.intermediate_bounds)
        assert isinstance(intermediate_bounds_for_negative_split, OrderedDict)
        intermediate_bounds_for_positive_split = deepcopy(self.intermediate_bounds)
        assert isinstance(intermediate_bounds_for_positive_split, OrderedDict)
        # Negative child: clamp the node's upper bound down to the split point.
        layer_upper_bounds_for_negative_split = (
            intermediate_bounds_for_negative_split[layer_id][1].clone().detach()
        )
        layer_upper_bounds_for_negative_split[index_in_layer] = split_point
        intermediate_bounds_for_negative_split[layer_id] = (
            intermediate_bounds_for_negative_split[layer_id][0],
            layer_upper_bounds_for_negative_split,
        )
        # Positive child: raise the node's lower bound up to the split point.
        layer_lower_bounds_for_positive_split = (
            intermediate_bounds_for_positive_split[layer_id][0].clone().detach()
        )
        layer_lower_bounds_for_positive_split[index_in_layer] = split_point
        intermediate_bounds_for_positive_split[layer_id] = (
            layer_lower_bounds_for_positive_split,
            intermediate_bounds_for_positive_split[layer_id][1],
        )
        negative_split = LayerBounds(
            intermediate_layer_bounds_to_be_kept_fixed=intermediate_layer_bounds_to_be_kept_fixed,
            intermediate_bounds=intermediate_bounds_for_negative_split,
            batch_size=self.batch_size,
            device=device,
        )
        positive_split = LayerBounds(
            intermediate_layer_bounds_to_be_kept_fixed=intermediate_layer_bounds_to_be_kept_fixed,
            intermediate_bounds=intermediate_bounds_for_positive_split,
            batch_size=self.batch_size,
            device=device,
        )
        return (
            negative_split,
            positive_split,
            intermediate_layer_bounds_to_be_kept_fixed,
        )
@implement_properties_as_fields
class LayerBounds(ReadonlyLayerBounds):
    """Mutable per-layer intermediate bounds of a verification subproblem."""
    intermediate_layer_bounds_to_be_kept_fixed: Sequence[
        LayerTag
    ]  # uniform across batch
    intermediate_bounds: OrderedDict[LayerTag, Tuple[Tensor, Tensor]]
    batch_size: int
    device: torch.device
    def __init__(
        self,
        intermediate_layer_bounds_to_be_kept_fixed: Sequence[LayerTag],
        intermediate_bounds: OrderedDict[LayerTag, Tuple[Tensor, Tensor]],
        batch_size: int,
        device: torch.device,
    ):
        self.intermediate_layer_bounds_to_be_kept_fixed = (
            intermediate_layer_bounds_to_be_kept_fixed
        )
        self.intermediate_bounds = intermediate_bounds
        self.batch_size = batch_size
        self.device = device
    @classmethod
    def create_readonly(
        cls,
        intermediate_layer_bounds_to_be_kept_fixed: Sequence[LayerTag],
        intermediate_bounds: Mapping[LayerTag, Tuple[Tensor, Tensor]],
        batch_size: int,
        device: torch.device,
    ) -> ReadonlyLayerBounds:
        """Wrap an existing OrderedDict-backed mapping as read-only bounds."""
        assert isinstance(intermediate_bounds, OrderedDict)
        return LayerBounds(
            intermediate_layer_bounds_to_be_kept_fixed,  # (immutable)
            intermediate_bounds,
            batch_size,
            device,
        )
    @classmethod
    def create_default(
        cls,
        batch_size: int,
        device: torch.device,
    ) -> LayerBounds:
        """Create empty layer bounds (no layers, nothing kept fixed)."""
        intermediate_layer_bounds_to_be_kept_fixed: Sequence[LayerTag] = []
        intermediate_bounds: OrderedDict[
            LayerTag, Tuple[Tensor, Tensor]
        ] = OrderedDict()
        return cls(
            intermediate_layer_bounds_to_be_kept_fixed,
            intermediate_bounds,
            batch_size,
            device,
        )
    def move_to(self, device: torch.device) -> None:
        """Move all bound tensors to ``device`` (no-op if already there).

        Devices are compared with ``==`` rather than ``is``: equal
        ``torch.device`` objects need not be identical, so an identity check
        would trigger redundant moves. This also matches the convention used
        by ``Constraints.move_to``/``Parameters.move_to``.
        """
        if self.device == device:
            return
        self.intermediate_bounds = move_to(self.intermediate_bounds, device)
        self.device = device
    def improve(
        self,
        new_intermediate_bounds: OrderedDict[
            LayerTag, Tuple[Tensor, Tensor]
        ],  # TODO: make this a LayerBounds as well?
    ) -> None:
        """Tighten stored bounds with ``new_intermediate_bounds``: take the
        element-wise maximum of lower and minimum of upper bounds for known
        layers; bounds for unknown layers are deep-copied in."""
        for layer_id in new_intermediate_bounds:
            if layer_id in self.intermediate_bounds:
                self.intermediate_bounds[layer_id] = (
                    torch.maximum(
                        self.intermediate_bounds[layer_id][0],
                        new_intermediate_bounds[layer_id][0],
                    ).detach(),
                    torch.minimum(
                        self.intermediate_bounds[layer_id][1],
                        new_intermediate_bounds[layer_id][1],
                    ).detach(),
                )
            else:
                self.intermediate_bounds[layer_id] = deep_copy(
                    new_intermediate_bounds[layer_id]
                )
| 8,005 | 33.808696 | 102 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_flatten.py | from __future__ import annotations
from typing import Any, Optional, Tuple
import numpy as np
import torch.nn as nn
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.verification_subproblem import SubproblemState
class Flatten(nn.Flatten, AbstractModule):
    """Abstract counterpart of nn.Flatten.

    Remembers the (unbatched) input shape so backsubstitution can restore
    flattened query coefficients to the original spatial layout.
    """

    def __init__(
        self, start_dim: int, end_dim: int, input_dim: Tuple[int, ...]
    ) -> None:
        super().__init__(start_dim, end_dim)  # type: ignore # mypy issue 4335
        self.input_dim = input_dim
        self.output_dim = (np.prod(input_dim),)

    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: nn.Flatten, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> Flatten:
        """Lift a concrete nn.Flatten into its abstract counterpart."""
        assert isinstance(module, nn.Flatten)
        return cls(module.start_dim, module.end_dim, input_dim)

    def backsubstitute(
        self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
    ) -> MN_BaB_Shape:
        """Backsubstitute the shape's affine bounds through the flatten op."""
        # Workaround: an abstract network stored with input dim (784,) is
        # reloaded with input dim (784, 1); drop the spurious trailing
        # singleton dimension.
        if len(self.input_dim) == 2 and self.input_dim[1] == 1:
            self.input_dim = (self.input_dim[0],)
        lower = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
        upper: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            upper = self._backsub_affine_form(abstract_shape.ub, abstract_shape)
        abstract_shape.update_bounds(lower, upper)
        return abstract_shape

    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Reshape flattened coefficients back to this layer's input shape."""
        coef = affine_form.coef
        assert isinstance(coef, Tensor)
        # Keep the leading (batch, query) dims, restore the spatial dims.
        restored = coef.view(*coef.size()[:2], *self.input_dim)
        return AffineForm(restored, affine_form.bias)

    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Flatten both interval ends (flattening is elementwise-order preserving)."""
        lb, ub = interval
        return self.forward(lb), self.forward(ub)

    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Delegate to the abstract element's own flatten."""
        return abs_input.flatten()
| 2,866 | 36.233766 | 114 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_mulit_path_block.py | from __future__ import annotations
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from torch import Tensor, nn
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_binary_op import BinaryOp
from src.abstract_layers.abstract_concat import Concat
from src.abstract_layers.abstract_container_module import (
AbstractContainerModule,
ActivationLayer,
)
from src.abstract_layers.abstract_sequential import Sequential
from src.abstract_layers.abstract_slice import Slice
from src.concrete_layers.binary_op import BinaryOp as concreteBinaryOp
from src.concrete_layers.concat import Concat as concreteConcat
from src.concrete_layers.multi_path_block import (
MultiPathBlock as concreteMultiPathBlock,
)
from src.concrete_layers.slice import Slice as concreteSlice
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.subproblem_state import SubproblemState
from src.state.tags import LayerTag
from src.utilities.config import BacksubstitutionConfig
class MultiPathBlock(concreteMultiPathBlock, AbstractContainerModule):
    """Abstract block with an optional slicing header, several parallel
    paths, and a merge layer (Concat or BinaryOp) joining the path outputs."""

    header: Optional[Slice]  # type: ignore[assignment]
    paths: nn.ModuleList  # type: ignore[assignment]
    merge: Union[Concat, BinaryOp]  # type: ignore[assignment]

    def __init__(
        self,
        header: Optional[concreteSlice],
        paths: List[nn.Sequential],
        merge: Union[concreteConcat, concreteBinaryOp],
        input_dim: Tuple[int, ...],
        **kwargs: Any,
    ) -> None:
        """Build the abstract block by lifting header, paths and merge.

        Args:
            header: Optional concrete slice feeding the paths; when None all
                paths receive the block input directly.
            paths: Concrete sequential sub-networks, one per path.
            merge: Concrete merge layer; a BinaryOp merge requires exactly
                two paths.
            input_dim: Unbatched input shape of the block.
        """
        super(MultiPathBlock, self).__init__(header=header, paths=paths, merge=merge)
        # Header
        self.header: Optional[Slice] = None
        path_in_dim = input_dim
        if header is not None:
            assert isinstance(header, concreteSlice)
            self.header = Slice.from_concrete_module(header, input_dim, **kwargs)
            path_in_dim = self.header.output_dim
        # Paths
        abs_paths: List[Sequential] = []
        for path in paths:
            abs_paths.append(
                Sequential.from_concrete_module(path, path_in_dim, **kwargs)
            )
        self.paths = nn.ModuleList(abs_paths)
        # Merge
        merge_in_dims = [path.output_dim for path in abs_paths]  # TODO
        if isinstance(merge, concreteConcat):
            self.merge = Concat.from_concrete_module(merge, merge_in_dims, **kwargs)
        elif isinstance(merge, concreteBinaryOp):
            assert len(paths) == 2
            self.merge = BinaryOp.from_concrete_module(merge, merge_in_dims, **kwargs)
        else:
            assert False, f"Unknown merge block: {str(merge)}"
        # Other parameters
        self.output_dim = self.merge.output_dim
        self.bias = self.get_babsr_bias()
    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls,
        module: concreteMultiPathBlock,
        input_dim: Tuple[int, ...],
        **kwargs: Any,
    ) -> MultiPathBlock:
        """Lift a concrete MultiPathBlock into its abstract counterpart."""
        assert isinstance(module, concreteMultiPathBlock)
        abstract_layer = cls(  # Checked at runtime
            module.header,  # type: ignore[arg-type]
            module.paths,  # type: ignore[arg-type]
            module.merge,  # type: ignore[arg-type]
            input_dim,
            **kwargs,
        )
        return abstract_layer
    def backsubstitute_shape(
        self,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        abstract_shape: MN_BaB_Shape,
        from_layer_index: Optional[int],
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],  # = None
        preceeding_layers: Optional[List[Any]],  # = None
        use_early_termination_for_current_query: bool,  # = False
        full_back_prop: bool,  # = False
        optimize_intermediate_bounds: bool,  # = False
    ) -> Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]]:
        """Backsubstitute an abstract shape through merge, paths and header.

        Order of propagation (reverse of the forward pass): first through the
        merge layer (yielding one shape per path), then through each path,
        then through the header (or by summing path shapes when there is no
        header). The bounds returned alongside the shape are trivial
        (-inf/inf); callers concretize the shape themselves.
        """
        # Create corresponding preceeding callback
        propagate_preceeding_callback = self._get_header_callback(
            propagate_preceeding_callback
        )
        if preceeding_layers is not None:
            if self.header is not None:
                preceeding_layers = [*preceeding_layers, self.header]
        else:
            if self.header is not None:
                preceeding_layers = [self.header]
        # Keep the incoming forms: their bias is counted once per path below
        # and the duplicates have to be subtracted again at the end.
        orig_lb = abstract_shape.lb.clone()
        orig_ub: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            orig_ub = abstract_shape.ub.clone()
        unstable_queries_old_for_assert = abstract_shape.unstable_queries
        # Backprop through merge layer -> get individual shapes?
        pre_merge_shapes = self.merge.backsubstitute(config, abstract_shape)
        # Backprop through individual paths
        post_path_shapes: List[MN_BaB_Shape] = []
        for path_shape, path in zip(pre_merge_shapes, self.paths):
            (
                post_path_shape,
                (post_path_lbs, post_path_ubs),
            ) = path.backsubstitute_shape(
                config,
                input_lb,
                input_ub,
                path_shape,
                None,
                propagate_preceeding_callback,
                preceeding_layers,
                use_early_termination_for_current_query=False,
                full_back_prop=False,
                optimize_intermediate_bounds=optimize_intermediate_bounds,
            )
            post_path_shapes.append(post_path_shape)
            # The set of unstable queries must not change while propagating
            # through the paths.
            assert (
                abstract_shape.unstable_queries is None
                or (
                    abstract_shape.unstable_queries == unstable_queries_old_for_assert
                ).all()
            )
        # Backprop through header layer -> get one shape
        if self.header is not None:
            post_header_shape = self.header.backsubstitute(config, post_path_shapes)
        else:  # All paths are from the same input we can add them up
            final_lb_form = post_path_shapes[0].lb
            final_ub_form: Optional[AffineForm] = None
            if post_path_shapes[0].ub is not None:
                final_ub_form = post_path_shapes[0].ub
            for abs_shape in post_path_shapes[1:]:
                final_lb_form.coef += abs_shape.lb.coef
                final_lb_form.bias += abs_shape.lb.bias
                if abs_shape.ub is not None:
                    assert final_ub_form is not None
                    final_ub_form.coef += abs_shape.ub.coef
                    final_ub_form.bias += abs_shape.ub.bias
            post_header_shape = abstract_shape.clone_with_new_bounds(
                final_lb_form, final_ub_form
            )
        # Adjust bias
        new_lower: AffineForm
        new_upper: Optional[AffineForm] = None
        new_lb_bias = (
            post_header_shape.lb.bias - (len(self.paths) - 1) * orig_lb.bias
        )  # Both the shape in a and in b contain the initial bias terms, so one has to be subtracted
        new_lb_coef = post_header_shape.lb.coef
        new_lower = AffineForm(new_lb_coef, new_lb_bias)
        if post_header_shape.ub is not None and orig_ub is not None:
            new_ub_bias = (
                post_header_shape.ub.bias - (len(self.paths) - 1) * orig_ub.bias
            )
            new_ub_coef = post_header_shape.ub.coef
            new_upper = AffineForm(new_ub_coef, new_ub_bias)
        abstract_shape.update_bounds(new_lower, new_upper)
        return (
            abstract_shape,
            (
                -np.inf * torch.ones_like(post_path_lbs, device=abstract_shape.device),
                np.inf * torch.ones_like(post_path_lbs, device=abstract_shape.device),
            ),
        )
def get_babsr_bias(self) -> Tensor:
biases: List[Tensor] = []
is_cuda = False
# In case one of them is one the gpu we will move both to gpu
# Have to do this here as the paths (sequentials) are unaware of the device
for p in self.paths:
bias = p.get_babsr_bias()
biases.append(bias.detach())
if bias.is_cuda:
is_cuda = True
if is_cuda:
c_biases: List[Tensor] = []
for b in biases:
c_biases.append(b.cuda())
biases = c_biases
bias_shape = biases[0].shape
bias_numel = biases[0].numel()
for b in biases:
if b.numel() > bias_numel:
bias_shape = b.shape
base_sum: Tensor = biases[0].broadcast_to(bias_shape).clone()
if len(biases) > 1:
for b in biases[1:]:
base_sum += b
return nn.Parameter(base_sum)
def reset_input_bounds(self) -> None:
super(MultiPathBlock, self).reset_input_bounds()
for path in self.paths:
path.reset_input_bounds()
def reset_optim_input_bounds(self) -> None:
super(MultiPathBlock, self).reset_input_bounds()
for path in self.paths:
path.reset_optim_input_bounds()
def reset_output_bounds(self) -> None:
super(MultiPathBlock, self).reset_output_bounds()
for path in self.paths:
path.reset_output_bounds()
    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Interval (IBP) propagation: header, then each path, then merge."""
        interval_list: List[Tuple[Tensor, Tensor]] = []
        if self.header is not None:
            interval_head = self.header.propagate_interval(
                interval,
                use_existing_bounds=use_existing_bounds,
                subproblem_state=subproblem_state,
                activation_layer_only=activation_layer_only,
                set_input=set_input,
                set_output=set_output,
            )
        else:
            interval_head = interval
        # A slicing header may return one interval per path; otherwise all
        # paths share the same input interval.
        if isinstance(interval_head, List):
            assert len(interval_head) == len(self.paths)
            interval_list = interval_head
        else:
            interval_list = [interval_head for _ in self.paths]
        out_intervals: List[Tuple[Tensor, Tensor]] = []
        for input, path in zip(interval_list, self.paths):
            out_intervals.append(
                path.propagate_interval(
                    input,
                    use_existing_bounds=use_existing_bounds,
                    subproblem_state=subproblem_state,
                    activation_layer_only=activation_layer_only,
                    set_input=set_input,
                    set_output=set_output,
                )
            )
        merge_interval = self.merge.propagate_interval(
            out_intervals,
            use_existing_bounds=use_existing_bounds,
            subproblem_state=subproblem_state,
            activation_layer_only=activation_layer_only,
        )  # type ignore
        return merge_interval
    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Propagate an abstract element: header, then each path, then merge."""
        ae_list: List[AbstractElement] = []
        if self.header is not None:
            ae_head = self.header.propagate_abstract_element(
                abs_input,
                use_existing_bounds=use_existing_bounds,
                activation_layer_only=activation_layer_only,
                set_input=set_input,
                set_output=set_output,
            )
        else:
            ae_head = abs_input
        # A slicing header may return one element per path; otherwise all
        # paths share the same input element.
        if isinstance(ae_head, List):
            assert len(ae_head) == len(self.paths)
            ae_list = ae_head
        else:
            ae_list = [ae_head for _ in self.paths]
        out_aes: List[AbstractElement] = []
        for path_input, path in zip(ae_list, self.paths):
            abs_output = path.propagate_abstract_element(
                path_input,
                use_existing_bounds,
                activation_layer_only,
                set_input=set_input,
                set_output=set_output,
            )
            out_aes.append(abs_output)
        out_ae = self.merge.propagate_abstract_element(
            out_aes,  # type: ignore [arg-type]
            use_existing_bounds=use_existing_bounds,
            activation_layer_only=activation_layer_only,
            set_input=set_input,
            set_output=set_output,
        )
        return out_ae
    def forward_pass(
        self,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
        preceeding_layers: Optional[List[Any]],
        ibp_call: Callable[[], None],
        timeout: float,
    ) -> None:
        """Run the forward (bound-setting) pass through every path.

        The preceeding callback is wrapped so that the optional header slice
        is backsubstituted before the layers preceding this block.
        """
        header_callback = self._get_header_callback(propagate_preceeding_callback)
        for path in self.paths:
            path.forward_pass(
                config,
                input_lb,
                input_ub,
                header_callback,
                preceeding_layers,
                ibp_call,
                timeout,
            )
    def set_dependence_set_applicability(self, applicable: bool = True) -> None:
        """Mark whether dependence sets can be used through this block.

        The block is applicable only if every path's last layer reports
        applicability. NOTE(review): the loop breaks early once one path is
        found inapplicable, so set_dependence_set_applicability is not called
        on the remaining paths in that case — presumably intentional
        short-circuiting; confirm.
        """
        is_applicable = True
        for path in self.paths:
            path.set_dependence_set_applicability(applicable)
            if path.layers[-1].dependence_set_applicable is not None:
                is_applicable &= path.layers[-1].dependence_set_applicable
            if not is_applicable:
                break
        self.dependence_set_applicable = is_applicable
def get_default_split_constraints(
self, batch_size: int, device: torch.device
) -> Dict[LayerTag, Tensor]:
split_constraints: Dict[LayerTag, Tensor] = {}
for path in self.paths:
split_constraints.update(
path.get_default_split_constraints(batch_size, device)
)
return split_constraints
def get_default_split_points(
self, batch_size: int, device: torch.device
) -> Dict[LayerTag, Tensor]:
split_points: Dict[LayerTag, Tensor] = {}
for path in self.paths:
split_points.update(path.get_default_split_points(batch_size, device))
return split_points
def get_activation_layers(self) -> Dict[LayerTag, ActivationLayer]:
act_layers: Dict[LayerTag, ActivationLayer] = {}
for path in self.paths:
act_layers.update(path.get_activation_layers())
return act_layers
def get_current_intermediate_bounds(
self,
) -> OrderedDict[LayerTag, Tuple[Tensor, Tensor]]:
intermediate_bounds: OrderedDict[
LayerTag, Tuple[Tensor, Tensor]
] = OrderedDict()
for path in self.paths:
intermediate_bounds.update(path.get_current_intermediate_bounds())
return intermediate_bounds
def get_current_optimized_intermediate_bounds(
self,
) -> OrderedDict[LayerTag, Tuple[Tensor, Tensor]]:
intermediate_bounds: OrderedDict[
LayerTag, Tuple[Tensor, Tensor]
] = OrderedDict()
for path in self.paths:
intermediate_bounds.update(path.get_current_optimized_intermediate_bounds())
return intermediate_bounds
def set_intermediate_input_bounds(
self, intermediate_bounds: OrderedDict[LayerTag, Tuple[Tensor, Tensor]]
) -> None:
for path in self.paths:
path.set_intermediate_input_bounds(intermediate_bounds)
def get_activation_layer_ids(
self, act_layer_ids: Optional[List[LayerTag]] = None
) -> List[LayerTag]:
if act_layer_ids is None:
act_layer_ids = []
for path in self.paths:
act_layer_ids += path.get_activation_layer_ids()
return act_layer_ids
def get_relu_layer_ids(
self, act_layer_ids: Optional[List[LayerTag]] = None
) -> List[LayerTag]:
if act_layer_ids is None:
act_layer_ids = []
for path in self.paths:
act_layer_ids += path.get_relu_layer_ids()
return act_layer_ids
    def _get_header_callback(
        self,
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
    ) -> Callable[
        [BacksubstitutionConfig, MN_BaB_Shape, bool],
        Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
    ]:
        """ReLU layers within the paths need a propagate preceeding callback that takes the header at the top into account"""

        def wrapped_call(
            config: BacksubstitutionConfig,
            abstract_shape: MN_BaB_Shape,
            use_early_termination_for_current_query: bool,
        ) -> Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]]:
            # First undo the header slice, then continue with the layers that
            # precede this block (if any).
            if self.header is not None:
                abstract_shape = self.header.backsubstitute(config, abstract_shape)
            if propagate_preceeding_callback is not None:
                return propagate_preceeding_callback(
                    config,
                    abstract_shape,
                    use_early_termination_for_current_query,
                )
            else:
                # No preceding layers: return trivial (-inf, inf) bounds.
                assert isinstance(abstract_shape.lb.coef, Tensor)
                bound_shape = abstract_shape.lb.coef.shape[:2]
                return (
                    abstract_shape,
                    (
                        -np.inf * torch.ones(bound_shape, device=abstract_shape.device),
                        np.inf * torch.ones(bound_shape, device=abstract_shape.device),
                    ),  # TODO: this seems unnecessary, move bounds into abstract_shape and just update them when it makes sense
                )

        return wrapped_call
| 18,401 | 36.026157 | 128 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_normalization.py | from __future__ import annotations
from typing import Any, Optional, Sequence, Tuple
import torch
from torch import Tensor
import src.concrete_layers.normalize as concrete_normalize
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.verification_subproblem import SubproblemState
class Normalization(concrete_normalize.Normalize, AbstractModule):
    """Abstract counterpart of the Normalize layer: x -> (x - means) / stds."""

    # Fixed class-level annotations: the concrete layer stores (and all code
    # below uses) the attributes `means` and `stds`; the previous annotations
    # declared non-existent attributes `mean` and `sigma`.
    means: Tensor
    stds: Tensor
    channel_dim: int
    output_dim: Tuple[int, ...]
    dependence_set_block: bool

    def __init__(
        self,
        means: Sequence[float],
        stds: Sequence[float],
        device: torch.device,
        channel_dim: int,
        output_dim: Tuple[int, ...],
    ) -> None:
        super(Normalization, self).__init__(means, stds, channel_dim)
        super(Normalization, self).to(device)
        self.output_dim = output_dim
        self.dependence_set_block = False

    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls,
        module: concrete_normalize.Normalize,
        input_dim: Tuple[int, ...],
        **kwargs: Any,
    ) -> Normalization:
        """Lift a concrete Normalize layer into its abstract counterpart."""
        assert isinstance(module, concrete_normalize.Normalize)
        return cls(
            module.means.flatten().tolist(),
            module.stds.flatten().tolist(),
            module.means.device,
            module.channel_dim,
            input_dim,
        )

    def backsubstitute(
        self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
    ) -> MN_BaB_Shape:
        """Backsubstitute the shape's affine bounds through the normalization."""
        new_lb_form = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
        new_ub_form: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            new_ub_form = self._backsub_affine_form(abstract_shape.ub, abstract_shape)
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape

    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Fold y = (x - means) / stds into the affine form.

        The coefficients are scaled by 1/stds; the constant -means/stds is
        absorbed into the bias.
        """
        assert isinstance(affine_form.coef, Tensor)
        # Broadcast means/stds over the channel dimension of the coefficients.
        req_shape = [1] * affine_form.coef.dim()
        req_shape[2] = self.means.numel()
        new_bias = affine_form.bias + (
            affine_form.coef * (-self.means / self.stds).view(req_shape)
        ).view(*affine_form.coef.size()[:2], -1).sum(2)
        new_coef = affine_form.coef / self.stds.view(req_shape)
        return AffineForm(new_coef, new_bias)

    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Propagate an interval elementwise (normalization is monotone as
        long as stds are positive)."""
        interval_lb, interval_ub = interval
        output_lb, output_ub = self.forward(interval_lb), self.forward(interval_ub)
        # assert (output_ub >= output_lb).all()
        return output_lb, output_ub

    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Delegate to the abstract element's own normalize transformer."""
        return abs_input.normalize(self.means, self.stds)
| 3,476 | 32.757282 | 86 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_binary_op.py | from __future__ import annotations
import typing
from typing import Any, Optional, Sequence, Tuple, Union
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.concrete_layers.binary_op import BinaryOp as concreteBinaryOp
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.subproblem_state import SubproblemState
from src.utilities.config import BacksubstitutionConfig
from src.utilities.dependence_sets import DependenceSets
class BinaryOp(concreteBinaryOp, AbstractModule):
    """Abstract elementwise binary operation ("add" or "sub") joining two
    inputs of identical shape."""

    def __init__(self, op: str, input_dim: Tuple[int, ...]) -> None:
        super(BinaryOp, self).__init__(op)
        self.op = op  # "add" or "sub"
        self.input_dim = input_dim
        self.output_dim = input_dim

    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls,
        module: concreteBinaryOp,
        input_dim: Sequence[Tuple[int, ...]],
        **kwargs: Any,
    ) -> BinaryOp:
        """Lift a concrete BinaryOp; expects exactly two equal input shapes."""
        assert isinstance(module, concreteBinaryOp)
        assert len(input_dim) == 2
        assert input_dim[0] == input_dim[1]
        return cls(module.op, input_dim[0])

    def backsubstitute(  # type:ignore[override]
        self,
        config: BacksubstitutionConfig,
        abstract_shape: MN_BaB_Shape,
    ) -> Tuple[MN_BaB_Shape, MN_BaB_Shape]:
        """Split one incoming shape into two shapes, one per operand branch."""
        new_lb_left_form, new_lb_right_form = self._backsub_affine_form(
            abstract_shape.lb, abstract_shape
        )
        new_ub_left_form: Optional[AffineForm] = None
        new_ub_right_form: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            new_ub_left_form, new_ub_right_form = self._backsub_affine_form(
                abstract_shape.ub, abstract_shape
            )
        left_as = abstract_shape.clone_with_new_bounds(
            new_lb_left_form, new_ub_left_form
        )  # redundant
        right_as = abstract_shape.clone_with_new_bounds(
            new_lb_right_form, new_ub_right_form
        )
        return (left_as, right_as)

    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> Tuple[AffineForm, AffineForm]:
        """Build the per-operand affine forms.

        For "add" both operands keep the coefficients unchanged; for "sub"
        the right operand's coefficients are negated. NOTE(review): the bias
        tensor is shared (aliased) between the two returned forms — fine as
        long as downstream code does not mutate it in place; confirm.
        """
        if abstract_shape.uses_dependence_sets():
            assert isinstance(affine_form.coef, DependenceSets)
            coef = affine_form.coef.sets
        else:
            assert isinstance(affine_form.coef, Tensor)
            coef = affine_form.coef
        left_coef = coef
        right_coef = coef
        bias = affine_form.bias
        if self.op == "add":
            pass
        elif self.op == "sub":
            right_coef = -1 * right_coef
        else:
            assert False, f"Unknown operator {self.op}"
        final_left_coef: Union[Tensor, DependenceSets] = left_coef
        final_right_coef: Union[Tensor, DependenceSets] = right_coef
        if abstract_shape.uses_dependence_sets():
            # Re-wrap the raw coefficient tensors with the original
            # dependence-set metadata (spatial indices, stride, padding).
            assert isinstance(affine_form.coef, DependenceSets)
            assert isinstance(final_left_coef, Tensor)
            assert isinstance(final_right_coef, Tensor)
            final_left_coef = DependenceSets(
                final_left_coef,
                affine_form.coef.spatial_idxs,
                affine_form.coef.input_dim,
                affine_form.coef.cstride,
                affine_form.coef.cpadding,
            )
            final_right_coef = DependenceSets(
                final_right_coef,
                affine_form.coef.spatial_idxs,
                affine_form.coef.input_dim,
                affine_form.coef.cstride,
                affine_form.coef.cpadding,
            )
        return (AffineForm(final_left_coef, bias), AffineForm(final_right_coef, bias))

    @typing.no_type_check  # Mypy can't handle the buffer type
    def propagate_interval(
        self,
        intervals: Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Exact interval arithmetic for add/sub of two independent intervals."""
        left_lb, left_ub = intervals[0]
        right_lb, right_ub = intervals[1]
        if self.op == "add":
            return (left_lb + right_lb, left_ub + right_ub)
        elif self.op == "sub":
            # [a, b] - [c, d] = [a - d, b - c]
            return (left_lb - right_ub, left_ub - right_lb)
        else:
            assert False, f"Unknown operation {self.op}"

    def propagate_abstract_element(  # type: ignore [override] # supertype expects just one abstract element, but this is a special case
        self,
        abs_inputs: Tuple[AbstractElement, AbstractElement],
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Combine two abstract elements with the configured operation."""
        if self.op == "add":
            return abs_inputs[0] + abs_inputs[1]
        elif self.op == "sub":
            return abs_inputs[0] - abs_inputs[1]
        else:
            assert False, f"Unknown operation {self.op}"
| 5,158 | 35.588652 | 136 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_network.py | from __future__ import annotations
from typing import Any, Dict, Iterable, Optional, Tuple
import torch
from torch import Size, Tensor, nn
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_container_module import (
ActivationLayer,
ActivationLayers,
)
from src.abstract_layers.abstract_module import AbstractModule
from src.abstract_layers.abstract_sequential import Sequential
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.tags import LayerTag, QueryTag
from src.utilities.abstract_module_mapper import AbstractModuleMapper
from src.utilities.config import BacksubstitutionConfig
from src.verification_subproblem import SubproblemState
class AbstractNetwork(Sequential):
    """Abstract (bound-propagation) view of a full network.

    Extends Sequential with an index of activation layers by id and an
    optional output adapter used to encode disjunctive output properties.
    """

    def __init__(
        self,
        layers: Iterable[AbstractModule],
    ) -> None:
        super(AbstractNetwork, self).__init__(layers)
        # Maps layer id -> activation layer; populated by set_activation_layers().
        self.layer_id_to_layer: Dict[LayerTag, ActivationLayer] = {}
        self.has_output_adapter: bool = False  # Whether or not we appended an adapter to the network for computing a disjunctive clause
    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: nn.Sequential, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> AbstractNetwork:
        """Lift a concrete nn.Sequential into an AbstractNetwork."""
        assert isinstance(module, nn.Sequential)
        # Reuse Sequential's converter, then re-wrap its layers so the result
        # is an AbstractNetwork with the activation-layer index built.
        layers = Sequential.from_concrete_module(
            module,
            input_dim,
            concrete_to_abstract=AbstractModuleMapper.map_to_abstract_type,
        ).layers
        obj = cls(layers)
        obj.set_activation_layers()
        return obj
    def append_out_adapter(
        self, module: nn.Sequential, device: torch.device, dtype: torch.dtype
    ) -> None:
        """
        Appends an output adapter to the current network, i.e.,
        we append several layers to the network to encode specific output properties.

        Args:
            module (nn.Sequential): The sequential layers to append (represents the concrete layers)
            device: Device to move the abstract adapter to.
            dtype: Dtype to cast the abstract adapter to.

        Returns:
            None; the abstract network is updated in place.
        """
        assert not self.has_output_adapter
        new_layers = (
            Sequential.from_concrete_module(
                module,
                self.output_dim,
                concrete_to_abstract=AbstractModuleMapper.map_to_abstract_type,
            )
            .to(device)
            .to(dtype)
        )
        # Note that we add it as a single sequential block
        self.has_output_adapter = True
        self.layers.append(new_layers)
        self.set_activation_layers()
        self.output_dim = new_layers.output_dim
def remove_out_adapter(self) -> None:
assert self.has_output_adapter
self.layers = self.layers[:-1]
self.has_output_adapter = False
self.set_activation_layers()
self.output_dim = self.layers[-1].output_dim
    def get_mn_bab_shape(
        self,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        query_id: QueryTag,
        query_coef: Tensor,
        subproblem_state: Optional[SubproblemState],
        compute_upper_bound: bool,
        reset_input_bounds: bool,  # = True # TODO: is this ever False?
        optimize_intermediate_bounds: bool,  # = False
        recompute_intermediate_bounds: bool,
    ) -> MN_BaB_Shape:
        """Build a fresh MN_BaB_Shape for query_coef and backsubstitute it
        through the whole network."""
        assert query_coef.is_leaf
        abstract_shape = MN_BaB_Shape(
            query_id=query_id,
            query_prev_layer=None,  # TODO: reduced parameter sharing for the output layer?
            queries_to_compute=None,  # compute all queries
            lb=AffineForm(query_coef),
            ub=AffineForm(query_coef) if compute_upper_bound else None,
            unstable_queries=None,  # (not using early termination)
            subproblem_state=subproblem_state,
        )
        return self.backsubstitute_mn_bab_shape(
            config=config,
            input_lb=input_lb,
            input_ub=input_ub,
            query_coef=None,
            abstract_shape=abstract_shape,
            compute_upper_bound=compute_upper_bound,
            reset_input_bounds=reset_input_bounds,
            optimize_intermediate_bounds=optimize_intermediate_bounds,
            recompute_intermediate_bounds=recompute_intermediate_bounds,
        )
    def backsubstitute_mn_bab_shape(  # TODO: get rid of this?
        self,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        query_coef: Optional[Tensor],
        abstract_shape: MN_BaB_Shape,
        compute_upper_bound: bool,
        reset_input_bounds: bool,  # = True # TODO: is this ever False?
        recompute_intermediate_bounds: bool,
        optimize_intermediate_bounds: bool,  # = False
    ) -> MN_BaB_Shape:
        """Backsubstitute an existing shape from the last layer to the input.

        When a subproblem state is given, either only the bounds marked as
        fixed are pinned (so the rest get recomputed) or all stored
        intermediate bounds are pinned. If query_coef is given, the shape's
        bounds are (re-)initialized from it first.
        """
        if reset_input_bounds:
            self.reset_input_bounds()
        subproblem_state = abstract_shape.subproblem_state
        if subproblem_state is not None:
            if recompute_intermediate_bounds:  # only set the bounds to be kept fix
                self.set_intermediate_input_bounds(
                    subproblem_state.constraints.layer_bounds.fixed_intermediate_bounds
                )
            else:  # Set all bounds => recompute none
                self.set_intermediate_input_bounds(
                    subproblem_state.constraints.layer_bounds.intermediate_bounds
                )
        if query_coef is not None:
            assert query_coef.is_leaf
            abstract_shape.update_bounds(  # Cloning necessary to prevent aliasing
                AffineForm(query_coef.clone()),
                AffineForm(query_coef.clone()) if compute_upper_bound else None,
            )
        shape, __ = self._get_mn_bab_shape_after_layer(
            from_layer_index=len(self.layers) - 1,
            config=config,
            input_lb=input_lb,
            input_ub=input_ub,
            abstract_shape=abstract_shape,
            propagate_preceeding_callback=None,
            preceeding_layers=None,
            use_early_termination_for_current_query=False,
            optimize_intermediate_bounds=optimize_intermediate_bounds,
        )
        assert shape is not None
        return shape
    def set_activation_layers(self) -> None:
        """Rebuild the id -> activation-layer index from the current layers."""
        self.layer_id_to_layer = self.get_activation_layers()
def activation_layer_bounds_to_optim_layer_bounds(self) -> None:
act_layers = self.get_activation_layers()
for id, layer in act_layers.items():
if layer.optim_input_bounds is None:
if layer.input_bounds is not None:
layer.optim_input_bounds = (
layer.input_bounds[0].detach(),
layer.input_bounds[1].detach(),
)
elif layer.input_bounds is not None:
opt_lb = torch.max(
layer.input_bounds[0].detach(), layer.optim_input_bounds[0]
)
opt_ub = torch.min(
layer.input_bounds[1].detach(), layer.optim_input_bounds[1]
)
layer.optim_input_bounds = (opt_lb, opt_ub)
    def set_layer_bounds_via_forward_dp_pass(
        self,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        timeout: float,
    ) -> None:
        """Set per-layer bounds via a forward pass over all layers, with an
        interval-propagation callback to refresh activation-layer bounds."""
        # wraps layers in a sequential
        layers_as_seq = Sequential(self.layers)

        # Create a reference to trigger a forward ibp call
        def ibp_call() -> None:
            self.set_layer_bounds_via_interval_propagation(
                input_lb,
                input_ub,
                use_existing_bounds=True,
                activation_layer_only=True,
                has_batch_dim=True,
                set_input=True,
                set_output=False,
            )

        layers_as_seq.forward_pass(
            config=config,
            input_lb=input_lb,
            input_ub=input_ub,
            propagate_preceeding_callback=None,
            preceeding_layers=None,
            ibp_call=ibp_call,
            timeout=timeout,
        )
        # final_layer = layers_as_seq.layers[-1]
        # assert isinstance(final_layer, AbstractModule)
        # assert final_layer.output_bounds is not None
        # return final_layer.output_bounds
    def set_layer_bounds_via_interval_propagation(
        self,
        input_lb: Tensor,
        input_ub: Tensor,
        use_existing_bounds: bool = False,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Propagate an input interval through all layers, caching bounds.

        Bounds are recorded on every layer, or only on activation layers when
        activation_layer_only is set. When use_existing_bounds is set, the
        running interval is tightened against each layer's cached (and
        optimized) input bounds before propagation. Returns the output
        interval of the last layer.
        """
        if not has_batch_dim:
            # Add a batch dimension when the input comes unbatched
            # (2/4 dims are assumed batched, 1/3 dims unbatched).
            if len(input_lb.shape) in [2, 4]:
                shape_with_batch_dimension = input_lb.shape
            elif len(input_lb.shape) in [1, 3]:
                shape_with_batch_dimension = Size((1, *(input_lb.shape)))
            else:
                raise RuntimeError(
                    "Unexpected number of dimensions for interval propagation."
                )
            interval = (
                input_lb.expand(shape_with_batch_dimension),
                input_ub.expand(shape_with_batch_dimension),
            )
        else:
            interval = (input_lb, input_ub)
        for layer in self.layers:
            if use_existing_bounds and layer.input_bounds is not None:
                # Intersect with the layer's cached input bounds.
                lb = torch.max(interval[0], layer.input_bounds[0].view_as(interval[0]))
                ub = torch.min(interval[1], layer.input_bounds[1].view_as(interval[1]))
                interval = (lb, ub)
            if use_existing_bounds and layer.optim_input_bounds is not None:
                # Also intersect with the layer's optimized input bounds.
                lb = torch.max(
                    interval[0],
                    layer.optim_input_bounds[0].view(-1, *interval[0].shape[1:]),
                )
                ub = torch.min(
                    interval[1],
                    layer.optim_input_bounds[1].view(-1, *interval[1].shape[1:]),
                )
                interval = (lb, ub)
            if set_input and (
                type(layer) in ActivationLayers or not activation_layer_only
            ):
                layer.update_input_bounds(interval, check_feasibility=False)
            interval = layer.propagate_interval(
                interval,
                use_existing_bounds,
                subproblem_state,
                activation_layer_only=activation_layer_only,
                set_input=set_input,
                set_output=set_output,
            )
            assert (interval[0] <= interval[1]).all()
            if set_output and (
                type(layer) in ActivationLayers or not activation_layer_only
            ):
                layer.update_output_bounds(interval)
        return interval
def set_layer_bounds_via_abstract_element_propagation(
self,
abs_input: AbstractElement,
use_existing_bounds: bool = False,
activation_layer_only: bool = False,
set_input: bool = True,
set_output: bool = True,
) -> AbstractElement:
for layer in self.layers:
if (
set_input
and type(layer) in ActivationLayers
or not activation_layer_only
):
layer.update_input_bounds(
abs_input.concretize(), check_feasibility=False
)
abs_input = layer.propagate_abstract_element(
abs_input,
use_existing_bounds,
activation_layer_only,
set_input,
set_output,
)
if (
set_output
and type(layer) in ActivationLayers
or not activation_layer_only
):
layer.update_output_bounds(abs_input.concretize())
return abs_input
| 11,985 | 36.691824 | 136 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_concat.py | from __future__ import annotations
from typing import Any, List, Optional, Sequence, Tuple
import torch
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.concrete_layers.concat import Concat as concreteConcat
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.subproblem_state import SubproblemState
from src.utilities.config import BacksubstitutionConfig
class Concat(concreteConcat, AbstractModule):
    """Abstract concatenation layer.

    Concatenates several incoming tensors along a fixed dimension and
    backsubstitutes by slicing an affine query into one query per input.
    """

    def __init__(
        self,
        dim: int,
        input_dims: Sequence[Tuple[int, ...]],
    ):
        """Create the layer.

        Args:
            dim: concatenation dimension, counted *with* the batch dimension.
            input_dims: ordered shapes (without batch dimension) of all inputs.
        """
        super(Concat, self).__init__(dim)
        self.abs_dim = dim - 1  # concatenation axis without the batch dimension
        self.input_dims = input_dims  # Ordered list of incoming dimensions
        # Total size along the concatenation axis.
        cat_dim = sum([input_dim[self.abs_dim] for input_dim in input_dims])
        # All inputs must agree on every dimension except the concatenation axis.
        for input_dim in input_dims:
            for i in range(len(input_dims[0])):
                if i != self.abs_dim:
                    assert (
                        input_dim[i] == input_dims[0][i]
                    ), f"Dimension mismatch in concat input: {input_dim} {input_dims[0]}"
            # NOTE(review): recomputed on every loop iteration; the result is
            # the same for all inputs since only the concat axis may differ.
            output_dim: list[int] = list(input_dim)
            output_dim[self.abs_dim] = cat_dim
            self.output_dim = tuple(output_dim)

    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls,
        module: concreteConcat,
        input_dims: Sequence[Tuple[int, ...]],
        **kwargs: Any,
    ) -> Concat:
        """Build the abstract layer from a concrete ``Concat`` module."""
        assert isinstance(module, concreteConcat)
        abstract_layer = cls(module.dim, input_dims)
        return abstract_layer

    def backsubstitute(  # type: ignore[override]
        self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
    ) -> Sequence[MN_BaB_Shape]:
        """Split the abstract shape into one shape per concatenated input.

        Each returned shape carries the coefficient slice acting on the
        corresponding input (selected along the concatenation axis).
        """
        shapes: List[MN_BaB_Shape] = []
        offset = 0  # running start index along the concatenation axis
        for in_shape in self.input_dims:
            new_lb_form = self._backsub_affine_form(
                abstract_shape.lb, abstract_shape, offset, in_shape
            )
            new_ub_form: Optional[AffineForm] = None
            if abstract_shape.ub is not None:
                new_ub_form = self._backsub_affine_form(
                    abstract_shape.ub, abstract_shape, offset, in_shape
                )
            offset += in_shape[self.abs_dim]
            new_as = abstract_shape.clone_with_new_bounds(new_lb_form, new_ub_form)
            shapes.append(new_as)
        return shapes

    def _backsub_affine_form(
        self,
        affine_form: AffineForm,
        abstract_shape: MN_BaB_Shape,
        offset: int,
        in_shape: Tuple[int, ...],
    ) -> AffineForm:
        """Select the coefficient slice ``[offset, offset + size)`` for one input."""
        if abstract_shape.uses_dependence_sets():
            assert False, "Not implemented - Concat with dependence sets"
        assert isinstance(affine_form.coef, Tensor)
        qb_dim = (
            self.abs_dim + 2
        )  # Dimension that accounts for query and batch dimension
        # q, b, c, h, w
        # Slice at the correct dimension
        indices = torch.tensor(
            range(offset, offset + in_shape[self.abs_dim]), device=affine_form.device
        )
        new_coef = torch.index_select(affine_form.coef, qb_dim, indices)
        new_bias = affine_form.bias  # bias is unaffected by slicing coefficients
        return AffineForm(new_coef, new_bias)

    def propagate_interval(  # type: ignore[override]
        self,
        intervals: List[Tuple[Tensor, Tensor]],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Concatenate the per-input interval bounds along the layer axis."""
        lbs = [lb for (lb, _) in intervals]
        ubs = [ub for (_, ub) in intervals]
        # +1 re-adds the batch dimension to the (batch-free) abs_dim.
        output_lb = torch.cat(lbs, self.abs_dim + 1)
        output_ub = torch.cat(ubs, self.abs_dim + 1)
        return output_lb, output_ub

    def propagate_abstract_element(  # type: ignore [override]
        self,
        abs_inputs: List[AbstractElement],
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Concatenate abstract elements along the layer axis (incl. batch)."""
        return abs_inputs[0].cat(abs_inputs, self.abs_dim + 1)
| 4,374 | 34.860656 | 89 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_sigmoid.py | from __future__ import annotations
import os
from typing import Any, Callable, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.abstract_layers.abstract_sig_base import SigBase
from src.mn_bab_shape import MN_BaB_Shape
from src.state.tags import ParameterTag, layer_tag
from src.utilities.bilinear_interpolator import BilinearInterpol
from src.utilities.config import BacksubstitutionConfig
def sig(x: Tensor) -> Tensor:
    """Elementwise logistic sigmoid of ``x``."""
    return x.sigmoid()
def d_sig(x: Tensor) -> Tensor:
    """First derivative of the logistic sigmoid: ``sig(x) * (1 - sig(x))``.

    The local is named ``s`` (not ``sig``) to avoid shadowing the
    module-level ``sig`` function.
    """
    s = torch.sigmoid(x)
    return s * (1 - s)
# Absolute directory of this file; used below to locate the pre-computed
# interpolation data shipped alongside the repository.
FILE_DIR = os.path.realpath(os.path.dirname(__file__))
class Sigmoid(SigBase, AbstractModule):
    """Abstract sigmoid activation layer.

    The tangent-point table and the split-point interpolator are expensive to
    compute/load, so they are cached as class-level attributes shared by all
    instances and lazily initialized by the first constructor call.
    """

    # Bilinear interpolator for branching split points, loaded from disk.
    sp_interpolator: Optional[BilinearInterpol] = None
    intersection_points: Optional[Tensor] = None
    tangent_points: Optional[Tensor] = None
    step_size: Optional[float] = None
    max_x: Optional[float] = None

    def __init__(self, dim: Tuple[int, ...]) -> None:
        """Create the layer and lazily initialize the shared class-level data."""
        super(Sigmoid, self).__init__(dim, sig, d_sig)
        if Sigmoid.intersection_points is None:
            (
                Sigmoid.intersection_points,
                Sigmoid.tangent_points,
                Sigmoid.step_size,
                Sigmoid.max_x,
            ) = SigBase._compute_bound_to_tangent_point(sig, d_sig)
        if Sigmoid.sp_interpolator is None:
            # Pre-computed split-point interpolation data shipped with the repo.
            Sigmoid.sp_interpolator = BilinearInterpol.load_from_path(
                os.path.realpath(
                    os.path.join(FILE_DIR, "../../data/sig_bil_interpol.pkl")
                )
            )
        self.output_dim = dim
        self.dependence_set_block = False

    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: nn.Sigmoid, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> Sigmoid:
        """Build the abstract layer from a concrete ``nn.Sigmoid``."""
        assert isinstance(module, nn.Sigmoid)
        return cls(input_dim)

    def backsubstitute(
        self,
        config: BacksubstitutionConfig,
        abstract_shape: MN_BaB_Shape,
        intermediate_bounds_callback: Optional[
            Callable[[Tensor], Tuple[Tensor, Tensor]]
        ] = None,
        prev_layer: Optional[AbstractModule] = None,
    ) -> MN_BaB_Shape:
        """Backsubstitute through the sigmoid using the shared tangent table."""
        assert self.tangent_points is not None, "Tangent points not set"
        # Keep the cached table on the same device/dtype as the abstract shape.
        if self.tangent_points.device != abstract_shape.device:
            self.tangent_points = self.tangent_points.to(device=abstract_shape.device)
        if self.tangent_points.dtype != abstract_shape.lb.bias.dtype:
            self.tangent_points = self.tangent_points.to(
                dtype=abstract_shape.lb.bias.dtype
            )
        return super(Sigmoid, self)._backsubstitute(
            abstract_shape,
            self.tangent_points,
            self.step_size,
            self.max_x,
            intermediate_bounds_callback,
        )

    def get_approximation_slopes_and_intercepts(
        self,
        bounds: Tuple[Tensor, Tensor],
        abstract_shape: Optional[MN_BaB_Shape] = None,
        parameter_key: Optional[ParameterTag] = None,
        split_constraints: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        """Return the (lb_slope, ub_slope, lb_intercept, ub_intercept) relaxation.

        Raises:
            RuntimeError: if the shared pre-computed tables are missing.
        """
        if self.tangent_points is None or self.step_size is None or self.max_x is None:
            raise RuntimeError(
                "Cannot compute Sig/Tanh bounds without pre-computed values"
            )
        # Move the cached table to the device the computation runs on.
        if (
            abstract_shape is not None
            and self.tangent_points.device != abstract_shape.device
        ):
            self.tangent_points = self.tangent_points.to(device=abstract_shape.device)
        elif self.tangent_points.device != bounds[0].device:
            self.tangent_points = self.tangent_points.to(device=bounds[0].device)
        return super(Sigmoid, self)._get_approximation_slopes_and_intercepts_for_act(
            bounds,
            self.tangent_points,
            self.step_size,
            self.max_x,
            sig,
            d_sig,
            abstract_shape,
            parameter_key,
            layer_tag(self),
            split_constraints,
        )

    @classmethod
    def get_split_points(cls, lb: Tensor, ub: Tensor) -> Tensor:
        """Look up the branching split point for the interval [lb, ub]."""
        assert cls.sp_interpolator, "Split point interpolator for Sigmoid not set"
        return cls.sp_interpolator.get_value(lb, ub)

    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Propagate an abstract element through the sigmoid transformer."""
        assert self.step_size is not None
        assert self.max_x is not None
        return abs_input.sigmoid(
            tangent_points=self.tangent_points,
            step_size=self.step_size,
            max_x=self.max_x,
        )[0]
| 4,927 | 34.453237 | 87 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_module.py | from __future__ import annotations
from typing import Any, List, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.exceptions.invalid_bounds import InvalidBoundsError
from src.mn_bab_shape import MN_BaB_Shape
from src.state.constraints import INFEASIBILITY_CHECK_TOLERANCE
from src.state.tags import LayerTag
from src.utilities.config import BacksubstitutionConfig
from src.verification_subproblem import SubproblemState
class AbstractModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.input_bounds: Optional[Tuple[Tensor, Tensor]] = None
self.optim_input_bounds: Optional[Tuple[Tensor, Tensor]] = None
self.output_bounds: Optional[Tuple[Tensor, Tensor]] = None
self.output_dim: Tuple[int, ...]
self.dependence_set_applicable: Optional[bool] = None
self.dependence_set_block = True
@classmethod
def from_concrete_module(
cls, module: nn.Module, input_dim: Tuple[int, ...], **kwargs: Any
) -> AbstractModule:
raise NotImplementedError
def get_activation_layer_ids(
self, act_layer_ids: Optional[List[LayerTag]] = None
) -> List[LayerTag]:
if act_layer_ids is None:
act_layer_ids = []
return act_layer_ids
def get_relu_layer_ids(
self, act_layer_ids: Optional[List[LayerTag]] = None
) -> List[LayerTag]:
if act_layer_ids is None:
act_layer_ids = []
return act_layer_ids
def update_input_bounds(
self, input_bounds: Tuple[Tensor, Tensor], check_feasibility: bool = True
) -> None:
lb, ub = input_bounds
if self.input_bounds is None:
self.input_bounds = (lb, ub)
else:
self.input_bounds = (
torch.maximum(lb, self.input_bounds[0].view_as(lb)),
torch.minimum(ub, self.input_bounds[1].view_as(ub)),
)
if check_feasibility:
invalid_bounds_mask_in_batch = (
(
self.input_bounds[0]
> self.input_bounds[1] + INFEASIBILITY_CHECK_TOLERANCE
)
.flatten(start_dim=1)
.any(dim=1)
)
if invalid_bounds_mask_in_batch.any():
raise InvalidBoundsError(invalid_bounds_mask_in_batch)
def reset_input_bounds(self) -> None:
self.input_bounds = None
def reset_optim_input_bounds(self) -> None:
self.optim_input_bounds = None
def detach_input_bounds(self) -> None:
if self.input_bounds is not None:
lb, ub = self.input_bounds
self.input_bounds = lb.detach(), ub.detach()
def update_output_bounds(self, output_bounds: Tuple[Tensor, Tensor]) -> None:
lb, ub = output_bounds
if self.output_bounds is None:
self.output_bounds = (lb, ub)
else:
self.output_bounds = (
torch.maximum(lb, self.output_bounds[0]),
torch.minimum(ub, self.output_bounds[1]),
)
def reset_output_bounds(self) -> None:
self.output_bounds = None
def detach_output_bounds(self) -> None:
if self.output_bounds is not None:
lb, ub = self.output_bounds
self.output_bounds = lb.detach(), ub.detach()
def backsubstitute(
self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
) -> MN_BaB_Shape:
raise NotImplementedError
def propagate_interval(
self,
interval: Tuple[Tensor, Tensor],
use_existing_bounds: Optional[bool] = None,
subproblem_state: Optional[SubproblemState] = None,
activation_layer_only: bool = False,
set_input: bool = True,
set_output: bool = True,
) -> Tuple[Tensor, Tensor]:
raise NotImplementedError
def propagate_abstract_element(
self,
abs_input: AbstractElement,
use_existing_bounds: Optional[bool] = None,
activation_layer_only: bool = False,
set_input: bool = True,
set_output: bool = True,
) -> AbstractElement:
raise NotImplementedError
| 4,255 | 31.992248 | 81 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_linear.py | from __future__ import annotations
from typing import Any, Optional, Tuple
# import torch
import torch.nn as nn
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.utilities.general import get_neg_pos_comp
from src.verification_subproblem import SubproblemState
class Linear(nn.Linear, AbstractModule):
    """Abstract fully-connected layer wrapping ``nn.Linear``."""

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool,
        input_dim: Tuple[int, ...],
    ) -> None:
        """Create the layer; the output shape replaces the last input dimension."""
        super(Linear, self).__init__(in_features, out_features, bias)  # type: ignore # mypy issue 4335
        self.output_dim = (*input_dim[:-1], out_features)

    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: nn.Linear, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> Linear:
        """Copy weights (and bias, if present) from a concrete ``nn.Linear``."""
        assert isinstance(module, nn.Linear)
        abstract_module = cls(
            module.in_features, module.out_features, module.bias is not None, input_dim
        )
        abstract_module.weight.data = module.weight.data
        if module.bias is not None:
            abstract_module.bias.data = module.bias.data
        return abstract_module

    def backsubstitute(
        self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
    ) -> MN_BaB_Shape:
        """Backsubstitute both affine forms through the linear transformation."""
        new_lb_form = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
        new_ub_form: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            new_ub_form = self._backsub_affine_form(abstract_shape.ub, abstract_shape)
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape

    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Compose an affine form with ``y = W x + b``.

        Coefficients are multiplied with the weight matrix; the bias (if any)
        is folded into the affine form's constant term.
        """
        assert isinstance(affine_form.coef, Tensor)
        new_coef = affine_form.coef.matmul(self.weight)
        if self.bias is None:
            # Layer built with bias=False: nn.Linear stores self.bias as None,
            # so there is no constant contribution.  (Previously this path set
            # new_bias = 0 and crashed on `len(new_bias.shape)`.)
            new_bias = affine_form.bias
        else:
            bias_contrib = affine_form.coef.matmul(self.bias)
            if (
                len(bias_contrib.shape) == 3
            ):  # in case we have a matmul on the last dimension the bias is otherwise over multiple channels
                bias_contrib = bias_contrib.sum(dim=2)
            new_bias = bias_contrib + affine_form.bias
        return AffineForm(new_coef, new_bias)

    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Propagate an interval with the standard signed-weight decomposition."""
        interval_lb, interval_ub = interval
        neg_weight, pos_weight = get_neg_pos_comp(self.weight.unsqueeze(0))
        # Lower bound: positive weights act on lower bounds, negative on upper
        # bounds (and vice versa for the upper bound).
        output_lb = (
            pos_weight.matmul(interval_lb.unsqueeze(-1))
            + neg_weight.matmul(interval_ub.unsqueeze(-1))
        ).squeeze(dim=-1)
        output_ub = (
            pos_weight.matmul(interval_ub.unsqueeze(-1))
            + neg_weight.matmul(interval_lb.unsqueeze(-1))
        ).squeeze(dim=-1)
        if self.bias is not None:  # guard: bias=False leaves self.bias as None
            output_lb = output_lb + self.bias
            output_ub = output_ub + self.bias
        return output_lb, output_ub

    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Propagate an abstract element via its linear transformer."""
        return abs_input.linear(self.weight, self.bias)
| 3,742 | 34.990385 | 109 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_permute.py | from __future__ import annotations
from typing import Any, Callable, Optional, Tuple
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.concrete_layers.permute import Permute as concretePermute
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.verification_subproblem import SubproblemState
# Numerical tolerance constant.  NOTE(review): not referenced anywhere in the
# code visible in this module -- possibly left over from an earlier revision.
EPS = 1e-15
class Permute(concretePermute, AbstractModule):
    """Abstract dimension-permutation layer."""

    def __init__(self, perm_ind: Tuple[int, ...], input_dim: Tuple[int, ...]) -> None:
        """Create the abstract permutation layer.

        ``perm_ind`` contains an entry for the batch dimension while
        ``input_dim`` does not; if both have the same length, the first entry
        of ``input_dim`` is interpreted as a batch dimension and dropped.

        Args:
            perm_ind: permutation indices, including the batch dimension.
            input_dim: shape of the input (without batch dimension).
        """
        super(Permute, self).__init__(perm_ind)
        self.perm_ind = perm_ind
        if len(perm_ind) == len(input_dim):
            input_dim = input_dim[1:]
        self.input_dim = input_dim
        # Output shape: input dims reordered; perm_ind[1:] skips the batch
        # entry and the -1 shifts batch-based indices to batch-free ones.
        self.output_dim = tuple([input_dim[i - 1] for i in perm_ind[1:]])
        # As backsub queries have the dim query x neuron x input_dims
        # and perm_ind is batch x input dims we add a new index in front
        self.perm_ind = tuple([0] + [i + 1 for i in perm_ind])
        # Inverse permutation, used to undo the permutation in backsubstitution.
        self.rev_perm_ind = [0] * len(self.perm_ind)
        for i in range(len(self.perm_ind)):
            self.rev_perm_ind[self.perm_ind[i]] = i

    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: concretePermute, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> Permute:
        """Build the abstract layer from a concrete ``Permute`` module."""
        assert isinstance(module, concretePermute)
        return cls(module.dims, input_dim)

    def backsubstitute(
        self,
        config: BacksubstitutionConfig,
        abstract_shape: MN_BaB_Shape,
        intermediate_bounds_callback: Optional[
            Callable[[Tensor], Tuple[Tensor, Tensor]]
        ] = None,
    ) -> MN_BaB_Shape:
        """Backsubstitute by applying the inverse permutation to both forms."""
        new_lb_form = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
        new_ub_form: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            new_ub_form = self._backsub_affine_form(abstract_shape.ub, abstract_shape)
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape

    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Undo the permutation on the coefficient tensor; the bias is unchanged."""
        assert isinstance(affine_form.coef, Tensor)
        new_bias = affine_form.bias
        new_coef = affine_form.coef.permute(self.rev_perm_ind)
        return AffineForm(new_coef, new_bias)

    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Permute lower and upper bounds elementwise (exact for permutations)."""
        lb, ub = interval
        no_query_perm_ind = self.perm_ind
        # self.perm_ind carries an extra leading query index; strip it when
        # the input only has batch + spatial dimensions.
        if len(self.perm_ind) == len(lb.shape) + 1:
            no_query_perm_ind = tuple([i - 1 for i in self.perm_ind[1:]])
        return lb.permute(no_query_perm_ind), ub.permute(no_query_perm_ind)

    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Not supported for this layer."""
        raise NotImplementedError
| 3,722 | 38.189474 | 217 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_avg_pool2d.py | from __future__ import annotations
from math import floor
from typing import Any, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.utilities.dependence_sets import DependenceSets
from src.verification_subproblem import SubproblemState
class AvgPool2d(nn.AvgPool2d, AbstractModule):
    """Abstract 2D average-pooling layer.

    NOTE(review): the class-level annotations declare ``dilation`` and
    ``weight``, and the dependence-sets branch of ``_backsub_affine_form``
    reads ``self.dilation``, ``self.groups``, ``self.weight`` and
    ``self.bias`` -- none of which are ever assigned for ``nn.AvgPool2d``.
    That branch looks copied from a Conv2d backsubstitution and would raise
    ``AttributeError`` if reached; confirm before enabling dependence sets
    for this layer.
    """

    kernel_size: Tuple[int, int]  # type: ignore[assignment] # hack
    stride: Tuple[int, int]  # type: ignore[assignment]
    padding: Tuple[int, int]  # type: ignore[assignment]
    dilation: Tuple[int, int]  # type: ignore[assignment]
    weight: Tensor  # type: ignore[assignment]

    def __init__(
        self,
        kernel_size: Union[int, Tuple[int, int]],
        input_dim: Tuple[int, ...],
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Union[int, Tuple[int, int]] = 0,
    ):
        """Create the layer for inputs of shape ``input_dim`` = (C, H, W)."""
        # Normalize scalar arguments to (h, w) pairs.
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        if isinstance(stride, int):
            stride = (stride, stride)
        if isinstance(padding, int):
            padding = (padding, padding)
        super(AvgPool2d, self).__init__(  # type: ignore # mypy issue 4335
            kernel_size, stride, padding
        )
        self.input_dim = input_dim
        # Standard pooling output-size formula.
        output_height = floor(
            (input_dim[1] + 2 * self.padding[0] - self.kernel_size[0]) / self.stride[0]
            + 1
        )
        output_width = floor(
            (input_dim[2] + 2 * self.padding[1] - self.kernel_size[1]) / self.stride[1]
            + 1
        )
        self.output_dim = (input_dim[0], output_height, output_width)
        self.dependence_set_block = False
        # 1 / (kh * kw): averaging factor used for the transposed-conv kernel.
        self.kernel_prod_norm = 1 / torch.prod(torch.Tensor(self.kernel_size))

    @classmethod
    def from_concrete_module(  # type: ignore[override] # checked at runtime
        cls, module: nn.AvgPool2d, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> AvgPool2d:
        """Build the abstract layer from a concrete ``nn.AvgPool2d``."""
        assert isinstance(module, nn.AvgPool2d)
        abstract_layer = cls(
            module.kernel_size, input_dim, module.stride, module.padding
        )
        return abstract_layer

    def backsubstitute(
        self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
    ) -> MN_BaB_Shape:
        """Backsubstitute both affine forms through the average pooling."""
        new_lb_form = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
        new_ub_form: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            new_ub_form = self._backsub_affine_form(abstract_shape.ub, abstract_shape)
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape

    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Backsubstitute one affine form through the average pooling.

        The dense branch realizes average pooling as a depthwise transposed
        convolution with a constant 1/(kh*kw) kernel.

        NOTE(review): see the class docstring -- the dependence-sets branch
        references Conv2d-only attributes that this class never defines.
        """
        new_coef: Union[Tensor, DependenceSets]
        if abstract_shape.uses_dependence_sets():
            symmetric_stride = self.stride[0] == self.stride[1]
            symmetric_padding = self.padding[0] == self.padding[1]
            dilation_one = self.dilation[0] == self.dilation[1] == 1
            group_one = self.groups == 1
            dependence_sets_assumptions = (
                symmetric_stride and symmetric_padding and dilation_one and group_one
            )
            assert dependence_sets_assumptions, "Dependence set assumptions violated."

            def backsubstitute_coef_and_bias(
                coef: DependenceSets, bias: Tensor
            ) -> Tuple[DependenceSets, Tensor]:
                new_bias = bias + (
                    0
                    if self.bias is None
                    else (coef.sets.sum((3, 4)) * self.bias).sum(2)
                )
                # [B*C*HW, c, d, d] -> [B*C*HW, c', d', d']
                new_coef_sets = F.conv_transpose2d(
                    coef.sets.flatten(end_dim=1), self.weight, stride=self.stride
                )
                new_coef = DependenceSets(
                    new_coef_sets.view(*coef.sets.shape[:2], *new_coef_sets.shape[1:]),
                    coef.spatial_idxs,
                    coef.input_dim,
                    coef.cstride * self.stride[0],
                    coef.cpadding * self.stride[0] + self.padding[0],
                )
                return new_coef, new_bias

            assert isinstance(affine_form.coef, DependenceSets)
            new_coef, new_bias = backsubstitute_coef_and_bias(
                affine_form.coef, affine_form.bias
            )
        else:
            assert isinstance(affine_form.coef, Tensor)
            kernel_wh = self.kernel_size
            # Output padding recovers positions lost to integer division.
            w_padding = (
                self.input_dim[1] + 2 * self.padding[0] - kernel_wh[0]
            ) % self.stride[0]
            h_padding = (
                self.input_dim[2] + 2 * self.padding[1] - kernel_wh[1]
            ) % self.stride[1]
            output_padding = (w_padding, h_padding)
            sz = affine_form.coef.shape
            # Depthwise constant kernel implementing the averaging.
            weight = self.kernel_prod_norm * torch.ones(
                (self.input_dim[0], 1, *self.kernel_size), device=abstract_shape.device
            )
            new_bias = affine_form.bias
            new_coef = F.conv_transpose2d(
                affine_form.coef.view((sz[0] * sz[1], *sz[2:])),
                weight,
                None,
                self.stride,
                self.padding,
                output_padding,
                self.input_dim[0],
                1,
            )
            new_coef = new_coef.view((sz[0], sz[1], *new_coef.shape[1:]))  # type: ignore
        return AffineForm(new_coef, new_bias)

    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Propagate a box; averaging is monotone, so bounds map elementwise."""
        interval_lb, interval_ub = interval
        output_lb = F.avg_pool2d(
            interval_lb, self.kernel_size, self.stride, self.padding
        )
        output_ub = F.avg_pool2d(
            interval_ub, self.kernel_size, self.stride, self.padding
        )
        # assert (output_ub >= output_lb).all()
        return output_lb, output_ub

    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Propagate an abstract element; requires square kernel/stride/padding."""
        assert all([self.kernel_size[0] == x for x in self.kernel_size])
        assert all([self.stride[0] == x for x in self.stride])
        assert all([self.padding[0] == x for x in self.padding])
        return abs_input.avg_pool2d(
            self.kernel_size[0], self.stride[0], self.padding[0]
        )
| 7,064 | 36.380952 | 89 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_sig_base.py | from __future__ import annotations
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
from src.abstract_layers.abstract_module import AbstractModule
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.tags import (
LayerTag,
ParameterTag,
key_alpha,
key_beta,
key_plus_lb,
key_plus_ub,
layer_tag,
)
from src.utilities.dependence_sets import DependenceSets
from src.verification_subproblem import SubproblemState
class SigBase(nn.Sigmoid, AbstractModule):
def __init__(
self,
dim: Tuple[int, ...],
act: Callable[[Tensor], Tensor],
d_act: Callable[[Tensor], Tensor],
) -> None:
super(SigBase, self).__init__()
self.output_dim = dim
self.dependence_set_block = False
self.act = act
self.d_act = d_act
def update_input_bounds(
self, input_bounds: Tuple[Tensor, Tensor], check_feasibility: bool = True
) -> None:
input_bounds_shape_adjusted = (
input_bounds[0].view(-1, *self.output_dim),
input_bounds[1].view(-1, *self.output_dim),
)
super(SigBase, self).update_input_bounds(
input_bounds_shape_adjusted, check_feasibility=check_feasibility
)
    def _backsubstitute(
        self,
        abstract_shape: MN_BaB_Shape,
        tangent_points: Optional[Tensor],
        step_size: Optional[float],
        max_x: Optional[float],
        intermediate_bounds_callback: Optional[
            Callable[[Tensor], Tuple[Tensor, Tensor]]
        ] = None,
    ) -> MN_BaB_Shape:
        """Backsubstitute the shape's lower/upper forms through this activation.

        Requires ``self.input_bounds`` and the precomputed tangent-point data.
        Derives per-neuron split constraints from the shape, then rewrites
        both affine forms with the corresponding linear relaxation.

        Raises:
            RuntimeError: if input bounds or precomputed values are missing.

        NOTE(review): ``intermediate_bounds_callback`` is accepted but never
        referenced in this body -- confirm whether it should be forwarded.
        """
        if self.input_bounds is None:
            raise RuntimeError("Cannot backsubstitute if bounds have not been set.")
        if tangent_points is None or step_size is None or max_x is None:
            raise RuntimeError(
                "Cannot compute Sig/Tanh bounds without pre-computed values"
            )
        (
            split_constraints,
            split_points,
        ) = abstract_shape.get_split_constraints_for_sig(
            layer_tag(self), self.input_bounds
        )
        # Backsub
        new_lb_form = self._backsub_affine_form(
            affine_form=abstract_shape.lb,
            input_bounds=self.input_bounds,
            tangent_points=tangent_points,
            step_size=step_size,
            max_x=max_x,
            prima_coefs=None,
            split_constraints=split_constraints,
            split_points=split_points,
            compute_upper_bound=False,
            abstract_shape=abstract_shape,
        )
        new_ub_form: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            new_ub_form = self._backsub_affine_form(
                affine_form=abstract_shape.ub,
                input_bounds=self.input_bounds,
                tangent_points=tangent_points,
                step_size=step_size,
                max_x=max_x,
                prima_coefs=None,
                split_constraints=split_constraints,
                split_points=split_points,
                compute_upper_bound=True,
                abstract_shape=abstract_shape,
            )
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape
    def _backsub_affine_form(
        self,
        affine_form: AffineForm,
        input_bounds: Tuple[Tensor, Tensor],
        tangent_points: Tensor,
        step_size: float,
        max_x: float,
        prima_coefs: Optional[Tuple[Tensor, Tensor, Tensor]],
        split_constraints: Optional[Tensor],
        split_points: Optional[Tensor],
        compute_upper_bound: bool,
        abstract_shape: MN_BaB_Shape,
    ) -> AffineForm:
        """Backsubstitute one affine form through the s-shaped activation.

        Applies the per-neuron linear relaxation (slope/intercept) to the
        form's coefficients and bias and, when split constraints are active,
        adds the corresponding beta (Lagrangian) contributions.

        NOTE(review): ``prima_coefs`` is accepted but never used in this body.
        """
        # Get parameters
        (
            lb_slope,
            ub_slope,
            lb_intercept,
            ub_intercept,
        ) = SigBase._get_approximation_slopes_and_intercepts_for_act(
            input_bounds,
            tangent_points,
            step_size,
            max_x,
            self.act,
            self.d_act,
            abstract_shape,
            key_alpha(compute_upper_bound),
            layer_tag(self),
            split_constraints,
            split_points,
        )
        # Handle bias
        new_lb_bias, new_ub_bias = abstract_shape._matmul_of_coef_and_interval(
            lb_intercept.unsqueeze(1),  # add query dimension
            ub_intercept.unsqueeze(1),
        )
        new_bias = new_ub_bias if compute_upper_bound else new_lb_bias
        assert new_bias is not None
        new_bias += affine_form.bias
        # Handle coef
        new_coef: Optional[Union[Tensor, DependenceSets]]
        new_lb_coef, new_ub_coef = abstract_shape._elementwise_mul_of_coef_and_interval(
            lb_slope.unsqueeze(1), ub_slope.unsqueeze(1)  # add query dimension
        )
        new_coef = new_ub_coef if compute_upper_bound else new_lb_coef
        assert new_coef is not None
        # Handle Split constraints
        if split_constraints is not None:
            # add betas, [B, 1, c, h, w]
            #
            beta_contrib_shape = (abstract_shape.batch_size, 1, *self.output_dim)
            beta_lb = abstract_shape.get_parameters(
                key_beta(compute_upper_bound), layer_tag(self), split_constraints.shape
            )
            beta_contrib = (beta_lb * split_constraints).view(beta_contrib_shape)
            # Sign flips for the upper bound: constraints enter with -1.
            if compute_upper_bound:
                beta_contrib *= -1
            # Bias contribution
            beta_bias_shape = (abstract_shape.batch_size, *self.output_dim[1:])
            beta_bias_cont = beta_lb * split_constraints
            beta_bias_cont = (beta_bias_cont * split_points).sum(dim=1)
            if compute_upper_bound:
                beta_bias_cont *= -1
            new_bias -= beta_bias_cont.reshape(beta_bias_shape)
            # Coef contribution
            if abstract_shape.uses_dependence_sets():
                assert isinstance(affine_form.coef, DependenceSets)
                new_coef += DependenceSets.unfold_to(beta_contrib, affine_form.coef)
            else:
                new_coef += beta_contrib
        # Create output
        if abstract_shape.uses_dependence_sets():
            assert isinstance(affine_form.coef, DependenceSets)
            new_coef = DependenceSets(
                new_coef,
                affine_form.coef.spatial_idxs,
                affine_form.coef.input_dim,
                affine_form.coef.cstride,
                affine_form.coef.cpadding,
            )
        return AffineForm(new_coef, new_bias)
def propagate_interval(
self,
interval: Tuple[Tensor, Tensor],
use_existing_bounds: Optional[bool] = None,
subproblem_state: Optional[SubproblemState] = None,
activation_layer_only: bool = False,
set_input: bool = True,
set_output: bool = True,
) -> Tuple[Tensor, Tensor]:
output_lb, output_ub = self.act(interval[0]), self.act(interval[1])
assert (output_ub >= output_lb).all()
return output_lb, output_ub
@classmethod
    def _get_approximation_slopes_and_intercepts_for_act(
        cls,
        bounds: Tuple[Tensor, Tensor],
        tangent_points: Tensor,
        step_size: float,
        max_x: float,
        act: Callable[[Tensor], Tensor],
        d_act: Callable[[Tensor], Tensor],
        abstract_shape: Optional[MN_BaB_Shape] = None,
        parameter_key: Optional[ParameterTag] = None,
        layer_id: Optional[LayerTag] = None,
        split_constraints: Optional[Tensor] = None,
        split_points: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        """Compute elementwise linear relaxations of an S-shaped activation.

        For every neuron with pre-activation interval [input_lb, input_ub],
        produces one lower- and one upper-bounding line for ``act``: the chord
        between the interval endpoints where the interval lies entirely on one
        side of 0 (so the curvature is fixed there), and otherwise a tangent
        line at a clamped — and, when parameter sharing is enabled, optimized —
        tangent point. ``tangent_points`` is the lookup table produced by
        ``_compute_bound_to_tangent_point``, mapping discretized endpoint
        values to tangent points whose tangents are guaranteed sound.

        Args:
            bounds: (input_lb, input_ub) pre-activation bound tensors.
            tangent_points: 1-D table indexed by endpoint / step_size.
            step_size: discretization step of the table.
            max_x: table range; inputs are clamped into (-(max_x-1), max_x-1).
            act / d_act: the activation function and its derivative.
            abstract_shape, parameter_key, layer_id: when given and
                ``use_params`` is set, tangent points are read from / stored as
                optimizable parameters on the shape.
            split_constraints / split_points: optional branch-and-bound splits;
                -1 raises the lower bound to the split point, +1 lowers the
                upper bound (sign convention read off the torch.where calls
                below — confirm against the caller).

        Returns:
            (lb_slope, ub_slope, lb_intercept, ub_intercept), each shaped like
            input_lb; intercepts are shifted by -/+1e-6 for float soundness.
        """
        input_lb, input_ub = bounds
        dtype = input_lb.dtype
        # Tighten bounds according to branch-and-bound split constraints.
        if split_constraints is not None:
            input_lb = torch.where(
                split_constraints == -1, torch.max(input_lb, split_points), input_lb  # type: ignore[arg-type] # mypy bug?
            )
            input_ub = torch.where(
                split_constraints == 1, torch.min(input_ub, split_points), input_ub  # type: ignore[arg-type] # mypy bug?
            )
        # Clamp inputs into the range covered by the tangent-point table.
        input_lb = torch.clamp(input_lb, min=-1 * (max_x - 1))
        input_ub = torch.clamp(input_ub, max=(max_x - 1))
        # One-sided intervals have fixed curvature: the chord is a sound lower
        # bound when the interval is non-negative and a sound upper bound when
        # it is negative.
        lb_convex_mask = input_lb >= 0
        ub_convex_mask = input_ub < 0
        # Table lookup: per-neuron upper limit on the lower-line tangent point,
        # indexed by input_ub (negative indices clamp to 0; +1 rounds up
        # conservatively).
        idx = (
            torch.max(
                torch.zeros(input_lb.numel(), device=input_ub.device),
                (input_ub / step_size).to(torch.long).flatten(),
            )
            + 1
        ).long()
        lb_tangent_ubs = torch.index_select(tangent_points, 0, idx).view(input_lb.shape)
        # Mirrored lookup for the lower limit of the upper-line tangent point.
        idx = (
            torch.max(
                torch.zeros(input_ub.numel(), device=input_lb.device),
                (-1 * input_lb / step_size).to(torch.long).flatten(),
            )
            + 1
        ).long()
        ub_tangent_lbs = -1 * torch.index_select(tangent_points, 0, idx).view(
            input_ub.shape
        )
        # Tangent-point limits may not exceed the interval itself.
        lb_tangent_ubs = torch.min(lb_tangent_ubs, input_ub)
        ub_tangent_lbs = torch.max(ub_tangent_lbs, input_lb)
        if (
            abstract_shape is not None
            and abstract_shape.subproblem_state is not None
            and abstract_shape.subproblem_state.parameters.use_params
        ):
            # Tangent points are optimizable parameters on the shape,
            # initialized at the midpoint of their feasible range.
            assert parameter_key is not None
            assert layer_id is not None
            def make_default_lb(device: torch.device) -> Tensor:
                lb_init = ((lb_tangent_ubs + input_lb) / 2).detach()
                return lb_init.to(
                    device
                )  # TODO: it's created on 'cuda:0' and moved to 'cuda' here, why?
            def make_default_ub(device: torch.device) -> Tensor:
                ub_init = ((ub_tangent_lbs + input_ub) / 2).detach()
                return ub_init.to(
                    device
                )  # TODO: it's created on 'cuda:0' and moved to 'cuda' here, why?
            lb_tangent_points = abstract_shape.get_parameters(
                key_plus_lb(parameter_key), layer_id, make_default_lb
            )
            ub_tangent_points = abstract_shape.get_parameters(
                key_plus_ub(parameter_key), layer_id, make_default_ub
            )
        else:
            lb_tangent_points = (lb_tangent_ubs + input_lb) / 2
            ub_tangent_points = (ub_tangent_lbs + input_ub) / 2
        # if ub >= tangent_intersection_of_lb we can use the convex slope for our lower bound
        lb_convex_mask = (lb_convex_mask | (input_lb >= lb_tangent_ubs)).to(dtype)
        # Analogously for the upper bound.
        ub_convex_mask = (ub_convex_mask | (input_ub <= ub_tangent_lbs)).to(dtype)
        # Note that these intervals may be empty, but only if the second condition of the mask above holds and we are convex
        # Constrain lb_tangent_points to [lbs, min_lb_tangent]
        lb_tangent_points = torch.clamp(
            lb_tangent_points, min=input_lb, max=lb_tangent_ubs
        )
        # Constrain ub_tangent_points to [ub_tangent_lbs, ubs]
        ub_tangent_points = torch.clamp(
            ub_tangent_points, min=ub_tangent_lbs, max=input_ub
        )
        # Evaluate the activation at endpoints and tangent points.
        sigmoid_lb, sigmoid_ub = act(input_lb), act(input_ub)
        sigmoid_tlb, sigmoid_tub = act(lb_tangent_points), act(ub_tangent_points)
        # Chord between the endpoints (1e-6 guards against zero-width intervals).
        convex_slope = (sigmoid_ub - sigmoid_lb) / (input_ub - input_lb + 1e-6)
        convex_intercept = sigmoid_lb - input_lb * convex_slope
        # Tangent line for the lower bound.
        tlb_slope = d_act(lb_tangent_points)
        tlb_intercept = sigmoid_tlb - lb_tangent_points * tlb_slope
        # Tangent line for the upper bound.
        tub_slope = d_act(ub_tangent_points)
        tub_intercept = sigmoid_tub - ub_tangent_points * tub_slope
        # Blend chord vs. tangent per neuron via the (0/1) convexity masks;
        # intercepts get a small soundness margin.
        lb_slope = lb_convex_mask * convex_slope + (1 - lb_convex_mask) * tlb_slope
        lb_intercept = (
            lb_convex_mask * convex_intercept + (1 - lb_convex_mask) * tlb_intercept
        ) - 1e-6
        ub_slope = ub_convex_mask * convex_slope + (1 - ub_convex_mask) * tub_slope
        ub_intercept = (
            ub_convex_mask * convex_intercept + (1 - ub_convex_mask) * tub_intercept
        ) + 1e-6
        return lb_slope, ub_slope, lb_intercept, ub_intercept
def get_activation_layer_ids(
self, act_layer_ids: Optional[List[LayerTag]] = None
) -> List[LayerTag]:
if act_layer_ids is None:
act_layer_ids = []
act_layer_ids.append(layer_tag(self))
return act_layer_ids
    @classmethod
    def _compute_bound_to_tangent_point(
        cls, f: Callable[[Tensor], Tensor], d_f: Callable[[Tensor], Tensor]
    ) -> Tuple[Tensor, Tensor, float, float]:
        """Precompute sound tangent points for bounding an S-shaped function.

        For each intersection point ip on a uniform grid over [0, max_x], a
        binary search finds a tangent point tp < 0 whose tangent line lies
        below f at ip. Returns (intersection_points, tangent_points,
        step_size, max_x); per the final comment, negated values are assumed
        valid for negative intersection points by symmetry of f.
        """
        with torch.no_grad():
            max_x = 500
            step_size = 0.01
            # NOTE(review): float floor division (500 // 0.01) is subject to
            # rounding, so the linspace step below may differ very slightly
            # from step_size — confirm consumers only rely on it approximately.
            num_points = int(max_x // step_size) + 1
            max_iter = 100
            def is_below(ip: Tensor, tp: Tensor) -> Tensor:
                """Return 1.0 where the tangent of f at tp lies below f at ip, else 0.0.
                Args:
                    ip (Tensor): intersection point
                    tp (Tensor): tangent point
                """
                return (d_f(tp) * (ip - tp) + f(tp) <= f(ip)).to(ip.dtype)
            ips = torch.linspace(0, max_x, num_points)  # Intersection points
            ub = torch.zeros_like(ips)  # Binary search upperbounds for each ip's tp
            lb = -1 * torch.ones_like(ips)
            # Repeatedly double lb (towards -inf) until every entry is a valid
            # tangent point. NOTE(review): terminates only if doubling
            # eventually yields a valid point for every ip — this is assumed,
            # not checked, here.
            while True:
                ib = is_below(ips, lb)
                lb = ib * lb + (1 - ib) * (2 * lb)
                if ib.sum() == ips.numel():
                    break
            # Shrink the bracket [lb, ub], keeping lb on valid tangent points.
            for _ in range(max_iter):
                m = (lb + ub) / 2
                ib = is_below(ips, m)
                lb = ib * m + (1 - ib) * lb
                ub = ib * ub + (1 - ib) * m
            # By symmetry we have valid points for negative ip as well
            return ips.clone(), lb.clone(), step_size, max_x
| 14,169 | 35.147959 | 125 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_identity.py | from __future__ import annotations
from typing import Any, Optional, Tuple
import torch.nn as nn
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.mn_bab_shape import MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.verification_subproblem import SubproblemState
class Identity(nn.Identity, AbstractModule):
    """Abstract identity layer: every propagation routine is a no-op."""

    def __init__(self, input_dim: Tuple[int, ...]) -> None:
        super(Identity, self).__init__()
        # The identity maps its input straight through, so output == input shape.
        self.output_dim = input_dim
        self.dependence_set_block = False

    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: nn.Identity, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> Identity:
        """Wrap a concrete nn.Identity into its abstract counterpart."""
        assert isinstance(module, nn.Identity)
        abstract_layer = cls(input_dim)
        return abstract_layer

    def backsubstitute(
        self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
    ) -> MN_BaB_Shape:
        """Nothing to transform: the shape passes through unchanged."""
        return abstract_shape

    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Interval bounds pass through unchanged."""
        return interval

    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Abstract elements pass through unchanged."""
        return abs_input
| 1,709 | 31.264151 | 78 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_pad.py | from __future__ import annotations
from typing import Any, Optional, Tuple
import torch.nn.functional as F
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.concrete_layers.pad import Pad as concretePad
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.verification_subproblem import SubproblemState
class Pad(concretePad, AbstractModule):
    """Abstract constant-padding layer for (C, H, W) inputs."""
    def __init__(
        self,
        pad: Tuple[int, ...],
        input_dim: Tuple[int, ...],
        mode: str = "constant",
        value: float = 0.0,
    ):
        # Normalize to the 4-tuple (left, right, top, bottom).
        # NOTE(review): the annotation says Tuple, but a bare int is handled too.
        if isinstance(pad, int):
            pad = (pad, pad, pad, pad)
        elif len(pad) == 2:
            pad = (pad[0], pad[1], 0, 0)
        super(Pad, self).__init__(pad, mode, value)
        self.input_dim = input_dim
        # Height grows by top+bottom padding, width by left+right; channels fixed.
        self.output_dim = (
            input_dim[0],
            input_dim[1] + pad[2] + pad[3],
            input_dim[2] + pad[0] + pad[1],
        )
    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: concretePad, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> Pad:
        """Wrap a concrete Pad module into its abstract counterpart."""
        assert isinstance(module, concretePad)
        abstract_layer = cls(
            module.pad,
            input_dim,
            module.mode,
            module.value,
        )
        return abstract_layer
    def backsubstitute(
        self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
    ) -> MN_BaB_Shape:
        """Backsubstitute both affine bound forms through the padding."""
        new_lb_form = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
        new_ub_form: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            new_ub_form = self._backsub_affine_form(abstract_shape.ub, abstract_shape)
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape
    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Crop coefficients to the unpadded region and fold the padded-area
        coefficients (times the constant pad value) into the bias."""
        if abstract_shape.uses_dependence_sets():
            assert False, "Not implemented - Pad with dependence sets"
        assert isinstance(affine_form.coef, Tensor)
        assert len(self.pad) == 4, f"Incompatible padding size: {self.pad}"
        pad_l, pad_r, pad_t, pad_b = self.pad
        # Convert right/bottom pad sizes into slice end indices; coef is
        # indexed as [batch, query, channel, height, width].
        pad_b = affine_form.coef.shape[3] - pad_b
        pad_r = affine_form.coef.shape[4] - pad_r
        # Step 1 unpad the coefficients as the outer coeffs refer to padding values
        new_coef = affine_form.coef[:, :, :, pad_t:pad_b, pad_l:pad_r]
        # Step 2 concretize the contribution of the padding into the bias
        only_pad_coef = (
            affine_form.coef.detach().clone()
        )  # TODO @Robin Detach required?
        only_pad_coef[:, :, :, pad_t:pad_b, pad_l:pad_r] = 0
        only_pad_coef *= self.value
        only_pad_contr = only_pad_coef.sum((2, 3, 4))
        new_bias = affine_form.bias + only_pad_contr
        return AffineForm(new_coef, new_bias)
    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Pad both interval ends with the same constant padding."""
        interval_lb, interval_ub = interval
        output_lb = F.pad(interval_lb, self.pad, self.mode, self.value)
        output_ub = F.pad(interval_ub, self.pad, self.mode, self.value)
        return output_lb, output_ub
    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Delegate padding to the abstract element itself."""
        return abs_input.pad(self.pad, self.mode, self.value)
| 3,956 | 34.648649 | 86 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_unbinary_op.py | from __future__ import annotations
import typing
from typing import Any, Callable, Optional, Tuple, Union
import torch
from torch import Tensor
from src.abstract_layers.abstract_module import AbstractModule
from src.concrete_layers import unbinary_op
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.utilities.dependence_sets import DependenceSets
from src.utilities.general import tensor_reduce
from src.verification_subproblem import SubproblemState
EPS = 1e-15  # Module-level numerical epsilon; not referenced by the code visible in this chunk.
class UnbinaryOp(AbstractModule):
    """Abstract layer for a binary arithmetic op between the input and a constant.

    ``const_val`` is stored as a non-persistent buffer; ``apply_right`` selects
    the operand order (``const op x`` instead of ``x op const``). Supported ops
    are "add", "sub", "mul" and "div"; division is only linear in x (and thus
    only supported) with the constant as divisor.
    """

    def __init__(
        self, op: str, const_val: Tensor, apply_right: bool, input_dim: Tuple[int, ...]
    ) -> None:
        super(UnbinaryOp, self).__init__()
        self.op = op
        # Buffer (not Parameter): moves with .to()/.cuda(), is not trained and,
        # being non-persistent, is excluded from the state dict.
        self.register_buffer(
            "const_val",
            const_val,
            persistent=False,
        )
        self.apply_right = apply_right
        # Elementwise op: output shape equals input shape.
        self.output_dim = input_dim

    @classmethod
    @typing.no_type_check  # Mypy can't handle the buffer type
    def from_concrete_module(
        cls, module: unbinary_op.UnbinaryOp, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> UnbinaryOp:
        """Wrap a concrete UnbinaryOp into its abstract counterpart."""
        return cls(module.op, module.const_val, module.apply_right, input_dim)

    @typing.no_type_check  # Mypy can't handle the buffer type
    def backsubstitute(
        self,
        config: BacksubstitutionConfig,
        abstract_shape: MN_BaB_Shape,
        intermediate_bounds_callback: Optional[
            Callable[[Tensor], Tuple[Tensor, Tensor]]
        ] = None,
    ) -> MN_BaB_Shape:
        """Backsubstitute both affine bound forms through this layer."""
        new_lb_form = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
        new_ub_form: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            new_ub_form = self._backsub_affine_form(abstract_shape.ub, abstract_shape)
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape

    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Pull an affine form a*y + b back through ``y = x op const``.

        NOTE: the coefficient updates below (``coef *= ...``, ``coef /= ...``)
        are in-place and therefore also mutate ``affine_form.coef`` (resp. its
        DependenceSets storage) — callers must not rely on the old values.
        """
        if abstract_shape.uses_dependence_sets():
            assert isinstance(affine_form.coef, DependenceSets)
            coef = affine_form.coef.sets
        else:
            assert isinstance(affine_form.coef, Tensor)
            coef = affine_form.coef
        bias = affine_form.bias
        # Total contribution of the constant, reduced per (batch, query);
        # computed before any in-place update of coef.
        bias_c = (coef * self.const_val).view((*coef.shape[:2], -1)).sum(dim=2)
        if self.op == "add":
            bias = bias + bias_c
        elif self.op == "sub":
            if self.apply_right:  # Y = C - X
                coef *= -1
                bias = bias + bias_c
            else:  # X - C
                bias = bias - bias_c
        elif self.op == "mul":
            coef *= self.const_val
        elif self.op == "div":
            if self.apply_right:
                assert False, "Tried to apply non linear division operation"
            else:
                coef /= self.const_val
        final_coef: Union[Tensor, DependenceSets] = coef
        if abstract_shape.uses_dependence_sets():
            # Re-wrap the (mutated) sets with the original metadata.
            assert isinstance(affine_form.coef, DependenceSets)
            final_coef = DependenceSets(
                coef,
                affine_form.coef.spatial_idxs,
                affine_form.coef.input_dim,
                affine_form.coef.cstride,
                affine_form.coef.cpadding,
            )
        return AffineForm(final_coef, bias)

    @typing.no_type_check  # Mypy can't handle the buffer type
    def forward(self, x: Tensor) -> Tensor:
        """Apply the concrete op to ``x``, with operand order per ``apply_right``."""
        const_val = self.const_val
        if self.apply_right:
            left, right = const_val, x
        else:
            left, right = x, const_val
        if self.op == "add":
            return left + right
        elif self.op == "sub":
            return left - right
        elif self.op == "mul":
            return left * right
        elif self.op == "div":
            return left / right
        else:
            assert False, f"Unknown operation {self.op}"

    @typing.no_type_check  # Mypy can't handle the buffer type
    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Propagate an interval [lb, ub] through the op via interval arithmetic.

        For mul/div all four endpoint combinations are formed and the
        elementwise min/max taken, which is sound regardless of operand signs.
        """
        lb, ub = interval
        if self.apply_right:
            left_lb, right_lb = self.const_val, lb
            left_ub, right_ub = self.const_val, ub
        else:
            left_lb, right_lb = lb, self.const_val
            left_ub, right_ub = ub, self.const_val
        if self.op == "add":
            return (left_lb + right_lb, left_ub + right_ub)
        elif self.op == "sub":
            return (left_lb - right_ub, left_ub - right_lb)
        elif self.op == "mul":
            ll = left_lb * right_lb
            lu = left_lb * right_ub
            ul = left_ub * right_lb
            uu = left_ub * right_ub
            return (
                tensor_reduce(torch.minimum, [ll, lu, ul, uu]),
                tensor_reduce(torch.maximum, [ll, lu, ul, uu]),
            )
        elif self.op == "div":
            # Fix: `assert self.const_val != 0` raises "Boolean value of Tensor
            # ... is ambiguous" for multi-element buffers; check all entries.
            assert (self.const_val != 0).all(), "No division by 0"
            ll = left_lb / right_lb
            lu = left_lb / right_ub
            ul = left_ub / right_lb
            uu = left_ub / right_ub
            left = tensor_reduce(torch.minimum, [ll, lu, ul, uu])
            right = tensor_reduce(torch.maximum, [ll, lu, ul, uu])
            return (left, right)
        else:
            assert False, f"Unknown operation {self.op}"
| 5,758 | 33.90303 | 87 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_residual_block.py | from __future__ import annotations
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import torch
from torch import Tensor, nn
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_container_module import (
AbstractContainerModule,
ActivationLayer,
)
from src.abstract_layers.abstract_sequential import Sequential
from src.concrete_layers import basic_block as concrete_basic_block
from src.concrete_layers.residual_block import ResidualBlock as concreteResidualBlock
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.tags import LayerTag
from src.utilities.config import BacksubstitutionConfig
from src.utilities.dependence_sets import DependenceSets
from src.utilities.queries import QueryCoef
from src.verification_subproblem import SubproblemState
class ResidualBlock(concreteResidualBlock, AbstractContainerModule):
    """Abstract residual block: two parallel abstract Sequential paths whose
    outputs are summed elementwise. Propagation and bookkeeping methods run
    both paths and combine (sum or merge) the results; during backsubstitution
    the shared input bias would be counted twice, so it is subtracted once."""
    path_a: Sequential  # type: ignore[assignment] # hack
    path_b: Sequential  # type: ignore[assignment]
    def __init__(
        self,
        path_a: nn.Sequential,
        path_b: nn.Sequential,
        input_dim: Tuple[int, ...],
        **kwargs: Any,
    ) -> None:
        super(ResidualBlock, self).__init__(path_a=path_a, path_b=path_b)
        # Wrap the concrete paths into abstract Sequentials; both consume the
        # same input, and output_dim is read off path_b's last layer.
        self.path_a = Sequential.from_concrete_module(path_a, input_dim, **kwargs)
        self.path_b = Sequential.from_concrete_module(path_b, input_dim, **kwargs)
        self.output_dim = self.path_b.layers[-1].output_dim
        self.input_dim = input_dim
        self.bias = self.get_babsr_bias()
    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls,
        module: concrete_basic_block.ResidualBlock,
        input_dim: Tuple[int, ...],
        **kwargs: Any,
    ) -> ResidualBlock:
        """Wrap a concrete ResidualBlock into its abstract counterpart."""
        assert isinstance(module, concrete_basic_block.ResidualBlock)
        abstract_layer = cls(
            module.path_a,
            module.path_b,
            input_dim,
            **kwargs,
        )
        return abstract_layer
    def backsubstitute_shape(
        self,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        abstract_shape: MN_BaB_Shape,
        from_layer_index: Optional[int],
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
        preceeding_layers: Optional[List[Any]],
        use_early_termination_for_current_query: bool,  # = False,
        full_back_prop: bool,  # = False,
        optimize_intermediate_bounds: bool,  # = False,
    ) -> Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]]:
        """Backsubstitute ``abstract_shape`` through both paths and sum them.

        The incoming affine forms are saved, pushed through path_a, restored,
        pushed through path_b, and the two results are added coefficient-wise
        with the initial bias subtracted once (it is contained in both
        results). Returns the combined shape together with trivial
        (-inf, inf) bounds.
        """
        # Save the incoming forms so path_b can start from the same state.
        in_lb = abstract_shape.lb.clone()  # Also works properly for dependence set
        in_ub: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            in_ub = abstract_shape.ub.clone()
        unstable_queries_old_for_assert = abstract_shape.unstable_queries
        (a_shape_a, (lbs_a, ubs_a),) = self.path_a.backsubstitute_shape(
            config=config,
            input_lb=input_lb,
            input_ub=input_ub,
            abstract_shape=abstract_shape,
            from_layer_index=None,
            propagate_preceeding_callback=propagate_preceeding_callback,
            preceeding_layers=preceeding_layers,
            use_early_termination_for_current_query=False,
            full_back_prop=False,
            optimize_intermediate_bounds=optimize_intermediate_bounds,
        )
        # Backsubstitution must not change which queries are unstable.
        assert (
            abstract_shape.unstable_queries is None
            or (
                abstract_shape.unstable_queries == unstable_queries_old_for_assert
            ).all()
        )
        a_lb = a_shape_a.lb.clone()
        if a_shape_a.ub is not None:
            # NOTE(review): a_ub stays unbound when a_shape_a.ub is None; the
            # later use is only short-circuit-guarded by a_shape_b.ub, which
            # implicitly assumes both paths agree on having an upper bound.
            a_ub = a_shape_a.ub.clone()
        # Restore the saved forms and run path_b from the same starting point.
        abstract_shape.update_bounds(in_lb, in_ub)
        a_shape_b, __ = self.path_b.backsubstitute_shape(
            config=config,
            input_lb=input_lb,
            input_ub=input_ub,
            abstract_shape=abstract_shape,
            from_layer_index=None,
            propagate_preceeding_callback=propagate_preceeding_callback,
            preceeding_layers=preceeding_layers,
            use_early_termination_for_current_query=False,
            full_back_prop=False,
            optimize_intermediate_bounds=optimize_intermediate_bounds,
        )
        assert (
            abstract_shape.unstable_queries is None
            or (
                abstract_shape.unstable_queries == unstable_queries_old_for_assert
            ).all()
        )
        new_lower: AffineForm
        new_upper: Optional[AffineForm] = None
        new_lb_coef: QueryCoef
        new_lb_bias = (
            a_lb.bias + a_shape_b.lb.bias - in_lb.bias
        )  # Both the shape in a and in b contain the initial bias terms, so one has to be subtracted
        # Sum the coefficients; if only one side is a DependenceSets, expand it
        # to a dense tensor first.
        if isinstance(a_lb.coef, DependenceSets) and not isinstance(
            a_shape_b.lb.coef, DependenceSets
        ):
            new_lb_coef = (
                a_lb.coef.to_tensor(a_shape_b.lb.coef.shape[-3:]) + a_shape_b.lb.coef
            )
        elif not isinstance(a_lb.coef, DependenceSets) and isinstance(
            a_shape_b.lb.coef, DependenceSets
        ):
            new_lb_coef = a_lb.coef + a_shape_b.lb.coef.to_tensor(a_lb.coef.shape[-3:])
        else:
            new_lb_coef = a_lb.coef + a_shape_b.lb.coef
        new_lower = AffineForm(new_lb_coef, new_lb_bias)
        # Same combination for the upper bound, if both paths produced one.
        if a_shape_b.ub is not None and a_ub is not None and in_ub is not None:
            new_ub_coef: QueryCoef
            new_ub_bias = a_ub.bias + a_shape_b.ub.bias - in_ub.bias
            if isinstance(a_ub.coef, DependenceSets) and not isinstance(
                a_shape_b.ub.coef, DependenceSets
            ):
                new_ub_coef = (
                    a_ub.coef.to_tensor(a_shape_b.ub.coef.shape[-3:])
                    + a_shape_b.ub.coef
                )
            elif not isinstance(a_ub.coef, DependenceSets) and isinstance(
                a_shape_b.ub.coef, DependenceSets
            ):
                new_ub_coef = a_ub.coef + a_shape_b.ub.coef.to_tensor(
                    a_ub.coef.shape[-3:]
                )
            else:
                new_ub_coef = a_ub.coef + a_shape_b.ub.coef
            new_upper = AffineForm(new_ub_coef, new_ub_bias)
        abstract_shape.update_bounds(new_lower, new_upper)
        return (
            abstract_shape,
            (
                -np.inf * torch.ones_like(lbs_a),
                np.inf * torch.ones_like(lbs_a),
            ),  # TODO: this seems unnecessary, move bounds into abstract_shape and just update them when it makes sense
        )
    def get_babsr_bias(self) -> Tensor:
        """Sum of the two paths' BaBSR biases (moved to GPU if devices differ)."""
        bias_a = self.path_a.get_babsr_bias()
        bias_b = self.path_b.get_babsr_bias()
        # In case one of them is on the gpu we will move both to gpu
        # Have to do this here as the paths (sequentials) are unaware of the device
        if bias_a.is_cuda != bias_b.is_cuda:
            bias_a = bias_a.cuda()
            bias_b = bias_b.cuda()
        return nn.Parameter(bias_a + bias_b)
    def reset_input_bounds(self) -> None:
        """Reset input bounds on this block and recursively on both paths."""
        super(ResidualBlock, self).reset_input_bounds()
        self.path_a.reset_input_bounds()
        self.path_b.reset_input_bounds()
    def reset_optim_input_bounds(self) -> None:
        # NOTE(review): this calls super().reset_input_bounds() rather than
        # reset_optim_input_bounds() — looks like a copy-paste slip; confirm
        # against AbstractContainerModule before changing.
        super(ResidualBlock, self).reset_input_bounds()
        self.path_a.reset_optim_input_bounds()
        self.path_b.reset_optim_input_bounds()
    def reset_output_bounds(self) -> None:
        """Reset output bounds on this block and recursively on both paths."""
        super(ResidualBlock, self).reset_output_bounds()
        self.path_a.reset_output_bounds()
        self.path_b.reset_output_bounds()
    def forward_pass(
        self,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
        preceeding_layers: Optional[List[Any]],
        ibp_call: Callable[[], None],
        timeout: float,
    ) -> None:
        """Run a bounding forward pass through both paths (side effects only)."""
        self.path_a.forward_pass(
            config,
            input_lb,
            input_ub,
            propagate_preceeding_callback,
            preceeding_layers,
            ibp_call,
            timeout,
        )
        self.path_b.forward_pass(
            config,
            input_lb,
            input_ub,
            propagate_preceeding_callback,
            preceeding_layers,
            ibp_call,
            timeout,
        )
    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Elementwise sum of the two paths' propagated intervals."""
        interval_a = self.path_a.propagate_interval(
            interval,
            use_existing_bounds,
            subproblem_state,
            activation_layer_only=activation_layer_only,
            set_input=set_input,
            set_output=set_output,
        )
        interval_b = self.path_b.propagate_interval(
            interval,
            use_existing_bounds,
            subproblem_state,
            activation_layer_only=activation_layer_only,
            set_input=set_input,
            set_output=set_output,
        )
        return interval_a[0] + interval_b[0], interval_a[1] + interval_b[1]
    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Sum of the two paths' propagated abstract elements."""
        abs_output_a = self.path_a.propagate_abstract_element(
            abs_input,
            use_existing_bounds,
            activation_layer_only,
            set_input=set_input,
            set_output=set_output,
        )
        abs_output_b = self.path_b.propagate_abstract_element(
            abs_input,
            use_existing_bounds,
            activation_layer_only,
            set_input=set_input,
            set_output=set_output,
        )
        return abs_output_a + abs_output_b
    def set_dependence_set_applicability(self, applicable: bool = True) -> None:
        """Dependence sets apply only if both paths' final layers support them."""
        self.path_a.set_dependence_set_applicability(applicable)
        self.path_b.set_dependence_set_applicability(applicable)
        self.dependence_set_applicable = (
            self.path_a.layers[-1].dependence_set_applicable
            and self.path_b.layers[-1].dependence_set_applicable
        )
    def get_default_split_constraints(
        self, batch_size: int, device: torch.device
    ) -> Dict[LayerTag, Tensor]:
        """Merge the default split constraints collected from both paths."""
        split_constraints: Dict[LayerTag, Tensor] = {}
        split_constraints.update(
            self.path_a.get_default_split_constraints(batch_size, device)
        )
        split_constraints.update(
            self.path_b.get_default_split_constraints(batch_size, device)
        )
        return split_constraints
    def get_default_split_points(
        self, batch_size: int, device: torch.device
    ) -> Dict[LayerTag, Tensor]:
        """Merge the default split points collected from both paths."""
        split_points: Dict[LayerTag, Tensor] = {}
        split_points.update(self.path_a.get_default_split_points(batch_size, device))
        split_points.update(self.path_b.get_default_split_points(batch_size, device))
        return split_points
    def get_activation_layers(self) -> Dict[LayerTag, ActivationLayer]:
        """Merge the activation layers collected from both paths."""
        act_layers: Dict[LayerTag, ActivationLayer] = {}
        act_layers.update(self.path_a.get_activation_layers())
        act_layers.update(self.path_b.get_activation_layers())
        return act_layers
    def get_current_intermediate_bounds(
        self,
    ) -> OrderedDict[LayerTag, Tuple[Tensor, Tensor]]:
        """Merge the current intermediate bounds from both paths."""
        intermediate_bounds: OrderedDict[
            LayerTag, Tuple[Tensor, Tensor]
        ] = OrderedDict()
        intermediate_bounds.update(self.path_a.get_current_intermediate_bounds())
        intermediate_bounds.update(self.path_b.get_current_intermediate_bounds())
        return intermediate_bounds
    def get_current_optimized_intermediate_bounds(
        self,
    ) -> OrderedDict[LayerTag, Tuple[Tensor, Tensor]]:
        """Merge the current optimized intermediate bounds from both paths."""
        intermediate_bounds: OrderedDict[
            LayerTag, Tuple[Tensor, Tensor]
        ] = OrderedDict()
        intermediate_bounds.update(
            self.path_a.get_current_optimized_intermediate_bounds()
        )
        intermediate_bounds.update(
            self.path_b.get_current_optimized_intermediate_bounds()
        )
        return intermediate_bounds
    def set_intermediate_input_bounds(
        self, intermediate_bounds: OrderedDict[LayerTag, Tuple[Tensor, Tensor]]
    ) -> None:
        """Push the given intermediate bounds into both paths."""
        self.path_a.set_intermediate_input_bounds(intermediate_bounds)
        self.path_b.set_intermediate_input_bounds(intermediate_bounds)
    def get_activation_layer_ids(
        self, act_layer_ids: Optional[List[LayerTag]] = None
    ) -> List[LayerTag]:
        """Append the activation layer ids of both paths to the given list."""
        if act_layer_ids is None:
            act_layer_ids = []
        act_layer_ids += self.path_a.get_activation_layer_ids()
        act_layer_ids += self.path_b.get_activation_layer_ids()
        return act_layer_ids
    def get_relu_layer_ids(
        self, act_layer_ids: Optional[List[LayerTag]] = None
    ) -> List[LayerTag]:
        """Append the ReLU layer ids of both paths to the given list."""
        if act_layer_ids is None:
            act_layer_ids = []
        act_layer_ids += self.path_a.get_relu_layer_ids()
        act_layer_ids += self.path_b.get_relu_layer_ids()
        return act_layer_ids
| 13,871 | 36.390836 | 120 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_basic_block.py | from __future__ import annotations
from typing import Any, Tuple
from torch import Tensor
from src.abstract_layers.abstract_container_module import AbstractContainerModule
from src.abstract_layers.abstract_sequential import Sequential
from src.concrete_layers import basic_block as concrete_basic_block
from src.concrete_layers.basic_block import BasicBlock as concreteBasicBlock
class BasicBlock(
    concreteBasicBlock, AbstractContainerModule
):  # TODO: should this inherit from abstract ResidualBlock?
    """Abstract counterpart of the concrete BasicBlock residual unit."""
    path_a: Sequential  # type: ignore[assignment] # hack
    path_b: Sequential  # type: ignore[assignment]
    output_dim: Tuple[int, ...]
    bias: Tensor
    def __init__(
        self,
        in_planes: int,
        planes: int,
        stride: int,
        bn: bool,
        kernel: int,
        input_dim: Tuple[int, ...],
        **kwargs: Any,
    ) -> None:
        super(BasicBlock, self).__init__(in_planes, planes, stride, bn, kernel)
        # Re-wrap the concrete paths built by the super constructor.
        self.path_a = Sequential.from_concrete_module(self.path_a, input_dim, **kwargs)  # type: ignore[arg-type] # hack
        self.path_b = Sequential.from_concrete_module(self.path_b, input_dim, **kwargs)  # type: ignore[arg-type] # hack
        self.output_dim = self.path_b.layers[-1].output_dim
        # NOTE(review): get_babsr_bias() below raises NotImplementedError in
        # this class, so __init__ (and hence from_concrete_module) cannot
        # complete unless a subclass overrides it — confirm intended usage.
        self.bias = self.get_babsr_bias()
    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls,
        module: concrete_basic_block.BasicBlock,
        input_dim: Tuple[int, ...],
        **kwargs: Any,
    ) -> BasicBlock:
        """Build the abstract block from a concrete one, re-wrapping its paths."""
        assert isinstance(module, concrete_basic_block.BasicBlock)
        abstract_layer = cls(
            module.in_planes,
            module.planes,
            module.stride,
            module.bn,
            module.kernel,
            input_dim,
            **kwargs,
        )
        # Replace the paths constructed in __init__ with wrappers around the
        # given concrete module's paths.
        abstract_layer.path_a = Sequential.from_concrete_module(
            module.path_a, input_dim, **kwargs
        )
        abstract_layer.path_b = Sequential.from_concrete_module(
            module.path_b, input_dim, **kwargs
        )
        abstract_layer.bias = abstract_layer.get_babsr_bias()
        return abstract_layer
    def get_babsr_bias(self) -> Tensor:
        raise NotImplementedError  # TODO: inherit from abstract ResidualBlock?
| 2,288 | 34.215385 | 120 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_reshape.py | from __future__ import annotations
from typing import Any, Optional, Tuple
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.concrete_layers.reshape import Reshape as concreteReshape
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.verification_subproblem import SubproblemState
class Reshape(concreteReshape, AbstractModule):
    """Abstract reshape layer: propagates bounds through a fixed reshape.

    Dimensions stored here exclude the batch dimension.
    """

    def __init__(
        self,
        out_dim: Tuple[int, ...],
        input_dim: Tuple[int, ...],
    ):
        super(Reshape, self).__init__(out_dim)
        self.input_dim = input_dim
        # We assume no batch-dim
        self.output_dim = out_dim

    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: concreteReshape, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> Reshape:
        """Wrap a concrete Reshape module into its abstract counterpart."""
        assert isinstance(module, concreteReshape)
        return cls(module.shape, input_dim)

    def backsubstitute(
        self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
    ) -> MN_BaB_Shape:
        """Undo the reshape on both affine bound forms of the shape."""
        lower = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
        upper: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            upper = self._backsub_affine_form(abstract_shape.ub, abstract_shape)
        abstract_shape.update_bounds(lower, upper)
        return abstract_shape

    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Reshape the query coefficients back to this layer's input dims."""
        if abstract_shape.uses_dependence_sets():
            assert False, "Not implemented - Reshape with dependence sets"
        assert isinstance(affine_form.coef, Tensor)
        batch_and_query = affine_form.coef.shape[:2]
        reshaped_coef = affine_form.coef.reshape((*batch_and_query, *self.input_dim))
        return AffineForm(reshaped_coef, affine_form.bias)

    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Reshape both interval ends to the output dimensions."""
        lower, upper = interval
        return lower.reshape(self.output_dim), upper.reshape(self.output_dim)

    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """View the abstract element as (batch, *output_dim)."""
        return abs_input.view((abs_input.shape[0], *self.output_dim))
| 2,999 | 33.090909 | 86 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_convtranspose2d.py | from __future__ import annotations
from typing import Any, Optional, Tuple, Union
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.utilities.dependence_sets import DependenceSets
from src.utilities.general import get_neg_pos_comp
from src.verification_subproblem import SubproblemState
class ConvTranspose2d(nn.ConvTranspose2d, AbstractModule):
in_channels: int
out_channels: int
kernel_size: Tuple[int, ...] # Tuple[int, int] ?
input_dim: Tuple[int, ...]
stride: Tuple[int, ...] # Tuple[int, int] ?
padding: Tuple[int, ...] # type: ignore[assignment] # checked at runtime below (Tuple[int, int] ?)
dilation: Tuple[int, ...] # Tuple[int, int] ?
groups: int
bias: Optional[Tensor]
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
input_dim: Tuple[int, ...],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
output_padding: Union[int, Tuple[int, int]] = 0,
groups: int = 1,
bias: bool = True,
dilation: Union[int, Tuple[int, int]] = 1,
):
super(ConvTranspose2d, self).__init__( # type: ignore # mypy issue 4335
in_channels,
out_channels,
kernel_size,
stride,
padding,
output_padding,
groups,
bias,
dilation, # type: ignore # Turns out pytorch typings are wrong / inconsistent
)
self.input_dim = input_dim
assert not isinstance(self.padding, str)
output_height = (
(input_dim[1] - 1) * self.stride[0]
- 2 * self.padding[0]
+ self.dilation[0] * (self.kernel_size[0] - 1)
+ self.output_padding[0]
+ 1
)
output_width = (
(input_dim[2] - 1) * self.stride[1]
- 2 * self.padding[1]
+ self.dilation[1] * (self.kernel_size[1] - 1)
+ self.output_padding[1]
+ 1
)
self.output_dim = (out_channels, output_height, output_width)
self.dependence_set_block = False
@classmethod
def from_concrete_module( # type: ignore[override] # (checked at runtime)
cls, module: nn.Conv2d, input_dim: Tuple[int, ...], **kwargs: Any
) -> ConvTranspose2d:
assert isinstance(module, nn.ConvTranspose2d)
assert len(module.kernel_size) == 2
assert len(module.stride) == 2
assert len(module.padding) == 2
assert len(module.output_padding) == 2
assert len(module.dilation) == 2
assert not isinstance(module.padding, str)
abstract_layer = cls(
module.in_channels,
module.out_channels,
module.kernel_size, # type: ignore[arg-type]
input_dim,
module.stride, # type: ignore[arg-type]
module.padding, # type: ignore[arg-type]
module.output_padding, # type: ignore[arg-type]
module.groups,
module.bias is not None,
module.dilation, # type: ignore[arg-type]
)
abstract_layer.weight.data = module.weight.data
if module.bias is not None:
assert abstract_layer.bias is not None
abstract_layer.bias.data = module.bias.data
return abstract_layer
def backsubstitute(
self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
) -> MN_BaB_Shape:
new_lb_form = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
new_ub_form: Optional[AffineForm] = None
if abstract_shape.ub is not None:
new_ub_form = self._backsub_affine_form(abstract_shape.ub, abstract_shape)
abstract_shape.update_bounds(new_lb_form, new_ub_form)
return abstract_shape
    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Backsubstitute one affine form through the transposed convolution.

        The adjoint of a ConvTranspose2d w.r.t. its input is a regular
        convolution with the same kernel, hence coefficients are propagated
        with ``F.conv2d``; the layer bias is folded into the form's bias term.
        """
        new_coef: Union[Tensor, DependenceSets]
        if abstract_shape.uses_dependence_sets():
            # Dependence sets only support symmetric stride/padding,
            # dilation 1 and a single group.
            symmetric_stride = self.stride[0] == self.stride[1]
            symmetric_padding = self.padding[0] == self.padding[1]
            dilation_one = (
                self.dilation[0] == self.dilation[1] == 1 and self.dilation[0] == 1
            )  # NOTE(review): second clause is redundant with the chained comparison
            group_one = self.groups == 1
            dependence_sets_assumptions = (
                symmetric_stride and symmetric_padding and dilation_one and group_one
            )
            assert dependence_sets_assumptions, "Dependence set assumptions violated."

            def backsubstitute_coef_and_bias(
                coef: DependenceSets, bias: Tensor
            ) -> Tuple[DependenceSets, Tensor]:
                # Fold the layer bias into the affine bias: sum each
                # coefficient patch over its spatial extent (dims 3,4) and
                # weight by the per-channel bias.
                new_bias = bias + (
                    0
                    if self.bias is None
                    else (coef.sets.sum((3, 4)) * self.bias).sum(2)
                )
                # [B*C*HW, c, d, d] -> [B*C*HW, c', d', d']
                new_coef_sets = F.conv2d(
                    coef.sets.flatten(end_dim=1), self.weight, stride=self.stride
                )
                assert not isinstance(self.padding, str)
                new_coef = DependenceSets(
                    new_coef_sets.view(*coef.sets.shape[:2], *new_coef_sets.shape[1:]),
                    coef.spatial_idxs,
                    coef.input_dim,
                    # Composing this layer shrinks/offsets the receptive
                    # field bookkeeping of the dependence sets.
                    coef.cstride * self.stride[0],
                    coef.cpadding * self.stride[0] + self.padding[0],
                )
                return new_coef, new_bias

            assert isinstance(affine_form.coef, DependenceSets)
            new_coef, new_bias = backsubstitute_coef_and_bias(
                affine_form.coef, affine_form.bias
            )
        else:
            assert isinstance(affine_form.coef, Tensor)
            assert not isinstance(self.padding, str)
            sz = affine_form.coef.shape  # (batch, queries, C, H, W)
            new_bias = affine_form.bias + (
                0
                if self.bias is None
                else (affine_form.coef.sum((3, 4)) * self.bias).sum(2)
            )
            # Merge batch and query dims for the convolution, restore after.
            new_coef = F.conv2d(
                affine_form.coef.view((sz[0] * sz[1], *sz[2:])),
                self.weight,
                None,
                self.stride,
                self.padding,
                self.dilation,
                self.groups,
            )
            assert isinstance(new_coef, Tensor)
            new_coef = new_coef.view((sz[0], sz[1], *new_coef.shape[1:]))
        return AffineForm(new_coef, new_bias)
    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Interval (box) propagation through the transposed convolution.

        The kernel is decomposed into its negative and positive components;
        each output bound then pairs the matching input bound (lower bound =
        positive part on the input lower bound + negative part on the input
        upper bound, and symmetrically for the upper bound). The bias is
        added exactly once per bound.
        """
        interval_lb, interval_ub = interval
        neg_kernel, pos_kernel = get_neg_pos_comp(self.weight)

        def conv2d_transpose_with_kernel_and_bias(
            input: Tensor, kernel: Tensor, bias: Optional[Tensor]
        ) -> Tensor:
            # Same hyper-parameters as the layer, but a custom kernel/bias.
            return F.conv_transpose2d(
                input=input,
                weight=kernel,
                bias=bias,
                stride=self.stride,
                padding=self.padding,
                output_padding=self.output_padding,
                dilation=self.dilation,
                groups=self.groups,
            )

        output_lb = conv2d_transpose_with_kernel_and_bias(
            interval_lb, pos_kernel, self.bias
        ) + conv2d_transpose_with_kernel_and_bias(interval_ub, neg_kernel, None)
        output_ub = conv2d_transpose_with_kernel_and_bias(
            interval_ub, pos_kernel, self.bias
        ) + conv2d_transpose_with_kernel_and_bias(interval_lb, neg_kernel, None)
        # assert (output_ub >= output_lb).all()
        return output_lb, output_ub
def propagate_abstract_element(
self,
abs_input: AbstractElement,
use_existing_bounds: Optional[bool] = None,
activation_layer_only: bool = False,
set_input: bool = True,
set_output: bool = True,
) -> AbstractElement:
assert all([x == self.stride[0] for x in self.stride])
assert all([x == self.padding[0] for x in self.padding])
assert all([x == self.dilation[0] for x in self.dilation])
assert all([x == self.output_padding[0] for x in self.output_padding])
return abs_input.convtranspose2d(
self.weight,
self.bias,
self.stride[0],
self.padding[0],
self.output_padding[0],
self.groups,
self.dilation[0],
)
# | 9,058 | 35.528226 | 103 | py |  (extraction artifact: dataset stats row, not code)
# --- repo: mn-bab-SABR_ready · file: src/abstract_layers/abstract_slice.py ---
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import torch
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.concrete_layers.slice import Slice as concreteSlice
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.subproblem_state import SubproblemState
from src.utilities.config import BacksubstitutionConfig
class Slice(concreteSlice, AbstractModule):
    """Abstract transformer for a tensor slice along a single dimension.

    Forward propagation selects ``range(starts, ends, steps)`` along the
    (batch-inclusive) dimension ``dim``; backsubstitution scatters the query
    coefficients back into a zero tensor of the full input shape.

    Fix: tensor assignment now indexes with a ``tuple`` of slices instead of a
    Python ``list`` — indexing with a non-tuple sequence of slices is
    deprecated and rejected by newer PyTorch/NumPy versions.
    """

    def __init__(
        self,
        starts: int,
        ends: int,
        dim: int,
        steps: int,
        input_dim: Tuple[int, ...],
    ):
        """``input_dim`` excludes the batch dimension; ``dim`` includes it."""
        super(Slice, self).__init__(dim, starts, ends, steps)
        self.starts = starts
        self.ends = ends
        self.abs_dim = dim - 1  # This dim does not include batch-size
        self.steps = steps
        # The only allowed neg. end is -1 which signals that we go till the end
        if self.ends < 0:
            assert self.ends == -1, "Negative slice ending != -1"
            self.ends = input_dim[self.abs_dim]
        self.input_dim = input_dim
        output_dim = list(input_dim)
        output_dim[self.abs_dim] = len(range(self.starts, self.ends, steps))
        self.output_dim = tuple(output_dim)

    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: concreteSlice, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> Slice:
        """Wrap a concrete ``Slice`` layer as its abstract counterpart."""
        assert isinstance(module, concreteSlice)
        abstract_layer = cls(
            module.starts, module.ends, module.dim, module.steps, input_dim
        )
        return abstract_layer

    def backsubstitute(  # type: ignore[override]
        self,
        config: BacksubstitutionConfig,
        abstract_shape: Union[MN_BaB_Shape, List[MN_BaB_Shape]],
    ) -> MN_BaB_Shape:
        """Backsubstitute one shape, or the sum of several incoming shapes.

        When a list is given (multiple consumers of this layer's output),
        the individual backsubstituted forms are accumulated into the first
        shape, which is then returned.
        """
        new_ub_form: Optional[AffineForm] = None
        if isinstance(abstract_shape, MN_BaB_Shape):  # Single input
            new_lb_form = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
            if abstract_shape.ub is not None:
                new_ub_form = self._backsub_affine_form(
                    abstract_shape.ub, abstract_shape
                )
        else:  # Iterate through all incoming bounds
            assert isinstance(abstract_shape, List)
            assert isinstance(abstract_shape[0], MN_BaB_Shape)
            # new_lb_form_i will have all dimensions properly expanded
            new_lb_form = self._backsub_affine_form(
                abstract_shape[0].lb, abstract_shape[0]
            )
            if abstract_shape[0].ub is not None:
                new_ub_form = self._backsub_affine_form(
                    abstract_shape[0].ub, abstract_shape[0]
                )
            for abs_shape in abstract_shape[1:]:
                assert isinstance(abs_shape, MN_BaB_Shape)
                new_lb_form_i = self._backsub_affine_form(abs_shape.lb, abs_shape)
                new_lb_form.coef += new_lb_form_i.coef
                new_lb_form.bias += new_lb_form_i.bias
                new_ub_form_i: Optional[AffineForm] = None
                if abs_shape.ub is not None:
                    assert new_ub_form is not None
                    new_ub_form_i = self._backsub_affine_form(abs_shape.ub, abs_shape)
                    new_ub_form.coef += new_ub_form_i.coef
                    new_ub_form.bias += new_ub_form_i.bias
            abstract_shape = abstract_shape[0]
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape

    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Scatter the sliced coefficients back into the full input shape.

        Coefficients of input entries not selected by the slice stay zero;
        the bias is unaffected by a slice.
        """
        if abstract_shape.uses_dependence_sets():
            assert False, "Not implemented - Slice with dependence sets"
        assert isinstance(affine_form.coef, Tensor)
        bs, num_queries = affine_form.coef.shape[:2]
        new_coef_shape = (bs, num_queries, *self.input_dim)
        slice_indices = [
            slice(0, dim, 1) for dim in new_coef_shape
        ]  # list of slices selecting whole input
        slice_indices[self.abs_dim + 2] = slice(
            self.starts, self.ends, self.steps
        )  # replace slice dimension with right stride
        new_coef = torch.zeros(new_coef_shape, device=affine_form.device)
        # Indexing must use a tuple: a list of slices is deprecated indexing.
        new_coef[tuple(slice_indices)] = affine_form.coef
        new_bias = affine_form.bias
        return AffineForm(new_coef, new_bias)

    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Slice both interval bounds along the layer's dimension."""
        interval_lb, interval_ub = interval
        index = torch.tensor(
            range(self.starts, self.ends, self.steps), device=interval_lb.device
        )
        # +1 accounts for the batch dimension of the propagated bounds.
        output_lb = torch.index_select(interval_lb, self.abs_dim + 1, index)
        output_ub = torch.index_select(interval_ub, self.abs_dim + 1, index)
        return output_lb, output_ub

    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Delegate to the abstract element's slice primitive."""
        return abs_input.slice(self.abs_dim + 1, self.starts, self.ends, self.steps)
# | 5,575 | 36.173333 | 86 | py |  (extraction artifact: dataset stats row, not code)
# --- repo: mn-bab-SABR_ready · file: src/abstract_layers/abstract_split_block.py ---
from __future__ import annotations
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import torch
from torch import Tensor, nn
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_container_module import (
AbstractContainerModule,
ActivationLayer,
)
from src.abstract_layers.abstract_sequential import Sequential
from src.concrete_layers.split_block import SplitBlock as concreteSplitBlock
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.subproblem_state import SubproblemState
from src.state.tags import LayerTag, query_tag
from src.utilities.config import BacksubstitutionConfig
from src.utilities.general import tensor_reduce
from src.utilities.queries import get_output_bound_initial_query_coef
class SplitBlock(concreteSplitBlock, AbstractContainerModule):
    def __init__(
        self,
        center_path: nn.Sequential,
        split: Tuple[bool, Tuple[int, ...], Optional[int], int, bool],
        inner_reduce: Tuple[int, bool, bool],
        outer_reduce: Tuple[int, bool, bool],
        input_dim: Tuple[int, ...],
        **kwargs: Any,
    ) -> None:
        """Abstract transformer for a split/center-path/mul/reduce/div block.

        The input is split along ``split[3]`` into a center part (sent through
        ``center_path``) and a residual part; the center output is multiplied
        with the residual, sum-reduced, and divided by the sum-reduced
        residual.

        :param split: ``split[1]`` holds the two split sizes, ``split[3]`` the
            (batch-inclusive) split dimension.
        :param inner_reduce: ``inner_reduce[0]`` is the reduction dim applied
            to the product (batch-inclusive).
        :param outer_reduce: ``outer_reduce[0]`` is the reduction dim applied
            to the residual divisor (batch-inclusive).
        :param input_dim: input shape without the batch dimension.
        """
        super(SplitBlock, self).__init__(
            center_path=center_path,
            split=split,
            inner_reduce=inner_reduce,
            outer_reduce=outer_reduce,
        )
        # Handle all dims
        self.input_dim = input_dim
        self.split = split
        self.split_dim = split[3]
        self.inner_reduce_dim = inner_reduce[0]
        self.outer_reduce_dim = outer_reduce[0]
        # Remove batch from dim (concrete dims include the batch axis).
        if self.inner_reduce_dim > 0:
            self.inner_reduce_dim -= 1
        if self.outer_reduce_dim > 0:
            self.outer_reduce_dim -= 1
        if self.split_dim > 0:
            self.split_dim -= 1
        if self.split_dim < 0:
            # Normalize negative dims relative to the batch-less shape.
            self.split_dim = len(input_dim) + self.split_dim
        # Dimensions after the split
        interm_dim = list(self.input_dim)
        interm_dim[self.split_dim] = split[1][0]
        self.center_dim = tuple(interm_dim)
        interm_dim = list(self.input_dim)
        interm_dim[self.split_dim] = split[1][1]
        self.res_dim = tuple(interm_dim)
        # Center path
        # mypy doesnt see that the center_path is a subclass of the center_path of the concrete_path
        self.abs_center_path = Sequential.from_concrete_module(  # type: ignore[assignment]
            center_path, self.center_dim, **kwargs
        )
        # Other parameters
        # Box concretization of division factor - set via propagate-interval
        self.res_lower: Optional[Tensor] = None
        self.res_upper: Optional[Tensor] = None
        # Output dimensions: center-path output with the inner reduction
        # dimension removed.
        center_out_dim = list(self.abs_center_path.output_dim)
        self.center_out_dim_pre_reduce = center_out_dim[self.inner_reduce_dim]
        # center_out_dim[self.inner_reduce_dim] = 1
        center_out_dim = [
            dim for (i, dim) in enumerate(center_out_dim) if i != self.inner_reduce_dim
        ]
        self.output_dim = tuple(center_out_dim)
        self.bias = self.get_babsr_bias()
@classmethod
def from_concrete_module( # type: ignore[override] # (checked at runtime)
cls,
module: concreteSplitBlock,
input_dim: Tuple[int, ...],
**kwargs: Any,
) -> SplitBlock:
assert isinstance(module, concreteSplitBlock)
abstract_layer = cls(
module.center_path,
module.split,
module.inner_reduce,
module.outer_reduce,
input_dim,
**kwargs,
)
return abstract_layer
def backsubstitute_shape(
self,
config: BacksubstitutionConfig,
input_lb: Tensor,
input_ub: Tensor,
abstract_shape: MN_BaB_Shape,
from_layer_index: Optional[int],
propagate_preceeding_callback: Optional[
Callable[
[BacksubstitutionConfig, MN_BaB_Shape, bool],
Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
]
] = None,
preceeding_layers: Optional[List[Any]] = None,
use_early_termination_for_current_query: bool = False,
full_back_prop: bool = False,
optimize_intermediate_bounds: bool = False,
) -> Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]]:
assert from_layer_index is None
assert self.res_lower is not None and self.res_upper is not None
# Dimensions
coef_split_dim = self.split_dim + 2
# Wraps the prceeding callback such that we correctly apply the split header block
preceeding_callback = self._get_split_block_callback(
propagate_preceeding_callback
)
# Preparation:
# 1. We start by backpropagating a dummy shape through the center path. This gives us easy access to the output bounds of the path
# 2. We use these bounds in the abstract mul transformer
# - Note that these bounds are all positive i.e. our mul transformer is as tight as it can be
# - We build: 1. a backsubstitution matrix for the mul transformer
# 2. Input lower and upper bounds for the input into the div transformer
# 3. Use 2.2 and the mul transformer to compute the backprop matrix for the div
# 4. Backprop through mul
# 5. Backprop through center
# Get the output bounds of the center path
center_path_out_lb, center_path_out_ub = self.get_center_path_out_bounds(
input_lb,
input_ub,
abstract_shape,
config,
preceeding_callback,
preceeding_layers,
)
# last_centre_layer = self.abs_center_path.layers[-1]
# if last_centre_layer.input_bounds is not None and isinstance(
# last_centre_layer, ReLU
# ):
# print(
# (
# F.relu(last_centre_layer.input_bounds[0])
# <= F.relu(last_centre_layer.input_bounds[1]) + 1e-7
# ).all()
# )
# center_path_out_lb = F.relu(last_centre_layer.input_bounds[0])
# center_path_out_ub = F.relu(last_centre_layer.input_bounds[1])
# else:
# center_path_out_lb, center_path_out_ub = self.get_center_path_out_bounds(
# input_lb,
# input_ub,
# abstract_shape,
# config,
# preceeding_callback,
# preceeding_layers,
# )
# if isinstance(last_centre_layer, ReLU):
# assert last_centre_layer.input_bounds is not None
# center_path_out_lb = torch.maximum(
# center_path_out_lb, F.relu(last_centre_layer.input_bounds[0])
# )
# center_path_out_ub = torch.minimum(
# center_path_out_ub, F.relu(last_centre_layer.input_bounds[1])
# )
assert (center_path_out_lb <= center_path_out_ub + 1e-10).all()
# Get the lower and upper-bound slopes and offsets for the multiplication
res_lower, res_upper = self.res_lower, self.res_upper
mul_factors = (res_lower, res_upper)
mul_convex_bounds = self._get_multiplication_slopes_and_intercepts(
mul_factors, (center_path_out_lb, center_path_out_ub)
)
# Get the input bounds for the dividend
(
div_input_lb_lb,
div_input_lb_ub,
div_input_ub_lb,
div_input_ub_ub,
) = self._get_mul_lbs_and_ubs(
mul_factors, (center_path_out_lb, center_path_out_ub)
)
div_input_lb = torch.minimum(div_input_lb_lb, div_input_ub_lb).sum(
dim=self.outer_reduce_dim + 1
)
div_input_ub = torch.maximum(div_input_lb_ub, div_input_ub_ub).sum(
dim=self.outer_reduce_dim + 1
)
div_input_bounds = (
div_input_lb,
div_input_ub,
)
# Get the lower and upper-bound slopes and offsets for the division
div_factor_lower = 1 / res_lower.sum(dim=self.outer_reduce_dim + 1)
div_factor_upper = 1 / res_lower.sum(dim=self.outer_reduce_dim + 1)
assert (div_factor_lower * div_factor_upper > 0).all()
div_factors = (
torch.minimum(div_factor_lower, div_factor_upper),
torch.maximum(div_factor_lower, div_factor_upper),
)
div_convex_bounds = self._get_multiplication_slopes_and_intercepts(
div_factors, div_input_bounds
)
# Backpropagation Part 1 Div-Reshape
lower_form = self._backsub_affine_form_first(
abstract_shape.lb, div_convex_bounds, False, abstract_shape
)
upper_form: Optional[AffineForm] = None
if abstract_shape.ub is not None:
upper_form = self._backsub_affine_form_first(
abstract_shape.ub, div_convex_bounds, True, abstract_shape
)
# Update Abstract Shape so that we can go through mul layer
abstract_shape.update_bounds(lower_form, upper_form)
# Backprop Part 2 - Mul
lower_form = self._backsub_affine_form_given_convex_bounds(
abstract_shape.lb, mul_convex_bounds, False, abstract_shape
)
if abstract_shape.ub is not None:
upper_form = self._backsub_affine_form_given_convex_bounds(
abstract_shape.ub, mul_convex_bounds, True, abstract_shape
)
# Update Abstract Shape
abstract_shape.update_bounds(lower_form, upper_form)
unstable_queries_old_for_assert = abstract_shape.unstable_queries
# Backprop center_path
(center_shape, (lbs_c, ubs_c),) = self.abs_center_path.backsubstitute_shape(
config,
input_lb,
input_ub,
abstract_shape,
None,
preceeding_callback, # Append the Split-Block callback
preceeding_layers, # Append the Split-Block layer
use_early_termination_for_current_query=False,
full_back_prop=False, # Only want to backprop the path
optimize_intermediate_bounds=optimize_intermediate_bounds,
)
assert (
abstract_shape.unstable_queries is None
or (
abstract_shape.unstable_queries == unstable_queries_old_for_assert
).all()
)
# Backprop through the split
# As we concretized the second split, we simply append it with 0 sensitivity
# NOTE: Not generalized for arbitrary splits (assumes only 2 splits)
assert len(self.split[1]) == 2
assert isinstance(center_shape.lb.coef, Tensor)
zero_append_shape = [
center_shape.lb.coef.shape[0],
center_shape.lb.coef.shape[1],
*self.input_dim,
]
zero_append_shape[coef_split_dim] = self.split[1][1]
zero_append_matrix = torch.zeros(
zero_append_shape, device=abstract_shape.device
)
zero_appended_lb = torch.cat(
(center_shape.lb.coef, zero_append_matrix), dim=coef_split_dim
)
lower_form = AffineForm(zero_appended_lb, center_shape.lb.bias)
if center_shape.ub is not None:
assert isinstance(center_shape.ub.coef, Tensor)
zero_appended_ub = torch.cat(
(center_shape.ub.coef, zero_append_matrix), dim=coef_split_dim
)
upper_form = AffineForm(zero_appended_ub, center_shape.ub.bias)
abstract_shape.update_bounds(lower_form, upper_form)
return (
abstract_shape,
(
-np.inf * torch.ones_like(lbs_c, device=abstract_shape.device),
np.inf * torch.ones_like(ubs_c, device=abstract_shape.device),
), # TODO: this seems unnecessary, move bounds into abstract_shape and just update them when it makes sense
)
    def _backsub_affine_form_first(
        self,
        affine_form: AffineForm,
        div_convex_bounds: Tuple[Tensor, Tensor, Tensor, Tensor],
        compute_upper_bound: bool,
        abstract_shape: MN_BaB_Shape,
    ) -> AffineForm:
        """Backsubstitute through the division, then undo the inner reduction.

        The sum-reduction is inverted by repeating the coefficient tensor
        ``center_out_dim_pre_reduce`` times along the reduced dimension
        (the adjoint of a sum is a broadcast).
        """
        coef_inner_reduce_dim = self.inner_reduce_dim + 2  # + (batch, queries)
        div_form = self._backsub_affine_form_given_convex_bounds(
            affine_form, div_convex_bounds, compute_upper_bound, abstract_shape
        )
        assert isinstance(div_form.coef, Tensor)
        # Backprop through reduce_sum
        repeat_dims = [1] * (len(div_form.coef.shape) + 1)
        repeat_dims[coef_inner_reduce_dim] = self.center_out_dim_pre_reduce
        pre_red_lb_coef = div_form.coef.unsqueeze(coef_inner_reduce_dim).repeat(
            repeat_dims
        )
        return AffineForm(pre_red_lb_coef, div_form.bias)
    def _backsub_affine_form_given_convex_bounds(
        self,
        affine_form: AffineForm,
        convex_bounds: Tuple[Tensor, Tensor, Tensor, Tensor],
        compute_upper_bound: bool,
        abstract_shape: MN_BaB_Shape,
    ) -> AffineForm:
        """Backsubstitute an affine form through elementwise linear relaxations.

        ``convex_bounds`` holds (lb_slope, lb_offset, ub_slope, ub_offset) of
        the relaxed elementwise operation; depending on the coefficient signs
        the appropriate slope/offset is selected by the shape's helpers.
        """
        lb_slope, lb_offset, ub_slope, ub_offset = convex_bounds
        # Insert the query dimension for broadcasting against coefficients.
        lb_slope = lb_slope.unsqueeze(1)
        lb_offset = lb_offset.unsqueeze(1)
        ub_slope = ub_slope.unsqueeze(1)
        ub_offset = ub_offset.unsqueeze(1)
        # Handle bias
        lb_bias, ub_bias = abstract_shape._matmul_of_coef_and_interval(
            lb_offset, ub_offset
        )
        new_bias = ub_bias if compute_upper_bound else lb_bias
        assert new_bias is not None
        new_bias += affine_form.bias
        # Handle coef
        new_coef: Optional[Tensor]
        new_lb_coef, new_ub_coef = abstract_shape._elementwise_mul_of_coef_and_interval(
            lb_slope, ub_slope
        )
        new_coef = new_ub_coef if compute_upper_bound else new_lb_coef
        assert new_coef is not None
        return AffineForm(new_coef, new_bias)
    def get_babsr_bias(self) -> Tensor:
        """Bias used by the BaBSR branching heuristic, delegated to the center path."""
        return self.abs_center_path.get_babsr_bias()
    def reset_input_bounds(self) -> None:
        """Clear cached input bounds on this block and on the center path."""
        super(SplitBlock, self).reset_input_bounds()
        self.abs_center_path.reset_input_bounds()
def reset_optim_input_bounds(self) -> None:
super(SplitBlock, self).reset_input_bounds()
self.abs_center_path.reset_optim_input_bounds()
    def reset_output_bounds(self) -> None:
        """Clear cached output bounds on this block and on the center path."""
        super(SplitBlock, self).reset_output_bounds()
        self.abs_center_path.reset_output_bounds()
    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Interval propagation: split input, run the center path, multiply by
        the residual split, sum-reduce, and divide by the reduced residual.

        Also caches the residual bounds (``res_lower``/``res_upper``) for the
        later backward pass (``backsubstitute_shape``).
        """
        center_lower, res_lower = torch.split(
            interval[0], split_size_or_sections=self.split[1], dim=self.split_dim + 1
        )
        center_upper, res_upper = torch.split(
            interval[1], split_size_or_sections=self.split[1], dim=self.split_dim + 1
        )
        lower_out, upper_out = self.abs_center_path.propagate_interval(
            (center_lower, center_upper),
            use_existing_bounds=use_existing_bounds,
            subproblem_state=subproblem_state,
            activation_layer_only=activation_layer_only,
            set_input=set_input,
            set_output=set_output,
        )
        # Interval product: take min/max over all endpoint combinations.
        all_combs = [
            lower_out * res_lower,
            lower_out * res_upper,
            upper_out * res_lower,
            upper_out * res_upper,
        ]
        lower_inner_merge = tensor_reduce(torch.minimum, all_combs)
        upper_inner_merge = tensor_reduce(torch.maximum, all_combs)
        lower_inner_reduce = torch.sum(
            lower_inner_merge, dim=self.inner_reduce_dim + 1
        )  # We propagate with batch size
        upper_inner_reduce = torch.sum(upper_inner_merge, dim=self.inner_reduce_dim + 1)
        lower_outer_reduce = torch.sum(res_lower, dim=self.outer_reduce_dim + 1)
        upper_outer_reduce = torch.sum(res_upper, dim=self.outer_reduce_dim + 1)
        # If the interval contains 0 we would have NaNs
        assert (lower_outer_reduce * upper_outer_reduce > 0).all()
        # Save this for backward pass
        self.res_lower = res_lower
        self.res_upper = res_upper
        # Interval quotient: again min/max over all endpoint combinations.
        all_combs = [
            lower_inner_reduce / lower_outer_reduce,
            lower_inner_reduce / upper_outer_reduce,
            upper_inner_reduce / lower_outer_reduce,
            upper_inner_reduce / upper_outer_reduce,
        ]
        lower_out = tensor_reduce(torch.minimum, all_combs)
        upper_out = tensor_reduce(torch.maximum, all_combs)
        return (lower_out, upper_out)
    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Abstract-domain propagation mirroring ``propagate_interval``.

        The residual split is concretized to its box bounds; multiplication
        and division are then handled via ``multiply_interval``.
        """
        center_abs, res_abs = abs_input.split(
            split_size_or_sections=self.split[1], dim=self.split_dim + 1
        )
        res_lb, res_ub = res_abs.concretize()
        center_out = self.abs_center_path.propagate_abstract_element(
            center_abs,
            use_existing_bounds=use_existing_bounds,
            activation_layer_only=activation_layer_only,
            set_input=set_input,
            set_output=set_output,
        )
        # Multiplication
        mul_ae = center_out.multiply_interval((res_lb, res_ub))
        # Inner reduction
        inner_ae = mul_ae.sum(self.inner_reduce_dim + 1, reduce_dim=True)
        # Outer reduction
        res_lb_reduce = torch.sum(res_lb, dim=self.outer_reduce_dim + 1)
        res_ub_reduce = torch.sum(res_ub, dim=self.outer_reduce_dim + 1)
        # If the interval contains 0 we would have NaNs
        assert (res_lb_reduce * res_ub_reduce > 0).all()
        # Division: reciprocal interval ordered as (smaller, larger) factor.
        div_factors = (
            1 / torch.maximum(res_ub_reduce, res_lb_reduce),
            1 / torch.minimum(res_ub_reduce, res_lb_reduce),
        )
        assert (div_factors[0] <= div_factors[1]).all()
        out_ae = inner_ae.multiply_interval(div_factors)
        return out_ae
    def set_dependence_set_applicability(self, applicable: bool = True) -> None:
        """Propagate dependence-set applicability into the center path and
        mirror the decision of its final layer on this block."""
        self.abs_center_path.set_dependence_set_applicability(applicable)
        self.dependence_set_applicable = self.abs_center_path.layers[
            -1
        ].dependence_set_applicable
def get_default_split_constraints(
self, batch_size: int, device: torch.device
) -> Dict[LayerTag, Tensor]:
split_constraints: Dict[LayerTag, Tensor] = {}
split_constraints.update(
self.abs_center_path.get_default_split_constraints(batch_size, device)
)
return split_constraints
def get_default_split_points(
self, batch_size: int, device: torch.device
) -> Dict[LayerTag, Tensor]:
split_points: Dict[LayerTag, Tensor] = {}
split_points.update(
self.abs_center_path.get_default_split_points(batch_size, device)
)
return split_points
def get_activation_layers(self) -> Dict[LayerTag, ActivationLayer]:
act_layers: Dict[LayerTag, ActivationLayer] = {}
act_layers.update(self.abs_center_path.get_activation_layers())
return act_layers
def get_current_intermediate_bounds(
self,
) -> OrderedDict[LayerTag, Tuple[Tensor, Tensor]]:
intermediate_bounds: OrderedDict[
LayerTag, Tuple[Tensor, Tensor]
] = OrderedDict()
intermediate_bounds.update(
self.abs_center_path.get_current_intermediate_bounds()
)
return intermediate_bounds
def get_current_optimized_intermediate_bounds(
self,
) -> OrderedDict[LayerTag, Tuple[Tensor, Tensor]]:
intermediate_bounds: OrderedDict[
LayerTag, Tuple[Tensor, Tensor]
] = OrderedDict()
intermediate_bounds.update(
self.abs_center_path.get_current_optimized_intermediate_bounds()
)
return intermediate_bounds
    def set_intermediate_input_bounds(
        self, intermediate_bounds: OrderedDict[LayerTag, Tuple[Tensor, Tensor]]
    ) -> None:
        """Install precomputed intermediate bounds on the center path's layers."""
        self.abs_center_path.set_intermediate_input_bounds(intermediate_bounds)
def get_activation_layer_ids(
self, act_layer_ids: Optional[List[LayerTag]] = None
) -> List[LayerTag]:
act_layer_ids = self.abs_center_path.get_activation_layer_ids()
return act_layer_ids
def get_relu_layer_ids(
self, act_layer_ids: Optional[List[LayerTag]] = None
) -> List[LayerTag]:
act_layer_ids = self.abs_center_path.get_relu_layer_ids()
return act_layer_ids
    def get_center_path_out_bounds(
        self,
        input_lb: Tensor,
        input_ub: Tensor,
        abstract_shape: MN_BaB_Shape,
        config: BacksubstitutionConfig,
        preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ] = None,
        preceeding_layers: Optional[List[Any]] = None,
    ) -> Tuple[Tensor, Tensor]:
        """Compute box bounds on the center path's output via a fresh
        backsubstitution query over the full path.

        A dummy identity-query shape is propagated backwards through the
        entire center path (and, via the callback, the preceding network),
        then concretized against the network input box.
        """
        intermediate_bounds_to_recompute = None  # compute all out bounds
        initial_intermediate_bound_coef = get_output_bound_initial_query_coef(
            dim=self.abs_center_path.output_dim,
            intermediate_bounds_to_recompute=intermediate_bounds_to_recompute,
            batch_size=abstract_shape.batch_size,
            use_dependence_sets=config.use_dependence_sets,
            device=abstract_shape.device,
            dtype=None,
        )
        center_bound_abstract_shape = MN_BaB_Shape(  # Here AffineForm will be cloned later
            query_id=query_tag(self.abs_center_path),
            query_prev_layer=None,  # TODO: do we want reduced parameter sharing for those bounds?
            queries_to_compute=intermediate_bounds_to_recompute,
            lb=AffineForm(initial_intermediate_bound_coef),
            ub=AffineForm(initial_intermediate_bound_coef),
            unstable_queries=None,  # (not using early termination)
            subproblem_state=abstract_shape.subproblem_state,
        )
        (
            propagated_shape,
            layer_bounds,
        ) = self.abs_center_path._get_mn_bab_shape_after_layer(
            from_layer_index=len(self.abs_center_path.layers)
            - 1,  # Full Backprop through center layer
            config=config.where(use_early_termination=False),
            input_lb=input_lb,
            input_ub=input_ub,
            abstract_shape=center_bound_abstract_shape,
            propagate_preceeding_callback=preceeding_callback,
            preceeding_layers=preceeding_layers,
            use_early_termination_for_current_query=False,  # TODO why not?
            optimize_intermediate_bounds=False,
        )
        assert propagated_shape is not None
        assert layer_bounds is None
        (
            center_path_out_lb,
            center_path_out_ub,
        ) = propagated_shape.concretize(input_lb, input_ub)
        # Reshape the flat concretized bounds to the final layer's input shape.
        center_path_out_lb = center_path_out_lb.view_as(
            self.abs_center_path.layers[-1].input_bounds[0]
        )
        assert center_path_out_ub is not None
        center_path_out_ub = center_path_out_ub.view_as(
            self.abs_center_path.layers[-1].input_bounds[1]
        )
        return (center_path_out_lb, center_path_out_ub)
    def _get_multiplication_slopes_and_intercepts(
        self, mul_bounds: Tuple[Tensor, Tensor], input_bounds: Tuple[Tensor, Tensor]
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        """Linear relaxation of ``x * c`` for ``c`` in ``mul_bounds`` and
        ``x`` in ``input_bounds``.

        Returns (lb_slope, lb_intercept, ub_slope, ub_intercept) such that
        ``lb_slope*x + lb_intercept <= x*c <= ub_slope*x + ub_intercept``;
        the slopes are secants between the extremal products at the interval
        endpoints, shifted by a small ``D`` for floating-point soundness.
        """
        input_lb, input_ub = input_bounds
        # Get the lower and upper bound of the multiplication
        (mult_lb_lb, mult_lb_ub, mult_ub_lb, mult_ub_ub) = self._get_mul_lbs_and_ubs(
            mul_bounds, input_bounds
        )
        # Epsilon guarding against division by zero on degenerate intervals.
        D = 1e-12 if input_lb.dtype == torch.float64 else 1e-7
        # Get slopes and offsets
        # TODO look at effect of soundness correction here
        convex_lb_slope = (mult_ub_lb - mult_lb_lb) / (input_ub - input_lb + D)
        convex_lb_intercept = mult_lb_lb - input_lb * convex_lb_slope - D
        convex_ub_slope = (mult_ub_ub - mult_lb_ub) / (input_ub - input_lb + D)
        convex_ub_intercept = mult_lb_ub - input_lb * convex_ub_slope + D
        return (
            convex_lb_slope,
            convex_lb_intercept,
            convex_ub_slope,
            convex_ub_intercept,
        )
def _get_mul_lbs_and_ubs(
self, b1: Tuple[Tensor, Tensor], b2: Tuple[Tensor, Tensor]
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
input_lb_opts = [b2[0] * b1[0], b2[0] * b1[1]]
input_ub_opts = [b2[1] * b1[0], b2[1] * b1[1]]
mult_lb_lb = tensor_reduce(torch.minimum, input_lb_opts)
mult_lb_ub = tensor_reduce(torch.maximum, input_lb_opts)
mult_ub_lb = tensor_reduce(torch.minimum, input_ub_opts)
mult_ub_ub = tensor_reduce(torch.maximum, input_ub_opts)
return (mult_lb_lb, mult_lb_ub, mult_ub_lb, mult_ub_ub)
    def _get_split_block_callback(
        self,
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
    ) -> Callable[
        [BacksubstitutionConfig, MN_BaB_Shape, bool],
        Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
    ]:
        """ReLU layers within the center path need a propagate preceeding callback that takes the split at the top into account"""

        def wrapped_call(
            config: BacksubstitutionConfig,
            abstract_shape: MN_BaB_Shape,
            use_early_termination_for_current_query: bool,
        ) -> Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]]:
            # Backwards prop through split-block: pad the coefficients with
            # zeros for the residual part of the split, then hand off to the
            # original preceding callback (if any).
            coef_split_dim = self.split_dim + 2
            lb = abstract_shape.lb
            assert isinstance(lb.coef, Tensor)
            zero_append_shape = [lb.coef.shape[0], lb.coef.shape[1], *self.input_dim]
            zero_append_shape[coef_split_dim] = self.split[1][1]
            zero_append_matrix = torch.zeros(
                zero_append_shape, device=abstract_shape.device
            )
            zero_appended_lb = torch.cat(
                (lb.coef, zero_append_matrix), dim=coef_split_dim
            ).to(abstract_shape.device)
            lower_form = AffineForm(zero_appended_lb, lb.bias)
            upper_form: Optional[AffineForm] = None
            if abstract_shape.ub is not None:
                ub = abstract_shape.ub
                assert isinstance(ub.coef, Tensor)
                zero_appended_ub = torch.cat(
                    (ub.coef, zero_append_matrix), dim=coef_split_dim
                ).to(abstract_shape.device)
                upper_form = AffineForm(zero_appended_ub, ub.bias)
            abstract_shape.update_bounds(lower_form, upper_form)
            if propagate_preceeding_callback is None:
                # No preceding network: return unbounded box placeholders.
                assert isinstance(abstract_shape.lb.coef, Tensor)
                bound_shape = abstract_shape.lb.coef.shape[:2]
                return (
                    abstract_shape,
                    (
                        -np.inf * torch.ones(bound_shape, device=abstract_shape.device),
                        np.inf * torch.ones(bound_shape, device=abstract_shape.device),
                    ),  # TODO: this seems unnecessary, move bounds into abstract_shape and just update them when it makes sense
                )
            else:
                return propagate_preceeding_callback(
                    config,
                    abstract_shape,
                    use_early_termination_for_current_query,
                )

        return wrapped_call
# | 28,165 | 37.321088 | 138 | py |  (extraction artifact: dataset stats row, not code)
# --- repo: mn-bab-SABR_ready · file: src/abstract_layers/abstract_container_module.py ---
from __future__ import annotations
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import Tensor
from src.abstract_layers.abstract_max_pool2d import MaxPool2d
from src.abstract_layers.abstract_module import AbstractModule
from src.abstract_layers.abstract_relu import ReLU
from src.abstract_layers.abstract_sigmoid import Sigmoid
from src.abstract_layers.abstract_tanh import Tanh
from src.mn_bab_shape import MN_BaB_Shape
from src.state.tags import LayerTag
from src.utilities.config import BacksubstitutionConfig
from src.verification_subproblem import SplitState
ActivationLayer = Union[
ReLU, Sigmoid, Tanh, MaxPool2d
] # TODO: add common superclass for activation layers?
ActivationLayers = [ReLU, Sigmoid, Tanh, MaxPool2d]
class AbstractContainerModule(AbstractModule):
    """Interface for abstract modules that own nested abstract layers.

    Concrete containers (e.g. ``Sequential``) implement bound bookkeeping,
    backsubstitution and split-state management over their children; this
    base class only provides the shared default-split-state construction.
    """

    def set_intermediate_input_bounds(
        self, intermediate_bounds: OrderedDict[LayerTag, Tuple[Tensor, Tensor]]
    ) -> None:
        """Install precomputed input bounds on the contained layers."""
        raise NotImplementedError

    def get_default_split_constraints(
        self, batch_size: int, device: torch.device
    ) -> Dict[LayerTag, Tensor]:
        """Collect default split constraints of all contained activation layers."""
        raise NotImplementedError

    def get_default_split_points(
        self, batch_size: int, device: torch.device
    ) -> Dict[LayerTag, Tensor]:
        """Collect default split points of all contained activation layers."""
        raise NotImplementedError

    def get_default_split_state(
        self, batch_size: int, device: torch.device
    ) -> SplitState:
        """Bundle the children's default split constraints/points into a ``SplitState``."""
        return SplitState.create_default(
            split_constraints=self.get_default_split_constraints(batch_size, device),
            split_points=self.get_default_split_points(batch_size, device),
            batch_size=batch_size,
            device=device,
        )

    def get_activation_layers(self) -> Dict[LayerTag, ActivationLayer]:
        """Return all contained activation layers, keyed by layer tag."""
        raise NotImplementedError

    def set_dependence_set_applicability(self, applicable: bool = True) -> None:
        """Mark whether dependence sets may be used through this container."""
        raise NotImplementedError

    def get_current_intermediate_bounds(
        self,
    ) -> OrderedDict[LayerTag, Tuple[Tensor, Tensor]]:
        """Return the currently cached intermediate bounds of all children."""
        raise NotImplementedError

    def get_current_optimized_intermediate_bounds(
        self,
    ) -> OrderedDict[LayerTag, Tuple[Tensor, Tensor]]:
        """Return the currently cached optimized intermediate bounds of all children."""
        raise NotImplementedError

    def backsubstitute_shape(
        self,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        abstract_shape: MN_BaB_Shape,
        from_layer_index: Optional[int],
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
        preceeding_layers: Optional[List[Any]],
        use_early_termination_for_current_query: bool,  # = False,
        full_back_prop: bool,  # = False,
        optimize_intermediate_bounds: bool,  # = False,
    ) -> Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]]:
        """Backsubstitute ``abstract_shape`` through all contained layers."""
        raise NotImplementedError

    def forward_pass(
        self,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
        preceeding_layers: Optional[List[Any]],
        ibp_call: Callable[[], None],
        timeout: float,
    ) -> None:
        """Run a forward bounding pass over the contained layers."""
        raise NotImplementedError
| 3,496 | 32.625 | 85 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_tanh.py | from __future__ import annotations
import os
from typing import Any, Callable, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from src.abstract_layers.abstract_module import AbstractModule
from src.abstract_layers.abstract_sig_base import SigBase
from src.mn_bab_shape import MN_BaB_Shape
from src.state.tags import ParameterTag, layer_tag
from src.utilities.bilinear_interpolator import BilinearInterpol
from src.utilities.config import BacksubstitutionConfig
def tanh(x: Tensor) -> Tensor:
    """Elementwise hyperbolic tangent (module-level handle passed to SigBase)."""
    return x.tanh()
def d_tanh(x: Tensor) -> Tensor:
    """First derivative of tanh: 1 - tanh(x)^2.

    The original evaluated ``torch.tanh(x)`` twice per call; computing it
    once and squaring is mathematically identical and halves the work.
    """
    t = torch.tanh(x)
    return 1 - t * t


# Directory containing this file; used to locate pre-computed data files.
FILE_DIR = os.path.realpath(os.path.dirname(__file__))
class Tanh(SigBase, AbstractModule):
    """Abstract tanh activation layer.

    Tangent/intersection points and the split-point interpolator are
    computed (resp. loaded from disk) once and cached as *class*
    attributes, so they are shared across all Tanh instances.
    """

    # Lazily populated class-level caches (see __init__).
    sp_interpolator: Optional[BilinearInterpol] = None
    intersection_points: Optional[Tensor] = None
    tangent_points: Optional[Tensor] = None
    step_size: Optional[float] = None
    max_x: Optional[float] = None
    def __init__(self, dim: Tuple[int, ...]) -> None:
        """Create a tanh layer with input/output shape ``dim``.

        Populates the class-level caches on first instantiation only.
        """
        super(Tanh, self).__init__(dim, tanh, d_tanh)
        if Tanh.intersection_points is None:
            (
                Tanh.intersection_points,
                Tanh.tangent_points,
                Tanh.step_size,
                Tanh.max_x,
            ) = SigBase._compute_bound_to_tangent_point(tanh, d_tanh)
        if Tanh.sp_interpolator is None:
            # Pre-computed bilinear interpolation table for split points.
            Tanh.sp_interpolator = BilinearInterpol.load_from_path(
                os.path.realpath(
                    os.path.join(FILE_DIR, "../../data/tanh_bil_interpol.pkl")
                )
            )
        self.output_dim = dim
        self.dependence_set_block = False
    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: nn.Tanh, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> Tanh:
        """Build the abstract layer from a concrete ``nn.Tanh``."""
        assert isinstance(module, nn.Tanh)
        return cls(input_dim)
    def backsubstitute(
        self,
        config: BacksubstitutionConfig,
        abstract_shape: MN_BaB_Shape,
        intermediate_bounds_callback: Optional[
            Callable[[Tensor], Tuple[Tensor, Tensor]]
        ] = None,
        prev_layer: Optional[AbstractModule] = None,
    ) -> MN_BaB_Shape:
        """Backsubstitute ``abstract_shape`` through this tanh layer.

        Moves the cached tangent points onto the shape's device/dtype
        first (mutating the class-level cache in place).
        """
        # TODO solve better
        if (
            self.tangent_points is not None
            and self.tangent_points.device != abstract_shape.device
        ):
            self.tangent_points = self.tangent_points.to(device=abstract_shape.device)
        if (
            self.tangent_points is not None
            and self.tangent_points.dtype != abstract_shape.lb.bias.dtype
        ):
            self.tangent_points = self.tangent_points.to(
                dtype=abstract_shape.lb.bias.dtype
            )
        return super(Tanh, self)._backsubstitute(
            abstract_shape,
            self.tangent_points,
            self.step_size,
            self.max_x,
            intermediate_bounds_callback,
        )
    def get_approximation_slopes_and_intercepts(
        self,
        bounds: Tuple[Tensor, Tensor],
        abstract_shape: Optional[MN_BaB_Shape] = None,
        parameter_key: Optional[ParameterTag] = None,
        split_constraints: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        """Return linear lower/upper relaxation (slopes and intercepts).

        Raises:
            RuntimeError: if the class-level caches were never populated.
        """
        if self.tangent_points is None or self.step_size is None or self.max_x is None:
            raise RuntimeError(
                "Cannot compute Sig/Tanh bounds without pre-computed values"
            )
        if (
            abstract_shape is not None
            and self.tangent_points.device != abstract_shape.device
        ):
            self.tangent_points = self.tangent_points.to(device=abstract_shape.device)
        return super(Tanh, self)._get_approximation_slopes_and_intercepts_for_act(
            bounds,
            self.tangent_points,
            self.step_size,
            self.max_x,
            tanh,
            d_tanh,
            abstract_shape,
            parameter_key,
            layer_tag(self),
            split_constraints,
        )
    def forward(self, x: Tensor) -> Tensor:
        """Concrete forward: elementwise tanh."""
        return torch.tanh(x)
    @classmethod
    def get_split_points(cls, lb: Tensor, ub: Tensor) -> Tensor:
        """Look up a branching split point for the interval [lb, ub]."""
        assert cls.sp_interpolator, "Split point interpolator for Tanh not set"
        return cls.sp_interpolator.get_value(lb, ub)
| 4,335 | 32.612403 | 87 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_max_pool2d.py | from __future__ import annotations
from math import floor
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.utilities.dependence_sets import DependenceSets
from src.verification_subproblem import SubproblemState
class MaxPool2d(nn.MaxPool2d, AbstractModule):
    """Abstract 2D max-pooling layer."""

    # Normalized to 2-tuples in __init__, hence the annotation overrides.
    kernel_size: Tuple[int, int]  # type: ignore[assignment] # hack
    stride: Tuple[int, int]  # type: ignore[assignment]
    padding: Tuple[int, int]  # type: ignore[assignment]
    dilation: Tuple[int, int]  # type: ignore[assignment]
    def __init__(
        self,
        kernel_size: Union[int, Tuple[int, int]],
        input_dim: Tuple[int, ...],
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Union[int, Tuple[int, int]] = 0,
        dilation: Union[int, Tuple[int, int]] = 1,
    ):
        """Create the layer and derive ``output_dim`` from ``input_dim``.

        Scalar hyperparameters are expanded to (h, w) tuples before being
        handed to ``nn.MaxPool2d``; ``input_dim`` is (C, H, W).
        """
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        if isinstance(stride, int):
            stride = (stride, stride)
        if isinstance(padding, int):
            padding = (padding, padding)
        if isinstance(dilation, int):
            dilation = (dilation, dilation)
        super(MaxPool2d, self).__init__(  # type: ignore # mypy issue 4335
            kernel_size, stride, padding, dilation
        )
        self.input_dim = input_dim
        # Standard pooling output-size formula:
        # floor((H + 2p - d*(k-1) - 1) / s + 1), per spatial axis.
        output_height = floor(
            (
                input_dim[1]
                + 2 * self.padding[0]
                - self.dilation[0] * (self.kernel_size[0] - 1)
                - 1
            )
            / self.stride[0]
            + 1
        )
        output_width = floor(
            (
                input_dim[2]
                + 2 * self.padding[1]
                - self.dilation[1] * (self.kernel_size[1] - 1)
                - 1
            )
            / self.stride[1]
            + 1
        )
        self.output_dim = (input_dim[0], output_height, output_width)
        self.dependence_set_block = False
    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: nn.MaxPool2d, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> MaxPool2d:
        """Build the abstract layer from a concrete ``nn.MaxPool2d``."""
        assert isinstance(module, nn.MaxPool2d)
        abstract_layer = cls(
            module.kernel_size,
            input_dim,
            module.stride,
            module.padding,
            module.dilation,
        )
        return abstract_layer
    def backsubstitute(
        self,
        config: BacksubstitutionConfig,
        abstract_shape: MN_BaB_Shape,
        intermediate_bounds_callback: Optional[
            Callable[[Tensor], Tuple[Tensor, Tensor]]
        ] = None,
        prev_layer: Optional[AbstractModule] = None,
    ) -> MN_BaB_Shape:
        """Backsubstitute linear bounds through the max-pool layer.

        For each output position the dominating input (largest lower
        bound) is identified; where it strictly dominates all other upper
        bounds in the window, the coefficient is passed through to that
        input, otherwise the bound is relaxed via the window's max upper
        bound. Supports both dense Tensor coefficients and DependenceSets.

        Requires ``self.input_bounds`` to have been set beforehand.
        """
        if self.input_bounds is None:
            raise RuntimeError("Cannot backsubstitute if bounds have not been set.")
        # if abstract_shape.uses_dependence_sets():
        #     assert False, "Dependence sets with MaxPool not implemented."
        #     assert isinstance(abstract_shape.lb.coef, Tensor)
        input_lb = self.input_bounds[0].view(-1, *self.input_dim)
        input_ub = self.input_bounds[1].view(-1, *self.input_dim)
        # assert input_lb.shape[0] == 1
        # NOTE(review): names look swapped here (pooling the lower input
        # bound yields the output *lower* bound). Both tensors are only
        # consumed via the `tight` mask (positions where they coincide),
        # so behavior appears unaffected — confirm before renaming.
        output_ub = F.max_pool2d(input_lb, self.kernel_size, self.stride, self.padding)
        output_lb = F.max_pool2d(input_ub, self.kernel_size, self.stride, self.padding)
        output_dim = output_ub.shape
        input_dim = input_lb.shape
        # Pad with -inf so padded cells can never be selected as the max.
        pid_lb = F.pad(
            input_lb,
            (self.padding[1], self.padding[1], self.padding[0], self.padding[0]),
            value=-torch.inf,
        )
        pid_ub = F.pad(
            input_ub,
            (self.padding[1], self.padding[1], self.padding[0], self.padding[0]),
            value=-torch.inf,
        )
        # --- Branch 1: dense Tensor coefficients ---
        if isinstance(abstract_shape.lb.coef, Tensor):
            # Positions where output bounds coincide are exact ("tight").
            tight = (output_ub == output_lb).all(0).all(0)
            lb_coef = abstract_shape.lb.coef.permute(1, 0, 2, 3, 4).flatten(start_dim=1)
            new_lb_coef = torch.zeros(
                (
                    abstract_shape.lb.num_queries,
                    abstract_shape.batch_size,
                    *self.input_dim,
                ),
                device=abstract_shape.device,
            ).flatten(start_dim=1)
            new_lb_bias = abstract_shape.lb.bias.clone().permute(1, 0)
            if abstract_shape.ub is not None:
                assert isinstance(abstract_shape.ub.coef, Tensor)
                ub_coef = abstract_shape.ub.coef.permute(1, 0, 2, 3, 4).flatten(
                    start_dim=1
                )
                new_ub_coef = torch.zeros(
                    (
                        abstract_shape.ub.num_queries,
                        abstract_shape.batch_size,
                        *self.input_dim,
                    ),
                    device=abstract_shape.device,
                ).flatten(start_dim=1)
                new_ub_bias = abstract_shape.ub.bias.clone().permute(1, 0)
            device = abstract_shape.device
            # Flat-index strides for input/output tensors.
            offsets_in = torch.tensor(
                [int(np.prod(input_dim[i + 1 :])) for i in range(len(input_dim))],
                device=device,
            )
            offsets_out = torch.tensor(
                [int(np.prod(output_dim[i + 1 :])) for i in range(len(output_dim))],
                device=device,
            )
            ch_range = torch.arange(output_dim[1], device=device).repeat(output_dim[0])
            bs_range = torch.arange(output_dim[0], device=device).repeat_interleave(
                output_dim[1]
            )
            # Tight positions contribute a constant to the bias.
            new_lb_bias += (
                (
                    abstract_shape.lb.coef
                    * (output_lb * tight.unsqueeze(0).unsqueeze(0)).unsqueeze(
                        1
                    )  # add Batch, Query, Channel [H, W]
                )
                .flatten(2)
                .sum(2)
                .permute(1, 0)
            )
            if abstract_shape.ub is not None:
                new_ub_bias += (
                    (
                        abstract_shape.ub.coef
                        * (output_lb * tight.unsqueeze(0).unsqueeze(0)).unsqueeze(1)
                    )
                    .flatten(2)
                    .sum(2)
                    .permute(1, 0)
                )
            # Handle only non-tight spatial positions explicitly.
            for y in torch.arange(output_dim[2])[~tight.all(1)]:
                for x in torch.arange(output_dim[3])[~tight[y]]:
                    if tight[y, x]:
                        assert False
                    # Get the input_window
                    w_in_idy = y * self.stride[0]
                    w_in_idx = x * self.stride[1]
                    w_lb = pid_lb[
                        :,
                        :,
                        w_in_idy : w_in_idy + self.kernel_size[0],
                        w_in_idx : w_in_idx + self.kernel_size[1],
                    ].flatten(start_dim=2)
                    w_ub = pid_ub[
                        :,
                        :,
                        w_in_idy : w_in_idy + self.kernel_size[0],
                        w_in_idx : w_in_idx + self.kernel_size[1],
                    ].flatten(start_dim=2)
                    best_lb, best_lb_i = w_lb.max(2)
                    max_ub = w_ub.max(2)[0]
                    strict_dom = (
                        torch.sum((best_lb.unsqueeze(2) <= w_ub).float(), 2) == 1.0
                    ).view(
                        -1
                    )  # Strict domination check
                    # Index of respective lower bound element (w.r.t. unpadded input window)
                    in_idx = (best_lb_i % self.kernel_size[1]).flatten()
                    in_idy = torch.div(
                        best_lb_i, self.kernel_size[1], rounding_mode="trunc"
                    ).flatten()
                    # NOTE(review): x is the width axis (padded by padding[1])
                    # and y the height axis (padding[0]); the indices below
                    # look swapped and would matter for asymmetric padding
                    # — confirm.
                    tot_idx = in_idx + w_in_idx - self.padding[0]
                    tot_idy = in_idy + w_in_idy - self.padding[1]
                    assert (
                        (tot_idx >= 0)
                        & (tot_idx < self.input_dim[2])
                        & (tot_idy >= 0)
                        & (tot_idy < self.input_dim[1])
                    ).all()
                    in_idx = (
                        bs_range * offsets_in[0]
                        + ch_range * offsets_in[1]
                        + tot_idy * offsets_in[2]
                        + tot_idx * offsets_in[3]
                    )
                    out_idx = (
                        bs_range * offsets_out[0]
                        + ch_range * offsets_out[1]
                        + y * offsets_out[2]
                        + x * offsets_out[3]
                    )
                    # Selected actual input
                    new_lb_coef[:, in_idx] += lb_coef[:, out_idx] * (
                        (lb_coef[:, out_idx] >= 0)
                        + (lb_coef[:, out_idx] < 0) * strict_dom
                    )
                    new_lb_bias += (
                        (
                            lb_coef[:, out_idx]
                            * (lb_coef[:, out_idx] < 0)
                            * (~strict_dom.unsqueeze(0))
                            * max_ub
                        )
                        .view(abstract_shape.num_queries, abstract_shape.batch_size, -1)
                        .sum(-1)
                    )
                    if abstract_shape.ub is not None:
                        new_ub_coef[:, in_idx] += ub_coef[:, out_idx] * (
                            (ub_coef[:, out_idx] < 0)
                            + (ub_coef[:, out_idx] >= 0) * strict_dom
                        )
                        new_ub_bias[:] += (
                            (
                                ub_coef[:, out_idx]
                                * (ub_coef[:, out_idx] >= 0)
                                * (~strict_dom)
                                * max_ub
                            )
                            .view(
                                abstract_shape.num_queries,
                                abstract_shape.batch_size,
                                -1,
                            )
                            .sum(-1)
                        )
            new_lb_coef = new_lb_coef.view(
                abstract_shape.lb.num_queries,
                abstract_shape.batch_size,
                *self.input_dim,
            ).permute(1, 0, 2, 3, 4)
            new_lb_form = AffineForm(new_lb_coef, new_lb_bias.permute(1, 0))
            # Upper bound
            if abstract_shape.ub is None:
                new_ub_form: Optional[AffineForm] = None
            else:
                new_ub_coef = new_ub_coef.view(
                    abstract_shape.lb.num_queries,
                    abstract_shape.batch_size,
                    *self.input_dim,
                ).permute(1, 0, 2, 3, 4)
                new_ub_form = AffineForm(new_ub_coef, new_ub_bias.permute(1, 0))
        # --- Branch 2: DependenceSets coefficients ---
        elif isinstance(abstract_shape.lb.coef, DependenceSets):
            symmetric_stride = self.stride[0] == self.stride[1]
            symmetric_padding = self.padding[0] == self.padding[1]
            dilation_one = (
                self.dilation[0] == self.dilation[1] == 1 and self.dilation[0] == 1
            )
            dependence_sets_assumptions = (
                symmetric_stride and symmetric_padding and dilation_one
            )
            assert dependence_sets_assumptions
            (
                bs,
                num_queries,
                kernel_c,
                kernel_h,
                kernel_w,
            ) = abstract_shape.lb.coef.sets.shape
            c, input_h, input_w = abstract_shape.lb.coef.input_dim
            # Handle padding for DS, as unfold can only pad with 0's
            ds_padding = abstract_shape.lb.coef.cpadding * self.stride[0]
            ds_stride = abstract_shape.lb.coef.cstride * self.stride[0]
            kernel_size = (kernel_h - 1) * self.stride[0] + self.kernel_size[0]
            pid_lb = F.pad(
                pid_lb,
                (ds_padding,) * 4,
                value=-torch.inf,
            )
            pid_ub = F.pad(
                pid_ub,
                (ds_padding,) * 4,
                value=-torch.inf,
            )
            pid_lb = DependenceSets.unfold_to_spec(
                pid_lb,
                padding=0,
                stride=ds_stride,
                kernel_size=kernel_size,
                input_dim=(c, input_h, input_w),
                spatial_idxs=abstract_shape.lb.coef.spatial_idxs,
            ).flatten(end_dim=2)
            pid_ub = DependenceSets.unfold_to_spec(
                pid_ub,
                padding=0,
                stride=ds_stride,
                kernel_size=kernel_size,
                input_dim=(c, input_h, input_w),
                spatial_idxs=abstract_shape.lb.coef.spatial_idxs,
            ).flatten(end_dim=2)
            pod_lb = DependenceSets.unfold_to(
                output_lb,
                abstract_shape.lb.coef,
            ).flatten(end_dim=1)
            pod_ub = DependenceSets.unfold_to(
                output_ub,
                abstract_shape.lb.coef,
            ).flatten(end_dim=1)
            ds_padding += self.padding[0]
            tight = (pod_lb == pod_ub).all(0).all(0)
            # In contrast to Tensor MaxPool, Queries do not have the same dominance structure as they are in different positions
            lb_coef = abstract_shape.lb.coef.sets.flatten(
                end_dim=1
            )  # .flatten(start_dim=1) # Flatten to BS * Query * C x H * W
            new_lb_coef = torch.zeros(
                (
                    abstract_shape.batch_size,
                    abstract_shape.lb.num_queries,
                    kernel_c,
                    kernel_size,
                    kernel_size,
                ),
                device=abstract_shape.device,
            ).flatten(
                end_dim=2
            )  # .flatten(start_dim=1)
            new_lb_bias = abstract_shape.lb.bias.clone().flatten()
            if abstract_shape.ub is not None:
                assert isinstance(abstract_shape.ub.coef, DependenceSets)
                ub_coef = abstract_shape.ub.coef.sets.flatten(end_dim=1)
                new_ub_coef = (
                    torch.zeros(
                        (
                            abstract_shape.batch_size,
                            abstract_shape.lb.num_queries,
                            kernel_c,
                            kernel_size,
                            kernel_size,
                        ),
                        device=abstract_shape.device,
                    )
                    .flatten(end_dim=2)
                    .flatten()
                )
                new_ub_bias = abstract_shape.ub.bias.clone().flatten()
            # assert not tight.any()
            new_lb_bias += (
                (lb_coef * pod_lb * tight.unsqueeze(0).unsqueeze(0)).flatten(1).sum(1)
            )
            if abstract_shape.ub is not None:
                new_ub_bias += (
                    (ub_coef * pod_lb * tight.unsqueeze(0).unsqueeze(0))
                    .flatten(1)
                    .sum(1)
                )
            device = abstract_shape.device
            offsets_in = torch.tensor(new_lb_coef.stride(), device=device)
            new_lb_coef = new_lb_coef.flatten()
            offsets_out = torch.tensor(lb_coef.flatten(end_dim=1).stride())
            lb_coef = lb_coef.flatten()
            # NOTE(review): ub_coef is only assigned when abstract_shape.ub
            # is not None; this unconditional flatten would raise NameError
            # otherwise — confirm callers always provide an upper bound here.
            ub_coef = ub_coef.flatten()
            bsqc_range = torch.arange(bs * num_queries * kernel_c, device=device)
            for y in torch.arange(kernel_h)[~tight.all(1)]:
                for x in torch.arange(kernel_w)[~tight[y]]:
                    assert not tight[y, x]
                    # Get the input_window
                    w_in_idy = y * self.stride[0]
                    w_in_idx = x * self.stride[1]
                    w_lb = pid_lb[
                        :,
                        w_in_idy : w_in_idy + self.kernel_size[0],
                        w_in_idx : w_in_idx + self.kernel_size[1],
                    ].flatten(start_dim=1)
                    w_ub = pid_ub[
                        :,
                        w_in_idy : w_in_idy + self.kernel_size[0],
                        w_in_idx : w_in_idx + self.kernel_size[1],
                    ].flatten(start_dim=1)
                    best_lb, best_lb_i = w_lb.max(1)
                    max_ub = w_ub.max(1)[0]
                    strict_dom = (
                        torch.sum((best_lb.unsqueeze(1) <= w_ub).float(), 1) == 1.0
                    ).view(
                        -1
                    )  # Strict domination check
                    # Index of respective lower bound element (w.r.t. unpadded input window)
                    in_idx = (best_lb_i % self.kernel_size[1]).flatten()
                    in_idy = torch.div(
                        best_lb_i, self.kernel_size[1], rounding_mode="trunc"
                    ).flatten()
                    tot_idx = in_idx + w_in_idx  # - self.padding[0]
                    tot_idx[
                        tot_idx < 0
                    ] = 0  # Can happen for input blocks that are fully in the padding. Those will be cut away
                    tot_idy = in_idy + w_in_idy  # - self.padding[1]
                    tot_idy[tot_idy < 0] = 0
                    in_idx = (
                        bsqc_range * offsets_in[0]
                        + tot_idy * offsets_in[1]
                        + tot_idx * offsets_in[2]
                    )
                    out_idx = (
                        bsqc_range * offsets_out[0]
                        + y * offsets_out[1]
                        + x * offsets_out[2]
                    )
                    # Selected actual input
                    new_lb_coef[in_idx] += lb_coef[out_idx] * (
                        (lb_coef[out_idx] >= 0) + (lb_coef[out_idx] < 0) * strict_dom
                    )
                    new_lb_bias += (
                        (
                            lb_coef[out_idx]
                            * (lb_coef[out_idx] < 0)
                            * (~strict_dom.unsqueeze(0))
                            * max_ub
                        )
                        .view(bs * num_queries, -1)
                        .nan_to_num()
                        .sum(-1)
                    )
                    if abstract_shape.ub is not None:
                        new_ub_coef[in_idx] += ub_coef[out_idx] * (
                            (ub_coef[out_idx] < 0)
                            + (ub_coef[out_idx] >= 0) * strict_dom
                        )
                        new_ub_bias += (
                            (
                                ub_coef[out_idx]
                                * (ub_coef[out_idx] >= 0)
                                * (~strict_dom)
                                * max_ub
                            )
                            .view(bs * num_queries, -1)
                            .nan_to_num()
                            .sum(-1)
                        )
            new_lb_coef_ds = DependenceSets(
                new_lb_coef.view(
                    abstract_shape.batch_size,
                    num_queries,
                    kernel_c,
                    kernel_size,
                    kernel_size,
                ),
                abstract_shape.lb.coef.spatial_idxs,
                abstract_shape.lb.coef.input_dim,
                ds_stride,
                ds_padding,
            )
            new_lb_coef_ds.handle_padding(self.input_dim)
            new_lb_form = AffineForm(new_lb_coef_ds, new_lb_bias.view(bs, num_queries))
            # Upper bound
            if abstract_shape.ub is None:
                new_ub_form = None
            else:
                new_ub_coef_ds = DependenceSets(
                    new_ub_coef.view(
                        abstract_shape.batch_size,
                        num_queries,
                        kernel_c,
                        kernel_size,
                        kernel_size,
                    ),
                    abstract_shape.lb.coef.spatial_idxs,
                    abstract_shape.lb.coef.input_dim,
                    ds_stride,
                    ds_padding,
                )
                new_ub_coef_ds.handle_padding(self.input_dim)
                new_ub_form = AffineForm(
                    new_ub_coef_ds, new_ub_bias.view(bs, num_queries)
                )
        else:
            assert False
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape
def propagate_interval(
self,
interval: Tuple[Tensor, Tensor],
use_existing_bounds: Optional[bool] = None,
subproblem_state: Optional[SubproblemState] = None,
activation_layer_only: bool = False,
set_input: bool = True,
set_output: bool = True,
) -> Tuple[Tensor, Tensor]:
interval_lb, interval_ub = interval
output_lb = F.max_pool2d(
interval_lb, self.kernel_size, self.stride, self.padding, self.dilation
)
output_ub = F.max_pool2d(
interval_ub, self.kernel_size, self.stride, self.padding, self.dilation
)
# assert (output_ub >= output_lb).all()
return output_lb, output_ub
    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Propagate an abstract element by delegating to its max_pool2d.

        Note: dilation is not forwarded here, unlike propagate_interval.
        """
        return abs_input.max_pool2d(self.kernel_size, self.stride, self.padding)
| 22,231 | 37.530329 | 128 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_bn2d.py | from __future__ import annotations
from typing import Any, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.utilities.dependence_sets import DependenceSets
from src.utilities.general import get_neg_pos_comp
from src.verification_subproblem import SubproblemState
class BatchNorm2d(nn.BatchNorm2d, AbstractModule):
    """Abstract 2D batch-norm layer (inference mode).

    Batch norm with frozen statistics is affine, so it is folded into a
    per-channel ``mult_term`` / ``add_term`` pair (registered as buffers
    in :meth:`from_concrete_module`):
    y = x * mult_term + add_term.
    """

    # Per-channel affine folding of the frozen batch-norm statistics.
    mult_term: Tensor
    add_term: Tensor
    weight: nn.Parameter
    running_var: Tensor
    running_mean: Tensor
    current_mean: Tensor
    current_var: Tensor
    def __init__(
        self,
        in_channels: int,
        input_dim: Tuple[int, ...],
        affine: bool = True,
    ):
        """Create the layer; batch norm preserves the input shape."""
        super(BatchNorm2d, self).__init__(in_channels, affine=affine) # type: ignore
        self.input_dim = input_dim
        self.output_dim = input_dim
        self.dependence_set_block = False
    @classmethod
    def from_concrete_module( # type: ignore[override] # (checked at runtime)
        cls, module: nn.BatchNorm2d, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> BatchNorm2d:
        """Build the abstract layer from a concrete ``nn.BatchNorm2d``.

        Copies running statistics (and weight/bias if affine), switches to
        eval mode, and precomputes the folded mult/add buffers using the
        concrete module's eps.
        """
        assert isinstance(module, nn.BatchNorm2d)
        abstract_layer = cls(
            module.num_features,
            input_dim,
            module.affine,
        )
        assert abstract_layer.running_var is not None
        assert module.running_var is not None
        abstract_layer.running_var.data = module.running_var.data
        assert abstract_layer.running_mean is not None
        assert module.running_mean is not None
        abstract_layer.running_mean.data = module.running_mean.data
        if module.affine:
            abstract_layer.weight.data = module.weight.data
            abstract_layer.bias.data = module.bias.data
        abstract_layer.track_running_stats = module.track_running_stats
        abstract_layer.training = False
        D = module.eps
        # mult = gamma / sqrt(var + eps); add = beta - mean * mult
        mult_term = (
            (
                (abstract_layer.weight if abstract_layer.affine else 1)
                / torch.sqrt(abstract_layer.running_var + D)
            )
            .detach()
            .requires_grad_(False)
        )
        abstract_layer.register_buffer("mult_term", mult_term)
        add_term = (
            (
                (abstract_layer.bias if abstract_layer.affine else 0)
                - abstract_layer.running_mean * abstract_layer.mult_term
            )
            .detach()
            .requires_grad_(False)
        )
        abstract_layer.register_buffer("add_term", add_term)
        return abstract_layer
    def backsubstitute(
        self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
    ) -> MN_BaB_Shape:
        """Backsubstitute lower (and optional upper) forms through the layer."""
        new_lb_form = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
        new_ub_form: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            new_ub_form = self._backsub_affine_form(abstract_shape.ub, abstract_shape)
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape
    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Pull one affine form through y = x * mult_term + add_term.

        The additive channel term is absorbed into the bias; coefficients
        are scaled per channel. Handles both dense Tensor coefficients and
        DependenceSets.
        """
        new_coef: Union[Tensor, DependenceSets]
        if isinstance(affine_form.coef, Tensor):
            new_bias = affine_form.bias + (
                (affine_form.coef.sum((3, 4)) * self.add_term).sum(2)
            )
            new_coef = affine_form.coef * self.mult_term.view(1, 1, -1, 1, 1)
        elif isinstance(affine_form.coef, DependenceSets):
            new_bias = affine_form.bias + (
                DependenceSets.unfold_to(
                    self.add_term.unsqueeze(0)
                    .unsqueeze(2)
                    .unsqueeze(3)
                    .expand(affine_form.bias.shape[0], *self.input_dim),
                    affine_form.coef,
                )
                * affine_form.coef.sets
            ).sum((2, 3, 4))
            new_coef = affine_form.coef.sets * self.mult_term.view(1, 1, -1, 1, 1)
            new_coef = DependenceSets(
                new_coef,
                affine_form.coef.spatial_idxs,
                affine_form.coef.input_dim,
                affine_form.coef.cstride,
                affine_form.coef.cpadding,
            )
        else:
            assert False, "AffineForm not recognized"
        return AffineForm(new_coef, new_bias)
    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Interval propagation: split mult_term into its negative and
        positive parts so each output bound picks the sound input bound."""
        interval_lb, interval_ub = interval
        neg_kernel, pos_kernel = get_neg_pos_comp(self.mult_term.view(1, -1, 1, 1))
        output_lb = (
            interval_lb * pos_kernel
            + interval_ub * neg_kernel
            + self.add_term.view(1, -1, 1, 1)
        )
        output_ub = (
            interval_lb * neg_kernel
            + interval_ub * pos_kernel
            + self.add_term.view(1, -1, 1, 1)
        )
        # D = self.eps
        # mult_term = ((self.weight if self.affine else 1) / torch.sqrt(self.running_var + D)).detach().requires_grad_(False)
        # add_term = ((self.bias if self.affine else 0) - self.running_mean * self.mult_term).detach().requires_grad_(False)
        #
        # assert ((self.mult_term-mult_term)==0).all()
        # assert ((self.add_term - add_term) == 0).all()
        #
        # assert (output_ub >= output_lb).all()
        return output_lb, output_ub
    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Propagate an abstract element by delegating to its batch_norm."""
        return abs_input.batch_norm(self)
| 6,136 | 34.680233 | 125 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_conv2d.py | from __future__ import annotations
from math import floor
from typing import Any, Optional, Tuple, Union
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_module import AbstractModule
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.utilities.config import BacksubstitutionConfig
from src.utilities.dependence_sets import DependenceSets
from src.utilities.general import get_neg_pos_comp
from src.verification_subproblem import SubproblemState
class Conv2d(nn.Conv2d, AbstractModule):
    """Abstract 2D convolution layer.

    Backsubstitution of linear bounds through a convolution is a
    transposed convolution with the same kernel; interval propagation
    splits the kernel into negative/positive parts.
    """

    in_channels: int
    out_channels: int
    kernel_size: Tuple[int, ...]  # Tuple[int, int] ?
    input_dim: Tuple[int, ...]
    stride: Tuple[int, ...]  # Tuple[int, int] ?
    padding: Tuple[int, ...]  # type: ignore[assignment] # checked at runtime below (Tuple[int, int] ?)
    dilation: Tuple[int, ...]  # Tuple[int, int] ?
    groups: int
    bias: Optional[Tensor]
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        input_dim: Tuple[int, ...],
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Union[int, Tuple[int, int]] = 0,
        dilation: Union[int, Tuple[int, int]] = 1,
        groups: int = 1,
        bias: bool = True,
    ):
        """Create the layer and derive ``output_dim`` from ``input_dim``.

        ``input_dim`` is (C, H, W); the spatial output size follows the
        standard conv formula floor((H + 2p - d*(k-1) - 1) / s + 1).
        """
        super(Conv2d, self).__init__(  # type: ignore # mypy issue 4335
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
        )
        self.input_dim = input_dim
        assert not isinstance(self.padding, str)
        output_height = floor(
            (
                input_dim[1]
                + 2 * self.padding[0]
                - self.dilation[0] * (self.kernel_size[0] - 1)
                - 1
            )
            / self.stride[0]
            + 1
        )
        output_width = floor(
            (
                input_dim[2]
                + 2 * self.padding[1]
                - self.dilation[1] * (self.kernel_size[1] - 1)
                - 1
            )
            / self.stride[1]
            + 1
        )
        self.output_dim = (out_channels, output_height, output_width)
        self.dependence_set_block = False
    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: nn.Conv2d, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> Conv2d:
        """Build the abstract layer from a concrete ``nn.Conv2d``,
        copying its weight (and bias if present)."""
        assert isinstance(module, nn.Conv2d)
        assert len(module.kernel_size) == 2
        assert len(module.stride) == 2
        assert len(module.padding) == 2
        assert len(module.dilation) == 2
        assert not isinstance(module.padding, str)
        abstract_layer = cls(
            module.in_channels,
            module.out_channels,
            module.kernel_size,  # type: ignore[arg-type]
            input_dim,
            module.stride,  # type: ignore[arg-type]
            module.padding,  # type: ignore[arg-type]
            module.dilation,  # type: ignore[arg-type]
            module.groups,
            module.bias is not None,
        )
        abstract_layer.weight.data = module.weight.data
        if module.bias is not None:
            assert abstract_layer.bias is not None
            abstract_layer.bias.data = module.bias.data
        return abstract_layer
    def backsubstitute(
        self, config: BacksubstitutionConfig, abstract_shape: MN_BaB_Shape
    ) -> MN_BaB_Shape:
        """Backsubstitute lower (and optional upper) forms through the layer."""
        new_lb_form = self._backsub_affine_form(abstract_shape.lb, abstract_shape)
        new_ub_form: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            new_ub_form = self._backsub_affine_form(abstract_shape.ub, abstract_shape)
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape
    def _backsub_affine_form(
        self, affine_form: AffineForm, abstract_shape: MN_BaB_Shape
    ) -> AffineForm:
        """Pull one affine form through the convolution.

        The bias contribution of the conv is absorbed into the form's
        bias; the coefficients are mapped back to input space via
        ``F.conv_transpose2d``. Handles both DependenceSets (with
        symmetric stride/padding, dilation 1 and a single group) and
        dense Tensor coefficients.
        """
        new_coef: Union[Tensor, DependenceSets]
        if abstract_shape.uses_dependence_sets():
            symmetric_stride = self.stride[0] == self.stride[1]
            symmetric_padding = self.padding[0] == self.padding[1]
            dilation_one = (
                self.dilation[0] == self.dilation[1] == 1 and self.dilation[0] == 1
            )
            group_one = self.groups == 1
            dependence_sets_assumptions = (
                symmetric_stride and symmetric_padding and dilation_one and group_one
            )
            assert dependence_sets_assumptions, "Dependence set assumptions violated."
            def backsubstitute_coef_and_bias(
                coef: DependenceSets, bias: Tensor
            ) -> Tuple[DependenceSets, Tensor]:
                # Absorb the conv bias, then map each dependence-set
                # kernel back to input space with a transposed conv.
                assert isinstance(affine_form.coef, DependenceSets)
                new_bias = bias + (
                    0
                    if self.bias is None
                    else (
                        DependenceSets.unfold_to(
                            self.bias.unsqueeze(0)
                            .unsqueeze(2)
                            .unsqueeze(3)
                            .expand(bias.shape[0], *self.output_dim),
                            affine_form.coef,
                        )
                        * affine_form.coef.sets
                    ).sum((2, 3, 4))
                )
                # [B*C*HW, c, d, d] -> [B*C*HW, c', d', d']
                new_coef_sets = F.conv_transpose2d(
                    coef.sets.flatten(end_dim=1), self.weight, stride=self.stride
                )
                assert not isinstance(self.padding, str)
                new_coef = DependenceSets(
                    new_coef_sets.view(*coef.sets.shape[:2], *new_coef_sets.shape[1:]),
                    coef.spatial_idxs,
                    coef.input_dim,
                    coef.cstride * self.stride[0],
                    coef.cpadding * self.stride[0] + self.padding[0],
                )
                return new_coef, new_bias
            assert isinstance(affine_form.coef, DependenceSets)
            new_coef, new_bias = backsubstitute_coef_and_bias(
                affine_form.coef, affine_form.bias
            )
            new_coef.handle_padding(self.input_dim)
        else:
            assert isinstance(affine_form.coef, Tensor)
            assert not isinstance(self.padding, str)
            kernel_wh = self.weight.shape[-2:]
            # Output padding recovers positions lost to striding so the
            # transposed conv reproduces the exact input spatial size.
            # NOTE(review): w_padding is computed from input_dim[1]
            # (height) and h_padding from input_dim[2] (width) — names
            # look swapped but the (w_padding, h_padding) order matches
            # the axes used; confirm for non-square inputs.
            w_padding = (
                self.input_dim[1]
                + 2 * self.padding[0]
                - 1
                - self.dilation[0] * (kernel_wh[0] - 1)
            ) % self.stride[0]
            h_padding = (
                self.input_dim[2]
                + 2 * self.padding[1]
                - 1
                - self.dilation[1] * (kernel_wh[1] - 1)
            ) % self.stride[1]
            output_padding = (w_padding, h_padding)
            sz = affine_form.coef.shape
            # process reference
            new_bias = affine_form.bias + (
                0
                if self.bias is None
                else (affine_form.coef.sum((3, 4)) * self.bias).sum(2)
            )
            new_coef = F.conv_transpose2d(
                affine_form.coef.view((sz[0] * sz[1], *sz[2:])),
                self.weight,
                None,
                self.stride,
                self.padding,
                output_padding,
                self.groups,
                self.dilation,
            )
            # F.pad(new_x_l_coef, (0, 0, w_padding, h_padding), "constant", 0)
            assert isinstance(new_coef, Tensor)
            new_coef = new_coef.view((sz[0], sz[1], *new_coef.shape[1:]))
        return AffineForm(new_coef, new_bias)
    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Interval propagation: split the kernel into negative/positive
        parts so each output bound combines the sound input bounds."""
        interval_lb, interval_ub = interval
        neg_kernel, pos_kernel = get_neg_pos_comp(self.weight)
        def conv_with_kernel_and_bias(
            input: Tensor, kernel: Tensor, bias: Optional[Tensor]
        ) -> Tensor:
            # Convolution with this layer's hyperparameters but a custom kernel.
            return F.conv2d(
                input=input,
                weight=kernel,
                bias=bias,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups,
            )
        output_lb = conv_with_kernel_and_bias(
            interval_lb, pos_kernel, self.bias
        ) + conv_with_kernel_and_bias(interval_ub, neg_kernel, None)
        output_ub = conv_with_kernel_and_bias(
            interval_ub, pos_kernel, self.bias
        ) + conv_with_kernel_and_bias(interval_lb, neg_kernel, None)
        # assert (output_ub >= output_lb).all()
        return output_lb, output_ub
    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Propagate an abstract element by delegating to its conv2d.

        Only symmetric stride/padding/dilation are supported here, since
        the AbstractElement API takes scalars.
        """
        assert all([x == self.stride[0] for x in self.stride])
        assert all([x == self.padding[0] for x in self.padding])
        assert all([x == self.dilation[0] for x in self.dilation])
        return abs_input.conv2d(
            self.weight,
            self.bias,
            self.stride[0],
            self.padding[0],
            self.dilation[0],
            self.groups,
        )
| 9,757 | 34.613139 | 103 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_relu.py | from __future__ import annotations
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_conv2d import Conv2d
from src.abstract_layers.abstract_module import AbstractModule
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.tags import (
LayerTag,
ParameterTag,
key_alpha_relu,
key_beta,
key_prima,
key_prima_lb,
key_prima_ub,
layer_tag,
)
from src.utilities.config import (
BacksubstitutionConfig,
ParameterSharing,
PrimaHyperparameters,
ReLUAlphaInitMethod,
)
from src.utilities.dependence_sets import DependenceSets
from src.utilities.layer_types import is_layer_of_type
from src.utilities.leaky_gradient_maximum_function import LeakyGradientMaximumFunction
from src.utilities.leaky_gradient_minimum_function import LeakyGradientMinimumFunction
from src.utilities.prima_interface import ActivationType, get_prima_constraints
from src.verification_subproblem import SubproblemState
EPS = 1e-15
class ReLU(nn.ReLU, AbstractModule):
    """Abstract ReLU layer.

    Supports interval propagation, abstract-element propagation, and
    DeepPoly-style backsubstitution with optimizable lower slopes (alpha),
    optional PRIMA multi-neuron constraints, and branch-and-bound split
    constraints (beta parameters).
    """
    def __init__(self, dim: Tuple[int, ...]) -> None:
        super(ReLU, self).__init__()
        self.output_dim = dim
        # ReLU is elementwise, so it never blocks dependence-set propagation.
        self.dependence_set_block = False
    @classmethod
    def from_concrete_module(  # type: ignore[override] # (checked at runtime)
        cls, module: nn.ReLU, input_dim: Tuple[int, ...], **kwargs: Any
    ) -> ReLU:
        """Build an abstract ReLU from a concrete nn.ReLU (shape-preserving)."""
        assert isinstance(module, nn.ReLU)
        return cls(input_dim)
    def update_input_bounds(
        self, input_bounds: Tuple[Tensor, Tensor], check_feasibility: bool = True
    ) -> None:
        """Store input bounds after reshaping them to (batch, *output_dim)."""
        input_bounds_shape_adjusted = (
            input_bounds[0].view(-1, *self.output_dim),
            input_bounds[1].view(-1, *self.output_dim),
        )
        super(ReLU, self).update_input_bounds(
            input_bounds_shape_adjusted, check_feasibility=check_feasibility
        )
    def backsubstitute(
        self,
        config: BacksubstitutionConfig,
        abstract_shape: MN_BaB_Shape,
        intermediate_bounds_callback: Optional[
            Callable[[Tensor], Tuple[Tensor, Tensor]]
        ] = None,
        prev_layer: Optional[AbstractModule] = None,
    ) -> MN_BaB_Shape:
        """Backsubstitute the abstract shape through this ReLU.

        Replaces the ReLU by linear lower/upper relaxations (with optional
        PRIMA output-variable terms added to the shape in-place) and returns
        the updated shape. Requires input bounds to have been set.
        """
        if self.input_bounds is None:
            raise RuntimeError("Cannot backsubstitute if bounds have not been set.")
        # Shared computation of input bounds and prima coefficients
        if self.optim_input_bounds is not None:
            # Tighten stored bounds with optimized bounds; leaky min/max keep
            # gradients flowing to both operands.
            optim_lb = self.optim_input_bounds[0].view(1, *self.output_dim)
            optim_ub = self.optim_input_bounds[1].view(1, *self.output_dim)
            input_lb = LeakyGradientMaximumFunction.apply(
                self.input_bounds[0], optim_lb.broadcast_to(self.input_bounds[0].shape)
            )
            input_ub = LeakyGradientMinimumFunction.apply(
                self.input_bounds[1], optim_ub.broadcast_to(self.input_bounds[1].shape)
            )
            input_bounds = (input_lb, input_ub)
        else:
            input_bounds = self.input_bounds
        prima_coefs = self._get_prima_coefficients(
            config, abstract_shape, intermediate_bounds_callback
        )
        prima_constraints_available = (
            prima_coefs is not None and prima_coefs[0].shape[2] > 0
        )
        # NOTE: This changes the abstract shape in-place, which affects all
        # further .matmul or .elementwise calls on it. Thus it needs to run
        # before the ._backsub_affine_form() calls.
        if prima_constraints_available:
            assert prima_coefs is not None
            assert not abstract_shape.uses_dependence_sets()
            (
                prima_output_coefs,
                prima_input_coefs,
                prima_const_coefs,
            ) = prima_coefs
            prima_parameter_shape = (
                abstract_shape.batch_size,
                prima_output_coefs.shape[2],
                1,
            )
            prima_lb_parameters = abstract_shape.get_parameters(
                key_prima_lb, layer_tag(self), prima_parameter_shape
            )
            abstract_shape.lb.coef += self._multiply_prima_coefs_and_parameters(
                prima_output_coefs, prima_lb_parameters
            )
            if abstract_shape.ub is not None:
                assert isinstance(abstract_shape.ub.coef, Tensor)
                prima_ub_parameters = abstract_shape.get_parameters(
                    key_prima_ub, layer_tag(self), prima_parameter_shape
                )
                abstract_shape.ub.coef -= self._multiply_prima_coefs_and_parameters(
                    prima_output_coefs, prima_ub_parameters
                )
        split_constraints = abstract_shape.get_split_constraints_for_relu(
            layer_tag(self), input_bounds
        )
        lb_intercept, ub_intercept = self._get_approximation_intercepts(
            input_bounds, split_constraints
        )
        new_lb_bias, new_ub_bias = abstract_shape._matmul_of_coef_and_interval(
            lb_intercept.unsqueeze(1), ub_intercept.unsqueeze(1)  # add query dimension
        )
        ub_slope = self._get_upper_approximation_slopes(
            config, input_bounds, split_constraints
        )
        # Backsub
        new_lb_form = self._backsub_affine_form(
            abstract_shape.lb,
            input_bounds,
            (new_lb_bias, new_ub_bias),
            ub_slope,
            prima_coefs,
            split_constraints,
            compute_upper_bound=False,
            abstract_shape=abstract_shape,
            config=config,
            prev_layer=prev_layer,
        )
        new_ub_form: Optional[AffineForm] = None
        if abstract_shape.ub is not None:
            new_ub_form = self._backsub_affine_form(
                abstract_shape.ub,
                input_bounds,
                (new_lb_bias, new_ub_bias),
                ub_slope,
                prima_coefs,
                split_constraints,
                compute_upper_bound=True,
                abstract_shape=abstract_shape,
                config=config,
                prev_layer=prev_layer,
            )
        abstract_shape.update_bounds(new_lb_form, new_ub_form)
        return abstract_shape
    def _backsub_affine_form(
        self,
        affine_form: AffineForm,
        input_bounds: Tuple[Tensor, Tensor],
        bias: Tuple[Tensor, Optional[Tensor]],
        ub_slope: Tensor,
        prima_coefs: Optional[Tuple[Tensor, Tensor, Tensor]],
        split_constraints: Optional[Tensor],
        compute_upper_bound: bool,
        abstract_shape: MN_BaB_Shape,
        config: BacksubstitutionConfig,
        prev_layer: Optional[AbstractModule],
    ) -> AffineForm:
        """Backsubstitute one affine form (lower or upper) through the ReLU.

        Combines the linear relaxation slopes/intercepts with PRIMA input
        constraints and split (beta) contributions; signs flip depending on
        whether an upper or a lower bound is being computed.
        """
        # Get parameters
        new_lb_bias, new_ub_bias = bias
        new_bias = new_ub_bias if compute_upper_bound else new_lb_bias
        lb_slope = self._get_lower_approximation_slopes(
            config,
            input_bounds,
            abstract_shape,
            key_alpha_relu(compute_upper_bound),
            split_constraints,
        )
        # Handle bias
        new_bias += affine_form.bias
        # Handle coef
        (
            new_lb_coef_tensor,
            new_ub_coef_tensor,
        ) = abstract_shape._elementwise_mul_of_coef_and_interval(lb_slope, ub_slope)
        new_coef_tensor = (
            new_ub_coef_tensor if compute_upper_bound else new_lb_coef_tensor
        )
        assert new_coef_tensor is not None
        # Handle prima contribution
        if prima_coefs is not None and prima_coefs[0].shape[2] > 0:
            (
                prima_output_coefs,
                prima_input_coefs,
                prima_const_coefs,
            ) = prima_coefs
            prima_parameter_shape = (
                abstract_shape.batch_size,
                prima_output_coefs.shape[2],
                1,
            )
            prima_parameters = abstract_shape.get_parameters(
                key_prima(compute_upper_bound), layer_tag(self), prima_parameter_shape
            )
            if compute_upper_bound:
                # sub prima const constraints to bias
                new_bias -= prima_const_coefs.bmm(prima_parameters).squeeze(-1)
                # sub prima input constraints to coefs
                new_coef_tensor -= self._multiply_prima_coefs_and_parameters(
                    prima_input_coefs, prima_parameters
                )
            else:
                # add prima input constraints to coefs
                new_bias += prima_const_coefs.bmm(prima_parameters).squeeze(-1)
                # add prima input constraints to coefs
                new_coef_tensor += self._multiply_prima_coefs_and_parameters(
                    prima_input_coefs, prima_parameters
                )
        # Handle split constraints
        if split_constraints is not None:
            # add betas, [B, 1, c, h, w]
            beta_contrib_shape = (abstract_shape.batch_size, 1, *self.output_dim)
            beta = abstract_shape.get_parameters(
                key_beta(compute_upper_bound), layer_tag(self), split_constraints.shape
            )
            beta_contrib = (beta * split_constraints).view(beta_contrib_shape)
            if compute_upper_bound:
                beta_contrib *= -1
            if abstract_shape.uses_dependence_sets():
                assert isinstance(affine_form.coef, DependenceSets)
                new_coef_tensor += DependenceSets.unfold_to(
                    beta_contrib, affine_form.coef
                )
            else:
                new_coef_tensor += beta_contrib
        # Create output
        new_coef: Union[Tensor, DependenceSets]
        if abstract_shape.uses_dependence_sets():
            assert isinstance(affine_form.coef, DependenceSets)
            # Wrap the new coefficients in a DependenceSets with the same
            # spatial metadata as the incoming form.
            new_coef = DependenceSets(
                new_coef_tensor,
                affine_form.coef.spatial_idxs,
                affine_form.coef.input_dim,
                affine_form.coef.cstride,
                affine_form.coef.cpadding,
            )
        else:
            new_coef = new_coef_tensor
        return AffineForm(new_coef, new_bias)
    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Interval propagation: clamp both bounds at 0; additionally refine
        any branch-and-bound split constraints with the new interval."""
        output_lb, output_ub = interval[0].clamp(min=0), interval[1].clamp(min=0)
        if (
            subproblem_state is not None
            and subproblem_state.constraints.split_state is not None
        ):
            subproblem_state.constraints.split_state.refine_split_constraints_for_relu(
                layer_tag(self), interval
            )
        # assert (output_ub >= output_lb).all()
        return output_lb, output_ub
    def propagate_abstract_element(
        self,
        abs_input: AbstractElement,
        use_existing_bounds: Optional[bool] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> AbstractElement:
        """Apply the abstract ReLU transformer (first element of the tuple
        returned by the abstract domain's relu())."""
        return abs_input.relu()[0]
    def _compute_new_prima_coefficients(
        self,
        prima_hyperparameters: PrimaHyperparameters,
        batch_size: int,
        intermediate_bounds_callback: Callable[[Tensor], Tuple[Tensor, Tensor]],
        device: torch.device,
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """Compute PRIMA multi-neuron constraint coefficients on CPU and move
        them to `device`. Returns (output_var, input_var, const) coefficient
        tensors of shape (batch, prod(output_dim)|1, n_constraints)."""
        assert self.input_bounds
        # lb = self.input_bounds[0].detach().cpu()
        # ub = self.input_bounds[1].detach().cpu()
        # print(f"lb_avg_improve: {torch.mean(self.optim_input_bounds[0].reshape(self.input_bounds[0].shape) - self.input_bounds[0])} ub_avg_improve: {-1* torch.mean(self.optim_input_bounds[1].reshape(self.input_bounds[1].shape) - self.input_bounds[1])}")
        if self.optim_input_bounds is None:
            lb = self.input_bounds[0].detach().cpu()
            ub = self.input_bounds[1].detach().cpu()
        else:
            # Use the elementwise-tighter of stored and optimized bounds.
            lb = (
                torch.max(
                    self.optim_input_bounds[0].reshape(self.input_bounds[0].shape),
                    self.input_bounds[0],
                )
                .detach()
                .cpu()
            )
            ub = (
                torch.min(
                    self.optim_input_bounds[1].reshape(self.input_bounds[1].shape),
                    self.input_bounds[1],
                )
                .detach()
                .cpu()
            )
        output_var_coefs, input_var_coefs, const_coefs = get_prima_constraints(
            lb,
            ub,
            ActivationType.ReLU,
            prima_hyperparameters,
            intermediate_bounds_callback,
            batch_size,
            self.output_dim,
        )
        n_prima_constraints = output_var_coefs.shape[2]
        assert output_var_coefs.shape == (
            batch_size,
            np.prod(self.output_dim),
            n_prima_constraints,
        )
        assert input_var_coefs.shape == (
            batch_size,
            np.prod(self.output_dim),
            n_prima_constraints,
        )
        assert const_coefs.shape == (
            batch_size,
            1,
            n_prima_constraints,
        )
        return (
            output_var_coefs.to(device),
            input_var_coefs.to(device),
            const_coefs.to(device),
        )
    def _get_prima_coefficients(  # TODO: move some of this logic into verification_subproblem
        self,
        config: BacksubstitutionConfig,
        abstract_shape: MN_BaB_Shape,
        intermediate_bounds_callback: Optional[
            Callable[[Tensor], Tuple[Tensor, Tensor]]
        ],
    ) -> Optional[Tuple[Tensor, Tensor, Tensor]]:
        """Return cached PRIMA coefficients for this layer, computing and
        caching them if necessary; None if PRIMA is disabled or no callback
        to obtain intermediate bounds is available."""
        if config.prima_hyperparameters is None:
            return None
        assert abstract_shape.subproblem_state is not None
        assert abstract_shape.subproblem_state.constraints.prima_constraints is not None
        prima_coefficients = (
            abstract_shape.subproblem_state.constraints.prima_constraints.prima_coefficients
        )
        if layer_tag(self) in prima_coefficients:
            return prima_coefficients[layer_tag(self)]
        if (
            intermediate_bounds_callback is None
        ):  # TODO: this seems like a bad way to configure this
            return None
        new_prima_coefficients = self._compute_new_prima_coefficients(
            config.prima_hyperparameters,
            abstract_shape.batch_size,
            intermediate_bounds_callback,
            abstract_shape.device,
        )
        prima_coefficients[layer_tag(self)] = new_prima_coefficients
        return new_prima_coefficients
    def _handle_reduced_parameter_sharing_for_lower_approximation_slopes(
        self,
        config: BacksubstitutionConfig,
        abstract_shape: MN_BaB_Shape,
    ) -> Tuple[
        Callable[
            [Tensor], Tensor
        ],  # lb_slope -> expanded_lb_slope (create appropriate number of copies of parameters)
        Callable[
            [Tensor], Tensor
        ],  # parameters -> selected parameters (select parameters corresponding to each active query)
    ]:
        """Return a pair of functions implementing the configured parameter
        sharing policy (share across all queries / no sharing / share within
        each conv channel) for the alpha (lower slope) parameters."""
        # TODO: do not create parameters for stable neurons in the first place?
        if (
            config.parameter_sharing_config is None
            or layer_tag(self)
            not in config.layer_ids_for_which_to_reduce_parameter_sharing
        ):
            # Default: fully shared parameters, identity transforms.
            return lambda lb_slope: lb_slope, lambda params: params
        assert abstract_shape.subproblem_state is not None
        def filter_params(params: Tensor) -> Tensor:
            # Drop parameters corresponding to queries that became stable.
            unstable_queries_in_starting_layer = (
                abstract_shape.get_unstable_queries_in_starting_layer()
            )
            if unstable_queries_in_starting_layer is not None:
                params = params[:, unstable_queries_in_starting_layer, :]
            return params
        for (
            layer_type,
            sharing_config,
        ) in config.parameter_sharing_config.entries:
            if abstract_shape.query_prev_layer is not None and is_layer_of_type(
                abstract_shape.query_prev_layer, layer_type
            ):
                if sharing_config == ParameterSharing.same_layer:
                    def keep_slope(lb_slope: Tensor) -> Tensor:
                        assert lb_slope.shape[1] == 1
                        return lb_slope  # default behavior, keep query dimension at 1 to share among all queries
                    def keep_params(params: Tensor) -> Tensor:
                        return params
                    return keep_slope, keep_params
                if sharing_config == ParameterSharing.none:
                    def expand_slope(lb_slope: Tensor) -> Tensor:
                        assert lb_slope.shape[1] == 1
                        repeats = abstract_shape.total_num_queries_in_starting_layer
                        return lb_slope.repeat(
                            1, repeats, *([1] * len(lb_slope.shape[2:]))
                        )
                    return expand_slope, filter_params
                if sharing_config == ParameterSharing.in_channel:
                    query_prev_layer_any = abstract_shape.query_prev_layer
                    assert isinstance(query_prev_layer_any, Conv2d)
                    query_prev_layer = query_prev_layer_any
                    num_channels = query_prev_layer.out_channels
                    num_queries = abstract_shape.total_num_queries_in_starting_layer
                    assert num_queries % num_channels == 0
                    def expand_slope(lb_slope: Tensor) -> Tensor:
                        assert lb_slope.shape[1] == 1
                        # create one set of parameters for each channel:
                        return lb_slope.repeat(
                            1, num_channels, *([1] * len(lb_slope.shape[2:]))
                        )
                    def select_params(params: Tensor) -> Tensor:
                        assert params.shape[0] == abstract_shape.batch_size
                        batch_size = abstract_shape.batch_size
                        # add dimension to expand:
                        resized = params.view(
                            batch_size, num_channels, 1, *params.shape[2:]
                        )
                        # share parameters within each channel: Note this does not allocate additional memory
                        replicated = resized.expand(
                            batch_size,
                            num_channels,
                            num_queries // num_channels,
                            *params.shape[2:],
                        )
                        # remove additional dimension:
                        params = replicated.reshape(
                            batch_size, num_queries, *params.shape[2:]
                        )
                        # filter out parameters corresponding to stable queries:
                        return filter_params(params)
                    return expand_slope, select_params
        # no config found for current layer type
        return lambda lb_slope: lb_slope, lambda params: params
    def _get_lower_approximation_slopes(
        self,
        config: BacksubstitutionConfig,
        bounds: Tuple[Tensor, Tensor],
        abstract_shape: Optional[MN_BaB_Shape] = None,
        parameter_key: Optional[ParameterTag] = None,
        split_constraints: Optional[Tensor] = None,
    ) -> Tensor:
        """Return lower relaxation slopes, shape (batch, queries|1, *dim).

        Defaults to the minimum-area choice (0 if |lb| >= ub, else 1); when
        optimizable parameters are enabled the slopes are fetched (and lazily
        initialized) from the abstract shape's parameter store.
        """
        input_lb, input_ub = bounds
        lb_slope = torch.where(
            input_ub <= -input_lb,
            torch.zeros_like(input_lb),
            torch.ones_like(input_lb),
        ).unsqueeze(
            1
        )  # add query dimension
        if (
            abstract_shape is not None
            and abstract_shape.subproblem_state is not None
            and abstract_shape.subproblem_state.parameters.use_params
        ):
            assert parameter_key is not None
            (
                expand_slope,
                select_params,
            ) = self._handle_reduced_parameter_sharing_for_lower_approximation_slopes(
                config,
                abstract_shape,
            )
            def make_default(device: torch.device) -> Tensor:
                # Lazy initializer for alpha parameters, honoring the
                # configured init method and sharing policy.
                if config.relu_alpha_init_method == ReLUAlphaInitMethod.minimum_area:
                    default = lb_slope
                elif config.relu_alpha_init_method == ReLUAlphaInitMethod.one_half:
                    default = 0.5 * torch.ones_like(lb_slope)
                else:
                    raise RuntimeError("Unknown init method for ReLU alpha parameters")
                assert abstract_shape is not None
                default = expand_slope(default)
                return default.to(
                    device
                )  # TODO: it's created on 'cuda:0' and moved to 'cuda' here, why?
            lb_slope = abstract_shape.get_parameters(
                parameter_key,
                layer_tag(self),
                make_default_parameters=make_default,
            )
            lb_slope = self._set_slopes_of_stable_neurons(
                bounds, lb_slope, split_constraints
            )
            lb_slope = select_params(lb_slope)
            assert (
                lb_slope.shape[1] == 1
                or lb_slope.shape[1] == abstract_shape.num_queries
            ), "{} {} {}".format(
                lb_slope.shape, abstract_shape.query_id, abstract_shape.num_queries
            )
        else:
            lb_slope = self._set_slopes_of_stable_neurons(
                bounds, lb_slope, split_constraints
            )
        return lb_slope
    def _get_upper_approximation_slopes(
        self,
        config: BacksubstitutionConfig,
        bounds: Tuple[Tensor, Tensor],
        split_constraints: Optional[Tensor] = None,
    ) -> Tensor:
        """Return the DeepPoly upper relaxation slope ub / (ub - lb), with
        stable neurons forced to slope 0 (inactive) or 1 (active)."""
        input_lb, input_ub = bounds
        ub_slope = input_ub / (input_ub - input_lb + EPS)
        ub_slope = ub_slope.unsqueeze(1)  # add query dimension
        ub_slope = self._set_slopes_of_stable_neurons(
            bounds, ub_slope, split_constraints
        )
        return ub_slope
    def _set_slopes_of_stable_neurons(
        self,
        bounds: Tuple[Tensor, Tensor],
        slopes: Tensor,
        split_constraints: Optional[Tensor],
    ) -> Tensor:
        """Force slopes of stable neurons to their exact values (0 for
        inactive, 1 for active); split constraints (+1/-1) count as stable."""
        input_lb, input_ub = bounds
        inactive_relu_mask = (input_ub < 0).unsqueeze(1)
        active_relu_mask = (input_lb > 0).unsqueeze(1)
        if split_constraints is not None:
            inactive_relu_mask = inactive_relu_mask | (
                split_constraints == 1
            ).unsqueeze(1)
            active_relu_mask = active_relu_mask | (split_constraints == -1).unsqueeze(1)
        # slope of stable inactive ReLU is 0
        slopes = torch.where(inactive_relu_mask, torch.zeros_like(slopes), slopes)
        # slope of stable active ReLU is 1
        slopes = torch.where(active_relu_mask, torch.ones_like(slopes), slopes)
        return slopes
    def _get_approximation_intercepts(
        self,
        bounds: Tuple[Tensor, Tensor],
        split_constraints: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Tensor]:
        """Return (lower, upper) relaxation intercepts; the lower intercept is
        always 0, the upper one is -lb*ub/(ub-lb) and 0 for stable neurons."""
        input_lb, input_ub = bounds
        lb_intercept = torch.zeros_like(input_lb)
        ub_intercept = -input_lb * input_ub / (input_ub - input_lb + EPS)
        ub_intercept = self._set_intercepts_of_stable_neurons(
            bounds, ub_intercept, split_constraints
        )
        return lb_intercept, ub_intercept
    def _set_intercepts_of_stable_neurons(
        self,
        bounds: Tuple[Tensor, Tensor],
        ub_intercept: Tensor,
        split_constraints: Optional[Tensor],
    ) -> Tensor:
        """Zero the upper intercept of neurons that are stable (by bounds or
        by split constraints) — their relaxation is exact."""
        input_lb, input_ub = bounds
        stable_node_mask = (input_ub < 0) | (input_lb > 0)
        if split_constraints is not None:
            stable_node_mask = stable_node_mask | (split_constraints != 0)
        return torch.where(
            stable_node_mask,
            torch.zeros_like(ub_intercept),
            ub_intercept,
        )
    def _multiply_prima_coefs_and_parameters(  # TODO: move this out
        self, prima_coefs: Tensor, prima_params: Tensor
    ) -> Tensor:
        """Batched matmul of PRIMA coefficients with their multipliers,
        reshaped to (batch, 1, *output_dim)."""
        batch_size = prima_coefs.shape[0]
        n_prima_constraints = prima_coefs.shape[2]
        assert prima_params.shape == (batch_size, n_prima_constraints, 1)
        temp = prima_coefs.bmm(prima_params)
        return temp.view(batch_size, 1, *self.output_dim)
    def get_activation_layer_ids(
        self, act_layer_ids: Optional[List[LayerTag]] = None
    ) -> List[LayerTag]:
        """Append this layer's tag to the list of activation-layer ids."""
        if act_layer_ids is None:
            act_layer_ids = []
        act_layer_ids.append(layer_tag(self))
        return act_layer_ids
    def get_relu_layer_ids(
        self, act_layer_ids: Optional[List[LayerTag]] = None
    ) -> List[LayerTag]:
        """Append this layer's tag to the list of ReLU-layer ids."""
        if act_layer_ids is None:
            act_layer_ids = []
        act_layer_ids.append(layer_tag(self))
        return act_layer_ids
    @classmethod
    def get_split_points(cls, lb: float, ub: float) -> float:
        """Branch-and-bound split point for a ReLU is always at 0."""
        return 0.0
| 25,442 | 35.873913 | 255 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_layers/abstract_sequential.py | from __future__ import annotations
import time
from collections import OrderedDict
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement
from src.abstract_layers.abstract_container_module import (
AbstractContainerModule,
ActivationLayer,
ActivationLayers,
)
from src.abstract_layers.abstract_identity import Identity
from src.abstract_layers.abstract_max_pool2d import MaxPool2d
from src.abstract_layers.abstract_module import AbstractModule
from src.abstract_layers.abstract_relu import ReLU
from src.abstract_layers.abstract_sigmoid import Sigmoid
from src.abstract_layers.abstract_tanh import Tanh
from src.exceptions.invalid_bounds import InvalidBoundsError
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.tags import (
LayerTag,
QueryTag,
layer_from_query_tag,
layer_tag,
query_tag,
)
# from src.state.tags import query_tag_for_neuron
from src.utilities.config import BacksubstitutionConfig
from src.utilities.dependence_sets import DependenceSets
# from src.utilities.config import ParameterSharing
# from src.utilities.layer_types import is_layer_of_type
from src.utilities.leaky_gradient_maximum_function import LeakyGradientMaximumFunction
from src.utilities.leaky_gradient_minimum_function import LeakyGradientMinimumFunction
from src.utilities.queries import (
QueryCoef,
get_output_bound_initial_query_coef_iterator,
num_queries,
)
from src.verification_subproblem import SubproblemState
class Sequential(AbstractContainerModule):
def __init__(
self,
layers: Iterable[AbstractModule],
) -> None:
super(Sequential, self).__init__()
self.layers = nn.ModuleList(layers)
# Build during backsubstitution
# self.layer_id_to_layer: Dict[LayerTag, ActivationLayer] = {}
*__, last_layer = layers
self.output_dim = last_layer.output_dim
self.set_dependence_set_applicability()
@classmethod
def from_concrete_module( # type: ignore[override] # (checked at runtime)
cls,
module: nn.Sequential,
input_dim: Tuple[int, ...],
**kwargs: Any,
) -> Sequential:
assert isinstance(module, nn.Sequential)
assert "concrete_to_abstract" in kwargs
concrete_to_abstract_mapping = kwargs["concrete_to_abstract"]
abstract_layers: List[AbstractModule] = []
for i, layer in enumerate(module.children()):
if i == 0:
current_layer_input_dim = input_dim
else:
current_layer_input_dim = abstract_layers[-1].output_dim
abstract_type = concrete_to_abstract_mapping(type(layer))
abstract_layers.append(
abstract_type.from_concrete_module(
layer,
current_layer_input_dim,
**kwargs,
)
)
if len(abstract_layers) == 0:
abstract_layers.append(Identity(input_dim=input_dim))
return cls(abstract_layers)
def forward(self, input: Tensor) -> Tensor:
for layer in self.layers:
input = layer(input)
return input
def reset_input_bounds(self) -> None:
super(Sequential, self).reset_input_bounds()
for layer in self.layers:
layer.reset_input_bounds()
    def reset_optim_input_bounds(self) -> None:
        """Clear cached optimized input bounds on every child layer.

        NOTE(review): the super call resets the *regular* input bounds,
        identical to reset_input_bounds above — this looks like a copy-paste
        slip (expected reset_optim_input_bounds); confirm against the parent
        class before changing.
        """
        super(Sequential, self).reset_input_bounds()
        for layer in self.layers:
            layer.reset_optim_input_bounds()
def reset_output_bounds(self) -> None:
super(Sequential, self).reset_output_bounds()
for layer in self.layers:
layer.reset_output_bounds()
def set_intermediate_input_bounds(
self, intermediate_bounds: OrderedDict[LayerTag, Tuple[Tensor, Tensor]]
) -> None:
for layer in self.layers:
if layer_tag(layer) in intermediate_bounds:
layer.update_input_bounds(
intermediate_bounds[layer_tag(layer)],
check_feasibility=False,
)
if isinstance(layer, AbstractContainerModule):
layer.set_intermediate_input_bounds(intermediate_bounds)
    def propagate_interval(
        self,
        interval: Tuple[Tensor, Tensor],
        use_existing_bounds: Optional[bool] = None,
        subproblem_state: Optional[SubproblemState] = None,
        activation_layer_only: bool = False,
        set_input: bool = True,
        set_output: bool = True,
    ) -> Tuple[Tensor, Tensor]:
        """Propagate an interval (lb, ub) through all child layers in order.

        When `use_existing_bounds` is set, the incoming interval is tightened
        with any cached (and optimized) bounds before each layer. Input/output
        bounds are recorded on a layer only if the corresponding set_* flag is
        set and the layer is an activation layer or `activation_layer_only`
        is False.

        NOTE(review): `subproblem_state` is accepted but not forwarded to the
        child layers' propagate_interval — confirm whether that is intended.
        """
        for layer in self.layers:
            if use_existing_bounds and layer.input_bounds is not None:
                # Tighten with cached bounds (elementwise max/min).
                lb = torch.max(interval[0], layer.input_bounds[0])
                ub = torch.min(interval[1], layer.input_bounds[1])
                interval = (lb, ub)
            if use_existing_bounds and layer.optim_input_bounds is not None:
                lb = torch.max(interval[0], layer.optim_input_bounds[0])
                ub = torch.min(interval[1], layer.optim_input_bounds[1])
                interval = (lb, ub)
            if set_input and (
                type(layer) in ActivationLayers or not activation_layer_only
            ):
                layer.update_input_bounds(interval, check_feasibility=False)
            interval = layer.propagate_interval(
                interval,
                use_existing_bounds,
                activation_layer_only=activation_layer_only,
                set_input=set_input,
                set_output=set_output,
            )
            assert (interval[0] <= interval[1]).all()
            if set_output and (
                type(layer) in ActivationLayers or not activation_layer_only
            ):
                layer.update_output_bounds(interval)
        return interval
def propagate_abstract_element(
self,
abs_input: AbstractElement,
use_existing_bounds: Optional[bool] = None,
activation_layer_only: bool = False,
set_input: bool = True,
set_output: bool = True,
) -> AbstractElement:
for layer in self.layers:
if (
set_input
and type(layer) in ActivationLayers
or not activation_layer_only
):
layer.update_input_bounds(
abs_input.concretize(), check_feasibility=False
)
abs_input = layer.propagate_abstract_element(
abs_input,
use_existing_bounds,
activation_layer_only,
set_input,
set_output,
)
if (
set_output
and type(layer) in ActivationLayers
or not activation_layer_only
):
layer.update_output_bounds(abs_input.concretize())
return abs_input
def set_dependence_set_applicability(self, applicable: bool = True) -> None:
for layer in self.layers:
if isinstance(layer, AbstractContainerModule):
layer.set_dependence_set_applicability(applicable)
else:
layer.dependence_set_applicable = (
applicable and not layer.dependence_set_block
)
applicable = layer.dependence_set_applicable
self.dependence_set_applicable = applicable
    def _get_mn_bab_shape_after_layer(
        self,
        from_layer_index: int,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        abstract_shape: MN_BaB_Shape,
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
        preceeding_layers: Optional[List[Any]],
        use_early_termination_for_current_query: bool,  # = False,
        optimize_intermediate_bounds: bool,  # = False, # Whether to run individual backward passes to tighten intermediate bounds individually
    ) -> Tuple[Optional[MN_BaB_Shape], Optional[Tuple[Tensor, Tensor]]]:
        """Fully backsubstitute `abstract_shape` from after `from_layer_index`
        to the network input.

        With early termination enabled, returns (None, (best_lbs, best_ubs))
        — final concretized bounds tightened against the best bounds seen
        during backsubstitution; otherwise returns (abstract_shape, None).
        """
        if from_layer_index == len(self.layers) - 1:
            assert layer_from_query_tag(abstract_shape.query_id) == layer_tag(
                self
            )  # TODO: this seems a bit messy
        else:
            assert layer_from_query_tag(abstract_shape.query_id) == layer_tag(
                self.layers[from_layer_index + 1]
            )  # This kind of makes sense: this is the tag of the node for which you get input constraints
        (abstract_shape, (best_lbs, best_ubs)) = self.backsubstitute_shape(
            config=config,
            input_lb=input_lb,
            input_ub=input_ub,
            abstract_shape=abstract_shape,
            from_layer_index=from_layer_index,
            propagate_preceeding_callback=propagate_preceeding_callback,
            preceeding_layers=preceeding_layers,
            use_early_termination_for_current_query=use_early_termination_for_current_query,
            full_back_prop=True,
            optimize_intermediate_bounds=optimize_intermediate_bounds,
        )
        if use_early_termination_for_current_query:
            assert abstract_shape.unstable_queries is not None
            if not abstract_shape.unstable_queries.any():
                # All queries became stable during backsubstitution.
                return None, (best_lbs, best_ubs)
            # TODO: move into MN_BaB_shape, this logic is duplicated
            curr_lbs, curr_ubs = abstract_shape.concretize(input_lb, input_ub)
            assert (
                curr_ubs is not None
            )  # No early termination when we have only lower bounds (atm)
            lbs, ubs = curr_lbs, curr_ubs
            if abstract_shape.unstable_queries is not None:  # TODO: this is always true
                best_lbs[:, abstract_shape.unstable_queries] = torch.maximum(
                    best_lbs[:, abstract_shape.unstable_queries], lbs
                )
                best_ubs[:, abstract_shape.unstable_queries] = torch.minimum(
                    best_ubs[:, abstract_shape.unstable_queries], ubs
                )
            else:
                best_lbs = torch.maximum(best_lbs, lbs)
                best_ubs = torch.minimum(best_ubs, ubs)
            # TODO @Robin
            return None, (best_lbs, best_ubs)
        else:
            return abstract_shape, None
    def backsubstitute_shape(  # noqa: C901
        self,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        abstract_shape: MN_BaB_Shape,
        from_layer_index: Optional[int],
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
        preceeding_layers: Optional[List[Any]],
        use_early_termination_for_current_query: bool,  # = False,
        full_back_prop: bool,  # = False,
        optimize_intermediate_bounds: bool,  # = False,
    ) -> Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]]:
        """Backsubstitute `abstract_shape` layer by layer, back to front,
        starting at `from_layer_index` (default: the last layer).

        Activation layers (ReLU/Sigmoid/Tanh/MaxPool2d) get their input
        bounds computed lazily and may receive PRIMA constraints; nested
        container modules recurse with a callback that continues the
        backsubstitution through the layers preceding them. Returns the
        resulting shape together with the best (lbs, ubs) found along the
        way (used for early termination).
        """
        number_of_queries = abstract_shape.num_queries
        assert (
            abstract_shape.unstable_queries is None
            or abstract_shape.unstable_queries.sum() == number_of_queries
        )
        if use_early_termination_for_current_query:
            if (
                abstract_shape.unstable_queries is None
            ):  # TODO: initialize this eagerly?
                abstract_shape.initialize_unstable_queries()
            assert abstract_shape.unstable_queries is not None
            assert abstract_shape.matches_filter_mask(
                abstract_shape.unstable_queries
            )  # TODO: comment out for performance?
        # Best bounds seen so far, initialized to -inf / +inf.
        best_lbs = torch.empty(  # TODO: move into MN_BaB_Shape
            (
                abstract_shape.batch_size,
                number_of_queries
                if abstract_shape.unstable_queries is None
                else len(abstract_shape.unstable_queries),
            ),
            device=abstract_shape.device,
        ).fill_(-torch.inf)
        best_ubs = torch.empty(
            (
                abstract_shape.batch_size,
                number_of_queries
                if abstract_shape.unstable_queries is None
                else len(abstract_shape.unstable_queries),
            ),
            device=abstract_shape.device,
        ).fill_(torch.inf)
        from_layer_index = (
            len(self.layers) - 1 if from_layer_index is None else from_layer_index
        )
        for i, layer in reversed(list(enumerate(self.layers[: from_layer_index + 1]))):
            if isinstance(abstract_shape.lb.coef, DependenceSets):
                # Fall back to dense coefficients once dependence sets stop
                # being smaller than the layer's spatial extent.
                if np.prod(layer.output_dim[-2:]) <= np.prod(
                    abstract_shape.lb.coef.sets.shape[-2:]
                ):
                    abstract_shape.lb = AffineForm(
                        abstract_shape.lb.coef.to_tensor(layer.output_dim),
                        abstract_shape.lb.bias,
                    )
                    if abstract_shape.ub is not None:
                        assert isinstance(abstract_shape.ub.coef, DependenceSets)
                        abstract_shape.ub = AffineForm(
                            abstract_shape.ub.coef.to_tensor(layer.output_dim),
                            abstract_shape.ub.bias,
                        )
            if (
                isinstance(layer, ReLU)
                or isinstance(layer, Sigmoid)
                or isinstance(layer, Tanh)
                or isinstance(layer, MaxPool2d)
            ):
                # if not layer_tag(layer) in self.layer_id_to_layer:
                #     self.layer_id_to_layer[layer_tag(layer)] = layer
                intermediate_bounds_callback = None
                if (
                    layer_tag(layer)
                    in config.layer_ids_for_which_to_compute_prima_constraints
                ):
                    intermediate_bounds_callback = (
                        self._get_intermediate_bounds_callback(
                            layer_index=i,
                            config=config,
                            subproblem_state=abstract_shape.subproblem_state,
                            device=abstract_shape.device,
                            input_lb=input_lb,
                            input_ub=input_ub,
                            propagate_preceeding_callback=propagate_preceeding_callback,
                            preceeding_layers=preceeding_layers,
                        )
                    )
                if layer.input_bounds is None:
                    # print(f"Setting intermediate bounds for layer {i}")
                    try:
                        # Lazily compute intermediate bounds for this
                        # activation layer via a recursive backward pass.
                        self._set_intermediate_bounds(
                            current_layer_index=i,
                            config=config,
                            input_lb=input_lb,
                            input_ub=input_ub,
                            batch_size=abstract_shape.batch_size,
                            subproblem_state=abstract_shape.subproblem_state,
                            device=abstract_shape.device,
                            propagate_preceeding_callback=propagate_preceeding_callback,
                            preceeding_layers=preceeding_layers,
                            optimize_intermediate_bounds=optimize_intermediate_bounds,
                        )
                    except InvalidBoundsError as e:
                        abstract_shape.update_is_infeasible(
                            e.invalid_bounds_mask_in_batch
                        )
                # print(f"Finished intermediate bounds for layer {i}")
                abstract_shape = layer.backsubstitute(
                    config,
                    abstract_shape,
                    intermediate_bounds_callback,
                    self.get_prev_layer(i, preceeding_layers),
                )
                if layer.input_bounds and use_early_termination_for_current_query:
                    # TODO: move into MN_BaB_Shape, this logic is duplicated
                    assert abstract_shape.unstable_queries is not None
                    curr_lbs, curr_ubs = abstract_shape.concretize(*layer.input_bounds)
                    assert (
                        curr_ubs is not None
                    )  # No early termination when we have only lower bounds (atm)
                    lbs, ubs = curr_lbs, curr_ubs
                    best_lbs[:, abstract_shape.unstable_queries] = torch.maximum(
                        best_lbs[:, abstract_shape.unstable_queries], lbs
                    )
                    best_ubs[:, abstract_shape.unstable_queries] = torch.minimum(
                        best_ubs[:, abstract_shape.unstable_queries], ubs
                    )
                    if isinstance(layer, ReLU):
                        current_unstable_queries = (lbs * ubs < 0).any(
                            dim=0
                        )  # was: axis=0
                        # print(f"Before2: {unstable_queries.shape}")
                        abstract_shape.update_unstable_queries(current_unstable_queries)
                        if not abstract_shape.unstable_queries.any():
                            # if no current queries are unstable, we can return early
                            return (
                                abstract_shape,
                                (best_lbs, best_ubs),
                            )
            elif isinstance(layer, AbstractContainerModule):
                if i == 0:
                    # Nested container is the first layer: reuse the caller's
                    # callback/preceding layers unchanged.
                    _propagate_preceeding_callback = propagate_preceeding_callback
                    _preceeding_layers = preceeding_layers
                else:
                    def _propagate_preceeding_callback(
                        config: BacksubstitutionConfig,
                        abstract_shape_int: MN_BaB_Shape,
                        use_early_termination_for_current_query: bool,
                    ) -> Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]]:
                        # Continue backsubstitution through the layers that
                        # precede the nested container (and the outer chain).
                        (abstract_shape_int, (lbs, ubs),) = self.backsubstitute_shape(
                            config=config,
                            input_lb=input_lb,
                            input_ub=input_ub,
                            abstract_shape=abstract_shape_int,
                            from_layer_index=i - 1,
                            propagate_preceeding_callback=propagate_preceeding_callback,
                            preceeding_layers=preceeding_layers,
                            use_early_termination_for_current_query=use_early_termination_for_current_query,
                            full_back_prop=True,
                            optimize_intermediate_bounds=optimize_intermediate_bounds,
                        )
                        return abstract_shape_int, (lbs, ubs)
                    _preceeding_layers = [preceeding_layers, self.layers[:i]]
                (abstract_shape, (lbs, ubs),) = layer.backsubstitute_shape(
                    config=config,
                    input_lb=input_lb,
                    input_ub=input_ub,
                    abstract_shape=abstract_shape,
                    from_layer_index=None,
                    propagate_preceeding_callback=_propagate_preceeding_callback,
                    preceeding_layers=_preceeding_layers,
                    use_early_termination_for_current_query=use_early_termination_for_current_query,
                    full_back_prop=False,
                    optimize_intermediate_bounds=optimize_intermediate_bounds,
                )
                if (  # TODO: move into MN_BaB_Shape
                    lbs.shape != best_lbs.shape
                    and abstract_shape.unstable_queries is not None
                    and abstract_shape.unstable_queries.sum() < best_lbs.numel()
                ):
                    best_lbs[:, abstract_shape.unstable_queries] = torch.maximum(
                        best_lbs[:, abstract_shape.unstable_queries], lbs
                    )
                    best_ubs[:, abstract_shape.unstable_queries] = torch.minimum(
                        best_ubs[:, abstract_shape.unstable_queries], ubs
                    )
                else:
                    best_lbs = torch.maximum(best_lbs, lbs)
                    best_ubs = torch.minimum(best_ubs, ubs)
            else:
                # print(f"Pre layer {i} - {layer}: Shape: {abstract_shape.lb.coef.shape}")
                # print(f"affine form dtype: {abstract_shape.lb.coef.dtype}")
                abstract_shape = layer.backsubstitute(config, abstract_shape)
                # print(f"Post layer {i} - {layer}: Shape: {abstract_shape.lb.coef.shape}")
                # print(f"affine form dtype: {abstract_shape.lb.coef.dtype}")
        if propagate_preceeding_callback is not None and full_back_prop:
            # Continue all the way back through the layers preceding this
            # container.
            (abstract_shape, (lbs, ubs),) = propagate_preceeding_callback(
                config,
                abstract_shape,
                use_early_termination_for_current_query,
            )
            best_lbs = torch.maximum(best_lbs, lbs)
            best_ubs = torch.minimum(best_ubs, ubs)
        return abstract_shape, (best_lbs, best_ubs)
def get_prev_layer(
self,
current_layer_index: int,
preceeding_layers: Optional[List[Any]],
) -> AbstractModule:
if current_layer_index == 0:
# Reached the beginning of a sequential block. Preceeding layer is input to this block.
assert preceeding_layers is not None
if preceeding_layers is None:
return None
else:
prev_layer = preceeding_layers[-1][-1]
else:
prev_layer = self.layers[current_layer_index - 1]
assert isinstance(prev_layer, AbstractModule)
# TODO for the use of intermediate bounds get the actual prev layer in case of Sequential or BB
return prev_layer
    def _set_intermediate_bounds(  # noqa: C901 # TODO: simplify
        self,
        current_layer_index: int,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        batch_size: int,
        subproblem_state: Optional[SubproblemState],
        device: torch.device,
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
        preceeding_layers: Optional[List[Any]],
        optimize_intermediate_bounds: bool,  # = False
        only_recompute_unstable: bool = False,  # = True for forward pass
    ) -> None:
        """Compute and store the input bounds of layer `current_layer_index`.

        Bounds are obtained by backsubstituting identity queries over the
        previous layer's output through all preceding layers (and, through
        `propagate_preceeding_callback`, through enclosing containers).
        Where previously stored bounds exist, only a subset of neurons is
        recomputed and the result is merged with the stored bounds.
        """
        current_layer = self.layers[current_layer_index]
        prev_layer = self.get_prev_layer(current_layer_index, preceeding_layers)
        if prev_layer is None:
            # First layer of the outermost network: its input bounds are the
            # input region itself.
            # TODO: check validity before removing assertion
            assert current_layer.input_dim == input_lb.shape[1:]
            current_layer.update_input_bounds((input_lb, input_ub))
            return
        # --- Select which neurons actually need new bounds. ---
        if (  # TODO: move this entire logic into Constraints?
            not optimize_intermediate_bounds
            and subproblem_state is not None
            and subproblem_state.constraints.split_state is not None
            and subproblem_state.constraints.layer_bounds is not None
            and layer_tag(current_layer)
            in subproblem_state.constraints.layer_bounds.intermediate_bounds
            and isinstance(current_layer, ReLU)
        ):
            # Stored bounds exist for this ReLU: only neurons that are
            # unstable in at least one batch element need recomputation.
            unstable_nodes_in_current_layer = (
                subproblem_state.constraints.split_state.unstable_node_mask_in_layer(
                    layer_tag(current_layer)
                ).any(dim=0)
            )
            intermediate_bounds_to_recompute = unstable_nodes_in_current_layer.flatten()
        else:
            if only_recompute_unstable and current_layer.input_bounds is not None:
                # Forward-pass refinement: restrict to currently unstable
                # neurons (lb * ub < 0).
                intermediate_bounds_to_recompute = (
                    current_layer.input_bounds[0] * current_layer.input_bounds[1] < 0
                ).flatten()
            else:
                # No usable prior information: recompute every neuron.
                intermediate_bounds_to_recompute = torch.ones(
                    prev_layer.output_dim,
                    device=device,
                    dtype=torch.bool,
                ).flatten()
        if not intermediate_bounds_to_recompute.any():
            # Nothing to recompute: reuse the stored bounds unchanged.
            assert (
                subproblem_state is not None
                and layer_tag(current_layer)
                in subproblem_state.constraints.layer_bounds.intermediate_bounds
            )
            current_layer.update_input_bounds(
                subproblem_state.constraints.layer_bounds.intermediate_bounds[
                    layer_tag(current_layer)
                ]
            )
            return
        use_dependence_sets_for_current_bounds = (
            config.use_dependence_sets
            and self.layers[current_layer_index].dependence_set_applicable
        )
        if use_dependence_sets_for_current_bounds:
            # Dependence sets are not combined with PRIMA constraints.
            config = config.without_prima()
        # Iterator over (slice_start, slice_end, coef) chunks of the identity
        # query matrix; slicing bounds peak memory usage.
        initial_intermediate_bound_coef_iter = (
            get_output_bound_initial_query_coef_iterator(
                dim=prev_layer.output_dim,
                intermediate_bounds_to_recompute=intermediate_bounds_to_recompute,
                use_dependence_sets=use_dependence_sets_for_current_bounds,
                batch_size=batch_size,
                slice_size=config.max_num_queries,
                device=device,
                dtype=None,  # TODO: should this be something else?
            )
        )
        subproblem_state_for_bounds = subproblem_state
        if subproblem_state_for_bounds is not None:
            if optimize_intermediate_bounds:
                subproblem_state_for_bounds = (
                    subproblem_state_for_bounds.with_new_parameters()
                )
            if use_dependence_sets_for_current_bounds:
                subproblem_state_for_bounds = (
                    subproblem_state_for_bounds.without_prima()
                )  # TODO: get rid of this?

        def get_interm_bound_callback(
            max_num_queries: Optional[int] = None,
        ) -> Callable[
            [QueryCoef],
            Tuple[
                Optional[MN_BaB_Shape], Tuple[Tensor, Tensor]
            ],  # TODO: do we need a MN_BaB_Shape result?
        ]:
            # Factory for the callback that backsubstitutes one coefficient
            # slice; `max_num_queries` optionally splits it into query groups.
            def interm_bound_callback(
                query_coef: QueryCoef,
            ) -> Tuple[
                Optional[MN_BaB_Shape], Tuple[Tensor, Tensor]
            ]:  # TODO: do we need a MN_BaB_Shape result?
                """
                Returns an abstract shape (None if early termination produced
                final bounds) together with intermediate bounds after
                backpropagation.

                Args:
                    query_coef: all query coefficients
                """

                def get_interm_bound_restricted_queries(
                    query_coef: QueryCoef,
                    override_query_id: Optional[QueryTag] = None,
                ) -> Tuple[
                    Optional[MN_BaB_Shape], Tuple[Tensor, Tensor]
                ]:  # TODO: do we need a MN_BaB_Shape result?
                    # One full backsubstitution pass for a (sub)set of queries.
                    if override_query_id is None:
                        query_id = query_tag(current_layer)
                    else:
                        query_id = override_query_id
                    abstract_shape = MN_BaB_Shape(
                        query_id=query_id,
                        query_prev_layer=prev_layer,
                        queries_to_compute=intermediate_bounds_to_recompute,  # TODO: only pass this if we will need it?
                        lb=AffineForm(query_coef),
                        ub=AffineForm(query_coef),
                        unstable_queries=None,  # initialized lazily if we will need it
                        subproblem_state=subproblem_state_for_bounds,
                    )
                    propagated_shape, layer_bounds = self._get_mn_bab_shape_after_layer(
                        from_layer_index=current_layer_index - 1,
                        config=config,
                        input_lb=input_lb,
                        input_ub=input_ub,
                        abstract_shape=abstract_shape,
                        propagate_preceeding_callback=propagate_preceeding_callback,
                        preceeding_layers=preceeding_layers,
                        use_early_termination_for_current_query=config.use_early_termination
                        if isinstance(current_layer, ReLU)
                        else False,
                        optimize_intermediate_bounds=optimize_intermediate_bounds,
                    )
                    if propagated_shape is not None:
                        assert layer_bounds is None
                        (
                            recomputed_layer_lb,
                            recomputed_layer_ub,
                        ) = propagated_shape.concretize(input_lb, input_ub)
                        assert recomputed_layer_ub is not None
                        if isinstance(current_layer, MaxPool2d):
                            # MaxPool consumes bounds in the spatial layout.
                            recomputed_layer_lb = recomputed_layer_lb.view(
                                -1, *current_layer.input_dim
                            )
                            recomputed_layer_ub = recomputed_layer_ub.view(
                                -1, *current_layer.input_dim
                            )
                    else:
                        # Early termination already produced final bounds.
                        assert layer_bounds is not None
                        recomputed_layer_lb, recomputed_layer_ub = layer_bounds
                    return (
                        propagated_shape,
                        (recomputed_layer_lb, recomputed_layer_ub),
                    )

                def get_interm_bounds_in_multiple_query_groups(
                    query_coef: QueryCoef,
                    queries_per_group: int,
                    get_query_id: Callable[[int], QueryTag],
                ) -> Tuple[
                    Optional[MN_BaB_Shape], Tuple[Tensor, Tensor]
                ]:  # TODO: do we need a MN_BaB_Shape result?
                    # Process the queries in groups of `queries_per_group` and
                    # assemble the results into one (batch, n_queries) tensor.
                    total_queries = num_queries(query_coef)
                    device = query_coef.device
                    final_lbs = torch.zeros((batch_size, total_queries), device=device)
                    final_ubs = torch.zeros((batch_size, total_queries), device=device)
                    offset = 0
                    while offset < total_queries:
                        curr_end = min(offset + queries_per_group, total_queries)
                        curr_query_coef = query_coef[:, offset:curr_end]
                        prop_shape, (
                            curr_lb,
                            curr_ub,
                        ) = get_interm_bound_restricted_queries(
                            curr_query_coef, override_query_id=get_query_id(offset)
                        )
                        final_lbs[:, offset:curr_end] = curr_lb
                        final_ubs[:, offset:curr_end] = curr_ub
                        offset = curr_end
                    return prop_shape, (
                        final_lbs,
                        final_ubs,
                    )  # TODO: returning the final prop_shape seems a bit weird, why return a shape at all?

                # NOTE: a per-neuron parameter-sharing variant (one query
                # group per neuron, driven by ParameterSharing config) was
                # prototyped here and is currently disabled.
                if max_num_queries is None:
                    return get_interm_bound_restricted_queries(query_coef)
                else:
                    query_id = query_tag(current_layer)
                    return get_interm_bounds_in_multiple_query_groups(
                        query_coef=query_coef,
                        queries_per_group=max_num_queries,
                        get_query_id=lambda offset: query_id,
                    )

            return interm_bound_callback

        if optimize_intermediate_bounds:
            # NOTE(review): the optimizing variant
            # (optimize_params_for_interm_bounds) is commented out upstream;
            # with this flag set, no recomputed bounds are produced here, so
            # the merging code below would fail — confirm callers pass False.
            pass
        else:
            interm_bound_callback = get_interm_bound_callback(config.max_num_queries)
            num_recompute = int(intermediate_bounds_to_recompute.float().sum().item())
            recomputed_layer_lb = torch.zeros(
                (batch_size, num_recompute), device=device
            )
            recomputed_layer_ub = torch.zeros(
                (batch_size, num_recompute), device=device
            )
            # Bound the selected queries slice by slice to limit memory.
            for slice_start, slice_end, coef in initial_intermediate_bound_coef_iter:
                debugging_shape, (
                    recomputed_layer_lb_slice,
                    recomputed_layer_ub_slice,
                ) = interm_bound_callback(coef)
                recomputed_layer_lb[
                    :, slice_start:slice_end
                ] = recomputed_layer_lb_slice.view((batch_size, -1))
                recomputed_layer_ub[
                    :, slice_start:slice_end
                ] = recomputed_layer_ub_slice.view((batch_size, -1))
        if not intermediate_bounds_to_recompute.all():
            # Merge freshly computed bounds into the stored ones; untouched
            # neurons keep their previous bounds.
            assert (
                subproblem_state is not None
                and layer_tag(current_layer)
                in subproblem_state.constraints.layer_bounds.intermediate_bounds
            )
            (
                layer_lb,
                layer_ub,
            ) = subproblem_state.constraints.layer_bounds.intermediate_bounds[
                layer_tag(current_layer)
            ]
            layer_lb = layer_lb.flatten(start_dim=1).clone()
            layer_ub = layer_ub.flatten(start_dim=1).clone()
            layer_lb[:, intermediate_bounds_to_recompute] = recomputed_layer_lb
            layer_ub[:, intermediate_bounds_to_recompute] = recomputed_layer_ub
        else:
            layer_lb, layer_ub = recomputed_layer_lb, recomputed_layer_ub
        if (
            subproblem_state is not None
            and layer_tag(current_layer)
            in subproblem_state.constraints.layer_bounds.intermediate_bounds
        ):
            # Take the elementwise best of old and new bounds (presumably the
            # Leaky* autograd functions keep gradients flowing to the
            # non-selected operand — TODO confirm in their definitions).
            (
                best_lb,
                best_ub,
            ) = subproblem_state.constraints.layer_bounds.intermediate_bounds[
                layer_tag(current_layer)
            ]
            layer_lb = LeakyGradientMaximumFunction.apply(
                layer_lb, best_lb.view_as(layer_lb)
            )
            layer_ub = LeakyGradientMinimumFunction.apply(
                layer_ub, best_ub.view_as(layer_ub)
            )
        current_layer.update_input_bounds((layer_lb, layer_ub), check_feasibility=True)
        if (
            isinstance(current_layer, ReLU)
            and subproblem_state is not None
            and subproblem_state.constraints.split_state is not None
        ):
            # Tighter bounds may stabilize neurons; refresh split constraints.
            assert current_layer.input_bounds is not None
            subproblem_state.constraints.split_state.refine_split_constraints_for_relu(
                layer_tag(current_layer), current_layer.input_bounds
            )
    def _get_intermediate_bounds_callback(
        self,
        layer_index: int,
        config: BacksubstitutionConfig,
        subproblem_state: Optional[SubproblemState],
        device: torch.device,
        input_lb: Tensor,
        input_ub: Tensor,
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
        preceeding_layers: Optional[List[Any]],
    ) -> Callable[[Tensor], Tuple[Tensor, Tensor]]:
        """Return a gradient-free callback that bounds arbitrary linear
        queries over the input of layer `layer_index` via backsubstitution.

        The callback takes flat query coefficients of shape
        (batch, n_queries, prod(input_shape)) and returns (lbs, ubs).
        """
        assert layer_index >= 1
        layer_input_shape = self.layers[layer_index - 1].output_dim
        current_layer = self.layers[layer_index]
        subproblem_state_for_queries = subproblem_state
        use_dependent_sets = config.use_dependence_sets
        if (
            subproblem_state_for_queries is not None and config.reduce_parameter_sharing
        ):  # use plain deep poly pass if reduced parameter sharing is active (there aren't canonical parameters to use and there is no optimization)
            subproblem_state_for_queries = (
                subproblem_state_for_queries.without_parameters()
            )

        @torch.no_grad()
        def compute_intermediate_bounds(
            query_coef: Tensor,
        ) -> Tuple[Tensor, Tensor]:
            # Reshape flat queries to (batch, n_queries, *layer_input_shape).
            query_coef = query_coef.view(
                *(query_coef.shape[:2] + layer_input_shape)
            ).to(device)
            query_shape = MN_BaB_Shape(  # Here AffineForm will be cloned later
                query_id=query_tag(current_layer),
                query_prev_layer=None,  # (not using reduced parameter sharing)
                queries_to_compute=None,  # we are given a complete set of queries
                lb=AffineForm(query_coef),
                ub=AffineForm(query_coef),
                unstable_queries=None,  # (see use_early_termination_for_current_query=False below)
                subproblem_state=subproblem_state_for_queries,
            )
            use_dependence_sets_for_current_bounds = (
                use_dependent_sets and current_layer.dependence_set_applicable
            )
            propagated_shape, __ = self._get_mn_bab_shape_after_layer(
                layer_index - 1,
                (
                    config.without_prima()
                    if use_dependence_sets_for_current_bounds
                    else config
                ).where(
                    use_early_termination=False,
                    layer_ids_for_which_to_compute_prima_constraints=[],
                ),
                input_lb,
                input_ub,
                query_shape,
                propagate_preceeding_callback=propagate_preceeding_callback,
                preceeding_layers=preceeding_layers,
                use_early_termination_for_current_query=False,  # This is only used for PRIMA constraints, there we almost only call this on queries that should be unstable
                optimize_intermediate_bounds=False,  # (intermediate bounds are optimized during the top level pass)
            )
            assert propagated_shape is not None
            ret_lbs, ret_ubs = propagated_shape.concretize(input_lb, input_ub)
            assert ret_ubs is not None
            return (ret_lbs, ret_ubs)

        return compute_intermediate_bounds
def get_default_split_constraints(
self, batch_size: int, device: torch.device
) -> Dict[LayerTag, Tensor]:
split_constraints: Dict[LayerTag, Tensor] = {}
for layer in self.layers:
if (
isinstance(layer, ReLU)
or isinstance(layer, Sigmoid)
or isinstance(layer, Tanh)
):
split_constraints[layer_tag(layer)] = torch.zeros(
batch_size, *layer.output_dim, dtype=torch.int8, device=device
)
elif isinstance(layer, AbstractContainerModule):
split_constraints.update(
layer.get_default_split_constraints(batch_size, device)
)
return split_constraints
def get_default_split_points(
self, batch_size: int, device: torch.device
) -> Dict[LayerTag, Tensor]:
split_points: Dict[LayerTag, Tensor] = {}
for layer in self.layers:
if isinstance(layer, Sigmoid) or isinstance(layer, Tanh):
split_points[layer_tag(layer)] = torch.zeros(
batch_size, *layer.output_dim, dtype=torch.float32, device=device
)
elif isinstance(layer, AbstractContainerModule):
split_points.update(layer.get_default_split_points(batch_size, device))
return split_points
def get_activation_layers(self) -> Dict[LayerTag, ActivationLayer]:
act_layers: Dict[LayerTag, ActivationLayer] = {}
for layer in self.layers:
if (
isinstance(layer, ReLU)
or isinstance(layer, Sigmoid)
or isinstance(layer, Tanh)
):
act_layers[layer_tag(layer)] = layer
elif isinstance(layer, AbstractContainerModule):
act_layers.update(layer.get_activation_layers())
return act_layers
def get_current_intermediate_bounds(
self,
) -> OrderedDict[LayerTag, Tuple[Tensor, Tensor]]:
intermediate_bounds: OrderedDict[
LayerTag, Tuple[Tensor, Tensor]
] = OrderedDict()
for layer in self.layers:
if layer.input_bounds is not None:
intermediate_bounds[layer_tag(layer)] = (
layer.input_bounds[0].detach(),
layer.input_bounds[1].detach(),
)
if isinstance(layer, AbstractContainerModule):
intermediate_bounds.update(layer.get_current_intermediate_bounds())
return intermediate_bounds
def get_current_optimized_intermediate_bounds(
self,
) -> OrderedDict[LayerTag, Tuple[Tensor, Tensor]]:
intermediate_bounds: OrderedDict[
LayerTag, Tuple[Tensor, Tensor]
] = OrderedDict()
for layer in self.layers:
if layer.optim_input_bounds is not None:
intermediate_bounds[layer_tag(layer)] = (
layer.optim_input_bounds[0].detach(),
layer.optim_input_bounds[1].detach(),
)
if isinstance(layer, AbstractContainerModule):
intermediate_bounds.update(
layer.get_current_optimized_intermediate_bounds()
)
return intermediate_bounds
def get_babsr_bias(self, from_layer_index: Optional[int] = None) -> Tensor:
if from_layer_index is None:
from_layer_index = len(self.layers) - 1
for i, layer in reversed(list(enumerate(self.layers[: from_layer_index + 1]))):
if isinstance(layer, Sequential):
return layer.get_babsr_bias()
elif hasattr(layer, "bias"):
return layer.bias
# nn.Parameter so it is automatically moved to correct device
# and converted to correct dtype in nn.Module constructor
return nn.Parameter(torch.zeros((1,)))
def get_activation_layer_ids(
self, act_layer_ids: Optional[List[LayerTag]] = None
) -> List[LayerTag]:
if act_layer_ids is None:
act_layer_ids = []
for layer in self.layers:
act_layer_ids += layer.get_activation_layer_ids()
return act_layer_ids
def get_relu_layer_ids(
self, act_layer_ids: Optional[List[LayerTag]] = None
) -> List[LayerTag]:
if act_layer_ids is None:
act_layer_ids = []
for layer in self.layers:
act_layer_ids += layer.get_relu_layer_ids()
return act_layer_ids
    def forward_pass(
        self,
        config: BacksubstitutionConfig,
        input_lb: Tensor,
        input_ub: Tensor,
        propagate_preceeding_callback: Optional[
            Callable[
                [BacksubstitutionConfig, MN_BaB_Shape, bool],
                Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
            ]
        ],
        preceeding_layers: Optional[List[Any]],
        ibp_call: Callable[[], None],
        timeout: float,
    ) -> None:
        """Walk the layers forward, tightening the stored input bounds of every
        activation layer that still has unstable neurons.

        Nested containers recurse via their own `forward_pass`; callbacks are
        built along the way so that layers inside a container can still
        backsubstitute through everything that precedes the container.
        `ibp_call` is invoked repeatedly to refresh interval bounds.
        """
        device = input_lb.device
        inner_preceeding_layers: List[AbstractModule] = []
        tracked_preceeding_layers: List[AbstractModule] = []
        inner_propagate_preceeding_callback = propagate_preceeding_callback
        ibp_call()
        for i, layer in list(enumerate(self.layers)):
            if time.time() > timeout:
                return
            if type(layer) in ActivationLayers:
                # Fresh, parameter-free subproblem seeded with the currently
                # stored intermediate bounds.
                subproblem_state = SubproblemState.create_default(
                    split_state=None,
                    optimize_prima=False,
                    batch_size=1,
                    device=input_lb.device,
                    use_params=False,
                )
                subproblem_state.constraints.layer_bounds.intermediate_bounds = (
                    self.get_current_intermediate_bounds()
                )
                # Unstable neuron count / bound width before refinement.
                pre_unstable = (
                    (layer.input_bounds[0] * layer.input_bounds[1] < 0).float().sum()
                )
                pre_width = (layer.input_bounds[1] - layer.input_bounds[0]).mean()
                if pre_unstable > 0:
                    # Here the sequential takes care of preceeding layers.
                    self._set_intermediate_bounds(
                        current_layer_index=i,
                        config=config,
                        input_lb=input_lb,
                        input_ub=input_ub,
                        batch_size=subproblem_state.batch_size,
                        subproblem_state=subproblem_state,
                        device=device,
                        propagate_preceeding_callback=propagate_preceeding_callback,
                        preceeding_layers=preceeding_layers,
                        optimize_intermediate_bounds=False,
                        only_recompute_unstable=True,
                    )
                    ibp_call()
                    post_unstable = (
                        (layer.input_bounds[0] * layer.input_bounds[1] < 0).float().sum()
                    )
                    post_width = (layer.input_bounds[1] - layer.input_bounds[0]).mean()
                    print(
                        f"ID: {id(layer)} | Pre: {pre_unstable:.0f} | Post: {post_unstable:.0f} | Pre-Width: {pre_width} | Post-Width: {post_width} | TR {timeout - time.time():.3f}"
                    )
            if isinstance(layer, AbstractContainerModule):
                # Updated preceeding callback:
                # callback for all layers before the current container.
                if len(inner_preceeding_layers) > 0:
                    inner_propagate_preceeding_callback = (
                        self._get_preceeding_callback_wrapper(
                            inner_propagate_preceeding_callback, inner_preceeding_layers
                        )
                    )
                if preceeding_layers is None:
                    temp_preceeding_layers = [tracked_preceeding_layers]
                else:
                    temp_preceeding_layers = [
                        *preceeding_layers,
                        tracked_preceeding_layers,
                    ]
                layer.forward_pass(
                    config=config,
                    input_lb=input_lb,
                    input_ub=input_ub,
                    propagate_preceeding_callback=inner_propagate_preceeding_callback,
                    preceeding_layers=temp_preceeding_layers,
                    ibp_call=ibp_call,
                    timeout=timeout,
                )

                # Callback including the current AbstractContainerModule.
                def get_preceeding_callback(
                    layer: AbstractContainerModule,
                    existing_preceeding_callback: Optional[
                        Callable[
                            [
                                BacksubstitutionConfig,
                                MN_BaB_Shape,
                                bool,
                            ],
                            Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
                        ]
                    ],
                ) -> Callable[
                    [BacksubstitutionConfig, MN_BaB_Shape, bool],
                    Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
                ]:
                    # Bind `layer` and the existing callback at definition
                    # time (avoids late-binding closure bugs in the loop).
                    def _propagate_preceeding_callback(
                        config: BacksubstitutionConfig,
                        abstract_shape_int: MN_BaB_Shape,
                        use_early_termination_for_current_query: bool,
                    ) -> Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]]:
                        (abstract_shape_int, (lbs, ubs),) = layer.backsubstitute_shape(
                            config=config,
                            input_lb=input_lb,
                            input_ub=input_ub,
                            abstract_shape=abstract_shape_int,
                            from_layer_index=i,
                            propagate_preceeding_callback=None,
                            preceeding_layers=temp_preceeding_layers,  # Which layers are required here?
                            use_early_termination_for_current_query=use_early_termination_for_current_query,
                            full_back_prop=False,
                            optimize_intermediate_bounds=False,
                        )
                        if existing_preceeding_callback is not None:
                            return existing_preceeding_callback(
                                config,
                                abstract_shape_int,
                                use_early_termination_for_current_query,
                            )
                        else:
                            return (
                                abstract_shape_int,
                                (lbs, ubs),
                            )

                    return _propagate_preceeding_callback

                inner_propagate_preceeding_callback = get_preceeding_callback(
                    layer, inner_propagate_preceeding_callback
                )
                inner_preceeding_layers = []
                ibp_call()
            else:
                inner_preceeding_layers.append(layer)
            tracked_preceeding_layers.append(layer)
def _get_preceeding_callback_wrapper(
self,
propagate_preceeding_callback: Optional[
Callable[
[BacksubstitutionConfig, MN_BaB_Shape, bool],
Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
]
],
layers: List[AbstractModule],
) -> Callable[
[BacksubstitutionConfig, MN_BaB_Shape, bool],
Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]],
]:
def wrapped_call(
config: BacksubstitutionConfig,
abstract_shape: MN_BaB_Shape,
use_early_termination_for_current_query: bool,
) -> Tuple[MN_BaB_Shape, Tuple[Tensor, Tensor]]:
for layer in reversed(layers):
abstract_shape = layer.backsubstitute(config, abstract_shape)
if propagate_preceeding_callback is None:
# assert isinstance(abstract_shape.lb.coef, Tensor)
# bound_shape = abstract_shape.lb.coef.shape[:2]
# if isinstance(abstract_shape.lb.coef, Tensor):
# bound_shape = abstract_shape.lb.coef.shape[:2]
# elif isinstance(abstract_shape.lb.coef, DependenceSets):
# bound_shape = abstract_shape.lb.coef.sets.shape[:2]
# else:
# assert False
return (
abstract_shape,
(
-np.inf * torch.ones_like(abstract_shape.lb.bias),
np.inf * torch.ones_like(abstract_shape.lb.bias),
),
)
else:
return propagate_preceeding_callback(
config,
abstract_shape,
use_early_termination_for_current_query,
)
return wrapped_call
| 54,902 | 42.817239 | 177 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_domains/zonotope.py | """
Based on HybridZonotope from DiffAI (https://github.com/eth-sri/diffai/blob/master/ai.py)
"""
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from src.abstract_domains.ai_util import AbstractElement, clamp_image, head_from_bounds
if TYPE_CHECKING:
from src.abstract_layers.abstract_bn2d import BatchNorm2d
class HybridZonotope(AbstractElement):
def __init__(
self,
head: Tensor,
beta: Optional[Tensor],
errors: Optional[Tensor],
domain: str,
) -> None:
super(HybridZonotope, self).__init__()
self.head = head
self.beta = beta
self.errors = errors
self.domain = domain
assert not torch.isnan(self.head).any()
assert self.beta is None or (not torch.isnan(self.beta).any())
assert self.errors is None or (not torch.isnan(self.errors).any())
@classmethod
def construct_from_noise(
cls,
x: Tensor,
eps: Union[float, Tensor],
domain: str,
dtype: Optional[torch.dtype] = None,
data_range: Tuple[float, float] = (0, 1),
) -> "HybridZonotope":
dtype = torch.get_default_dtype() if dtype is None else dtype
# compute center and error terms from input, perturbation size and data range
assert data_range[0] < data_range[1]
x_center, x_beta = clamp_image(x, eps, data_range[0], data_range[1])
x_center, x_beta = x_center.to(dtype=dtype), x_beta.to(dtype=dtype)
return cls.construct(x_center, x_beta, domain=domain)
@classmethod
def construct_from_bounds(
cls,
min_x: Tensor,
max_x: Tensor,
dtype: Optional[torch.dtype] = None,
domain: str = "box",
) -> "HybridZonotope":
dtype = torch.get_default_dtype() if dtype is None else dtype
# compute center and error terms from elementwise bounds
assert min_x.shape == max_x.shape
x_center, x_beta = head_from_bounds(min_x, max_x)
x_center, x_beta = x_center.to(dtype=dtype), x_beta.to(dtype=dtype)
return cls.construct(x_center, x_beta, domain=domain)
@staticmethod
def construct(
x_center: Tensor, x_beta: Tensor, domain: str = "box", sparse: bool = False
) -> "HybridZonotope":
device = x_center.device
dtype = x_center.dtype
if domain == "box":
return HybridZonotope(x_center, x_beta, None, domain)
elif domain in ["zono", "zono_iter", "hbox"]:
# batch_size = x_center.size()[0]
# n_elements = x_center[0].numel()
# construct error coefficient matrix
ei = HybridZonotope.get_error_matrix(x_center)
# update beta tensor to account for errors captures by error coefficients
new_beta = (
None
if "zono" in domain
else torch.zeros(x_beta.shape).to(device=device, dtype=dtype)
)
# remove zero entries to reduce the number of error terms
if sparse:
nnz = x_beta > 0
ei = ei[nnz.view(-1), ...]
x_beta = x_beta[nnz].unsqueeze(0)
return HybridZonotope(x_center, new_beta, ei * x_beta.unsqueeze(0), domain)
else:
raise RuntimeError("Unsupported HybridZonotope domain: {}".format(domain))
    @staticmethod
    def get_error_matrix(x: Tensor, error_idx: Optional[Tensor] = None) -> Tensor:
        """Build the identity-like error-coefficient tensor for `x`.

        Without `error_idx`, every element of `x` gets its own error term
        (result shape: (n_elements, *x.shape)). With a boolean `error_idx`
        mask (batch size must be 1), only the flagged elements get one.
        """
        batch_size = x.shape[0]
        if error_idx is None:
            n_elements_e = x[0].numel()
            # Identity per batch element, error dimension moved to the front.
            ei = (
                torch.eye(n_elements_e, dtype=x.dtype, device=x.device)
                .expand(batch_size, n_elements_e, n_elements_e)
                .permute(1, 0, 2)
            )
        else:
            assert batch_size == 1
            n_elements_e = int(error_idx.sum())
            n_elements_x = x[0].numel()
            ei = torch.zeros(
                (n_elements_e, n_elements_x), dtype=x.dtype, device=x.device
            )
            # Row k selects the k-th flagged element of the flattened input.
            ei[
                torch.arange(n_elements_e).view(-1, 1), error_idx.flatten().nonzero()
            ] = 1
            ei = ei.expand(batch_size, n_elements_e, n_elements_x).permute(1, 0, 2)
        if len(x.size()) > 2:
            # Restore the spatial layout of x in the trailing dimensions.
            ei = ei.contiguous().view(n_elements_e, *x.size())
        return ei
    @staticmethod
    def get_new_errs(
        approx_indicator: torch.Tensor,
        x_center_new: torch.Tensor,
        x_beta_new: torch.Tensor,
    ) -> torch.Tensor:
        """Create fresh error terms for the entries flagged in `approx_indicator`.

        One new error dimension is shared across the batch for every position
        flagged in any batch element; its coefficient is the corresponding
        entry of `x_beta_new`.
        """
        device = x_center_new.device
        dtype = x_center_new.dtype
        batch_size, center_shape = x_center_new.size()[0], x_center_new.size()[1:]
        # accumulate error position over batch dimension
        new_err_pos = (approx_indicator.long().sum(dim=0) > 0).nonzero()
        num_new_errs = new_err_pos.size()[0]
        # Map each flagged (non-batch) position to its new error index.
        err_idx_dict = {
            tuple(pos.cpu().numpy()): idx
            for pos, idx in zip(new_err_pos, range(num_new_errs))
        }
        nnz = approx_indicator.nonzero()
        # extract error sizes
        beta_values = x_beta_new[tuple(nnz[:, i] for i in range((nnz.shape[1])))]
        # generate new error matrix portion
        new_errs = torch.zeros(
            (
                num_new_errs,
                batch_size,
            )
            + center_shape
        ).to(device, dtype=dtype)
        # Scatter each beta value into (error_index, batch, *position).
        new_errs[
            ([err_idx_dict[tuple(key[1:].cpu().numpy())] for key in nnz],)
            + tuple(nnz[:, i] for i in range((nnz.shape[1])))
        ] = beta_values
        return new_errs
    @property
    def dim(self) -> int:
        # Number of dimensions of the underlying center tensor.
        return self.head.dim()
    @staticmethod
    def join(
        x: List[HybridZonotope],
        trunk_errors: Optional[List[int]] = None,
        dim: int = 0,
        mode: str = "cat",
    ) -> "HybridZonotope":
        """Concatenate ("cat") or stack ("stack") a list of HybridZonotopes
        along `dim`.

        `trunk_errors[i]` is the number of leading error terms of `x[i]`
        shared between all inputs — usually the number of initial errors, or
        the error count at the point where network branches split. Shared
        errors are aligned; each input's remaining errors are laid out
        block-diagonally so they stay independent of the other inputs.
        """
        device = x[0].head.device
        if mode not in ["cat", "stack"]:
            raise RuntimeError(f"Unkown join mode : {mode:}")
        if mode == "cat":
            new_head = torch.cat([x_i.head for x_i in x], dim=dim)
        elif mode == "stack":
            new_head = torch.stack([x_i.head for x_i in x], dim=dim)
        else:
            assert False, f"Unknown mode {mode}"
        if all([x_i.beta is None for x_i in x]):
            new_beta = None
        elif any([x_i.beta is None for x_i in x]):
            # Either all inputs carry a box component or none may.
            assert False, "Mixed HybridZonotopes can't be joined"
        else:
            if mode == "cat":
                new_beta = torch.cat(
                    [
                        torch.zeros_like(x_i.head) if x_i.beta is None else x_i.beta
                        for x_i in x
                    ],
                    dim=dim,
                )
            elif mode == "stack":
                new_beta = torch.stack(
                    [
                        torch.zeros_like(x_i.head) if x_i.beta is None else x_i.beta
                        for x_i in x
                    ],
                    dim=dim,
                )
        if all([x_i.errors is None for x_i in x]):
            new_errors = None
        elif any([x_i.errors is None for x_i in x]):
            assert False, "Mixed HybridZonotopes can't be joined"
        else:
            if trunk_errors is None:
                trunk_errors = [0 for _ in x]
            exit_errors = [
                0 if x_i.errors is None else x_i.errors.size()[0] - trunk_errors[i]
                for i, x_i in enumerate(x)
            ]  # number of additional errors for every Hybrid zonotope
            tmp_errors = [torch.tensor([]) for _ in x]
            for i, x_i in enumerate(x):
                if x_i.errors is None:
                    continue
                # Pad with zero blocks so the non-shared errors of different
                # inputs occupy disjoint rows (block-diagonal layout).
                tmp_errors[i] = torch.cat(
                    [
                        x_i.errors[: trunk_errors[i]],
                        torch.zeros(
                            [max(trunk_errors) - trunk_errors[i] + sum(exit_errors[:i])]
                            + list(x_i.head.shape)
                        ).to(device),
                        x_i.errors[trunk_errors[i] :],
                        torch.zeros(
                            [sum(exit_errors[i + 1 :])] + list(x_i.head.shape)
                        ).to(device),
                    ],
                    dim=0,
                )
            if mode == "cat":
                new_errors = torch.cat(tmp_errors, dim=dim + 1)
            elif mode == "stack":
                new_errors = torch.stack(tmp_errors, dim=dim + 1)
        return HybridZonotope(new_head, new_beta, new_errors, x[0].domain)
def size(self, idx: Optional[int] = None) -> Union[Tuple[int, ...], int]:
if idx is None:
return self.head.size()
else:
return self.head.size(idx)
def view(self, size: Tuple[int, ...]) -> HybridZonotope:
return HybridZonotope(
self.head.view(*size),
None if self.beta is None else self.beta.view(size),
None
if self.errors is None
else self.errors.view(self.errors.size()[0], *size),
self.domain,
)
def flatten(self) -> HybridZonotope:
bsize = self.head.size(0)
return self.view((bsize, -1))
def normalize(self, mean: Tensor, sigma: Tensor) -> "HybridZonotope":
return (self - mean) / sigma
def __sub__( # type: ignore[override]
self, other: Union[Tensor, float, int, HybridZonotope]
) -> HybridZonotope:
return self + (-other)
def __neg__(self) -> "HybridZonotope":
new_head = -self.head
new_beta = None if self.beta is None else self.beta
new_errors = None if self.errors is None else -self.errors
return HybridZonotope(new_head, new_beta, new_errors, self.domain)
def __add__( # type: ignore[override]
self, other: Union[Tensor, float, int, HybridZonotope]
) -> "HybridZonotope":
if (
isinstance(other, torch.Tensor)
or isinstance(other, float)
or isinstance(other, int)
):
return HybridZonotope(
self.head + other, self.beta, self.errors, self.domain
)
elif isinstance(other, HybridZonotope):
assert self.domain == other.domain
return self.add(other, shared_errors=0)
else:
assert False, "Unknown type of other object"
def __truediv__(self, other: Union[Tensor, int, float]) -> "HybridZonotope":
if (
isinstance(other, torch.Tensor)
or isinstance(other, float)
or isinstance(other, int)
or isinstance(other, torch.Tensor)
):
return HybridZonotope(
self.head / other,
None if self.beta is None else self.beta / abs(other),
None if self.errors is None else self.errors / other,
self.domain,
)
else:
assert False, "Unknown type of other object"
def __mul__(self, other: Union[Tensor, int, float]) -> "HybridZonotope":
if (
isinstance(other, int)
or isinstance(other, float)
or isinstance(other, int)
or (isinstance(other, torch.Tensor))
):
d = self.head.device
return HybridZonotope(
(self.head * other).to(d),
None if self.beta is None else (self.beta * abs(other)).to(d),
None if self.errors is None else (self.errors * other).to(d),
self.domain,
)
else:
assert False, "Unknown type of other object"
def __rmul__(self, other: Union[Tensor, int, float]) -> "HybridZonotope": # type: ignore # complains obout signature overlap with __mul__ # Assumes associativity
return self.__mul__(other)
def __getitem__(self, indices: Tuple[slice, ...]) -> "HybridZonotope":
if not isinstance(indices, tuple):
indices = tuple([indices])
return HybridZonotope(
self.head[indices],
None if self.beta is None else self.beta[indices],
None if self.errors is None else self.errors[(slice(None), *indices)],
self.domain,
)
def clone(self) -> "HybridZonotope":
return HybridZonotope(
self.head.clone(),
None if self.beta is None else self.beta.clone(),
None if self.errors is None else self.errors.clone(),
self.domain,
)
def detach(self) -> "HybridZonotope":
return HybridZonotope(
self.head.detach(),
None if self.beta is None else self.beta.detach(),
None if self.errors is None else self.errors.detach(),
self.domain,
)
def max_center(self) -> Tensor:
return self.head.max(dim=1)[0].unsqueeze(1)
    def avg_pool2d(
        self, kernel_size: int, stride: int, padding: int
    ) -> "HybridZonotope":
        """Average pooling; linear, so each component is pooled independently.

        beta and errors are flattened into a batch of head-shaped tensors so a
        single F.avg_pool2d call handles them; errors are then reshaped back
        to (n_eps, *new_head.shape).
        """
        new_head = F.avg_pool2d(self.head, kernel_size, stride, padding)
        new_beta = (
            None
            if self.beta is None
            else F.avg_pool2d(
                self.beta.view(-1, *self.head.shape[1:]), kernel_size, stride, padding
            )
        )
        new_errors = (
            None
            if self.errors is None
            else F.avg_pool2d(
                self.errors.view(-1, *self.head.shape[1:]), kernel_size, stride, padding
            ).view(-1, *new_head.shape)
        )
        return HybridZonotope(new_head, new_beta, new_errors, self.domain)
    def global_avg_pool2d(self) -> "HybridZonotope":
        """Adaptive average pooling to 1x1; linear, applied per component."""
        new_head = F.adaptive_avg_pool2d(self.head, 1)
        new_beta = (
            None
            if self.beta is None
            else F.adaptive_avg_pool2d(self.beta.view(-1, *self.head.shape[1:]), 1)
        )
        new_errors = (
            None
            if self.errors is None
            else F.adaptive_avg_pool2d(
                self.errors.view(-1, *self.head.shape[1:]), 1
            ).view(-1, *new_head.shape)
        )
        return HybridZonotope(new_head, new_beta, new_errors, self.domain)
    def max_pool2d(
        self,
        kernel_size: Tuple[int, int],
        stride: Tuple[int, int],
        padding: Tuple[int, int],
    ) -> "HybridZonotope":
        """Max pooling via interval concretization.

        Only implemented for box-like elements (no zonotope error terms):
        bounds are pooled elementwise and re-boxed, which is sound for
        intervals.
        """
        if self.errors is not None:
            assert False, "MaxPool for Zono not Implemented"
        lb, ub = self.concretize()
        new_lb = F.max_pool2d(lb, kernel_size, stride, padding)
        new_ub = F.max_pool2d(ub, kernel_size, stride, padding)
        return HybridZonotope.construct_from_bounds(
            new_lb, new_ub, self.dtype, self.domain
        )
    def pad(
        self, pad: Tuple[int, ...], mode: str = "constant", value: float = 0.0
    ) -> "HybridZonotope":
        """Constant padding: the center is padded with ``value``, while beta
        and errors are padded with 0 so padded entries are exact points."""
        assert mode == "constant"
        new_head = F.pad(self.head, pad, mode="constant", value=value)
        new_beta = (
            None
            if self.beta is None
            else F.pad(self.beta, pad, mode="constant", value=0.0)
        )
        new_errors = (
            None
            if self.errors is None
            else F.pad(
                self.errors.view(-1, *self.head.shape[1:]),
                pad,
                mode="constant",
                value=0.0,
            ).view(-1, *new_head.shape)
        )
        return HybridZonotope(new_head, new_beta, new_errors, self.domain)
    def conv2d(
        self,
        weight: Tensor,
        bias: Optional[Tensor],
        stride: int,
        padding: int,
        dilation: int,
        groups: int,
    ) -> "HybridZonotope":
        """Convolution: center gets the full affine map, beta is propagated
        through |W| (bias-free), errors get the linear part only with the eps
        dimension folded into the batch dimension."""
        new_head = F.conv2d(self.head, weight, bias, stride, padding, dilation, groups)
        new_beta = (
            None
            if self.beta is None
            else F.conv2d(
                self.beta, weight.abs(), None, stride, padding, dilation, groups
            )
        )
        if self.errors is not None:
            errors_resized = self.errors.view(-1, *self.errors.size()[2:])
            new_errors = F.conv2d(
                errors_resized, weight, None, stride, padding, dilation, groups
            )
            new_errors = new_errors.view(self.errors.shape[0], *new_head.shape)
        else:
            new_errors = None
        return HybridZonotope(new_head, new_beta, new_errors, self.domain)
def convtranspose2d(
self,
weight: Tensor,
bias: Optional[Tensor],
stride: int,
padding: int,
output_padding: int,
groups: int,
dilation: int,
) -> "HybridZonotope":
new_head = F.conv_transpose2d(
self.head, weight, bias, stride, padding, output_padding, dilation, groups
)
new_beta = (
None
if self.beta is None
else F.conv_transpose2d(
self.beta,
weight.abs(),
None,
stride,
padding,
output_padding,
dilation,
groups,
)
)
if self.errors is not None:
errors_resized = self.errors.view(-1, *self.errors.size()[2:])
new_errors = F.conv_transpose2d(
errors_resized,
weight,
None,
stride,
padding,
output_padding,
dilation,
groups,
)
new_errors = new_errors.view(
self.errors.size()[0], self.errors.size()[1], *new_errors.size()[1:]
)
else:
new_errors = None
return HybridZonotope(new_head, new_beta, new_errors, self.domain)
    def contains(  # noqa: C901 # function too complex
        self, other: "HybridZonotope", verbose: Optional[bool] = False
    ) -> Tuple[bool, float]:
        """Check whether ``other`` is contained in this element.

        Returns ``(contained, scale)`` where ``scale`` is the factor by which
        this element's coefficients would have to grow to capture ``other``
        (<= 1 implies containment). Caches an inverse of the error matrix on
        ``self.errors_inv`` for reuse across calls.
        """
        assert self.head.size(0) == 1
        if self.errors is None and other.errors is None:  # interval vs interval
            lb, ub = self.concretize()
            other_lb, other_ub = other.concretize()
            contained = (lb <= other_lb).__and__(other_ub <= ub)
            cont_factor = (
                2
                * torch.max(
                    ((other_ub - self.head) / (ub - lb + 1e-16)).abs().max(),
                    ((other_lb - self.head) / (ub - lb + 1e-16)).abs().max(),
                ).item()
            )
            return contained.all().item(), cont_factor  # type: ignore # this is a bool
        elif self.errors is None:
            # self is a pure box but other has error terms: not handled here.
            return False, torch.nan
        else:
            dtype = self.head.dtype
            device = self.head.device
            # Solve the LGS that we get when representing the "other" zonotope in the "self" basis
            # System Ax = B
            # NOTE: This gives us eps parameterized vectors in x space (i.e. shape 40x824)
            A = self.errors.flatten(start_dim=1).T  # containing init errors
            if other.errors is None:
                B = torch.zeros_like(A)
            else:
                B = other.errors.flatten(start_dim=1).T  # contained init errors
            if not hasattr(self, "errors_inv"):
                self.errors_inv = None
            if A.shape[-1] == A.shape[-2] and self.errors_inv is None:
                try:
                    self.errors_inv = torch.inverse(A)
                except Exception as e:
                    print(f"Failed to invert error matrix: {e}")
            if self.errors_inv is None:
                # Non-square (or singular) system: fall back to least squares
                # or to the version-appropriate dense solver.
                if A.shape[0] != A.shape[1]:
                    sol = np.linalg.lstsq(A.cpu().numpy(), B.cpu().numpy(), rcond=None)
                    x = torch.tensor(sol[0], dtype=dtype, device=device)
                elif float(torch.__version__[:-2]) < 1.9:
                    x = torch.solve(B, A).solution
                else:
                    x = torch.linalg.solve(A, B)
            else:
                x = torch.matmul(self.errors_inv, B)
            # Note sometimes we dont have full rank for A (check sol[1]) - the solution however has no residuals
            # Here x contains the coordinates of the inner zonotope in the outer basis -> 824 x 824
            if not torch.isclose(
                torch.matmul(A, x), B, atol=1e-7, rtol=1e-6
            ).all():  # projection of contained errors into the containing basis failed
                uncaptured_errors = torch.abs(B - torch.matmul(A, x)).sum(dim=1)
                # assert False
            else:
                uncaptured_errors = torch.zeros_like(self.head)
            # Sum the absolutes row-wise to get the scaling factor for the containing error coefficients to overapproximate the contained ones
            abs_per_orig_vector = torch.sum(torch.abs(x), dim=1)
            max_sp = torch.max(abs_per_orig_vector).cpu().item()
            if max_sp > 1 or str(max_sp) == "nan":
                if verbose:
                    print(f"Containment of errors failed with {max_sp}")
                return False, max_sp
            # Here we could probably do some smarter combination i.e. we could compensate the worst errors of the init errors in the differences in the merge errors
            # However this is generally hard (I believe) - a greedy solution should work
            # Here we adjust for the head displacement
            diff = torch.abs(self.head - other.head).detach().view(-1)
            # Here we adjust for errors not captured by the initial matching due to differences in spanned space:
            diff += uncaptured_errors.view(-1)
            # Here we just do a basic check on the "independent" merge errors
            if other.beta is None:
                max_sp_merge = 0.0
                merge_cont = True
            elif self.beta is None:
                max_sp_merge = torch.nan
                merge_cont = False
            else:
                merge_cont = True
            # Check that merge errors (or rather their difference) is diagonal
            if merge_cont:
                self_beta = self.beta.detach()
                other_beta = other.beta.detach()
                merge_diff = (self_beta - other_beta).view(-1)
                merge_cont = (
                    merge_diff >= 0  # type: ignore # this is a bool
                ).all()  # Ensure that the box errors of other can be contained with the box errors of self
                max_sp_merge = (
                    torch.max(other_beta / (self_beta + 1e-8)).cpu().item()
                )
                # When the merge errors of the containing zono are larger than that of the contained one, we can use this extra to compensate for some of the difference in the heads
                # diff = torch.maximum(diff - torch.diagonal(merge_diff), torch.tensor(0)).detach()
                diff = torch.maximum(
                    diff - merge_diff, torch.zeros_like(diff)
                ).detach()
            if not merge_cont:
                if verbose:
                    print("Containment of merge errors failed")
                return False, max_sp_merge
            # This projects the remaining difference between the heads into the error coefficient matrix
            diff = torch.diag(diff.view(-1))
            if self.errors_inv is None:
                if A.shape[0] != A.shape[1]:
                    sol_diff = np.linalg.lstsq(
                        A.cpu().numpy(), diff.cpu().numpy(), rcond=None
                    )
                    x_diff = torch.tensor(sol_diff[0], dtype=dtype, device=device)
                elif float(torch.__version__[:-2]) < 1.9:
                    x_diff = torch.solve(diff, A).solution
                else:
                    x_diff = torch.linalg.solve(A, diff)
            else:
                x_diff = torch.matmul(self.errors_inv, diff)
            if not torch.isclose(
                torch.matmul(A, x_diff), diff, atol=1e-7, rtol=1e-6
            ).all():
                # Projection of head difference into the containing basis failed.
                return False, np.inf
            abs_per_orig_vector_diff = abs_per_orig_vector + torch.abs(x_diff).sum(
                dim=1
            )
            max_sp_diff = torch.max(abs_per_orig_vector_diff).cpu().item()
            # Check if with this additional component, we are still contained
            if max_sp_diff > 1 or str(max_sp_diff) == "nan":
                if verbose:
                    print(f"Containment of head differences failed with {max_sp_diff}")
                return False, max_sp_diff
            if verbose:
                print(f"Containment with {max_sp_diff}")
            return True, max(max_sp_merge, max_sp_diff, max_sp)
    @property
    def shape(self) -> torch.Size:
        # Shape of the center tensor.
        return self.head.shape
    @property
    def dtype(self) -> torch.dtype:
        # dtype of the center tensor (all components share it).
        return self.head.dtype
    @property
    def device(self) -> torch.device:
        # Device of the center tensor (all components share it).
        return self.head.device
    def linear(
        self, weight: Tensor, bias: Union[Tensor, None], C: Union[Tensor, None] = None
    ) -> "HybridZonotope":
        """Affine layer x @ W.T + b, optionally pre-composed with a query
        matrix C (C @ W then C @ b), implemented via rev_matmul."""
        if C is None:
            if bias is None:
                return self.matmul(weight.t())
            else:
                return self.matmul(weight.t()) + bias.unsqueeze(0)
        else:
            if bias is None:
                return self.unsqueeze(-1).rev_matmul(C.matmul(weight)).squeeze()
            else:
                return self.unsqueeze(-1).rev_matmul(
                    C.matmul(weight)
                ).squeeze() + C.matmul(bias)
def matmul(self, other: Tensor) -> "HybridZonotope":
return HybridZonotope(
self.head.matmul(other),
None if self.beta is None else self.beta.matmul(other.abs()),
None if self.errors is None else self.errors.matmul(other),
self.domain,
)
    def einsum(self, defining_str: str, other: Tensor) -> HybridZonotope:
        """Apply an einsum contraction with ``other`` to all components.

        For the error tensor the equation is rewritten with an extra leading
        eps index ``i`` on input and output.
        """
        input_self_str, rest = defining_str.split(",")
        input_other_str, output_str = rest.split("->")
        input_self_str, input_other_str, output_str = (
            input_self_str.strip(),
            input_other_str.strip(),
            output_str.strip(),
        )
        new_head = torch.einsum(defining_str, self.head, other)
        new_beta = (
            None
            if self.beta is None
            else torch.einsum(defining_str, self.beta, other.abs())
        )
        new_errors = (
            None
            if self.errors is None
            else torch.einsum(
                f"i{input_self_str},{input_other_str} -> i{output_str}",
                self.errors,
                other,
            )
        )
        return HybridZonotope(new_head, new_beta, new_errors, self.domain)
    def bmm(self, other: Tensor) -> "HybridZonotope":
        """Batched matrix multiply; non-3D inputs are unsqueezed at dim 1 and
        re-squeezed afterwards (errors at dim 2 due to the leading eps dim)."""
        if self.dim != 3:
            self = self.unsqueeze(1)
            unsqueezed = True
        else:
            unsqueezed = False
        new_head = torch.bmm(self.head, other)
        new_beta = (
            None
            if self.beta is None
            else torch.bmm(self.beta.view(-1, *self.head.shape[1:]), other)
        )
        new_errors = None if self.errors is None else torch.matmul(self.errors, other)
        if unsqueezed:
            new_head = new_head.squeeze(1)
            new_beta = None if new_beta is None else new_beta.squeeze(1)
            new_errors = None if new_errors is None else new_errors.squeeze(1 + 1)
        return HybridZonotope(new_head, new_beta, new_errors, self.domain)
def rev_matmul(self, other: Tensor) -> "HybridZonotope":
return HybridZonotope(
other.matmul(self.head),
None if self.beta is None else other.abs().matmul(self.beta),
None if self.errors is None else other.matmul(self.errors),
self.domain,
)
    def fft(self) -> HybridZonotope:
        """2D FFT (real part only); linear, so applied to head and errors.

        Box radii are not supported here (beta must be None).
        """
        assert self.beta is None
        return HybridZonotope(
            torch.fft.fft2(self.head).real,
            None,
            None if self.errors is None else torch.fft.fft2(self.errors).real,
            self.domain,
        )
    def batch_norm(self, bn: BatchNorm2d) -> "HybridZonotope":
        """Batch norm as the affine map x * c + b with c = w/sqrt(var+eps).

        Uses the module's current (training) or running (eval) statistics;
        beta is scaled by |c| to stay a valid radius.
        """
        view_dim_list = [1, -1] + (self.head.dim() - 2) * [1]
        # self_stat_dim_list = [0, 2, 3] if self.head.dim()==4 else [0]
        # if bn.training:
        #     momentum = 1 if bn.momentum is None else bn.momentum
        #     mean = self.head.mean(dim=self_stat_dim_list).detach()
        #     var = self.head.var(unbiased=False, dim=self_stat_dim_list).detach()
        #     if bn.running_mean is not None and bn.running_var is not None and bn.track_running_stats:
        #         bn.running_mean = bn.running_mean * (1 - momentum) + mean * momentum
        #         bn.running_var = bn.running_var * (1 - momentum) + var * momentum
        #     else:
        #         bn.running_mean = mean
        #         bn.running_var = var
        if bn.training:
            mean: Tensor = bn.current_mean
            var: Tensor = bn.current_var
        else:
            assert bn.running_mean is not None
            assert bn.running_var is not None
            mean = bn.running_mean
            var = bn.running_var
        c = bn.weight / torch.sqrt(var + bn.eps)
        b = -mean * c + bn.bias
        new_head = self.head * c.view(*view_dim_list) + b.view(*view_dim_list)
        new_errors = (
            None
            if self.errors is None
            else self.errors * c.view(*([1] + view_dim_list))
        )
        new_beta = (
            None if self.beta is None else self.beta * c.abs().view(*view_dim_list)
        )
        return HybridZonotope(new_head, new_beta, new_errors, self.domain)
# def batch_norm(self, bn, mean, var):
# view_dim_list = [1, -1]+(self.head.dim()-2)*[1]
# assert mean is not None and var is not None
# c = (bn.weight / torch.sqrt(var + bn.eps))
# b = (-mean*c + bn.bias)
# new_head = self.head*c.view(*view_dim_list)+b.view(*view_dim_list)
# new_errors = None if self.errors is None else self.errors * c.view(*([1]+view_dim_list))
# new_beta = None if self.beta is None else self.beta * c.abs().view(*view_dim_list)
# return HybridZonotope(new_head, new_beta, new_errors, self.domain)
    @staticmethod
    def cat(zonos: List[HybridZonotope], dim: int = 0) -> HybridZonotope:  # type: ignore [override]
        """Concatenate zonotopes along ``dim``.

        Missing betas become zero radii. Error terms are kept independent: the
        result's error tensor is block-diagonal over the inputs' eps dims, so
        its eps count is the sum of the inputs' eps counts.
        """
        dtype = zonos[0].head.dtype
        device = zonos[0].head.device
        new_head = torch.cat([x.head for x in zonos], dim)
        new_beta = torch.cat(
            [x.beta if x.beta is not None else torch.zeros_like(x.head) for x in zonos],
            dim,
        )
        actual_dim = dim if dim >= 0 else new_head.dim() + dim
        errors = [zono.errors for zono in zonos if zono.errors is not None]
        if len(errors) > 0:
            n_err = sum([x.shape[0] for x in errors])
            # Transpose the concat axis next to the eps axis so each input's
            # errors can be written into its own (eps, slice) block.
            new_errors = torch.zeros(
                (n_err, *new_head.shape), dtype=dtype, device=device
            ).transpose(1, actual_dim + 1)
            i = 0
            j = 0
            for error in errors:
                error = error.transpose(1, actual_dim + 1)
                new_errors[i : i + error.shape[0], j : j + error.shape[1]] = error
                i += error.shape[0]
                j += error.shape[1]
            new_errors = new_errors.transpose(1, actual_dim + 1)
        else:
            new_errors = None
        return HybridZonotope(new_head, new_beta, new_errors, zonos[0].domain)
    def relu(
        self,
        deepz_lambda: Optional[Tensor] = None,
        bounds: Optional[Tuple[Tensor, Tensor]] = None,
    ) -> Tuple["HybridZonotope", Optional[Tensor]]:
        """Abstract ReLU transformer.

        box: bounds are ReLU'd and re-boxed. hbox: crossing entries get the
        [0, ub] box. zono/zono_iter: DeepZ relaxation; for zono_iter the slope
        of crossing neurons can be a learnable ``deepz_lambda``.

        :param bounds: optional refined (lb, ub) to intersect with
        :return: (transformed element, deepz_lambda or None)
        """
        lb, ub = self.concretize()
        D = 1e-6
        dtype = self.dtype
        if self.domain == "box":
            min_relu, max_relu = F.relu(lb), F.relu(ub)
            return (
                HybridZonotope(
                    0.5 * (max_relu + min_relu),
                    0.5 * (max_relu - min_relu),
                    None,
                    self.domain,
                ),
                None,
            )
        elif self.domain == "hbox":
            is_under = ub <= 0
            is_above = (ub > 0) & (lb >= 0)
            is_cross = (ub > 0) & (lb < 0)
            ub_half = ub / 2
            new_head = self.head.clone()
            new_head[is_under] = 0
            new_head[is_cross] = ub_half[is_cross]
            if self.beta is None:
                new_beta = None
            else:
                new_beta = self.beta.clone()
                new_beta[is_under] = 0
                new_beta[is_cross] = ub_half[is_cross]
            if self.errors is None:
                new_errors = None
            else:
                new_errors = self.errors.clone()
                # Only stably-positive entries keep their zonotope errors.
                new_errors[:, ~is_above] = 0.0
            return HybridZonotope(new_head, new_beta, new_errors, self.domain), None
        elif "zono" in self.domain:
            if bounds is not None:
                lb_refined, ub_refined = bounds
                lb = torch.max(lb_refined, lb)
                ub = torch.min(ub_refined, ub)
            is_cross = (lb < 0) & (ub > 0)
            relu_lambda = torch.where(is_cross, ub / (ub - lb + D), (lb >= 0).to(dtype))
            if self.domain == "zono_iter":
                if deepz_lambda is not None:
                    # assert (deepz_lambda >= 0).all() and (deepz_lambda <= 1).all()
                    if not ((deepz_lambda >= 0).all() and (deepz_lambda <= 1).all()):
                        deepz_lambda.data = relu_lambda.data
                    relu_lambda_cross = deepz_lambda
                else:
                    deepz_lambda = torch.nn.Parameter(-relu_lambda.data)
                    relu_lambda_cross = relu_lambda
                # Offset chosen so the relaxation is sound for either slope.
                relu_mu_cross = torch.where(
                    relu_lambda_cross < relu_lambda,
                    0.5 * ub * (1 - relu_lambda_cross),
                    -0.5 * relu_lambda_cross * lb,
                )
                relu_lambda = torch.where(
                    is_cross, relu_lambda_cross, (lb >= 0).to(dtype)
                )
                relu_mu = torch.where(
                    is_cross, relu_mu_cross, torch.zeros(lb.size()).to(self.device)
                )
            else:
                relu_mu = torch.where(
                    is_cross,
                    -0.5 * ub * lb / (ub - lb + D),
                    torch.zeros(lb.size()).to(self.device),
                )
                deepz_lambda = None
            assert (not torch.isnan(relu_mu).any()) and (
                not torch.isnan(relu_lambda).any()
            )
            new_head = self.head * relu_lambda + relu_mu
            old_errs = self.errors * relu_lambda
            new_errs = self.get_new_errs(is_cross, new_head, relu_mu)
            new_errors = torch.cat([old_errs, new_errs], dim=0)
            assert (not torch.isnan(new_head).any()) and (
                not torch.isnan(new_errors).any()
            )
            return HybridZonotope(new_head, None, new_errors, self.domain), deepz_lambda
        else:
            raise RuntimeError(
                "Error applying ReLU with unkown domain: {}".format(self.domain)
            )
    def sum(self, dim: int, reduce_dim: bool = False) -> "HybridZonotope":
        """Sum along ``dim``; errors reduce along dim+1 (leading eps axis).

        NOTE(review): a negative ``dim`` would misalign the errors axis
        (dim + 1) — callers appear to pass non-negative dims; confirm.
        """
        new_head = self.head.sum(dim=dim)
        new_beta = None if self.beta is None else self.beta.abs().sum(dim=dim)
        new_errors = None if self.errors is None else self.errors.sum(dim=dim + 1)
        if not reduce_dim:
            new_head = new_head.unsqueeze(dim)
            new_beta = None if new_beta is None else new_beta.unsqueeze(dim)
            new_errors = None if new_errors is None else new_errors.unsqueeze(dim + 1)
        assert not torch.isnan(new_head).any()
        assert new_beta is None or not torch.isnan(new_beta).any()
        assert new_errors is None or not torch.isnan(new_errors).any()
        return HybridZonotope(new_head, new_beta, new_errors, self.domain)
def unsqueeze(self, dim: int) -> "HybridZonotope":
new_head = self.head.unsqueeze(dim)
new_beta = None if self.beta is None else self.beta.unsqueeze(dim)
new_errors = None if self.errors is None else self.errors.unsqueeze(dim + 1)
assert not torch.isnan(new_head).any()
assert new_beta is None or not torch.isnan(new_beta).any()
assert new_errors is None or not torch.isnan(new_errors).any()
return HybridZonotope(new_head, new_beta, new_errors, self.domain)
def squeeze(self, dim: Union[None, int] = None) -> "HybridZonotope":
if dim is None:
new_head = self.head.squeeze()
new_beta = None if self.beta is None else self.beta.squeeze()
new_errors = None if self.errors is None else self.errors.squeeze()
else:
new_head = self.head.squeeze(dim)
new_beta = None if self.beta is None else self.beta.squeeze(dim)
new_errors = None if self.errors is None else self.errors.squeeze(dim + 1)
assert not torch.isnan(new_head).any()
assert new_beta is None or not torch.isnan(new_beta).any()
assert new_errors is None or not torch.isnan(new_errors).any()
return HybridZonotope(new_head, new_beta, new_errors, self.domain)
    def add(
        self, summand_zono: "HybridZonotope", shared_errors: int = 0
    ) -> "HybridZonotope":
        """Minkowski sum of two zonotopes with broadcastable shapes.

        The first ``shared_errors`` eps terms are treated as identical noise
        symbols (added coefficient-wise); the remaining terms of each operand
        are kept independent by zero-padding the other operand's coefficients.
        ``shared_errors < 0`` means "share all if the eps counts match".
        """
        assert all(
            [
                x == y or x == 1 or y == 1
                for x, y in zip(self.head.shape[::-1], summand_zono.head.shape[::-1])
            ]
        )
        dtype = self.head.dtype
        device = self.head.device
        new_head = self.head + summand_zono.head
        if self.beta is None and summand_zono.beta is None:
            new_beta = None
        elif self.beta is not None and summand_zono.beta is not None:
            new_beta = self.beta.abs() + summand_zono.beta.abs()
        else:
            new_beta = self.beta if self.beta is not None else summand_zono.beta
        if self.errors is None:
            new_errors = None
        elif self.errors is not None and summand_zono.errors is not None:
            if shared_errors < 0:
                shared_errors = (
                    self.errors.size(0)
                    if self.errors.size(0) == summand_zono.errors.size(0)
                    else 0
                )
            # Shape cast errors to output shape (multiplying by ones
            # broadcasts the coefficients to the broadcast result shape).
            self_errors = torch.cat(
                [
                    self.errors
                    * torch.ones(
                        (self.errors.size(0),) + tuple(summand_zono.head.shape),
                        dtype=dtype,
                        device=device,
                    ),
                    torch.zeros(
                        (summand_zono.errors.size(0) - shared_errors,)
                        + tuple(self.head.shape),
                        dtype=dtype,
                        device=device,
                    )
                    * torch.ones(
                        (summand_zono.errors.size(0) - shared_errors,)
                        + tuple(summand_zono.head.shape),
                        dtype=dtype,
                        device=device,
                    ),
                ],
                dim=0,
            )
            summand_errors = torch.cat(
                [
                    summand_zono.errors[:shared_errors]
                    * torch.ones_like(self.errors[:shared_errors]),
                    torch.zeros(
                        (self.errors.size(0) - shared_errors,) + tuple(self.head.shape),
                        dtype=dtype,
                        device=device,
                    )
                    * torch.ones(
                        (self.errors.size(0) - shared_errors,)
                        + tuple(summand_zono.head.shape),
                        dtype=dtype,
                        device=device,
                    ),
                    summand_zono.errors[shared_errors:]
                    * torch.ones(
                        (summand_zono.errors.size(0) - shared_errors,)
                        + tuple(self.head.shape),
                        dtype=dtype,
                        device=device,
                    ),
                ],
                dim=0,
            )
            new_errors = self_errors + summand_errors
        else:
            new_errors = self.errors if self.errors is not None else summand_zono.errors
        assert not torch.isnan(new_head).any()
        assert new_beta is None or not torch.isnan(new_beta).any()
        assert new_errors is None or not torch.isnan(new_errors).any()
        new_domain = (
            summand_zono.domain
            if new_beta is None
            else ("hbox" if new_errors is not None else "box")
        )
        return HybridZonotope(new_head, new_beta, new_errors, new_domain)
    def prod(
        self,
        factor_zono: "HybridZonotope",
        shared_errors: Union[int, None] = None,
        low_mem: bool = False,
    ) -> "HybridZonotope":
        """Elementwise product of two abstract elements.

        For box/hbox (or ``low_mem``) the result is the interval product of
        the concretized bounds. For zonotopes, the bilinear term is expanded:
        linear-in-eps coefficients are kept exactly and the quadratic part is
        overapproximated by fresh error terms.
        """
        dtype = self.head.dtype
        device = self.head.device
        lb_self, ub_self = self.concretize()
        lb_other, ub_other = factor_zono.concretize()
        if self.domain == factor_zono.domain:
            domain = self.domain
        elif "box" in [self.domain, factor_zono.domain]:
            domain = "box"
        elif "hbox" in [self.domain, factor_zono.domain]:
            domain = "hbox"
        else:
            assert False
        if domain in ["box", "hbox"] or low_mem:
            # Interval product: min/max over the four corner products.
            min_prod = torch.min(
                torch.min(
                    torch.min(lb_self * lb_other, lb_self * ub_other),
                    ub_self * lb_other,
                ),
                ub_self * ub_other,
            )
            max_prod = torch.max(
                torch.max(
                    torch.max(lb_self * lb_other, lb_self * ub_other),
                    ub_self * lb_other,
                ),
                ub_self * ub_other,
            )
            return HybridZonotope(
                0.5 * (max_prod + min_prod), 0.5 * (max_prod - min_prod), None, "box"
            )
        assert self.beta is None
        assert self.errors is not None, "Special case not implemented"
        assert factor_zono.errors is not None, "Special case not implemented"
        n_self_errors = 0 if self.errors is None else self.errors.shape[0]
        n_factor_errors = (
            0 if factor_zono.errors is None else factor_zono.errors.shape[0]
        )
        assert all(
            [
                x == y or x == 1 or y == 1
                for x, y in zip(self.head.shape[::-1], factor_zono.head.shape[::-1])
            ]
        )
        if shared_errors is None:
            shared_errors = 0
        if shared_errors == -1:
            shared_errors = n_self_errors if n_self_errors == n_factor_errors else 0
        # Shape cast to output shape (align eps dims of both operands, padding
        # the non-shared terms of each operand with zeros for the other).
        self_errors = torch.cat(
            [
                self.errors
                * torch.ones(
                    (n_self_errors,) + tuple(factor_zono.head.shape),
                    dtype=dtype,
                    device=device,
                ),
                torch.zeros(
                    (n_factor_errors - shared_errors,) + tuple(self.head.shape),
                    dtype=dtype,
                    device=device,
                )
                * torch.ones(
                    (n_factor_errors - shared_errors,) + tuple(factor_zono.head.shape),
                    dtype=dtype,
                    device=device,
                ),
            ],
            dim=0,
        )
        factor_errors = torch.cat(
            [
                factor_zono.errors[:shared_errors]
                * torch.ones_like(self.errors[:shared_errors]),
                torch.zeros(
                    (n_self_errors - shared_errors,) + tuple(self.head.shape),
                    dtype=dtype,
                    device=device,
                )
                * torch.ones(
                    (n_self_errors - shared_errors,) + tuple(factor_zono.head.shape),
                    dtype=dtype,
                    device=device,
                ),
                factor_zono.errors[shared_errors:]
                * torch.ones(
                    (n_factor_errors - shared_errors,) + tuple(self.head.shape),
                    dtype=dtype,
                    device=device,
                ),
            ],
            dim=0,
        )
        lin_err = (
            self.head.unsqueeze(dim=0) * factor_errors
            + factor_zono.head.unsqueeze(dim=0) * self_errors
        )
        quadr_const = self_errors * factor_errors
        quadr_error_tmp = self_errors.unsqueeze(1) * factor_errors.unsqueeze(0)
        # Symmetrized magnitude of all cross terms, minus the eps_i^2 part
        # whose expected value is folded into the new center below.
        quadr_error_tmp = 1.0 / 2.0 * (
            quadr_error_tmp + quadr_error_tmp.transpose(1, 0)
        ).abs().sum(dim=1).sum(dim=0) - 1.0 / 2.0 * quadr_const.abs().sum(dim=0)
        new_head = self.head * factor_zono.head + 1.0 / 2.0 * quadr_const.sum(dim=0)
        old_errs = lin_err
        new_errs = self.get_new_errs(
            torch.ones(self.head.shape), new_head, quadr_error_tmp
        )
        new_errors = torch.cat([old_errs, new_errs], dim=0)
        assert (not torch.isnan(new_head).any()) and (not torch.isnan(new_errors).any())
        return HybridZonotope(new_head, None, new_errors, self.domain)
    def upsample(
        self, size: int, mode: str, align_corners: bool, consolidate_errors: bool = True
    ) -> "HybridZonotope":
        """Upsample all components with F.interpolate.

        If ``consolidate_errors`` is set, the result is re-boxed from its
        concretized bounds (head +/- accumulated delta), collapsing all error
        terms.
        """
        new_head = F.interpolate(
            self.head, size=size, mode=mode, align_corners=align_corners
        )
        delta = 0
        assert mode in ["nearest", "linear", "bilinear", "trilinear"], "Upsample"
        if self.beta is not None:
            new_beta = F.interpolate(
                self.beta, size=size, mode=mode, align_corners=align_corners
            )
            delta = delta + new_beta
        else:
            new_beta = None
        if self.errors is not None:
            errors_resized = self.errors.view(-1, *self.head.shape[1:])
            new_errors = F.interpolate(
                errors_resized, size=size, mode=mode, align_corners=align_corners
            )
            new_errors = new_errors.view(-1, *new_head.shape)
            delta = delta + new_errors.abs().sum(0)
        else:
            new_errors = None
        if consolidate_errors:
            return HybridZonotope.construct_from_bounds(
                new_head - delta, new_head + delta, domain=self.domain
            )
        else:
            return HybridZonotope(new_head, new_beta, new_errors, self.domain)
    def beta_to_error(self) -> "HybridZonotope":
        """Convert the box radius into explicit (axis-aligned) error terms and
        append them to the existing error tensor."""
        if self.beta is None:
            return HybridZonotope(self.head, None, self.errors, self.domain)
        new_errors = self.get_error_matrix(
            self.head, error_idx=self.beta != 0
        ) * self.beta.unsqueeze(0)
        new_errors = torch.cat(
            ([] if self.errors is None else [self.errors]) + [new_errors], dim=0
        )
        return HybridZonotope(self.head, None, new_errors, self.domain)
def concretize(self) -> Tuple[Tensor, Tensor]:
delta = torch.zeros_like(self.head)
if self.beta is not None:
delta = delta + self.beta
if self.errors is not None:
delta = delta + self.errors.abs().sum(0)
return self.head - delta, self.head + delta
def avg_width(self) -> Tensor:
lb, ub = self.concretize()
return (ub - lb).mean()
    def is_greater(
        self, i: int, j: int, threshold_min: Union[Tensor, float] = 0
    ) -> Tuple[Tensor, Tensor]:
        """Lower bound on logit[i] - logit[j] and whether it exceeds
        ``threshold_min``.

        Shared error terms cancel exactly in the coefficient difference;
        independent box radii add up.
        """
        diff_head = self.head[:, i] - self.head[:, j]
        delta = diff_head
        if self.errors is not None:
            diff_errors = (self.errors[:, :, i] - self.errors[:, :, j]).abs().sum(dim=0)
            delta -= diff_errors
        if self.beta is not None:
            diff_beta = (self.beta[:, i] + self.beta[:, j]).abs()
            delta -= diff_beta
        return delta, delta > threshold_min
    def get_min_diff(self, i: int, j: int) -> Tensor:
        """returns minimum of logit[i] - logit[j]"""
        return self.is_greater(i, j)[0]
    def verify(
        self,
        targets: Tensor,
        threshold_min: Union[Tensor, float] = 0,
        corr_only: bool = False,
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """Check which classes are provably dominant.

        :param targets: ground-truth labels
        :param corr_only: only test dominance of the target classes
        :return: (verified, verified_corr, threshold) — uint8 masks for "some
            class verified" and "target class verified", plus the margin.
        """
        n_class = self.head.size()[1]
        dtype = self.dtype
        verified = torch.zeros(targets.size(), dtype=torch.uint8).to(self.head.device)
        verified_corr = torch.zeros(targets.size(), dtype=torch.uint8).to(
            self.head.device
        )
        if n_class == 1:
            # Binary case with a single logit: decide via sign of the bounds.
            # assert len(targets) == 1
            verified_list = torch.cat(
                [
                    self.concretize()[1] < threshold_min,
                    self.concretize()[0] >= threshold_min,
                ],
                dim=1,
            )
            verified[:] = torch.any(verified_list, dim=1)
            verified_corr[:] = verified_list.gather(
                dim=1, index=targets.long().unsqueeze(dim=1)
            ).squeeze(1)
            threshold = (
                torch.cat(self.concretize(), 1)
                .gather(dim=1, index=(1 - targets).long().unsqueeze(dim=1))
                .squeeze(1)
            )
        else:
            threshold = np.inf * torch.ones(targets.size(), dtype=dtype).to(
                self.head.device
            )
            for i in range(n_class):
                if corr_only and i not in targets:
                    continue
                isg = torch.ones(targets.size(), dtype=torch.uint8).to(self.head.device)
                margin = np.inf * torch.ones(targets.size(), dtype=dtype).to(
                    self.head.device
                )
                # Class i is verified iff it provably beats every other class.
                for j in range(n_class):
                    if i != j and isg.any():
                        margin_tmp, ok = self.is_greater(i, j, threshold_min)
                        margin = torch.min(margin, margin_tmp)
                        isg = isg & ok.to(torch.uint8)
                verified = verified | isg
                verified_corr = verified_corr | (targets.eq(i).byte() & isg)
                threshold = torch.where(targets.eq(i).byte(), margin, threshold)
        return verified, verified_corr, threshold
    def get_wc_logits(self, targets: Tensor, use_margins: bool = False) -> Tensor:
        """Worst-case logits: lower bound at the target class, upper bound
        elsewhere; optionally via per-class margin matrices."""
        n_class = self.shape[-1]
        device = self.head.device
        dtype = self.dtype
        if use_margins:

            def get_c_mat(n_class: int, target: Tensor) -> Tensor:
                # Rows encode target-vs-class margin queries.
                return torch.eye(n_class, dtype=dtype)[target].unsqueeze(
                    dim=0
                ) - torch.eye(n_class, dtype=dtype)

            if n_class > 1:
                c = torch.stack([get_c_mat(n_class, x) for x in targets], dim=0)
                # Rebind self to the margin-transformed element.
                self = -(self.unsqueeze(dim=1) * c.to(device)).sum(
                    dim=2, reduce_dim=True
                )
        batch_size = targets.size()[0]
        lb, ub = self.concretize()
        if n_class == 1:
            wc_logits = torch.cat([ub, lb], dim=1)
            wc_logits = wc_logits.gather(dim=1, index=targets.long().unsqueeze(1))
        else:
            wc_logits = ub.clone()
            wc_logits[np.arange(batch_size), targets] = lb[
                np.arange(batch_size), targets
            ]
        return wc_logits
    def ce_loss(self, targets: Tensor) -> Tensor:
        """Per-sample cross-entropy on the worst-case logits (BCE for a
        single-logit head)."""
        wc_logits = self.get_wc_logits(targets)
        if wc_logits.size(1) == 1:
            return F.binary_cross_entropy_with_logits(
                wc_logits.squeeze(1), targets.to(self.dtype), reduction="none"
            )
        else:
            return F.cross_entropy(wc_logits, targets.long(), reduction="none")
def to(self, castable: Union[torch.dtype, str, torch.device]) -> "HybridZonotope":
return HybridZonotope(
self.head.to(castable),
None if self.beta is None else self.beta.to(castable),
None if self.errors is None else self.errors.to(castable),
self.domain,
)
    def basis_transform(
        self,
        new_basis: Tensor,
        ERROR_EPS_ADD: float = 0.0,
        ERROR_EPS_MUL: float = 0.0,
    ) -> Tuple["HybridZonotope", Tensor]:
        """Overapproximate the error terms in a new basis.

        :return: (transformed zonotope, per-direction scaling x)
        NOTE(review): ``torch.solve`` is deprecated/removed in newer torch —
        verify against the version pin used elsewhere in this file.
        """
        if self.errors is None:  # No errors to transform in a different basis
            return self, torch.ones_like(self.head) - 1e-8
        # We solve for the coordinates (x) of curr_basis (B) in new_basis (A)
        # I.e. we solve Ax=b
        A = new_basis
        B = torch.flatten(self.errors, start_dim=1).T
        device = self.device
        dtype = self.dtype
        if A.shape[0] < 500 or A.shape[0] != A.shape[1]:
            # depending on the size of the matrices different methods are faster
            if isinstance(A, torch.Tensor):
                A = A.cpu().detach().numpy()
            B = B.cpu().detach().numpy()
            sol = np.linalg.lstsq(A, B, rcond=None)[0]
            sol = torch.tensor(sol, dtype=dtype, device=device)
        else:
            if not isinstance(A, torch.Tensor):
                A = torch.tensor(A)
            sol = torch.solve(B, A).solution
        assert sol is not None, "No solution found"
        assert torch.isclose(
            torch.matmul(A, sol), B, atol=1e-7, rtol=1e-6
        ).all(), "Projection into new base errors failed"
        # We add the component ERROR_EPS_ADD to ensure the resulting error matrix has full rank and to compensate for potential numerical errors
        x = torch.sum(sol.abs(), dim=1) * (1 + ERROR_EPS_MUL) + ERROR_EPS_ADD
        new_errors = (
            torch.tensor(x.reshape(1, -1) * new_basis, dtype=dtype)
            .T.unsqueeze(1)
            .view(-1, *self.head.shape)
        )
        return HybridZonotope(self.head, self.beta, new_errors, self.domain), x
    def concretize_into_basis(self, basis: Tensor) -> Tuple[Tensor, Tensor]:
        """Concretize bounds expressed in the coordinates of ``basis``.

        NOTE(review): if ``basis`` is not a torch.Tensor, ``new_head`` is
        never assigned and the return raises NameError — confirm callers
        always pass a Tensor.
        """
        shp = self.head.shape
        all_as_errors = self.beta_to_error()
        delta = all_as_errors.basis_transform(basis)[1]
        if isinstance(basis, torch.Tensor):
            A = basis.cpu().detach().numpy()
            B = torch.flatten(self.head, start_dim=1).cpu().detach().numpy().T
            sol = np.linalg.lstsq(A, B, rcond=None)[0].T
            new_head = torch.tensor(sol, dtype=self.dtype, device=self.device)
        return (new_head - delta).view(shp), (new_head + delta).view(shp)
def get_new_basis(
self,
method: Optional[str] = "pca",
errors_to_get_basis_from: Optional[Tensor] = None,
) -> Tensor:
"""
Compute a bais of error directions from errors_to_get_basis_from
:param errors_to_get_basis_from: Error matrix to be overapproximated
:param method: "pca" or "pca_zero_mean"
:return: a basis of error directions
"""
if errors_to_get_basis_from is None:
errors_to_get_basis_from = self.errors
assert (
errors_to_get_basis_from is not None
), "No errors to compute basis from"
if method == "pca":
U, S, Vt = np.linalg.svd(
(errors_to_get_basis_from - errors_to_get_basis_from.mean(0)).cpu(),
full_matrices=False,
)
max_abs_cols = np.argmax(np.abs(U), axis=0)
signs = np.sign(U[max_abs_cols, range(U.shape[1])])
Vt *= signs[:, np.newaxis]
new_basis_vectors = Vt.T
# Torch version is much (factor 6) slower despite avoiding move of data to cpu
# U, S, V = torch.svd(errors_to_get_basis_from - errors_to_get_basis_from.mean(0), some=True)
# max_abs_cols = U.abs().argmax(0)
# signs = U[max_abs_cols, range(U.shape[1])].sign()
# new_basis_vectors_2 = V*signs.unsqueeze(0)
elif method == "pca_zero_mean":
U, S, Vt = np.linalg.svd(
errors_to_get_basis_from.cpu(), full_matrices=False
)
max_abs_cols = np.argmax(np.abs(U), axis=0)
signs = np.sign(U[max_abs_cols, range(U.shape[1])])
Vt *= signs[:, np.newaxis]
new_basis_vectors = Vt.T
return new_basis_vectors
| 56,944 | 37.870307 | 185 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_domains/ai_util.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import torch
from torch import Tensor
if TYPE_CHECKING:
from src.abstract_layers.abstract_bn2d import BatchNorm2d
def clamp_image(
    x: Tensor, eps: Union[Tensor, float], clamp_min: float = 0, clamp_max: float = 1
) -> Tuple[Tensor, Tensor]:
    """Intersect the eps-ball around x with [clamp_min, clamp_max].

    Returns the (center, radius) of the resulting box.
    """
    lo = (x - eps).clamp(min=clamp_min)
    hi = (x + eps).clamp(max=clamp_max)
    center = 0.5 * (hi + lo)
    radius = 0.5 * (hi - lo)
    return center, radius
def head_from_bounds(min_x: Tensor, max_x: Tensor) -> Tuple[Tensor, Tensor]:
    """Convert box bounds [min_x, max_x] into (center, radius) form."""
    center = 0.5 * (min_x + max_x)
    radius = 0.5 * (max_x - min_x)
    return center, radius
class AbstractElement:
    """Interface for abstract-domain elements (boxes, zonotopes, DeepPoly, ...).

    Concrete domains override the transformer stubs below; the base class only
    provides query evaluation and a containment check, both built on top of
    the domain-specific `concretize` / `einsum` implementations.
    """

    def __init__(self) -> None:
        pass

    def __neg__(self) -> AbstractElement:
        raise NotImplementedError

    def __sub__(
        self, other: Union[Tensor, float, int, "AbstractElement"]
    ) -> "AbstractElement":
        raise NotImplementedError

    def __add__(
        self, other: Union[Tensor, float, int, "AbstractElement"]
    ) -> "AbstractElement":
        raise NotImplementedError

    @property
    def shape(self) -> torch.Size:
        raise NotImplementedError

    @property
    def dtype(self) -> torch.dtype:
        raise NotImplementedError

    @property
    def device(self) -> torch.device:
        raise NotImplementedError

    @staticmethod
    def cat(x: List["AbstractElement"], dim: int) -> "AbstractElement":
        raise NotImplementedError

    def max_center(self) -> Tensor:
        raise NotImplementedError

    def conv2d(
        self,
        weight: Tensor,
        bias: Optional[Tensor],
        stride: int,
        padding: int,
        dilation: int,
        groups: int,
    ) -> "AbstractElement":
        raise NotImplementedError

    def convtranspose2d(
        self,
        weight: Tensor,
        bias: Optional[Tensor],
        stride: int,
        padding: int,
        out_padding: int,
        groups: int,
        dilation: int,
    ) -> "AbstractElement":
        raise NotImplementedError

    def avg_pool2d(
        self, kernel_size: int, stride: int, padding: int
    ) -> "AbstractElement":
        raise NotImplementedError

    def batch_norm(self, bn: BatchNorm2d) -> "AbstractElement":
        raise NotImplementedError

    def einsum(self, defining_str: str, other: Tensor) -> AbstractElement:
        raise NotImplementedError

    def flatten(self) -> "AbstractElement":
        raise NotImplementedError

    def max_pool2d(
        self,
        kernel_size: Tuple[int, int],
        stride: Tuple[int, int],
        padding: Tuple[int, int],
    ) -> "AbstractElement":
        raise NotImplementedError

    def pad(
        self, kernel_size: Tuple[int, ...], mode: str, value: float
    ) -> "AbstractElement":
        raise NotImplementedError

    def upsample(
        self, size: int, mode: str, align_corners: bool, consolidate_errors: bool
    ) -> "AbstractElement":
        raise NotImplementedError

    def linear(self, weight: Tensor, bias: Tensor) -> "AbstractElement":
        raise NotImplementedError

    def size(self) -> Union[Tuple[int, ...], int]:
        raise NotImplementedError

    def sum(self, dim: int, reduce_dim: bool) -> "AbstractElement":
        raise NotImplementedError

    def view(self, shape_tuple: Tuple[int, ...]) -> "AbstractElement":
        raise NotImplementedError

    def multiply_interval(self, interval: Tuple[Tensor, Tensor]) -> "AbstractElement":
        raise NotImplementedError

    def normalize(self, mean: Tensor, sigma: Tensor) -> "AbstractElement":
        raise NotImplementedError

    def clone(self) -> "AbstractElement":
        raise NotImplementedError

    def relu(
        self,
        deepz_lambda: Optional[Tensor] = None,
        bounds: Optional[Tuple[Tensor, Tensor]] = None,
    ) -> Tuple["AbstractElement", Optional[Tensor]]:
        raise NotImplementedError

    def sigmoid(
        self,
        step_size: float,
        max_x: float,
        deepz_lambda: Optional[Tensor] = None,
        bounds: Optional[Tuple[Tensor, Tensor]] = None,
        tangent_points: Optional[Tensor] = None,
    ) -> Tuple["AbstractElement", Optional[Tensor]]:
        raise NotImplementedError

    def slice(
        self,
        dim: int,
        starts: int,
        ends: int,
        steps: int,
    ) -> "AbstractElement":
        raise NotImplementedError

    def split(
        self, split_size_or_sections: Tuple[int, ...], dim: int
    ) -> Tuple[AbstractElement, ...]:
        raise NotImplementedError

    def concretize(self) -> Tuple[Tensor, Tensor]:
        raise NotImplementedError

    def evaluate_queries(
        self, query_matrix: Tensor, query_threshold: Optional[Tensor] = None
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor, AbstractElement]:
        """Soundly evaluate query_matrix @ x > query_threshold.

        Assumes the queries are identical for all batch elements.
        Returns (verified, falsified, query_lb, query_ub, abstract_query).
        """
        if query_threshold is None:
            query_threshold = torch.zeros_like(query_matrix[:, 0])
        queries = query_matrix.to(self.device).to(self.dtype)
        offsets = query_threshold.to(self.device).to(self.dtype)
        abs_query = self.einsum("bs, bqs -> bq", queries) - offsets
        query_lb, query_ub = abs_query.concretize()
        # a query is verified if even its lower bound is positive, falsified
        # if even its upper bound is negative
        verified = query_lb > 0
        falsified = query_ub < 0
        assert not (
            verified & falsified
        ).any(), "Should never verify and falsify a property"
        return verified, falsified, query_lb, query_ub, abs_query

    def may_contain_point(self, x: Tensor, D: float = 1e-7) -> bool:
        """Check whether x lies within the concretized bounds (tolerance D)."""
        lb, ub = self.concretize()
        inside = ((lb <= x + D) & (x - D <= ub)).all()
        if not inside:
            print(f"Max violation lb: {(lb-x).max()}; Max violation ub: {(x-ub).max()}")
        return inside  # type: ignore # this is a bool
def get_neg_pos_comp(x: Tensor) -> Tuple[Tensor, Tensor]:
    """Split x into its negative part (entries < 0) and non-negative part."""
    zeros = torch.zeros_like(x)
    neg = torch.where(x < 0, x, zeros)
    pos = torch.where(x >= 0, x, zeros)
    return neg, pos
| 6,159 | 28.615385 | 88 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/abstract_domains/DP_f.py | """
Based on DeepPoly_f from DiffAI (https://github.com/eth-sri/diffai/blob/master/ai.py)
"""
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from src.abstract_layers.abstract_sig_base import SigBase
from src.utilities.general import tensor_reduce
if TYPE_CHECKING:
from src.abstract_layers.abstract_bn2d import BatchNorm2d
from src.abstract_domains.ai_util import (
AbstractElement,
clamp_image,
get_neg_pos_comp,
head_from_bounds,
)
from src.abstract_domains.zonotope import HybridZonotope
class DeepPoly_f(AbstractElement):
    def __init__(
        self,
        inputs: HybridZonotope,
        x_l_coef: Tensor,
        x_u_coef: Tensor,
        x_l_bias: Optional[Tensor] = None,
        x_u_bias: Optional[Tensor] = None,
        input_error_map: Optional[Tensor] = None,
    ) -> None:
        """DeepPoly element: linear lower/upper bounds over the input box `inputs`.

        x_l_coef / x_u_coef have shape (k, *value_shape), where k is the number
        of input error terms; x_l_bias / x_u_bias default to zeros of
        value_shape. input_error_map maps each of the k error terms to a flat
        input index (identity by default).
        """
        super(DeepPoly_f, self).__init__()
        dtype = x_l_coef.dtype
        device = x_l_coef.device
        # linear coefficients of the lower/upper bound over the input terms
        self.x_l_coef = x_l_coef
        self.x_u_coef = x_u_coef
        # constant offsets of the lower/upper bound
        self.x_l_bias = (
            torch.zeros(x_l_coef.shape[1:], device=device, dtype=dtype)
            if x_l_bias is None
            else x_l_bias
        )
        self.x_u_bias = (
            torch.zeros(x_l_coef.shape[1:], device=device, dtype=dtype)
            if x_u_bias is None
            else x_u_bias
        )
        self.input_error_map = (
            input_error_map
            if input_error_map is not None
            else torch.arange(0, self.x_l_coef[0].numel())
        )
        # one mapping entry per error term
        assert self.input_error_map.shape[0] == self.x_l_coef.shape[0]
        self.inputs = inputs
        self.domain = "DPF"
    @classmethod
    def construct_from_noise(
        cls,
        x: Tensor,
        eps: Union[float, Tensor],
        data_range: Tuple[float, float] = (0, 1),
        dtype: Optional[torch.dtype] = None,
        domain: Optional[str] = None,
    ) -> "DeepPoly_f":
        """Build a DeepPoly_f element for the eps-ball around x, clipped to data_range."""
        # compute center and error terms from input, perturbation size and data range
        assert domain is None or domain == "DPF"
        if dtype is None:
            dtype = x.dtype
        if data_range is None:
            data_range = (-np.inf, np.inf)
        assert data_range[0] < data_range[1]
        x_center, x_beta = clamp_image(x, eps, data_range[0], data_range[1])
        x_center, x_beta = x_center.to(dtype=dtype), x_beta.to(dtype=dtype)
        return cls.construct(
            x_center, HybridZonotope.construct(x_center, x_beta, domain="box")
        )
    @classmethod
    def construct_constant(
        cls,
        x: Tensor,
        inputs: HybridZonotope,
        dtype: Optional[torch.dtype] = None,
        domain: Optional[str] = None,
    ) -> "DeepPoly_f":
        """Build a constant element: zero input coefficients, both biases equal to x."""
        assert domain is None or domain == "DPF"
        if dtype is None:
            dtype = x.dtype
        # one coefficient row per input term (or a single dummy row if no inputs)
        x_l_coef = torch.zeros(
            [inputs.head[0].numel() if inputs is not None else 1, *x.shape],
            dtype=dtype,
            device=x.device,
        )
        x_u_coef = torch.zeros(
            [inputs.head[0].numel() if inputs is not None else 1, *x.shape],
            dtype=dtype,
            device=x.device,
        )
        return cls(inputs, x_l_coef, x_u_coef, x, x)
    @classmethod
    def construct_from_bounds(
        cls,
        min_x: Tensor,
        max_x: Tensor,
        dtype: Optional[torch.dtype] = None,
        domain: Optional[str] = None,
    ) -> "DeepPoly_f":
        """Build a DeepPoly_f element for the box [min_x, max_x]."""
        dtype = torch.get_default_dtype() if dtype is None else dtype
        assert domain is None or domain == "DPF"
        assert min_x.shape == max_x.shape
        x_center, x_beta = head_from_bounds(min_x, max_x)
        x_center, x_beta = x_center.to(dtype=dtype), x_beta.to(dtype=dtype)
        return cls.construct(
            x_center, HybridZonotope.construct(x_center, x_beta, domain="box")
        )
    @classmethod
    def construct_from_zono(cls, input_zono: HybridZonotope) -> "DeepPoly_f":
        """Lift a pure zonotope (no beta term) into DeepPoly_f form.

        The zonotope's error coefficients become both the lower and the upper
        linear bound over a fresh [-1, 1] box of error inputs.
        """
        assert input_zono.beta is None
        assert input_zono.errors is not None
        x_l_coef = input_zono.errors.clone()
        x_u_coef = input_zono.errors.clone()
        x_l_bias = input_zono.head.clone()
        x_u_bias = input_zono.head.clone()
        base_box = HybridZonotope.construct_from_noise(
            torch.zeros_like(input_zono.head), eps=1, domain="box", data_range=(-1, 1)
        )
        return cls(base_box, x_l_coef, x_u_coef, x_l_bias, x_u_bias)
    @staticmethod
    def construct(x: Tensor, inputs: "HybridZonotope") -> "DeepPoly_f":
        """Build a DeepPoly_f over `inputs`, dropping zero-width input dimensions.

        Input dimensions whose box has zero width are treated as constants and
        encoded directly in the biases; only the remaining k dimensions get
        identity coefficient rows (and an entry in input_error_map).
        """
        lb_in, ub_in = inputs.concretize()
        # mask of input dims with nonzero width (taken over the batch)
        non_zero_width_dim = (
            (ub_in != lb_in).flatten(1).any(0)
        )  # ((ub_in - lb_in) > 0).flatten(1).any(0)
        k = int(non_zero_width_dim.sum().item())
        assert k > 0, "No nonzero dimension found"
        input_error_map = torch.arange(0, lb_in[0].numel())[non_zero_width_dim]
        # identity coefficients: error term j writes 1.0 into its input position
        x_l_coef = torch.zeros((x.shape[0], k * x[0].numel())).to(x.device)
        x_l_coef[
            :,
            torch.arange(k).to(lb_in.device) * x[0].numel()
            + non_zero_width_dim.nonzero()[:, 0],
        ] = 1.0
        x_l_coef = (
            x_l_coef.view(x.shape[0], k, -1)
            .permute(1, 0, 2)
            .view(k, *x.shape)
            .contiguous()
        )
        x_u_coef = x_l_coef.clone().detach()
        # zero-width dims carry their constant value in the biases instead
        x_l_bias = torch.where(
            non_zero_width_dim.view(1, *x.shape[1:]), torch.zeros_like(lb_in), lb_in
        )
        x_u_bias = torch.where(
            non_zero_width_dim.view(1, *x.shape[1:]), torch.zeros_like(lb_in), ub_in
        )
        return DeepPoly_f(
            inputs=inputs.flatten()[:, non_zero_width_dim],  # type: ignore # indexing with bool tensor
            x_l_coef=x_l_coef,
            x_u_coef=x_u_coef,
            x_l_bias=x_l_bias,
            x_u_bias=x_u_bias,
            input_error_map=input_error_map,
        )
def dim(self) -> int:
return self.x_l_coef.dim() - 1
@staticmethod
def cat(x: List["DeepPoly_f"], dim: int = 0) -> "DeepPoly_f": # type: ignore [override]
assert all([x[0].inputs == y.inputs for y in x])
actual_dim = dim if dim >= 0 else x[0].dim() + dim
assert 0 <= actual_dim < x[0].dim()
x_l_coef = torch.cat([x_i.x_l_coef for x_i in x], dim=actual_dim + 1)
x_u_coef = torch.cat([x_i.x_u_coef for x_i in x], dim=actual_dim + 1)
x_l_bias = torch.cat([x_i.x_l_bias for x_i in x], dim=actual_dim)
x_u_bias = torch.cat([x_i.x_u_bias for x_i in x], dim=actual_dim)
return DeepPoly_f(
x[0].inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, x[0].input_error_map
)
@staticmethod
def stack(x: List["DeepPoly_f"], dim: int = 0) -> "DeepPoly_f":
assert all([x[0].inputs == y.inputs for y in x])
actual_dim = dim if dim >= 0 else x[0].dim() + dim + 1
assert 0 <= actual_dim <= x[0].dim()
x_l_coef = torch.stack([x_i.x_l_coef for x_i in x], dim=actual_dim + 1)
x_u_coef = torch.stack([x_i.x_u_coef for x_i in x], dim=actual_dim + 1)
x_l_bias = torch.stack([x_i.x_l_bias for x_i in x], dim=actual_dim)
x_u_bias = torch.stack([x_i.x_u_bias for x_i in x], dim=actual_dim)
return DeepPoly_f(
x[0].inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, x[0].input_error_map
)
def size(self, idx: Optional[int] = None) -> Union[Tuple[int, ...], int]:
if idx is None:
return self.shape
else:
return self.shape[idx]
def view(self, size: Tuple[int, ...]) -> "DeepPoly_f":
input_terms = self.x_l_coef.shape[0]
return DeepPoly_f(
self.inputs,
self.x_l_coef.view(input_terms, *size),
self.x_u_coef.view(input_terms, *size),
self.x_l_bias.view(*size),
self.x_u_bias.view(*size),
self.input_error_map,
)
    @property
    def shape(self) -> torch.Size:
        """Shape of the concrete value (without the error-term dimension)."""
        return self.x_l_bias.shape
    @property
    def device(self) -> torch.device:
        """Device of the underlying tensors."""
        return self.x_l_bias.device
    @property
    def dtype(self) -> torch.dtype:
        """Dtype of the underlying tensors."""
        return self.x_l_bias.dtype
def flatten(self) -> "DeepPoly_f":
return self.view((*self.shape[:1], -1))
def normalize(self, mean: Tensor, sigma: Tensor) -> "DeepPoly_f":
return (self - mean) / sigma
def __sub__( # type: ignore[override]
self, other: Union[Tensor, float, int, DeepPoly_f]
) -> "DeepPoly_f":
if (
isinstance(other, torch.Tensor)
or isinstance(other, float)
or isinstance(other, int)
):
return DeepPoly_f(
self.inputs,
self.x_l_coef,
self.x_u_coef,
self.x_l_bias - other,
self.x_u_bias - other,
self.input_error_map,
)
elif isinstance(other, DeepPoly_f):
assert self.inputs == other.inputs
return DeepPoly_f(
self.inputs,
self.x_l_coef - other.x_u_coef,
self.x_u_coef - other.x_l_coef,
self.x_l_bias - other.x_u_bias,
self.x_u_bias - other.x_l_bias,
self.input_error_map,
)
else:
assert False, "Unknown type of other object"
def __neg__(self) -> "DeepPoly_f":
return DeepPoly_f(
self.inputs,
-self.x_u_coef,
-self.x_l_coef,
-self.x_u_bias,
-self.x_l_bias,
self.input_error_map,
)
def __add__( # type: ignore[override]
self, other: Union[Tensor, float, int, DeepPoly_f]
) -> "DeepPoly_f":
if (
isinstance(other, torch.Tensor)
or isinstance(other, float)
or isinstance(other, int)
):
return DeepPoly_f(
self.inputs,
self.x_l_coef,
self.x_u_coef,
self.x_l_bias + other,
self.x_u_bias + other,
self.input_error_map,
)
elif isinstance(other, DeepPoly_f):
assert self.inputs == other.inputs
return DeepPoly_f(
self.inputs,
self.x_l_coef + other.x_l_coef,
self.x_u_coef + other.x_u_coef,
self.x_l_bias + other.x_l_bias,
self.x_u_bias + other.x_u_bias,
self.input_error_map,
)
else:
assert False, "Unknown type of other object"
def __truediv__(self, other: Union[Tensor, float, int]) -> "DeepPoly_f":
if isinstance(other, torch.Tensor):
assert (other != 0).all()
x_l_coef = torch.where(
other >= 0, self.x_l_coef / other, self.x_u_coef / other
)
x_u_coef = torch.where(
other >= 0, self.x_u_coef / other, self.x_l_coef / other
)
x_l_bias = torch.where(
other >= 0, self.x_l_bias / other, self.x_u_bias / other
)
x_u_bias = torch.where(
other >= 0, self.x_u_bias / other, self.x_l_bias / other
)
elif isinstance(other, float) or isinstance(other, int):
assert other != 0
x_l_coef = self.x_l_coef / other if other >= 0 else self.x_u_coef / other
x_u_coef = self.x_u_coef / other if other >= 0 else self.x_l_coef / other
x_l_bias = self.x_l_bias / other if other >= 0 else self.x_u_bias / other
x_u_bias = self.x_u_bias / other if other >= 0 else self.x_l_bias / other
else:
assert False, "Unknown type of other object"
return DeepPoly_f(
self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
)
def __rmul__( # type: ignore # Complains about unsafe overlap with __mul__
self, other: Union[Tensor, float, int]
) -> "DeepPoly_f":
return self.__mul__(other)
def __mul__(self, other: Union[Tensor, float, int]) -> "DeepPoly_f":
if isinstance(other, torch.Tensor):
x_l_coef = torch.where(
other >= 0, self.x_l_coef * other, self.x_u_coef * other
)
x_u_coef = torch.where(
other >= 0, self.x_u_coef * other, self.x_l_coef * other
)
x_l_bias = torch.where(
other >= 0, self.x_l_bias * other, self.x_u_bias * other
)
x_u_bias = torch.where(
other >= 0, self.x_u_bias * other, self.x_l_bias * other
)
return DeepPoly_f(
self.inputs,
x_l_coef,
x_u_coef,
x_l_bias,
x_u_bias,
self.input_error_map,
)
elif isinstance(other, int) or isinstance(other, float):
x_l_coef = self.x_l_coef * other if other >= 0 else self.x_u_coef * other
x_u_coef = self.x_u_coef * other if other >= 0 else self.x_l_coef * other
x_l_bias = self.x_l_bias * other if other >= 0 else self.x_u_bias * other
x_u_bias = self.x_u_bias * other if other >= 0 else self.x_l_bias * other
return DeepPoly_f(
self.inputs,
x_l_coef,
x_u_coef,
x_l_bias,
x_u_bias,
self.input_error_map,
)
else:
assert False, "Unknown type of other object"
def __getitem__(self, indices: Tuple[int, ...]) -> "DeepPoly_f":
if not isinstance(indices, tuple):
indices = tuple([indices])
return DeepPoly_f(
self.inputs,
self.x_l_coef[(slice(None, None, None), *indices)],
self.x_u_coef[(slice(None, None, None), *indices)],
self.x_l_bias[indices],
self.x_u_bias[indices],
self.input_error_map,
)
def clone(self) -> "DeepPoly_f":
return DeepPoly_f(
self.inputs,
self.x_l_coef.clone(),
self.x_u_coef.clone(),
self.x_l_bias.clone(),
self.x_u_bias.clone(),
self.input_error_map.clone(),
)
def detach(self) -> "DeepPoly_f":
return DeepPoly_f(
self.inputs,
self.x_l_coef.detach(),
self.x_u_coef.detach(),
self.x_l_bias.detach(),
self.x_u_bias.detach(),
self.input_error_map.detach(),
)
def max_center(self) -> Tensor:
return self.x_u_bias.max(dim=1)[0].unsqueeze(1)
def max_pool2d(
self,
kernel_size: Tuple[int, int],
stride: Tuple[int, int],
padding: Tuple[int, int],
) -> "DeepPoly_f":
k = self.x_l_coef.shape[0]
in_lb, in_ub = self.concretize()
device = in_lb.device
pid_lb = F.pad(
in_lb,
(padding[1], padding[1], padding[0], padding[0]),
value=-torch.inf,
)
pid_ub = F.pad(
in_ub,
(padding[1], padding[1], padding[0], padding[0]),
value=-torch.inf,
)
output_ub = F.max_pool2d(in_ub, kernel_size, stride, padding)
output_lb = F.max_pool2d(in_lb, kernel_size, stride, padding)
tight = (output_ub == output_lb).all(0).all(0)
output_dim = output_ub.shape
output_lb, output_ub = output_lb.flatten(), output_ub.flatten()
input_dim = in_ub.shape
x_l_coef = torch.zeros((k, np.prod(output_dim)), device=device)
x_u_coef = torch.zeros((k, np.prod(output_dim)), device=device)
x_u_bias = output_ub.clone().flatten()
x_l_bias = output_lb.clone().flatten()
self_x_l_coef = self.x_l_coef.flatten(1)
self_x_u_coef = self.x_u_coef.flatten(1)
self_x_l_bias = self.x_l_bias.flatten()
self_x_u_bias = self.x_u_bias.flatten()
offsets_in = torch.tensor(
[int(np.prod(input_dim[i + 1 :])) for i in range(len(input_dim))],
device=device,
)
offsets_out = torch.tensor(
[int(np.prod(output_dim[i + 1 :])) for i in range(len(output_dim))],
device=device,
)
ch_range = torch.arange(output_dim[1], device=device).repeat(output_dim[0])
bs_range = torch.arange(output_dim[0], device=device).repeat_interleave(
output_dim[1]
)
for y in torch.arange(output_dim[2])[~tight.all(1)]:
for x in torch.arange(output_dim[3])[~tight[y]]:
if tight[y, x]:
assert False
# Get the input_window
w_in_idy = y * stride[0]
w_in_idx = x * stride[1]
w_lb = pid_lb[
:,
:,
w_in_idy : w_in_idy + kernel_size[0],
w_in_idx : w_in_idx + kernel_size[1],
].flatten(start_dim=2)
w_ub = pid_ub[
:,
:,
w_in_idy : w_in_idy + kernel_size[0],
w_in_idx : w_in_idx + kernel_size[1],
].flatten(start_dim=2)
best_lb, best_lb_i = w_lb.max(2)
best_lb_i = best_lb_i.view(-1)
max_ub = w_ub.max(2)[0]
strict_dom = (
torch.sum((best_lb.unsqueeze(2) <= w_ub).float(), 2) == 1.0
).view(-1)
in_idx = best_lb_i % kernel_size[1]
in_idy = torch.div(best_lb_i, kernel_size[1], rounding_mode="trunc")
tot_idx = in_idx + w_in_idx - padding[0]
tot_idy = in_idy + w_in_idy - padding[1]
tot_idx_valid = (
(tot_idx >= 0)
& (tot_idx < input_dim[3])
& (tot_idy >= 0)
& (tot_idy < input_dim[2])
)
assert all(tot_idx_valid)
in_idx = (
bs_range * offsets_in[0]
+ ch_range * offsets_in[1]
+ tot_idy * offsets_in[2]
+ tot_idx * offsets_in[3]
)
out_idx = (
bs_range * offsets_out[0]
+ ch_range * offsets_out[1]
+ y * offsets_out[2]
+ x * offsets_out[3]
)
assert (max_ub.flatten() == output_ub[out_idx]).all()
x_u_coef[:, out_idx[strict_dom]] = self_x_u_coef[:, in_idx[strict_dom]]
x_u_bias[out_idx[strict_dom]] = self_x_u_bias[in_idx[strict_dom]]
x_l_coef[:, out_idx] = self_x_l_coef[:, in_idx]
x_l_bias[out_idx] = self_x_l_bias[in_idx]
# x_u_coef[:, bs_range, ch_range, y, x] = torch.where((strict_dom & tot_idx_valid).unsqueeze(0), self.x_u_coef[:,bs_range,ch_range,tot_idy,tot_idx], x_u_coef[:, bs_range, ch_range, y, x])
# x_u_bias[bs_range, ch_range, y, x] = torch.where(strict_dom & tot_idx_valid, self.x_u_bias[bs_range, ch_range, tot_idy, tot_idx], output_ub[:, ch_range, y, x])
#
# x_l_coef[:, bs_range, ch_range, y, x] = torch.where(tot_idx_valid, self.x_l_coef[:,bs_range,ch_range,tot_idy,tot_idx], x_l_coef[:, bs_range, ch_range, y, x])
# x_l_bias[bs_range, ch_range, y, x] = torch.where(tot_idx_valid, self.x_l_bias[:, ch_range, tot_idy, tot_idx], output_lb[:, ch_range, y, x])
x_u_coef = x_u_coef.view(k, *output_dim)
x_l_coef = x_l_coef.view(k, *output_dim)
x_u_bias = x_u_bias.view(*output_dim)
x_l_bias = x_l_bias.view(*output_dim)
return DeepPoly_f(
self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
)
    def avg_pool2d(self, kernel_size: int, stride: int, padding: int) -> "DeepPoly_f":
        """Average pooling; linear with non-negative weights, so it applies
        directly to coefficients and biases without swapping bound roles."""
        n_in_dims = self.x_l_coef.shape[0:2]
        # fold the error dim into the batch dim for the functional call
        x_l_coef = F.avg_pool2d(
            self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]),
            kernel_size,
            stride,
            padding,
        )
        x_l_coef = x_l_coef.view(*n_in_dims, *x_l_coef.shape[1:])
        x_u_coef = F.avg_pool2d(
            self.x_u_coef.view(-1, *self.x_l_coef.shape[2:]),
            kernel_size,
            stride,
            padding,
        )
        x_u_coef = x_u_coef.view(*n_in_dims, *x_u_coef.shape[1:])
        x_l_bias = F.avg_pool2d(self.x_l_bias, kernel_size, stride, padding)
        x_u_bias = F.avg_pool2d(self.x_u_bias, kernel_size, stride, padding)
        return DeepPoly_f(
            self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
        )
    def global_avg_pool2d(self) -> "DeepPoly_f":
        """Global average pooling to 1x1; linear with non-negative weights,
        so it applies directly to coefficients and biases."""
        n_in_dims = self.x_l_coef.shape[0:2]
        # fold the error dim into the batch dim for the functional call
        x_l_coef = F.adaptive_avg_pool2d(
            self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]), 1
        )
        x_l_coef = x_l_coef.view(*n_in_dims, *x_l_coef.shape[1:])
        x_u_coef = F.adaptive_avg_pool2d(
            self.x_u_coef.view(-1, *self.x_l_coef.shape[2:]), 1
        )
        x_u_coef = x_u_coef.view(*n_in_dims, *x_u_coef.shape[1:])
        x_l_bias = F.adaptive_avg_pool2d(self.x_l_bias, 1)
        x_u_bias = F.adaptive_avg_pool2d(self.x_u_bias, 1)
        return DeepPoly_f(
            self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
        )
    def pad(
        self, pad: Tuple[int, ...], mode: str = "constant", value: float = 0.0
    ) -> "DeepPoly_f":
        """Constant padding: biases take `value`, coefficients are padded with 0
        (padded entries are exact constants, independent of the inputs)."""
        assert mode == "constant"
        x_l_bias = F.pad(self.x_l_bias, pad, mode="constant", value=value)
        x_u_bias = F.pad(self.x_u_bias, pad, mode="constant", value=value)
        x_l_coef = F.pad(
            self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]),
            pad,
            mode="constant",
            value=0,
        ).view(-1, *x_l_bias.shape)
        x_u_coef = F.pad(
            self.x_u_coef.view(-1, *self.x_u_coef.shape[2:]),
            pad,
            mode="constant",
            value=0,
        ).view(-1, *x_u_bias.shape)
        return DeepPoly_f(
            self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
        )
    def conv2d(
        self,
        weight: Tensor,
        bias: Optional[Tensor],
        stride: int,
        padding: int,
        dilation: int,
        groups: int,
    ) -> "DeepPoly_f":
        """Convolution transformer: positive weights keep bound roles,
        negative weights swap lower and upper bounds (standard DeepPoly)."""
        n_in_dims = self.x_l_coef.shape[0:2]
        weight_neg, weight_pos = get_neg_pos_comp(weight)
        # lower bound: positive weights * lower + negative weights * upper
        x_l_coef = F.conv2d(
            self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]),
            weight_pos,
            None,
            stride,
            padding,
            dilation,
            groups,
        ) + F.conv2d(
            self.x_u_coef.view(-1, *self.x_l_coef.shape[2:]),
            weight_neg,
            None,
            stride,
            padding,
            dilation,
            groups,
        )
        x_l_coef = x_l_coef.view(*n_in_dims, *x_l_coef.shape[1:])
        # upper bound: positive weights * upper + negative weights * lower
        x_u_coef = F.conv2d(
            self.x_u_coef.view(-1, *self.x_l_coef.shape[2:]),
            weight_pos,
            None,
            stride,
            padding,
            dilation,
            groups,
        ) + F.conv2d(
            self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]),
            weight_neg,
            None,
            stride,
            padding,
            dilation,
            groups,
        )
        x_u_coef = x_u_coef.view(*n_in_dims, *x_u_coef.shape[1:])
        x_l_bias = F.conv2d(
            self.x_l_bias, weight_pos, bias, stride, padding, dilation, groups
        ) + F.conv2d(self.x_u_bias, weight_neg, None, stride, padding, dilation, groups)
        x_u_bias = F.conv2d(
            self.x_u_bias, weight_pos, bias, stride, padding, dilation, groups
        ) + F.conv2d(self.x_l_bias, weight_neg, None, stride, padding, dilation, groups)
        return DeepPoly_f(
            self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
        )
    def convtranspose2d(
        self,
        weight: Tensor,
        bias: Optional[Tensor],
        stride: int,
        padding: int,
        out_padding: int,
        groups: int,
        dilation: int,
    ) -> "DeepPoly_f":
        """Transposed convolution transformer: positive weights keep bound roles,
        negative weights swap lower and upper bounds (standard DeepPoly)."""
        n_in_dims = self.x_l_coef.shape[0:2]
        weight_neg, weight_pos = get_neg_pos_comp(weight)
        # lower bound: positive weights * lower + negative weights * upper
        x_l_coef = F.conv_transpose2d(
            self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]),
            weight_pos,
            None,
            stride,
            padding,
            out_padding,
            groups,
            dilation,
        ) + F.conv_transpose2d(
            self.x_u_coef.view(-1, *self.x_l_coef.shape[2:]),
            weight_neg,
            None,
            stride,
            padding,
            out_padding,
            groups,
            dilation,
        )
        x_l_coef = x_l_coef.view(*n_in_dims, *x_l_coef.shape[1:])
        # upper bound: positive weights * upper + negative weights * lower
        x_u_coef = F.conv_transpose2d(
            self.x_u_coef.view(-1, *self.x_l_coef.shape[2:]),
            weight_pos,
            None,
            stride,
            padding,
            out_padding,
            groups,
            dilation,
        ) + F.conv_transpose2d(
            self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]),
            weight_neg,
            None,
            stride,
            padding,
            out_padding,
            groups,
            dilation,
        )
        x_u_coef = x_u_coef.view(*n_in_dims, *x_u_coef.shape[1:])
        x_l_bias = F.conv_transpose2d(
            self.x_l_bias,
            weight_pos,
            bias,
            stride,
            padding,
            out_padding,
            groups,
            dilation,
        ) + F.conv_transpose2d(
            self.x_u_bias,
            weight_neg,
            None,
            stride,
            padding,
            out_padding,
            groups,
            dilation,
        )
        x_u_bias = F.conv_transpose2d(
            self.x_u_bias,
            weight_pos,
            bias,
            stride,
            padding,
            out_padding,
            groups,
            dilation,
        ) + F.conv_transpose2d(
            self.x_l_bias,
            weight_neg,
            None,
            stride,
            padding,
            out_padding,
            groups,
            dilation,
        )
        return DeepPoly_f(
            self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
        )
    def linear(
        self,
        weight: Tensor,
        bias: Union[Tensor, None] = None,
        C: Union[Tensor, None] = None,
    ) -> "DeepPoly_f":
        """Affine layer transformer: positive weights keep bound roles,
        negative weights swap lower and upper bounds.

        NOTE(review): parameter C is currently unused here — presumably kept
        for interface compatibility with other domains; confirm with callers.
        """
        n_in_dims = self.x_l_coef.shape[0:2]
        weight_neg, weight_pos = get_neg_pos_comp(weight)
        # lower bound: positive weights * lower + negative weights * upper
        x_l_coef = F.linear(
            self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]), weight_pos, None
        ) + F.linear(self.x_u_coef.view(-1, *self.x_l_coef.shape[2:]), weight_neg, None)
        x_l_coef = x_l_coef.view(*n_in_dims, *x_l_coef.shape[1:])
        # upper bound: positive weights * upper + negative weights * lower
        x_u_coef = F.linear(
            self.x_u_coef.view(-1, *self.x_l_coef.shape[2:]), weight_pos, None
        ) + F.linear(self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]), weight_neg, None)
        x_u_coef = x_u_coef.view(*n_in_dims, *x_u_coef.shape[1:])
        x_l_bias = F.linear(self.x_l_bias, weight_pos, bias) + F.linear(
            self.x_u_bias, weight_neg, None
        )
        x_u_bias = F.linear(self.x_u_bias, weight_pos, bias) + F.linear(
            self.x_l_bias, weight_neg, None
        )
        return DeepPoly_f(
            self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
        )
    def matmul(self, other: Tensor) -> "DeepPoly_f":
        """Right matrix multiplication (self @ other) with sign-split weights."""
        n_in_dims = self.x_l_coef.shape[0:2]
        weight_neg, weight_pos = get_neg_pos_comp(other)
        # lower bound: positive weights * lower + negative weights * upper
        x_l_coef = torch.matmul(
            self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]), weight_pos
        ) + torch.matmul(self.x_u_coef.view(-1, *self.x_l_coef.shape[2:]), weight_neg)
        x_l_coef = x_l_coef.view(*n_in_dims, *x_l_coef.shape[1:])
        # upper bound: positive weights * upper + negative weights * lower
        x_u_coef = torch.matmul(
            self.x_u_coef.view(-1, *self.x_l_coef.shape[2:]), weight_pos
        ) + torch.matmul(self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]), weight_neg)
        x_u_coef = x_u_coef.view(*n_in_dims, *x_u_coef.shape[1:])
        x_l_bias = torch.matmul(self.x_l_bias, weight_pos) + torch.matmul(
            self.x_u_bias, weight_neg
        )
        x_u_bias = torch.matmul(self.x_u_bias, weight_pos) + torch.matmul(
            self.x_l_bias, weight_neg
        )
        return DeepPoly_f(
            self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
        )
    def einsum(self, defining_str: str, other: Tensor) -> DeepPoly_f:
        """Einsum contraction with a constant tensor, with sign-split weights.

        The coefficient tensors carry an extra leading error dim, so the
        einsum spec is rewritten with an `i` prefix on self's and the output's
        index strings.
        """
        input_self_str, rest = defining_str.split(",")
        input_other_str, output_str = rest.split("->")
        input_self_str, input_other_str, output_str = (
            input_self_str.strip(),
            input_other_str.strip(),
            output_str.strip(),
        )
        weight_neg, weight_pos = get_neg_pos_comp(other)
        # lower bound: positive weights * lower + negative weights * upper
        x_l_coef = torch.einsum(
            f"i{input_self_str},{input_other_str} -> i{output_str}",
            self.x_l_coef,
            weight_pos,
        ) + torch.einsum(
            f"i{input_self_str},{input_other_str} -> i{output_str}",
            self.x_u_coef,
            weight_neg,
        )
        # upper bound: positive weights * upper + negative weights * lower
        x_u_coef = torch.einsum(
            f"i{input_self_str},{input_other_str} -> i{output_str}",
            self.x_u_coef,
            weight_pos,
        ) + torch.einsum(
            f"i{input_self_str},{input_other_str} -> i{output_str}",
            self.x_l_coef,
            weight_neg,
        )
        x_l_bias = torch.einsum(defining_str, self.x_l_bias, weight_pos) + torch.einsum(
            defining_str, self.x_u_bias, weight_neg
        )
        x_u_bias = torch.einsum(defining_str, self.x_u_bias, weight_pos) + torch.einsum(
            defining_str, self.x_l_bias, weight_neg
        )
        return DeepPoly_f(
            self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
        )
    def rev_matmul(self, other: Tensor) -> "DeepPoly_f":
        """Left matrix multiplication (other @ self) with sign-split weights."""
        n_in_dims = self.x_l_coef.shape[0:2]
        weight_neg, weight_pos = get_neg_pos_comp(other)
        # lower bound: positive weights * lower + negative weights * upper
        x_l_coef = torch.matmul(
            weight_pos, self.x_l_coef.view(-1, *self.x_l_coef.shape[2:])
        ) + torch.matmul(weight_neg, self.x_u_coef.view(-1, *self.x_l_coef.shape[2:]))
        x_l_coef = x_l_coef.view(*n_in_dims, *x_l_coef.shape[1:])
        # upper bound: positive weights * upper + negative weights * lower
        x_u_coef = torch.matmul(
            weight_pos, self.x_u_coef.view(-1, *self.x_l_coef.shape[2:])
        ) + torch.matmul(weight_neg, self.x_l_coef.view(-1, *self.x_l_coef.shape[2:]))
        x_u_coef = x_u_coef.view(*n_in_dims, *x_u_coef.shape[1:])
        x_l_bias = torch.matmul(weight_pos, self.x_l_bias) + torch.matmul(
            weight_neg, self.x_u_bias
        )
        x_u_bias = torch.matmul(weight_pos, self.x_u_bias) + torch.matmul(
            weight_neg, self.x_l_bias
        )
        return DeepPoly_f(
            self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
        )
    def batch_norm(self, bn: BatchNorm2d) -> "DeepPoly_f":
        """Batch norm as an affine transform using bn's current statistics.

        In training mode without cached statistics, running statistics are
        used if available; otherwise statistics are estimated from the
        interval centers of this element.
        """
        view_dim_list = [1, -1] + (self.x_l_bias.dim() - 2) * [1]
        self_stat_dim_list = [0, 2, 3] if self.x_l_bias.dim() == 4 else [0]
        if bn.training and (bn.current_var is None or bn.current_mean is None):
            if bn.running_mean is not None and bn.running_var is not None:
                bn.current_mean = bn.running_mean
                bn.current_var = bn.running_var
            else:
                # estimate mean/var from the midpoints of the bounds
                bn.current_mean = (
                    (0.5 * (self.x_l_bias + self.x_u_bias))
                    .mean(dim=self_stat_dim_list)
                    .detach()
                )
                bn.current_var = (
                    (0.5 * (self.x_l_bias + self.x_u_bias))
                    .var(unbiased=False, dim=self_stat_dim_list)
                    .detach()
                )
        c: Tensor = bn.weight / torch.sqrt(
            bn.current_var + bn.eps
        )  # type: ignore # Type inference does not work for the module attributes
        b: Tensor = -bn.current_mean * c + bn.bias  # type: ignore # Type inference does not work for the module attributes
        # y = c * x + b, handled soundly by the scalar mul/add operators
        out_dp = self * c.view(*view_dim_list) + b.view(*view_dim_list)
        return out_dp
    def relu(
        self,
        deepz_lambda: Optional[Tensor] = None,
        bounds: Optional[Tuple[Tensor, Tensor]] = None,
    ) -> Tuple["DeepPoly_f", Optional[Tensor]]:
        """DeepPoly ReLU relaxation.

        For crossing neurons (lb < 0 < ub), the upper bound is the chord with
        slope ub / (ub - lb); the lower bound uses slope 0 or 1 (minimum-area
        choice), optionally overridden by a learnable `deepz_lambda` in [0, 1].
        Refined `bounds` are intersected with the concretized ones if given.
        """
        lb, ub = self.concretize()
        init_lambda = False
        assert (ub - lb >= 0).all(), f"max violation: {(ub - lb).min()}"
        dtype = lb.dtype
        D = 1e-8  # guards against division by zero in the chord slope
        if bounds is not None:
            lb_refined, ub_refined = bounds
            lb = torch.max(lb_refined, lb)
            ub = torch.min(ub_refined, ub)
        is_cross = (lb < 0) & (ub > 0)
        # chord slope for crossing neurons; 1 for stably active, 0 for inactive
        lambda_u = torch.where(is_cross, ub / (ub - lb + D), (lb >= 0).to(dtype))
        # lower slope: 0 when |lb| dominates, else 1 (minimum-area heuristic)
        lambda_l = torch.where(ub < -lb, torch.zeros_like(lb), torch.ones_like(lb))
        lambda_l = torch.where(is_cross, lambda_l, (lb >= 0).to(dtype))
        if deepz_lambda is not None:
            if (
                (deepz_lambda >= 0).all() and (deepz_lambda <= 1).all()
            ) and not init_lambda:
                lambda_l = deepz_lambda
            else:
                # (re-)initialize the learnable slope from the heuristic choice
                deepz_lambda.data = lambda_l.data.detach().requires_grad_(True)
        mu_l = torch.zeros_like(lb)
        mu_u = torch.where(
            is_cross, -lb * lambda_u, torch.zeros_like(lb)
        )  # height of upper bound intersection with y axis
        x_l_bias = mu_l + lambda_l * self.x_l_bias
        x_u_bias = mu_u + lambda_u * self.x_u_bias
        lambda_l, lambda_u = lambda_l.unsqueeze(0), lambda_u.unsqueeze(0)
        x_l_coef = self.x_l_coef * lambda_l
        x_u_coef = self.x_u_coef * lambda_u
        DP_out = DeepPoly_f(
            self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
        )
        assert (DP_out.concretize()[1] - DP_out.concretize()[0] >= 0).all()
        return DP_out, deepz_lambda
    def sigmoid(
        self,
        step_size: float,
        max_x: float,
        deepz_lambda: Optional[Tensor] = None,
        bounds: Optional[Tuple[Tensor, Tensor]] = None,
        tangent_points: Optional[Tensor] = None,
    ) -> Tuple["DeepPoly_f", Optional[Tensor]]:
        """Sigmoid relaxation: linear lower/upper bounds from SigBase.

        Slopes and intercepts are taken from precomputed tangent points
        (required); refined `bounds` are intersected with the concretized ones
        if given. `deepz_lambda` is passed through unused here.
        """

        def sig(x: Tensor) -> Tensor:
            return torch.sigmoid(x)

        def d_sig(x: Tensor) -> Tensor:
            sig = torch.sigmoid(x)
            return sig * (1 - sig)

        assert tangent_points is not None, "Tangent points not set"
        # move the precomputed tangent points to this element's device/dtype
        if tangent_points.device != self.device:
            tangent_points = tangent_points.to(device=self.device)
        if tangent_points.dtype != self.dtype:
            tangent_points = tangent_points.to(dtype=self.dtype)
        lb, ub = self.concretize()
        assert (ub - lb >= 0).all(), f"max violation: {(ub - lb).min()}"
        if bounds is not None:
            lb_refined, ub_refined = bounds
            lb = torch.max(lb_refined, lb)
            ub = torch.min(ub_refined, ub)
        (
            lb_slope,
            ub_slope,
            lb_intercept,
            ub_intercept,
        ) = SigBase._get_approximation_slopes_and_intercepts_for_act(
            (lb, ub),
            tangent_points,
            step_size,
            max_x,
            sig,
            d_sig,
        )
        x_l_bias = lb_intercept + lb_slope * self.x_l_bias
        x_u_bias = ub_intercept + ub_slope * self.x_u_bias
        lambda_l, lambda_u = lb_slope.unsqueeze(0), ub_slope.unsqueeze(0)
        x_l_coef = self.x_l_coef * lambda_l
        x_u_coef = self.x_u_coef * lambda_u
        DP_out = DeepPoly_f(
            self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
        )
        assert (DP_out.concretize()[1] - DP_out.concretize()[0] >= 0).all()
        return DP_out, deepz_lambda
def unsqueeze(self, dim: int) -> "DeepPoly_f":
return DeepPoly_f(
self.inputs,
self.x_l_coef.unsqueeze(dim + 1),
self.x_u_coef.unsqueeze(dim + 1),
self.x_l_bias.unsqueeze(dim),
self.x_u_bias.unsqueeze(dim),
self.input_error_map,
)
def squeeze(self, dim: int) -> "DeepPoly_f":
return DeepPoly_f(
self.inputs,
self.x_l_coef.squeeze(dim + 1),
self.x_u_coef.squeeze(dim + 1),
self.x_l_bias.squeeze(dim),
self.x_u_bias.squeeze(dim),
self.input_error_map,
)
def split(
self, split_size_or_sections: Tuple[int, ...], dim: int
) -> Tuple[DeepPoly_f, ...]:
real_dim = dim if dim > 0 else self.x_l_bias.dim() + dim
new_x_l_coef = torch.split(self.x_l_coef, split_size_or_sections, real_dim + 1)
new_x_u_coef = torch.split(self.x_u_coef, split_size_or_sections, real_dim + 1)
new_x_l_bias = torch.split(self.x_l_bias, split_size_or_sections, real_dim)
new_x_u_bias = torch.split(self.x_u_bias, split_size_or_sections, real_dim)
outputs = [
DeepPoly_f(
self.inputs,
xlc,
xuc,
xlb,
xub,
self.input_error_map,
)
for xlc, xuc, xlb, xub in zip(
new_x_l_coef, new_x_u_coef, new_x_l_bias, new_x_u_bias
)
]
return tuple(outputs)
def slice(
self,
dim: int,
starts: int,
ends: int,
steps: int,
) -> DeepPoly_f:
real_dim = dim if dim > 0 else self.x_l_bias.dim() + dim
index = torch.tensor(range(starts, ends, steps), device=self.x_l_bias.device)
new_x_l_coef = torch.index_select(self.x_l_coef, real_dim + 1, index)
new_x_u_coef = torch.index_select(self.x_u_coef, real_dim + 1, index)
new_x_l_bias = torch.index_select(self.x_l_bias, real_dim, index)
new_x_u_bias = torch.index_select(self.x_u_bias, real_dim, index)
return DeepPoly_f(
self.inputs,
new_x_l_coef,
new_x_u_coef,
new_x_l_bias,
new_x_u_bias,
self.input_error_map,
)
def multiply_interval(self, interval: Tuple[Tensor, Tensor]) -> DeepPoly_f:
concrete_lb, concrete_ub = self.concretize()
(
mul_lb_slope,
mul_lb_intercept,
mul_ub_slope,
mul_ub_intercept,
) = self._get_multiplication_slopes_and_intercepts(
interval, (concrete_lb, concrete_ub)
)
new_x_l_bias = mul_lb_intercept + mul_lb_slope * self.x_l_bias
new_x_u_bias = mul_ub_intercept + mul_ub_slope * self.x_u_bias
mul_lb_slope, mul_ub_slope = mul_lb_slope.unsqueeze(0), mul_ub_slope.unsqueeze(
0
)
new_x_l_coef = self.x_l_coef * mul_lb_slope
new_x_u_coef = self.x_u_coef * mul_ub_slope
return DeepPoly_f(
self.inputs,
new_x_l_coef,
new_x_u_coef,
new_x_l_bias,
new_x_u_bias,
self.input_error_map,
)
def add(self, other: "DeepPoly_f") -> "DeepPoly_f":
x_l_coef = self.x_l_coef + other.x_l_coef
x_u_coef = self.x_u_coef + other.x_u_coef
x_l_bias = self.x_l_bias + other.x_l_bias
x_u_bias = self.x_u_bias + other.x_u_bias
return DeepPoly_f(
self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
)
def sum(self, dim: int, reduce_dim: bool = False) -> "DeepPoly_f":
x_l_coef = self.x_l_coef.sum(dim + 1)
x_u_coef = self.x_u_coef.sum(dim + 1)
x_l_bias = self.x_l_bias.sum(dim)
x_u_bias = self.x_u_bias.sum(dim)
if not reduce_dim:
x_l_coef = x_l_coef.unsqueeze(dim + 1)
x_l_coef = x_l_coef.unsqueeze(dim + 1)
x_l_coef = x_l_coef.unsqueeze(dim)
x_l_coef = x_l_coef.unsqueeze(dim)
return DeepPoly_f(
self.inputs, x_l_coef, x_u_coef, x_l_bias, x_u_bias, self.input_error_map
)
    def concretize(self) -> Tuple[Tensor, Tensor]:
        """Evaluate the affine form against the concrete input box.

        Returns elementwise (lower, upper) bounds: for each input coordinate
        the box endpoint is chosen that minimizes (resp. maximizes) the
        linear expression, based on the sign of its coefficient.
        """
        input_lb, input_ub = self.inputs.concretize()
        input_shape = input_lb.shape
        # Reshape inputs to (n_inputs, batch, 1, ..., 1) so they broadcast
        # against the coefficient tensors.
        input_lb = (
            input_lb.flatten(start_dim=1)
            .transpose(1, 0)
            .view([-1, input_shape[0]] + (self.x_l_coef.dim() - 2) * [1])
        )
        input_ub = (
            input_ub.flatten(start_dim=1)
            .transpose(1, 0)
            .view([-1, input_shape[0]] + (self.x_l_coef.dim() - 2) * [1])
        )
        neg_x_l_coef, pos_x_l_coef = get_neg_pos_comp(self.x_l_coef)
        neg_x_u_coef, pos_x_u_coef = get_neg_pos_comp(self.x_u_coef)
        # Negative coefficients attain their extremum at the opposite endpoint.
        lb = (
            self.x_l_bias
            + (neg_x_l_coef * input_ub).sum(0)
            + (pos_x_l_coef * input_lb).sum(0)
        )
        ub = (
            self.x_u_bias
            + (neg_x_u_coef * input_lb).sum(0)
            + (pos_x_u_coef * input_ub).sum(0)
        )
        return lb, ub
def avg_width(self) -> Tensor:
lb, ub = self.concretize()
return (ub - lb).mean()
    def is_greater(
        self, i: int, j: int, threshold_min: Union[Tensor, float] = 0
    ) -> Tuple[Tensor, Tensor]:
        """Soundly compare output ``i`` against output ``j``.

        Computes a lower bound ``delta`` on ``out_i - out_j`` by concretizing
        the difference of the lower form of ``i`` and the upper form of ``j``
        over the input box.

        Returns the per-batch margin ``delta`` and the mask
        ``delta > threshold_min``.
        """
        input_lb, input_ub = self.inputs.concretize()
        b_dim = input_lb.shape[0]
        # Move the batch dimension last, then flatten all remaining dims.
        dims = list(range(1, input_lb.dim()))
        dims.append(0)
        input_lb, input_ub = input_lb.permute(dims), input_ub.permute(
            dims
        )  # dim0, ... dimn, batch_dim,
        input_lb, input_ub = input_lb.view(-1, b_dim), input_ub.view(
            -1, b_dim
        )  # dim, batch_dim
        neg_x_l_coef, pos_x_l_coef = get_neg_pos_comp(
            self.x_l_coef[:, :, i] - self.x_u_coef[:, :, j]
        )
        neg_x_l_coef, pos_x_l_coef = neg_x_l_coef.view(-1, b_dim), pos_x_l_coef.view(
            -1, b_dim
        )
        # Negative coefficients take the upper input endpoint, positive the lower.
        delta = (
            self.x_l_bias[:, i]
            - self.x_u_bias[:, j]
            + (neg_x_l_coef * input_ub).sum(0)
            + (pos_x_l_coef * input_lb).sum(dim=0)
        )
        return delta, delta > threshold_min
def verify(
self,
targets: Tensor,
threshold_min: Union[Tensor, float] = 0,
corr_only: bool = False,
) -> Tuple[Tensor, Tensor, Tensor]:
n_class = self.x_l_bias.size()[1]
device = self.x_l_bias.device
dtype = self.x_l_coef.dtype
verified = torch.zeros(targets.size(), dtype=torch.uint8).to(device)
verified_corr = torch.zeros(targets.size(), dtype=torch.uint8).to(device)
if n_class == 1:
# assert len(targets) == 1
verified_list = torch.cat(
[
self.concretize()[1] < threshold_min,
self.concretize()[0] >= threshold_min,
],
dim=1,
)
verified[:] = torch.any(verified_list, dim=1)
verified_corr[:] = verified_list.gather(
dim=1, index=targets.long().unsqueeze(dim=1)
).squeeze(1)
threshold = (
torch.cat(self.concretize(), 1)
.gather(dim=1, index=(1 - targets).long().unsqueeze(dim=1))
.squeeze(1)
)
else:
threshold = np.inf * torch.ones(targets.size(), dtype=dtype).to(device)
for i in range(n_class):
if corr_only and i not in targets:
continue
isg = torch.ones(targets.size(), dtype=torch.uint8).to(device)
print(isg.shape)
margin = np.inf * torch.ones(targets.size(), dtype=dtype).to(device)
for j in range(n_class):
if i != j and isg.any():
margin_tmp, ok = self.is_greater(i, j, threshold_min)
margin = torch.min(margin, margin_tmp)
isg = isg & ok.byte()
verified = verified | isg
verified_corr = verified_corr | (targets.eq(i).byte() & isg)
threshold = torch.where(targets.eq(i).byte(), margin, threshold)
return verified, verified_corr, threshold
    def get_wc_logits(self, targets: Tensor, use_margins: bool = False) -> Tensor:
        """Return worst-case logits with respect to ``targets``.

        The target entry takes its concretized lower bound while all other
        classes take their upper bounds. With ``use_margins`` the form is
        first mapped to pairwise margins via a per-sample difference matrix.
        """
        n_class = self.shape[-1]
        device = self.x_l_coef.device
        dtype = self.x_l_coef.dtype
        if use_margins:
            def get_c_mat(n_class: int, target: Tensor) -> Tensor:
                # Difference matrix: row k encodes (target logit - logit k).
                return torch.eye(n_class, dtype=dtype)[target].unsqueeze(
                    dim=0
                ) - torch.eye(n_class, dtype=dtype)
            if n_class > 1:
                c = torch.stack([get_c_mat(n_class, x) for x in targets], dim=0)
                # Rebind self to the margin-transformed shape.
                self = -(self.unsqueeze(dim=1) * c.to(device)).sum(
                    dim=2, reduce_dim=True
                )
        batch_size = targets.size()[0]
        lb, ub = self.concretize()
        if n_class == 1:
            wc_logits = torch.cat([ub, lb], dim=1)
            wc_logits = wc_logits.gather(dim=1, index=targets.long().unsqueeze(1))
        else:
            # Upper bounds everywhere except the target class (lower bound).
            wc_logits = ub.clone()
            wc_logits[np.arange(batch_size), targets] = lb[
                np.arange(batch_size), targets
            ]
        return wc_logits
def ce_loss(self, targets: Tensor) -> Tensor:
wc_logits = self.get_wc_logits(targets)
if wc_logits.size(1) == 1:
return F.binary_cross_entropy_with_logits(
wc_logits.squeeze(1), targets.float(), reduction="none"
)
else:
return F.cross_entropy(wc_logits, targets.long(), reduction="none")
    def _get_multiplication_slopes_and_intercepts(
        self, mul_bounds: Tuple[Tensor, Tensor], input_bounds: Tuple[Tensor, Tensor]
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        """Linear relaxation of multiplying an input by the interval ``mul_bounds``.

        Returns (lb_slope, lb_intercept, ub_slope, ub_intercept) describing
        lines that bound the product over ``input_bounds``.
        """
        input_lb, input_ub = input_bounds
        # Soundness slack matched to the floating-point precision in use.
        D = 1e-12 if input_lb.dtype == torch.float64 else 1e-7
        # Get the lower and upper bound of the multiplication
        (mult_lb_lb, mult_lb_ub, mult_ub_lb, mult_ub_ub) = self._get_mul_lbs_and_ubs(
            mul_bounds, input_bounds
        )
        # Get slopes and offsets
        # TODO look at effect of soundness correction here
        convex_lb_slope = (mult_ub_lb - mult_lb_lb) / (input_ub - input_lb + D)
        convex_lb_intercept = mult_lb_lb - input_lb * convex_lb_slope - D
        convex_ub_slope = (mult_ub_ub - mult_lb_ub) / (input_ub - input_lb + D)
        convex_ub_intercept = mult_lb_ub - input_lb * convex_ub_slope + D
        return (
            convex_lb_slope,
            convex_lb_intercept,
            convex_ub_slope,
            convex_ub_intercept,
        )
def _get_mul_lbs_and_ubs(
self, b1: Tuple[Tensor, Tensor], b2: Tuple[Tensor, Tensor]
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
input_lb_opts = [b2[0] * b1[0], b2[0] * b1[1]]
input_ub_opts = [b2[1] * b1[0], b2[1] * b1[1]]
mult_lb_lb = tensor_reduce(torch.minimum, input_lb_opts)
mult_lb_ub = tensor_reduce(torch.maximum, input_lb_opts)
mult_ub_lb = tensor_reduce(torch.minimum, input_ub_opts)
mult_ub_ub = tensor_reduce(torch.maximum, input_ub_opts)
return (mult_lb_lb, mult_lb_ub, mult_ub_lb, mult_ub_ub)
| 48,126 | 35.267521 | 203 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/src/exceptions/invalid_bounds.py | from torch import Tensor
class InvalidBoundsError(ValueError):
    """When intermediate bounds are set with lb > ub"""
    def __init__(self, invalid_bounds_mask_in_batch: Tensor) -> None:
        # Mask over the batch marking which entries violated lb <= ub.
        self.invalid_bounds_mask_in_batch = invalid_bounds_mask_in_batch
| 257 | 27.666667 | 72 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/test_util.py | import functools
import time
from typing import Any, Callable, List, Optional, Sequence, Tuple
import numpy as np
import torch
import torch.nn as nn
import tqdm # type: ignore[import]
from bunch import Bunch # type: ignore[import]
from torch import Tensor
from torch.distributions.beta import Beta
from src.abstract_domains.DP_f import DeepPoly_f, HybridZonotope
from src.abstract_layers.abstract_linear import Linear
from src.abstract_layers.abstract_network import AbstractNetwork
from src.abstract_layers.abstract_relu import ReLU
from src.abstract_layers.abstract_sequential import Sequential
from src.abstract_layers.abstract_sigmoid import Sigmoid
from src.abstract_layers.abstract_tanh import Tanh
from src.branch_and_bound import BranchAndBound
from src.concrete_layers.pad import Pad
from src.concrete_layers.split_block import SplitBlock
from src.concrete_layers.unbinary_op import UnbinaryOp
from src.milp_network import MILPNetwork
from src.mn_bab_optimizer import MNBabOptimizer
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.mn_bab_verifier import MNBaBVerifier
from src.state.tags import layer_tag, query_tag
from src.utilities.config import (
MNBabOptimizerConfig,
MNBabVerifierConfig,
make_backsubstitution_config,
make_optimizer_config,
make_prima_hyperparameters,
make_verifier_config,
)
from src.utilities.initialization import seed_everything
from src.utilities.loading.network import freeze_network, load_onnx_model, mnist_a_b
from src.utilities.queries import get_output_bound_initial_query_coef
from src.verification_subproblem import SubproblemState
# Canonical input shapes for the two benchmark datasets.
MNIST_INPUT_DIM = (1, 28, 28)
CIFAR10_INPUT_DIM = (3, 32, 32)
# Test configs: input dimensions, perturbation radius and (where applicable)
# per-channel normalization constants.
MNIST_FC_DATA_TEST_CONFIG = Bunch(
    {
        "input_dim": [784],
        "eps": 0.01,
    }
)
MNIST_CONV_DATA_TEST_CONFIG = Bunch(
    {
        "input_dim": MNIST_INPUT_DIM,
        "eps": 0.01,
        "normalization_means": [0.1307],
        "normalization_stds": [0.3081],
    }
)
CIFAR10_CONV_DATA_TEST_CONFIG = Bunch(
    {
        "input_dim": CIFAR10_INPUT_DIM,
        "eps": 0.01,
        "normalization_means": [0.4914, 0.4822, 0.4465],
        "normalization_stds": [0.2023, 0.1994, 0.2010],
    }
)
def toy_net() -> Tuple[AbstractNetwork, Tuple[int]]:
    """
    Running example of the DeepPoly paper:
    https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf
    """
    input_dim = (2,)

    def make_linear(weight: List[List[float]], bias: List[float]) -> Linear:
        # Helper: a 2-input linear layer with fixed weights from the paper.
        layer = Linear(2, len(bias), bias=True, input_dim=(2,))
        layer.weight.data = torch.tensor(weight)
        layer.bias.data = torch.tensor(bias)
        return layer

    lin1 = make_linear([[1.0, 1.0], [1.0, -1.0]], [0.0, 0.0])
    lin2 = make_linear([[1.0, 1.0], [1.0, -1.0]], [0.0, 0.0])
    lin3 = make_linear([[1.0, 1.0], [0.0, 1.0]], [1.0, 0.0])
    lin_out = make_linear([[1.0, -1.0]], [0.0])
    layers = Sequential(
        [lin1, ReLU((2,)), lin2, ReLU((2,)), lin3, lin_out]
    ).layers
    return AbstractNetwork(layers), input_dim
def toy_sig_net() -> Tuple[AbstractNetwork, Tuple[int]]:
    """Sigmoid variant of the DeepPoly toy example (same weights, ReLU -> Sigmoid)."""
    input_dim = (2,)
    linear1 = Linear(2, 2, bias=True, input_dim=(2,))
    sig1 = Sigmoid((2,))
    linear2 = Linear(2, 2, bias=True, input_dim=(2,))
    sig2 = Sigmoid((2,))
    linear3 = Linear(2, 2, bias=True, input_dim=(2,))
    linear_out = Linear(2, 1, bias=True, input_dim=(2,))
    linear1.weight.data = torch.tensor([[1.0, 1.0], [1.0, -1.0]])
    linear1.bias.data = torch.zeros(2)
    linear2.weight.data = torch.tensor([[1.0, 1.0], [1.0, -1.0]])
    linear2.bias.data = torch.zeros(2)
    linear3.weight.data = torch.tensor([[1.0, 1.0], [0.0, 1.0]])
    linear3.bias.data = torch.tensor([1.0, 0.0])
    linear_out.weight.data = torch.tensor([[1.0, -1.0]])
    linear_out.bias.data = torch.zeros(1)
    return (
        AbstractNetwork(
            Sequential([linear1, sig1, linear2, sig2, linear3, linear_out]).layers
        ),
        input_dim,
    )
def toy_sig_tanh_net() -> Tuple[AbstractNetwork, Tuple[int]]:
    """Mixed-activation variant of the DeepPoly toy example (Sigmoid then Tanh)."""
    input_dim = (2,)
    linear1 = Linear(2, 2, bias=True, input_dim=(2,))
    sig1 = Sigmoid((2,))
    linear2 = Linear(2, 2, bias=True, input_dim=(2,))
    sig2 = Tanh((2,))
    linear3 = Linear(2, 2, bias=True, input_dim=(2,))
    linear_out = Linear(2, 1, bias=True, input_dim=(2,))
    linear1.weight.data = torch.tensor([[1.0, 1.0], [1.0, -1.0]])
    linear1.bias.data = torch.zeros(2)
    linear2.weight.data = torch.tensor([[1.0, 1.0], [1.0, -1.0]])
    linear2.bias.data = torch.zeros(2)
    linear3.weight.data = torch.tensor([[1.0, 1.0], [0.0, 1.0]])
    linear3.bias.data = torch.tensor([1.0, 0.0])
    linear_out.weight.data = torch.tensor([[1.0, -1.0]])
    linear_out.bias.data = torch.zeros(1)
    return (
        AbstractNetwork(
            Sequential([linear1, sig1, linear2, sig2, linear3, linear_out]).layers
        ),
        input_dim,
    )
def toy_all_layer_net() -> Tuple[AbstractNetwork, Tuple[int, int, int]]:
    """
    A network that contains all currently supported layers

    Returns the abstract network and its input shape.
    """
    input_dim = (1, 5, 5)
    conv1 = nn.Conv2d(1, 3, (3, 3))  # 1x1x5x5 -> 1x3x3x3
    bn1 = nn.BatchNorm2d(3)
    flatten1 = nn.Flatten()
    relu1 = nn.ReLU()
    linear1 = nn.Linear(27, 10)
    relu2 = nn.ReLU()
    return (
        AbstractNetwork.from_concrete_module(
            nn.Sequential(*[conv1, bn1, flatten1, relu1, linear1, relu2]), input_dim
        ),
        input_dim,
    )
def toy_all_layer_net_1d() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
    """
    A small fully-connected ReLU network on a 4-dim input (4-10-20-10-1).
    """
    # Input size: (4,)
    input_size = (4,)
    layers: List[torch.nn.Module] = []
    layers += [nn.Linear(4, 10), nn.ReLU()]
    layers += [nn.Linear(10, 20), nn.ReLU()]
    layers += [nn.Linear(20, 10), nn.ReLU()]
    layers += [nn.Linear(10, 1)]
    return (
        AbstractNetwork.from_concrete_module(nn.Sequential(*layers), input_size),
        input_size,
    )
def toy_avg_pool_net() -> Tuple[AbstractNetwork, Tuple[int, int, int]]:
    """A toy network containing three avg. pool layers
    Returns:
        AbstractNetwork: AS Wrapper around the above network
    """
    input_dim = (1, 6, 6)
    pooling_stack = [
        nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2)),  # 6x6 -> 3x3
        nn.AvgPool2d(
            kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
        ),  # shape-preserving 3x3
        nn.AvgPool2d(kernel_size=(2, 2), stride=(1, 1)),  # 3x3 -> 2x2
        nn.Flatten(),
    ]
    abstract = AbstractNetwork.from_concrete_module(
        nn.Sequential(*pooling_stack), input_dim
    )
    return abstract, input_dim
def toy_max_pool_net() -> Tuple[AbstractNetwork, Tuple[int, int, int]]:
    """A toy network containing three max. pool layers
    Returns:
        abstract_network: AS Wrapper around the above network
    """
    input_dim = (1, 6, 6)
    pooling_stack = [
        nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),  # 6x6 -> 3x3
        nn.MaxPool2d(
            kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
        ),  # shape-preserving 3x3
        nn.MaxPool2d(kernel_size=(2, 2), stride=(1, 1)),  # 3x3 -> 2x2
        nn.Flatten(),
    ]
    abstract = AbstractNetwork.from_concrete_module(
        nn.Sequential(*pooling_stack), input_dim
    )
    return abstract, input_dim
def pad_toy_max_pool_net() -> Tuple[AbstractNetwork, Tuple[int, int, int]]:
    """A toy network containing three max. pool layers, none of which are the initial layer
    Returns:
        abstract_network: AS Wrapper around the above network
    """
    input_dim = (1, 10, 10)
    # Input size: 10x10
    conv_1 = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(1, 1))  # 1x10x10
    max_p1 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))  # -> 5x5
    max_p2 = nn.MaxPool2d(
        kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
    )  # shape-preserving 5x5
    max_p3 = nn.MaxPool2d(kernel_size=(2, 2), stride=(1, 1))  # -> 4x4
    flatten1 = nn.Flatten()
    relu = nn.ReLU()
    return (
        AbstractNetwork.from_concrete_module(
            nn.Sequential(*[conv_1, relu, max_p1, max_p2, max_p3, flatten1]), input_dim
        ),
        input_dim,
    )
def toy_max_pool_mixed_net() -> Tuple[AbstractNetwork, Tuple[int, int, int]]:
    """A toy network interleaving convolutions with max. pool layers
    Returns:
        abstract_network: AS Wrapper around the above network
        input_size: size of the input
    """
    # Input size: 6x6
    # NOTE(review): some of the per-layer shape comments below look stale —
    # verify against an actual forward pass before relying on them.
    input_size = (1, 6, 6)
    layers: List[torch.nn.Module] = []
    layers += [
        nn.Conv2d(
            in_channels=input_size[0],
            out_channels=3,
            kernel_size=(3, 3),
            stride=(1, 1),
            padding=(1, 1),
        )
    ]
    layers += [nn.MaxPool2d(kernel_size=(2, 2), stride=(1, 1), padding=(1, 1))]  # 6x6
    layers += [
        nn.Conv2d(in_channels=3, out_channels=2, kernel_size=(3, 3), padding=(1, 1))
    ]
    layers += [nn.ReLU()]
    layers += [nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=(1, 1))]  # 4x4
    layers += [nn.Conv2d(in_channels=2, out_channels=2, kernel_size=(2, 2))]  # 2x2
    layers += [nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))]  # 1x1
    layers += [nn.Flatten()]  # 2
    return (
        AbstractNetwork.from_concrete_module(nn.Sequential(*layers), input_size),
        input_size,
    )
def toy_max_pool_tiny_net() -> Tuple[AbstractNetwork, Tuple[int, int, int]]:
    """A tiny network with a single padded max. pool layer on a 1x1x1 input
    Returns:
        abstract_network: AS Wrapper around the above network
        input_size: size of the input
    """
    # Input size: 1x1
    input_size = (1, 1, 1)
    layers: List[torch.nn.Module] = []
    layers += [nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))]  # 1x1
    layers += [nn.Flatten()]  # 1
    return (
        AbstractNetwork.from_concrete_module(nn.Sequential(*layers), input_size),
        input_size,
    )
def toy_max_avg_pool_net() -> AbstractNetwork:
    """A toy network mixing conv, max. pool and avg. pool layers
    Returns:
        abstract_network: AS Wrapper around the above network
    """
    # Input size: 1x16x16
    conv1 = nn.Conv2d(1, 3, (3, 3))  # -> 3x14x14
    max_p1 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))  # -> 3x7x7
    relu1 = nn.ReLU()
    avg_p1 = nn.AvgPool2d(kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))  # 3x7x7
    flatten1 = nn.Flatten()  # -> 147
    relu2 = nn.ReLU()
    linear1 = nn.Linear(147, 10)
    return AbstractNetwork.from_concrete_module(
        nn.Sequential(*[conv1, max_p1, relu1, avg_p1, flatten1, relu2, linear1]),
        (1, 16, 16),
    )
def toy_pad_net() -> nn.Sequential:
    """A toy network containing two padding layers
    Returns:
        nn.Sequential: The corresponding network
    """
    # Input: 1x16x16
    return nn.Sequential(
        Pad((1, 1, 2, 2), value=1),  # -> 1x20x18
        nn.Conv2d(1, 3, (2, 2), stride=2),  # -> 3x10x9
        Pad((1, 1), value=2),  # -> 3x10x11
        nn.Flatten(),
        nn.Linear(330, 10),
    )
def toy_reshape_net() -> nn.Sequential:
    """A toy network containing three reshape layers
    Returns:
        nn.Sequential: The corresponding network
    """
    class ReshapeLayer(nn.Module):
        """Reshapes its input to a fixed target shape."""
        def __init__(self, new_shape: Tuple[int, ...]) -> None:
            super().__init__()
            self.new_shape = new_shape
        def forward(self, x: Tensor) -> Tensor:
            return x.reshape(self.new_shape)
    # Input: 1x16x16
    return nn.Sequential(
        ReshapeLayer(new_shape=(1, 1, 16, 16)),  # add channel dim
        nn.Conv2d(1, 3, (2, 2), stride=2),  # -> 1x3x8x8
        ReshapeLayer(new_shape=(1, 3, 64)),  # -> 1x3x64
        ReshapeLayer(new_shape=(1, 192)),  # flatten -> 1x192
        nn.Linear(192, 10),
    )
def abs_toy_pad_net() -> Tuple[AbstractNetwork, Tuple[int, int, int]]:
    """
    A toy network containing two padding layers
    Returns:
        abstract_network: AS wrapper around the network
        input_size: size of the input
    """
    input_size = (1, 4, 4)
    # Input: 1x4x4
    layers: List[nn.Module] = []
    layers += [Pad((1, 1, 2, 2), value=1)]  # -> 1x8x6
    layers += [nn.Conv2d(1, 2, (3, 3), stride=2)]  # -> 2x3x2
    layers += [Pad((1, 1))]  # -> 2x3x4
    layers += [nn.Flatten()]
    return (
        AbstractNetwork.from_concrete_module(nn.Sequential(*layers), input_size),
        input_size,
    )
def abs_toy_pad_tiny_net() -> Tuple[AbstractNetwork, Tuple[int, int, int]]:
    """
    A tiny network containing a single padding layer
    Returns:
        abstract_network: AS wrapper around the network
        input_size: size of the input
    """
    input_size = (1, 1, 1)
    layers: List[nn.Module] = []
    layers += [Pad((1, 1, 0, 0), value=1)]  # pad width only: -> 1x1x3
    layers += [nn.Flatten()]
    return (
        AbstractNetwork.from_concrete_module(nn.Sequential(*layers), input_size),
        input_size,
    )
def toy_permute_net() -> nn.Sequential:
    """A toy network containing two permute layers around a convolution."""
    class PermuteLayer(nn.Module):
        # Applies a fixed torch.permute with the given index order.
        def __init__(
            self,
            perm_ind: Tuple[int, ...],
        ) -> None:
            super().__init__()
            self.perm_ind = perm_ind
        def forward(self, x: Tensor) -> Tensor:
            return torch.permute(x, self.perm_ind)
    # Input: 1x4x8
    pad_1 = PermuteLayer((0, 1, 3, 2))  # 1x8x4
    conv_1 = nn.Conv2d(1, 3, (2, 2), stride=2)  # 3x4x2
    pad_2 = PermuteLayer((0, 1, 3, 2))  # 3x2x4
    flatten_1 = nn.Flatten()
    lin_1 = nn.Linear(24, 10)
    return nn.Sequential(*[pad_1, conv_1, pad_2, flatten_1, lin_1])
def toy_unbinary_net() -> AbstractNetwork:
    """A toy network containing all unbinary layers
    Returns:
        abstract_network: AS wrapper around the network
    """
    # Input: 1x4x4
    op_1 = UnbinaryOp(op="add", const_val=torch.Tensor([5]), apply_right=False)  # 1x4x4
    conv_1 = nn.Conv2d(1, 3, (1, 1), stride=1)  # 3x4x4
    op_2 = UnbinaryOp(op="sub", const_val=torch.Tensor([5]), apply_right=False)
    # Per-channel constant, subtracted from the right.
    op_3 = UnbinaryOp(
        op="sub", const_val=torch.Tensor([5, 4, 3]).reshape((3, 1, 1)), apply_right=True
    )
    op_4 = UnbinaryOp(op="mul", const_val=torch.Tensor([5]), apply_right=False)
    op_5 = UnbinaryOp(op="div", const_val=torch.Tensor([5]), apply_right=False)
    flatten_1 = nn.Flatten()
    lin_1 = nn.Linear(48, 10)
    return AbstractNetwork.from_concrete_module(
        nn.Sequential(*[op_1, conv_1, op_2, op_3, op_4, op_5, flatten_1, lin_1]),
        (1, 4, 4),
    )
def toy_stack_seq_net() -> AbstractNetwork:
    """A toy network containing stacked sequential layers
    Returns:
        abstract_network: AS wrapper around the network
    """
    linear1 = nn.Linear(2, 2)
    relu1 = nn.ReLU()
    linear2 = nn.Linear(2, 2)
    relu2 = nn.ReLU()
    linear3 = nn.Linear(2, 2)
    relu3 = nn.ReLU()
    linear4 = nn.Linear(2, 2)
    relu4 = nn.ReLU()
    linear_out = nn.Linear(2, 1)
    # (weights duplicated from toy_net)
    linear1.weight.data = torch.tensor([[1.0, 1.0], [1.0, -1.0]])
    linear1.bias.data = torch.zeros(2)
    linear2.weight.data = torch.tensor([[1.0, 1.0], [1.0, -1.0]])
    linear2.bias.data = torch.zeros(2)
    linear3.weight.data = torch.tensor([[1.0, 1.0], [0.0, 1.0]])
    linear3.bias.data = torch.tensor([1.0, 0.0])
    linear4.weight.data = torch.tensor([[1.0, 1.0], [1.0, -1.0]])
    linear4.bias.data = torch.zeros(2)
    linear_out.weight.data = torch.tensor([[1.0, -1.0]])
    linear_out.bias.data = torch.zeros(1)
    return AbstractNetwork.from_concrete_module(
        nn.Sequential(
            nn.Sequential(linear1, relu1),
            nn.Sequential(
                linear2,
                relu2,
                nn.Sequential(
                    linear3, relu3
                ),  # This layer creates full_back_prop=True and has a propagate_call_back
            ),
            nn.Sequential(linear4, relu4, linear_out),
        ),
        input_dim=(2,),
    )
def get_mnist_net() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
    """Load the pretrained MNIST 2x50 fully-connected net as an abstract network."""
    weights_path = "networks/mnist_2_50_flattened.pyt"
    concrete = mnist_a_b(2, 50)
    concrete.load_state_dict(torch.load(weights_path))
    in_shape: Tuple[int, ...] = (784,)
    return AbstractNetwork.from_concrete_module(concrete, in_shape), in_shape
def get_relu_layer() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
    """A single abstract ReLU layer on a 10-dim input."""
    in_shape: Tuple[int, ...] = (10,)
    concrete = nn.Sequential(nn.ReLU())
    return AbstractNetwork.from_concrete_module(concrete, in_shape), in_shape
def get_relu_lin_layer() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
    """A 20-10 linear layer followed by ReLU, as an abstract network."""
    in_shape: Tuple[int, ...] = (20,)
    concrete = nn.Sequential(nn.Linear(20, 10), nn.ReLU())
    return AbstractNetwork.from_concrete_module(concrete, in_shape), in_shape
def get_two_relu_lin_layer() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
    """A frozen 20-20-10 fully-connected ReLU network."""
    in_shape: Tuple[int, ...] = (20,)
    concrete = nn.Sequential(
        nn.Linear(20, 20),
        nn.ReLU(),
        nn.Linear(20, 10),
        nn.ReLU(),
    )
    net = AbstractNetwork.from_concrete_module(concrete, in_shape)
    freeze_network(net)
    return net, in_shape
def get_three_relu_lin_layer() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
    """A frozen MNIST-sized 784-50-50-10 fully-connected ReLU network."""
    in_shape: Tuple[int, ...] = (784,)
    concrete = nn.Sequential(
        nn.Linear(784, 50),
        nn.ReLU(),
        nn.Linear(50, 50),
        nn.ReLU(),
        nn.Linear(50, 10),
        nn.ReLU(),
    )
    net = AbstractNetwork.from_concrete_module(concrete, in_shape)
    freeze_network(net)
    return net, in_shape
def toy_convtranspose2d_net() -> Tuple[AbstractNetwork, Tuple[int, int, int]]:
    """
    A toy network containing a convtranspose2d layer
    Returns:
        abstract_network: AS wrapper around the network
        input_size: size of the input
    """
    input_size = (1, 4, 4)
    # Input: 1x4x4
    layers: List[nn.Module] = []
    layers += [
        nn.ConvTranspose2d(
            in_channels=1,
            out_channels=2,
            kernel_size=(2, 2),
            stride=(1, 1),
            padding=(0, 0),
            output_padding=(0, 0),
            bias=True,
            dilation=1,
        )
    ]  # 2x5x5
    layers += [nn.Flatten()]
    return (
        AbstractNetwork.from_concrete_module(nn.Sequential(*layers), input_size),
        input_size,
    )
def get_convtranspose2d_conv_net() -> Tuple[AbstractNetwork, Tuple[int, int, int]]:
    """Mixed ConvTranspose2d / Conv2d network on a 1x4x4 input."""
    input_size = (1, 4, 4)
    # Input: 1x4x4
    layers: List[nn.Module] = []
    layers += [
        nn.ConvTranspose2d(
            in_channels=1,
            out_channels=3,
            kernel_size=(2, 2),
            stride=(1, 1),
            padding=(0, 0),
            output_padding=(0, 0),
            bias=True,
            dilation=1,
        )
    ]  # 3x5x5
    layers += [
        nn.Conv2d(
            in_channels=3,
            out_channels=5,
            kernel_size=(2, 2),
            stride=(1, 1),
            padding=(1, 1),
            bias=True,
            dilation=1,
        )
    ]  # 5x6x6
    layers += [nn.ReLU()]
    layers += [
        nn.ConvTranspose2d(
            in_channels=5,
            out_channels=1,
            kernel_size=(2, 2),
            stride=(2, 2),
            padding=(0, 0),
            output_padding=(0, 0),
            bias=True,
            dilation=1,
        )
    ]  # 1x12x12
    layers += [nn.Flatten()]
    return (
        AbstractNetwork.from_concrete_module(nn.Sequential(*layers), input_size),
        input_size,
    )
def get_toy_split_block() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
    """A toy network wrapping a single SplitBlock followed by Flatten.

    Returns the abstract network and its input size.
    """
    input_size = (1, 3, 4)
    # Input: 1x3x4
    # Center path of the split: a shape-preserving conv + ReLU.
    path = nn.Sequential(*[nn.Conv2d(1, 1, 3, 1, 1), nn.ReLU()])
    layers = [
        SplitBlock(
            split=(False, (3, 1), None, -1, True),
            center_path=path,
            inner_reduce=(1, False, False),
            outer_reduce=(1, False, False),
        ),
        nn.Flatten(),
    ]
    return (
        AbstractNetwork.from_concrete_module(nn.Sequential(*layers), input_size),
        input_size,
    )
def get_nn4sys_128d_splitblock() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
    """Load a single sub-block of path 2 of the nn4sys mscn_128d ONNX model."""
    onnx_path = "vnn-comp-2022-sup/benchmark_vnn22/nn4sys2022/model/mscn_128d.onnx"
    o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
    net = o2p_net[0].paths[2]
    # in_shape = (11, 14)
    net = nn.Sequential(net[2])
    in_shape = (3, 7)
    freeze_network(net)
    net.eval()
    abs_net = AbstractNetwork.from_concrete_module(net, in_shape)
    return abs_net, in_shape
def get_nn4sys_128d_block() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
    """Load path 2 of the nn4sys mscn_128d ONNX model as an abstract network."""
    onnx_path = "vnn-comp-2022-sup/benchmark_vnn22/nn4sys2022/model/mscn_128d.onnx"
    o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
    net = o2p_net[0].paths[2]
    in_shape = (11, 14)
    # net = nn.Sequential(net[2])
    # in_shape = (3, 7)
    freeze_network(net)
    net.eval()
    abs_net = AbstractNetwork.from_concrete_module(net, in_shape)
    return abs_net, in_shape
def get_nn4sys_128d_multipath_block_stacked() -> Tuple[
    AbstractNetwork, Tuple[int, ...]
]:
    """Load the full multi-path block of the nn4sys mscn_128d ONNX model."""
    onnx_path = "vnn-comp-2022-sup/benchmark_vnn22/nn4sys2022/model/mscn_128d.onnx"
    o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
    net = o2p_net[0]
    in_shape = (11, 14)
    # net = nn.Sequential(net[2])
    # in_shape = (3, 7)
    freeze_network(net)
    net.eval()
    abs_net = AbstractNetwork.from_concrete_module(net, in_shape)
    return abs_net, in_shape
def get_deep_poly_bounds(
    network: AbstractNetwork,
    input_lb: Tensor,
    input_ub: Tensor,
    use_dependence_sets: bool = False,
    use_early_termination: bool = False,
    reset_input_bounds: bool = True,
    recompute_intermediate_bounds: bool = True,
    max_num_query: int = 10000,
) -> Tuple[Tensor, Tensor]:
    """Run one DeepPoly backsubstitution pass over ``network``.

    Builds an identity query over the network output, backsubstitutes it to
    the input layer and concretizes against the given input box.

    Returns the (lower, upper) output bounds.
    """
    device = input_lb.device
    query_coef = get_output_bound_initial_query_coef(
        dim=network.output_dim,
        intermediate_bounds_to_recompute=None,  # get all bounds
        use_dependence_sets=use_dependence_sets,
        batch_size=1,
        device=device,
        dtype=None,  # TODO: should this be something else?
    )
    abstract_shape = MN_BaB_Shape(
        query_id=query_tag(network),
        query_prev_layer=None,
        queries_to_compute=None,
        lb=AffineForm(query_coef),
        ub=AffineForm(query_coef),
        unstable_queries=None,
        subproblem_state=None,
    )
    output_shape = network.backsubstitute_mn_bab_shape(
        make_backsubstitution_config(
            use_dependence_sets=use_dependence_sets,
            use_early_termination=use_early_termination,
            max_num_query=max_num_query,
        ),
        input_lb,
        input_ub,
        query_coef=None,
        abstract_shape=abstract_shape,
        compute_upper_bound=True,
        reset_input_bounds=reset_input_bounds,
        recompute_intermediate_bounds=recompute_intermediate_bounds,
        optimize_intermediate_bounds=False,
    )
    out_lbs, out_ubs = output_shape.concretize(input_lb, input_ub)
    assert out_ubs is not None
    return (out_lbs, out_ubs)
def get_deep_poly_lower_bounds(
    network: AbstractNetwork,
    input_lb: Tensor,
    input_ub: Tensor,
    use_dependence_sets: bool = False,
    use_early_termination: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
    """DeepPoly pass computing only lower output bounds (``ub`` left None)."""
    device = input_lb.device
    query_coef = get_output_bound_initial_query_coef(
        dim=network.output_dim,
        intermediate_bounds_to_recompute=None,  # get all bounds
        use_dependence_sets=False,
        batch_size=1,
        device=device,
        dtype=None,  # TODO: should this be something else?
    )
    abstract_shape = MN_BaB_Shape(
        query_id=query_tag(network),
        query_prev_layer=None,
        queries_to_compute=None,
        lb=AffineForm(query_coef),
        ub=None,
        unstable_queries=None,
        subproblem_state=None,
    )
    output_shape = network.backsubstitute_mn_bab_shape(
        config=make_backsubstitution_config(
            use_dependence_sets=use_dependence_sets,
            use_early_termination=use_early_termination,
        ),
        input_lb=input_lb,
        input_ub=input_ub,
        query_coef=None,
        abstract_shape=abstract_shape,
        compute_upper_bound=False,
        reset_input_bounds=True,
        recompute_intermediate_bounds=True,
        optimize_intermediate_bounds=False,
    )
    out_lbs, out_ubs = output_shape.concretize(input_lb, input_ub)
    return (out_lbs, out_ubs)
def get_deep_poly_forward_bounds(
    network: AbstractNetwork,
    input_lb: Tensor,
    input_ub: Tensor,
) -> Tuple[Tensor, Tensor]:
    """Propagate a forward DeepPoly element through the network and concretize."""
    dpf_shape = DeepPoly_f.construct_from_bounds(input_lb, input_ub)
    recovered_lb, recovered_ub = dpf_shape.concretize()
    # Sanity check: constructing from bounds and concretizing must round-trip.
    assert torch.isclose(recovered_lb, input_lb, atol=1e-10, rtol=1e-10).all()
    assert torch.isclose(recovered_ub, input_ub, atol=1e-10, rtol=1e-10).all()
    final_shape = network.propagate_abstract_element(dpf_shape)
    lower, upper = final_shape.concretize()
    assert upper is not None
    return (lower, upper)
def get_zono_bounds(
    network: AbstractNetwork,
    input_lb: Tensor,
    input_ub: Tensor,
) -> Tuple[Tensor, Tensor]:
    """Propagate a zonotope through the network and concretize output bounds."""
    zono_in = HybridZonotope.construct_from_bounds(input_lb, input_ub, domain="zono")
    zono_out = network.propagate_abstract_element(zono_in)
    lower, upper = zono_out.concretize()
    assert upper is not None
    return (lower, upper)
def get_input_splitting_bounds(
    network: AbstractNetwork, input_lb: Tensor, input_ub: Tensor, domain_splitting: dict
) -> Tuple[Tensor, Tensor]:
    """Lower-bound a randomly chosen output dimension via input-domain splitting.

    Binary-searches the threshold ``mid`` of the property ``out[dim] >= mid``
    and returns the largest verified lower bound; the returned upper bound is
    +inf (no upper bound is computed here).
    """
    config = MNBabVerifierConfig(Bunch())
    # Only overwrite domain-splitting options that actually exist on the config.
    for k, v in domain_splitting.items():
        if hasattr(config.domain_splitting, k):
            setattr(config.domain_splitting, k, v)
    config.outer.adversarial_attack = False
    verifier = MNBaBVerifier(network, input_lb.device, config)
    dim = int(np.random.randint(0, network.output_dim))
    out_lbs = None
    ub = 100.0
    lb = -100.0
    while ub - lb > 1e-3:
        mid = (ub + lb) / 2
        properties_to_verify = [[(dim, -1, mid)]]
        queue, out_lbs_tmp = verifier._verify_with_input_domain_splitting(
            config.domain_splitting,
            input_lb,
            input_ub,
            properties_to_verify,
            20 + time.time(),  # presumably a deadline 20s from now — TODO confirm
        )
        if len(queue) == 0:
            # Property verified on all sub-domains: shift the residual bound
            # back by mid and remember it as the current best lower bound.
            lb = out_lbs_tmp.detach().item() + mid
            out_lbs = torch.zeros_like(out_lbs_tmp) + lb
        else:
            # Some sub-domains remain unverified: the threshold was too high.
            ub = mid
    assert out_lbs is not None
    return out_lbs, torch.ones_like(out_lbs) * torch.inf
def opt_intermediate_bounds(
    network: AbstractNetwork,
    input_lb: Tensor,
    input_ub: Tensor,
    use_prima: bool = False,
    use_milp: bool = False,
) -> MN_BaB_Shape:
    """Backsubstitute through ``network`` while optimizing intermediate bounds.

    Args:
        network: abstract network to bound.
        input_lb / input_ub: elementwise input bounds.
        use_prima: if True, also compute PRIMA multi-neuron constraints.
        use_milp: if True, first tighten ReLU input bounds via a MILP encoding.

    Returns:
        The backsubstituted output shape.
    """
    if use_milp:
        milp_model = MILPNetwork.build_model_from_abstract_net(
            (input_lb + input_ub) / 2, input_lb, input_ub, network
        )
        for idx, layer in enumerate(network.layers):
            if isinstance(layer, ReLU):
                layer.optim_input_bounds = milp_model.get_network_bounds_at_layer_multi(
                    layer_tag(network.layers[idx]), True, 100, 300, time.time() + 300
                )
    device = input_lb.device
    query_coef = get_output_bound_initial_query_coef(
        dim=network.output_dim,
        intermediate_bounds_to_recompute=None,  # get all bounds
        use_dependence_sets=False,
        batch_size=1,
        device=device,
        dtype=None,  # TODO: should this be something else?
    )
    abstract_shape = MN_BaB_Shape(
        query_id=query_tag(network),
        query_prev_layer=None,
        queries_to_compute=None,
        lb=AffineForm(query_coef),
        ub=AffineForm(query_coef),
        unstable_queries=None,
        subproblem_state=SubproblemState.create_default(
            split_state=None,
            optimize_prima=use_prima,
            batch_size=1,
            device=device,
            use_params=True,
        ),
        # best_layer_bounds=None, # TODO: it used to pass None here, is this a problem?
    )
    config = make_backsubstitution_config()
    if use_prima:
        prima_hyperparameters = make_prima_hyperparameters()
        layer_ids_for_which_to_compute_prima_constraints = (
            network.get_activation_layer_ids()
        )
        config = config.with_prima(
            prima_hyperparameters,
            layer_ids_for_which_to_compute_prima_constraints,
        )
    output_shape = network.backsubstitute_mn_bab_shape(
        # BUGFIX: previously a fresh default config was created here, silently
        # discarding the PRIMA settings prepared above.
        config=config,
        input_lb=input_lb,
        input_ub=input_ub,
        query_coef=None,
        abstract_shape=abstract_shape,
        compute_upper_bound=True,
        reset_input_bounds=True,
        recompute_intermediate_bounds=True,
        optimize_intermediate_bounds=True,
    )
    return output_shape
def run_fuzzing_test(
    as_net: AbstractNetwork,
    input: Tensor,
    input_lb: Tensor,
    input_ub: Tensor,
    input_shape: Tuple[int, ...],
    bounding_call: Callable[
        [AbstractNetwork, Tuple[Tensor, Tensor]], Tuple[Tensor, Tensor]
    ],
    use_beta: bool = True,
    use_adv: bool = False,
    seed: int = 42,
) -> None:
    """Fuzz-test the soundness of ``bounding_call`` on ``as_net``.

    Samples random points inside [input_lb, input_ub] and asserts the concrete
    network outputs stay within the computed bounds; optionally also runs a
    PGD attack that searches for soundness violations.
    """
    device = input_lb.device
    (lb, ub) = bounding_call(as_net, (input_lb, input_ub))
    print(f"lb: {lb} ub: {ub}")
    if use_beta:
        seed_everything(seed)
        # Beta(0.5, 0.5) concentrates mass near 0 and 1, i.e. near the box faces.
        m = Beta(concentration0=0.5, concentration1=0.5)
        eps = (input_ub - input_lb) / 2
        out = as_net(input)
        lb, ub = lb.to(device), ub.to(device)
        for i in range(100):
            # Batch of 256 random samples with the input's trailing shape.
            shape_check = (256, *input_shape[1:])
            check_x = input_lb + 2 * eps * m.sample(shape_check).to(device)
            out = as_net(check_x)
            # Small tolerance for floating-point error in the bound computation.
            assert (lb - 1e-4 <= out).all() and (
                out <= ub + 1e-4
            ).all(), f"Failed with lb violation: {(lb- out).max()} and ub violation: {(out - ub).max()}"
    if use_adv:
        bounds = (lb.to(device), ub.to(device))
        target = torch.argmax(as_net(input)).item()
        _pgd_whitebox(
            as_net,
            input,
            bounds,
            target,
            input_lb,
            input_ub,
            input.device,
            num_steps=200,
        )
def optimize_output_node_bounds_with_prima_crown(
    network: AbstractNetwork,
    output_idx: int,
    input_lb: Tensor,
    input_ub: Tensor,
    optimize_alpha: bool = False,
    optimize_prima: bool = False,
    custom_optimizer_config: Optional[MNBabOptimizerConfig] = None,
) -> Tuple[float, float]:
    """Compute optimized lower and upper bounds for one output neuron.

    The lower bound minimizes ``x_{output_idx}`` directly; the upper bound is
    obtained by minimizing ``-x_{output_idx}`` and negating the result.
    """
    config = (
        make_optimizer_config(
            optimize_alpha=optimize_alpha, optimize_prima=optimize_prima
        )
        if custom_optimizer_config is None
        else custom_optimizer_config
    )
    backsubstitution_config = make_backsubstitution_config()
    optimizer = MNBabOptimizer(config, backsubstitution_config)

    print(f"computing lower bound to x_{output_idx}")
    lb_query_coef = torch.zeros(1, 1, *network.output_dim)
    # Select the requested neuron in the (possibly multi-dimensional) output.
    lb_query_coef.data[(0,) * (lb_query_coef.dim() - 1) + (output_idx,)] = 1
    lb_bounded_subproblem, _ = optimizer.bound_root_subproblem(
        input_lb,
        input_ub,
        network,
        lb_query_coef,
        device=input_lb.device,
    )
    lb = lb_bounded_subproblem.lower_bound

    print(f"computing upper bound to x_{output_idx}")
    ub_query_coef = torch.zeros(1, 1, *network.output_dim)
    # BUGFIX: index with ub_query_coef's own dim (was lb_query_coef's — the
    # value is identical today, but the old code would break if either shape
    # were ever changed independently).
    ub_query_coef.data[(0,) * (ub_query_coef.dim() - 1) + (output_idx,)] = -1
    ub_bounded_subproblem, _ = optimizer.bound_root_subproblem(
        input_lb,
        input_ub,
        network,
        ub_query_coef,
        device=input_lb.device,
    )
    # A lower bound on -x is the negative of an upper bound on x.
    ub = -ub_bounded_subproblem.lower_bound
    return lb, ub
def lower_bound_output_node_with_branch_and_bound(
    network: AbstractNetwork,
    output_idx: int,
    input_lb: Tensor,
    input_ub: Tensor,
    batch_sizes: Sequence[int],
    early_stopping_threshold: Optional[float] = None,
    optimize_alpha: bool = False,
    optimize_prima: bool = False,
) -> float:
    """Lower-bound output neuron ``output_idx`` via branch and bound."""
    verifier_config = make_verifier_config(
        optimize_alpha=optimize_alpha,
        optimize_prima=optimize_prima,
        beta_lr=0.1,
        bab_batch_sizes=batch_sizes,
        recompute_intermediate_bounds_after_branching=True,
    )
    bab_optimizer = MNBabOptimizer(
        verifier_config.optimizer, verifier_config.backsubstitution
    )
    bab = BranchAndBound(
        bab_optimizer,
        verifier_config.bab,
        verifier_config.backsubstitution,
        torch.device("cpu"),
    )
    # One-hot query selecting the requested output neuron.
    query_coef = torch.zeros(1, 1, *network.output_dim)
    query_coef.data[0, 0, output_idx] = 1
    result = bab.bound_minimum_with_branch_and_bound(
        "dummy_id",
        query_coef,
        network,
        input_lb,
        input_ub,
        early_stopping_threshold,
    )
    return result[0]
def prima_crown_wrapper_call(
    optimize_alpha: bool, optimize_prima: bool
) -> Callable[[AbstractNetwork, Tuple[Tensor, Tensor]], Tuple[Tensor, Tensor]]:
    """Return a bounding callable that bounds every output neuron individually
    with (alpha-/prima-) CROWN at float64 precision."""

    def prima_crown_call(
        network: AbstractNetwork, bounds: Tuple[Tensor, Tensor]
    ) -> Tuple[Tensor, Tensor]:
        device = bounds[0].device
        out_dim = int(np.prod(network.output_dim))  # cast: np.prod returns np.int64
        output_lb_with_alpha = torch.full(
            size=(out_dim,),
            fill_value=-float("inf"),
            dtype=torch.float64,
            device=device,
        )
        output_ub_with_alpha = torch.full(
            size=(out_dim,),
            fill_value=float("inf"),
            dtype=torch.float64,
            device=device,
        )
        prior_type = torch.get_default_dtype()
        torch.set_default_dtype(torch.float64)
        try:  # BUGFIX: restore the default dtype even if bounding raises.
            for j in range(out_dim):
                (
                    prima_crown_alpha_lb,
                    prima_crown_alpha_ub,
                ) = optimize_output_node_bounds_with_prima_crown(
                    network,
                    j,
                    bounds[0],
                    bounds[1],
                    optimize_alpha=optimize_alpha,
                    optimize_prima=optimize_prima,
                )
                output_lb_with_alpha[j] = prima_crown_alpha_lb
                output_ub_with_alpha[j] = prima_crown_alpha_ub
        finally:
            torch.set_default_dtype(prior_type)
        return (output_lb_with_alpha, output_ub_with_alpha)

    return prima_crown_call
def milp_call(
    net: AbstractNetwork, bounds: Tuple[Tensor, Tensor]
) -> Tuple[Tensor, Tensor]:
    """Bound the network output exactly via a MILP encoding."""
    lb, ub = bounds
    if lb.dim() in [1, 3]:  # add the missing batch dimension
        batched_lb = lb.unsqueeze(0)
        batched_ub = ub.unsqueeze(0)
    else:
        batched_lb = lb
        batched_ub = ub
    center = (batched_lb + batched_ub) / 2
    milp_model = MILPNetwork.build_model_from_abstract_net(center, lb, ub, net)
    return milp_model.get_network_output_bounds()
def dp_call(
    net: AbstractNetwork, bounds: Tuple[Tensor, Tensor]
) -> Tuple[Tensor, Tensor]:
    """Bound the network output with plain DeepPoly."""
    input_lb, input_ub = bounds
    return get_deep_poly_bounds(net, input_lb, input_ub)
def dpf_call(
    net: AbstractNetwork, bounds: Tuple[Tensor, Tensor]
) -> Tuple[Tensor, Tensor]:
    """Bound the network output with forward DeepPoly."""
    input_lb, input_ub = bounds
    return get_deep_poly_forward_bounds(net, input_lb, input_ub)
def zono_call(
    net: AbstractNetwork, bounds: Tuple[Tensor, Tensor]
) -> Tuple[Tensor, Tensor]:
    """Bound the network output with zonotope propagation."""
    input_lb, input_ub = bounds
    return get_zono_bounds(net, input_lb, input_ub)
def splitting_call(
    net: AbstractNetwork,
    bounds: Tuple[Tensor, Tensor],
    domain_splitting: dict,
) -> Tuple[Tensor, Tensor]:
    """Bound the network output via input-domain splitting."""
    input_lb, input_ub = bounds
    return get_input_splitting_bounds(net, input_lb, input_ub, domain_splitting)
def _pgd_whitebox(
    model: AbstractNetwork,
    X: Tensor,
    bounds: Tuple[Tensor, Tensor],
    target: float,
    specLB: Tensor,
    specUB: Tensor,
    device: torch.device,
    num_steps: int = 2000,
    step_size: float = 0.2,
    restarts: int = 1,
    seed: int = 42,
    mode: str = "soundness",
) -> None:
    """Run a PGD attack inside the input box [specLB, specUB].

    mode == "soundness": search for an input whose network output leaves the
    claimed output ``bounds`` (asserts with diagnostics if one is found).
    mode == "accuracy": search for an input not classified as ``target``
    (asserts if one is found).
    """
    n_class: int = model(X).shape[-1]
    # Batch of ~100 samples: `repeats` blocks that push each output up and
    # `repeats` blocks that push each output down.
    repeats = int(np.floor(100 / 2 / n_class))
    batch_size = int(repeats * n_class * 2)
    device = X.device  # NOTE(review): overrides the `device` parameter
    dtype = X.dtype
    assert mode in ["soundness", "accuracy"]
    D = 1e-5  # numerical tolerance when checking for bound violations
    seed_everything(seed)
    for _ in range(restarts):
        X_pgd = torch.autograd.Variable(
            X.data.repeat((batch_size,) + (1,) * (X.dim() - 1)), requires_grad=True
        ).to(device)
        # Random start uniformly inside the box.
        random_noise = torch.ones_like(X_pgd).uniform_(-0.5, 0.5) * (specUB - specLB)
        X_pgd = torch.autograd.Variable(
            torch.clamp(X_pgd.data + random_noise, specLB, specUB),
            requires_grad=True,
        )

        # Scale each dimension's step by the width of the box in that dimension.
        lr_scale = specUB - specLB
        pbar = tqdm.trange(num_steps + 1)
        for i in pbar:
            # NOTE(review): the optimizer is re-created every step and only
            # zero_grad is called on it; the actual update is the manual sign
            # step at the end of the loop, so its lr is unused.
            opt = torch.optim.SGD([X_pgd], lr=1e-1)
            opt.zero_grad()
            assert (X_pgd <= specUB).all() and (
                X_pgd >= specLB
            ).all(), "Adv example invalid"

            with torch.enable_grad():
                out = model(X_pgd)
                # deltas[b, k] = out[b, target] - out[b, k] (0 for k == target).
                sub_mat = -1 * torch.eye(n_class, dtype=out.dtype, device=out.device)
                sub_mat[:, target] = 1
                sub_mat[target, :] = 0
                deltas = torch.matmul(out, sub_mat.T)
                if mode == "soundness":
                    # Any sample outside the claimed bounds disproves soundness;
                    # print diagnostics for the first violating sample.
                    if not (
                        (bounds[0] <= out + D).all() and (bounds[1] >= out - D).all()
                    ):
                        violating_index = (
                            (bounds[0] > out.detach())
                            .__or__(bounds[1] < out.detach())
                            .sum(1)
                            .nonzero()[0][0]
                        )
                        print("Violating sample: ", X_pgd[violating_index])
                        print("Corresponding output: ", out[violating_index])
                        print("LB: ", bounds[0])
                        print("UB: ", bounds[1])

                    assert (bounds[0] <= out + D).all() and (
                        bounds[1] >= out - D
                    ).all(), f"max lb violation: {torch.max(bounds[0] - out)}, max ub violation {torch.max(out - bounds[1])}"
                elif mode == "accuracy":
                    if not out.argmax(1).eq(target).all():
                        violating_index = (~out.argmax(1).eq(target)).nonzero()[0][0]
                        assert False, f"Violation: {X_pgd[violating_index]}"
            if mode == "soundness":
                # First half of the batch maximizes each diagonal output,
                # second half minimizes it (sign pattern of the cat below).
                loss = (
                    torch.cat(
                        [
                            torch.ones(
                                repeats * n_class, dtype=dtype, device=device
                            ),
                            -torch.ones(
                                repeats * n_class, dtype=dtype, device=device
                            ),
                        ],
                        0,
                    )
                    * out[
                        torch.eye(n_class, dtype=torch.bool, device=device).repeat(
                            2 * repeats, 1
                        )
                    ]
                )
            elif mode == "accuracy":
                # Minimize the margins towards the target class.
                loss = (
                    -torch.ones(repeats * n_class * 2, dtype=dtype, device=device)
                    * deltas[
                        torch.eye(n_class, dtype=torch.bool, device=device).repeat(
                            2 * repeats, 1
                        )
                    ]
                )
            loss.sum().backward()
            pbar.set_description(f"Loss: {loss.sum():.3f}")
            # Signed-gradient ascent step, clipped back into the box.
            eta = lr_scale * step_size * X_pgd.grad.data.sign()
            X_pgd = torch.autograd.Variable(
                torch.clamp(X_pgd.data + eta, specLB, specUB),
                requires_grad=True,
            )
def set_torch_precision(dtype: torch.dtype) -> Callable[[Callable], Callable]:
    """Decorator factory: run the wrapped function with ``dtype`` as torch's
    default dtype, restoring the previous default afterwards.

    BUGFIX: the previous default is now restored even if the wrapped function
    raises (the old version leaked the dtype change on exceptions).
    """

    def set_torch_precision_dec(func: Callable) -> Callable[[Any], Any]:
        @functools.wraps(func)
        def wrapper_decorator(*args: Any, **kwargs: Any) -> Any:
            prior_torch_dtype = torch.get_default_dtype()
            torch.set_default_dtype(dtype)
            try:
                return func(*args, **kwargs)
            finally:
                torch.set_default_dtype(prior_torch_dtype)

        return wrapper_decorator

    return set_torch_precision_dec
| 41,368 | 30.994586 | 125 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/gurobi_util.py | """
This code is mostly adapted from ERAN
# source: https://github.com/mnmueller/eran/blob/all_constraints/tf_verify/ai_milp.py
# commit hash: 4d25107db9db743a008eb63c8fa5a4fe8463b16d
"""
from typing import List, Tuple
import numpy as np
from gurobipy import GRB, LinExpr, Model, Var # type: ignore[import]
from torch import Tensor
from src.abstract_layers.abstract_conv2d import Conv2d
from src.abstract_layers.abstract_flatten import Flatten
from src.abstract_layers.abstract_linear import Linear
from src.abstract_layers.abstract_network import AbstractNetwork
from src.abstract_layers.abstract_normalization import Normalization
from src.abstract_layers.abstract_relu import ReLU
def create_milp_model(
    network: AbstractNetwork, input_lb: Tensor, input_ub: Tensor
) -> Tuple[Model, List[Var]]:
    """Encode ``network`` over the input box as a Gurobi MILP.

    Returns the model and the flat list of Gurobi variables: inputs first,
    followed by one group of variables per encoded layer.
    """
    model = Model("milp")
    model.setParam("OutputFlag", 0)
    model.setParam(GRB.Param.FeasibilityTol, 1e-5)

    var_list = _encode_inputs(model, input_lb, input_ub)

    prev_layer_start_index = 0
    for layer in network.layers:  # (dropped unused enumerate index)
        assert layer.input_bounds  # mypy
        if isinstance(layer, Linear):
            output_bounds = layer.propagate_interval(layer.input_bounds)
            prev_layer_start_index = _add_linear_layer_constraints_to(
                model,
                var_list,
                layer,
                output_bounds[0],
                output_bounds[1],
                prev_layer_start_index,
            )
        elif isinstance(layer, Conv2d):
            output_bounds = layer.propagate_interval(layer.input_bounds)
            prev_layer_start_index = _add_conv_layer_constraints_to(
                model,
                var_list,
                layer,
                output_bounds[0],
                output_bounds[1],
                prev_layer_start_index,
            )
        elif isinstance(layer, ReLU):
            prev_layer_start_index = _add_relu_layer_constraints_to(
                model, var_list, layer, prev_layer_start_index
            )
        elif isinstance(layer, Normalization):
            output_bounds = layer.propagate_interval(layer.input_bounds)
            prev_layer_start_index = _add_normalization_layer_constraints_to(
                model,
                var_list,
                layer,
                output_bounds[0],
                output_bounds[1],
                prev_layer_start_index,
            )
        elif isinstance(layer, Flatten):
            # Flatten is a pure reshape: the variables carry over unchanged.
            continue
        else:
            raise NotImplementedError
    return model, var_list
def _encode_inputs(model: Model, input_lb: Tensor, input_ub: Tensor) -> List[Var]:
    """Add one bounded continuous Gurobi variable per input pixel."""
    flat_lb = input_lb.detach().flatten().numpy()
    flat_ub = input_ub.detach().flatten().numpy()
    variables = []
    for idx in range(np.prod(input_lb.shape)):
        variables.append(
            model.addVar(
                vtype=GRB.CONTINUOUS,
                lb=flat_lb[idx],
                ub=flat_ub[idx],
                name="x" + str(idx),
            )
        )
    return variables
def _add_linear_layer_constraints_to(
    model: Model,
    var_list: List[Var],
    layer: Linear,
    layer_lb: Tensor,
    layer_ub: Tensor,
    prev_start_var_index: int,
) -> int:
    """Encode a fully-connected layer as equality constraints out = W x + b.

    Returns the index of the layer's first variable in ``var_list``.
    """
    weights = layer.weight.detach().numpy()
    bias = layer.bias.detach().numpy()
    layer_lb = layer_lb.squeeze(0).detach().numpy()
    layer_ub = layer_ub.squeeze(0).detach().numpy()

    start_var_index = len(var_list)
    n_output_neurons = weights.shape[0]
    # One bounded output variable per neuron.
    for out_idx in range(n_output_neurons):
        var_list.append(
            model.addVar(
                vtype=GRB.CONTINUOUS,
                lb=layer_lb[out_idx],
                ub=layer_ub[out_idx],
                name="x" + str(start_var_index + out_idx),
            )
        )
    # Matmul equality constraint per output neuron.
    for out_idx in range(n_output_neurons):
        n_input_neurons = weights.shape[1]
        expr = LinExpr()
        expr += -1 * var_list[start_var_index + out_idx]
        for in_idx in range(n_input_neurons):
            expr.addTerms(
                weights[out_idx][in_idx], var_list[prev_start_var_index + in_idx]
            )
        expr.addConstant(bias[out_idx])
        model.addLConstr(expr, GRB.EQUAL, 0)
    return start_var_index
def _add_conv_layer_constraints_to(
    model: Model,
    var_list: List[Var],
    layer: Conv2d,
    layer_lb: Tensor,
    layer_ub: Tensor,
    prev_start_var_index: int,
) -> int:
    """Encode a 2D convolution as per-output-neuron equality constraints.

    Adds one bounded variable per output neuron and, for each, the constraint
    out = sum(w * in over the receptive field) + bias.
    Returns the index of the layer's first variable in ``var_list``.
    """
    filters = layer.weight.data
    assert layer.bias is not None
    biases = layer.bias.data
    filter_size = layer.kernel_size
    num_out_neurons = np.prod(layer.output_dim)
    num_in_neurons = np.prod(
        layer.input_dim
    )  # input_shape[0]*input_shape[1]*input_shape[2]
    # print("filters", filters.shape, filter_size, input_shape, strides, out_shape, pad_top, pad_left)
    flattenend_layer_lb = layer_lb.detach().flatten().numpy()
    flattenend_layer_ub = layer_ub.detach().flatten().numpy()

    start = len(var_list)
    # One bounded continuous variable per output neuron (flattened order).
    for j in range(num_out_neurons):
        var_name = "x" + str(start + j)
        var = model.addVar(
            vtype=GRB.CONTINUOUS,
            lb=flattenend_layer_lb[j],
            ub=flattenend_layer_ub[j],
            name=var_name,
        )
        var_list.append(var)

    # Output layout is (channel, height, width), flattened row-major.
    for out_z in range(layer.output_dim[0]):
        for out_x in range(layer.output_dim[1]):
            for out_y in range(layer.output_dim[2]):
                dst_ind = (
                    out_z * layer.output_dim[1] * layer.output_dim[2]
                    + out_x * layer.output_dim[2]
                    + out_y
                )
                expr = LinExpr()
                # print("dst ind ", dst_ind)
                expr += -1 * var_list[start + dst_ind]

                # Sum over the receptive field of this output neuron.
                for inp_z in range(layer.input_dim[0]):
                    for x_shift in range(filter_size[0]):
                        for y_shift in range(filter_size[1]):
                            x_val = out_x * layer.stride[0] + x_shift - layer.padding[0]
                            y_val = out_y * layer.stride[1] + y_shift - layer.padding[1]

                            # Skip positions that fall into the (zero) padding.
                            if y_val < 0 or y_val >= layer.input_dim[2]:
                                continue

                            if x_val < 0 or x_val >= layer.input_dim[1]:
                                continue

                            mat_offset = (
                                x_val * layer.input_dim[2]
                                + y_val
                                + inp_z * layer.input_dim[1] * layer.input_dim[2]
                            )
                            if mat_offset >= num_in_neurons:
                                continue
                            src_ind = prev_start_var_index + mat_offset
                            # print("src ind ", mat_offset)
                            # filter_index = x_shift*filter_size[1]*input_shape[0]*out_shape[1] + y_shift*input_shape[0]*out_shape[1] + inp_z*out_shape[1] + out_z
                            expr.addTerms(
                                filters[out_z][inp_z][x_shift][y_shift],
                                var_list[src_ind],
                            )

                expr.addConstant(biases[out_z])
                model.addLConstr(expr, GRB.EQUAL, 0)
    return start
def _add_relu_layer_constraints_to(
    model: Model, var_list: List[Var], layer: ReLU, prev_start_var_index: int
) -> int:
    """Encode a ReLU layer with the standard big-M MILP formulation.

    Stable neurons are encoded exactly (identity / zero); crossing neurons
    (lb < 0 < ub) each get one binary activation variable plus big-M
    constraints and an indicator constraint.
    Returns the index of the layer's first ReLU *output* variable.
    """
    start_var_index = len(var_list)
    n_output_neurons = np.prod(layer.output_dim)
    relu_counter = start_var_index
    assert layer.input_bounds  # mypy
    layer_input_lb = layer.input_bounds[0].flatten().detach().numpy()
    layer_input_ub = layer.input_bounds[1].flatten().detach().numpy()

    # Crossing neurons have lb * ub < 0; all others are stable.
    crossing_node_idx = list(np.nonzero(layer_input_lb * layer_input_ub < 0)[0])
    temp_idx = np.ones(n_output_neurons, dtype=bool)
    temp_idx[crossing_node_idx] = False
    relax_encode_idx = np.arange(n_output_neurons)[temp_idx]

    # Binary activation indicators are appended first, so the ReLU output
    # variables begin at `relu_counter` after this block.
    if len(crossing_node_idx) > 0:
        for i, __ in enumerate(crossing_node_idx):
            var_name = "x_bin_" + str(start_var_index + i)
            var_bin = model.addVar(vtype=GRB.BINARY, name=var_name)
            var_list.append(var_bin)
            relu_counter += 1

    # relu output variables
    for i in range(n_output_neurons):
        var_name = "x" + str(relu_counter + i)
        upper_bound = max(0.0, layer_input_ub[i])
        var = model.addVar(vtype=GRB.CONTINUOUS, lb=0.0, ub=upper_bound, name=var_name)
        var_list.append(var)

    if len(crossing_node_idx) > 0:
        for i, j in enumerate(crossing_node_idx):
            var_bin = var_list[start_var_index + i]
            # NOTE: crossing neurons satisfy lb < 0 < ub, so the first two
            # (stable) branches below are defensive and never taken here.
            if layer_input_ub[j] <= 0:
                expr = var_list[relu_counter + j]
                model.addLConstr(expr, GRB.EQUAL, 0)
            elif layer_input_lb[j] >= 0:
                expr = var_list[relu_counter + j] - var_list[prev_start_var_index + j]
                model.addLConstr(expr, GRB.EQUAL, 0)
            else:
                # y <= x - l(1-a)
                expr = (
                    var_list[relu_counter + j]
                    - var_list[prev_start_var_index + j]
                    - layer_input_lb[j] * var_bin
                )
                model.addLConstr(expr, GRB.LESS_EQUAL, -layer_input_lb[j])

                # y >= x
                expr = var_list[relu_counter + j] - var_list[prev_start_var_index + j]
                model.addLConstr(expr, GRB.GREATER_EQUAL, 0)

                # y <= u . a
                expr = var_list[relu_counter + j] - layer_input_ub[j] * var_bin
                model.addLConstr(expr, GRB.LESS_EQUAL, 0)

                # y >= 0
                expr = var_list[relu_counter + j]
                model.addLConstr(expr, GRB.GREATER_EQUAL, 0)

                # indicator constraint: a = 1 forces a non-negative pre-activation
                model.addGenConstrIndicator(
                    var_bin,
                    True,
                    var_list[prev_start_var_index + j],
                    GRB.GREATER_EQUAL,
                    0.0,
                )

    # Stable neurons: exact encoding, no binary variable needed.
    if len(relax_encode_idx) > 0:
        for j in relax_encode_idx:
            if layer_input_ub[j] <= 0:
                expr = var_list[relu_counter + j]
                model.addLConstr(expr, GRB.EQUAL, 0)
            elif layer_input_lb[j] >= 0:
                expr = var_list[relu_counter + j] - var_list[prev_start_var_index + j]
                model.addLConstr(expr, GRB.EQUAL, 0)
    return relu_counter
def _add_normalization_layer_constraints_to(
    model: Model,
    var_list: List[Var],
    layer: Normalization,
    layer_lb: Tensor,
    layer_ub: Tensor,
    prev_start_var_index: int,
) -> int:
    """Encode per-channel normalization out = (in - mean_c) / std_c.

    Returns the index of the layer's first variable in ``var_list``.
    """
    num_out_neurons = int(np.prod(layer.output_dim))
    flat_lb = layer_lb.detach().flatten().numpy()
    flat_ub = layer_ub.detach().flatten().numpy()

    start = len(var_list)
    # One bounded output variable per neuron.
    for out_idx in range(num_out_neurons):
        var_list.append(
            model.addVar(
                vtype=GRB.CONTINUOUS,
                lb=flat_lb[out_idx],
                ub=flat_ub[out_idx],
                name="x" + str(start + out_idx),
            )
        )

    means = layer.means.flatten()
    stds = layer.stds.flatten()
    for out_idx in range(num_out_neurons):
        # Recover this flat index's channel from the layer's output shape.
        multi_index = np.unravel_index(out_idx, layer.output_dim)
        channel = int(multi_index[0])
        expr = LinExpr()
        expr += (
            var_list[prev_start_var_index + out_idx] - means[channel]
        ) / stds[channel] - 1 * var_list[start + out_idx]
        model.addLConstr(expr, GRB.EQUAL, 0)
    return start
| 11,849 | 34.909091 | 162 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/regression_tests/dependence_sets_regression_test.py | import csv
from typing import Dict, Sequence, Tuple
import numpy as np
import torch
from torch import Tensor
from src.abstract_layers.abstract_network import AbstractNetwork
from src.mn_bab_optimizer import MNBabOptimizer
from src.utilities.config import make_backsubstitution_config, make_optimizer_config
from src.utilities.initialization import seed_everything
from src.utilities.loading.data import transform_and_bound
from src.utilities.loading.network import (
cifar10_cnn_A,
freeze_network,
mnist_conv_small,
resnet2b,
)
from src.verification_subproblem import SubproblemState
from tests.test_util import (
CIFAR10_CONV_DATA_TEST_CONFIG,
CIFAR10_INPUT_DIM,
MNIST_CONV_DATA_TEST_CONFIG,
MNIST_INPUT_DIM,
)
class TestDependenceSets:
    """Regression test: bounds computed with dependence sets enabled must
    match the bounds computed without them (DeepPoly, alpha, prima)."""

    def _get_bounds_with_all_methods(
        self,
        network: AbstractNetwork,
        optimizer: MNBabOptimizer,
        query_coef: Tensor,
        input_lb: Tensor,
        input_ub: Tensor,
        use_dependence_sets: bool,
    ) -> Dict[str, Tuple[Sequence[float], Sequence[float]]]:
        """Bound ``query_coef`` with DeepPoly, alpha-CROWN and PRIMA.

        BUGFIX: ``use_dependence_sets`` used to be ignored entirely, so the
        "with" and "without" runs of the regression test compared two
        identical computations. The flag is now threaded into the
        backsubstitution config.
        """
        network.reset_input_bounds()
        batch_size = query_coef.shape[0]
        subproblem_state = SubproblemState.create_default(
            split_state=None,
            optimize_prima=True,
            batch_size=batch_size,
            device=torch.device("cpu"),
            use_params=True,
        )
        backsubstitution_config = make_backsubstitution_config(
            use_dependence_sets=use_dependence_sets
        )
        dp_lbs, dp_ubs, _, _ = optimizer.bound_minimum_with_deep_poly(
            backsubstitution_config,
            input_lb,
            input_ub,
            network,
            query_coef,
            subproblem_state=subproblem_state,
            reset_input_bounds=True,
        )
        assert isinstance(dp_lbs, Sequence)
        assert isinstance(dp_ubs, Sequence)

        alpha_lbs, alpha_ubs, _ = optimizer._bound_minimum_optimizing_alpha(
            backsubstitution_config,
            subproblem_state,
            input_lb,
            input_ub,
            network,
            query_coef,
            opt_iterations=optimizer.config.alpha.opt_iterations,
        )

        prima_lbs, prima_ubs, _ = optimizer._bound_minimum_optimizing_alpha_prima(
            backsubstitution_config,
            subproblem_state,
            input_lb,
            input_ub,
            network,
            query_coef,
            opt_iterations=optimizer.config.prima.opt_iterations,
        )
        return {
            "deeppoly": (dp_lbs, dp_ubs),
            "alpha": (alpha_lbs, alpha_ubs),
            "prima": (prima_lbs, prima_ubs),
        }

    def test_conv_small(self) -> None:
        """Compare bounds with and without dependence sets on small conv nets."""
        seed_everything(1)
        test_pad = False
        test_asym_pad = False
        test_res = False
        assert int(test_res) + int(test_pad) + int(test_asym_pad) <= 1
        using_cifar = test_pad or test_res
        if test_pad:
            network_path = (
                "networks/cifar10_CNN_A_CIFAR_MIX.pyt"  # Has non-zero padding
            )
            original_network = cifar10_cnn_A()
        elif test_res:
            network_path = "networks/resnet_2b.pth"  # Has non-zero padding
            original_network = resnet2b()
        else:
            network_path = "networks/mnist_convSmallRELU__Point.pyt"
            original_network = mnist_conv_small()
        state_dict = torch.load(network_path)
        original_network.load_state_dict(state_dict)
        network = AbstractNetwork.from_concrete_module(
            original_network, CIFAR10_INPUT_DIM if using_cifar else MNIST_INPUT_DIM
        )
        freeze_network(network)

        if using_cifar:
            test_data_path = "test_data/cifar10_test_100.csv"
        else:
            test_data_path = "test_data/mnist_test_100.csv"
        test_file = open(test_data_path, "r")
        test_instances = csv.reader(test_file, delimiter=",")

        optimizer = MNBabOptimizer(
            make_optimizer_config(
                optimize_alpha=True,
                optimize_prima=True,
            ),
            make_backsubstitution_config(),
        )

        batch_size = 3
        for i, (label, *pixel_values) in enumerate(test_instances):
            print(f"Testing test instance: {i}/5")
            if i == 5:
                break
            _, input_lb, input_ub = transform_and_bound(
                pixel_values,
                CIFAR10_CONV_DATA_TEST_CONFIG
                if using_cifar
                else MNIST_CONV_DATA_TEST_CONFIG,
            )

            # batch_size-1 random queries
            query_coef = torch.randint(
                low=-10,
                high=11,
                size=(batch_size, 1, network.output_dim[0]),
                dtype=torch.float,
            )
            # 1 verification query
            query_coef.data[0, 0] = 0
            query_coef.data[0, 0, int(label)] = 1
            query_coef.data[0, 0, 9 - int(label)] = -1

            bounds_old = self._get_bounds_with_all_methods(
                network,
                optimizer,
                query_coef,
                input_lb,
                input_ub,
                use_dependence_sets=False,
            )
            bounds_new = self._get_bounds_with_all_methods(
                network,
                optimizer,
                query_coef,
                input_lb,
                input_ub,
                use_dependence_sets=True,
            )

            for method in ["deeppoly", "alpha", "prima"]:
                # (renamed from `i` to avoid shadowing the outer loop index)
                for bound_idx in range(2):
                    curr_bounds_old = np.asarray(bounds_old[method][bound_idx])
                    curr_bounds_new = np.asarray(bounds_new[method][bound_idx])
                    print(f"[{method}] {curr_bounds_old} vs {curr_bounds_new}")
                    assert np.isclose(
                        curr_bounds_old, curr_bounds_new, rtol=1e-5, atol=1e-6
                    ).all()
                    # rel_diff = (curr_bounds_old - curr_bounds_new) / curr_bounds_old
                    # assert np.max(np.abs(rel_diff)) < 1e-5, f"{np.max(np.abs(rel_diff))}"
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/milp_full_test.py | import random
import time
import torch
from torch import nn
from src.abstract_layers.abstract_network import AbstractNetwork
from src.milp_network import MILPNetwork
from src.state.tags import layer_tag
from src.utilities.argument_parsing import get_config_from_json
from src.utilities.initialization import seed_everything
from src.utilities.loading.network import load_net_from
from tests.test_util import get_deep_poly_bounds, toy_all_layer_net, toy_net
class TestMILP:
    """
    We test our MILP implementation for layer bounds and network verification
    """

    def test_milp_toy_example(self) -> None:
        # The exact MILP output bounds of the hand-crafted toy net are known.
        network = toy_net()[0]
        input_lb = torch.tensor([[-1.0, -1.0]])
        input_ub = torch.tensor([[1.0, 1.0]])

        # Our new implementation
        # input_lb = input_lb.unsqueeze(0)
        # input_ub = input_ub.unsqueeze(0)
        x = (input_lb + input_ub) / 2
        milp_model = MILPNetwork.build_model_from_abstract_net(
            x, input_lb, input_ub, network
        )
        lbs, ubs = milp_model.get_network_output_bounds()
        assert lbs[0] == 1
        assert ubs[0] == 3

    def test_milp_all_layers(self) -> None:
        # MILP output bounds must contain the concrete output and must be at
        # least as tight as DeepPoly's bounds (up to numerical tolerance).
        torch.manual_seed(0)
        random.seed(0)
        eps = 0.1
        num_eps = 1e-6  # numerical tolerance for the DeepPoly comparison
        for i in range(10):
            network = toy_all_layer_net()[0]
            input = torch.rand(size=(1, 1, 5, 5))
            # input = torch.rand(size=(1, 27))
            input_lb, input_ub = input - eps, input + eps
            q = network(input)  # concrete output; must lie within MILP bounds
            milp_model = MILPNetwork.build_model_from_abstract_net(
                input, input_lb, input_ub, network
            )
            lbs, ubs = milp_model.get_network_output_bounds()
            (
                output_lb_deep_poly,
                output_ub_deep_poly,
            ) = get_deep_poly_bounds(network, input_lb, input_ub)
            assert (q >= lbs).all()
            assert (q <= ubs).all()
            assert (
                output_lb_deep_poly < lbs + num_eps
            ).all(), (
                f"Found violation lb {torch.max(output_lb_deep_poly - (lbs + num_eps))}"
            )
            if not (output_ub_deep_poly > ubs - num_eps).all():
                # Dump per-layer MILP bounds to help debug the violation.
                for i, layer in enumerate(milp_model.net.layers):
                    lbi, ubi = milp_model.get_network_bounds_at_layer_multi(
                        layer_tag(layer),
                        timeout_per_instance=20,
                        timeout_total=400,
                        timeout=time.time() + 400,
                    )
                    print(f"Layer {i} ============== \n LBS: {lbi} \n UBS: {ubi}")
            assert (
                output_ub_deep_poly > ubs - num_eps
            ).all(), f"Found violation ub {torch.max(ubs - num_eps - output_ub_deep_poly)}"

    def test_recursive_encoding(self) -> None:
        # Building a MILP for a ResNet must recurse into all basic-block paths.
        config = get_config_from_json("configs/baseline/cifar10_resnet_4b_bn.json")
        seed_everything(config.random_seed)
        network = load_net_from(config)
        assert isinstance(network, nn.Sequential)
        abs_net = AbstractNetwork.from_concrete_module(network, config.input_dim)
        eps = 0.1
        input = torch.rand(config.input_dim).unsqueeze(0)
        input_lb = torch.clamp(input - eps, min=0)
        input_ub = torch.clamp(input + eps, max=1)
        milp_model = MILPNetwork.build_model_from_abstract_net(
            input, input_lb, input_ub, abs_net
        )
        net_layers = dict(network.named_modules()).items()
        filtered_net_layers = set([k for (k, v) in net_layers])
        milp_layers = set(milp_model.layer_id_to_prefix_map.values())
        # The outer ResNet identifier and 8 individual path identifiers
        assert len(filtered_net_layers - milp_layers) == 9

    def test_intermediate_layer_access(self) -> None:
        # Every encoded layer must be addressable via the prefix map.
        network = toy_net()[0]
        input_lb = torch.tensor([[-1.0, -1.0]])
        input_ub = torch.tensor([[1.0, 1.0]])

        # Our new implementation
        # input_lb = input_lb.unsqueeze(0)
        # input_ub = input_ub.unsqueeze(0)
        x = (input_lb + input_ub) / 2
        milp_model = MILPNetwork.build_model_from_abstract_net(
            x, input_lb, input_ub, network
        )
        assert len(milp_model.layer_id_to_prefix_map) == 6

    def test_intermediate_refinement_single(self) -> None:
        # Single-query MILP refinement must only tighten propagated bounds.
        network = toy_net()[0]
        input_lb = torch.tensor([[-1.0, -1.0]])
        input_ub = torch.tensor([[1.0, 1.0]])

        # Our new implementation
        input_lb = input_lb.unsqueeze(0)
        input_ub = input_ub.unsqueeze(0)
        x = (input_lb + input_ub) / 2
        milp_model = MILPNetwork.build_model_from_abstract_net(
            x, input_lb, input_ub, network, max_milp_neurons=1
        )
        for i, layer in enumerate(milp_model.net.layers):
            lbs, ubs = milp_model._get_network_bounds_at_layer_single(
                layer_tag(layer), timeout=20
            )
            print(f"Layer {i} - LBS: {lbs} UBS: {ubs}")
            assert (lbs >= milp_model.net.layers[i].output_bounds[0]).all()
            assert (ubs <= milp_model.net.layers[i].output_bounds[1]).all()

    def test_intermediate_refinement_multi(self) -> None:
        # Multi-query MILP refinement must only tighten propagated bounds.
        network = toy_net()[0]
        input_lb = torch.tensor([[-1.0, -1.0]])
        input_ub = torch.tensor([[1.0, 1.0]])

        # Our new implementation
        input_lb = input_lb.unsqueeze(0)
        input_ub = input_ub.unsqueeze(0)
        x = (input_lb + input_ub) / 2
        milp_model = MILPNetwork.build_model_from_abstract_net(
            x, input_lb, input_ub, network
        )
        for i, layer in enumerate(milp_model.net.layers):
            lbs, ubs = milp_model.get_network_bounds_at_layer_multi(
                layer_tag(layer),
                timeout_per_instance=20,
                timeout_total=400,
                timeout=time.time() + 400,
            )
            print(f"Layer {i} - LBS: {lbs} UBS: {ubs}")
            assert (lbs >= milp_model.net.layers[i].output_bounds[0]).all()
            assert (ubs <= milp_model.net.layers[i].output_bounds[1]).all()
if __name__ == "__main__":
    # Run the basic toy-network MILP check when executed as a script.
    tester = TestMILP()
    tester.test_milp_toy_example()
| 6,256 | 39.367742 | 95 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/pooling_test.py | import time
import torch
from torch.distributions.beta import Beta
from src.abstract_domains.DP_f import DeepPoly_f
from src.abstract_domains.zonotope import HybridZonotope
from src.milp_network import MILPNetwork
from src.state.tags import layer_tag
from src.utilities.initialization import seed_everything
from tests.test_util import (
get_deep_poly_bounds,
pad_toy_max_pool_net,
toy_avg_pool_net,
toy_max_avg_pool_net,
toy_max_pool_net,
)
class TestPooling:
"""
We test our Avg and Max-Pooling layers
"""
def test_avg_pooling_sound(self) -> None:
as_net, shape = toy_avg_pool_net()
eps = 2 / 255
m = Beta(concentration0=0.5, concentration1=0.5)
for i in range(20):
x = torch.rand(shape)
lb = x - eps
ub = x + eps
(dp_lb, dp_ub) = get_deep_poly_bounds(as_net, lb, ub)
in_zono = HybridZonotope.construct_from_bounds(lb, ub, domain="zono")
out_zono = as_net.propagate_abstract_element(in_zono)
in_dpf = DeepPoly_f.construct_from_bounds(lb, ub, domain="DPF")
out_dpf = as_net.propagate_abstract_element(in_dpf)
for _ in range(10):
shape_check = (256, *shape[1:])
check_x = lb + 2 * eps * m.sample(shape_check)
out = as_net(check_x)
assert (dp_lb <= out).all() and (out <= dp_ub).all()
assert out_zono.may_contain_point(out)
assert out_dpf.may_contain_point(out)
def test_avg_pooling_milp(self) -> None:
as_net = toy_avg_pool_net()[0]
shape = (1, 1, 6, 6)
eps = 0.25
x = torch.ones(shape) * 0.5
input_lb = x - eps
input_ub = x + eps
milp_model = MILPNetwork.build_model_from_abstract_net(
x, input_lb, input_ub, as_net
)
for i, layer in enumerate(milp_model.net.layers):
lbs, ubs = milp_model.get_network_bounds_at_layer_multi(
layer_tag(layer),
timeout_per_instance=20,
timeout_total=400,
timeout=time.time() + 400,
)
print(f"Layer {i} - LBS: {lbs} UBS: {ubs}")
assert (lbs >= milp_model.net.layers[i].output_bounds[0].flatten()).all()
assert (ubs <= milp_model.net.layers[i].output_bounds[1].flatten()).all()
def test_max_pooling_milp(self) -> None:
seed_everything(42)
as_net = toy_max_pool_net()[0]
shape = (1, 1, 6, 6)
eps = 0.25
x = torch.rand(shape) * 0.5
input_lb = x - eps
input_ub = x + eps
milp_model = MILPNetwork.build_model_from_abstract_net(
x, input_lb, input_ub, as_net
)
for i, layer in enumerate(milp_model.net.layers):
lbs, ubs = milp_model.get_network_bounds_at_layer_multi(
layer_tag(layer),
timeout_per_instance=20,
timeout_total=400,
timeout=time.time() + 400,
)
print(f"Layer {i} - LBS: {lbs} UBS: {ubs}")
assert (lbs >= milp_model.net.layers[i].output_bounds[0].flatten()).all()
assert (ubs <= milp_model.net.layers[i].output_bounds[1].flatten()).all()
def test_max_pooling_dp(self) -> None:
seed_everything(42)
as_net, shape = pad_toy_max_pool_net()
eps = 0.25
x = torch.ones(1, *shape) * 0.5
input_lb = x - eps
input_ub = x + eps
milp_model = MILPNetwork.build_model_from_abstract_net(
x, input_lb, input_ub, as_net
)
milp_lb, milp_ub = milp_model.get_network_output_bounds()
(dp_lb, dp_ub) = get_deep_poly_bounds(as_net, input_lb, input_ub)
assert (dp_lb <= milp_lb).all()
assert (dp_ub >= milp_ub).all()
m = Beta(concentration0=0.5, concentration1=0.5)
eps = 8 / 255
for i in range(100):
x = torch.rand(1, *shape)
input_lb = x - eps
input_ub = x + eps
as_net = pad_toy_max_pool_net()[0]
(dp_lb, dp_ub) = get_deep_poly_bounds(as_net, input_lb, input_ub)
# in_zono = HybridZonotope.construct_from_bounds(
# input_lb, input_ub, domain="zono"
# )
# out_zono = as_net.propagate_abstract_element(in_zono)
# in_dpf = DeepPoly_f.construct_from_bounds(input_lb, input_ub, domain="DPF")
# out_dpf = as_net.propagate_abstract_element(in_dpf)
milp_model = MILPNetwork.build_model_from_abstract_net(
x, input_lb, input_ub, as_net
)
out = as_net(x)
assert (out >= dp_lb).all()
assert (out <= dp_ub).all()
for j in range(10):
shape_check = (256, *shape)
check_x = input_lb + 2 * eps * m.sample(shape_check)
out = as_net(check_x)
assert (dp_lb - 1e-7 <= out).all() and (out <= dp_ub + 1e-7).all()
# assert out_zono.may_contain_point(out)
# assert out_dpf.may_contain_point(out)
def test_max_avg_pooling_milp(self) -> None:
seed_everything(41)
shape = (1, 1, 16, 16)
eps = 2 / 255
m = Beta(concentration0=0.5, concentration1=0.5)
for i in range(5):
x = torch.rand(shape)
input_lb = x - eps
input_ub = x + eps
as_net = toy_max_avg_pool_net()
milp_model = MILPNetwork.build_model_from_abstract_net(
x, input_lb, input_ub, as_net
)
f_lbs, f_ubs = milp_model.get_network_output_bounds()
for _ in range(20):
shape_check = (256, *shape[1:])
check_x = input_lb + 2 * eps * m.sample(shape_check)
out = as_net(check_x)
assert (f_lbs <= out).all() and (out <= f_ubs).all()
if __name__ == "__main__":
t = TestPooling()
t.test_max_pooling_dp()
# t.test_avg_pooling_milp()
# t.test_max_pooling_milp()
# t.test_max_avg_pooling_milp()
| 6,159 | 34.2 | 89 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/conv2d_test.py | import csv
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from src.abstract_domains.DP_f import DeepPoly_f
from src.abstract_domains.zonotope import HybridZonotope
from src.abstract_layers.abstract_conv2d import Conv2d
from src.abstract_layers.abstract_network import AbstractNetwork
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.tags import query_tag
from src.utilities.config import (
make_backsubstitution_config,
make_prima_hyperparameters,
)
from src.utilities.loading.data import transform_and_bound
from src.utilities.loading.network import mnist_conv_small
from src.verification_subproblem import SubproblemState
from tests.test_util import MNIST_CONV_DATA_TEST_CONFIG, MNIST_INPUT_DIM
class TestConv2d:
def test_backsubstitution_mn_bab_shape_with_padding(self) -> None:
in_channels = 1
out_channels = 1
input_dim = (in_channels, 5, 3)
output_dim = (out_channels, 5, 3)
layer = Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
input_dim=input_dim,
padding=1,
bias=True,
)
lb_coef = torch.eye(np.prod(output_dim)).view(-1, *output_dim).unsqueeze(0)
lb = AffineForm(lb_coef)
ub = AffineForm(2 * lb_coef)
initial_shape = MN_BaB_Shape(
query_id=query_tag(layer),
query_prev_layer=None,
queries_to_compute=None,
lb=lb,
ub=ub,
unstable_queries=None,
subproblem_state=None,
)
propagated_shape = layer.backsubstitute(
make_backsubstitution_config(), initial_shape
)
assert isinstance(propagated_shape.lb.coef, Tensor)
assert propagated_shape.lb.coef.shape == (1, np.prod(output_dim)) + input_dim
assert propagated_shape.lb.bias.shape == (1, np.prod(output_dim))
def test_backsubstitution_mn_bab_shape_without_padding(self) -> None:
in_channels = 1
out_channels = 1
input_dim = (in_channels, 5, 3)
output_dim = (out_channels, 3, 1)
layer = Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
input_dim=input_dim,
padding=0,
bias=True,
)
lb_coef = torch.eye(np.prod(output_dim)).view(-1, *output_dim).unsqueeze(0)
lb = AffineForm(lb_coef)
ub = AffineForm(2 * lb_coef)
initial_shape = MN_BaB_Shape(
query_id=query_tag(layer),
query_prev_layer=None,
queries_to_compute=None,
lb=lb,
ub=ub,
unstable_queries=None,
subproblem_state=None,
)
propagated_shape = layer.backsubstitute(
make_backsubstitution_config(), initial_shape
)
assert isinstance(propagated_shape.lb.coef, Tensor)
assert propagated_shape.lb.coef.shape == (1, np.prod(output_dim)) + input_dim
assert propagated_shape.lb.bias.shape == (1, np.prod(output_dim))
def test_propagate_abs_conv_padding(self) -> None:
in_channels = 1
out_channels = 2
input_dim = (in_channels, 5, 3)
layer = Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
input_dim=input_dim,
padding=1,
bias=True,
)
x = torch.rand((2, *input_dim))
x_out = layer(x)
in_zono = HybridZonotope.construct_from_noise(
x, eps=0.01, domain="zono", data_range=(-torch.inf, torch.inf)
)
out_zono = layer.propagate_abstract_element(in_zono)
assert out_zono.shape == x_out.shape
assert out_zono.may_contain_point(x_out)
in_dpf = DeepPoly_f.construct_from_noise(
x, eps=0.01, domain="DPF", data_range=(-torch.inf, torch.inf)
)
out_dpf = layer.propagate_abstract_element(in_dpf)
assert out_dpf.shape == x_out.shape
assert out_dpf.may_contain_point(x_out)
def test_propagate_abs_conv_no_padding(self) -> None:
in_channels = 1
out_channels = 2
input_dim = (in_channels, 5, 3)
layer = Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
input_dim=input_dim,
padding=0,
bias=True,
)
x = torch.rand((2, *input_dim)) * 2 - 1
x_out = layer(x)
in_zono = HybridZonotope.construct_from_noise(
x, eps=0.01, domain="zono", data_range=(-torch.inf, torch.inf)
)
out_zono = layer.propagate_abstract_element(in_zono)
assert out_zono.shape == x_out.shape
assert out_zono.may_contain_point(x_out), "Bound violation found for Zono!"
in_dpf = DeepPoly_f.construct_from_noise(
x, eps=0.01, domain="DPF", data_range=(-torch.inf, torch.inf)
)
out_dpf = layer.propagate_abstract_element(in_dpf)
assert out_dpf.shape == x_out.shape
assert out_dpf.may_contain_point(x_out), "Bound violation found for DPF!"
def test_propagate_abs_conv_pos(self) -> None:
in_channels = 1
out_channels = 2
input_dim = (in_channels, 5, 3)
layer = Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
input_dim=input_dim,
padding=0,
bias=False,
)
layer.weight.data = layer.weight.data.abs()
x = torch.rand((2, *input_dim)) * 2
x_out = layer(x)
in_zono = HybridZonotope.construct_from_noise(
x, eps=0.01, domain="zono", data_range=(-torch.inf, torch.inf)
)
out_zono = layer.propagate_abstract_element(in_zono)
assert out_zono.shape == x_out.shape
assert out_zono.may_contain_point(x_out)
assert (out_zono.concretize()[0] >= 0).all()
in_dpf = DeepPoly_f.construct_from_noise(
x, eps=0.01, domain="DPF", data_range=(-torch.inf, torch.inf)
)
out_dpf = layer.propagate_abstract_element(in_dpf)
assert out_dpf.shape == x_out.shape
assert out_dpf.may_contain_point(x_out)
assert (out_dpf.concretize()[0] >= 0).all()
def test_propagate_interval_identity_layer(self) -> None:
in_channels = 1
input_dim = (in_channels, 3, 3)
layer = Conv2d(
in_channels=in_channels,
out_channels=1,
kernel_size=3,
input_dim=input_dim,
padding=1,
bias=False,
)
nn.init.dirac_(layer.weight.data)
input_lb = torch.full(size=input_dim, fill_value=-1.0).unsqueeze(0)
input_ub = torch.full(size=input_dim, fill_value=1.0).unsqueeze(0)
output_lb, output_ub = layer.propagate_interval((input_lb, input_ub))
assert (output_lb == input_lb).all()
assert (output_ub == input_ub).all()
def test_propagate_interval_identity_layer_with_bias(self) -> None:
in_channels = 1
input_dim = (in_channels, 3, 3)
layer = Conv2d(
in_channels=in_channels,
out_channels=1,
kernel_size=3,
input_dim=input_dim,
padding=1,
bias=True,
)
nn.init.dirac_(layer.weight.data)
assert layer.bias is not None
nn.init.constant_(layer.bias, 0)
input_lb = torch.full(size=input_dim, fill_value=-1.0).unsqueeze(0)
input_ub = torch.full(size=input_dim, fill_value=1.0).unsqueeze(0)
output_lb, output_ub = layer.propagate_interval((input_lb, input_ub))
assert (output_lb == input_lb).all()
assert (output_ub == input_ub).all()
def test_propagate_positive_interval_through_positive_conv_layer(self) -> None:
test_lb = torch.abs(torch.rand(MNIST_INPUT_DIM).unsqueeze(0))
test_ub = test_lb + 0.1
assert (test_lb >= 0).all()
layer = Conv2d(
in_channels=1,
out_channels=16,
kernel_size=3,
input_dim=MNIST_INPUT_DIM,
padding=1,
bias=True,
)
nn.init.uniform_(layer.weight, a=0.0, b=1.0)
assert layer.bias is not None
nn.init.uniform_(layer.bias, a=0.0, b=1.0)
assert (layer.weight >= 0).all()
assert (layer.bias >= 0).all()
expected_output_lb = layer.forward(test_lb)
expected_output_ub = layer.forward(test_ub)
after = layer.propagate_interval((test_lb, test_ub))
assert (after[0] <= after[1]).all()
assert (expected_output_lb == after[0]).all()
assert (expected_output_ub == after[1]).all()
def test_small_cnn_backsubstitution_pass_does_not_crash(self) -> None:
network_path = "networks/mnist_convSmallRELU__Point.pyt"
original_network = mnist_conv_small()
state_dict = torch.load(network_path)
original_network.load_state_dict(state_dict)
network = AbstractNetwork.from_concrete_module(
original_network, MNIST_INPUT_DIM
)
test_data_path = "test_data/mnist_test_100.csv"
test_file = open(test_data_path, "r")
test_instances = csv.reader(test_file, delimiter=",")
label, *pixel_values = next(test_instances)
__, input_lb, input_ub = transform_and_bound(
pixel_values, MNIST_CONV_DATA_TEST_CONFIG
)
initial_bound_coef = (
torch.eye(np.prod(network.output_dim))
.view(-1, *network.output_dim)
.unsqueeze(0)
)
network.get_mn_bab_shape(
config=make_backsubstitution_config(),
input_lb=input_lb,
input_ub=input_ub,
query_id=query_tag(network),
query_coef=initial_bound_coef,
subproblem_state=SubproblemState.create_default(
split_state=None,
optimize_prima=False,
batch_size=1,
device=initial_bound_coef.device,
use_params=True,
),
compute_upper_bound=True,
reset_input_bounds=True,
recompute_intermediate_bounds=True,
optimize_intermediate_bounds=False,
)
def test_small_cnn_backsubstitution_pass_with_prima_constraints_does_not_crash(
self,
) -> None:
network_path = "networks/mnist_convSmallRELU__Point.pyt"
original_network = mnist_conv_small()
state_dict = torch.load(network_path)
original_network.load_state_dict(state_dict)
network = AbstractNetwork.from_concrete_module(
original_network, MNIST_INPUT_DIM
)
test_data_path = "test_data/mnist_test_100.csv"
test_file = open(test_data_path, "r")
test_instances = csv.reader(test_file, delimiter=",")
label, *pixel_values = next(test_instances)
__, input_lb, input_ub = transform_and_bound(
pixel_values, MNIST_CONV_DATA_TEST_CONFIG
)
initial_bound_coef = (
torch.eye(np.prod(network.output_dim))
.view(-1, *network.output_dim)
.unsqueeze(0)
)
prima_hyperparameters = make_prima_hyperparameters()
layer_ids_for_which_to_compute_prima_constraints = (
network.get_activation_layer_ids()
)
network.get_mn_bab_shape(
make_backsubstitution_config().with_prima(
prima_hyperparameters,
layer_ids_for_which_to_compute_prima_constraints,
),
input_lb,
input_ub,
query_id=query_tag(network),
query_coef=initial_bound_coef,
subproblem_state=SubproblemState.create_default(
split_state=None,
optimize_prima=True,
batch_size=1,
device=initial_bound_coef.device,
use_params=True,
),
compute_upper_bound=True,
reset_input_bounds=True,
recompute_intermediate_bounds=True,
optimize_intermediate_bounds=False,
)
def test_small_cnn_forward_pass_does_not_crash(self) -> None:
network_path = "networks/mnist_convSmallRELU__Point.pyt"
original_network = mnist_conv_small()
state_dict = torch.load(network_path)
original_network.load_state_dict(state_dict)
network = AbstractNetwork.from_concrete_module(
original_network, MNIST_INPUT_DIM
)
test_data_path = "test_data/mnist_test_100.csv"
test_file = open(test_data_path, "r")
test_instances = csv.reader(test_file, delimiter=",")
label, *pixel_values = next(test_instances)
__, input_lb, input_ub = transform_and_bound(
pixel_values, MNIST_CONV_DATA_TEST_CONFIG
)
network.set_layer_bounds_via_interval_propagation(input_lb, input_ub)
if __name__ == "__main__":
T = TestConv2d()
T.test_propagate_abs_conv_pos()
T.test_propagate_abs_conv_no_padding()
T.test_propagate_abs_conv_padding()
T.test_propagate_interval_identity_layer_with_bias()
T.test_small_cnn_forward_pass_does_not_crash()
T.test_propagate_interval_identity_layer()
T.test_backsubstitution_mn_bab_shape_with_padding()
T.test_propagate_positive_interval_through_positive_conv_layer()
T.test_small_cnn_backsubstitution_pass_with_prima_constraints_does_not_crash()
| 13,661 | 33.675127 | 85 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/alpha_optimization_test.py | import torch
from tests.test_util import (
get_deep_poly_bounds,
optimize_output_node_bounds_with_prima_crown,
toy_net,
)
class TestAlphaOptimization:
def test_toy_net(self) -> None:
network = toy_net()[0]
input_lb = torch.tensor([-1.0, -1.0]).unsqueeze(0)
input_ub = torch.tensor([1.0, 1.0]).unsqueeze(0)
(
output_lb_without_alpha,
output_ub_without_alpha,
) = get_deep_poly_bounds(network, input_lb, input_ub)
(
output_lb_with_alpha,
output_ub_with_alpha,
) = optimize_output_node_bounds_with_prima_crown(
network, 0, input_lb, input_ub, optimize_alpha=True
)
assert output_lb_with_alpha >= output_lb_without_alpha
assert output_ub_with_alpha <= output_ub_without_alpha
if __name__ == "__main__":
T = TestAlphaOptimization()
T.test_toy_net()
| 917 | 24.5 | 63 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/backprop_test.py | import torch
from tests.test_util import (
get_deep_poly_bounds,
optimize_output_node_bounds_with_prima_crown,
toy_stack_seq_net,
)
class TestBackprop:
"""
Test the more intricate parts of the backsubstitution implementation.
"""
def test_seq_layer_stacking_sound(self) -> None:
network = toy_stack_seq_net()
print(network)
input_lb = torch.tensor([0, -1.0]).unsqueeze(0)
input_ub = torch.tensor([1.0, 1.0]).unsqueeze(0)
(
output_lb_without_alpha,
output_ub_without_alpha,
) = get_deep_poly_bounds(network, input_lb, input_ub)
(
output_lb_with_alpha,
output_ub_with_alpha,
) = optimize_output_node_bounds_with_prima_crown(
network, 0, input_lb, input_ub, optimize_alpha=True, optimize_prima=False
)
assert output_lb_with_alpha >= output_lb_without_alpha
assert output_ub_with_alpha < output_ub_without_alpha
assert output_lb_without_alpha == 0.0
assert torch.isclose(output_ub_without_alpha, torch.tensor(4 + 2 / 3))
assert output_lb_with_alpha == 0.0
assert output_ub_with_alpha == 4.0
concrete_lb = float("inf")
concrete_ub = -float("inf")
for x1 in torch.linspace(float(input_lb[0][0]), float(input_ub[0][0]), 5):
for x2 in torch.linspace(float(input_lb[0][1]), float(input_ub[0][1]), 5):
out = network(torch.Tensor([x1, x2]))
concrete_lb = min(concrete_lb, float(out))
concrete_ub = max(concrete_ub, float(out))
assert concrete_lb == output_lb_with_alpha
assert concrete_ub == output_ub_with_alpha
if __name__ == "__main__":
T = TestBackprop()
T.test_seq_layer_stacking_sound()
| 1,813 | 29.745763 | 86 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/onnx_test.py | import gzip
import numpy as np
# import onnx
import onnxruntime as ort # type: ignore[import]
import torch
from src.utilities.initialization import seed_everything
from src.utilities.loading.network import load_onnx_model
def is_float_try(str: str) -> bool:
try:
float(str)
return True
except ValueError:
return False
class TestONNX:
"""
We test our ONNX parser implementation
"""
def test_onnx_read(self) -> None:
torch.set_default_dtype(torch.float32)
net = load_onnx_model("benchmarks_vnn21/cifar10_resnet/onnx/resnet_2b.onnx")[0]
x = torch.rand((1, 3, 32, 32))
ort_session = ort.InferenceSession(
"benchmarks_vnn21/cifar10_resnet/onnx/resnet_2b.onnx"
)
outputs = ort_session.run(
None,
{"input.1": np.array(x).astype(np.float32)},
)
print(outputs[0])
net.eval()
out = net(x)
assert torch.isclose(torch.Tensor(outputs[0]), out, atol=1e-5).all()
def test_onnx_read_gz(self) -> None:
path = "vnn-comp-2022-sup/benchmarks/sri_resnet_a/onnx/resnet_3b2_bn_mixup_adv_4.0_bs128_lr-1.onnx"
net, in_shape, in_name = load_onnx_model(path)
x = torch.rand((1, *in_shape))
if path.endswith(".gz"):
ort_session = ort.InferenceSession(gzip.open(path).read())
else:
ort_session = ort.InferenceSession(path)
outputs = ort_session.run(
None,
{in_name: np.array(x).astype(np.float32)},
)
print(outputs[0])
net.eval()
out = net(x)
assert torch.isclose(torch.Tensor(outputs[0]), out, atol=1e-5).all()
def test_onnx_nn4sys_2022(self) -> None:
seed_everything(42)
mscn_128d_path = "vnn-comp-2022-sup/benchmarks/nn4sys/onnx/mscn_128d.onnx"
mscn_128d_dual_path = (
"vnn-comp-2022-sup/benchmarks/nn4sys/onnx/mscn_128d_dual.onnx"
)
net, _, _ = load_onnx_model(mscn_128d_path)
net.eval()
if mscn_128d_path.endswith(".gz"):
ort_session = ort.InferenceSession(gzip.open(mscn_128d_path).read())
else:
ort_session = ort.InferenceSession(mscn_128d_path)
for i in range(20):
x = torch.rand((1, 11, 14))
outputs = ort_session.run(
None,
{"modelInput": np.array(x).astype(np.float32)},
)
out = net(x)
assert torch.isclose(torch.Tensor(outputs[0]), out, atol=1e-5).all()
net, _, _ = load_onnx_model(mscn_128d_dual_path)
net.eval()
if mscn_128d_dual_path.endswith(".gz"):
ort_session = ort.InferenceSession(gzip.open(mscn_128d_dual_path).read())
else:
ort_session = ort.InferenceSession(mscn_128d_dual_path)
# skl_model = skl_load_onnx_model("vnn-comp-2022-sup/benchmark_vnn22/nn4sys2022/model/mscn_128d_dual.onnx")
# interm_model = select_model_inputs_outputs(skl_model, "138")
# save_onnx_model(interm_model, "interm.onnx")
# interm_session = ort.InferenceSession(
# "interm.onnx"
# )
# res = interm_session.run(None,
# {"modelInput": np.array(x).astype(np.float32)},
# )
# print(torch.tensor(res[0]))
# net(x)
for i in range(20):
x = torch.rand((1, 22, 14))
outputs = ort_session.run(
None,
{"modelInput": np.array(x).astype(np.float32)},
)
out = net(x)
# print(f"onnx: {torch.Tensor(outputs[0]).item():.6f} o2p: {out.item():.6f}")
assert torch.isclose(torch.Tensor(outputs[0]), out, atol=1e-5).all()
def test_onnx_unet_2022(self) -> None:
# Note this only works with skl2onnx installed
pass
# seed_everything(42)
# path = "vnn-comp-2022-sup/benchmarks/carvana_unet_2022/onnx/unet_upsample_small.onnx.gz"
# net, _, _ = load_onnx_model(path)
# net.eval()
# from skl2onnx.helpers.onnx_helper import load_onnx_model as skl_load_onnx_model
# from skl2onnx.helpers.onnx_helper import (
# select_model_inputs_outputs,
# save_onnx_model,
# )
# skl_model = skl_load_onnx_model(gzip.open(path).read())
# interm_model = select_model_inputs_outputs(skl_model, "out_mask")
# save_onnx_model(interm_model, "interm.onnx")
# interm_session = ort.InferenceSession("interm.onnx")
# for _ in range(20):
# input = torch.rand(1, 4, 31, 47)
# out = net(input)
# res = interm_session.run(
# None,
# {"input": np.array(input).astype(np.float32)},
# )
# res = torch.tensor(res[0])
# assert torch.isclose(res, out, atol=1e-5).all()
def test_onnx_nn4sys(self) -> None:
load_onnx_model("benchmarks_vnn21/nn4sys/nets/lognormal_100.onnx")
load_onnx_model("benchmarks_vnn21/nn4sys/nets/lognormal_1000.onnx")
# q3 = load_onnx_model("benchmarks_vnn21/nn4sys/nets/normal_100.onnx.gz")[0]
# q4 = load_onnx_model("benchmarks_vnn21/nn4sys/nets/normal_1000.onnx.gz")[0]
# q5 = load_onnx_model("benchmarks_vnn21/nn4sys/nets/piecewise_100.onnx.gz")[0]
# q6 = load_onnx_model("benchmarks_vnn21/nn4sys/nets/piecewise_1000.onnx.gz")[0]
print("Done")
if __name__ == "__main__":
t = TestONNX()
t.test_onnx_read()
# t.test_onnx_unet_2022()
# t.test_onnx_nn4sys_2022()
# t.test_onnx_nn4sys()
| 5,657 | 32.678571 | 115 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/pad_permute_test.py | import shutil
import time
from pathlib import Path
import torch
from torch import nn
from src.abstract_domains.DP_f import DeepPoly_f
from src.abstract_domains.zonotope import HybridZonotope
from src.abstract_layers.abstract_network import AbstractNetwork
from src.milp_network import MILPNetwork
from src.state.tags import layer_tag
from src.utilities.initialization import seed_everything
from src.utilities.loading.network import load_onnx_model
from tests.test_util import (
get_deep_poly_bounds,
toy_pad_net,
toy_permute_net,
toy_unbinary_net,
)
class TestPadPermute:
"""
We test our Padding and Permutation layer.
"""
def test_padding(self) -> None:
net_pt = toy_pad_net()
shape = (1, 16, 16)
onnx_shape = (1, 1, 16, 16)
eps = 2 / 255
try:
temp_dir = "tests/temp"
net_pref = "pad_test"
onnx_path = f"{temp_dir}/{net_pref}.onnx"
Path(temp_dir).mkdir(parents=True, exist_ok=True)
x = torch.rand(onnx_shape)
torch.onnx.export(
net_pt,
x,
onnx_path,
export_params=True,
training=0,
do_constant_folding=False,
verbose=False,
input_names=["input.1"],
output_names=["output"],
)
o2p_net, _, in_name = load_onnx_model(onnx_path)
o2p_net.eval()
# Compare results
for i in range(10):
x = torch.rand(onnx_shape)
out_pt_net = net_pt(x)
out_o2p_net = o2p_net(x)
assert torch.isclose(out_pt_net, out_o2p_net).all()
# Get abstract net
assert isinstance(o2p_net, nn.Sequential)
abs_net = AbstractNetwork.from_concrete_module(o2p_net, shape)
input = torch.rand(onnx_shape)
input_lb = input - eps
input_ub = input + eps
(
output_lb_without_alpha,
output_ub_without_alpha,
) = get_deep_poly_bounds(abs_net, input_lb, input_ub)
x = input
x_out = abs_net(x)
in_zono = HybridZonotope.construct_from_bounds(
input_lb, input_ub, domain="zono"
)
out_zono = abs_net.propagate_abstract_element(in_zono)
assert out_zono.shape == x_out.shape
assert out_zono.may_contain_point(x_out)
in_dpf = DeepPoly_f.construct_from_bounds(input_lb, input_ub, domain="DPF")
out_dpf = abs_net.propagate_abstract_element(in_dpf)
assert out_dpf.shape == x_out.shape
assert out_dpf.may_contain_point(x_out)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_padding_milp(self) -> None:
seed_everything(1)
net_pt = toy_pad_net()
shape = (1, 16, 16)
onnx_shape = (1, 1, 16, 16)
eps = 2 / 255
try:
temp_dir = "tests/temp"
net_pref = "pad_test"
onnx_path = f"{temp_dir}/{net_pref}.onnx"
Path(temp_dir).mkdir(parents=True, exist_ok=True)
x = torch.rand(onnx_shape)
torch.onnx.export(
net_pt,
x,
onnx_path,
export_params=True,
training=0,
do_constant_folding=False,
verbose=False,
input_names=["input.1"],
output_names=["output"],
)
o2p_net, _, in_name = load_onnx_model(onnx_path)
o2p_net.eval()
# Get abstract net
assert isinstance(o2p_net, nn.Sequential)
abs_net = AbstractNetwork.from_concrete_module(o2p_net, shape)
input = torch.rand(onnx_shape)
input_lb = input - eps
input_ub = input + eps
milp_model = MILPNetwork.build_model_from_abstract_net(
input, input_lb, input_ub, abs_net
)
for i, layer in enumerate(milp_model.net.layers):
lbs, ubs = milp_model.get_network_bounds_at_layer_multi(
layer_tag(layer),
timeout_per_instance=20.0,
timeout_total=400.0,
timeout=time.time() + 400,
)
print(f"Layer {i} - LBS: {lbs} UBS: {ubs}")
assert (
lbs >= milp_model.net.layers[i].output_bounds[0].flatten()
).all()
assert (
ubs <= milp_model.net.layers[i].output_bounds[1].flatten()
).all()
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_permutation(self) -> None:
net_pt = toy_permute_net()
shape = (1, 4, 8)
onnx_shape = (1, 1, 4, 8)
eps = 2 / 255
try:
temp_dir = "tests/temp"
net_pref = "pad_test"
onnx_path = f"{temp_dir}/{net_pref}.onnx"
Path(temp_dir).mkdir(parents=True, exist_ok=True)
x = torch.rand(onnx_shape)
torch.onnx.export(
net_pt,
x,
onnx_path,
export_params=True,
training=0,
do_constant_folding=False,
verbose=False,
input_names=["input.1"],
output_names=["output"],
)
o2p_net, _, in_name = load_onnx_model(onnx_path)
o2p_net.eval()
# Compare results
for i in range(10):
x = torch.rand(onnx_shape)
out_pt_net = net_pt(x)
out_o2p_net = o2p_net(x)
assert torch.isclose(out_pt_net, out_o2p_net).all()
# Get abstract net
assert isinstance(o2p_net, nn.Sequential)
abs_net = AbstractNetwork.from_concrete_module(o2p_net, shape)
input = torch.rand(onnx_shape)
input_lb = input - eps
input_ub = input + eps
(
output_lb_without_alpha,
output_ub_without_alpha,
) = get_deep_poly_bounds(abs_net, input_lb, input_ub)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_permutation_milp(self) -> None:
net_pt = toy_permute_net()
shape = (1, 4, 8)
onnx_shape = (1, 1, 4, 8)
eps = 2 / 255
try:
temp_dir = "tests/temp"
net_pref = "pad_test"
onnx_path = f"{temp_dir}/{net_pref}.onnx"
Path(temp_dir).mkdir(parents=True, exist_ok=True)
x = torch.rand(onnx_shape)
torch.onnx.export(
net_pt,
x,
onnx_path,
export_params=True,
training=0,
do_constant_folding=False,
verbose=False,
input_names=["input.1"],
output_names=["output"],
)
o2p_net, _, in_name = load_onnx_model(onnx_path)
o2p_net.eval()
# Get abstract net
assert isinstance(o2p_net, nn.Sequential)
abs_net = AbstractNetwork.from_concrete_module(o2p_net, shape)
input = torch.rand(onnx_shape)
input_lb = input - eps
input_ub = input + eps
milp_model = MILPNetwork.build_model_from_abstract_net(
input, input_lb, input_ub, abs_net
)
for i, layer in enumerate(milp_model.net.layers):
lbs, ubs = milp_model.get_network_bounds_at_layer_multi(
layer_tag(layer),
timeout_per_instance=20,
timeout_total=400,
timeout=time.time() + 400,
)
print(f"Layer {i} - LBS: {lbs} UBS: {ubs}")
assert (
lbs >= milp_model.net.layers[i].output_bounds[0].flatten()
).all()
assert (
ubs <= milp_model.net.layers[i].output_bounds[1].flatten()
).all()
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_unbinary_op(self) -> None:
seed_everything(0)
network = toy_unbinary_net()
input = torch.ones(size=(1, 1, 4, 4))
eps = 0.5
num_eps = 1e-6
input_lb, input_ub = input - eps, input + eps
q = network(input)
milp_model = MILPNetwork.build_model_from_abstract_net(
input, input_lb, input_ub, network
)
lbs, ubs = milp_model.get_network_output_bounds()
(
output_lb_deep_poly,
output_ub_deep_poly,
) = get_deep_poly_bounds(network, input_lb, input_ub)
assert (q >= lbs).all()
assert (q <= ubs).all()
assert (
output_lb_deep_poly < lbs + num_eps
).all(), (
f"Found violation lb {torch.max(output_lb_deep_poly - (lbs + num_eps))}"
)
def test_unbinary_op_milp(self) -> None:
net_pt = toy_unbinary_net()
eps = 2 / 255
shape = (1, 4, 4)
onnx_shape = (1, 1, 4, 4)
try:
temp_dir = "tests/temp"
net_pref = "pad_test"
onnx_path = f"{temp_dir}/{net_pref}.onnx"
Path(temp_dir).mkdir(parents=True, exist_ok=True)
x = torch.rand(onnx_shape)
torch.onnx.export(
net_pt,
x,
onnx_path,
export_params=True,
training=0,
do_constant_folding=False,
verbose=False,
input_names=["input.1"],
output_names=["output"],
)
o2p_net, _, in_name = load_onnx_model(onnx_path)
o2p_net.eval()
# Get abstract net
assert isinstance(o2p_net, nn.Sequential)
abs_net = AbstractNetwork.from_concrete_module(o2p_net, shape)
input = torch.rand(onnx_shape)
input_lb = input - eps
input_ub = input + eps
milp_model = MILPNetwork.build_model_from_abstract_net(
input, input_lb, input_ub, abs_net
)
for i, layer in enumerate(milp_model.net.layers):
lbs, ubs = milp_model.get_network_bounds_at_layer_multi(
layer_tag(layer),
timeout_per_instance=20,
timeout_total=400,
timeout=time.time() + 400,
)
print(f"Layer {i} - LBS: {lbs} UBS: {ubs}")
assert (
lbs >= milp_model.net.layers[i].output_bounds[0].flatten()
).all()
assert (
ubs <= milp_model.net.layers[i].output_bounds[1].flatten()
).all()
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
if __name__ == "__main__":
t = TestPadPermute()
# t.test_padding()
t.test_padding_milp()
# t.test_permutation()
# t.test_permutation_milp()
# t.test_unbinary_op_milp()
# t.test_max_avg_pooling_milp()
| 11,374 | 30.685237 | 87 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/mn_bab_shape_test.py | import torch
from src.abstract_layers.abstract_relu import ReLU
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.tags import query_tag
class TestMNBaBShape:
def test_init_with_full_argument_list(self) -> None:
lb = AffineForm(torch.rand(1, 2, 2), torch.rand(1, 2, 2))
ub = AffineForm(torch.rand(1, 2, 2), torch.rand(1, 2, 2))
MN_BaB_Shape(
query_id=query_tag(ReLU((1,))),
query_prev_layer=None,
queries_to_compute=None,
lb=lb,
ub=ub,
unstable_queries=None,
subproblem_state=None,
)
def test_init_with_partial_argument_list(self) -> None:
lb = AffineForm(torch.rand(1, 2, 2))
ub = AffineForm(torch.rand(1, 2, 2))
MN_BaB_Shape(
query_id=query_tag(ReLU((1,))),
query_prev_layer=None,
queries_to_compute=None,
lb=lb,
ub=ub,
unstable_queries=None,
subproblem_state=None,
)
| 1,032 | 29.382353 | 65 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/sigmoid_approx_test.py | import numpy as np
import torch
import torch.nn as nn
from src.abstract_layers.abstract_network import AbstractNetwork
from src.abstract_layers.abstract_sigmoid import Sigmoid
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.tags import query_tag
from src.utilities.argument_parsing import get_config_from_json
from src.utilities.attacks import torch_whitebox_attack
from src.utilities.initialization import seed_everything
from src.utilities.loading.network import freeze_network, load_net_from
from tests.test_util import (
get_deep_poly_bounds,
optimize_output_node_bounds_with_prima_crown,
toy_sig_net,
toy_sig_tanh_net,
)
class TestSigmoid:
    """Tests for the abstract sigmoid (and tanh) layer relaxations.

    Covers: soundness of the linear sigmoid relaxation over a grid of
    intervals, soundness of DeepPoly bounds on sigmoid networks, and
    that alpha-optimized bounds dominate plain DeepPoly bounds.
    """

    def test_sigmoid_bounds(self) -> None:
        """Check the sigmoid relaxation is sound on a grid of input intervals.

        For each interval [lb, lb + eps] the linear lower/upper relaxation
        must under-/over-approximate sigmoid at sampled points; mean and max
        relaxation gaps are printed for inspection.
        """
        seed_everything(42)
        shape = (100,)
        device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
        sig = Sigmoid.from_concrete_module(nn.Sigmoid(), shape)
        sig = sig.to(device)
        sig.eval()
        for lb in torch.linspace(-5, 5, 100):
            lbs = lb * torch.ones(shape).to(device)
            ub_eps = torch.linspace(0, 10, 100).to(device)
            # Dummy abstract shape: only needed to satisfy the API of
            # get_approximation_slopes_and_intercepts.
            dummy_as = MN_BaB_Shape(
                query_id=query_tag(sig),
                query_prev_layer=None,
                queries_to_compute=None,
                lb=AffineForm(torch.tensor([[[0]]], device=device)),
                ub=AffineForm(torch.tensor([[[0]]], device=device)),
                unstable_queries=None,
                subproblem_state=None,
            )
            ubs = lbs + ub_eps
            (
                lb_slope,
                ub_slope,
                lb_intercept,
                ub_intercept,
            ) = sig.get_approximation_slopes_and_intercepts(
                bounds=(lbs, ubs), abstract_shape=dummy_as
            )
            # Check if bounds are valid at points sampled inside each interval.
            lb_dist = []
            ub_dist = []
            for i in torch.linspace(0, 1, 100):
                check_x = lb + i * ub_eps
                lb_dist.append(
                    torch.mean(
                        (torch.sigmoid(check_x) - (lb_slope * check_x + lb_intercept))
                    ).item()
                )
                ub_dist.append(
                    torch.mean(
                        ((ub_slope * check_x + ub_intercept) - torch.sigmoid(check_x))
                    ).item()
                )
                assert (
                    lb_slope * check_x + lb_intercept <= torch.sigmoid(check_x)
                ).all(), "Lower bound failure"
                assert (
                    ub_slope * check_x + ub_intercept >= torch.sigmoid(check_x)
                ).all(), "Upper bound failure"
            print(
                f"Mean lb-dist = {np.mean(np.array(lb_dist))} Max lb-dist = {np.max(np.array(lb_dist))}"
            )
            print(
                f"Mean ub-dist = {np.mean(np.array(ub_dist))} Max ub-dist = {np.max(np.array(ub_dist))}"
            )

    def test_sigmoid_net_sound(self) -> None:
        """DeepPoly output bounds must contain all outputs on a 2D input grid."""
        seed_everything(42)
        device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
        network = toy_sig_net()[0]
        network = network.to(device)
        network.eval()
        input_lb = torch.tensor([-1.0, -1.0]).unsqueeze(0).to(device)
        input_ub = torch.tensor([1.0, 1.0]).unsqueeze(0).to(device)
        (
            output_lb_without_alpha,
            output_ub_without_alpha,
        ) = get_deep_poly_bounds(network, input_lb, input_ub)
        # Densely sample the input box; every concrete output must lie
        # within the computed bounds.
        for x1 in torch.linspace(float(input_lb[0][0]), float(input_ub[0][0]), 50):
            for x2 in torch.linspace(float(input_lb[0][1]), float(input_ub[0][1]), 50):
                out = network(torch.tensor([x1, x2], device=device))
                assert out <= output_ub_without_alpha
                assert out >= output_lb_without_alpha

    def test_sigmoid_bound_optimization(self) -> None:
        """Alpha-optimized bounds must be at least as tight as plain DeepPoly."""
        seed_everything(42)
        device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
        network = toy_sig_net()[0]
        network = network.to(device)
        network.eval()
        freeze_network(network)
        input_lb = torch.tensor([-1.0, -1.0]).unsqueeze(0).to(device)
        input_ub = torch.tensor([1.0, 1.0]).unsqueeze(0).to(device)
        (
            output_lb_without_alpha,
            output_ub_without_alpha,
        ) = get_deep_poly_bounds(network, input_lb, input_ub)
        (
            output_lb_with_alpha,
            output_ub_with_alpha,
        ) = optimize_output_node_bounds_with_prima_crown(
            network, 0, input_lb, input_ub, optimize_alpha=True, optimize_prima=False
        )
        assert output_lb_with_alpha >= output_lb_without_alpha
        assert output_ub_with_alpha <= output_ub_without_alpha

    def test_sigmoid_tanh_layers(self) -> None:
        """Same dominance check on a network mixing sigmoid and tanh layers."""
        device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
        network = toy_sig_tanh_net()[0]
        network = network.to(device)
        network.eval()
        freeze_network(network)
        input_lb = torch.tensor([-1.0, -1.0]).unsqueeze(0).to(device)
        input_ub = torch.tensor([1.0, 1.0]).unsqueeze(0).to(device)
        (
            output_lb_without_alpha,
            output_ub_without_alpha,
        ) = get_deep_poly_bounds(network, input_lb, input_ub)
        (
            output_lb_with_alpha,
            output_ub_with_alpha,
        ) = optimize_output_node_bounds_with_prima_crown(
            network, 0, input_lb, input_ub, optimize_alpha=True, optimize_prima=False
        )
        assert output_lb_with_alpha >= output_lb_without_alpha
        assert output_ub_with_alpha <= output_ub_without_alpha

    def test_sigmoid_large_net(self) -> None:
        """Alpha optimization must strictly improve bounds on a real MNIST net."""
        device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
        config = get_config_from_json("configs/baseline/mnist_sig_2_50.json")
        seed_everything(config.random_seed)
        network = load_net_from(config)
        network = network.to(device)
        network.eval()
        assert isinstance(network, nn.Sequential)
        network = AbstractNetwork.from_concrete_module(network, config.input_dim)
        freeze_network(network)
        eps = 0.1
        for i in range(5):
            input = torch.rand(config.input_dim).unsqueeze(0).to(device)
            input_lb = torch.clamp(input - eps, min=0)
            input_ub = torch.clamp(input + eps, max=1)
            (
                output_lb_without_alpha,
                output_ub_without_alpha,
            ) = get_deep_poly_bounds(network, input_lb, input_ub)
            # NOTE: deliberately rebinds the loop variable to a random output
            # node; range(5) iteration is unaffected by this.
            i = int(torch.randint(0, 10, (1,)).item())
            (
                output_lb_with_alpha,
                output_ub_with_alpha,
            ) = optimize_output_node_bounds_with_prima_crown(
                network,
                i,
                input_lb,
                input_ub,
                optimize_alpha=True,
                optimize_prima=False,
            )
            assert output_lb_with_alpha > output_lb_without_alpha[0][i]
            assert output_ub_with_alpha < output_ub_without_alpha[0][i]

    def test_sigmoid_net_large_soundness(self) -> None:
        """Optimized bounds must contain adversarial and random points.

        Uses a whitebox attack plus random/corner samples inside the input
        box as witnesses that the optimized bounds remain sound.
        """
        device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
        dtype = torch.get_default_dtype()
        config = get_config_from_json("configs/baseline/mnist_sig_2_50.json")
        seed_everything(config.random_seed)
        network = load_net_from(config)
        network = network.to(device).to(dtype)
        network.eval()
        assert isinstance(network, nn.Sequential)
        network = AbstractNetwork.from_concrete_module(network, config.input_dim)
        freeze_network(network)
        eps = 0.1
        for i in range(5):
            network.reset_output_bounds()
            network.reset_input_bounds()
            input = torch.rand(config.input_dim).unsqueeze(0).to(device)
            input_lb = torch.clamp(input - eps, min=0)
            input_ub = torch.clamp(input + eps, max=1)
            (
                output_lb_without_alpha,
                output_ub_without_alpha,
            ) = get_deep_poly_bounds(network, input_lb, input_ub)
            i = int(torch.randint(0, 10, (1,)).item())
            properties_to_verify = [[(i, j, 0)] for j in range(10) if j != i]
            adversarial_example, worst_x = torch_whitebox_attack(
                network,
                input_lb.device,
                input,
                properties_to_verify,
                input_lb,
                input_ub,
                restarts=5,
            )
            (
                output_lb_with_alpha,
                output_ub_with_alpha,
            ) = optimize_output_node_bounds_with_prima_crown(
                network,
                i,
                input_lb,
                input_ub,
                optimize_alpha=True,
                optimize_prima=False,
            )
            assert output_lb_with_alpha > output_lb_without_alpha[0][i]
            assert output_ub_with_alpha < output_ub_without_alpha[0][i]
            # The center point itself must lie within the bounds.
            x = input
            adv_out = network(x)
            assert output_lb_with_alpha < adv_out[0][i]
            assert output_ub_with_alpha > adv_out[0][i]
            if worst_x is not None:
                x = torch.tensor(worst_x[0], device=device)
                adv_out = network(x)
                assert output_lb_with_alpha < adv_out[i]
                assert output_ub_with_alpha > adv_out[i]
            assert adversarial_example is not None
            for x_ndarray in adversarial_example:
                x = torch.tensor(x_ndarray, device=device)[0]
                adv_out = network(x)
                assert output_lb_with_alpha < adv_out[i]
                assert output_ub_with_alpha > adv_out[i]
            b_s = 50
            # Random upward perturbations in [0, eps], clamped into the box.
            rand_sample_batch = (
                torch.rand((b_s, *config.input_dim), device=device) * eps
            )
            rand_sample_batch = torch.clamp(
                input.repeat((b_s, 1)) + rand_sample_batch, 0, 1
            )
            rand_out = network(rand_sample_batch)
            for j in range(b_s):
                assert output_lb_with_alpha < rand_out[j][i]
                assert output_ub_with_alpha > rand_out[j][i]
            # Corner perturbations with directions {-1, 0, +1}. torch.randint's
            # high bound is exclusive, so it must be 2 here; the previous
            # randint(-1, 1, ...) only sampled {-1, 0} and never tested +eps.
            rand_sample_batch = (
                torch.randint(-1, 2, (b_s, *config.input_dim), device=device) * eps
            )
            rand_sample_batch = torch.clamp(
                input.repeat((b_s, 1)) + rand_sample_batch, 0, 1
            )
            rand_out = network(rand_sample_batch)
            for j in range(b_s):
                assert output_lb_with_alpha < rand_out[j][i]
                assert output_ub_with_alpha > rand_out[j][i]

    def test_large_sigmoid_input_bounds(self) -> None:
        """Relaxations must degenerate to constants for saturated inputs.

        For very negative lower bounds the lower relaxation must be the
        constant 0; for very positive upper bounds the upper relaxation must
        be the constant 1. NOTE(review): some (lb, ub) pairs here are
        inverted (lb > ub) — presumably an intentional stress input; confirm.
        """
        device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
        seed_everything(42)
        input_lb = torch.tensor(
            [
                -1000,
                -1000,
                -1000,
                -1000,
                -500,
                -500,
                -500,
                -500,
                -499,
                -499,
                -499,
                -499,
            ],
            device=device,
        )
        input_ub = torch.tensor(
            [-1000, -500, 499, 500, -1000, -500, 499, 500, -1000, -500, 499, 500],
            device=device,
        )
        sig_layer = Sigmoid(input_lb.shape)
        sig_layer = sig_layer.to(device)
        sig_layer.eval()
        (
            lb_slope,
            ub_slope,
            lb_intercept,
            ub_intercept,
        ) = sig_layer.get_approximation_slopes_and_intercepts((input_lb, input_ub))
        for i, (lb, ub) in enumerate(zip(input_lb, input_ub)):
            if lb < 0:
                assert abs(lb_slope[i] - 0) <= 1e-6
                assert abs(lb_intercept[i] - 0) <= 1e-6
            if ub > 0:
                assert abs(ub_slope[i] - 0) <= 1e-6
                assert abs(ub_intercept[i] - 1) <= 1e-6
        print("Done")
# Ad-hoc entry point for running a subset of the checks directly
# (outside pytest); the slow tests are left commented out.
if __name__ == "__main__":
    t = TestSigmoid()
    t.test_sigmoid_net_sound()
    # t.test_sigmoid_net_large_soundness()
    # t.test_sigmoid_large_net()
    # t.test_large_sigmoid_input_bounds()
    # t.test_sigmoid_bounds()
    t.test_sigmoid_bound_optimization()
    t.test_sigmoid_tanh_layers()
| 12,725 | 35.674352 | 104 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/relu_test.py | import pytest
import torch
from torch import Tensor
from src.abstract_domains.DP_f import DeepPoly_f
from src.abstract_domains.zonotope import HybridZonotope
from src.abstract_layers.abstract_relu import ReLU
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.tags import query_tag
from src.utilities.config import make_backsubstitution_config
from src.utilities.queries import get_output_bound_initial_query_coef
from tests.test_util import set_torch_precision
class TestReLU:
    """Unit tests for the abstract ReLU layer.

    Covers: error handling for missing input bounds, the DeepPoly
    relaxation (slopes/intercepts) in the stable and unstable regimes,
    backsubstitution of coefficients and bias, and abstract-element /
    interval propagation.
    """

    def test_backsubstitution_with_missing_bounds(self) -> None:
        """Backsubstituting through a ReLU without input bounds must raise."""
        with pytest.raises(RuntimeError):
            layer = ReLU((1,))
            query_coef = get_output_bound_initial_query_coef(
                dim=(2,),
                intermediate_bounds_to_recompute=None,  # get all
                use_dependence_sets=False,
                batch_size=1,
                device=torch.device("cpu"),
                dtype=None,  # TODO: should this be something else?
            )
            dummy_shape = MN_BaB_Shape(
                query_id=query_tag(layer),
                query_prev_layer=None,
                queries_to_compute=None,
                lb=AffineForm(query_coef),
                ub=AffineForm(query_coef),
                unstable_queries=None,
                subproblem_state=None,
            )
            layer.backsubstitute(make_backsubstitution_config(), dummy_shape)

    @set_torch_precision(torch.float32)
    def test_approximation_stable_inactive(self) -> None:
        """Stably inactive neurons (bounds [-2, -1]) relax to the constant 0."""
        layer = ReLU((2,))
        layer_lb = torch.full(size=(1, 2), fill_value=-2)
        layer_ub = torch.full(size=(1, 2), fill_value=-1)
        layer.update_input_bounds((layer_lb, layer_ub))
        assert layer.input_bounds  # mypy
        lb_slope = layer._get_lower_approximation_slopes(
            make_backsubstitution_config(), layer.input_bounds
        )
        ub_slope = layer._get_upper_approximation_slopes(
            make_backsubstitution_config(), layer.input_bounds
        )
        lb_intercept, ub_intercept = layer._get_approximation_intercepts(
            layer.input_bounds
        )
        assert (lb_slope == 0).all()
        assert (ub_slope == 0).all()
        assert (lb_intercept == 0).all()
        assert (ub_intercept == 0).all()

    @set_torch_precision(torch.float32)
    def test_approximation_stable_active(self) -> None:
        """Stably active neurons (bounds [1, 2]) relax to the identity."""
        layer = ReLU((2,))
        layer_lb = torch.full(size=(1, 2), fill_value=1)
        layer_ub = torch.full(size=(1, 2), fill_value=2)
        layer.update_input_bounds((layer_lb, layer_ub))
        assert layer.input_bounds  # mypy
        lb_slope = layer._get_lower_approximation_slopes(
            make_backsubstitution_config(), layer.input_bounds
        )
        ub_slope = layer._get_upper_approximation_slopes(
            make_backsubstitution_config(), layer.input_bounds
        )
        lb_intercept, ub_intercept = layer._get_approximation_intercepts(
            layer.input_bounds
        )
        assert (lb_slope == 1).all()
        assert (ub_slope == 1).all()
        assert (lb_intercept == 0).all()
        assert (ub_intercept == 0).all()

    @set_torch_precision(torch.float32)
    def test_approximation_unstable_lower_triangle(self) -> None:
        """Unstable neurons with |ub| > |lb| get lower slope 1 (DeepPoly heuristic)."""
        layer = ReLU((2,))
        layer_lb = torch.full(size=(1, 2), fill_value=-1)
        layer_ub = torch.full(size=(1, 2), fill_value=2)
        layer.update_input_bounds((layer_lb, layer_ub))
        # Upper relaxation is the chord: slope ub/(ub-lb), intercept -lb*slope.
        expected_ub_slope = 2 / (2 - (-1))
        expected_ub_intercept = -(-1) * expected_ub_slope
        assert layer.input_bounds  # mypy
        lb_slope = layer._get_lower_approximation_slopes(
            make_backsubstitution_config(), layer.input_bounds
        )
        ub_slope = layer._get_upper_approximation_slopes(
            make_backsubstitution_config(), layer.input_bounds
        )
        lb_intercept, ub_intercept = layer._get_approximation_intercepts(
            layer.input_bounds
        )
        assert (lb_slope == 1).all()
        assert (ub_slope == expected_ub_slope).all()
        assert (lb_intercept == 0).all()
        assert (ub_intercept == expected_ub_intercept).all()

    @set_torch_precision(torch.float32)
    def test_approximation_unstable_upper_triangle(self) -> None:
        """Unstable neurons with |lb| > |ub| get lower slope 0 (DeepPoly heuristic)."""
        layer = ReLU((2,))
        layer_lb = torch.full(size=(1, 2), fill_value=-2)
        layer_ub = torch.full(size=(1, 2), fill_value=1)
        layer.update_input_bounds((layer_lb, layer_ub))
        expected_ub_slope = 1 / (1 - (-2))
        expected_ub_intercept = -(-2) * expected_ub_slope
        assert layer.input_bounds  # mypy
        lb_slope = layer._get_lower_approximation_slopes(
            make_backsubstitution_config(), layer.input_bounds
        )
        ub_slope = layer._get_upper_approximation_slopes(
            make_backsubstitution_config(), layer.input_bounds
        )
        lb_intercept, ub_intercept = layer._get_approximation_intercepts(
            layer.input_bounds
        )
        assert (lb_slope == 0).all()
        assert (ub_slope == expected_ub_slope).all()
        assert (lb_intercept == 0).all()
        assert (ub_intercept == expected_ub_intercept).all()

    @set_torch_precision(torch.float32)
    def test_mn_bab_backsubstitution_coef(self) -> None:
        """Backsubstitution must pick lower/upper slopes per coefficient sign."""
        layer = ReLU((2,))
        layer_lb = torch.full(size=(1, 2), fill_value=-2.0)
        layer_ub = torch.full(size=(1, 2), fill_value=1.0)
        layer.update_input_bounds((layer_lb, layer_ub))
        lb = AffineForm(torch.eye(2).unsqueeze(0))
        # Second upper query has a negative coefficient, so it must use
        # the lower slope after backsubstitution.
        ub = AffineForm(torch.tensor([[1.0, 0.0], [0.0, -1.0]]).unsqueeze(0))
        initial_shape = MN_BaB_Shape(
            query_id=query_tag(layer),
            query_prev_layer=None,
            queries_to_compute=None,
            lb=lb,
            ub=ub,
            unstable_queries=None,
            subproblem_state=None,
        )
        resulting_shape = layer.backsubstitute(
            make_backsubstitution_config(), initial_shape
        )
        expected_lb_slope = 0
        expected_ub_slope = 1 / (1 - (-2))
        lb_coef_as_expected = resulting_shape.lb.coef == torch.zeros(
            2, 2
        ).fill_diagonal_(expected_lb_slope)
        assert isinstance(lb_coef_as_expected, Tensor)
        assert lb_coef_as_expected.all()
        assert resulting_shape.ub is not None
        assert isinstance(resulting_shape.ub.coef, Tensor)
        assert resulting_shape.ub.coef[0, 0, 0] == expected_ub_slope
        assert resulting_shape.ub.coef[0, 1, 1] == expected_lb_slope

    @set_torch_precision(torch.float32)
    def test_mn_bab_backsubstitution_bias(self) -> None:
        """Backsubstitution must accumulate the correct relaxation intercepts."""
        layer_lb = torch.full(size=(1, 2), fill_value=-2.0)
        layer_ub = torch.full(size=(1, 2), fill_value=1.0)
        layer = ReLU((2,))
        lb = AffineForm(torch.eye(2).unsqueeze(0))
        ub = AffineForm(torch.tensor([[1.0, 0.0], [0.0, -1.0]]).unsqueeze(0))
        initial_shape = MN_BaB_Shape(
            query_id=query_tag(layer),
            query_prev_layer=None,
            queries_to_compute=None,
            lb=lb,
            ub=ub,
            unstable_queries=None,
            subproblem_state=None,
        )
        layer.update_input_bounds((layer_lb, layer_ub))
        resulting_shape = layer.backsubstitute(
            make_backsubstitution_config(), initial_shape
        )
        expected_lb_intercept = 0
        expected_ub_slope = 1 / (1 - (-2))
        expected_ub_intercept = -(-2) * expected_ub_slope
        assert (resulting_shape.lb.bias == expected_lb_intercept).all()
        assert resulting_shape.ub is not None
        assert resulting_shape.ub.bias[0, 0] == expected_ub_intercept
        assert resulting_shape.ub.bias[0, 1] == expected_lb_intercept

    @set_torch_precision(torch.float32)
    def test_propagate_abs(self) -> None:
        """Zonotope/DeepPoly-f propagation must contain the concrete output."""
        in_channels = 1
        input_dim = (in_channels, 5, 3)
        batch_size = 2
        layer = ReLU(input_dim)
        x = torch.rand((batch_size, *input_dim))
        x_out = layer(x)
        in_zono = HybridZonotope.construct_from_noise(
            x, eps=0.01, domain="zono", data_range=(-torch.inf, torch.inf)
        )
        out_zono = layer.propagate_abstract_element(in_zono)
        assert out_zono.shape == x_out.shape
        assert out_zono.may_contain_point(x_out)
        in_dpf = DeepPoly_f.construct_from_noise(
            x, eps=0.01, domain="DPF", data_range=(-torch.inf, torch.inf)
        )
        out_dpf = layer.propagate_abstract_element(in_dpf)
        assert out_dpf.shape == x_out.shape
        assert out_dpf.may_contain_point(x_out)

    @set_torch_precision(torch.float32)
    def test_propagate_interval_toy_example(self) -> None:
        """Interval propagation of [-2, 2] through ReLU yields [0, 2]."""
        layer = ReLU((2,))
        input_lb = torch.tensor([-2, -2])
        input_ub = torch.tensor([2, 2])
        expected_output_lb = torch.tensor([0, 0])
        expected_output_ub = torch.tensor([2, 2])
        output_lb, output_ub = layer.propagate_interval((input_lb, input_ub))
        assert (output_lb == expected_output_lb).all()
        assert (output_ub == expected_output_ub).all()

    @set_torch_precision(torch.float32)
    def test_propagate_interval_forcing_lower_triangle_approximation(self) -> None:
        """Interval propagation with |ub| > |lb| still yields exact [0, ub]."""
        layer = ReLU((2,))
        input_lb = torch.tensor([-2, -2])
        input_ub = torch.tensor([2.1, 2.1])
        expected_output_lb = torch.tensor([0, 0])
        expected_output_ub = torch.tensor([2.1, 2.1])
        output_lb, output_ub = layer.propagate_interval((input_lb, input_ub))
        assert (output_lb == expected_output_lb).all()
        assert (output_ub == expected_output_ub).all()
| 9,655 | 36.866667 | 83 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/prima_optimization_test.py | import torch
from tests.test_util import optimize_output_node_bounds_with_prima_crown, toy_net
class TestPrimaOptimization:
    """Checks that enabling PRIMA constraints on top of alpha optimization
    never loosens the optimized output bounds of the toy network."""

    def test_toy_net(self) -> None:
        """PRIMA + alpha bounds must dominate alpha-only bounds on node 0."""
        network = toy_net()[0]
        lb_in = torch.tensor([[-1.0, -1.0]])
        ub_in = torch.tensor([[1.0, 1.0]])
        alpha_lb, alpha_ub = optimize_output_node_bounds_with_prima_crown(
            network, 0, lb_in, ub_in, optimize_alpha=True
        )
        prima_lb, prima_ub = optimize_output_node_bounds_with_prima_crown(
            network, 0, lb_in, ub_in, optimize_alpha=True, optimize_prima=True
        )
        # PRIMA adds multi-neuron constraints, so its bounds can only tighten.
        assert prima_lb >= alpha_lb
        assert prima_ub <= alpha_ub
# Ad-hoc entry point for running the check directly (outside pytest).
if __name__ == "__main__":
    T = TestPrimaOptimization()
    # torch.autograd.set_detect_anomaly(True)
    T.test_toy_net()
| 1,027 | 28.371429 | 84 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/fuzzing_test.py | from typing import Callable, List, Optional, Tuple, Union
import torch
from src.abstract_layers.abstract_network import AbstractNetwork
# from src.utilities.config import AbstractDomain
from src.utilities.initialization import seed_everything
from src.utilities.loading.network import freeze_network
from tests.test_util import ( # toy_all_layer_net_1d,
abs_toy_pad_net,
abs_toy_pad_tiny_net,
dp_call,
dpf_call,
get_convtranspose2d_conv_net,
milp_call,
run_fuzzing_test,
splitting_call,
toy_all_layer_net,
toy_avg_pool_net,
toy_convtranspose2d_net,
toy_max_pool_mixed_net,
toy_net,
toy_sig_net,
toy_sig_tanh_net,
zono_call,
)
# from tests.test_util import toy_max_pool_tiny_net
class TestFuzzing:
    """Fuzzing-based soundness tests.

    Each test constructs a toy network, samples random input boxes, and
    cross-checks the selected bounding methods (DeepPoly, DeepPoly-f,
    zonotope, MILP, input splitting) for soundness via run_fuzzing_test.
    """

    @staticmethod
    def fuzzing_test_network(
        network_constructor: Callable[[], Tuple[AbstractNetwork, Tuple[int, ...]]],
        eps: Union[float, List[float]] = 0.1,
        n: int = 20,
        dp: bool = True,
        milp: bool = True,
        dpf: bool = False,
        zono: bool = False,
        input_splitting: bool = False,
        splitting_dict: Optional[dict] = None,
    ) -> None:
        """Fuzz ``network_constructor`` with ``n`` random boxes per eps value.

        Args:
            network_constructor: returns (abstract network, input dim).
            eps: one or several box radii to test.
            n: number of random boxes per eps.
            dp/milp/dpf/zono: which bounding methods to cross-check.
            input_splitting: additionally check the input-splitting method,
                configured by ``splitting_dict`` (ignored when None).
        """
        device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
        print(f"Running Test with {network_constructor} using {device}")
        old_dtype = torch.get_default_dtype()
        dtype = torch.float64
        torch.set_default_dtype(dtype)
        # Restore the global default dtype even when an assertion fails;
        # otherwise one failing test leaks float64 into all later tests.
        try:
            seed_everything(42)
            network, input_dim = network_constructor()
            network = network.to(device).to(dtype)
            network.eval()
            freeze_network(network)
            print(f"Testing with eps={eps}.")
            if not isinstance(eps, list):
                eps = [eps]
            in_shape = (1, *input_dim)
            for e in eps:
                for i in range(n):
                    seed_everything(42 + i)
                    input = torch.rand(in_shape, device=device, dtype=dtype) * 2 - 1
                    # Randomly collapse some dimensions to zero-width
                    # intervals to exercise degenerate boxes.
                    tight_mask = torch.rand(input.shape, device=device) > 0.6
                    if (~tight_mask).sum() == 0:
                        tight_mask = torch.zeros_like(tight_mask)
                    input_lb = input - e
                    input_ub = torch.where(tight_mask, input - e, input + e)
                    bounding_calls = []
                    if dp:
                        bounding_calls += [dp_call]
                    if dpf:
                        bounding_calls += [dpf_call]
                    if zono:
                        bounding_calls += [zono_call]
                    if milp:
                        bounding_calls += [milp_call]
                    if input_splitting and splitting_dict is not None:
                        bounding_calls += [
                            lambda net, bounds: splitting_call(net, bounds, splitting_dict)  # type: ignore # does not realize splitting_dict is not None
                        ]
                    for bounding_call in bounding_calls:
                        network.reset_output_bounds()
                        network.reset_input_bounds()
                        run_fuzzing_test(
                            network,
                            input,
                            input_lb,
                            input_ub,
                            in_shape,
                            bounding_call,
                            use_beta=True,
                            use_adv=True,
                        )
        finally:
            torch.set_default_dtype(old_dtype)

    def test_maxpool_toy_example(self, n: int = 1) -> None:
        """Fuzz a network mixing max-pooling with other layers."""
        self.fuzzing_test_network(
            toy_max_pool_mixed_net, [0.0001, 0.001, 0.01, 0.05], n, dp=True, dpf=True
        )
        # self.fuzzing_test_network(toy_max_pool_mixed_net, [0.1, 0.5], n) slow

    def test_pad_toy_example(self, n: int = 1) -> None:
        """Fuzz padding networks with zonotope and DeepPoly-f enabled."""
        self.fuzzing_test_network(
            abs_toy_pad_net, [0.001, 0.01], n, zono=True, dpf=True
        )
        self.fuzzing_test_network(abs_toy_pad_tiny_net, 0.01, n, zono=True, dpf=True)

    def test_toy_net(self, n: int = 1) -> None:
        """Fuzz the basic toy network."""
        self.fuzzing_test_network(toy_net, [0.001, 0.01], n, zono=True, dpf=True)

    # def test_toy_net_split(self, n: int = 1) -> None:
    #     splitting_dict = {
    #         "initial_splits": 2,
    #         "initial_split_dims": [0, 1],
    #         "max_depth": 5,
    #         "domain": AbstractDomain("DPF"),
    #         "batch_size": 100,
    #         "split_factor": 3,
    #     }
    #     self.fuzzing_test_network(
    #         toy_net,
    #         [0.001, 0.01],
    #         n,
    #         milp=False,
    #         dp=False,
    #         input_splitting=True,
    #         splitting_dict=splitting_dict,
    #     )
    #
    # def test_all_layer_split(self, n: int = 1) -> None:
    #     splitting_dict = {
    #         "initial_splits": 2,
    #         "initial_split_dims": [0, 1],
    #         "max_depth": 5,
    #         "domain": AbstractDomain("DPF"),
    #         "batch_size": 100,
    #         "split_factor": 3,
    #     }
    #     for _ in range(n):
    #         self.fuzzing_test_network(
    #             toy_all_layer_net_1d,
    #             [0.001, 0.01],
    #             1,
    #             milp=False,
    #             dp=False,
    #             input_splitting=True,
    #             splitting_dict=splitting_dict,
    #         )
    #
    #     splitting_dict = {
    #         "initial_splits": 1,
    #         "initial_split_dims": [0, 1],
    #         "max_depth": 5,
    #         "domain": AbstractDomain("dp"),
    #         "batch_size": 100,
    #         "split_factor": 3,
    #     }
    #
    #     for _ in range(n):
    #         self.fuzzing_test_network(
    #             toy_all_layer_net_1d,
    #             [0.001, 0.01],
    #             1,
    #             milp=False,
    #             dp=False,
    #             input_splitting=True,
    #             splitting_dict=splitting_dict,
    #         )
    #
    #     splitting_dict = {
    #         "initial_splits": 3,
    #         "initial_split_dims": [0],
    #         "max_depth": 5,
    #         "domain": AbstractDomain("zono"),
    #         "batch_size": 100,
    #         "split_factor": 3,
    #     }
    #     for _ in range(n):
    #         self.fuzzing_test_network(
    #             toy_all_layer_net_1d,
    #             [0.001, 0.01],
    #             1,
    #             milp=False,
    #             dp=False,
    #             input_splitting=True,
    #             splitting_dict=splitting_dict,
    #         )
    #
    #     splitting_dict = {
    #         "initial_splits": 3,
    #         "initial_split_dims": [0],
    #         "max_depth": 5,
    #         "domain": AbstractDomain("box"),
    #         "batch_size": 100,
    #         "split_factor": 3,
    #     }
    #     for _ in range(n):
    #         self.fuzzing_test_network(
    #             toy_all_layer_net_1d,
    #             [0.001, 0.01],
    #             1,
    #             milp=False,
    #             dp=False,
    #             input_splitting=True,
    #             splitting_dict=splitting_dict,
    #         )

    def test_toy_sig_net(self, n: int = 1) -> None:
        """Fuzz the sigmoid toy network (MILP/zonotope unsupported here)."""
        self.fuzzing_test_network(
            toy_sig_net, [0.001, 0.01], n, milp=False, zono=False, dpf=True
        )

    def test_toy_sig_tanh_net(self, n: int = 1) -> None:
        """Fuzz the mixed sigmoid/tanh toy network with DeepPoly only."""
        self.fuzzing_test_network(
            toy_sig_tanh_net, [0.001, 0.01], n, milp=False, zono=False, dpf=False
        )

    def test_toy_mixed(self, n: int = 1) -> None:
        """Fuzz a network combining all supported layer types."""
        self.fuzzing_test_network(
            toy_all_layer_net, [0.001, 0.01], n, zono=True, dpf=False
        )

    def test_toy_avg_pool_net(self, n: int = 1) -> None:
        """Fuzz the average-pooling toy network."""
        self.fuzzing_test_network(
            toy_avg_pool_net, [0.001, 0.01], n, zono=True, dpf=True
        )

    def test_nn4sys(self) -> None:
        # Pass here as they are quite slow and test only sub-behaviour checked in other tests.
        pass
        # for _ in range(5):
        #     self.fuzzing_test_network(
        #         get_toy_split_block,
        #         0.5,
        #         1,
        #         milp=False,
        #         dpf=True,
        #     )
        # self.fuzzing_test_network(
        #     get_nn4sys_128d_splitblock,
        #     0.2,
        #     5,
        #     milp=False,
        #     dpf=True,
        # )
        # self.fuzzing_test_network(
        #     get_nn4sys_128d_block,
        #     0.2,
        #     5,
        #     milp=False,
        #     dpf=True,
        # )
        # self.fuzzing_test_network(
        #     get_nn4sys_128d_multipath_block_stacked,
        #     0.2,
        #     5,
        #     milp=False,
        #     dpf=True,
        # )

    def test_convtranspose2d_net(self, n: int = 1) -> None:
        """Fuzz networks containing transposed convolutions."""
        self.fuzzing_test_network(
            toy_convtranspose2d_net, [0.001, 0.01], n, milp=False, zono=True, dpf=True
        )
        self.fuzzing_test_network(
            get_convtranspose2d_conv_net,
            [0.001, 0.01],
            n,
            milp=False,
            zono=True,
            dpf=True,
        )
# Ad-hoc entry point for a long fuzzing run (n=50 boxes per test);
# pytest uses the much smaller defaults instead.
if __name__ == "__main__":
    T = TestFuzzing()
    n = 50
    # T.test_toy_net_split(n)
    # T.test_all_layer_split(n)
    T.test_nn4sys()
    # T.test_convtranspose2d_net(n)
    T.test_maxpool_toy_example(n)
    T.test_pad_toy_example(n)
    T.test_toy_net(n)
    T.test_toy_sig_net(n)
    T.test_toy_sig_tanh_net(n)
    T.test_toy_mixed(n)
    T.test_toy_avg_pool_net(n)
| 9,519 | 30.733333 | 149 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/ibp_test.py | from typing import Dict, Tuple
import torch
from torch import Tensor
from src.milp_network import MILPNetwork
from tests.test_util import get_deep_poly_bounds, toy_net
class TestIBP:
    """
    We test our IBP implementation (also with respect to existing bound reusage)
    """

    def test_milp_toy_example(self) -> None:
        """IBP reusing DeepPoly bounds must be at least as tight as plain IBP.

        Runs plain IBP, records the per-layer bounds, then runs DeepPoly
        followed by IBP with ``use_existing_bounds=True`` and checks that
        every reused bound dominates the plain-IBP one.
        """
        network = toy_net()[0]
        input_lb = torch.tensor([[-1.0, -1.0]])
        input_ub = torch.tensor([[1.0, 1.0]])
        # Our new implementation
        input_lb = input_lb.unsqueeze(0)
        input_ub = input_ub.unsqueeze(0)
        # Simple IBP pass
        has_intermediate_layer_bounds = MILPNetwork._check_layer_bounds(network.layers)
        assert not has_intermediate_layer_bounds
        output_lb, output_ub = network.set_layer_bounds_via_interval_propagation(
            input_lb, input_ub, use_existing_bounds=False
        )
        print("Basic IBP")
        # Per-layer (lower, upper) input bounds from the plain IBP pass.
        prior_bounds: Dict[int, Tuple[Tensor, Tensor]] = {}
        for i, layer in enumerate(network.layers):
            if hasattr(layer, "input_bounds"):
                print(f"Layer: {i} Bounds: {layer.input_bounds}")
                prior_bounds[i] = layer.input_bounds
        # Reset bounds
        network.reset_input_bounds()
        network.reset_output_bounds()
        # Run DP pass yielding better bounds
        get_deep_poly_bounds(network, input_lb[0], input_ub[0])
        has_intermediate_layer_bounds = MILPNetwork._check_layer_bounds(network.layers)
        assert not has_intermediate_layer_bounds
        print("After DP")
        for i, layer in enumerate(network.layers):
            if hasattr(layer, "input_bounds"):
                print(f"Layer: {i} Bounds: {layer.input_bounds}")
        # Run IBP re-using intermediate results
        output_lb, output_ub = network.set_layer_bounds_via_interval_propagation(
            input_lb, input_ub, use_existing_bounds=True
        )
        print("IBP Reusing DP Bounds")
        for i, layer in enumerate(network.layers):
            if hasattr(layer, "input_bounds"):
                print(f"Layer: {i} Bounds: {layer.input_bounds}")
                assert (layer.input_bounds[0] >= prior_bounds[i][0]).all()
                assert (layer.input_bounds[1] <= prior_bounds[i][1]).all()  # type: ignore[misc] # mypy throws false index out of range
# Ad-hoc entry point for running the check directly (outside pytest).
if __name__ == "__main__":
    T = TestIBP()
    T.test_milp_toy_example()
| 2,406 | 36.030769 | 135 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/unbinaryop_test.py | import shutil
import time
from pathlib import Path
import torch
from torch import nn
from src.abstract_layers.abstract_network import AbstractNetwork
from src.milp_network import MILPNetwork
from src.state.tags import layer_tag
from src.utilities.loading.network import load_onnx_model
from tests.test_util import get_deep_poly_bounds, toy_unbinary_net
class TestUnbinaryOp:
    """
    We test our UnbinaryOp layer.
    """

    def test_unbinary_op(self) -> None:
        """MILP and DeepPoly bounds must enclose the concrete output.

        ``num_eps`` is a small numerical tolerance for solver imprecision.
        """
        network = toy_unbinary_net()
        input = torch.ones(size=(1, 1, 4, 4))
        eps = 0.5
        num_eps = 2e-6
        input_lb, input_ub = input - eps, input + eps
        q = network(input)
        milp_model = MILPNetwork.build_model_from_abstract_net(
            input, input_lb, input_ub, network
        )
        lbs, ubs = milp_model.get_network_output_bounds()
        (
            output_lb_deep_poly,
            output_ub_deep_poly,
        ) = get_deep_poly_bounds(network, input_lb, input_ub)
        # The concrete output must lie inside the MILP bounds.
        assert (q + num_eps >= lbs).all()
        assert (q - num_eps <= ubs).all()
        # DeepPoly is an over-approximation, so its lower bound must not
        # exceed the (tighter) MILP lower bound.
        assert (
            output_lb_deep_poly < lbs + num_eps
        ).all(), (
            f"Found violation lb {torch.max(output_lb_deep_poly - (lbs + num_eps))}"
        )

    def test_unbinary_op_milp(self) -> None:
        """Per-layer MILP bounds must refine the stored output bounds.

        Round-trips the network through ONNX export/import, builds the MILP
        encoding, and checks layer-wise MILP bounds against each layer's
        previously computed output bounds. Cleans up the temp ONNX file.
        """
        net_pt = toy_unbinary_net()
        eps = 2 / 255
        shape = (1, 4, 4)
        onnx_shape = (1, 1, 4, 4)
        try:
            temp_dir = "tests/temp"
            net_pref = "pad_test"
            onnx_path = f"{temp_dir}/{net_pref}.onnx"
            Path(temp_dir).mkdir(parents=True, exist_ok=True)
            x = torch.rand(onnx_shape)
            torch.onnx.export(
                net_pt,
                x,
                onnx_path,
                export_params=True,
                training=0,
                do_constant_folding=False,
                verbose=False,
                input_names=["input.1"],
                output_names=["output"],
            )
            o2p_net, _, in_name = load_onnx_model(onnx_path)
            o2p_net.eval()
            # Get abstract net
            assert isinstance(o2p_net, nn.Sequential)
            abs_net = AbstractNetwork.from_concrete_module(o2p_net, shape)
            input = torch.rand(onnx_shape)
            input_lb = input - eps
            input_ub = input + eps
            milp_model = MILPNetwork.build_model_from_abstract_net(
                input, input_lb, input_ub, abs_net
            )
            for i, layer in enumerate(milp_model.net.layers):
                lbs, ubs = milp_model.get_network_bounds_at_layer_multi(
                    layer_tag(layer),
                    timeout_per_instance=20,
                    timeout_total=400,
                    timeout=time.time() + 400,
                )
                print(f"Layer {i} - LBS: {lbs} UBS: {ubs}")
                assert (
                    lbs >= milp_model.net.layers[i].output_bounds[0].flatten()
                ).all()
                assert (
                    ubs <= milp_model.net.layers[i].output_bounds[1].flatten()
                ).all()
        finally:
            shutil.rmtree(temp_dir, ignore_errors=True)
# Ad-hoc entry point; both checks are currently disabled and only the
# instance is constructed when run directly.
if __name__ == "__main__":
    t = TestUnbinaryOp()
    # t.test_unbinary_op()
    # t.test_unbinary_op_milp()
| 3,317 | 30.301887 | 84 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/reshape_test.py | import shutil
from pathlib import Path
import torch
from torch import nn
from torch.distributions import Beta
from src.abstract_domains.DP_f import DeepPoly_f
from src.abstract_domains.zonotope import HybridZonotope
from src.abstract_layers.abstract_network import AbstractNetwork
from src.utilities.loading.network import load_onnx_model
from tests.test_util import get_deep_poly_bounds, toy_reshape_net
class TestReshape:
    """
    We test our Reshape layer.
    """

    def test_reshape(self) -> None:
        """Round-trip through ONNX and check abstract-domain soundness.

        Exports the reshape toy net to ONNX, reloads it, checks concrete
        outputs match, then checks DeepPoly / zonotope / DeepPoly-f outputs
        contain sampled concrete points. Cleans up the temp ONNX file.
        """
        net_pt = toy_reshape_net()
        shape = (256,)
        onnx_shape = (1, 256)
        eps = 2 / 255
        try:
            temp_dir = "tests/temp"
            net_pref = "reshape_test"
            onnx_path = f"{temp_dir}/{net_pref}.onnx"
            Path(temp_dir).mkdir(parents=True, exist_ok=True)
            x = torch.rand(onnx_shape)
            torch.onnx.export(
                net_pt,
                x,
                onnx_path,
                export_params=True,
                training=0,
                do_constant_folding=False,
                verbose=False,
                input_names=["input.1"],
                output_names=["output"],
            )
            o2p_net, _, in_name = load_onnx_model(onnx_path)
            o2p_net.eval()
            # Compare results
            for i in range(10):
                x = torch.rand(onnx_shape)
                out_pt_net = net_pt(x)
                out_o2p_net = o2p_net(x)
                assert torch.isclose(out_pt_net, out_o2p_net).all()
            # Get abstract net
            assert isinstance(o2p_net, nn.Sequential)
            abs_net = AbstractNetwork.from_concrete_module(o2p_net, shape)
            input = torch.rand(onnx_shape)
            input_lb = input - eps
            input_ub = input + eps
            (
                dp_lb,
                dp_ub,
            ) = get_deep_poly_bounds(abs_net, input_lb, input_ub)
            in_zono = HybridZonotope.construct_from_bounds(
                input_lb, input_ub, domain="zono"
            )
            out_zono = abs_net.propagate_abstract_element(in_zono)
            in_dpf = DeepPoly_f.construct_from_bounds(input_lb, input_ub, domain="DPF")
            out_dpf = abs_net.propagate_abstract_element(in_dpf)
            # Beta(0.5, 0.5) concentrates samples near the box boundary.
            m = Beta(concentration0=0.5, concentration1=0.5)
            for _ in range(10):
                shape_check = (256, *shape[1:])
                check_x = input_lb + 2 * eps * m.sample(shape_check)
                out = abs_net(check_x)
                assert (dp_lb <= out).all() and (out <= dp_ub).all()
                assert out_zono.may_contain_point(out)
                assert out_dpf.may_contain_point(out)
            print("Done")
        finally:
            shutil.rmtree(temp_dir, ignore_errors=True)
# Ad-hoc entry point for running the check directly (outside pytest).
if __name__ == "__main__":
    T = TestReshape()
    T.test_reshape()
| 2,895 | 29.166667 | 87 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/milp_test.py | import torch
from gurobipy import GRB # type: ignore[import]
from tests.gurobi_util import create_milp_model
from tests.test_util import get_deep_poly_bounds, toy_net
class TestAgainstMILP:
    """Compare our bounds against bounds from a MILP solver on the toy net.

    The MILP encoding is exact, so any sound relaxation (e.g. DeepPoly)
    must produce bounds that enclose the MILP range.
    """

    @staticmethod
    def _milp_output_range(network, input_lb, input_ub):
        """Return the exact (min, max) of the single output node via Gurobi."""
        network.set_layer_bounds_via_interval_propagation(input_lb, input_ub)
        model, variables = create_milp_model(network, input_lb, input_ub)
        objective = variables[-1]
        model.setObjective(objective, GRB.MINIMIZE)
        model.optimize()
        exact_lb = model.objVal
        model.setObjective(objective, GRB.MAXIMIZE)
        model.optimize()
        exact_ub = model.objVal
        return exact_lb, exact_ub

    def test_deep_poly_toy_example(self) -> None:
        """The toy network's exact output range on [-1, 1]^2 is [1, 3]."""
        network = toy_net()[0]
        input_lb = torch.tensor([-1.0, -1.0])
        input_ub = torch.tensor([1.0, 1.0])
        exact_lb, exact_ub = self._milp_output_range(network, input_lb, input_ub)
        assert exact_lb == 1
        assert exact_ub == 3

    def test_deep_poly_sound_on_toy_net(self) -> None:
        """DeepPoly bounds must enclose the exact MILP output range."""
        network = toy_net()[0]
        input_lb = torch.tensor([-1.0, -1.0])
        input_ub = torch.tensor([1.0, 1.0])
        dp_lb, dp_ub = get_deep_poly_bounds(
            network, input_lb.unsqueeze(0), input_ub.unsqueeze(0)
        )
        exact_lb, exact_ub = self._milp_output_range(network, input_lb, input_ub)
        assert dp_lb <= exact_lb
        assert dp_ub >= exact_ub
| 1,892 | 30.032787 | 125 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/batch_test.py | import csv
from copy import deepcopy
import torch
from src.abstract_layers.abstract_network import AbstractNetwork
from src.branch_and_bound import BranchAndBound
from src.mn_bab_optimizer import MNBabOptimizer
from src.utilities.config import make_verifier_config
from src.utilities.loading.data import transform_and_bound
from src.utilities.loading.network import freeze_network, mnist_a_b, mnist_conv_tiny
from tests.test_util import (
MNIST_CONV_DATA_TEST_CONFIG,
MNIST_FC_DATA_TEST_CONFIG,
MNIST_INPUT_DIM,
)
class TestBatchProcessing:
    """End-to-end smoke tests: branch-and-bound with batched subproblems runs
    without errors on small MNIST networks (no bound values are asserted)."""

    def test_batch_with_bab(self) -> None:
        """BaB with batch sizes [4, 4] on a 2x50 fully-connected MNIST net."""
        network_path = "networks/mnist_2_50_flattened.pyt"
        original_network = mnist_a_b(2, 50)
        state_dict = torch.load(network_path)
        original_network.load_state_dict(state_dict)
        network = AbstractNetwork.from_concrete_module(original_network, (784,))
        freeze_network(network)
        test_data_path = "test_data/mnist_test_100.csv"
        # Fix: close the CSV file deterministically (the original leaked the
        # open handle for the lifetime of the test).
        with open(test_data_path, "r") as test_file:
            label_as_string, *pixel_values = next(
                csv.reader(test_file, delimiter=",")
            )
        label = int(label_as_string)
        test_config = deepcopy(MNIST_FC_DATA_TEST_CONFIG)
        test_config.eps = 0.003
        image, input_lb, input_ub = transform_and_bound(pixel_values, test_config)
        # Only meaningful to verify if the network classifies correctly.
        pred_label = torch.argmax(original_network(image))
        assert pred_label == label
        competing_label = 1
        assert label != competing_label
        # Query encodes logit(label) - logit(competing_label).
        query_coef = torch.zeros(1, 1, *network.output_dim)
        query_coef.data[:, 0, label] = 1
        query_coef.data[0, 0, competing_label] = -1
        config = make_verifier_config(
            optimize_alpha=True,
            optimize_prima=True,
            bab_batch_sizes=[4, 4],
            recompute_intermediate_bounds_after_branching=True,
        )
        optimizer = MNBabOptimizer(config.optimizer, config.backsubstitution)
        bab = BranchAndBound(
            optimizer, config.bab, config.backsubstitution, torch.device("cpu")
        )
        bab.bound_minimum_with_branch_and_bound(
            "dummy_id", query_coef, network, input_lb, input_ub
        )

    def test_batch_with_bab_with_conv(self) -> None:
        """BaB with batch sizes [4, 4, 4] on a tiny convolutional MNIST net."""
        network_path = "networks/mnist_convTiny.pyt"
        original_network = mnist_conv_tiny()
        state_dict = torch.load(network_path)
        original_network.load_state_dict(state_dict)
        network = AbstractNetwork.from_concrete_module(
            original_network, MNIST_INPUT_DIM
        )
        freeze_network(network)
        test_data_path = "test_data/mnist_test_100.csv"
        # Fix: close the CSV file deterministically (see test_batch_with_bab).
        with open(test_data_path, "r") as test_file:
            label_as_string, *pixel_values = next(
                csv.reader(test_file, delimiter=",")
            )
        label = int(label_as_string)
        test_config = deepcopy(MNIST_CONV_DATA_TEST_CONFIG)
        test_config.eps = 0.002
        image, input_lb, input_ub = transform_and_bound(pixel_values, test_config)
        pred_label = torch.argmax(original_network(image))
        assert pred_label == label
        competing_label = 1
        assert label != competing_label
        query_coef = torch.zeros(1, 1, *network.output_dim)
        query_coef.data[:, 0, label] = 1
        query_coef.data[0, 0, competing_label] = -1
        config = make_verifier_config(
            optimize_alpha=True,
            optimize_prima=True,
            bab_batch_sizes=[4, 4, 4],
            recompute_intermediate_bounds_after_branching=True,
        )
        optimizer = MNBabOptimizer(config.optimizer, config.backsubstitution)
        bab = BranchAndBound(
            optimizer, config.bab, config.backsubstitution, torch.device("cpu")
        )
        bab.bound_minimum_with_branch_and_bound(
            "dummy_id", query_coef, network, input_lb, input_ub
        )
if __name__ == "__main__":
    # Allow running the batch tests directly as a script.
    suite = TestBatchProcessing()
    suite.test_batch_with_bab()
    suite.test_batch_with_bab_with_conv()
| 4,032 | 33.470085 | 84 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/sequential_test.py | import torch
from tests.test_util import toy_net
class TestSequential:
    def test_set_layer_bounds_via_interval_propagation(self) -> None:
        """
        Correct bounds found by hand on toy_net.
        """
        model = toy_net()[0]
        input_lb = torch.tensor([-1.0, -1.0]).unsqueeze(0)
        input_ub = torch.tensor([1.0, 1.0]).unsqueeze(0)
        model.set_layer_bounds_via_interval_propagation(input_lb, input_ub)
        # Hand-computed (lower, upper) input bounds for layers 1 through 6.
        expected_bounds = [
            ([-1, -1], [1, 1]),
            ([-2, -2], [2, 2]),
            ([0, 0], [2, 2]),
            ([0, -2], [4, 2]),
            ([0, 0], [4, 2]),
            ([1, 0], [7, 2]),
        ]
        for layer, (exp_lb, exp_ub) in zip(model.layers, expected_bounds):
            layer_lb, layer_ub = layer.input_bounds
            assert (layer_lb == torch.tensor(exp_lb)).all()
            assert (layer_ub == torch.tensor(exp_ub)).all()
| 1,495 | 35.487805 | 75 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/determinism_test.py | import torch
from src.utilities.initialization import seed_everything
from src.utilities.loading.network import load_onnx_model
class TestDeterminism:
    """
    We test our deterministic behaviour
    """

    def test_determ(self) -> None:
        # Checks that network outputs for the same input do not change when the
        # batch the input is embedded in changes (size or auxiliary content),
        # on CPU and, if available, on CUDA. Mismatches are printed and counted;
        # the final hard assert is intentionally disabled (see bottom).
        d_type = torch.float64
        seed_everything(1)
        assert_flag = False  # set to True whenever any mismatch is observed
        net, onnx_shape, inp_name = load_onnx_model(
            "vnn-comp-2022-sup/benchmarks/sri_resnet_b/onnx/resnet_3b2_bn_mixup_ssadv_4.0_bs128_lr-1_v2.onnx"
        )
        x = torch.rand((1, 3, 32, 32), dtype=d_type)
        net.eval()
        if d_type == torch.float64:
            print("Using double precision")
            net.double()
        out = net(x)
        # Random auxiliary inputs; x is always the first element of the batch.
        X_pgd = torch.rand((50, 3, 32, 32), dtype=d_type)
        X_cat = torch.cat([x, X_pgd])
        print("Running cpu test")
        print("Running batch_size tests")
        issues_found = 0
        for bs in range(1, 50):
            # print(bs)
            # Output for x (batch element 0) as a function of the batch size bs.
            out_i = net(X_cat[:bs])[0]
            if not torch.allclose(out, out_i):  # (out_i != out).any():
                print(
                    f"Mismatch at {bs}: {out} to {out_i} - Diff = {torch.sum(torch.abs(out-out_i))}"
                )
                assert_flag = True
                issues_found += 1
            # Compare consecutive batch sizes, not always against the original.
            out = out_i
            # print(f"Out {bs}: {out_i}")
        print(f"Found {issues_found} mismatches")
        if torch.cuda.is_available():
            print("Running cuda tests")
            net.to("cuda")
            out_cpu = out
            out_cuda = net(x.cuda())
            # Cross-device consistency check (CPU result vs. CUDA result).
            if not torch.allclose(out_cpu, out_cuda.cpu()):
                print(
                    f"CPU and CUDA not consistent: CPU {out_cpu} GPU {out_cuda.cpu()} - Diff = {torch.sum(torch.abs(out_cpu-out_cuda.cpu()))}"
                )
                assert_flag = True
            print("Running batch_size tests")
            out = out_cuda
            issues_found = 0
            # Same varying-batch-size sweep as above, but on the GPU.
            for bs in range(1, 50):
                out_i = net(X_cat[:bs].cuda())[0]
                if not torch.allclose(out, out_i):
                    print(
                        f"Mismatch at {bs}: {out} to {out_i} - Diff = {torch.sum(torch.abs(out-out_i))}"
                    )
                    assert_flag = True
                    issues_found += 1
                out = out_i
            print(f"Found {issues_found} mismatches")
            print("Running equal batch-sizes, different aux batch test")
            # Fixed batch size, varying auxiliary batch content: the output for
            # x should not depend on the other elements in the batch.
            for bs in range(2, 20):
                issues_found = 0
                out = out_cuda
                for j in range(10):
                    X_pgd = torch.rand((bs - 1, 3, 32, 32), dtype=d_type).cuda()
                    X_cat = torch.cat([x.cuda(), X_pgd])
                    out_i = net(X_cat.cuda())[0]
                    if j == 0:
                        out = out_i  # baseline for this batch size
                    if not torch.allclose(out, out_i):
                        print(f"Mismatch {bs} - {j}: {out} to {out_i}")
                        assert_flag = True
                        issues_found += 1
                    out = out_i
                print(f"Found {issues_found} mismatches")
        if assert_flag:
            # NOTE(review): failures are deliberately only reported, not raised;
            # re-enable the assert below to make this a hard test.
            pass
            # assert False, "Non-deterministic behaviour detected"
if __name__ == "__main__":
    # Allow running the determinism check directly as a script.
    TestDeterminism().test_determ()
| 3,355 | 33.958333 | 142 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/reduced_parameter_sharing_test.py | import torch
from src.utilities.config import make_optimizer_config
from tests.test_util import optimize_output_node_bounds_with_prima_crown, toy_net
class TestReducedParameterSharing:
    def test_toy_net(self) -> None:
        """Bounds computed with reduced parameter sharing on the toy network
        must stay within the known valid range [1.0, 3.65402]."""
        network = toy_net()[0]
        box_lb = torch.tensor([-1.0, -1.0]).unsqueeze(0)
        box_ub = torch.tensor([1.0, 1.0]).unsqueeze(0)
        # Reduced sharing is enabled for the last two layers only; the conv2d
        # entry merely exercises config parsing (no conv2d layers in network).
        cfg = make_optimizer_config(
            optimize_alpha=True,
            optimize_prima=True,
            parameter_sharing={
                "fully_connected": "none",
                "conv2d": "in_channel",
            },
            parameter_sharing_layer_id_filter="layer_ids[-2:]",
        )
        assert cfg.parameter_sharing_config.reduce_parameter_sharing
        out_lb, out_ub = optimize_output_node_bounds_with_prima_crown(
            network,
            0,
            box_lb,
            box_ub,
            custom_optimizer_config=cfg,
        )
        assert out_lb >= 1.0
        assert out_ub <= 3.65402
if __name__ == "__main__":
    # torch.autograd.set_detect_anomaly(True)
    suite = TestReducedParameterSharing()
    suite.test_toy_net()
| 1,369 | 29.444444 | 86 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/leaky_gradient_min_max_test.py | import torch
from src.utilities.leaky_gradient_maximum_function import LeakyGradientMaximumFunction
from src.utilities.leaky_gradient_minimum_function import LeakyGradientMinimumFunction
# Module-level shortcuts: custom autograd Functions are invoked via `.apply`.
leaky_gradient_minimum = LeakyGradientMinimumFunction.apply
leaky_gradient_maximum = LeakyGradientMaximumFunction.apply
class TestLeakyGradientMinMaxFunctions:
    """The leaky min/max agree with torch.minimum/torch.maximum in the forward
    pass but propagate a gradient of 1 to *both* arguments in the backward pass
    (hence "leaky"), instead of only to the selected argument."""

    def test_min_forward(self) -> None:
        """Forward pass matches torch.minimum."""
        smaller = torch.tensor(2.0)
        larger = torch.tensor(3.0)
        assert torch.minimum(smaller, larger) == leaky_gradient_minimum(smaller, larger)

    def test_min_backward(self) -> None:
        """Both inputs receive gradient 1, not only the argmin."""
        smaller = torch.tensor(2.0, requires_grad=True)
        larger = torch.tensor(3.0, requires_grad=True)
        # Renamed from `min` to avoid shadowing the builtin.
        result = leaky_gradient_minimum(smaller, larger)
        result.backward()
        assert smaller.grad == 1
        assert larger.grad == 1

    def test_max_forward(self) -> None:
        """Forward pass matches torch.maximum."""
        smaller = torch.tensor(2.0)
        larger = torch.tensor(3.0)
        assert torch.maximum(smaller, larger) == leaky_gradient_maximum(smaller, larger)

    def test_max_backward(self) -> None:
        """Both inputs receive gradient 1, not only the argmax."""
        smaller = torch.tensor(2.0, requires_grad=True)
        larger = torch.tensor(3.0, requires_grad=True)
        # Renamed from `max` to avoid shadowing the builtin.
        result = leaky_gradient_maximum(smaller, larger)
        result.backward()
        assert smaller.grad == 1
        assert larger.grad == 1
| 1,345 | 33.512821 | 88 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/einsum_test.py | from typing import Callable, Tuple, Union
import torch
from torch import Tensor
from torch.distributions.beta import Beta
from src.abstract_domains.DP_f import DeepPoly_f
from src.abstract_domains.zonotope import HybridZonotope
from src.utilities.initialization import seed_everything
from tests.test_util import _pgd_whitebox
def check_bounds(
    lb: Tensor,
    ub: Tensor,
    input_lb: Tensor,
    input_ub: Tensor,
    x: Tensor,
    net: Callable[[Tensor], Tensor],
    use_beta: bool = True,
    use_adv: bool = True,
) -> None:
    """Empirically check that (lb, ub) bound the outputs of ``net`` over the
    input box [input_lb, input_ub].

    Args:
        lb, ub: candidate output bounds to validate.
        input_lb, input_ub: elementwise bounds of the input region.
        x: a concrete point inside the input region (batch dim first).
        net: the network (or single layer) under test.
        use_beta: sample Beta(0.5, 0.5)-distributed points in the box and
            assert all outputs respect the bounds (up to 1e-7 tolerance).
        use_adv: additionally run a PGD attack searching for bound violations.
    """
    seed = 42
    device = input_lb.device
    input_shape = x.shape
    if use_beta:
        seed_everything(seed)
        # Beta(0.5, 0.5) concentrates samples near the box faces, where
        # violations are most likely.
        m = Beta(concentration0=0.5, concentration1=0.5)
        eps = (input_ub - input_lb) / 2
        lb, ub = lb.to(device), ub.to(device)
        # (The original computed net(x) here; the result was never used.)
        for _ in range(100):
            shape_check = (256, *input_shape[1:])
            check_x = input_lb + 2 * eps * m.sample(shape_check).to(device)
            out = net(check_x)
            # Small tolerance for floating-point noise.
            assert (lb - 1e-7 <= out).all() and (out <= ub + 1e-7).all()
    if use_adv:
        bounds = (lb.to(device), ub.to(device))
        target = torch.argmax(net(x)).item()
        _pgd_whitebox(
            net,  # type: ignore # slight abuse of passing a single layer instead of network
            x,
            bounds,
            target,
            input_lb,
            input_ub,
            x.device,
            num_steps=200,
        )
def einsum_test(
    defining_str: str,
    a_shape: Tuple[int, ...],
    b_shape: Tuple[int, ...],
    eps: float,
    n: int = 1,
) -> None:
    """Soundness check of abstract einsum for all supported domains.

    For ``n`` random instances, contracts an abstract element (built from an
    eps-box around a random tensor) with a fixed random tensor ``b`` using
    ``defining_str``, then validates the concretized bounds empirically.

    Args:
        defining_str: einsum equation with two operands.
        a_shape: shape of the (abstract) first operand.
        b_shape: shape of the concrete second operand.
        eps: half-width of the input box.
        n: number of random repetitions.
    """

    def einsum_layer(x: Tensor) -> Tensor:
        # Concrete reference computation used by check_bounds.
        return torch.einsum(defining_str, x, b)

    for _ in range(n):
        a_input = torch.rand(a_shape)
        b = torch.rand(b_shape)
        # The original repeated this body verbatim for "zono", "box" and
        # "DPF"; deduplicated into a single loop over the three domains.
        for abstract_cls, domain in (
            (HybridZonotope, "zono"),
            (HybridZonotope, "box"),
            (DeepPoly_f, "DPF"),
        ):
            a_abs: Union[HybridZonotope, DeepPoly_f] = abstract_cls.construct_from_bounds(
                a_input - eps, a_input + eps, domain=domain
            )
            abs_result = a_abs.einsum(defining_str, b)
            lb, ub = abs_result.concretize()
            check_bounds(
                lb,
                ub,
                a_input - eps,
                a_input + eps,
                a_input,
                einsum_layer,
                use_beta=True,
                use_adv=True,
            )
class TestEinsum:
    def test_einsums(self, eps: float = 0.001, n: int = 1) -> None:
        """Check einsum soundness for three representative contraction patterns."""
        specs = (
            ("bs, bqs -> bq", (1, 5), (1, 3, 5)),
            ("bsq, bqs -> bq", (1, 5, 3), (1, 3, 5)),
            ("blj, blk -> bk", (1, 3, 5), (1, 3, 4)),
        )
        for pattern, a_shape, b_shape in specs:
            einsum_test(pattern, a_shape, b_shape, eps, n)
if __name__ == "__main__":
    # Run the einsum soundness checks at two noise levels.
    tester = TestEinsum()
    repeats = 1
    tester.test_einsums(0.001, repeats)
    tester.test_einsums(0.01, repeats)
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/disjunctive_encoding_test.py | import torch
from src.mn_bab_verifier import MNBaBVerifier
from src.state.subproblem_state import SubproblemState
from src.utilities.argument_parsing import get_config_from_json
from src.utilities.config import make_config
# from src.utilities.config import AbstractDomain
from src.utilities.output_property_form import OutputPropertyForm
from src.verification_instance import get_asnet, get_io_constraints_from_spec
class TestDisjunctiveEncoding:
    """
    We test with our Disjunctive ReLU encoding implementation
    """

    def test_disjunctive_encoding_generation(self, n: int = 1) -> None:
        # Builds an OutputPropertyForm with a disjunction adapter from a VNNLIB
        # spec plus one extra hand-written clause, checks the adapter shapes,
        # and verifies that a DeepPoly pass discharges one of the two
        # properties. NOTE(review): parameter `n` is unused.
        # Reads from the vnn spec for robot
        cfg_path = "configs/vnncomp22/vnn22_reach_robot.json"
        spec_path = (
            "vnn-comp-2022-sup/benchmarks/reach_prob_density/vnnlib/robot_0.vnnlib"
        )
        net_path = "vnn-comp-2022-sup/benchmarks/reach_prob_density/onnx/robot.onnx"
        bunch_config = get_config_from_json(cfg_path)
        config = make_config(**bunch_config)
        device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
        config.use_gpu = torch.cuda.is_available()
        (inputs, input_constraints, target_constr,) = get_io_constraints_from_spec(
            spec_path=spec_path,
            config=config,
            device=device,
        )
        net, as_network = get_asnet(net_path, config, device)
        # Get verifier
        verifier = MNBaBVerifier(as_network, device, config.verifier)
        # Only the first spec instance is exercised.
        input_point = inputs[0]
        (input_lb, input_ub) = input_constraints[0]
        gt_constraint = target_constr[0]
        # Orig. constraint
        # y_1 > 0.0649 || y_1 < -0.0649 || y_2 > 0.0649 || y_2 < -0.0649 || y_0 < 0.155
        # We add the following constraints
        # y_0 > -50 || y_2 > -0.0649 (found by dp bounds)
        # Clause format appears to be (output_idx, sign, threshold) —
        # TODO(review): confirm against OutputPropertyForm's expected encoding.
        new_clause = [(0, -1, -50), (2, -1, 0.0649)]
        out_constr = [*gt_constraint, new_clause]
        # No batch dimension
        input_lb = input_lb.unsqueeze(0)
        input_ub = input_ub.unsqueeze(0)
        input_point = input_point.unsqueeze(0)
        out_prop_form = OutputPropertyForm.create_from_properties(
            out_constr,
            disjunction_adapter=None,
            use_disj_adapter=True,
            n_class=as_network.output_dim[-1],
            device=input_lb.device,
            dtype=input_lb.dtype,
        )
        # Two conjunctive properties remain after the disjunction adapter is built.
        assert len(out_prop_form.properties_to_verify) == 2
        assert (
            out_prop_form.property_matrix == torch.eye(2, device=out_prop_form.device)
        ).all()
        assert (out_prop_form.property_matrix == out_prop_form.combination_matrix).all()
        assert out_prop_form.disjunction_adapter is not None
        # Adapter is Linear(5 -> 7) -> ... -> Linear(7 -> 2): 7 literals over 5
        # outputs, combined into 2 clause outputs.
        assert out_prop_form.disjunction_adapter[0].weight.shape == torch.Size([7, 5])
        assert out_prop_form.disjunction_adapter[0].bias.shape == torch.Size([7])
        assert out_prop_form.disjunction_adapter[2].weight.shape == torch.Size([2, 7])
        assert out_prop_form.disjunction_adapter[2].bias.shape == torch.Size([2])
        verifier.append_out_adapter(
            out_prop_form.disjunction_adapter,
            device=out_prop_form.device,
            dtype=out_prop_form.dtype,
        )
        (
            dp_out_prop_form,
            bounds,
            verified,
            falsified,
            ub_inputs,
        ) = verifier._verify_output_form_with_deep_poly(
            input_lb,
            input_ub,
            out_prop_form,
            compute_sensitivity=False,
            subproblem_state=SubproblemState.create_default(
                split_state=None,
                optimize_prima=False,
                batch_size=1,
                device=input_lb.device,
                use_params=False,
            ),
            ibp_pass=True,
        )
        # DeepPoly should discharge exactly one of the two properties.
        assert len(dp_out_prop_form.properties_to_verify) == 1
if __name__ == "__main__":
    # Allow running the disjunctive-encoding test directly as a script.
    TestDisjunctiveEncoding().test_disjunctive_encoding_generation()
| 4,014 | 34.530973 | 88 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/linear_test.py | import torch
from torch import Tensor
from src.abstract_domains.DP_f import DeepPoly_f
from src.abstract_domains.zonotope import HybridZonotope
from src.abstract_layers.abstract_linear import Linear
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.tags import query_tag
from src.utilities.config import make_backsubstitution_config
class TestLinear:
    """Unit tests for the abstract Linear layer."""

    def test_backsubstitution_mn_bab(self) -> None:
        """Backsubstituting through Linear multiplies the coefficients by the
        weight matrix and folds the bias into the affine offset."""
        layer = Linear(10, 2, bias=True, input_dim=(10,))
        lb_coef = torch.eye(2).unsqueeze(0)
        lb = AffineForm(lb_coef)
        # Upper bound uses distinct coefficients so both paths are exercised.
        ub = AffineForm(2 * lb_coef)
        initial_shape = MN_BaB_Shape(
            query_id=query_tag(layer),
            query_prev_layer=None,
            queries_to_compute=None,
            lb=lb,
            ub=ub,
            unstable_queries=None,
            subproblem_state=None,
        )
        assert isinstance(initial_shape.lb.coef, Tensor)
        assert initial_shape.ub is not None
        assert isinstance(initial_shape.ub.coef, Tensor)
        # Fix: the original re-instantiated a second Linear layer here,
        # shadowing the one referenced by query_tag; we reuse the same layer.
        expected_lb_coef = initial_shape.lb.coef.matmul(layer.weight)
        expected_lb_bias = initial_shape.lb.coef.matmul(layer.bias)
        expected_lb = AffineForm(expected_lb_coef, expected_lb_bias)
        expected_ub_coef = initial_shape.ub.coef.matmul(layer.weight)
        expected_ub_bias = initial_shape.ub.coef.matmul(layer.bias)
        expected_ub = AffineForm(expected_ub_coef, expected_ub_bias)
        expected_shape = MN_BaB_Shape(
            query_id=query_tag(layer),
            query_prev_layer=None,
            queries_to_compute=None,
            lb=expected_lb,
            ub=expected_ub,
            unstable_queries=None,
            subproblem_state=None,
        )
        assert isinstance(expected_shape.lb.coef, Tensor)
        assert expected_shape.ub is not None
        assert isinstance(expected_shape.ub.coef, Tensor)
        actual_shape = layer.backsubstitute(
            make_backsubstitution_config(), initial_shape
        )
        assert isinstance(actual_shape.lb.coef, Tensor)
        assert actual_shape.ub is not None
        assert isinstance(actual_shape.ub.coef, Tensor)
        assert expected_shape.lb.coef.equal(actual_shape.lb.coef)
        assert expected_shape.ub.coef.equal(actual_shape.ub.coef)
        assert expected_shape.lb.bias.equal(actual_shape.lb.bias)
        assert expected_shape.ub.bias.equal(actual_shape.ub.bias)

    def test_propagate_abs_linear(self) -> None:
        """Abstract elements propagated through Linear keep the output shape
        and contain the concrete output point."""
        input_dim = (12,)
        batch_size = 2
        eps = 0.01
        layer = Linear(input_dim[0], 3, bias=True, input_dim=(12,))
        x = torch.rand((batch_size, *input_dim))
        x_out = layer(x)
        in_zono = HybridZonotope.construct_from_noise(x, eps=eps, domain="zono")
        out_zono = layer.propagate_abstract_element(in_zono)
        assert out_zono.shape == x_out.shape
        assert out_zono.may_contain_point(x_out)
        in_dpf = DeepPoly_f.construct_from_noise(x, eps=eps, domain="DPF")
        out_dpf = layer.propagate_abstract_element(in_dpf)
        assert out_dpf.shape == x_out.shape
        assert out_dpf.may_contain_point(x_out)

    def test_propagate_interval_identity_layer(self) -> None:
        """An identity Linear layer leaves interval bounds unchanged."""
        layer = Linear(2, 2, bias=True, input_dim=(2,))
        layer.weight.data = torch.eye(2)
        layer.bias.data = torch.zeros(2)
        input_lb = torch.tensor([-1.0, -1.0])
        input_ub = torch.tensor([1.0, 1.0])
        output_lb, output_ub = layer.propagate_interval((input_lb, input_ub))
        assert (output_lb == input_lb).all()
        assert (output_ub == input_ub).all()

    def test_propagate_interval_toy_example_layer(self) -> None:
        """Hand-computed interval propagation through a small affine layer."""
        layer = Linear(2, 2, bias=True, input_dim=(2,))
        layer.weight.data = torch.tensor([[1.0, 1.0], [1.0, -1.0]])
        layer.bias.data = torch.tensor([1, -1])
        input_lb = torch.tensor([-1.0, -1.0])
        input_ub = torch.tensor([1.0, 1.0])
        expected_output_lb = torch.tensor([-1, -3])
        expected_output_ub = torch.tensor([3, 1])
        output_lb, output_ub = layer.propagate_interval((input_lb, input_ub))
        assert (output_lb == expected_output_lb).all()
        assert (output_ub == expected_output_ub).all()
| 4,292 | 35.692308 | 80 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/deeppoly_test.py | import torch
from tests.test_util import get_deep_poly_bounds, get_deep_poly_lower_bounds, toy_net
# DeepPoly paper source: https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf
class TestDeepPoly:
    """
    PRIMA_CROWN is an extension of DeepPoly. Without any optimization,
    it should yield the same approximations as DeepPoly does.
    """

    def test_toy_net(self) -> None:
        """
        Expected lower and upper bound taken from DeepPoly paper
        """
        model = toy_net()[0]
        box_lb = torch.tensor([-1, -1]).unsqueeze(0)
        box_ub = torch.tensor([1, 1]).unsqueeze(0)
        lb, ub = get_deep_poly_bounds(model, box_lb, box_ub)
        assert lb == 1
        assert ub == 4
        # The lower-bound-only entry point must agree on the lower bound.
        lower_only, _ = get_deep_poly_lower_bounds(model, box_lb, box_ub)
        assert lower_only == 1
if __name__ == "__main__":
    # Allow running the DeepPoly toy-net check directly as a script.
    TestDeepPoly().test_toy_net()
| 1,060 | 26.921053 | 85 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/unit_tests/dependence_sets_test.py | import torch
import torch.nn as nn
from torch import Tensor
from src.abstract_layers.abstract_conv2d import Conv2d
from src.abstract_layers.abstract_max_pool2d import MaxPool2d
from src.abstract_layers.abstract_network import AbstractNetwork
from src.abstract_layers.abstract_relu import ReLU
from src.mn_bab_shape import AffineForm, MN_BaB_Shape
from src.state.tags import query_tag
from src.utilities.config import make_backsubstitution_config
from src.utilities.dependence_sets import DependenceSets
from src.utilities.initialization import seed_everything
from tests.test_util import toy_max_pool_mixed_net
class TestDependenceSets:
    def test_unfold_to_shapes(self) -> None:
        """unfold_to produces one (c, d, d) patch per query, for inputs with
        or without an explicit query dimension."""
        B, C, H, W = 10, 3, 4, 4
        c, h, w, d = 15, 13, 13, 7
        base = torch.rand((B, c, h, w))
        sets = torch.rand((B, C * H * W, c, d, d))
        idxs = torch.arange(H * W).repeat(C)
        coef = DependenceSets(
            sets=sets, spatial_idxs=idxs, input_dim=(C, H, W), cstride=3, cpadding=2
        )
        for candidate in (base, base.unsqueeze(1)):
            unfolded = DependenceSets.unfold_to(candidate, coef)
            assert list(unfolded.shape) == [B, C * H * W, c, d, d]
def test_fold_to_shapes(self) -> None:
B, C, H, W = 300, 5, 2, 2
c, h, w, d = 70, 5, 5, 4
old_dtype = torch.get_default_dtype()
torch.set_default_dtype(torch.float64)
devices = ["cpu", "cuda"]
for device in devices:
x = torch.rand((B, c, h, w), device=device)
sets = torch.rand((B, C * H * W, c, d, d), device=device)
idxs = torch.arange(H * W, device=device).repeat(C)
coef = DependenceSets(
sets=sets, spatial_idxs=idxs, input_dim=(C, H, W), cstride=3, cpadding=2
)
x_unfolded = DependenceSets.unfold_to(x, coef)
out_unfold = x_unfolded * coef.sets
coef_tensor = coef.to_tensor((c, h, w))
out_tensor = x.unsqueeze(1) * coef_tensor
assert torch.isclose(
out_unfold.flatten(2).sum(-1),
out_tensor.flatten(2).sum(-1),
atol=1e-12,
rtol=1e-12,
).all(), f"failed for device {device}"
torch.set_default_dtype(old_dtype)
def test_concretize_shapes(self) -> None:
B, C, H, W = 10, 3, 4, 4
c, h, w, d = 15, 13, 13, 7
input_bounds = torch.rand((c, h, w)).expand((B, c, h, w))
sets = torch.rand((B, C * H * W, c, d, d))
idxs = torch.arange(H * W).repeat(C)
coef = DependenceSets(
sets=sets, spatial_idxs=idxs, input_dim=(C, H, W), cstride=3, cpadding=2
)
bias = torch.rand((B, C * H * W))
affine = AffineForm(coef, bias)
output_lb, output_ub = MN_BaB_Shape(
query_id=query_tag(ReLU((1,))),
query_prev_layer=None,
queries_to_compute=None,
lb=affine,
ub=affine,
unstable_queries=None,
subproblem_state=None,
).concretize(
input_bounds,
input_bounds,
)
assert list(output_lb.shape) == [B, C * H * W]
assert output_ub is not None
assert list(output_ub.shape) == [B, C * H * W]
    def test_conv2d_shapes(self) -> None:
        # Backsubstituting a DependenceSets coefficient through a Conv2d must
        # grow the patch size and compose stride/padding with the layer's own:
        # d_new = (d - 1) * stride + ksz, cstride' = cstride * stride,
        # cpadding' = cstride * padding + cpadding.
        B, C, H, W = 10, 3, 4, 4
        c_pre, h_pre, w_pre = 8, 26, 26
        ksz, stride, padding = 4, 3, 2
        layer = nn.Conv2d(c_pre, 2 * c_pre, ksz, stride, padding)
        abstract_layer = Conv2d.from_concrete_module(layer, (c_pre, h_pre, w_pre))
        c, _, _ = abstract_layer.output_dim
        d = 3  # patch size of the incoming dependence sets
        sets = torch.rand((B, C * H * W, c, d, d))
        idxs = torch.arange(H * W).repeat(C)
        coef = DependenceSets(
            sets=sets, spatial_idxs=idxs, input_dim=(C, H, W), cstride=3, cpadding=2
        )
        bias = torch.rand((B, C * H * W))
        affine = AffineForm(coef, bias)
        # Random subset of queries marked unstable; only those are propagated.
        unstable_queries = torch.randint(0, 2, size=(C * H * W,), dtype=torch.bool)
        Q = unstable_queries.sum().item()
        affine = affine.filter_queries(unstable_queries)
        abstract_shape = MN_BaB_Shape(
            query_id=query_tag(ReLU((1,))),
            query_prev_layer=None,
            queries_to_compute=None,
            lb=affine,
            ub=affine,
            unstable_queries=unstable_queries,
            subproblem_state=None,
        )
        abstract_shape = abstract_layer.backsubstitute(
            make_backsubstitution_config(), abstract_shape
        )
        # Patch growth from the conv receptive field.
        d_new = (d - 1) * stride + ksz
        assert isinstance(abstract_shape.lb.coef, DependenceSets)
        assert abstract_shape.ub is not None
        assert isinstance(abstract_shape.ub.coef, DependenceSets)
        for coef in [abstract_shape.lb.coef, abstract_shape.ub.coef]:
            assert all(
                [
                    type(coef) is DependenceSets,
                    list(coef.sets.shape) == [B, Q, c_pre, d_new, d_new],
                    coef.cstride == 3 * 3,  # cstride * layer stride
                    coef.cpadding == 3 * 2 + 2,  # cstride * padding + cpadding
                ]
            )
        for bias in [abstract_shape.lb.bias, abstract_shape.ub.bias]:
            assert list(bias.shape) == [B, Q]
    def test_maxpool_shapes(self) -> None:
        # Full-network backsubstitution with dependence sets must produce the
        # same concretized bounds as the dense-tensor path on a net containing
        # max-pooling. NOTE(review): requires a CUDA device — this test is not
        # guarded by torch.cuda.is_available().
        device = torch.device("cuda")
        seed_everything(10)
        n = 2  # number of random repetitions
        for _ in range(n):
            abstract_net, input_dim = toy_max_pool_mixed_net()
            # Drop the last layer and adjust metadata accordingly.
            abstract_net.layers = abstract_net.layers[:-1]
            abstract_net = abstract_net.to(device)
            abstract_net.output_dim = abstract_net.layers[-1].output_dim
            abstract_net.dependence_set_applicable = True
            c, H, W = abstract_net.output_dim
            d = 1
            B, C = 1, c
            e = 0.05
            input = torch.rand((B, *input_dim), device=device)
            # Make some input coordinates degenerate (lb == ub) to exercise
            # tight bounds.
            tight_mask = torch.rand(input.shape, device=device) > 0.2
            input_lb = input - e
            input_ub = torch.where(tight_mask, input - e, input + e)
            sets = torch.rand((B, C * H * W, c, d, d), device=device)
            idxs = torch.arange(H * W, device=device).repeat(C)
            coef = DependenceSets(
                sets=sets, spatial_idxs=idxs, input_dim=(C, H, W), cstride=1, cpadding=0
            )
            bias = torch.rand((B, C * H * W), device=device)
            affine = AffineForm(coef, bias)
            abstract_shape = MN_BaB_Shape(
                query_id=query_tag(abstract_net),
                query_prev_layer=None,
                queries_to_compute=None,
                lb=affine.clone(),
                ub=affine.clone(),
                unstable_queries=None,
                subproblem_state=None,
            )
            # Reference path: densify the coefficients into plain tensors.
            abstract_shape_t = abstract_shape.clone_with_new_bounds(
                AffineForm(
                    abstract_shape.lb.coef.to_tensor(abstract_net.output_dim)
                    if isinstance(abstract_shape.lb.coef, DependenceSets)
                    else abstract_shape.lb.coef,
                    abstract_shape.lb.bias,
                ),
                None
                if abstract_shape.ub is None
                else AffineForm(
                    abstract_shape.ub.coef.to_tensor(abstract_net.output_dim)
                    if isinstance(abstract_shape.ub.coef, DependenceSets)
                    else abstract_shape.ub.coef,
                    abstract_shape.ub.bias,
                ),
            )
            abstract_shape_t = abstract_net.backsubstitute_mn_bab_shape(
                make_backsubstitution_config(
                    use_dependence_sets=False,
                    use_early_termination=False,
                ),
                input_lb,
                input_ub,
                query_coef=None,
                abstract_shape=abstract_shape_t,
                compute_upper_bound=True,
                reset_input_bounds=True,
                recompute_intermediate_bounds=True,
                optimize_intermediate_bounds=False,
            )
            output_lb_t, output_ub_t = abstract_shape_t.concretize(input_lb, input_ub)
            # Dependence-sets path: same query, sparse patch representation.
            abstract_shape_ds = abstract_shape.clone_with_new_bounds(
                abstract_shape.lb.clone(),
                None if abstract_shape.ub is None else abstract_shape.ub.clone(),
            )
            abstract_shape_ds = abstract_net.backsubstitute_mn_bab_shape(
                make_backsubstitution_config(
                    use_dependence_sets=True,
                    use_early_termination=False,
                ),
                input_lb,
                input_ub,
                query_coef=None,
                abstract_shape=abstract_shape_ds,
                compute_upper_bound=True,
                reset_input_bounds=True,
                recompute_intermediate_bounds=True,
                optimize_intermediate_bounds=False,
            )
            output_lb_ds, output_ub_ds = abstract_shape_ds.concretize(
                input_lb, input_ub
            )
            # Both paths must agree on the concretized bounds.
            assert torch.isclose(output_lb_ds, output_lb_t, atol=1e-10).all()
            assert output_ub_ds is not None
            assert output_ub_t is not None
            assert torch.isclose(output_ub_ds, output_ub_t, atol=1e-10).all()
            # assert isinstance(abstract_shape_t.lb.coef, Tensor)
            # assert torch.isclose(
            #     abstract_shape_t.lb.coef,
            #     abstract_shape_ds.lb.coef
            #     if isinstance(abstract_shape_ds.lb.coef, Tensor)
            #     else abstract_shape_ds.lb.coef.to_tensor(
            #         abstract_shape_t.lb.coef.shape[-3:]
            #     ),
            #     atol=1e-10,
            # ).all()
    def test_single_maxpool_shapes(self) -> None:
        # Backsubstitution through a single MaxPool2d layer: the dense-tensor
        # path and the dependence-sets path must agree on both coefficients
        # and biases. NOTE(review): requires a CUDA device — not guarded by
        # torch.cuda.is_available().
        device = torch.device("cuda")
        old_dtype = torch.get_default_dtype()
        torch.set_default_dtype(torch.float64)  # tight 1e-10 comparisons below
        seed_everything(10)
        c_pre, h_pre, w_pre = 2, 3, 3
        layer = nn.MaxPool2d(padding=1, stride=2, kernel_size=2)
        abstract_net = MaxPool2d.from_concrete_module(layer, (c_pre, h_pre, w_pre)).to(
            device
        )
        c, H, W = abstract_net.output_dim
        d = 1
        B = 1
        C = c
        e = 0.05
        input = torch.rand((B, *(c_pre, h_pre, w_pre)), device=device)
        # With threshold > 1.0 the mask is all-False, i.e. no degenerate
        # (lb == ub) coordinates in this test.
        tight_mask = torch.rand(input.shape, device=device) > 1.0
        input_lb = input - e
        input_ub = torch.where(tight_mask, input - e, input + e)
        abstract_net.input_bounds = (input_lb, input_ub)
        sets = torch.rand((B, C * H * W, c, d, d), device=device)
        idxs = torch.arange(H * W, device=device).repeat(C)
        coef = DependenceSets(
            sets=sets, spatial_idxs=idxs, input_dim=(C, H, W), cstride=1, cpadding=0
        )
        bias = torch.rand((B, C * H * W), device=device)
        affine = AffineForm(coef, bias)
        # All queries marked unstable so every one is propagated.
        unstable_queries = torch.ones(
            size=(C * H * W,), dtype=torch.bool, device=device
        )
        abstract_shape = MN_BaB_Shape(
            query_id=query_tag(ReLU((1,))),
            query_prev_layer=None,
            queries_to_compute=None,
            lb=affine.clone(),
            ub=affine.clone(),
            unstable_queries=unstable_queries,
            subproblem_state=None,
        )
        config = make_backsubstitution_config()
        # Reference path: densify coefficients before backsubstitution.
        abstract_shape_t = abstract_shape.clone_with_new_bounds(
            AffineForm(
                abstract_shape.lb.coef.to_tensor(abstract_net.output_dim)
                if isinstance(abstract_shape.lb.coef, DependenceSets)
                else abstract_shape.lb.coef,
                abstract_shape.lb.bias,
            ),
            None
            if abstract_shape.ub is None
            else AffineForm(
                abstract_shape.ub.coef.to_tensor(abstract_net.output_dim)
                if isinstance(abstract_shape.ub.coef, DependenceSets)
                else abstract_shape.ub.coef,
                abstract_shape.ub.bias,
            ),
        )
        abstract_shape_t = abstract_net.backsubstitute(config, abstract_shape_t)
        # Dependence-sets path on a fresh clone of the same shape.
        abstract_shape_ds = abstract_shape.clone_with_new_bounds(
            abstract_shape.lb.clone(),
            None if abstract_shape.ub is None else abstract_shape.ub.clone(),
        )
        abstract_shape_ds = abstract_net.backsubstitute(config, abstract_shape_ds)
        assert torch.isclose(
            abstract_shape_t.lb.bias, abstract_shape_ds.lb.bias, atol=1e-10
        ).all()
        assert isinstance(abstract_shape_t.lb.coef, Tensor)
        # Compare coefficients after densifying the dependence-sets result.
        assert torch.isclose(
            abstract_shape_t.lb.coef,
            abstract_shape_ds.lb.coef.to_tensor(abstract_shape_t.lb.coef.shape[-3:])
            if isinstance(abstract_shape_ds.lb.coef, DependenceSets)
            else abstract_shape_ds.lb.coef,
            atol=1e-10,
        ).all()
        # NOTE(review): not exception-safe — the default dtype is only
        # restored when all assertions pass.
        torch.set_default_dtype(old_dtype)
    def test_multi_conv2d_shapes(self) -> None:
        # Backsubstitution through two stacked Conv2d layers: dense-tensor and
        # dependence-sets paths must produce the same coefficients and biases.
        # NOTE(review): requires a CUDA device — not guarded by
        # torch.cuda.is_available().
        device = torch.device("cuda")
        c_pre, h_pre, w_pre = 2, 6, 6
        layer_a = nn.Conv2d(c_pre, 2 * c_pre, 2, 1, 1)
        layer_b = nn.Conv2d(2 * c_pre, 3 * c_pre, 3, 2, 2)
        abstract_net = AbstractNetwork.from_concrete_module(
            nn.Sequential(layer_a, layer_b), (c_pre, h_pre, w_pre)
        ).to(device)
        c, _, _ = abstract_net.output_dim
        d = 1
        B, C, H, W = 1, 1, 2, 2
        sets = torch.rand((B, C * H * W, c, d, d), device=device)
        idxs = torch.arange(H * W, device=device).repeat(C)
        coef = DependenceSets(
            sets=sets, spatial_idxs=idxs, input_dim=(C, H, W), cstride=2, cpadding=0
        )
        bias = torch.rand((B, C * H * W), device=device)
        affine = AffineForm(coef, bias)
        # All queries are propagated.
        unstable_queries = torch.ones(
            size=(C * H * W,), dtype=torch.bool, device=device
        )
        abstract_shape = MN_BaB_Shape(
            query_id=query_tag(ReLU((1,))),
            query_prev_layer=None,
            queries_to_compute=None,
            lb=affine.clone(),
            ub=affine.clone(),
            unstable_queries=unstable_queries,
            subproblem_state=None,
        )
        config = make_backsubstitution_config()
        assert isinstance(abstract_shape.lb.coef, DependenceSets)
        assert abstract_shape.ub is not None and isinstance(
            abstract_shape.ub.coef, DependenceSets
        )
        # Reference path: densify coefficients first.
        abstract_shape_t = abstract_shape.clone_with_new_bounds(
            AffineForm(
                abstract_shape.lb.coef.to_tensor(abstract_net.output_dim),
                abstract_shape.lb.bias,
            ),
            None
            if abstract_shape.ub is None
            else AffineForm(
                abstract_shape.ub.coef.to_tensor(abstract_net.output_dim),
                abstract_shape.ub.bias,
            ),
        )
        # Backsubstitute through layer 1 then layer 0 (reverse network order).
        abstract_shape_t = abstract_net.layers[0].backsubstitute(
            config, abstract_net.layers[1].backsubstitute(config, abstract_shape_t)
        )
        abstract_shape_ds = abstract_shape.clone_with_new_bounds(
            abstract_shape.lb.clone(),
            None if abstract_shape.ub is None else abstract_shape.ub.clone(),
        )
        abstract_shape_ds = abstract_net.layers[0].backsubstitute(
            config, abstract_net.layers[1].backsubstitute(config, abstract_shape_ds)
        )
        assert torch.isclose(
            abstract_shape_t.lb.bias, abstract_shape_ds.lb.bias, atol=1e-10
        ).all()
        assert isinstance(abstract_shape_t.lb.coef, Tensor)
        assert isinstance(abstract_shape_ds.lb.coef, DependenceSets)
        # Compare after densifying the dependence-sets coefficients.
        assert torch.isclose(
            abstract_shape_t.lb.coef,
            abstract_shape_ds.lb.coef.to_tensor(abstract_shape_t.lb.coef.shape[-3:]),
            atol=1e-10,
        ).all()
def test_relu_shapes(self) -> None:
B, C, H, W = 10, 3, 4, 4
c, h, w, d = 15, 13, 13, 7
stride, padding = 3, 2
abstract_layer = ReLU((c, h, w))
input_bounds = torch.rand((B, c, h, w))
abstract_layer.update_input_bounds((input_bounds, input_bounds))
sets = torch.rand((B, C * H * W, c, d, d))
idxs = torch.arange(H * W).repeat(C)
coef = DependenceSets(
sets=sets,
spatial_idxs=idxs,
input_dim=(C, H, W),
cstride=stride,
cpadding=padding,
)
bias = torch.rand((B, C * H * W))
affine = AffineForm(coef, bias)
unstable_queries = torch.randint(0, 2, size=(C * H * W,), dtype=torch.bool)
Q = unstable_queries.sum()
affine = affine.filter_queries(unstable_queries)
abstract_shape = MN_BaB_Shape(
query_id=query_tag(ReLU((1,))),
query_prev_layer=None,
queries_to_compute=None,
lb=affine,
ub=affine,
unstable_queries=unstable_queries,
subproblem_state=None,
)
abstract_shape = abstract_layer.backsubstitute(
make_backsubstitution_config(), abstract_shape
)
assert isinstance(abstract_shape.lb.coef, DependenceSets)
assert abstract_shape.ub is not None
assert isinstance(abstract_shape.ub.coef, DependenceSets)
for coef in [abstract_shape.lb.coef, abstract_shape.ub.coef]:
assert all(
[
type(coef) is DependenceSets,
list(coef.sets.shape) == [B, Q, c, d, d],
coef.cstride == stride,
coef.cpadding == padding,
]
)
for bias in [abstract_shape.lb.bias, abstract_shape.ub.bias]:
assert list(bias.shape) == [B, Q]
if __name__ == "__main__":
    tester = TestDependenceSets()
    for run_test in (
        tester.test_single_maxpool_shapes,
        tester.test_maxpool_shapes,
        tester.test_multi_conv2d_shapes,
        tester.test_fold_to_shapes,
        tester.test_concretize_shapes,
        tester.test_unfold_to_shapes,
        tester.test_conv2d_shapes,
    ):
        run_test()
| 17,973 | 37.989154 | 88 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/integration_tests/vnn22_to_as_test.py | import gzip
import numpy as np
import onnxruntime as ort # type: ignore[import]
import torch
from torch import nn
from src.abstract_layers.abstract_network import AbstractNetwork
from src.utilities.loading.network import load_onnx_model
from tests.test_util import get_deep_poly_bounds
def is_float_try(value: str) -> bool:
    """Return True iff *value* parses as a Python float (e.g. "1.5", "-2e3", "nan").

    The parameter was previously named ``str``, shadowing the builtin.
    """
    try:
        float(value)
        return True
    except ValueError:
        return False
class TestVNN:
    """
    Integration tests for the vnn22 benchmarks.

    Each test parses an ONNX network into the abstract-network representation,
    optionally cross-checks the parsed torch network against onnxruntime on
    random inputs, and then runs interval propagation plus a DeepPoly pass on
    a random eps-ball.
    """

    @staticmethod
    def _check_onnx_parity(onnx_path, o2p_net, in_shape_batch, in_name, num_trials=50):
        """Assert the parsed torch network matches onnxruntime on random inputs."""
        ort_session = ort.InferenceSession(gzip.open(onnx_path).read())
        for _ in range(num_trials):
            x = torch.rand(in_shape_batch)
            output_onnx = ort_session.run(
                None,
                {in_name: np.array(x).astype(np.float32)},
            )[0]
            out_o2p_net = o2p_net(x)
            assert torch.isclose(
                torch.Tensor(output_onnx), out_o2p_net, atol=1e-5
            ).all()

    @staticmethod
    def _propagate_bounds(abs_net, input_shape, onnx_path, eps=2 / 255):
        """Run interval propagation and DeepPoly on a random eps-ball input."""
        center = torch.rand(input_shape)
        input_lb = center - eps
        input_ub = center + eps
        lb, ub = abs_net.set_layer_bounds_via_interval_propagation(
            input_lb, input_ub, use_existing_bounds=False, has_batch_dim=True
        )
        print(f"Found lower {lb} and upper {ub}")
        (
            output_lb_without_alpha,
            _output_ub_without_alpha,
        ) = get_deep_poly_bounds(abs_net, input_lb, input_ub)
        print(f"Succesful run of {onnx_path} - LB {output_lb_without_alpha}")

    def test_onnx_single_net(self) -> None:
        """Parse and bound the single nn4sys mscn_128d network."""
        onnx_path = "vnn-comp-2022-sup/benchmarks/nn4sys/onnx/mscn_128d.onnx.gz"
        o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
        in_shape_batch = (1, *in_shape)
        o2p_net.eval()
        self._check_onnx_parity(onnx_path, o2p_net, in_shape_batch, in_name)
        assert isinstance(o2p_net, nn.Sequential)
        # Run AS Parse
        abs_net = AbstractNetwork.from_concrete_module(o2p_net, in_shape)
        # Drop singleton dims (keeping one batch dim) before propagation.
        in_shape_squeezed = tuple([1] + [i for i in in_shape_batch if i != 1])
        self._propagate_bounds(abs_net, in_shape_squeezed, onnx_path)

    def test_onnx_dual_net(self) -> None:
        """Parse and bound the dual nn4sys mscn_128d network."""
        onnx_path = "vnn-comp-2022-sup/benchmarks/nn4sys/onnx/mscn_128d_dual.onnx.gz"
        o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
        in_shape_batch = (1, *in_shape)
        o2p_net.eval()
        self._check_onnx_parity(onnx_path, o2p_net, in_shape_batch, in_name)
        assert isinstance(o2p_net, nn.Sequential)
        # Run AS Parse
        abs_net = AbstractNetwork.from_concrete_module(o2p_net, in_shape)
        in_shape_squeezed = tuple([1] + [i for i in in_shape_batch if i != 1])
        self._propagate_bounds(abs_net, in_shape_squeezed, onnx_path)

    def test_onnx_u_net(self) -> None:
        """Parse and bound the small carvana U-Net (no onnxruntime parity check)."""
        onnx_path = "vnn-comp-2022-sup/benchmarks/carvana_unet_2022/onnx/unet_upsample_small.onnx.gz"
        o2p_net, in_shape, _in_name = load_onnx_model(onnx_path)
        in_shape_batch = (1, *in_shape)
        o2p_net.eval()
        # Run AS Parse
        abs_net = AbstractNetwork.from_concrete_module(o2p_net, in_shape)
        self._propagate_bounds(abs_net, in_shape_batch, onnx_path)
if __name__ == "__main__":
    vnn_suite = TestVNN()
    # vnn_suite.test_onnx_u_net()
    vnn_suite.test_onnx_single_net()
    vnn_suite.test_onnx_dual_net()
| 5,518 | 31.087209 | 101 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/integration_tests/intermediate_layer_bound_opt_test.py | import csv
import torch
from src.abstract_layers.abstract_network import AbstractNetwork
from src.utilities.initialization import seed_everything
from src.utilities.loading.data import transform_and_bound
from src.utilities.loading.network import freeze_network, load_onnx_model, mnist_a_b
from tests.test_util import (
MNIST_FC_DATA_TEST_CONFIG,
get_deep_poly_bounds,
opt_intermediate_bounds,
optimize_output_node_bounds_with_prima_crown,
)
class TestAlphaIntermediateOptimization:
    """These tests currently do not assert anything besides the fact that the code is running without a crash."""

    def test_small_mnist_net(self) -> None:
        """Optimize intermediate-layer bounds on a small MNIST FC net and
        print the resulting improvement of the output lower bound."""
        seed_everything(10)
        num_samples = 5
        network_path = "networks/mnist_2_50_flattened.pyt"
        test_data_path = "test_data/mnist_test_100.csv"
        # Invariant across instances; mutates the shared test config once.
        MNIST_FC_DATA_TEST_CONFIG.eps = 0.03
        with open(test_data_path, "r") as test_file:
            test_instances = csv.reader(test_file, delimiter=",")
            for i, (label, *pixel_values) in enumerate(test_instances):
                if i >= num_samples:
                    break
                # Rebuild the networks per instance so optimized parameters
                # do not leak from one instance to the next.
                # NOTE(review): was mnist_a_b(5, 100), which does not match
                # the 2x50 checkpoint loaded below (load_state_dict would
                # fail); aligned with the other integration tests.
                original_network = mnist_a_b(2, 50)
                state_dict = torch.load(network_path)
                original_network.load_state_dict(state_dict)
                pre_net = original_network[:-1]
                network = AbstractNetwork.from_concrete_module(original_network, (784,))
                pre_network = AbstractNetwork.from_concrete_module(pre_net, (784,))
                freeze_network(network)
                freeze_network(pre_network)
                print("Testing test instance:", i)
                image, input_lb, input_ub = transform_and_bound(
                    pixel_values, MNIST_FC_DATA_TEST_CONFIG
                )
                pred_label = torch.argmax(original_network(image))
                if pred_label != int(label):
                    print("Network fails on test image, skipping.")
                    continue
                dp_lb, _dp_ub = get_deep_poly_bounds(pre_network, input_lb, input_ub)
                opt_lb, _opt_ub = optimize_output_node_bounds_with_prima_crown(
                    pre_network,
                    int(label),
                    input_lb,
                    input_ub,
                    optimize_alpha=True,
                    optimize_prima=True,
                )
                # Tighten all intermediate layer bounds, then bound again.
                opt_intermediate_bounds(
                    pre_network,
                    input_lb.view((1, -1)),
                    input_ub.view((1, -1)),
                    use_prima=True,
                )
                double_opt_lb, _double_opt_ub = optimize_output_node_bounds_with_prima_crown(
                    pre_network,
                    int(label),
                    input_lb,
                    input_ub,
                    optimize_alpha=True,
                    optimize_prima=True,
                )
                print(f"======= {dp_lb[0][pred_label]} - {opt_lb} - {double_opt_lb}")

    def test_resnet_onnx(self) -> None:
        """Optimize intermediate bounds on a small CIFAR-10 ResNet from ONNX."""
        seed_everything(42)
        o2p_net = load_onnx_model(
            "benchmarks_vnn21/cifar10_resnet/onnx/resnet_2b.onnx"
        )[0]
        freeze_network(o2p_net)
        abs_net = AbstractNetwork.from_concrete_module(o2p_net, (3, 32, 32))
        x = torch.rand((1, 3, 32, 32))
        eps = 20 / 255
        input_lb = x - eps
        input_ub = x + eps
        out_shape = opt_intermediate_bounds(abs_net, input_lb, input_ub, use_prima=True)
        final_lb, final_ub = out_shape.concretize(input_lb, input_ub)
        print(f"Mean: {torch.mean(final_ub-final_lb)}")
if __name__ == "__main__":
    suite = TestAlphaIntermediateOptimization()
    # suite.test_small_mnist_net()
    suite.test_resnet_onnx()
| 3,899 | 34.779817 | 122 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/integration_tests/prima_optimization_integration_test.py | import csv
import torch
from src.abstract_layers.abstract_network import AbstractNetwork
from src.utilities.initialization import seed_everything
from src.utilities.loading.data import transform_and_bound
from src.utilities.loading.network import freeze_network, mnist_a_b, mnist_conv_small
from tests.test_util import (
MNIST_CONV_DATA_TEST_CONFIG,
MNIST_FC_DATA_TEST_CONFIG,
MNIST_INPUT_DIM,
optimize_output_node_bounds_with_prima_crown,
)
class TestPrimaOptimization:
    """Checks that adding PRIMA constraints on top of alpha optimization
    never loosens the per-output-node bounds."""

    def _assert_prima_tightens_alpha_bounds(
        self,
        original_network,
        network,
        data_config,
        check_prediction,
        num_samples=10,
    ) -> None:
        """Shared driver: for each MNIST test instance, bound all 10 output
        nodes with alpha-only and with alpha+PRIMA optimization and assert
        PRIMA is at least as tight (up to a small rounding margin).

        If ``check_prediction`` is set, instances the concrete network
        misclassifies are skipped.
        """
        test_data_path = "test_data/mnist_test_100.csv"
        with open(test_data_path, "r") as test_file:
            test_instances = csv.reader(test_file, delimiter=",")
            for i, (label, *pixel_values) in enumerate(test_instances):
                if i >= num_samples:
                    break
                print("Testing test instance:", i)
                image, input_lb, input_ub = transform_and_bound(
                    pixel_values, data_config
                )
                if check_prediction:
                    pred_label = torch.argmax(original_network(image))
                    if pred_label != int(label):
                        print("Network fails on test image, skipping.")
                        continue
                output_lb_with_alpha = torch.full(
                    size=(10,), fill_value=0.0, dtype=torch.float64
                )
                output_ub_with_alpha = torch.full(
                    size=(10,), fill_value=float("inf"), dtype=torch.float64
                )
                output_lb_with_alpha_prima = torch.full(
                    size=(10,), fill_value=0.0, dtype=torch.float64
                )
                output_ub_with_alpha_prima = torch.full(
                    size=(10,), fill_value=float("inf"), dtype=torch.float64
                )
                for j in range(10):
                    (
                        alpha_lb,
                        alpha_ub,
                    ) = optimize_output_node_bounds_with_prima_crown(
                        network, j, input_lb, input_ub, optimize_alpha=True
                    )
                    output_lb_with_alpha[j] = alpha_lb
                    output_ub_with_alpha[j] = alpha_ub
                    (
                        alpha_prima_lb,
                        alpha_prima_ub,
                    ) = optimize_output_node_bounds_with_prima_crown(
                        network,
                        j,
                        input_lb,
                        input_ub,
                        optimize_alpha=True,
                        optimize_prima=True,
                    )
                    output_lb_with_alpha_prima[j] = alpha_prima_lb
                    output_ub_with_alpha_prima[j] = alpha_prima_ub
                # PRIMA must never be looser than alpha alone.
                rounding_error_margin = 1e-5
                assert (
                    output_lb_with_alpha_prima + rounding_error_margin
                    >= output_lb_with_alpha
                ).all()
                assert (
                    output_ub_with_alpha_prima - rounding_error_margin
                    <= output_ub_with_alpha
                ).all()

    def test_small_mnist_net(self) -> None:
        """Fully-connected 2x50 MNIST net; misclassified instances skipped."""
        seed_everything(10)
        original_network = mnist_a_b(2, 50)
        original_network.load_state_dict(
            torch.load("networks/mnist_2_50_flattened.pyt")
        )
        network = AbstractNetwork.from_concrete_module(original_network, (784,))
        freeze_network(network)
        self._assert_prima_tightens_alpha_bounds(
            original_network,
            network,
            MNIST_FC_DATA_TEST_CONFIG,
            check_prediction=True,
        )

    def test_small_cnn(self) -> None:
        """Small MNIST conv net; bounds checked for every instance."""
        seed_everything(10)
        original_network = mnist_conv_small()
        original_network.load_state_dict(
            torch.load("networks/mnist_convSmallRELU__Point.pyt")
        )
        network = AbstractNetwork.from_concrete_module(
            original_network, MNIST_INPUT_DIM
        )
        freeze_network(network)
        self._assert_prima_tightens_alpha_bounds(
            original_network,
            network,
            MNIST_CONV_DATA_TEST_CONFIG,
            check_prediction=False,
        )
| 6,143 | 35.571429 | 85 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/integration_tests/alpha_optimization_integration_test.py | import csv
import torch
from src.abstract_layers.abstract_network import AbstractNetwork
from src.utilities.initialization import seed_everything
from src.utilities.loading.data import transform_and_bound
from src.utilities.loading.network import freeze_network, mnist_a_b, mnist_conv_small
from tests.test_util import (
MNIST_CONV_DATA_TEST_CONFIG,
MNIST_FC_DATA_TEST_CONFIG,
MNIST_INPUT_DIM,
get_deep_poly_bounds,
optimize_output_node_bounds_with_prima_crown,
)
class TestAlphaOptimization:
    """Checks that alpha (slope) optimization only tightens plain DeepPoly
    output bounds."""

    def _assert_alpha_tightens_deep_poly(
        self,
        original_network,
        network,
        data_config,
        check_prediction,
        rounding_error_margin,
        num_samples=10,
    ) -> None:
        """Shared driver: compare alpha-optimized per-output-node bounds
        against plain DeepPoly bounds on the first MNIST test instances.

        If ``check_prediction`` is set, instances the concrete network
        misclassifies are skipped. ``rounding_error_margin`` absorbs
        floating-point noise in the comparison.
        """
        test_data_path = "test_data/mnist_test_100.csv"
        with open(test_data_path, "r") as test_file:
            test_instances = csv.reader(test_file, delimiter=",")
            for i, (label, *pixel_values) in enumerate(test_instances):
                if i >= num_samples:
                    break
                print("Testing test instance:", i)
                image, input_lb, input_ub = transform_and_bound(
                    pixel_values, data_config
                )
                if check_prediction:
                    pred_label = torch.argmax(original_network(image))
                    if pred_label != int(label):
                        print("Network fails on test image, skipping.")
                        continue
                (
                    output_lb_without_alpha,
                    output_ub_without_alpha,
                ) = get_deep_poly_bounds(network, input_lb, input_ub)
                output_lb_with_alpha = torch.full(
                    size=(10,), fill_value=0.0, dtype=torch.float64
                )
                output_ub_with_alpha = torch.full(
                    size=(10,), fill_value=float("inf"), dtype=torch.float64
                )
                for j in range(10):
                    (
                        alpha_lb,
                        alpha_ub,
                    ) = optimize_output_node_bounds_with_prima_crown(
                        network, j, input_lb, input_ub, optimize_alpha=True
                    )
                    output_lb_with_alpha[j] = alpha_lb
                    output_ub_with_alpha[j] = alpha_ub
                # Alpha must never be looser than plain DeepPoly.
                assert (
                    output_lb_with_alpha + rounding_error_margin
                    >= output_lb_without_alpha
                ).all()
                assert (
                    output_ub_with_alpha - rounding_error_margin
                    <= output_ub_without_alpha
                ).all()

    def test_small_mnist_net(self) -> None:
        """Fully-connected 2x50 MNIST net; misclassified instances skipped."""
        seed_everything(10)
        original_network = mnist_a_b(2, 50)
        original_network.load_state_dict(
            torch.load("networks/mnist_2_50_flattened.pyt")
        )
        network = AbstractNetwork.from_concrete_module(original_network, (784,))
        freeze_network(network)
        self._assert_alpha_tightens_deep_poly(
            original_network,
            network,
            MNIST_FC_DATA_TEST_CONFIG,
            check_prediction=True,
            rounding_error_margin=1e-5,
        )

    def test_small_cnn(self) -> None:
        """Small MNIST conv net; bounds checked for every instance."""
        seed_everything(10)
        original_network = mnist_conv_small()
        original_network.load_state_dict(
            torch.load("networks/mnist_convSmallRELU__Point.pyt")
        )
        network = AbstractNetwork.from_concrete_module(
            original_network, MNIST_INPUT_DIM
        )
        freeze_network(network)
        self._assert_alpha_tightens_deep_poly(
            original_network,
            network,
            MNIST_CONV_DATA_TEST_CONFIG,
            check_prediction=False,
            rounding_error_margin=1e-4,
        )
if __name__ == "__main__":
    alpha_suite = TestAlphaOptimization()
    alpha_suite.test_small_mnist_net()
| 4,879 | 34.362319 | 87 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/integration_tests/completeness_test.py | import csv
import torch
from gurobipy import GRB # type: ignore[import]
from src.abstract_layers.abstract_network import AbstractNetwork
from src.utilities.initialization import seed_everything
from src.utilities.loading.data import transform_and_bound
from src.utilities.loading.network import freeze_network, mnist_a_b, mnist_conv_tiny
from tests.gurobi_util import create_milp_model
from tests.test_util import (
MNIST_CONV_DATA_TEST_CONFIG,
MNIST_FC_DATA_TEST_CONFIG,
MNIST_INPUT_DIM,
lower_bound_output_node_with_branch_and_bound,
)
# Tolerance when comparing branch-and-bound lower bounds against the exact
# MILP lower bounds (used both for BaB early stopping and the final assert).
TOL = 1e-3
class TestCompleteness:
    """Checks that branch-and-bound reaches the exact MILP lower bounds
    (up to a tolerance) on the output nodes of small MNIST networks."""

    def _assert_bab_reaches_milp(
        self,
        original_network,
        network,
        data_config,
        tol,
        num_samples=10,
    ) -> None:
        """Shared driver: for each correctly classified test instance, solve
        the exact MILP lower bound per output node, then check that
        branch-and-bound (with early stopping at the MILP bound minus *tol*)
        gets at least as high, up to *tol*."""
        test_data_path = "test_data/mnist_test_100.csv"
        with open(test_data_path, "r") as test_file:
            test_instances = csv.reader(test_file, delimiter=",")
            for i, (label, *pixel_values) in enumerate(test_instances):
                if i >= num_samples:
                    break
                print("Testing soundness for test instance:", i)
                image, input_lb, input_ub = transform_and_bound(
                    pixel_values, data_config
                )
                pred_label = torch.argmax(original_network(image))
                if pred_label != int(label):
                    print("Network fails on test image, skipping.")
                    continue
                output_lb_bab = torch.full(
                    size=(10,), fill_value=0.0, dtype=torch.float64
                )
                output_lb_milp = torch.full(
                    size=(10,), fill_value=0.0, dtype=torch.float64
                )
                network.reset_input_bounds()
                network.set_layer_bounds_via_interval_propagation(input_lb, input_ub)
                model, var_list = create_milp_model(network, input_lb, input_ub)
                for j in range(10):
                    # The last 10 MILP variables are the output nodes.
                    obj = var_list[-10 + j]
                    model.setObjective(obj, GRB.MINIMIZE)
                    model.optimize()
                    assert model.status == GRB.OPTIMAL
                    output_lb_milp[j] = model.objVal
                    model.reset(0)
                    # skip the uninteresting cases
                    if output_lb_milp[j] != 0:
                        output_lb_bab[j] = lower_bound_output_node_with_branch_and_bound(
                            network,
                            j,
                            input_lb,
                            input_ub,
                            batch_sizes=[4, 4, 4],
                            early_stopping_threshold=output_lb_milp[j].item() - tol,
                            optimize_alpha=True,
                            optimize_prima=False,
                        )
                assert (output_lb_bab >= output_lb_milp - tol).all()

    def test_branch_and_bound_completeness_on_small_mnist_net(self) -> None:
        """Fully-connected 2x50 MNIST net."""
        seed_everything(10)
        original_network = mnist_a_b(2, 50)
        original_network.load_state_dict(
            torch.load("networks/mnist_2_50_flattened.pyt")
        )
        network = AbstractNetwork.from_concrete_module(original_network, (784,))
        freeze_network(network)
        self._assert_bab_reaches_milp(
            original_network, network, MNIST_FC_DATA_TEST_CONFIG, TOL
        )

    def test_branch_and_bound_completeness_on_small_mnist_conv_tiny(self) -> None:
        """Tiny MNIST conv net."""
        # BaB-Optimization not optimal with 20 iterations in alpha,
        # hence the looser tolerance than the module-level TOL.
        tol = 5e-3
        seed_everything(10)
        original_network = mnist_conv_tiny()
        original_network.load_state_dict(torch.load("networks/mnist_convTiny.pyt"))
        network = AbstractNetwork.from_concrete_module(
            original_network, MNIST_INPUT_DIM
        )
        freeze_network(network)
        self._assert_bab_reaches_milp(
            original_network, network, MNIST_CONV_DATA_TEST_CONFIG, tol
        )
if __name__ == "__main__":
    completeness_suite = TestCompleteness()
    completeness_suite.test_branch_and_bound_completeness_on_small_mnist_conv_tiny()
| 5,741 | 37.536913 | 88 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/integration_tests/soundness_test.py | import csv
from copy import deepcopy
import torch
from gurobipy import GRB # type: ignore[import]
from src.abstract_layers.abstract_network import AbstractNetwork
from src.utilities.initialization import seed_everything
from src.utilities.loading.data import transform_and_bound
from src.utilities.loading.network import freeze_network, mnist_a_b, mnist_conv_tiny
from tests.gurobi_util import create_milp_model
from tests.test_util import (
MNIST_CONV_DATA_TEST_CONFIG,
MNIST_FC_DATA_TEST_CONFIG,
MNIST_INPUT_DIM,
get_deep_poly_bounds,
lower_bound_output_node_with_branch_and_bound,
optimize_output_node_bounds_with_prima_crown,
)
# Numerical slack allowed when asserting that relaxation bounds contain the
# exact MILP bounds.
NUM_EPS = 1e-6
class TestSoundness:
def test_deep_poly_soundness_on_small_mnist_net(self) -> None:
seed_everything(10)
num_samples = 10
network_path = "networks/mnist_2_50_flattened.pyt"
original_network = mnist_a_b(2, 50)
state_dict = torch.load(network_path)
original_network.load_state_dict(state_dict)
network = AbstractNetwork.from_concrete_module(original_network, (784,))
freeze_network(network)
test_data_path = "test_data/mnist_test_100.csv"
test_file = open(test_data_path, "r")
test_instances = csv.reader(test_file, delimiter=",")
for i, (label, *pixel_values) in enumerate(test_instances):
if i >= num_samples:
break
print("Testing soundness for test instance:", i)
image, input_lb, input_ub = transform_and_bound(
pixel_values, MNIST_FC_DATA_TEST_CONFIG
)
pred_label = torch.argmax(original_network(image))
if pred_label != int(label):
print("Network fails on test image, skipping.")
continue
(
output_lb_deep_poly,
output_ub_deep_poly,
) = get_deep_poly_bounds(network, input_lb, input_ub)
network.reset_input_bounds()
network.reset_output_bounds()
network.set_layer_bounds_via_interval_propagation(input_lb, input_ub)
model, var_list = create_milp_model(network, input_lb, input_ub)
output_lb_milp = torch.full(size=(10,), fill_value=0.0, dtype=torch.float64)
output_ub_milp = torch.full(
size=(10,), fill_value=float("inf"), dtype=torch.float64
)
for j in range(10):
output_node_var = var_list[-10 + j]
obj = output_node_var
model.setObjective(obj, GRB.MINIMIZE)
model.optimize()
assert model.status == GRB.OPTIMAL
output_lb_milp[j] = model.objVal
model.reset(0)
model.setObjective(obj, GRB.MAXIMIZE)
model.optimize()
assert model.status == GRB.OPTIMAL
output_ub_milp[j] = model.objVal
model.reset(0)
assert (output_lb_deep_poly <= output_lb_milp + NUM_EPS).all()
assert (output_ub_deep_poly >= output_ub_milp - NUM_EPS).all()
network.reset_input_bounds()
def test_prima_crown_alpha_soundness_on_small_mnist_net(self) -> None:
seed_everything(10)
num_samples = 10
network_path = "networks/mnist_2_50_flattened.pyt"
original_network = mnist_a_b(2, 50)
state_dict = torch.load(network_path)
original_network.load_state_dict(state_dict)
network = AbstractNetwork.from_concrete_module(original_network, (784,))
freeze_network(network)
test_data_path = "test_data/mnist_test_100.csv"
test_file = open(test_data_path, "r")
test_instances = csv.reader(test_file, delimiter=",")
for i, (label, *pixel_values) in enumerate(test_instances):
if i >= num_samples:
break
print("Testing soundness for test instance:", i)
image, input_lb, input_ub = transform_and_bound(
pixel_values, MNIST_FC_DATA_TEST_CONFIG
)
pred_label = torch.argmax(original_network(image))
if pred_label != int(label):
print("Network fails on test image, skipping.")
continue
output_lb_prima_crown = torch.full(
size=(10,), fill_value=0.0, dtype=torch.float64
)
output_ub_prima_crown = torch.full(
size=(10,), fill_value=float("inf"), dtype=torch.float64
)
output_lb_milp = torch.full(size=(10,), fill_value=0.0, dtype=torch.float64)
output_ub_milp = torch.full(
size=(10,), fill_value=float("inf"), dtype=torch.float64
)
network.reset_input_bounds()
network.reset_output_bounds()
network.set_layer_bounds_via_interval_propagation(input_lb, input_ub)
model, var_list = create_milp_model(network, input_lb, input_ub)
for j in range(10):
output_node_var = var_list[-10 + j]
obj = output_node_var
model.setObjective(obj, GRB.MINIMIZE)
model.optimize()
assert model.status == GRB.OPTIMAL
output_lb_milp[j] = model.objVal
model.reset(0)
model.setObjective(obj, GRB.MAXIMIZE)
model.optimize()
assert model.status == GRB.OPTIMAL
output_ub_milp[j] = model.objVal
model.reset(0)
(
prima_crown_alpha_lb,
prima_crown_alpha_ub,
) = optimize_output_node_bounds_with_prima_crown(
network,
j,
input_lb,
input_ub,
optimize_alpha=True,
)
output_lb_prima_crown[j] = prima_crown_alpha_lb
output_ub_prima_crown[j] = prima_crown_alpha_ub
assert (output_lb_prima_crown <= output_lb_milp + NUM_EPS).all()
assert (output_ub_prima_crown >= output_ub_milp - NUM_EPS).all()
def test_prima_crown_alpha_prima_soundness_on_small_mnist_net(self) -> None:
seed_everything(10)
num_samples = 10
network_path = "networks/mnist_2_50_flattened.pyt"
original_network = mnist_a_b(2, 50)
state_dict = torch.load(network_path)
original_network.load_state_dict(state_dict)
network = AbstractNetwork.from_concrete_module(original_network, (784,))
freeze_network(network)
test_data_path = "test_data/mnist_test_100.csv"
test_file = open(test_data_path, "r")
test_instances = csv.reader(test_file, delimiter=",")
for i, (label, *pixel_values) in enumerate(test_instances):
if i >= num_samples:
break
print("Testing soundness for test instance:", i)
image, input_lb, input_ub = transform_and_bound(
pixel_values, MNIST_FC_DATA_TEST_CONFIG
)
pred_label = torch.argmax(original_network(image))
if pred_label != int(label):
print("Network fails on test image, skipping.")
continue
output_lb_prima_crown = torch.full(
size=(10,), fill_value=0.0, dtype=torch.float64
)
output_ub_prima_crown = torch.full(
size=(10,), fill_value=float("inf"), dtype=torch.float64
)
output_lb_milp = torch.full(size=(10,), fill_value=0.0, dtype=torch.float64)
output_ub_milp = torch.full(
size=(10,), fill_value=float("inf"), dtype=torch.float64
)
network.reset_input_bounds()
network.reset_output_bounds()
network.set_layer_bounds_via_interval_propagation(input_lb, input_ub)
model, var_list = create_milp_model(network, input_lb, input_ub)
for j in range(10):
output_node_var = var_list[-10 + j]
obj = output_node_var
model.setObjective(obj, GRB.MINIMIZE)
model.optimize()
assert model.status == GRB.OPTIMAL
output_lb_milp[j] = model.objVal
model.reset(0)
model.setObjective(obj, GRB.MAXIMIZE)
model.optimize()
assert model.status == GRB.OPTIMAL
output_ub_milp[j] = model.objVal
model.reset(0)
(
prima_crown_alpha_prima_lb,
prima_crown_alpha_prima_ub,
) = optimize_output_node_bounds_with_prima_crown(
network,
j,
input_lb,
input_ub,
optimize_alpha=True,
optimize_prima=True,
)
output_lb_prima_crown[j] = prima_crown_alpha_prima_lb
output_ub_prima_crown[j] = prima_crown_alpha_prima_ub
assert (output_lb_prima_crown <= output_lb_milp).all()
assert (output_ub_prima_crown >= output_ub_milp).all()
def test_prima_crown_alpha_prima_soundness_on_mnist_conv_tiny(self) -> None:
seed_everything(10)
num_samples = 10
network_path = "networks/mnist_convTiny.pyt"
original_network = mnist_conv_tiny()
state_dict = torch.load(network_path)
original_network.load_state_dict(state_dict)
network = AbstractNetwork.from_concrete_module(
original_network, MNIST_INPUT_DIM
)
freeze_network(network)
test_data_path = "test_data/mnist_test_100.csv"
test_file = open(test_data_path, "r")
test_instances = csv.reader(test_file, delimiter=",")
for i, (label, *pixel_values) in enumerate(test_instances):
if i >= num_samples:
break
print("Testing soundness for test instance:", i)
image, input_lb, input_ub = transform_and_bound(
pixel_values, MNIST_CONV_DATA_TEST_CONFIG
)
pred_label = torch.argmax(original_network(image))
if pred_label != int(label):
print("Network fails on test image, skipping.")
continue
output_lb_prima_crown = torch.full(
size=(10,), fill_value=0.0, dtype=torch.float64
)
output_ub_prima_crown = torch.full(
size=(10,), fill_value=float("inf"), dtype=torch.float64
)
output_lb_milp = torch.full(size=(10,), fill_value=0.0, dtype=torch.float64)
output_ub_milp = torch.full(
size=(10,), fill_value=float("inf"), dtype=torch.float64
)
network.reset_input_bounds()
network.reset_output_bounds()
network.set_layer_bounds_via_interval_propagation(input_lb, input_ub)
model, var_list = create_milp_model(network, input_lb, input_ub)
for j in range(10):
output_node_var = var_list[-10 + j]
obj = output_node_var
model.setObjective(obj, GRB.MINIMIZE)
model.optimize()
assert model.status == GRB.OPTIMAL
output_lb_milp[j] = model.objVal
model.reset(0)
model.setObjective(obj, GRB.MAXIMIZE)
model.optimize()
assert model.status == GRB.OPTIMAL
output_ub_milp[j] = model.objVal
model.reset(0)
(
prima_crown_alpha_prima_lb,
prima_crown_alpha_prima_ub,
) = optimize_output_node_bounds_with_prima_crown(
network,
j,
input_lb,
input_ub,
optimize_alpha=True,
optimize_prima=True,
)
output_lb_prima_crown[j] = prima_crown_alpha_prima_lb
output_ub_prima_crown[j] = prima_crown_alpha_prima_ub
assert (output_lb_prima_crown <= output_lb_milp + NUM_EPS).all()
assert (output_ub_prima_crown >= output_ub_milp - NUM_EPS).all()
    def test_branch_and_bound_soundness_on_small_mnist_net(self) -> None:
        """Check BaB lower bounds never exceed exact MILP lower bounds on a small MNIST FC net.

        For each correctly classified test image, the exact per-output lower bound is
        computed with a Gurobi MILP; branch-and-bound must produce a value that is at
        most the MILP bound (up to a small numerical tolerance), otherwise BaB is unsound.
        """
        seed_everything(10)
        num_samples = 10
        network_path = "networks/mnist_2_50_flattened.pyt"
        original_network = mnist_a_b(2, 50)
        state_dict = torch.load(network_path)
        original_network.load_state_dict(state_dict)
        network = AbstractNetwork.from_concrete_module(original_network, (784,))
        freeze_network(network)
        test_data_path = "test_data/mnist_test_100.csv"
        test_file = open(test_data_path, "r")
        test_instances = csv.reader(test_file, delimiter=",")
        test_config = deepcopy(MNIST_FC_DATA_TEST_CONFIG)
        test_config.eps = 0.005
        tolerance = 1e-5
        for i, (label, *pixel_values) in enumerate(test_instances):
            if i >= num_samples:
                break
            print("Testing soundness for test instance:", i)
            image, input_lb, input_ub = transform_and_bound(pixel_values, test_config)
            pred_label = torch.argmax(original_network(image))
            if pred_label != int(label):
                # Only verify images the concrete network classifies correctly.
                print("Network fails on test image, skipping.")
                continue
            output_lb_bab = torch.full(size=(10,), fill_value=0.0, dtype=torch.float64)
            output_lb_milp = torch.full(size=(10,), fill_value=0.0, dtype=torch.float64)
            network.reset_input_bounds()
            network.reset_output_bounds()
            # Interval propagation provides the intermediate bounds the MILP encoding needs.
            network.set_layer_bounds_via_interval_propagation(input_lb, input_ub)
            model, var_list = create_milp_model(network, input_lb, input_ub)
            for j in range(10):
                # The last 10 variables correspond to the 10 output logits.
                output_node_var = var_list[-10 + j]
                obj = output_node_var
                model.setObjective(obj, GRB.MINIMIZE)
                model.optimize()
                assert model.status == GRB.OPTIMAL
                output_lb_milp[j] = model.objVal
                model.reset(0)
                # skip the uninteresting cases
                if output_lb_milp[j] != 0:
                    output_lb_bab[j] = lower_bound_output_node_with_branch_and_bound(
                        network,
                        j,
                        input_lb,
                        input_ub,
                        batch_sizes=[4, 4, 4],
                        optimize_alpha=True,
                        optimize_prima=False,
                    )
            # Soundness: BaB lower bound must not exceed the exact MILP lower bound.
            assert (output_lb_bab <= output_lb_milp + tolerance).all()
    def test_branch_and_bound_soundness_on_mnist_conv_tiny(self) -> None:
        """Check BaB lower bounds against exact MILP bounds on a tiny MNIST conv net.

        Same procedure as the FC-net soundness test, but with a convolutional network
        and the convolutional data/test configuration.
        """
        seed_everything(10)
        num_samples = 10
        network_path = "networks/mnist_convTiny.pyt"
        original_network = mnist_conv_tiny()
        state_dict = torch.load(network_path)
        original_network.load_state_dict(state_dict)
        network = AbstractNetwork.from_concrete_module(
            original_network, MNIST_INPUT_DIM
        )
        freeze_network(network)
        test_data_path = "test_data/mnist_test_100.csv"
        test_file = open(test_data_path, "r")
        test_instances = csv.reader(test_file, delimiter=",")
        test_config = deepcopy(MNIST_CONV_DATA_TEST_CONFIG)
        test_config.eps = 0.005
        tolerance = 1e-5
        for i, (label, *pixel_values) in enumerate(test_instances):
            if i >= num_samples:
                break
            print("Testing soundness for test instance:", i)
            image, input_lb, input_ub = transform_and_bound(pixel_values, test_config)
            pred_label = torch.argmax(original_network(image))
            if pred_label != int(label):
                # Only verify images the concrete network classifies correctly.
                print("Network fails on test image, skipping.")
                continue
            output_lb_bab = torch.full(size=(10,), fill_value=0.0, dtype=torch.float64)
            output_lb_milp = torch.full(size=(10,), fill_value=0.0, dtype=torch.float64)
            network.reset_input_bounds()
            network.reset_output_bounds()
            network.set_layer_bounds_via_interval_propagation(input_lb, input_ub)
            model, var_list = create_milp_model(network, input_lb, input_ub)
            for j in range(10):
                # The last 10 variables correspond to the 10 output logits.
                output_node_var = var_list[-10 + j]
                obj = output_node_var
                model.setObjective(obj, GRB.MINIMIZE)
                model.optimize()
                assert model.status == GRB.OPTIMAL
                output_lb_milp[j] = model.objVal
                model.reset(0)
                # skip the uninteresting cases
                if output_lb_milp[j] != 0:
                    output_lb_bab[j] = lower_bound_output_node_with_branch_and_bound(
                        network,
                        j,
                        input_lb,
                        input_ub,
                        batch_sizes=[4, 4, 4],
                        optimize_alpha=True,
                        optimize_prima=False,
                    )
            # Soundness: BaB lower bound must not exceed the exact MILP lower bound.
            assert (output_lb_bab <= output_lb_milp + tolerance).all()
if __name__ == "__main__":
    # Manual entry point: run the soundness checks directly without pytest.
    # (The deep-poly soundness test is intentionally left disabled here.)
    suite = TestSoundness()
    suite.test_prima_crown_alpha_soundness_on_small_mnist_net()
    suite.test_branch_and_bound_soundness_on_small_mnist_net()
| 17,642 | 38.033186 | 88 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/integration_tests/vnn21_to_as_test.py | import os
import numpy as np
import onnxruntime as ort # type: ignore[import]
import pytest
import torch
from torch import nn
from torch.distributions.beta import Beta
from src.abstract_layers.abstract_network import AbstractNetwork
from src.utilities.initialization import seed_everything
from src.utilities.loading.network import load_onnx_model
from tests.test_util import get_deep_poly_bounds, get_deep_poly_lower_bounds
def is_float_try(value: str) -> bool:
    """Return True if ``value`` can be parsed as a float, False otherwise.

    Args:
        value: The string to test (e.g. a filename fragment such as "200").

    Returns:
        True when ``float(value)`` succeeds; False when it raises ``ValueError``.
    """
    # Parameter renamed from ``str`` to avoid shadowing the builtin type.
    try:
        float(value)
        return True
    except ValueError:
        return False
class TestVNN:
    """Integration tests for the VNN'21 benchmark networks.

    Verifies that (a) the ONNX-to-PyTorch conversion agrees with onnxruntime on
    random inputs, and (b) DeepPoly bounds computed on the converted abstract
    networks are sound with respect to sampled concrete executions.
    """

    def test_onnx_specific_net(self) -> None:
        """Differential-test one specific CIFAR-10 net and run DeepPoly on it."""
        onnx_path = "benchmarks_vnn21/marabou-cifar10/nets/cifar10_small.onnx"
        ort_session = ort.InferenceSession(onnx_path)
        o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
        in_shape_batch = (1, *in_shape)
        o2p_net.eval()
        # Converted PyTorch net must match onnxruntime on random inputs.
        for i in range(50):
            x = torch.rand(in_shape_batch)
            output_onnx = ort_session.run(
                None,
                {in_name: np.array(x).astype(np.float32)},
            )[0]
            out_o2p_net = o2p_net(x)
            assert torch.isclose(
                torch.Tensor(output_onnx), out_o2p_net, atol=1e-5
            ).all()
        assert isinstance(o2p_net, nn.Sequential)
        abs_net = AbstractNetwork.from_concrete_module(o2p_net, in_shape)
        eps = 2 / 255
        # Drop singleton dimensions (keeping a leading batch dim of 1).
        in_shape_squeezed = tuple([1] + [i for i in in_shape_batch if i != 1])
        input = torch.rand(in_shape_squeezed)
        input_lb = input - eps
        input_ub = input + eps
        (
            output_lb_without_alpha,
            output_ub_without_alpha,
        ) = get_deep_poly_bounds(abs_net, input_lb, input_ub)
        print(f"Succesful run of {onnx_path} - LB {output_lb_without_alpha}")

    # Creating a coverage file here requires too much RAM
    @pytest.mark.skip(reason="Creating a coverage file here requires too much RAM")
    def test_onnx_to_abstract_net_benchmark(self) -> None:
        """Convert every VNN'21 benchmark net and sanity-check DeepPoly soundness."""
        seed_everything(42)
        dir = "benchmarks_vnn21"
        eps = 0.1
        # Beta(0.5, 0.5) concentrates samples near the box corners, where
        # bound violations are most likely to show up.
        m = Beta(concentration0=0.5, concentration1=0.5)
        for root, dirs, files in os.walk(dir):
            for name in files:
                if name.split(".")[-1] == "onnx":
                    # Networks known to be too large / unsupported are skipped.
                    if name in [
                        "cifar10_2_255_simplified.onnx",
                        "cifar10_2_255.onnx",
                        "cifar10_8_255.onnx",
                        "convBigRELU__PGD.onnx",
                        "Convnet_maxpool.onnx",
                    ]:
                        continue
                    try:
                        onnx_path = os.path.join(root, name)
                        # compare onnx to pytorch
                        o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
                        o2p_net.eval()
                        assert isinstance(o2p_net, nn.Sequential)
                        abs_net = AbstractNetwork.from_concrete_module(
                            o2p_net, in_shape
                        )
                        in_shape_squeezed = tuple([1] + [i for i in in_shape if i != 1])
                        batch_in_shape = (1, *in_shape)
                        input = torch.rand(in_shape_squeezed)
                        input_lb = input - eps
                        input_ub = input + eps
                        (
                            output_lb_without_alpha,
                            output_ub_without_alpha,
                        ) = get_deep_poly_bounds(abs_net, input_lb, input_ub)
                        # Lower-bound-only query must agree with the joint query.
                        only_lb, _ = get_deep_poly_lower_bounds(
                            abs_net, input_lb, input_ub
                        )
                        assert (output_lb_without_alpha == only_lb).all()
                        shape_check = (256, *in_shape)
                        if (
                            len(input_lb.shape) < len(batch_in_shape)
                            and input_lb.shape == batch_in_shape[: len(input_lb.shape)]
                        ):
                            input_lb = input_lb.unsqueeze(-1)
                        # Sample concrete points in the box; every output must lie
                        # within the DeepPoly output bounds.
                        for _ in range(2):
                            check_x = input_lb.broadcast_to(
                                batch_in_shape
                            ) + 2 * eps * m.sample(shape_check)
                            if len(check_x.shape) == 1:  # Special case for Nano input
                                check_x = check_x.unsqueeze(1)
                            out = o2p_net(check_x)
                            assert (output_lb_without_alpha <= out).all() and (
                                out <= output_ub_without_alpha
                            ).all()
                        print(f"Succesful run of {onnx_path}")
                    except Exception as e:
                        assert False, f"Couldn't run {onnx_path} - skipping with: {e}"
if __name__ == "__main__":
    # Allow running the VNN integration checks directly, without pytest.
    runner = TestVNN()
    runner.test_onnx_specific_net()
    runner.test_onnx_to_abstract_net_benchmark()
| 5,121 | 36.940741 | 88 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/integration_tests/fuzzing_tests.py | import os.path
import shutil
import time
from pathlib import Path
from typing import Callable, Tuple
import onnx # type: ignore [import]
import torch
from torch import Tensor
from src.abstract_layers.abstract_network import AbstractNetwork
from src.utilities.config import make_backsubstitution_config
from src.utilities.initialization import seed_everything
from src.utilities.loading.dnnv_simplify import simplify_onnx
from src.utilities.loading.network import (
freeze_network,
load_onnx_from_proto,
load_onnx_model,
)
from tests.test_util import ( # dpf_call,; get_nn4sys_128d_block,; get_nn4sys_128d_multipath_block_stacked,; get_nn4sys_128d_splitblock,
abs_toy_pad_net,
abs_toy_pad_tiny_net,
dp_call,
get_deep_poly_bounds,
get_mnist_net,
get_relu_lin_layer,
get_three_relu_lin_layer,
get_two_relu_lin_layer,
milp_call,
prima_crown_wrapper_call,
run_fuzzing_test,
toy_max_pool_mixed_net,
)
# Short alias so the many fuzzing call sites below stay compact.
TEST = run_fuzzing_test
# Absolute directory of this test file; used to resolve benchmark ONNX paths.
SCRIPT_PATH = os.path.realpath(os.path.dirname(__file__))

# from tests.test_util import toy_max_pool_tiny_net
class TestFuzzing:
    """Fuzzing-based soundness tests.

    Each test builds an abstract network, perturbs random inputs within an
    eps-box, and runs the fuzzing harness (``run_fuzzing_test``) against a
    given bounding method to look for soundness violations.
    """

    @staticmethod
    def fuzzing_test_network(
        network_constructor: Callable[[], Tuple[AbstractNetwork, Tuple[int, ...]]],
        bounding_call: Callable[
            [AbstractNetwork, Tuple[Tensor, Tensor]], Tuple[Tensor, Tensor]
        ],
        eps: float = 0.1,
        n: int = 20,
        input_domain: Tuple[float, float] = (-1, 1),
    ) -> None:
        """Fuzz one network with n random eps-boxes drawn from input_domain.

        Runs everything in float64 for numerical headroom and restores the
        default float32 dtype afterwards.
        """
        device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
        dtype = torch.float64
        torch.set_default_dtype(dtype)
        seed_everything(42)
        network, input_dim = network_constructor()
        network = network.to(device).to(dtype=dtype)
        network.eval()
        freeze_network(network)
        print(f"Testing with eps={eps}.")
        batched_in_shape = (1, *input_dim)
        for i in range(n):
            # Re-seed per iteration so failures are reproducible in isolation.
            seed_everything(42 + i)
            network.reset_output_bounds()
            network.reset_input_bounds()
            # NOTE(review): "- input_domain[0] + input_domain[0]" cancels, so this
            # effectively samples from [0, input_domain[1]); likely intended
            # rand * (hi - lo) + lo — confirm before relying on input_domain[0].
            input = (
                torch.rand(batched_in_shape, device=device) * input_domain[1]
                - (input_domain[0])
                + input_domain[0]
            )
            input_lb = (input - eps)[0]
            input_ub = (input + eps)[0]
            TEST(
                network,
                input,
                input_lb,
                input_ub,
                batched_in_shape,
                bounding_call=bounding_call,
                use_beta=True,
                use_adv=True,
            )
        torch.set_default_dtype(torch.float32)

    def test_maxpool_toy_example(self) -> None:
        """Fuzz the mixed max-pool toy net with DeepPoly and MILP bounding."""
        self.fuzzing_test_network(toy_max_pool_mixed_net, dp_call, 0.001, 1)
        self.fuzzing_test_network(toy_max_pool_mixed_net, milp_call, 0.001, 1)
        self.fuzzing_test_network(toy_max_pool_mixed_net, dp_call, 0.01, 1)
        self.fuzzing_test_network(toy_max_pool_mixed_net, milp_call, 0.01, 1)

    def test_pad_toy_example(self) -> None:
        """Fuzz the padding toy nets with DeepPoly and MILP bounding."""
        self.fuzzing_test_network(abs_toy_pad_net, dp_call, 0.01, 1)
        self.fuzzing_test_network(abs_toy_pad_net, milp_call, 0.01, 1)
        self.fuzzing_test_network(abs_toy_pad_tiny_net, dp_call, 0.01, 1)
        self.fuzzing_test_network(abs_toy_pad_tiny_net, milp_call, 0.01, 1)

    def test_alpha_optimization(self) -> None:
        """Fuzz the MNIST net with plain DeepPoly and alpha-optimized bounding."""
        alpha_call = prima_crown_wrapper_call(optimize_alpha=True, optimize_prima=False)
        self.fuzzing_test_network(get_mnist_net, dp_call, 0.001, 5)
        self.fuzzing_test_network(get_mnist_net, alpha_call, 0.001, 5)
        self.fuzzing_test_network(get_mnist_net, alpha_call, 0.01, 5)

    def test_alpha_prima_optimization(self) -> None:
        """Fuzz small ReLU/linear stacks with alpha+PRIMA-optimized bounding."""
        alpha_prima_call = prima_crown_wrapper_call(
            optimize_alpha=True, optimize_prima=True
        )
        # MNIST-net runs are disabled here; only the small synthetic stacks are fuzzed.
        self.fuzzing_test_network(get_relu_lin_layer, alpha_prima_call, 0.4, 10)
        self.fuzzing_test_network(get_two_relu_lin_layer, alpha_prima_call, 0.2, 10)
        self.fuzzing_test_network(get_three_relu_lin_layer, alpha_prima_call, 0.2, 10)
        self.fuzzing_test_network(get_three_relu_lin_layer, alpha_prima_call, 0.1, 5)
        self.fuzzing_test_network(get_three_relu_lin_layer, alpha_prima_call, 0.01, 5)

    def test_nn4sys(self) -> None:
        """Fuzz the nn4sys mscn_128d networks (single and dual) across many eps values."""

        def get_nn4sys_128d() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
            # Load the single-path mscn_128d benchmark network from ONNX.
            onnx_path = os.path.realpath(
                os.path.join(
                    SCRIPT_PATH,
                    "../../vnn-comp-2022-sup/benchmarks/nn4sys/onnx/mscn_128d.onnx",
                )
            )
            o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
            freeze_network(o2p_net)
            o2p_net.eval()
            abs_net = AbstractNetwork.from_concrete_module(o2p_net, in_shape)
            return abs_net, in_shape

        def get_nn4sys_128d_dual() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
            # Load the dual-path mscn_128d benchmark network from ONNX.
            onnx_path = os.path.realpath(
                os.path.join(
                    SCRIPT_PATH,
                    "../../vnn-comp-2022-sup/benchmarks/nn4sys/onnx/mscn_128d_dual.onnx",
                )
            )
            o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
            freeze_network(o2p_net)
            o2p_net.eval()
            abs_net = AbstractNetwork.from_concrete_module(o2p_net, in_shape)
            return abs_net, in_shape

        def prev_interval_to_call(
            call: Callable[
                [AbstractNetwork, Tuple[Tensor, Tensor]], Tuple[Tensor, Tensor]
            ]
        ) -> Callable[[AbstractNetwork, Tuple[Tensor, Tensor]], Tuple[Tensor, Tensor]]:
            # Wrap a bounding call so that interval propagation is run first to
            # populate intermediate layer bounds.
            def internal_call(
                net: AbstractNetwork, bounds: Tuple[Tensor, Tensor]
            ) -> Tuple[Tensor, Tensor]:
                bounds = (bounds[0].unsqueeze(0), bounds[1].unsqueeze(0))
                # Return values are unused; the call sets layer bounds as a side effect.
                lb, ub = net.set_layer_bounds_via_interval_propagation(
                    bounds[0], bounds[1], use_existing_bounds=False, has_batch_dim=True
                )
                return call(net, bounds)

            return internal_call

        # Dual network over a decreasing eps ladder.
        self.fuzzing_test_network(get_nn4sys_128d_dual, prev_interval_to_call(dp_call), 0.1, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d_dual, prev_interval_to_call(dp_call), 0.05, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d_dual, prev_interval_to_call(dp_call), 0.01, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d_dual, prev_interval_to_call(dp_call), 0.005, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d_dual, prev_interval_to_call(dp_call), 0.001, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d_dual, prev_interval_to_call(dp_call), 0.0005, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d_dual, prev_interval_to_call(dp_call), 0.0001, 5, input_domain=(0.1, 2))
        # Single network over the same eps ladder.
        self.fuzzing_test_network(get_nn4sys_128d, prev_interval_to_call(dp_call), 0.1, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d, prev_interval_to_call(dp_call), 0.05, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d, prev_interval_to_call(dp_call), 0.01, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d, prev_interval_to_call(dp_call), 0.005, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d, prev_interval_to_call(dp_call), 0.001, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d, prev_interval_to_call(dp_call), 0.0005, 5, input_domain=(0.1, 2))
        self.fuzzing_test_network(get_nn4sys_128d, prev_interval_to_call(dp_call), 0.0001, 5, input_domain=(0.1, 2))
        # alpha / alpha+prima variants are intentionally disabled here.

    def test_carvana_unet(self) -> None:
        """Fuzz the small carvana U-Net using a forward-DP pass followed by DeepPoly."""

        def get_carvana_unet() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
            path = "vnn-comp-2022-sup/benchmarks/carvana_unet_2022/onnx/unet_simp_small.onnx"
            o2p_net, in_shape, _ = load_onnx_model(path)
            o2p_net.eval()
            abs_net = AbstractNetwork.from_concrete_module(o2p_net, in_shape)
            return abs_net, (1, *in_shape)

        def forward_to_dp(
            net: AbstractNetwork, bounds: Tuple[Tensor, Tensor]
        ) -> Tuple[Tensor, Tensor]:
            bounds = (bounds[0], bounds[1])
            bs_config = make_backsubstitution_config(
                use_dependence_sets=False,
                use_early_termination=False,
                max_num_queries=1000,
            )
            with torch.no_grad():
                # First populate intermediate bounds with a forward DeepPoly pass...
                net.set_layer_bounds_via_forward_dp_pass(
                    bs_config,
                    input_lb=bounds[0],
                    input_ub=bounds[1],
                    timeout=time.time() + 200,
                )
                # ...then compute output bounds without recomputing the intermediates.
                return get_deep_poly_bounds(
                    net,
                    bounds[0],
                    bounds[1],
                    use_dependence_sets=False,
                    use_early_termination=False,
                    reset_input_bounds=False,
                    recompute_intermediate_bounds=False,
                    max_num_query=500,
                )

        self.fuzzing_test_network(get_carvana_unet, forward_to_dp, 0.03, 5)

    def test_onnx_simplification(self) -> None:
        """Fuzz a TLL benchmark net; the DNNV-style ONNX simplification path is off by default."""

        def get_tll_onnx() -> Tuple[AbstractNetwork, Tuple[int, ...]]:
            onnx_path = "vnn-comp-2022-sup/benchmarks/tllverifybench/onnx/tllBench_n=2_N=M=24_m=1_instance_2_3.onnx.gz"
            o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
            freeze_network(o2p_net)
            o2p_net.eval()
            o2p_net.to(dtype=torch.get_default_dtype())
            # Flip to True to exercise the round-trip export + simplify_onnx path.
            simplify = False
            if simplify:
                assert in_shape is not None
                assert in_name is not None
                # export current model to onnx for dtype
                try:
                    temp_dir = "temp_convert"
                    net_pref = "simplify"
                    onnx_path = f"{temp_dir}/{net_pref}.onnx"
                    Path(temp_dir).mkdir(parents=True, exist_ok=True)
                    x = torch.rand((1, *in_shape), device="cpu")
                    torch.onnx.export(
                        o2p_net,
                        x,
                        onnx_path,
                        export_params=True,
                        training=0,
                        do_constant_folding=True,
                        verbose=False,
                        input_names=[in_name],
                        output_names=["output"],
                    )
                    onnx_model = onnx.load(onnx_path)
                    onnx_model = simplify_onnx(onnx_model)
                    net_new, _, _ = load_onnx_from_proto(onnx_model)
                    o2p_net = net_new
                except Exception as e:
                    # Best-effort: fall back to the unsimplified net on failure.
                    print("Exception simplifying onnx model", e)
                finally:
                    shutil.rmtree(temp_dir, ignore_errors=True)
            abs_net = AbstractNetwork.from_concrete_module(o2p_net, in_shape)
            return abs_net, (1, *in_shape)

        self.fuzzing_test_network(get_tll_onnx, dp_call, 10, 1)
if __name__ == "__main__":
    # Manual entry point: only the nn4sys fuzzing check is currently enabled;
    # the remaining suites (alpha+prima, maxpool, carvana, onnx-simplify) are off.
    suite = TestFuzzing()
    suite.test_nn4sys()
| 13,984 | 31.075688 | 137 | py |
mn-bab-SABR_ready | mn-bab-SABR_ready/tests/integration_tests/onnx_integration_tests.py | import gzip
import os
import re
import shutil
from pathlib import Path
from typing import Tuple
import numpy as np
# import onnx
import onnxruntime as ort # type: ignore[import]
import torch
from torch import nn
from src.abstract_layers.abstract_network import AbstractNetwork
from src.utilities.loading.network import load_net, load_onnx_model
def is_float_try(value: str) -> bool:
    """Return True if ``value`` can be parsed as a float, False otherwise.

    Args:
        value: The string to test (e.g. a network-filename fragment such as "50").

    Returns:
        True when ``float(value)`` succeeds; False when it raises ``ValueError``.
    """
    # Parameter renamed from ``str`` to avoid shadowing the builtin type.
    try:
        float(value)
        return True
    except ValueError:
        return False
class TestONNX:
    """Tests for the ONNX parser / converter.

    Covers a round-trip differential test (PyTorch -> ONNX -> PyTorch) and
    conversion checks against onnxruntime on the VNN'21 and VNN'22 benchmarks.
    """

    def test_onnx_differential(self) -> None:
        """Export each local network to ONNX, re-import it, and compare outputs."""
        dir = "networks"
        temp_dir = "tests/temp"
        try:
            Path(temp_dir).mkdir(parents=True, exist_ok=True)
            for net_name in sorted(os.listdir(dir), reverse=True):
                f = os.path.join(dir, net_name)
                # Filenames encode dataset and architecture, e.g. "mnist_2_50_flattened.pyt".
                net_split = re.split(r"_", net_name)
                dataset = net_split[0]
                net_pref = re.split(r"\.", net_name)[0]
                shape: Tuple[int, ...] = (784,)
                if dataset == "cifar" or dataset == "cifar10":
                    shape = (1, 3, 32, 32)
                elif dataset == "mnist":
                    if "flattened" in net_name:
                        shape = (1, 784)
                    else:
                        continue
                elif dataset == "resnet":
                    shape = (1, 3, 32, 32)
                else:
                    print(f"Unknown dataset {dataset}")
                    continue
                n_layers = 3
                n_neurons = 200
                # Override defaults when the filename carries "<layers>_<neurons>".
                if (
                    len(net_split) >= 2
                    and is_float_try(net_split[1])
                    and is_float_try(net_split[2])
                ):
                    n_layers = int(net_split[1])
                    n_neurons = int(net_split[2])
                try:
                    net_pt = load_net(f, n_layers, n_neurons)
                    print(f"Successfully loaded {net_name}")
                except Exception:
                    print(f"Couldn't load {net_name} - skipping")
                    continue
                net_pt.eval()
                x = torch.rand(shape)
                # store network to onnx
                onnx_path = f"{temp_dir}/{net_pref}.onnx"
                torch.onnx.export(
                    net_pt,
                    x,
                    onnx_path,
                    export_params=True,
                    training=0,
                    do_constant_folding=False,
                    verbose=False,
                    input_names=["input.1"],
                    output_names=["output"],
                )
                # load network from onnx
                # ort_session = ort.InferenceSession(onnx_path)
                # compare onnx to pytorch
                o2p_net, _, in_name = load_onnx_model(onnx_path)
                o2p_net.eval()
                # Compare results on random inputs.
                for i in range(10):
                    x = torch.rand(shape)
                    out_pt_net = net_pt(x)
                    out_o2p_net = o2p_net(x)
                    assert torch.isclose(out_pt_net, out_o2p_net).all()
                assert isinstance(o2p_net, nn.Sequential)
                # Conversion to an abstract network must succeed as well.
                AbstractNetwork.from_concrete_module(o2p_net, (1, *shape))
        finally:
            shutil.rmtree(temp_dir, ignore_errors=True)

    def test_onnx_benchmark21(self) -> None:
        """Compare converted VNN'21 benchmark nets against onnxruntime."""
        dir = "benchmarks_vnn21"
        for root, dirs, files in os.walk(dir):
            for name in files:
                if name.split(".")[-1] == "onnx":
                    try:
                        onnx_path = os.path.join(root, name)
                        ort_session = ort.InferenceSession(onnx_path)
                        # compare onnx to pytorch
                        o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
                        in_shape = (1, *in_shape)
                        o2p_net.eval()
                        for i in range(50):
                            x = torch.rand(in_shape)
                            output_onnx = ort_session.run(
                                None,
                                {in_name: np.array(x).astype(np.float32)},
                            )[0]
                            out_o2p_net = o2p_net(x)
                            assert torch.isclose(
                                torch.Tensor(output_onnx), out_o2p_net, atol=1e-5
                            ).all()
                        print(f"Successfully ran {onnx_path}")
                    except Exception as e:
                        # Best-effort sweep: report and continue on failures.
                        print(f"Couldn't load {onnx_path} - skipping with {e}")

    def test_onnx_benchmark22(self) -> None:
        """Compare converted VNN'22 benchmark nets (incl. gzipped ONNX) against onnxruntime."""
        dir = "vnn-comp-2022-sup/benchmarks"
        for root, dirs, files in os.walk(dir):
            for name in files:
                # Accept ".onnx" and ".onnx.gz" but not ".vnnlib.gz" spec files.
                if (
                    name.split(".")[-1] in ["onnx", "gz"]
                    and name.split(".")[-2] != "vnnlib"
                ):
                    try:
                        onnx_path = os.path.join(root, name)
                        if "vgg" in name:
                            print("Skipped vggnet")
                            continue
                        if name.split(".")[-1] == "gz":
                            onnx_byte_obj = gzip.open(onnx_path).read()
                            ort_session = ort.InferenceSession(onnx_byte_obj)
                        else:
                            ort_session = ort.InferenceSession(onnx_path)
                        # compare onnx to pytorch
                        o2p_net, in_shape, in_name = load_onnx_model(onnx_path)
                        in_shape = (1, *in_shape)
                        o2p_net.eval()
                        for i in range(50):
                            x = torch.rand(in_shape)
                            output_onnx = ort_session.run(
                                None,
                                {in_name: np.array(x).astype(np.float32)},
                            )[0]
                            out_o2p_net = o2p_net(x)
                            assert torch.isclose(
                                torch.Tensor(output_onnx), out_o2p_net, atol=1e-4
                            ).all(), "Bound violation"
                        print(f"Successfully ran {onnx_path}")
                    except Exception as e:
                        # Best-effort sweep: report and continue on failures.
                        print(f"Couldn't run {onnx_path} - skipping with {e}")
if __name__ == "__main__":
    # Run all ONNX conversion checks when invoked directly as a script.
    runner = TestONNX()
    runner.test_onnx_differential()
    runner.test_onnx_benchmark21()
    runner.test_onnx_benchmark22()
| 6,655 | 35.173913 | 81 | py |
DualRE | DualRE-master/selection.py | """Select new instances given prediction and retrieval modules"""
import math
import collections
import torch
from torchtext import data
from utils import scorer
from utils.torch_utils import example_to_dict
# torchtext fields describing the JSON schema of every dataset example.
TOKEN = data.Field(sequential=True, batch_first=True, lower=True, include_lengths=True)
RELATION = data.Field(sequential=False, unk_token=None, pad_token=None)
POS = data.Field(sequential=True, batch_first=True)
NER = data.Field(sequential=True, batch_first=True)
PST = data.Field(sequential=True, batch_first=True)
# Per-example confidence scores assigned by the prediction / selection modules.
PR_CONFIDENCE = data.Field(sequential=False, use_vocab=False, dtype=torch.float)
SL_CONFIDENCE = data.Field(sequential=False, use_vocab=False, dtype=torch.float)
# Maps JSON keys to (attribute name, field) pairs for TabularDataset / Example parsing.
FIELDS = {
    "tokens": ("token", TOKEN),
    "stanford_pos": ("pos", POS),
    "stanford_ner": ("ner", NER),
    "relation": ("relation", RELATION),
    "subj_pst": ("subj_pst", PST),
    "obj_pst": ("obj_pst", PST),
    "pr_confidence": ("pr_confidence", PR_CONFIDENCE),
    "sl_confidence": ("sl_confidence", SL_CONFIDENCE),
}
def get_relation_distribution(dataset):
    """Compute the empirical relation-label distribution of a dataset.

    Args:
        dataset (data.Dataset or list): Either a torchtext dataset, or a list of
            (example_id, predicted_label, gold_label) tuples.

    Returns:
        dict: Mapping from relation label to its relative frequency.
    """
    if isinstance(dataset, data.Dataset):
        labels = [example.relation for example in dataset.examples]
    else:
        labels = [predicted for _, predicted, _ in dataset]
    total = len(dataset)
    return {label: count / total for label, count in collections.Counter(labels).items()}
def split_samples(dataset, meta_idxs, batch_size=50, conf_p=None, conf_s=None):
    """Split dataset into promoted examples (relabeled) and the remaining pool.

    Args:
        dataset (data.Dataset): Dataset instance to split.
        meta_idxs (list): List of (idx, predicted_label, gold_label) triples; idx
            refers to positions in the length-sorted example order used below.
        batch_size (int, optional): Iterator batch size. Defaults to 50.
        conf_p (dict, optional): Per-idx confidence from the predictor.
        conf_s (dict, optional): Per-idx confidence from the selector.
            Either both confidence dicts are given or neither.

    Returns:
        tuple: (new_examples, rest_examples) — promoted examples carrying the
        predicted label and confidences, and all untouched examples.
    """
    # The iterator is only used to obtain examples in the same length-sorted
    # order that retrieval produced its indexes in.
    iterator_unlabeled = data.Iterator(
        dataset=dataset,
        batch_size=batch_size,
        repeat=False,
        train=False,
        shuffle=False,
        sort=True,
        sort_key=lambda x: -len(x.token),
        sort_within_batch=False,
    )
    examples = iterator_unlabeled.data()
    new_examples, rest_examples, example_ids = [], [], set(idx for idx, pred, actual in meta_idxs)
    # Attach (pr_confidence, sl_confidence) to each triple; default to 1.0 when
    # no confidences were supplied.
    if conf_p is not None and conf_s is not None:
        meta_idxs = [(idx, pred, actual, conf_p[idx], conf_s[idx]) for idx, pred, actual in meta_idxs]
    elif conf_p is None and conf_s is None:
        meta_idxs = [(idx, pred, actual, 1.0, 1.0) for idx, pred, actual in meta_idxs]
    else:
        raise NotImplementedError("Can not split_samples.")
    for idx, pred, _, pr_confidence, sl_confidence in meta_idxs:
        # Re-serialize the example with the *predicted* label and confidences.
        output = example_to_dict(examples[idx], pr_confidence, sl_confidence, pred)
        new_examples.append(data.Example.fromdict(output, FIELDS))
    rest_examples = [example for k, example in enumerate(examples) if k not in example_ids]
    return new_examples, rest_examples
def intersect_samples(meta_idxs1, s_retrieve_fn, k_samples, prior_distribution):
    """Intersect predictor and selector rankings until k_samples overlap (or a cap is hit).

    Args:
        meta_idxs1 (list): Predictor ranking of (idx, predicted, gold) triples.
        s_retrieve_fn (callable): Retrieves the selector's top-k given (k, distribution).
        k_samples (int): Target number of intersected samples.
        prior_distribution (dict): Relation distribution passed to the selector.

    Returns:
        tuple: (meta_idxs, confidence_idxs_s) — the intersected triples and the
        selector confidences from the last retrieval round.
    """
    upperbound, meta_idxs, confidence_idxs_s = k_samples, [], []
    # Grow the candidate pool by 25% per round until the two top-lists share
    # enough triples, capped to avoid unbounded growth.
    while len(meta_idxs) < min(k_samples, len(meta_idxs1)):
        upperbound = math.ceil(1.25 * upperbound)
        ori_meta_idxs_s, confidence_idxs_s = s_retrieve_fn(upperbound, prior_distribution)
        meta_idxs = sorted(set(meta_idxs1[:upperbound]).intersection(set(ori_meta_idxs_s)))[:k_samples]
        if upperbound > k_samples * 30:  # set a limit for growing upperbound
            break
    # Report the quality of the intersected selection (with and without NO_RELATION).
    print("Infer on combination...")
    scorer.score([actual for _, _, actual in meta_idxs], [pred for _, pred, _ in meta_idxs], verbose=False)
    scorer.score(
        [actual for _, _, actual in meta_idxs], [pred for _, pred, _ in meta_idxs], verbose=False, NO_RELATION="-1"
    )
    return meta_idxs, confidence_idxs_s
def select_samples(model_p, model_s, dataset_infer, k_samples, args, default_distribution):
    """Select k_samples unlabeled instances to promote, per args.integrate_method.

    Args:
        model_p: Prediction-module trainer (supports .retrieve).
        model_s: Selection-module trainer (supports .retrieve).
        dataset_infer (data.Dataset): The unlabeled pool.
        k_samples (int): Number of instances to promote.
        args: Parsed command-line options (integrate_method, selector_upperbound, ...).
        default_distribution (dict): Fallback relation distribution for the selector.

    Returns:
        tuple: (new_examples, rest_examples) as produced by split_samples.
    """
    max_upperbound = int(math.ceil(k_samples * args.selector_upperbound))
    # predictor selection
    meta_idxs_p, confidence_idxs_p = model_p.retrieve(dataset_infer, len(dataset_infer))  # retrieve all the samples
    print("Infer on predictor: ")  # Track performance of predictor alone
    gold, guess = [t[2] for t in meta_idxs_p[:k_samples]], [t[1] for t in meta_idxs_p[:k_samples]]
    scorer.score(gold, guess, verbose=False)
    scorer.score(gold, guess, verbose=False, NO_RELATION="-1")
    # for self-training: trust the predictor's top-k directly
    if args.integrate_method == "p_only":
        return split_samples(dataset_infer, meta_idxs_p[:k_samples], args.batch_size)
    # selector selection: choose the label distribution the selector should match
    label_distribution = None
    if args.integrate_method == "s_only" or max_upperbound == 0:
        label_distribution = default_distribution
    else:
        # Estimate the distribution from the predictor's extended top list.
        label_distribution = get_relation_distribution(meta_idxs_p[:max_upperbound])

    def s_retrieve_fn(k_samples, label_distribution):
        # Closure over dataset_infer/model_s so intersect_samples can re-query.
        return model_s.retrieve(dataset_infer, k_samples, label_distribution=label_distribution)

    ori_meta_idxs_s, _ = s_retrieve_fn(k_samples, label_distribution)
    print("Infer on selector: ")
    gold, guess = [t[2] for t in ori_meta_idxs_s], [t[1] for t in ori_meta_idxs_s]
    scorer.score(gold, guess, verbose=False)
    scorer.score(gold, guess, verbose=False, NO_RELATION="-1")
    # If we only care about performance of selector
    if args.integrate_method == "s_only":
        return split_samples(dataset_infer, ori_meta_idxs_s)
    # integrate method: combine predictor and selector rankings
    if args.integrate_method == "intersection":
        meta_idxs, confidence_idxs_s = intersect_samples(meta_idxs_p, s_retrieve_fn, k_samples, label_distribution)
    else:
        raise NotImplementedError("integrate_method {} not implemented".format(args.integrate_method))
    confidence_dict_p = dict((id, confidence) for id, confidence in confidence_idxs_p)
    confidence_dict_s = dict((id, confidence) for id, confidence in confidence_idxs_s)
    return split_samples(dataset_infer, meta_idxs, conf_p=confidence_dict_p, conf_s=confidence_dict_s)
| 6,145 | 44.525926 | 116 | py |
DualRE | DualRE-master/train.py | """Main file for training DualRE"""
import argparse
# pylint: disable=invalid-name, missing-docstring
import math
import random
import torch
from torchtext import data
from model.predictor import Predictor
from model.selector import Selector
from model.trainer import Trainer, evaluate
from selection import get_relation_distribution, select_samples
from utils import helper, scorer, torch_utils
parser = argparse.ArgumentParser()
# Begin DualRE specific arguments
parser.add_argument(
    "--selector_model",
    type=str,
    default="pointwise",
    choices=["pointwise", "pairwise", "none"],
    help="Method for selector. 'none' indicates using self-training model",
)
parser.add_argument(
    "--integrate_method",
    type=str,
    default="intersection",
    choices=["intersection", "p_only", "s_only"],
    help="Method to combine results from prediction and retrieval module.",
)
parser.add_argument("--selector_upperbound", type=float, default=3, help="# of samples / k taken before intersection.")
parser.add_argument(
    "--num_iters", type=int, default=-1, help="# of iterations. -1 indicates it's determined by data_ratio."
)
parser.add_argument("--alpha", type=float, default=0.5, help="confidence hyperparameter for predictor.")
parser.add_argument("--beta", type=float, default=2, help="confidence hyperparameter for selector")
# Begin original TACRED arguments
parser.add_argument("--p_dir", type=str, help="Directory of the predictor.")
parser.add_argument("--s_dir", type=str, help="Directory of the selector.")
parser.add_argument("--data_dir", type=str, default="dataset/dataname")
parser.add_argument("--labeled_ratio", type=float)
parser.add_argument("--unlabeled_ratio", type=float)
# ratio of instances to promote each round
parser.add_argument("--data_ratio", type=float, default=0.1)
parser.add_argument("--emb_dim", type=int, default=300, help="Word embedding dimension.")
parser.add_argument("--ner_dim", type=int, default=30, help="NER embedding dimension.")
parser.add_argument("--pos_dim", type=int, default=30, help="POS embedding dimension.")
parser.add_argument("--hidden_dim", type=int, default=200, help="RNN hidden state size.")
parser.add_argument("--num_layers", type=int, default=2, help="Num of RNN layers.")
parser.add_argument("--p_dropout", type=float, default=0.5, help="Input and RNN dropout rate.")
parser.add_argument("--s_dropout", type=float, default=0.5, help="Input and RNN dropout rate for selector.")
parser.add_argument("--attn", dest="attn", action="store_true", help="Use attention layer.")
parser.add_argument("--no-attn", dest="attn", action="store_false")
parser.set_defaults(attn=True)
parser.add_argument("--attn_dim", type=int, default=200, help="Attention size.")
parser.add_argument("--pe_dim", type=int, default=30, help="Position encoding dimension.")
parser.add_argument("--lr", type=float, default=1.0, help="Applies to SGD and Adagrad.")
parser.add_argument("--lr_decay", type=float, default=0.9)
parser.add_argument("--optim", type=str, default="sgd", help="sgd, adagrad, adam or adamax.")
parser.add_argument("--num_epoch", type=int, default=30)
parser.add_argument("--patience", type=int, default=0)
parser.add_argument("--batch_size", type=int, default=50)
parser.add_argument("--max_grad_norm", type=float, default=5.0, help="Gradient clipping.")
parser.add_argument("--log_step", type=int, default=20, help="Print log every k steps.")
parser.add_argument("--log", type=str, default="logs.txt", help="Write training log to file.")
parser.add_argument("--save_epoch", type=int, default=100, help="Save model checkpoints every k epochs.")
parser.add_argument("--save_dir", type=str, default="./saved_models", help="Root dir for saving models.")
parser.add_argument("--id", type=str, default="00", help="Model ID under which to save models.")
parser.add_argument("--info", type=str, default="", help="Optional info for the experiment.")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--cuda", type=bool, default=True)
parser.add_argument("--cpu", action="store_true", help="Ignore CUDA.")
args, _ = parser.parse_known_args()

# Seed all RNGs for reproducibility; --cpu wins over --cuda.
torch.manual_seed(args.seed)
random.seed(args.seed)
if args.cpu:
    args.cuda = False
elif args.cuda:
    torch.cuda.manual_seed(args.seed)
# make opt: everything downstream reads options from this dict.
opt = vars(args)
# load data
print("Loading data from {} with batch size {}...".format(opt["data_dir"], opt["batch_size"]))
# torchtext fields describing the JSON schema of every dataset example.
TOKEN = data.Field(sequential=True, batch_first=True, lower=True, include_lengths=True)
RELATION = data.Field(sequential=False, pad_token=None)
POS = data.Field(sequential=True, batch_first=True)
NER = data.Field(sequential=True, batch_first=True)
PST = data.Field(sequential=True, batch_first=True)
PR_CONFIDENCE = data.Field(sequential=False, use_vocab=False, dtype=torch.float)
SL_CONFIDENCE = data.Field(sequential=False, use_vocab=False, dtype=torch.float)
FIELDS = {
    "tokens": ("token", TOKEN),
    "stanford_pos": ("pos", POS),
    "stanford_ner": ("ner", NER),
    "relation": ("relation", RELATION),
    "subj_pst": ("subj_pst", PST),
    "obj_pst": ("obj_pst", PST),
    "pr_confidence": ("pr_confidence", PR_CONFIDENCE),
    "sl_confidence": ("sl_confidence", SL_CONFIDENCE),
}
# Vocab is built from the *full* training set; the labeled/unlabeled splits are
# ratio-specific files produced beforehand.
dataset_vocab = data.TabularDataset(path=opt["data_dir"] + "/train.json", format="json", fields=FIELDS)
dataset_train = data.TabularDataset(
    path=opt["data_dir"] + "/train-" + str(opt["labeled_ratio"]) + ".json", format="json", fields=FIELDS
)
dataset_infer = data.TabularDataset(
    path=opt["data_dir"] + "/raw-" + str(opt["unlabeled_ratio"]) + ".json", format="json", fields=FIELDS
)
dataset_dev = data.TabularDataset(path=opt["data_dir"] + "/dev.json", format="json", fields=FIELDS)
dataset_test = data.TabularDataset(path=opt["data_dir"] + "/test.json", format="json", fields=FIELDS)
print("=" * 100)
print("Labeled data path: " + opt["data_dir"] + "/train-" + str(opt["labeled_ratio"]) + ".json")
print("Unlabeled data path: " + opt["data_dir"] + "/raw-" + str(opt["unlabeled_ratio"]) + ".json")
print(
    "Labeled instances #: %d, Unlabeled instances #: %d" % (len(dataset_train.examples), len(dataset_infer.examples))
)
print("=" * 100)
TOKEN.build_vocab(dataset_vocab)
RELATION.build_vocab(dataset_vocab)
POS.build_vocab(dataset_vocab)
NER.build_vocab(dataset_vocab)
PST.build_vocab(dataset_vocab)
# Expose vocab sizes / pad ids / label mappings to the models via opt.
opt["num_class"] = len(RELATION.vocab)
opt["vocab_pad_id"] = TOKEN.vocab.stoi["<pad>"]
opt["pos_pad_id"] = POS.vocab.stoi["<pad>"]
opt["ner_pad_id"] = NER.vocab.stoi["<pad>"]
opt["pe_pad_id"] = PST.vocab.stoi["<pad>"]
opt["vocab_size"] = len(TOKEN.vocab)
opt["pos_size"] = len(POS.vocab)
opt["ner_size"] = len(NER.vocab)
opt["pe_size"] = len(PST.vocab)
opt["rel_stoi"] = RELATION.vocab.stoi
opt["rel_itos"] = RELATION.vocab.itos
helper.ensure_dir(opt["p_dir"], verbose=True)
helper.ensure_dir(opt["s_dir"], verbose=True)
# Pretrained GloVe vectors; emb_dim follows the loaded vector dimensionality.
TOKEN.vocab.load_vectors("glove.840B.300d", cache="./dataset/.vectors_cache")
if TOKEN.vocab.vectors is not None:
    opt["emb_dim"] = TOKEN.vocab.vectors.size(1)
def load_best_model(model_dir, model_type="predictor"):
    """Restore the best checkpoint saved under *model_dir* as a Trainer.

    model_type selects which module class to instantiate before loading the
    saved weights; the stored config is also printed for inspection.
    """
    model_file = model_dir + "/best_model.pt"
    print("Loading model from {}".format(model_file))
    model_opt = torch_utils.load_config(model_file)
    # Build the right module for the checkpoint, then wrap it in a Trainer.
    if model_type == "predictor":
        module = Predictor(model_opt)
    else:
        module = Selector(model_opt)
    model = Trainer(model_opt, module, model_type=model_type)
    model.load(model_file)
    helper.print_config(model_opt)
    return model
# One self-training round per data_ratio slice of the unlabeled pool,
# optionally capped by --num_iters.
num_iters = math.ceil(1.0 / opt["data_ratio"])
if args.num_iters > 0:
    num_iters = min(num_iters, args.num_iters)
# Number of unlabeled instances promoted to the labeled set each round.
k_samples = math.ceil(len(dataset_infer.examples) * opt["data_ratio"])
train_label_distribution = get_relation_distribution(dataset_train)
dev_f1_iter, test_f1_iter = [], []
for num_iter in range(num_iters + 1):
    print("")
    print("=" * 100)
    print("Training #: %d, Infer #: %d" % (len(dataset_train.examples), len(dataset_infer.examples)))
    # ====================== #
    # Begin Train on Predictor
    # ====================== #
    print("Training on iteration #%d for dualRE Predictor..." % num_iter)
    opt["model_save_dir"] = opt["p_dir"]
    opt["dropout"] = opt["p_dropout"]
    # save config
    helper.save_config(opt, opt["model_save_dir"] + "/config.json", verbose=True)
    helper.print_config(opt)
    # prediction module
    predictor = Predictor(opt, emb_matrix=TOKEN.vocab.vectors)
    model = Trainer(opt, predictor, model_type="predictor")
    model.train(dataset_train, dataset_dev)
    # Evaluate the best predictor checkpoint on all splits.
    best_model_p = load_best_model(opt["model_save_dir"], model_type="predictor")
    print("Final evaluation #%d on train set..." % num_iter)
    evaluate(best_model_p, dataset_train, verbose=True)
    print("Final evaluation #%d on dev set..." % num_iter)
    dev_f1 = evaluate(best_model_p, dataset_dev, verbose=True)[2]
    print("Final evaluation #%d on test set..." % num_iter)
    test_f1 = evaluate(best_model_p, dataset_test, verbose=True)[2]
    dev_f1_iter.append(dev_f1)
    test_f1_iter.append(test_f1)
    best_model_p = load_best_model(opt["p_dir"], model_type="predictor")
    # ====================== #
    # Begin Train on Selector
    # ====================== #
    best_model_s = None
    if args.selector_model != "none":
        print("Training on iteration #%d for dualRE Selector..." % num_iter)
        opt["model_save_dir"] = opt["s_dir"]
        opt["dropout"] = opt["s_dropout"]
        # save config
        helper.save_config(opt, opt["model_save_dir"] + "/config.json", verbose=True)
        helper.print_config(opt)
        # model ("predictor" selector_model reuses the Predictor architecture)
        selector = Selector(opt, emb_matrix=TOKEN.vocab.vectors)
        if args.selector_model == "predictor":
            selector = Predictor(opt, emb_matrix=TOKEN.vocab.vectors)
        model = Trainer(opt, selector, model_type=args.selector_model)
        model.train(dataset_train, dataset_dev)
        # Sample from cur_model
        best_model_s = load_best_model(opt["s_dir"], model_type=args.selector_model)
    # ====================== #
    # Select New Instances
    # ====================== #
    new_examples, rest_examples = select_samples(
        best_model_p, best_model_s, dataset_infer, k_samples, args, train_label_distribution
    )
    # update dataset: promote selected instances into the labeled set
    dataset_train.examples = dataset_train.examples + new_examples
    dataset_infer.examples = rest_examples
scorer.print_table(dev_f1_iter, test_f1_iter, header="Best dev and test F1 with seed=%s:" % args.seed)
| 10,578 | 41.657258 | 119 | py |
DualRE | DualRE-master/utils/torch_utils.py | """
Utility functions for torch.
"""
import torch
from torch.optim import Optimizer
import numpy as np
# class
class MyAdagrad(Optimizer):
    """My modification of the Adagrad optimizer that allows to specify an initial
    accumulater value. This mimics the behavior of the default Adagrad implementation
    in Tensorflow. The default PyTorch Adagrad uses 0 for initial acculmulator value.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        init_accu_value (float, optional): initial accumulater value.
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    """
    def __init__(self, params, lr=1e-2, lr_decay=0, init_accu_value=0.1, weight_decay=0):
        defaults = dict(lr=lr, lr_decay=lr_decay, init_accu_value=init_accu_value, weight_decay=weight_decay)
        super(MyAdagrad, self).__init__(params, defaults)
        # Seed the squared-gradient accumulator with init_accu_value instead of 0.
        for group in self.param_groups:
            for p in group["params"]:
                state = self.state[p]
                state["step"] = 0
                state["sum"] = torch.ones(p.data.size()).type_as(p.data) * init_accu_value
    def share_memory(self):
        # Move accumulators into shared memory (Hogwild-style multiprocess training).
        for group in self.param_groups:
            for p in group["params"]:
                state = self.state[p]
                state["sum"].share_memory_()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]
                state["step"] += 1
                if group["weight_decay"] != 0:
                    if p.grad.data.is_sparse:
                        raise RuntimeError("weight_decay option is not compatible with sparse gradients ")
                    # FIX: the positional overload grad.add(value, tensor) was
                    # removed from PyTorch; use the keyword `alpha` form.
                    grad = grad.add(p.data, alpha=group["weight_decay"])
                clr = group["lr"] / (1 + (state["step"] - 1) * group["lr_decay"])
                if p.grad.data.is_sparse:
                    grad = grad.coalesce()  # the update is non-linear so indices must be unique
                    grad_indices = grad._indices()
                    grad_values = grad._values()
                    size = torch.Size([x for x in grad.size()])
                    def make_sparse(values):
                        # Rebuild a sparse tensor sharing the gradient's indices.
                        constructor = type(p.grad.data)
                        if grad_indices.dim() == 0 or values.dim() == 0:
                            return constructor()
                        return constructor(grad_indices, values, size)
                    state["sum"].add_(make_sparse(grad_values.pow(2)))
                    # FIX: `_sparse_mask` was renamed to the public `sparse_mask`.
                    std = state["sum"].sparse_mask(grad)
                    std_values = std._values().sqrt_().add_(1e-10)
                    p.data.add_(make_sparse(grad_values / std_values), alpha=-clr)
                else:
                    # Accumulate squared gradients, then scale the update by
                    # 1/sqrt(accumulator) as in standard Adagrad.
                    # FIX: deprecated positional signatures addcmul_(1, g, g) and
                    # addcdiv_(-clr, g, std) replaced with the `value=` keyword form.
                    state["sum"].addcmul_(grad, grad, value=1)
                    std = state["sum"].sqrt().add_(1e-10)
                    p.data.addcdiv_(grad, std, value=-clr)
        return loss
# torch specific functions
def get_optimizer(name, parameters, lr):
    """Create an optimizer for *parameters* by (whitespace-tolerant) name.

    Supported names: 'sgd', 'adagrad'/'myadagrad' (custom accumulator init),
    'adam', 'adamax'. Raises for anything else.
    """
    name = name.strip()
    if name == "sgd":
        return torch.optim.SGD(parameters, lr=lr)
    if name in ("adagrad", "myadagrad"):
        # use my own adagrad to allow for init accumulator value
        return MyAdagrad(parameters, lr=lr, init_accu_value=0.1)
    if name == "adam":
        return torch.optim.Adam(parameters, lr=lr, betas=(0.9, 0.99))  # use default lr
    if name == "adamax":
        return torch.optim.Adamax(parameters)  # use default lr
    raise Exception("Unsupported optimizer: {}".format(name))
def change_lr(optimizer, new_lr):
    """Set the learning rate of every parameter group to *new_lr*."""
    for group in optimizer.param_groups:
        group["lr"] = new_lr
def flatten_indices(seq_lens, width):
    """Map per-row valid lengths to flat indices in a (rows x width) layout.

    Row i contributes indices i*width .. i*width + seq_lens[i] - 1.
    """
    return [row * width + col for row, length in enumerate(seq_lens) for col in range(length)]
def set_cuda(var, cuda):
    """Move *var* to the GPU when *cuda* is truthy; otherwise return it unchanged."""
    return var.cuda() if cuda else var
def keep_partial_grad(grad, topk):
    """
    Keep only the topk rows of grads.
    """
    assert topk < grad.size(0)
    # Zero every row from topk onward, in place.
    grad.data[topk:] = 0
    return grad
def unsort_idx(examples, batch_size):
    """For each batch of *examples*, return index tensors that recover the
    original order after the batch has been sorted by token length (desc).
    """
    lengths = np.array([len(ex.token) for ex in examples])
    recovery = []
    for start in range(0, len(lengths), batch_size):
        chunk = lengths[start:start + batch_size]
        # Indices of this chunk in length-descending order (stable sort).
        order = sorted(range(len(chunk)), key=chunk.__getitem__, reverse=True)
        # argsort of that order maps sorted positions back to original ones.
        recovery.append(torch.LongTensor(np.argsort(order)))
    return recovery
# model IO
def save(model, optimizer, opt, filename):
    """Serialize model/optimizer state dicts plus config to *filename*.

    A failed write is reported with a warning rather than raised, so training
    can continue even if the disk is full.
    """
    checkpoint = {"model": model.state_dict(), "optimizer": optimizer.state_dict(), "config": opt}
    try:
        torch.save(checkpoint, filename)
    except BaseException:
        print("[ Warning: model saving failed. ]")
def load(model, optimizer, filename):
    """Load model/optimizer state from a checkpoint file.

    Returns (model, optimizer, config). Either model or optimizer may be None
    to skip restoring that part.

    FIX: previously a failed torch.load only printed a message and fell
    through, so the next line crashed with a confusing NameError on `dump`.
    Now the original exception is re-raised after the message.
    """
    try:
        dump = torch.load(filename)
    except BaseException:
        print("[ Fail: model loading failed. ]")
        raise
    if model is not None:
        model.load_state_dict(dump["model"])
    if optimizer is not None:
        optimizer.load_state_dict(dump["optimizer"])
    opt = dump["config"]
    return model, optimizer, opt
def load_config(filename):
    """Return only the 'config' entry stored in a checkpoint file."""
    return torch.load(filename)["config"]
# data batch
def batch_to_input(batch, vocab_pad_id=0):
    """Unpack a torchtext batch into the model input dict and the relation target.

    The 'masks' entry marks padding positions (word id == vocab_pad_id).
    """
    words, length = batch.token
    inputs = {
        "words": words,
        "length": length,
        "pos": batch.pos,
        "ner": batch.ner,
        "subj_pst": batch.subj_pst,
        "obj_pst": batch.obj_pst,
        "masks": torch.eq(words, vocab_pad_id),
        "pr_confidence": batch.pr_confidence,
        "sl_confidence": batch.sl_confidence,
    }
    return inputs, batch.relation
def example_to_dict(example, pr_confidence, sl_confidence, rel):
    """Convert a torchtext example back into the raw JSON record schema,
    attaching the given relation label and confidence scores."""
    return {
        "tokens": example.token,
        "stanford_pos": example.pos,
        "stanford_ner": example.ner,
        "subj_pst": example.subj_pst,
        "obj_pst": example.obj_pst,
        "relation": rel,
        "pr_confidence": pr_confidence,
        "sl_confidence": sl_confidence,
    }
def arg_max(l):
    """Return (index, value) of the maximum element of *l*.

    Returns (-1, -1) for an empty sequence (keeping the old sentinel).
    FIX: the previous implementation initialised the running maximum to -1,
    so any input whose values were all <= -1 incorrectly returned (-1, -1).
    Ties resolve to the first occurrence, as before.
    """
    if not l:
        return -1, -1
    bid = max(range(len(l)), key=l.__getitem__)
    return bid, l[bid]
| 6,900 | 31.247664 | 109 | py |
DualRE | DualRE-master/model/predictor.py | from torch import nn
import torch.nn.functional as F
from .layers import Classifier
from .encoder import RNNEncoder
class Predictor(nn.Module):
    """ A sequence model for relation extraction. """
    def __init__(self, opt, emb_matrix=None):
        super(Predictor, self).__init__()
        # RNN encoder produces a sentence representation; a linear classifier
        # maps it to relation logits.
        self.encoder = RNNEncoder(opt, emb_matrix)
        self.classifier = Classifier(opt)
    def forward(self, inputs):
        hidden = self.encoder(inputs)
        return self.classifier(hidden), hidden
    def predict(self, inputs):
        # Same forward pass, but return softmax probabilities for ranking.
        logits, _ = self.forward(inputs)
        return F.softmax(logits, dim=-1)
| 708 | 26.269231 | 53 | py |
DualRE | DualRE-master/model/encoder.py | """
A rnn model for relation extraction, written in pytorch.
"""
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
class PositionAwareAttention(nn.Module):
    """
    A position-augmented attention layer where the attention weight is
    a = T' . tanh(Ux + Vq + Wf)
    where x is the input, q is the query, and f is additional position features.
    """
    def __init__(self, input_size, query_size, feature_size, attn_size):
        super(PositionAwareAttention, self).__init__()
        self.input_size = input_size
        self.query_size = query_size
        self.feature_size = feature_size
        self.attn_size = attn_size
        # U, V, W projections into the shared attention space; T scores tokens.
        self.ulinear = nn.Linear(input_size, attn_size)
        self.vlinear = nn.Linear(query_size, attn_size, bias=False)
        if feature_size > 0:
            self.wlinear = nn.Linear(feature_size, attn_size, bias=False)
        else:
            # No position features: the Wf term is dropped entirely.
            self.wlinear = None
        self.tlinear = nn.Linear(attn_size, 1)
        self.init_weights()
    def init_weights(self):
        # Small-variance init keeps all projections near zero at the start.
        self.ulinear.weight.data.normal_(std=0.001)
        self.vlinear.weight.data.normal_(std=0.001)
        if self.wlinear is not None:
            self.wlinear.weight.data.normal_(std=0.001)
        self.tlinear.weight.data.zero_()  # use zero to give uniform attention at the beginning
    def forward(self, x, x_mask, q, f):
        """
        x : batch_size * seq_len * input_size
        q : batch_size * query_size
        f : batch_size * seq_len * feature_size
        x is the sequence of word embeddings
        q is the last hidden state
        f is the position embeddings
        """
        batch_size, seq_len, _ = x.size()
        # Project token states (U), broadcast the query projection (V) over
        # the sequence, and optionally project position features (W).
        x_proj = self.ulinear(x.contiguous().view(-1, self.input_size)).view(
            batch_size, seq_len, self.attn_size
        )
        q_proj = (
            self.vlinear(q.view(-1, self.query_size))
            .contiguous()
            .view(batch_size, self.attn_size)
            .unsqueeze(1)
            .expand(batch_size, seq_len, self.attn_size)
        )
        if self.wlinear is not None:
            f_proj = (
                self.wlinear(f.view(-1, self.feature_size))
                .contiguous()
                .view(batch_size, seq_len, self.attn_size)
            )
            projs = [x_proj, q_proj, f_proj]
        else:
            projs = [x_proj, q_proj]
        # a = T' . tanh(Ux + Vq + Wf): one scalar score per token.
        scores = self.tlinear(torch.tanh(sum(projs)).view(-1, self.attn_size)).view(
            batch_size, seq_len
        )
        # mask padding (set to -inf so softmax assigns them zero weight)
        scores.data.masked_fill_(x_mask.data, -float("inf"))
        weights = F.softmax(scores, dim=-1)
        # weighted average input vectors
        outputs = weights.unsqueeze(1).bmm(x).squeeze(1)
        return outputs
class RNNEncoder(nn.Module):
    """ A sequence model for relation extraction. """
    def __init__(self, opt, emb_matrix=None):
        # LSTM over concatenated word/POS/NER embeddings, optionally followed
        # by position-aware attention over subject/object position embeddings.
        super(RNNEncoder, self).__init__()
        self.drop = nn.Dropout(opt["dropout"])
        self.emb = nn.Embedding(
            opt["vocab_size"], opt["emb_dim"], padding_idx=opt["vocab_pad_id"]
        )
        if opt["pos_dim"] > 0:
            self.pos_emb = nn.Embedding(
                opt["pos_size"], opt["pos_dim"], padding_idx=opt["pos_pad_id"]
            )
        if opt["ner_dim"] > 0:
            self.ner_emb = nn.Embedding(
                opt["ner_size"], opt["ner_dim"], padding_idx=opt["ner_pad_id"]
            )
        input_size = opt["emb_dim"] + opt["pos_dim"] + opt["ner_dim"]
        self.rnn = nn.LSTM(
            input_size,
            opt["hidden_dim"],
            opt["num_layers"],
            batch_first=True,
            dropout=opt["dropout"],
        )
        # attention layer
        if opt["attn"]:
            self.attn_layer = PositionAwareAttention(
                opt["hidden_dim"], opt["hidden_dim"], 2 * opt["pe_dim"], opt["attn_dim"]
            )
            self.pe_emb = nn.Embedding(
                opt["pe_size"], opt["pe_dim"], padding_idx=opt["pe_pad_id"]
            )
        self.opt = opt
        self.use_cuda = opt["cuda"]
        self.emb_matrix = emb_matrix
        if emb_matrix is not None:
            # Initialize word embeddings from pretrained vectors.
            self.emb.weight.data.copy_(emb_matrix)
    def zero_state(self, batch_size):
        # Fresh zero (h0, c0) for the LSTM, on GPU when enabled.
        state_shape = (self.opt["num_layers"], batch_size, self.opt["hidden_dim"])
        h0 = c0 = Variable(torch.zeros(*state_shape), requires_grad=False)
        if self.use_cuda:
            return h0.cuda(), c0.cuda()
        else:
            return h0, c0
    def forward(self, inputs):
        # words: [batch size, seq length]
        words, masks = inputs["words"], inputs["masks"]
        pos, ner = inputs["pos"], inputs["ner"]
        subj_pst, obj_pst = inputs["subj_pst"], inputs["obj_pst"]
        seq_lens = inputs["length"]
        batch_size = words.size()[0]
        # embedding lookup
        # word_inputs: [batch size, seq length, embedding size]
        # inputs: [batch size, seq length, embedding size * 3]
        word_inputs = self.emb(words)
        inputs = [word_inputs]
        if self.opt["pos_dim"] > 0:
            inputs += [self.pos_emb(pos)]
        if self.opt["ner_dim"] > 0:
            inputs += [self.ner_emb(ner)]
        inputs = self.drop(torch.cat(inputs, dim=2))  # add dropout to input
        # rnn (packed so padding does not contribute to the recurrence;
        # requires batches pre-sorted by descending length)
        h0, c0 = self.zero_state(batch_size)
        inputs = nn.utils.rnn.pack_padded_sequence(
            inputs, seq_lens.tolist(), batch_first=True
        )
        outputs, (ht, ct) = self.rnn(inputs, (h0, c0))
        outputs, output_lens = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True
        )
        hidden = self.drop(ht[-1, :, :])  # get the outmost layer h_n
        outputs = self.drop(outputs)
        # attention
        if self.opt["attn"]:
            # convert all negative PE numbers to positive indices
            # e.g., -2 -1 0 1 will be mapped to 98 99 100 101
            subj_pe_inputs = self.pe_emb(subj_pst)
            obj_pe_inputs = self.pe_emb(obj_pst)
            pe_features = torch.cat((subj_pe_inputs, obj_pe_inputs), dim=2)
            final_hidden = self.attn_layer(outputs, masks, hidden, pe_features)
        else:
            final_hidden = hidden
        return final_hidden
class CNNEncoder(nn.Module):
    """ A sequence model for relation extraction. """
    def __init__(self, opt, emb_matrix=None):
        # Multi-kernel 1D convolutions over concatenated word/POS/NER/position
        # embeddings, max-pooled over time and projected to hidden_dim.
        super(CNNEncoder, self).__init__()
        # initialize drop out rate
        self.drop = nn.Dropout(opt["dropout"])
        # initialize embedding layer
        self.emb = nn.Embedding(
            opt["vocab_size"], opt["emb_dim"], padding_idx=opt["vocab_pad_id"]
        )
        if opt["pos_dim"] > 0:
            self.pos_emb = nn.Embedding(
                opt["pos_size"], opt["pos_dim"], padding_idx=opt["pos_pad_id"]
            )
        if opt["ner_dim"] > 0:
            self.ner_emb = nn.Embedding(
                opt["ner_size"], opt["ner_dim"], padding_idx=opt["ner_pad_id"]
            )
        if opt["pe_dim"] > 0:
            self.pe_emb = nn.Embedding(
                opt["pe_size"], opt["pe_dim"], padding_idx=opt["pe_pad_id"]
            )
        # input layer
        input_size = (
            opt["emb_dim"] + opt["pos_dim"] + opt["ner_dim"] + 2 * opt["pe_dim"]
        )
        # encoding layer: one Conv1d per kernel size
        self.convs = nn.ModuleList(
            [
                torch.nn.Conv1d(input_size, opt["hidden_dim"], ksize, padding=2)
                for ksize in opt["kernels"]
            ]
        )
        # prediction layer: concat of pooled conv outputs -> hidden_dim
        self.linear = nn.Linear(
            opt["hidden_dim"] * len(opt["kernels"]), opt["hidden_dim"]
        )
        # save other parameters
        self.opt = opt
        self.use_cuda = opt["cuda"]
        if emb_matrix is not None:
            self.emb.weight.data.copy_(emb_matrix)
    def forward(self, inputs):
        # words: [batch size, seq length]
        words = inputs["words"]
        pos, ner = inputs["pos"], inputs["ner"]
        subj_pst, obj_pst = inputs["subj_pst"], inputs["obj_pst"]
        word_inputs = self.emb(words)
        inputs = [word_inputs]
        if self.opt["pos_dim"] > 0:
            inputs += [self.pos_emb(pos)]
        if self.opt["ner_dim"] > 0:
            inputs += [self.ner_emb(ner)]
        if self.opt["pe_dim"] > 0:
            inputs += [self.pe_emb(subj_pst)]
            inputs += [self.pe_emb(obj_pst)]
        inputs = self.drop(torch.cat(inputs, dim=2))  # add dropout to input
        # Conv1d expects (batch, channels, seq); transpose accordingly.
        embedded = torch.transpose(inputs, 1, 2)
        hiddens = [F.relu(conv(embedded)) for conv in self.convs]  # b *
        # max-pool each conv output over the time dimension
        hiddens = [
            torch.squeeze(F.max_pool1d(hidden, hidden.size(2)), dim=2)
            for hidden in hiddens
        ]
        hidden = self.drop(torch.cat(hiddens, dim=1))
        encoding = torch.tanh(self.linear(hidden))
        return encoding
| 8,907 | 34.349206 | 95 | py |
DualRE | DualRE-master/model/layers.py | from torch import nn
from torch.nn import init
class Classifier(nn.Module):
    """Linear projection from an encoder hidden state to class logits."""
    def __init__(self, opt):
        super(Classifier, self).__init__()
        self.hidden_dim = opt["hidden_dim"]
        self.num_class = opt["num_class"]
        self.linear = nn.Linear(self.hidden_dim, self.num_class)
        # Zero bias + Xavier-uniform weights, as in the original setup.
        self.linear.bias.data.fill_(0)
        init.xavier_uniform_(self.linear.weight)
    def forward(self, inputs):
        return self.linear(inputs)
class Discriminator(nn.Module):
    """Discriminator model for source domain."""
    def __init__(self, input_dims, hidden_dims, output_dims):
        """Init discriminator."""
        super(Discriminator, self).__init__()
        # Dropout -> two LeakyReLU hidden layers -> sigmoid output in (0, 1).
        stack = [
            nn.Dropout(0.1),
            nn.Linear(input_dims, hidden_dims),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dims, hidden_dims),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dims, output_dims),
            nn.Sigmoid(),
        ]
        self.layer = nn.Sequential(*stack)
    def forward(self, inputs):
        """Forward the discriminator."""
        return self.layer(inputs)
| 1,169 | 27.536585 | 75 | py |
DualRE | DualRE-master/model/selector.py | from torch import nn
from .layers import Classifier
from .encoder import RNNEncoder
class Selector(nn.Module):
    """ A sequence model for relation extraction. """
    def __init__(self, opt, emb_matrix=None):
        super(Selector, self).__init__()
        # Same encoder/classifier pair as the predictor; only `predict`
        # differs (raw logits instead of probabilities).
        self.encoder = RNNEncoder(opt, emb_matrix)
        self.classifier = Classifier(opt)
    def forward(self, inputs):
        hidden = self.encoder(inputs)
        return self.classifier(hidden), hidden
    def predict(self, inputs):
        # Un-normalized scores are used directly for ranking.
        logits, _ = self.forward(inputs)
        return logits
| 634 | 24.4 | 53 | py |
DualRE | DualRE-master/model/trainer.py | """
A rnn model for relation extraction, written in pytorch.
"""
import math
import time
import os
from datetime import datetime
from shutil import copyfile
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from torchtext import data
from utils import torch_utils, scorer
from utils.torch_utils import batch_to_input, arg_max
def idx_to_onehot(target, opt, confidence=None):
    """Expand class indices into one-hot rows of width opt['num_class'].

    Without *confidence* the rows are hard one-hots. With a per-sample
    confidence c, the target class gets c and every other class gets 1 - c
    (a soft labeling used by the pointwise selector).
    """
    n, c = target.size(0), opt['num_class']
    if confidence is None:
        y = torch.zeros(n, c).scatter_(1, target.data.unsqueeze(1), 1)
    else:
        soft = (1 - confidence.data).unsqueeze(1).expand(-1, c)
        y = torch.ones(n, c) * soft
        y[torch.arange(n).long(), target.data] = confidence.data
    return Variable(y)
def evaluate(model, dataset, evaluate_type='prf', verbose=False):
    """Evaluate a Trainer on *dataset*.

    evaluate_type='prf' returns (precision, recall, f1, summed loss) via the
    project scorer; evaluate_type='auc' returns
    (mean per-relation AUC * 100, None, None, None).
    """
    rel_stoi, rel_itos = model.opt['rel_stoi'], model.opt['rel_itos']
    iterator_test = data.Iterator(
        dataset=dataset,
        batch_size=model.opt['batch_size'],
        repeat=False,
        train=True,
        shuffle=False,
        sort=True,
        sort_key=lambda x: -len(x.token),
        sort_within_batch=False)
    if evaluate_type == 'prf':
        predictions = []
        all_probs = []
        golds = []
        all_loss = 0
        for batch in iterator_test:
            inputs, target = batch_to_input(batch, model.opt['vocab_pad_id'])
            preds, probs, loss = model.predict(inputs, target)
            predictions += preds
            all_probs += probs
            all_loss += loss
            golds += target.data.tolist()
        # Convert class indices back to relation names for the scorer.
        predictions = [rel_itos[p] for p in predictions]
        golds = [rel_itos[p] for p in golds]
        p, r, f1 = scorer.score(golds, predictions, verbose=verbose)
        return p, r, f1, all_loss
    elif evaluate_type == 'auc':
        logits, labels = [], []
        for batch in iterator_test:
            inputs, target = batch_to_input(batch, model.opt['vocab_pad_id'])
            logits += model.predict(inputs)[0]
            labels += batch.relation.data.numpy().tolist()
        # Macro-average AUC over all real relations (skip 'no_relation').
        p, q = 0, 0
        for rel in range(len(rel_itos)):
            if rel == rel_stoi['no_relation']:
                continue
            logits_rel = [logit[rel] for logit in logits]
            labels_rel = [1 if label == rel else 0 for label in labels]
            ranking = list(zip(logits_rel, labels_rel))
            ranking = sorted(ranking, key=lambda x: x[0], reverse=True)
            logits_rel, labels_rel = zip(*ranking)
            p += scorer.AUC(logits_rel, labels_rel)
            q += 1
        dev_auc = p / q * 100
        return dev_auc, None, None, None
def calc_confidence(probs, exp):
    """Confidence of a prediction: its top probability raised to *exp*."""
    top = max(probs)
    return top ** exp
class Trainer(object):
    """ A wrapper class for the training and evaluation of models. """
    def __init__(self, opt, model, model_type='predictor'):
        # model_type selects the loss: per-sample cross-entropy for the
        # predictor, BCE-with-logits for the pointwise selector; the pairwise
        # selector computes its loss inline in update(), so its criterion here
        # is only a placeholder.
        self.opt = opt
        self.model_type = model_type
        self.model = model
        if model_type == 'predictor':
            self.criterion = nn.CrossEntropyLoss(reduction='none')
        elif model_type == 'pointwise':
            self.criterion = nn.BCEWithLogitsLoss()
        elif model_type == 'pairwise':
            self.criterion = nn.BCEWithLogitsLoss()  # Only a placeholder, will NOT use this criterion
        self.parameters = [p for p in self.model.parameters() if p.requires_grad]
        if opt['cuda']:
            self.model.cuda()
            self.criterion.cuda()
        self.optimizer = torch_utils.get_optimizer(opt['optim'], self.parameters, opt['lr'])
    def train(self, dataset_train, dataset_dev):
        """Train for num_epoch epochs (extended while patience allows),
        checkpointing each epoch and copying the best-dev model aside."""
        opt = self.opt.copy()
        iterator_train = data.Iterator(
            dataset=dataset_train,
            batch_size=opt['batch_size'],
            repeat=False,
            train=True,
            shuffle=True,
            sort_key=lambda x: len(x.token),
            sort_within_batch=True)
        iterator_dev = data.Iterator(
            dataset=dataset_dev,
            batch_size=opt['batch_size'],
            repeat=False,
            train=True,
            sort_key=lambda x: len(x.token),
            sort_within_batch=True)
        dev_score_history = []
        current_lr = opt['lr']
        global_step = 0
        format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
        max_steps = len(iterator_train) * opt['num_epoch']
        # start training
        epoch = 0
        patience = 0
        while True:
            epoch = epoch + 1
            train_loss = 0
            for batch in iterator_train:
                start_time = time.time()
                global_step += 1
                inputs, target = batch_to_input(batch, opt['vocab_pad_id'])
                loss = self.update(inputs, target)
                train_loss += loss
                if global_step % opt['log_step'] == 0:
                    duration = time.time() - start_time
                    print(
                        format_str.format(datetime.now(), global_step, max_steps, epoch,
                                          opt['num_epoch'], loss, duration, current_lr))
            # eval on dev (predictor uses F1; selectors use AUC, reusing the
            # score as a loss proxy for logging)
            print("Evaluating on dev set...")
            if self.model_type == 'predictor':
                dev_p, dev_r, dev_score, dev_loss = evaluate(self, dataset_dev)
            else:
                dev_score = evaluate(self, dataset_dev, evaluate_type='auc')[0]
                dev_loss = dev_score
            # print training information
            train_loss = train_loss / len(iterator_train) * opt['batch_size']  # avg loss per batch
            dev_loss = dev_loss / len(iterator_dev) * opt['batch_size']
            print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_score = {:.4f}".format(
                epoch, train_loss, dev_loss, dev_score))
            # save the current model
            model_file = opt['model_save_dir'] + '/checkpoint_epoch_{}.pt'.format(epoch)
            self.save(model_file, epoch)
            if epoch == 1 or dev_score > max(dev_score_history):  # new best
                copyfile(model_file, opt['model_save_dir'] + '/best_model.pt')
                print("new best model saved.")
                patience = 0
            else:
                patience = patience + 1
            # Drop non-milestone epoch checkpoints to save disk space.
            if epoch % opt['save_epoch'] != 0:
                os.remove(model_file)
            # change learning rate (decay on dev-score plateau for sgd/adagrad)
            if len(dev_score_history) > 10 and dev_score <= dev_score_history[-1] and \
                    opt['optim'] in ['sgd', 'adagrad']:
                current_lr *= opt['lr_decay']
                self.update_lr(current_lr)
            dev_score_history += [dev_score]
            print("")
            # Stop after num_epoch epochs; with patience > 0, keep going until
            # `patience` consecutive epochs pass without a new best.
            if opt['patience'] != 0:
                if patience == opt['patience'] and epoch > opt['num_epoch']:
                    break
            else:
                if epoch == opt['num_epoch']:
                    break
        print("Training ended with {} epochs.".format(epoch))
    def retrieve(self, dataset, k_samples, label_distribution=None):
        """Rank unlabeled instances and pick up to k_samples of them.

        Returns (meta_idxs, confidence_idxs): (example-id, predicted relation,
        gold relation) triples and (example-id, confidence) pairs. Selectors
        require *label_distribution* to derive per-relation quotas.
        """
        if self.model_type != 'predictor' and label_distribution is None:
            raise ValueError('Retrival from selector cannot be done without label_distribution')
        iterator_unlabeled = data.Iterator(
            dataset=dataset,
            batch_size=self.opt['batch_size'],
            repeat=False,
            train=False,
            shuffle=False,
            sort=True,
            sort_key=lambda x: -len(x.token),
            sort_within_batch=False)
        preds = []
        for batch in iterator_unlabeled:
            inputs, _ = batch_to_input(batch, self.opt['vocab_pad_id'])
            preds += self.predict(inputs)[1]
        meta_idxs = []
        confidence_idxs = []
        examples = iterator_unlabeled.data()
        num_instance = len(examples)
        if label_distribution:
            # Convert the relative label distribution into per-relation quotas.
            label_distribution = {
                k: math.ceil(v * k_samples)
                for k, v in label_distribution.items()
            }
        if self.model_type == 'predictor':
            # ranking by prediction confidence (max prob ** alpha), descending
            ranking = list(zip(range(num_instance), preds))
            ranking = sorted(
                ranking, key=lambda x: calc_confidence(x[1], self.opt['alpha']), reverse=True)
            # selection
            for eid, pred in ranking:
                if len(meta_idxs) == k_samples:
                    break
                rid, _ = arg_max(pred)
                val = calc_confidence(pred, self.opt['alpha'])
                rel = self.opt['rel_itos'][rid]
                if label_distribution:
                    # Skip once this relation's quota is exhausted.
                    if not label_distribution[rel]:
                        continue
                    label_distribution[rel] -= 1
                meta_idxs.append((eid, rel, examples[eid].relation))
                confidence_idxs.append((eid, val))
            return meta_idxs, confidence_idxs
        else:
            # Selector: fill each relation's quota with its top-scored instances.
            for rid in range(self.opt['num_class']):
                # ranking
                ranking = list(
                    zip(range(num_instance), [preds[k][rid] for k in range(num_instance)]))
                ranking = sorted(ranking, key=lambda x: x[1], reverse=True)
                rel = self.opt['rel_itos'][rid]
                # selection
                cnt = min(len(ranking), label_distribution.get(rel, 0))
                for k in range(cnt):
                    eid, val = ranking[k]
                    meta_idxs.append((eid, rel, examples[eid].relation))
                    confidence_idxs.append((eid, val**self.opt['beta']))
            meta_idxs.sort(key=lambda t: preds[t[0]][self.opt['rel_stoi'][t[1]]], reverse=True)
            return meta_idxs, confidence_idxs
        return meta_idxs  # NOTE: unreachable; both branches above return
    # train the model with a batch
    def update(self, inputs, target):
        """ Run a step of forward and backward model update. """
        self.model.train()
        self.optimizer.zero_grad()
        sl_confidence = inputs['sl_confidence']
        if self.model_type == 'pointwise':
            target = idx_to_onehot(target, self.opt)
        if self.opt['cuda']:
            target = target.cuda()
            inputs = dict([(k, v.cuda()) for k, v in inputs.items()])
        pr_confidence = inputs['pr_confidence']
        logits, _ = self.model(inputs)
        if self.model_type == 'pointwise':
            # Per-class BCE weighted by each sample's selector confidence.
            confidence = sl_confidence.unsqueeze(1).expand(-1, logits.size(1))
            if self.opt['cuda']:
                confidence = confidence.cuda()
            loss = F.binary_cross_entropy_with_logits(logits, target, weight=confidence)
            loss *= self.opt['num_class']
        elif self.model_type == 'pairwise':
            # Form a matrix with row_i indicate which samples are its negative samples (0, 1)
            matrix = torch.stack(
                [target.ne(rid) for rid in range(self.opt['num_class'])])  # R * B matrix
            matrix = matrix.index_select(0, target)  # B * B matrix
            confidence = sl_confidence.unsqueeze(1).expand_as(matrix)
            if self.opt['cuda']:
                confidence = confidence.cuda()
            pos_logits = logits.gather(1, target.view(-1, 1))  # B * 1 logits
            # B * B logits out[i][j] = j-th sample's score on class y[i]
            neg_logits = logits.t().index_select(0, target)
            # calculate pairwise loss
            loss = F.binary_cross_entropy_with_logits(
                pos_logits - neg_logits, (matrix.float() * 1 / 2 + 1 / 2) * confidence
            )
            loss *= self.opt['num_class']
        else:
            # Predictor: per-sample cross-entropy weighted by predictor confidence.
            loss = self.criterion(logits, target)
            loss = torch.mean(loss * pr_confidence)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.opt['max_grad_norm'])
        self.optimizer.step()
        loss_val = loss.item()
        return loss_val
    def predict(self, inputs, target=None):
        """ Run forward prediction. If unsort is True, recover the original order of the batch. """
        if self.opt['cuda']:
            inputs = dict([(k, v.cuda()) for k, v in inputs.items()])
            target = None if target is None else target.cuda()
        self.model.eval()
        logits, _ = self.model(inputs)
        loss = None if target is None else torch.mean(self.criterion(logits, target)).item()
        if self.model_type == 'predictor':
            probs = F.softmax(logits, dim=1).data.cpu().numpy().tolist()
            predictions = np.argmax(probs, axis=1).tolist()
        elif self.model_type == 'pointwise':
            probs = torch.sigmoid(logits).data.cpu().numpy().tolist()
            predictions = logits.data.cpu().numpy().tolist()
        elif self.model_type == 'pairwise':
            probs = torch.sigmoid(logits).data.cpu().numpy().tolist()
            predictions = logits.data.cpu().numpy().tolist()
        return predictions, probs, loss
    def update_lr(self, new_lr):
        # Delegate to the shared helper so all param groups are updated.
        torch_utils.change_lr(self.optimizer, new_lr)
    # save the model
    def save(self, filename, epoch):
        params = {
            'model': self.model.state_dict(),  # model parameters
            'encoder': self.model.encoder.state_dict(),
            'classifier': self.model.classifier.state_dict(),
            'config': self.opt,  # options
            'epoch': epoch,  # current epoch
            'model_type': self.model_type  # loss/selection mode of this trainer
        }
        try:
            torch.save(params, filename)
            print("model saved to {}".format(filename))
        except BaseException:
            print("[Warning: Saving failed... continuing anyway.]")
    # load the model
    def load(self, filename):
        try:
            checkpoint = torch.load(filename)
        except BaseException:
            print("Cannot load model from {}".format(filename))
            exit()
        self.model.encoder.load_state_dict(checkpoint['encoder'])
        self.model.classifier.load_state_dict(checkpoint['classifier'])
        self.opt = checkpoint['config']
        self.model_type = checkpoint['model_type']
        # Re-create the criterion to match the loaded model_type.
        if self.model_type == 'predictor':
            self.criterion = nn.CrossEntropyLoss()
        elif self.model_type == 'pointwise':
            self.criterion = nn.BCEWithLogitsLoss()
| 14,526 | 38.368564 | 102 | py |
PSGLoss | PSGLoss-main/data.py | import os
from PIL import Image
import torch.utils.data as data
import torchvision.transforms as transforms
import random
def cv_random_flip(img, label):
    """With probability 0.5, mirror both *img* and its *label* mask
    horizontally so the pair stays aligned; otherwise return them unchanged.
    (Vertical flipping was tried and intentionally disabled.)
    """
    if random.randint(0, 1) == 1:
        img = img.transpose(Image.FLIP_LEFT_RIGHT)
        label = label.transpose(Image.FLIP_LEFT_RIGHT)
    return img, label
class SalObjDataset(data.Dataset):
    """Paired (RGB image, grayscale saliency mask) dataset for training.

    Images are .jpg files under image_root, masks are .png files under
    gt_root; pairs are matched by sorted filename order and filtered to
    identical sizes. Both are resized to trainsize x trainsize.
    """
    def __init__(self, image_root, gt_root, trainsize,randomflip = False):
        self.trainsize = trainsize
        self.images = [image_root + f for f in os.listdir(image_root) if f.endswith('.jpg')]
        # self.gts = [gt_root + f for f in os.listdir(gt_root) if f.endswith('.jpg')
        #             or f.endswith('.png')]
        self.gts = [gt_root + f for f in os.listdir(gt_root) if f.endswith('.png')]
        self.images = sorted(self.images)
        self.gts = sorted(self.gts)
        # Drop any pair whose image and mask sizes disagree.
        self.filter_files()
        self.size = len(self.images)
        self.randomflip = randomflip
        self.img_transform = transforms.Compose([
            transforms.Resize((self.trainsize, self.trainsize)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        self.gt_transform = transforms.Compose([
            transforms.Resize((self.trainsize, self.trainsize)),
            transforms.ToTensor()])  #[0, 1.0] torch.FloadTensor
    def __getitem__(self, index):
        image = self.rgb_loader(self.images[index])
        gt = self.binary_loader(self.gts[index])
        # Flip BEFORE the tensor transforms so image and mask stay aligned.
        if self.randomflip:
            image, gt = cv_random_flip(image, gt)
        image = self.img_transform(image)
        gt = self.gt_transform(gt)
        # if self.randomflip:
        #     image, gt = cv_random_flip(image, gt)
        return image, gt
    def filter_files(self):
        # Keep only image/mask pairs with matching spatial dimensions.
        assert len(self.images) == len(self.gts)
        images = []
        gts = []
        for img_path, gt_path in zip(self.images, self.gts):
            img = Image.open(img_path)
            gt = Image.open(gt_path)
            if img.size == gt.size:
                images.append(img_path)
                gts.append(gt_path)
        self.images = images
        self.gts = gts
    def rgb_loader(self, path):
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('RGB')
    def binary_loader(self, path):
        with open(path, 'rb') as f:
            img = Image.open(f)
            # return img.convert('1')
            return img.convert('L')  #0,255
    def resize(self, img, gt):
        # Upscale pairs smaller than trainsize; masks use NEAREST to stay binary.
        assert img.size == gt.size
        w, h = img.size
        if h < self.trainsize or w < self.trainsize:
            h = max(h, self.trainsize)
            w = max(w, self.trainsize)
            return img.resize((w, h), Image.BILINEAR), gt.resize((w, h), Image.NEAREST)
        else:
            return img, gt
    def __len__(self):
        return self.size
def get_loader(image_root, gt_root, batchsize, trainsize, shuffle=True, num_workers=12, pin_memory=True,randomflip = False):
    """Build a DataLoader over the paired image/mask training set."""
    sal_dataset = SalObjDataset(image_root, gt_root, trainsize, randomflip)
    return data.DataLoader(
        dataset=sal_dataset,
        batch_size=batchsize,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
        drop_last=True,  # drop the short tail batch so batch size stays constant
    )
class test_dataset:
    # Sequential test-set reader (not a torch Dataset): call load_data()
    # once per sample; it returns (image, gt, name) and advances self.index.
    def __init__(self, image_root, gt_root, testsize):
        self.testsize = testsize
        # Images are .jpg; ground truth may be .jpg or .png.
        self.images = [image_root + f for f in os.listdir(image_root) if f.endswith('.jpg')]
        self.gts = [gt_root + f for f in os.listdir(gt_root) if f.endswith('.jpg')
                    or f.endswith('.png')]
        self.images = sorted(self.images)
        self.gts = sorted(self.gts)
        self.transform = transforms.Compose([
            transforms.Resize((self.testsize, self.testsize)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        self.gt_transform = transforms.ToTensor()
        self.size = len(self.images)
        self.index = 0
    def load_data(self):
        # Image becomes a batched (1, C, H, W) tensor; gt stays a PIL image.
        image = self.rgb_loader(self.images[self.index])
        image = self.transform(image).unsqueeze(0)
        gt = self.binary_loader(self.gts[self.index])
        name = self.images[self.index].split('/')[-1]
        if name.endswith('.jpg'):
            # Predictions are saved as .png, so swap the extension.
            name = name.split('.jpg')[0] + '.png'
        self.index += 1
        return image, gt, name
    def rgb_loader(self, path):
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('RGB')
    def binary_loader(self, path):
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('L')
class test_dataset2:
    # Like test_dataset, but ground truth is restricted to .png files and the
    # image/mask counts are printed for a quick sanity check.
    def __init__(self, image_root, gt_root, testsize):
        self.testsize = testsize
        self.images = [image_root + f for f in os.listdir(image_root) if f.endswith('.jpg')]
        self.gts = [gt_root + f for f in os.listdir(gt_root) if f.endswith('.png')]
        self.images = sorted(self.images)
        self.gts = sorted(self.gts)
        print (len(self.images))
        print (len(self.gts))
        self.transform = transforms.Compose([
            transforms.Resize((self.testsize, self.testsize)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        self.gt_transform = transforms.ToTensor()
        self.size = len(self.images)
        self.index = 0
    def load_data(self):
        # Returns (1xCxHxW image tensor, PIL grayscale gt, output filename).
        image = self.rgb_loader(self.images[self.index])
        image = self.transform(image).unsqueeze(0)
        gt = self.binary_loader(self.gts[self.index])
        name = self.images[self.index].split('/')[-1]
        if name.endswith('.jpg'):
            # Predictions are written as .png, so swap the extension.
            name = name.split('.jpg')[0] + '.png'
        self.index += 1
        return image, gt, name
    def rgb_loader(self, path):
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('RGB')
    def binary_loader(self, path):
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('L')
class test_dataset3:
    """Inference-only sequential reader: yields (image_tensor, save_name) pairs, no GT."""

    def __init__(self, image_root, gt_root, testsize):
        self.testsize = testsize
        jpg_names = (f for f in os.listdir(image_root) if f.endswith('.jpg'))
        png_names = (f for f in os.listdir(gt_root) if f.endswith('.png'))
        self.images = sorted(image_root + f for f in jpg_names)
        self.gts = sorted(gt_root + f for f in png_names)
        print (len(self.images))
        print (len(self.gts))
        self.transform = transforms.Compose([
            transforms.Resize((self.testsize, self.testsize)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
        self.gt_transform = transforms.ToTensor()
        self.size = len(self.images)
        self.index = 0

    def load_data(self):
        # Advance one sample; image is batched to (1, C, H, W).
        path = self.images[self.index]
        image = self.transform(self.rgb_loader(path)).unsqueeze(0)
        name = path.split('/')[-1]
        if name.endswith('.jpg'):
            # Output files are saved with a .png extension.
            name = name.split('.jpg')[0] + '.png'
        self.index += 1
        return image, name

    def rgb_loader(self, path):
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('RGB')

    def binary_loader(self, path):
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('L')
| 7,946 | 36.485849 | 124 | py |
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import pdb, os, argparse
from datetime import datetime
from model.ysmodel import *
from data import get_loader
from utils import clip_gradient, adjust_lr
# seed1 = 1026
# np.random.seed(seed1)
# torch.manual_seed(seed1)
parser = argparse.ArgumentParser()
# --- optimization / schedule ---
parser.add_argument('--epoch', type=int, default=100, help='epoch number')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
parser.add_argument('--batchsize', type=int, default=10, help='training batch size')
parser.add_argument('--trainsize', type=int, default=352, help='training dataset size')
parser.add_argument('--clip', type=float, default=0.5, help='gradient clipping margin')
# parser.add_argument('--is_ResNet', type=bool, default=False, help='VGG or ResNet backbone')
parser.add_argument('--modelchoice', type=str, default='ysmodel')
parser.add_argument('--decay_rate', type=float, default=0.1, help='decay rate of learning rate')
parser.add_argument('--decay_epoch', type=int, default=50, help='every n epochs decay learning rate')
parser.add_argument('--trainset', type=str, default='DUTS-TR')
# --- loss configuration ---
parser.add_argument('--loss', type=str, default='')
parser.add_argument('--onlypsgloss', action='store_true', help='only has psgloss') #not working
parser.add_argument('--psgloss', action='store_true', help='has PSGLosss')
parser.add_argument('--alpha', type=float, default=1.0, help='has PSGLosss')
parser.add_argument('--kernel_size', type=int, default=3, help='has psgloss')
# store_false: passing --weighteddim DISABLES weighting (default is True).
parser.add_argument('--weighteddim', action='store_false', help='weighted dim')
parser.add_argument('--randomflip', action='store_true', help='randomflip')
opt = parser.parse_args()
print(opt)
# print('Learning Rate: {} ResNet: {} Modelchoice: {} , postprocessing: {}'.format(opt.lr, opt.is_ResNet ,opt.modelchoice, opt.postp))
# build models
# if opt.is_ResNet:
if opt.modelchoice == 'ysmodel':
    model = ysmodel(Weighted = opt.weighteddim)
else:
    # Any other value silently falls back to the same model.
    print('using default ysmodel')
    model = ysmodel(Weighted = opt.weighteddim)
# else:
#     model = CPD_VGG()
model.cuda()
params = model.parameters()
optimizer = torch.optim.Adam(params, opt.lr)
# /home/syang/ysdata/salientobj_dataset/DUTS/DUTS-TR/DUTS-TR-Image
image_root = '/home/syang/ysdata/salientobj_dataset/DUTS/DUTS-TR/DUTS-TR-Image/'
gt_root = '/home/syang/ysdata/salientobj_dataset/DUTS/DUTS-TR/DUTS-TR-Mask/'
train_loader = get_loader(image_root, gt_root, batchsize=opt.batchsize, trainsize=opt.trainsize, randomflip = opt.randomflip)
total_step = len(train_loader)
# Criterion selection. The training loop calls CE(map.sigmoid(), gts), so all
# of these receive probabilities. NOTE(review): with the default
# BCEWithLogitsLoss the maps end up sigmoided twice (once in train(), once
# inside the loss) — confirm this is intended.
if opt.loss =='l2':
    CE =torch.nn.MSELoss()
elif opt.loss =='kld':
    CE =KldLoss()
elif opt.loss =='l1':
    CE =torch.nn.L1Loss()
elif opt.loss =='tvd':
    CE =tvdLoss()
elif opt.loss =='bce':
    CE =torch.nn.BCELoss()
elif opt.loss =='dice':
    CE =DiceLoss()
elif opt.loss =='dicebce':
    CE =DicebceLoss()
else:
    CE =torch.nn.BCEWithLogitsLoss()
    print ('using normal bce loss')
# CE2 =torch.nn.BCELoss()
def print_network(model, name):
    """Print a model's name, structure, and total parameter count."""
    total = sum(p.numel() for p in model.parameters())
    print(name)
    print(model)
    print("The number of parameters: {}".format(total))
def train(train_loader, model, optimizer, epoch):
    """Run one training epoch; optionally add the PSG self-guidance loss.

    Relies on the script-level globals `opt`, `CE`, `total_step`, and the
    `postpnet` / `clip_gradient` helpers. Checkpoints are written every 20
    epochs under a directory name that encodes the configuration.
    """
    model.train()
    save_path ='models2/' + opt.modelchoice + str(opt.trainsize)+ '_' + str(opt.psgloss)+ str(opt.alpha) + '_' + str(opt.kernel_size) + '_aug' + str(opt.randomflip) + '_bs' + str(opt.batchsize) + '_' + str(opt.weighteddim) + '_' + str(opt.loss) + '_lr' + str(opt.lr)+ '/'
    print (save_path)
    # print_network(model, opt.modelchoice )
    for i, pack in enumerate(train_loader, start=1):
        optimizer.zero_grad()
        images, gts = pack
        images = Variable(images)
        gts = Variable(gts)
        images = images.cuda()
        gts = gts.cuda()
        # Model returns three logit maps; only dets (and optionally the PSG
        # terms) contribute to the optimized loss below.
        _, atts, dets = model(images)
        # NOTE(review): maps are sigmoided here; if CE is BCEWithLogitsLoss
        # (the script default) sigmoid is applied twice — confirm intended.
        loss1 = CE(atts.sigmoid(), gts)
        loss2 = CE(dets.sigmoid(), gts)
        if opt.psgloss:
            with torch.no_grad():
                # Build soft PSG targets from the current predictions,
                # gradient-free; a fresh postpnet module is created per call.
                gts1 =postpnet( kernel_size=opt.kernel_size)(atts,gts)
                #
                gts2 =postpnet( kernel_size=opt.kernel_size)(dets,gts)
            loss1a = CE(atts.sigmoid(), gts1)
            loss2a = CE(dets.sigmoid(), gts2)
            if opt.onlypsgloss:
                loss = loss2a
            else:
                loss = loss2 + loss2a *opt.alpha
        else:
            loss = loss2
        # loss = loss2 + loss1
        loss.backward()
        clip_gradient(optimizer, opt.clip)
        optimizer.step()
        if i % 100 == 0 or i == total_step:
            if opt.psgloss:
                print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], Loss1: {:.4f} Loss1a: {:.4f} Loss2: {:0.4f} Loss2a: {:.4f} Total loss: {:.4f} '.
                      format(datetime.now(), epoch, opt.epoch, i, total_step, loss1.data, loss1a.data, loss2.data, loss2a.data, loss.data))
            else:
                print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], Loss1: {:.4f} Loss2: {:0.4f}'.
                      format(datetime.now(), epoch, opt.epoch, i, total_step, loss1.data, loss2.data))
    # if opt.is_ResNet:
    #     save_path = 'models/CPD_Resnet_'+ opt.modelchoice + '_' + str(opt.postp) + '/'
    # else:
    #     save_path = 'models/CPD_VGG_'+ opt.modelchoice + '/'
    # print (save_path)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # Save every 20th epoch (epochs 19, 39, ... given the 1-based loop).
    if (epoch+1) % 20 == 0 : #5 or epoch <6
        print ('saving %d' % epoch)
        torch.save(model.state_dict(), save_path + opt.trainset + '_w.pth' + '.%d' % epoch)
print("Let's go!")
# NOTE(review): range(1, opt.epoch) runs opt.epoch - 1 epochs (1..epoch-1) —
# confirm this off-by-one relative to --epoch is intended.
for epoch in range(1, opt.epoch):
    adjust_lr(optimizer, opt.lr, epoch, opt.decay_rate, opt.decay_epoch)
    train(train_loader, model, optimizer, epoch)
| 5,861 | 36.819355 | 273 | py |
import torch
import torch.nn.functional as F
import numpy as np
import pdb, os, argparse
from scipy import misc
from model.ysmodel import *
from data import test_dataset2
import time
import cv2
def get_test_info(sal_mode='ECSSD'):
    """Return (image_root, image_source) paths for a benchmark test set.

    Parameters
    ----------
    sal_mode : str
        One of the dataset keys below ('ECSSD', 'PASCAL', 'DUT-OMRON',
        'HKUIS', 'SOD', 'DUTS-TEST', 'm_r', 'kk').

    Returns
    -------
    tuple[str, str]
        Directory of test images and path to the .lst file listing them
        (empty string when no list file exists).

    Raises
    ------
    ValueError
        For an unknown ``sal_mode``. (The original if/elif chain fell
        through and crashed later with UnboundLocalError instead.)
    """
    base = '/home/syang/ysdata/salientobj_dataset/dataset_test/'
    datasets = {
        'ECSSD': (base + 'ECSSD/Imgs/', base + 'ECSSD/test.lst'),
        'PASCAL': (base + 'PASCALS/Imgs/', base + 'PASCALS/test.lst'),
        'DUT-OMRON': (base + 'DUTOMRON/Imgs/', base + 'DUTOMRON/test.lst'),
        'HKUIS': (base + 'HKU-IS/Imgs/', base + 'HKU-IS/test.lst'),
        'SOD': (base + 'SOD/Imgs/', base + 'SOD/test.lst'),
        'DUTS-TEST': (base + 'DUTS-TE/Imgs/', base + 'DUTS-TE/test.lst'),
        # for speed test n.a.
        'm_r': (base + 'MSRA/Imgs_resized/', base + 'MSRA/test_resized.lst'),
        # for speed test n.a.
        'kk': ('/home/syang/ysdata/QA_dataset/koniq10k_1024x768/1024x768/', ''),
    }
    try:
        return datasets[sal_mode]
    except KeyError:
        raise ValueError('unknown sal_mode: {}'.format(sal_mode))
parser = argparse.ArgumentParser()
parser.add_argument('--testsize', type=int, default=352, help='testing size')
# parser.add_argument('--is_ResNet', type=bool, default=False, help='VGG or ResNet backbone')
parser.add_argument('--checkpointfile', type=str, default='', help='loaded model file')
parser.add_argument('--modelchoice', type=str, default='ysmodel')
parser.add_argument('--loss', type=str, default='')
parser.add_argument('--psgloss', action='store_true', help='has postploss')
parser.add_argument('--alpha', type=float, default=1.0, help='has postploss')
parser.add_argument('--kernel_size', type=int, default=3, help='has postploss')
parser.add_argument('--batchsize', type=int, default=10, help='training batch size')
# store_false: passing --weighteddim DISABLES the weighted DIM branches.
parser.add_argument('--weighteddim', action='store_false', help='weighted dim')
# parser.add_argument('--otherbackbone', action='store_true', help='weighted dim')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
parser.add_argument('--noclip', action='store_true', help='noclip')
parser.add_argument('--modelv', type=str, default='')
opt = parser.parse_args()
# Build the model (any --modelchoice value yields ysmodel) and load weights.
if opt.modelchoice == 'ysmodel':
    model = ysmodel(Weighted = opt.weighteddim)
else:
    print('using default ysmodel')
    model = ysmodel(Weighted = opt.weighteddim)
print (opt.checkpointfile)
# model=nn.DataParallel(model) #multi-gpu testing
model.load_state_dict(torch.load(opt.checkpointfile))
# model = CPD_VGG()
model.cuda()
model.eval()
# Benchmarks to evaluate; the commented lists are leftover alternatives.
# test_datasets =['ECSSD']
# test_datasets = ['PASCAL', 'ECSSD', 'DUT-OMRON', 'DUTS-TEST', 'HKUIS','SOD']
# test_datasets = ['ECSSD', 'HKUIS' ,'PASCAL', 'SOD', 'DUT-OMRON', 'DUTS-TEST']
test_datasets = ['ECSSD', 'HKUIS' ,'PASCAL', 'SOD', 'DUT-OMRON', 'DUTS-TEST']
# test_datasets = ['ECSSD', 'HKUIS' ]
# test_datasets = ['DUTS-TEST' ]
# Evaluate every benchmark: run the model over each image, time the forward
# pass, and save the 8-bit prediction maps.
for dataset in test_datasets:
    # BUGFIX: the parser only defines --testsize; the original read the
    # nonexistent opt.trainsize here, raising AttributeError at runtime.
    save_path = './results/' + opt.modelchoice + str(opt.testsize) + '_' + str(opt.psgloss) + '/' + dataset + '/'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    image_root, _ = get_test_info(dataset)
    # GT is loaded from the image directory; the loader filters by extension.
    gt_root = image_root
    print('testing')
    print(image_root)
    print(save_path)
    test_loader = test_dataset2(image_root, gt_root, opt.testsize)
    accu_time = 0.0
    for i in range(test_loader.size):
        image, gt, name = test_loader.load_data()
        gt = np.asarray(gt, np.float32)
        gt /= (gt.max() + 1e-8)  # normalize GT to [0, 1] (only its shape is used below)
        image = image.cuda()
        curr0 = time.time()
        _, _, res = model(image)
        accu_time += time.time() - curr0
        # F.interpolate replaces the deprecated F.upsample (same semantics).
        res = F.interpolate(res, size=gt.shape, mode='bilinear', align_corners=False)
        res = res.sigmoid().data.cpu().numpy().squeeze()
        res = 255 * res
        cv2.imwrite(save_path + name, res)
    average_time = accu_time / test_loader.size
    print('average_time:', average_time)
| 4,976 | 41.177966 | 114 | py |
import torch.nn as nn
import math
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """3x3 convolution with 'same' padding.

    BUGFIX: padding now equals the dilation rate so the spatial size is
    preserved for any dilation (the original hard-coded padding=1, which
    shrank the output whenever dilation > 1). For the default dilation=1
    the behavior is unchanged; this matches torchvision's ResNet conv3x3.
    """
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     dilation=dilation, padding=dilation, bias=False)
class BasicBlock(nn.Module):
    # Standard two-conv ResNet basic block (ResNet-18/34 style).
    # NOTE(review): the `dilation` argument is accepted but never forwarded
    # to conv3x3, so dilated basic blocks are not actually supported — confirm.
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation = 1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Project the identity when stride/channels differ from the main path.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 (dilatable) -> 1x1 expand (x4)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation = 1):
        super(Bottleneck, self).__init__()
        # 1x1 channel reduction
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 spatial conv; padding == dilation keeps "same" size at stride 1
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               dilation=dilation, padding=dilation, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 expansion back to planes * 4
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Optional projection of the identity branch.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + shortcut
        return self.relu(out)
class B2_ResNet(nn.Module):
    # ResNet50 with two branches
    """ResNet-50 trunk whose layer3/layer4 are duplicated into two branches.

    Both branches share conv1..layer2; forward returns (x1, x2), one
    2048-channel feature map per branch.
    """
    def __init__(self):
        # self.inplanes = 128
        self.inplanes = 64
        super(B2_ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(Bottleneck, 64, 3)
        self.layer2 = self._make_layer(Bottleneck, 128, 4, stride=2)
        self.layer3_1 = self._make_layer(Bottleneck, 256, 6, stride=2)
        self.layer4_1 = self._make_layer(Bottleneck, 512, 3, stride=2)
        # Second branch restarts from layer2's 512-channel output.
        self.inplanes = 512
        self.layer3_2 = self._make_layer(Bottleneck, 256, 6, stride=2)
        self.layer4_2 = self._make_layer(Bottleneck, 512, 3, stride=2)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He (fan-out) initialization for conv weights.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one ResNet stage; the downsample branch projects the identity
        # when the stride or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x1 = self.layer3_1(x)
        x1 = self.layer4_1(x1)
        x2 = self.layer3_2(x)
        x2 = self.layer4_2(x2)
        return x1, x2
class B1_ResNet(nn.Module):
    # ResNet50 with 1 branches
    """Single-branch ResNet-50 trunk; forward returns the final 2048-channel map."""
    def __init__(self):
        # self.inplanes = 128
        self.inplanes = 64
        super(B1_ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(Bottleneck, 64, 3)
        self.layer2 = self._make_layer(Bottleneck, 128, 4, stride=2)
        # NOTE(review): the '_1' suffix mirrors the two-branch variant's
        # naming — presumably kept for weight-loading compatibility; confirm.
        self.layer3_1 = self._make_layer(Bottleneck, 256, 6, stride=2)
        self.layer4_1 = self._make_layer(Bottleneck, 512, 3, stride=2)
        self.inplanes = 512
        # self.layer3_2 = self._make_layer(Bottleneck, 256, 6, stride=2)
        # self.layer4_2 = self._make_layer(Bottleneck, 512, 3, stride=2)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He (fan-out) initialization for conv weights.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Project the identity when stride/channels change.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x1 = self.layer3_1(x)
        x1 = self.layer4_1(x1)
        # x2 = self.layer3_2(x)
        # x2 = self.layer4_2(x2)
        return x1
class B1_DRN(nn.Module):
    # ResNet50 with 1 branches
    """Dilated single-branch ResNet-50: layers 3/4 keep stride 1 and use
    dilation 2/4 instead, preserving spatial resolution (DRN style)."""
    def __init__(self):
        # self.inplanes = 128
        self.inplanes = 64
        super(B1_DRN, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(Bottleneck, 64, 3)
        self.layer2 = self._make_layer(Bottleneck, 128, 4, stride=2)
        self.layer3_1 = self._make_layer(Bottleneck, 256, 6, stride=1, dilation= 2)
        self.layer4_1 = self._make_layer(Bottleneck, 512, 3, stride=1, dilation= 4)
        self.inplanes = 512
        # self.layer3_2 = self._make_layer(Bottleneck, 256, 6, stride=2)
        # self.layer4_2 = self._make_layer(Bottleneck, 512, 3, stride=2)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He (fan-out) initialization for conv weights.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, dilation= 1 ):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        # NOTE(review): only this first block receives `dilation`; the loop
        # below builds the remaining blocks with the default dilation=1 —
        # confirm this hybrid dilation scheme is intended.
        layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x1 = self.layer3_1(x)
        x1 = self.layer4_1(x1)
        # x2 = self.layer3_2(x)
        # x2 = self.layer4_2(x2)
        return x1
import torch
import torch.nn as nn
import torchvision.models as models
from model.ResNet import B2_ResNet,B1_ResNet,B1_DRN
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
class DiceLoss(nn.Module):
    """Soft Dice loss: 1 - mean per-sample Dice coefficient (smooth = 1)."""

    def __init__(self):
        super(DiceLoss, self).__init__()

    def forward(self, input, target):
        batch = target.size(0)
        smooth = 1.
        pred = input.view(batch, -1)
        ref = target.view(batch, -1)
        overlap = (pred * ref).sum(1)
        dice = 2 * (overlap + smooth) / (pred.sum(1) + ref.sum(1) + smooth)
        return 1 - dice.sum() / batch
class KldLoss(nn.Module):
    """KL divergence KL(P || Q) between per-sample normalized maps.

    Target and prediction are each flattened and normalized to sum to 1
    (eps-stabilized); the divergences are summed and averaged over the batch.
    """

    def __init__(self):
        super(KldLoss, self).__init__()

    def forward(self, input, target):
        batch = target.size(0)
        eps = 1e-8
        pred = input.view(batch, -1)
        ref = target.view(batch, -1)
        Q = pred / (pred.sum(1, keepdim=True) + eps)   # predicted distribution
        P = ref / (ref.sum(1, keepdim=True) + eps)     # ground-truth distribution
        kld = P * torch.log(eps + P / (eps + Q))
        return kld.sum() / batch
class tvdLoss(nn.Module):
    """Total-variation distance sum(|P - Q|) between per-sample normalized maps."""

    def __init__(self):
        super(tvdLoss, self).__init__()

    def forward(self, input, target):
        batch = target.size(0)
        eps = 1e-8
        pred = input.view(batch, -1)
        ref = target.view(batch, -1)
        Q = pred / (pred.sum(1, keepdim=True) + eps)   # predicted distribution
        P = ref / (ref.sum(1, keepdim=True) + eps)     # ground-truth distribution
        return (P - Q).abs().sum() / batch
class DicebceLoss(nn.Module):
    """Sum of soft Dice loss (smooth = 1) and BCE; expects probabilities in [0, 1]."""

    def __init__(self):
        super(DicebceLoss, self).__init__()

    def forward(self, input, target):
        batch = target.size(0)
        smooth = 1.
        pred = input.view(batch, -1)
        ref = target.view(batch, -1)
        overlap = (pred * ref).sum(1)
        dice = 2 * (overlap + smooth) / (pred.sum(1) + ref.sum(1) + smooth)
        dice_loss = 1 - dice.sum() / batch
        bce_loss = torch.nn.BCELoss()(input, target)
        return dice_loss + bce_loss
class tvdbceLoss(nn.Module):
    """Sum of total-variation distance and BCE; expects probabilities in [0, 1]."""

    def __init__(self):
        super(tvdbceLoss, self).__init__()

    def forward(self, input, target):
        batch = target.size(0)
        eps = 1e-8
        pred = input.view(batch, -1)
        ref = target.view(batch, -1)
        Q = pred / (pred.sum(1, keepdim=True) + eps)   # predicted distribution
        P = ref / (ref.sum(1, keepdim=True) + eps)     # ground-truth distribution
        tvd_loss = (P - Q).abs().sum() / batch
        bce_loss = torch.nn.BCELoss()(input, target)
        return tvd_loss + bce_loss
##########psg loss module
class postpnet(nn.Module):
    """PSG label generator: max-pool the logits, squash, then gate the GT.

    Given raw prediction logits x and ground truth y, returns
    sigmoid(maxpool(x)) * y — a softened copy of the GT that down-weights
    regions where the (spatially dilated) prediction is weak.
    """

    def __init__(self, kernel_size=3):
        super(postpnet, self).__init__()
        pad = (kernel_size - 1) // 2  # "same" padding for odd kernel sizes
        self.maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=pad)

    def forward(self, x, y):
        pooled = self.maxpool(x)
        gate = pooled.sigmoid()
        return gate * y
#########model def######################
class BasicConv2d(nn.Module):
    """Conv2d + BatchNorm building block (no activation applied in forward).

    NOTE(review): a ReLU module is constructed but never used in forward —
    callers appear to apply their own activations; confirm this is intended.
    """
    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes,
                              kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, bias=False)
        self.bn = nn.BatchNorm2d(out_planes)
        self.relu = nn.ReLU(inplace=True)  # unused in forward (see note above)
    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return x
class DIM_BAM(nn.Module):
    # RFB-like multi-scale module
    """Multi-dilation feature module with learned per-branch weighting.

    Three two-conv branches with final dilations 2/4/8 run over a
    1x1-reduced input. A squeeze-excitation-style FC head (global average
    pool -> FC -> sigmoid) predicts one weight per branch; when ``Weighted``
    the branch outputs are combined with those weights, otherwise summed.
    A residual connection to the reduced input follows.
    """
    def __init__(self, in_channel, out_channel,Weighted = True, reduction=4, shortcut = True):
        super(DIM_BAM, self).__init__()
        self.relu = nn.ReLU(True)
        self.Weighted = Weighted
        self.shortcut = shortcut  # NOTE(review): stored but unused (see commented code in forward)
        self.branch0 = nn.Sequential(
            BasicConv2d(in_channel, out_channel, 1),
        )
        self.branch1 = nn.Sequential(
            # BasicConv2d(in_channel, out_channel, 1),
            BasicConv2d(out_channel, out_channel, 3, padding=1, dilation=1),
            # BasicConv2d_nobn(out_channel, out_channel, kernel_size=(1, 3), padding=(0, 1)),
            # BasicConv2d_nobn(out_channel, out_channel, kernel_size=(3, 1), padding=(1, 0)),
            BasicConv2d(out_channel, out_channel, 3, padding=2, dilation=2)
        )
        self.branch2 = nn.Sequential(
            # BasicConv2d(in_channel, out_channel, 1),
            BasicConv2d(out_channel, out_channel, 3, padding=1, dilation=1),
            BasicConv2d(out_channel, out_channel, 3, padding=4, dilation=4)
        )
        self.branch3 = nn.Sequential(
            # BasicConv2d(in_channel, out_channel, 1),
            BasicConv2d(out_channel, out_channel, 3, padding=1, dilation=1),
            BasicConv2d(out_channel, out_channel, 3, padding=8, dilation=8)
        )
        self.conv_cat = BasicConv2d(out_channel, out_channel, 3, padding=1)
        self.conv_res = BasicConv2d(out_channel, out_channel, 1)
        # Attention head: global pool -> bottleneck FC -> 3 sigmoid weights.
        self.fc1 = nn.Sequential(
            nn.Linear(out_channel, out_channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(out_channel // reduction, 3, bias=False),
            nn.Sigmoid()
        )
    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x0)
        x2 = self.branch2(x0)
        x3 = self.branch3(x0)
        b, c, _, _ = x0.size()
        # Global average pool of the reduced features drives the branch weights.
        y = F.avg_pool2d(x0, kernel_size=x0.size()[2:]).view(b, c)
        y = self.fc1(y).view(b, 3, 1, 1)
        yall = list( torch.split(y,1,dim=1))
        y1,y2,y3 = yall[0],yall[1],yall[2]
        if self.Weighted :
            # po = x1 * y1*3./(y1+y2+y3) + x2*y2*3./(y1+y2+y3) +x3*y3*3./(y1+y2+y3)
            po = x1 * y1 + x2*y2 +x3*y3
        else:
            po = x1+x2+x3
        # if self.shortcut:
        #     po = po + x0
        # po = x1 * y1 + x2*y2 +x3*y3
        x_cat = self.conv_cat(po)
        x_cat = self.conv_res(x_cat)
        # Residual connection to the reduced input.
        x = self.relu(x_cat + x0)
        return x
class fpn_aggregation(nn.Module):
    # dense aggregation, it can be replaced by other aggregation model, such as DSS, amulet, and so on.
    # used after MSF
    #bn version
    """FPN-style top-down aggregation over four feature levels.

    forward(x1..x4) expects features ordered deepest first; it returns
    (p3, p4, p5), where p3 is upsampled twice and p4 once before returning.
    """
    def __init__(self, channel,Weighted =True):
        super(fpn_aggregation, self).__init__()
        self.relu = nn.ReLU(True)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        # BasicConv2d( 256, channel, kernel_size=1, stride=1, padding=0)
        self.smooth1 = BasicConv2d(channel, channel, kernel_size=3, stride=1, padding=1)
        self.smooth2 = BasicConv2d(channel, channel, kernel_size=3, stride=1, padding=1)
        self.smooth3 = BasicConv2d(channel, channel, kernel_size=3, stride=1, padding=1)
        self.latlayer1 = BasicConv2d(channel, channel, kernel_size=1, stride=1, padding=0)
        self.latlayer2 =BasicConv2d(channel, channel, kernel_size=1, stride=1, padding=0)
        self.latlayer3 = BasicConv2d( channel , channel, kernel_size=1, stride=1, padding=0)
        self.latlayer4 =BasicConv2d( channel , channel, kernel_size=1, stride=1, padding=0)
    def _upsample_add(self, x, y):
        # Resize x to y's spatial size and add (classic FPN lateral merge).
        # NOTE: F.upsample is deprecated in modern torch (use F.interpolate).
        _,_,H,W = y.size()
        return F.upsample(x, size=(H,W), mode='bilinear') + y
    def forward(self, x1, x2, x3 ,x4):
        # x1_1 =
        x1 = self.latlayer1(x1)
        p3 = self._upsample_add(x1,self.latlayer2(x2))
        p3 = self.smooth1(p3)
        p3 = self.upsample( p3)
        p4 = self._upsample_add(p3,self.latlayer3(x3))
        # p4o = self.dimrfb(p4)
        p4 = self.smooth2(p4)
        p5 = self._upsample_add(p4,self.latlayer4(x4))
        p5 = self.smooth3(p5)
        # p5o = self.dimrfb(p5)
        p4 = self.upsample( p4)
        p3 = self.upsample( p3)
        return p3,p4,p5
class ysmodel(nn.Module):
    # resnet based encoder decoder
    """Salient-object detector: B1_ResNet encoder + DIM_BAM / FPN decoder.

    forward returns three 4x-upsampled logit maps
    (am, attention_map, detection_map); detection_map passes through three
    additional DIM_BAM refinement blocks before the shared output head.
    """
    def __init__(self, channel=64, Weighted = True): #32
        super(ysmodel, self).__init__()
        self.resnet = B1_ResNet()
        # Per-stage reduction to `channel` features (1x1 conv for stage 1,
        # DIM_BAM multi-dilation modules for the deeper stages).
        self.rfb1_1 = BasicConv2d( 256, channel, kernel_size=1, stride=1, padding=0)
        self.rfb2_1 = DIM_BAM( 512, channel, Weighted)
        self.rfb3_1 = DIM_BAM( 1024, channel, Weighted)
        self.rfb4_1 = DIM_BAM( 2048, channel, Weighted)
        self.agg1 = fpn_aggregation(channel, Weighted)
        self.fusion0 = nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1)
        self.fusion1 = nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1)
        self.finalout = nn.Conv2d( channel, 1, kernel_size=1, stride=1, padding=0)
        self.dimrfb2 = DIM_BAM(channel, channel, Weighted)
        self.dimrfb2a = DIM_BAM(channel, channel, Weighted)
        self.dimrfb2b = DIM_BAM(channel, channel, Weighted)
        self.upsample = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False)
        if self.training:
            # Load ImageNet-pretrained ResNet-50 weights into the backbone.
            self.initialize_weights()
    def forward(self, x):
        x = self.resnet.conv1(x)
        x = self.resnet.bn1(x)
        x = self.resnet.relu(x)
        x = self.resnet.maxpool(x)
        x1 = self.resnet.layer1(x)  # 256 x 64 x 64
        x2 = self.resnet.layer2(x1)  # 512 x 32 x 32
        x2_1 = x2
        x3_1 = self.resnet.layer3_1(x2_1)  # 1024 x 16 x 16
        x4_1 = self.resnet.layer4_1(x3_1)  # 2048 x 8 x 8
        x1_1 = self.rfb1_1(x1)
        x2_1 = self.rfb2_1(x2_1)
        x3_1 = self.rfb3_1(x3_1)
        x4_1 = self.rfb4_1(x4_1)
        # Top-down aggregation; deepest features enter first.
        am, attention_map, detection_map = self.agg1(x4_1, x3_1, x2_1,x1_1)
        # Three extra refinement stages for the final detection branch.
        detection_map = self.dimrfb2(detection_map)
        detection_map = self.dimrfb2a(detection_map)
        detection_map = self.dimrfb2b(detection_map)
        # Shared fusion/output head applied to each map.
        am = self.finalout(self.fusion1(self.fusion0(am)))
        attention_map = self.finalout(self.fusion1(self.fusion0(attention_map)))
        detection_map = self.finalout(self.fusion1(self.fusion0(detection_map)))
        return self.upsample(am),self.upsample(attention_map), self.upsample(detection_map)
    def initialize_weights(self):
        # Copy torchvision's pretrained ResNet-50 weights into the backbone;
        # '_1'-suffixed branch layers reuse the same-named original layer.
        res50 = models.resnet50(pretrained=True)
        pretrained_dict = res50.state_dict()
        all_params = {}
        for k, v in self.resnet.state_dict().items():
            if k in pretrained_dict.keys():
                v = pretrained_dict[k]
                all_params[k] = v
            elif '_1' in k:
                # e.g. 'layer3_1.0.conv1.weight' -> 'layer3.0.conv1.weight'
                name = k.split('_1')[0] + k.split('_1')[1]
                v = pretrained_dict[name]
                all_params[k] = v
        assert len(all_params.keys()) == len(self.resnet.state_dict().keys())
        self.resnet.load_state_dict(all_params)
| 11,529 | 35.37224 | 103 | py |
import copy
import json
import logging
import os
import pickle
import random
import statistics
import sys
import time
import numpy as np
import torch
import yaml
from inclearn.lib import factory
from inclearn.lib import logger as logger_lib
from inclearn.lib import metrics, results_utils, utils
logger = logging.getLogger(__name__)
def train(args):
    """Run the incremental-learning experiment once per configured seed.

    Generator: for every task of every run it yields
    (avg_inc_acc, last_acc, forgetting, is_last_task), then logs aggregate
    statistics across all seeds at the end.
    """
    logger_lib.set_logging_level(args["logging"])

    autolabel = _set_up_options(args)
    if args["autolabel"]:
        args["label"] = autolabel

    if args["label"]:
        logger.info("Label: {}".format(args["label"]))
        try:
            # Set the terminal/tmux window title to the run label (best effort).
            os.system("echo '\ek{}\e\\'".format(args["label"]))
        except:  # noqa: E722 — deliberate best-effort; cosmetic only, never abort a run
            pass

    # Fail fast on impossible resume/save configurations.
    if args["resume"] and not os.path.exists(args["resume"]):
        raise IOError(f"Saved model {args['resume']} doesn't exist.")

    if args["save_model"] != "never" and args["label"] is None:
        raise ValueError(f"Saving model every {args['save_model']} but no label was specified.")

    seed_list = copy.deepcopy(args["seed"])
    device = copy.deepcopy(args["device"])

    start_date = utils.get_date()

    orders = copy.deepcopy(args["order"])
    del args["order"]
    if orders is not None:
        # A fixed class order must be one list of ints per seed.
        assert isinstance(orders, list) and len(orders)
        assert all(isinstance(o, list) for o in orders)
        assert all([isinstance(c, int) for o in orders for c in o])
    else:
        orders = [None for _ in range(len(seed_list))]

    avg_inc_accs, last_accs, forgettings = [], [], []
    for i, seed in enumerate(seed_list):
        logger.warning("Launching run {}/{}".format(i + 1, len(seed_list)))
        args["seed"] = seed
        args["device"] = device

        start_time = time.time()

        for avg_inc_acc, last_acc, forgetting in _train(args, start_date, orders[i], i):
            yield avg_inc_acc, last_acc, forgetting, False

        # Metrics from the final task of this run.
        avg_inc_accs.append(avg_inc_acc)
        last_accs.append(last_acc)
        forgettings.append(forgetting)

        logger.info("Training finished in {}s.".format(int(time.time() - start_time)))
        yield avg_inc_acc, last_acc, forgetting, True

    logger.info("Label was: {}".format(args["label"]))

    logger.info(
        "Results done on {} seeds: avg: {}, last: {}, forgetting: {}".format(
            len(seed_list), _aggregate_results(avg_inc_accs), _aggregate_results(last_accs),
            _aggregate_results(forgettings)
        )
    )
    logger.info("Individual results avg: {}".format([round(100 * acc, 2) for acc in avg_inc_accs]))
    logger.info("Individual results last: {}".format([round(100 * acc, 2) for acc in last_accs]))
    logger.info(
        "Individual results forget: {}".format([round(100 * acc, 2) for acc in forgettings])
    )

    logger.info(f"Command was {' '.join(sys.argv)}")
def _train(args, start_date, class_order, run_id):
    """Run one full incremental-learning experiment for a single seed.

    Builds the dataset/model, then loops over tasks: prepare, train, conclude
    and evaluate each one. Yields ``(avg_inc_acc, last_acc, forgetting)`` once
    per completed task so the caller can report intermediate results mid-run.

    Args:
        args: Mutable configuration dict (seed/device already set for this run).
        start_date: Timestamp string used to build the results folder name.
        class_order: Explicit class ordering for this run, or None.
        run_id: Index of this run among the configured seeds.
    """
    _set_global_parameters(args)
    inc_dataset, model = _set_data_model(args, class_order)
    results, results_folder = _set_results(args, start_date)

    # Rehearsal memory (exemplars) carried over from one task to the next.
    memory, memory_val = None, None
    metric_logger = metrics.MetricLogger(
        inc_dataset.n_tasks, inc_dataset.n_classes, inc_dataset.increments
    )

    for task_id in range(inc_dataset.n_tasks):
        task_info, train_loader, val_loader, test_loader = inc_dataset.new_task(memory, memory_val)
        # Optional early stop after a fixed number of tasks.
        if task_info["task"] == args["max_task"]:
            break

        model.set_task_info(task_info)

        # ---------------
        # 1. Prepare Task
        # ---------------
        model.eval()
        model.before_task(train_loader, val_loader if val_loader else test_loader)

        # -------------
        # 2. Train Task
        # -------------
        _train_task(args, model, train_loader, val_loader, test_loader, run_id, task_id, task_info)

        # ----------------
        # 3. Conclude Task
        # ----------------
        model.eval()
        _after_task(args, model, inc_dataset, run_id, task_id, results_folder)

        # ------------
        # 4. Eval Task
        # ------------
        # Evaluation always covers every class seen so far (0 .. max_class).
        logger.info("Eval on {}->{}.".format(0, task_info["max_class"]))
        ypreds, ytrue = model.eval_task(test_loader)
        metric_logger.log_task(
            ypreds, ytrue, task_size=task_info["increment"], zeroshot=args.get("all_test_classes")
        )

        if args["dump_predictions"] and args["label"]:
            # Persist raw predictions per task; file name is the zero-padded task id.
            os.makedirs(
                os.path.join(results_folder, "predictions_{}".format(run_id)), exist_ok=True
            )
            with open(
                os.path.join(
                    results_folder, "predictions_{}".format(run_id),
                    str(task_id).rjust(len(str(30)), "0") + ".pkl"
                ), "wb+"
            ) as f:
                pickle.dump((ypreds, ytrue), f)

        if args["label"]:
            logger.info(args["label"])
        logger.info("Avg inc acc: {}.".format(metric_logger.last_results["incremental_accuracy"]))
        logger.info("Current acc: {}.".format(metric_logger.last_results["accuracy"]))
        logger.info(
            "Avg inc acc top5: {}.".format(metric_logger.last_results["incremental_accuracy_top5"])
        )
        logger.info("Current acc top5: {}.".format(metric_logger.last_results["accuracy_top5"]))
        logger.info("Forgetting: {}.".format(metric_logger.last_results["forgetting"]))
        logger.info("Cord metric: {:.2f}.".format(metric_logger.last_results["cord"]))
        if task_id > 0:
            # Old/new split metrics only exist from the second task onwards.
            logger.info(
                "Old accuracy: {:.2f}, mean: {:.2f}.".format(
                    metric_logger.last_results["old_accuracy"],
                    metric_logger.last_results["avg_old_accuracy"]
                )
            )
            logger.info(
                "New accuracy: {:.2f}, mean: {:.2f}.".format(
                    metric_logger.last_results["new_accuracy"],
                    metric_logger.last_results["avg_new_accuracy"]
                )
            )

        if args.get("all_test_classes"):
            logger.info(
                "Seen classes: {:.2f}.".format(metric_logger.last_results["seen_classes_accuracy"])
            )
            logger.info(
                "unSeen classes: {:.2f}.".format(
                    metric_logger.last_results["unseen_classes_accuracy"]
                )
            )

        results["results"].append(metric_logger.last_results)

        avg_inc_acc = results["results"][-1]["incremental_accuracy"]
        last_acc = results["results"][-1]["accuracy"]["total"]
        forgetting = results["results"][-1]["forgetting"]
        yield avg_inc_acc, last_acc, forgetting

        # Refresh rehearsal memory for the next task.
        memory = model.get_memory()
        memory_val = model.get_val_memory()

    logger.info(
        "Average Incremental Accuracy: {}.".format(results["results"][-1]["incremental_accuracy"])
    )
    if args["label"] is not None:
        results_utils.save_results(
            results, args["label"], args["model"], start_date, run_id, args["seed"]
        )

    # Release model/dataset before the next seed's run starts.
    del model
    del inc_dataset
# ------------------------
# Lifelong Learning phases
# ------------------------
def _train_task(config, model, train_loader, val_loader, test_loader, run_id, task_id, task_info):
    """Train one incremental task, or skip training when resuming from disk.

    A directory ``resume`` path holds per-task checkpoints and may skip any
    task; a single-file path is only used to skip the very first task.
    """
    resume_path = config["resume"]

    if (
        resume_path is not None and os.path.isdir(resume_path)
        and ((config["resume_first"] and task_id == 0) or not config["resume_first"])
    ):
        # Per-task checkpoint directory: reload weights instead of training.
        model.load_parameters(resume_path, run_id)
        logger.info(
            "Skipping training phase {} because reloading pretrained model.".format(task_id)
        )
        if config["model"] == "afc":
            # AFC still needs its importance statistics recomputed on this task.
            model._update_importance(train_loader)
    elif (
        resume_path is not None and os.path.isfile(resume_path)
        and os.path.exists(resume_path) and task_id == 0
    ):
        # In case we resume from a single model file, it's assumed to be from the first task.
        model.network = resume_path
        logger.info(
            "Skipping initial training phase {} because reloading pretrained model.".
            format(task_id)
        )
    else:
        logger.info("Train on {}->{}.".format(task_info["min_class"], task_info["max_class"]))
        model.train()
        model.train_task(train_loader, val_loader if val_loader else test_loader)
def _after_task(config, model, inc_dataset, run_id, task_id, results_folder):
    """Post-task bookkeeping: metadata, model hooks and optional checkpointing."""
    resume_path = config["resume"]

    if (
        resume_path and os.path.isdir(resume_path) and not config["recompute_meta"]
        and ((config["resume_first"] and task_id == 0) or not config["resume_first"])
    ):
        # Metadata (e.g. exemplar statistics) already computed in a previous run.
        model.load_metadata(resume_path, run_id)
    else:
        model.after_task_intensive(inc_dataset)
    model.after_task(inc_dataset)

    if config["label"]:
        save_mode = config["save_model"]
        should_save = (
            save_mode == "task"
            or (save_mode == "last" and task_id == inc_dataset.n_tasks - 1)
            or (save_mode == "first" and task_id == 0)
        )
        if should_save:
            model.save_parameters(results_folder, run_id)
            model.save_metadata(results_folder, run_id)
# ----------
# Parameters
# ----------
def _set_results(config, start_date):
    """Create the results template and, when a label is set, its save folder.

    Args:
        config: Experiment configuration dict.
        start_date: Timestamp string used in the folder name.

    Returns:
        A ``(results, results_folder)`` tuple; ``results_folder`` is None when
        no label was given (nothing is persisted on disk in that case).
    """
    if config["label"]:
        results_folder = results_utils.get_save_folder(config["model"], start_date, config["label"])
    else:
        results_folder = None

    if config["save_model"]:
        # Fix: corrected log-message typos ("be save at this rythm").
        logger.info("Model will be saved at this rhythm: {}.".format(config["save_model"]))

    results = results_utils.get_template_results(config)

    return results, results_folder
def _set_data_model(config, class_order):
    """Build the incremental dataset and the model, wiring them together."""
    dataset = factory.get_data(config, class_order)
    # The dataset may shuffle classes; expose the effective order in the config.
    config["classes_order"] = dataset.class_order

    network = factory.get_model(config)
    network.inc_dataset = dataset

    return dataset, network
def _set_global_parameters(config):
    """Apply process-wide settings: RNG seeds, thread count, CUDA flags, device."""
    _set_seed(config["seed"], config["threads"], config["no_benchmark"], config["detect_anomaly"])
    factory.set_device(config)
def _set_seed(seed, nb_threads, no_benchmark, detect_anomaly):
    """Seed every RNG in use and configure torch's global execution flags."""
    logger.info("Set seed {}".format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    warning = (
        "CUDA algos are not determinists but faster!"
        if no_benchmark
        else "CUDA algos are determinists but very slow!"
    )
    logger.warning(warning)
    # Deterministic cuDNN kernels make runs reproducible at a speed cost.
    torch.backends.cudnn.deterministic = not no_benchmark  # This will slow down training.

    torch.set_num_threads(nb_threads)

    if detect_anomaly:
        logger.info("Will detect autograd anomaly.")
        torch.autograd.set_detect_anomaly(detect_anomaly)
def _set_up_options(args):
    """Merge every options file listed in ``args['options']`` into ``args``.

    Later files override earlier ones. Returns an auto-label built from the
    option file basenames joined with underscores.

    Raises:
        IOError: If one of the listed option files does not exist.
    """
    labels = []
    for option_path in (args["options"] or []):
        if not os.path.exists(option_path):
            raise IOError("Not found options file {}.".format(option_path))

        args.update(_parse_options(option_path))
        labels.append(os.path.splitext(os.path.basename(option_path))[0])

    return "_".join(labels)
def _parse_options(path):
with open(path) as f:
if path.endswith(".yaml") or path.endswith(".yml"):
return yaml.load(f, Loader=yaml.FullLoader)
elif path.endswith(".json"):
return json.load(f)["config"]
else:
raise Exception("Unknown file type {}.".format(path))
# ----
# Misc
# ----
def _aggregate_results(list_results):
res = str(round(statistics.mean(list_results) * 100, 2))
if len(list_results) > 1:
res = res + " +/- " + str(round(statistics.stdev(list_results) * 100, 2))
return res
| 11,516 | 33.689759 | 100 | py |
AFC | AFC-master/inclearn/convnet/ucir_resnet.py | import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1 (shape-preserving at stride 1)."""
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
    )
class BasicBlock(nn.Module):
    """Two-convolution residual block for the UCIR CIFAR ResNet.

    When ``last`` is True the trailing ReLU is omitted so the final features
    keep negative values (UCIR removes the last ReLU of the network).
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, last=False):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.last = last

    def forward(self, x):
        # Identity shortcut, or a projection when shapes change.
        identity = self.downsample(x) if self.downsample is not None else x

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out += identity
        if not self.last:  # remove ReLU in the last layer
            out = self.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet backbone used by UCIR (3 stages: 16/32/64 channels).

    ``forward`` returns the pooled 64-d feature vector (no classifier head);
    the last block of the last stage has its closing ReLU removed.
    """

    def __init__(self, block, layers, **kwargs):
        # ``inplanes`` tracks the running channel count while stages are built.
        self.inplanes = 16
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, layers[0])
        self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
        # Final stage: its last block is built with last=True (no closing ReLU).
        self.layer3 = self._make_layer(block, 64, layers[2], stride=2, last_phase=True)
        # 8x8 average pool matches 32x32 inputs after two stride-2 stages.
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.out_dim = 64  # dimensionality of the returned feature vector
        # He init for convs; unit weight / zero bias for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, last_phase=False):
        """Stack ``blocks`` residual blocks; with ``last_phase`` the stage's
        final block is created with ``last=True`` (skips its closing ReLU)."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when the spatial size or channel count changes.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        if last_phase:
            for i in range(1, blocks-1):
                layers.append(block(self.inplanes, planes))
            layers.append(block(self.inplanes, planes, last=True))
        else:
            for i in range(1, blocks):
                layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x, **kwargs):
        """Return the flattened pooled features, duplicated (API symmetry
        with backbones that distinguish raw vs. activated features)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return x, x
def resnet20(**kwargs):
    """CIFAR ResNet-20: three stages of 3 basic blocks each (depth 6n+2, n=3)."""
    blocks_per_stage = 3
    return ResNet(BasicBlock, [blocks_per_stage] * 3, **kwargs)
def resnet32(**kwargs):
    """CIFAR ResNet-32: three stages of 5 basic blocks each (depth 6n+2, n=5)."""
    blocks_per_stage = 5
    return ResNet(BasicBlock, [blocks_per_stage] * 3, **kwargs)
| 3,464 | 29.394737 | 87 | py |
AFC | AFC-master/inclearn/convnet/my_resnet2.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from inclearn.lib import pooling
class DownsampleStride(nn.Module):
    """Parameter-free shortcut: stride-2 spatial subsampling, channels doubled
    by concatenating a zero tensor (ResNet "option A" shortcut)."""

    def __init__(self, n=2):
        super(DownsampleStride, self).__init__()
        self._n = n

    def forward(self, x):
        subsampled = x[..., ::2, ::2]
        # Pad the channel dimension with zeros of the same shape.
        return torch.cat((subsampled, subsampled.mul(0)), 1)
class DownsampleConv(nn.Module):
    """Learned shortcut: 1x1 stride-2 convolution followed by batch norm
    (ResNet "option B" projection shortcut)."""

    def __init__(self, inplanes, planes):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(inplanes, planes, stride=2, kernel_size=1, bias=False),
            nn.BatchNorm2d(planes),
        )

    def forward(self, x):
        return self.conv(x)
def conv3x3(inplanes, planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(
        inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False
    )
class ResidualBlock(nn.Module):
    """Post-activation (v1) residual block with a configurable shortcut.

    When ``increase_dim`` is set, the first convolution uses stride 2 and the
    shortcut downsamples via ``DownsampleStride`` or ``DownsampleConv``.
    ``final_relu`` controls whether a closing ReLU is applied.
    """

    expansion = 1

    def __init__(
        self, inplanes, planes, final_relu=False, increase_dim=False, downsampling="stride"
    ):
        super().__init__()

        self.conv1 = conv3x3(inplanes, planes, 2 if increase_dim else 1)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)

        if not increase_dim:
            # Same shape in and out: plain identity shortcut.
            self.shortcut = lambda x: x
        elif downsampling == "stride":
            self.shortcut = DownsampleStride()
        elif downsampling == "conv":
            self.shortcut = DownsampleConv(inplanes, planes)
        else:
            raise ValueError("Unknown downsampler {}.".format(downsampling))

        self.final_relu = nn.ReLU(inplace=True) if final_relu else lambda x: x

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)), inplace=True)
        out = self.bn2(self.conv2(out))
        out = out + self.shortcut(x)
        return self.final_relu(out)
class PreActResidualBlock(nn.Module):
    """ResNet v2 version of the residual block.
    Instead of the order conv->bn->relu we use bn->relu->conv.

    NOTE(review): ``self.final_relu`` is constructed below but never applied
    in ``forward`` — for pre-activation blocks that may be intentional (no
    post-addition activation), but confirm before relying on ``final_relu``.
    """
    expansion = 1
    def __init__(
        self, inplanes, planes, final_relu=False, increase_dim=False, downsampling="stride"
    ):
        super().__init__()
        # Pre-activation ordering: BN -> ReLU -> conv (twice).
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = conv3x3(inplanes, planes, 2 if increase_dim else 1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        if increase_dim:
            # Shortcut must match the halved spatial size / doubled channels.
            if downsampling == "stride":
                self.shortcut = DownsampleStride()
            elif downsampling == "conv":
                self.shortcut = DownsampleConv(inplanes, planes)
            else:
                raise ValueError("Unknown downsampler {}.".format(downsampling))
        else:
            self.shortcut = lambda x: x
        # Stored but unused in forward() — see class NOTE above.
        self.final_relu = nn.ReLU(inplace=True) if final_relu else lambda x: x
    def forward(self, x):
        y = self.conv1(F.relu(self.bn1(x), inplace=True))
        y = self.conv2(F.relu(self.bn2(y), inplace=True))
        shortcut = self.shortcut(x)
        y = shortcut + y
        return y
class ResNet(nn.Module):
    """Configurable 4-stage ResNet backbone (PODNet/AFC style).

    ``forward`` returns both the raw (pre-ReLU) and activated (post-ReLU)
    pooled features; with ``attention_hook`` it also returns the per-stage
    activation maps used for attention distillation.
    """

    def __init__(
        self,
        block_sizes,
        nf=16,
        channels=3,
        preact=False,
        zero_residual=False,
        pooling_config=None,
        downsampling="stride",
        block_relu=False
    ):
        """Build the backbone.

        Args:
            block_sizes: Number of residual blocks per stage (4 entries).
            nf: Base channel width; stages use nf, 2nf, 4nf, 8nf.
            channels: Number of input image channels.
            preact: Use pre-activation (v2) blocks instead of v1 blocks.
            zero_residual: Zero-init each block's last BN weight.
            pooling_config: Pooling spec dict; defaults to {"type": "avg"}.
            downsampling: Shortcut downsampling, "stride" or "conv".
            block_relu: Whether blocks end with a ReLU (except stage ends).
        """
        super().__init__()

        # Fix: the original default was a mutable dict ({"type": "avg"})
        # shared across all calls; use a None sentinel instead.
        if pooling_config is None:
            pooling_config = {"type": "avg"}

        self._downsampling_type = downsampling
        self._block_relu = block_relu

        Block = ResidualBlock if not preact else PreActResidualBlock

        self.conv_1_3x3 = conv3x3(channels, nf)
        self.bn_1 = nn.BatchNorm2d(nf)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # NOTE(review): kept for compatibility; _make_layer takes explicit
        # channel arguments and never reads self.inplanes.
        self.inplanes = nf
        self.stages = nn.ModuleList(
            [
                self._make_layer(Block, 1 * nf, 1 * nf, block_sizes[0], stride=1),
                self._make_layer(Block, 1 * nf, 2 * nf, block_sizes[1], stride=2),
                self._make_layer(Block, 2 * nf, 4 * nf, block_sizes[2], stride=2),
                self._make_layer(Block, 4 * nf, 8 * nf, block_sizes[3], stride=2, last=True)
            ]
        )

        if pooling_config["type"] == "avg":
            self.pool = nn.AdaptiveAvgPool2d((1, 1))
        elif pooling_config["type"] == "weldon":
            self.pool = pooling.WeldonPool2d(**pooling_config)
        else:
            raise ValueError("Unknown pooling type {}.".format(pooling_config["type"]))

        self.out_dim = 8 * nf

        # He init for convs; unit weight / zero bias for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        if zero_residual:
            # Zero-init the last BN so each block starts as an identity mapping.
            for m in self.modules():
                if isinstance(m, ResidualBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, Block, inplanes, planes, block_size, stride=1, last=False):
        """Build one stage of ``block_size`` blocks; stride!=1 makes the first
        block downsample. With ``last``, the stage's final block gets no ReLU."""
        layers = []
        if stride != 1:
            layers.append(
                Block(
                    inplanes,
                    planes,
                    increase_dim=True,
                    downsampling=self._downsampling_type,
                    final_relu=self._block_relu
                )
            )
        else:
            layers.append(Block(inplanes, planes, final_relu=self._block_relu))

        for i in range(1, block_size):
            if last and i == block_size - 1:
                final_relu = False
            else:
                final_relu = self._block_relu
            layers.append(Block(planes, planes, final_relu=final_relu))

        return nn.Sequential(*layers)

    def forward(self, x, attention_hook=False):
        x = self.conv_1_3x3(x)
        x = F.relu(self.bn_1(x), inplace=True)
        x = self.maxpool(x)

        # Collect each stage's output for attention-based distillation.
        intermediary_features = []
        for stage in self.stages:
            x = stage(x)
            intermediary_features.append(x)

        # Raw (pre-ReLU) vs. activated (post-ReLU) pooled features.
        raw_features = self.end_features(x)
        features = self.end_features(F.relu(x, inplace=False))

        if attention_hook:
            return raw_features, features, intermediary_features
        return raw_features, features

    def end_features(self, x):
        """Pool to 1x1 (or Weldon-pool) and flatten to (batch, out_dim)."""
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        return x
def resnet18(**kwargs):
    """Build a ResNet-18-style backbone (4 stages of 2 residual blocks each)."""
    return ResNet([2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
    """Build a ResNet-34-style backbone (stages of 3/4/6/3 residual blocks)."""
    return ResNet([3, 4, 6, 3], **kwargs)
| 6,532 | 28.035556 | 92 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.