| | """Base class for Membership Inference Attacks.""" |
| |
|
| | from abc import ABC, abstractmethod |
| | from typing import Dict, Optional, Literal |
| |
|
| | import numpy as np |
| |
|
| | from mia.experiment import MIAExperiment |
| | from mia.predictions import PredictionCache |
| |
|
# Attacker knowledge levels accepted by the attack classes, ordered from
# least to most access to the target model.
AccessLevel = Literal["blackbox", "greybox", "whitebox"]
| |
|
| |
|
class BaseAttack(ABC):
    """
    Abstract base class for MIA attacks.

    Concrete attacks implement :attr:`name` and :meth:`run`. This base
    class stores the experiment and prediction-cache handles and provides
    shared helpers for fetching shadow/target data and for deriving
    per-sample IN/OUT loss statistics from shadow models.
    """

    def __init__(
        self,
        experiment: MIAExperiment,
        prediction_cache: PredictionCache,
        access_level: AccessLevel = "blackbox",
    ):
        """
        Initialize attack.

        Args:
            experiment: MIA experiment with membership matrices
            prediction_cache: Cache with model predictions
            access_level: Access level for the attack
        """
        self.experiment = experiment
        self.prediction_cache = prediction_cache
        self.access_level = access_level

    @property
    @abstractmethod
    def name(self) -> str:
        """Attack name for logging/display."""

    @abstractmethod
    def run(
        self,
        target_split: str = "target_test",
        num_target_models: Optional[int] = None,
        verbose: bool = True,
    ) -> Dict[str, np.ndarray]:
        """
        Run the attack on a target split.

        Args:
            target_split: Split to attack (target_train, target_val, target_test)
            num_target_models: Number of target models (None = all)
            verbose: Print progress

        Returns:
            Dictionary with:
                'scores': (num_samples,) aggregated membership scores
                'labels': (num_samples,) true membership labels
                'per_model_scores': (num_samples, num_models) individual scores
        """

    def get_shadow_data(self) -> Dict[str, np.ndarray]:
        """
        Get shadow model data for attack calibration.

        Returns:
            Dictionary with shadow model losses and membership matrix.
        """
        # The [:, :] slice forces a full read of the cached losses
        # (e.g. out of an on-disk store) into an in-memory array.
        return {
            "losses": self.prediction_cache.store["shadow"]["losses"][:, :],
            "membership": self.experiment.get_membership_matrix("shadow").matrix,
        }

    def get_target_data(
        self, split: str, num_models: Optional[int] = None
    ) -> Dict[str, np.ndarray]:
        """
        Get target model data for the attack.

        Args:
            split: Target split name
            num_models: Number of models (None = all)

        Returns:
            Dictionary with target losses, logits, and membership.
        """
        membership_matrix = self.experiment.get_membership_matrix(split)
        count = membership_matrix.num_models if num_models is None else num_models

        split_store = self.prediction_cache.store[split]
        return {
            "losses": split_store["losses"][:, :count],
            "logits": split_store["logits"][:, :count, :],
            # NOTE(review): the membership-matrix object itself is returned
            # here (not its underlying array, unlike get_shadow_data) —
            # callers appear to rely on its attributes.
            "membership": membership_matrix,
            "num_models": count,
        }

    def compute_in_out_statistics(
        self,
        shadow_losses: np.ndarray,
        shadow_membership: np.ndarray,
    ) -> Dict[str, np.ndarray]:
        """
        Compute IN/OUT loss statistics from shadow models.

        Args:
            shadow_losses: (num_samples, num_shadow_models)
            shadow_membership: (num_samples, num_shadow_models) boolean

        Returns:
            Dictionary with in_mean, in_std, out_mean, out_std
        """
        stats: Dict[str, np.ndarray] = {}
        for tag, mask in (("in", shadow_membership), ("out", ~shadow_membership)):
            # Blank out entries not covered by this mask so the nan-aware
            # reductions skip them.
            masked = np.where(mask, shadow_losses, np.nan)
            stats[f"{tag}_mean"] = np.nanmean(masked, axis=1)
            # Small epsilon keeps downstream divisions by std well-defined.
            stats[f"{tag}_std"] = np.nanstd(masked, axis=1) + 1e-10
        return stats
| |
|
| |
|
class GreyBoxAttack(BaseAttack):
    """
    Base class for grey-box attacks with activation access.

    Extends BaseAttack with methods to load and use activation features.
    """

    def __init__(
        self,
        experiment: MIAExperiment,
        prediction_cache: PredictionCache,
        activation_cache=None,
        access_level: AccessLevel = "greybox",
    ):
        """
        Initialize grey-box attack.

        Args:
            experiment: MIA experiment with membership matrices
            prediction_cache: Cache with model predictions
            activation_cache: Optional cache with activation features;
                required by the activation helpers below.
            access_level: Access level recorded for the attack. Defaults to
                "greybox"; subclasses with broader access can pass their own
                level instead of overwriting the attribute after construction.
        """
        super().__init__(experiment, prediction_cache, access_level=access_level)
        self.activation_cache = activation_cache

    def get_shadow_activations(self) -> np.ndarray:
        """
        Get shadow model activations.

        Returns:
            Activation features for the shadow split — assumed to be
            (num_samples, num_shadow_models, feature_dim), matching
            compute_activation_statistics.

        Raises:
            ValueError: If no activation cache was provided.
        """
        if self.activation_cache is None:
            raise ValueError("Activation cache not provided")
        return self.activation_cache.store["shadow"]["features"][:, :]

    def get_target_activations(
        self, split: str, num_models: Optional[int] = None
    ) -> np.ndarray:
        """
        Get target model activations.

        Args:
            split: Target split name
            num_models: Number of models to keep (None = all)

        Returns:
            Activation features, truncated along the model axis (axis 1)
            when num_models is given.

        Raises:
            ValueError: If no activation cache was provided.
        """
        if self.activation_cache is None:
            raise ValueError("Activation cache not provided")
        features = self.activation_cache.store[split]["features"]
        if num_models is None:
            return features[:, :]
        return features[:, :num_models, :]

    def compute_activation_statistics(
        self,
        shadow_activations: np.ndarray,
        shadow_membership: np.ndarray,
    ) -> Dict[str, np.ndarray]:
        """
        Compute IN/OUT activation statistics from shadow models.

        Args:
            shadow_activations: (num_samples, num_shadow_models, feature_dim)
            shadow_membership: (num_samples, num_shadow_models) boolean

        Returns:
            Dictionary with in_act_mean, in_act_std, out_act_mean, out_act_std
        """
        # Broadcast the 2-D membership mask across the feature dimension.
        in_mask = shadow_membership[:, :, np.newaxis]
        out_mask = ~shadow_membership[:, :, np.newaxis]

        # nan-mask entries outside each group so nan-aware reductions skip them.
        in_act_masked = np.where(in_mask, shadow_activations, np.nan)
        out_act_masked = np.where(out_mask, shadow_activations, np.nan)

        return {
            "in_act_mean": np.nanmean(in_act_masked, axis=1),
            # Epsilon keeps the std strictly positive for downstream division.
            "in_act_std": np.nanstd(in_act_masked, axis=1) + 1e-10,
            "out_act_mean": np.nanmean(out_act_masked, axis=1),
            "out_act_std": np.nanstd(out_act_masked, axis=1) + 1e-10,
        }
| |
|
| |
|
class WhiteBoxAttack(GreyBoxAttack):
    """
    Base class for white-box attacks with gradient access.

    Builds on GreyBoxAttack by additionally exposing gradient features
    (norms, gradient-weight dot products, and gradient feature vectors).
    """

    def __init__(
        self,
        experiment: MIAExperiment,
        prediction_cache: PredictionCache,
        activation_cache=None,
        gradient_cache=None,
    ):
        """
        Initialize white-box attack.

        Args:
            experiment: MIA experiment with membership matrices
            prediction_cache: Cache with model predictions
            activation_cache: Optional cache with activation features
            gradient_cache: Optional cache with gradient features
        """
        super().__init__(experiment, prediction_cache, activation_cache)
        # The grey-box parent hard-codes "greybox"; promote to white-box here.
        self.access_level = "whitebox"
        self.gradient_cache = gradient_cache

    def get_shadow_gradients(self) -> Dict[str, np.ndarray]:
        """
        Get shadow model gradient features.

        Returns:
            Dictionary with grad_norms, grad_dot_weight, and grad_features.

        Raises:
            ValueError: If no gradient cache was provided.
        """
        if self.gradient_cache is None:
            raise ValueError("Gradient cache not provided")
        shadow_group = self.gradient_cache.store["shadow"]
        return {
            "grad_norms": shadow_group["grad_norms"][:, :],
            "grad_dot_weight": shadow_group["grad_dot_weight"][:, :],
            "grad_features": shadow_group["grad_features"][:, :, :],
        }

    def get_target_gradients(
        self, split: str, num_models: Optional[int] = None
    ) -> Dict[str, np.ndarray]:
        """
        Get target model gradient features.

        Args:
            split: Target split name
            num_models: Number of models to keep (None = all)

        Returns:
            Dictionary with grad_norms, grad_dot_weight, and grad_features,
            truncated along the model axis (axis 1) when num_models is given.

        Raises:
            ValueError: If no gradient cache was provided.
        """
        if self.gradient_cache is None:
            raise ValueError("Gradient cache not provided")
        group = self.gradient_cache.store[split]
        # slice(None) selects every model, matching the num_models=None case.
        models = slice(None) if num_models is None else slice(None, num_models)
        return {
            "grad_norms": group["grad_norms"][:, models],
            "grad_dot_weight": group["grad_dot_weight"][:, models],
            "grad_features": group["grad_features"][:, models, :],
        }

    def compute_gradient_statistics(
        self,
        shadow_grad_norms: np.ndarray,
        shadow_membership: np.ndarray,
    ) -> Dict[str, np.ndarray]:
        """
        Compute IN/OUT gradient norm statistics from shadow models.

        Args:
            shadow_grad_norms: (num_samples, num_shadow_models)
            shadow_membership: (num_samples, num_shadow_models) boolean

        Returns:
            Dictionary with in_grad_mean, in_grad_std, out_grad_mean, out_grad_std
        """
        stats: Dict[str, np.ndarray] = {}
        for tag, mask in (("in", shadow_membership), ("out", ~shadow_membership)):
            # nan-mask the entries outside this group so the nan-aware
            # reductions skip them.
            masked = np.where(mask, shadow_grad_norms, np.nan)
            stats[f"{tag}_grad_mean"] = np.nanmean(masked, axis=1)
            # Epsilon keeps the std strictly positive.
            stats[f"{tag}_grad_std"] = np.nanstd(masked, axis=1) + 1e-10
        return stats
| |
|