repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
mixture-of-diffusers | mixture-of-diffusers-master/generate_grid_from_json.py | import argparse
import datetime
from diffusers import LMSDiscreteScheduler, DDIMScheduler
import json
from pathlib import Path
import torch
from mixdiff.tiling import StableDiffusionTilingPipeline
def generate_grid(generation_arguments):
    """Generate an image grid with a tiling Stable Diffusion pipeline.

    Arguments:
        generation_arguments: dict of configuration values (typically loaded
            from a JSON file). Required keys: "scheduler" ("ddim" or "lms"),
            "gc", "steps", "seed", "prompt", "tile_height", "tile_width",
            "tile_row_overlap", "tile_col_overlap", "gc_tiles". Optional keys:
            "cpu_vae", "seed_tiles", "seed_tiles_mode", "seed_reroll_regions".

    Raises:
        ValueError: if the configured scheduler name is not recognized.

    Side effects: downloads/loads the model onto CUDA device 0 and writes the
    generated image to ./outputs/output.png.
    """
    model_id = "CompVis/stable-diffusion-v1-4"
    # Prepare scheduler
    if generation_arguments["scheduler"] == "ddim":
        scheduler = DDIMScheduler()
    elif generation_arguments["scheduler"] == "lms":
        scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
    else:
        raise ValueError(f"Unrecognized scheduler {generation_arguments['scheduler']}")
    pipe = StableDiffusionTilingPipeline.from_pretrained(model_id, scheduler=scheduler, use_auth_token=True).to("cuda:0")

    pipeargs = {
        "guidance_scale": generation_arguments["gc"],
        "num_inference_steps": generation_arguments["steps"],
        "seed": generation_arguments["seed"],
        "prompt": generation_arguments["prompt"],
        "tile_height": generation_arguments["tile_height"],
        "tile_width": generation_arguments["tile_width"],
        "tile_row_overlap": generation_arguments["tile_row_overlap"],
        "tile_col_overlap": generation_arguments["tile_col_overlap"],
        "guidance_scale_tiles": generation_arguments["gc_tiles"],
        # dict.get with a default instead of a membership test + conditional
        "cpu_vae": generation_arguments.get("cpu_vae", False),
    }
    # Forward the optional arguments only when present in the configuration,
    # so the pipeline's own defaults apply otherwise
    for optional_key in ("seed_tiles", "seed_tiles_mode", "seed_reroll_regions"):
        if optional_key in generation_arguments:
            pipeargs[optional_key] = generation_arguments[optional_key]
    image = pipe(**pipeargs)["sample"][0]
    outname = "output"
    outpath = "./outputs"
    Path(outpath).mkdir(parents=True, exist_ok=True)
    image.save(f"{outpath}/{outname}.png")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generate a stable diffusion grid using a JSON file with all configuration parameters.')
    parser.add_argument('config', type=str, help='Path to configuration file')
    cli_args = parser.parse_args()
    # Load the generation parameters from the provided JSON configuration file
    with open(cli_args.config, "r") as config_file:
        generation_arguments = json.load(config_file)
    generate_grid(generation_arguments)
| 2,454 | 49.102041 | 145 | py |
mixture-of-diffusers | mixture-of-diffusers-master/mixdiff/canvas.py | from copy import deepcopy
from dataclasses import asdict, dataclass
from enum import Enum
import numpy as np
from numpy import pi, exp, sqrt
import re
import torch
from torchvision.transforms.functional import resize
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from typing import List, Optional, Tuple, Union
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
class MaskModes(Enum):
    """Modes in which the influence of diffuser is masked.

    Each member's value is the string accepted in Text2ImageRegion.mask_type.
    """
    CONSTANT = "constant"  # Uniform weight over the whole region
    GAUSSIAN = "gaussian"  # Gaussian bump peaking at the region center
    QUARTIC = "quartic"  # See https://en.wikipedia.org/wiki/Kernel_(statistics)
class RerollModes(Enum):
    """Modes in which the reroll regions operate.

    Each member's value is the string stored in RerollRegion.reroll_mode.
    """
    RESET = "reset"  # Completely reset the random noise in the region
    EPSILON = "epsilon"  # Alter slightly the latents in the region
@dataclass
class CanvasRegion:
    """Class defining a rectangular region in the canvas.

    Coordinates are given in pixel space and must be non-negative multiples of 8;
    the matching latent-space coordinates are precomputed in __post_init__.
    """
    row_init: int  # Region starting row in pixel space (included)
    row_end: int  # Region end row in pixel space (not included)
    col_init: int  # Region starting column in pixel space (included)
    col_end: int  # Region end column in pixel space (not included)
    region_seed: Optional[int] = None  # Seed for random operations in this region; randomized in __post_init__ when None
    noise_eps: float = 0.0  # Deviation of a zero-mean gaussian noise to be applied over the latents in this region. Useful for slightly "rerolling" latents

    def __post_init__(self):
        """Validates the region definition and precomputes latent coordinates.

        Raises:
            ValueError: if any coordinate is negative or not divisible by 8, or
                if noise_eps is negative.
        """
        # Initialize arguments if not specified
        if self.region_seed is None:
            self.region_seed = np.random.randint(9999999999)
        # Check coordinates are non-negative
        for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
            if coord < 0:
                raise ValueError(f"A CanvasRegion must be defined with non-negative indices, found ({self.row_init}, {self.row_end}, {self.col_init}, {self.col_end})")
        # Check coordinates are divisible by 8, else we end up with nasty rounding error when mapping to latent space
        for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
            if coord % 8 != 0:
                raise ValueError(f"A CanvasRegion must be defined with locations divisible by 8, found ({self.row_init}-{self.row_end}, {self.col_init}-{self.col_end})")
        # Check noise eps is non-negative
        if self.noise_eps < 0:
            raise ValueError(f"A CanvasRegion must be defined with non-negative noise_eps, found {self.noise_eps}")
        # Compute coordinates for this region in latent space (the VAE downsamples by a factor of 8)
        self.latent_row_init = self.row_init // 8
        self.latent_row_end = self.row_end // 8
        self.latent_col_init = self.col_init // 8
        self.latent_col_end = self.col_end // 8

    @property
    def width(self):
        """Region width in pixels."""
        return self.col_end - self.col_init

    @property
    def height(self):
        """Region height in pixels."""
        return self.row_end - self.row_init

    def get_region_generator(self, device="cpu"):
        """Creates a torch.Generator based on the random seed of this region"""
        # Initialize region generator
        return torch.Generator(device).manual_seed(self.region_seed)

    @property
    def __dict__(self):
        # Expose only the dataclass fields, e.g. for JSON serialization
        return asdict(self)
@dataclass
class DiffusionRegion(CanvasRegion):
    """Abstract class defining a region where some class of diffusion process is acting"""
    # Marker base class: concrete subclasses (text- or image-guided) add their own fields
    pass
@dataclass
class RerollRegion(CanvasRegion):
    """Class defining a rectangular canvas region in which initial latent noise will be rerolled"""
    # Holds the string value of a RerollModes member ("reset" or "epsilon")
    reroll_mode: RerollModes = RerollModes.RESET.value
@dataclass
class Text2ImageRegion(DiffusionRegion):
    """Class defining a region where a text guided diffusion process is acting"""
    prompt: str = ""  # Text prompt guiding the diffuser in this region
    guidance_scale: float = 7.5  # Guidance scale of the diffuser in this region. If None, randomize
    mask_type: MaskModes = MaskModes.GAUSSIAN.value  # Kind of weight mask applied to this region (string value of a MaskModes member)
    mask_weight: float = 1.0  # Global weights multiplier of the mask
    # Deliberately left unannotated so they stay plain class attributes instead
    # of becoming dataclass fields (they would otherwise appear in asdict())
    tokenized_prompt = None  # Tokenized prompt
    encoded_prompt = None  # Encoded prompt

    def __post_init__(self):
        """Validates mask settings, randomizes guidance if None, cleans the prompt.

        Raises:
            ValueError: if mask_weight is negative or mask_type is unknown.
        """
        super().__post_init__()
        # Mask weight cannot be negative
        if self.mask_weight < 0:
            raise ValueError(f"A Text2ImageRegion must be defined with non-negative mask weight, found {self.mask_weight}")
        # Mask type must be an actual known mask
        if self.mask_type not in [e.value for e in MaskModes]:
            raise ValueError(f"A Text2ImageRegion was defined with mask {self.mask_type}, which is not an accepted mask ({[e.value for e in MaskModes]})")
        # Randomize arguments if given as None
        if self.guidance_scale is None:
            self.guidance_scale = np.random.randint(5, 30)
        # Clean prompt: collapse repeated spaces and replace newlines with spaces
        self.prompt = re.sub(' +', ' ', self.prompt).replace("\n", " ")

    def tokenize_prompt(self, tokenizer):
        """Tokenizes the prompt for this diffusion region using a given tokenizer"""
        self.tokenized_prompt = tokenizer(self.prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")

    def encode_prompt(self, text_encoder, device):
        """Encodes the previously tokenized prompt for this diffusion region using a given encoder"""
        assert self.tokenized_prompt is not None, ValueError("Prompt in diffusion region must be tokenized before encoding")
        self.encoded_prompt = text_encoder(self.tokenized_prompt.input_ids.to(device))[0]
@dataclass
class Image2ImageRegion(DiffusionRegion):
    """Class defining a region where an image guided diffusion process is acting"""
    reference_image: torch.FloatTensor = None  # Image driving the diffusion in this region (required)
    strength: float = 0.8  # Strength of the image influence, must lie in [0, 1]

    def __post_init__(self):
        """Validates inputs and rescales the reference image to the region size.

        Raises:
            ValueError: if reference_image is missing or strength is outside [0, 1].
        """
        super().__post_init__()
        if self.reference_image is None:
            raise ValueError("Must provide a reference image when creating an Image2ImageRegion")
        if self.strength < 0 or self.strength > 1:
            raise ValueError(f'The value of strength should in [0.0, 1.0] but is {self.strength}')
        # Rescale image to region shape
        self.reference_image = resize(self.reference_image, size=[self.height, self.width])

    def encode_reference_image(self, encoder, device, generator, cpu_vae=False):
        """Encodes the reference image for this Image2Image region into the latent space"""
        # Place encoder in CPU or not following the parameter cpu_vae
        if cpu_vae:
            # Note here we use mean instead of sample, to avoid moving also generator to CPU, which is troublesome
            self.reference_latents = encoder.cpu().encode(self.reference_image).latent_dist.mean.to(device)
        else:
            self.reference_latents = encoder.encode(self.reference_image.to(device)).latent_dist.sample(generator=generator)
        # 0.18215 is the latent scaling factor used by Stable Diffusion's VAE
        self.reference_latents = 0.18215 * self.reference_latents

    @property
    def __dict__(self):
        # This class requires special casting to dict because of the reference_image tensor. Otherwise it cannot be casted to JSON

        # Get all basic fields from parent class
        super_fields = {key: getattr(self, key) for key in DiffusionRegion.__dataclass_fields__.keys()}
        # Pack other fields
        return {
            **super_fields,
            "reference_image": self.reference_image.cpu().tolist(),
            "strength": self.strength
        }
@dataclass
class MaskWeightsBuilder:
    """Auxiliary class to compute a tensor of weights for a given diffusion region"""
    latent_space_dim: int  # Size of the U-net latent space
    nbatch: int = 1  # Batch size in the U-net

    def compute_mask_weights(self, region: DiffusionRegion) -> torch.tensor:
        """Computes a tensor of weights for a given diffusion region"""
        # Dispatch to the builder matching the region's configured mask type
        builders = {
            MaskModes.CONSTANT.value: self._constant_weights,
            MaskModes.GAUSSIAN.value: self._gaussian_weights,
            MaskModes.QUARTIC.value: self._quartic_weights,
        }
        return builders[region.mask_type](region)

    def _constant_weights(self, region: DiffusionRegion) -> torch.tensor:
        """Computes a tensor of constant for a given diffusion region"""
        region_width = region.latent_col_end - region.latent_col_init
        region_height = region.latent_row_end - region.latent_row_init
        return torch.ones(self.nbatch, self.latent_space_dim, region_height, region_width) * region.mask_weight

    def _gaussian_weights(self, region: DiffusionRegion) -> torch.tensor:
        """Generates a gaussian mask of weights for tile contributions"""
        region_width = region.latent_col_end - region.latent_col_init
        region_height = region.latent_row_end - region.latent_row_init

        var = 0.01
        # Centered gaussian along each axis; -1 because indices run 0..size-1
        xs = np.arange(region_width)
        x_mid = (region_width - 1) / 2
        x_probs = np.exp(-np.square(xs - x_mid) / (region_width * region_width) / (2 * var)) / sqrt(2 * pi * var)
        ys = np.arange(region_height)
        y_mid = (region_height - 1) / 2
        y_probs = np.exp(-np.square(ys - y_mid) / (region_height * region_height) / (2 * var)) / sqrt(2 * pi * var)

        weights = np.outer(y_probs, x_probs) * region.mask_weight
        return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))

    def _quartic_weights(self, region: DiffusionRegion) -> torch.tensor:
        """Generates a quartic mask of weights for tile contributions

        The quartic kernel has bounded support over the diffusion region, and a smooth decay to the region limits.
        """
        quartic_constant = 15. / 16.

        region_width = region.latent_col_end - region.latent_col_init
        region_height = region.latent_row_end - region.latent_row_init
        # Map indices to the kernel support (-0.995, 0.995) along each axis
        support = np.arange(region_width) / (region_width - 1) * 1.99 - (1.99 / 2.)
        x_probs = quartic_constant * np.square(1 - np.square(support))
        support = np.arange(region_height) / (region_height - 1) * 1.99 - (1.99 / 2.)
        y_probs = quartic_constant * np.square(1 - np.square(support))

        weights = np.outer(y_probs, x_probs) * region.mask_weight
        return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))
class StableDiffusionCanvasPipeline(DiffusionPipeline):
    """Stable Diffusion pipeline that mixes several diffusers in the same canvas"""
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
    ):
        """Registers the standard Stable Diffusion modules with the pipeline."""
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def decode_latents(self, latents, cpu_vae=False):
        """Decodes a given array of latents into pixel space"""
        # scale and decode the image latents with vae
        if cpu_vae:
            # Work on deep copies so the pipeline's own modules keep their device placement
            lat = deepcopy(latents).cpu()
            vae = deepcopy(self.vae).cpu()
        else:
            lat = latents
            vae = self.vae
        # 0.18215 is the latent scaling factor used by Stable Diffusion's VAE
        lat = 1 / 0.18215 * lat
        image = vae.decode(lat).sample
        # Map from [-1, 1] to [0, 1] before conversion to PIL
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        return self.numpy_to_pil(image)

    def get_latest_timestep_img2img(self, num_inference_steps, strength):
        """Finds the latest timesteps where an img2img strength does not impose latents anymore"""
        # get the original timestep using init_timestep
        offset = self.scheduler.config.get("steps_offset", 0)
        init_timestep = int(num_inference_steps * (1 - strength)) + offset
        init_timestep = min(init_timestep, num_inference_steps)
        # Clamp to a valid index into the scheduler's timesteps
        t_start = min(max(num_inference_steps - init_timestep + offset, 0), num_inference_steps-1)
        latest_timestep = self.scheduler.timesteps[t_start]
        return latest_timestep

    @torch.no_grad()
    def __call__(
        self,
        canvas_height: int,
        canvas_width: int,
        regions: List[DiffusionRegion],
        num_inference_steps: Optional[int] = 50,
        seed: Optional[int] = 12345,
        reroll_regions: Optional[List[RerollRegion]] = None,
        cpu_vae: Optional[bool] = False,
        decode_steps: Optional[bool] = False
    ):
        """Runs the mixture-of-diffusers generation over the full canvas.

        Arguments:
            canvas_height: canvas height in pixels (must be divisible by 8).
            canvas_width: canvas width in pixels (must be divisible by 8).
            regions: list of DiffusionRegion objects (Text2ImageRegion and/or
                Image2ImageRegion) acting over the canvas.
            num_inference_steps: number of denoising steps.
            seed: global random seed for the initial canvas noise.
            reroll_regions: optional list of RerollRegion to reset or perturb
                the initial noise in specific areas.
            cpu_vae: if True, decode latents on CPU to save GPU memory.
            decode_steps: if True, also decode and return intermediate images.

        Returns:
            Dict with key "sample" (decoded images) and, when decode_steps is
            True, also "steps_images" (decoded images per timestep).
        """
        if reroll_regions is None:
            reroll_regions = []
        batch_size = 1

        if decode_steps:
            steps_images = []

        # Prepare scheduler
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        # Split diffusion regions by their kind
        text2image_regions = [region for region in regions if isinstance(region, Text2ImageRegion)]
        image2image_regions = [region for region in regions if isinstance(region, Image2ImageRegion)]

        # Prepare text embeddings
        for region in text2image_regions:
            region.tokenize_prompt(self.tokenizer)
            region.encode_prompt(self.text_encoder, self.device)

        # Create original noisy latents using the timesteps
        latents_shape = (batch_size, self.unet.config.in_channels, canvas_height // 8, canvas_width // 8)
        generator = torch.Generator(self.device).manual_seed(seed)
        init_noise = torch.randn(latents_shape, generator=generator, device=self.device)

        # Reset latents in seed reroll regions, if requested
        for region in reroll_regions:
            if region.reroll_mode == RerollModes.RESET.value:
                region_shape = (latents_shape[0], latents_shape[1], region.latent_row_end - region.latent_row_init, region.latent_col_end - region.latent_col_init)
                init_noise[:, :, region.latent_row_init:region.latent_row_end, region.latent_col_init:region.latent_col_end] = torch.randn(region_shape, generator=region.get_region_generator(self.device), device=self.device)

        # Apply epsilon noise to regions: first diffusion regions, then reroll regions
        all_eps_rerolls = regions + [r for r in reroll_regions if r.reroll_mode == RerollModes.EPSILON.value]
        for region in all_eps_rerolls:
            if region.noise_eps > 0:
                region_noise = init_noise[:, :, region.latent_row_init:region.latent_row_end, region.latent_col_init:region.latent_col_end]
                eps_noise = torch.randn(region_noise.shape, generator=region.get_region_generator(self.device), device=self.device) * region.noise_eps
                init_noise[:, :, region.latent_row_init:region.latent_row_end, region.latent_col_init:region.latent_col_end] += eps_noise

        # scale the initial noise by the standard deviation required by the scheduler
        latents = init_noise * self.scheduler.init_noise_sigma

        # Get unconditional embeddings for classifier free guidance in text2image regions
        for region in text2image_regions:
            max_length = region.tokenized_prompt.input_ids.shape[-1]
            uncond_input = self.tokenizer(
                [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            region.encoded_prompt = torch.cat([uncond_embeddings, region.encoded_prompt])

        # Prepare image latents
        for region in image2image_regions:
            region.encode_reference_image(self.vae, device=self.device, generator=generator)

        # Prepare mask of weights for each region
        mask_builder = MaskWeightsBuilder(latent_space_dim=self.unet.config.in_channels, nbatch=batch_size)
        mask_weights = [mask_builder.compute_mask_weights(region).to(self.device) for region in text2image_regions]

        # Diffusion timesteps
        for i, t in tqdm(enumerate(self.scheduler.timesteps)):
            # Diffuse each region
            noise_preds_regions = []

            # text2image regions
            for region in text2image_regions:
                region_latents = latents[:, :, region.latent_row_init:region.latent_row_end, region.latent_col_init:region.latent_col_end]
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([region_latents] * 2)
                # scale model input following scheduler rules
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=region.encoded_prompt)["sample"]
                # perform guidance
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred_region = noise_pred_uncond + region.guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_preds_regions.append(noise_pred_region)

            # Merge noise predictions for all tiles
            noise_pred = torch.zeros(latents.shape, device=self.device)
            contributors = torch.zeros(latents.shape, device=self.device)
            # Add each tile contribution to overall latents
            for region, noise_pred_region, mask_weights_region in zip(text2image_regions, noise_preds_regions, mask_weights):
                noise_pred[:, :, region.latent_row_init:region.latent_row_end, region.latent_col_init:region.latent_col_end] += noise_pred_region * mask_weights_region
                contributors[:, :, region.latent_row_init:region.latent_row_end, region.latent_col_init:region.latent_col_end] += mask_weights_region
            # Average overlapping areas with more than 1 contributor
            noise_pred /= contributors
            noise_pred = torch.nan_to_num(noise_pred)  # Replace NaNs by zeros: NaN can appear if a position is not covered by any DiffusionRegion

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

            # Image2Image regions: override latents generated by the scheduler
            for region in image2image_regions:
                influence_step = self.get_latest_timestep_img2img(num_inference_steps, region.strength)
                # Only override in the timesteps before the last influence step of the image (given by its strength)
                if t > influence_step:
                    timestep = t.repeat(batch_size)
                    region_init_noise = init_noise[:, :, region.latent_row_init:region.latent_row_end, region.latent_col_init:region.latent_col_end]
                    region_latents = self.scheduler.add_noise(region.reference_latents, region_init_noise, timestep)
                    latents[:, :, region.latent_row_init:region.latent_row_end, region.latent_col_init:region.latent_col_end] = region_latents

            if decode_steps:
                steps_images.append(self.decode_latents(latents, cpu_vae))

        # scale and decode the image latents with vae
        image = self.decode_latents(latents, cpu_vae)

        output = {"sample": image}
        if decode_steps:
            output = {**output, "steps_images": steps_images}
        return output
| 20,181 | 49.836272 | 224 | py |
mixture-of-diffusers | mixture-of-diffusers-master/mixdiff/tiling.py | from enum import Enum
import inspect
from ligo.segments import segment
from typing import List, Optional, Tuple, Union
import torch
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import DDIMScheduler, PNDMScheduler
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from diffusers.schedulers import LMSDiscreteScheduler
from .extrasmixin import StableDiffusionExtrasMixin
class StableDiffusionTilingPipeline(DiffusionPipeline, StableDiffusionExtrasMixin):
    """Stable Diffusion pipeline that generates a large image as a grid of tiles.

    Each tile is guided by its own prompt; overlapping tile regions are blended
    with gaussian weight masks to avoid visible seams.
    """
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
    ):
        """Registers the standard Stable Diffusion modules with the pipeline."""
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    class SeedTilesMode(Enum):
        """Modes in which the latents of a particular tile can be re-seeded"""
        FULL = "full"  # Overwrite the full tile region
        EXCLUSIVE = "exclusive"  # Overwrite only the area not shared with other tiles

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[List[str]]],
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        eta: Optional[float] = 0.0,
        seed: Optional[int] = None,
        tile_height: Optional[int] = 512,
        tile_width: Optional[int] = 512,
        tile_row_overlap: Optional[int] = 256,
        tile_col_overlap: Optional[int] = 256,
        guidance_scale_tiles: Optional[List[List[float]]] = None,
        seed_tiles: Optional[List[List[int]]] = None,
        seed_tiles_mode: Optional[Union[str, List[List[str]]]] = "full",
        seed_reroll_regions: Optional[List[Tuple[int, int, int, int, int]]] = None,
        cpu_vae: Optional[bool] = False,
    ):
        """Runs tiled text-to-image generation.

        Arguments:
            prompt: grid of prompts as a list of rows, each row a list of strings.
            num_inference_steps: number of denoising steps.
            guidance_scale: global classifier-free guidance weight.
            eta: DDIM eta parameter (ignored by schedulers that do not accept it).
            seed: global random seed; a random seed is drawn when None.
            tile_height / tile_width: size in pixels of each tile.
            tile_row_overlap / tile_col_overlap: overlap in pixels between neighboring tiles.
            guidance_scale_tiles: optional per-tile guidance overrides (None entries fall back to guidance_scale).
            seed_tiles: optional per-tile seeds to overwrite the initial latents.
            seed_tiles_mode: "full" or "exclusive" reseeding, global or per tile.
            seed_reroll_regions: list of (row_init, row_end, col_init, col_end, seed)
                pixel-space regions whose initial noise is rerolled.
            cpu_vae: decode the final latents on CPU to save GPU memory.

        Returns:
            Dict with key "sample" containing the decoded images.

        Raises:
            ValueError: on malformed prompt grids or unknown seed tile modes.
        """
        if not isinstance(prompt, list) or not all(isinstance(row, list) for row in prompt):
            raise ValueError(f"`prompt` has to be a list of lists but is {type(prompt)}")
        grid_rows = len(prompt)
        grid_cols = len(prompt[0])
        if not all(len(row) == grid_cols for row in prompt):
            raise ValueError(f"All prompt rows must have the same number of prompt columns")
        if not isinstance(seed_tiles_mode, str) and (not isinstance(seed_tiles_mode, list) or not all(isinstance(row, list) for row in seed_tiles_mode)):
            # FIX: error message used to report type(prompt) instead of type(seed_tiles_mode)
            raise ValueError(f"`seed_tiles_mode` has to be a string or list of lists but is {type(seed_tiles_mode)}")
        if isinstance(seed_tiles_mode, str):
            seed_tiles_mode = [[seed_tiles_mode for _ in range(len(row))] for row in prompt]
        if any(mode not in (modes := [mode.value for mode in self.SeedTilesMode]) for row in seed_tiles_mode for mode in row):
            raise ValueError(f"Seed tiles mode must be one of {modes}")
        if seed_reroll_regions is None:
            seed_reroll_regions = []
        if seed is None:
            # FIX: manual_seed(None) raises; draw a random seed instead
            seed = int(torch.randint(0, 2**32 - 1, (1,)).item())
        batch_size = 1

        # create original noisy latents using the timesteps
        height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
        width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        # FIX: build generators on the pipeline's device instead of hardcoded "cuda",
        # so the pipeline also works on CPU (consistent with StableDiffusionCanvasPipeline)
        generator = torch.Generator(self.device).manual_seed(seed)
        latents = torch.randn(latents_shape, generator=generator, device=self.device)

        # overwrite latents for specific tiles if provided
        if seed_tiles is not None:
            for row in range(grid_rows):
                for col in range(grid_cols):
                    if (seed_tile := seed_tiles[row][col]) is not None:
                        mode = seed_tiles_mode[row][col]
                        if mode == self.SeedTilesMode.FULL.value:
                            row_init, row_end, col_init, col_end = _tile2latent_indices(row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap)
                        else:
                            row_init, row_end, col_init, col_end = _tile2latent_exclusive_indices(row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, grid_rows, grid_cols)
                        tile_generator = torch.Generator(self.device).manual_seed(seed_tile)
                        tile_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
                        latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(tile_shape, generator=tile_generator, device=self.device)

        # overwrite again for seed reroll regions
        for row_init, row_end, col_init, col_end, seed_reroll in seed_reroll_regions:
            row_init, row_end, col_init, col_end = _pixel2latent_indices(row_init, row_end, col_init, col_end)  # to latent space coordinates
            reroll_generator = torch.Generator(self.device).manual_seed(seed_reroll)
            region_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
            latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(region_shape, generator=reroll_generator, device=self.device)

        # Prepare scheduler
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)

        # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents * self.scheduler.sigmas[0]

        # get prompts text embeddings
        text_input = [
            [
                self.tokenizer(
                    col,
                    padding="max_length",
                    max_length=self.tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                for col in row
            ]
            for row in prompt
        ]
        text_embeddings = [
            [
                self.text_encoder(col.input_ids.to(self.device))[0]
                for col in row
            ]
            for row in text_input
        ]

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0  # TODO: also active if any tile has guidance scale
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            for i in range(grid_rows):
                for j in range(grid_cols):
                    max_length = text_input[i][j].input_ids.shape[-1]
                    uncond_input = self.tokenizer(
                        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
                    )
                    uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

                    # For classifier free guidance, we need to do two forward passes.
                    # Here we concatenate the unconditional and text embeddings into a single batch
                    # to avoid doing two forward passes
                    text_embeddings[i][j] = torch.cat([uncond_embeddings, text_embeddings[i][j]])

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # Mask for tile weights strength
        tile_weights = self._gaussian_weights(tile_width, tile_height, batch_size)

        # Diffusion timesteps
        for i, t in tqdm(enumerate(self.scheduler.timesteps)):
            # Diffuse each tile
            noise_preds = []
            for row in range(grid_rows):
                noise_preds_row = []
                for col in range(grid_cols):
                    px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap)
                    tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
                    # expand the latents if we are doing classifier free guidance
                    latent_model_input = torch.cat([tile_latents] * 2) if do_classifier_free_guidance else tile_latents
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                    # predict the noise residual
                    noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings[row][col])["sample"]
                    # perform guidance
                    if do_classifier_free_guidance:
                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                        guidance = guidance_scale if guidance_scale_tiles is None or guidance_scale_tiles[row][col] is None else guidance_scale_tiles[row][col]
                        noise_pred_tile = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond)
                    else:
                        # FIX: without classifier-free guidance the raw prediction is the
                        # tile prediction; previously noise_pred_tile was undefined here
                        # and the stitching step below crashed
                        noise_pred_tile = noise_pred
                    noise_preds_row.append(noise_pred_tile)
                noise_preds.append(noise_preds_row)
            # Stitch noise predictions for all tiles
            noise_pred = torch.zeros(latents.shape, device=self.device)
            contributors = torch.zeros(latents.shape, device=self.device)
            # Add each tile contribution to overall latents
            for row in range(grid_rows):
                for col in range(grid_cols):
                    px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap)
                    noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += noise_preds[row][col] * tile_weights
                    contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights
            # Average overlapping areas with more than 1 contributor
            noise_pred /= contributors

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        # scale and decode the image latents with vae
        image = self.decode_latents(latents, cpu_vae)

        return {"sample": image}

    def _gaussian_weights(self, tile_width, tile_height, nbatches):
        """Generates a gaussian mask of weights for tile contributions"""
        from numpy import pi, exp, sqrt
        import numpy as np

        latent_width = tile_width // 8
        latent_height = tile_height // 8

        var = 0.01
        midpoint = (latent_width - 1) / 2  # -1 because index goes from 0 to latent_width - 1
        x_probs = [exp(-(x-midpoint)*(x-midpoint)/(latent_width*latent_width)/(2*var)) / sqrt(2*pi*var) for x in range(latent_width)]
        # FIX: apply the same -1 centering correction on the y axis (matches the x axis
        # above and the gaussian mask in canvas.py), so the mask is centered on the tile
        midpoint = (latent_height - 1) / 2
        y_probs = [exp(-(y-midpoint)*(y-midpoint)/(latent_height*latent_height)/(2*var)) / sqrt(2*pi*var) for y in range(latent_height)]

        weights = np.outer(y_probs, x_probs)
        return torch.tile(torch.tensor(weights, device=self.device), (nbatches, self.unet.config.in_channels, 1, 1))
def _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
"""Given a tile row and column numbers returns the range of pixels affected by that tiles in the overall image
Returns a tuple with:
- Starting coordinates of rows in pixel space
- Ending coordinates of rows in pixel space
- Starting coordinates of columns in pixel space
- Ending coordinates of columns in pixel space
"""
px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
px_row_end = px_row_init + tile_height
px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)
px_col_end = px_col_init + tile_width
return px_row_init, px_row_end, px_col_init, px_col_end
def _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end):
"""Translates coordinates in pixel space to coordinates in latent space"""
return px_row_init // 8, px_row_end // 8, px_col_init // 8, px_col_end // 8
def _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
"""Given a tile row and column numbers returns the range of latents affected by that tiles in the overall image
Returns a tuple with:
- Starting coordinates of rows in latent space
- Ending coordinates of rows in latent space
- Starting coordinates of columns in latent space
- Ending coordinates of columns in latent space
"""
px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap)
return _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end)
def _tile2latent_exclusive_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, rows, columns):
    """Given a tile row and column numbers returns the range of latents affected only by that tile in the overall image

    Returns a tuple with:
        - Starting coordinates of rows in latent space
        - Ending coordinates of rows in latent space
        - Starting coordinates of columns in latent space
        - Ending coordinates of columns in latent space
    """
    # Full latent footprint of the current tile (including overlap with neighbors).
    row_init, row_end, col_init, col_end = _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap)
    row_segment = segment(row_init, row_end)
    col_segment = segment(col_init, col_end)
    # Iterate over the rest of tiles, clipping the region for the current tile
    for row in range(rows):
        for column in range(columns):
            # NOTE(review): only tiles differing in BOTH row and column are clipped.
            # Tiles sharing this tile's row (or column) span the identical row (col)
            # range, so subtracting them would empty the segment — presumably why
            # `and` (not `or`) is used here; confirm this is the intended semantics.
            if row != tile_row and column != tile_col:
                clip_row_init, clip_row_end, clip_col_init, clip_col_end = _tile2latent_indices(row, column, tile_width, tile_height, tile_row_overlap, tile_col_overlap)
                row_segment = row_segment - segment(clip_row_init, clip_row_end)
                col_segment = col_segment - segment(clip_col_init, clip_col_end)
    #return row_init, row_end, col_init, col_end
    return row_segment[0], row_segment[1], col_segment[0], col_segment[1]
| 15,330 | 52.232639 | 218 | py |
mixture-of-diffusers | mixture-of-diffusers-master/mixdiff/imgtools.py | import numpy as np
import torch
from PIL import Image, ImageFilter
def preprocess_image(image):
    """Preprocess an input image
    Same as https://github.com/huggingface/diffusers/blob/1138d63b519e37f0ce04e027b9f4a3261d27c628/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L44
    """
    # Snap both dimensions down to a multiple of 32 before resizing.
    width, height = (dim - dim % 32 for dim in image.size)
    resized = image.resize((width, height), resample=Image.LANCZOS)
    # HWC uint8 -> NCHW float32 in [0, 1].
    array = np.asarray(resized, dtype=np.float32) / 255.0
    array = array[None].transpose(0, 3, 1, 2)
    tensor = torch.from_numpy(array)
    # Rescale to the [-1, 1] range the VAE expects.
    return 2.0 * tensor - 1.0
def preprocess_mask(mask, smoothing=None):
    """Preprocess an inpainting mask.

    Args:
        mask: PIL image; white marks the region to repaint, black is kept.
        smoothing: optional Gaussian blur radius applied outside the mask.

    Returns:
        Tensor of shape (1, 4, h//8, w//8) with 1 = keep, 0 = repaint,
        replicated over the 4 latent channels.
    """
    mask = mask.convert("L")
    if smoothing is not None:
        smoothed = mask.filter(ImageFilter.GaussianBlur(smoothing))
        mask = Image.composite(mask, smoothed, mask)  # Original mask values kept as 1, out of mask get smoothed
        # (removed leftover FIXME debug write to outputs/smoothed_mask.png: it
        # polluted disk and crashed when the directory did not exist)
    w, h = mask.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    mask = mask.resize((w // 8, h // 8), resample=Image.NEAREST)  # match latent resolution
    mask = np.array(mask).astype(np.float32) / 255.0
    mask = np.tile(mask, (4, 1, 1))  # replicate across the 4 latent channels
    mask = mask[None]  # add batch dimension (the old transpose(0,1,2,3) was a no-op)
    mask = 1 - mask  # repaint white, keep black
    mask = torch.from_numpy(mask)
    return mask
| 1,458 | 39.527778 | 180 | py |
ACME | ACME-master/test.py | import os
import time
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torch.backends.cudnn as cudnn
from data_loader import get_loader
from args import get_parser
from models import *
from tqdm import tqdm
import pdb
import torch.nn.functional as F
from triplet_loss import *
import pickle
from build_vocab import Vocabulary
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
import torchvision.utils as vutils
# Run evaluation on a single GPU. NOTE(review): `opts` is assumed to be created
# at import time via args.get_parser().parse_args() — confirm against the imports.
device = [0]
# Vocabulary mapping ingredient tokens to indices (see build_vocab.Vocabulary).
with open(opts.vocab_path, 'rb') as f:
    vocab = pickle.load(f)
# Image branch wrapped in DataParallel, restored from trained checkpoint.
image_model = ImageEmbedding()
image_model = torch.nn.DataParallel(image_model, device_ids=device).cuda()
image_model_pre = torch.load('acme/model_e045_v1.pkl')
image_model.load_state_dict(image_model_pre)
# Recipe (text) branch, restored from its own checkpoint.
recipe_model = TextEmbedding()
recipe_model = torch.nn.DataParallel(recipe_model, device_ids=device).cuda()
recipe_model_pre = torch.load('acme/model_e045_v2.pkl')
recipe_model.load_state_dict(recipe_model_pre)
# Shared projection head applied to both modalities before ranking.
fc_sia = nn.Sequential(
    nn.Linear(opts.embDim, opts.embDim),
    nn.BatchNorm1d(opts.embDim),
    nn.Tanh(),
).cuda()
fc_sia.load_state_dict(torch.load('acme/model_e045_v8.pkl'))
np.random.seed(opts.seed)
def main():
    """Build the test-split data loader and run the retrieval evaluation."""
    # Deterministic resize + center crop only; ToTensor and ImageNet
    # normalization are applied inside the dataset's __getitem__
    # (see data_loader.ImagerLoader). The previously-built `normalize`
    # transform here was dead code and has been removed.
    val_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224)])
    val_loader = get_loader(opts.img_path, val_transform, vocab, opts.data_path, partition='test',
                            batch_size=opts.batch_size, shuffle=False,
                            num_workers=opts.workers, pin_memory=True)
    print('Validation loader prepared.')
    test(val_loader)
def test(test_loader):
    """Embed the whole test split with both branches and report median rank / recall.

    Returns:
        (recall_i2t, recall_t2i, medR_i2t, medR_t2i)
    """
    image_model.eval()
    recipe_model.eval()
    for i, data in enumerate(tqdm(test_loader)):
        with torch.no_grad():
            # data[0] = [images, instrs, itr_ln, ingrs, igr_ln, food_id] (see collate_fn).
            img_emd_modal = image_model(data[0][0].cuda())
            recipe_emb_modal = recipe_model(data[0][1].cuda(), data[0][2].cuda(), data[0][3].cuda(), data[0][4].cuda())
            # Shared projection + L2 normalization so both modalities are comparable.
            img_emd_modal = norm(fc_sia(img_emd_modal))
            recipe_emb_modal = norm(fc_sia(recipe_emb_modal))
            # Accumulate all embeddings on CPU. NOTE(review): data0/data1 are
            # undefined if the loader yields zero batches.
            if i==0:
                data0 = img_emd_modal.data.cpu().numpy()
                data1 = recipe_emb_modal.data.cpu().numpy()
            else:
                data0 = np.concatenate((data0,img_emd_modal.data.cpu().numpy()),axis=0)
                data1 = np.concatenate((data1,recipe_emb_modal.data.cpu().numpy()),axis=0)
    medR_i2t, recall_i2t = rank_i2t(opts, data0, data1)
    print('I2T Val medR {medR:.4f}\t'
          'Recall {recall}'.format(medR=medR_i2t, recall=recall_i2t))
    medR_t2i, recall_t2i = rank_t2i(opts, data0, data1)
    print('T2I Val medR {medR:.4f}\t'
          'Recall {recall}'.format(medR=medR_t2i, recall=recall_t2i))
    return recall_i2t, recall_t2i, medR_i2t, medR_t2i
def rank_i2t(opts, img_embeds, rec_embeds, N=10000, folds=10):
    """Median-rank / recall@K evaluation for image-to-recipe retrieval.

    Repeats `folds` times: samples `N` aligned (image, recipe) pairs, ranks every
    recipe by euclidean distance to each query image, and records the rank of
    the true pair.

    Args:
        opts: options namespace; only `opts.seed` is used (sampling seed).
        img_embeds: (num_samples, dim) array of image embeddings.
        rec_embeds: (num_samples, dim) array of recipe embeddings, row-aligned
            with `img_embeds`.
        N: pairs sampled per fold (default 10000, backward compatible).
        folds: number of sampling repetitions to average over (default 10).

    Returns:
        (median_rank, recalls): mean of per-fold median ranks, and a dict
        {1: r@1, 5: r@5, 10: r@10} of mean recall fractions.
    """
    random.seed(opts.seed)
    glob_rank = []
    glob_recall = {1: 0.0, 5: 0.0, 10: 0.0}
    for _ in range(folds):
        ids = random.sample(range(0, len(img_embeds)), N)
        im_sub = img_embeds[ids, :]
        instr_sub = rec_embeds[ids, :]
        med_rank = []
        recall = {1: 0.0, 5: 0.0, 10: 0.0}
        for ii in range(N):
            # Distances from query image ii to every recipe in the fold
            # (vectorized; replaces the per-element python loop).
            dists = np.linalg.norm(instr_sub - im_sub[ii], axis=1)
            # 0-based rank of the true pair = number of strictly closer recipes.
            # This replaces the fragile np.where over a sorted (key, value) array,
            # which could spuriously match a key equal to a distance.
            pos = int(np.searchsorted(np.sort(dists), dists[ii]))
            if pos == 0:
                recall[1] += 1
            if pos < 5:
                recall[5] += 1
            if pos < 10:
                recall[10] += 1
            med_rank.append(pos + 1)
        for k in recall:
            recall[k] = recall[k] / N
        glob_rank.append(np.median(med_rank))
        for k in glob_recall:
            glob_recall[k] += recall[k]
    for k in glob_recall:
        glob_recall[k] = glob_recall[k] / folds  # was a hard-coded /10
    return np.average(glob_rank), glob_recall
def rank_t2i(opts, img_embeds, rec_embeds, N=10000, folds=10):
    """Median-rank / recall@K evaluation for recipe-to-image retrieval.

    Mirror of rank_i2t with the query/gallery roles swapped: each recipe
    embedding queries the fold's image embeddings.

    Args:
        opts: options namespace; only `opts.seed` is used (sampling seed).
        img_embeds: (num_samples, dim) array of image embeddings.
        rec_embeds: (num_samples, dim) array of recipe embeddings, row-aligned
            with `img_embeds`.
        N: pairs sampled per fold (default 10000, backward compatible).
        folds: number of sampling repetitions to average over (default 10).

    Returns:
        (median_rank, recalls): mean of per-fold median ranks, and a dict
        {1: r@1, 5: r@5, 10: r@10} of mean recall fractions.
    """
    random.seed(opts.seed)
    glob_rank = []
    glob_recall = {1: 0.0, 5: 0.0, 10: 0.0}
    for _ in range(folds):
        ids = random.sample(range(0, len(img_embeds)), N)
        im_sub = img_embeds[ids, :]
        instr_sub = rec_embeds[ids, :]
        med_rank = []
        recall = {1: 0.0, 5: 0.0, 10: 0.0}
        for ii in range(N):
            # Distances from query recipe ii to every image in the fold.
            dists = np.linalg.norm(im_sub - instr_sub[ii], axis=1)
            # 0-based rank of the true pair (see rank_i2t for rationale).
            pos = int(np.searchsorted(np.sort(dists), dists[ii]))
            if pos == 0:
                recall[1] += 1
            if pos < 5:
                recall[5] += 1
            if pos < 10:
                recall[10] += 1
            med_rank.append(pos + 1)
        for k in recall:
            recall[k] = recall[k] / N
        glob_rank.append(np.median(med_rank))
        for k in glob_recall:
            glob_recall[k] += recall[k]
    for k in glob_recall:
        glob_recall[k] = glob_recall[k] / folds  # was a hard-coded /10
    return np.average(glob_rank), glob_recall
# Script entry point: build the test loader and run retrieval evaluation.
if __name__ == '__main__':
    main()
| 5,828 | 28.145 | 119 | py |
ACME | ACME-master/args.py | import argparse
def get_parser():
    """Build the argparse parser holding every ACME hyper-parameter and path.

    Callers typically parse at import time: opts = get_parser().parse_args().
    """
    parser = argparse.ArgumentParser(description='tri-joint parameters')
    # general
    parser.add_argument('--seed', default=1234, type=int)
    # NOTE(review): type=list splits a CLI string into characters
    # (e.g. "01" -> ['0', '1']); only the default [0] behaves as intended.
    parser.add_argument('--device', default=[0], type=list)
    # data
    parser.add_argument('--img_path', default='../im2recipe-Pytorch/data')
    parser.add_argument('--data_path', default='data/food_data/')
    parser.add_argument('--workers', default=10, type=int)
    parser.add_argument('--vocab_path', type=str, default='data/new_word_dict.pkl', help='path for vocabulary wrapper')
    # model
    parser.add_argument('--batch_size', default=64, type=int)
    # im2recipe model (embedding dims for the retrieval branches)
    parser.add_argument('--embDim', default=1024, type=int)
    parser.add_argument('--nRNNs', default=1, type=int)
    parser.add_argument('--srnnDim', default=1024, type=int)
    parser.add_argument('--irnnDim', default=300, type=int)
    parser.add_argument('--imfeatDim', default=2048, type=int)
    parser.add_argument('--stDim', default=1024, type=int)
    parser.add_argument('--ingrW2VDim', default=300, type=int)
    parser.add_argument('--maxSeqlen', default=20, type=int)
    parser.add_argument('--maxIngrs', default=20, type=int)
    parser.add_argument('--maxImgs', default=5, type=int)
    parser.add_argument('--numClasses', default=1048, type=int)
    #img-text
    parser.add_argument('--hidden_size', type=int , default=512, help='dimension of lstm hidden states')
    parser.add_argument('--num_layers', type=int , default=1, help='number of layers in lstm')
    #text-img (GAN generator/discriminator hyper-parameters)
    parser.add_argument('--Z_DIM', type=int , default=100, help='noise dimension for image generation')
    parser.add_argument('--DF_DIM', type=int , default=64, help='D dimension')
    parser.add_argument('--GF_DIM', type=int , default=64, help='G dimension')
    parser.add_argument('--EMBEDDING_DIM', type=int , default=128, help='embedding dimension')
    parser.add_argument('--R_NUM', type=int , default=2, help='resudial unit number')
    parser.add_argument('--BRANCH_NUM', type=int , default=3, help='the number of the stages')
    parser.add_argument('--B_CONDITION', type=bool , default=True, help='if use condition loss')
    # training
    parser.add_argument('--lr', default=0.0001, type=float)
    parser.add_argument('--momentum', default=0.9, type=float)
    parser.add_argument('--weight_decay', default=0, type=float)
    parser.add_argument('--epochs', default=50, type=int)
    parser.add_argument('--start_epoch', default=0, type=int)
    parser.add_argument('--ingrW2V', default='data/vocab.bin',type=str)
    # dataset
    parser.add_argument('--maxlen', default=20, type=int)
    parser.add_argument('--vocab', default = 'vocab.txt', type=str)
    parser.add_argument('--dataset', default = '../data/recipe1M/', type=str)
    parser.add_argument('--sthdir', default = '../data/', type=str)
    return parser
| 2,906 | 47.45 | 119 | py |
ACME | ACME-master/data_loader.py | from __future__ import print_function
import torch.utils.data as data
from PIL import Image
import os
import sys
import pickle
import numpy as np
import lmdb
import torch
import pdb
import torchvision.transforms as transforms
import nltk
from build_vocab import Vocabulary
from args import get_parser
# Parse command-line options at import time; `opts` is shared module-wide.
parser = get_parser()
opts = parser.parse_args()
def default_loader(path):
    """Load an RGB image from `path`; fall back to a blank white 224x224 image.

    The fallback keeps training/eval running when an image file is missing or
    corrupt, at the cost of silently substituting content.
    """
    try:
        return Image.open(path).convert('RGB')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        return Image.new('RGB', (224, 224), 'white')
class ImagerLoader(data.Dataset):
    """Recipe1M dataset: yields image + instruction/ingredient tensors per recipe.

    Images live in an LMDB store (`<partition>_lmdb`); sample ids and the raw
    recipe split come from pickled files under data/food_data/.
    """

    def __init__(self, img_path, transform=None, vocab=None,
                 loader=default_loader, data_path=None, partition=None):
        if data_path == None:
            raise Exception('No data path specified.')
        if partition is None:
            raise Exception('Unknown partition type %s.' % partition)
        else:
            self.partition = partition
        # Read-only LMDB holding one pickled record per sample id.
        self.env = lmdb.open(os.path.join(img_path, partition + '_lmdb'), max_readers=1, readonly=True, lock=False,
                             readahead=False, meminit=False)
        with open(os.path.join('data/food_data/' + partition + '_ids.pkl'), 'rb') as f:
            self.ids = pickle.load(f, encoding='latin1')
        with open(os.path.join('data/food_data/' + partition + '_split.pkl'), 'rb') as f:
            self.split = pickle.load(f, encoding='latin1')
        self.imgPath = img_path
        self.maxInst = 20  # max instruction sentences kept per recipe
        self.transform = transform
        self.loader = loader
        self.vocab = vocab

    def __getitem__(self, index):
        """Return a 10-field tuple; see the return statement and collate_fn."""
        # for background
        with self.env.begin(write=False) as txn:
            serialized_sample = txn.get(self.ids[index].encode())
        sample = pickle.loads(serialized_sample, encoding='latin1')
        imgs = sample['imgs']
        food_id = self.ids[index]
        # Deterministic first image for val/test; random one of the first 5 for train.
        if self.partition != 'train':
            imgIdx = 0
        else:
            imgIdx = np.random.choice(range(min(5, len(imgs))))
        # Image files are sharded into 4 nested dirs named by the id's first chars.
        loader_path = [imgs[imgIdx]['id'][i] for i in range(4)]
        loader_path = os.path.join(*loader_path)
        path = os.path.join(self.imgPath, self.partition, loader_path, imgs[imgIdx]['id'])
        # instructions: zero-pad the skip-thought matrix to maxInst rows
        instrs = sample['intrs']
        itr_ln = len(instrs)
        t_inst = np.zeros((self.maxInst, np.shape(instrs)[1]), dtype=np.float32)
        t_inst[:itr_ln][:] = instrs
        instrs = torch.FloatTensor(t_inst)
        # ingredients: fixed-length index vector; igr_ln = count of non-zero entries
        ingrs = sample['ingrs'].astype(int)
        ingrs = torch.LongTensor(ingrs)
        igr_ln = max(np.nonzero(sample['ingrs'])[0]) + 1
        # image
        img = self.loader(path)
        img = self.transform(img)
        normalize = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])])
        # `ret` is a normalized 128x128 copy (presumably for the GAN branch —
        # TODO confirm against the training loop); `img` is the full-size copy.
        re_img = transforms.Resize(128)(img)
        img = normalize(img)
        ret = normalize(re_img)
        class_label = sample['classes'] - 1  # classes are stored 1-based
        vocab = self.vocab
        # Multi-hot ingredient vector over the 4102-entry vocabulary.
        selec_ingrs = set(self.split[food_id]['ingredients'])
        ingr_cap = []
        ingr_cap.append(vocab('<start>'))
        one_hot_vec = torch.zeros(4102)
        for i in list(selec_ingrs):
            one_hot_vec[vocab(str(i).lower())] = 1
        ingr_cap = torch.Tensor(ingr_cap)
        # output
        ## also output the length of captions, which could be used in LSTM prediction
        return img, instrs, itr_ln, ingrs, igr_ln, \
               ingr_cap, class_label, ret, one_hot_vec, food_id

    def __len__(self):
        return len(self.ids)
def collate_fn(data):
    """Creates mini-batch tensors from the list of tuples (image, caption).
    We should build custom collate_fn rather than using default collate_fn,
    because merging caption (including padding) is not supported in default.
    Args:
        data: list of tuple (image, caption).
            - image: torch tensor of shape (3, 256, 256).
            - caption: torch tensor of shape (?); variable length.
    Returns:
        images: torch tensor of shape (batch_size, 3, 256, 256).
        targets: torch tensor of shape (batch_size, padded_length).
        lengths: list; valid length for each padded caption.

    NOTE(review): the shapes above look stale — samples actually carry 10
    fields (see ImagerLoader.__getitem__), and images are center-cropped to 224.
    """
    # Sort a data list by caption length (descending order).
    data.sort(key=lambda x: len(x[5]), reverse=True)
    img, instrs, itr_ln, ingrs, igr_ln,\
        ingr_cap, class_label, ret, one_hot_vec, food_id = zip(*data)
    # Merge images (from tuple of 3D tensor to 4D tensor).
    images = torch.stack(img, 0)
    instrs = torch.stack(instrs, 0)
    itr_ln = torch.LongTensor(list(itr_ln))
    ingrs = torch.stack(ingrs, 0)
    igr_ln = torch.LongTensor(list(igr_ln))
    class_label = torch.LongTensor(list(class_label))
    ret = torch.stack(ret, 0)
    # Merge captions (from tuple of 1D tensor to 2D tensor), zero-padded to max length.
    lengths = [len(cap) for cap in ingr_cap]
    targets = torch.zeros(len(ingr_cap), max(lengths)).long()
    for i, cap in enumerate(ingr_cap):
        end = lengths[i]
        targets[i, :end] = cap[:end]
    one_hot_vec = torch.stack(one_hot_vec, 0)
    # Two views of the batch: retrieval inputs, and training targets/extras.
    return [images, instrs, itr_ln, ingrs, igr_ln, list(food_id)], \
           [images, instrs, itr_ln, ingrs, igr_ln, targets, lengths, class_label, ret, one_hot_vec]
def get_loader(img_path, transform, vocab, data_path, partition, batch_size, shuffle, num_workers, pin_memory):
    """Wrap an ImagerLoader for `partition` in a DataLoader with the custom collate_fn.

    drop_last=True discards a possibly smaller final batch.
    """
    data_loader = torch.utils.data.DataLoader(ImagerLoader(img_path, transform, vocab,
                                                           data_path=data_path, partition=partition),
                                              batch_size=batch_size,
                                              shuffle=shuffle,
                                              num_workers=num_workers,
                                              pin_memory=pin_memory,
                                              drop_last=True,
                                              collate_fn=collate_fn)
    return data_loader
ACME | ACME-master/triplet_loss.py | from __future__ import print_function
import torch
from torch import nn
from torch.autograd import Variable
class TripletLoss(object):
    """Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).
    Related Triplet Loss theory can be found in paper 'In Defense of the Triplet
    Loss for Person Re-Identification'."""

    def __init__(self, device, margin=None):
        # margin=None switches to the soft-margin formulation (SoftMarginLoss).
        self.margin = margin
        self.device = device  # list of CUDA device ids; device[0] is used
        if margin is not None:
            self.ranking_loss = nn.MarginRankingLoss(margin=margin)
        else:
            self.ranking_loss = nn.SoftMarginLoss()

    def __call__(self, dist_ap, dist_an):
        """Compute the loss; target y=1 means dist_an should exceed dist_ap."""
        with torch.cuda.device(self.device[0]):
            y = Variable(dist_an.data.new().resize_as_(dist_an.data).fill_(1)).cuda()
        if self.margin is not None:
            loss = self.ranking_loss(dist_an, dist_ap, y)
        else:
            loss = self.ranking_loss(dist_an - dist_ap, y)
        return loss
def normalize(x, axis=-1):
    """L2-normalize `x` along `axis`; the epsilon guards against division by zero."""
    denom = torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12
    return 1. * x / denom
def euclidean_dist(x, y):
    """Pairwise euclidean distance between two batches of vectors.

    Args:
        x: pytorch Variable, with shape [m, d]
        y: pytorch Variable, with shape [n, d]
    Returns:
        dist: pytorch Variable, with shape [m, n] where entry (i, j) = ||x_i - y_j||.
    """
    m, n = x.size(0), y.size(0)
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y. Keyword beta/alpha replaces the
    # deprecated positional addmm_(beta, alpha, mat1, mat2) signature, which is
    # rejected by modern torch releases.
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    dist = dist.clamp(min=1e-12).sqrt()  # clamp for numerical stability
    return dist
def hard_example_mining(dist_mat, labels, return_inds=False):
    """For each anchor, mine the hardest positive (farthest same-label sample)
    and hardest negative (closest different-label sample).

    Args:
        dist_mat: [N, N] pairwise distance matrix.
        labels: [N] integer labels.
        return_inds: also return indices of the mined examples.

    Returns:
        dist_ap, dist_an: [N] distances to hardest positive / negative
        (plus p_inds, n_inds when return_inds is True).

    Note:
        The masked view(N, -1) below requires every row to have the same number
        of positives and of negatives — presumably the batch sampler guarantees
        balanced labels; TODO confirm.
    """
    assert len(dist_mat.size()) == 2
    assert dist_mat.size(0) == dist_mat.size(1)
    N = dist_mat.size(0)
    # shape [N, N]: boolean masks of same-label / different-label pairs
    is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
    is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())
    # `dist_ap` means distance(anchor, positive)
    # both `dist_ap` and `relative_p_inds` with shape [N, 1]
    dist_ap, relative_p_inds = torch.max(
        dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
    # `dist_an` means distance(anchor, negative)
    # both `dist_an` and `relative_n_inds` with shape [N, 1]
    dist_an, relative_n_inds = torch.min(
        dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
    # shape [N]
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)
    if return_inds:
        # shape [N, N]: each row is 0..N-1, used to recover absolute indices
        ind = (labels.new().resize_as_(labels)
               .copy_(torch.arange(0, N).long())
               .unsqueeze( 0).expand(N, N))
        # shape [N, 1]
        p_inds = torch.gather(
            ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)
        n_inds = torch.gather(
            ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)
        # shape [N]
        p_inds = p_inds.squeeze(1)
        n_inds = n_inds.squeeze(1)
        return dist_ap, dist_an, p_inds, n_inds
    return dist_ap, dist_an
def global_loss(tri_loss, global_feat, labels, normalize_feature=False):
    """Hard-mined triplet loss over a batch of global features.

    Returns (loss, dist_ap, dist_an, dist_mat).
    """
    feats = normalize(global_feat, axis=-1) if normalize_feature else global_feat
    # [N, N] pairwise distances within the batch
    dist_mat = euclidean_dist(feats, feats)
    dist_ap, dist_an = hard_example_mining(dist_mat, labels, return_inds=False)
    return tri_loss(dist_ap, dist_an), dist_ap, dist_an, dist_mat
ACME | ACME-master/models.py | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.legacy as legacy
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torchwordemb
from args import get_parser
import pdb
import torch.nn.functional as F
from torch.autograd import Variable
# =============================================================================
# Parse command-line options at import time; `opts` configures every model below.
parser = get_parser()
opts = parser.parse_args()
# # =============================================================================
##############################################################
## some codes modified from Han Zhang's Stack-GAN (https://github.com/hanzhanggit/StackGAN)
## and Salvador's im2recipe (https://github.com/torralba-lab/im2recipe-Pytorch)
##############################################################
class GLU(nn.Module):
    """Gated Linear Unit: the first half of the channels is gated by the
    sigmoid of the second half, halving the channel count."""

    def __init__(self):
        super(GLU, self).__init__()

    def forward(self, x):
        channels = x.size(1)
        assert channels % 2 == 0, 'channels dont divide 2!'
        half = channels // 2
        return x[:, :half] * torch.sigmoid(x[:, half:])
def conv3x3(in_planes, out_planes):
    """Bias-free 3x3 convolution that preserves spatial size (stride 1, padding 1)."""
    layer = nn.Conv2d(in_planes, out_planes,
                      kernel_size=3, stride=1, padding=1, bias=False)
    return layer
# ############## G networks ################################################
# Upsale the spatial size by a factor of 2
def upBlock(in_planes, out_planes):
    """2x nearest-neighbor upsample, then conv3x3 + BN + GLU.

    The conv produces 2*out_planes channels so the GLU gate halves them
    back to out_planes.
    """
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode='nearest'),
        conv3x3(in_planes, out_planes * 2),
        nn.BatchNorm2d(out_planes * 2),
        GLU(),
    )
# Keep the spatial size
def Block3x3_relu(in_planes, out_planes):
    """conv3x3 + BN + GLU block that keeps the spatial resolution.

    As in upBlock, the conv doubles channels for the GLU gate.
    """
    layers = [
        conv3x3(in_planes, out_planes * 2),
        nn.BatchNorm2d(out_planes * 2),
        GLU(),
    ]
    return nn.Sequential(*layers)
class ResBlock(nn.Module):
    """Residual block: (conv-BN-GLU-conv-BN) plus an identity skip connection."""

    def __init__(self, channel_num):
        super(ResBlock, self).__init__()
        self.block = nn.Sequential(
            conv3x3(channel_num, channel_num * 2),
            nn.BatchNorm2d(channel_num * 2),
            GLU(),
            conv3x3(channel_num, channel_num),
            nn.BatchNorm2d(channel_num),
        )

    def forward(self, x):
        return self.block(x) + x
class TableModule(nn.Module):
    """Thin nn.Module wrapper around torch.cat so concatenation can live in a module graph."""

    def __init__(self):
        super(TableModule, self).__init__()

    def forward(self, x, dim):
        return torch.cat(x, dim)
def norm(input, p=2, dim=1, eps=1e-12):
    """Lp-normalize `input` along `dim`, clamping the norm at `eps` to avoid division by zero."""
    denom = input.norm(p, dim, keepdim=True).clamp(min=eps).expand_as(input)
    return input / denom
# Skip-thoughts LSTM
class stRNN(nn.Module):
    """Unidirectional LSTM over precomputed skip-thought instruction vectors;
    returns the output at the last valid (unpadded) timestep of each sequence."""

    def __init__(self):
        super(stRNN, self).__init__()
        self.lstm = nn.LSTM(input_size=opts.stDim, hidden_size=opts.srnnDim, bidirectional=False, batch_first=True)

    def forward(self, x, sq_lengths):
        """x: padded batch-first instruction features (stDim per step);
        sq_lengths: valid length of each sequence."""
        # here we use a previous LSTM to get the representation of each instruction
        # sort sequence according to the length (pack_padded_sequence needs descending lengths)
        sorted_len, sorted_idx = sq_lengths.sort(0, descending=True)
        index_sorted_idx = sorted_idx\
            .view(-1,1,1).expand_as(x)
        sorted_inputs = x.gather(0, index_sorted_idx.long())
        # pack sequence
        packed_seq = torch.nn.utils.rnn.pack_padded_sequence(
            sorted_inputs, sorted_len.cpu().data.numpy(), batch_first=True)
        # pass it to the lstm
        out, hidden = self.lstm(packed_seq)
        # unsort the output back to the original batch order
        _, original_idx = sorted_idx.sort(0, descending=False)
        unpacked, _ = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
        unsorted_idx = original_idx.view(-1,1,1).expand_as(unpacked)
        # we get the last index of each sequence in the batch
        idx = (sq_lengths-1).view(-1,1).expand(unpacked.size(0), unpacked.size(2)).unsqueeze(1)
        # we sort and get the last element of each sequence
        output = unpacked.gather(0, unsorted_idx.long()).gather(1,idx.long())
        output = output.view(output.size(0),output.size(1)*output.size(2))
        return output
class ingRNN(nn.Module):
    """Bidirectional LSTM over pretrained word2vec ingredient embeddings.

    Output concatenates the final hidden state of both directions
    (2 * irnnDim features per sample).
    """

    def __init__(self):
        super(ingRNN, self).__init__()
        self.irnn = nn.LSTM(input_size=opts.ingrW2VDim, hidden_size=opts.irnnDim, bidirectional=True, batch_first=True)
        # Pretrained word2vec table; row 0 treated as padding.
        _, vec = torchwordemb.load_word2vec_bin(opts.ingrW2V)
        self.embs = nn.Embedding(vec.size(0), opts.ingrW2VDim, padding_idx=0) # not sure about the padding idx
        self.embs.weight.data.copy_(vec)
        # self.embs = nn.Embedding(3122, opts.ingrW2VDim)
        # self.irnn = nn.LSTM(input_size=opts.ingrW2VDim, hidden_size=opts.irnnDim, bidirectional=True, batch_first=True)

    def forward(self, x, sq_lengths):
        # we get the w2v for each element of the ingredient sequence
        x = self.embs(x) # torch.Size([64, 20, 300])
        # sort sequence according to the length (required by pack_padded_sequence)
        sorted_len, sorted_idx = sq_lengths.sort(0, descending=True)
        index_sorted_idx = sorted_idx\
            .view(-1,1,1).expand_as(x)
        sorted_inputs = x.gather(0, index_sorted_idx.long())
        # pack sequence
        packed_seq = torch.nn.utils.rnn.pack_padded_sequence(
            sorted_inputs, sorted_len.cpu().data.numpy(), batch_first=True)
        # pass it to the rnn
        out, hidden = self.irnn(packed_seq)
        # unsort the output; hidden[0] is h_n, the last hidden state per direction
        _, original_idx = sorted_idx.sort(0, descending=False)
        # LSTM
        # bi-directional
        unsorted_idx = original_idx.view(1,-1,1).expand_as(hidden[0])
        # 2 directions x batch_size x num features, we transpose 1st and 2nd dimension
        output = hidden[0].gather(1,unsorted_idx).transpose(0,1).contiguous() # torch.Size([64, 2, 300])
        output = output.view(output.size(0),output.size(1)*output.size(2))
        return output
class TextEmbedding(nn.Module):
    """Recipe-side encoder: concatenates instruction (stRNN) and ingredient
    (ingRNN) features and projects them into the joint embedding space (embDim)."""

    def __init__(self):
        super(TextEmbedding, self).__init__()
        self.recipe_embedding = nn.Sequential(
            nn.Linear(opts.irnnDim*2 + opts.srnnDim, opts.embDim),
            nn.Tanh(),
        )
        # Auxiliary heads; NOTE(review): not used in this forward pass —
        # presumably consumed elsewhere in training.
        self.semantic_branch = nn.Linear(opts.embDim, opts.numClasses)
        self.fc_recipe = nn.Sequential(
            nn.Linear(opts.embDim, opts.embDim),
            nn.BatchNorm1d(opts.embDim),
            nn.Tanh(),
        )
        self.stRNN_ = stRNN()
        self.ingRNN_ = ingRNN()
        self.table = TableModule()

    def forward(self, y1, y2, z1, z2, ingrs_emb=None):
        # y1/y2: instruction features + lengths; z1/z2: ingredient indices + lengths.
        # A precomputed ingrs_emb tensor can be passed to bypass the ingredient RNN.
        if torch.is_tensor(ingrs_emb):
            recipe_emb = self.table([self.stRNN_(y1,y2), ingrs_emb],1) # joining on the last dim
        else:
            ingrs_emb = self.ingRNN_(z1,z2)
            recipe_emb = self.table([self.stRNN_(y1,y2), ingrs_emb],1) # joining on the last dim
        recipe_emb_domain = self.recipe_embedding(recipe_emb)
        output = recipe_emb_domain
        return output
class ImageEmbedding(nn.Module):
    """Image-side encoder: pretrained ResNet-50 features (imfeatDim=2048)
    projected to the joint embedding space (embDim)."""

    def __init__(self):
        super(ImageEmbedding, self).__init__()
        resnet = models.resnet50(pretrained=True)
        modules = list(resnet.children())[:-1] # we do not use the last fc layer.
        self.visionMLP = nn.Sequential(*modules)
        self.visual_embedding = nn.Sequential(
            nn.Linear(opts.imfeatDim, opts.embDim),
            nn.Tanh(),
        )
        # Auxiliary heads; NOTE(review): not used in this forward pass —
        # presumably consumed elsewhere in training.
        self.semantic_branch = nn.Linear(opts.embDim, opts.numClasses)
        self.fc_visual = nn.Sequential(
            nn.Linear(opts.embDim, opts.embDim),
            nn.BatchNorm1d(opts.embDim),
            nn.Tanh(),
        )

    def forward(self, x):
        visual_emb = self.visionMLP(x)
        visual_emb = visual_emb.view(visual_emb.size(0), -1) # batch_size * 2048
        visual_emb_domain = self.visual_embedding(visual_emb)
        output = visual_emb_domain
        return output
class MultiLabelNet(nn.Module):
    """Two MLP heads over a shared embedding: (a) a sigmoid multi-label score
    over the 4102-entry ingredient vocabulary, (b) raw class logits."""

    def __init__(self):
        super(MultiLabelNet, self).__init__()
        # Multi-label head: embDim -> 512 -> 256 -> 4102.
        self.model = nn.Sequential(
            nn.Linear(opts.embDim, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 4102),
        )
        # Class head: embDim -> 512 -> numClasses.
        self.ingrs_linear = nn.Sequential(
            nn.Linear(opts.embDim, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, opts.numClasses),
        )

    def forward(self, features):
        multilabel_logits = self.model(features)
        return [nn.Sigmoid()(multilabel_logits), self.ingrs_linear(features)]
class DecoderRNN(nn.Module):
    """LSTM decoder generating ingredient-token sequences from a feature vector;
    also exposes a 2*irnnDim projection of its final hidden state."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers, max_seq_length=100):
        """Set the hyper-parameters and build the layers."""
        super(DecoderRNN, self).__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        # self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        # NOTE(review): the hidden_size/num_layers constructor args are ignored;
        # the LSTM is sized by opts.irnnDim instead.
        self.lstm = nn.LSTM(input_size=embed_size, hidden_size=opts.irnnDim, bidirectional=False, batch_first=True)
        self.linear = nn.Linear(opts.irnnDim, vocab_size)
        self.ingrs_linear = nn.Linear(opts.irnnDim, opts.irnnDim*2)
        self.max_seg_length = max_seq_length

    def forward(self, features, captions, lengths):
        """Decode image feature vectors and generates captions."""
        embeddings = self.embed(captions)
        # Prepend the conditioning feature as the first decoder input step.
        embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)
        packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings, lengths, batch_first=True)
        outs, hidden = self.lstm(packed)
        # Restore the original batch order of the final hidden state.
        # NOTE(review): assumes `lengths` is a tensor sorted descending by the caller.
        _, original_idx = lengths.sort(0, descending=False)
        unsorted_idx = original_idx.view(1,-1,1).expand_as(hidden[0])
        output = hidden[0].gather(1,unsorted_idx).transpose(0,1).contiguous()
        # output = output.view(output.size(0),output.size(1)*output.size(2))
        output_fea = self.ingrs_linear(output.view(output.size(0),output.size(1)*output.size(2)))
        outputs = self.linear(outs[0])
        return [outputs, output_fea]

    def sample(self, features, states=None):
        """Generate captions for given image features using greedy search."""
        sampled_ids = []
        inputs = features.unsqueeze(1)
        for i in range(self.max_seg_length):
            outs, states = self.lstm(inputs, states)  # outs: (batch_size, 1, hidden_size)
            outputs = self.linear(outs.squeeze(1))    # outputs: (batch_size, vocab_size)
            _, predicted = outputs.max(1)             # predicted: (batch_size)
            sampled_ids.append(predicted)
            inputs = self.embed(predicted)            # inputs: (batch_size, embed_size)
            inputs = inputs.unsqueeze(1)              # inputs: (batch_size, 1, embed_size)
        sampled_ids = torch.stack(sampled_ids, 1)     # sampled_ids: (batch_size, max_seq_length)
        return sampled_ids
class text_emb_discriminator(nn.Module):
    """Discriminator MLP over 2*irnnDim ingredient-feature vectors,
    producing 2*numClasses logits."""

    def __init__(self):
        super(text_emb_discriminator, self).__init__()
        layers = [
            nn.Linear(opts.irnnDim * 2, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, opts.numClasses * 2),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, fea):
        return self.model(fea)
class cross_modal_discriminator(nn.Module):
    """Discriminator MLP over joint-space embeddings (embDim),
    producing a single real/fake logit."""

    def __init__(self):
        super(cross_modal_discriminator, self).__init__()
        layers = [
            nn.Linear(opts.embDim, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, fea):
        return self.model(fea)
class CA_NET(nn.Module):
    # some code is modified from vae examples
    # (https://github.com/pytorch/examples/blob/master/vae/main.py)
    """Conditioning Augmentation: maps a text embedding to a Gaussian (mu, logvar)
    and samples a conditioning code via the reparameterization trick."""

    def __init__(self):
        super(CA_NET, self).__init__()
        self.t_dim = opts.embDim
        self.ef_dim = opts.EMBEDDING_DIM
        # fc outputs 4*ef_dim; GLU halves it to 2*ef_dim = [mu | logvar].
        self.fc = nn.Linear(self.t_dim, self.ef_dim * 4, bias=True)
        self.relu = GLU()

    def encode(self, text_embedding):
        x = self.relu(self.fc(text_embedding))
        mu = x[:, :self.ef_dim]
        logvar = x[:, self.ef_dim:]
        return mu, logvar

    def reparametrize(self, mu, logvar):
        # z = mu + eps * sigma, with eps sampled on logvar's GPU.
        with torch.cuda.device(logvar.get_device()):
            std = logvar.mul(0.5).exp_()
            eps = torch.FloatTensor(std.size()).normal_().cuda()
            eps = Variable(eps)
        return eps.mul(std).add_(mu)

    def forward(self, text_embedding):
        mu, logvar = self.encode(text_embedding)
        c_code = self.reparametrize(mu, logvar)
        return c_code, mu, logvar
class INIT_STAGE_G(nn.Module):
    """First generator stage: projects (noise [+ conditioning]) to a 4x4 map
    and upsamples x16 to a 64x64 feature map with ngf/16 channels."""

    def __init__(self, ngf):
        super(INIT_STAGE_G, self).__init__()
        self.gf_dim = ngf
        if opts.B_CONDITION:
            self.in_dim = opts.Z_DIM + opts.EMBEDDING_DIM
        else:
            self.in_dim = opts.Z_DIM
        self.define_module()

    def define_module(self):
        in_dim = self.in_dim
        ngf = self.gf_dim
        # fc outputs 2x the target size; GLU halves it to ngf*4*4.
        self.fc = nn.Sequential(
            nn.Linear(in_dim, ngf * 4 * 4 * 2, bias=False),
            nn.BatchNorm1d(ngf * 4 * 4 * 2),
            GLU())
        self.upsample1 = upBlock(ngf, ngf // 2)
        self.upsample2 = upBlock(ngf // 2, ngf // 4)
        self.upsample3 = upBlock(ngf // 4, ngf // 8)
        self.upsample4 = upBlock(ngf // 8, ngf // 16)

    def forward(self, z_code, c_code=None):
        if opts.B_CONDITION and c_code is not None:
            in_code = torch.cat((c_code, z_code), 1)
        else:
            in_code = z_code
        # state size 16ngf x 4 x 4
        out_code = self.fc(in_code)
        out_code = out_code.view(-1, self.gf_dim, 4, 4)
        # state size 8ngf x 8 x 8
        out_code = self.upsample1(out_code)
        # state size 4ngf x 16 x 16
        out_code = self.upsample2(out_code)
        # state size 2ngf x 32 x 32
        out_code = self.upsample3(out_code)
        # state size ngf x 64 x 64
        out_code = self.upsample4(out_code)
        return out_code
class NEXT_STAGE_G(nn.Module):
    """Refinement generator stage: fuses the previous stage's feature map with
    the conditioning code, applies residual blocks, then upsamples x2
    (halving the channel count)."""

    def __init__(self, ngf, num_residual=opts.R_NUM):
        super(NEXT_STAGE_G, self).__init__()
        self.gf_dim = ngf
        if opts.B_CONDITION:
            self.ef_dim = opts.EMBEDDING_DIM
        else:
            self.ef_dim = opts.Z_DIM
        self.num_residual = num_residual
        self.define_module()

    def _make_layer(self, block, channel_num):
        # Stack num_residual ResBlocks at constant channel count.
        layers = []
        for i in range(self.num_residual):
            layers.append(block(channel_num))
        return nn.Sequential(*layers)

    def define_module(self):
        ngf = self.gf_dim
        efg = self.ef_dim
        self.jointConv = Block3x3_relu(ngf + efg, ngf)
        self.residual = self._make_layer(ResBlock, ngf)
        self.upsample = upBlock(ngf, ngf // 2)

    def forward(self, h_code, c_code):
        s_size = h_code.size(2)
        # Broadcast the conditioning vector over the spatial grid.
        c_code = c_code.view(-1, self.ef_dim, 1, 1)
        c_code = c_code.repeat(1, 1, s_size, s_size)
        # state size (ngf+egf) x in_size x in_size
        h_c_code = torch.cat((c_code, h_code), 1)
        # state size ngf x in_size x in_size
        out_code = self.jointConv(h_c_code)
        out_code = self.residual(out_code)
        # state size ngf/2 x 2in_size x 2in_size
        out_code = self.upsample(out_code)
        return out_code
class GET_IMAGE_G(nn.Module):
    """Projects a feature map to a 3-channel image in [-1, 1] via conv3x3 + tanh."""

    def __init__(self, ngf):
        super(GET_IMAGE_G, self).__init__()
        self.gf_dim = ngf
        self.img = nn.Sequential(conv3x3(ngf, 3), nn.Tanh())

    def forward(self, h_code):
        return self.img(h_code)
class G_NET(nn.Module):
    """Multi-stage StackGAN-style generator. With the default BRANCH_NUM=3 it
    builds three stages, but `forward` only returns the stage-2 image."""

    def __init__(self):
        super(G_NET, self).__init__()
        self.gf_dim = opts.GF_DIM
        self.define_module()

    def define_module(self):
        if opts.B_CONDITION:
            self.ca_net = CA_NET()
        if opts.BRANCH_NUM > 0:
            self.h_net1 = INIT_STAGE_G(self.gf_dim * 16)
            self.img_net1 = GET_IMAGE_G(self.gf_dim)
        if opts.BRANCH_NUM > 1:
            self.h_net2 = NEXT_STAGE_G(self.gf_dim)
            self.img_net2 = GET_IMAGE_G(self.gf_dim // 2)
        if opts.BRANCH_NUM > 2:
            self.h_net3 = NEXT_STAGE_G(self.gf_dim // 2)
            self.img_net3 = GET_IMAGE_G(self.gf_dim // 4)
        if opts.BRANCH_NUM > 3: # Recommended structure (mainly limited by GPU memory), and not test yet
            self.h_net4 = NEXT_STAGE_G(self.gf_dim // 4, num_residual=1)
            self.img_net4 = GET_IMAGE_G(self.gf_dim // 8)
        # NOTE(review): this branch overwrites the h_net4/img_net4 defined just
        # above; it almost certainly should define h_net5/img_net5. Left as-is
        # because renaming changes state-dict keys; confirm before fixing.
        if opts.BRANCH_NUM > 4:
            self.h_net4 = NEXT_STAGE_G(self.gf_dim // 8, num_residual=1)
            self.img_net4 = GET_IMAGE_G(self.gf_dim // 16)

    def forward(self, z_code, text_embedding=None):
        if opts.B_CONDITION and text_embedding is not None:
            c_code, mu, logvar = self.ca_net(text_embedding)
        else:
            c_code, mu, logvar = z_code, None, None
        # NOTE(review): only the stage-2 image is produced here; h_net3,
        # img_net1 and img_net3 are built but unused in this path.
        fake_imgs = []
        h_code1 = self.h_net1(z_code, c_code)
        h_code2 = self.h_net2(h_code1, c_code)
        fake_img2 = self.img_net2(h_code2)
        fake_imgs.append(fake_img2)
        return fake_imgs, mu, logvar
# ############## D networks ################################################
def Block3x3_leakRelu(in_planes, out_planes):
    """3x3 conv -> BatchNorm -> LeakyReLU(0.2); spatial size is preserved."""
    return nn.Sequential(
        conv3x3(in_planes, out_planes),
        nn.BatchNorm2d(out_planes),
        nn.LeakyReLU(0.2, inplace=True),
    )
# Downsale the spatial size by a factor of 2
def downBlock(in_planes, out_planes):
    """Halve the spatial resolution: strided 4x4 conv -> BN -> LeakyReLU."""
    conv = nn.Conv2d(in_planes, out_planes, 4, 2, 1, bias=False)
    norm = nn.BatchNorm2d(out_planes)
    act = nn.LeakyReLU(0.2, inplace=True)
    return nn.Sequential(conv, norm, act)
# Downsale the spatial size by a factor of 16
def encode_image_by_16times(ndf):
    """Encode an RGB image into an (8*ndf)-channel map at 1/16 resolution.

    Four strided 4x4 convolutions, each halving the spatial size; the first
    stage skips batch norm, matching common GAN discriminator practice.
    """
    def _down(cin, cout, norm=True):
        # One downsampling stage: conv (stride 2) [+ BN] + LeakyReLU.
        layers = [nn.Conv2d(cin, cout, 4, 2, 1, bias=False)]
        if norm:
            layers.append(nn.BatchNorm2d(cout))
        layers.append(nn.LeakyReLU(0.2, inplace=True))
        return layers
    return nn.Sequential(
        *_down(3, ndf, norm=False),
        *_down(ndf, ndf * 2),
        *_down(ndf * 2, ndf * 4),
        *_down(ndf * 4, ndf * 8),
    )
# For 128 x 128 images
class D_NET128(nn.Module):
    """Discriminator with an optional text-conditioned real/fake head."""
    def __init__(self):
        super(D_NET128, self).__init__()
        self.df_dim = opts.DF_DIM
        self.ef_dim = opts.EMBEDDING_DIM
        self.define_module()
    def define_module(self):
        ndf = self.df_dim
        efg = self.ef_dim
        # Downsample the image by 16x, then one more 2x stage (-> 1/32 size).
        self.img_code_s16 = encode_image_by_16times(ndf)
        self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
        self.img_code_s32_1 = Block3x3_leakRelu(ndf * 16, ndf * 8)
        # Real/fake probability head.
        self.logits = nn.Sequential(
            nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
            nn.Sigmoid())
        # NOTE(review): logits_class is built but never used in forward().
        self.logits_class = nn.Sequential(
            nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
            nn.Sigmoid())
        if opts.B_CONDITION:
            # Fuses image features with the text embedding before self.logits.
            self.jointConv = Block3x3_leakRelu(ndf * 8 + efg, ndf * 8)
            self.uncond_branch = nn.Sequential(
                nn.Conv2d(ndf * 8, ndf * 4, kernel_size=4, stride=4),
                # nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True))
            # Despite the "uncond" naming, this linear head emits numClasses
            # scores (used as classification logits by the training loop).
            self.semantic_branch = nn.Linear(ndf * 4, opts.numClasses)
    def forward(self, x_var, c_code=None):
        """Return [real/fake score] or [score, class logits] when conditioned."""
        x_code = self.img_code_s16(x_var)
        x_code = self.img_code_s32(x_code)
        x_code = self.img_code_s32_1(x_code)
        if opts.B_CONDITION and c_code is not None:
            # Tile the text embedding over the 4x4 feature grid.
            c_code = c_code.view(-1, self.ef_dim, 1, 1)
            c_code = c_code.repeat(1, 1, 4, 4)
            # state size (ngf+egf) x 4 x 4
            h_c_code = torch.cat((c_code, x_code), 1)
            # state size ngf x in_size x in_size
            h_c_code = self.jointConv(h_c_code)
        else:
            h_c_code = x_code
        output = self.logits(h_c_code)
        if opts.B_CONDITION:
            # Class scores come from the unconditioned image features only.
            out_uncond = self.uncond_branch(x_code)
            out_uncond = out_uncond.view(out_uncond.size(0), -1)
            out_uncond = self.semantic_branch(out_uncond)
            return [output.view(-1), out_uncond]
        else:
            return [output.view(-1)]
| 21,061 | 34.698305 | 121 | py |
ACME | ACME-master/train.py | import os
import time
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torch.backends.cudnn as cudnn
from data_loader import get_loader
from args import get_parser
from models import *
from torch.optim import lr_scheduler
from tqdm import tqdm
import pdb
import torch.nn.functional as F
from triplet_loss import *
import pickle
from build_vocab import Vocabulary
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
import torchvision.utils as vutils
# =============================================================================
# Global experiment setup: options, vocabulary, models, losses, optimizers.
# All of this runs at import time and is shared by the functions below.
parser = get_parser()
opts = parser.parse_args()
device = [0]
# Ingredient vocabulary produced by build_vocab.py.
with open(opts.vocab_path, 'rb') as f:
    vocab = pickle.load(f)
# =============================================================================
##load models
image_model = torch.nn.DataParallel(ImageEmbedding().cuda(), device_ids=device)
recipe_model = torch.nn.DataParallel(TextEmbedding().cuda(), device_ids=device)
netG = torch.nn.DataParallel(G_NET().cuda(), device_ids=device)
multi_label_net = torch.nn.DataParallel(MultiLabelNet().cuda(), device_ids=device)
cm_discriminator = torch.nn.DataParallel(cross_modal_discriminator().cuda(), device_ids=device)
text_discriminator = torch.nn.DataParallel(text_emb_discriminator().cuda(), device_ids=device)
netsD = torch.nn.DataParallel(D_NET128().cuda(), device_ids=device)
## load loss functions
triplet_loss = TripletLoss(device, margin=0.3)
img2text_criterion = nn.MultiLabelMarginLoss().cuda()
# Class 0 gets zero weight, so it contributes nothing to the class loss.
weights_class = torch.Tensor(opts.numClasses).fill_(1)
weights_class[0] = 0
class_criterion = nn.CrossEntropyLoss(weight=weights_class).cuda()
GAN_criterion = nn.BCELoss().cuda()
nz = opts.Z_DIM
noise = Variable(torch.FloatTensor(opts.batch_size, nz)).cuda()
fixed_noise = Variable(torch.FloatTensor(opts.batch_size, nz).normal_(0, 1)).cuda()
real_labels = Variable(torch.FloatTensor(opts.batch_size).fill_(1)).cuda()
fake_labels = Variable(torch.FloatTensor(opts.batch_size).fill_(0)).cuda()
# Shared projection head applied to both modalities before the triplet loss.
fc_sia = nn.Sequential(
    nn.Linear(opts.embDim, opts.embDim),
    nn.BatchNorm1d(opts.embDim),
    nn.Tanh(),
).cuda()
model_list = [image_model, recipe_model, netG, multi_label_net, cm_discriminator, text_discriminator, netsD, fc_sia]
optimizer = torch.optim.Adam([
    {'params': image_model.parameters()},
    {'params': recipe_model.parameters()},
    {'params': netG.parameters()},
    {'params': multi_label_net.parameters()}
], lr=opts.lr, betas=(0.5, 0.999))
optimizers_imgD = torch.optim.Adam(netsD.parameters(), lr=opts.lr, betas=(0.5, 0.999))
optimizer_cmD = torch.optim.Adam(cm_discriminator.parameters(), lr=opts.lr, betas=(0.5, 0.999))
# Identity labels 0..batch-1 duplicated: image i and recipe i share label i.
label = list(range(0, opts.batch_size))
label.extend(label)
label = np.array(label)
label = torch.tensor(label).cuda().long()
method = 'acme'
save_folder = method
os.makedirs(save_folder, exist_ok=True)
# Per-epoch retrieval metrics are appended to this CSV trace file.
epoch_trace_f_dir = os.path.join(save_folder, "trace_" + method + ".csv")
with open(epoch_trace_f_dir, "w") as f:
    f.write("epoch,lr,I2R,R@1,R@5,R@10,R2I,R@1,R@5,R@10\n")
def main():
    """Build the loaders, run the epoch loop, and checkpoint on best i2t recall."""
    # data preparation, loaders
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip()])
    val_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224)])
    cudnn.benchmark = True
    # preparing the training laoder
    train_loader = get_loader(opts.img_path, train_transform, vocab, opts.data_path, partition='train',
                              batch_size=opts.batch_size, shuffle=True,
                              num_workers=opts.workers, pin_memory=True)
    print('Training loader prepared.')
    # preparing validation loader
    val_loader = get_loader(opts.img_path, val_transform, vocab, opts.data_path, partition='test',
                            batch_size=opts.batch_size, shuffle=False,
                            num_workers=opts.workers, pin_memory=True)
    print('Validation loader prepared.')
    best_val_i2t = {1:0.0,5:0.0,10:0.0}
    # NOTE(review): best_val_t2i / best_epoch_i2t / best_epoch_t2i are
    # initialized but never updated; only i2t recall drives checkpointing.
    best_val_t2i = {1:0.0,5:0.0,10:0.0}
    best_epoch_i2t = 0
    best_epoch_t2i = 0
    for epoch in range(0, opts.epochs):
        train(train_loader, epoch, val_loader)
        recall_i2t, recall_t2i, medR_i2t, medR_t2i = validate(val_loader)
        # Append this epoch's metrics to the CSV trace.
        with open(epoch_trace_f_dir, "a") as f:
            lr = optimizer.param_groups[1]['lr']
            f.write("{},{},{},{},{},{},{},{},{},{}\n".format\
                (epoch,lr,medR_i2t,recall_i2t[1],recall_i2t[5],recall_i2t[10],\
                medR_t2i,recall_t2i[1],recall_t2i[5],recall_t2i[10]))
        # Save all models whenever any i2t Recall@K improves.
        for keys in best_val_i2t:
            if recall_i2t[keys] > best_val_i2t[keys]:
                best_val_i2t = recall_i2t
                best_epoch = epoch+1
                model_num = 1
                for model_n in model_list:
                    filename = save_folder + '/model_e%03d_v%d.pkl' % (epoch+1, model_num)
                    torch.save(model_n.state_dict(), filename)
                    model_num += 1
                break
        # NOTE(review): if no Recall@K ever improves, `best_epoch` is printed
        # below before it is assigned (NameError) -- confirm intended.
        print("best: ", best_epoch, best_val_i2t)
        print('params lr: %f' % optimizer.param_groups[1]['lr'])
        # Fixed learning-rate drop at epoch 30 across all optimizers.
        if epoch == 30:
            optimizer.param_groups[0]['lr'] = 0.00001
            optimizer.param_groups[1]['lr'] = 0.00001
            optimizer.param_groups[2]['lr'] = 0.00001
            optimizer.param_groups[3]['lr'] = 0.00001
            optimizers_imgD.param_groups[0]['lr'] = 0.00001
            optimizer_cmD.param_groups[0]['lr'] = 0.00001
def train_Dnet(idx, real_imgs, fake_imgs, mu, label_class):
    """BCE real-vs-fake discriminator loss for image branch ``idx``.

    Uses module-level ``netsD``, ``GAN_criterion``, ``real_labels`` and
    ``fake_labels``. ``label_class`` is accepted but unused here.
    """
    netD = netsD
    real_imgs = real_imgs[idx]
    fake_imgs = fake_imgs[idx]
    # mu and fakes are detached so D updates do not backprop into G / CA net.
    real_logits = netD(real_imgs, mu.detach())
    fake_logits = netD(fake_imgs.detach(), mu.detach())
    lossD_real = GAN_criterion(real_logits[0], real_labels)
    lossD_fake = GAN_criterion(fake_logits[0], fake_labels)
    lossD = lossD_real + lossD_fake
    return lossD
def KL_loss(mu, logvar):
    """Mean KL divergence of N(mu, exp(logvar)) from the standard normal.

    Computed as -0.5 * mean(1 + logvar - mu^2 - exp(logvar)).
    """
    element = 1 + logvar - mu.pow(2) - logvar.exp()
    return torch.mean(element) * (-0.5)
def train_Gnet(idx, real_imgs, fake_imgs, mu, logvar, label_class):
    """Generator-side loss for branch ``idx``: adversarial + class + KL terms."""
    netD = netsD
    real_imgs = real_imgs[idx]
    fake_imgs = fake_imgs[idx]
    real_logits = netD(real_imgs, mu)
    fake_logits = netD(fake_imgs, mu)
    # Adversarial term: fakes should be scored as real.
    lossG_fake = GAN_criterion(fake_logits[0], real_labels)
    # The class head must predict the food class on both real and fake images.
    lossG_real_cond = class_criterion(real_logits[1], label_class)
    lossG_fake_cond = class_criterion(fake_logits[1], label_class)
    lossG_cond = lossG_real_cond + lossG_fake_cond
    lossG = lossG_fake + lossG_cond
    # KL regularizer on the conditioning distribution, weighted by 2.
    kl_loss = KL_loss(mu, logvar) * 2
    lossG = kl_loss + lossG
    return lossG
def compute_gradient_penalty(D, real_samples, fake_samples):
    """Calculates the gradient penalty loss for WGAN GP"""
    # Random weight term for interpolation between real and fake samples
    # NOTE(review): alpha has shape (batch, 1), so this assumes 2-D
    # (batch, feat) embedding inputs -- confirm before reusing for images.
    alpha = torch.cuda.FloatTensor(np.random.random((real_samples.size(0), 1)))
    # Get random interpolation between real and fake samples
    interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
    d_interpolates = D(interpolates)
    fake = torch.autograd.Variable(torch.cuda.FloatTensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)
    # Get gradient w.r.t. interpolates
    gradients = torch.autograd.grad(
        outputs=d_interpolates,  # fack samples
        inputs=interpolates,  # real samples
        grad_outputs=fake,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    gradients = gradients.view(gradients.size(0), -1)
    # Penalize deviation of the per-sample gradient norm from 1.
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty
def train(train_loader, epoch, val_loader):
    """One training epoch over all ACME objectives.

    Per batch: (1) WGAN-GP critic step aligning the two embedding
    distributions, (2) triplet retrieval loss on the shared projection,
    (3) image->ingredients prediction loss, (4) text->image GAN step,
    then one combined backward pass through the main optimizer.
    """
    tri_losses = AverageMeter()
    img_losses = AverageMeter()
    text_losses = AverageMeter()
    cmG_losses = AverageMeter()
    image_model.train()
    recipe_model.train()
    for i, data in enumerate(tqdm(train_loader)):
        img_emd_modal = image_model(data[0][0].cuda())
        recipe_emb_modal = recipe_model(data[0][1].cuda(), data[0][2].cuda(), data[0][3].cuda(), data[0][4].cuda())
        ################################################################
        # modal-level fusion
        ################################################################
        # WGAN-GP critic update on detached embeddings (critic only).
        real_validity = cm_discriminator(img_emd_modal.detach())
        fake_validity = cm_discriminator(recipe_emb_modal.detach())
        gradient_penalty = compute_gradient_penalty(cm_discriminator, img_emd_modal.detach(), recipe_emb_modal.detach())
        loss_cmD = -torch.mean(real_validity) + torch.mean(fake_validity) + 10 * gradient_penalty
        optimizer_cmD.zero_grad()
        loss_cmD.backward()
        optimizer_cmD.step()
        # Generator-side term: push recipe embeddings toward the image side.
        g_fake_validity = cm_discriminator(recipe_emb_modal)
        loss_cmG = -torch.mean(g_fake_validity)
        ################################################################
        # cross-modal retrieval
        ################################################################
        # Shared siamese projection + L2 norm before the triplet loss.
        img_id_fea = norm(fc_sia(img_emd_modal))
        rec_id_fea = norm(fc_sia(recipe_emb_modal))
        tri_loss = global_loss(triplet_loss, torch.cat((img_id_fea, rec_id_fea)), label)[0]
        ################################################################
        # translation consistency
        ################################################################
        label_class = data[1][7].cuda()
        real_imgs = []
        real_imgs.append(data[1][8].cuda())
        ingr_cap = data[1][5].cuda()
        lengths = torch.tensor(data[1][6]).cuda()
        # NOTE(review): `targets` is built but not used below -- confirm.
        targets = pack_padded_sequence(ingr_cap, lengths, batch_first=True)[0]
        one_hot_cap = data[1][9].cuda().long()
        ################################################################
        # img2text
        ################################################################
        # Predict the ingredient multi-label set + class from image features.
        recipe_out = multi_label_net(img_id_fea)
        loss_i2t = img2text_criterion(recipe_out[0], one_hot_cap)
        loss_t_class = class_criterion(recipe_out[1], label_class)
        loss_text = loss_i2t + loss_t_class
        ###############################################################
        # text2img
        ###############################################################
        noise.data.normal_(0, 1)
        fake_imgs, mu, logvar = netG(noise, rec_id_fea)
        lossD_total = 0
        # Discriminator step for the image GAN, then generator-side loss.
        lossD = train_Dnet(0, real_imgs, fake_imgs, mu, label_class)
        optimizers_imgD.zero_grad()
        lossD.backward()
        optimizers_imgD.step()
        lossG = train_Gnet(0, real_imgs, fake_imgs, mu, logvar, label_class)
        loss_img = lossG
        # Dynamic balancing: scale the larger translation loss down so the
        # two directions contribute equally.
        if loss_text.item() < loss_img.item():
            loss_img = (loss_text.item()/loss_img.item()) * loss_img
        else:
            loss_text = (loss_img.item()/loss_text.item()) * loss_text
        loss_g = loss_img + loss_text
        ###############################################################
        # back-propogate
        ###############################################################
        # Combined objective for the main (embedding + G) optimizer.
        loss = tri_loss + 0.005 * loss_cmG + 0.002 * loss_g
        tri_losses.update(tri_loss.item(), data[0][0].size(0))
        img_losses.update(loss_img.item(), data[0][0].size(0))
        text_losses.update(loss_text.item(), data[0][0].size(0))
        cmG_losses.update(loss_cmG.item(), data[0][0].size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(epoch)
    print('Epoch: {0} '
          'tri loss {tri_loss.val:.4f} ({tri_loss.avg:.4f}), '
          'cm loss {loss_cmG.val:.4f} ({loss_cmG.avg:.4f}), '
          'img loss {img_losses.val:.4f} ({img_losses.avg:.4f}), '
          'text loss {loss_text.val:.4f} ({loss_text.avg:.4f})'
          .format(
        epoch, tri_loss=tri_losses, loss_cmG=cmG_losses,
        img_losses=img_losses, loss_text=text_losses))
def validate(val_loader):
    """Embed the validation split and report retrieval metrics both ways.

    Returns (recall_i2t, recall_t2i, medR_i2t, medR_t2i).
    """
    # switch to evaluate mode
    image_model.eval()
    recipe_model.eval()
    end = time.time()  # NOTE(review): timer is started but never read.
    for i, data in enumerate(tqdm(val_loader)):
        with torch.no_grad():
            img_emd_modal = image_model(data[0][0].cuda())
            recipe_emb_modal = recipe_model(data[0][1].cuda(), data[0][2].cuda(), data[0][3].cuda(), data[0][4].cuda())
            # Same shared projection + normalization as used in training.
            img_emd_modal = norm(fc_sia(img_emd_modal))
            recipe_emb_modal = norm(fc_sia(recipe_emb_modal))
        # Accumulate all embeddings on the CPU as numpy arrays.
        if i==0:
            data0 = img_emd_modal.data.cpu().numpy()
            data1 = recipe_emb_modal.data.cpu().numpy()
        else:
            data0 = np.concatenate((data0,img_emd_modal.data.cpu().numpy()),axis=0)
            data1 = np.concatenate((data1,recipe_emb_modal.data.cpu().numpy()),axis=0)
    medR_i2t, recall_i2t = rank_i2t(opts, data0, data1)
    print('I2T Val medR {medR:.4f}\t'
          'Recall {recall}'.format(medR=medR_i2t, recall=recall_i2t))
    medR_t2i, recall_t2i = rank_t2i(opts, data0, data1)
    print('T2I Val medR {medR:.4f}\t'
          'Recall {recall}'.format(medR=medR_t2i, recall=recall_t2i))
    return recall_i2t, recall_t2i, medR_i2t, medR_t2i
def rank_i2t(opts, img_embeds, rec_embeds, N=1000, folds=10):
    """Median rank and Recall@{1,5,10} for image-to-recipe retrieval.

    Args:
        opts: options namespace; only ``opts.seed`` is read.
        img_embeds: (num_samples, dim) image embeddings.
        rec_embeds: (num_samples, dim) recipe embeddings, row-aligned.
        N: candidate-pool size per fold (default 1000, historical behavior).
        folds: number of random pools to average over (default 10).

    Returns:
        (average median rank over folds, dict of averaged Recall@{1,5,10}).

    Fixed: the rank was previously recovered with ``np.where`` over an array
    of (index, distance) pairs, which could spuriously match an integer
    index instead of the query distance. Ranks now come from a searchsorted
    over the sorted distances; ties still count their first position, which
    matches the previous typical behavior.
    """
    random.seed(opts.seed)
    glob_rank = []
    glob_recall = {1: 0.0, 5: 0.0, 10: 0.0}
    for _ in range(folds):
        # Draw a random evaluation pool of N aligned (image, recipe) pairs.
        ids = random.sample(range(0, len(img_embeds)), N)
        im_sub = img_embeds[ids, :]
        instr_sub = rec_embeds[ids, :]
        med_rank = []
        recall = {1: 0.0, 5: 0.0, 10: 0.0}
        for ii in range(N):
            # Distances from query image ii to every candidate recipe.
            dists = np.linalg.norm(instr_sub - im_sub[ii], axis=1)
            # Rank (0-based) of the matching recipe among all candidates.
            pos = int(np.searchsorted(np.sort(dists), dists[ii], side='left'))
            if pos + 1 == 1:
                recall[1] += 1
            if pos + 1 <= 5:
                recall[5] += 1
            if pos + 1 <= 10:
                recall[10] += 1
            med_rank.append(pos + 1)
        for k in recall.keys():
            recall[k] = recall[k] / N
        glob_rank.append(np.median(med_rank))
        for k in glob_recall.keys():
            glob_recall[k] += recall[k]
    for k in glob_recall.keys():
        glob_recall[k] = glob_recall[k] / folds
    return np.average(glob_rank), glob_recall
def rank_t2i(opts, img_embeds, rec_embeds, N=1000, folds=10):
    """Median rank and Recall@{1,5,10} for recipe-to-image retrieval.

    Mirror of ``rank_i2t`` with the query/candidate roles swapped; see its
    docstring for the parameters, the return value, and the fix to the
    fragile rank lookup (now a searchsorted over the sorted distances).
    """
    random.seed(opts.seed)
    glob_rank = []
    glob_recall = {1: 0.0, 5: 0.0, 10: 0.0}
    for _ in range(folds):
        # Draw a random evaluation pool of N aligned (image, recipe) pairs.
        ids = random.sample(range(0, len(img_embeds)), N)
        im_sub = img_embeds[ids, :]
        instr_sub = rec_embeds[ids, :]
        med_rank = []
        recall = {1: 0.0, 5: 0.0, 10: 0.0}
        for ii in range(N):
            # Distances from query recipe ii to every candidate image.
            dists = np.linalg.norm(im_sub - instr_sub[ii], axis=1)
            # Rank (0-based) of the matching image among all candidates.
            pos = int(np.searchsorted(np.sort(dists), dists[ii], side='left'))
            if pos + 1 == 1:
                recall[1] += 1
            if pos + 1 <= 5:
                recall[5] += 1
            if pos + 1 <= 10:
                recall[10] += 1
            med_rank.append(pos + 1)
        for k in recall.keys():
            recall[k] = recall[k] / N
        glob_rank.append(np.median(med_rank))
        for k in glob_recall.keys():
            glob_recall[k] += recall[k]
    for k in glob_recall.keys():
        glob_recall[k] = glob_recall[k] / folds
    return np.average(glob_rank), glob_recall
class AverageMeter(object):
    """Track the most recent value plus a running sum/count average."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
if __name__ == '__main__':
main()
| 16,687 | 34.430998 | 120 | py |
ACME | ACME-master/build_vocab.py | import nltk
import pickle
import argparse
from collections import Counter
class Vocabulary(object):
    """Simple bidirectional word <-> index mapping."""
    def __init__(self):
        self.word2idx = {}
        self.idx2word = {}
        self.idx = 0
    def add_word(self, word):
        """Register *word* if unseen, assigning it the next free index."""
        if word in self.word2idx:
            return
        self.word2idx[word] = self.idx
        self.idx2word[self.idx] = word
        self.idx += 1
    def __call__(self, word):
        """Return the index of *word*, falling back to the '<unk>' entry."""
        if word in self.word2idx:
            return self.word2idx[word]
        return self.word2idx['<unk>']
    def __len__(self):
        return len(self.word2idx)
def build_vocab(path, threshold):
    """Build a Vocabulary over ingredient tokens from the Recipe1M splits.

    Args:
        path: directory containing '<split>_split.pkl' files.
        threshold: minimum token frequency for a word to be kept.

    Returns:
        Vocabulary holding the special tokens plus all frequent ingredients.

    Fixed: removed a leftover ``import pdb; pdb.set_trace()`` breakpoint that
    halted every vocabulary build at an interactive debugger.
    """
    dataset = ['train', 'val', 'test']
    # dataset = ['train']
    counter = Counter()
    for split in dataset:
        data_path = path + split + '_split.pkl'
        with open(data_path, "rb") as f:
            data = pickle.load(f)
        ids = data.keys()
        for j, id in enumerate(ids):
            ingredient = data[id]['ingredients']
            for k in range(len(ingredient)):
                # tokens = nltk.tokenize.word_tokenize(ingredient[k].lower())
                # Each ingredient is treated as a single pre-joined token.
                tokens = [ingredient[k].lower()]
                counter.update(tokens)
            if (j+1) % 1000 == 0:
                print("[{}/{}] Tokenized the ingredients.".format(j+1, len(ids)))
    # If the word frequency is less than 'threshold', then the word is discarded.
    words = [word for word, cnt in counter.items() if cnt >= threshold]
    # Create a vocab wrapper and add some special tokens.
    vocab = Vocabulary()
    vocab.add_word('<pad>')
    vocab.add_word('<start>')
    vocab.add_word('<end>')
    vocab.add_word('<unk>')
    vocab.add_word(',')
    for word in words:
        vocab.add_word(word)
    return vocab
def main(args):
    """Build the ingredient vocabulary and pickle it to ``args.vocab_path``."""
    vocab = build_vocab(path=args.caption_path, threshold=args.threshold)
    vocab_path = args.vocab_path
    with open(vocab_path, 'wb') as f:
        pickle.dump(vocab, f)
    size = len(vocab)
    print("Total vocabulary size: {}".format(size))
    print("Saved the vocabulary wrapper to '{}'".format(vocab_path))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--caption_path', type=str,
default='../im2recipe-Pytorch/data/recipe1M/',
help='path for train annotation file')
parser.add_argument('--vocab_path', type=str, default='./data/ingredient_vocab.pkl',
help='path for saving vocabulary wrapper')
parser.add_argument('--threshold', type=int, default=4,
help='minimum word count threshold')
args = parser.parse_args()
main(args) | 2,783 | 31.752941 | 89 | py |
czsl | czsl-main/test.py | # Torch imports
import torch
from torch.utils.tensorboard import SummaryWriter
import torch.backends.cudnn as cudnn
import numpy as np
from flags import DATA_FOLDER
cudnn.benchmark = True
# Python imports
import tqdm
from tqdm import tqdm
import os
from os.path import join as ospj
# Local imports
from data import dataset as dset
from models.common import Evaluator
from utils.utils import load_args
from utils.config_model import configure_model
from flags import parser
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def main():
    """Load the checkpointed model from ``args.logpath`` and evaluate on test.

    For open-world CompCos with hard masking, first searches a feasibility
    threshold on the validation set (unless one is given via ``--threshold``).
    """
    # Get arguments and start logging
    args = parser.parse_args()
    logpath = args.logpath
    # The run's saved config: the first .yml file found inside logpath.
    config = [os.path.join(logpath, _) for _ in os.listdir(logpath) if _.endswith('yml')][0]
    load_args(config, args)
    # Get dataset
    trainset = dset.CompositionDataset(
        root=os.path.join(DATA_FOLDER,args.data_dir),
        phase='train',
        split=args.splitname,
        model=args.image_extractor,
        update_features=args.update_features,
        train_only=args.train_only,
        subset=args.subset,
        open_world=args.open_world
    )
    valset = dset.CompositionDataset(
        root=os.path.join(DATA_FOLDER,args.data_dir),
        phase='val',
        split=args.splitname,
        model=args.image_extractor,
        subset=args.subset,
        update_features=args.update_features,
        open_world=args.open_world
    )
    valoader = torch.utils.data.DataLoader(
        valset,
        batch_size=args.test_batch_size,
        shuffle=False,
        num_workers=8)
    testset = dset.CompositionDataset(
        root=os.path.join(DATA_FOLDER,args.data_dir),
        phase='test',
        split=args.splitname,
        model =args.image_extractor,
        subset=args.subset,
        update_features = args.update_features,
        open_world=args.open_world
    )
    testloader = torch.utils.data.DataLoader(
        testset,
        batch_size=args.test_batch_size,
        shuffle=False,
        num_workers=args.workers)
    # Get model and optimizer
    image_extractor, model, optimizer = configure_model(args, trainset)
    args.extractor = image_extractor
    # Always evaluate the best-AUC checkpoint from the training run.
    args.load = ospj(logpath,'ckpt_best_auc.t7')
    checkpoint = torch.load(args.load)
    if image_extractor:
        try:
            image_extractor.load_state_dict(checkpoint['image_extractor'])
            image_extractor.eval()
        except:
            print('No Image extractor in checkpoint')
    model.load_state_dict(checkpoint['net'])
    model.eval()
    threshold = None
    if args.open_world and args.hard_masking:
        assert args.model == 'compcos', args.model + ' does not have hard masking.'
        if args.threshold is not None:
            threshold = args.threshold
        else:
            # Grid-search the feasibility threshold on validation AUC.
            evaluator_val = Evaluator(valset, model)
            unseen_scores = model.compute_feasibility().to('cpu')
            seen_mask = model.seen_mask.to('cpu')
            # +/- 10 offsets exclude seen pairs from the min/max of the range.
            min_feasibility = (unseen_scores+seen_mask*10.).min()
            max_feasibility = (unseen_scores-seen_mask*10.).max()
            thresholds = np.linspace(min_feasibility,max_feasibility, num=args.threshold_trials)
            best_auc = 0.
            best_th = -10
            with torch.no_grad():
                for th in thresholds:
                    # NOTE(review): reads results['AUC'] from test()'s return
                    # value -- confirm score_model's dict carries 'AUC'.
                    results = test(image_extractor,model,valoader,evaluator_val,args,threshold=th,print_results=False)
                    auc = results['AUC']
                    if auc > best_auc:
                        best_auc = auc
                        best_th = th
                        print('New best AUC',best_auc)
                        print('Threshold',best_th)
            threshold = best_th
    evaluator = Evaluator(testset, model)
    with torch.no_grad():
        test(image_extractor, model, testloader, evaluator, args, threshold)
def test(image_extractor, model, testloader, evaluator, args, threshold=None, print_results=True):
    """Run the model over ``testloader`` and score predictions.

    When ``threshold`` is given, uses the model's hard-masked forward pass.

    NOTE(review): this returns ``results`` (from evaluator.score_model),
    while the caller's threshold search reads ``results['AUC']`` -- confirm
    score_model's dict carries 'AUC', or whether ``stats`` (from
    evaluate_predictions) was the intended return value.
    """
    if image_extractor:
        image_extractor.eval()
    model.eval()
    accuracies, all_sub_gt, all_attr_gt, all_obj_gt, all_pair_gt, all_pred = [], [], [], [], [], []
    for idx, data in tqdm(enumerate(testloader), total=len(testloader), desc='Testing'):
        data = [d.to(device) for d in data]
        if image_extractor:
            data[0] = image_extractor(data[0])
        if threshold is None:
            _, predictions = model(data)
        else:
            _, predictions = model.val_forward_with_threshold(data,threshold)
        attr_truth, obj_truth, pair_truth = data[1], data[2], data[3]
        all_pred.append(predictions)
        all_attr_gt.append(attr_truth)
        all_obj_gt.append(obj_truth)
        all_pair_gt.append(pair_truth)
    if args.cpu_eval:
        all_attr_gt, all_obj_gt, all_pair_gt = torch.cat(all_attr_gt), torch.cat(all_obj_gt), torch.cat(all_pair_gt)
    else:
        all_attr_gt, all_obj_gt, all_pair_gt = torch.cat(all_attr_gt).to('cpu'), torch.cat(all_obj_gt).to(
            'cpu'), torch.cat(all_pair_gt).to('cpu')
    all_pred_dict = {}
    # Gather values as dict of (attr, obj) as key and list of predictions as values
    if args.cpu_eval:
        for k in all_pred[0].keys():
            all_pred_dict[k] = torch.cat(
                [all_pred[i][k].to('cpu') for i in range(len(all_pred))])
    else:
        for k in all_pred[0].keys():
            all_pred_dict[k] = torch.cat(
                [all_pred[i][k] for i in range(len(all_pred))])
    # Calculate best unseen accuracy
    results = evaluator.score_model(all_pred_dict, all_obj_gt, bias=args.bias, topk=args.topk)
    stats = evaluator.evaluate_predictions(results, all_attr_gt, all_obj_gt, all_pair_gt, all_pred_dict,
                                           topk=args.topk)
    result = ''
    for key in stats:
        result = result + key + '  ' + str(round(stats[key], 4)) + '| '
    result = result + args.name
    if print_results:
        print(f'Results')
        print(result)
    return results
if __name__ == '__main__':
main()
| 6,227 | 32.12766 | 120 | py |
czsl | czsl-main/train.py | # Torch imports
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
# Python imports
import tqdm
from tqdm import tqdm
import os
from os.path import join as ospj
import csv
#Local imports
from data import dataset as dset
from models.common import Evaluator
from utils.utils import save_args, load_args
from utils.config_model import configure_model
from flags import parser, DATA_FOLDER
best_auc = 0
best_hm = 0
compose_switch = True
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def main():
    """Full training entry point: build loaders, model, then run epochs.

    Periodically evaluates on ``args.test_set`` and tracks the best AUC/HM
    via the module-level globals updated inside test().
    """
    # Get arguments and start logging
    args = parser.parse_args()
    load_args(args.config, args)
    logpath = os.path.join(args.cv_dir, args.name)
    os.makedirs(logpath, exist_ok=True)
    # Snapshot the effective config next to the checkpoints.
    save_args(args, logpath, args.config)
    writer = SummaryWriter(log_dir = logpath, flush_secs = 30)
    # Get dataset
    trainset = dset.CompositionDataset(
        root=os.path.join(DATA_FOLDER,args.data_dir),
        phase='train',
        split=args.splitname,
        model =args.image_extractor,
        num_negs=args.num_negs,
        pair_dropout=args.pair_dropout,
        update_features = args.update_features,
        train_only= args.train_only,
        open_world=args.open_world
    )
    trainloader = torch.utils.data.DataLoader(
        trainset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers)
    testset = dset.CompositionDataset(
        root=os.path.join(DATA_FOLDER,args.data_dir),
        phase=args.test_set,
        split=args.splitname,
        model =args.image_extractor,
        subset=args.subset,
        update_features = args.update_features,
        open_world=args.open_world
    )
    testloader = torch.utils.data.DataLoader(
        testset,
        batch_size=args.test_batch_size,
        shuffle=False,
        num_workers=args.workers)
    # Get model and optimizer
    image_extractor, model, optimizer = configure_model(args, trainset)
    args.extractor = image_extractor
    train = train_normal
    evaluator_val = Evaluator(testset, model)
    print(model)
    start_epoch = 0
    # Load checkpoint
    if args.load is not None:
        checkpoint = torch.load(args.load)
        if image_extractor:
            try:
                image_extractor.load_state_dict(checkpoint['image_extractor'])
                if args.freeze_features:
                    print('Freezing image extractor')
                    image_extractor.eval()
                    for param in image_extractor.parameters():
                        param.requires_grad = False
            except:
                print('No Image extractor in checkpoint')
        model.load_state_dict(checkpoint['net'])
        start_epoch = checkpoint['epoch']
        print('Loaded model from ', args.load)
    for epoch in tqdm(range(start_epoch, args.max_epochs + 1), desc = 'Current epoch'):
        train(epoch, image_extractor, model, trainloader, optimizer, writer)
        # Open-world CompCos re-estimates pair feasibility on a schedule.
        if model.is_open and args.model=='compcos' and ((epoch+1)%args.update_feasibility_every)==0 :
            print('Updating feasibility scores')
            model.update_feasibility(epoch+1.)
        if epoch % args.eval_val_every == 0:
            with torch.no_grad(): # todo: might not be needed
                test(epoch, image_extractor, model, testloader, evaluator_val, writer, args, logpath)
    print('Best AUC achieved is ', best_auc)
    print('Best HM achieved is ', best_hm)
def train_normal(epoch, image_extractor, model, trainloader, optimizer, writer):
    """Run one training epoch and log the mean loss to TensorBoard."""
    if image_extractor:
        image_extractor.train()
    model.train()  # Let's switch to training
    running_loss = 0.0
    for _, batch in tqdm(enumerate(trainloader), total=len(trainloader), desc = 'Training'):
        batch = [d.to(device) for d in batch]
        if image_extractor:
            batch[0] = image_extractor(batch[0])
        loss, _ = model(batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    mean_loss = running_loss / len(trainloader)
    writer.add_scalar('Loss/train_total', mean_loss, epoch)
    print('Epoch: {}| Loss: {}'.format(epoch, round(mean_loss, 2)))
def test(epoch, image_extractor, model, testloader, evaluator, writer, args, logpath):
    """Evaluate one epoch, log stats, and checkpoint on new best AUC/HM.

    Mutates the module-level globals ``best_auc`` and ``best_hm``.
    """
    global best_auc, best_hm
    def save_checkpoint(filename):
        # Bundle model (and optionally the image extractor) with epoch + AUC.
        state = {
            'net': model.state_dict(),
            'epoch': epoch,
            'AUC': stats['AUC']
        }
        if image_extractor:
            state['image_extractor'] = image_extractor.state_dict()
        torch.save(state, os.path.join(logpath, 'ckpt_{}.t7'.format(filename)))
    if image_extractor:
        image_extractor.eval()
    model.eval()
    accuracies, all_sub_gt, all_attr_gt, all_obj_gt, all_pair_gt, all_pred = [], [], [], [], [], []
    for idx, data in tqdm(enumerate(testloader), total=len(testloader), desc='Testing'):
        data = [d.to(device) for d in data]
        if image_extractor:
            data[0] = image_extractor(data[0])
        _, predictions = model(data)
        attr_truth, obj_truth, pair_truth = data[1], data[2], data[3]
        all_pred.append(predictions)
        all_attr_gt.append(attr_truth)
        all_obj_gt.append(obj_truth)
        all_pair_gt.append(pair_truth)
    if args.cpu_eval:
        all_attr_gt, all_obj_gt, all_pair_gt = torch.cat(all_attr_gt), torch.cat(all_obj_gt), torch.cat(all_pair_gt)
    else:
        all_attr_gt, all_obj_gt, all_pair_gt = torch.cat(all_attr_gt).to('cpu'), torch.cat(all_obj_gt).to(
            'cpu'), torch.cat(all_pair_gt).to('cpu')
    all_pred_dict = {}
    # Gather values as dict of (attr, obj) as key and list of predictions as values
    if args.cpu_eval:
        for k in all_pred[0].keys():
            all_pred_dict[k] = torch.cat(
                [all_pred[i][k].to('cpu') for i in range(len(all_pred))])
    else:
        for k in all_pred[0].keys():
            all_pred_dict[k] = torch.cat(
                [all_pred[i][k] for i in range(len(all_pred))])
    # Calculate best unseen accuracy
    results = evaluator.score_model(all_pred_dict, all_obj_gt, bias=args.bias, topk=args.topk)
    stats = evaluator.evaluate_predictions(results, all_attr_gt, all_obj_gt, all_pair_gt, all_pred_dict, topk=args.topk)
    stats['a_epoch'] = epoch
    result = ''
    # write to Tensorboard
    for key in stats:
        writer.add_scalar(key, stats[key], epoch)
        result = result + key + '  ' + str(round(stats[key], 4)) + '| '
    result = result + args.name
    print(f'Test Epoch: {epoch}')
    print(result)
    # Periodic checkpoint plus best-metric checkpoints.
    if epoch > 0 and epoch % args.save_every == 0:
        save_checkpoint(epoch)
    if stats['AUC'] > best_auc:
        best_auc = stats['AUC']
        print('New best AUC ', best_auc)
        save_checkpoint('best_auc')
    if stats['best_hm'] > best_hm:
        best_hm = stats['best_hm']
        print('New best HM ', best_hm)
        save_checkpoint('best_hm')
    # Logs
    with open(ospj(logpath, 'logs.csv'), 'a') as f:
        w = csv.DictWriter(f, stats.keys())
        if epoch == 0:
            w.writeheader()
        w.writerow(stats)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Best AUC achieved is ', best_auc)
print('Best HM achieved is ', best_hm) | 7,551 | 31.13617 | 120 | py |
czsl | czsl-main/models/svm.py | import numpy as np
import tqdm
from data import dataset as dset
import os
from utils import utils
import torch
from torch.autograd import Variable
import h5py
from sklearn.svm import LinearSVC
from sklearn.model_selection import GridSearchCV
import torch.nn.functional as F
from joblib import Parallel, delayed
import glob
import scipy.io
from sklearn.calibration import CalibratedClassifierCV
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--dataset', default='mitstates', help='mitstates|zappos')
parser.add_argument('--data_dir', default='data/mit-states/')
parser.add_argument('--generate', action ='store_true', default=False)
parser.add_argument('--evalsvm', action ='store_true', default=False)
parser.add_argument('--evaltf', action ='store_true', default=False)
parser.add_argument('--completed', default='tensor-completion/completed/complete.mat')
args = parser.parse_args()
#----------------------------------------------------------------------------------------#
search_params = {'C': np.logspace(-5,5,11)}
def train_svm(Y_train, sample_weight=None):
    """Fit a binary linear SVM on the module-level training features X_train.

    Args:
        Y_train: 0/1 label array aligned with X_train rows.
        sample_weight: optional per-sample weights passed to fit().

    Returns:
        A fitted GridSearchCV (preferred), a fixed-C LinearSVC fallback when
        the grid search fails, or None when the labels are degenerate.
    """
    # Degenerate label sets (all positive or all negative) cannot be fit;
    # checking up front avoids a guaranteed-to-fail grid search.
    if Y_train.sum() == len(Y_train) or Y_train.sum() == 0:
        return None
    try:
        clf = GridSearchCV(LinearSVC(class_weight='balanced', fit_intercept=False),
                           search_params, scoring='f1', cv=4)
        clf.fit(X_train, Y_train, sample_weight=sample_weight)
    except Exception:
        # Cross-validated search can still fail (e.g. a CV fold without any
        # positives); fall back to a fixed-C classifier. The original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        clf = LinearSVC(C=0.1, class_weight='balanced', fit_intercept=False)
        clf.fit(X_train, Y_train, sample_weight=sample_weight)
    return clf
def generate_svms():
    """Train one binary linear SVM per attribute, per object and per training
    pair, saving each fitted estimator under <data_dir>/svm/.

    Uses the module-level train_attrs / train_objs / dataset / args globals.
    NOTE: the deprecated np.int alias (removed in NumPy >= 1.20) has been
    replaced with the builtin int.
    """
    # --- attribute classifiers ---
    Y = [(train_attrs==attr).astype(int) for attr in range(len(dataset.attrs))]
    attr_clfs = Parallel(n_jobs=32, verbose=16)(delayed(train_svm)(Y[attr]) for attr in range(len(dataset.attrs)))
    for attr, clf in enumerate(attr_clfs):
        print (attr, dataset.attrs[attr])
        if clf is None:
            # train_svm returns None for degenerate label sets.
            print ('FAILED! degenerate labels for attr', attr)
            continue
        if hasattr(clf, 'best_params_'):
            print ('params:', clf.best_params_)
        print ('-'*30)
        # Fallback LinearSVC classifiers have no best_estimator_; save them directly.
        torch.save(getattr(clf, 'best_estimator_', clf), '%s/svm/attr_%d'%(args.data_dir, attr))

    # --- object classifiers ---
    Y = [(train_objs==obj).astype(int) for obj in range(len(dataset.objs))]
    obj_clfs = Parallel(n_jobs=32, verbose=16)(delayed(train_svm)(Y[obj]) for obj in range(len(dataset.objs)))
    for obj, clf in enumerate(obj_clfs):
        print (obj, dataset.objs[obj])
        if clf is None:
            print ('FAILED! degenerate labels for obj', obj)
            continue
        if hasattr(clf, 'best_params_'):
            print ('params:', clf.best_params_)
        print ('-'*30)
        torch.save(getattr(clf, 'best_estimator_', clf), '%s/svm/obj_%d'%(args.data_dir, obj))

    # --- pair classifiers ---
    Y, Y_attr, sample_weight = [], [], []
    for idx, (attr, obj) in enumerate(dataset.train_pairs):
        Y_train = ((train_attrs==attr)*(train_objs==obj)).astype(int)
        # reweight instances to get a little more training data
        Y_train_attr = (train_attrs==attr).astype(int)
        instance_weights = 0.1*Y_train_attr
        instance_weights[Y_train.nonzero()[0]] = 1.0
        Y.append(Y_train)
        Y_attr.append(Y_train_attr)
        sample_weight.append(instance_weights)
    pair_clfs = Parallel(n_jobs=32, verbose=16)(delayed(train_svm)(Y[pair]) for pair in range(len(dataset.train_pairs)))
    for idx, (attr, obj) in enumerate(dataset.train_pairs):
        clf = pair_clfs[idx]
        print (dataset.attrs[attr], dataset.objs[obj])
        try:
            print ('params:', clf.best_params_)
            torch.save(clf.best_estimator_, '%s/svm/pair_%d_%d'%(args.data_dir, attr, obj))
        except AttributeError:
            # clf is None (degenerate labels) or a fallback without best_params_.
            print ('FAILED! #positive:', Y[idx].sum(), len(Y[idx]))
    return
def make_svm_tensor():
    """Export the trained pair-SVM weights as a sparse classifier tensor.

    Writes a .mat file with the observed entries (subs/vals), the full index
    set to recover (fullsubs/fullvals) and the dense tensor size, for use by
    the external tensor-completion code.
    """
    tensor_size = (len(dataset.attrs), len(dataset.objs), X.shape[1])
    observed_idx, observed_vals = [], []
    clf_files = glob.glob('%s/svm/pair*' % args.data_dir)
    print('%d composite classifiers found' % (len(clf_files)))

    # Collect one (attr, obj, feature_dim) -> weight entry per saved pair SVM.
    for fl in tqdm.tqdm(clf_files):
        _, attr, obj = os.path.basename(fl).split('_')
        attr, obj = int(attr), int(obj)
        weight = torch.load(fl).coef_.squeeze()
        for dim, w in enumerate(weight):
            observed_idx.append((attr, obj, dim))
            observed_vals.append(w)

    # Every (attr, obj, feature) coordinate of the full tensor, observed or not.
    all_idx = [(attr, obj, dim)
               for attr, obj in dataset.pairs
               for dim in range(X.shape[1])]

    observed_idx = np.array(observed_idx)
    observed_vals = np.array(observed_vals).reshape(-1, 1)
    all_idx = np.array(all_idx)
    all_vals = np.ones(len(all_idx)).reshape(-1, 1)

    savedat = {'subs': observed_idx, 'vals': observed_vals, 'size': tensor_size,
               'fullsubs': all_idx, 'fullvals': all_vals}
    scipy.io.savemat('tensor-completion/incomplete/%s.mat' % args.dataset, savedat)
    print(observed_idx.shape, observed_vals.shape, tensor_size)
def evaluate_svms():
    """Calibrate the saved attribute/object SVMs, score the test split and
    print the standard attribute / object / (g)zsl match statistics.

    NOTE: the deprecated np.int alias (removed in NumPy >= 1.20) has been
    replaced with the builtin int.
    """
    attr_clfs = [torch.load('%s/svm/attr_%d'%(args.data_dir, attr)) for attr in range(len(dataset.attrs))]
    obj_clfs = [torch.load('%s/svm/obj_%d'%(args.data_dir, obj)) for obj in range(len(dataset.objs))]

    # Calibrate all classifiers first (Platt scaling on the training split).
    Y = [(train_attrs==attr).astype(int) for attr in range(len(dataset.attrs))]
    for attr in tqdm.tqdm(range(len(dataset.attrs))):
        clf = attr_clfs[attr]
        calibrated = CalibratedClassifierCV(clf, method='sigmoid', cv='prefit')
        calibrated.fit(X_train, Y[attr])
        attr_clfs[attr] = calibrated

    Y = [(train_objs==obj).astype(int) for obj in range(len(dataset.objs))]
    for obj in tqdm.tqdm(range(len(dataset.objs))):
        clf = obj_clfs[obj]
        calibrated = CalibratedClassifierCV(clf, method='sigmoid', cv='prefit')
        calibrated.fit(X_train, Y[obj])
        obj_clfs[obj] = calibrated

    # Generate all the scores: probability of the positive class per primitive.
    attr_scores, obj_scores = [], []
    for attr in tqdm.tqdm(range(len(dataset.attrs))):
        clf = attr_clfs[attr]
        score = clf.predict_proba(X_test)[:,1]
        attr_scores.append(score)
    attr_scores = np.vstack(attr_scores)
    for obj in tqdm.tqdm(range(len(dataset.objs))):
        clf = obj_clfs[obj]
        score = clf.predict_proba(X_test)[:,1]
        obj_scores.append(score)
    obj_scores = np.vstack(obj_scores)

    # (num_primitives, num_test) -> (num_test, num_primitives) for the evaluator.
    attr_pred = torch.from_numpy(attr_scores).transpose(0,1)
    obj_pred = torch.from_numpy(obj_scores).transpose(0,1)

    x = [None, Variable(torch.from_numpy(test_attrs)).long(), Variable(torch.from_numpy(test_objs)).long(), Variable(torch.from_numpy(test_pairs)).long()]
    attr_pred, obj_pred, _ = utils.generate_prediction_tensors([attr_pred, obj_pred], dataset, x[2].data, source='classification')
    attr_match, obj_match, zsl_match, gzsl_match, fixobj_match = utils.performance_stats(attr_pred, obj_pred, x)
    print (attr_match.mean(), obj_match.mean(), zsl_match.mean(), gzsl_match.mean(), fixobj_match.mean())
def evaluate_tensorcompletion():
    """Evaluate pair classifiers recovered by tensor completion.

    Loads the incomplete (training) and completed tensors, reports the
    reconstruction error on the observed entries, calibrates one classifier
    per pair and prints the standard match statistics on the test split.
    """
    def parse_tensor(fl):
        # Returns (dense tensor, per-axis index arrays, observed values).
        tensor = scipy.io.loadmat(fl)
        # FIX: zip() returns an iterator in Python 3; materialize per-axis
        # index arrays so they are reusable and valid for fancy indexing.
        nz_idx = [np.array(ix) for ix in zip(*tensor['subs'])]
        composite_clfs = np.zeros((len(dataset.attrs), len(dataset.objs), X.shape[1]))
        composite_clfs[nz_idx[0], nz_idx[1], nz_idx[2]] = tensor['vals'].squeeze()
        return composite_clfs, nz_idx, tensor['vals'].squeeze()

    # see recon error
    tr_file = 'tensor-completion/incomplete/%s.mat'%args.dataset
    ts_file = args.completed
    tr_clfs, tr_nz_idx, tr_vals = parse_tensor(tr_file)
    ts_clfs, ts_nz_idx, ts_vals = parse_tensor(ts_file)
    print (tr_vals.min(), tr_vals.max(), tr_vals.mean())
    print (ts_vals.min(), ts_vals.max(), ts_vals.mean())
    print ('Completed Tensor: %s'%args.completed)

    # Mean squared error over the observed (training) entries.
    err = 1.0*((tr_clfs[tr_nz_idx[0], tr_nz_idx[1], tr_nz_idx[2]]-ts_clfs[tr_nz_idx[0], tr_nz_idx[1], tr_nz_idx[2]])**2).sum()/(len(tr_vals))
    print ('recon error:', err)

    # Create and scale classifiers for each pair. Test pairs use the completed
    # tensor's weights; train pairs use the originally-fit weights.
    clfs = {}
    test_pair_set = set(map(tuple, dataset.test_pairs.numpy().tolist()))
    for idx, (attr, obj) in tqdm.tqdm(enumerate(dataset.pairs), total=len(dataset.pairs)):
        # Dummy fit so sklearn accepts the manually-injected coef_ below.
        clf = LinearSVC(fit_intercept=False)
        clf.fit(np.eye(2), [0,1])
        if (attr, obj) in test_pair_set:
            X_ = X_test
            Y_ = (test_attrs==attr).astype(int)*(test_objs==obj).astype(int)
            clf.coef_ = ts_clfs[attr, obj][None,:]
        else:
            X_ = X_train
            Y_ = (train_attrs==attr).astype(int)*(train_objs==obj).astype(int)
            clf.coef_ = tr_clfs[attr, obj][None,:]
        calibrated = CalibratedClassifierCV(clf, method='sigmoid', cv='prefit')
        calibrated.fit(X_, Y_)
        clfs[(attr, obj)] = calibrated

    scores = {}
    for attr, obj in tqdm.tqdm(dataset.pairs):
        score = clfs[(attr, obj)].predict_proba(X_test)[:,1]
        scores[(attr, obj)] = torch.from_numpy(score).float().unsqueeze(1)

    x = [None, Variable(torch.from_numpy(test_attrs)).long(), Variable(torch.from_numpy(test_objs)).long(), Variable(torch.from_numpy(test_pairs)).long()]
    attr_pred, obj_pred, _ = utils.generate_prediction_tensors(scores, dataset, x[2].data, source='manifold')
    attr_match, obj_match, zsl_match, gzsl_match, fixobj_match = utils.performance_stats(attr_pred, obj_pred, x)
    print (attr_match.mean(), obj_match.mean(), zsl_match.mean(), gzsl_match.mean(), fixobj_match.mean())
#----------------------------------------------------------------------------------------#
# Load the activation dataset and split features into train / test matrices.
if args.dataset == 'mitstates':
    DSet = dset.MITStatesActivations
elif args.dataset == 'zappos':
    DSet = dset.UTZapposActivations
else:
    # FIX: an unknown dataset previously fell through and crashed later with
    # a confusing NameError on DSet; fail fast with a clear message instead.
    raise ValueError('Unknown dataset: %s (expected mitstates|zappos)' % args.dataset)

dataset = DSet(root=args.data_dir, phase='train')
# Each *_data row is (feature_index, attr_id, obj_id, pair_id).
train_idx, train_attrs, train_objs, train_pairs = map(np.array, zip(*dataset.train_data))
test_idx, test_attrs, test_objs, test_pairs = map(np.array, zip(*dataset.test_data))
X = dataset.activations.numpy()
X_train, X_test = X[train_idx,:], X[test_idx,:]
print (len(dataset.attrs), len(dataset.objs), len(dataset.pairs))
print (X_train.shape, X_test.shape)

if args.generate:
    generate_svms()
    make_svm_tensor()
if args.evalsvm:
    evaluate_svms()
if args.evaltf:
    evaluate_tensorcompletion()
czsl | czsl-main/models/visual_product.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import numpy as np
from .common import MLP
class VisualProductNN(nn.Module):
    """Visual Product baseline: independent attribute and object classifiers.

    Training minimizes the sum of the two cross-entropy losses; at validation
    time the score of a (attr, obj) pair is the product of the corresponding
    softmax probabilities.
    """

    def __init__(self, dset, args):
        super(VisualProductNN, self).__init__()
        self.attr_clf = MLP(dset.feat_dim, len(dset.attrs), 2, relu = False)
        self.obj_clf = MLP(dset.feat_dim, len(dset.objs), 2, relu = False)
        self.dset = dset

    def train_forward(self, x):
        # x = (image features, attr labels, obj labels, ...)
        img, attrs, objs = x[0], x[1], x[2]
        attr_logits = self.attr_clf(img)
        obj_logits = self.obj_clf(img)
        total_loss = F.cross_entropy(attr_logits, attrs) + F.cross_entropy(obj_logits, objs)
        return total_loss, None

    def val_forward(self, x):
        img = x[0]
        attr_prob = F.softmax(self.attr_clf(img), dim=1)
        obj_prob = F.softmax(self.obj_clf(img), dim=1)
        # One score vector (over the batch) per candidate pair.
        scores = {
            (attr, obj): attr_prob[:, self.dset.attr2idx[attr]] * obj_prob[:, self.dset.obj2idx[obj]]
            for attr, obj in self.dset.pairs
        }
        return None, scores

    def forward(self, x):
        if self.training:
            return self.train_forward(x)
        with torch.no_grad():
            return self.val_forward(x)
czsl | czsl-main/models/symnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .word_embedding import load_word_embeddings
from .common import MLP
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class Symnet(nn.Module):
    """SymNet-style compositional model.

    Learns a coupling network (CoN) that adds an attribute to an image
    embedding and a decoupling network (DecoN) that removes one, and
    classifies attributes via the relative moving distance (RMD) between the
    two transformed features. Objects get a separate direct classifier.
    """

    def __init__(self, dset, args):
        super(Symnet, self).__init__()
        self.dset = dset
        self.args = args
        self.num_attrs = len(dset.attrs)
        self.num_objs = len(dset.objs)
        # Projects backbone image features into the shared embedding space.
        self.image_embedder = MLP(dset.feat_dim, args.emb_dim, relu = False)
        self.ce_loss = nn.CrossEntropyLoss()
        self.mse_loss = nn.MSELoss()
        self.nll_loss = nn.NLLLoss()
        self.softmax = nn.Softmax(dim=1)

        # Attribute embedder and object classifier
        self.attr_embedder = nn.Embedding(len(dset.attrs), args.emb_dim)
        self.obj_classifier = MLP(args.emb_dim, len(dset.objs), num_layers = 2, relu = False, dropout = True, layers = [512]) # original paper uses a second fc classifier for final product
        self.attr_classifier = MLP(args.emb_dim, len(dset.attrs), num_layers = 2, relu = False, dropout = True, layers = [512])

        # CoN and DecoN: each has an attention MLP (gates the image embedding
        # by the attribute) and an embedding MLP over the concatenated pair.
        self.CoN_fc_attention = MLP(args.emb_dim, args.emb_dim, num_layers = 2, relu = False, dropout = True, layers = [512])
        self.CoN_emb = MLP(args.emb_dim + args.emb_dim, args.emb_dim, num_layers = 2, relu = False, dropout = True, layers = [768])
        self.DecoN_fc_attention = MLP(args.emb_dim, args.emb_dim, num_layers = 2, relu = False, dropout = True, layers = [512])
        self.DecoN_emb = MLP(args.emb_dim + args.emb_dim, args.emb_dim, num_layers = 2, relu = False, dropout = True, layers = [768])

        # if args.glove_init:
        # Attribute embeddings are initialized from pretrained word vectors
        # and frozen for the rest of training.
        pretrained_weight = load_word_embeddings(args.emb_init, dset.attrs)
        self.attr_embedder.weight.data.copy_(pretrained_weight)
        for param in self.attr_embedder.parameters():
            param.requires_grad = False

    def CoN(self, img_embedding, attr_embedding):
        """Couple: transform img_embedding so it expresses attr_embedding."""
        attention = torch.sigmoid(self.CoN_fc_attention(attr_embedding))
        # Residual attention gate on the image embedding.
        img_embedding = attention*img_embedding + img_embedding
        hidden = torch.cat([img_embedding, attr_embedding], dim = 1)
        output = self.CoN_emb(hidden)
        return output

    def DeCoN(self, img_embedding, attr_embedding):
        """Decouple: transform img_embedding to remove attr_embedding."""
        attention = torch.sigmoid(self.DecoN_fc_attention(attr_embedding))
        img_embedding = attention*img_embedding + img_embedding
        hidden = torch.cat([img_embedding, attr_embedding], dim = 1)
        output = self.DecoN_emb(hidden)
        return output

    def distance_metric(self, a, b):
        # Euclidean distance along the last (feature) dimension.
        return torch.norm(a-b, dim = -1)

    def RMD_prob(self, feat_plus, feat_minus, repeat_img_feat):
        """return attribute classification probability with our RMD"""
        # feat_plus, feat_minus: shape=(bz, #attr, dim_emb)
        # d_plus: distance between feature before&after CoN
        # d_minus: distance between feature before&after DecoN
        d_plus = self.distance_metric(feat_plus, repeat_img_feat).reshape(-1, self.num_attrs)
        d_minus = self.distance_metric(feat_minus, repeat_img_feat).reshape(-1, self.num_attrs)
        # not adding softmax because it is part of cross entropy loss
        return d_minus - d_plus

    def train_forward(self, x):
        """Compute the combined SymNet training loss for one batch.

        x packs (image features, attr ids, obj ids, ..., negative attr ids);
        returns (loss, None).
        """
        pos_image_feat, pos_attr_id, pos_obj_id = x[0], x[1], x[2]
        neg_attr_id = x[4][:,0]
        batch_size = pos_image_feat.size(0)
        loss = []

        pos_attr_emb = self.attr_embedder(pos_attr_id)
        neg_attr_emb = self.attr_embedder(neg_attr_id)
        pos_img = self.image_embedder(pos_image_feat)

        # rA = remove positive attribute A
        # aA = add positive attribute A
        # rB = remove negative attribute B
        # aB = add negative attribute B
        pos_rA = self.DeCoN(pos_img, pos_attr_emb)
        pos_aA = self.CoN(pos_img, pos_attr_emb)
        pos_rB = self.DeCoN(pos_img, neg_attr_emb)
        pos_aB = self.CoN(pos_img, neg_attr_emb)

        # get all attr embedding #attr, embedding
        attr_emb = torch.LongTensor(np.arange(self.num_attrs)).to(device)
        attr_emb = self.attr_embedder(attr_emb)
        tile_attr_emb = attr_emb.repeat(batch_size, 1) # (batch*attr, dim_emb)

        # Now we calculate all the losses
        if self.args.lambda_cls_attr > 0:
            # Original image
            score_pos_A = self.attr_classifier(pos_img)
            loss_cls_pos_a = self.ce_loss(score_pos_A, pos_attr_id)
            # After removing pos_attr
            score_pos_rA_A = self.attr_classifier(pos_rA)
            # NOTE(review): sum() over the batch axis then broadcasting back
            # looks deliberate per the comment below, but is worth confirming
            # against the reference SymNet implementation.
            total = sum(score_pos_rA_A)
            # prob_pos_rA_A = 1 - self.softmax(score_pos_rA_A)
            # loss_cls_pos_rA_a = self.nll_loss(torch.log(prob_pos_rA_A), pos_attr_id)
            loss_cls_pos_rA_a = self.ce_loss(total - score_pos_rA_A, pos_attr_id) #should be maximum for the gt label
            # rmd time
            repeat_img_feat = torch.repeat_interleave(pos_img, self.num_attrs, 0) #(batch*attr, dim_rep)
            feat_plus = self.CoN(repeat_img_feat, tile_attr_emb)
            feat_minus = self.DeCoN(repeat_img_feat, tile_attr_emb)
            score_cls_rmd = self.RMD_prob(feat_plus, feat_minus, repeat_img_feat)
            loss_cls_rmd = self.ce_loss(score_cls_rmd, pos_attr_id)
            loss_cls_attr = self.args.lambda_cls_attr*sum([loss_cls_pos_a, loss_cls_pos_rA_a, loss_cls_rmd])
            loss.append(loss_cls_attr)

        if self.args.lambda_cls_obj > 0:
            # Object identity must survive attribute edits: classify the
            # original image, after removing pos attr, and after adding neg attr.
            # Original image
            score_pos_O = self.obj_classifier(pos_img)
            loss_cls_pos_o = self.ce_loss(score_pos_O, pos_obj_id)
            # After removing pos attr
            score_pos_rA_O = self.obj_classifier(pos_rA)
            loss_cls_pos_rA_o = self.ce_loss(score_pos_rA_O, pos_obj_id)
            # After adding neg attr
            score_pos_aB_O = self.obj_classifier(pos_aB)
            loss_cls_pos_aB_o = self.ce_loss(score_pos_aB_O, pos_obj_id)
            loss_cls_obj = self.args.lambda_cls_obj * sum([loss_cls_pos_o, loss_cls_pos_rA_o, loss_cls_pos_aB_o])
            loss.append(loss_cls_obj)

        if self.args.lambda_sym > 0:
            # Symmetry: adding an attribute the image already has (or removing
            # one it lacks) should leave the embedding unchanged.
            loss_sys_pos = self.mse_loss(pos_aA, pos_img)
            loss_sys_neg = self.mse_loss(pos_rB, pos_img)
            loss_sym = self.args.lambda_sym * (loss_sys_pos + loss_sys_neg)
            loss.append(loss_sym)

        ##### Axiom losses
        if self.args.lambda_axiom > 0:
            loss_clo = loss_inv = loss_com = 0
            # closure
            pos_aA_rA = self.DeCoN(pos_aA, pos_attr_emb)
            pos_rB_aB = self.CoN(pos_rB, neg_attr_emb)
            loss_clo = self.mse_loss(pos_aA_rA, pos_rA) + self.mse_loss(pos_rB_aB, pos_aB)
            # invertibility
            pos_rA_aA = self.CoN(pos_rA, pos_attr_emb)
            pos_aB_rB = self.DeCoN(pos_aB, neg_attr_emb)
            loss_inv = self.mse_loss(pos_rA_aA, pos_img) + self.mse_loss(pos_aB_rB, pos_img)
            # commutative
            pos_aA_rB = self.DeCoN(pos_aA, neg_attr_emb)
            pos_rB_aA = self.DeCoN(pos_rB, pos_attr_emb)
            loss_com = self.mse_loss(pos_aA_rB, pos_rB_aA)
            loss_axiom = self.args.lambda_axiom * (loss_clo + loss_inv + loss_com)
            loss.append(loss_axiom)

        # triplet loss: the image should sit closer to the version with its
        # own attribute added than with it removed (and vice versa for B).
        if self.args.lambda_trip > 0:
            pos_triplet = F.triplet_margin_loss(pos_img, pos_aA, pos_rA)
            neg_triplet = F.triplet_margin_loss(pos_img, pos_rB, pos_aB)
            loss_triplet = self.args.lambda_trip * (pos_triplet + neg_triplet)
            loss.append(loss_triplet)

        loss = sum(loss)
        return loss, None

    def val_forward(self, x):
        """Score every (attr, obj) pair for a batch of images.

        Returns (None, scores) where scores maps each pair to a batch-sized
        tensor of RMD attribute probability times object probability.
        """
        pos_image_feat, pos_attr_id, pos_obj_id = x[0], x[1], x[2]
        batch_size = pos_image_feat.shape[0]

        pos_img = self.image_embedder(pos_image_feat)
        repeat_img_feat = torch.repeat_interleave(pos_img, self.num_attrs, 0) #(batch*attr, dim_rep)

        # get all attr embedding #attr, embedding
        attr_emb = torch.LongTensor(np.arange(self.num_attrs)).to(device)
        attr_emb = self.attr_embedder(attr_emb)
        tile_attr_emb = attr_emb.repeat(batch_size, 1) # (batch*attr, dim_emb)

        feat_plus = self.CoN(repeat_img_feat, tile_attr_emb)
        feat_minus = self.DeCoN(repeat_img_feat, tile_attr_emb)
        score_cls_rmd = self.RMD_prob(feat_plus, feat_minus, repeat_img_feat)
        prob_A_rmd = F.softmax(score_cls_rmd, dim = 1)

        score_obj = self.obj_classifier(pos_img)
        prob_O = F.softmax(score_obj, dim = 1)

        scores = {}
        for itr, (attr, obj) in enumerate(self.dset.pairs):
            attr_id, obj_id = self.dset.attr2idx[attr], self.dset.obj2idx[obj]
            score = prob_A_rmd[:,attr_id] * prob_O[:, obj_id]
            scores[(attr, obj)] = score
        return None, scores

    def forward(self, x):
        # Dispatch on nn.Module training mode; validation runs without grads.
        if self.training:
            loss, pred = self.train_forward(x)
        else:
            with torch.no_grad():
                loss, pred = self.val_forward(x)
        return loss, pred
czsl | czsl-main/models/manifold_methods.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .word_embedding import load_word_embeddings
from .common import MLP, Reshape
from flags import DATA_FOLDER
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class ManifoldModel(nn.Module):
    """Shared base class for manifold-embedding CZSL models.

    Subclasses (RedWine, LabelEmbedPlus, AttributeOperator, ...) must provide
    self.image_embedder, self.compose and self.compare_metric, and select one
    of the train_forward_* / val_forward_* implementations defined here.
    """

    def __init__(self, dset, args):
        super(ManifoldModel, self).__init__()
        self.args = args
        self.dset = dset

        def get_all_ids(relevant_pairs):
            # Precompute attr/obj/pair index tensors for a list of (attr, obj) pairs.
            attrs, objs = zip(*relevant_pairs)
            attrs = [dset.attr2idx[attr] for attr in attrs]
            objs = [dset.obj2idx[obj] for obj in objs]
            pairs = [a for a in range(len(relevant_pairs))]
            attrs = torch.LongTensor(attrs).to(device)
            objs = torch.LongTensor(objs).to(device)
            pairs = torch.LongTensor(pairs).to(device)
            return attrs, objs, pairs

        # Validation always scores the full pair vocabulary.
        self.val_attrs, self.val_objs, self.val_pairs = get_all_ids(self.dset.pairs)
        # for individual projections
        self.uniq_attrs, self.uniq_objs = torch.arange(len(self.dset.attrs)).long().to(device), \
                                          torch.arange(len(self.dset.objs)).long().to(device)
        self.factor = 2

        # Precompute training compositions
        if args.train_only:
            self.train_attrs, self.train_objs, self.train_pairs = get_all_ids(self.dset.train_pairs)
        else:
            # FIX: the original unpacked three values (including an undefined
            # self.val_subs) into two targets, raising at construction time.
            self.train_attrs, self.train_objs = self.val_attrs, self.val_objs

        if args.lambda_aux > 0 or args.lambda_cls_attr > 0 or args.lambda_cls_obj > 0:
            print('Initializing classifiers')
            self.obj_clf = nn.Linear(args.emb_dim, len(dset.objs))
            self.attr_clf = nn.Linear(args.emb_dim, len(dset.attrs))

    def train_forward_bce(self, x):
        """BCE training (RedWine-style): score an image against a sampled
        pair classifier and predict whether the pair is the true one."""
        img, attrs, objs = x[0], x[1], x[2]
        neg_attrs, neg_objs = x[4][:,0], x[5][:,0] #todo: do a less hacky version
        img_feat = self.image_embedder(img)

        # Sample 25% positive and 75% negative pairs
        labels = np.random.binomial(1, 0.25, attrs.shape[0])
        labels = torch.from_numpy(labels).bool().to(device)
        sampled_attrs, sampled_objs = neg_attrs.clone(), neg_objs.clone()
        sampled_attrs[labels] = attrs[labels]
        sampled_objs[labels] = objs[labels]
        labels = labels.float()

        # FIX: score the *sampled* composition against the image; the original
        # composed the ground-truth pair, which made the BCE labels unrelated
        # to the classifier being scored.
        composed_clf = self.compose(sampled_attrs, sampled_objs)
        p = torch.sigmoid((img_feat*composed_clf).sum(1))
        loss = F.binary_cross_entropy(p, labels)
        return loss, None

    def train_forward_triplet(self, x):
        """Triplet training: pull the image toward its true pair embedding
        and away from a sampled negative pair embedding."""
        img, attrs, objs = x[0], x[1], x[2]
        neg_attrs, neg_objs = x[4][:,0], x[5][:,0] #todo: do a less hacky version
        img_feats = self.image_embedder(img)
        positive = self.compose(attrs, objs)
        negative = self.compose(neg_attrs, neg_objs)
        loss = F.triplet_margin_loss(img_feats, positive, negative, margin = self.args.margin)

        # Auxiliary object/attribute prediction loss: both need to be correct.
        if self.args.lambda_aux > 0:
            obj_pred = self.obj_clf(positive)
            attr_pred = self.attr_clf(positive)
            loss_aux = F.cross_entropy(attr_pred, attrs) + F.cross_entropy(obj_pred, objs)
            loss += self.args.lambda_aux * loss_aux
        return loss, None

    def val_forward_distance(self, x):
        """Slow per-pair scoring via self.compare_metric (kept for models
        whose metric is not a plain L2 distance)."""
        img = x[0]
        batch_size = img.shape[0]
        img_feats = self.image_embedder(img)
        scores = {}
        pair_embeds = self.compose(self.val_attrs, self.val_objs)
        for itr, pair in enumerate(self.dset.pairs):
            pair_embed = pair_embeds[itr, None].expand(batch_size, pair_embeds.size(1))
            score = self.compare_metric(img_feats, pair_embed)
            scores[pair] = score
        return None, scores

    def val_forward_distance_fast(self, x):
        """Vectorized negative-squared-L2 scoring of all pairs at once."""
        img = x[0]
        batch_size = img.shape[0]
        img_feats = self.image_embedder(img)
        pair_embeds = self.compose(self.val_attrs, self.val_objs) # Evaluate all pairs

        batch_size, pairs, features = img_feats.shape[0], pair_embeds.shape[0], pair_embeds.shape[1]
        img_feats = img_feats[:,None,:].expand(-1, pairs, -1)
        pair_embeds = pair_embeds[None,:,:].expand(batch_size, -1, -1)
        diff = (img_feats - pair_embeds)**2
        score = diff.sum(2) * -1  # higher (less negative) = closer

        scores = {}
        for itr, pair in enumerate(self.dset.pairs):
            scores[pair] = score[:,self.dset.all_pair2idx[pair]]
        return None, scores

    def val_forward_direct(self, x):
        """Dot-product scoring of the image against every pair embedding."""
        img = x[0]
        batch_size = img.shape[0]
        img_feats = self.image_embedder(img)
        pair_embeds = self.compose(self.val_attrs, self.val_objs).permute(1,0) # Evaluate all pairs
        score = torch.matmul(img_feats, pair_embeds)

        scores = {}
        for itr, pair in enumerate(self.dset.pairs):
            scores[pair] = score[:,self.dset.all_pair2idx[pair]]
        return None, scores

    def forward(self, x):
        # Dispatch on nn.Module training mode; validation runs without grads.
        if self.training:
            loss, pred = self.train_forward(x)
        else:
            with torch.no_grad():
                loss, pred = self.val_forward(x)
        return loss, pred
#########################################
class RedWine(ManifoldModel):
    """RedWine: composes a pair classifier in image-feature space from
    attribute and object primitive embeddings via a transformation MLP T,
    trained with binary cross-entropy over sampled pairs.
    """

    def __init__(self, dset, args):
        super(RedWine, self).__init__(dset, args)
        # Raw image features are used directly (no learned image projection).
        self.image_embedder = lambda img: img
        # Compatibility = sigmoid of the dot product with the composed classifier.
        self.compare_metric = lambda img_feats, pair_embed: torch.sigmoid((img_feats*pair_embed).sum(1))
        self.train_forward = self.train_forward_bce
        self.val_forward = self.val_forward_distance_fast

        # Primitive inputs are word vectors (emb_dim) or SVM weights (feat_dim).
        in_dim = dset.feat_dim if args.clf_init else args.emb_dim
        self.T = nn.Sequential(
            nn.Linear(2*in_dim, 3*in_dim),
            nn.LeakyReLU(0.1, True),
            nn.Linear(3*in_dim, 3*in_dim//2),
            nn.LeakyReLU(0.1, True),
            nn.Linear(3*in_dim//2, dset.feat_dim))
        self.attr_embedder = nn.Embedding(len(dset.attrs), in_dim)
        self.obj_embedder = nn.Embedding(len(dset.objs), in_dim)

        # initialize the weights of the embedders with the svm weights
        if args.emb_init:
            # Word-vector initialization.
            pretrained_weight = load_word_embeddings(args.emb_init, dset.attrs)
            self.attr_embedder.weight.data.copy_(pretrained_weight)
            pretrained_weight = load_word_embeddings(args.emb_init, dset.objs)
            self.obj_embedder.weight.data.copy_(pretrained_weight)
        elif args.clf_init:
            # SVM-weight initialization: load each primitive's linear SVM
            # coefficient vector from disk.
            for idx, attr in enumerate(dset.attrs):
                at_id = self.dset.attr2idx[attr]
                weight = torch.load('%s/svm/attr_%d'%(args.data_dir, at_id)).coef_.squeeze()
                self.attr_embedder.weight[idx].data.copy_(torch.from_numpy(weight))
            for idx, obj in enumerate(dset.objs):
                obj_id = self.dset.obj2idx[obj]
                weight = torch.load('%s/svm/obj_%d'%(args.data_dir, obj_id)).coef_.squeeze()
                self.obj_embedder.weight[idx].data.copy_(torch.from_numpy(weight))
        else:
            print ('init must be either glove or clf')
            return

        if args.static_inp:
            # Freeze the primitive embeddings.
            for param in self.attr_embedder.parameters():
                param.requires_grad = False
            for param in self.obj_embedder.parameters():
                param.requires_grad = False

    def compose(self, attrs, objs):
        """Map (attr, obj) index tensors to composed classifier weights."""
        attr_wt = self.attr_embedder(attrs)
        obj_wt = self.obj_embedder(objs)
        inp_wts = torch.cat([attr_wt, obj_wt], 1) # 2D
        composed_clf = self.T(inp_wts)
        return composed_clf
class LabelEmbedPlus(ManifoldModel):
    """LabelEmbed+: embeds images and (attr, obj) label compositions into a
    shared space and trains with a triplet margin loss.
    """

    def __init__(self, dset, args):
        super(LabelEmbedPlus, self).__init__(dset, args)
        # Conv-feature backbones get a conv projection; flat features an MLP.
        if 'conv' in args.image_extractor:
            self.image_embedder = torch.nn.Sequential(torch.nn.Conv2d(dset.feat_dim, args.emb_dim, 7),
                                                      torch.nn.ReLU(True),
                                                      Reshape(-1, args.emb_dim)
                                                      )
        else:
            self.image_embedder = MLP(dset.feat_dim, args.emb_dim)
        self.compare_metric = lambda img_feats, pair_embed: -F.pairwise_distance(img_feats, pair_embed)
        self.train_forward = self.train_forward_triplet
        self.val_forward = self.val_forward_distance_fast

        # Primitive inputs are word vectors (emb_dim) or SVM weights (feat_dim).
        input_dim = dset.feat_dim if args.clf_init else args.emb_dim
        self.attr_embedder = nn.Embedding(len(dset.attrs), input_dim)
        self.obj_embedder = nn.Embedding(len(dset.objs), input_dim)
        self.T = MLP(2*input_dim, args.emb_dim, num_layers= args.nlayers)

        # init with word embeddings
        if args.emb_init:
            pretrained_weight = load_word_embeddings(args.emb_init, dset.attrs)
            self.attr_embedder.weight.data.copy_(pretrained_weight)
            pretrained_weight = load_word_embeddings(args.emb_init, dset.objs)
            self.obj_embedder.weight.data.copy_(pretrained_weight)
        # init with classifier weights
        elif args.clf_init:
            for idx, attr in enumerate(dset.attrs):
                at_id = dset.attrs.index(attr)
                weight = torch.load('%s/svm/attr_%d'%(args.data_dir, at_id)).coef_.squeeze()
                self.attr_embedder.weight[idx].data.copy_(torch.from_numpy(weight))
            for idx, obj in enumerate(dset.objs):
                obj_id = dset.objs.index(obj)
                weight = torch.load('%s/svm/obj_%d'%(args.data_dir, obj_id)).coef_.squeeze()
                # FIX: the embedding attribute is obj_embedder; the original
                # referenced a non-existent self.obj_emb -> AttributeError.
                self.obj_embedder.weight[idx].data.copy_(torch.from_numpy(weight))

        # static inputs: freeze primitive embeddings
        if args.static_inp:
            for param in self.attr_embedder.parameters():
                param.requires_grad = False
            for param in self.obj_embedder.parameters():
                param.requires_grad = False

    def compose(self, attrs, objs):
        """Map (attr, obj) index tensors to pair embeddings via MLP T."""
        inputs = [self.attr_embedder(attrs), self.obj_embedder(objs)]
        inputs = torch.cat(inputs, 1)
        output = self.T(inputs)
        return output
class AttributeOperator(ManifoldModel):
    """Attributes-as-Operators: each attribute is a learned linear operator
    (a matrix, initialized to identity) applied to an object word embedding;
    images are embedded into the same space and matched by L2 distance.
    """

    def __init__(self, dset, args):
        super(AttributeOperator, self).__init__(dset, args)
        self.image_embedder = MLP(dset.feat_dim, args.emb_dim)
        self.compare_metric = lambda img_feats, pair_embed: -F.pairwise_distance(img_feats, pair_embed)
        self.val_forward = self.val_forward_distance_fast

        # One (emb_dim x emb_dim) operator per attribute, initialized to identity.
        self.attr_ops = nn.ParameterList([nn.Parameter(torch.eye(args.emb_dim)) for _ in range(len(self.dset.attrs))])
        self.obj_embedder = nn.Embedding(len(dset.objs), args.emb_dim)

        if args.emb_init:
            pretrained_weight = load_word_embeddings('glove', dset.objs)
            self.obj_embedder.weight.data.copy_(pretrained_weight)

        # Memoized operator inverses; cleared after every forward() call.
        self.inverse_cache = {}

        if args.lambda_ant>0 and args.dataset=='mitstates':
            # Build a bidirectional antonym lookup between attribute indices.
            antonym_list = open(DATA_FOLDER+'/data/antonyms.txt').read().strip().split('\n')
            antonym_list = [l.split() for l in antonym_list]
            antonym_list = [[self.dset.attrs.index(a1), self.dset.attrs.index(a2)] for a1, a2 in antonym_list]
            antonyms = {}
            antonyms.update({a1:a2 for a1, a2 in antonym_list})
            antonyms.update({a2:a1 for a1, a2 in antonym_list})
            self.antonyms, self.antonym_list = antonyms, antonym_list

        if args.static_inp:
            # Freeze object word embeddings.
            for param in self.obj_embedder.parameters():
                param.requires_grad = False

    def apply_ops(self, ops, rep):
        # Batched matrix-vector product: (B,d,d) x (B,d) -> (B,d), then ReLU.
        out = torch.bmm(ops, rep.unsqueeze(2)).squeeze(2)
        out = F.relu(out)
        return out

    def compose(self, attrs, objs):
        """Pair embedding = attribute operator applied to object embedding."""
        obj_rep = self.obj_embedder(objs)
        attr_ops = torch.stack([self.attr_ops[attr.item()] for attr in attrs])
        embedded_reps = self.apply_ops(attr_ops, obj_rep)
        return embedded_reps

    def apply_inverse(self, img_rep, attrs):
        """Recover object representations by applying the inverse operator of
        each image's attribute (inverses memoized in inverse_cache)."""
        inverse_ops = []
        for i in range(img_rep.size(0)):
            attr = attrs[i]
            if attr not in self.inverse_cache:
                self.inverse_cache[attr] = self.attr_ops[attr].inverse()
            inverse_ops.append(self.inverse_cache[attr])
        inverse_ops = torch.stack(inverse_ops) # (B,512,512)
        obj_rep = self.apply_ops(inverse_ops, img_rep)
        return obj_rep

    def train_forward(self, x):
        """Triplet loss plus optional auxiliary / inverse-consistency /
        commutativity / antonym regularizers; returns (loss, None)."""
        img, attrs, objs = x[0], x[1], x[2]
        neg_attrs, neg_objs, inv_attrs, comm_attrs = x[4][:,0], x[5][:,0], x[6], x[7]
        batch_size = img.size(0)

        loss = []
        anchor = self.image_embedder(img)

        obj_emb = self.obj_embedder(objs)
        pos_ops = torch.stack([self.attr_ops[attr.item()] for attr in attrs])
        positive = self.apply_ops(pos_ops, obj_emb)

        neg_obj_emb = self.obj_embedder(neg_objs)
        neg_ops = torch.stack([self.attr_ops[attr.item()] for attr in neg_attrs])
        negative = self.apply_ops(neg_ops, neg_obj_emb)

        loss_triplet = F.triplet_margin_loss(anchor, positive, negative, margin=self.args.margin)
        loss.append(loss_triplet)

        # Auxiliary object/attribute loss
        if self.args.lambda_aux>0:
            obj_pred = self.obj_clf(positive)
            attr_pred = self.attr_clf(positive)
            loss_aux = F.cross_entropy(attr_pred, attrs) + F.cross_entropy(obj_pred, objs)
            loss.append(self.args.lambda_aux*loss_aux)

        # Inverse Consistency: un-apply the attribute, re-apply another one,
        # and demand agreement with composing from the clean object embedding.
        if self.args.lambda_inv>0:
            obj_rep = self.apply_inverse(anchor, attrs)
            new_ops = torch.stack([self.attr_ops[attr.item()] for attr in inv_attrs])
            new_rep = self.apply_ops(new_ops, obj_rep)
            new_positive = self.apply_ops(new_ops, obj_emb)
            loss_inv = F.triplet_margin_loss(new_rep, new_positive, positive, margin=self.args.margin)
            loss.append(self.args.lambda_inv*loss_inv)

        # Commutative Operators: A(B(o)) should equal B(A(o)).
        if self.args.lambda_comm>0:
            B = torch.stack([self.attr_ops[attr.item()] for attr in comm_attrs])
            BA = self.apply_ops(B, positive)
            AB = self.apply_ops(pos_ops, self.apply_ops(B, obj_emb))
            loss_comm = ((AB-BA)**2).sum(1).mean()
            loss.append(self.args.lambda_comm*loss_comm)

        # Antonym Consistency: applying an attribute's antonym operator to the
        # image embedding should recover the bare object representation.
        if self.args.lambda_ant>0:
            select_idx = [i for i in range(batch_size) if attrs[i].item() in self.antonyms]
            if len(select_idx)>0:
                select_idx = torch.LongTensor(select_idx).cuda()
                attr_subset = attrs[select_idx]
                antonym_ops = torch.stack([self.attr_ops[self.antonyms[attr.item()]] for attr in attr_subset])
                Ao = anchor[select_idx]
                if self.args.lambda_inv>0:
                    # Reuse the object representation computed above.
                    o = obj_rep[select_idx]
                else:
                    o = self.apply_inverse(Ao, attr_subset)
                BAo = self.apply_ops(antonym_ops, Ao)
                loss_cycle = ((BAo-o)**2).sum(1).mean()
                loss.append(self.args.lambda_ant*loss_cycle)

        loss = sum(loss)
        return loss, None

    def forward(self, x):
        # Delegate to the base dispatch, then clear the per-step inverse cache
        # (operators change every optimizer step, invalidating old inverses).
        loss, pred = super(AttributeOperator, self).forward(x)
        self.inverse_cache = {}
        return loss, pred
czsl | czsl-main/models/common.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy
from scipy.stats import hmean
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class MLP(nn.Module):
    '''
    Baseclass to create a simple MLP
    Inputs
        inp_dim: Int, Input dimension
        out_dim: Int, Output dimension
        num_layers: Number of hidden layers
        relu: Bool, Use non linear function at output
        bias: Bool, Use bias
        dropout: Bool, Use dropout (p=0.5) after each hidden activation
        norm: Bool, Apply LayerNorm after each hidden linear layer
        layers: Optional list of hidden-layer widths, consumed in order;
                when exhausted (or omitted) the width defaults to the
                previous layer's width.
    '''
    def __init__(self, inp_dim, out_dim, num_layers = 1, relu = True, bias = True, dropout = False, norm = False, layers = None):
        super(MLP, self).__init__()
        # FIX: the original signature used a mutable default (layers=[]) and
        # popped from the caller's list, mutating it. Copy defensively.
        hidden_sizes = list(layers) if layers else []
        mod = []
        incoming = inp_dim
        for layer in range(num_layers - 1):
            if len(hidden_sizes) == 0:
                outgoing = incoming
            else:
                outgoing = hidden_sizes.pop(0)
            mod.append(nn.Linear(incoming, outgoing, bias = bias))

            incoming = outgoing
            if norm:
                mod.append(nn.LayerNorm(outgoing))
                # mod.append(nn.BatchNorm1d(outgoing))
            mod.append(nn.ReLU(inplace = True))
            # mod.append(nn.LeakyReLU(inplace=True, negative_slope=0.2))
            if dropout:
                mod.append(nn.Dropout(p = 0.5))

        # Final projection; optional output non-linearity.
        mod.append(nn.Linear(incoming, out_dim, bias = bias))

        if relu:
            mod.append(nn.ReLU(inplace = True))
            # mod.append(nn.LeakyReLU(inplace=True, negative_slope=0.2))
        self.mod = nn.Sequential(*mod)

    def forward(self, x):
        return self.mod(x)
class Reshape(nn.Module):
    """nn.Module wrapper that reshapes its input to a fixed target shape
    (captured at construction) via Tensor.view."""

    def __init__(self, *shape):
        super(Reshape, self).__init__()
        self.shape = shape

    def forward(self, inp):
        return inp.view(self.shape)
def calculate_margines(domain_embedding, gt, margin_range = 5):
    '''
    Per-(sample, pair) margins: squared L2 distance from each ground-truth
    feature to each pair embedding, normalized per pair by its maximum over
    the batch and scaled into [0, margin_range].
    domain_embedding: pairs * feats
    gt: batch * feats
    '''
    num_samples = gt.shape[0]
    num_pairs = domain_embedding.shape[0]
    # Broadcast both tensors to (batch, pairs, feats) and take squared diffs.
    sq_dist = (gt.unsqueeze(1).expand(-1, num_pairs, -1)
               - domain_embedding.unsqueeze(0).expand(num_samples, -1, -1)) ** 2
    sq_dist = sq_dist.sum(2)
    # Normalize each pair's column by its max over the batch, then scale.
    per_pair_max = sq_dist.max(dim=0)[0]
    sq_dist = sq_dist / per_pair_max
    return sq_dist * margin_range
def l2_all_batched(image_embedding, domain_embedding):
    '''
    Mean (over all batch*pair entries) of the squared L2 distance between
    each image embedding and the shared per-pair domain embedding.
    Image Embedding: Tensor of Batch_size * pairs * Feature_dim
    domain_embedding: Tensor of pairs * Feature_dim
    '''
    batch_size, pairs = image_embedding.shape[0], image_embedding.shape[1]
    # FIX: the original expanded image_embedding against itself (a 4-D expand
    # with three sizes -> RuntimeError) and never used domain_embedding at
    # all; broadcast the shared domain embeddings across the batch instead.
    domain_embedding_extended = domain_embedding[None, :, :].expand(batch_size, -1, -1)
    l2_loss = (image_embedding - domain_embedding_extended) ** 2
    l2_loss = l2_loss.sum(2)
    l2_loss = l2_loss.sum() / l2_loss.numel()
    return l2_loss
def same_domain_triplet_loss(image_embedding, trip_images, gt, hard_k = None, margin = 2):
    """Hinge triplet loss of each image against its candidate set.

    Args:
        image_embedding: (batch, feats) anchor embeddings.
        trip_images: (batch, pairs, feats) candidate embeddings per anchor.
        gt: (batch,) index of the positive candidate for each anchor.
        hard_k: if set, keep only the k largest violations per anchor.
        margin: hinge margin.
    Returns:
        Mean loss over triplets with non-zero violation.
    """
    batch_size, pairs, _ = trip_images.shape
    rows = torch.arange(batch_size).to(device)
    # Squared L2 distance from each anchor to every candidate: (batch, pairs).
    dists = ((image_embedding[:, None, :].expand(-1, pairs, -1) - trip_images) ** 2).sum(2)
    # Anchor-positive distance, broadcast across all candidates.
    pos = dists[rows, gt][:, None].expand(-1, pairs)
    loss = pos - dists + margin
    # Zero out the positive-vs-itself term ...
    loss[rows, gt] = 0
    # ... and the easy (already satisfied) triplets.
    loss[loss < 0] = 0
    if hard_k:
        # Hard mining: keep only the k largest violations per anchor.
        loss, _ = loss.topk(hard_k)
    num_valid = loss[loss > 1e-16].size(0)
    return loss.sum() / (num_valid + 1e-16)
def cross_domain_triplet_loss(image_embedding, domain_embedding, gt, hard_k = None, margin = 2):
    """Hinge triplet loss between images and the shared candidate embeddings.

    Args:
        image_embedding: (batch, feats) image embeddings.
        domain_embedding: (pairs, feats) candidate embeddings shared by all images.
        gt: (batch,) ground-truth candidate index per image.
        hard_k: if set, keep only the k largest violations per image.
        margin: hinge margin.
    Returns:
        Mean loss over all triplets with non-zero violation.
    """
    batch_size = image_embedding.shape[0]
    pairs = domain_embedding.shape[0]
    rows = torch.arange(batch_size).to(device)
    # Broadcast both sides to (batch, pairs, feats) and take squared L2 distances.
    img = image_embedding[:, None, :].expand(-1, pairs, -1)
    dom = domain_embedding[None, :, :].expand(batch_size, -1, -1)
    dists = ((img - dom) ** 2).sum(2)
    # Anchor-positive distance, broadcast across all candidates.
    pos = dists[rows, gt][:, None].expand(-1, pairs)
    loss = pos - dists + margin
    # Zero out the positive-vs-itself term ...
    loss[rows, gt] = 0
    # ... and the easy (already satisfied) triplets.
    loss[loss < 0] = 0
    if hard_k:
        # Hard mining: keep only the k largest violations per image.
        loss, _ = loss.topk(hard_k)
    num_valid = loss[loss > 1e-16].size(0)
    return loss.sum() / (num_valid + 1e-16)
def same_domain_triplet_loss_old(image_embedding, positive_anchor, negative_anchor, margin = 2):
    """Hinge triplet loss with one positive and several negatives per anchor.

    Args:
        image_embedding: (batch, feats) anchor embeddings.
        positive_anchor: (batch, feats) positive embeddings.
        negative_anchor: (batch, negs, feats) negative embeddings.
        margin: hinge margin.
    Returns:
        Mean loss over triplets with non-zero violation.
    """
    negs = negative_anchor.shape[1]
    # Anchor-positive squared distance, repeated once per negative.
    d_pos = ((image_embedding - positive_anchor) ** 2).sum(1)[:, None].expand(-1, negs)
    # Anchor-negative squared distances: (batch, negs).
    anchors = image_embedding[:, None, :].expand(-1, negs, -1)
    d_neg = ((anchors - negative_anchor) ** 2).sum(2)
    losses = d_pos - d_neg + margin
    # Hinge: drop already-satisfied triplets.
    losses[losses < 0] = 0
    num_valid = losses[losses > 1e-16].size(0)
    return losses.sum() / (num_valid + 1e-16)
def pairwise_distances(x, y=None):
    """Squared Euclidean distance matrix.

    Args:
        x: (N, d) matrix.
        y: optional (M, d) matrix; defaults to x.
    Returns:
        (N, M) tensor with dist[i, j] = ||x[i] - y[j]||^2, clamped at 0.
    """
    x_norm = (x ** 2).sum(1).view(-1, 1)
    if y is None:
        y_t = x.t()
        y_norm = x_norm.view(1, -1)
    else:
        y_t = y.t()
        y_norm = (y ** 2).sum(1).view(1, -1)
    dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
    # Numerical noise can push true zeros slightly negative; clamp them away.
    return torch.clamp(dist, 0.0, np.inf)
class Evaluator:
    """Evaluation helper for compositional (attribute, object) prediction.

    Precomputes index tensors and masks from the dataset (closed-world mask,
    seen-pair mask, per-object oracle masks), then scores model outputs under
    several protocols: open/closed world, biased/unbiased, object oracle, and
    a calibration-bias sweep that yields seen/unseen accuracies and their AUC.
    """
    def __init__(self, dset, model):
        # NOTE(review): `model` is accepted but never stored or used here.
        self.dset = dset
        # Convert text pairs to idx tensors: [('sliced', 'apple'), ('ripe', 'apple'), ...] --> torch.LongTensor([[0,1],[1,1], ...])
        pairs = [(dset.attr2idx[attr], dset.obj2idx[obj]) for attr, obj in dset.pairs]
        self.train_pairs = [(dset.attr2idx[attr], dset.obj2idx[obj]) for attr, obj in dset.train_pairs]
        self.pairs = torch.LongTensor(pairs)
        # Mask over pairs that occur in closed world
        # Select set based on phase
        if dset.phase == 'train':
            print('Evaluating with train pairs')
            test_pair_set = set(dset.train_pairs)
            test_pair_gt = set(dset.train_pairs)
        elif dset.phase == 'val':
            print('Evaluating with validation pairs')
            test_pair_set = set(dset.val_pairs + dset.train_pairs)
            test_pair_gt = set(dset.val_pairs)
        else:
            print('Evaluating with test pairs')
            test_pair_set = set(dset.test_pairs + dset.train_pairs)
            test_pair_gt = set(dset.test_pairs)
        self.test_pair_dict = [(dset.attr2idx[attr], dset.obj2idx[obj]) for attr, obj in test_pair_gt]
        self.test_pair_dict = dict.fromkeys(self.test_pair_dict, 0)
        # dict values are pair val, score, total
        for attr, obj in test_pair_gt:
            pair_val = dset.pair2idx[(attr,obj)]
            key = (dset.attr2idx[attr], dset.obj2idx[obj])
            self.test_pair_dict[key] = [pair_val, 0, 0]
        # In the open-world setting every pair is a candidate; otherwise only
        # pairs in the current evaluation split (plus train pairs) are.
        if dset.open_world:
            masks = [1 for _ in dset.pairs]
        else:
            masks = [1 if pair in test_pair_set else 0 for pair in dset.pairs]
        self.closed_mask = torch.BoolTensor(masks)
        # Mask of seen concepts
        seen_pair_set = set(dset.train_pairs)
        mask = [1 if pair in seen_pair_set else 0 for pair in dset.pairs]
        self.seen_mask = torch.BoolTensor(mask)
        # Object specific mask over which pairs occur in the object oracle setting
        oracle_obj_mask = []
        for _obj in dset.objs:
            mask = [1 if _obj == obj else 0 for attr, obj in dset.pairs]
            oracle_obj_mask.append(torch.BoolTensor(mask))
        self.oracle_obj_mask = torch.stack(oracle_obj_mask, 0)
        # Decide if the model under evaluation is a manifold model or not
        self.score_model = self.score_manifold_model
    # Generate mask for each settings, mask scores, and get prediction labels
    def generate_predictions(self, scores, obj_truth, bias = 0.0, topk = 5): # (Batch, #pairs)
        '''
        Inputs
            scores: (Batch, #pairs) output scores; modified in place by the bias
            obj_truth: Ground truth object indices, used for the object oracle
            bias: additive calibration bias applied to unseen (non-train) pairs
            topk: number of predictions kept per setting
        Returns
            results: dict of (attr_pred, obj_pred) tuples per setting
        '''
        def get_pred_from_scores(_scores, topk):
            '''
            Given a (Batch, #pairs) score matrix, returns the top-k attribute
            and object index predictions, each of shape (Batch, topk).
            '''
            _, pair_pred = _scores.topk(topk, dim = 1) #sort returns indices of k largest values
            pair_pred = pair_pred.contiguous().view(-1)
            attr_pred, obj_pred = self.pairs[pair_pred][:, 0].view(-1, topk), \
                self.pairs[pair_pred][:, 1].view(-1, topk)
            return (attr_pred, obj_pred)
        results = {}
        # Keep an unbiased copy before the calibration bias is added in place.
        orig_scores = scores.clone()
        mask = self.seen_mask.repeat(scores.shape[0],1) # Repeat mask along pairs dimension
        scores[~mask] += bias # Add bias to test pairs
        # Unbiased setting
        # Open world setting --no mask, all pairs of the dataset
        results.update({'open': get_pred_from_scores(scores, topk)})
        results.update({'unbiased_open': get_pred_from_scores(orig_scores, topk)})
        # Closed world setting - set the score for all Non test pairs to -1e10,
        # this excludes the pairs from set not in evaluation
        mask = self.closed_mask.repeat(scores.shape[0], 1)
        closed_scores = scores.clone()
        closed_scores[~mask] = -1e10
        closed_orig_scores = orig_scores.clone()
        closed_orig_scores[~mask] = -1e10
        results.update({'closed': get_pred_from_scores(closed_scores, topk)})
        results.update({'unbiased_closed': get_pred_from_scores(closed_orig_scores, topk)})
        # Object_oracle setting - set the score to -1e10 for all pairs where the true object does Not participate, can also use the closed score
        mask = self.oracle_obj_mask[obj_truth]
        oracle_obj_scores = scores.clone()
        oracle_obj_scores[~mask] = -1e10
        oracle_obj_scores_unbiased = orig_scores.clone()
        oracle_obj_scores_unbiased[~mask] = -1e10
        results.update({'object_oracle': get_pred_from_scores(oracle_obj_scores, 1)})
        results.update({'object_oracle_unbiased': get_pred_from_scores(oracle_obj_scores_unbiased, 1)})
        return results
    def score_clf_model(self, scores, obj_truth, topk = 5):
        '''
        Wrapper function to call generate_predictions for CLF models
        (models that emit independent attribute and object probabilities).
        '''
        attr_pred, obj_pred = scores
        # Go to CPU
        attr_pred, obj_pred, obj_truth = attr_pred.to('cpu'), obj_pred.to('cpu'), obj_truth.to('cpu')
        # Gather scores (P(a), P(o)) for all relevant (a,o) pairs
        # Multiply P(a) * P(o) to get P(pair)
        attr_subset = attr_pred.index_select(1, self.pairs[:,0]) # Return only attributes that are in our pairs
        obj_subset = obj_pred.index_select(1, self.pairs[:, 1])
        scores = (attr_subset * obj_subset) # (Batch, #pairs)
        results = self.generate_predictions(scores, obj_truth)
        results['biased_scores'] = scores
        return results
    def score_manifold_model(self, scores, obj_truth, bias = 0.0, topk = 5):
        '''
        Wrapper function to call generate_predictions for manifold models
        (models that emit one score per (attr, obj) pair).
        '''
        # Go to CPU
        scores = {k: v.to('cpu') for k, v in scores.items()}
        # NOTE(review): despite the comment above, obj_truth is moved to the
        # module-global `device`, not CPU — confirm this is intended.
        obj_truth = obj_truth.to(device)
        # Gather scores for all relevant (a,o) pairs
        scores = torch.stack(
            [scores[(attr,obj)] for attr, obj in self.dset.pairs], 1
        ) # (Batch, #pairs)
        orig_scores = scores.clone()
        results = self.generate_predictions(scores, obj_truth, bias, topk)
        results['scores'] = orig_scores
        return results
    def score_fast_model(self, scores, obj_truth, bias = 0.0, topk = 5):
        '''
        Wrapper function to call generate_predictions for manifold models
        (fast path used inside the bias sweep: only the biased closed-world
        top-k predictions are computed).
        '''
        results = {}
        mask = self.seen_mask.repeat(scores.shape[0],1) # Repeat mask along pairs dimension
        scores[~mask] += bias # Add bias to test pairs
        mask = self.closed_mask.repeat(scores.shape[0], 1)
        closed_scores = scores.clone()
        closed_scores[~mask] = -1e10
        _, pair_pred = closed_scores.topk(topk, dim = 1) #sort returns indices of k largest values
        pair_pred = pair_pred.contiguous().view(-1)
        attr_pred, obj_pred = self.pairs[pair_pred][:, 0].view(-1, topk), \
            self.pairs[pair_pred][:, 1].view(-1, topk)
        results.update({'closed': (attr_pred, obj_pred)})
        return results
    def evaluate_predictions(self, predictions, attr_truth, obj_truth, pair_truth, allpred, topk = 1):
        '''
        Compute all final metrics from the per-setting predictions.

        Inputs
            predictions: dict from generate_predictions / score_manifold_model
            attr_truth, obj_truth, pair_truth: ground-truth index tensors
            allpred: dict {(attr, obj): scores} over the full evaluation set,
                re-scored at every bias during the AUC sweep
            topk: k used for the accuracy computations
        Returns
            stats: dict of scalar metrics (match rates, AUC, best seen/unseen,
                best harmonic mean and the bias achieving it)
        '''
        # Go to CPU
        attr_truth, obj_truth, pair_truth = attr_truth.to('cpu'), obj_truth.to('cpu'), pair_truth.to('cpu')
        pairs = list(
            zip(list(attr_truth.numpy()), list(obj_truth.numpy())))
        # Split sample indices into those whose ground-truth pair was seen
        # during training and those whose pair was not.
        seen_ind, unseen_ind = [], []
        for i in range(len(attr_truth)):
            if pairs[i] in self.train_pairs:
                seen_ind.append(i)
            else:
                unseen_ind.append(i)
        seen_ind, unseen_ind = torch.LongTensor(seen_ind), torch.LongTensor(unseen_ind)
        def _process(_scores):
            # Top k pair accuracy
            # Attribute, object and pair
            attr_match = (attr_truth.unsqueeze(1).repeat(1, topk) == _scores[0][:, :topk])
            obj_match = (obj_truth.unsqueeze(1).repeat(1, topk) == _scores[1][:, :topk])
            # Match of object pair
            match = (attr_match * obj_match).any(1).float()
            attr_match = attr_match.any(1).float()
            obj_match = obj_match.any(1).float()
            # Match of seen and unseen pairs
            seen_match = match[seen_ind]
            unseen_match = match[unseen_ind]
            ### Calculating class average accuracy
            # local_score_dict = copy.deepcopy(self.test_pair_dict)
            # for pair_gt, pair_pred in zip(pairs, match):
            #     # print(pair_gt)
            #     local_score_dict[pair_gt][2] += 1.0 #increase counter
            #     if int(pair_pred) == 1:
            #         local_score_dict[pair_gt][1] += 1.0
            # # Now we have hits and totals for classes in evaluation set
            # seen_score, unseen_score = [], []
            # for key, (idx, hits, total) in local_score_dict.items():
            #     score = hits/total
            #     if bool(self.seen_mask[idx]) == True:
            #         seen_score.append(score)
            #     else:
            #         unseen_score.append(score)
            # NOTE(review): hard-coded placeholder standing in for the
            # commented-out class-average computation above — the *_ca stats
            # derived from these are therefore constant, not real metrics.
            seen_score, unseen_score = torch.ones(512,5), torch.ones(512,5)
            return attr_match, obj_match, match, seen_match, unseen_match, \
                torch.Tensor(seen_score+unseen_score), torch.Tensor(seen_score), torch.Tensor(unseen_score)
        def _add_to_dict(_scores, type_name, stats):
            # Store the tuple returned by _process under prefixed stat names.
            base = ['_attr_match', '_obj_match', '_match', '_seen_match', '_unseen_match', '_ca', '_seen_ca', '_unseen_ca']
            for val, name in zip(_scores, base):
                stats[type_name + name] = val
        ##################### Match in places where correct object
        obj_oracle_match = (attr_truth == predictions['object_oracle'][0][:, 0]).float() #object is already conditioned
        obj_oracle_match_unbiased = (attr_truth == predictions['object_oracle_unbiased'][0][:, 0]).float()
        stats = dict(obj_oracle_match = obj_oracle_match, obj_oracle_match_unbiased = obj_oracle_match_unbiased)
        #################### Closed world
        closed_scores = _process(predictions['closed'])
        unbiased_closed = _process(predictions['unbiased_closed'])
        _add_to_dict(closed_scores, 'closed', stats)
        _add_to_dict(unbiased_closed, 'closed_ub', stats)
        #################### Calculating AUC
        scores = predictions['scores']
        # getting score for each ground truth class
        correct_scores = scores[torch.arange(scores.shape[0]), pair_truth][unseen_ind]
        # Getting top predicted score for these unseen classes
        max_seen_scores = predictions['scores'][unseen_ind][:, self.seen_mask].topk(topk, dim=1)[0][:, topk - 1]
        # Getting difference between these scores
        unseen_score_diff = max_seen_scores - correct_scores
        # Getting matched classes at max bias for diff
        unseen_matches = stats['closed_unseen_match'].bool()
        correct_unseen_score_diff = unseen_score_diff[unseen_matches] - 1e-4
        # sorting these diffs
        correct_unseen_score_diff = torch.sort(correct_unseen_score_diff)[0]
        magic_binsize = 20
        # getting step size for these bias values
        bias_skip = max(len(correct_unseen_score_diff) // magic_binsize, 1)
        # Getting list
        biaslist = correct_unseen_score_diff[::bias_skip]
        seen_match_max = float(stats['closed_seen_match'].mean())
        unseen_match_max = float(stats['closed_unseen_match'].mean())
        seen_accuracy, unseen_accuracy = [], []
        # Go to CPU
        base_scores = {k: v.to('cpu') for k, v in allpred.items()}
        obj_truth = obj_truth.to('cpu')
        # Gather scores for all relevant (a,o) pairs
        base_scores = torch.stack(
            [allpred[(attr,obj)] for attr, obj in self.dset.pairs], 1
        ) # (Batch, #pairs)
        # Sweep the calibration bias and record seen/unseen accuracy at each value.
        for bias in biaslist:
            scores = base_scores.clone()
            results = self.score_fast_model(scores, obj_truth, bias = bias, topk = topk)
            results = results['closed'] # we only need biased
            results = _process(results)
            seen_match = float(results[3].mean())
            unseen_match = float(results[4].mean())
            seen_accuracy.append(seen_match)
            unseen_accuracy.append(unseen_match)
        # Append the zero-bias operating point as the final entry.
        seen_accuracy.append(seen_match_max)
        unseen_accuracy.append(unseen_match_max)
        seen_accuracy, unseen_accuracy = np.array(seen_accuracy), np.array(unseen_accuracy)
        area = np.trapz(seen_accuracy, unseen_accuracy)
        for key in stats:
            stats[key] = float(stats[key].mean())
        harmonic_mean = hmean([seen_accuracy, unseen_accuracy], axis = 0)
        max_hm = np.max(harmonic_mean)
        idx = np.argmax(harmonic_mean)
        # The last sweep entry is the zero-bias point appended above; it has no
        # entry in biaslist, so report a sentinel bias for it.
        if idx == len(biaslist):
            bias_term = 1e3
        else:
            bias_term = biaslist[idx]
        stats['biasterm'] = float(bias_term)
        stats['best_unseen'] = np.max(unseen_accuracy)
        stats['best_seen'] = np.max(seen_accuracy)
        stats['AUC'] = area
        stats['hm_unseen'] = unseen_accuracy[idx]
        stats['hm_seen'] = seen_accuracy[idx]
        stats['best_hm'] = max_hm
        return stats
# File: czsl-main/models/gcn.py (repo: czsl)
import numpy as np
import scipy.sparse as sp
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def normt_spm(mx, method='in'):
    """Normalise a sparse adjacency matrix.

    Args:
        mx: scipy sparse matrix.
        method: 'in'  -- transpose, then row-normalise (in-edge normalisation).
                'sym' -- symmetric normalisation D^-1/2 A D^-1/2.
    Returns:
        The normalised scipy sparse matrix.
    Raises:
        ValueError: for an unknown `method` (the original silently fell
        through and returned None, which crashed later at the call site).
    """
    if method == 'in':
        mx = mx.transpose()
        rowsum = np.array(mx.sum(1))
        r_inv = np.power(rowsum, -1).flatten()
        r_inv[np.isinf(r_inv)] = 0.  # rows with zero degree stay all-zero
        r_mat_inv = sp.diags(r_inv)
        return r_mat_inv.dot(mx)
    if method == 'sym':
        rowsum = np.array(mx.sum(1))
        r_inv = np.power(rowsum, -0.5).flatten()
        r_inv[np.isinf(r_inv)] = 0.
        r_mat_inv = sp.diags(r_inv)
        return mx.dot(r_mat_inv).transpose().dot(r_mat_inv)
    raise ValueError('normt_spm: unknown method {!r}'.format(method))
def spm_to_tensor(sparse_mx):
    """Convert a scipy sparse matrix into a torch sparse COO float32 tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(np.vstack(
        (sparse_mx.row, sparse_mx.col))).long()
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    # torch.sparse.FloatTensor is deprecated; torch.sparse_coo_tensor is the
    # supported constructor and yields the same float32 COO tensor.
    return torch.sparse_coo_tensor(indices, values, shape)
class GraphConv(nn.Module):
    """One graph-convolution layer: adj @ (x @ W^T) + b, with optional
    input dropout and optional ReLU on the output."""
    def __init__(self, in_channels, out_channels, dropout=False, relu=True):
        super().__init__()
        self.dropout = nn.Dropout(p=0.5) if dropout else None
        self.layer = nn.Linear(in_channels, out_channels)
        self.relu = nn.ReLU() if relu else None
    def forward(self, inputs, adj):
        x = inputs if self.dropout is None else self.dropout(inputs)
        # Feature transform first, then neighbourhood aggregation via adj.
        support = torch.mm(x, self.layer.weight.T)
        out = torch.mm(adj, support) + self.layer.bias
        if self.relu is not None:
            out = self.relu(out)
        return out
class GCN(nn.Module):
    """Stack of GraphConv layers described by a spec string such as 'd4096,d'.

    Each comma-separated token is a hidden width; a leading 'd' on a token
    enables dropout for that layer, and a trailing lone 'd' token enables
    dropout on the final (no-ReLU) output layer. Output rows are L2-normalised.
    """
    def __init__(self, adj, in_channels, out_channels, hidden_layers):
        super().__init__()
        # Row-normalise the (transposed) adjacency and keep it on `device`.
        self.adj = spm_to_tensor(normt_spm(adj, method='in')).to(device)
        self.train_adj = self.adj
        spec = hidden_layers.split(',')
        dropout_last = spec[-1] == 'd'
        if dropout_last:
            spec = spec[:-1]
        layers = []
        last_c = in_channels
        for idx, token in enumerate(spec, start=1):
            use_dropout = token[0] == 'd'
            width = int(token[1:] if use_dropout else token)
            conv = GraphConv(last_c, width, dropout=use_dropout)
            self.add_module('conv{}'.format(idx), conv)
            layers.append(conv)
            last_c = width
        conv = GraphConv(last_c, out_channels, relu=False, dropout=dropout_last)
        self.add_module('conv-last', conv)
        layers.append(conv)
        self.layers = layers
    def forward(self, x):
        # train_adj aliases adj; kept separate for API parity with variants.
        graph = self.train_adj if self.training else self.adj
        for conv in self.layers:
            x = conv(x, graph)
        return F.normalize(x)
### GCNII
class GraphConvolution(nn.Module):
    """GCNII-style graph convolution with initial residual and identity mapping.

    Computes hi = adj @ x, mixes in the initial representation h0 via alpha,
    and blends the linear transform with an identity path via
    theta = log(lamda / l + 1), where l is the 1-based layer depth.
    """
    def __init__(self, in_features, out_features, dropout=False, relu=True, residual=False, variant=False):
        super(GraphConvolution, self).__init__()
        self.variant = variant
        # The 'variant' concatenates [hi, h0], doubling the input width.
        if self.variant:
            self.in_features = 2*in_features
        else:
            self.in_features = in_features
        if dropout:
            self.dropout = nn.Dropout(p=0.5)
        else:
            self.dropout = None
        if relu:
            self.relu = nn.ReLU()
        else:
            self.relu = None
        self.out_features = out_features
        self.residual = residual
        self.layer = nn.Linear(self.in_features, self.out_features, bias = False)
    def reset_parameters(self):
        """Re-initialise the weight uniformly in [-1/sqrt(out), 1/sqrt(out)].

        Bug fix: the original referenced `self.weight`, which does not exist
        (the weight lives on the nn.Linear stored in `self.layer`), so this
        method raised AttributeError whenever it was called.
        """
        stdv = 1. / math.sqrt(self.out_features)
        self.layer.weight.data.uniform_(-stdv, stdv)
    def forward(self, input, adj , h0 , lamda, alpha, l):
        if self.dropout is not None:
            input = self.dropout(input)
        # Identity-mapping strength decays with depth l.
        theta = math.log(lamda/l+1)
        hi = torch.spmm(adj, input)
        if self.variant:
            support = torch.cat([hi,h0],1)
            r = (1-alpha)*hi+alpha*h0
        else:
            support = (1-alpha)*hi+alpha*h0
            r = support
        mm_term = torch.mm(support, self.layer.weight.T)
        output = theta*mm_term+(1-theta)*r
        if self.residual:
            output = output+input
        if self.relu is not None:
            output = self.relu(output)
        return output
class GCNII(nn.Module):
    """GCNII network: input projection, `hidden_layers` GraphConvolution
    blocks sharing the initial representation h0, and an output projection."""
    def __init__(self, adj, in_channels , out_channels, hidden_dim, hidden_layers, lamda, alpha, variant, dropout = True):
        super(GCNII, self).__init__()
        self.alpha = alpha
        self.lamda = lamda
        # Row-normalise the (transposed) adjacency and keep it on `device`.
        self.adj = spm_to_tensor(normt_spm(adj, method='in')).to(device)
        # Submodule creation order matches the original (fc_dim, convs, fc_out)
        # so parameter initialisation and state-dict layout are unchanged.
        self.fc_dim = nn.Linear(in_channels, hidden_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout()
        self.layers = nn.ModuleList(
            GraphConvolution(hidden_dim, hidden_dim, variant=variant, dropout=dropout)
            for _ in range(hidden_layers)
        )
        self.fc_out = nn.Linear(hidden_dim, out_channels)
    def forward(self, x):
        # Initial representation h0: dropout -> linear -> ReLU.
        h0 = self.relu(self.fc_dim(self.dropout(x)))
        out = h0
        # Every block receives h0 and its own 1-based depth index.
        for depth, conv in enumerate(self.layers, start=1):
            out = conv(out, self.adj, h0, self.lamda, self.alpha, depth)
        return self.fc_out(self.dropout(out))
# File: czsl-main/models/word_embedding.py (repo: czsl)
import torch
import numpy as np
from flags import DATA_FOLDER
def load_word_embeddings(emb_type, vocab):
    """Load word embeddings for `vocab` from one or more pretrained sources.

    emb_type selects a single source ('glove', 'fasttext', 'word2vec') or a
    concatenation of sources ('ft+w2v', 'ft+gl', 'ft+ft', 'gl+w2v',
    'ft+w2v+gl'). Raises ValueError for anything else.
    """
    # Single-source embeddings: return directly.
    if emb_type == 'glove':
        return load_glove_embeddings(vocab)
    if emb_type == 'fasttext':
        return load_fasttext_embeddings(vocab)
    if emb_type == 'word2vec':
        return load_word2vec_embeddings(vocab)
    # Multi-source embeddings: concatenate along the feature dimension.
    if emb_type == 'ft+w2v':
        parts = [load_fasttext_embeddings(vocab), load_word2vec_embeddings(vocab)]
    elif emb_type == 'ft+gl':
        parts = [load_fasttext_embeddings(vocab), load_glove_embeddings(vocab)]
    elif emb_type == 'ft+ft':
        ft_embeds = load_fasttext_embeddings(vocab)
        parts = [ft_embeds, ft_embeds]
    elif emb_type == 'gl+w2v':
        parts = [load_glove_embeddings(vocab), load_word2vec_embeddings(vocab)]
    elif emb_type == 'ft+w2v+gl':
        parts = [load_fasttext_embeddings(vocab), load_word2vec_embeddings(vocab),
                 load_glove_embeddings(vocab)]
    else:
        raise ValueError('Invalid embedding')
    embeds = torch.cat(parts, dim = 1)
    print('Combined embeddings are ', embeds.shape)
    return embeds
def load_fasttext_embeddings(vocab):
    """Return a (len(vocab), 300) tensor of fastText vectors.

    Words are lower-cased, dataset-specific tokens are remapped via
    custom_map, and multi-word tokens ('a_b') are averaged part-by-part.
    """
    custom_map = {
        'Faux.Fur': 'fake fur',
        'Faux.Leather': 'fake leather',
        'Full.grain.leather': 'thick leather',
        'Hair.Calf': 'hairy leather',
        'Patent.Leather': 'shiny leather',
        'Boots.Ankle': 'ankle boots',
        'Boots.Knee.High': 'kneehigh boots',
        'Boots.Mid-Calf': 'midcalf boots',
        'Shoes.Boat.Shoes': 'boatshoes',
        'Shoes.Clogs.and.Mules': 'clogs shoes',
        'Shoes.Flats': 'flats shoes',
        'Shoes.Heels': 'heels',
        'Shoes.Loafers': 'loafers',
        'Shoes.Oxfords': 'oxford shoes',
        'Shoes.Sneakers.and.Athletic.Shoes': 'sneakers',
        'traffic_light': 'traficlight',
        'trash_can': 'trashcan',
        'dry-erase_board' : 'dry_erase_board',
        'black_and_white' : 'black_white',
        'eiffel_tower' : 'tower'
    }
    # Lower-case first, then substitute dataset-specific spellings.
    # NOTE(review): mixed-case keys (e.g. 'Faux.Fur') can never match the
    # already-lower-cased words, so only the all-lowercase entries take
    # effect — confirm whether this is intended (load_glove_embeddings uses
    # lowercase keys instead).
    vocab = [custom_map.get(v.lower(), v.lower()) for v in vocab]
    import fasttext.util
    ft = fasttext.load_model(DATA_FOLDER+'/fast/cc.en.300.bin')
    embeds = []
    for word in vocab:
        if '_' in word:
            # Average the vectors of the token's parts.
            vec = np.stack([ft.get_word_vector(part) for part in word.split('_')]).mean(axis=0)
        else:
            vec = ft.get_word_vector(word)
        embeds.append(vec)
    embeds = torch.Tensor(np.stack(embeds))
    print('Fasttext Embeddings loaded, total embeddings: {}'.format(embeds.size()))
    return embeds
def load_word2vec_embeddings(vocab):
    """Return a (len(vocab), 300) tensor of word2vec (GoogleNews) vectors.

    Dataset-specific tokens are remapped via custom_map; multi-word tokens
    missing from the model vocabulary are averaged part-by-part.
    """
    from gensim import models
    model = models.KeyedVectors.load_word2vec_format(
        DATA_FOLDER+'/w2v/GoogleNews-vectors-negative300.bin', binary=True)
    custom_map = {
        'Faux.Fur': 'fake_fur',
        'Faux.Leather': 'fake_leather',
        'Full.grain.leather': 'thick_leather',
        'Hair.Calf': 'hair_leather',
        'Patent.Leather': 'shiny_leather',
        'Boots.Ankle': 'ankle_boots',
        'Boots.Knee.High': 'knee_high_boots',
        'Boots.Mid-Calf': 'midcalf_boots',
        'Shoes.Boat.Shoes': 'boat_shoes',
        'Shoes.Clogs.and.Mules': 'clogs_shoes',
        'Shoes.Flats': 'flats_shoes',
        'Shoes.Heels': 'heels',
        'Shoes.Loafers': 'loafers',
        'Shoes.Oxfords': 'oxford_shoes',
        'Shoes.Sneakers.and.Athletic.Shoes': 'sneakers',
        'traffic_light': 'traffic_light',
        'trash_can': 'trashcan',
        'dry-erase_board' : 'dry_erase_board',
        'black_and_white' : 'black_white',
        'eiffel_tower' : 'tower'
    }
    embeds = []
    for word in vocab:
        word = custom_map.get(word, word)
        if '_' in word and word not in model:
            # OOV multi-word token: average the vectors of its parts.
            emb = np.stack([model[part] for part in word.split('_')]).mean(axis=0)
        else:
            emb = model[word]
        embeds.append(emb)
    embeds = torch.Tensor(np.stack(embeds))
    print('Word2Vec Embeddings loaded, total embeddings: {}'.format(embeds.size()))
    return embeds
def load_glove_embeddings(vocab):
    """Return a (len(vocab), 300) tensor of GloVe 6B vectors for `vocab`.

    Words are lower-cased, dataset-specific tokens (UT Zappos etc.) are
    remapped via custom_map, and multi-word tokens ('a_b') are averaged
    word-by-word.
    """
    vocab = [v.lower() for v in vocab]
    emb_file = DATA_FOLDER+'/glove/glove.6B.300d.txt'
    # Parse the GloVe text file into {word: 300-d FloatTensor}.
    model = {}
    for line in open(emb_file, 'r'):
        tokens = line.strip().split(' ')
        model[tokens[0]] = torch.FloatTensor([float(t) for t in tokens[1:]])
    # Adding some vectors for UT Zappos
    custom_map = {
        'faux.fur': 'fake_fur',
        'faux.leather': 'fake_leather',
        'full.grain.leather': 'thick_leather',
        'hair.calf': 'hair_leather',
        'patent.leather': 'shiny_leather',
        'boots.ankle': 'ankle_boots',
        'boots.knee.high': 'knee_high_boots',
        'boots.mid-calf': 'midcalf_boots',
        'shoes.boat.shoes': 'boat_shoes',
        'shoes.clogs.and.mules': 'clogs_shoes',
        'shoes.flats': 'flats_shoes',
        'shoes.heels': 'heels',
        'shoes.loafers': 'loafers',
        'shoes.oxfords': 'oxford_shoes',
        'shoes.sneakers.and.athletic.shoes': 'sneakers',
        'traffic_light': 'traffic_light',
        'trash_can': 'trashcan',
        'dry-erase_board' : 'dry_erase_board',
        'black_and_white' : 'black_white',
        'eiffel_tower' : 'tower',
        'nubuck' : 'grainy_leather',
    }
    embeds = []
    for word in vocab:
        word = custom_map.get(word, word)
        if '_' in word:
            # Average the vectors of the token's parts.
            emb = torch.stack([model[part] for part in word.split('_')]).mean(dim=0)
        else:
            emb = model[word]
        embeds.append(emb)
    embeds = torch.stack(embeds)
    print('Glove Embeddings loaded, total embeddings: {}'.format(embeds.size()))
    return embeds
# File: czsl-main/models/modular_methods.py (repo: czsl)
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as tmodels
import numpy as np
from .word_embedding import load_word_embeddings
import itertools
import math
import collections
from torch.distributions.bernoulli import Bernoulli
import pdb
import sys
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""
    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
class GatingSampler(nn.Module):
    """Wraps a gating network and (optionally) samples from its output.

    Only deterministic pass-through of the gater's output is implemented;
    enabling stochastic sampling raises NotImplementedError at forward time.
    """
    def __init__(self, gater, stoch_sample=True, temperature=1.0):
        nn.Module.__init__(self)
        self.gater = gater
        self._stoch_sample = stoch_sample
        self._temperature = temperature
    def disable_stochastic_sampling(self):
        self._stoch_sample = False
    def enable_stochastic_sampling(self):
        self._stoch_sample = True
    def forward(self, tdesc=None, return_additional=False, gating_wt=None):
        # With no gater configured, gating is a no-op.
        if self.gater is None:
            return (None, None) if return_additional else None
        if gating_wt is None:
            gating_g = self.gater(tdesc)
            return_wts = None
        else:
            gating_g = self.gater(tdesc, gating_wt=gating_wt)
            return_wts = gating_wt
        # A gater may return (gating, weights); unpack it.
        if isinstance(gating_g, tuple):
            return_wts = gating_g[1]
            gating_g = gating_g[0]
        if self._stoch_sample:
            raise (NotImplementedError)
        sampled_g = gating_g
        return (sampled_g, return_wts) if return_additional else sampled_g
class GatedModularNet(nn.Module):
    """
    An interface for creating modular nets

    Holds a grid of modules (`module_list`, one inner list per layer) plus
    optional input (`start_modules`) and per-task output (`end_modules`)
    heads. In the gated forward pass, each module's input is a weighted sum
    of the previous layer's outputs, with weights taken from `sampled_g`.
    """
    def __init__(self,
                 module_list,
                 start_modules=None,
                 end_modules=None,
                 single_head=False,
                 chain=False):
        """Store the module grid and optional start/end heads.

        :module_list: list of layers; each layer is a list of nn.Modules
        :start_modules: optional list of input modules applied to x
        :end_modules: optional list of output heads (indexed by task id
            unless `single_head` is set)
        :single_head: always use end_modules[0] regardless of task id
        :chain: use gating index 0 for every module (chain topology)
        """
        nn.Module.__init__(self)
        self._module_list = nn.ModuleList(
            [nn.ModuleList(m) for m in module_list])
        self.num_layers = len(self._module_list)
        if start_modules is not None:
            self._start_modules = nn.ModuleList(start_modules)
        else:
            self._start_modules = None
        if end_modules is not None:
            self._end_modules = nn.ModuleList(end_modules)
            # End heads count as one extra layer for gating indices.
            self.num_layers += 1
        else:
            self._end_modules = None
        self.sampled_g = None
        self.single_head = single_head
        self._chain = chain
    def forward(self, x, sampled_g=None, t=None, return_feat=False):
        """Run the modular network.

        :x: Input data
        :sampled_g: Gating tensors (#Task x) #num_layer x #num_mods x #num_mods;
            when None, every module's outputs are simply averaged per layer
        :t: task ID per batch element (selects the end head)
        :return_feat: additionally return the features feeding the end head
        :returns: output tensor, or (output, features) if return_feat
        """
        if t is None:
            t_not_set = True
            # NOTE(review): builds the task ids with x's dtype before casting
            # to long — presumably just a roundabout way to get zeros; confirm.
            t = torch.tensor([0] * x.shape[0], dtype=x.dtype).long()
        else:
            t_not_set = False
            t = t.squeeze()
        if self._start_modules is not None:
            prev_out = [mod(x) for mod in self._start_modules]
        else:
            prev_out = [x]
        if sampled_g is None:
            # NON-Gated Module network
            # Each layer averages the outputs of all its modules.
            prev_out = sum(prev_out) / float(len(prev_out))
            #prev_out = torch.mean(prev_out, 0)
            for li in range(len(self._module_list)):
                prev_out = sum([
                    mod(prev_out) for mod in self._module_list[li]
                ]) / float(len(self._module_list[li]))
            features = prev_out
            if self._end_modules is not None:
                if t_not_set or self.single_head:
                    prev_out = self._end_modules[0](prev_out)
                else:
                    # Route each batch element through its task's head.
                    prev_out = torch.cat([
                        self._end_modules[tid](prev_out[bi:bi + 1])
                        for bi, tid in enumerate(t)
                    ], 0)
            if return_feat:
                return prev_out, features
            return prev_out
        else:
            # Forward prop with sampled Gs
            for li in range(len(self._module_list)):
                curr_out = []
                for j in range(len(self._module_list[li])):
                    gind = j if not self._chain else 0
                    # Dim: #Batch x C
                    module_in_wt = sampled_g[li + 1][gind]
                    # Module input weights rearranged to match inputs
                    module_in_wt = module_in_wt.transpose(0, 1)
                    add_dims = prev_out[0].dim() + 1 - module_in_wt.dim()
                    module_in_wt = module_in_wt.view(*module_in_wt.shape,
                                                     *([1] * add_dims))
                    module_in_wt = module_in_wt.expand(
                        len(prev_out), *prev_out[0].shape)
                    # Weighted sum of the previous layer's outputs.
                    module_in = sum([
                        module_in_wt[i] * prev_out[i]
                        for i in range(len(prev_out))
                    ])
                    mod = self._module_list[li][j]
                    curr_out.append(mod(module_in))
                prev_out = curr_out
            # Output modules (with sampled Gs)
            # NOTE(review): if self._end_modules is None here, `features` is
            # never assigned and return_feat=True would raise — confirm end
            # modules are always present in the gated configuration.
            if self._end_modules is not None:
                li = self.num_layers - 1
                if t_not_set or self.single_head:
                    # Dim: #Batch x C
                    module_in_wt = sampled_g[li + 1][0]
                    # Module input weights rearranged to match inputs
                    module_in_wt = module_in_wt.transpose(0, 1)
                    add_dims = prev_out[0].dim() + 1 - module_in_wt.dim()
                    module_in_wt = module_in_wt.view(*module_in_wt.shape,
                                                     *([1] * add_dims))
                    module_in_wt = module_in_wt.expand(
                        len(prev_out), *prev_out[0].shape)
                    module_in = sum([
                        module_in_wt[i] * prev_out[i]
                        for i in range(len(prev_out))
                    ])
                    features = module_in
                    prev_out = self._end_modules[0](module_in)
                else:
                    curr_out = []
                    for bi, tid in enumerate(t):
                        # Dim: #Batch x C
                        gind = tid if not self._chain else 0
                        module_in_wt = sampled_g[li + 1][gind]
                        # Module input weights rearranged to match inputs
                        module_in_wt = module_in_wt.transpose(0, 1)
                        add_dims = prev_out[0].dim() + 1 - module_in_wt.dim()
                        module_in_wt = module_in_wt.view(
                            *module_in_wt.shape, *([1] * add_dims))
                        module_in_wt = module_in_wt.expand(
                            len(prev_out), *prev_out[0].shape)
                        module_in = sum([
                            module_in_wt[i] * prev_out[i]
                            for i in range(len(prev_out))
                        ])
                        features = module_in
                        mod = self._end_modules[tid]
                        curr_out.append(mod(module_in[bi:bi + 1]))
                    prev_out = curr_out
                    prev_out = torch.cat(prev_out, 0)
            if return_feat:
                return prev_out, features
            return prev_out
class CompositionalModel(nn.Module):
    """Base class for compositional (attribute, object) recognition models.

    Precomputes the full list of candidate (attr, obj) index pairs on GPU and
    implements the shared train/val forward logic. Subclasses are expected to
    provide `self.compose(attrs, objs)` (builds gating/composition tensors)
    and `self.comp_network` (the scoring network), plus `self.train_forward`.
    """
    def __init__(self, dset, args):
        super(CompositionalModel, self).__init__()
        self.args = args
        self.dset = dset
        # precompute validation pairs
        attrs, objs = zip(*self.dset.pairs)
        attrs = [dset.attr2idx[attr] for attr in attrs]
        objs = [dset.obj2idx[obj] for obj in objs]
        self.val_attrs = torch.LongTensor(attrs).cuda()
        self.val_objs = torch.LongTensor(objs).cuda()
    def train_forward_softmax(self, x):
        """Training step: cross-entropy over one positive + sampled negatives.

        Unpacks the batch (image, positive attr/obj, negatives, auxiliary
        attrs), scores the image against every sampled pair via the composed
        network, and treats index 0 (the positive) as the target class.
        Returns (loss, all_losses dict, accuracy, (pair_scores, features)).
        """
        img, attrs, objs = x[0], x[1], x[2]
        neg_attrs, neg_objs = x[4], x[5]
        # NOTE(review): inv_attrs/comm_attrs are unpacked but unused here.
        inv_attrs, comm_attrs = x[6], x[7]
        # Column 0 holds the ground-truth pair; the rest are negatives.
        sampled_attrs = torch.cat((attrs.unsqueeze(1), neg_attrs), 1)
        sampled_objs = torch.cat((objs.unsqueeze(1), neg_objs), 1)
        img_ind = torch.arange(sampled_objs.shape[0]).unsqueeze(1).repeat(
            1, sampled_attrs.shape[1])
        flat_sampled_attrs = sampled_attrs.view(-1)
        flat_sampled_objs = sampled_objs.view(-1)
        flat_img_ind = img_ind.view(-1)
        # Target is always class 0: the positive pair sits in column 0.
        labels = torch.zeros_like(sampled_attrs[:, 0]).long()
        self.composed_g = self.compose(flat_sampled_attrs, flat_sampled_objs)
        cls_scores, feat = self.comp_network(
            img[flat_img_ind], self.composed_g, return_feat=True)
        pair_scores = cls_scores[:, :1]
        pair_scores = pair_scores.view(*sampled_attrs.shape)
        loss = 0
        loss_cls = F.cross_entropy(pair_scores, labels)
        loss += loss_cls
        # Auxiliary losses are reported as zeros (kept for logging parity).
        loss_obj = torch.FloatTensor([0])
        loss_attr = torch.FloatTensor([0])
        loss_sparse = torch.FloatTensor([0])
        loss_unif = torch.FloatTensor([0])
        loss_aux = torch.FloatTensor([0])
        acc = (pair_scores.argmax(1) == labels).sum().float() / float(
            len(labels))
        all_losses = {}
        all_losses['total_loss'] = loss
        all_losses['main_loss'] = loss_cls
        all_losses['aux_loss'] = loss_aux
        all_losses['obj_loss'] = loss_obj
        all_losses['attr_loss'] = loss_attr
        all_losses['sparse_loss'] = loss_sparse
        all_losses['unif_loss'] = loss_unif
        return loss, all_losses, acc, (pair_scores, feat)
    def val_forward(self, x):
        """Validation step: score every candidate (attr, obj) pair per image.

        Iterates over the candidate pairs in chunks (currently one chunk of
        all pairs), expanding the image batch against each chunk's composed
        gating, and collects per-pair scores and L2-normalised features.
        Returns (None, {(attr, obj): scores}).
        """
        img = x[0]
        batch_size = img.shape[0]
        pair_scores = torch.zeros(batch_size, len(self.val_attrs))
        pair_feats = torch.zeros(batch_size, len(self.val_attrs),
                                 self.args.emb_dim)
        pair_bs = len(self.val_attrs)
        for pi in range(math.ceil(len(self.val_attrs) / pair_bs)):
            self.compose_g = self.compose(
                self.val_attrs[pi * pair_bs:(pi + 1) * pair_bs],
                self.val_objs[pi * pair_bs:(pi + 1) * pair_bs])
            compose_g = self.compose_g
            # Tile the image batch once per candidate pair in this chunk.
            expanded_im = img.unsqueeze(1).repeat(
                1, compose_g[0][0].shape[0],
                *tuple([1] * (img.dim() - 1))).view(-1, *img.shape[1:])
            expanded_compose_g = [[
                g.unsqueeze(0).repeat(batch_size, *tuple([1] * g.dim())).view(
                    -1, *g.shape[1:]) for g in layer_g
            ] for layer_g in compose_g]
            this_pair_scores, this_feat = self.comp_network(
                expanded_im, expanded_compose_g, return_feat=True)
            # L2-normalise the features before storing them.
            featnorm = torch.norm(this_feat, p=2, dim=-1)
            this_feat = this_feat.div(
                featnorm.unsqueeze(-1).expand_as(this_feat))
            this_pair_scores = this_pair_scores[:, :1].view(batch_size, -1)
            this_feat = this_feat.view(batch_size, -1, self.args.emb_dim)
            pair_scores[:, pi * pair_bs:pi * pair_bs +
                        this_pair_scores.shape[1]] = this_pair_scores[:, :]
            pair_feats[:, pi * pair_bs:pi * pair_bs +
                       this_pair_scores.shape[1], :] = this_feat[:]
        scores = {}
        feats = {}
        for i, (attr, obj) in enumerate(self.dset.pairs):
            scores[(attr, obj)] = pair_scores[:, i]
            feats[(attr, obj)] = pair_feats[:, i]
        # return None, (scores, feats)
        return None, scores
    def forward(self, x, with_grad=False):
        """Dispatch to train_forward (training) or val_forward (eval).

        In eval mode, gradients are disabled unless with_grad is True.
        Returns (loss, pred).
        """
        if self.training:
            loss, loss_aux, acc, pred = self.train_forward(x)
        else:
            loss_aux = torch.Tensor([0])
            loss = torch.Tensor([0])
            if not with_grad:
                with torch.no_grad():
                    acc, pred = self.val_forward(x)
            else:
                acc, pred = self.val_forward(x)
        # return loss, loss_aux, acc, pred
        return loss, pred
class GatedGeneralNN(CompositionalModel):
    """Task-modular network: attr/obj embeddings drive a gating network that
    selects module combinations inside a modular scoring network."""
    def __init__(self,
                 dset,
                 args,
                 num_layers=2,
                 num_modules_per_layer=3,
                 stoch_sample=False,
                 use_full_model=False,
                 num_classes=[2],
                 gater_type='general'):
        """Build embedders plus the gated modular network.

        NOTE(review): ``num_classes`` uses a mutable default list; it is not
        mutated here but callers should not rely on its identity.
        """
        CompositionalModel.__init__(self, dset, args)
        self.train_forward = self.train_forward_softmax
        self.compose_type = 'nn' #todo: could be different
        # Gating input dim depends on which pretrained init is used:
        # 300-d word embeddings, 512-d SVM classifier weights, else 128.
        gating_in_dim = 128
        if args.emb_init:
            gating_in_dim = 300
        elif args.clf_init:
            gating_in_dim = 512
        if self.compose_type == 'nn':
            # Gating network consumes concatenated [attr; obj] embeddings.
            tdim = gating_in_dim * 2
            inter_tdim = self.args.embed_rank
            # Change this to allow only obj, only attr gatings
            # Extra row + padding_idx gives a fixed all-zero "absent" entry.
            self.attr_embedder = nn.Embedding(
                len(dset.attrs) + 1,
                gating_in_dim,
                padding_idx=len(dset.attrs),
            )
            self.obj_embedder = nn.Embedding(
                len(dset.objs) + 1,
                gating_in_dim,
                padding_idx=len(dset.objs),
            )
            # initialize the weights of the embedders with word embeddings or
            # per-class SVM weights (padding row excluded).
            if args.emb_init:
                pretrained_weight = load_word_embeddings(
                    args.emb_init, dset.attrs)
                self.attr_embedder.weight[:-1, :].data.copy_(pretrained_weight)
                pretrained_weight = load_word_embeddings(
                    args.emb_init, dset.objs)
                self.obj_embedder.weight.data[:-1, :].copy_(pretrained_weight)
            elif args.clf_init:
                # Load one pretrained linear SVM per attribute / object and
                # use its coefficient vector as the embedding row.
                for idx, attr in enumerate(dset.attrs):
                    at_id = self.dset.attr2idx[attr]
                    weight = torch.load(
                        '%s/svm/attr_%d' % (args.data_dir,
                                            at_id)).coef_.squeeze()
                    self.attr_embedder.weight[idx].data.copy_(
                        torch.from_numpy(weight))
                for idx, obj in enumerate(dset.objs):
                    obj_id = self.dset.obj2idx[obj]
                    weight = torch.load(
                        '%s/svm/obj_%d' % (args.data_dir,
                                           obj_id)).coef_.squeeze()
                    self.obj_embedder.weight[idx].data.copy_(
                        torch.from_numpy(weight))
            else:
                # Fallback: one-hot attribute embedding + GloVe object
                # embedding from a hard-coded path.
                n_attr = len(dset.attrs)
                gating_in_dim = 300
                tdim = gating_in_dim * 2 + n_attr
                self.attr_embedder = nn.Embedding(
                    n_attr,
                    n_attr,
                )
                self.attr_embedder.weight.data.copy_(
                    torch.from_numpy(np.eye(n_attr)))
                self.obj_embedder = nn.Embedding(
                    len(dset.objs) + 1,
                    gating_in_dim,
                    padding_idx=len(dset.objs),
                )
                pretrained_weight = load_word_embeddings(
                    '/home/ubuntu/workspace/czsl/data/glove/glove.6B.300d.txt', dset.objs)
                self.obj_embedder.weight.data[:-1, :].copy_(pretrained_weight)
        else:
            raise (NotImplementedError)
        # Build the modular scoring network and its gating network.
        self.comp_network, self.gating_network, self.nummods, _ = modular_general(
            num_layers=num_layers,
            num_modules_per_layer=num_modules_per_layer,
            feat_dim=dset.feat_dim,
            inter_dim=args.emb_dim,
            stoch_sample=stoch_sample,
            use_full_model=use_full_model,
            tdim=tdim,
            inter_tdim=inter_tdim,
            gater_type=gater_type,
        )
        if args.static_inp:
            # Freeze the (pretrained) embedders.
            for param in self.attr_embedder.parameters():
                param.requires_grad = False
            for param in self.obj_embedder.parameters():
                param.requires_grad = False
    def compose(self, attrs, objs):
        """Map attr/obj id tensors to a per-layer gating structure by feeding
        the concatenated embeddings through the gating network."""
        obj_wt = self.obj_embedder(objs)
        if self.compose_type == 'nn':
            attr_wt = self.attr_embedder(attrs)
            inp_wts = torch.cat([attr_wt, obj_wt], 1)  # 2D
        else:
            raise (NotImplementedError)
        composed_g, composed_g_wt = self.gating_network(
            inp_wts, return_additional=True)
        return composed_g
class GeneralNormalizedNN(nn.Module):
    """Bank of MLP modules to be re-wired by ``modularize_network``.

    The module banks are stored in plain Python lists on purpose: they are
    not registered as submodules here; ``modularize_network`` collects them
    into the final gated network.
    """
    def __init__(self, num_layers, num_modules_per_layer, in_dim, inter_dim):
        """Create ``num_layers`` modular layers of ``num_modules_per_layer``
        Linear+BatchNorm+ReLU modules plus a scalar-output head."""
        nn.Module.__init__(self)
        # Identity stem: input features pass through unchanged.
        self.start_modules = [nn.Sequential()]
        # First modular layer projects in_dim -> inter_dim.
        self.layer1 = [[
            nn.Sequential(
                nn.Linear(in_dim, inter_dim), nn.BatchNorm1d(inter_dim),
                nn.ReLU()) for _ in range(num_modules_per_layer)
        ]]
        # Remaining layers (created only when num_layers > 1) stay at inter_dim.
        if num_layers > 1:
            self.layer2 = [[
                nn.Sequential(
                    nn.BatchNorm1d(inter_dim), nn.Linear(inter_dim, inter_dim),
                    nn.BatchNorm1d(inter_dim), nn.ReLU())
                for _ in range(num_modules_per_layer)
            ] for _ in range(num_layers - 1)]
        # Identity "pooling" stage and a single scalar-score head.
        self.avgpool = nn.Sequential()
        self.fc = [
            nn.Sequential(nn.BatchNorm1d(inter_dim), nn.Linear(inter_dim, 1))
        ]
class GeneralGatingNN(nn.Module):
    """Maps a task descriptor to per-module soft gating weights.

    Produces, for every module in every layer, a softmax distribution over
    the modules of the previous layer (its possible inputs).
    """
    def __init__(
        self,
        num_mods,
        tdim,
        inter_tdim,
        randinit=False,
    ):
        """Set up the descriptor encoder and the flat gating output head.

        :num_mods: list with the module count of each layer (incl. start/end)
        :tdim: dimensionality of the task descriptor input
        :inter_tdim: hidden size of the descriptor encoder
        :randinit: if False, the output head starts at zero (uniform gating)
        """
        nn.Module.__init__(self)
        self._num_mods = num_mods
        self._tdim = tdim
        self._inter_tdim = inter_tdim
        task_outdim = self._inter_tdim
        self.task_linear1 = nn.Linear(self._tdim, task_outdim, bias=False)
        self.task_bn1 = nn.BatchNorm1d(task_outdim)
        # NOTE(review): task_linear2/task_bn2/joint_linear1/joint_bn1 are
        # created (and trained via weight decay) but never used in forward().
        self.task_linear2 = nn.Linear(task_outdim, task_outdim, bias=False)
        self.task_bn2 = nn.BatchNorm1d(task_outdim)
        self.joint_linear1 = nn.Linear(task_outdim, task_outdim, bias=False)
        self.joint_bn1 = nn.BatchNorm1d(task_outdim)
        # num_out[i][j] = number of gating logits for module j of layer i,
        # i.e. the module count of the previous layer (1 for the first layer).
        num_out = [[1]] + [[
            self._num_mods[i - 1] for _ in range(self._num_mods[i])
        ] for i in range(1, len(self._num_mods))]
        # out_ind[i][j] = [start, end) slice of the flat output vector that
        # holds module (i, j)'s logits.
        count = 0
        out_ind = []
        for i in range(len(num_out)):
            this_out_ind = []
            for j in range(len(num_out[i])):
                this_out_ind.append([count, count + num_out[i][j]])
                count += num_out[i][j]
            out_ind.append(this_out_ind)
        self.out_ind = out_ind
        self.out_count = count
        # Single linear head emitting all gating logits at once.
        self.joint_linear2 = nn.Linear(task_outdim, count, bias=False)
        def apply_init(m):
            # Gaussian init for linears, identity-style init for batch norms.
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        for m in self.modules():
            if isinstance(m, nn.ModuleList):
                for subm in m:
                    if isinstance(subm, nn.ModuleList):
                        for subsubm in subm:
                            apply_init(subsubm)
                    else:
                        apply_init(subm)
            else:
                apply_init(m)
        if not randinit:
            # Zero head => uniform softmax gating at the start of training.
            self.joint_linear2.weight.data.zero_()
    def forward(self, tdesc=None):
        """Compute gating distributions from a task descriptor.

        :tdesc: (batch, tdim) descriptor, or None for no gating
        :returns: (prob_g, gating_wt) — per-layer lists of softmax gatings
            and their raw logits — or None when tdesc is None
        """
        if tdesc is None:
            return None
        x = tdesc
        task_embeds1 = F.relu(self.task_bn1(self.task_linear1(x)))
        joint_embed = task_embeds1
        joint_embed = self.joint_linear2(joint_embed)
        # Slice the flat logits back into the per-module layout.
        joint_embed = [[
            joint_embed[:, self.out_ind[i][j][0]:self.out_ind[i][j][1]]
            for j in range(len(self.out_ind[i]))
        ] for i in range(len(self.out_ind))]
        gating_wt = joint_embed
        prob_g = [[F.softmax(wt, -1) for wt in gating_wt[i]]
                  for i in range(len(gating_wt))]
        return prob_g, gating_wt
def modularize_network(
        model,
        stoch_sample=False,
        use_full_model=False,
        tdim=200,
        inter_tdim=200,
        gater_type='general',
        single_head=True,
        num_classes=[2],
        num_lookup_gating=10,
):
    """Split a module-bank model into a gated modular net plus a gating sampler.

    :model: object exposing ``start_modules``, ``layer1``/``layer2``/...,
        ``avgpool`` and ``fc`` banks (e.g. ``GeneralNormalizedNN``)
    :returns: (gated_net, gating_sampler, num_module_list, fan_in)

    NOTE(review): ``gater_type``, ``num_classes`` and ``num_lookup_gating``
    are accepted but unused in this implementation.
    """
    # Copy start modules and end modules
    start_modules = model.start_modules
    end_modules = [
        nn.Sequential(model.avgpool, Flatten(), fci) for fci in model.fc
    ]
    # Create module_list as list of lists [[layer1 modules], [layer2 modules], ...]
    # by probing layer1, layer2, ... until one is missing.
    module_list = []
    li = 1
    while True:
        if hasattr(model, 'layer{}'.format(li)):
            module_list.extend(getattr(model, 'layer{}'.format(li)))
            li += 1
        else:
            break
    num_module_list = [len(start_modules)] + [len(layer) for layer in module_list] \
        + [len(end_modules)]
    gated_model_func = GatedModularNet
    gated_net = gated_model_func(
        module_list,
        start_modules=start_modules,
        end_modules=end_modules,
        single_head=single_head)
    gater_func = GeneralGatingNN
    gater = gater_func(
        num_mods=num_module_list, tdim=tdim, inter_tdim=inter_tdim)
    fan_in = num_module_list
    if use_full_model:
        # No gater: all modules are used (dense model).
        gater = None
    # Create Gating Sampler
    gating_sampler = GatingSampler(gater=gater, stoch_sample=stoch_sample)
    return gated_net, gating_sampler, num_module_list, fan_in
def modular_general(
        num_layers,
        num_modules_per_layer,
        feat_dim,
        inter_dim,
        stoch_sample=False,
        use_full_model=False,
        num_classes=[2],
        single_head=True,
        gater_type='general',
        tdim=300,
        inter_tdim=300,
        num_lookup_gating=10,
):
    """Convenience builder: create a ``GeneralNormalizedNN`` module bank and
    modularize it into (gated_net, gating_sampler, num_module_list, fan_in).
    """
    # First create a ResNext model
    # NOTE(review): comment kept from upstream; this actually builds the
    # fully-connected GeneralNormalizedNN, not a ResNeXt.
    model = GeneralNormalizedNN(
        num_layers,
        num_modules_per_layer,
        feat_dim,
        inter_dim,
    )
    # Modularize the model and create gating funcs
    gated_net, gating_sampler, num_module_list, fan_in = modularize_network(
        model,
        stoch_sample,
        use_full_model=use_full_model,
        gater_type=gater_type,
        tdim=tdim,
        inter_tdim=inter_tdim,
        single_head=single_head,
        num_classes=num_classes,
        num_lookup_gating=num_lookup_gating,
    )
    return gated_net, gating_sampler, num_module_list, fan_in | 23,151 | 34.454824 | 90 | py |
czsl | czsl-main/models/compcos.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .word_embedding import load_word_embeddings
from .common import MLP
from itertools import product
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def compute_cosine_similarity(names, weights, return_dict=True):
    """Pairwise cosine similarity between the rows of ``weights``.

    :names: labels for the rows of ``weights`` (len(names) == weights.shape[0])
    :weights: (N, D) tensor of embeddings
    :return_dict: if True, return {(name_i, name_j): similarity};
        otherwise return (list of (name_i, name_j), (N, N) CPU tensor)
    """
    pair_names = list(product(names, names))
    # Unit-normalize rows so the dot product equals cosine similarity.
    unit_rows = F.normalize(weights, dim=1)
    sim_matrix = torch.mm(unit_rows, unit_rows.t())
    if not return_dict:
        return pair_names, sim_matrix.to('cpu')
    return {
        (row_name, col_name): sim_matrix[i, j].item()
        for i, row_name in enumerate(names)
        for j, col_name in enumerate(names)
    }
class CompCos(nn.Module):
    """Compositional Cosine model (CompCos).

    Scores images against (attribute, object) compositions by cosine
    similarity between an image embedding and a composed pair embedding.
    Supports a closed-world mode and an open-world mode with feasibility
    margins for unseen pairs.
    """
    def __init__(self, dset, args):
        super(CompCos, self).__init__()
        self.args = args
        self.dset = dset
        def get_all_ids(relevant_pairs):
            # Precompute id tensors (attr, obj, pair index) for a pair list.
            attrs, objs = zip(*relevant_pairs)
            attrs = [dset.attr2idx[attr] for attr in attrs]
            objs = [dset.obj2idx[obj] for obj in objs]
            pairs = [a for a in range(len(relevant_pairs))]
            attrs = torch.LongTensor(attrs).to(device)
            objs = torch.LongTensor(objs).to(device)
            pairs = torch.LongTensor(pairs).to(device)
            return attrs, objs, pairs
        # Validation: ids for every pair in the dataset.
        self.val_attrs, self.val_objs, self.val_pairs = get_all_ids(self.dset.pairs)
        # for individual projections
        self.uniq_attrs, self.uniq_objs = torch.arange(len(self.dset.attrs)).long().to(device), \
                                          torch.arange(len(self.dset.objs)).long().to(device)
        self.factor = 2
        # Temperature applied to cosine scores before cross-entropy.
        self.scale = self.args.cosine_scale
        if dset.open_world:
            self.train_forward = self.train_forward_open
            self.known_pairs = dset.train_pairs
            seen_pair_set = set(self.known_pairs)
            mask = [1 if pair in seen_pair_set else 0 for pair in dset.pairs]
            # 1.0 for seen pairs, 0.0 for unseen (float via the * 1.).
            self.seen_mask = torch.BoolTensor(mask).to(device) * 1.
            # ``activated`` flips to True once update_feasibility() runs.
            self.activated = False
            # Init feasibility-related variables
            self.attrs = dset.attrs
            self.objs = dset.objs
            self.possible_pairs = dset.pairs
            self.validation_pairs = dset.val_pairs
            self.feasibility_margin = (1-self.seen_mask).float()
            self.epoch_max_margin = self.args.epoch_max_margin
            self.cosine_margin_factor = -args.margin
            # Instantiate attribute-object relations, needed just to evaluate mined pairs
            self.obj_by_attrs_train = {k: [] for k in self.attrs}
            for (a, o) in self.known_pairs:
                self.obj_by_attrs_train[a].append(o)
            # Instantiate object-attribute relations, needed just to evaluate mined pairs
            self.attrs_by_obj_train = {k: [] for k in self.objs}
            for (a, o) in self.known_pairs:
                self.attrs_by_obj_train[o].append(a)
        else:
            self.train_forward = self.train_forward_closed
        # Precompute training compositions
        if args.train_only:
            self.train_attrs, self.train_objs, self.train_pairs = get_all_ids(self.dset.train_pairs)
        else:
            self.train_attrs, self.train_objs, self.train_pairs = self.val_attrs, self.val_objs, self.val_pairs
        # fc_emb may be a comma-separated string or a single value.
        # NOTE(review): bare except — anything without .split falls through.
        try:
            self.args.fc_emb = self.args.fc_emb.split(',')
        except:
            self.args.fc_emb = [self.args.fc_emb]
        layers = []
        for a in self.args.fc_emb:
            a = int(a)
            layers.append(a)
        self.image_embedder = MLP(dset.feat_dim, int(args.emb_dim), relu=args.relu, num_layers=args.nlayers,
                                  dropout=self.args.dropout,
                                  norm=self.args.norm, layers=layers)
        # Fixed
        self.composition = args.composition
        input_dim = args.emb_dim
        self.attr_embedder = nn.Embedding(len(dset.attrs), input_dim)
        self.obj_embedder = nn.Embedding(len(dset.objs), input_dim)
        # init with word embeddings
        if args.emb_init:
            pretrained_weight = load_word_embeddings(args.emb_init, dset.attrs)
            self.attr_embedder.weight.data.copy_(pretrained_weight)
            pretrained_weight = load_word_embeddings(args.emb_init, dset.objs)
            self.obj_embedder.weight.data.copy_(pretrained_weight)
        # static inputs: freeze the embedders.
        if args.static_inp:
            for param in self.attr_embedder.parameters():
                param.requires_grad = False
            for param in self.obj_embedder.parameters():
                param.requires_grad = False
        # Composition MLP: [attr_emb; obj_emb] -> pair embedding.
        self.projection = nn.Linear(input_dim * 2, args.emb_dim)
    def freeze_representations(self):
        """Freeze image embedder and both word embedders."""
        print('Freezing representations')
        for param in self.image_embedder.parameters():
            param.requires_grad = False
        for param in self.attr_embedder.parameters():
            param.requires_grad = False
        for param in self.obj_embedder.parameters():
            param.requires_grad = False
    def compose(self, attrs, objs):
        """Compose attr/obj id tensors into L2-normalized pair embeddings."""
        attrs, objs = self.attr_embedder(attrs), self.obj_embedder(objs)
        inputs = torch.cat([attrs, objs], 1)
        output = self.projection(inputs)
        output = F.normalize(output, dim=1)
        return output
    def compute_feasibility(self):
        """Score the feasibility of each unseen pair from embedding similarity.

        A pair (a, o) gets the average of its best object-similarity and
        best attribute-similarity to pairs seen in training. Seen pairs keep
        score 1; the returned tensor is zeroed on seen positions.
        """
        # NOTE(review): hard-coded 'cuda' here (rest of the class uses
        # ``device``), so open-world feasibility requires a GPU.
        obj_embeddings = self.obj_embedder(torch.arange(len(self.objs)).long().to('cuda'))
        obj_embedding_sim = compute_cosine_similarity(self.objs, obj_embeddings,
                                                      return_dict=True)
        attr_embeddings = self.attr_embedder(torch.arange(len(self.attrs)).long().to('cuda'))
        attr_embedding_sim = compute_cosine_similarity(self.attrs, attr_embeddings,
                                                       return_dict=True)
        feasibility_scores = self.seen_mask.clone().float()
        for a in self.attrs:
            for o in self.objs:
                if (a, o) not in self.known_pairs:
                    idx = self.dset.all_pair2idx[(a, o)]
                    score_obj = self.get_pair_scores_objs(a, o, obj_embedding_sim)
                    score_attr = self.get_pair_scores_attrs(a, o, attr_embedding_sim)
                    score = (score_obj + score_attr) / 2
                    feasibility_scores[idx] = score
        self.feasibility_scores = feasibility_scores
        return feasibility_scores * (1 - self.seen_mask.float())
    def get_pair_scores_objs(self, attr, obj, obj_embedding_sim):
        """Max similarity of ``obj`` to any other object seen with ``attr``."""
        score = -1.
        for o in self.objs:
            if o!=obj and attr in self.attrs_by_obj_train[o]:
                temp_score = obj_embedding_sim[(obj,o)]
                if temp_score>score:
                    score=temp_score
        return score
    def get_pair_scores_attrs(self, attr, obj, attr_embedding_sim):
        """Max similarity of ``attr`` to any other attribute seen with ``obj``."""
        score = -1.
        for a in self.attrs:
            if a != attr and obj in self.obj_by_attrs_train[a]:
                temp_score = attr_embedding_sim[(attr, a)]
                if temp_score > score:
                    score = temp_score
        return score
    def update_feasibility(self,epoch):
        """Recompute feasibility margins, ramped linearly over training epochs."""
        self.activated = True
        feasibility_scores = self.compute_feasibility()
        self.feasibility_margin = min(1.,epoch/self.epoch_max_margin) * \
                                  (self.cosine_margin_factor*feasibility_scores.float().to(device))
    def val_forward(self, x):
        """Score every pair for each image; returns (None, {pair: scores})."""
        img = x[0]
        img_feats = self.image_embedder(img)
        img_feats_normed = F.normalize(img_feats, dim=1)
        pair_embeds = self.compose(self.val_attrs, self.val_objs).permute(1, 0)  # Evaluate all pairs
        score = torch.matmul(img_feats_normed, pair_embeds)
        scores = {}
        for itr, pair in enumerate(self.dset.pairs):
            scores[pair] = score[:, self.dset.all_pair2idx[pair]]
        return None, scores
    def val_forward_with_threshold(self, x, th=0.):
        """Like ``val_forward`` but hard-masks pairs below feasibility ``th``
        (their score is forced to -1)."""
        img = x[0]
        img_feats = self.image_embedder(img)
        img_feats_normed = F.normalize(img_feats, dim=1)
        pair_embeds = self.compose(self.val_attrs, self.val_objs).permute(1, 0)  # Evaluate all pairs
        score = torch.matmul(img_feats_normed, pair_embeds)
        # Note: Pairs are already aligned here
        mask = (self.feasibility_scores>=th).float()
        score = score*mask + (1.-mask)*(-1.)
        scores = {}
        for itr, pair in enumerate(self.dset.pairs):
            scores[pair] = score[:, self.dset.all_pair2idx[pair]]
        return None, scores
    def train_forward_open(self, x):
        """Open-world training step: margin-augmented cosine cross-entropy."""
        img, attrs, objs, pairs = x[0], x[1], x[2], x[3]
        img_feats = self.image_embedder(img)
        pair_embed = self.compose(self.train_attrs, self.train_objs).permute(1, 0)
        img_feats_normed = F.normalize(img_feats, dim=1)
        pair_pred = torch.matmul(img_feats_normed, pair_embed)
        if self.activated:
            # Add (negative) feasibility margins on unseen pairs.
            pair_pred += (1 - self.seen_mask) * self.feasibility_margin
            loss_cos = F.cross_entropy(self.scale * pair_pred, pairs)
        else:
            # Warm-up: effectively exclude unseen pairs via a -10 offset.
            pair_pred = pair_pred * self.seen_mask + (1 - self.seen_mask) * (-10)
            loss_cos = F.cross_entropy(self.scale * pair_pred, pairs)
        return loss_cos.mean(), None
    def train_forward_closed(self, x):
        """Closed-world training step: plain scaled cosine cross-entropy."""
        img, attrs, objs, pairs = x[0], x[1], x[2], x[3]
        img_feats = self.image_embedder(img)
        pair_embed = self.compose(self.train_attrs, self.train_objs).permute(1, 0)
        img_feats_normed = F.normalize(img_feats, dim=1)
        pair_pred = torch.matmul(img_feats_normed, pair_embed)
        loss_cos = F.cross_entropy(self.scale * pair_pred, pairs)
        return loss_cos.mean(), None
    def forward(self, x):
        """Dispatch to the bound train_forward or (no-grad) val_forward."""
        if self.training:
            loss, pred = self.train_forward(x)
        else:
            with torch.no_grad():
                loss, pred = self.val_forward(x)
        return loss, pred
| 10,133 | 36.533333 | 111 | py |
czsl | czsl-main/models/graph_method.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .common import MLP
from .gcn import GCN, GCNII
from .word_embedding import load_word_embeddings
import scipy.sparse as sp
def adj_to_edges(adj):
    """Return the (row, col) index pairs of every non-zero adjacency entry."""
    row_idx, col_idx = np.nonzero(adj)
    return [(r, c) for r, c in zip(row_idx.tolist(), col_idx.tolist())]
def edges_to_adj(edges, n):
    """Build an n x n float32 sparse (COO) adjacency matrix with weight 1.0
    at every (row, col) position listed in ``edges``."""
    edge_arr = np.array(edges)
    weights = np.ones(len(edge_arr))
    return sp.coo_matrix((weights, (edge_arr[:, 0], edge_arr[:, 1])),
                         shape=(n, n), dtype='float32')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class GraphFull(nn.Module):
    """Graph embedding model: a GCN over a graph whose nodes are all
    attributes, all objects and all (attr, obj) pair compositions.

    Node layout in the embedding matrix: [attrs | objs | pairs], so pair
    nodes start at index num_attrs + num_objs.
    """
    def __init__(self, dset, args):
        super(GraphFull, self).__init__()
        self.args = args
        self.dset = dset
        self.val_forward = self.val_forward_dotpr
        self.train_forward = self.train_forward_normal
        # Image Embedder
        self.num_attrs, self.num_objs, self.num_pairs = len(dset.attrs), len(dset.objs), len(dset.pairs)
        self.pairs = dset.pairs
        if self.args.train_only:
            # Indices of the training pairs' nodes (offset past attr/obj nodes).
            train_idx = []
            for current in dset.train_pairs:
                train_idx.append(dset.all_pair2idx[current]+self.num_attrs+self.num_objs)
            self.train_idx = torch.LongTensor(train_idx).to(device)
        # fc_emb is a comma-separated list of hidden-layer sizes for the MLP.
        self.args.fc_emb = self.args.fc_emb.split(',')
        layers = []
        for a in self.args.fc_emb:
            a = int(a)
            layers.append(a)
        if args.nlayers:
            self.image_embedder = MLP(dset.feat_dim, args.emb_dim, num_layers= args.nlayers, dropout = self.args.dropout,
                                      norm = self.args.norm, layers = layers, relu = True)
        all_words = list(self.dset.attrs) + list(self.dset.objs)
        # displacement = number of primitive (attr + obj) nodes; pair nodes follow.
        self.displacement = len(all_words)
        self.obj_to_idx = {word: idx for idx, word in enumerate(self.dset.objs)}
        self.attr_to_idx = {word: idx for idx, word in enumerate(self.dset.attrs)}
        if args.graph_init is not None:
            # Load precomputed node embeddings and adjacency from disk.
            path = args.graph_init
            graph = torch.load(path)
            embeddings = graph['embeddings'].to(device)
            adj = graph['adj']
            self.embeddings = embeddings
        else:
            embeddings = self.init_embeddings(all_words).to(device)
            adj = self.adj_from_pairs()
            self.embeddings = embeddings
        hidden_layers = self.args.gr_emb
        if args.gcn_type == 'gcn':
            self.gcn = GCN(adj, self.embeddings.shape[1], args.emb_dim, hidden_layers)
        else:
            self.gcn = GCNII(adj, self.embeddings.shape[1], args.emb_dim, args.hidden_dim, args.gcn_nlayers, lamda = 0.5, alpha = 0.1, variant = False)
    def init_embeddings(self, all_words):
        """Build initial node embeddings: word embeddings for primitives and
        the attr/obj average for each pair node."""
        def get_compositional_embeddings(embeddings, pairs):
            # Getting compositional embeddings from base embeddings
            composition_embeds = []
            for (attr, obj) in pairs:
                attr_embed = embeddings[self.attr_to_idx[attr]]
                obj_embed = embeddings[self.obj_to_idx[obj]+self.num_attrs]
                composed_embed = (attr_embed + obj_embed) / 2
                composition_embeds.append(composed_embed)
            composition_embeds = torch.stack(composition_embeds)
            print('Compositional Embeddings are ', composition_embeds.shape)
            return composition_embeds
        # init with word embeddings
        embeddings = load_word_embeddings(self.args.emb_init, all_words)
        composition_embeds = get_compositional_embeddings(embeddings, self.pairs)
        full_embeddings = torch.cat([embeddings, composition_embeds], dim=0)
        return full_embeddings
    def update_dict(self, wdict, row,col,data):
        """Append one (row, col, data) entry to a COO weight dict."""
        wdict['row'].append(row)
        wdict['col'].append(col)
        wdict['data'].append(data)
    def adj_from_pairs(self):
        """Build the sparse adjacency matrix: self-loops on every node plus
        symmetric attr<->obj, pair<->attr and pair<->obj edges."""
        def edges_from_pairs(pairs):
            weight_dict = {'data':[],'row':[],'col':[]}
            # Self-loops on the primitive (attr/obj) nodes.
            for i in range(self.displacement):
                self.update_dict(weight_dict,i,i,1.)
            for idx, (attr, obj) in enumerate(pairs):
                attr_idx, obj_idx = self.attr_to_idx[attr], self.obj_to_idx[obj] + self.num_attrs
                # attr <-> obj edges for this pair.
                self.update_dict(weight_dict, attr_idx, obj_idx, 1.)
                self.update_dict(weight_dict, obj_idx, attr_idx, 1.)
                # Pair node: self-loop plus symmetric edges to its primitives.
                node_id = idx + self.displacement
                self.update_dict(weight_dict,node_id,node_id,1.)
                self.update_dict(weight_dict, node_id, attr_idx, 1.)
                self.update_dict(weight_dict, node_id, obj_idx, 1.)
                self.update_dict(weight_dict, attr_idx, node_id, 1.)
                self.update_dict(weight_dict, obj_idx, node_id, 1.)
            return weight_dict
        edges = edges_from_pairs(self.pairs)
        adj = sp.csr_matrix((edges['data'], (edges['row'], edges['col'])),
                            shape=(len(self.pairs)+self.displacement, len(self.pairs)+self.displacement))
        return adj
    def train_forward_normal(self, x):
        """Training step: cross-entropy over image-vs-pair-node dot products."""
        img, attrs, objs, pairs = x[0], x[1], x[2], x[3]
        if self.args.nlayers:
            img_feats = self.image_embedder(img)
        else:
            img_feats = (img)
        current_embeddings = self.gcn(self.embeddings)
        if self.args.train_only:
            # Restrict logits to the training pairs' nodes.
            pair_embed = current_embeddings[self.train_idx]
        else:
            pair_embed = current_embeddings[self.num_attrs+self.num_objs:self.num_attrs+self.num_objs+self.num_pairs,:]
        pair_embed = pair_embed.permute(1,0)
        pair_pred = torch.matmul(img_feats, pair_embed)
        loss = F.cross_entropy(pair_pred, pairs)
        return loss, None
    def val_forward_dotpr(self, x):
        """Validation scoring by dot product against all pair nodes."""
        img = x[0]
        if self.args.nlayers:
            img_feats = self.image_embedder(img)
        else:
            img_feats = (img)
        current_embedddings = self.gcn(self.embeddings)
        pair_embeds = current_embedddings[self.num_attrs+self.num_objs:self.num_attrs+self.num_objs+self.num_pairs,:].permute(1,0)
        score = torch.matmul(img_feats, pair_embeds)
        scores = {}
        for itr, pair in enumerate(self.dset.pairs):
            scores[pair] = score[:,self.dset.all_pair2idx[pair]]
        return None, scores
    def val_forward_distance_fast(self, x):
        """Alternative validation scoring by negative squared L2 distance."""
        img = x[0]
        img_feats = (self.image_embedder(img))
        current_embeddings = self.gcn(self.embeddings)
        pair_embeds = current_embeddings[self.num_attrs+self.num_objs:,:]
        batch_size, pairs, features = img_feats.shape[0], pair_embeds.shape[0], pair_embeds.shape[1]
        # Broadcast to (batch, pairs, features) and compare element-wise.
        img_feats = img_feats[:,None,:].expand(-1, pairs, -1)
        pair_embeds = pair_embeds[None,:,:].expand(batch_size, -1, -1)
        diff = (img_feats - pair_embeds)**2
        score = diff.sum(2) * -1
        scores = {}
        for itr, pair in enumerate(self.dset.pairs):
            scores[pair] = score[:,self.dset.all_pair2idx[pair]]
        return None, scores
    def forward(self, x):
        """Dispatch to the bound train_forward or (no-grad) val_forward."""
        if self.training:
            loss, pred = self.train_forward(x)
        else:
            with torch.no_grad():
                loss, pred = self.val_forward(x)
        return loss, pred
| 7,321 | 33.701422 | 151 | py |
czsl | czsl-main/models/image_extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from torchvision.models.resnet import ResNet, BasicBlock
class ResNet18_conv(ResNet):
    """ResNet-18 that returns the last convolutional feature map.

    Identical to torchvision's resnet18 up to ``layer4``; the final
    average-pool and fully-connected classifier are skipped so the output
    keeps its spatial dimensions.
    """
    def __init__(self):
        super(ResNet18_conv, self).__init__(BasicBlock, [2, 2, 2, 2])

    def forward(self, x):
        # Stem: conv -> BN -> ReLU -> max-pool, exactly as in torchvision.
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        # Four residual stages; avgpool/fc intentionally omitted.
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        return out
def get_image_extractor(arch = 'resnet18', pretrained = True, feature_dim = None, checkpoint = ''):
    '''
    Build a torchvision backbone for image feature extraction.

    Inputs
        arch: Base architecture — one of 'resnet18', 'resnet18_conv',
            'resnet50', 'resnet50_cutmix', 'resnet152', 'vgg16'
        pretrained: Bool, Imagenet weights
        feature_dim: Int, output feature dimension; None keeps the backbone's
            raw features (classifier head replaced by identity).
            Ignored for 'resnet18_conv', which returns conv feature maps.
        checkpoint: String, not implemented
    Returns
        Pytorch model
    Raises
        ValueError: if ``arch`` is not one of the supported architectures
        (the original fell through to an UnboundLocalError on ``model``).
    '''
    if arch == 'resnet18':
        model = models.resnet18(pretrained = pretrained)
        if feature_dim is None:
            model.fc = nn.Sequential()
        else:
            model.fc = nn.Linear(512, feature_dim)
    elif arch == 'resnet18_conv':
        # Convolutional-only variant; initialized from torchvision's
        # pretrained resnet18 weights regardless of ``pretrained``.
        model = ResNet18_conv()
        model.load_state_dict(models.resnet18(pretrained=True).state_dict())
    elif arch == 'resnet50':
        model = models.resnet50(pretrained = pretrained)
        if feature_dim is None:
            model.fc = nn.Sequential()
        else:
            model.fc = nn.Linear(2048, feature_dim)
    elif arch == 'resnet50_cutmix':
        # NOTE(review): hard-coded machine-specific checkpoint path.
        model = models.resnet50(pretrained = pretrained)
        checkpoint = torch.load('/home/ubuntu/workspace/pretrained/resnet50_cutmix.tar')
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        if feature_dim is None:
            model.fc = nn.Sequential()
        else:
            model.fc = nn.Linear(2048, feature_dim)
    elif arch == 'resnet152':
        model = models.resnet152(pretrained = pretrained)
        if feature_dim is None:
            model.fc = nn.Sequential()
        else:
            model.fc = nn.Linear(2048, feature_dim)
    elif arch == 'vgg16':
        model = models.vgg16(pretrained = pretrained)
        # Drop the last three classifier layers (final ReLU/Dropout/Linear),
        # leaving 4096-d features.
        modules = list(model.classifier.children())[:-3]
        model.classifier = torch.nn.Sequential(*modules)
        if feature_dim is not None:
            model.classifier[3] = torch.nn.Linear(4096, feature_dim)
    else:
        raise ValueError('Unknown architecture: %s' % arch)
    return model
| 2,428 | 29.3625 | 99 | py |
czsl | czsl-main/utils/reorganize_utzap.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Reorganize the UT-Zappos dataset to resemble the MIT-States dataset
root/attr_obj/img1.jpg
root/attr_obj/img2.jpg
root/attr_obj/img3.jpg
...
"""
import os
import torch
import shutil
import tqdm
DATA_FOLDER= "ROOT_FOLDER"  # placeholder: set to the dataset root directory
root = DATA_FOLDER+'/ut-zap50k/'
# Target layout mirrors MIT-States: root/images/<attr>_<obj>/<image file>.
os.makedirs(root+'/images',exist_ok=True)
# Each metadata row carries the source path ('_image') and its attr/obj labels.
data = torch.load(root+'/metadata_compositional-split-natural.t7')
for instance in tqdm.tqdm(data):
    image, attr, obj = instance['_image'], instance['attr'], instance['obj']
    old_file = '%s/_images/%s'%(root, image)
    new_dir = '%s/images/%s_%s/'%(root, attr, obj)
    os.makedirs(new_dir, exist_ok=True)
    # Copy (not move) so the original '_images' tree stays intact.
    shutil.copy(old_file, new_dir)
| 835 | 25.125 | 73 | py |
czsl | czsl-main/utils/config_model.py | import torch
import torch.optim as optim
from models.image_extractor import get_image_extractor
from models.visual_product import VisualProductNN
from models.manifold_methods import RedWine, LabelEmbedPlus, AttributeOperator
from models.modular_methods import GatedGeneralNN
from models.graph_method import GraphFull
from models.symnet import Symnet
from models.compcos import CompCos
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def configure_model(args, dataset):
    """Instantiate the model named by ``args.model``, an optional trainable
    image extractor, and an Adam optimizer with per-group learning rates.

    :args: parsed experiment arguments (model, lr, lrg, wd, ...)
    :dataset: dataset object passed to the model constructors
    :returns: (image_extractor or None, model, optimizer)
    :raises NotImplementedError: for an unrecognized ``args.model``
    """
    image_extractor = None
    is_open = False
    if args.model == 'visprodNN':
        model = VisualProductNN(dataset, args)
    elif args.model == 'redwine':
        model = RedWine(dataset, args)
    elif args.model == 'labelembed+':
        model = LabelEmbedPlus(dataset, args)
    elif args.model == 'attributeop':
        model = AttributeOperator(dataset, args)
    elif args.model == 'tmn':
        model = GatedGeneralNN(dataset, args, num_layers=args.nlayers, num_modules_per_layer=args.nmods)
    elif args.model == 'symnet':
        model = Symnet(dataset, args)
    elif args.model == 'graphfull':
        model = GraphFull(dataset, args)
    elif args.model == 'compcos':
        model = CompCos(dataset, args)
        # Open-world evaluation only when not restricted to training pairs.
        if dataset.open_world and not args.train_only:
            is_open = True
    else:
        raise NotImplementedError
    model = model.to(device)
    if args.update_features:
        # Fine-tune the CNN backbone alongside the model.
        print('Learnable image_embeddings')
        image_extractor = get_image_extractor(arch = args.image_extractor, pretrained = True)
        image_extractor = image_extractor.to(device)
    # configuring optimizer: some models need distinct parameter groups / LRs.
    if args.model=='redwine':
        optim_params = filter(lambda p: p.requires_grad, model.parameters())
    elif args.model=='attributeop':
        # Attribute operators train at a 10x smaller learning rate.
        attr_params = [param for name, param in model.named_parameters() if 'attr_op' in name and param.requires_grad]
        other_params = [param for name, param in model.named_parameters() if 'attr_op' not in name and param.requires_grad]
        optim_params = [{'params':attr_params, 'lr':0.1*args.lr}, {'params':other_params}]
    elif args.model=='tmn':
        # Gating network gets its own learning rate (args.lrg).
        gating_params = [
            param for name, param in model.named_parameters()
            if 'gating_network' in name and param.requires_grad
        ]
        network_params = [
            param for name, param in model.named_parameters()
            if 'gating_network' not in name and param.requires_grad
        ]
        optim_params = [
            {
                'params': network_params,
            },
            {
                'params': gating_params,
                'lr': args.lrg
            },
        ]
    else:
        model_params = [param for name, param in model.named_parameters() if param.requires_grad]
        optim_params = [{'params':model_params}]
    if args.update_features:
        # Backbone parameters train at args.lrg.
        ie_parameters = [param for name, param in image_extractor.named_parameters()]
        optim_params.append({'params': ie_parameters,
                             'lr': args.lrg})
    optimizer = optim.Adam(optim_params, lr=args.lr, weight_decay=args.wd)
    model.is_open = is_open
    return image_extractor, model, optimizer | 3,177 | 37.289157 | 123 | py |
czsl | czsl-main/utils/utils.py | import os
from os.path import join as ospj
import torch
import random
import copy
import shutil
import sys
import yaml
def chunks(l, n):
    """Yield successive n-sized chunks from l (the last may be shorter)."""
    return (l[start:start + n] for start in range(0, len(l), n))
def get_norm_values(norm_family = 'imagenet'):
    '''
    Return per-channel normalization statistics for a dataset family.
    Inputs
        norm_family: String of norm_family (only 'imagenet' is supported)
    Returns
        mean, std : tuple of 3 channel values
    Raises
        ValueError for an unknown family
    '''
    if norm_family != 'imagenet':
        raise ValueError('Incorrect normalization family')
    return [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
def save_args(args, log_path, argfile):
    """Snapshot the experiment into ``log_path`` for reproducibility.

    Copies train.py, the config file and the models/ source tree, then dumps
    the full args as YAML and the raw CLI arguments as text.

    Fix: the original used bare ``except:`` clauses, which also swallowed
    KeyboardInterrupt/SystemExit; only filesystem errors are ignored now.
    """
    shutil.copy('train.py', log_path)
    modelfiles = ospj(log_path, 'models')
    try:
        shutil.copy(argfile, log_path)
    except OSError:
        # Config was already copied into this log dir (or is the same file).
        print('Config exists')
    try:
        shutil.copytree('models/', modelfiles)
    except OSError:
        # models/ snapshot already exists from a previous run.
        # (shutil.Error is an OSError subclass, so partial-copy errors land
        # here too.)
        print('Already exists')
    with open(ospj(log_path, 'args_all.yaml'), 'w') as f:
        yaml.dump(args, f, default_flow_style=False, allow_unicode=True)
    with open(ospj(log_path, 'args.txt'), 'w') as f:
        f.write('\n'.join(sys.argv[1:]))
class UnNormalizer:
    '''
    Invert dataset normalization on an image tensor batch.
    Inputs
        norm_family: String, dataset family (see get_norm_values)
        tensor: Torch tensor, shape (N, C, H, W)
    Outputs
        tensor: Unnormalized tensor
    '''
    def __init__(self, norm_family = 'imagenet'):
        mean, std = get_norm_values(norm_family=norm_family)
        # Reshape to (1, 3, 1, 1) so the stats broadcast over NCHW batches.
        self.mean = torch.Tensor(mean).view(1, 3, 1, 1)
        self.std = torch.Tensor(std).view(1, 3, 1, 1)

    def __call__(self, tensor):
        # x_norm = (x - mean) / std  =>  x = x_norm * std + mean
        return tensor * self.std + self.mean
def load_args(filename, args):
    """Load a two-level YAML config and set every leaf key/value as an
    attribute on ``args`` (group names themselves are discarded).

    NOTE(review): the inner loop reuses the name ``key``, shadowing the
    group key — only leaf keys ever reach setattr, which appears intended.
    """
    with open(filename, 'r') as stream:
        data_loaded = yaml.safe_load(stream)
    for key, group in data_loaded.items():
        for key, val in group.items():
            setattr(args, key, val) | 1,943 | 28.454545 | 111 | py |
czsl | czsl-main/data/dataset.py | #external libs
import numpy as np
from tqdm import tqdm
from PIL import Image
import os
import random
from os.path import join as ospj
from glob import glob
#torch libs
from torch.utils.data import Dataset
import torch
import torchvision.transforms as transforms
#local libs
from utils.utils import get_norm_values, chunks
from models.image_extractor import get_image_extractor
from itertools import product
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class ImageLoader:
    """Callable that resolves a relative image path against a fixed root
    directory and loads it as an RGB PIL image."""
    def __init__(self, root):
        self.root_dir = root

    def __call__(self, img):
        # Force RGB: drops any alpha channel and expands grayscale inputs.
        full_path = ospj(self.root_dir, img)
        return Image.open(full_path).convert('RGB')
def dataset_transform(phase, norm_family='imagenet'):
    '''
    Inputs
        phase: String controlling which set of transforms to use
        norm_family: String controlling which normalization values to use
    Returns
        transform: A composed pipeline of pytorch transforms
    '''
    mean, std = get_norm_values(norm_family=norm_family)
    if phase == 'train':
        # Random crop + flip augmentation for training.
        transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
    elif phase in ('val', 'test', 'all'):
        # Deterministic resize + center crop for evaluation. These three
        # phases previously duplicated the exact same pipeline in
        # separate branches; they are consolidated here.
        transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
    else:
        raise ValueError('Invalid transform')

    return transform
def filter_data(all_data, pairs_gt, topk=5, valid_files_path=None):
    '''
    Helper function to clean data: keep only samples whose image file
    appears in a whitelist file (one filename per line).

    Inputs
        all_data: list of (image, attr, obj) samples
        pairs_gt: ground-truth (attr, obj) pairs, used only to report how
            many of them survive the filtering
        topk: selects the default whitelist file top<k>.txt
        valid_files_path: optional explicit path to the whitelist file;
            defaults to the historical hard-coded location
    Returns
        data, pairs, attrs, objs of the filtered subset (pairs/attrs/objs
        deduplicated and sorted)
    '''
    if valid_files_path is None:
        valid_files_path = '/home/ubuntu/workspace/top' + str(topk) + '.txt'
    valid_files = []
    with open(valid_files_path) as f:
        for line in f:
            valid_files.append(line.strip())
    # Set for O(1) membership tests while filtering.
    valid_set = set(valid_files)
    data, pairs, attr, obj = [], [], [], []
    for current in all_data:
        if current[0] in valid_set:
            data.append(current)
            pairs.append((current[1], current[2]))
            attr.append(current[1])
            obj.append(current[2])

    counter = 0
    for current in pairs_gt:
        if current in pairs:
            counter += 1
    print('Matches ', counter, ' out of ', len(pairs_gt))
    print('Samples ', len(data), ' out of ', len(all_data))
    return data, sorted(list(set(pairs))), sorted(list(set(attr))), sorted(list(set(obj)))
# Dataset class now
class CompositionDataset(Dataset):
    '''
    Dataset of (image, attribute, object) compositions with support for
    cached image features, negative/affordance sampling and pair dropout.

    Inputs
        root: String of base dir of dataset
        phase: String train, val, test
        split: String dataset split
        model: String, feature-extractor backbone name
        norm_family: String, normalization statistics family
        subset: Boolean if true uses a subset of train at each epoch
        num_negs: Int, numbers of negative pairs per batch
        pair_dropout: Percentage of pairs to leave in current epoch
        update_features: Boolean, if True return raw images instead of
            precomputed features
        return_images: Boolean, if True also return the image path on
            val/test samples
        train_only: Boolean, index pairs by train pairs only (train phase)
        open_world: Boolean, consider every attr-obj combination a pair
    '''
    def __init__(
        self,
        root,
        phase,
        split = 'compositional-split',
        model = 'resnet18',
        norm_family = 'imagenet',
        subset = False,
        num_negs = 1,
        pair_dropout = 0.0,
        update_features = False,
        return_images = False,
        train_only = False,
        open_world=False
    ):
        self.root = root
        self.phase = phase
        self.split = split
        self.num_negs = num_negs
        self.pair_dropout = pair_dropout
        self.norm_family = norm_family
        self.return_images = return_images
        self.update_features = update_features
        self.feat_dim = 512 if 'resnet18' in model else 2048 # todo, unify this with models
        self.open_world = open_world

        # Vocabulary of attributes/objects and the pair splits.
        self.attrs, self.objs, self.pairs, self.train_pairs, \
            self.val_pairs, self.test_pairs = self.parse_split()
        self.train_data, self.val_data, self.test_data = self.get_split_info()
        self.full_pairs = list(product(self.attrs, self.objs))

        # Clean only was here
        self.obj2idx = {obj: idx for idx, obj in enumerate(self.objs)}
        self.attr2idx = {attr: idx for idx, attr in enumerate(self.attrs)}
        # Open world: every attr-obj combination is a candidate pair.
        if self.open_world:
            self.pairs = self.full_pairs

        self.all_pair2idx = {pair: idx for idx, pair in enumerate(self.pairs)}

        if train_only and self.phase == 'train':
            print('Using only train pairs')
            self.pair2idx = {pair: idx for idx, pair in enumerate(self.train_pairs)}
        else:
            print('Using all pairs')
            self.pair2idx = {pair: idx for idx, pair in enumerate(self.pairs)}

        # Select the samples served by this dataset instance.
        if self.phase == 'train':
            self.data = self.train_data
        elif self.phase == 'val':
            self.data = self.val_data
        elif self.phase == 'test':
            self.data = self.test_data
        elif self.phase == 'all':
            print('Using all data')
            self.data = self.train_data + self.val_data + self.test_data
        else:
            raise ValueError('Invalid training phase')

        self.all_data = self.train_data + self.val_data + self.test_data
        print('Dataset loaded')
        print('Train pairs: {}, Validation pairs: {}, Test Pairs: {}'.format(
            len(self.train_pairs), len(self.val_pairs), len(self.test_pairs)))
        print('Train images: {}, Validation images: {}, Test images: {}'.format(
            len(self.train_data), len(self.val_data), len(self.test_data)))

        if subset:
            # Keep roughly 1000 evenly spaced samples for quick debug runs.
            ind = np.arange(len(self.data))
            ind = ind[::len(ind) // 1000]
            self.data = [self.data[i] for i in ind]

        # Keeping a list of all pairs that occur with each object
        self.obj_affordance = {}
        self.train_obj_affordance = {}
        for _obj in self.objs:
            candidates = [attr for (_, attr, obj) in self.train_data+self.test_data if obj==_obj]
            self.obj_affordance[_obj] = list(set(candidates))

            candidates = [attr for (_, attr, obj) in self.train_data if obj==_obj]
            self.train_obj_affordance[_obj] = list(set(candidates))

        self.sample_indices = list(range(len(self.data)))
        self.sample_pairs = self.train_pairs

        # Load based on what to output
        self.transform = dataset_transform(self.phase, self.norm_family)
        self.loader = ImageLoader(ospj(self.root, 'images'))
        if not self.update_features:
            # NOTE(review): '_featurers.t7' looks like a typo for
            # '_features.t7', but existing cached files use this exact
            # name -- confirm before renaming.
            feat_file = ospj(root, model+'_featurers.t7')
            print(f'Using {model} and feature file {feat_file}')
            if not os.path.exists(feat_file):
                # Precompute features once; subsequent runs reuse the cache.
                with torch.no_grad():
                    self.generate_features(feat_file, model)
            self.phase = phase
            activation_data = torch.load(feat_file)
            self.activations = dict(
                zip(activation_data['files'], activation_data['features']))
            self.feat_dim = activation_data['features'].size(1)

            print('{} activations loaded'.format(len(self.activations)))

    def parse_split(self):
        '''
        Helper function to read splits of object attribute pairs
        Returns
            all_attrs: List of all attributes
            all_objs: List of all objects
            all_pairs: List of all combination of attrs and objs
            tr_pairs: List of train pairs of attrs and objs
            vl_pairs: List of validation pairs of attrs and objs
            ts_pairs: List of test pairs of attrs and objs
        '''
        def parse_pairs(pair_list):
            '''
            Helper function to parse each phase to object attribute vectors
            Inputs
                pair_list: path to textfile, one "<attr> <obj>" per line
            '''
            with open(pair_list, 'r') as f:
                pairs = f.read().strip().split('\n')
                pairs = [line.split() for line in pairs]
                pairs = list(map(tuple, pairs))
            attrs, objs = zip(*pairs)
            return attrs, objs, pairs

        tr_attrs, tr_objs, tr_pairs = parse_pairs(
            ospj(self.root, self.split, 'train_pairs.txt')
        )
        vl_attrs, vl_objs, vl_pairs = parse_pairs(
            ospj(self.root, self.split, 'val_pairs.txt')
        )
        ts_attrs, ts_objs, ts_pairs = parse_pairs(
            ospj(self.root, self.split, 'test_pairs.txt')
        )

        # now we compose all objs, attrs and pairs
        all_attrs, all_objs = sorted(
            list(set(tr_attrs + vl_attrs + ts_attrs))), sorted(
                list(set(tr_objs + vl_objs + ts_objs)))
        all_pairs = sorted(list(set(tr_pairs + vl_pairs + ts_pairs)))

        return all_attrs, all_objs, all_pairs, tr_pairs, vl_pairs, ts_pairs

    def get_split_info(self):
        '''
        Helper method to read image, attrs, objs samples
        Returns
            train_data, val_data, test_data: List of tuple of image, attrs, obj
        '''
        data = torch.load(ospj(self.root, 'metadata_{}.t7'.format(self.split)))
        train_data, val_data, test_data = [], [], []
        for instance in data:
            image, attr, obj, settype = instance['image'], instance['attr'], \
                instance['obj'], instance['set']
            curr_data = [image, attr, obj]

            if attr == 'NA' or (attr, obj) not in self.pairs or settype == 'NA':
                # Skip incomplete pairs, unknown pairs and unknown set
                continue
            if settype == 'train':
                train_data.append(curr_data)
            elif settype == 'val':
                val_data.append(curr_data)
            else:
                test_data.append(curr_data)

        return train_data, val_data, test_data

    def get_dict_data(self, data, pairs):
        # Group image paths by their (attr, obj) pair.
        data_dict = {}
        for current in pairs:
            data_dict[current] = []

        for current in data:
            image, attr, obj = current
            data_dict[(attr, obj)].append(image)

        return data_dict

    def reset_dropout(self):
        '''
        Helper function to sample new subset of data containing a subset of pairs of objs and attrs
        '''
        self.sample_indices = list(range(len(self.data)))
        self.sample_pairs = self.train_pairs

        # Using sampling from random instead of 2 step numpy
        n_pairs = int((1 - self.pair_dropout) * len(self.train_pairs))
        self.sample_pairs = random.sample(self.train_pairs, n_pairs)
        print('Sampled new subset')
        print('Using {} pairs out of {} pairs right now'.format(
            n_pairs, len(self.train_pairs)))

        # Keep only samples whose pair survived the dropout.
        self.sample_indices = [ i for i in range(len(self.data))
            if (self.data[i][1], self.data[i][2]) in self.sample_pairs
        ]
        print('Using {} images out of {} images right now'.format(
            len(self.sample_indices), len(self.data)))

    def sample_negative(self, attr, obj):
        '''
        Inputs
            attr: String of valid attribute
            obj: String of valid object
        Returns
            Tuple of a different attribute, object indexes
        '''
        new_attr, new_obj = self.sample_pairs[np.random.choice(
            len(self.sample_pairs))]

        # Resample until the pair differs from the positive one.
        while new_attr == attr and new_obj == obj:
            new_attr, new_obj = self.sample_pairs[np.random.choice(
                len(self.sample_pairs))]

        return (self.attr2idx[new_attr], self.obj2idx[new_obj])

    def sample_affordance(self, attr, obj):
        '''
        Inputs
            attr: String of valid attribute
            obj: String of valid object
        Return
            Idx of a different attribute for the same object
        '''
        new_attr = np.random.choice(self.obj_affordance[obj])

        while new_attr == attr:
            new_attr = np.random.choice(self.obj_affordance[obj])

        return self.attr2idx[new_attr]

    def sample_train_affordance(self, attr, obj):
        '''
        Inputs
            attr: String of valid attribute
            obj: String of valid object
        Return
            Idx of a different attribute for the same object from the training pairs
        '''
        new_attr = np.random.choice(self.train_obj_affordance[obj])

        while new_attr == attr:
            new_attr = np.random.choice(self.train_obj_affordance[obj])

        return self.attr2idx[new_attr]

    def generate_features(self, out_file, model):
        '''
        Extract features for every image under root/images and cache them.

        Inputs
            out_file: Path to save features
            model: String of extraction model
        '''
        # data = self.all_data
        data = ospj(self.root,'images')
        files_before = glob(ospj(data, '**', '*.jpg'), recursive=True)
        files_all = []
        for current in files_before:
            parts = current.split('/')
            # cgqa stores images flat; other datasets use <class>/<file>.
            if "cgqa" in self.root:
                files_all.append(parts[-1])
            else:
                files_all.append(os.path.join(parts[-2],parts[-1]))
        transform = dataset_transform('test', self.norm_family)
        feat_extractor = get_image_extractor(arch = model).eval()
        feat_extractor = feat_extractor.to(device)

        image_feats = []
        image_files = []
        # Process images in chunks of 512 to bound memory use.
        for chunk in tqdm(
                chunks(files_all, 512), total=len(files_all) // 512, desc=f'Extracting features {model}'):
            files = chunk
            imgs = list(map(self.loader, files))
            imgs = list(map(transform, imgs))
            feats = feat_extractor(torch.stack(imgs, 0).to(device))
            image_feats.append(feats.data.cpu())
            image_files += files
        image_feats = torch.cat(image_feats, 0)
        print('features for %d images generated' % (len(image_files)))

        torch.save({'features': image_feats, 'files': image_files}, out_file)

    def __getitem__(self, index):
        '''
        Call for getting samples
        '''
        index = self.sample_indices[index]

        image, attr, obj = self.data[index]

        # Decide what to output: cached feature vs transformed raw image.
        if not self.update_features:
            img = self.activations[image]
        else:
            img = self.loader(image)
            img = self.transform(img)

        data = [img, self.attr2idx[attr], self.obj2idx[obj], self.pair2idx[(attr, obj)]]

        if self.phase == 'train':
            all_neg_attrs = []
            all_neg_objs = []

            for curr in range(self.num_negs):
                neg_attr, neg_obj = self.sample_negative(attr, obj) # negative for triplet lose
                all_neg_attrs.append(neg_attr)
                all_neg_objs.append(neg_obj)

            neg_attr, neg_obj = torch.LongTensor(all_neg_attrs), torch.LongTensor(all_neg_objs)

            # note here
            if len(self.train_obj_affordance[obj])>1:
                inv_attr = self.sample_train_affordance(attr, obj) # attribute for inverse regularizer
            else:
                inv_attr = (all_neg_attrs[0])

            comm_attr = self.sample_affordance(inv_attr, obj) # attribute for commutative regularizer

            data += [neg_attr, neg_obj, inv_attr, comm_attr]

        # Return image paths if requested as the last element of the list
        if self.return_images and self.phase != 'train':
            data.append(image)

        return data

    def __len__(self):
        '''
        Call for length
        '''
        return len(self.sample_indices)
| 15,653 | 34.336343 | 106 | py |
MotifClass | MotifClass-master/text_classification/main.py | # The code structure is adapted from the WeSTClass implementation
# https://github.com/yumeng5/WeSTClass
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
np.random.seed(1234)
from time import time
from model import WSTC, f1
from keras.optimizers import SGD
from gen import augment, pseudodocs
from load_data import load_dataset
from gensim.models import word2vec
from gensim.models import KeyedVectors
from sklearn import preprocessing
def load_embedding(vocabulary_inv, num_class, dataset_name, embedding_name):
    """Load word vectors and per-class label center vectors.

    Inputs
        vocabulary_inv: dict mapping word index -> word
        num_class: number of classes (length of the centers list)
        dataset_name: directory holding the embedding file
        embedding_name: suffix of the embedding file name
    Returns
        embedding_weights: dict mapping word index -> vector (random
            uniform init for out-of-vocabulary words)
        centers: list of L2-normalized label vectors, one per class
    Raises
        FileNotFoundError: if the embedding or label file is missing.
    """
    model_name = f'{dataset_name}/embedding_{embedding_name}'
    if not os.path.exists(model_name):
        # The original code only printed a message here and then crashed
        # later with a NameError; fail fast with a clear error instead.
        raise FileNotFoundError(f"Cannot find the embedding file {model_name}!")
    # embedding_model = word2vec.Word2Vec.load(model_name)
    embedding_model = KeyedVectors.load_word2vec_format(model_name, binary=False, unicode_errors='ignore')
    print("Loading existing embedding vectors {}...".format(model_name))

    embedding_weights = {key: embedding_model[word] if word in embedding_model else
                         np.random.uniform(-0.25, 0.25, embedding_model.vector_size)
                         for key, word in vocabulary_inv.items()}

    label_name = f'../{dataset_name}_data/labels.txt'
    if not os.path.exists(label_name):
        # Previously the function silently returned None in this case,
        # making the caller fail on tuple unpacking.
        raise FileNotFoundError(f"Cannot find the label file {label_name}!")
    centers = [None for _ in range(num_class)]
    with open(label_name) as fin:
        for idx, line in enumerate(fin):
            label = line.strip()
            # L2-normalize each label vector.
            centers[idx] = embedding_model[label] / np.linalg.norm(embedding_model[label])
    return embedding_weights, centers
def write_output(write_path, y_pred, perm):
    """Undo the dataset shuffle and dump predictions to out.txt.

    `perm[i]` is the original position of the i-th shuffled sample, so
    the inverse permutation restores the original document order before
    writing one prediction per line.
    """
    inverse = np.zeros(len(perm), dtype='int32')
    for shuffled_pos, original_pos in enumerate(perm):
        inverse[original_pos] = shuffled_pos
    reordered = y_pred[inverse]
    out_file = os.path.join(write_path, 'out.txt')
    with open(out_file, 'w') as f:
        f.writelines(str(val) + '\n' for val in reordered)
    print("Classification results are written in {}".format(out_file))
    return
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='main',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    ### Basic settings ###
    # dataset selection: MAG-CS (default), Amazon
    parser.add_argument('--dataset', default='mag', choices=['mag', 'amazon'])
    # embedding files: joint embedding (default)
    parser.add_argument('--embedding', default='joint')
    # whether ground truth labels are available for evaluation: True (default), False
    parser.add_argument('--with_evaluation', default='True', choices=['True', 'False'])

    ### Training settings ###
    # mini-batch size for both pre-training and self-training: 256 (default)
    parser.add_argument('--batch_size', default=256, type=int)
    # training epochs: dataset-specific
    parser.add_argument('--pretrain_epochs', default=None, type=int)

    ### Hyperparameters settings ###
    # number of generated pseudo documents per class: dataset-specific
    # NOTE(review): this option is parsed but never applied below -- the
    # dataset-specific constant always wins; confirm intended behavior.
    parser.add_argument('--num_generated_docs', default=None, type=int)
    # keyword vocabulary size (gamma): 50 (default)
    parser.add_argument('--gamma', default=50, type=int)
    # vmf concentration parameter when synthesizing documents (kappa): dataset-specific
    # NOTE(review): also parsed but never applied below (see num_generated_docs).
    parser.add_argument('--kappa', default=None, type=float)
    # number of copies each retrieved/generated pseudo-labeled document has in one epoch
    parser.add_argument('--ratio', default=10, type=int)

    ### Dummy arguments (please ignore) ###
    # weak supervision selection: labeled documents (default)
    parser.add_argument('--sup_source', default='docs', choices=['docs'])
    # maximum self-training iterations: 0 (default)
    parser.add_argument('--maxiter', default=0, type=int)
    # self-training update interval: None (default)
    parser.add_argument('--update_interval', default=None, type=int)
    # background word distribution weight (alpha): 0.0 (default)
    parser.add_argument('--alpha', default=0.0, type=float)
    # self-training stopping criterion (delta): None (default)
    parser.add_argument('--delta', default=0.1, type=float)
    # trained model directory: None (default)
    parser.add_argument('--trained_weights', default=None)

    args = parser.parse_args()
    print(args)

    alpha = args.alpha
    gamma = args.gamma
    delta = args.delta
    ratio = args.ratio

    word_embedding_dim = 100
    update_interval = 50
    self_lr = 1e-3

    # Dataset-specific hyperparameters.
    if args.dataset == 'mag':
        pretrain_epochs = 100
        max_sequence_length = 200
        num_generated_docs = 50
        kappa = 200
        beta = num_generated_docs * ratio
    elif args.dataset == 'amazon':
        pretrain_epochs = 100
        max_sequence_length = 200
        num_generated_docs = 100
        kappa = 150
        beta = num_generated_docs * ratio
    decay = 1e-6

    # Command-line overrides for the defaults above.
    if args.update_interval is not None:
        update_interval = args.update_interval
    if args.pretrain_epochs is not None:
        pretrain_epochs = args.pretrain_epochs
    if args.with_evaluation == 'True':
        with_evaluation = True
    else:
        with_evaluation = False
    if args.sup_source == 'docs':
        x, y, word_counts, vocabulary, vocabulary_inv_list, len_avg, len_std, word_sup_list, sup_idx, perm = \
            load_dataset(args.dataset, model='cnn', sup_source=args.sup_source, with_evaluation=with_evaluation, truncate_len=max_sequence_length)

    np.random.seed(1234)
    vocabulary_inv = {key: value for key, value in enumerate(vocabulary_inv_list)}
    vocab_sz = len(vocabulary_inv)
    n_classes = len(word_sup_list)
    # Clamp the maximum sequence length to the actual data width.
    if x.shape[1] < max_sequence_length:
        max_sequence_length = x.shape[1]
    x = x[:, :max_sequence_length]
    sequence_length = max_sequence_length

    print("\n### Input preparation ###")
    embedding_weights, centers = load_embedding(vocabulary_inv, n_classes, args.dataset, args.embedding)
    embedding_mat = np.array([np.array(embedding_weights[word]) for word in vocabulary_inv])

    wstc = WSTC(input_shape=x.shape, n_classes=n_classes, y=y, model='cnn',
                vocab_sz=vocab_sz, embedding_matrix=embedding_mat, word_embedding_dim=word_embedding_dim)

    if args.trained_weights is None:
        print("\n### Phase 1: vMF distribution fitting & pseudo document generation ###")
        word_sup_array = np.array([np.array([vocabulary[word] for word in word_class_list]) for word_class_list in word_sup_list])

        # Background unigram distribution (index 0 is the padding token
        # and is excluded).
        total_counts = sum(word_counts[ele] for ele in word_counts)
        total_counts -= word_counts[vocabulary_inv_list[0]]
        background_array = np.zeros(vocab_sz)
        for i in range(1,vocab_sz):
            background_array[i] = word_counts[vocabulary_inv[i]]/total_counts
        seed_docs, seed_label = pseudodocs(word_sup_array, gamma, background_array,
                                           sequence_length, len_avg, len_std, beta, alpha,
                                           vocabulary_inv, embedding_mat, centers, kappa, 'cnn',
                                           './results/{}/{}/phase1/'.format(args.dataset, 'cnn'))

        if args.sup_source == 'docs':
            # Mix in `ratio` copies of the real labeled documents.
            num_real_doc = len(sup_idx.flatten()) * ratio
            real_seed_docs, real_seed_label = augment(x, sup_idx, num_real_doc)
            seed_docs = np.concatenate((seed_docs, real_seed_docs), axis=0)
            seed_label = np.concatenate((seed_label, real_seed_label), axis=0)

        # Shuffle pseudo and real seed documents together.
        perm_seed = np.random.permutation(len(seed_label))
        seed_docs = seed_docs[perm_seed]
        seed_label = seed_label[perm_seed]

        print('\n### Phase 2: pre-training with pseudo documents ###')

        wstc.pretrain(x=seed_docs, pretrain_labels=seed_label,
                      sup_idx=sup_idx, optimizer=SGD(lr=0.1, momentum=0.9),
                      epochs=pretrain_epochs, batch_size=args.batch_size,
                      save_dir='./results/{}/{}/phase2'.format(args.dataset, 'cnn'))

        y_pred = wstc.predict(x)
        if y is not None:
            f1_macro, f1_micro = np.round(f1(y, y_pred), 5)
            print('F1 score after pre-training: f1_macro = {}, f1_micro = {}'.format(f1_macro, f1_micro))

    else:
        print("\n### Directly loading trained weights ###")
        wstc.load_weights(args.trained_weights)
        y_pred = wstc.predict(x)
        if y is not None:
            f1_macro, f1_micro = np.round(f1(y, y_pred), 5)
            print('F1 score: f1_macro = {}, f1_micro = {}'.format(f1_macro, f1_micro))

    print("\n### Generating outputs ###")
    write_output('./' + args.dataset, y_pred, perm)
| 7,853 | 36.759615 | 137 | py |
MotifClass | MotifClass-master/text_classification/model.py | import numpy as np
np.random.seed(1234)
import os
from time import time
import csv
import keras.backend as K
# K.set_session(K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=30, inter_op_parallelism_threads=30)))
from keras.engine.topology import Layer
from keras.layers import Dense, Input, Convolution1D, Embedding, GlobalMaxPooling1D, GRU, TimeDistributed
from keras.layers.merge import Concatenate
from keras.models import Model
from keras import initializers, regularizers, constraints
from keras.initializers import VarianceScaling, RandomUniform
from sklearn.metrics import f1_score
def f1(y_true, y_pred):
    """Return (macro-F1, micro-F1) for two equally sized label arrays."""
    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    macro, micro = (f1_score(y_true, y_pred, average=avg)
                    for avg in ('macro', 'micro'))
    return macro, micro
def ConvolutionLayer(input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False, vocab_sz=None,
                     embedding_matrix=None, word_embedding_dim=100, hidden_dim=20, act='relu', init='ones'):
    """Word-CNN text classifier.

    Parallel 1-D convolutions of different widths run over a (frozen by
    default) embedding layer; each branch is max-pooled over the sequence,
    the pooled features are concatenated and fed through a dense layer to
    a softmax head with `n_classes` outputs.
    """
    inputs = Input(shape=(input_shape,), name='input')
    embedded = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), name="embedding",
                         weights=[embedding_matrix], trainable=word_trainable)(inputs)

    def conv_branch(sz):
        # One convolution of kernel width `sz`, max-pooled over time.
        feat = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(embedded)
        return GlobalMaxPooling1D()(feat)

    branches = [conv_branch(sz) for sz in filter_sizes]
    merged = Concatenate()(branches) if len(branches) > 1 else branches[0]
    hidden = Dense(hidden_dim, activation="relu")(merged)
    outputs = Dense(n_classes, activation="softmax")(hidden)
    return Model(inputs=inputs, outputs=outputs, name='classifier')
def dot_product(x, kernel):
    """Backend-agnostic dot product between a tensor and a weight vector."""
    if K.backend() != 'tensorflow':
        return K.dot(x, kernel)
    # TensorFlow needs the kernel expanded to a column matrix and the
    # resulting trailing dimension squeezed away again.
    return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
class AttentionWithContext(Layer):
    """Attention layer with a learned context vector.

    For input of shape (batch, timesteps, features): computes a score
    `tanh(W h_t + b) . u` per timestep, exponentiates and normalizes the
    scores (respecting an optional mask), and returns the attention-
    weighted sum over timesteps, i.e. output shape (batch, features).
    """
    def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 init='glorot_uniform', bias=True, **kwargs):
        self.supports_masking = True
        self.init = init

        self.W_regularizer = regularizers.get(W_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(AttentionWithContext, self).__init__(**kwargs)

    def build(self, input_shape):
        # Expects 3-D input: (batch, timesteps, features).
        assert len(input_shape) == 3

        self.W = self.add_weight(shape=(input_shape[-1], input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            self.b = self.add_weight(shape=(input_shape[-1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        # Context vector u, learned jointly with the rest of the network.
        self.u = self.add_weight(shape=(input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_u'.format(self.name),
                                 regularizer=self.u_regularizer,
                                 constraint=self.u_constraint)

        super(AttentionWithContext, self).build(input_shape)

    def compute_mask(self, input, input_mask=None):
        # The time dimension is reduced away, so no mask is propagated.
        return None

    def call(self, x, mask=None):
        uit = dot_product(x, self.W)

        if self.bias:
            uit += self.b

        uit = K.tanh(uit)
        ait = dot_product(uit, self.u)

        a = K.exp(ait)

        if mask is not None:
            # Zero out scores of masked (padding) timesteps.
            a *= K.cast(mask, K.floatx())

        # Epsilon guards against division by zero when all timesteps
        # are masked.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        return input_shape[0], input_shape[-1]
def HierAttLayer(input_shape, n_classes, word_trainable=False, vocab_sz=None,
                 embedding_matrix=None, word_embedding_dim=100, gru_dim=100, fc_dim=100):
    """Hierarchical attention network text classifier.

    A word-level GRU + attention encoder is applied per sentence (shared
    weights via TimeDistributed), followed by a sentence-level GRU +
    attention and a softmax classification head.
    """
    # Word-level encoder shared across all sentences of a document.
    word_input = Input(shape=(input_shape[2],), dtype='int32')
    word_seq = Embedding(vocab_sz,
                         word_embedding_dim,
                         input_length=input_shape[2],
                         weights=[embedding_matrix],
                         trainable=word_trainable)(word_input)
    word_gru = GRU(gru_dim, return_sequences=True)(word_seq)
    word_proj = TimeDistributed(Dense(fc_dim))(word_gru)
    word_vec = AttentionWithContext()(word_proj)
    sent_encoder = Model(word_input, word_vec)

    # Sentence-level encoder over the per-sentence vectors.
    doc_input = Input(shape=(input_shape[1], input_shape[2]), dtype='int32')
    sent_seq = TimeDistributed(sent_encoder)(doc_input)
    sent_gru = GRU(gru_dim, return_sequences=True)(sent_seq)
    sent_proj = TimeDistributed(Dense(fc_dim))(sent_gru)
    doc_vec = AttentionWithContext()(sent_proj)
    outputs = Dense(n_classes, activation='softmax')(doc_vec)
    return Model(inputs=doc_input, outputs=outputs, name='classifier')
class WSTC(object):
    """Weakly-supervised text classifier.

    Wraps either a word-CNN or a hierarchical attention RNN classifier
    and supports (a) pre-training on pseudo/real seed documents and
    (b) self-training against a sharpened target distribution.
    """
    def __init__(self,
                 input_shape,
                 n_classes=None,
                 init=RandomUniform(minval=-0.01, maxval=0.01),
                 y=None,
                 model='cnn',
                 vocab_sz=None,
                 word_embedding_dim=100,
                 embedding_matrix=None
                 ):

        super(WSTC, self).__init__()

        self.input_shape = input_shape
        self.y = y
        self.n_classes = n_classes
        if model == 'cnn':
            self.classifier = ConvolutionLayer(self.input_shape[1], n_classes=n_classes,
                                               vocab_sz=vocab_sz, embedding_matrix=embedding_matrix,
                                               word_embedding_dim=word_embedding_dim, init=init)
        elif model == 'rnn':
            self.classifier = HierAttLayer(self.input_shape, n_classes=n_classes,
                                           vocab_sz=vocab_sz, embedding_matrix=embedding_matrix,
                                           word_embedding_dim=word_embedding_dim)

        self.model = self.classifier
        # Maps sample index -> known class for the labeled seed documents;
        # filled in by pretrain() and used by target_distribution().
        self.sup_list = {}

    def pretrain(self, x, pretrain_labels, sup_idx=None, optimizer='adam',
                 loss='kld', epochs=200, batch_size=256, save_dir=None):
        # Pre-train the classifier on (pseudo) labeled documents.
        self.classifier.compile(optimizer=optimizer, loss=loss)
        print("\nNeural model summary: ")
        self.model.summary()

        # Remember which indices are truly labeled so self-training can
        # pin their target distribution to the known class.
        if sup_idx is not None:
            for i, seed_idx in enumerate(sup_idx):
                for idx in seed_idx:
                    self.sup_list[idx] = i

        # begin pretraining
        t0 = time()
        print('\nPretraining...')
        self.classifier.fit(x, pretrain_labels, batch_size=batch_size, epochs=epochs)
        print('Pretraining time: {:.2f}s'.format(time() - t0))
        if save_dir is not None:
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            self.classifier.save_weights(save_dir + '/pretrained.h5')
            print('Pretrained model saved to {}/pretrained.h5'.format(save_dir))
        self.pretrained = True

    def load_weights(self, weights):
        # Load previously saved model weights from an .h5 file.
        self.model.load_weights(weights)

    def predict(self, x):
        # Hard class predictions (argmax over the softmax outputs).
        q = self.model.predict(x, verbose=0)
        return q.argmax(1)

    def target_distribution(self, q, power=2):
        # Sharpen the predicted distribution q (DEC-style target), then
        # overwrite rows of known labeled samples with one-hot targets.
        weight = q**power / q.sum(axis=0)
        p = (weight.T / weight.sum(axis=1)).T
        for i in self.sup_list:
            p[i] = 0
            p[i][self.sup_list[i]] = 1
        return p

    def compile(self, optimizer='sgd', loss='kld'):
        self.model.compile(optimizer=optimizer, loss=loss)

    def fit(self, x, y=None, maxiter=5e4, batch_size=256, tol=0.1, power=2,
            update_interval=140, save_dir=None, save_suffix=''):
        # Self-training loop: every `update_interval` batches, recompute
        # the sharpened target distribution and stop once the fraction of
        # changed label assignments falls below `tol` percent.
        print('Update interval: {}'.format(update_interval))

        pred = self.classifier.predict(x)
        y_pred = np.argmax(pred, axis=1)
        y_pred_last = np.copy(y_pred)

        # logging file
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        logfile = open(save_dir + '/self_training_log_{}.csv'.format(save_suffix), 'w')
        logwriter = csv.DictWriter(logfile, fieldnames=['iter', 'f1_macro', 'f1_micro'])
        logwriter.writeheader()

        index = 0
        index_array = np.arange(x.shape[0])
        for ite in range(int(maxiter)):
            if ite % update_interval == 0:
                q = self.model.predict(x, verbose=0)
                y_pred = q.argmax(axis=1)
                p = self.target_distribution(q, power)
                print('\nIter {}: '.format(ite))
                if y is not None:
                    f1_macro, f1_micro = np.round(f1(y, y_pred), 5)
                    logdict = dict(iter=ite, f1_macro=f1_macro, f1_micro=f1_micro)
                    logwriter.writerow(logdict)
                    print('f1_macro = {}, f1_micro = {}'.format(f1_macro, f1_micro))

                # check stop criterion
                # NOTE(review): np.float was removed in NumPy >= 1.24;
                # this line needs plain `float` on modern NumPy.
                delta_label = np.sum(y_pred != y_pred_last).astype(np.float) / y_pred.shape[0]
                y_pred_last = np.copy(y_pred)
                print('Fraction of documents with label changes: {} %'.format(np.round(delta_label*100, 3)))

                if ite > 0 and delta_label < tol/100:
                    print('\nFraction: {} % < tol: {} %'.format(np.round(delta_label*100, 3), tol))
                    print('Reached tolerance threshold. Stopping training.')
                    logfile.close()
                    break

            # train on batch
            idx = index_array[index * batch_size: min((index+1) * batch_size, x.shape[0])]
            self.model.train_on_batch(x=x[idx], y=p[idx])
            index = index + 1 if (index + 1) * batch_size <= x.shape[0] else 0
            ite += 1

        logfile.close()

        if save_dir is not None:
            self.model.save_weights(save_dir + '/final.h5')
            print("Final model saved to: {}/final.h5".format(save_dir))
        return self.predict(x)
| 9,038 | 32.354244 | 124 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/attack_csgld_pgd_torch.py | """
Implementation of the attacks used in the article
"""
import numpy as np
import pandas as pd
import torch
import argparse
import time
import os
import sys
import re
from tqdm import tqdm
import random
from random import shuffle
from utils.data import CIFAR10, CIFAR100, ImageNet, MNIST
from utils.helpers import keyvalue, guess_model, guess_and_load_model, load_classifier, load_classifier_ensemble, list_models, \
compute_accuracy_from_nested_list_models, save_numpy, compute_norm, guess_method, USE_CUDA, DEVICE
from utils.attacks import ExtendedProjectedGradientDescentPyTorch
from art.attacks.evasion import CarliniLInfMethod
from utils.models import LightNestedEnsemble
from torchattacks import Square, CW, AutoAttack, OnePixel, PGD, PGDL2, APGD, FAB, DeepFool, MultiAttack
from torch.utils.data import TensorDataset, DataLoader
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
cudnn.deterministic = True
# parse args
parser = argparse.ArgumentParser(description="Craft PGD adv ex with each update computed on a different samples from an ensemble of models trained with cSGLD")
parser.add_argument("dirs_models", nargs='+', help="Path to directory containing all the models file of the ensemble model. Also support single path to a model file.")
parser.add_argument('--attack', choices=['PGD', 'PGD_ta', 'APGD', 'FAB', 'Square', 'AutoAttack', 'CW', 'OnePixel', 'DeepFool'], default='PGD', help="Attack to craft adversarial examples. Only PGD supports momentum, .")
parser.add_argument('--n-iter', type=int, default=None, help="Number of iterations to perform. If None (default), set to the number of samples.")
parser.add_argument("--norm", choices=['1', '2', 'inf'], default='2', help="Type of L-norm to use. Default: 2")
parser.add_argument("--max-norm", type=float, required=True, help="Max L-norm of the perturbation")
parser.add_argument("--norm-step", type=float, required=True, help="Max norm at each step.")
parser.add_argument('--n-ensemble', type=int, default=1, help="Number of samples to ensemble. Default: 1")
parser.add_argument('--shuffle', action='store_true', help="Random order of models vs sequential order of the MCMC (default)")
parser.add_argument('--n-random-init', type=int, default=0, help="Number of random restarts to perform. 0: no random init.")
parser.add_argument('--grad-noise-std', type=float, default=None, help="Add Gaussian noise to gradients with the specified standard deviation.")
parser.add_argument('--temperature', type=float, default=None, help="Temperature scaling the logits of the surrogate model. Deactivated if None (default).")
parser.add_argument('--skip-first-n-models', type=int, default=0, help="Number of models samples to discard")
parser.add_argument('--n-models-cycle', type=int, help="Number of models samples per cycle (only used for limit-n-samples-per-cycle or limit-n-cycles)")
parser.add_argument('--limit-n-samples-per-cycle', type=int, default=None, help="Takes into account only the first n samples inside a cycle, droping off the last ones. Default: None (desactivated)")
parser.add_argument('--method-samples-per-cycle', choices=['interval', 'true_interval', 'first', 'last'], default='interval', help="Method to select samples inside cycle. Use interval for cycle based surrogate, true_interval for non-cyclical surrogate.")
parser.add_argument('--limit-n-cycles', type=int, default=None, help="Takes into account only the first n cycles, droping off the last ones. Default: None (desactivated)")
# test time transferability improvements
parser.add_argument('--ghost-attack', action='store_true', help="Load each model as a Ghost network (default: no model alteration)")
parser.add_argument('--input-diversity', action='store_true', help="Add input diversity to each model (default: no model alteration)")
parser.add_argument('--skip-gradient-method', action='store_true', help="Add Skip Gradient Method (SGM) backward hook to each surrogate model (default: no model alteration)")
parser.add_argument('--translation-invariant', action='store_true', help="Apply translation invariance kernel to gradient (default: regular gradient)")
parser.add_argument('--target-defense-randomization', action='store_true', help="The target model is loaded with defense randomization (default: regular target). Set to True with --translation-invariant.")
parser.add_argument("--momentum", type=float, default=None, help="Apply momentum to gradients (default: regular gradient)")
# target model
parser.add_argument("--model-target-path", nargs='+', default=None, help="Path to the target models.")
parser.add_argument("--csv-export", default=None, help="Path to CSV where to export data about target.")
parser.add_argument("--csv-key-val", nargs='*', metavar="KEY=VALUE", action=keyvalue, help="Add the keys as columns with the corresponding values to the exported CSV.")
parser.add_argument("--export-target-per-iter", type=int, default=None, help="Export target acc each N iterations in csv-export file. Default (None) 1 line for final data.")
# others
parser.add_argument("--n-examples", type=int, default=None, help="Craft adv ex on a subset of test examples. If None "
"(default), perturbate all the test set. If "
"model-target-path is set, extract the subset from "
"the examples correctly predicted by it.")
parser.add_argument("--data-path", default=None, help="Path of data. Only supported for ImageNet.")
parser.add_argument("--validation", action='store_true', help="Craft adversarial examples from a validation set built from train set (of size: 2 x n_examples). Default: no validation set, examples from test set.")
parser.add_argument("--seed", type=int, default=None, help="Set random seed")
parser.add_argument("--batch-size", type=int, default=128, help="Batch size. Try a lower value if out of memory (especially for high values of --ensemble-inner).")
parser.add_argument("--force-add", type=float, default=None, help="Add this scalar to the example. Use for compatibility with model trained on other range of pixels")
parser.add_argument("--force-divide", type=float, default=None, help="Divide the example ex by this scalar. Use for compatibility with model trained on other range of pixels")
parser.add_argument("--skip-accuracy-computation", action='store_true', help="Do not compute accuracies. To be used for full test set.")
args = parser.parse_args()
# convert the CLI norm string into the numeric value expected by the attacks
# ('inf' -> np.inf, otherwise the integer p of the Lp norm)
if args.norm == 'inf':
    args.norm = np.inf
else:
    args.norm = int(args.norm)
# check args
if args.limit_n_samples_per_cycle or args.limit_n_cycles:
    if not args.n_models_cycle:
        raise ValueError("If a limit is set in the number of models to consider, you have to precise the number of samples per cycle.")
if args.validation and not args.n_examples:
    raise ValueError('For validation set, please provide its size with n-examples arg')
# set random seed
# NOTE(review): a seed of 0 is falsy and would silently skip seeding — confirm intended
if args.seed:
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
# detect models
if re.match('.+\\.pth?(\\.tar)?$', args.dirs_models[0]) and len(args.dirs_models) == 1:
# link to single model
if not os.path.isfile(args.dirs_models[0]):
raise ValueError('Non-existing path surrogate file passed')
paths_ensembles = [[args.dirs_models[0], ], ]
else:
paths_ensembles = [list_models(x) for x in args.dirs_models]
print(f'Ensembles of models detected: {[len(x) for x in paths_ensembles]}')
if args.skip_first_n_models:
print(f'Discarding the first {args.skip_first_n_models} models')
paths_ensembles = [x[args.skip_first_n_models:] for x in paths_ensembles]
if any([len(x) == 0 for x in paths_ensembles]):
raise ValueError('Empty model ensemble')
if args.n_models_cycle:
if any([len(x) % args.n_models_cycle != 0 for x in paths_ensembles]):
print('Warning: Number of models is not a multiple of the number of models per cycle')
if args.limit_n_cycles:
if any([len(x) < args.limit_n_cycles * args.n_models_cycle for x in paths_ensembles]):
raise ValueError(f'One of the ensemble is smaller than what expected ({ args.limit_n_cycles * args.n_models_cycle })')
if args.limit_n_samples_per_cycle:
if args.limit_n_samples_per_cycle > args.n_models_cycle:
raise ValueError('Limit to nb samples > nb of samples per cycle.')
# load test/val data
validation_size = 2 * args.n_examples if args.validation else None # if validation data, set the size of val dataset 2xn_examples (to have enough correctly predicted examples)
if 'CIFAR100' in args.dirs_models[0]:
data = CIFAR100(batch_size=args.batch_size, validation=validation_size, seed=args.seed)
elif 'CIFAR10' in args.dirs_models[0]:
data = CIFAR10(batch_size=args.batch_size, validation=validation_size, seed=args.seed)
elif 'ImageNet' in args.dirs_models[0]:
data = ImageNet(batch_size=args.batch_size, path=args.data_path, validation=validation_size, seed=args.seed)
elif 'MNIST' in args.dirs_models[0]:
data = MNIST(batch_size=args.batch_size, validation=validation_size, seed=args.seed)
else:
raise NotImplementedError('Dataset not supported')
model_target = None
if args.model_target_path:
# load target and select n_examples correctly predicted by it
# target model is loaded with randomization defense for translation invariance
models_target = [guess_and_load_model(path_model=x, data=data, defense_randomization=(args.translation_invariant or args.target_defense_randomization)) for x in args.model_target_path]
X, y = data.correctly_predicted_to_numpy(models=models_target, train=False, validation=args.validation, N=args.n_examples, seed=args.seed)
else:
X, y = data.to_numpy(train=False, validation=args.validation, N=args.n_examples, seed=args.seed)
if args.force_add:
X += args.force_add
data.min_pixel_value += args.force_add
data.max_pixel_value += args.force_add
if args.force_divide:
X /= args.force_divide
data.min_pixel_value /= args.force_divide
data.max_pixel_value /= args.force_divide
# limit cycles or samples per cycles
if args.limit_n_cycles or args.limit_n_samples_per_cycle:
paths_ensembles_lim = []
for i_ens, paths_models in enumerate(paths_ensembles):
paths_ensembles_lim.append([])
for i, path_model in enumerate(paths_models):
# stop if limit is set on the number of cycles to consider
if args.limit_n_cycles:
if i >= args.limit_n_cycles * args.n_models_cycle:
break
# only add current model for selected indexes
if args.limit_n_samples_per_cycle:
# select index (at regular interval, always including the last)
max_index = args.n_models_cycle-1
if args.method_samples_per_cycle == 'interval':
indexes_to_keep = [int(x.left) for x in pd.interval_range(start=0, end=max_index, periods=args.limit_n_samples_per_cycle-1)] + [max_index]
elif args.method_samples_per_cycle == 'true_interval':
indexes_to_keep = [int(x.left) for x in pd.interval_range(start=0, end=max_index+1, periods=args.limit_n_samples_per_cycle)]
elif args.method_samples_per_cycle == 'last':
indexes_to_keep = list(range(max_index - args.limit_n_samples_per_cycle+1, max_index+1))
elif args.method_samples_per_cycle == 'first':
indexes_to_keep = list(range(0, args.limit_n_samples_per_cycle))
else:
raise NotImplementedError('Method not supported.')
if (i % args.n_models_cycle) not in indexes_to_keep:
continue
paths_ensembles_lim[i_ens].append(path_model)
paths_ensembles = paths_ensembles_lim
if any([len(x) != len(paths_ensembles[0]) for x in paths_ensembles]):
raise NotImplementedError('All ensembles should have the same number of models.')
print(f'Ensembles of models used: {[len(x) for x in paths_ensembles]}')
# shuffle models
if args.shuffle:
for paths_models in paths_ensembles:
shuffle(paths_models)
# don't load unused models (if nb models > nb iters)
if args.n_iter:
max_nb_models_used = args.n_iter * args.n_ensemble
for i, paths_models in enumerate(paths_ensembles):
if len(paths_models) > max_nb_models_used:
paths_ensembles[i] = paths_models[:max_nb_models_used]
if len(args.dirs_models) > 1 and args.n_ensemble > 1:
raise ValueError('Attacking multiple ensembles doesn\'t support n-ensemble arg.')
# create nested list of models (ensemble > model)
# [ens1: [m1, m2, m3, m4], ens2: [m5, m6, m7, m8]]
ensemble_list = []
for i, path_model in enumerate(paths_ensembles[0]):
# if we have multiple MCMC chains, we ensemble
if len(paths_ensembles) > 1:
ensemble_list.append([x[i] for x in paths_ensembles])
else:
# if args.n_ensemble, we ensemble models from the same MCMC chain
if len(ensemble_list) == 0:
# avoid IndexError at first iteration
ensemble_list.append([path_model, ])
elif len(ensemble_list[-1]) >= args.n_ensemble:
ensemble_list.append([path_model, ])
else:
ensemble_list[-1].append(path_model)
# load each models and create ART classifier
ensemble_classifiers = [] # list of ART classifiers. Each one has the logits fused
list_ensemble_models = [] # nested list of torch models
for i, ensemble_path in enumerate(ensemble_list):
# only 1 model to attack
if len(ensemble_path) == 1:
model = guess_and_load_model(ensemble_path[0], data=data, load_as_ghost=args.ghost_attack, input_diversity=args.input_diversity, skip_gradient=args.skip_gradient_method, temperature=args.temperature)
classifier = load_classifier(model, data=data)
list_ensemble_models.append([model])
# if ensembling, store path_model to a list and build the ensembling model
else:
models_to_ensemble = []
for j, path_model in enumerate(ensemble_path):
# load next model and continue only if ensemble is done
models_to_ensemble.append(guess_and_load_model(path_model, data=data, load_as_ghost=args.ghost_attack, input_diversity=args.input_diversity, temperature=args.temperature, force_cpu=False))
classifier = load_classifier_ensemble(models_to_ensemble, data=data)
list_ensemble_models.append(models_to_ensemble)
ensemble_classifiers.append(classifier)
del classifier
# compute benign acc
if not args.skip_accuracy_computation:
acc_ens_prob, loss_ens_prob, predict_correct_ens = compute_accuracy_from_nested_list_models(list_ensemble=list_ensemble_models, X=X, y=y, data=data, export_predict=True)
print(f"Accuracy on ensemble benign test examples: {acc_ens_prob*100:.3f}% (loss: {loss_ens_prob:.3f}).")
# time code
if USE_CUDA:
torch.cuda.synchronize()
start_time = time.perf_counter()
# Build and run the selected attack.
# - 'PGD' uses our extended ART implementation (supports several surrogate ensembles,
#   translation invariance, momentum, gradient noise and per-iteration target eval).
# - 'CW' with Linf norm uses the ART Carlini-Linf attack.
# - every other attack comes from the torchattacks library and operates on a single
#   fused-logits model wrapping the whole nested ensemble.
if args.attack == 'PGD':
    attack = ExtendedProjectedGradientDescentPyTorch(
        estimators=ensemble_classifiers, targeted=False, norm=args.norm, eps=args.max_norm, eps_step=args.norm_step,
        max_iter=args.n_iter, num_random_init=args.n_random_init, batch_size=args.batch_size,
        translation_invariant=args.translation_invariant, momentum=args.momentum, grad_noise_std=args.grad_noise_std,
        models_target_dict={name: models_target[i] for i,name in enumerate(args.model_target_path)} if args.export_target_per_iter else None,
        freq_eval_target=args.export_target_per_iter,
        data=data
    )
    X_adv = attack.generate(x=X, y=y)
elif args.attack == 'CW' and args.norm == np.inf:
    ensemble_models = LightNestedEnsemble(list_models=list_ensemble_models, order=None)
    ensemble_classifier = load_classifier(ensemble_models, data=data)
    attack = CarliniLInfMethod(
        classifier=ensemble_classifier, targeted=False, eps=args.max_norm, max_iter=args.n_iter,
        batch_size=args.batch_size, learning_rate=0.01
    )
    X_adv = attack.generate(x=X, y=y)
else:
    # attacks from torchattacks
    ensemble_models = LightNestedEnsemble(list_models=list_ensemble_models, order=None)  # we take care of the order before
    norm_ta = f'L{args.norm}'  # torchattacks expects e.g. 'L2' / 'Linf'
    if args.attack == 'PGD_ta' and norm_ta == 'Linf':
        # one PGD instance per random restart, combined by MultiAttack
        attacks_list = [PGD(ensemble_models, eps=args.max_norm, alpha=args.norm_step, steps=args.n_iter, random_start=args.n_random_init > 0) for x in range(max(1, args.n_random_init))]
        attack = MultiAttack(attacks_list)
    elif args.attack == 'APGD':
        attack = APGD(ensemble_models, norm=norm_ta, eps=args.max_norm, steps=args.n_iter, n_restarts=args.n_random_init, loss='ce', seed=args.seed)
    elif args.attack == 'FAB':
        attack = FAB(ensemble_models, norm=norm_ta, eps=args.max_norm, steps=args.n_iter, n_restarts=args.n_random_init, seed=args.seed, n_classes=data.num_classes)
    elif args.attack == 'Square':
        attack = Square(ensemble_models, norm=norm_ta, eps=args.max_norm, n_queries=args.n_iter, n_restarts=1, loss='ce', seed=args.seed)
    elif args.attack == 'AutoAttack':
        attack = AutoAttack(ensemble_models, norm=norm_ta, eps=args.max_norm, n_classes=data.num_classes, seed=args.seed)
    elif args.attack == 'DeepFool':
        if args.norm != 2:
            raise ValueError('Only L2 norm supported for DeepFool attack')
        print('Warning: max-norm ignored for DeepFool attack!')
        attack = DeepFool(ensemble_models, steps=args.n_iter)
    elif args.attack == 'CW':
        if args.norm != 2:
            raise ValueError('Only L2 norm supported for CW attack')
        print('Warning: max-norm ignored for CW attack!')
        # grid of confidence constants combined by MultiAttack
        attacks_list = [CW(ensemble_models, c=c, steps=1000, lr=0.1, kappa=30) for c in [0.1, 1, 10, 100]]
        attack = MultiAttack(attacks_list)
    elif args.attack == 'OnePixel':
        print('Warning: norm ignored for OnePixel attack, max_norm used as nb pixels!')
        attack = OnePixel(ensemble_models, pixels=args.max_norm, steps=75, popsize=400, inf_batch=args.batch_size)
    else:
        raise NotImplementedError('Attack not implemented.')
    # torchattacks has no built-in batching here: craft adversarial examples batch by batch
    X_dataset = TensorDataset(torch.tensor(X).to(DEVICE), torch.tensor(y).to(DEVICE))
    X_loader = DataLoader(X_dataset, batch_size=args.batch_size, shuffle=False)
    X_adv = np.zeros((0,)+data.get_input_shape()[1:])
    for X_batch, y_batch in tqdm(X_loader, desc='Batch'):
        X_adv_batch = attack(X_batch, y_batch).detach().cpu().numpy()
        X_adv = np.vstack((X_adv, X_adv_batch))
# sanity check: every clean example must have exactly one adversarial counterpart
if X.shape != X_adv.shape:
    raise RuntimeError(f'X and X_adv do not have the same shape: {X.shape} ; {X_adv.shape}')
if USE_CUDA:
torch.cuda.synchronize()
end_time = time.perf_counter()
model_name_list = [guess_model(x) for x in args.dirs_models]
# print stats
if not args.skip_accuracy_computation:
acc_ens_prob_adv, loss_ens_prob_adv = compute_accuracy_from_nested_list_models(list_ensemble=list_ensemble_models, X=X_adv, y=y, data=data)
lpnorm = compute_norm(X_adv=X_adv, X=X, norm=args.norm)
print(
f"Surrogate stats after {args.n_iter} iters: Accuracy: {acc_ens_prob_adv * 100:.3f}%, Loss: {loss_ens_prob_adv:.3f} (from {loss_ens_prob:.3f}), "
f"L{args.norm}-norm: mean {lpnorm.mean():.5f} (min {lpnorm.min():.5f} max {lpnorm.max():.5f}), Nb examples: {X_adv.shape[0]}, "
f"Time: {(end_time - start_time) / 60:.3f} min")
if args.csv_export:
if not args.model_target_path:
raise ValueError('Target model should be specified to export CSV.')
for i, model_target in enumerate(models_target):
acc_target_adv, loss_target_adv, acc_target_adv_ensok = compute_accuracy_from_nested_list_models([[model_target,],], X=X_adv, y=y, data=data, export_mask=predict_correct_ens)
# transfer_rate_target = 1 - accuracy on adversarial examples predicted correctly both by the target and the surrogate
transfer_rate_target = 1 - acc_target_adv_ensok
nb_examples_transfer_rate = predict_correct_ens.sum().cpu().item()
acc_target_original, loss_target_original = compute_accuracy_from_nested_list_models([[model_target,],], X=X, y=y, data=data)
print(f'* On target: { args.model_target_path[i] }')
print(f" Attack success rate: {(1-acc_target_adv) * 100:.3f} % (transfer rate: {transfer_rate_target * 100:.3f}% on {nb_examples_transfer_rate} examples)")
print(f" Loss on target: {loss_target_adv:.3f} (vs. original {loss_target_original:.3f})")
dict_metrics = args.csv_key_val.copy() if args.csv_key_val else dict()
dict_metrics.update({
'model_target': f"{'defense_randomization/' if (args.translation_invariant or args.target_defense_randomization) else ''}{args.model_target_path[i]}",
'arch_target': guess_model(args.model_target_path[i]),
'model_surrogate': args.dirs_models[0],
'surrogate_type': guess_method(args.dirs_models[0]),
'surrogate_archs': '_'.join(model_name_list),
# 'surrogate_size_ensembles': len(paths_ensembles[0]), # nb models per arch
'surrogate_size_ensembles': args.n_models_cycle * args.limit_n_cycles if args.limit_n_cycles else len(paths_ensembles[0]), # nb models per arch
'norm_type': args.norm,
'norm_max': args.max_norm,
'norm_step': args.norm_step,
'n_iter': args.n_iter,
'n_ensemble': args.n_ensemble,
'n_random_init': args.n_random_init,
'momentum': args.momentum,
'shuffle': args.shuffle,
'ghost': args.ghost_attack,
'input_diversity': args.input_diversity,
'translation_invariant': args.translation_invariant,
#'adv_fail_rate': acc_target_adv, # X contains only correctly predicted examples
'adv_success_rate': 1-acc_target_adv,
'transfer_rate': transfer_rate_target,
'loss_adv': loss_target_adv,
'loss_original': loss_target_original,
'adv_norm_mean': lpnorm.mean(),
'adv_norm_min': lpnorm.min(),
'adv_norm_max': lpnorm.max(),
'limit_samples_cycle': args.limit_n_samples_per_cycle,
'limit_cycles': args.limit_n_cycles,
'surrogate_acc_original_ex': acc_ens_prob,
'surrogate_acc_adv_ex': acc_ens_prob_adv,
'surrogate_loss_original_ex': loss_ens_prob,
'surrogate_loss_adv_ex': loss_ens_prob_adv,
'target_acc_original_ex': acc_target_original,
'acc_original_ex': acc_ens_prob,
'nb_adv': X_adv.shape[0],
'nb_adv_transf_rate': nb_examples_transfer_rate, # different nb of examples to compute the transfer rate
'dataset': 'val' if args.validation else 'test',
'time': end_time - start_time,
'transferability_techniques': f"{'MI_' if args.momentum else ''}{'ghost_' if args.ghost_attack else ''}{'DI_' if args.input_diversity else ''}{'TI_' if args.translation_invariant else ''}{'SGM_' if args.skip_gradient_method else ''}",
'grad_noise_std': args.grad_noise_std,
'temperature': args.temperature,
'attack': args.attack,
'args': ' '.join(sys.argv[1:]),
})
df_metrics = pd.DataFrame([dict_metrics, ])
if args.export_target_per_iter:
stats_target_dict = attack.get_target_accuracy_per_iter(args.model_target_path[i])
# duplicate the df line to the number of iterations
df_metrics = pd.concat([df_metrics] * len(stats_target_dict['acc']), ignore_index=True)
df_metrics['n_iter'] = stats_target_dict['iter']
df_metrics['adv_fail_rate'] = stats_target_dict['acc']
df_metrics['adv_success_rate'] = 1 - df_metrics['adv_fail_rate']
df_metrics['loss_adv'] = stats_target_dict['loss']
# create dir and append one line to csv
os.makedirs(os.path.dirname(args.csv_export), exist_ok=True)
df_metrics.to_csv(args.csv_export, mode='a', header=not os.path.exists(args.csv_export), index=False)
| 24,661 | 61.753181 | 254 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/compute_accuracy.py | import argparse
import torch
import torch.nn.functional as F
import numpy as np
import pandas as pd
import os
import sys
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from utils.helpers import list_models, guess_and_load_model, guess_method
from utils.data import ImageNet
def nll(outputs, labels):
    """Total negative log-likelihood of the true labels under predicted probabilities.

    :param outputs: (N, num_classes) array of predicted class probabilities
    :param labels: (N,) array of true class indices (cast to int)
    :return: sum over examples of -log p(true class)
    """
    labels = labels.astype(int)
    # advanced indexing: probability assigned to the true class of each example
    idx = (np.arange(labels.size), labels)
    # clip exact zeros away so log() cannot return -inf / NaN on saturated predictions
    ps = np.clip(outputs[idx], 1e-12, None)
    return -np.sum(np.log(ps))
# parse args
parser = argparse.ArgumentParser(description="Compute the accuracy of an ensemble of models")
parser.add_argument("dir_models", help="Path to directory containing all the models file of the ensemble model")
parser.add_argument("--data-path", default=None, help="Path of data. Only supported for ImageNet.")
parser.add_argument("--batch-size", type=int, default=128, help="Batch size. Try a lower value if out of memory (especially for high values of --ensemble-inner).")
parser.add_argument("--num-workers", type=int, default=10)
parser.add_argument("--temperature", type=float, default=1, help="Apply temperature scaling.")
parser.add_argument("--validation", type=int, default=None, help="Compute on a validation dataset of provided size (subset of the train test).")
parser.add_argument("--seed", type=int, default=None, help="Random seed. Important to set with validation flag to have the same set.")
parser.add_argument("--csv-export", default=None, help="Path of CSV to export.")
args = parser.parse_args()
if not args.seed and args.validation:
raise ValueError('Provide random seed for validation set.')
data = ImageNet(batch_size=args.batch_size, path=args.data_path)
path_ensemble = list_models(args.dir_models)
model_ensemble = []
for i, filename in enumerate(path_ensemble):
model = guess_and_load_model(filename, data=data, load_as_ghost=False, input_diversity=False, skip_gradient=False)
model.eval()
model_ensemble.append(model)
num_classes = 1000
traindir = os.path.join(args.data_path, 'train')
valdir = os.path.join(args.data_path, 'val')
val_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
])
if args.validation:
# create a validation set from the train set
train_dataset = datasets.ImageFolder(traindir, val_transform)
# fix generator for reproducibility
dataset, _ = torch.utils.data.random_split(train_dataset, lengths=[args.validation, len(train_dataset) - args.validation],
generator=torch.Generator().manual_seed(args.seed))
else:
# use the imagenet val set as test set
dataset = datasets.ImageFolder(valdir, val_transform)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
# Accumulate ensemble-averaged softmax predictions over the whole dataset.
predictions = np.zeros((len(loader.dataset), num_classes))
targets = np.zeros(len(loader.dataset))
k = 0
for input, target in loader:
    input = input.cuda(non_blocking=True)
    batch_size = input.size(0)
    with torch.no_grad():
        for model in model_ensemble:
            # temperature-scale the logits before softmax (bug fix: the original
            # computed the scaled logits into an unused variable, so --temperature
            # was silently ignored)
            output = model(input) / args.temperature
            predictions[k:k + batch_size] += F.softmax(output, dim=1).cpu().numpy()
    targets[k:k + batch_size] = target.numpy()
    k += batch_size
# average the summed probabilities over the ensemble members
predictions = predictions / len(model_ensemble)
test_acc = np.mean(np.argmax(predictions, axis=1) == targets)
# mean negative log-likelihood of the true class under the averaged probabilities
test_nll = nll(predictions, targets) / predictions.shape[0]
# cross-entropy of probability outputs == mean NLL; F.cross_entropy expects logits
# tensors, so it cannot be applied to the numpy probability array directly (bug fix:
# the original call raised TypeError on numpy inputs)
test_ce = F.nll_loss(torch.log(torch.from_numpy(predictions).clamp_min(1e-12)),
                     torch.from_numpy(targets).long()).item()
print("--- VAL ---" if args.validation else "--- TEST ---")
print(f"Ensemble {args.dir_models} of {len(model_ensemble)} models")
print(" Accuracy:", test_acc)
print(" NLL:", test_nll)
print(" CE:", test_ce)
if args.csv_export:
    # append one row of metrics per run; write the header only when creating the file
    df_metrics = pd.DataFrame([{
        'dir_models': args.dir_models,
        'temperature': args.temperature,
        'dataset': 'val' if args.validation else 'test',
        'model_type': guess_method(args.dir_models),
        'accuracy': test_acc,
        'nll': test_nll,
        'nb_ex': len(loader.dataset),
        'n_models': len(model_ensemble),
        'args': ' '.join(sys.argv[1:]),
    },])
    # bug fix: os.makedirs('') raises FileNotFoundError when the export path
    # is a bare filename (no directory component)
    export_dir = os.path.dirname(args.csv_export)
    if export_dir:
        os.makedirs(export_dir, exist_ok=True)
    df_metrics.to_csv(args.csv_export, mode='a', header=not os.path.exists(args.csv_export), index=False)
| 4,542 | 37.82906 | 163 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/lgv/imagenet/analyse_weights_space.py | import pandas as pd
import random
import os
import argparse
from tqdm import tqdm
import numpy as np
import torch
from torchvision import models as tmodels
import torchvision.datasets as datasets
import torchvision.transforms as transforms
#from pyhessian import hessian
from utils.data import ImageNet
from utils.helpers import list_models, guess_and_load_model, guess_model
from utils.pca_weights import PcaWeights, model2vector, models2tensor, vector2model
# from utils.subspace_inference.posteriors import SWAG
# from utils.subspace_inference.posteriors.proj_model import SubspaceModel
# from utils.subspace_inference import utils, losses
from utils.subspace_inference.utils import save_checkpoint
def parse_args():
    """Parse command-line arguments for the weight-space analysis experiments.

    :return: argparse.Namespace with the parsed arguments
    """
    parser = argparse.ArgumentParser(description="Analyse weights space: PCA and shift subspace XPs")
    parser.add_argument("dir_models_pca", help="Path to directory containing all the models file of the ensemble model")
    parser.add_argument("path_original_model", help="Path to original model")
    parser.add_argument("--xp", choices=['PCA_projection', 'translate', 'hessian'], default='PCA_projection')
    parser.add_argument("--pca-rank", type=int, default=5)
    parser.add_argument("--export-dir", default=None, help="If set, export projected models on the first --export-ranks components")
    # help-text fix: a separating space was missing between the two concatenated
    # string literals, and "muliple"/"recurvively" were typos
    parser.add_argument("--export-ranks", nargs='*', type=int, default=[],
                        help="If set, export projected models on the first --export-ranks components. "
                             "If multiple values are provided, export values recursively on different subfolder. Must be > 0.")
    parser.add_argument("--update-bn", action='store_true', help="Update BN after projection to original space")
    parser.add_argument("--alpha-translate", type=float, default=1., help="Multiply deviations by this constant.")
    parser.add_argument("--dir-models-translate", default=None,
                        help="Path to directory containing the models from "
                             "another local maximum, to which we translate the "
                             "dir_models_pca")
    parser.add_argument("--limit-n-export-models", type=int, default=None, help="Limit the number of exported model by randomly sampling them. Default: None, no limit.")
    parser.add_argument("--data-path", default=None, help="Path of data. Only supported for ImageNet.")
    parser.add_argument("--batch-size", type=int, default=128, help="Batch size. Try a lower value if out of memory (especially for high values of --ensemble-inner).")
    parser.add_argument("--num-workers", type=int, default=10)
    parser.add_argument("--seed", type=int, default=None, help="Random seed passed to PCA.")
    return parser.parse_args()
def main(args):
np.random.seed(args.seed)
data = ImageNet(batch_size=args.batch_size, path=args.data_path)
traindir = os.path.join(args.data_path, 'train')
valdir = os.path.join(args.data_path, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
loaders = {'train': train_loader, 'test': val_loader}
paths_pca = list_models(args.dir_models_pca)
models_pca = []
for i, filename in enumerate(paths_pca):
model = guess_and_load_model(filename, data=data, load_as_ghost=False, input_diversity=False, skip_gradient=False)
models_pca.append(model)
model_original = guess_and_load_model(args.path_original_model, data=data, load_as_ghost=False, input_diversity=False, skip_gradient=False)
model_cfg = getattr(tmodels, guess_model(paths_pca[0]))
# build a PCA of the target models
# swag_model = SWAG(model_cfg,
# subspace_type='pca',
# subspace_kwargs={
# 'max_rank': len(models_pca), # collect all the models
# 'pca_rank': args.pca_rank,
# },
# num_classes=data.num_classes)
# swag_model.cuda()
# # emulate model collection
# for model in models_pca:
# swag_model.collect_model(model)
#
# print('Setting SWA solution (as subspace shift vector)')
# swag_model.set_swa()
# utils.bn_update(loaders["train"], swag_model, verbose=True, subset=0.1)
# print(f" Metrics on test-set: {utils.eval(loaders['test'], swag_model, losses.cross_entropy)}")
# print('Building subspace')
# mean, variance, cov_factor = swag_model.get_space()
# print(np.linalg.norm(cov_factor, axis=1))
# subspace = SubspaceModel(mean, cov_factor)
if args.xp == 'hessian':
raise NotImplementedError('XP not yet implemented')
if args.xp == 'PCA_projection':
# build a PCA of the target models
pca = PcaWeights(model_cfg, rank=args.pca_rank, train_loader=loaders['train'], seed=args.seed)
pca.fit(models_pca)
# save the origin as SWA (0 rank)
Z0 = np.zeros((1, args.pca_rank))
model_swa = pca.inverse_transform(Z0, update_bn=args.update_bn)[0]
# analyse PCA
print(f"Explained variance ratio: {pca.pca.explained_variance_ratio_}")
total_cum_var_ratio = np.cumsum(pca.pca.explained_variance_ratio_)
print(f"Total cumulated explained variance: {total_cum_var_ratio}")
# print(f"Explained variance: {pca.pca.explained_variance_}")
print(f"Singular values: {pca.pca.singular_values_}")
df_metrics_pca = pd.DataFrame({
'dir_models': args.dir_models_pca,
'dim': list(range(args.pca_rank)),
'expl_var': pca.pca.explained_variance_ratio_,
'totcum_expl_var': total_cum_var_ratio,
'singular_values': pca.pca.singular_values_,
})
# project parameters into subspace, and into original space
Z = pca.transform(models=models_pca)
# original model
Z_original = pca.transform(models=[model_original])
Z_all = np.concatenate((Z0, Z_original, Z), axis=0)
labels_all = ['SWA'] + ['Original'] + ['Collected models'] * len(Z)
# viz in comet:
# experiment.log_embedding(
# vectors=Z_all,
# labels=labels_all,
# title="Collected models (PCA)"
# )
# extract models
print('Projecting models...')
model_original_proj = pca.inverse_transform(Z_original, update_bn=args.update_bn)[0]
if args.export_dir:
os.makedirs(args.export_dir, exist_ok=True)
df_metrics_pca.to_csv(os.path.join(args.export_dir, 'metrics_pca.csv'), index=False)
# save projected models
export_dir = os.path.join(args.export_dir, 'dims_0')
save_checkpoint(export_dir, name='model_swa', state_dict=model_swa.state_dict())
export_dir = os.path.join(args.export_dir, f'original_proj')
save_checkpoint(export_dir, name='model_original_proj', state_dict=model_original_proj.state_dict())
for i in tqdm(args.export_ranks, desc="Export dims"):
Z_proj = Z.copy()
# i between 1 and pca_rank+1
Z_proj[:, i:] = 0 # project to the first i components
models_pca_proj = pca.inverse_transform(Z_proj, update_bn=args.update_bn)
export_dir = os.path.join(args.export_dir, f'dims_{i}')
for j, model in enumerate(models_pca_proj):
save_checkpoint(export_dir, name='models_pca_proj', sample=j, state_dict=model.state_dict())
if args.xp == 'translate':
if not args.dir_models_translate:
# or not args.path_original_model_translate
raise ValueError('dir_models_translate should de provided')
print('Translating to new local minimum...')
w_original = model2vector(model_original)
# compute first SWA
W = models2tensor(models_pca)
w_swa = torch.mean(W, 0)
#w_swa = model2vector(model_swa) # can be check with: pca.transform(W=torch.reshape(w_swa, (1, w_swa.shape[0])))
# load other collected models
paths_new_models = list_models(args.dir_models_translate)
new_models = []
for i, filename in enumerate(paths_new_models):
model = guess_and_load_model(filename, data=data, load_as_ghost=False, input_diversity=False,
skip_gradient=False)
new_models.append(model)
W_new = models2tensor(new_models)
w_new_swa = torch.mean(W_new, 0)
# update BN and save new SWA
# model_swa_new = vector2model(w_new_swa, model_cfg, update_bn=args.update_bn, train_loader=train_loader)
# export_dir = os.path.join(args.export_dir, 'SWA_new')
# save_checkpoint(export_dir, name='model_new_swa', state_dict=model_swa_new.state_dict())
# save another embedding with the new original model and the new collected models translated to the first SWA
# Z_new_original_translated = pca.transform(W=torch.reshape(w_new_original - w_new_swa + w_swa, (1, w_new_swa.shape[0])))
# Z_new_translated = pca.transform(W=W_new - w_new_swa + w_swa)
# Z_all = np.concatenate((Z_all, Z_new_original_translated, Z_new_translated), axis=0)
# print(Z_new_translated.shape)
# labels_all = labels_all + ['Original model 2 translated'] + ['Collected models 2'] * len(new_models)
# experiment.log_embedding(
# vectors=Z_all,
# labels=labels_all,
# title="Collected models (PCA) + translated new models"
# )
# for translation_type in range(1, 4):
# if translation_type == 1:
# print(' ...Strategy 1: new_original_model + collected_models - original_model')
# elif translation_type == 2:
# print(' ...Strategy 2: new_original_model + collected_models - swa')
# elif translation_type == 3:
# print(' ...Strategy 3: new_swa + collected_models - swa')
# else:
# raise RuntimeError('Undefined translation_type')
dot_prod_two_subspace_basis = np.zeros((len(models_pca), W_new.shape[0]))
print('Translating: LGV-SWA_new + (LGV - LGV-SWA) ')
index_to_export = None
if args.limit_n_export_models:
print(f'...Limiting Export to {args.limit_n_export_models} randomly picked models')
random.seed(args.seed)
index_to_export = random.sample(range(0, len(models_pca)), args.limit_n_export_models)
for i, model_pca in enumerate(tqdm(models_pca, desc=f"Translation")):
w_pca = model2vector(model_pca)
# cosine similarity compute dot product of basis vectors of deviations
for j in range(W_new.shape[0]):
# dot product b/w unit vectors
v1 = w_pca - w_swa
v2 = W_new[j,:] - w_new_swa
if not (len(v1.shape) == len(v2.shape) == 1):
raise RuntimeError('Should compute cosine sim on vectors')
dot_prod_two_subspace_basis[i,j] = (torch.dot(v1, v2) / (torch.linalg.norm(v1, ord=2) * torch.linalg.norm(v2, ord=2))).cpu().numpy()
# angles_two_subspace_basis[i,j] = np.arccos(np.clip(torch.dot(v1_u, v2_u).cpu().numpy(), -1.0, 1.0))
# print(f'Dot product of {i} deviation LGV1 with all LGV2 deviations: {dot_prod_two_subspace_basis[i,:]}') # debug
if (index_to_export is None) or (i in index_to_export):
w_pca_trans = w_new_swa + args.alpha_translate * (w_pca - w_swa)
model_pca_trans = vector2model(w=w_pca_trans, model_cfg=model_cfg, update_bn=args.update_bn, train_loader=train_loader)
if args.export_dir:
# export_dir = os.path.join(args.export_dir, f'translation_{translation_type}')
export_dir = os.path.join(args.export_dir, f'translation_deviations_to_new_swa')
save_checkpoint(export_dir, name='models_translated', sample=i, state_dict=model_pca_trans.state_dict())
print(f'\nCosine similarity of all basis vectors from the two LGV:')
print(f' mean:{ dot_prod_two_subspace_basis.mean().item() }')
print(f' mean abs:{ np.abs(dot_prod_two_subspace_basis).mean().item() }')
print(f' min:{ dot_prod_two_subspace_basis.min().item() }')
print(f' max:{ dot_prod_two_subspace_basis.max().item() }')
if args.export_dir:
pd.DataFrame(dot_prod_two_subspace_basis).to_csv(os.path.join(export_dir, 'dot_prod_two_subspace_basis.csv'))
if __name__ == '__main__':
args = parse_args()
main(args) | 13,744 | 54.873984 | 179 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/lgv/imagenet/analyse_feature_space.py | """
Interpolate between adversarial examples from two surrogates in feature space
"""
import os
import sys
import torch
import math
import random
import argparse
import numpy as np
import pandas as pd
from math import sqrt
from tqdm import tqdm
from utils.n_sphere import convert_spherical, convert_rectangular
from utils.data import CIFAR10, ImageNet
from utils.helpers import guess_and_load_model, load_classifier, list_models, project_on_sphere, compute_accuracy_from_nested_list_models, compute_norm, flatten
from utils.attacks import ExtendedProjectedGradientDescentPyTorch
# parse args
parser = argparse.ArgumentParser(description="Interpolation of adversarial examples from two surrogate")
parser.add_argument("path_model_1", help="Path to model 1. Could be either a directory of models or a path to a model.")
parser.add_argument("path_model_2", help="Path to model 2. Could be either a directory of models or a path to a model.")
# FIX: help text was a copy-paste of --norm's help; describe the experiment choice instead.
parser.add_argument("--xp", choices=['interpolation', 'disk'], default='interpolation', help="Experiment to run: 'interpolation' between the two adversarial examples, or 'disk' evaluation of the loss in the plane they span. Default: interpolation")
parser.add_argument("--path_target", nargs='+', help="Path to target directory")
parser.add_argument("--norm", choices=['1', '2', 'inf'], default='2', help="Type of L-norm to use. Default: 2")
parser.add_argument("--max-norm", type=float, required=True, help="Max L-norm of the perturbation")
parser.add_argument("--csv-export", default=None, help="Path to CSV where to export data about target.")
# all xp
# FIX: help claimed the default was None while the actual default is 2000.
parser.add_argument("--n-examples", type=int, default=2000, help="Craft adv ex on a subset of test examples. If None, "
                                                                 "perturbate all the test set. If "
                                                                 "model-target-path is set, extract the subset from "
                                                                 "the examples correctly predicted by it. Default: 2000")
# xp interpolation
parser.add_argument("--n-interpolation", type=int, default=100, help="Number of adv ex to compute along the generated path (include the two original ones).")
parser.add_argument('--interpolation-method', choices=['linear', 'proj_sphere', 'polar', 'linear_hyperspherical_coord'], default='linear_hyperspherical_coord', help="Interpolation method between the 2 adv examples")
parser.add_argument("--alpha-range", type=float, default=1, help="Max alpha interpolation coef. alpha in [-alphamax, 1+alphamax]")
# xp disk
parser.add_argument("--n-points", type=int, default=500, help="Number of points to evaluate loss in the disk.")
parser.add_argument("--grid", choices=['grid', 'sunflower'], default='grid', help="Type of grid to generate points in the disk.")
# others
parser.add_argument("--data-path", default=None, help="Path of data. Only supported for ImageNet.")
parser.add_argument("--seed", type=int, default=None, help="Set random seed")
parser.add_argument("--batch-size", type=int, default=128, help="Batch size. Try a lower value if out of memory (especially for high values of --ensemble-inner).")
args = parser.parse_args()
# Map the CLI norm string to the numeric value expected by the attack/metrics code.
if args.norm == 'inf':
    args.norm = np.inf
else:
    args.norm = int(args.norm)
# FIX: `if args.seed:` silently skipped seeding when `--seed 0` was given;
# compare against None so 0 is a valid seed.
if args.seed is not None:
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
# Select the dataset based on the surrogate path (the directory layout is
# expected to contain the dataset name).
if 'CIFAR10' in args.path_model_1:
    data = CIFAR10(batch_size=args.batch_size)
elif 'ImageNet' in args.path_model_1:
    data = ImageNet(batch_size=args.batch_size, path=args.data_path)
else:
    raise ValueError('dataset not supported')
# Each surrogate path may be a single checkpoint or a directory of checkpoints.
path_model_list_1 = list_models(args.path_model_1)
path_model_list_2 = list_models(args.path_model_2)
list_model_1 = [guess_and_load_model(path, data=data) for path in path_model_list_1]
list_model_2 = [guess_and_load_model(path, data=data) for path in path_model_list_2]
# Wrap raw models into classifiers consumable by the attack (presumably
# ART-style wrappers -- see utils.helpers.load_classifier).
list_classifier_1 = [load_classifier(model, data=data) for model in list_model_1]
list_classifier_2 = [load_classifier(model, data=data) for model in list_model_2]
# Target models and a test subset they all predict correctly.
models_target = [guess_and_load_model(path_model=x, data=data) for x in args.path_target]
X, y = data.correctly_predicted_to_numpy(models=models_target, train=False,
                                         N=args.n_examples, seed=args.seed)
# Untargeted PGD on each surrogate (ensemble): 50 iterations, step = eps/10.
attack1 = ExtendedProjectedGradientDescentPyTorch(estimators=list_classifier_1, targeted=False, norm=args.norm,
                                                  eps=args.max_norm, eps_step=args.max_norm / 10., max_iter=50,
                                                  num_random_init=0,
                                                  batch_size=args.batch_size)
attack2 = ExtendedProjectedGradientDescentPyTorch(estimators=list_classifier_2, targeted=False, norm=args.norm,
                                                  eps=args.max_norm, eps_step=args.max_norm / 10., max_iter=50,
                                                  num_random_init=0,
                                                  batch_size=args.batch_size)
X_adv1 = attack1.generate(x=X, y=y)
X_adv2 = attack2.generate(x=X, y=y)
# Adversarial perturbations of each surrogate w.r.t. the clean inputs.
delta1 = X_adv1 - X
delta2 = X_adv2 - X
# Sanity check: accuracy/loss of each target on the clean subset (expected to be
# high since the subset was selected as correctly predicted by these targets).
print('Natural accuracy:')
for i, model_target in enumerate(models_target):
    acc_target_original, loss_target_original = compute_accuracy_from_nested_list_models([[model_target, ], ], X=X, y=y,
                                                                                         data=data)
    print(f' * {args.path_target[i]}: {acc_target_original*100}% ; loss: {loss_target_original}')
if args.xp == "interpolation":
    # Interpolation coefficients: alpha=0 gives surrogate 1's adv ex, alpha=1
    # surrogate 2's; values outside [0, 1] extrapolate the path.
    alpha_list = np.linspace(-args.alpha_range, 1+args.alpha_range, args.n_interpolation, endpoint=True).tolist()
    metrics_list = []
    if args.interpolation_method == 'linear_hyperspherical_coord':
        # get nb digits precision for n-sphere rounding
        precision_dtype = np.finfo(delta1.dtype).precision
        # perturbations in spherical coordinates
        delta1_sc = convert_spherical(flatten(delta1), digits=precision_dtype)
        delta2_sc = convert_spherical(flatten(delta2), digits=precision_dtype)
    for alpha in tqdm(alpha_list, desc="Interpolate adv ex"):
        if args.interpolation_method == 'linear':
            # Straight line between the two perturbations in input space.
            X_adv_interp = X + (1-alpha) * delta1 + alpha * delta2
        elif args.interpolation_method == 'proj_sphere':
            # Linear interpolation followed by projection back onto the Lp sphere.
            X_adv_interp = X + (1-alpha) * delta1 + alpha * delta2
            X_adv_interp = project_on_sphere(X=X, X_adv=X_adv_interp, data=data, size=args.max_norm, norm=args.norm)
        elif args.interpolation_method == 'polar':
            # https://www.inference.vc/high-dimensional-gaussian-distributions-are-soap-bubble/
            # valid under some condition
            # NOTE(review): sqrt() raises a math domain error for alpha outside
            # [0, 1], which happens whenever --alpha-range > 0 -- confirm this
            # method is only meant for --alpha-range 0.
            X_adv_interp = X + sqrt(1-alpha)*delta1 + sqrt(alpha)*delta2
        elif args.interpolation_method == 'linear_hyperspherical_coord':
            if args.norm != 2:
                raise ValueError('linear_hyperspherical_coord interpolation only guarantee for 2-norm')
            # the most appropriate interpolation
            # https://en.wikipedia.org/wiki/N-sphere#Spherical_coordinates
            # interpolate in spherical coordinates
            delta_inter_sc = (1-alpha) * delta1_sc + alpha * delta2_sc
            # if we extrapolate, the perturbation may be outside the L2 ball. We project it into the L2 sphere
            # the first dim is the l2 norm (radius of the sphere)
            examples_to_proj = (delta_inter_sc[:, 0] > args.max_norm)
            delta_inter_sc[examples_to_proj, 0] = args.max_norm
            delta_inter = convert_rectangular(delta_inter_sc, digits=precision_dtype)
            X_adv_interp = X + delta_inter.reshape(X.shape)
        else:
            raise ValueError('Interpolation method not supported')
        # check norm perturbation
        lpnorm = compute_norm(X_adv=X_adv_interp, X=X, norm=args.norm)
        if (lpnorm > args.max_norm + 1e-6).any():
            print(f'For alpha={alpha}, nb examples outside the Lp ball: {(lpnorm > args.max_norm + 1e-6).sum()}')
        # Evaluate both surrogates and every target on the interpolated adv examples.
        acc_surrogate1, loss_surrogate1 = compute_accuracy_from_nested_list_models([list_model_1, ], X=X_adv_interp, y=y, data=data)
        metrics_list.append({'model': args.path_model_1, 'type_model': 'surrogate', 'alpha': alpha, 'adv_accuracy': acc_surrogate1, 'adv_loss': loss_surrogate1})
        acc_surrogate2, loss_surrogate2 = compute_accuracy_from_nested_list_models([list_model_2, ], X=X_adv_interp, y=y, data=data)
        metrics_list.append({'model': args.path_model_2, 'type_model': 'surrogate', 'alpha': alpha, 'adv_accuracy': acc_surrogate2, 'adv_loss': loss_surrogate2})
        for i, model_target in enumerate(models_target):
            acc_target, loss_target = compute_accuracy_from_nested_list_models([[model_target, ], ], X=X_adv_interp, y=y, data=data)
            metrics_list.append({'model': args.path_target[i], 'type_model': 'target', 'alpha': alpha, 'adv_accuracy': acc_target, 'adv_loss': loss_target})
    # Collect per-alpha metrics together with the run configuration and export to CSV.
    df_metrics = pd.DataFrame(metrics_list)
    df_metrics['norm'] = args.norm
    df_metrics['max_norm'] = args.max_norm
    df_metrics['n_examples'] = args.n_examples
    df_metrics['n_interpolation'] = args.n_interpolation
    df_metrics['interpolation_method'] = args.interpolation_method
    df_metrics['seed'] = args.seed
    os.makedirs(os.path.dirname(args.csv_export), exist_ok=True)
    df_metrics.to_csv(args.csv_export, header=True, index=False)
    # for i, path_target in enumerate(args.path_target):
    #     df_ = df_metrics.query(f'model == "{path_target}"')
    #     experiment.log_curve(f"target_{path_target}", x=df_['alpha'], y=df_['adv_loss'])
    #
    # df_ = df_metrics.query(f'model == "{args.path_model_1}"')
    # experiment.log_curve(f"target_{args.path_model_1}", x=df_['alpha'], y=df_['adv_loss'])
    #
    # df_ = df_metrics.query(f'model == "{args.path_model_2}"')
    # experiment.log_curve(f"target_{args.path_model_2}", x=df_['alpha'], y=df_['adv_loss'])
elif args.xp == 'disk':
metrics_list = []
# define plane with 3 points: X, X_adv1, X_adv2 using Gram-Schmidt
# quote from SWA paper "Suppose we have three weight vectors w1, w2, w3.
# We set u = (w2−w1), v = (w3−w1)− <w3−w1, w2− w1>/||w2− w1||2 · (w2 − w1).
# Then the normalized vectors u_ = u/||u||, v_ = v/||v|| form an orthonormal basis in the plane containing w1, w2, w3."
u = flatten(delta1) # (X_adv1−X)
u_ = u / np.linalg.norm(u, axis=1, keepdims=True) # normalised vector
u__ = u / np.linalg.norm(u, axis=1, keepdims=True)**2 # tmp
v = flatten(delta2) - flatten(np.diagonal(np.dot(flatten(delta2), u__.T))) * u # delta2 = Xadv2-X
# check with np.isclose( flatten(X_adv2 - X)[0] - np.dot(flatten(X_adv2 - X)[0], u__[0]) * u[0] , v[0] )
v_ = v / np.linalg.norm(v, axis=1, keepdims=True) # normalised vector
if args.grid == 'grid':
nb_points_per_axis = math.ceil(math.sqrt(args.n_points))
x_values = np.linspace(-args.max_norm, args.max_norm, nb_points_per_axis, endpoint=True)
x1, x2 = np.meshgrid(x_values, x_values)
x1, x2 = x1.flatten(), x2.flatten()
elif args.grid == 'sunflower':
# then we generate N points in the disk of radius epsilon
# 2D sunflower spiral algorithm https://stackoverflow.com/a/44164075/6253027
indices = np.arange(0, args.n_points, dtype=float) + 0.5
r = np.sqrt(indices / args.n_points) * args.max_norm # max_norm = epsilon
theta = np.pi * (1 + 5 ** 0.5) * indices
x1, x2 = r * np.cos(theta), r * np.sin(theta)
else:
raise ValueError("Wrong type of grid")
# save position of u and v
df_Xadv1 = pd.DataFrame({
'model': args.path_model_1,
'type_model': 'surrogate_1',
'x1': np.diagonal(np.dot(flatten(delta1), u_.T)),
'x2': np.diagonal(np.dot(flatten(delta1), v_.T)),
})
df_Xadv2 = pd.DataFrame({
'model': args.path_model_2,
'type_model': 'surrogate_2',
'x1': np.diagonal(np.dot(flatten(delta2), u_.T)),
'x2': np.diagonal(np.dot(flatten(delta2), v_.T)),
})
df_Xadv = df_Xadv1.append(df_Xadv2, ignore_index=True)
print(f'Position of Xadv1: {df_Xadv1["x1"].mean()}, {df_Xadv1["x2"].mean()}')
print(f'Position of Xadv2: {df_Xadv2["x1"].mean()}, {df_Xadv2["x2"].mean()}')
os.makedirs(os.path.dirname(args.csv_export), exist_ok=True)
path_csv_xadv = args.csv_export.replace('.csv', '__ref_xadv.csv')
df_Xadv.to_csv(path_csv_xadv, header=True, index=False)
# and generate the examples to evaluate
# "A point P with coordinates(x, y) in the plane would then be given by P = w1+x·u_ + y·v_"
# for each point in disk
for i in tqdm(range(args.n_points), desc="Predicting points"):
X_disk = X + (x1[i] * u_ + x2[i] * v_).reshape(X.shape)
if args.grid != 'grid':
lpnorm = compute_norm(X_adv=X_disk, X=X, norm=args.norm)
if (lpnorm > args.max_norm + 1e-6).any():
print(f'At point #{i}, nb examples outside the Lp ball: {(lpnorm > args.max_norm + 1e-6).sum()}')
acc_surrogate1, loss_surrogate1 = compute_accuracy_from_nested_list_models([list_model_1, ], X=X_disk,
y=y, data=data)
metrics_list.append(
{'model': args.path_model_1, 'type_model': 'surrogate', 'x1': x1[i], 'x2': x2[i], 'adv_accuracy': acc_surrogate1,
'adv_loss': loss_surrogate1})
acc_surrogate2, loss_surrogate2 = compute_accuracy_from_nested_list_models([list_model_2, ], X=X_disk,
y=y, data=data)
metrics_list.append(
{'model': args.path_model_2, 'type_model': 'surrogate', 'x1': x1[i], 'x2': x2[i], 'adv_accuracy': acc_surrogate2,
'adv_loss': loss_surrogate2})
for j, model_target in enumerate(models_target):
acc_target, loss_target = compute_accuracy_from_nested_list_models([[model_target, ], ], X=X_disk,
y=y, data=data)
metrics_list.append(
{'model': args.path_target[j], 'type_model': 'target', 'x1': x1[i], 'x2': x2[i], 'adv_accuracy': acc_target,
'adv_loss': loss_target})
df_metrics = pd.DataFrame(metrics_list)
df_metrics['norm'] = args.norm
df_metrics['max_norm'] = args.max_norm
df_metrics['n_examples'] = args.n_examples
df_metrics['n_points'] = args.n_points
df_metrics['seed'] = args.seed
df_metrics['xp'] = args.xp
df_metrics.to_csv(args.csv_export, header=True, index=False)
else:
raise ValueError("Wrong XP id")
| 14,705 | 54.91635 | 215 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/lgv/imagenet/generate_parametric_path.py | import os
import argparse
from tqdm import tqdm
import numpy as np
import torch
from torchvision import models as tmodels
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from utils.data import ImageNet
from utils.helpers import guess_and_load_model, guess_model
from utils.pca_weights import model2vector, models2tensor, vector2model
from utils.subspace_inference.utils import save_checkpoint
def parse_args(argv=None):
    """Parse command-line options for the interpolated-models export script.

    Args:
        argv: optional list of argument strings to parse instead of
            ``sys.argv[1:]`` (useful for testing). ``None`` keeps the original
            behaviour of reading the process arguments.

    Returns:
        argparse.Namespace with all options.
    """
    parser = argparse.ArgumentParser(description="Export models along a path connecting two models")
    parser.add_argument("path_model_1", help="Path to first model")
    parser.add_argument("path_model_2", help="Path to 2nd model")
    parser.add_argument("--names-models", nargs=2, metavar=('name1', 'name2'), help="Names of the two models")
    parser.add_argument("--alphas", default=None, nargs='*', type=float, help="Overwrite default list of alphas. If specified, n-models is ignored.")
    parser.add_argument("--n-models", default=100, type=int, help="Number of models to export")
    parser.add_argument("--export-dir", default=None, help="Dir to export models and CSV")
    parser.add_argument("--update-bn", action='store_true', help="Update BN of produced models")
    parser.add_argument("--data-path", default=None, help="Path of data. Only supported for ImageNet.")
    parser.add_argument("--batch-size", type=int, default=128, help="Batch size. Try a lower value if out of memory (especially for high values of --ensemble-inner).")
    parser.add_argument("--num-workers", type=int, default=10)
    parser.add_argument("--seed", type=int, default=None, help="Random seed passed to PCA.")
    args = parser.parse_args(argv)
    return args
def main(args):
    """Export models interpolated (and extrapolated) in weight space between
    two checkpoints, optionally refreshing their BatchNorm statistics on the
    training set before saving.

    Args:
        args: parsed namespace from ``parse_args()``.
    """
    np.random.seed(args.seed)
    data = ImageNet(batch_size=args.batch_size, path=args.data_path)
    traindir = os.path.join(args.data_path, 'train')
    valdir = os.path.join(args.data_path, 'val')
    # Standard torchvision ImageNet normalization.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.num_workers, pin_memory=True)
    loaders = {'train': train_loader, 'test': val_loader}
    # Load the two endpoint models and flatten their parameters into 1-D vectors.
    model1 = guess_and_load_model(args.path_model_1, data=data, load_as_ghost=False, input_diversity=False, skip_gradient=False)
    model2 = guess_and_load_model(args.path_model_2, data=data, load_as_ghost=False, input_diversity=False, skip_gradient=False)
    # Architecture constructor inferred from the first checkpoint's path.
    model_cfg = getattr(tmodels, guess_model(args.path_model_1))
    theta1 = model2vector(model1)
    theta2 = model2vector(model2)
    # NOTE(review): the alpha range is hardcoded here and the args-driven
    # version is commented out -- confirm [-1, 2] is the intended range.
    # alpha_min, alpha_max = args.alpha_range
    alpha_min, alpha_max = -1, 2 # hardcoded to avoid corner cases
    if args.alphas:
        alpha_list = list(args.alphas)
    else:
        alpha_list = np.linspace(alpha_min, alpha_max, args.n_models, endpoint=True).tolist()
    # Always include the two endpoints; linspace may not hit 0.0/1.0 exactly
    # because of float rounding, hence the explicit membership checks.
    if 0. not in alpha_list:
        alpha_list += [0.]
    if 1. not in alpha_list:
        alpha_list += [1.]
    print(f'Start exporting the following alpha values: {["{0:0.2f}".format(i) for i in alpha_list]}')
    for alpha in tqdm(alpha_list, desc="Export interpolated models"):
        # theta(alpha) = theta1 + alpha * (theta2 - theta1); alpha outside [0, 1]
        # extrapolates beyond the segment.
        theta_alpha = torch.lerp(theta1, theta2, alpha).detach().clone()
        model_alpha = vector2model(theta_alpha, model_cfg, update_bn=args.update_bn, train_loader=loaders['train'])
        export_dir = os.path.join(args.export_dir, f'alpha_{alpha}')
        save_checkpoint(export_dir, name='model_interpolated', sample=alpha, state_dict=model_alpha.state_dict())
if __name__ == '__main__':
args = parse_args()
main(args) | 4,285 | 46.622222 | 167 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/lgv/imagenet/train_swag_imagenet.py | import argparse
import os
import random
import sys
import time
import tabulate
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision.models
import timm
from utils.swag import data
from utils.subspace_inference import utils, losses
#from utils.swag.posteriors import SWAG
from utils.subspace_inference.posteriors import SWAG
parser = argparse.ArgumentParser(description="SGD/SWA training")
parser.add_argument(
"--dir",
type=str,
default=None,
required=True,
help="training directory (default: None)",
)
parser.add_argument(
"--data_path",
type=str,
default=None,
required=True,
metavar="PATH",
help="path to datasets location (default: None)",
)
parser.add_argument(
"--batch_size",
type=int,
default=256,
metavar="N",
help="input batch size (default: 256)",
)
parser.add_argument(
"--num_workers",
type=int,
default=4,
metavar="N",
help="number of workers (default: 4)",
)
parser.add_argument(
"--model",
type=str,
default=None,
required=True,
metavar="MODEL",
help="model name (default: None)",
)
parser.add_argument(
"--pretrained",
action="store_true",
help="pretrained model usage flag (default: off)",
)
parser.add_argument(
"--pretrained_ckpt",
type=str,
default=None,
help="pretrained behavior from model checkpoint (default: off)",
)
parser.add_argument(
"--parallel", action="store_true", help="data parallel model switch (default: off)"
)
parser.add_argument(
"--resume",
type=str,
default=None,
metavar="CKPT",
help="checkpoint to resume training from (default: None). Should be trained on torchvision data normalization.",
)
parser.add_argument(
"--epochs",
type=int,
default=5,
metavar="N",
help="number of epochs to train (default: 5)",
)
parser.add_argument(
"--save_freq", type=int, default=1, metavar="N", help="save frequency (default: 1)"
)
parser.add_argument(
"--eval_freq",
type=int,
default=1,
metavar="N",
help="evaluation frequency (default: 1)",
)
parser.add_argument(
"--eval_freq_swa",
type=int,
default=1,
metavar="N",
help="evaluation frequency of SWA solution, need BN update (default: 1)",
)
parser.add_argument(
"--eval_start",
action='store_true',
help="evaluation of the initial model (default: deactivated)",
)
parser.add_argument(
"--lr_init",
type=float,
default=0.01,
metavar="LR",
help="initial learning rate (default: 0.01)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="SGD momentum (default: 0.9)",
)
parser.add_argument(
"--wd", type=float, default=1e-4, help="weight decay (default: 1e-4)"
)
parser.add_argument("--swa", action="store_true", help="swa usage flag (default: off)")
parser.add_argument(
"--swa_cpu", action="store_true", help="store swag on cpu (default: off)"
)
parser.add_argument(
"--swa_start",
type=float,
default=161,
metavar="N",
help="SWA start epoch number (default: 161)",
)
parser.add_argument(
"--swa_lr", type=float, default=0.02, metavar="LR", help="SWA LR (default: 0.02)"
)
parser.add_argument(
"--swa_freq",
type=int,
default=4,
metavar="N",
help="SWA model collection frequency/ num samples per epoch (default: 4)",
)
parser.add_argument('--max_num_models', type=int, default=20, help='maximum number of SWAG models to save')
parser.add_argument(
"--swa_resume",
type=str,
default=None,
metavar="CKPT",
help="checkpoint to restor SWA from (default: None)",
)
parser.add_argument(
"--loss",
type=str,
default="CE",
help="loss to use for training model (default: Cross-entropy)",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument("--no_schedule", action="store_true", help="store schedule")
# parser.add_argument('--inference', choices=['low_rank_gaussian', 'projected_sgd'], default='low_rank_gaussian')
parser.add_argument('--subspace', choices=['covariance', 'pca', 'freq_dir'], default='covariance')
# FIX: the help string was passed as `default=`, which made args.no_save_swag a
# truthy string by default, so `if not args.no_save_swag:` never saved the SWAG
# checkpoint. With `help=` the store_true flag correctly defaults to False.
parser.add_argument('--no-save-swag', action='store_true', help="Don't save swag checkpoint")
args = parser.parse_args()
# Select the compute device for training.
args.device = None
if torch.cuda.is_available():
    args.device = torch.device("cuda")
else:
    args.device = torch.device("cpu")
print("Preparing directory %s" % args.dir)
os.makedirs(args.dir, exist_ok=True)
# Record the exact launch command for reproducibility.
with open(os.path.join(args.dir, "command.sh"), "w") as f:
    f.write(" ".join(sys.argv))
    f.write("\n")
# torch.backends.cudnn.deterministic = True
# cuDNN autotuner: faster, but makes runs non-deterministic despite seeding below.
torch.backends.cudnn.benchmark = True
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
print("Loading ImageNet from %s" % (args.data_path))
loaders, num_classes = data.loaders(args.data_path, args.batch_size, args.num_workers)
if 'timm_' in args.model:
    # FIX: removed a leftover duplicate debug print (f-string with an empty
    # `{ '' }` placeholder) that logged the same message twice.
    print(f"Loading { 'pretrained ' if args.pretrained else ''}model from timm: {args.model}")
    # Strip the "timm_" prefix to obtain the timm architecture name.
    arch_ = args.model.replace('timm_', '')
    model = timm.create_model(arch_, pretrained=args.pretrained)
    config = timm.data.resolve_data_config({}, model=model)
    # The data pipeline uses torchvision's ImageNet normalization; refuse timm
    # models that expect different statistics.
    if config['mean'] != (0.485, 0.456, 0.406) or config['std'] != (0.229, 0.224, 0.225):
        raise NotImplementedError(f"This model requires non-default normalization values. got: {config['mean'], config['std']}")
    model_class = getattr(timm.models, arch_)
else:
    print("Using torchvision model %s" % args.model)
    model_class = getattr(torchvision.models, args.model)
print("Preparing model")
# Instantiate the final model from the resolved constructor (for timm this
# replaces the probe model created above).
model = model_class(pretrained=args.pretrained, num_classes=num_classes)
model.to(args.device)
if args.swa:
print("SWAG training")
swag_model = SWAG(model_class,
subspace_type=args.subspace,
subspace_kwargs={'max_rank': args.max_num_models},
num_classes=num_classes)
args.swa_device = "cpu" if args.swa_cpu else args.device
swag_model.to(args.swa_device)
if args.pretrained:
print(f"Starting from pretrained model")
model.to(args.swa_device)
swag_model.collect_model(model)
model.to(args.device)
if args.pretrained_ckpt:
print(f"Starting from chkpt: {args.pretrained_ckpt}")
checkpoint = torch.load(args.pretrained_ckpt)
try:
model.load_state_dict(checkpoint["state_dict"])
except RuntimeError:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
# TODO: also load optimizer if available
model.to(args.swa_device)
swag_model.collect_model(model)
model.to(args.device)
else:
print("SGD training")
def schedule(epoch):
    """Learning-rate schedule.

    Returns the constant SWA learning rate once SWA collection has started,
    otherwise the step-decayed base rate (x0.1 every 30 epochs). Reads the
    module-level ``args`` namespace.
    """
    in_swa_phase = args.swa and epoch >= args.swa_start
    if in_swa_phase:
        return args.swa_lr
    decay = 0.1 ** (epoch // 30)
    return args.lr_init * decay
# use a slightly modified loss function that allows input of model
if args.loss == "CE":
criterion = losses.cross_entropy
# criterion = F.cross_entropy
elif args.loss == "adv_CE":
criterion = losses.adversarial_cross_entropy
else:
raise NotImplementedError('criterion not implemented')
optimizer = torch.optim.SGD(
model.parameters(), lr=args.lr_init, momentum=args.momentum, weight_decay=args.wd
)
if args.parallel:
print("Using Data Parallel model")
model = torch.nn.parallel.DataParallel(model)
start_epoch = 0
if args.resume is not None:
print("Resume training from %s" % args.resume)
checkpoint = torch.load(args.resume)
start_epoch = checkpoint["epoch"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
if args.swa and args.swa_resume is not None:
checkpoint = torch.load(args.swa_resume)
swag_model.subspace.rank = torch.tensor(0)
swag_model.load_state_dict(checkpoint["state_dict"])
columns = ["ep", "lr", "tr_loss", "tr_acc", "te_loss", "te_acc", "time", "mem_usage"]
if args.swa:
columns = columns[:-2] + ["swa_te_loss", "swa_te_acc"] + columns[-2:]
swag_res = {"loss": None, "accuracy": None}
# deactivate original model save
# utils.save_checkpoint(
# args.dir,
# start_epoch,
# state_dict=model.state_dict(),
# optimizer=optimizer.state_dict(),
# )
if args.eval_start:
print("START CKPT EVAL TEST")
init_res = utils.eval(loaders["test"], model, criterion, verbose=True)
print(f"Loss: {init_res['loss']} ; Acc: {init_res['accuracy']}")
print("START CKPT EVAL TRAIN")
init_res = utils.eval(loaders["train"], model, criterion, verbose=True)
print(f"Loss: {init_res['loss']} ; Acc: {init_res['accuracy']}")
num_iterates = 0
for epoch in range(start_epoch, args.epochs):
time_ep = time.time()
if not args.no_schedule:
lr = schedule(epoch)
utils.adjust_learning_rate(optimizer, lr)
else:
lr = args.lr_init
print("EPOCH %d. TRAIN" % (epoch + 1))
if args.swa and (epoch + 1) > args.swa_start:
subset = 1.0 / args.swa_freq
for i in range(args.swa_freq):
print("PART %d/%d" % (i + 1, args.swa_freq))
train_res = utils.train_epoch(
loaders["train"],
model,
criterion,
optimizer,
subset=subset,
verbose=True,
)
num_iterates += 1
utils.save_checkpoint(
args.dir, num_iterates, name="iter", state_dict=model.state_dict()
)
model.to(args.swa_device)
swag_model.collect_model(model)
model.to(args.device)
else:
train_res = utils.train_epoch(
loaders["train"], model, criterion, optimizer, verbose=True
)
if (
epoch == 0
or epoch % args.eval_freq == args.eval_freq - 1
or epoch == args.epochs - 1
):
print("EPOCH %d. EVAL" % (epoch + 1))
test_res = utils.eval(loaders["test"], model, criterion, verbose=True)
else:
test_res = {"loss": None, "accuracy": None}
if args.swa and (epoch + 1) > args.swa_start:
if (
epoch == args.swa_start
or epoch % args.eval_freq_swa == args.eval_freq_swa - 1
or epoch == args.epochs - 1
):
swag_res = {"loss": None, "accuracy": None}
swag_model.to(args.device)
#swag_model.sample(0.0)
swag_model.set_swa()
print("EPOCH %d. SWA BN" % (epoch + 1))
utils.bn_update(loaders["train"], swag_model, verbose=True, subset=0.1)
print("EPOCH %d. SWA EVAL" % (epoch + 1))
swag_res = utils.eval(loaders["test"], swag_model, criterion, verbose=True)
swag_model.to(args.swa_device)
else:
swag_res = {"loss": None, "accuracy": None}
if (epoch + 1) % args.save_freq == 0:
if args.swa:
if not args.no_save_swag:
utils.save_checkpoint(
args.dir, epoch + 1, name="swag", state_dict=swag_model.state_dict()
)
else:
utils.save_checkpoint(
args.dir,
epoch + 1,
state_dict=model.state_dict(),
optimizer=optimizer.state_dict(),
)
time_ep = time.time() - time_ep
memory_usage = torch.cuda.memory_allocated() / (1024.0 ** 3)
values = [
epoch + 1,
lr,
train_res["loss"],
train_res["accuracy"],
test_res["loss"],
test_res["accuracy"],
time_ep,
memory_usage,
]
if args.swa:
values = values[:-2] + [swag_res["loss"], swag_res["accuracy"]] + values[-2:]
table = tabulate.tabulate([values], columns, tablefmt="simple", floatfmt="8.4f")
table = table.split("\n")
table = "\n".join([table[1]] + table)
print(table)
if args.epochs % args.save_freq != 0:
if args.swa:
if not args.no_save_swag:
utils.save_checkpoint(
args.dir, args.epochs, name="swag", state_dict=swag_model.state_dict()
)
else:
utils.save_checkpoint(args.dir, args.epochs, state_dict=model.state_dict())
| 12,515 | 29.378641 | 128 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/lgv/imagenet/generate_noisy_models.py | import pandas as pd
import os
from pathlib import Path
import argparse
import random
from tqdm import tqdm
import numpy as np
import torch
from torchvision import models as tmodels
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from utils.data import ImageNet
from utils.helpers import guess_and_load_model, guess_model, list_models
from utils.pca_weights import model2vector, models2tensor, vector2model
from utils.subspace_inference.utils import save_checkpoint
def parse_args():
    """Build and parse the command-line arguments of this export script.

    :return: argparse.Namespace with the experiment configuration
    """
    arg_parser = argparse.ArgumentParser(description="Export models with added randomness")
    # Positional: checkpoint of the reference model to perturb.
    arg_parser.add_argument("path_model", help="Path to model")
    arg_parser.add_argument(
        "--xp",
        default='gaussian_noise',
        choices=['gaussian_noise', 'random_1D', 'random_ensemble_equivalent', 'gaussian_subspace'],
        help="Name of the experiment to run. gaussian_noise to add Gaussian noise to weights (iid noise"
             " per model). random_1D to export n-models along a single random direction in weights. "
             "space. random_ensemble_equivalent to export for all models of path-ref-ensemble a model "
             "at the same L2 distance from path_model but in a random direction")
    # Experiment-specific knobs.
    arg_parser.add_argument("--std", default=1, type=float,
                            help="Standard deviation of parameter noise. Used only for 'gaussian_noise' XP")
    arg_parser.add_argument("--max-norm", default=1, type=float,
                            help="Max 2-norm in weights space to sample equality spaced n-models along the single random direction. Used only for 'random_1D' XP")
    arg_parser.add_argument("--path-ref-ensemble", default=None,
                            help="Ensemble of reference from which we generate a similar one but with random directions. Used only for 'random_ensemble_equivalent' XP")
    arg_parser.add_argument("--n-models", default=10, type=int,
                            help="Number of models to export")
    arg_parser.add_argument("--export-dir", default=None,
                            help="Dir to export models and CSV")
    arg_parser.add_argument("--update-bn", action='store_true',
                            help="Update BN of produced models")
    # Data / runtime options.
    arg_parser.add_argument("--data-path", default=None,
                            help="Path of data. Only supported for ImageNet.")
    arg_parser.add_argument("--batch-size", type=int, default=128,
                            help="Batch size.")
    arg_parser.add_argument("--num-workers", type=int, default=10)
    arg_parser.add_argument("--seed", type=int, default=None,
                            help="Random seed passed to PCA.")
    return arg_parser.parse_args()
def main(args):
    """Run the selected weight-perturbation experiment and export the resulting models.

    :param args: parsed namespace from :func:`parse_args`
    :raises ValueError: if an experiment that needs --path-ref-ensemble is run without it
    """
    # torch.manual_seed(None) raises a TypeError, so only seed when one was given
    # (the default of --seed is None).
    if args.seed is not None:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        random.seed(args.seed)
    # Fail fast: these two experiments dereference the reference ensemble below and
    # previously crashed with a NameError when --path-ref-ensemble was not provided.
    if args.xp in ('random_ensemble_equivalent', 'gaussian_subspace') and not args.path_ref_ensemble:
        raise ValueError(f"--path-ref-ensemble is required for the '{args.xp}' experiment")
    data = ImageNet(batch_size=args.batch_size, path=args.data_path)
    traindir = os.path.join(args.data_path, 'train')
    valdir = os.path.join(args.data_path, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # Standard ImageNet training pipeline; the train loader is only used to
    # refresh BN statistics of the perturbed models (see vector2model).
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.num_workers, pin_memory=True)
    loaders = {'train': train_loader, 'test': val_loader}
    model = guess_and_load_model(args.path_model, data=data, load_as_ghost=False, input_diversity=False, skip_gradient=False)
    if args.path_ref_ensemble:
        # Load the reference ensemble and flatten each member to a weight vector.
        paths_ref_ensemble = list_models(args.path_ref_ensemble)
        models_ref_ensemble = []
        for i, filename in enumerate(paths_ref_ensemble):
            model_tmp = guess_and_load_model(filename, data=data, load_as_ghost=False, input_diversity=False,
                                             skip_gradient=False)
            models_ref_ensemble.append(model_tmp)
        w_ref_ensemble_list = [model2vector(x) for x in models_ref_ensemble]
    model_cfg = getattr(tmodels, guess_model(args.path_model))
    # Flat weight vector of the reference model; all experiments perturb it.
    w = model2vector(model)
    if args.xp == 'gaussian_noise':
        # Independent isotropic Gaussian noise per exported model.
        for i in tqdm(range(args.n_models), desc="Export models"):
            w_random = w.detach().clone() + torch.randn(w.shape) * args.std
            model_noisy = vector2model(w_random, model_cfg, update_bn=args.update_bn, train_loader=loaders['train'])
            save_checkpoint(args.export_dir, name='model_noisy', sample=i, state_dict=model_noisy.state_dict())
    elif args.xp == 'random_1D':
        # Equally spaced points along a single random unit direction in weight space.
        rand_vect = torch.randn(w.shape)
        rand_vect = rand_vect / torch.linalg.norm(rand_vect, ord=2)
        norm_list = np.linspace(0, args.max_norm, num=args.n_models).tolist()
        for i, norm in enumerate(tqdm(norm_list, desc="Export models")):
            w_random = w.detach().clone() + rand_vect * norm
            model_noisy = vector2model(w_random, model_cfg, update_bn=args.update_bn, train_loader=loaders['train'])
            export_dir = os.path.join(args.export_dir, f'norm_{norm}')
            save_checkpoint(export_dir, name='model_noisy_1D', sample=i, state_dict=model_noisy.state_dict())
    elif args.xp == 'random_ensemble_equivalent':
        # for each model, we compute the distance b/w w_ref and w, generate a random direction (uniform from unit sphere)
        # add it to w, update BN, and export
        for i, w_ref in enumerate(w_ref_ensemble_list):
            filename_ref = Path(paths_ref_ensemble[i]).stem
            ref_dist = torch.linalg.norm(w_ref - w, ord=2)
            rand_vect = torch.randn(w.shape)
            rand_vect = rand_vect / torch.linalg.norm(rand_vect, ord=2)
            w_random = w.detach().clone() + rand_vect * ref_dist
            print(f"Export model {filename_ref}; ||w_ref - w||: {ref_dist}; ||w_random - w||: {torch.linalg.norm(w_random - w, ord=2)} ")
            model_noisy = vector2model(w_random, model_cfg, update_bn=args.update_bn, train_loader=loaders['train'])
            save_checkpoint(args.export_dir, name=filename_ref, state_dict=model_noisy.state_dict())
    elif args.xp == 'gaussian_subspace':
        # random gaussian in the LGV deviation subspace
        n_samples = len(models_ref_ensemble)
        W = models2tensor(models_ref_ensemble)
        D = (W-w).T  # deviation matrix
        for i in tqdm(range(args.n_models), desc="Export models"):
            w_random = w.detach().clone() + 1 / (n_samples-1)**0.5 * (D @ torch.randn(n_samples)) * args.std
            model_noisy = vector2model(w_random, model_cfg, update_bn=args.update_bn, train_loader=loaders['train'])
            save_checkpoint(args.export_dir, name='model_gaussian_subspace', sample=i, state_dict=model_noisy.state_dict())
if __name__ == '__main__':
args = parse_args()
main(args) | 7,194 | 56.103175 | 201 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/lgv/imagenet/hessian/compute_hessian.py | #*
# @file Different utility functions
# Copyright (c) Zhewei Yao, Amir Gholami
# All rights reserved.
# This file is part of PyHessian library.
#
# PyHessian is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyHessian is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyHessian. If not, see <http://www.gnu.org/licenses/>.
#*
from __future__ import print_function
import argparse
from collections import OrderedDict
from utils_hessian import *
from torchvision.models import resnet50
from pyhessian import hessian
# Settings
parser = argparse.ArgumentParser(description='PyTorch Example')
# Size of the mini-batches that are accumulated into one Hessian batch.
parser.add_argument(
    '--mini-hessian-batch-size',
    type=int,
    default=200,
    help='input batch size for mini-hessian batch (default: 200)')
parser.add_argument('--hessian-batch-size',
                    type=int,
                    default=200,
                    help='input batch size for hessian (default: 200)')
parser.add_argument('--seed',
                    type=int,
                    default=1,
                    help='random seed (default: 1)')
# NOTE(review): action='store_false' means args.cuda defaults to True and
# passing --cuda on the command line DISABLES the GPU — confirm this is intended.
parser.add_argument('--cuda',
                    action='store_false',
                    help='do we use gpu or not')
# Path to the trained checkpoint to analyse (required; checked below).
parser.add_argument('--resume',
                    type=str,
                    default='',
                    help='get the checkpoint')
# Dataset root; presumably contains 'train'/'validation' subfolders — verify.
parser.add_argument('--data-path',
                    type=str,
                    default='',
                    help='get the checkpoint')
args = parser.parse_args()
# set random seed to reproduce the work
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
# Echo the full configuration to the log.
for arg in vars(args):
    print(arg, getattr(args, arg))
# get dataset
train_loader, test_loader = getData(name='imagenet_without_dataaugmentation',
                                    train_bs=args.mini_hessian_batch_size,
                                    test_bs=1,
                                    path=args.data_path
                                    )
##############
# Get the hessian data
##############
# The Hessian batch must be an integer number of mini-batches.
assert (args.hessian_batch_size % args.mini_hessian_batch_size == 0)
#assert (50000 % args.hessian_batch_size == 0) # TODO
batch_num = args.hessian_batch_size // args.mini_hessian_batch_size
if batch_num == 1:
    # Single mini-batch: pass it as one (inputs, labels) tuple.
    for inputs, labels in train_loader:
        hessian_dataloader = (inputs, labels)
        break
else:
    # Several mini-batches: collect the first batch_num of them in a list.
    hessian_dataloader = []
    for i, (inputs, labels) in enumerate(train_loader):
        hessian_dataloader.append((inputs, labels))
        if i == batch_num - 1:
            break
# get model
model = resnet50()
if args.cuda:
    print('loading to gpu')
    model = model.cuda()
    #model = torch.nn.DataParallel(model)
criterion = nn.CrossEntropyLoss()  # label loss
###################
# Get model checkpoint, get saving folder
###################
if args.resume == '':
    raise Exception("please choose the trained model")
state_dict = torch.load(args.resume)
# Some trainers nest the weights under a 'state_dict' key; unwrap if so.
if 'state_dict' in state_dict:
    state_dict = state_dict['state_dict']
try:
    model.load_state_dict(state_dict)
except RuntimeError:
    # Checkpoint presumably saved from a DataParallel-wrapped model:
    # strip the leading 'module.' from every key and retry.
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    # load params
    model.load_state_dict(new_state_dict)
######################################################
# Begin the computation
######################################################
# turn model to eval mode
model.eval()
# Build the pyhessian object from either a single batch or a list of batches.
if batch_num == 1:
    hessian_comp = hessian(model,
                           criterion,
                           data=hessian_dataloader,
                           cuda=args.cuda)
else:
    hessian_comp = hessian(model,
                           criterion,
                           dataloader=hessian_dataloader,
                           cuda=args.cuda)
print(
    '********** finish data loading and begin Hessian computation **********')
# Leading Hessian eigenvalue(s) as estimated by pyhessian.
top_eigenvalues, _ = hessian_comp.eigenvalues()
print('\n***Top Eigenvalues: ', top_eigenvalues)
# Hessian trace estimates (a list; averaged when printed).
trace = hessian_comp.trace()
print('\n***Trace: ', np.mean(trace))
#density_eigen, density_weight = hessian_comp.density()
| 4,571 | 30.531034 | 78 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/lgv/imagenet/hessian/utils_hessian.py | #*
# @file Different utility functions
# Copyright (c) Zhewei Yao, Amir Gholami
# All rights reserved.
# This file is part of PyHessian library.
#
# PyHessian is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyHessian is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyHessian. If not, see <http://www.gnu.org/licenses/>.
#*
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
def getData(name='cifar10', train_bs=128, test_bs=1000, path=None):
    """
    Build the train/test dataloaders for a supported dataset.

    :param name: one of 'cifar10', 'cifar10_without_dataaugmentation',
        'imagenet_without_dataaugmentation'
    :param train_bs: training batch size
    :param test_bs: test batch size
    :param path: dataset root, used only for imagenet (must contain 'train'
        and 'validation' subdirectories)
    :return: (train_loader, test_loader)
    :raises ValueError: for an unknown ``name`` (previously an unknown name
        fell through every branch and crashed with UnboundLocalError)
    """
    if name == 'cifar10':
        # Standard CIFAR-10 augmentation (random crop + horizontal flip).
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
        trainset = datasets.CIFAR10(root='../data',
                                    train=True,
                                    download=True,
                                    transform=transform_train)
        train_loader = torch.utils.data.DataLoader(trainset,
                                                   batch_size=train_bs,
                                                   shuffle=True)
        testset = datasets.CIFAR10(root='../data',
                                   train=False,
                                   download=False,
                                   transform=transform_test)
        test_loader = torch.utils.data.DataLoader(testset,
                                                  batch_size=test_bs,
                                                  shuffle=False)
    elif name == 'cifar10_without_dataaugmentation':
        # Same normalization, but no augmentation on the training split
        # (useful for Hessian analysis on a fixed loss surface).
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
        trainset = datasets.CIFAR10(root='../data',
                                    train=True,
                                    download=True,
                                    transform=transform_train)
        train_loader = torch.utils.data.DataLoader(trainset,
                                                   batch_size=train_bs,
                                                   shuffle=True)
        testset = datasets.CIFAR10(root='../data',
                                   train=False,
                                   download=False,
                                   transform=transform_test)
        test_loader = torch.utils.data.DataLoader(testset,
                                                  batch_size=test_bs,
                                                  shuffle=False)
    elif name == 'imagenet_without_dataaugmentation':
        # Deterministic resize + center crop for both splits.
        transform_train = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406),
                                 (0.229, 0.224, 0.225)),
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406),
                                 (0.229, 0.224, 0.225)),
        ])
        traindir = os.path.join(path, 'train')
        trainset = datasets.ImageFolder(root=traindir,
                                        transform=transform_train)
        train_loader = torch.utils.data.DataLoader(trainset,
                                                   batch_size=train_bs,
                                                   shuffle=True)
        testdir = os.path.join(path, 'validation')
        testset = datasets.ImageFolder(root=testdir,
                                       transform=transform_test)
        test_loader = torch.utils.data.DataLoader(testset,
                                                  batch_size=test_bs,
                                                  shuffle=False)
    else:
        raise ValueError("Unsupported dataset name: {}".format(name))
    return train_loader, test_loader
def test(model, test_loader, cuda=True):
    """
    Evaluate ``model`` on ``test_loader`` and return top-1 accuracy.

    :param model: pytorch classification model
    :param test_loader: iterable of (data, target) batches
    :param cuda: move each batch to the GPU before the forward pass
    :return: fraction of correctly classified examples in [0, 1]
    """
    model.eval()
    correct = 0
    total_num = 0
    # Inference only: disabling autograd avoids building the graph and
    # substantially reduces the memory footprint of evaluation.
    with torch.no_grad():
        for data, target in test_loader:
            if cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            pred = output.data.max(
                1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
            total_num += len(data)
    print('testing_correct: ', correct / total_num, '\n')
    return correct / total_num
| 5,701 | 39.728571 | 75 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/modelsghostpreresnet.py | """
PreResNet model definition
ported from https://github.com/bearpaw/pytorch-classification/blob/master/models/cifar/preresnet.py
-----
Adapted to add skip connection erosion
Do not use to train a model. Only for inference. Train on regular PreResNet
"""
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import math
__all__ = ['PreResNet110', 'PreResNet164']
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with padding=1 and no bias term."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
class BasicBlock(nn.Module):
    """Pre-activation basic residual block with a stochastic ("ghost") skip connection.

    The forward pass is stochastic: the residual branch is rescaled by a fresh
    per-channel uniform random factor on every call, eroding the skip
    connection. Inference-only by design (see module docstring).
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """
        :param inplanes: number of input channels
        :param planes: number of output channels (expansion is 1 here)
        :param stride: stride of the first 3x3 convolution
        :param downsample: optional module applied to the input to match the output shape
        """
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.downsample = downsample
        self.stride = stride
        self.planes = planes
    def forward(self, x):
        # Pre-activation ordering: BN -> ReLU -> conv, twice.
        residual = x
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        # added for Ghost
        # random scalar with optimal value reported in the original implementation of Ghost Paper
        # https://github.com/LiYingwei/ghost-network/blob/master/nets/resnet_v2_50.py#L129
        random_range = 0.22
        # uniform distribution [1-random_range, 1+random_range]
        if residual.size(1) != self.planes * self.expansion or len(residual.size()) != 4:
            raise ValueError('wrong dimension')
        # Fresh per-channel random scaling of the skip connection on every forward pass.
        perturb_var = torch.rand((1, residual.size(1), 1, 1), requires_grad=False, device=residual.device) * random_range * 2 + (
            1 - random_range)
        out += residual * perturb_var
        return out
class Bottleneck(nn.Module):
    """Pre-activation bottleneck block (1x1 -> 3x3 -> 1x1) with a stochastic ("ghost") skip connection.

    Like :class:`BasicBlock`, the forward pass rescales the residual branch by
    a fresh per-channel uniform factor on every call (inference-only erosion
    of the skip connection).
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """
        :param inplanes: number of input channels
        :param planes: bottleneck width; the block outputs planes * 4 channels
        :param stride: stride of the middle 3x3 convolution
        :param downsample: optional module applied to the input to match the output shape
        """
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.planes = planes
    def forward(self, x):
        # Pre-activation ordering: BN -> ReLU -> conv at each of the three stages.
        residual = x
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn3(out)
        out = self.relu(out)
        out = self.conv3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        # added for Ghost
        # random scalar with optimal value reported in the original implementation of Ghost Paper
        # https://github.com/LiYingwei/ghost-network/blob/master/nets/resnet_v2_50.py#L129
        random_range = 0.22
        # uniform distribution [1-random_range, 1+random_range]
        if residual.size(1) != self.planes * self.expansion or len(residual.size()) != 4:
            raise ValueError('wrong dimension')
        # Fresh per-channel random scaling of the skip connection on every forward pass.
        perturb_var = torch.rand((1,residual.size(1),1,1), requires_grad=False, device=residual.device) * random_range * 2 + (1-random_range)
        out += residual * perturb_var
        return out
class PreResNet(nn.Module):
    """Pre-activation ResNet for 32x32 inputs (CIFAR-style), built from ghost blocks."""
    def __init__(self, num_classes=10, depth=110):
        """
        :param num_classes: number of output classes
        :param depth: total network depth; must satisfy depth = 9n+2
            (Bottleneck, used when depth >= 44) or depth = 6n+2 (BasicBlock)
        """
        super(PreResNet, self).__init__()
        # Choose the block type and the number of blocks per stage from the depth.
        if depth >= 44:
            assert (depth - 2) % 9 == 0, 'depth should be 9n+2'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
            n = (depth - 2) // 6
            block = BasicBlock
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
                               bias=False)
        # Three stages; spatial resolution is halved at stages 2 and 3.
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.bn = nn.BatchNorm2d(64 * block.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        # He-style (fan-out) init for convolutions; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of ``blocks`` residual blocks, downsampling if needed."""
        downsample = None
        # A 1x1 convolution matches shapes when stride or channel count changes.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
            )
        layers = list()
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.layer1(x)  # 32x32
        x = self.layer2(x)  # 16x16
        x = self.layer3(x)  # 8x8
        # Final pre-activation: BN + ReLU before pooling and the classifier.
        x = self.bn(x)
        x = self.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
class PreResNet110:
    """Config holder: PreResNet of depth 110 with its train/test transforms."""
    base = PreResNet
    args = list()
    kwargs = {'depth': 110}
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        # Normalization deliberately left commented out — presumably inputs are
        # kept in [0, 1] (e.g. for adversarial crafting); verify before enabling.
        #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
class PreResNet164:
    """Config holder: PreResNet of depth 164 with its train/test transforms."""
    base = PreResNet
    args = list()
    kwargs = {'depth': 164}
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        # Normalization deliberately left commented out — presumably inputs are
        # kept in [0, 1] (e.g. for adversarial crafting); verify before enabling.
        #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
| 6,999 | 32.653846 | 141 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/n_sphere.py | # N-sphere Convert to Spherical or Rectangular Coordination
# improve n-sphere package with numerical stability and basic vectorization: https://pypi.org/project/n-sphere/
import numpy as np
import math
import torch
SUPPORTED_TYPES = ['Tensor', 'ndarray', 'list']
def convert_spherical(input, digits=6, tol=1e-8):
    """
    Convert rectangular coordinates into n-spherical ones (r, phi_1, ..., phi_{n-1}).

    :param input: 1-D point or 2-D batch of points (Tensor, ndarray or list)
    :param digits: rounding applied in the 1-D code path
    :param tol: small constant added to denominators for numerical stability
    :return: container of the matching type with [r, angles...] per point
    """
    input_type = type(input).__name__
    if input_type not in ('Tensor', 'ndarray', 'list'):
        raise ValueError("Unsupported type")
    result = []
    if input_type == 'list':
        input = np.array(input)
    # over 2-dimension (current available 2-dimension)
    if (input.ndim == 1):
        r = 0
        for i in range(0, len(input)):
            r += input[i] * input[i]
        r = math.sqrt(r)
        convert = [r]
        for i in range(0, len(input) - 2):
            convert.append(round(math.acos(input[i] / (r + tol)), digits))
            r = math.sqrt(r * r - input[i] * input[i])
        # The sign of the LAST coordinate decides the quadrant of the final angle
        # (this matches the vectorized branch below, which masks on input[:, -1]).
        # The previous test on input[-2] broke round-tripping whenever the last
        # coordinate was negative.
        if input[-1] >= 0:
            convert.append(round(math.acos(input[-2] / (r + tol)), digits))
        else:
            convert.append(round(2 * math.pi - math.acos(input[-2] / (r + tol)), digits))
        result = convert
        if input_type == 'ndarray':
            result = np.array(result)
        elif input_type == 'Tensor':
            # ``convert`` holds python floats, so build the tensor directly;
            # torch.stack requires a list of tensors and used to crash here.
            result = torch.tensor(result)
    else:
        result = np.zeros(input.shape)
        # Cumulative sum of squares over the trailing coordinates.
        ssq_cum = np.sum(input[:,-2:] ** 2, axis=1)
        result[:, -1] = np.arccos(input[:,-2] / np.sqrt(ssq_cum + tol))
        # Reflect the last angle into [pi, 2*pi) when the last coordinate is negative.
        mask = input[:, -1] < 0
        result[mask, -1] = 2 * np.pi - result[mask, -1]
        for i in range(2, input.shape[1]):
            ssq_cum = ssq_cum + input[:, -i-1]**2
            result[:, -i] = np.arccos(input[:, -i-1] / np.sqrt(ssq_cum + tol))
        result[:,0] = np.sqrt(ssq_cum)
        if input_type == 'Tensor':
            result = torch.from_numpy(result)
    return result
def convert_rectangular(input, digits=6):
    """
    Convert n-spherical coordinates (r, phi_1, ..., phi_{n-1}) back to rectangular ones.

    :param input: 1-D point or 2-D batch of points (Tensor, ndarray or list)
    :param digits: rounding applied in the non-tensor 1-D code path
    :return: container of the matching type with rectangular coordinates
    """
    input_type = type(input).__name__
    if input_type not in ('Tensor', 'ndarray', 'list'):
        raise ValueError("Unsupported type")
    if input_type == 'list':
        input = np.array(input)
    if input.ndim == 1:
        result = []
        r = input[0]
        multi_sin = 1
        convert = []
        if input_type == 'Tensor':
            for i in range(1, len(input) - 1):
                convert.append(r * multi_sin * math.cos(input[i]))
                multi_sin *= math.sin(input[i])
            convert.append(r * multi_sin * math.cos(input[-1]))
            convert.append(r * multi_sin * math.sin(input[-1]))
            convert = np.array(convert)
            convert = torch.from_numpy(convert)
        else:
            for i in range(1, len(input) - 1):
                convert.append(round(r * multi_sin * math.cos(input[i]), digits))
                multi_sin *= math.sin(input[i])
            convert.append(round(r * multi_sin * math.cos(input[-1]), digits))
            convert.append(round(r * multi_sin * math.sin(input[-1]), digits))
            if input_type != 'list':
                convert = np.array(convert)
        result = convert
    else:
        # over 2-dimension
        result = np.zeros(input.shape)
        r = input[:, 0]
        multi_sin = np.zeros(input.shape[0]) + 1
        # x_k = r * sin(phi_1) * ... * sin(phi_{k-1}) * cos(phi_k)
        for i in range(1, input.shape[1] - 1):
            result[:, i - 1] = r * multi_sin * np.cos(input[:, i])
            multi_sin *= np.sin(input[:, i])
        # The last two coordinates share the final angle. Writing them after
        # the loop also fixes the two-column case (r, phi): the loop body never
        # executed for it and the function returned all zeros.
        if input.shape[1] >= 2:
            result[:, -2] = r * multi_sin * np.cos(input[:, -1])
            result[:, -1] = r * multi_sin * np.sin(input[:, -1])
        if input_type == 'Tensor':
            result = torch.from_numpy(result)
    return result
| 3,609 | 37.817204 | 111 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/pca_weights.py | import torch
from sklearn.decomposition import PCA
from utils.subspace_inference.utils import flatten, bn_update
def model2vector(model):
    """
    Flatten all parameters of a pytorch model into a single 1-D weight tensor.
    :param model: pytorch model
    :return: tensor of size (n_weights,)
    """
    params_on_cpu = [param.detach().cpu() for param in model.parameters()]
    return flatten(params_on_cpu)
def models2tensor(models):
    """
    Stack the flattened weights of several pytorch models into one 2-D tensor.
    :param models: non-empty list of pytorch models with identical architectures
    :return: tensor of size (n_models, n_weights)
    """
    # Raises IndexError on an empty list, matching the original contract.
    n_weights = sum(p.numel() for p in models[0].parameters())
    # Collect all rows first and concatenate once: the previous torch.cat
    # inside the loop re-copied the accumulated tensor on every iteration
    # (quadratic in the number of models).
    rows = [model2vector(model).view(1, n_weights) for model in models]
    return torch.cat(rows, dim=0)
def vector2model(w, model_cfg, update_bn=True, train_loader=None, **kwargs):
    """
    Instantiate a model of class ``model_cfg`` and load the flat weight vector into it.
    :param w: tensor of size (1, n_weights)
    :param model_cfg: model class
    :param update_bn: refresh the BatchNorm statistics of the produced model
    :param train_loader: data loader used to refresh the BN statistics
    :param kwargs: extra arguments forwarded to ``model_cfg``
    :return: pytorch model in eval mode, on the GPU
    """
    if update_bn and not train_loader:
        raise ValueError('train_loader must be provided with update_bn')
    weights = w.detach().clone()
    new_model = model_cfg(**kwargs).cuda()
    # Copy consecutive slices of the flat vector into each parameter tensor.
    cursor = 0
    for param in new_model.parameters():
        count = param.numel()
        chunk = weights[cursor:cursor + count]
        param.data.copy_(chunk.view(param.size()).to('cuda'))
        cursor += count
    if update_bn:
        bn_update(train_loader, new_model, verbose=False, subset=0.1)
    new_model.eval()
    return new_model
class PcaWeights:
    """PCA over the flattened weight vectors of a collection of pytorch models."""
    def __init__(self, model_cfg, rank=20, train_loader=None, seed=None):
        """
        PCA on pytorch models
        :param model_cfg: class of the models
        :param rank: number of PCA components
        :param train_loader: train loader to update BN of produced models
        :param seed: random seed forwarded to sklearn's PCA
        """
        self.model_cfg = model_cfg
        self.rank = rank
        self.train_loader = train_loader
        # sklearn PCA; 'auto' picks full or randomized SVD from the problem size.
        self.pca = PCA(n_components=self.rank, svd_solver='auto', random_state=seed)
    def fit(self, models):
        """
        Fit PCA to models
        :param models: list of pytorch model
        """
        num_models = len(models)
        if self.rank > 0.8*num_models:
            print('Randomized SVD might not be ideal for rank PCA > 80% number of models')
        W = models2tensor(models)
        # Sanity check: one row of flattened weights per model.
        if W.shape[0] != num_models:
            raise RuntimeError('Wrong dimension of W')
        W = W.numpy()
        self.pca.fit(W)
    def transform(self, models=None, W=None, components=None):
        """
        Transform a model into the PCA subspace
        :param models: list of pytorch model
        :param W: pytorch tensor of the weights. Ignored if models is specified.
        :param components: list of int corresponding to the components to keep. `None` keep all `rank` components
        :return: array of latent coordinates, one row per model
        """
        if not models and not torch.is_tensor(W):
            raise ValueError('models or W should be defined')
        if models:
            W = models2tensor(models)
            W = W.numpy()
        Z = self.pca.transform(W)
        # NOTE(review): a truthiness test means components=[] behaves like None
        # (keeps every component) — confirm that is intended.
        if components:
            Z = Z[:, components]
        return Z
    def inverse_transform(self, Z, update_bn=True, **kwargs):
        """
        Inverse transform from latent space to model
        :param Z: Component vector
        :param update_bn: Update BN layers of models
        :param kwargs: args passed to model class
        :return: list of models
        """
        W = self.pca.inverse_transform(Z)
        W = torch.from_numpy(W)
        new_models = []
        # Rebuild one pytorch model per latent row.
        for i in range(W.shape[0]):
            w = W[i, :]
            new_model = vector2model(w=w, model_cfg=self.model_cfg, update_bn=update_bn, train_loader=self.train_loader,
                                     **kwargs)
            new_models.append(new_model)
        return new_models
| 4,046 | 34.191304 | 120 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/optimizers.py | """
File adapted from https://github.com/JavierAntoran/Bayesian-Neural-Networks
"""
from torch.optim.optimizer import Optimizer, required
import numpy as np
import torch
class SGLD(Optimizer):
    """
    Stochastic Gradient Langevin Dynamics optimiser built on pytorch's SGD.

    The weight decay is parameterised by the standard deviation of a Gaussian
    prior on the weights: weight_decay = 1 / prior_sigma**2.
    """
    def __init__(self, params, lr=1e-2, prior_sigma=np.inf):
        weight_decay = 1 / (prior_sigma ** 2)
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        super(SGLD, self).__init__(params, dict(lr=lr, weight_decay=weight_decay))
    @torch.no_grad()
    def step(self):
        """Perform a single SGLD update on every parameter group."""
        for group in self.param_groups:
            decay = group['weight_decay']
            step_size = group['lr']
            for param in group['params']:
                if param.grad is None:
                    continue
                grad = param.grad.data
                if decay != 0:
                    grad = grad.add(param, alpha=decay)
                # Gaussian noise scaled so the injected term has variance lr.
                noise = param.data.new(param.data.size()).normal_(mean=0, std=1) / np.sqrt(step_size)
                param.data = param.data.add(0.5 * grad + noise, alpha=-step_size)
        return None
class pSGLD(Optimizer):
    """
    Preconditioned SGLD with an RMSprop-style diagonal preconditioner,
    following pytorch's rmsprop implementation.
    """
    def __init__(self, params, lr=1e-2, prior_sigma=np.inf, alpha=0.99, eps=1e-8):
        weight_decay = 1 / (prior_sigma ** 2)
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= alpha:
            raise ValueError("Invalid alpha value: {}".format(alpha))
        super(pSGLD, self).__init__(params, dict(lr=lr, weight_decay=weight_decay, alpha=alpha, eps=eps))
    @torch.no_grad()
    def step(self):
        """Perform a single preconditioned SGLD update on every parameter group."""
        for group in self.param_groups:
            decay = group['weight_decay']
            step_size = group['lr']
            smoothing = group['alpha']
            for param in group['params']:
                if param.grad is None:
                    continue
                grad = param.grad.data
                state = self.state[param]
                if len(state) == 0:
                    # Lazily create the running second-moment estimate.
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(param.data)
                square_avg = state['square_avg']
                state['step'] += 1
                if decay != 0:
                    grad = grad.add(param, alpha=decay)
                # square_avg <- alpha * square_avg + (1 - alpha) * grad^2
                square_avg.mul_(smoothing).addcmul_(grad, grad, value=1 - smoothing)
                precond = square_avg.sqrt().add_(group['eps'])
                noise = param.data.new(param.data.size()).normal_(mean=0, std=1) / np.sqrt(step_size)
                param.data = param.data.add(0.5 * grad.div_(precond) + noise / torch.sqrt(precond), alpha=-step_size)
        return None
| 3,430 | 30.190909 | 111 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/data.py | import os
import logging
import torch
import torchvision
import torchvision.datasets as datasets
import numpy as np
from torchvision import transforms
from .helpers import list_models, guess_and_load_model, DEVICE
def check_args(method):
    """Decorator for DataBase data-access methods.

    Raises ValueError when the wrapped method is called with validation=True
    while no validation set has been created on the instance.
    """
    from functools import wraps

    @wraps(method)  # preserve the wrapped method's name/docstring for introspection
    def inner(ref, **kwargs):
        if kwargs.get('validation', False) and not ref.valset:
            raise ValueError('Trying to call validation without creating it')
        return method(ref, **kwargs)
    return inner
class DataBase:
    # Dataset splits; presumably populated by subclasses before __init__ runs
    # (this base __init__ reads testloader/testset) — valset is created in
    # __init__ when a validation split is requested.
    trainset = valset = testset = None
    # Per-split torchvision transforms (expected from subclasses).
    transform_train = transform_test = None
    trainloader = None
    valloader = None
    testloader = None
    transform = None
    # Class names of the dataset (filled from testset in __init__).
    classes = None
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    def __init__(self, batch_size=64, num_workers=0, validation=None, normalize=False, seed=None):
        """
        Base class for data
        :param batch_size: Batch size for data loader.
        :param num_workers: Number of workers.
        :param validation: Split the training set into a training set and into a validation set.
        :param normalize: False, data in [0,1] (default) for adversarial crafting; True normalize data for training.
            NOTE(review): ``normalize`` is not referenced in this __init__ — presumably consumed by subclasses; verify.
        :param seed: Random seed.
        """
        # Determine the observed pixel value range by scanning the whole test
        # loader, unless a subclass already set both bounds.
        if hasattr(self, 'min_pixel_value') + hasattr(self, 'max_pixel_value') < 2:
            self.min_pixel_value = 1e8
            self.max_pixel_value = -1e8
            for images, _ in self.testloader:
                min_pixel = torch.min(images)
                max_pixel = torch.max(images)
                if min_pixel < self.min_pixel_value:
                    self.min_pixel_value = min_pixel
                if max_pixel > self.max_pixel_value:
                    self.max_pixel_value = max_pixel
        self.classes = self.testset.classes
        self.num_classes = len(self.classes)
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.validation = validation
        self.seed = seed
        if self.validation:
            if type(self.validation) != int:
                raise ValueError('validation should be a int of the size of the val set')
            # A seeded generator makes the random split reproducible.
            if self.seed is None:
                logging.warning('It is recommended to provided random seed to reproducibility')
                generator = None
            else:
                generator = torch.Generator().manual_seed(self.seed)
            # create validation set of provided size
            self.trainset, self.valset = torch.utils.data.random_split(self.trainset,
                                                           lengths=[len(self.trainset) - self.validation, self.validation],
                                                           generator=generator)
            self.trainloader.data = self.trainset # update dataset loader
            # NOTE(review): random_split returns Subset objects, which do not use a
            # `.transform` attribute — this assignment likely has no effect on the
            # images actually loaded from valset; verify.
            self.valset.transform = self.transform_test # set test transform to val dataset
            self.valloader = torch.utils.data.DataLoader(self.valset, batch_size=batch_size, pin_memory=self.use_cuda,
                                                         shuffle=False, num_workers=num_workers)
    def get_input_shape(self):
        # Shape of the raw training data array, e.g. (N, H, W, C).
        # NOTE(review): assumes ``trainset`` exposes a ``.data`` attribute; after
        # a validation split, ``trainset`` becomes a torch Subset which does
        # not — verify call sites.
        return tuple(self.trainset.data.shape)
@check_args
def to_numpy(self, train=False, validation=False, N=None, seed=None):
"""
Return dataset as numpy array
Becareful, data has to be able to be loaded into memory.
:param train: bool, train set
:param validation: bool, validation set. If train is False, test set.
:param N: int, max number of examples to import
:return: X, y: numpy arrays
"""
if train:
set = self.trainset
elif validation:
set = self.valset
else:
set = self.testset
if N is None:
N = len(set)
if seed:
torch.manual_seed(seed)
loader = torch.utils.data.DataLoader(set, batch_size=N, shuffle=(train or N < len(set)))
load_tmp = next(iter(loader))
X = load_tmp[0].numpy()
y = load_tmp[1].numpy()
return X, y
@check_args
def correctly_predicted_to_numpy(self, model=None, models=None, train=False, validation=False, N=None, seed=None):
"""
Return the examples correcty predicted by model in the dataset as numpy arrays
:param model: pytorch model
:param models: list of pytorch models
:param train: bool, train or test set
:param validation: bool, validation set. If train is False, test set.
:param N: int, max number of examples to import
:param seed: int, fix random seed for reproducibility and select same examples
:return: X, y: numpy arrays
"""
if not model and not models:
raise ValueError('model or models should be defined')
if model and models:
raise ValueError('model and models cannot be both defined')
if model:
models = [model, ]
if train:
set = self.trainset
elif validation:
set = self.valset
else:
set = self.testset
if N is None:
N = len(set)
if seed:
torch.manual_seed(seed)
loader = torch.utils.data.DataLoader(set, batch_size=self.batch_size, shuffle=(train or N < len(set)))
X = np.zeros((N,) + self.get_input_shape()[1:], dtype='float32')
y = np.zeros((N,), dtype='int')
nb_images_saved = 0
for images, labels in loader:
images_ = images.to(DEVICE)
idx_ok = torch.zeros(len(images))
for model in models:
outputs = model(images_)
_, predicted = torch.max(outputs.data, 1)
predicted = predicted.cpu()
idx_ok += predicted == labels
idx_ok = idx_ok == len(models)
images_ok = images[idx_ok,]
labels_ok = labels[idx_ok,]
nb_images_to_append = min(idx_ok.sum().item(), N-nb_images_saved)
X[nb_images_saved:(nb_images_saved+nb_images_to_append),] = images_ok[0:nb_images_to_append,].numpy()
y[nb_images_saved:nb_images_saved+nb_images_to_append] = labels_ok[0:nb_images_to_append].numpy()
nb_images_saved += nb_images_to_append
if nb_images_saved >= N:
break
X = X[0:nb_images_saved,]
y = y[0:nb_images_saved,]
if not (X.shape[0] == y.shape[0] <= N):
raise RuntimeError("Array shape unexpected")
if X.shape[0] < N < len(set):
logging.warning('Number of examples lower than requested')
return X, y
@check_args
def compute_accuracy(self, model, train=False, validation=False):
"""
Compute the accuracy on the test or train data
:param model: Pytorch NN
:param train: compute on the train set
:param train: compute on the validation set. If train is False, test set.
:return: float
"""
if train:
loader = self.trainloader
elif validation:
loader = self.valloader
else:
loader = self.testloader
model.eval()
correct = 0
total = 0
with torch.no_grad():
for batch in loader:
inputs, labels = batch[0].to(self.device, non_blocking=True), batch[1].to(self.device, non_blocking=True)
outputs = model(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
return correct / total
class MNIST(DataBase):
    """MNIST train/test loaders; optional mean/std normalization."""

    def __init__(self, batch_size, num_workers=0, path='data', validation=None, normalize=False, seed=None):
        steps = [transforms.ToTensor(), ]
        if normalize:
            steps.append(transforms.Normalize(mean=(0.1307,), std=(0.3081,)))
        transform = transforms.Compose(steps)
        # MNIST uses the same transform for train, test and the generic slot.
        self.transform_train = self.transform_test = self.transform = transform
        loader_kwargs = dict(batch_size=batch_size, num_workers=num_workers, pin_memory=self.use_cuda)
        self.trainset = torchvision.datasets.MNIST(root=path, train=True, download=True, transform=self.transform)
        self.trainloader = torch.utils.data.DataLoader(self.trainset, shuffle=True, **loader_kwargs)
        self.testset = torchvision.datasets.MNIST(root=path, train=False, download=True, transform=self.transform)
        self.testloader = torch.utils.data.DataLoader(self.testset, shuffle=False, **loader_kwargs)
        super().__init__(batch_size=batch_size, num_workers=num_workers, validation=validation, seed=seed)

    def get_input_shape(self):
        """Shape of a single batched example: (1, 1, 28, 28)."""
        return (1, 1, 28, 28)
class CIFAR10(DataBase):
    """CIFAR-10 loaders with random-crop/flip augmentation on the train split."""

    def __init__(self, batch_size, num_workers=0, path='data', validation=None, normalize=False, seed=None):
        if normalize:
            normalize_transform = transforms.Normalize(mean=(0.49139968, 0.48215841, 0.44653091),
                                                       std=(0.24703223, 0.24348513, 0.26158784))
        else:
            # identity normalization keeps data in [0, 1] for adversarial crafting
            normalize_transform = transforms.Normalize(mean=(0., 0., 0.),
                                                       std=(1., 1., 1.))
        self.transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize_transform,
        ])
        self.transform_test = transforms.Compose([
            transforms.ToTensor(),
            normalize_transform,
        ])
        loader_kwargs = dict(batch_size=batch_size, num_workers=num_workers, pin_memory=self.use_cuda)
        self.trainset = torchvision.datasets.CIFAR10(root=path, train=True, download=True, transform=self.transform_train)
        self.trainloader = torch.utils.data.DataLoader(self.trainset, shuffle=True, **loader_kwargs)
        self.testset = torchvision.datasets.CIFAR10(root=path, train=False, download=True, transform=self.transform_test)
        self.testloader = torch.utils.data.DataLoader(self.testset, shuffle=False, **loader_kwargs)
        super().__init__(batch_size=batch_size, num_workers=num_workers, validation=validation, seed=seed)

    def get_input_shape(self):
        """Shape of a single batched example: (1, 3, 32, 32)."""
        return (1, 3, 32, 32)
class CIFAR100(DataBase):
    """CIFAR-100 loaders with random-crop/flip augmentation on the train split."""

    def __init__(self, batch_size, num_workers=0, path='data', validation=None, normalize=False, seed=None):
        if normalize:
            normalize_transform = transforms.Normalize((0.50707516, 0.48654887, 0.44091784), (0.26733429, 0.25643846, 0.27615047))
        else:
            # identity normalization keeps data in [0, 1] for adversarial crafting
            normalize_transform = transforms.Normalize((0., 0., 0.), (1., 1., 1.))
        self.transform_train = transforms.Compose(
            [transforms.RandomCrop(32, padding=4),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             normalize_transform])
        self.transform_test = transforms.Compose(
            [transforms.ToTensor(),
             normalize_transform])
        self.trainset = torchvision.datasets.CIFAR100(root=path, train=True, download=True, transform=self.transform_train)
        self.trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=batch_size, pin_memory=self.use_cuda,
                                                       shuffle=True, num_workers=num_workers)
        self.testset = torchvision.datasets.CIFAR100(root=path, train=False, download=True, transform=self.transform_test)
        self.testloader = torch.utils.data.DataLoader(self.testset, batch_size=batch_size, pin_memory=self.use_cuda,
                                                      shuffle=False, num_workers=num_workers)
        super().__init__(batch_size=batch_size, num_workers=num_workers, validation=validation, seed=seed)

    def get_input_shape(self):
        # BUGFIX: without this override the inherited DataBase.get_input_shape
        # returned the raw dataset array layout (N, H, W, C), inconsistent with
        # MNIST/CIFAR10 and incompatible with code that builds buffers from
        # get_input_shape()[1:] (e.g. correctly_predicted_to_numpy).
        return (1, 3, 32, 32)
class ImageNet(DataBase):
    """ImageNet loaders; the dataset's 'validation' directory serves as the test split."""

    def __init__(self, batch_size, path='/work/projects/bigdata_sets/ImageNet/ILSVRC2012/raw-data/', num_workers=0,
                 validation=None, normalize=False, input_size=224, resize_size=256, seed=None):
        # default: input_size=224, resize_size=256
        # inception: input_size=299, resize_size=342
        self.input_size = input_size
        self.resize_size = resize_size
        # a DATAPATH environment variable overrides the default dataset location
        if os.environ.get('DATAPATH', False):
            path = os.environ.get('DATAPATH')
        if normalize:
            print('Loading ImageNet with normalization.')
            normalize_transform = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                       std=[0.229, 0.224, 0.225])
        else:
            # identity normalization keeps data in [0, 1] for adversarial crafting
            normalize_transform = transforms.Normalize(mean=[0., 0., 0.],
                                                       std=[1., 1., 1.])
        self.transform_train = transforms.Compose([
            transforms.RandomResizedCrop(input_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize_transform,
        ])
        self.transform_test = transforms.Compose([
            transforms.Resize(resize_size),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            normalize_transform,
        ])
        loader_kwargs = dict(batch_size=batch_size, num_workers=num_workers, pin_memory=self.use_cuda)
        self.trainset = datasets.ImageFolder(os.path.join(path, 'train'), self.transform_train)
        self.trainloader = torch.utils.data.DataLoader(self.trainset, shuffle=True, **loader_kwargs)
        self.testset = datasets.ImageFolder(os.path.join(path, 'validation'), self.transform_test)
        self.testloader = torch.utils.data.DataLoader(self.testset, shuffle=False, **loader_kwargs)
        # pixel range is known a priori, which skips the expensive scan in DataBase.__init__
        self.min_pixel_value = 0
        self.max_pixel_value = 1
        super().__init__(batch_size=batch_size, num_workers=num_workers, validation=validation, seed=seed)

    def get_input_shape(self):
        """Shape of a single batched example: (1, 3, input_size, input_size)."""
        return (1, 3, self.input_size, self.input_size)
| 14,626 | 45.582803 | 130 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/utils_sgm.py | """
Code from the following paper:
@inproceedings{wu2020skip,
title={Skip connections matter: On the transferability of adversarial examples generated with resnets},
author={Wu, Dongxian and Wang, Yisen and Xia, Shu-Tao and Bailey, James and Ma, Xingjun},
booktitle={ICLR},
year={2020}
}
https://github.com/csdongxian/skip-connections-matter
"""
import numpy as np
import torch
import torch.nn as nn
def backward_hook(gamma):
    """Build a backward hook that rescales gradients flowing through ReLU modules by ``gamma`` (SGM)."""
    def _backward_hook(module, grad_in, grad_out):
        # Returning None leaves the gradient of non-ReLU modules untouched.
        if not isinstance(module, nn.ReLU):
            return None
        return (grad_in[0] * gamma,)
    return _backward_hook
def backward_hook_norm(module, grad_in, grad_out):
    """Normalize each incoming gradient by its standard deviation to curb explosion/vanishing."""
    return tuple(g / torch.std(g) for g in grad_in)
def register_hook_for_resnet(model, arch, gamma):
    """
    Register SGM backward hooks on a torchvision ResNet (in place).

    :param model: the ResNet to instrument
    :param arch: architecture name; deep variants split gamma across their two ReLUs
    :param gamma: gradient decay factor applied through ReLUs
    """
    # There is only 1 ReLU in the Conv module of ResNet-18/34
    # and 2 ReLUs in the Conv module of ResNet-50/101/152, so the factor is
    # applied as sqrt(gamma) at each to keep the overall product equal to gamma.
    if arch in ['resnet50', 'resnet101', 'resnet152']:
        gamma = np.power(gamma, 0.5)
    backward_hook_sgm = backward_hook(gamma)
    for name, module in model.named_modules():
        if 'relu' in name and '0.relu' not in name:  # idiomatic ``not in`` (was ``not ... in``)
            # only to the last ReLU of each layer
            module.register_backward_hook(backward_hook_sgm)
        # normalize gradients at block outputs, e.g., 1.layer1.1, 1.layer4.2, ...
        # if len(name.split('.')) == 3:
        if len(name.split('.')) >= 2 and 'layer' in name.split('.')[-2]:
            module.register_backward_hook(backward_hook_norm)
def register_hook_for_preresnet(model, arch, gamma):
    """Register SGM backward hooks on a PreResNet110 (3 ReLUs per Conv module)."""
    if arch not in ['PreResNet110']:
        raise ValueError('Arch not supported')
    # three ReLUs per block -> apply gamma^(1/3) at each so the product is gamma
    gamma = np.power(gamma, 1/3)
    backward_hook_sgm = backward_hook(gamma)
    for name, module in model.named_modules():
        if 'relu' in name and 'layer' in name and '.0.relu' not in name:
            # '.0.relu' skips the relu of downsampling (the dot keeps layerX.10.relu matching);
            # requiring 'layer' in the name excludes the final top-level 'relu'
            module.register_backward_hook(backward_hook_sgm)
        # normalize gradients at block outputs, e.g., 1.layer1.1, 1.layer4.2, ...
        if len(name.split('.')) >= 2 and 'layer' in name.split('.')[-2]:
            module.register_backward_hook(backward_hook_norm)
def register_hook_for_densenet(model, arch, gamma):
    """Register SGM backward hooks on a DenseNet-121/169/201 (2 ReLUs per Conv module)."""
    # two ReLUs per module -> sqrt(gamma) at each so the product is gamma
    hook = backward_hook(np.power(gamma, 0.5))
    for name, module in model.named_modules():
        # transition layers are excluded
        if 'relu' in name and 'transition' not in name:
            module.register_backward_hook(hook)
| 2,861 | 35.692308 | 107 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/layers.py | import torch
from PIL import Image
from torchvision.transforms import functional as F
class RandomResizePad(torch.nn.Module):
    """Randomly shrink a square image and zero-pad it back to its original size."""

    def __init__(self, min_resize):
        super().__init__()
        # smallest side length the image may be shrunk to
        self.min_resize = min_resize

    def forward(self, img):
        original_size = img.size()
        if original_size[-1] != original_size[-2]:
            raise ValueError("Only squared images supported")
        # Draw the new side length, then split the leftover margin randomly
        # between top/bottom and left/right padding. The three torch.randint
        # calls are kept in this exact order to preserve RNG reproducibility.
        new_side = int(torch.randint(low=self.min_resize, high=original_size[-1], size=(1,)))
        margin = original_size[-1] - new_side
        img = F.resize(img, size=new_side, interpolation=Image.NEAREST)
        top = int(torch.randint(low=0, high=margin, size=(1,)))
        bottom = margin - top
        right = int(torch.randint(low=0, high=margin, size=(1,)))
        left = margin - right
        img = F.pad(img, padding=[left, top, right, bottom], fill=0)
        if img.size() != original_size:
            raise RuntimeError("Output size is not the same than input size.")
        return img
| 1,083 | 40.692308 | 96 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/modelsghost.py | # adapted from torchvision ResNet implementation https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
# Add skip connection erosion
# do not use to train a model. Only for inference. Train on regular torchvision resnet
import torch
from torch import Tensor
import torch.nn as nn
try:
from torchvision.models.utils import load_state_dict_from_url
except ImportError:
from torch.hub import load_state_dict_from_url
from typing import Type, Any, Callable, Union, List, Optional
# Public API, mirroring torchvision.models.resnet.
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
           'wide_resnet50_2', 'wide_resnet101_2']
# Torchvision pretrained-weight URLs, fetched when a factory is called with pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,  # padding tracks dilation so output spatial size is preserved at stride 1
        groups=groups,
        bias=False,
        dilation=dilation,
    )
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """1x1 convolution"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """ResNet basic block with Ghost-style randomized skip-connection scaling (inference only)."""
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
        # BUGFIX: forward() checks ``self.planes`` but it was never stored here
        # (only Bottleneck set it), so every BasicBlock forward raised
        # AttributeError. Store it like Bottleneck does.
        self.planes = planes

    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        # added for Ghost
        # random scalar with optimal value reported in the original implementation of Ghost Paper
        # https://github.com/LiYingwei/ghost-network/blob/master/nets/resnet_v2_50.py#L129
        random_range = 0.22
        # uniform distribution [1-random_range, 1+random_range]
        if identity.size(1) != self.planes * self.expansion or len(identity.size()) != 4:
            raise ValueError('wrong dimension')
        perturb_var = torch.rand((1, identity.size(1), 1, 1), requires_grad=False, device=identity.device) * random_range * 2 + (
            1 - random_range)
        out += identity * perturb_var
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Bottleneck block with Ghost-style randomized skip-connection scaling (inference only)."""
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # kept for the dimension sanity check in forward()
        self.planes = planes

    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        # added for Ghost
        # random scalar with optimal value reported in the original implementation of Ghost Paper
        # https://github.com/LiYingwei/ghost-network/blob/master/nets/resnet_v2_50.py#L129
        random_range = 0.22
        # uniform distribution [1-random_range, 1+random_range]
        if identity.size(1) != self.planes * self.expansion or len(identity.size()) != 4:
            raise ValueError('wrong dimension')
        perturb_var = torch.rand((1,identity.size(1),1,1), requires_grad=False, device=identity.device) * random_range * 2 + (1-random_range)
        out += identity * perturb_var
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone whose residual blocks apply Ghost-style randomized skip scaling (see BasicBlock/Bottleneck)."""

    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        # Builds one stage: the first block may downsample, the rest keep resolution.
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any
) -> ResNet:
    """Build a ghost ResNet and optionally load the matching torchvision weights."""
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        weights = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(weights)
    return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Ghost ResNet-18 ("Deep Residual Learning for Image Recognition", https://arxiv.org/pdf/1512.03385.pdf).

    :param pretrained: if True, load ImageNet-pretrained weights
    :param progress: if True, display a download progress bar on stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)


def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Ghost ResNet-34 ("Deep Residual Learning for Image Recognition", https://arxiv.org/pdf/1512.03385.pdf).

    :param pretrained: if True, load ImageNet-pretrained weights
    :param progress: if True, display a download progress bar on stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)


def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Ghost ResNet-50 ("Deep Residual Learning for Image Recognition", https://arxiv.org/pdf/1512.03385.pdf).

    :param pretrained: if True, load ImageNet-pretrained weights
    :param progress: if True, display a download progress bar on stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)


def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Ghost ResNet-101 ("Deep Residual Learning for Image Recognition", https://arxiv.org/pdf/1512.03385.pdf).

    :param pretrained: if True, load ImageNet-pretrained weights
    :param progress: if True, display a download progress bar on stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)


def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Ghost ResNet-152 ("Deep Residual Learning for Image Recognition", https://arxiv.org/pdf/1512.03385.pdf).

    :param pretrained: if True, load ImageNet-pretrained weights
    :param progress: if True, display a download progress bar on stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)


def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Ghost ResNeXt-50 32x4d ("Aggregated Residual Transformation for Deep Neural Networks", https://arxiv.org/pdf/1611.05431.pdf).

    :param pretrained: if True, load ImageNet-pretrained weights
    :param progress: if True, display a download progress bar on stderr
    """
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)


def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Ghost ResNeXt-101 32x8d ("Aggregated Residual Transformation for Deep Neural Networks", https://arxiv.org/pdf/1611.05431.pdf).

    :param pretrained: if True, load ImageNet-pretrained weights
    :param progress: if True, display a download progress bar on stderr
    """
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)


def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Ghost Wide ResNet-50-2 ("Wide Residual Networks", https://arxiv.org/pdf/1605.07146.pdf).

    Same as ResNet-50 except the bottleneck channel count is doubled in every
    block; the outer 1x1 convolutions keep their width.

    :param pretrained: if True, load ImageNet-pretrained weights
    :param progress: if True, display a download progress bar on stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)


def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Ghost Wide ResNet-101-2 ("Wide Residual Networks", https://arxiv.org/pdf/1605.07146.pdf).

    Same as ResNet-101 except the bottleneck channel count is doubled in every
    block; the outer 1x1 convolutions keep their width.

    :param pretrained: if True, load ImageNet-pretrained weights
    :param progress: if True, display a download progress bar on stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
| 17,173 | 40.383133 | 141 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/models.py | import torch
from torch import nn
import torch.nn.functional as F
from random import randrange, shuffle
class ModelWithTemperature(nn.Module):
    """
    A thin decorator, which wraps a model with temperature scaling.
    Code adapted from https://github.com/gpleiss/temperature_scaling/blob/master/temperature_scaling.py
    model (nn.Module):
        A classification neural network
        NB: Output of the neural network should be the classification logits,
        NOT the softmax (or log softmax)!
    """
    def __init__(self, model, temperature=1.):
        super(ModelWithTemperature, self).__init__()
        self.model = model
        # Plain tensor (not a Parameter/buffer), hence the custom to() override below.
        self.temperature = torch.tensor(temperature)

    def forward(self, input):
        logits = self.model(input)
        return self.temperature_scale(logits)

    def temperature_scale(self, logits):
        """
        Perform temperature scaling on logits
        """
        # Expand temperature to match the size of logits
        temperature = self.temperature.expand(logits.size(0), logits.size(1))
        return logits / temperature

    def to(self, *args, **kwargs):
        # Move both the wrapped model and the raw temperature tensor.
        self.model = self.model.to(*args, **kwargs)
        self.temperature = self.temperature.to(*args, **kwargs)
        super(ModelWithTemperature, self).to(*args, **kwargs)
        # BUGFIX: nn.Module.to returns the module so calls can be chained
        # (m = m.to(device)); the previous implementation returned None.
        return self
class MnistFc(nn.Module):
    """Three-layer fully-connected MNIST classifier (784-1200-1200-num_classes)."""

    def __init__(self, num_classes=10, pretrained=False):
        if pretrained:
            raise NotImplementedError()
        super(MnistFc, self).__init__()
        self.fc1 = nn.Linear(784, 1200)
        self.fc2 = nn.Linear(1200, 1200)
        self.fc3 = nn.Linear(1200, num_classes)

    def forward(self, x):
        h = torch.flatten(x, 1)
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        return self.fc3(h)
class MnistSmallFc(nn.Module):
    """
    One hidden layer FC
    """

    def __init__(self, num_classes=10, pretrained=False, hidden_size=512):
        if pretrained:
            raise NotImplementedError()
        super(MnistSmallFc, self).__init__()
        self.hidden_size = hidden_size
        self.fc1 = nn.Linear(784, hidden_size)
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        h = torch.flatten(x, 1)
        h = F.relu(self.fc1(h))
        return self.fc2(h)
class MnistCnn(nn.Module):
    """Small conv net for MNIST: two conv+pool stages followed by three FC layers."""

    def __init__(self, num_classes=10, pretrained=False):
        if pretrained:
            raise NotImplementedError()
        super(MnistCnn, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
        # 64 channels * 4 * 4 spatial after the two pooling stages
        self.fc1 = nn.Linear(1024, 200)
        self.fc2 = nn.Linear(200, 200)
        self.fc3 = nn.Linear(200, num_classes)

    def forward(self, x):
        h = F.max_pool2d(F.relu(self.conv1(x)), 2)
        h = F.max_pool2d(F.relu(self.conv2(h)), 2)
        h = torch.flatten(h, 1)
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        return self.fc3(h)
class CifarLeNet(nn.Module):
    """LeNet-style conv net for CIFAR (3-channel 32x32 inputs)."""

    def __init__(self, num_classes=10, pretrained=False):
        # NOTE(review): ``pretrained`` is accepted but ignored here, unlike the
        # MNIST models which raise — confirm this asymmetry is intended.
        super(CifarLeNet, self).__init__()
        self.conv_1 = nn.Conv2d(3, 6, 5)
        self.conv_2 = nn.Conv2d(6, 16, 5)
        self.fc_1 = nn.Linear(16 * 5 * 5, 120)
        self.fc_2 = nn.Linear(120, 84)
        self.fc_3 = nn.Linear(84, num_classes)

    def forward(self, x):
        h = F.max_pool2d(F.relu(self.conv_1(x)), 2)
        h = F.max_pool2d(F.relu(self.conv_2(h)), 2)
        h = h.view(h.size(0), -1)
        h = F.relu(self.fc_1(h))
        h = F.relu(self.fc_2(h))
        return self.fc_3(h)
class TorchEnsemble(nn.Module):
    """Ensemble module that averages the outputs of several models."""

    def __init__(self, models, ensemble_logits=False):
        """
        :param models: list of pytorch models to ensemble
        :param ensemble_logits: True if ensemble logits, False to ensemble probabilities
        :return probablities if ensemble_logits is False, logits if True
        """
        super(TorchEnsemble, self).__init__()
        if not models:  # idiomatic emptiness test (was ``len(models) < 1``)
            raise ValueError('Empty list of models')
        self.models = nn.ModuleList(models)
        self.ensemble_logits = ensemble_logits
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # clone to make sure x is not changed by inplace methods
        if not self.ensemble_logits:
            x_list = [self.softmax(model(x.clone())) for model in self.models]  # probs
        else:
            x_list = [model(x.clone()) for model in self.models]  # logits
        # average over the model dimension
        x = torch.stack(x_list)  # concat on dim 0
        x = torch.mean(x, dim=0, keepdim=False)
        return x
class LightEnsemble(nn.Module):
    def __init__(self, models, order=None):
        """
        Perform a single forward pass through one of the models at each forward() call.

        :param models: list of pytorch models to cycle through
        :param order: str, 'random' draws a model with replacement at each call,
            'shuffle' draws models without replacement (list shuffled once),
            None cycles in the provided order (Default).
        :return logits
        """
        super(LightEnsemble, self).__init__()
        self.n_models = len(models)
        if self.n_models < 1:
            raise ValueError('Empty list of models')
        # BUGFIX: copy before shuffling so the caller's list is not mutated.
        models = list(models)
        if order == 'shuffle':
            shuffle(models)
        elif order in [None, 'random']:
            pass
        else:
            raise ValueError('Not supported order')
        self.models = nn.ModuleList(models)
        self.order = order
        self.f_count = 0  # number of forward calls, used to cycle deterministically

    def forward(self, x):
        if self.order == 'random':
            index = randrange(0, self.n_models)
        else:
            index = self.f_count % self.n_models
        x = self.models[index](x)
        self.f_count += 1
        return x
class LightNestedEnsemble(nn.Module):
    """Light nested ensemble: each forward() call is served by one sub-ensemble."""

    def __init__(self, list_models, order=None):
        """
        Perform ensemble a single list of models when call forward()
        :param list_models: nested list (list of lists) of pytorch models to ensemble
        :param order: 'random' draws a sub-ensemble with replacement, 'shuffle' shuffles
            the list once and then cycles, None cycles in the provided order (default)
        :return logits
        """
        super(LightNestedEnsemble, self).__init__()
        self.n_ensembles = len(list_models)
        if self.n_ensembles < 1:
            raise ValueError('Empty list of models')
        if order == 'shuffle':
            shuffle(list_models)
        elif order not in [None, 'random']:
            raise ValueError('Not supported order')
        # each inner list becomes one logit-averaging TorchEnsemble
        sub_ensembles = [TorchEnsemble(models=models, ensemble_logits=True)
                         for models in list_models]
        self.models = nn.ModuleList(sub_ensembles)
        self.order = order
        self.f_count = 0

    def forward(self, x):
        """Run x through the next (or a random) sub-ensemble and return its logits."""
        if self.order == 'random':
            idx = randrange(0, self.n_ensembles)
        else:
            # deterministic round-robin over the sub-ensembles
            idx = self.f_count % self.n_ensembles
        out = self.models[idx](x)
        self.f_count += 1
        return out
| 7,591 | 32.444934 | 147 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/helpers.py | import os
import re
import glob
import argparse
import torch
import numpy as np
from collections import OrderedDict
try:
from art.classifiers import PyTorchClassifier
except ModuleNotFoundError:
from art.estimators.classification import PyTorchClassifier
from .models import TorchEnsemble, CifarLeNet, MnistCnn, MnistFc, MnistSmallFc, ModelWithTemperature
from .layers import RandomResizePad
from .utils_sgm import register_hook_for_resnet, register_hook_for_preresnet, register_hook_for_densenet
from pytorch_ensembles import models as pemodels
from utils import modelsghost as ghostmodels
from utils import modelsghostpreresnet as ghostpreresnet
from torchvision import models as tvmodels
import timm
from robustbench.utils import load_model as rb_load_model
from torchvision import transforms
from torch import nn
# Optimizers whose checkpoints are treated as MCMC posterior samples (see args2paths)
MCMC_OPTIMIZERS = ['SGLD', 'pSGLD']
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device("cuda:0" if USE_CUDA else "cpu")
# Architecture names resolved through the pytorch_ensembles package
PEMODELS_NAMES = ['BayesPreResNet110', 'PreResNet110', 'PreResNet164', 'VGG16BN', 'VGG19BN', 'WideResNet28x10',
                  'WideResNet28x10do']
# Architecture names resolved through torchvision.models
TVMODELS_NAMES = ['BayesResNet50', "resnet50", "resnet18", "resnet152", "resnext50_32x4d", "mnasnet1_0", "densenet121", "densenet201",
                  "mobilenet_v2", "wide_resnet50_2", "vgg19", "inception_v3", "inception_v2", "googlenet"]
# Architecture names resolved through timm ("timm_" prefix disambiguates from torchvision)
TIMODELS_NAMES = ["efficientnet_b0", "adv_inception_v3", "inception_resnet_v2", "tf_inception_v3",
                  "timm_resnet152", "timm_resnext50_32x4d", "timm_wide_resnet50_2", "vit_base_patch16_224"]
# timm_vgg19 timm_densenet201 timm_inception_v3 are imported from torchvision. do not use
# Robust (adversarially trained) models resolved through robustbench
RBMODELS_NAMES = ["Salman2020Do_50_2", "Salman2020Do_R50", "Engstrom2019Robustness", "Wong2020Fast", "Salman2020Do_R18"]
# Every architecture name guess_model() can return
ALL_MODELS_NAMES = ['MnistFc', 'MnistSmallFc', 'MnistCnn', 'LeNet'] + PEMODELS_NAMES + TVMODELS_NAMES + TIMODELS_NAMES + RBMODELS_NAMES
class keyvalue(argparse.Action):
    """
    argparse Action parsing 'key=value' command line arguments into a dict.
    Code adapted from https://www.geeksforgeeks.org/python-key-value-pair-using-argparse/
    """

    def __call__(self, parser, namespace, values, option_string=None):
        """Store each 'key=value' string of values as an entry of a dict on the namespace."""
        setattr(namespace, self.dest, dict())
        for value in values:
            # split on the FIRST '=' only (bug fix: a bare split('=') raised
            # "too many values to unpack" when the value itself contained '=')
            key, value = value.split('=', 1)
            # assign into dictionary
            getattr(namespace, self.dest)[key] = value
def args2paths(args, index_model=None):
    """
    Create the paths used to save the current model and its metrics.
    :param args: train.py command line arguments namespace
    :param index_model: int for a specific element of ensemble models or for a model sample
    :return: tuple (pytorch model path, metrics CSV file path)
    """
    lrd = "plateau" if args.lr_decay_on_plateau else args.lr_decay
    # hyper-parameter suffix shared by every kind of run directory/filename
    common = f'{args.optimizer}_bs{args.batch_size}_lr{args.lr}_lrd{lrd}_psig{args.prior_sigma:.1f}'
    if args.optimizer in MCMC_OPTIMIZERS:
        # MCMC sampling: one numbered checkpoint per sample, one shared metrics file
        filename = f'{index_model:04}.pth'
        filename_metrics = 'metrics.csv'
        relative_path = f'{args.dataset}/{args.architecture}/mcmc_samples/{common}_s{args.samples}_si{args.sampling_interval}_bi{args.burnin}_seed{args.seed}'
    elif index_model is None:
        # single DNN
        filename = f'model_{common}_ep{args.epochs}_seed{args.seed}.pth'
        filename_metrics = filename.replace('model', 'metrics').replace('.pth', '.csv')
        relative_path = f'{args.dataset}/{args.architecture}/single_model/'
    else:
        # ensemble of DNNs: numbered checkpoints, shared metrics file
        filename = f'{index_model:04}.pth'
        filename_metrics = 'metrics.csv'
        relative_path = f'{args.dataset}/{args.architecture}/dnn_ensemble/{common}_ep{args.epochs}_seed{args.seed}'
    target_dir = os.path.join(args.output, relative_path)
    os.makedirs(target_dir, exist_ok=True)
    return os.path.join(target_dir, filename), os.path.join(target_dir, filename_metrics)
def list_models(models_dir):
    """
    List checkpoint files referenced by models_dir.
    :param models_dir: a pretrained alias ("ImageNet/pretrained/<name>"), a single
        checkpoint file path, or a directory containing checkpoints
    :return: sorted list of checkpoint paths (the alias/file itself for the first two cases)
    :raises ValueError: if a single-file path does not exist
    """
    # pretrained model alias: returned unchanged
    if re.match(r"^ImageNet/pretrained/(\w+)$", models_dir):
        return [models_dir]
    # explicit path to a single checkpoint file
    if re.match('.+\\.pth?(\\.tar)?$', models_dir):
        if not os.path.isfile(models_dir):
            raise ValueError('Non-existing path surrogate file passed')
        return [models_dir]
    # directory of checkpoints: gather every supported extension
    paths = []
    for ext in ('pt', 'pth', 'pt.tar', 'pth.tar'):
        paths.extend(glob.glob(f'{models_dir}/*.{ext}'))
    return sorted(paths)
def load_model(path_model, class_model, *args, **kwargs):
    """
    Instantiate class_model(*args, **kwargs), load its weights from path_model,
    and return it in eval mode.
    """
    net = class_model(*args, **kwargs)
    state = torch.load(path_model)
    net.load_state_dict(state)
    net.eval()
    return net
def load_list_models(models_dir, class_model, device=None, *args, **kwargs):
    """
    Load every checkpoint found in models_dir as an instance of class_model.
    :param models_dir: path to a directory of checkpoints (or a single checkpoint file)
    :param class_model: model class to instantiate for each checkpoint
    :param device: optional torch device to move each model to
    :param args: positional arguments forwarded to class_model
    :param kwargs: keyword arguments forwarded to class_model
    :return: list of loaded models in eval mode
    """
    path_models = list_models(models_dir)
    models = []
    for path_model in path_models:
        # bug fix: *args/**kwargs were accepted but never forwarded to load_model,
        # so any model class requiring constructor arguments could not be loaded.
        model = load_model(path_model, class_model, *args, **kwargs)
        if device:
            model.to(device)
        models.append(model)
    return models
def guess_model(path_model):
    """
    Guess the architecture name from a checkpoint path.
    When several known names appear in the path, the longest match wins
    (e.g. 'cSGLD' over 'SGLD').
    :raises ValueError: if no known architecture name is found
    """
    matches = [name for name in ALL_MODELS_NAMES if name in path_model]
    if not matches:
        raise ValueError('Not able to guess model name')
    return max(matches, key=len)
def guess_and_load_model(path_model, data, load_as_ghost=False, input_diversity=False, skip_gradient=False, defense_randomization=False, temperature=None, force_cpu=False):
    """
    Load model from its path only (guessing the model class)
    :param path_model: str, path to the pt file to load
    :param data: data class
    :param load_as_ghost: load model as a Ghost Network. Only Skip Connection Erosion of resnet on ImageNet supported.
    :param input_diversity: add input diversity as first layer (p=0.5)
    :param skip_gradient: apply Skip Gradient Method with backward hook (gamma=0.2 for resnets,
        0.5 for densenets, env-configurable for PreResNet110)
    :param defense_randomization: add input diversity as first layer (p=1) to be used as defense
    :param temperature: temperature scaling value. Deactivated if None (default).
    :param force_cpu: don't send model to GPU if True
    :return: pytorch instance of a model
    """
    if load_as_ghost and not (('resnet' in path_model and 'ImageNet' in path_model) or ('PreResNet' in path_model and 'CIFAR10' in path_model)):
        raise ValueError('Ghost Networks only supports resnet on ImageNet, or PreResNet on CIFAR10.')
    if input_diversity and defense_randomization:
        raise ValueError('input_diversity and defense_randomization should not be set at the same time')
    # --- MNIST checkpoints (architectures defined in utils/models.py) ---
    if 'MNIST' in path_model:
        if load_as_ghost:
            raise NotImplementedError('Ghost MNIST models not supported')
        # model from utils/models.py
        if 'CNN' in path_model or 'MnistCnn' in path_model:
            model_name_list = ['MnistCnn']
            model = MnistCnn(pretrained=False, num_classes=data.num_classes)
        elif 'FC' in path_model or 'MnistFc' in path_model:
            model_name_list = ['MnistFc']
            model = MnistFc(pretrained=False, num_classes=data.num_classes)
        elif 'MnistSmallFc' in path_model:
            model_name_list = ['MnistSmallFc']
            model = MnistSmallFc(pretrained=False, num_classes=data.num_classes)
        else:
            raise NotImplementedError('Model class unknown')
        model_loaded = torch.load(path_model, map_location=DEVICE)
        if 'model_state' in model_loaded:
            model_loaded = model_loaded['model_state']
        try:
            model.load_state_dict(model_loaded)
        except RuntimeError:
            # presumably checkpoints saved inside a wrapping Sequential have keys
            # prefixed with "1." — strip the 2-char prefix and retry; TODO confirm
            new_state_dict = OrderedDict()
            for k, v in model_loaded.items():
                name = k[2:]  # remove `1.`
                new_state_dict[name] = v
            # load params
            model.load_state_dict(new_state_dict)
        # standard MNIST normalization applied as a first layer
        model = add_normalization_layer(model=model, mean=(0.1307,), std=(0.3081,))
        arch = model_name_list[0]
    # --- CIFAR checkpoints ---
    elif 'CIFAR' in path_model:
        # ghost models
        if load_as_ghost:
            if 'PreResNet' not in path_model:
                raise ValueError('Only PreResNet ghost models supported on CIFAR10')
            model_name_list = [x for x in PEMODELS_NAMES if x in path_model]
            if len(model_name_list) != 1:
                raise ValueError(f'Failed to extract model name: {model_name_list}')
            arch = getattr(ghostpreresnet, model_name_list[0])
            model = arch.base(num_classes=data.num_classes, **arch.kwargs)
        # model from utils/models.py
        elif 'LeNet' in path_model:
            model_name_list = ['LeNet']
            model = CifarLeNet(pretrained=False, num_classes=data.num_classes)
        # model from pytorch-ensembles
        elif 'BayesPreResNet110' in path_model:
            model_name_list = ['BayesPreResNet110']
            arch = getattr(pemodels, 'BayesPreResNet110')
            model = arch.base(num_classes=data.num_classes, **arch.kwargs)
        elif len([x for x in PEMODELS_NAMES if x in path_model]) >= 1:
            # list model name in pemodels: [x for x in dir(pemodels) if x[0:2] != '__']
            model_name_list = [x for x in PEMODELS_NAMES if x in path_model]
            if len(model_name_list) != 1:
                raise ValueError(f'Failed to extract model name: {model_name_list}')
            arch = getattr(pemodels, model_name_list[0])
            model = arch.base(num_classes=data.num_classes, **arch.kwargs)
        else:
            raise NotImplementedError('Model class unknown')
        model_loaded = torch.load(path_model, map_location=DEVICE)
        if 'model_state' in model_loaded:
            model_loaded = model_loaded['model_state']
        try:
            model.load_state_dict(model_loaded)
        except RuntimeError:
            # same "1."-prefixed keys fallback as the MNIST branch
            new_state_dict = OrderedDict()
            for k, v in model_loaded.items():
                name = k[2:]  # remove `1.`
                new_state_dict[name] = v
            # load params
            model.load_state_dict(new_state_dict)
        # cSGLD trained on [0,1] data range: nothing to do
        # old dnn_ensemble trained on [-1, 1] data range
        if 'dnn_ensemble' in path_model or 'single_model' in path_model:
            model.to(DEVICE)
            model = add_normalization_layer(model=model, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        # NOTE: overwrites the class object stored in `arch` above with its name
        arch = model_name_list[0]
    # --- ImageNet checkpoints ---
    elif 'ImageNet' in path_model:
        # pretrained alias "ImageNet/pretrained/<arch>": load published weights
        if 'ImageNet/pretrained' in path_model:
            if load_as_ghost:
                raise ValueError('Ghost pretrained models not supported')
            a = re.match(r"^ImageNet/pretrained/(\w+)$", path_model)
            if a:
                arch = a.groups()[0]
            else:
                raise ValueError('Failed extracting name of pretrained model')
            if arch in TVMODELS_NAMES:
                model = tvmodels.__dict__[arch](pretrained=True)
                model = add_normalization_layer(model=model, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            elif arch in TIMODELS_NAMES:
                arch_ = arch.replace('timm_', '')
                model = timm.create_model(arch_, pretrained=True)
                # timm provides the per-model normalization statistics
                config = timm.data.resolve_data_config({}, model=model)
                model = add_normalization_layer(model=model, mean=config['mean'], std=config['std'])
            elif arch in RBMODELS_NAMES:
                # https://github.com/RobustBench/robustbench#imagenet
                model = rb_load_model(model_name=arch, model_dir='~/.cache/robustbench/models', dataset='imagenet',
                                      threat_model="Linf")
            else:
                raise ValueError(f'Model {arch} not supported.')
            model.to(DEVICE)
        # local checkpoint file: branch on the training method encoded in the path
        else:
            checkpoint_dict = torch.load(path_model, map_location=DEVICE)
            source_model = tvmodels
            if load_as_ghost:
                source_model = ghostmodels
            if 'cSGLD' in path_model or 'single_model' in path_model:
                arch = checkpoint_dict['arch']
                if arch in TIMODELS_NAMES:
                    if load_as_ghost:
                        raise ValueError('Ghost timm models not supported')
                    model = timm.create_model(arch, pretrained=False)
                else:
                    model = source_model.__dict__[arch]()
                # try to load state_dir
                # some models were trained with dataparallel, some not.
                try:
                    model.load_state_dict(checkpoint_dict['state_dict'])
                except RuntimeError:
                    #model = torch.nn.parallel.DataParallel(model)
                    new_state_dict = OrderedDict()
                    for k, v in checkpoint_dict['state_dict'].items():
                        name = k[7:]  # remove `module.`
                        new_state_dict[name] = v
                    # load params
                    model.load_state_dict(new_state_dict)
                if 'withdatanorm' in path_model:
                    # models trained with regular data norm
                    model = add_normalization_layer(model=model, mean=[0.485, 0.456, 0.406],
                                                    std=[0.229, 0.224, 0.225])
            elif 'subspace_inference' in path_model:
                # all SI models are trained on normalized data
                a = re.match(r".*models(_target)?/ImageNet/(\w+)/.*", path_model)
                if a:
                    arch = a.group(2)
                else:
                    raise ValueError('Failed extracting archiecture from filename model')
                if arch in TIMODELS_NAMES:
                    if load_as_ghost:
                        raise ValueError('Ghost timm models not supported')
                    model = timm.create_model(arch.replace('timm_', ''), pretrained=False)
                else:
                    model = source_model.__dict__[arch]()
                state_dict = checkpoint_dict
                if 'state_dict' in checkpoint_dict:
                    state_dict = checkpoint_dict['state_dict']
                try:
                    model.load_state_dict(state_dict)
                except RuntimeError:
                    # DataParallel checkpoints prefix keys with "module."
                    new_state_dict = OrderedDict()
                    for k, v in state_dict.items():
                        name = k[7:]  # remove `module.`
                        new_state_dict[name] = v
                    # load params
                    model.load_state_dict(new_state_dict)
                model.to(DEVICE)
                # add normalization layer at first
                model = add_normalization_layer(model=model, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            elif ('deepens_imagenet' in path_model) or ('FGE' in path_model) or ('SSE' in path_model):
                # pretrained resnet50 from pytorch-ensemble
                arch = 'resnet50'
                model = source_model.__dict__[arch]()
                # models trained with dataparallel
                #model = torch.nn.parallel.DataParallel(model)
                new_state_dict = OrderedDict()
                for k, v in checkpoint_dict['state_dict'].items():
                    name = k[7:]  # remove `module.`
                    new_state_dict[name] = v
                model.load_state_dict(new_state_dict)
                model.to(DEVICE)
                # add normalization layer at first
                model = add_normalization_layer(model=model, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            elif 'SWAG' in path_model:
                # train by us from 1 deepens_imagenet checkpoint resnet50
                arch = 'resnet50'
                model = source_model.__dict__[arch]()
                if 'state_dict' in checkpoint_dict:
                    checkpoint_dict = checkpoint_dict['state_dict']
                model.load_state_dict(checkpoint_dict)
                model.to(DEVICE)
                # add normalization layer at first
                model = add_normalization_layer(model=model, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            elif 'VI' in path_model:
                arch = 'BayesResNet50'
                model = pemodels.__dict__[arch]()
                # NOTE(review): new_state_dict below is created but never used — left as-is
                new_state_dict = OrderedDict()
                model.load_state_dict(checkpoint_dict)
                model.to(DEVICE)
                # add normalization layer at first
                model = add_normalization_layer(model=model, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            else:
                raise ValueError('ImageNet model not recognized.')
            if 'inception' in path_model:
                # resize input to to 299
                model = add_resize_layer(model=model, size=(299, 299))
    else:
        raise ValueError('dataset not supported')
    model.eval()
    # to GPU
    if USE_CUDA and not force_cpu:
        model.to(DEVICE)
    if input_diversity:
        # add as first layer that randomly resize between [90%, 100%] and 0-pad to original size with probability 0.5
        model = add_random_resize_layer(model=model, p=0.5, min_resize=round(data.get_input_shape()[2]*0.9))
        model.to(DEVICE)
        model.eval()
    if skip_gradient:
        if arch in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']:
            register_hook_for_resnet(model, arch=arch, gamma=0.2)
        elif arch in ['PreResNet110']:
            # gamma overridable via env var; "," tolerated as decimal separator
            gamma = float(os.getenv('ADV_TRANSFER_SGM_GAMMA_PRERESNET', '0.7').replace(',', '.'))
            register_hook_for_preresnet(model, arch=arch, gamma=gamma)
        elif arch in ['densenet121', 'densenet169', 'densenet201']:
            register_hook_for_densenet(model, arch=arch, gamma=0.5)
        else:
            raise ValueError(f'Arch { arch } not supported by Skip Gradient Method')
    if defense_randomization:
        # add as first layer that randomly resize between [90%, 100%] and 0-pad to original size with probability 1.
        # defense by https://arxiv.org/pdf/1711.01991.pdf
        model = add_random_resize_layer(model=model, p=1, min_resize=round(data.get_input_shape()[2]*0.9))
        model.to(DEVICE)
        model.eval()
    if temperature:
        model = ModelWithTemperature(model, temperature=temperature)
        model.to(DEVICE)
        model.eval()
    return model
def load_classifier(model, data):
    """
    Wrap a pytorch model into an ART PyTorchClassifier.
    :param model: pytorch model instance
    :param data: data class
    :return: ART classifier
    """
    # a loss and an optimizer are required by the ART constructor but not used here
    loss_fn = torch.nn.CrossEntropyLoss()
    opt = torch.optim.SGD(model.parameters(), lr=0.01)
    #classifier.set_learning_phase(False) # automatically set by predict() calls
    return PyTorchClassifier(
        model=model,
        clip_values=(data.min_pixel_value, data.max_pixel_value),
        loss=loss_fn,
        optimizer=opt,
        input_shape=data.get_input_shape(),
        nb_classes=data.num_classes,
        device_type="gpu" if USE_CUDA else "cpu"
    )
def load_classifier_ensemble(models, **kwargs):
    """
    Build an ART classifier wrapping an ensemble of PyTorch models (logit averaging).
    :param models: list of pytorch model instances
    :return: ART classifier of the ensemble
    """
    ensemble = TorchEnsemble(models=models, ensemble_logits=True)
    if USE_CUDA:
        ensemble.to(DEVICE)
    ensemble.eval()
    return load_classifier(ensemble, **kwargs)
def predict_ensemble(models_dir, X, data):
    """
    Compute ensembled predictions over every model inside models_dir.
    :param models_dir: str path to pytorch's models
    :param X: pytorch tensor or numpy array
    :param data: data class instance
    :return: tuple of 2 numpy arrays of predicted labels (probability ensembling,
        logit ensembling)
    """
    dataset = torch.utils.data.TensorDataset(torch.Tensor(X))
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=data.batch_size, shuffle=False,
        num_workers=data.num_workers, pin_memory=USE_CUDA)
    path_models = list_models(models_dir)
    # running sums over models, filled batch by batch
    sum_logits = torch.zeros((X.shape[0], data.num_classes))
    sum_probs = torch.zeros((X.shape[0], data.num_classes))
    for path_model in path_models:
        model = guess_and_load_model(path_model=path_model, data=data)
        for i, (inputs,) in enumerate(loader):
            inputs = inputs.to(DEVICE, non_blocking=True)
            with torch.no_grad():
                logits = model(inputs)
            lo = i * data.batch_size
            hi = (i + 1) * data.batch_size
            sum_logits[lo:hi, :] += logits.cpu()
            sum_probs[lo:hi, :] += torch.nn.functional.softmax(logits, dim=1).cpu()
        # free GPU memory before loading the next model
        del model, logits
        if USE_CUDA:
            torch.cuda.empty_cache()
    sum_logits /= len(path_models)
    probs_from_logits = torch.nn.functional.softmax(sum_logits, dim=1)
    sum_probs /= len(path_models)
    label_pred_logit = np.argmax(probs_from_logits.numpy(), axis=1)
    label_pred_prob = np.argmax(sum_probs.numpy(), axis=1)
    return label_pred_prob, label_pred_logit
def compute_accuracy_ensemble(models_dir, X, y, data):
    """Return (probability-ensembling accuracy, logit-ensembling accuracy) on (X, y)."""
    pred_prob, pred_logit = predict_ensemble(models_dir=models_dir, X=X, data=data)
    return (pred_prob == y).mean(), (pred_logit == y).mean()
def compute_accuracy_multiple_ensemble(models_dirs, X, y, data):
    """
    Compute the mean of the accuracies of several ensembles.
    """
    accs = [compute_accuracy_ensemble(models_dir=d_, X=X, y=y, data=data)
            for d_ in models_dirs]
    mean_prob = sum(a for a, _ in accs) / len(models_dirs)
    mean_logit = sum(b for _, b in accs) / len(models_dirs)
    return mean_prob, mean_logit
def compute_accuracy_from_nested_list_models(list_ensemble, X, y, data, export_predict=False, export_mask=None, tol=1e-12):
    """
    Compute accuracy and NLL loss of the ensemble of every model inside list_ensemble.
    :param list_ensemble: nested list (list of lists) of pytorch models
    :param X: pytorch tensor or numpy array of examples
    :param y: pytorch tensor or numpy array of labels
    :param data: data class instance (provides batch_size, num_workers, num_classes)
    :param export_predict: bool, also return a boolean vector of per-example correctness
    :param export_mask: 0/1 tensor of shape (n,); also return accuracy restricted to
        the masked examples (NaN if the mask selects none)
    :param tol: unused, kept for backward compatibility of the signature
    :return: (accuracy, NLL loss) plus the extra value requested by
        export_predict / export_mask
    """
    if export_mask is not None:
        if export_mask.shape != (X.shape[0], ):
            raise ValueError('Wrong shape for export_mask')
        if not np.isin(export_mask.numpy(), [0, 1]).all():
            raise ValueError('export_mask tensor should contain only 0,1 values')
        # bug fix: the original computed export_mask.bool().to(DEVICE) without
        # assigning the result, so a float mask reached torch.masked_select below
        # (which requires a bool mask on recent pytorch). Kept on CPU because
        # predict_correct is accumulated on CPU.
        export_mask = export_mask.bool()
    torchdataset = torch.utils.data.TensorDataset(torch.tensor(X, dtype=torch.float), torch.tensor(y))
    loader = torch.utils.data.DataLoader(
        torchdataset,
        batch_size=data.batch_size, shuffle=False,
        num_workers=data.num_workers, pin_memory=USE_CUDA)
    loss = nn.NLLLoss(reduction='sum').to(DEVICE)
    correct = 0
    total = 0
    loss_sum = 0.
    predict_correct = torch.zeros((0,))
    with torch.no_grad():
        for inputs, labels in loader:
            inputs = inputs.to(DEVICE, non_blocking=True)
            labels = labels.to(DEVICE, non_blocking=True)
            log_prob_models_batch = []
            for list_model in list_ensemble:
                for model in list_model:
                    output = model(inputs)
                    # more numerically stable to save log_softmax then log sum exp
                    log_prob_models_batch.append(torch.nn.functional.log_softmax(output, dim=1))
            # log of the mean probability: logsumexp over the model dim minus log(n)
            log_prob_ens_batch = torch.logsumexp(torch.dstack(log_prob_models_batch), dim=2) - np.log(len(log_prob_models_batch))
            loss_sum += loss(log_prob_ens_batch, labels.long()).item()  # NLLLoss takes log prob
            _, predicted = torch.max(log_prob_ens_batch.data, 1)
            correct += (predicted == labels).sum().item()
            predict_correct = torch.cat((predict_correct, (predicted == labels).cpu()), 0)
            total += labels.size(0)
    if export_predict:
        if predict_correct.shape[0] != X.shape[0]:
            raise RuntimeError('Unexpected shape of predict_correct vector')
        return correct / total, loss_sum / total, predict_correct.bool()
    if export_mask is not None:
        correct_masked = torch.masked_select(predict_correct, mask=export_mask).sum().cpu().item()
        total_masked = export_mask.sum().cpu().item()
        if total_masked == 0:
            total_masked = np.nan  # np.NaN alias was removed in NumPy 2.0
        return correct / total, loss_sum / total, correct_masked / total_masked
    return correct / total, loss_sum / total
def save_numpy(array, path, filename):
    """Save a numpy array under path/filename, creating path if needed."""
    os.makedirs(path, exist_ok=True)
    target = os.path.join(path, filename)
    np.save(target, array)
def flatten(X):
    """Reshape X to 2D: one row per example, features flattened."""
    n_examples = X.shape[0]
    return X.reshape((n_examples, -1))
def compute_norm(X_adv, X, norm=2):
    """Per-example Lp norm of the perturbation X_adv - X."""
    delta = flatten(X_adv) - flatten(X)
    return np.linalg.norm(delta, ord=norm, axis=1)
def project_on_sphere(X, X_adv, data, size=4., norm=2):
    """
    Project on sphere (not the ball!) of specified size
    :param X: np array of original examples
    :param X_adv: np array of adversarial examples
    :param data: data class (provides min/max pixel values for clipping)
    :param size: radius of the sphere
    :param norm: Lp norm to use. Only 2 implemented
    :return: projected (and clipped) adversarial examples
    """
    if norm != 2:
        raise NotImplementedError('Only L2 norm implemented')
    lpnorm = compute_norm(X_adv, X, norm=2)
    # rescale each perturbation so its L2 norm equals `size`
    scale = size / lpnorm.reshape((X.shape[0], 1, 1, 1))
    projected = X + scale * (X_adv - X)
    return np.clip(projected, data.min_pixel_value, data.max_pixel_value)
def add_normalization_layer(model, mean, std):
    """Prepend a Normalize(mean, std) layer to model so it accepts unnormalized inputs."""
    norm_layer = transforms.Normalize(mean=mean, std=std)
    return torch.nn.Sequential(norm_layer, model)
def add_resize_layer(model, size, **kwargs):
    """Prepend a Resize(size) layer to model (kwargs forwarded to transforms.Resize)."""
    resize_layer = transforms.Resize(size=size, **kwargs)
    return torch.nn.Sequential(resize_layer, model)
def add_random_resize_layer(model, p=0.5, min_resize=200):
    """
    Add input diversity layer as first layer
    :param model: pytorch model
    :param p: probability to apply input diversity
    :param min_resize: minimum possible resize
    :return: model wrapped with a randomly-applied resize-and-pad first layer
    """
    resize_pad = RandomResizePad(min_resize)
    random_branch = transforms.RandomApply([resize_pad], p=p)
    return torch.nn.Sequential(random_branch, model)
def guess_method(path_model):
    """
    Return the name of the training method guessed from the model path.
    The longest matching method name wins (e.g. 'cSGLD' over 'SGLD');
    any plain-DNN marker maps to the single label 'dnn'.
    :raises ValueError: if no known method marker is found in the path
    """
    METHODS_OTHER = ['SGLD', 'cSGLD', 'pSGLD', 'HMC', 'SVI', 'VI', 'SWAG', 'FGE', 'SSE', 'SI/pca/ESS', 'SI/random/ESS', 'SWA',
                     'collected_models', 'cSGD', 'ImageNet/pretrained']
    METHODS_DNN = ['SGD', 'Adam', 'RMSprop', 'DNN', 'dnn_ensemble', 'single_model', 'deepens']  # always return 'dnn'
    other_matches = [m for m in METHODS_OTHER if m in path_model]
    if other_matches:
        return max(other_matches, key=len)
    if any(m in path_model for m in METHODS_DNN):
        return 'dnn'
    raise ValueError('Not able to guess model training method')
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/attacks.py | import torch
import os
import numpy as np
import scipy.stats as st
from art.attacks.evasion import FastGradientMethod, ProjectedGradientDescentPyTorch
from art.classifiers import PyTorchClassifier
from art.config import ART_NUMPY_DTYPE
from art.utils import (
random_sphere,
projection,
)
from tqdm import tqdm
from typing import Optional, Union, TYPE_CHECKING, Tuple, List
from utils.helpers import DEVICE, compute_accuracy_from_nested_list_models
class ExtendedFastGradientMethod(FastGradientMethod):
    """
    ART FastGradientMethod extended with a momentum term accumulated across calls
    and optional translation-invariant gradient smoothing (depthwise Gaussian
    convolution of the gradient, per the Translation-Invariant Attacks reference
    implementation cited below).

    Bug fix vs. original: `np.object` (deprecated NumPy 1.20, removed 1.24) is
    replaced by the builtin `object`, which is what the alias pointed to.
    """

    attack_params = FastGradientMethod.attack_params + ['momentum', 'grad_momentum', 'translation_invariant']

    def __init__(self, momentum=None, grad_momentum=None, translation_invariant=False, **kwargs):
        """
        :param momentum: decay factor of the gradient moving average (None disables it)
        :param grad_momentum: array holding the accumulated gradient; required when momentum is set
        :param translation_invariant: smooth the gradient with a 15x15 Gaussian kernel
        """
        super().__init__(**kwargs)
        self.momentum = momentum
        self.grad_momentum = grad_momentum
        self.translation_invariant = translation_invariant
        if momentum and grad_momentum is None:
            raise ValueError("grad should be provided when using momentum")

    def _compute_perturbation(self, batch: np.ndarray, batch_labels: np.ndarray, mask: Optional[np.ndarray], batch_grad_momentum: Optional[np.ndarray]) -> \
            Tuple[np.ndarray, np.ndarray]:
        """Return (normalized perturbation, updated momentum buffer) for one batch."""
        # Pick a small scalar to avoid division by 0 (NOTE: 10e-8 == 1e-7)
        tol = 10e-8
        # Get gradient wrt loss; invert it if attack is targeted
        grad = self.estimator.loss_gradient(batch, batch_labels) * (1 - 2 * int(self.targeted))
        if self.translation_invariant:
            # original implementation:
            # noise = tf.nn.depthwise_conv2d(noise, stack_kernel, strides=[1, 1, 1, 1], padding='SAME')
            kernlen = 15
            nb_channels = batch.shape[1]
            padding = int((kernlen - 1) / 2)  # same padding
            with torch.no_grad():
                kernel = torch.from_numpy(self.gkern(kernlen=kernlen).astype(np.float32))
                # one identical kernel per channel => depthwise convolution via groups
                stack_kernel = kernel.view(1, 1, kernlen, kernlen).repeat(nb_channels, 1, 1, 1).to(DEVICE)
                grad = torch.nn.functional.conv2d(torch.from_numpy(grad).to(DEVICE), stack_kernel, padding=padding, groups=nb_channels).cpu().numpy()
            if grad.shape != batch.shape:
                raise RuntimeError('Translation invariant gradient does not have the same dimension as input')

        # Apply norm bound
        def _apply_norm(grad, norm=self.norm, object_type=False):
            if norm in [np.inf, "inf"]:
                grad = np.sign(grad)
            elif norm == 1:
                if not object_type:
                    ind = tuple(range(1, len(batch.shape)))
                else:
                    ind = None
                grad = grad / (np.sum(np.abs(grad), axis=ind, keepdims=True) + tol)
            elif norm == 2:
                if not object_type:
                    ind = tuple(range(1, len(batch.shape)))
                else:
                    ind = None
                grad = grad / (np.sqrt(np.sum(np.square(grad), axis=ind, keepdims=True)) + tol)
            return grad

        # momentum: accumulate the L1-normalized gradient into the moving average
        if self.momentum:
            # scale L1-norm (fix: `object` instead of removed `np.object` alias)
            if batch.dtype == object:
                for i_sample in range(batch.shape[0]):
                    grad[i_sample] = _apply_norm(grad[i_sample], norm=1, object_type=True)
                    assert batch[i_sample].shape == grad[i_sample].shape
            else:
                grad = _apply_norm(grad, norm=1)
            # update moving avg
            batch_grad_momentum = batch_grad_momentum * self.momentum + grad
            grad = batch_grad_momentum.copy()
        # Apply mask
        if mask is not None:
            grad = np.where(mask == 0.0, 0.0, grad)
        # Apply norm bound
        if batch.dtype == object:
            for i_sample in range(batch.shape[0]):
                grad[i_sample] = _apply_norm(grad[i_sample], object_type=True)
                assert batch[i_sample].shape == grad[i_sample].shape
        else:
            grad = _apply_norm(grad)
        assert batch.shape == grad.shape
        return grad, batch_grad_momentum

    @staticmethod
    def gkern(kernlen=21, nsig=3):
        """Returns a 2D Gaussian kernel array.
        From the original implementation https://github.com/dongyp13/Translation-Invariant-Attacks/blob/master/attack_iter.py
        """
        x = np.linspace(-nsig, nsig, kernlen)
        kern1d = st.norm.pdf(x)
        kernel_raw = np.outer(kern1d, kern1d)
        kernel = kernel_raw / kernel_raw.sum()
        return kernel

    def _compute(
        self,
        x: np.ndarray,
        x_init: np.ndarray,
        y: np.ndarray,
        mask: Optional[np.ndarray],
        eps: float,
        eps_step: float,
        project: bool,
        random_init: bool,
    ) -> np.ndarray:
        """One FGM step over x, threading the momentum buffer through each batch."""
        if random_init:
            # start from a random point inside the eps-ball
            n = x.shape[0]
            m = np.prod(x.shape[1:]).item()
            random_perturbation = random_sphere(n, m, eps, self.norm).reshape(x.shape).astype(ART_NUMPY_DTYPE)
            if mask is not None:
                random_perturbation = random_perturbation * (mask.astype(ART_NUMPY_DTYPE))
            x_adv = x.astype(ART_NUMPY_DTYPE) + random_perturbation
            if self.estimator.clip_values is not None:
                clip_min, clip_max = self.estimator.clip_values
                x_adv = np.clip(x_adv, clip_min, clip_max)
        else:
            if x.dtype == object:  # fix: was np.object
                x_adv = x.copy()
            else:
                x_adv = x.astype(ART_NUMPY_DTYPE)
        # Compute perturbation with implicit batching
        for batch_id in range(int(np.ceil(x.shape[0] / float(self.batch_size)))):
            batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size
            batch_index_2 = min(batch_index_2, x.shape[0])
            batch = x_adv[batch_index_1:batch_index_2]
            batch_labels = y[batch_index_1:batch_index_2]
            batch_grad_momentum = self.grad_momentum[batch_index_1:batch_index_2]
            mask_batch = mask
            if mask is not None:
                # Here we need to make a distinction: if the masks are different for each input, we need to index
                # those for the current batch. Otherwise (i.e. mask is meant to be broadcasted), keep it as it is.
                if len(mask.shape) == len(x.shape):
                    mask_batch = mask[batch_index_1:batch_index_2]
            # Get perturbation
            perturbation, batch_grad_momentum = self._compute_perturbation(batch, batch_labels, mask_batch, batch_grad_momentum)
            # Apply perturbation and clip
            x_adv[batch_index_1:batch_index_2] = self._apply_perturbation(batch, perturbation, eps_step)
            # persist the momentum buffer for the next call
            self.grad_momentum[batch_index_1:batch_index_2] = batch_grad_momentum
            if project:
                if x_adv.dtype == object:  # fix: was np.object
                    for i_sample in range(batch_index_1, batch_index_2):
                        perturbation = projection(x_adv[i_sample] - x_init[i_sample], eps, self.norm)
                        x_adv[i_sample] = x_init[i_sample] + perturbation
                else:
                    perturbation = projection(
                        x_adv[batch_index_1:batch_index_2] - x_init[batch_index_1:batch_index_2], eps, self.norm
                    )
                    x_adv[batch_index_1:batch_index_2] = x_init[batch_index_1:batch_index_2] + perturbation
        return x_adv
class ExtendedProjectedGradientDescentPyTorch(ProjectedGradientDescentPyTorch):
    """
    Implement Light Version of PGD where only a subset of models is attack at each iterations.
    Also support some test-time techniques for transferability.
    :param models_target_dict: Dictionary of target models as values with the name as key.
    :param freq_eval_target: Compute accuracy on target each provided iterations. All iterations by default.
    :param data: Data class. For now only required with models_target_dict.
    """
    # bug fix: a trailing comma previously turned attack_params into a 1-tuple
    # containing a list, which broke ART's set_params() parameter-name validation.
    attack_params = ProjectedGradientDescentPyTorch.attack_params + ['momentum', 'translation_invariant', 'grad_noise_std', 'models_target_dict', 'freq_eval_target', 'data']

    def __init__(
        self,
        estimators: List[PyTorchClassifier],  # list of classifiers; the first one is the base estimator
        momentum: float = None,
        translation_invariant: bool = False,
        grad_noise_std: float = None,
        models_target_dict=None,
        freq_eval_target=1,
        data=None,
        **kwargs
    ):
        self.estimators = estimators
        super().__init__(estimator=estimators[0], **kwargs)
        self.momentum = momentum
        self.translation_invariant = translation_invariant
        self.grad_noise_std = grad_noise_std
        # to report target accuracy at each freq_eval_target iteration
        self.freq_eval_target = freq_eval_target
        self.data = data
        if models_target_dict and data is None:
            raise ValueError('data param should be provided if models_target_dict is set.')
        self.models_target_dict = models_target_dict
        # per-target-model history of (iteration, accuracy, loss) filled during generate()
        self.stats_per_iter = {k: {'iter': [], 'acc': [], 'loss': []} for k in models_target_dict} if models_target_dict else {}
def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
"""
Generate adversarial samples and return them in an array.
Modified to compute the successful adversarial on all the classifiers in the list. Not only one.
:param x: An array with the original inputs.
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
(nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect
(explained in this paper: https://arxiv.org/abs/1611.01236). Default is `None`.
:param mask: An array with a mask broadcastable to input `x` defining where to apply adversarial perturbations.
Shape needs to be broadcastable to the shape of x and can also be of the same shape as `x`. Any
features for which the mask is zero will not be adversarially perturbed.
:type mask: `np.ndarray`
:return: An array holding the adversarial examples.
"""
import torch # lgtm [py/repeated-import]
mask = self._get_mask(x, **kwargs)
# Ensure eps is broadcastable
self._check_compatibility_input_and_eps(x=x)
# Check whether random eps is enabled
self._random_eps()
# Set up targets
targets = self._set_targets(x, y)
# Create dataset
if mask is not None:
# Here we need to make a distinction: if the masks are different for each input, we need to index
# those for the current batch. Otherwise (i.e. mask is meant to be broadcasted), keep it as it is.
if len(mask.shape) == len(x.shape):
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x.astype(ART_NUMPY_DTYPE)),
torch.from_numpy(targets.astype(ART_NUMPY_DTYPE)),
torch.from_numpy(mask.astype(ART_NUMPY_DTYPE)),
)
else:
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x.astype(ART_NUMPY_DTYPE)),
torch.from_numpy(targets.astype(ART_NUMPY_DTYPE)),
torch.from_numpy(np.array([mask.astype(ART_NUMPY_DTYPE)] * x.shape[0])),
)
else:
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x.astype(ART_NUMPY_DTYPE)), torch.from_numpy(targets.astype(ART_NUMPY_DTYPE)),
)
data_loader = torch.utils.data.DataLoader(
dataset=dataset, batch_size=self.batch_size, shuffle=False, drop_last=False
)
# Start to compute adversarial examples
adv_x = x.astype(ART_NUMPY_DTYPE)
# Compute perturbation with batching
for (batch_id, batch_all) in enumerate(
tqdm(data_loader, desc="PGD - Batches", leave=False, disable=not self.verbose)
):
if mask is not None:
(batch, batch_labels, mask_batch) = batch_all[0], batch_all[1], batch_all[2]
else:
(batch, batch_labels, mask_batch) = batch_all[0], batch_all[1], None
batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size
# Compute batch_eps and batch_eps_step
if isinstance(self.eps, np.ndarray):
if len(self.eps.shape) == len(x.shape) and self.eps.shape[0] == x.shape[0]:
batch_eps = self.eps[batch_index_1:batch_index_2]
batch_eps_step = self.eps_step[batch_index_1:batch_index_2]
else:
batch_eps = self.eps
batch_eps_step = self.eps_step
else:
batch_eps = self.eps
batch_eps_step = self.eps_step
for rand_init_num in range(max(1, self.num_random_init)):
if rand_init_num == 0:
# first iteration: use the adversarial examples as they are the only ones we have now
adv_x[batch_index_1:batch_index_2] = self._generate_batch(
x=batch, targets=batch_labels, mask=mask_batch, eps=batch_eps, eps_step=batch_eps_step
)
else:
adversarial_batch = self._generate_batch(
x=batch, targets=batch_labels, mask=mask_batch, eps=batch_eps, eps_step=batch_eps_step
)
# return the successful adversarial examples
# modified:
attack_success = self.compute_success_array(
batch,
batch_labels,
adversarial_batch,
self.targeted,
batch_size=self.batch_size,
)
adv_x[batch_index_1:batch_index_2][attack_success] = adversarial_batch[attack_success]
# modified:
# logger.info(
# "Success rate of attack: %.2f%%",
# 100 * self.compute_success(x, y, adv_x, self.targeted, batch_size=self.batch_size),
# )
return adv_x
def compute_success_array(
self,
x_clean: np.ndarray,
labels: np.ndarray,
x_adv: np.ndarray,
targeted: bool = False,
batch_size: int = 1,
) -> float:
"""
Compute the success rate of an attack based on clean samples, adversarial samples and targets or correct labels.
Modified: use the list of estimators to compute the predictions
:param x_clean: Original clean samples.
:param labels: Correct labels of `x_clean` if the attack is untargeted, or target labels of the attack otherwise.
:param x_adv: Adversarial samples to be evaluated.
:param targeted: `True` if the attack is targeted. In that case, `labels` are treated as target classes instead of
correct labels of the clean samples.
:param batch_size: Batch size.
:return: Percentage of successful adversarial samples.
"""
adv_results = np.zeros((x_adv.shape[0], self.estimators[0].nb_classes), dtype=np.float32)
for classifier in self.estimators:
adv_results += classifier.predict(x_adv, batch_size=batch_size)
adv_preds = np.argmax(adv_results, axis=1)
if targeted:
attack_success = adv_preds == np.argmax(labels, axis=1)
else:
results = np.zeros((x_clean.shape[0], self.estimators[0].nb_classes), dtype=np.float32)
for classifier in self.estimators:
results += classifier.predict(x_clean, batch_size=batch_size)
preds = np.argmax(results, axis=1)
attack_success = adv_preds != preds
return attack_success
def _generate_batch(
self,
x: "torch.Tensor",
targets: "torch.Tensor",
mask: "torch.Tensor",
eps: Union[int, float, np.ndarray],
eps_step: Union[int, float, np.ndarray],
) -> np.ndarray:
"""
Generate a batch of adversarial samples and return them in an array. Each iteration is computed on a different estimator from estimators.
:param x: An array with the original inputs.
:param targets: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)`.
:param mask: An array with a mask to be applied to the adversarial perturbations. Shape needs to be
broadcastable to the shape of x. Any features for which the mask is zero will not be adversarially
perturbed.
:param eps: Maximum perturbation that the attacker can introduce.
:param eps_step: Attack step size (input variation) at each iteration.
:return: Adversarial examples.
"""
inputs = x.to(self.estimator.device)
targets = targets.to(self.estimator.device)
y = torch.max(targets, dim=1).indices # class idx
adv_x = inputs
if mask is not None:
mask = mask.to(self.estimator.device)
# init grad_momentum at beginning of a batch
self.grad_momentum = torch.zeros(x.size()).to(self.estimator.device)
for i_max_iter in range(self.max_iter):
# cycle between estimators
self._estimator = self.estimators[i_max_iter % len(self.estimators)]
adv_x = self._compute_torch(
adv_x, inputs, targets, mask, eps, eps_step, self.num_random_init > 0 and i_max_iter == 0,
)
# compute target accuracy at each freq_eval_target iteration (+ first/last iterations)
if self.models_target_dict:
if (i_max_iter % self.freq_eval_target == 0) or (i_max_iter == self.max_iter-1):
for name_target, model_target in self.models_target_dict.items():
acc_target, loss_target = compute_accuracy_from_nested_list_models(
[[model_target, ], ], X=adv_x.cpu(), y=y.cpu(), data=self.data)
self.stats_per_iter[name_target]['iter'].append(i_max_iter)
self.stats_per_iter[name_target]['acc'].append(acc_target)
self.stats_per_iter[name_target]['loss'].append(loss_target)
return adv_x.cpu().detach().numpy()
def _compute_perturbation(
self, x: "torch.Tensor", y: "torch.Tensor", mask: Optional["torch.Tensor"]
) -> "torch.Tensor":
"""
Compute perturbations.
:param x: Current adversarial examples.
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
(nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect
(explained in this paper: https://arxiv.org/abs/1611.01236). Default is `None`.
:param mask: An array with a mask to be applied to the adversarial perturbations. Shape needs to be
broadcastable to the shape of x. Any features for which the mask is zero will not be adversarially
perturbed.
:return: Perturbations.
"""
import torch # lgtm [py/repeated-import]
# Pick a small scalar to avoid division by 0
tol = 10e-8
# Get gradient wrt loss; invert it if attack is targeted
grad = self.estimator.loss_gradient(x=x, y=y) * (1 - 2 * int(self.targeted))
# added:
if self.translation_invariant:
# original implementation:
#noise = tf.nn.depthwise_conv2d(noise, stack_kernel, strides=[1, 1, 1, 1], padding='SAME')
# translation_invariant kernel
#kernel = self.gkern(15, 3).astype(np.float32)
#stack_kernel = np.stack([kernel, kernel, kernel]).swapaxes(2, 0)
#stack_kernel = np.expand_dims(stack_kernel, 3)
# kernlen = 15
kernlen = int(os.getenv('ADV_TRANSFER_TI_KERNEL_SIZE', '15'))
nb_channels = x.size(1)
padding = int((kernlen - 1) / 2) # same padding
with torch.no_grad():
kernel = self.gkern(kernlen=kernlen).to(self.estimator.device)
stack_kernel = kernel.view(1, 1, kernlen, kernlen).repeat(nb_channels, 1, 1, 1)
grad = torch.nn.functional.conv2d(grad, stack_kernel, padding=padding, groups=nb_channels)
if grad.shape != x.shape:
raise RuntimeError('Translation invariant gradient does not have the same dimension as input')
# added for momentum
if self.momentum:
# scale L1-norm
ind = tuple(range(1, len(x.shape)))
grad = grad / (torch.sum(grad.abs(), dim=ind, keepdims=True) + tol) # type: ignore
# update moving avg
with torch.no_grad():
self.grad_momentum = self.grad_momentum * self.momentum + grad
grad = self.grad_momentum
# add gaussian noise to gradients
if self.grad_noise_std:
grad += torch.randn(grad.shape).to(self.estimator.device) * self.grad_noise_std
# Apply mask
if mask is not None:
grad = torch.where(mask == 0.0, torch.tensor(0.0), grad)
# Apply norm bound
if self.norm in ["inf", np.inf]:
grad = grad.sign()
elif self.norm == 1:
ind = tuple(range(1, len(x.shape)))
grad = grad / (torch.sum(grad.abs(), dim=ind, keepdims=True) + tol) # type: ignore
elif self.norm == 2:
ind = tuple(range(1, len(x.shape)))
grad = grad / (torch.sqrt(torch.sum(grad * grad, axis=ind, keepdims=True)) + tol) # type: ignore
assert x.shape == grad.shape
return grad
@staticmethod
def gkern(kernlen=21, nsig=3):
"""Returns a 2D Gaussian kernel array.
From the original implementation https://github.com/dongyp13/Translation-Invariant-Attacks/blob/master/attack_iter.py
"""
x = np.linspace(-nsig, nsig, kernlen)
kern1d = st.norm.pdf(x)
kernel_raw = np.outer(kern1d, kern1d)
kernel = kernel_raw / kernel_raw.sum()
# convert to tensor
kernel = torch.from_numpy(kernel.astype(np.float32))
return kernel
def get_target_accuracy_per_iter(self, name_target):
if not self.models_target_dict:
raise ValueError('models_target_dict must be specified')
return self.stats_per_iter[name_target]
| 23,513 | 45.562376 | 174 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/swag/losses.py | import torch
import torch.nn.functional as F
def cross_entropy(model, input, target):
    """Forward the batch through `model` and score the logits with
    standard cross-entropy. Returns (loss, logits)."""
    logits = model(input)
    return F.cross_entropy(logits, target), logits
def adversarial_cross_entropy(
    model, input, target, lossfn=F.cross_entropy, epsilon=0.01
):
    """FGSM-regularized loss: average of the clean loss and the loss on an
    epsilon-sized sign-gradient perturbation of the input.

    Returns (mean of clean and adversarial loss, clean logits).
    NOTE: mutates `input` (sets requires_grad and zeroes its .grad).
    """
    # loss function based on algorithm 1 of "simple and scalable uncertainty estimation using
    # deep ensembles," lakshminaraynan, pritzel, and blundell, nips 2017,
    # https://arxiv.org/pdf/1612.01474.pdf
    # note: the small difference bw this paper is that here the loss is only backpropped
    # through the adversarial loss rather than both due to memory constraints on preresnets
    # we can change back if we want to restrict ourselves to VGG-like networks (where it's fine).
    # scale epsilon by min and max (should be [0,1] for all experiments)
    # see algorithm 1 of paper
    scaled_epsilon = epsilon * (input.max() - input.min())
    # force inputs to require gradient
    input.requires_grad = True
    # standard forwards pass
    output = model(input)
    loss = lossfn(output, target)
    # now compute gradients wrt input
    loss.backward(retain_graph=True)
    # now compute sign of gradients
    inputs_grad = torch.sign(input.grad)
    # perturb inputs and use clamped output
    inputs_perturbed = torch.clamp(
        input + scaled_epsilon * inputs_grad, 0.0, 1.0
    ).detach()
    # inputs_perturbed.requires_grad = False
    input.grad.zero_()
    # model.zero_grad()
    outputs_perturbed = model(inputs_perturbed)
    # compute adversarial version of loss
    adv_loss = lossfn(outputs_perturbed, target)
    # return mean of loss for reasonable scalings
    return (loss + adv_loss) / 2.0, output
def masked_loss(y_pred, y_true, void_class=11.0, weight=None, reduce=True):
    """Cross-entropy that ignores positions labelled `void_class`.

    If `reduce` is True, returns the mean loss over the non-void positions;
    otherwise returns the (per-position loss, mask) pair.
    """
    void = torch.ones_like(y_true) * void_class
    valid = torch.ne(y_true, void).long()
    # void labels are zeroed so cross_entropy never sees an out-of-range class
    per_elem = F.cross_entropy(y_pred, y_true * valid, weight=weight, reduction="none")
    per_elem = valid.float() * per_elem
    if reduce:
        return per_elem.sum() / valid.sum()
    else:
        return per_elem, valid
def seg_cross_entropy(model, input, target, weight=None):
    """Segmentation loss: masked cross-entropy on the model's logits.
    Returns a dict with the scalar loss and the raw logits."""
    logits = model(input)
    # use masked loss function
    return {"loss": masked_loss(logits, target, weight=weight), "output": logits}
def seg_ale_cross_entropy(model, input, target, num_samples=50, weight=None):
    """Aleatoric segmentation loss: the model must emit a mean and a scale
    channel (output[:, 0] / output[:, 1]); the loss Monte-Carlo averages the
    masked cross-entropy over `num_samples` Gaussian logit samples."""
    out = model(input)
    mu = out[:, 0, :, :, :]
    sigma = out[:, 1, :, :, :].abs()
    logit_dist = torch.distributions.Normal(mu, sigma)
    accumulated = 0
    for _ in range(num_samples):
        drawn = logit_dist.rsample()
        sample_loss, mask = masked_loss(drawn, target, weight=weight, reduce=False)
        accumulated = accumulated + sample_loss.exp()
    averaged = accumulated / num_samples
    return {"loss": averaged.log().sum() / mask.sum(), "output": mu, "scale": sigma}
| 3,094 | 28.47619 | 97 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/swag/utils.py | import itertools
import torch
import os
import copy
from datetime import datetime
import math
import numpy as np
import tqdm
import torch.nn.functional as F
def flatten(lst):
    """Concatenate a list of tensors into a single 1-D vector."""
    columns = [t.contiguous().view(-1, 1) for t in lst]
    return torch.cat(columns).view(-1)
def unflatten_like(vector, likeTensorList):
    """Split a (1, N) row `vector` into a list of tensors shaped like the
    entries of `likeTensorList` (inverse of `flatten`, up to the leading dim)."""
    pieces = []
    offset = 0
    for reference in likeTensorList:
        count = reference.numel()
        pieces.append(vector[:, offset : offset + count].view(reference.shape))
        offset += count
    return pieces
def LogSumExp(x, dim=0):
    """Numerically stable log-sum-exp along `dim` (keeps the reduced dim)."""
    peak, _ = torch.max(x, dim=dim, keepdim=True)
    shifted = (x - peak).exp().sum(dim=dim, keepdim=True)
    return peak + torch.log(shifted)
def adjust_learning_rate(optimizer, lr):
    """Set `lr` on every param group of `optimizer`; returns `lr` for convenience."""
    for group in optimizer.param_groups:
        group["lr"] = lr
    return lr
def save_checkpoint(dir, epoch, name="checkpoint", **kwargs):
    """Serialize `kwargs` plus the epoch number to `<dir>/<name>-<epoch>.pt`."""
    state = {"epoch": epoch}
    state.update(kwargs)
    target_path = os.path.join(dir, "%s-%d.pt" % (name, epoch))
    torch.save(state, target_path)
def train_epoch(
    loader,
    model,
    criterion,
    optimizer,
    cuda=True,
    regression=False,
    verbose=False,
    subset=None,
):
    """Run one optimization pass over `loader`.

    `criterion(model, input, target)` must return (loss, output). When
    `subset` is set, only that fraction of batches is consumed. Returns a
    dict with the sample-weighted mean loss and accuracy in percent
    (accuracy is None for regression).
    """
    running_loss = 0.0
    n_correct = 0.0
    print_stage = 0
    n_seen = 0
    total_batches = len(loader)
    model.train()
    if subset is not None:
        total_batches = int(total_batches * subset)
        loader = itertools.islice(loader, total_batches)
    if verbose:
        loader = tqdm.tqdm(loader, total=total_batches)
    for batch_idx, (input, target) in enumerate(loader):
        if cuda:
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
        loss, output = criterion(model, input, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_size = input.size(0)
        running_loss += loss.data.item() * batch_size
        if not regression:
            predicted = output.data.argmax(1, keepdim=True)
            n_correct += predicted.eq(target.data.view_as(predicted)).sum().item()
        n_seen += batch_size
        # print a progress line roughly every 10% of the epoch
        if verbose and 10 * (batch_idx + 1) / total_batches >= print_stage + 1:
            print(
                "Stage %d/10. Loss: %12.4f. Acc: %6.2f"
                % (
                    print_stage + 1,
                    running_loss / n_seen,
                    n_correct / n_seen * 100.0,
                )
            )
            print_stage += 1
    return {
        "loss": running_loss / n_seen,
        "accuracy": None if regression else n_correct / n_seen * 100.0,
    }
def eval(loader, model, criterion, cuda=True, regression=False, verbose=False):
    """Evaluate `model` over `loader` without gradients.

    Mirrors `train_epoch`'s return value; normalizes by `len(loader.dataset)`.
    (Note: this intentionally shadows the builtin `eval` — kept for callers.)
    """
    total_loss = 0.0
    n_correct = 0.0
    n_total = len(loader.dataset)
    model.eval()
    with torch.no_grad():
        if verbose:
            loader = tqdm.tqdm(loader)
        for input, target in loader:
            if cuda:
                input = input.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
            loss, output = criterion(model, input, target)
            total_loss += loss.item() * input.size(0)
            if not regression:
                predicted = output.data.argmax(1, keepdim=True)
                n_correct += predicted.eq(target.data.view_as(predicted)).sum().item()
    return {
        "loss": total_loss / n_total,
        "accuracy": None if regression else n_correct / n_total * 100.0,
    }
def predict(loader, model, verbose=False):
    """Collect softmax predictions and targets over `loader`.
    NOTE: moves inputs to CUDA unconditionally — requires a GPU."""
    all_probs = list()
    all_targets = list()
    model.eval()
    if verbose:
        loader = tqdm.tqdm(loader)
    offset = 0
    with torch.no_grad():
        for input, target in loader:
            input = input.cuda(non_blocking=True)
            output = model(input)
            all_probs.append(F.softmax(output, dim=1).cpu().numpy())
            all_targets.append(target.numpy())
            offset += input.size(0)
    return {"predictions": np.vstack(all_probs), "targets": np.concatenate(all_targets)}
def moving_average(net1, net2, alpha=1):
    """In-place blend of parameters: net1 <- (1 - alpha) * net1 + alpha * net2."""
    for p1, p2 in zip(net1.parameters(), net2.parameters()):
        p1.data *= 1.0 - alpha
        p1.data += p2.data * alpha
def _check_bn(module, flag):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
flag[0] = True
def check_bn(model):
    """Return True iff `model` contains at least one batch-norm layer."""
    found = [False]
    model.apply(lambda m: _check_bn(m, found))
    return found[0]
def reset_bn(module):
    """Reset a batch-norm layer's running statistics (mean -> 0, var -> 1)."""
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        module.running_mean = torch.zeros_like(module.running_mean)
        module.running_var = torch.ones_like(module.running_var)
def _get_momenta(module, momenta):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
momenta[module] = module.momentum
def _set_momenta(module, momenta):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
module.momentum = momenta[module]
def bn_update(loader, model, verbose=False, subset=None, **kwargs):
    """
    BatchNorm buffers update (if any).
    Performs 1 epochs to estimate buffers average using train dataset.
    :param loader: train dataset loader for buffers average estimation.
    :param model: model being update
    :return: None
    """
    if not check_bn(model):
        return
    model.train()
    saved_momenta = {}
    model.apply(reset_bn)
    model.apply(lambda module: _get_momenta(module, saved_momenta))
    seen = 0
    total_batches = len(loader)
    with torch.no_grad():
        if subset is not None:
            total_batches = int(total_batches * subset)
            loader = itertools.islice(loader, total_batches)
        if verbose:
            loader = tqdm.tqdm(loader, total=total_batches)
        for input, _ in loader:
            input = input.cuda(non_blocking=True)
            input_var = torch.autograd.Variable(input)
            batch = input_var.data.size(0)
            # cumulative-average momentum so each batch contributes equally
            momentum = batch / (seen + batch)
            for bn_module in saved_momenta.keys():
                bn_module.momentum = momentum
            model(input_var, **kwargs)
            seen += batch
    model.apply(lambda module: _set_momenta(module, saved_momenta))
def inv_softmax(x, eps=1e-10):
    """Logit transform log(x / (1 - x)); `eps` guards against division by zero."""
    ratio = x / (1.0 - x + eps)
    return torch.log(ratio)
def predictions(test_loader, model, seed=None, cuda=True, regression=False, **kwargs):
    """Gather model outputs and targets as numpy arrays.

    Softmax probabilities unless `regression` is True; the caller is expected
    to have already put the model in eval mode.
    """
    collected = []
    labels = []
    for input, target in test_loader:
        if seed is not None:
            torch.manual_seed(seed)
        if cuda:
            input = input.cuda(non_blocking=True)
        output = model(input, **kwargs)
        if regression:
            collected.append(output.cpu().data.numpy())
        else:
            collected.append(F.softmax(output, dim=1).cpu().data.numpy())
        labels.append(target.numpy())
    return np.vstack(collected), np.concatenate(labels)
def schedule(epoch, lr_init, epochs, swa, swa_start=None, swa_lr=None):
    """Piecewise learning-rate schedule: constant for the first half of the
    budget, linear decay between 50% and 90%, then a constant floor.
    With `swa`, progress is measured against `swa_start` and the floor is
    `swa_lr`; otherwise the floor is 1% of `lr_init`."""
    progress = (epoch) / (swa_start if swa else epochs)
    floor_ratio = swa_lr / lr_init if swa else 0.01
    if progress <= 0.5:
        factor = 1.0
    elif progress <= 0.9:
        factor = 1.0 - (1.0 - floor_ratio) * (progress - 0.5) / 0.4
    else:
        factor = floor_ratio
    return lr_init * factor
| 7,489 | 26.740741 | 86 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/swag/data.py | """
separate data loader for imagenet
"""
import os
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def loaders(path, batch_size, num_workers, shuffle_train=True):
    """Build ImageNet train/val DataLoaders rooted at `path` (expects
    `<path>/train` and `<path>/val` ImageFolder layouts).

    Returns ({"train": DataLoader, "test": DataLoader}, num_classes).
    """
    train_dir = os.path.join(path, "train")
    # validation_dir = os.path.join(path, 'validation')
    validation_dir = os.path.join(path, "val")
    # transformations for pretrained models (https://github.com/pytorch/examples/blob/42e5b996718797e45c46a25c55b031e6768f8440/imagenet/main.py#L89-L101)
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    transform_train = transforms.Compose(
        [
            # FIX: RandomSizedCrop is a long-deprecated alias removed from
            # modern torchvision; RandomResizedCrop is its drop-in replacement.
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    )
    transform_test = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]
    )
    train_set = torchvision.datasets.ImageFolder(train_dir, transform=transform_train)
    test_set = torchvision.datasets.ImageFolder(
        validation_dir, transform=transform_test
    )
    num_classes = 1000
    return (
        {
            "train": torch.utils.data.DataLoader(
                train_set,
                batch_size=batch_size,
                shuffle=shuffle_train,
                num_workers=num_workers,
                pin_memory=True,
            ),
            "test": torch.utils.data.DataLoader(
                test_set,
                batch_size=batch_size,
                shuffle=False,
                num_workers=num_workers,
                pin_memory=True,
            ),
        },
        num_classes,
    )
| 1,851 | 25.84058 | 153 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/swag/posteriors/swag.py | """
implementation of SWAG
"""
import torch
import numpy as np
import itertools
from torch.distributions.normal import Normal
import copy
import gpytorch
from gpytorch.lazy import RootLazyTensor, DiagLazyTensor, AddedDiagLazyTensor
from gpytorch.distributions import MultivariateNormal
from ..utils import flatten, unflatten_like
def swag_parameters(module, params, no_cov_mat=True):
    """Replace each parameter of `module` by SWAG statistics buffers.

    For every parameter `p`, registers zeroed `p_mean` and `p_sq_mean` buffers
    (plus an empty `p_cov_mat_sqrt` deviation matrix when `no_cov_mat` is
    False), removes the original parameter, and appends (module, name) to
    `params` so the SWAG wrapper can locate the buffers later.
    """
    for name in list(module._parameters.keys()):
        param = module._parameters[name]
        if param is None:
            continue
        data = param.data
        module._parameters.pop(name)
        module.register_buffer("%s_mean" % name, data.new(data.size()).zero_())
        module.register_buffer("%s_sq_mean" % name, data.new(data.size()).zero_())
        if not no_cov_mat:
            module.register_buffer(
                "%s_cov_mat_sqrt" % name, data.new_empty((0, data.numel())).zero_()
            )
        params.append((module, name))
class SWAG(torch.nn.Module):
    """SWA-Gaussian posterior wrapper around a `base` network.

    Maintains running first/second moments of the weights (and, optionally,
    a low-rank deviation matrix) collected via `collect_model`, and draws
    Gaussian weight samples with `sample`. Also exposes log-likelihood /
    log-determinant helpers built on gpytorch lazy tensors.
    """
    def __init__(
        self, base, no_cov_mat=True, max_num_models=0, var_clamp=1e-30, *args, **kwargs
    ):
        # base: model *class* (not an instance) — instantiated with *args/**kwargs.
        # no_cov_mat: if True only diagonal (SWAG-Diag) statistics are tracked.
        # max_num_models: rank limit of the low-rank covariance factor.
        # var_clamp: lower bound on the diagonal variance for numerical safety.
        super(SWAG, self).__init__()
        self.register_buffer("n_models", torch.zeros([1], dtype=torch.long))
        self.params = list()
        self.no_cov_mat = no_cov_mat
        self.max_num_models = max_num_models
        self.var_clamp = var_clamp
        self.base = base(*args, **kwargs)
        # Replace every parameter of the base net by *_mean / *_sq_mean
        # (and optionally *_cov_mat_sqrt) buffers; see swag_parameters.
        self.base.apply(
            lambda module: swag_parameters(
                module=module, params=self.params, no_cov_mat=self.no_cov_mat
            )
        )
    def forward(self, *args, **kwargs):
        """Forward through the wrapped base network (using sampled/set weights)."""
        return self.base(*args, **kwargs)
    def sample(self, scale=1.0, cov=False, seed=None, block=False, fullrank=True):
        """Draw one weight sample and install it into the base network.

        scale: variance scaling; cov: also use the low-rank factor;
        block: sample each parameter tensor independently instead of jointly.
        """
        if seed is not None:
            torch.manual_seed(seed)
        if not block:
            self.sample_fullrank(scale, cov, fullrank)
        else:
            self.sample_blockwise(scale, cov, fullrank)
    def sample_blockwise(self, scale, cov, fullrank):
        """Sample each parameter tensor from its own (blockwise) Gaussian."""
        for module, name in self.params:
            mean = module.__getattr__("%s_mean" % name)
            sq_mean = module.__getattr__("%s_sq_mean" % name)
            eps = torch.randn_like(mean)
            # diagonal variance: E[w^2] - E[w]^2, clamped for stability
            var = torch.clamp(sq_mean - mean ** 2, self.var_clamp)
            scaled_diag_sample = scale * torch.sqrt(var) * eps
            if cov is True:
                cov_mat_sqrt = module.__getattr__("%s_cov_mat_sqrt" % name)
                eps = cov_mat_sqrt.new_empty((cov_mat_sqrt.size(0), 1)).normal_()
                # low-rank term, normalized by sqrt(K - 1) as in the SWAG paper
                cov_sample = (
                    scale / ((self.max_num_models - 1) ** 0.5)
                ) * cov_mat_sqrt.t().matmul(eps).view_as(mean)
                if fullrank:
                    w = mean + scaled_diag_sample + cov_sample
                else:
                    w = mean + scaled_diag_sample
            else:
                w = mean + scaled_diag_sample
            module.__setattr__(name, w)
    def sample_fullrank(self, scale, cov, fullrank):
        """Sample all parameters jointly from the flattened Gaussian."""
        scale_sqrt = scale ** 0.5
        mean_list = []
        sq_mean_list = []
        if cov:
            cov_mat_sqrt_list = []
        for (module, name) in self.params:
            mean = module.__getattr__("%s_mean" % name)
            sq_mean = module.__getattr__("%s_sq_mean" % name)
            if cov:
                cov_mat_sqrt = module.__getattr__("%s_cov_mat_sqrt" % name)
                cov_mat_sqrt_list.append(cov_mat_sqrt.cpu())
            mean_list.append(mean.cpu())
            sq_mean_list.append(sq_mean.cpu())
        mean = flatten(mean_list)
        sq_mean = flatten(sq_mean_list)
        # draw diagonal variance sample
        var = torch.clamp(sq_mean - mean ** 2, self.var_clamp)
        var_sample = var.sqrt() * torch.randn_like(var, requires_grad=False)
        # if covariance draw low rank sample
        if cov:
            cov_mat_sqrt = torch.cat(cov_mat_sqrt_list, dim=1)
            cov_sample = cov_mat_sqrt.t().matmul(
                cov_mat_sqrt.new_empty(
                    (cov_mat_sqrt.size(0),), requires_grad=False
                ).normal_()
            )
            cov_sample /= (self.max_num_models - 1) ** 0.5
            rand_sample = var_sample + cov_sample
        else:
            rand_sample = var_sample
        # update sample with mean and scale
        sample = mean + scale_sqrt * rand_sample
        sample = sample.unsqueeze(0)
        # unflatten new sample like the mean sample
        samples_list = unflatten_like(sample, mean_list)
        for (module, name), sample in zip(self.params, samples_list):
            # NOTE(review): assumes CUDA is available — samples are moved to GPU.
            module.__setattr__(name, sample.cuda())
    def collect_model(self, base_model):
        """Fold `base_model`'s current weights into the running SWAG statistics."""
        for (module, name), base_param in zip(self.params, base_model.parameters()):
            mean = module.__getattr__("%s_mean" % name)
            sq_mean = module.__getattr__("%s_sq_mean" % name)
            # first moment
            mean = mean * self.n_models.item() / (
                self.n_models.item() + 1.0
            ) + base_param.data / (self.n_models.item() + 1.0)
            # second moment
            sq_mean = sq_mean * self.n_models.item() / (
                self.n_models.item() + 1.0
            ) + base_param.data ** 2 / (self.n_models.item() + 1.0)
            # square root of covariance matrix
            if self.no_cov_mat is False:
                cov_mat_sqrt = module.__getattr__("%s_cov_mat_sqrt" % name)
                # block covariance matrices, store deviation from current mean
                dev = (base_param.data - mean).view(-1, 1)
                cov_mat_sqrt = torch.cat((cov_mat_sqrt, dev.view(-1, 1).t()), dim=0)
                # remove first column if we have stored too many models
                if (self.n_models.item() + 1) > self.max_num_models:
                    cov_mat_sqrt = cov_mat_sqrt[1:, :]
                module.__setattr__("%s_cov_mat_sqrt" % name, cov_mat_sqrt)
            module.__setattr__("%s_mean" % name, mean)
            module.__setattr__("%s_sq_mean" % name, sq_mean)
        self.n_models.add_(1)
    def load_state_dict(self, state_dict, strict=True):
        """Load a checkpoint, pre-sizing the cov_mat_sqrt buffers to match the
        stored rank (min(n_models, max_num_models)) before delegating."""
        if not self.no_cov_mat:
            n_models = state_dict["n_models"].item()
            rank = min(n_models, self.max_num_models)
            for module, name in self.params:
                mean = module.__getattr__("%s_mean" % name)
                module.__setattr__(
                    "%s_cov_mat_sqrt" % name,
                    mean.new_empty((rank, mean.numel())).zero_(),
                )
        super(SWAG, self).load_state_dict(state_dict, strict)
    def export_numpy_params(self, export_cov_mat=False):
        """Return flattened numpy (mean, var[, cov_mat_sqrt rows]) of the posterior."""
        mean_list = []
        sq_mean_list = []
        cov_mat_list = []
        for module, name in self.params:
            mean_list.append(module.__getattr__("%s_mean" % name).cpu().numpy().ravel())
            sq_mean_list.append(
                module.__getattr__("%s_sq_mean" % name).cpu().numpy().ravel()
            )
            if export_cov_mat:
                cov_mat_list.append(
                    module.__getattr__("%s_cov_mat_sqrt" % name).cpu().numpy().ravel()
                )
        mean = np.concatenate(mean_list)
        sq_mean = np.concatenate(sq_mean_list)
        var = sq_mean - np.square(mean)
        if export_cov_mat:
            return mean, var, cov_mat_list
        else:
            return mean, var
    def import_numpy_weights(self, w):
        """Install a flat numpy weight vector `w` into the base network."""
        k = 0
        for module, name in self.params:
            mean = module.__getattr__("%s_mean" % name)
            s = np.prod(mean.shape)
            module.__setattr__(name, mean.new_tensor(w[k : k + s].reshape(mean.shape)))
            k += s
    def generate_mean_var_covar(self):
        """Return per-parameter lists of (mean, diagonal variance, cov_mat_sqrt)."""
        mean_list = []
        var_list = []
        cov_mat_root_list = []
        for module, name in self.params:
            mean = module.__getattr__("%s_mean" % name)
            sq_mean = module.__getattr__("%s_sq_mean" % name)
            cov_mat_sqrt = module.__getattr__("%s_cov_mat_sqrt" % name)
            mean_list.append(mean)
            var_list.append(sq_mean - mean ** 2.0)
            cov_mat_root_list.append(cov_mat_sqrt)
        return mean_list, var_list, cov_mat_root_list
    def compute_ll_for_block(self, vec, mean, var, cov_mat_root):
        """Gaussian log-likelihood of `vec` under N(mean, diag(var) + root root^T),
        evaluated lazily via gpytorch."""
        vec = flatten(vec)
        mean = flatten(mean)
        var = flatten(var)
        cov_mat_lt = RootLazyTensor(cov_mat_root.t())
        var_lt = DiagLazyTensor(var + 1e-6)
        covar_lt = AddedDiagLazyTensor(var_lt, cov_mat_lt)
        qdist = MultivariateNormal(mean, covar_lt)
        with gpytorch.settings.num_trace_samples(
            1
        ) and gpytorch.settings.max_cg_iterations(25):
            return qdist.log_prob(vec)
    def block_logdet(self, var, cov_mat_root):
        """Log-determinant of diag(var) + root root^T (lazy evaluation)."""
        var = flatten(var)
        cov_mat_lt = RootLazyTensor(cov_mat_root.t())
        var_lt = DiagLazyTensor(var + 1e-6)
        covar_lt = AddedDiagLazyTensor(var_lt, cov_mat_lt)
        return covar_lt.log_det()
    def block_logll(self, param_list, mean_list, var_list, cov_mat_root_list):
        """Sum of per-parameter-block Gaussian log-likelihoods."""
        full_logprob = 0
        for i, (param, mean, var, cov_mat_root) in enumerate(
            zip(param_list, mean_list, var_list, cov_mat_root_list)
        ):
            # print('Block: ', i)
            block_ll = self.compute_ll_for_block(param, mean, var, cov_mat_root)
            full_logprob += block_ll
        return full_logprob
    def full_logll(self, param_list, mean_list, var_list, cov_mat_root_list):
        """Joint Gaussian log-likelihood over all parameters at once."""
        cov_mat_root = torch.cat(cov_mat_root_list, dim=1)
        mean_vector = flatten(mean_list)
        var_vector = flatten(var_list)
        param_vector = flatten(param_list)
        return self.compute_ll_for_block(
            param_vector, mean_vector, var_vector, cov_mat_root
        )
    def compute_logdet(self, block=False):
        """Log-determinant of the posterior covariance (blockwise or joint)."""
        _, var_list, covar_mat_root_list = self.generate_mean_var_covar()
        if block:
            full_logdet = 0
            for (var, cov_mat_root) in zip(var_list, covar_mat_root_list):
                block_logdet = self.block_logdet(var, cov_mat_root)
                full_logdet += block_logdet
        else:
            var_vector = flatten(var_list)
            cov_mat_root = torch.cat(covar_mat_root_list, dim=1)
            full_logdet = self.block_logdet(var_vector, cov_mat_root)
        return full_logdet
    def diag_logll(self, param_list, mean_list, var_list):
        """Diagonal-Gaussian log-likelihood (no low-rank term).
        NOTE(review): `var_list` is passed as Normal's scale (std), not variance —
        verify against callers before relying on the absolute value."""
        logprob = 0.0
        for param, mean, scale in zip(param_list, mean_list, var_list):
            logprob += Normal(mean, scale).log_prob(param).sum()
        return logprob
    def compute_logprob(self, vec=None, block=False, diag=False):
        """Log-probability of `vec` (or the currently installed weights)
        under the SWAG posterior; dispatches to diag/block/full variants."""
        mean_list, var_list, covar_mat_root_list = self.generate_mean_var_covar()
        if vec is None:
            param_list = [getattr(param, name) for param, name in self.params]
        else:
            param_list = unflatten_like(vec, mean_list)
        if diag:
            return self.diag_logll(param_list, mean_list, var_list)
        elif block is True:
            return self.block_logll(
                param_list, mean_list, var_list, covar_mat_root_list
            )
        else:
            return self.full_logll(param_list, mean_list, var_list, covar_mat_root_list)
| 11,331 | 34.63522 | 88 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/losses.py | import torch
import torch.nn.functional as F
class GaussianLikelihood:
    """
    Minus Gaussian likelihood for regression problems.

    With a fixed `noise_var`, this is the mean squared error divided by
    `2 * noise_var`. With `noise_var=None` the model is expected to output a
    per-sample mean (column 0) and variance (column 1), i.e. heteroscedastic
    regression.
    """
    def __init__(self, noise_var = 0.5):
        self.noise_var = noise_var
        self.mse = torch.nn.functional.mse_loss
    def __call__(self, model, input, target):
        output = model(input)
        if self.noise_var is None:
            # heteroscedastic branch: per-sample predicted mean and variance
            mean = output[:, 0].view_as(target)
            var = output[:, 1].view_as(target)
            pointwise = self.mse(mean, target, reduction='none')
            nll = pointwise / (2 * var) + 0.5 * torch.log(var)
            return nll.mean(), output[:, 0], {'mse': torch.mean((mean - target) ** 2.0)}
        # homoscedastic branch: scaled MSE
        mse = self.mse(output, target)
        return mse / (2 * self.noise_var), output, {"mse": mse}
def cross_entropy(model, input, target):
    """Standard cross-entropy; returns (loss, logits, empty stats dict)."""
    logits = model(input)
    return F.cross_entropy(logits, target), logits, {}
def cross_entropy_output(output, target):
    """Cross-entropy on precomputed logits; returns (loss, empty stats dict)."""
    return F.cross_entropy(output, target), {}
def adversarial_cross_entropy(model, input, target, lossfn = F.cross_entropy, epsilon = 0.01):
    """FGSM-regularized loss: average of the clean loss and the loss on an
    epsilon-sized sign-gradient perturbation of the input.

    Returns (mean of clean and adversarial loss, clean logits, empty stats dict).
    NOTE: mutates `input` (sets requires_grad and zeroes its .grad).
    """
    # loss function based on algorithm 1 of "simple and scalable uncertainty estimation using
    # deep ensembles," lakshminaraynan, pritzel, and blundell, nips 2017,
    # https://arxiv.org/pdf/1612.01474.pdf
    # note: the small difference bw this paper is that here the loss is only backpropped
    # through the adversarial loss rather than both due to memory constraints on preresnets
    # we can change back if we want to restrict ourselves to VGG-like networks (where it's fine).
    #scale epsilon by min and max (should be [0,1] for all experiments)
    #see algorithm 1 of paper
    scaled_epsilon = epsilon * (input.max() - input.min())
    #force inputs to require gradient
    input.requires_grad = True
    #standard forwards pass
    output = model(input)
    loss = lossfn(output, target)
    #now compute gradients wrt input
    loss.backward(retain_graph = True)
    #now compute sign of gradients
    inputs_grad = torch.sign(input.grad)
    #perturb inputs and use clamped output
    inputs_perturbed = torch.clamp(input + scaled_epsilon * inputs_grad, 0.0, 1.0).detach()
    #inputs_perturbed.requires_grad = False
    input.grad.zero_()
    #model.zero_grad()
    outputs_perturbed = model(inputs_perturbed)
    #compute adversarial version of loss
    adv_loss = lossfn(outputs_perturbed, target)
    #return mean of loss for reasonable scalings
    return (loss + adv_loss)/2.0, output, {}
def masked_loss(y_pred, y_true, void_class = 11., weight=None, reduce = True):
    """Cross-entropy that ignores entries labelled with the void class.

    When reduce is True returns the mean loss over non-void entries;
    otherwise returns (per-element masked loss, mask).
    """
    void_tensor = torch.ones_like(y_true) * void_class
    valid = torch.ne(y_true, void_tensor).long()
    # Void labels are remapped to 0 so cross_entropy never sees them;
    # their contribution is zeroed out by the mask afterwards.
    per_elem = F.cross_entropy(y_pred, y_true * valid, weight=weight, reduction='none')
    per_elem = valid.float() * per_elem
    if not reduce:
        return per_elem, valid
    return per_elem.sum() / valid.sum()
def seg_cross_entropy(model, input, target, weight = None):
    """Segmentation criterion: masked cross-entropy that skips the void class."""
    logits = model(input)
    return {'loss': masked_loss(logits, target, weight=weight), 'output': logits}
def seg_ale_cross_entropy(model, input, target, num_samples = 50, weight = None):
    """Aleatoric segmentation loss via Monte-Carlo sampling.

    The model must output two channels along dim 1: mean logits and a scale.
    Samples logits from Normal(mean, |scale|) num_samples times, averages the
    exponentiated masked losses, and returns the log of that average.
    """
    #requires two outputs for model(input)
    output = model(input)
    mean = output[:, 0, :, :, :]
    scale = output[:, 1, :, :, :].abs()
    output_distribution = torch.distributions.Normal(mean, scale)
    total_loss = 0
    for _ in range(num_samples):
        # rsample keeps the sampling differentiable (reparameterization trick)
        sample = output_distribution.rsample()
        current_loss, mask = masked_loss(sample, target, weight=weight, reduce=False)
        total_loss = total_loss + current_loss.exp()
    mean_loss = total_loss / num_samples
    # `mask` from the last iteration is reused here; it is identical across
    # iterations since it only depends on `target`.
    return {'loss': mean_loss.log().sum() / mask.sum(), 'output': mean, 'scale': scale}
| 4,258 | 29.862319 | 97 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/utils.py | import itertools
import torch
import os
import copy
from datetime import datetime
import math
import numpy as np
import tqdm
from collections import defaultdict
from time import gmtime, strftime
import sys
import torch.nn.functional as F
def get_logging_print(fname):
    """Return (print_fn, log_path): print_fn echoes to stdout and appends to the log.

    A '%s' placeholder in fname is filled with the launch timestamp.
    """
    launch_time = strftime("%m-%d_%H:%M:%S", gmtime())
    log_path = fname % launch_time if '%s' in fname else fname
    def print_func(*args):
        message = ' '.join(map(str, args))
        with open(log_path, 'a') as handle:
            handle.write(message + '\n')
            handle.flush()
        print(message)
        sys.stdout.flush()
    return print_func, log_path
def flatten(lst):
    """Concatenate a list of tensors into one flat 1-D tensor."""
    columns = [tensor.contiguous().view(-1, 1) for tensor in lst]
    return torch.cat(columns).view(-1)
def unflatten_like(vector, likeTensorList):
    """Split a (1, N) row vector into tensors shaped like the entries of likeTensorList."""
    pieces = []
    position = 0
    for reference in likeTensorList:
        count = reference.numel()
        pieces.append(vector[:, position:position + count].view(reference.shape))
        position += count
    return pieces
def LogSumExp(x, dim=0):
    """Numerically stable log-sum-exp along `dim` (reduced dim is kept)."""
    maxes, _ = torch.max(x, dim=dim, keepdim=True)
    return maxes + torch.log((x - maxes).exp().sum(dim=dim, keepdim=True))
def adjust_learning_rate(optimizer, lr):
    """Set every parameter group of the optimizer to the given learning rate."""
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def save_checkpoint(dir, epoch=None, name='checkpoint', sample=None, **kwargs):
    """Serialize a checkpoint to <dir>/<name>[-epoch|-sample].pt via torch.save.

    Extra keyword arguments are stored alongside 'epoch' and 'sample'.
    """
    os.makedirs(dir, exist_ok=True)
    if epoch is not None:
        filename = f'{name}-{epoch:05}.pt'
    elif sample is not None:
        filename = f'{name}-{sample:05}.pt'
    else:
        filename = f'{name}.pt'
    state = {'epoch': epoch, 'sample': sample}
    state.update(kwargs)
    torch.save(state, os.path.join(dir, filename))
def train_epoch(loader, model, criterion, optimizer, cuda=True, regression=False, verbose=False, subset=None):
    """Run one training epoch; returns {'loss', 'accuracy', 'stats'} averages.

    `criterion` must return (loss, output, stats_dict). With `subset` in (0, 1]
    only that fraction of batches is used; `verbose` prints 10 progress stages.
    Accuracy is None when regression=True.
    """
    loss_sum = 0.0
    stats_sum = defaultdict(float)
    correct = 0.0
    verb_stage = 0
    num_objects_current = 0
    num_batches = len(loader)
    model.train()
    if subset is not None:
        # Truncate the epoch to the first `subset` fraction of batches.
        num_batches = int(num_batches * subset)
        loader = itertools.islice(loader, num_batches)
    if verbose:
        loader = tqdm.tqdm(loader, total=num_batches)
    for i, (input, target) in enumerate(loader):
        if cuda:
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
        loss, output, stats = criterion(model, input, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Running sums are sample-weighted so the final averages are per-sample.
        loss_sum += loss.data.item() * input.size(0)
        for key, value in stats.items():
            stats_sum[key] += value * input.size(0)
        if not regression:
            pred = output.data.argmax(1, keepdim=True)
            correct += pred.eq(target.data.view_as(pred)).sum().item()
        num_objects_current += input.size(0)
        if verbose and 10 * (i + 1) / num_batches >= verb_stage + 1:
            print('Stage %d/10. Loss: %12.4f. Acc: %6.2f' % (
                verb_stage + 1, loss_sum / num_objects_current,
                correct / num_objects_current * 100.0
            ))
            verb_stage += 1
    return {
        'loss': loss_sum / num_objects_current,
        'accuracy': None if regression else correct / num_objects_current * 100.0,
        'stats': {key: value / num_objects_current for key, value in stats_sum.items()}
    }
def eval(loader, model, criterion, cuda=True, regression=False, verbose=False):
    """Evaluate the model over a loader; returns {'loss', 'accuracy', 'stats'}.

    NOTE: shadows the builtin `eval` within this module. `criterion` must
    return (loss, output, stats_dict). Accuracy is None when regression=True.
    """
    loss_sum = 0.0
    correct = 0.0
    stats_sum = defaultdict(float)
    num_objects_total = len(loader.dataset)
    model.eval()
    with torch.no_grad():
        if verbose:
            loader = tqdm.tqdm(loader)
        for i, (input, target) in enumerate(loader):
            if cuda:
                input = input.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
            loss, output, stats = criterion(model, input, target)
            loss_sum += loss.item() * input.size(0)
            # NOTE(review): unlike train_epoch, stats are NOT weighted by batch
            # size here, yet are divided by the dataset size below — confirm
            # this asymmetry is intended.
            for key, value in stats.items():
                stats_sum[key] += value
            if not regression:
                pred = output.data.argmax(1, keepdim=True)
                correct += pred.eq(target.data.view_as(pred)).sum().item()
    return {
        'loss': loss_sum / num_objects_total,
        'accuracy': None if regression else correct / num_objects_total * 100.0,
        'stats': {key: value / num_objects_total for key, value in stats_sum.items()}
    }
def predict(loader, model, verbose=False):
    """Collect softmax predictions of the model over a CUDA loader.

    Puts the model in eval mode, runs without gradient tracking, and returns
    {'predictions': stacked softmax probabilities, 'targets': concatenated labels}.
    (Dead locals `offset`/`batch_size` from the original were removed; they
    were accumulated but never read.)
    """
    predictions = list()
    targets = list()
    model.eval()
    if verbose:
        loader = tqdm.tqdm(loader)
    with torch.no_grad():
        for input, target in loader:
            input = input.cuda(non_blocking=True)
            output = model(input)
            predictions.append(F.softmax(output, dim=1).cpu().numpy())
            targets.append(target.numpy())
    return {
        'predictions': np.vstack(predictions),
        'targets': np.concatenate(targets)
    }
def moving_average(net1, net2, alpha=1):
    """In-place EMA update: net1 <- (1 - alpha) * net1 + alpha * net2."""
    for target, source in zip(net1.parameters(), net2.parameters()):
        target.data.mul_(1.0 - alpha)
        target.data.add_(source.data * alpha)
def _check_bn(module, flag):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
flag[0] = True
def check_bn(model):
    """Return True when the model contains at least one batch-norm layer."""
    found = [False]
    def visit(module):
        if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
            found[0] = True
    model.apply(visit)
    return found[0]
def reset_bn(module):
    """Visitor for Module.apply: reset a BN layer's running mean/var to 0/1."""
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        module.running_mean = torch.zeros_like(module.running_mean)
        module.running_var = torch.ones_like(module.running_var)
def _get_momenta(module, momenta):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
momenta[module] = module.momentum
def _set_momenta(module, momenta):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
module.momentum = momenta[module]
def bn_update(loader, model, verbose=False, subset=None, **kwargs):
    """
    BatchNorm buffers update (if any).
    Performs 1 epochs to estimate buffers average using train dataset.
    :param loader: train dataset loader for buffers average estimation.
    :param model: model being update
    :return: None
    """
    if not check_bn(model):
        return
    # Train mode is required so the forward passes update the running stats.
    model.train()
    momenta = {}
    model.apply(reset_bn)
    model.apply(lambda module: _get_momenta(module, momenta))
    n = 0
    num_batches = len(loader)
    with torch.no_grad():
        if subset is not None:
            num_batches = int(num_batches * subset)
            loader = itertools.islice(loader, num_batches)
        if verbose:
            loader = tqdm.tqdm(loader, total=num_batches)
        for input, _ in loader:
            input = input.cuda(non_blocking=True)
            input_var = torch.autograd.Variable(input)
            b = input_var.data.size(0)
            # Momentum b/(n+b) makes the running stats equal the plain average
            # over all samples seen so far.
            momentum = b / (n + b)
            for module in momenta.keys():
                module.momentum = momentum
            model(input_var, **kwargs)
            n += b
    # Restore each BN layer's original momentum.
    model.apply(lambda module: _set_momenta(module, momenta))
def inv_softmax(x, eps = 1e-10):
    """Logit transform log(x / (1 - x + eps)); eps guards against division by zero."""
    return torch.log(x / (1.0 - x + eps))
def predictions(test_loader, model, seed=None, cuda=True, regression=False, **kwargs):
    """Collect model outputs over a loader; returns (stacked preds, targets).

    Assumes the model is already in eval mode. Classification outputs are
    softmax probabilities; regression outputs are raw. A fixed `seed` is
    re-applied before every batch (for stochastic models).
    """
    all_preds = []
    all_targets = []
    for batch, labels in test_loader:
        if seed is not None:
            torch.manual_seed(seed)
        if cuda:
            batch = batch.cuda(non_blocking=True)
        out = model(batch, **kwargs)
        if regression:
            all_preds.append(out.cpu().data.numpy())
        else:
            all_preds.append(F.softmax(out, dim=1).cpu().data.numpy())
        all_targets.append(labels.numpy())
    return np.vstack(all_preds), np.concatenate(all_targets)
def schedule(epoch, lr_init, epochs, swa, swa_start=None, swa_lr=None):
    """Piecewise LR schedule: constant, then linear decay, then a floor.

    Progress runs over `swa_start` epochs when SWA is on, else over `epochs`.
    The floor ratio is swa_lr/lr_init with SWA and 0.01 otherwise.
    """
    horizon = swa_start if swa else epochs
    t = epoch / horizon
    floor_ratio = swa_lr / lr_init if swa else 0.01
    if t <= 0.5:
        scale = 1.0
    elif t <= 0.9:
        # Linear interpolation from 1.0 down to floor_ratio over t in (0.5, 0.9].
        scale = 1.0 - (1.0 - floor_ratio) * (t - 0.5) / 0.4
    else:
        scale = floor_ratio
    return lr_init * scale
def set_weights(model, vector, device=None):
    """Copy a flat parameter vector into the model's parameters, in order."""
    position = 0
    for param in model.parameters():
        count = param.numel()
        param.data.copy_(vector[position:position + count].view(param.size()).to(device))
        position += count
def extract_parameters(model):
    """Detach every parameter from its module; returns [(module, name, shape)].

    After this call the parameters are removed from the modules' _parameters
    dicts so they can later be re-attached as plain attributes (see
    set_weights_old). Entries whose value is None are skipped and kept.
    """
    extracted = []
    for submodule in model.modules():
        for pname in list(submodule._parameters.keys()):
            if submodule._parameters[pname] is None:
                continue
            tensor = submodule._parameters[pname]
            extracted.append((submodule, pname, tensor.size()))
            submodule._parameters.pop(pname)
    return extracted
def set_weights_old(params, w, device):
    """Re-attach slices of the flat vector w as plain attributes.

    `params` is the [(module, name, shape)] list produced by extract_parameters.
    """
    position = 0
    for module, name, shape in params:
        count = np.prod(shape)
        chunk = w[position:position + count]
        setattr(module, name, chunk.view(shape).to(device))
        position += count
| 9,479 | 28.349845 | 110 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/data.py | import numpy as np
import torch
import torchvision
import os
c10_classes = np.array([
[0, 1, 2, 8, 9],
[3, 4, 5, 6, 7]
], dtype=np.int32)
def camvid_loaders(path, batch_size, num_workers, transform_train, transform_test,
                   use_validation, val_size, shuffle_train=True,
                   joint_transform=None, ft_joint_transform=None, ft_batch_size=1, **kwargs):
    """Build CamVid segmentation loaders: train, fine_tune, val, test.

    Returns ({'train', 'fine_tune', 'val', 'test'} DataLoaders, num_classes).
    NOTE(review): `CamVid` is not imported in this module's visible imports —
    presumably provided elsewhere in the package; confirm.
    `use_validation` and `val_size` are accepted but unused here.
    """
    #load training and finetuning datasets
    print(path)
    train_set = CamVid(root=path, split='train', joint_transform=joint_transform, transform=transform_train, **kwargs)
    ft_train_set = CamVid(root=path, split='train', joint_transform=ft_joint_transform, transform=transform_train, **kwargs)
    val_set = CamVid(root=path, split='val', joint_transform=None, transform=transform_test, **kwargs)
    test_set = CamVid(root=path, split='test', joint_transform=None, transform=transform_test, **kwargs)
    num_classes = 11 # hard coded labels ehre
    return {'train': torch.utils.data.DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=shuffle_train,
        num_workers=num_workers,
        pin_memory=True
    ),
        'fine_tune': torch.utils.data.DataLoader(
            ft_train_set,
            batch_size=ft_batch_size,
            shuffle=shuffle_train,
            num_workers=num_workers,
            pin_memory=True
        ),
        'val': torch.utils.data.DataLoader(
            val_set,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=True
        ),
        'test': torch.utils.data.DataLoader(
            test_set,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=True
        )}, num_classes
def svhn_loaders(path, batch_size, num_workers, transform_train, transform_test, use_validation, val_size, shuffle_train=True):
    """Build SVHN loaders; returns ({'train', 'test'} DataLoaders, num_classes).

    With use_validation, the last `val_size` training samples become the
    'test' split (a validation set carved out of the train split); otherwise
    the real SVHN test split is used.
    """
    train_set = torchvision.datasets.SVHN(root=path, split='train', download = True, transform = transform_train)
    if use_validation:
        # Validation set = tail of the training split, with test transforms.
        test_set = torchvision.datasets.SVHN(root=path, split='train', download = True, transform = transform_test)
        train_set.data = train_set.data[:-val_size]
        train_set.labels = train_set.labels[:-val_size]
        test_set.data = test_set.data[-val_size:]
        test_set.labels = test_set.labels[-val_size:]
    else:
        print('You are going to run models on the test set. Are you sure?')
        test_set = torchvision.datasets.SVHN(root=path, split='test', download = True, transform = transform_test)
    num_classes = 10
    return \
        {
            'train': torch.utils.data.DataLoader(
                train_set,
                batch_size=batch_size,
                shuffle=True and shuffle_train,
                num_workers=num_workers,
                pin_memory=True
            ),
            'test': torch.utils.data.DataLoader(
                test_set,
                batch_size=batch_size,
                shuffle=False,
                num_workers=num_workers,
                pin_memory=True
            ),
        }, \
        num_classes
def loaders(dataset, path, batch_size, num_workers, transform_train, transform_test,
            use_validation=True, val_size=5000, split_classes=None, shuffle_train=True,
            **kwargs):
    """Build train/test DataLoaders for a torchvision dataset by name.

    Delegates to camvid_loaders / svhn_loaders for those datasets. Returns
    ({'train', 'test'} DataLoaders, num_classes). `split_classes` (CIFAR10
    only) restricts to one of the two hard-coded 5-class splits.
    NOTE(review): relies on the pre-0.2.2 torchvision attributes
    train_data/train_labels/test_data/test_labels — newer torchvision uses
    data/targets; confirm the pinned version before reuse.
    """
    if dataset == 'CamVid':
        return camvid_loaders(path, batch_size=batch_size, num_workers=num_workers, transform_train=transform_train,
                              transform_test=transform_test, use_validation=use_validation, val_size=val_size, **kwargs)
    path = os.path.join(path, dataset.lower())
    ds = getattr(torchvision.datasets, dataset)
    if dataset == 'SVHN':
        return svhn_loaders(path, batch_size, num_workers, transform_train, transform_test, use_validation, val_size)
    else:
        # NOTE(review): this lookup duplicates the one above.
        ds = getattr(torchvision.datasets, dataset)
        if dataset == 'STL10':
            train_set = ds(root=path, split='train', download=True, transform=transform_train)
            num_classes = 10
            # Remap STL10 labels so class order matches CIFAR10's convention.
            cls_mapping = np.array([0, 2, 1, 3, 4, 5, 7, 6, 8, 9])
            train_set.labels = cls_mapping[train_set.labels]
        elif dataset == 'ImageNet':
            train_set = ds(root=path, split='train', transform=transform_train)
        else:
            train_set = ds(root=path, train=True, download=True, transform=transform_train)
            num_classes = max(train_set.train_labels) + 1
        if use_validation:
            # Validation set = tail of the training split, with test transforms.
            print("Using train (" + str(len(train_set.train_data)-val_size) + ") + validation (" +str(val_size)+ ")")
            train_set.train_data = train_set.train_data[:-val_size]
            train_set.train_labels = train_set.train_labels[:-val_size]
            test_set = ds(root=path, train=True, download=True, transform=transform_test)
            test_set.train = False
            test_set.test_data = test_set.train_data[-val_size:]
            test_set.test_labels = test_set.train_labels[-val_size:]
            delattr(test_set, 'train_data')
            delattr(test_set, 'train_labels')
        else:
            print('You are going to run models on the test set. Are you sure?')
            if dataset == 'STL10':
                test_set = ds(root=path, split='test', download=True, transform=transform_test)
                test_set.labels = cls_mapping[test_set.labels]
            else:
                test_set = ds(root=path, train=False, download=True, transform=transform_test)
        if split_classes is not None:
            assert dataset == 'CIFAR10'
            assert split_classes in {0, 1}
            print('Using classes:', end='')
            print(c10_classes[split_classes])
            # Keep only samples of the chosen 5-class split and relabel 0..4.
            train_mask = np.isin(train_set.train_labels, c10_classes[split_classes])
            train_set.train_data = train_set.train_data[train_mask, :]
            train_set.train_labels = np.array(train_set.train_labels)[train_mask]
            train_set.train_labels = np.where(train_set.train_labels[:, None] == c10_classes[split_classes][None, :])[1].tolist()
            print('Train: %d/%d' % (train_set.train_data.shape[0], train_mask.size))
            test_mask = np.isin(test_set.test_labels, c10_classes[split_classes])
            test_set.test_data = test_set.test_data[test_mask, :]
            test_set.test_labels = np.array(test_set.test_labels)[test_mask]
            test_set.test_labels = np.where(test_set.test_labels[:, None] == c10_classes[split_classes][None, :])[1].tolist()
            print('Test: %d/%d' % (test_set.test_data.shape[0], test_mask.size))
        return \
            {
                'train': torch.utils.data.DataLoader(
                    train_set,
                    batch_size=batch_size,
                    shuffle=True and shuffle_train,
                    num_workers=num_workers,
                    pin_memory=True
                ),
                'test': torch.utils.data.DataLoader(
                    test_set,
                    batch_size=batch_size,
                    shuffle=False,
                    num_workers=num_workers,
                    pin_memory=True
                ),
            }, \
            num_classes
def loaders_inc(dataset, path, num_chunks, batch_size, num_workers, transform_train, transform_test, use_validation=True, val_size=5000, shuffle_train=True, seed=1):
    """Build incremental-learning loaders: the train split is shuffled with a
    fixed seed and partitioned into `num_chunks` near-equal chunks.

    Returns ({'train': [chunk DataLoaders], 'test': DataLoader}, num_classes).
    NOTE(review): uses pre-0.2.2 torchvision train_data/train_labels attrs.
    """
    assert dataset in {'MNIST', 'CIFAR10', 'CIFAR100'}
    path = os.path.join(path, dataset.lower())
    ds = getattr(torchvision.datasets, dataset)
    train_set = ds(root=path, train=True, download=True, transform=transform_train)
    num_classes = int(max(train_set.train_labels)) + 1
    num_samples = (train_set.train_data.shape[0] - val_size) if use_validation else train_set.train_data.shape[0]
    train_sets = list()
    offset = 0
    # Fixed-seed permutation so the chunking is reproducible across runs.
    random_state = np.random.RandomState(seed)
    order = random_state.permutation(train_set.train_data.shape[0])
    for i in range(num_chunks, 0, -1):
        # Ceiling division spreads any remainder over the first chunks.
        chunk_size = (num_samples + i - 1) // i
        tmp_set = ds(root=path, train=True, download=True, transform=transform_train)
        tmp_set.train_data = tmp_set.train_data[order[offset:offset + chunk_size]]
        tmp_set.train_labels = np.array(tmp_set.train_labels)[order[offset:offset + chunk_size]]
        train_sets.append(tmp_set)
        offset += chunk_size
        num_samples -= chunk_size
    print('Using train %d chunks: %s' % (num_chunks, str([tmp_set.train_data.shape[0] for tmp_set in train_sets])))
    if use_validation:
        print('Using validation (%d)' % val_size)
        # Validation set = last val_size samples of the permuted train split.
        test_set = ds(root=path, train=True, download=True, transform=transform_test)
        test_set.train = False
        test_set.test_data = test_set.train_data[order[-val_size:]]
        test_set.test_labels = np.array(test_set.train_labels)[order[-val_size:]]
        delattr(test_set, 'train_data')
        delattr(test_set, 'train_labels')
    else:
        print('You are going to run models on the test set. Are you sure?')
        test_set = ds(root=path, train=False, download=True, transform=transform_test)
    return \
        {
            'train': [
                torch.utils.data.DataLoader(
                    tmp_set,
                    batch_size=batch_size,
                    shuffle=True and shuffle_train,
                    num_workers=num_workers,
                    pin_memory=True
                ) for tmp_set in train_sets
            ],
            'test': torch.utils.data.DataLoader(
                test_set,
                batch_size=batch_size,
                shuffle=False,
                num_workers=num_workers,
                pin_memory=True
            ),
        }, \
        num_classes
| 9,867 | 39.77686 | 165 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/models/preresnet.py | """
PreResNet model definition
ported from https://github.com/bearpaw/pytorch-classification/blob/master/models/cifar/preresnet.py
"""
import torch.nn as nn
import torchvision.transforms as transforms
import math
__all__ = ['PreResNet110', 'PreResNet56', 'PreResNet8', 'PreResNet83', 'PreResNet164']
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution, padding 1, no bias (BN supplies the shift)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Pre-activation basic residual block: BN-ReLU-conv, twice, plus skip.

    Submodule attribute names (bn1, conv1, ...) are part of the checkpoint
    state-dict keys — do not rename.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        # Optional projection applied to the skip path when shape changes.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        # Pre-activation ordering: normalize and activate before each conv.
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        return out
class Bottleneck(nn.Module):
    """Pre-activation bottleneck block: 1x1 reduce, 3x3, 1x1 expand (x4).

    Submodule attribute names are part of the checkpoint state-dict keys —
    do not rename.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        # Expand back to planes * 4 channels (self.expansion).
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        # Pre-activation ordering: BN and ReLU before each convolution.
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn3(out)
        out = self.relu(out)
        out = self.conv3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        return out
class PreResNet(nn.Module):
    """Pre-activation ResNet for 32x32 inputs (CIFAR-style).

    Depth >= 44 uses Bottleneck blocks (depth = 9n + 2); shallower nets use
    BasicBlock (depth = 6n + 2). Three stages at 16/32/64 base channels with
    spatial sizes 32 -> 16 -> 8, then global 8x8 average pool and a linear head.
    """
    def __init__(self, num_classes=10, depth=110):
        super(PreResNet, self).__init__()
        if depth >= 44:
            assert (depth - 2) % 9 == 0, 'depth should be 9n+2'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
            n = (depth - 2) // 6
            block = BasicBlock
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
                               bias=False)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.bn = nn.BatchNorm2d(64 * block.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        # He-style initialization for convs; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the first may downsample/project."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut (1x1 conv, no BN — pre-activation style).
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
            )
        layers = list()
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.layer1(x)  # 32x32
        x = self.layer2(x)  # 16x16
        x = self.layer3(x)  # 8x8
        x = self.bn(x)
        x = self.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
class PreResNet164:
    """Model config: PreResNet depth 164 with standard CIFAR transforms."""
    base = PreResNet
    args = list()
    kwargs = {'depth': 164}
    transform_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
class PreResNet110:
    """Model config: PreResNet depth 110 with standard CIFAR transforms."""
    base = PreResNet
    args = list()
    kwargs = {'depth': 110}
    transform_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
class PreResNet83:
    """Model config: PreResNet depth 83.

    NOTE(review): unlike the other PreResNet configs, this one omits
    transforms.Resize(32) — confirm whether that is intentional.
    """
    base = PreResNet
    args = list()
    kwargs = {'depth': 83}
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
class PreResNet56:
    """Model config: PreResNet depth 56 with standard CIFAR transforms."""
    base = PreResNet
    args = list()
    kwargs = {'depth': 56}
    transform_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
class PreResNet8:
    """Model config: PreResNet depth 8 with standard CIFAR transforms."""
    base = PreResNet
    args = list()
    kwargs = {'depth': 8}
    transform_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
| 7,349 | 30.410256 | 103 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/models/regression_net.py | import math
import torch
import torch.nn as nn
import torchvision.transforms as transforms
try:
import os
os.sys.path.append("/home/izmailovpavel/Documents/Projects/curves/")
import curves
except:
pass
__all__ = [
'RegNet',
'ToyRegNet',
]
class MDropout(torch.nn.Module):
    """Dropout with a persistent, explicitly sampled mask.

    In training mode behaves like standard dropout. In eval mode it applies
    the stored binary `mask` buffer (resampled via sample()) with the usual
    inverted-dropout 1/(1-p) scaling. The buffer name 'mask' is part of the
    state dict — do not rename.
    """
    def __init__(self, dim, p, inplace=False):
        super(MDropout, self).__init__()
        self.dim = dim
        self.p = p
        self.inplace = inplace
        self.register_buffer('mask', torch.ones(dim, dtype=torch.long))
    def forward(self, input):
        if self.training:
            return torch.nn.functional.dropout(input, self.p, self.training, self.inplace)
        else:
            # Apply the fixed sampled mask with inverted-dropout scaling.
            return input * self.mask.float().view(1, -1) * 1.0 / (1.0 - self.p)
    def sample(self):
        # Resample each mask entry as Bernoulli(keep probability 1 - p).
        self.mask.bernoulli_(1.0 - self.p)
def sample_masks(module):
    """Visitor for Module.apply: resample the mask of every MDropout layer."""
    if isinstance(module, MDropout):
        module.sample()
class SplitDim(nn.Module):
    """Apply a positivity-enforcing nonlinearity to one column of the input.

    Used to turn one network output column into a valid variance. With
    correction=True a learnable offset `var` is added (after the same
    nonlinearity); otherwise a fixed tiny offset (~3e-7 under softplus) is
    used. The parameter/buffer name 'var' is part of the state dict.
    """
    def __init__(self, nonlin_col=1, nonlin_type=torch.nn.functional.softplus, correction = True):
        super(SplitDim, self).__init__()
        self.nonlinearity = nonlin_type
        self.col = nonlin_col
        if correction:
            self.var = torch.nn.Parameter(torch.zeros(1))
        else:
            #equivalent to about 3e-7 when using softplus
            self.register_buffer('var', torch.ones(1, requires_grad = False)*-15.)
        self.correction = correction
    def forward(self, input):
        transformed_output = self.nonlinearity(input[:,self.col])
        transformed_output = (transformed_output + self.nonlinearity(self.var))
        # Reassemble the input with the transformed column spliced back in.
        stack_list = [input[:,:self.col], transformed_output.view(-1,1)]
        if self.col+1 < input.size(1):
            stack_list.append(input[:,(self.col+1):])
        #print(self.nonlinearity(self.var).item(), transformed_output.mean().item())
        output = torch.cat(stack_list,1)
        return output
class RegNetBase(nn.Sequential):
    """Fully connected regression network with optional MDropout layers.

    Builds linear layers of the given hidden `dimensions` with ReLU between
    them; with output_dim == 2 a SplitDim head turns the second output column
    into a positive variance. Submodule names ('linear0', ...) are part of
    the checkpoint state-dict keys.
    """
    def __init__(self, dimensions, input_dim=1, output_dim=1, dropout=None, apply_var=True):
        super(RegNetBase, self).__init__()
        self.dimensions = [input_dim, *dimensions, output_dim]
        for i in range(len(self.dimensions) - 1):
            if dropout is not None and i > 0:
                self.add_module('dropout%d' % i, MDropout(self.dimensions[i], p=dropout))
            self.add_module('linear%d' % i, torch.nn.Linear(self.dimensions[i], self.dimensions[i + 1]))
            if i < len(self.dimensions) - 2:
                self.add_module('relu%d' % i, torch.nn.ReLU())
        if output_dim == 2:
            self.add_module('var_split', SplitDim(correction=apply_var))
    def forward(self, x, output_features=False):
        """Forward pass; with output_features=True, return the activations
        before the last three modules instead (contains debug prints)."""
        if not output_features:
            return super().forward(x)
        else:
            # NOTE(review): these prints look like leftover debugging output.
            print(self._modules.values())
            print(list(self._modules.values())[:-2])
            for module in list(self._modules.values())[:-3]:
                x = module(x)
            print(x.size())
            return x
class RegNetCurve(nn.Sequential):
    """Curve-subspace variant of RegNetBase using curves.Linear layers.

    Depends on the optional `curves` module imported (best-effort) at the top
    of this file. Each curves.Linear takes the curve parameter `t` in forward.
    NOTE(review): the activation modules are named 'tanh%d' but are actually
    nn.ReLU — the names are misleading (renaming would change module names,
    so it is only flagged here).
    """
    def __init__(self, dimensions, fix_points, input_dim=1, output_dim=1, dropout=None):
        super(RegNetCurve, self).__init__()
        self.dimensions = [input_dim, *dimensions, output_dim]
        for i in range(len(self.dimensions) - 1):
            if dropout is not None and i > 0:
                self.add_module('dropout%d' % i, MDropout(self.dimensions[i], p=dropout))
            self.add_module('linear%d' % i, curves.Linear(self.dimensions[i], self.dimensions[i + 1], fix_points=fix_points))
            if i < len(self.dimensions) - 2:
                self.add_module('tanh%d' % i, torch.nn.ReLU())
    def forward(self, x, t):
        # Only the curve layers take the curve coordinate t.
        for module in self._modules.values():
            if isinstance(module, curves.Linear):
                x = module(x, t)
            else:
                x = module(x)
        return x
class RegNet:
    """Model config: large RegNetBase (mean + variance head) for regression."""
    base = RegNetBase
    curve = RegNetCurve
    args = list()
    kwargs = {"dimensions": [1000, 1000, 500, 50, 2]}
    transform_train = transforms.Compose([
        transforms.ToTensor(),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])
class ToyRegNet:
    """Model config: small RegNetBase (2 inputs, single output) for toy problems."""
    base = RegNetBase
    curve = RegNetCurve
    args = list()
    kwargs = {"dimensions": [200, 50, 50, 50],
              "output_dim": 1,
              "input_dim": 2}
    transform_train = transforms.Compose([
        transforms.ToTensor(),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])
| 4,672 | 31.006849 | 125 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/models/vgg.py | """
VGG model definition
ported from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
"""
import math
import torch.nn as nn
import torchvision.transforms as transforms
__all__ = ['VGG16', 'VGG16BN', 'VGG19', 'VGG19BN']
def make_layers(cfg, batch_norm=False):
    """Build the VGG feature extractor from a config list.

    Ints are 3x3 conv output widths (optionally followed by BN) with ReLU;
    'M' inserts a 2x2 max-pool. Input is assumed to have 3 channels.
    """
    modules = []
    channels = 3
    for entry in cfg:
        if entry == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        modules.append(nn.Conv2d(channels, entry, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(entry))
        modules.append(nn.ReLU(inplace=True))
        channels = entry
    return nn.Sequential(*modules)
# VGG layer configurations keyed by depth: ints are conv widths, 'M' is max-pool.
cfg = {
    16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
         512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
    """VGG for 32x32 inputs: conv features from `cfg[depth]` plus a 512-wide
    dropout MLP classifier. Submodule names ('features', 'classifier') are
    part of the checkpoint state-dict keys.
    """
    def __init__(self, num_classes=10, depth=16, batch_norm=False):
        super(VGG, self).__init__()
        self.features = make_layers(cfg[depth], batch_norm)
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.Linear(512, num_classes),
        )
        # He-style initialization for conv weights; conv biases zeroed.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                m.bias.data.zero_()
    def forward(self, x):
        x = self.features(x)
        # Flatten to (batch, 512) — assumes 32x32 input reduced to 1x1 by the pools.
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
class Base:
    """Shared VGG model config: default (depth 16, no BN) with ImageNet-stat
    normalization transforms. Subclasses override `kwargs`."""
    base = VGG
    args = list()
    kwargs = dict()
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        #transforms.Normalize((0.4376821 , 0.4437697 , 0.47280442), (0.19803012, 0.20101562, 0.19703614))
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        #transforms.Normalize((0.45242316, 0.45249584, 0.46897713), (0.21943445, 0.22656967, 0.22850613))
    ])
class VGG16(Base):
    """VGG-16 without batch norm (Base defaults)."""
    pass
class VGG16BN(Base):
    """VGG-16 with batch norm."""
    kwargs = {'batch_norm': True}
class VGG19(Base):
    """VGG-19 without batch norm."""
    kwargs = {'depth': 19}
class VGG19BN(Base):
    """VGG-19 with batch norm."""
    kwargs = {'depth': 19, 'batch_norm': True}
| 2,841 | 27.707071 | 105 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/models/mlp.py | import torch.nn as nn
import torchvision.transforms as transforms
import torch
__all__=['MLP', 'MLPBoston']
class MLPBase(nn.Module):
    """Simple fully connected network.

    Builds `layers` hidden linear layers of width `hidden` with ReLU between
    them (no activation after the final layer). num_classes == 0 gives a
    single-output regression head; otherwise the last layer has num_classes
    outputs. (Rewrite fixes the original's rebinding of the int parameter
    `layers` as a list and its shadowing of the builtin `input`; the
    nn.Sequential structure and therefore the state-dict keys are unchanged.)
    """
    def __init__(self, num_classes=0, in_dim=1, layers=2, hidden=7):
        super(MLPBase, self).__init__()
        # Output width of each linear layer; final width is 1 for regression.
        out_dims = [hidden] * layers + [num_classes if num_classes else 1]
        # Input width of each linear layer.
        in_dims = [in_dim] + [hidden] * layers
        modules = []
        for fan_in, fan_out in zip(in_dims, out_dims):
            modules.append(nn.Linear(fan_in, fan_out))
            modules.append(nn.ReLU())
        modules.pop()  # no activation after the output layer
        self.model = nn.Sequential(*modules)
        #self.log_noise = nn.Parameter(torch.log(torch.ones(1)*7))
        print(self.model)
    def forward(self, x):
        return self.model(x)
class MLP:
    """Model config: MLPBase with its default hyper-parameters."""
    base = MLPBase
    args = list()
    kwargs = {}
    transform_train = transforms.ToTensor()
    transform_test = transforms.ToTensor()
class MLPBoston:
    """Model config for Boston housing regression (13 inputs, 1x50 hidden).

    NOTE(review): the assignment below attaches `log_noise` to the MLPBase
    *class* itself, so every MLPBase instance — including other configs —
    shares this parameter. Confirm this class-level mutation is intended.
    """
    base = MLPBase
    base.log_noise = nn.Parameter(torch.log(torch.ones(1)*7))
    args = list()
    kwargs = {'in_dim': 13, 'layers': 1, 'hidden':50}
    transform_train = transforms.ToTensor()
    transform_test = transforms.ToTensor()
| 1,419 | 27.979592 | 68 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/models/layers.py | """
layer definitions for 100-layer tiramisu
#from: https://github.com/bfortuner/pytorch_tiramisu
"""
import torch
import torch.nn as nn
class DenseLayer(nn.Sequential):
    """Tiramisu dense layer: BN -> ReLU -> 3x3 conv (growth_rate ch) -> dropout 0.2.

    Submodule names ('norm', 'relu', 'conv', 'drop') are part of the
    checkpoint state-dict keys.
    """
    def __init__(self, in_channels, growth_rate):
        super().__init__()
        self.add_module('norm', nn.BatchNorm2d(in_channels))
        self.add_module('relu', nn.ReLU(True))
        self.add_module('conv', nn.Conv2d(in_channels, growth_rate, kernel_size=3,
                                          stride=1, padding=1, bias=True))
        self.add_module('drop', nn.Dropout(p=0.2))
    def forward(self, x):
        return super().forward(x)
class DenseBlock(nn.Module):
    """Stack of DenseLayers with dense (concatenative) connectivity."""
    def __init__(self, in_channels, growth_rate, n_layers, upsample=False):
        super().__init__()
        self.upsample = upsample
        # Layer i receives the block input plus the outputs of layers 0..i-1.
        self.layers = nn.ModuleList([DenseLayer(
            in_channels + i*growth_rate, growth_rate)
            for i in range(n_layers)])
    def forward(self, x):
        if self.upsample:
            new_features = []
            #we pass all previous activations into each dense layer normally
            #But we only store each dense layer's output in the new_features array
            for layer in self.layers:
                out = layer(x)
                x = torch.cat([x, out], 1)
                new_features.append(out)
            # Up path: return only the newly produced feature maps.
            return torch.cat(new_features,1)
        else:
            for layer in self.layers:
                out = layer(x)
                x = torch.cat([x, out], 1) # 1 = channel axis
            # Down path: return the input concatenated with all new features.
            return x
class TransitionDown(nn.Sequential):
    """BN -> ReLU -> 1x1 conv -> dropout -> 2x2 max-pool (halves spatial size)."""
    def __init__(self, in_channels):
        super().__init__()
        self.add_module('norm', nn.BatchNorm2d(num_features=in_channels))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(in_channels, in_channels,
                                          kernel_size=1, stride=1,
                                          padding=0, bias=True))
        self.add_module('drop', nn.Dropout2d(0.2))
        self.add_module('maxpool', nn.MaxPool2d(2))
    def forward(self, x):
        return super().forward(x)
class TransitionUp(nn.Module):
    """Transposed-conv upsample, center-crop to the skip's size, concat with skip."""
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.convTrans = nn.ConvTranspose2d(
            in_channels=in_channels, out_channels=out_channels,
            kernel_size=3, stride=2, padding=0, bias=True)
    def forward(self, x, skip):
        out = self.convTrans(x)
        # Transposed conv can overshoot the skip's spatial size; crop to match.
        out = center_crop(out, skip.size(2), skip.size(3))
        out = torch.cat([out, skip], 1)
        return out
class Bottleneck(nn.Sequential):
    """DenseBlock in upsample mode, used at the bottom of the U-shaped network."""
    def __init__(self, in_channels, growth_rate, n_layers):
        super().__init__()
        self.add_module('bottleneck', DenseBlock(
            in_channels, growth_rate, n_layers, upsample=True))
    def forward(self, x):
        return super().forward(x)
def center_crop(layer, max_height, max_width):
    """Crop a NCHW tensor around its spatial center to (max_height, max_width)."""
    height, width = layer.size(2), layer.size(3)
    top = (height - max_height) // 2
    left = (width - max_width) // 2
    return layer[:, :, top:top + max_height, left:left + max_width]
| 3,117 | 33.644444 | 82 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/models/wide_resnet.py | """
WideResNet model definition
ported from https://github.com/meliketoy/wide-resnet.pytorch/blob/master/networks/wide_resnet.py
"""
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
__all__ = ['WideResNet28x10']
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and bias (spatial size preserved at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=True,
    )
def conv_init(m):
    """Initialize module `m` in place (intended for `net.apply(conv_init)`):
    Xavier-uniform weights and zero bias for conv layers; weight 1 / bias 0
    for batch-norm layers. Other module types are left untouched."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # xavier_uniform/constant were deprecated aliases that have been
        # removed from torch.nn.init; use the in-place underscore variants.
        init.xavier_uniform_(m.weight, gain=math.sqrt(2))
        init.constant_(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
class WideBasic(nn.Module):
    """Pre-activation wide-resnet basic block: BN-ReLU-conv (with dropout)
    twice, plus a 1x1-conv projection shortcut when the shape changes."""
    def __init__(self, in_planes, planes, dropout_rate, stride=1):
        super(WideBasic, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        # First conv keeps stride 1; downsampling happens in the second conv.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            # Projection shortcut to match spatial size / channel count.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
            )
    def forward(self, x):
        out = self.dropout(self.conv1(F.relu(self.bn1(x))))
        out = self.conv2(F.relu(self.bn2(out)))
        out += self.shortcut(x)
        return out
class WideResNet(nn.Module):
    """Wide residual network (Zagoruyko & Komodakis) for 32x32 inputs.

    Args:
        num_classes: size of the output layer.
        depth: total depth; must satisfy depth = 6n + 4.
        widen_factor: channel multiplier k.
        dropout_rate: dropout probability inside each WideBasic block.
    """

    def __init__(self, num_classes=10, depth=28, widen_factor=10, dropout_rate=0.):
        super(WideResNet, self).__init__()
        self.in_planes = 16
        assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        # Integer division: n is a per-stage block count, not a float.
        n = (depth - 4) // 6
        k = widen_factor
        nstages = [16, 16 * k, 32 * k, 64 * k]
        self.conv1 = conv3x3(3, nstages[0])
        self.layer1 = self._wide_layer(WideBasic, nstages[1], n, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(WideBasic, nstages[2], n, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(WideBasic, nstages[3], n, dropout_rate, stride=2)
        self.bn1 = nn.BatchNorm2d(nstages[3], momentum=0.9)
        self.linear = nn.Linear(nstages[3], num_classes)

    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        # Only the first block of a stage downsamples; the rest keep stride 1.
        strides = [stride] + [1] * (int(num_blocks) - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, dropout_rate, stride))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn1(out))
        # Global 8x8 average pool assumes 32x32 inputs (feature map is 8x8 here).
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
class WideResNet28x10:
    """Config: WRN-28-10 with standard CIFAR-10 augmentation and normalization."""
    base = WideResNet
    args = list()
    kwargs = {'depth': 28, 'widen_factor': 10}
    transform_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
| 3,660 | 32.587156 | 100 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/models/vgg_dropout.py | """
VGG model definition
ported from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
"""
import math
import torch.nn as nn
import torchvision.transforms as transforms
__all__ = ['VGG16Drop', 'VGG16BNDrop', 'VGG19Drop', 'VGG19BNDrop']
P = 0.05  # dropout probability inserted before every conv layer

def make_layers(cfg, batch_norm=False):
    """Build the VGG feature extractor from a config list.

    Each int entry adds Dropout -> 3x3 Conv (-> BatchNorm) -> ReLU with that
    many output channels; an 'M' entry adds a 2x2 max-pool.
    """
    modules = []
    in_channels = 3
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        conv = nn.Conv2d(in_channels, spec, kernel_size=3, padding=1)
        modules.append(nn.Dropout(P))
        modules.append(conv)
        if batch_norm:
            modules.append(nn.BatchNorm2d(spec))
        modules.append(nn.ReLU(inplace=True))
        in_channels = spec
    return nn.Sequential(*modules)
# VGG configurations: ints are conv output channels, 'M' marks a 2x2 max-pool.
cfg = {
    16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
         512, 512, 512, 512, 'M'],
}
class VGGDrop(nn.Module):
    """VGG with dropout before every conv/linear layer (CIFAR-style 32x32 inputs)."""
    def __init__(self, num_classes=10, depth=16, batch_norm=False):
        super(VGGDrop, self).__init__()
        self.features = make_layers(cfg[depth], batch_norm)
        # Head sees 512 features: 32x32 input shrinks to 1x1 after five pools.
        self.classifier = nn.Sequential(
            nn.Dropout(P),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.Dropout(P),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.Linear(512, num_classes),
        )
        # He-style initialization for conv weights; biases start at zero.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                m.bias.data.zero_()
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
class Base:
    """Shared model config: base module, constructor args, and data transforms."""
    base = VGGDrop
    args = list()
    kwargs = dict()
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        #transforms.Normalize((0.4376821 , 0.4437697 , 0.47280442), (0.19803012, 0.20101562, 0.19703614))
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        #transforms.Normalize((0.45242316, 0.45249584, 0.46897713), (0.21943445, 0.22656967, 0.22850613))
    ])
class VGG16Drop(Base):
    # VGG-16 with default kwargs (no batch norm).
    pass
class VGG16BNDrop(Base):
    # VGG-16 with batch normalization.
    kwargs = {'batch_norm': True}
class VGG19Drop(Base):
    # VGG-19 without batch normalization.
    kwargs = {'depth': 19}
class VGG19BNDrop(Base):
    # VGG-19 with batch normalization.
    kwargs = {'depth': 19, 'batch_norm': True}
| 2,927 | 27.990099 | 105 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/models/wide_resnet_dropout.py | """
WideResNet model definition
ported from https://github.com/meliketoy/wide-resnet.pytorch/blob/master/networks/wide_resnet.py
"""
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
__all__ = ['WideResNet28x10Drop']
P = 0.05
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 padded convolution (with bias); `stride` controls downsampling."""
    conv_kwargs = dict(kernel_size=3, stride=stride, padding=1, bias=True)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
def conv_init(m):
    """Initialize module `m` in place (for `net.apply(conv_init)`):
    Xavier-uniform weights / zero bias for conv layers, weight 1 / bias 0
    for batch-norm layers; other module types are untouched."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # xavier_uniform/constant were deprecated aliases removed from
        # torch.nn.init; use the in-place underscore variants.
        init.xavier_uniform_(m.weight, gain=math.sqrt(2))
        init.constant_(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
class WideBasic(nn.Module):
    """Pre-activation wide-resnet basic block using the module-level dropout
    probability P, with a 1x1-conv projection shortcut when the shape changes."""
    def __init__(self, in_planes, planes, stride=1):
        super(WideBasic, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.dropout = nn.Dropout(p=P)
        # First conv keeps stride 1; downsampling happens in the second conv.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            # Projection shortcut to match spatial size / channel count.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
            )
    def forward(self, x):
        out = self.dropout(self.conv1(F.relu(self.bn1(x))))
        out = self.conv2(F.relu(self.bn2(out)))
        out += self.shortcut(x)
        return out
class WideResNetDrop(nn.Module):
    """Wide residual network with a dropout layer before the final linear head.

    Args:
        num_classes: output dimension.
        depth: total depth; must satisfy depth = 6n + 4.
        widen_factor: channel multiplier k.
    """

    def __init__(self, num_classes=10, depth=28, widen_factor=10):
        super(WideResNetDrop, self).__init__()
        self.in_planes = 16
        assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        # Integer division: n is a per-stage block count, not a float.
        n = (depth - 4) // 6
        k = widen_factor
        nstages = [16, 16 * k, 32 * k, 64 * k]
        self.conv1 = conv3x3(3, nstages[0])
        self.layer1 = self._wide_layer(WideBasic, nstages[1], n, stride=1)
        self.layer2 = self._wide_layer(WideBasic, nstages[2], n, stride=2)
        self.layer3 = self._wide_layer(WideBasic, nstages[3], n, stride=2)
        self.bn1 = nn.BatchNorm2d(nstages[3], momentum=0.9)
        self.linear = nn.Linear(nstages[3], num_classes)
        self.drop = nn.Dropout(P)

    def _wide_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest keep stride 1.
        strides = [stride] + [1] * (int(num_blocks) - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn1(out))
        # Global 8x8 average pool assumes 32x32 inputs.
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.drop(out)
        out = self.linear(out)
        return out
class WideResNet28x10Drop:
    """Config: WRN-28-10 with dropout, plus CIFAR-10 train/test transforms."""
    base = WideResNetDrop
    args = list()
    kwargs = {'depth': 28, 'widen_factor': 10}
    transform_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
| 3,640 | 31.508929 | 100 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/models/preresnet_dropout.py | """
PreResNet model definition
ported from https://github.com/bearpaw/pytorch-classification/blob/master/models/cifar/preresnet.py
"""
import torch.nn as nn
import torchvision.transforms as transforms
import math
__all__ = ['PreResNet110Drop', 'PreResNet56Drop', 'PreResNet8Drop', 'PreResNet164Drop']
P = 0.01
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 padded convolution without bias (bias is absorbed by batch norm)."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=1, bias=False,
    )
class BasicBlock(nn.Module):
    """Pre-activation basic residual block: (BN -> ReLU -> dropout -> conv) x2."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.drop = nn.Dropout(P)
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.bn1(x)
        out = self.relu(out)
        out = self.drop(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.drop(out)
        out = self.conv2(out)
        if self.downsample is not None:
            # Pre-activation design: the shortcut path sees the raw input x.
            residual = self.downsample(x)
        out += residual
        return out
class Bottleneck(nn.Module):
    """Pre-activation bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4),
    with dropout after each BN-ReLU pair."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.drop = nn.Dropout(P)
    def forward(self, x):
        residual = x
        out = self.bn1(x)
        out = self.relu(out)
        out = self.drop(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.drop(out)
        out = self.conv2(out)
        out = self.bn3(out)
        out = self.relu(out)
        out = self.drop(out)
        out = self.conv3(out)
        if self.downsample is not None:
            # Pre-activation design: the shortcut path sees the raw input x.
            residual = self.downsample(x)
        out += residual
        return out
class PreResNetDrop(nn.Module):
    """Pre-activation ResNet with dropout, for CIFAR-style 32x32 inputs."""
    def __init__(self, num_classes=10, depth=110):
        super(PreResNetDrop, self).__init__()
        assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
        n = (depth - 2) // 6
        # NOTE(review): bottleneck pre-resnets are conventionally 9n+2 deep,
        # but n is computed with the 6n+2 formula for both block types here —
        # confirm this matches the intended upstream architecture.
        block = Bottleneck if depth >= 44 else BasicBlock
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
                               bias=False)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.bn = nn.BatchNorm2d(64 * block.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        self.drop = nn.Dropout(P)
        # Kaiming-style init for convs; batch norm starts at identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Dropout + 1x1 conv projection matches shapes for the residual add.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Dropout(P),
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
            )
        layers = list()
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.layer1(x) # 32x32
        x = self.layer2(x) # 16x16
        x = self.layer3(x) # 8x8
        x = self.bn(x)
        x = self.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.drop(x)
        x = self.fc(x)
        return x
class PreResNet164Drop:
    """Config: depth-164 PreResNetDrop with CIFAR-10 transforms."""
    base = PreResNetDrop
    args = list()
    kwargs = {'depth': 164}
    transform_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
class PreResNet110Drop:
    """Config: depth-110 PreResNetDrop with CIFAR-10 transforms."""
    base = PreResNetDrop
    args = list()
    kwargs = {'depth': 110}
    transform_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
class PreResNet56Drop:
    """Config: depth-56 PreResNetDrop with CIFAR-10 transforms."""
    base = PreResNetDrop
    args = list()
    kwargs = {'depth': 56}
    transform_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
class PreResNet8Drop:
    """Config: depth-8 PreResNetDrop with CIFAR-10 transforms."""
    base = PreResNetDrop
    args = list()
    kwargs = {'depth': 8}
    transform_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
| 7,057 | 30.092511 | 103 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/posteriors/ess.py | import torch
import numpy as np
from .elliptical_slice import elliptical_slice, slice_sample
from .proj_model import ProjectedModel
class EllipticalSliceSampling(torch.nn.Module):
    """Posterior sampling over subspace coordinates via (elliptical) slice sampling."""
    def __init__(self, base, subspace, var, loader, criterion, num_samples = 20,
                use_cuda = False, method='elliptical', *args, **kwargs):
        super(EllipticalSliceSampling, self).__init__()
        # Pick the sampler; 'elliptical' and 'slice' are the supported methods.
        if method=='elliptical':
            self.slice_method = elliptical_slice
        if method=='slice':
            self.slice_method = slice_sample
        self.base_model = base(*args, **kwargs)
        if use_cuda:
            self.base_model.cuda()
        # Keep (param, name, shape) records of the base model's parameters.
        self.base_params = []
        for name, param in self.base_model.named_parameters():
            self.base_params.append([param, name, param.size()])
        self.subspace = subspace
        self.var = var
        self.loader = loader
        self.criterion = criterion
        self.num_samples = num_samples
        self.use_cuda = use_cuda
        self.all_samples = None
        # Forward delegates to self.model; sample() swaps in a ProjectedModel.
        self.model = self.base_model
    def forward(self, *args, **kwargs):
        return self.model.forward(*args, **kwargs)
    def prior_sample(self, prior='identity', scale=1.0):
        """Draw one sample from the prior over subspace coordinates."""
        if prior=='identity':
            cov_mat = np.eye(self.subspace.cov_factor.size(0))
        elif prior=='schur':
            # NOTE(review): this branch references self.swag_model and
            # cov_factor.subspace, neither of which is set anywhere in this
            # class — confirm before using the 'schur' prior.
            trans_cov_mat = self.subspace.cov_factor.matmul(self.subspace.cov_factor.subspace.t()).numpy()
            trans_cov_mat /= (self.swag_model.n_models.item() - 1)
            cov_mat = np.eye(self.subspace.cov_factor.size(0)) + trans_cov_mat
        else:
            raise NotImplementedError('Only schur and identity priors have been implemented')
        cov_mat *= scale
        sample = np.random.multivariate_normal(np.zeros(self.subspace.cov_factor.size(0)), cov_mat.astype(np.float64), 1)[0,:]
        return sample
    def log_pdf(self, params, temperature = 1., minibatch = False):
        """Unnormalized log-posterior of subspace coordinates (negative scaled loss)."""
        params_tensor = torch.FloatTensor(params)
        params_tensor = params_tensor.view(-1)
        if self.use_cuda:
            params_tensor = params_tensor.cuda()
        with torch.no_grad():
            proj_model = ProjectedModel(model=self.base_model, subspace = self.subspace, proj_params = params_tensor)
            loss = 0
            num_datapoints = 0.0
            for batch_num, (data, target) in enumerate(self.loader):
                # Optionally estimate from the first batch only.
                if minibatch and batch_num > 0:
                    break
                num_datapoints += data.size(0)
                if self.use_cuda:
                    data, target = data.cuda(), target.cuda()
                batch_loss, _, _ = self.criterion(proj_model, data, target)
                loss += batch_loss
            # Rescale the mean batch loss to the number of seen datapoints.
            loss = loss / (batch_num+1) * num_datapoints
        return -loss.cpu().numpy() / temperature
    def fit(self, use_cuda = True, prior='identity', scale=1.0, **kwargs):
        """Run the sampler for num_samples iterations; returns per-sample log-probs."""
        # initialize at prior mean = 0
        current_sample = np.zeros(self.subspace.cov_factor.size(0))
        all_samples = np.zeros((current_sample.size, self.num_samples))
        logprobs = np.zeros(self.num_samples)
        for i in range(self.num_samples):
            prior_sample = self.prior_sample(prior=prior, scale=scale)
            current_sample, logprobs[i] = self.slice_method(initial_theta=current_sample, prior=prior_sample,
                                                            lnpdf=self.log_pdf, **kwargs)
            # print(logprobs[i])
            all_samples[:,i] = current_sample
        self.all_samples = all_samples
        return logprobs
    def sample(self, ind=None, *args, **kwargs):
        """Load stored sample `ind` (random if None) into a ProjectedModel;
        returns the selected subspace coordinates."""
        if ind is None:
            ind = np.random.randint(self.num_samples)
        rsample = torch.FloatTensor(self.all_samples[:,int(ind)])
        #sample = self.subspace(torch.FloatTensor(rsample)).view(-1)
        if self.use_cuda:
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
        rsample = rsample.to(device)
        self.model = ProjectedModel(model=self.base_model, subspace=self.subspace, proj_params=rsample)
        return rsample
| 4,179 | 35.347826 | 126 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/posteriors/inferences.py | """
inferences class w/in the subspace
currently only fitting the Gaussian associated is implemented
"""
import abc
import torch
import numpy as np
from torch.distributions import LowRankMultivariateNormal
from .elliptical_slice import elliptical_slice
from ..utils import unflatten_like, flatten, train_epoch
from .proj_model import ProjectedModel
from .vi_model import VIModel, ELBO
class Inference(torch.nn.Module, metaclass=abc.ABCMeta):
    """Abstract posterior-inference strategy with a string-keyed registry."""

    subclasses = {}

    @classmethod
    def register_subclass(cls, inference_type):
        """Class decorator: register the decorated class under `inference_type`."""
        def decorator(subclass):
            cls.subclasses[inference_type] = subclass
            return subclass
        return decorator

    @classmethod
    def create(cls, inference_type, **kwargs):
        """Instantiate the inference class registered under `inference_type`."""
        if inference_type not in cls.subclasses:
            raise ValueError('Bad inference type {}'.format(inference_type))
        return cls.subclasses[inference_type](**kwargs)

    @abc.abstractmethod
    def __init__(self, *args, **kwargs):
        super(Inference, self).__init__()

    @abc.abstractmethod
    def fit(self, mean, variance, cov_factor, *args, **kwargs):
        pass

    @abc.abstractmethod
    def sample(self, *args, **kwargs):
        pass


@Inference.register_subclass('low_rank_gaussian')
class LRGaussian(Inference):
    """Low-rank-plus-diagonal Gaussian fitted over the subspace."""

    def __init__(self, base, base_args, base_kwargs, var_clamp=1e-6):
        # `base*` arguments are accepted for interface compatibility but unused.
        super(LRGaussian, self).__init__()
        self.var_clamp = var_clamp
        self.dist = None

    def fit(self, mean, variance, cov_factor):
        """Fit N(mean, diag(variance) + cov_factor^T cov_factor)."""
        clamped = variance.clamp(min=self.var_clamp)  # keep the diagonal positive
        self.dist = LowRankMultivariateNormal(
            loc=clamped.new_tensor(mean) if False else mean,
            cov_diag=clamped, cov_factor=cov_factor.t())

    def sample(self, scale=0.5, seed=None):
        """Draw one sample, shrinking deviations from the mean by sqrt(scale)."""
        if seed is not None:
            torch.manual_seed(seed)
        raw = self.dist.rsample()  # x = mu + L z
        # x' = mu + sqrt(scale) * (x - mu)
        return (scale ** 0.5) * (raw - self.dist.loc) + self.dist.loc

    def log_prob(self, sample):
        return self.dist.log_prob(sample)
@Inference.register_subclass('projected_sgd')
class ProjSGD(Inference):
    """MAP estimation: SGD over coordinates in a fixed subspace."""
    def __init__(self, model, loader, criterion, epochs = 10, **kwargs):
        super(ProjSGD, self).__init__()
        self.kwargs = kwargs  # forwarded to torch.optim.SGD
        self.optimizer = None
        self.epochs = epochs
        self.mean, self.var, self.subspace = None, None, None
        self.optimizer = None
        self.proj_params = None
        self.loader, self.criterion = loader, criterion
        self.model = model
    def fit(self, mean, variance, subspace, use_cuda = True, **kwargs):
        """Optimize projected parameters; returns the per-epoch loss list."""
        if use_cuda and torch.cuda.is_available():
            self.mean = mean.cuda()
            self.subspace = subspace.cuda()
        else:
            self.mean = mean
            self.subspace = subspace
        if self.proj_params is None:
            # Start at the subspace origin (i.e. at `mean` in weight space).
            proj_params = torch.zeros(self.subspace.size(0), 1, dtype = self.subspace.dtype, device = self.subspace.device, requires_grad = True)
            print(proj_params.device)
            self.proj_model = ProjectedModel(model=self.model, mean=self.mean.unsqueeze(1), projection=self.subspace, proj_params=proj_params)
            # define optimizer
            self.optimizer = torch.optim.SGD([proj_params], **self.kwargs)
        else:
            # NOTE(review): this branch clones the stored coordinates, but the
            # existing proj_model/optimizer still reference the older tensor —
            # confirm the warm-restart behavior is as intended.
            proj_params = self.proj_params.clone()
        # now train projected parameters
        loss_vec = []
        for _ in range(self.epochs):
            loss = train_epoch(loader=self.loader, optimizer=self.optimizer, model=self.proj_model, criterion=self.criterion, **kwargs)
            loss_vec.append( loss )
        self.proj_params = proj_params
        return loss_vec
    def sample(self, *args, **kwargs):
        """Return the (deterministic) MAP estimate mapped back to weight space."""
        print(self.mean.size(), self.subspace.size(), self.proj_params.size())
        map_sample = self.mean + self.subspace.t().matmul(self.proj_params.squeeze(1))
        return map_sample.view(1,-1)
@Inference.register_subclass('vi')
class VI(Inference):
    """Variational inference in the subspace via a VIModel."""
    def __init__(self, base, base_args, base_kwargs, rank, init_inv_softplus_simga=-6.0, prior_log_sigma=0.0):
        super(VI, self).__init__()
        self.vi_model = VIModel(
            base=base,
            base_args=base_args,
            base_kwargs=base_kwargs,
            rank=rank,
            init_inv_softplus_simga=init_inv_softplus_simga,
            prior_log_sigma=prior_log_sigma
        )
    def fit(self, mean, variance, cov_factor, loader, criterion, epochs=100):
        """Fit the variational posterior with Adam; prints per-epoch stats."""
        print('Fitting VI')
        self.vi_model.set_subspace(mean, cov_factor)
        elbo = ELBO(criterion, len(loader.dataset))
        # Only optimize parameters that require gradients.
        optimizer = torch.optim.Adam([param for param in self.vi_model.parameters() if param.requires_grad])
        for _ in range(epochs):
            train_res = train_epoch(loader, self.vi_model, elbo, optimizer)
            print(train_res)
    def sample(self):
        return self.vi_model.sample()
| 5,185 | 31.21118 | 145 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/posteriors/vinf_model.py | import math
import torch
from ..utils import set_weights
class VINFModel(torch.nn.Module):
    """Variational inference with a normalizing-flow posterior over subspace coordinates."""
    def __init__(self, base, subspace, flow,
                 prior_log_sigma=1.0, *args, **kwargs):
        super(VINFModel, self).__init__()
        self.base_model = base(*args, **kwargs)
        self.flow = flow
        self.subspace = subspace
        self.rank = self.subspace.rank
        self.prior_log_sigma = prior_log_sigma
    def forward(self, input, t=None, *args, **kwargs):
        # Draw flow coordinates (unless a fixed t is supplied), map them to
        # weights through the subspace, load them into the base model, run it.
        if t is None:
            t = self.flow.sample()
        w = self.subspace(t.squeeze())
        set_weights(self.base_model, w, self.flow.device)
        return self.base_model(input, *args, **kwargs)
    def compute_kl_mc(self, t=None):
        """One-sample MC estimate of KL(q || N(0, exp(2*prior_log_sigma) I))."""
        if t is None:
            t = self.flow.sample()
        # Isotropic Gaussian prior log-density, up to an additive constant.
        prior_logprob = - torch.norm(t.squeeze())**2 / (2 * math.exp(self.prior_log_sigma * 2))
        return self.flow.log_prob(t) - prior_logprob
    def compute_entropy_mc(self, t=None):
        """One-sample MC estimate of E_q[log q] (the negative entropy)."""
        if t is None:
            t = self.flow.sample()
        return self.flow.log_prob(t)
class ELBO_NF(object):
    """Negative ELBO objective for normalizing-flow variational inference.

    The returned loss is NLL + temperature * KL / num_samples, where both the
    likelihood term and the KL term reuse a single Monte-Carlo flow sample.
    """

    def __init__(self, criterion, num_samples, temperature=1.):
        self.criterion = criterion
        self.num_samples = num_samples
        self.temperature = temperature

    def __call__(self, model, input, target):
        # One shared flow sample for the likelihood and the KL estimate.
        draw = model.flow.sample()
        output = model(input, t=draw)
        nll, _ = self.criterion(output, target)
        kl = model.compute_kl_mc(draw)
        stats = {"nll": nll.item(), "kl": kl.item()}
        # Negative ELBO: data-fit term plus tempered, dataset-averaged KL.
        return nll + kl * self.temperature / self.num_samples, output, stats
class BenchmarkVINFModel(VINFModel):
    # same as a VINFModel, except with a fit method
    # for ease of benchmarking
    """Benchmark wrapper around VINFModel that owns its own training loop.

    Note: the `optimizer` argument is accepted for interface compatibility,
    but an Adam optimizer is always built internally (original behavior).
    """

    def __init__(self, loader, criterion, optimizer, epochs, base, subspace, flow,
                 prior_log_sigma=3.0, lr=0.1, temperature=1., num_samples=45000, *args, **kwargs):
        super(BenchmarkVINFModel, self).__init__(base, subspace, flow, prior_log_sigma=prior_log_sigma)
        self.loader = loader
        self.criterion = criterion
        # Bug fix: `epochs` was never stored, so fit() raised AttributeError.
        self.epochs = epochs
        self.optimizer = torch.optim.Adam([param for param in self.parameters()], lr=lr)
        self.elbo = ELBO_NF(self.criterion, num_samples, temperature)

    def fit(self, *args, **kwargs):
        """Train for self.epochs epochs, printing per-epoch stats."""
        # NOTE(review): `train_epoch` is not among this module's visible
        # imports — confirm it is provided elsewhere before relying on fit().
        for epoch in range(self.epochs):
            train_res = train_epoch(self.loader, self, self.elbo, self.optimizer)
            values = ['%d/%d' % (epoch + 1, self.epochs), train_res['accuracy'], train_res['loss'],
                      train_res['stats']['kl'], train_res['stats']['nll']]
            print(values)
| 2,662 | 33.584416 | 103 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/posteriors/swag.py | import torch
from ..utils import flatten, set_weights
from .subspaces import Subspace
class SWAG(torch.nn.Module):
    """SWAG posterior: running first/second weight moments plus a deviation subspace."""
    def __init__(self, base, subspace_type,
                 subspace_kwargs=None, var_clamp=1e-6, *args, **kwargs):
        super(SWAG, self).__init__()
        self.base_model = base(*args, **kwargs)
        self.num_parameters = sum(param.numel() for param in self.base_model.parameters())
        # Running moments over collected models, stored as flat buffers.
        self.register_buffer('mean', torch.zeros(self.num_parameters))
        self.register_buffer('sq_mean', torch.zeros(self.num_parameters))
        self.register_buffer('n_models', torch.zeros(1, dtype=torch.long))
        # Initialize subspace
        if subspace_kwargs is None:
            subspace_kwargs = dict()
        self.subspace = Subspace.create(subspace_type, num_parameters=self.num_parameters,
                                        **subspace_kwargs)
        self.var_clamp = var_clamp
        # Lazily materialized by fit().
        self.cov_factor = None
        self.model_device = 'cpu'
    # dont put subspace on cuda?
    def cuda(self, device=None):
        # Only the base model moves to the GPU; the subspace stays on CPU.
        self.model_device = 'cuda'
        self.base_model.cuda(device=device)
    def to(self, *args, **kwargs):
        self.base_model.to(*args, **kwargs)
        # NOTE(review): torch._C._nn._parse_to is a private API whose return
        # signature has changed across PyTorch versions — verify compatibility.
        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
        self.model_device = device.type
        self.subspace.to(device=torch.device('cpu'), dtype=dtype, non_blocking=non_blocking)
    def forward(self, *args, **kwargs):
        return self.base_model(*args, **kwargs)
    def collect_model(self, base_model, *args, **kwargs):
        """Fold base_model's weights into the running moments and the subspace."""
        # need to refit the space after collecting a new model
        self.cov_factor = None
        w = flatten([param.detach().cpu() for param in base_model.parameters()])
        # first moment
        self.mean.mul_(self.n_models.item() / (self.n_models.item() + 1.0))
        self.mean.add_(w / (self.n_models.item() + 1.0))
        # second moment
        self.sq_mean.mul_(self.n_models.item() / (self.n_models.item() + 1.0))
        self.sq_mean.add_(w ** 2 / (self.n_models.item() + 1.0))
        # Deviation from the running mean feeds the low-rank subspace.
        dev_vector = w - self.mean
        self.subspace.collect_vector(dev_vector, *args, **kwargs)
        self.n_models.add_(1)
    def _get_mean_and_variance(self):
        # Diagonal variance E[w^2] - E[w]^2, clamped away from zero.
        variance = torch.clamp(self.sq_mean - self.mean ** 2, self.var_clamp)
        return self.mean, variance
    def fit(self):
        # Lazily materialize the low-rank covariance factor.
        if self.cov_factor is not None:
            return
        self.cov_factor = self.subspace.get_space()
    def set_swa(self):
        """Load the SWA mean into the base model."""
        set_weights(self.base_model, self.mean, self.model_device)
    def sample(self, scale=0.5, diag_noise=True, cov_factor=True):
        """Draw weights from the SWAG Gaussian and load them into the base model."""
        self.fit()
        mean, variance = self._get_mean_and_variance()
        z = torch.zeros_like(variance)
        if cov_factor:
            # Low-rank part: P^T eps with eps ~ N(0, I_rank).
            eps_low_rank = torch.randn(self.cov_factor.size()[0])
            z = self.cov_factor.t() @ eps_low_rank
        if diag_noise:
            z += variance.sqrt() * torch.randn_like(variance)
        z *= scale ** 0.5
        sample = mean + z
        # apply to parameters
        set_weights(self.base_model, sample, self.model_device)
        return sample
    def get_space(self, export_cov_factor=True):
        """Return (mean, variance) and optionally the covariance factor (all clones)."""
        mean, variance = self._get_mean_and_variance()
        if not export_cov_factor:
            return mean.clone(), variance.clone()
        else:
            self.fit()
            return mean.clone(), variance.clone(), self.cov_factor.clone()
| 3,487 | 34.232323 | 96 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/posteriors/subspaces.py | """
subspace classes
CovarianceSpace: covariance subspace
PCASpace: PCA subspace
FreqDirSpace: Frequent Directions Space
"""
import abc
import torch
import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition._pca import _assess_dimension
from sklearn.utils.extmath import randomized_svd
class Subspace(torch.nn.Module, metaclass=abc.ABCMeta):
    """Abstract parameter subspace with a string-keyed subclass registry."""
    subclasses = {}
    @classmethod
    def register_subclass(cls, subspace_type):
        # Class decorator that registers the decorated subclass by name.
        def decorator(subclass):
            cls.subclasses[subspace_type] = subclass
            return subclass
        return decorator
    @classmethod
    def create(cls, subspace_type, **kwargs):
        # Factory: instantiate the subspace registered under `subspace_type`.
        if subspace_type not in cls.subclasses:
            raise ValueError('Bad subspaces type {}'.format(subspace_type))
        return cls.subclasses[subspace_type](**kwargs)
    def __init__(self):
        super(Subspace, self).__init__()
    @abc.abstractmethod
    def collect_vector(self, vector):
        pass
    @abc.abstractmethod
    def get_space(self):
        pass
@Subspace.register_subclass('random')
class RandomSpace(Subspace):
    """Fixed random projection drawn once at construction (data-independent)."""
    def __init__(self, num_parameters, rank=20, method='dense'):
        assert method in ['dense', 'fastfood']
        super(RandomSpace, self).__init__()
        self.num_parameters = num_parameters
        self.rank = rank
        self.method = method
        if method == 'dense':
            # One dense Gaussian projection matrix, fixed for the object's lifetime.
            self.subspace = torch.randn(rank, num_parameters)
        if method == 'fastfood':
            raise NotImplementedError("FastFood transform hasn't been implemented yet")
    # random subspace is independent of data
    def collect_vector(self, vector):
        pass
    def get_space(self):
        return self.subspace
@Subspace.register_subclass('covariance')
class CovarianceSpace(Subspace):
    """Subspace spanned by the most recent `max_rank` deviation vectors."""
    def __init__(self, num_parameters, max_rank=20):
        super(CovarianceSpace, self).__init__()
        self.num_parameters = num_parameters
        # Buffers so rank and the deviation matrix are saved in state_dict.
        self.register_buffer('rank', torch.zeros(1, dtype=torch.long))
        self.register_buffer('cov_mat_sqrt',
                             torch.empty(0, self.num_parameters, dtype=torch.float32))
        self.max_rank = max_rank
    def collect_vector(self, vector):
        # FIFO: once full, drop the oldest row before appending the new one.
        if self.rank.item() + 1 > self.max_rank:
            self.cov_mat_sqrt = self.cov_mat_sqrt[1:, :]
        self.cov_mat_sqrt = torch.cat((self.cov_mat_sqrt, vector.view(1, -1)), dim=0)
        self.rank = torch.min(self.rank + 1, torch.as_tensor(self.max_rank)).view(-1)
    def get_space(self):
        # Deviation matrix scaled by 1/sqrt(K - 1).
        return self.cov_mat_sqrt.clone() / (self.cov_mat_sqrt.size(0) - 1) ** 0.5
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Resize the empty buffer to the checkpointed rank before loading.
        rank = state_dict[prefix + 'rank'].item()
        self.cov_mat_sqrt = self.cov_mat_sqrt.new_empty((rank, self.cov_mat_sqrt.size()[1]))
        super(CovarianceSpace, self)._load_from_state_dict(state_dict, prefix, local_metadata,
                                                           strict, missing_keys, unexpected_keys,
                                                           error_msgs)
@Subspace.register_subclass('pca')
class PCASpace(CovarianceSpace):
    """PCA subspace: the leading principal directions of the stored deviation
    matrix, via randomized SVD. `pca_rank` may be an int or 'mle' (Minka)."""
    def __init__(self, num_parameters, pca_rank=20, max_rank=20):
        super(PCASpace, self).__init__(num_parameters, max_rank=max_rank)
        # better phrasing for this condition?
        assert(pca_rank == 'mle' or isinstance(pca_rank, int))
        if pca_rank != 'mle':
            assert 1 <= pca_rank <= max_rank
        self.pca_rank = pca_rank
    def get_space(self):
        """Return the top-`pca_rank` principal directions, each scaled by its
        singular value, as a (pca_rank, num_parameters) tensor."""
        cov_mat_sqrt_np = self.cov_mat_sqrt.clone().numpy()
        # perform PCA on DD'
        cov_mat_sqrt_np /= (max(1, self.rank.item() - 1))**0.5
        if self.pca_rank == 'mle':
            pca_rank = self.rank.item()
        else:
            pca_rank = self.pca_rank
        # Clamp to the number of rows actually collected so far.
        pca_rank = max(1, min(pca_rank, self.rank.item()))
        # pca_decomp = TruncatedSVD(n_components=pca_rank)
        # pca_decomp.fit(cov_mat_sqrt_np)
        _, s, Vt = randomized_svd(cov_mat_sqrt_np, n_components=pca_rank, n_iter=5)
        # perform post-selection fitting
        if self.pca_rank == 'mle':
            eigs = s ** 2.0
            ll = np.zeros(len(eigs))
            correction = np.zeros(len(eigs))
            # compute minka's PCA marginal log likelihood and the correction term
            # NOTE(review): `_assess_dimension` is a private sklearn API whose
            # signature has changed across versions (the commented-out
            # n_features kwarg hints at this) — verify against the pinned
            # sklearn release before relying on the 'mle' branch.
            for rank in range(len(eigs)):
                # secondary correction term based on the rank of the matrix + degrees of freedom
                m = cov_mat_sqrt_np.shape[1] * rank - rank * (rank + 1) / 2.
                correction[rank] = 0.5 * m * np.log(cov_mat_sqrt_np.shape[0])
                ll[rank] = _assess_dimension(spectrum=eigs,
                                             rank=rank,
                                             # n_features=min(cov_mat_sqrt_np.shape),
                                             n_samples=max(cov_mat_sqrt_np.shape))
            self.ll = ll
            self.corrected_ll = ll - correction
            self.pca_rank = np.nanargmax(self.corrected_ll)
            print('PCA Rank is: ', self.pca_rank)
            return torch.FloatTensor(s[:self.pca_rank, None] * Vt[:self.pca_rank, :])
        else:
            return torch.FloatTensor(s[:, None] * Vt)
@Subspace.register_subclass('freq_dir')
class FreqDirSpace(CovarianceSpace):
    """
    Frequent directions is an online PCA technique, cf. "Frequent directions: Simple and deterministic matrix sketching."

    Maintains a sketch of at most 2*max_rank rows; when full, the sketch is
    shrunk via SVD by subtracting the max_rank-th squared singular value.
    """
    def __init__(self, num_parameters, max_rank=20):
        super(FreqDirSpace, self).__init__(num_parameters, max_rank=max_rank)
        self.register_buffer('num_models', torch.zeros(1, dtype=torch.long))
        # Accumulated shrinkage mass (not used downstream, kept for diagnostics).
        self.delta = 0.0
        self.normalized = False
    def collect_vector(self, vector):
        """Append a vector; shrink the sketch once it reaches 2*max_rank rows."""
        if self.rank >= 2 * self.max_rank:
            sketch = self.cov_mat_sqrt.numpy()
            [_, s, Vt] = np.linalg.svd(sketch, full_matrices=False)
            if s.size >= self.max_rank:
                current_delta = s[self.max_rank - 1] ** 2
                self.delta += current_delta
                # Shrink the top max_rank-1 directions by the evicted energy.
                s = np.sqrt(s[:self.max_rank - 1] ** 2 - current_delta)
                self.cov_mat_sqrt = torch.from_numpy(s[:, None] * Vt[:s.size, :])
        self.cov_mat_sqrt = torch.cat((self.cov_mat_sqrt, vector.view(1, -1)), dim=0)
        self.rank = torch.as_tensor(self.cov_mat_sqrt.size(0))
        self.num_models.add_(1)
        # New data invalidates the cached orthogonalization in get_space().
        self.normalized = False
    def get_space(self):
        """Return up to max_rank orthogonalized sketch rows, scaled by
        1/sqrt(num_models - 1). Orthogonalization is cached until new data arrives."""
        if not self.normalized:
            sketch = self.cov_mat_sqrt.numpy()
            [_, s, Vt] = np.linalg.svd(sketch, full_matrices=False)
            self.cov_mat_sqrt = torch.from_numpy(s[:, None] * Vt)
            self.normalized = True
        curr_rank = min(self.rank.item(), self.max_rank)
        return self.cov_mat_sqrt[:curr_rank].clone() / max(1, self.num_models.item() - 1) ** 0.5
| 7,016 | 35.357513 | 121 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/posteriors/realnvp.py | import math
import numpy as np
import torch
from torch import nn
from torch import distributions
class RealNVP(nn.Module):
    """Real NVP normalizing flow built from masked affine coupling layers.

    Args:
        nets: zero-arg factory returning the scale network s(.) for one layer.
        nett: zero-arg factory returning the translation network t(.) for one layer.
        masks: (num_layers, D) binary tensor; 1 marks the identity-passed dims.
        prior: base distribution over the latent z (must support log_prob/sample).
        device: optional device the module is moved to.
    """

    def __init__(self, nets, nett, masks, prior, device=None):
        super().__init__()
        self.prior = prior
        # Stored as a non-trainable Parameter so it moves with .to(device).
        self.mask = nn.Parameter(masks, requires_grad=False)
        self.t = torch.nn.ModuleList([nett() for _ in range(len(masks))])
        self.s = torch.nn.ModuleList([nets() for _ in range(len(masks))])
        self.to(device)
        self.device = device

    def g(self, z):
        """Inverse pass: map a latent sample z back to data space."""
        x = z
        for i in reversed(range(len(self.mask))):
            mx = self.mask[i] * x
            tmx = self.t[i](mx)
            smx = self.s[i](mx)
            # Invert the affine coupling on the unmasked dimensions.
            x = mx + (1 - self.mask[i]) * ((x - tmx) * torch.exp(-smx))
        return x

    def f(self, x):
        """Forward pass: map data x to latent z; also return log|det J|."""
        z = x
        log_det_J = 0
        for i in range(len(self.mask)):
            mz = self.mask[i] * z
            smz = self.s[i](mz)
            tmz = self.t[i](mz)
            z = mz + (1 - self.mask[i]) * (z * torch.exp(smz) + tmz)
            if x.dim() == 2:
                log_det_J += (smz * (1 - self.mask[i])).sum(1)
            else:
                # BUG FIX: the original called .sum(1, 2, 3), which binds the
                # positional args as (dim=1, keepdim=2, dtype=3) and raises a
                # TypeError. Reduce over all non-batch axes with a dim tuple.
                log_det_J += (smz * (1 - self.mask[i])).sum(dim=(1, 2, 3))
        return z, log_det_J

    def log_prob(self, x):
        """Exact log density of x under the flow (change of variables)."""
        z, log_det_J = self.f(x)
        return self.prior.log_prob(z) + log_det_J

    def sample(self, bs=1):
        """Draw `bs` samples by sampling the prior and inverting the flow."""
        z = self.prior.sample(torch.Size([bs]))
        x = self.g(z)
        return x
def construct_flow(D, coupling_layers_num=2, inner_dim=128, inner_layers=2, prior=None, device=None):
    """Build a RealNVP flow over R^D with alternating half-dimension masks.

    Args:
        D: dimensionality of the data/latent space.
        coupling_layers_num: number of affine coupling layers.
        inner_dim: hidden width of the s/t networks.
        inner_layers: number of extra (Linear -> ReLU) hidden pairs.
        prior: base distribution; defaults to standard normal on `device`.
        device: target device for the prior, masks, and flow parameters.
    Returns:
        A RealNVP instance.
    """
    def inner_seq(n, inner_dim):
        # n hidden (Linear -> ReLU) pairs shared by both s and t nets.
        res = []
        for _ in range(n):
            res.append(nn.Linear(inner_dim, inner_dim))
            res.append(nn.ReLU())
        return res

    class Nets(nn.Module):
        """ net for parametrizing scaling function in coupling layer """
        def __init__(self, D, inner_dim, inner_layers):
            super().__init__()
            self.seq_part = nn.Sequential(nn.Linear(D, inner_dim),
                                          nn.ReLU(),
                                          *inner_seq(inner_layers, inner_dim),
                                          nn.Linear(inner_dim, D),
                                          nn.Tanh())
            # Learned per-dimension scale so the Tanh output range is not fixed.
            self.scale = nn.Parameter(torch.ones(D))

        def forward(self, x):
            x = self.seq_part.forward(x)
            x = self.scale * x
            return x

    # a function that take no arguments and return a pytorch model, dim(X) -> dim(X)
    nets = lambda: Nets(D, inner_dim, inner_layers)
    nett = lambda: nn.Sequential(nn.Linear(D, inner_dim),
                                 nn.ReLU(),
                                 *inner_seq(inner_layers, inner_dim),
                                 nn.Linear(inner_dim, D))
    if prior is None:
        prior = distributions.MultivariateNormal(torch.zeros(D).to(device),
                                                 torch.eye(D).to(device))
    # Alternate which half of the dimensions passes through unchanged.
    d = D // 2
    masks = torch.zeros(coupling_layers_num, D)
    for i in range(masks.size(0)):
        if i % 2:
            masks[i, :d] = 1.
        else:
            masks[i, d:] = 1.
    # BUG FIX: Tensor.to() is out-of-place; the original `masks.to(device)`
    # discarded its result, leaving `masks` on the CPU.
    masks = masks.to(device)
    return RealNVP(nets, nett, masks, prior, device=device)
| 3,244 | 32.112245 | 101 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/posteriors/proj_model.py | import torch
from ..utils import unflatten_like
class SubspaceModel(torch.nn.Module):
    """Affine map from subspace coordinates to weight space: w = mean + cov_factor^T t."""

    def __init__(self, mean, cov_factor):
        super(SubspaceModel, self).__init__()
        # The subspace rank is the number of basis rows in cov_factor.
        self.rank = cov_factor.size(0)
        # Buffers follow .to()/.cuda() moves but are excluded from training.
        self.register_buffer('mean', mean)
        self.register_buffer('cov_factor', cov_factor)

    def forward(self, t):
        """Project coordinates *t* (shape [rank]) into the full weight space."""
        offset = self.cov_factor.t() @ t
        return self.mean + offset
class ProjectedModel(torch.nn.Module):
    """Wraps a base model whose weights are reconstructed from fixed subspace
    coordinates `proj_params` on every forward call."""
    def __init__(self, proj_params, model, projection=None, mean=None, subspace=None):
        super(ProjectedModel, self).__init__()
        self.model = model
        # Either take a ready-made subspace, or build one from mean+projection.
        if subspace is None:
            self.subspace = SubspaceModel(mean, projection)
        else:
            self.subspace = subspace
        if mean is None and subspace is None:
            raise NotImplementedError('Must enter either subspace or mean')
        self.proj_params = proj_params
    def update_params(self, vec, model):
        """Overwrite `model`'s parameters in place with the flat vector `vec`.

        Each parameter is detached first so the copy does not build a graph.
        """
        vec_list = unflatten_like(likeTensorList=list(model.parameters()), vector=vec.view(1,-1))
        for param, v in zip(model.parameters(), vec_list):
            param.detach_()
            # Zero then add: an in-place overwrite that preserves the tensor object.
            param.mul_(0.0).add_(v)
    def forward(self, *args, **kwargs):
        # Map subspace coordinates to a full weight vector, load it, then run.
        y = self.subspace(self.proj_params)
        self.update_params(y, self.model)
        return self.model(*args, **kwargs)
| 1,329 | 32.25 | 97 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/posteriors/pyro.py | import numpy as np
import torch
import pyro
import pyro.distributions as dist
from pyro.infer.mcmc import NUTS, MCMC
from pyro.nn import AutoRegressiveNN
from ..utils import extract_parameters
from ..utils import set_weights_old as set_weights
class PyroModel(torch.nn.Module):
    """Pyro probabilistic model over subspace coordinates `t`.

    The prior on `t` is an isotropic normal with std exp(prior_log_sigma);
    `subspace(t)` produces a full weight vector that is loaded into
    `base_model` before computing the likelihood.
    """
    def __init__(self,
                 base,
                 subspace,
                 prior_log_sigma,
                 likelihood_given_outputs,
                 batch_size = 100,
                 *args, **kwargs):
        super(PyroModel, self).__init__()
        # `base` is a model class/factory; extra args are forwarded to it.
        self.base_model = base(*args, **kwargs)
        self.base_params = extract_parameters(self.base_model)
        #self.rank = cov_factor.size()[0]
        self.prior_log_sigma = prior_log_sigma
        # `likelihood_given_outputs` maps network outputs to a pyro distribution.
        self.likelihood = likelihood_given_outputs
        self.batch_size = batch_size
        self.subspace = subspace
        self.rank = self.subspace.cov_factor.size(0)
    def model(self, x, y):
        """Full-data model: iterates all mini-batches inside pyro plates."""
        self.t = pyro.sample("t", dist.Normal(torch.zeros(self.rank),
                                              torch.ones(self.rank) * np.exp(self.prior_log_sigma)).to_event(1))
        self.t = self.t.to(x.device)
        bs = self.batch_size
        # Ceil-divide the dataset into mini-batches.
        num_batches = x.shape[0] // bs
        if x.shape[0] % bs: num_batches += 1
        for i in pyro.plate("batches", num_batches):
            x_ = x[i * bs: (i+1)*bs]
            y_ = y[i * bs: (i+1)*bs]
            with pyro.plate("data" + str(i), x_.shape[0]):
                w = self.subspace(self.t)
                set_weights(self.base_params, w, self.t.device)
                z = self.base_model(x_)
                pyro.sample("y" + str(i), self.likelihood(z).to_event(1), obs=y_)
    def model_subsample(self, x, y):
        """Subsampled model: scores one random mini-batch per invocation."""
        subsample_size = self.batch_size
        self.t = pyro.sample("t", dist.Normal(torch.zeros(self.rank),
                                              torch.ones(self.rank) * np.exp(self.prior_log_sigma)).to_event(1))
        self.t = self.t.to(x.device)
        with pyro.plate("data", x.shape[0], subsample_size=subsample_size) as ind:
            #w = self.mean.to(self.t.device) + self.cov_factor.to(self.t.device).t() @ self.t
            w = self.subspace(self.t)
            set_weights(self.base_params, w, self.t.device)
            z = self.base_model(x[ind])
            pyro.sample("y", self.likelihood(z).to_event(1), obs=y[ind])
    def forward(self, *args, **kwargs):
        """Run the base model with weights derived from the current `self.t`."""
        w = self.subspace(self.t)
        set_weights(self.base_params, w, self.t.device)
        return self.base_model(*args, **kwargs)
class BenchmarkPyroModel(PyroModel):
    """PyroModel with a `fit` method that runs MCMC (NUTS by default) and a
    `sample` method that loads one posterior draw of `t` for evaluation."""
    def __init__(self, base, subspace, prior_log_sigma, likelihood_given_outputs, batch_size = 100,
                 kernel=NUTS, num_samples=30, kernel_kwargs=None,
                 *args, **kwargs):
        # BUG FIX: the original used the mutable default `kernel_kwargs={}`,
        # which is shared across all instances. `None` sentinel is safe and
        # backward-compatible for every existing caller.
        if kernel_kwargs is None:
            kernel_kwargs = {}
        super(BenchmarkPyroModel, self).__init__(base, subspace, prior_log_sigma, likelihood_given_outputs,
                                                 batch_size=batch_size, *args, **kwargs)
        self.kernel = kernel(self.model, **kernel_kwargs)
        self.num_samples = num_samples
        #self.loader = loader
        self.all_samples = None
        self.mcmc_run = None
    def fit(self, inputs, targets, *args, **kwargs):
        """Run MCMC on (inputs, targets) and cache the posterior samples of t."""
        self.mcmc_run = MCMC(self.kernel, num_samples=self.num_samples, warmup_steps=100).run(inputs, targets)
        self.all_samples = torch.cat(list(self.mcmc_run.marginal(sites="t").support(flatten=True).values()), dim=-1)
    def sample(self, ind=None, scale=1.0):
        """Load posterior draw `ind` (random if None) into `self.t` in place.

        NOTE(review): `scale` is accepted for interface parity but unused here.
        """
        if ind is None:
            ind = np.random.randint(self.num_samples)
        self.eval()
        self.t.set_(self.all_samples[int(ind), :])
class GaussianGuide(torch.nn.Module):
    """Mean-field Gaussian variational guide over the subspace coordinates t.

    The std is parameterized through an inverse softplus (plus `eps`) so it
    stays positive; the mean is optional (`with_mu`).
    """
    def __init__(self, rank, init_inv_softplus_sigma=-3.0, eps=1e-6, with_mu=True):
        super(GaussianGuide, self).__init__()
        self.rank = rank
        self.eps = eps
        self.with_mu = with_mu
        self.init_inv_softplus_sigma = init_inv_softplus_sigma
    def model(self, *args, **kargs):
        """Pyro guide: sample t ~ N(mu, softplus(inv_softplus_sigma) + eps)."""
        if self.with_mu:
            self.mu = pyro.param("mu", torch.zeros(self.rank))
        else:
            self.mu = torch.zeros(self.rank)
        self.inv_softplus_sigma = pyro.param("inv_softplus_sigma",
                                             torch.ones(self.rank) * (self.init_inv_softplus_sigma))
        sigma = torch.nn.functional.softplus(self.inv_softplus_sigma) + self.eps
        self.t = pyro.sample("t", dist.Normal(self.mu, sigma).to_event(1))
        return self.t
class IAFGuide(torch.nn.Module):
    """Inverse-autoregressive-flow variational guide over the subspace
    coordinates t, with a learnable base-distribution scale."""
    def __init__(self, rank, n_hid=[100]):
        super(IAFGuide, self).__init__()
        self.rank = rank
        # Hidden layer sizes for the autoregressive network.
        self.n_hid = n_hid
    @property
    def sigma(self):
        """Positive base-distribution scale derived via softplus."""
        sigma = torch.nn.functional.softplus(self.inv_softplus_sigma)
        return sigma
    def model(self, *args, **kargs):
        """Pyro guide: sample t from an IAF-transformed normal base."""
        self.inv_softplus_sigma = pyro.param("inv_softplus_sigma", torch.ones(self.rank))
        sigma = self.sigma#torch.nn.functional.softplus(self.inv_softplus_sigma)
        #base_dist = dist.Normal(torch.zeros(self.rank), torch.ones(self.rank))
        # Pavel: introducing `sigma` in the IAF distribution makes training more
        # stable in terms of the scale of the distribution we are trying to learn
        base_dist = dist.Normal(torch.zeros(self.rank), sigma)
        ann = AutoRegressiveNN(self.rank, self.n_hid, skip_connections=True)
        iaf = dist.InverseAutoregressiveFlow(ann)
        # Registering as a pyro module lets pyro track/optimize the IAF params.
        iaf_module = pyro.module("my_iaf", iaf)
        iaf_dist = dist.TransformedDistribution(base_dist, [iaf])
        self.t = pyro.sample("t", iaf_dist.to_event(1))
        return self.t
class TemperedCategorical(dist.Categorical):
    """Categorical distribution whose log-density is divided by a temperature
    (temperature > 1 flattens the likelihood; "tempered posterior")."""
    def __init__(self, temperature=1., *args, **kwargs):
        super(TemperedCategorical, self).__init__(*args, **kwargs)
        self.temperature = temperature
    def log_prob(self, value):
        # Standard categorical log-prob, rescaled by 1/temperature.
        ans = super(TemperedCategorical, self).log_prob(value)
        return ans / self.temperature
    def expand(self, batch_shape):
        # Blindly copied from pyro
        # (re-implemented here only to propagate `temperature` to the expanded
        # distribution, which the parent class would drop).
        batch_shape = torch.Size(batch_shape)
        validate_args = self.__dict__.get('validate_args')
        if 'probs' in self.__dict__:
            probs = self.probs.expand(batch_shape + self.probs.shape[-1:])
            return TemperedCategorical(temperature=self.temperature, probs=probs, validate_args=validate_args)
        else:
            logits = self.logits.expand(batch_shape + self.logits.shape[-1:])
            return TemperedCategorical(temperature=self.temperature, logits=logits, validate_args=validate_args)
| 6,618 | 37.707602 | 116 | py |
lgv-geometric-transferability | lgv-geometric-transferability-main/utils/subspace_inference/posteriors/vi_model.py | import math
import torch
from ..utils import extract_parameters, train_epoch
from ..utils import set_weights_old as set_weights
class VIModel(torch.nn.Module):
    """Mean-field Gaussian variational posterior over subspace coordinates.

    Each forward pass reparameterizes z ~ N(mu, diag(sigma^2)), maps it to a
    full weight vector via `subspace`, loads those weights into `base_model`,
    and evaluates it.
    """
    def __init__(self, base, subspace, init_inv_softplus_sigma=-3.0,
                 prior_log_sigma=3.0, eps=1e-6, with_mu=True, *args, **kwargs):
        super(VIModel, self).__init__()
        # `base` is a model class/factory; extra args are forwarded to it.
        self.base_model = base(*args, **kwargs)
        self.base_params = extract_parameters(self.base_model)
        self.subspace = subspace
        self.rank = self.subspace.rank
        self.prior_log_sigma = prior_log_sigma
        self.eps = eps
        self.with_mu = with_mu
        if with_mu:
            self.mu = torch.nn.Parameter(torch.zeros(self.rank))
        # sigma is parameterized through an inverse softplus so it stays positive.
        self.inv_softplus_sigma = torch.nn.Parameter(torch.empty(self.rank).fill_(init_inv_softplus_sigma))
    def forward(self, *args, **kwargs):
        """Sample weights by the reparameterization trick and run the base model."""
        device = self.inv_softplus_sigma.device
        sigma = torch.nn.functional.softplus(self.inv_softplus_sigma) + self.eps
        if self.with_mu:
            z = self.mu + torch.randn(self.rank, device=device) * sigma
        else:
            z = torch.randn(self.rank, device=device) * sigma
        w = self.subspace(z)
        set_weights(self.base_params, w, device)
        #set_weights(self.base_model, w, device)
        return self.base_model(*args, **kwargs)
    def sample(self, scale=1.):
        """Return a weight vector sampled from the (detached) posterior; `scale`
        rescales the posterior std."""
        device = self.inv_softplus_sigma.device
        sigma = torch.nn.functional.softplus(self.inv_softplus_sigma.detach()) + self.eps
        if self.with_mu:
            z = self.mu + torch.randn(self.rank, device=device) * sigma * scale
        else:
            z = torch.randn(self.rank, device=device) * sigma * scale
        w = self.subspace(z)
        return w
    #def sample_z(self):
    #    sigma = torch.nn.functional.softplus(self.inv_softplus_sigma.detach().cpu()) + self.eps
    #    z = torch.randn(self.rank) * sigma
    #    if self.with_mu:
    #        z += self.mu.detach().cpu()
    #    return z
    def compute_kl(self):
        """KL(q || prior) in closed form for diagonal Gaussians, with the prior
        N(0, exp(prior_log_sigma)^2 I). The constant -rank/2 term is omitted."""
        sigma = torch.nn.functional.softplus(self.inv_softplus_sigma) + self.eps
        kl = torch.sum(self.prior_log_sigma - torch.log(sigma) +
                       0.5 * (sigma ** 2) / (math.exp(self.prior_log_sigma * 2)))
        if self.with_mu:
            kl += 0.5 * torch.sum(self.mu ** 2) / math.exp(self.prior_log_sigma * 2)
        return kl
    def compute_entropy(self):
        """Entropy of q up to an additive constant: sum(log sigma)."""
        sigma = torch.nn.functional.softplus(self.inv_softplus_sigma) + self.eps
        return torch.sum(torch.log(sigma))
class ELBO(object):
    """Negative evidence lower bound: data NLL plus a tempered KL penalty.

    `criterion(model, input, target)` must return (nll, output, extra);
    the KL term is divided by `num_samples` (dataset size) and multiplied
    by `temperature` ("cold posterior" scaling).
    """

    def __init__(self, criterion, num_samples, temperature=1.):
        self.criterion = criterion
        self.num_samples = num_samples
        self.temperature = temperature
        #print("In ELBO, temperature:", temperature)
        #print("In ELBO, num_samples:", num_samples)

    def __call__(self, model, input, target):
        """Return (loss, model output, stats dict with scalar 'nll'/'kl')."""
        nll, output, _ = self.criterion(model, input, target)
        kl = model.compute_kl() / self.num_samples
        kl = kl * self.temperature
        loss = nll + kl
        #loss = nll
        stats = {'nll': nll.item(), 'kl': kl.item()}
        return loss, output, stats
class BenchmarkVIModel(VIModel):
    # same as a VI model, except with a fit method
    # for ease of benchmarking
    """VIModel bundled with its data loader, optimizer, and ELBO objective so
    that `fit()` runs the whole variational training loop."""
    def __init__(self, loader, criterion, epochs, base, subspace, init_inv_softplus_sigma=-3.0,
                 prior_log_sigma=3.0, eps=1e-6, with_mu=True, lr=0.01, num_samples=45000, temperature=1.0, use_cuda=True, *args, **kwargs):
        # BUG FIX: the original hard-coded init_inv_softplus_sigma=-3.0 in this
        # super() call, silently ignoring the constructor argument. Forward the
        # caller's value instead (the default stays -3.0, so behavior is
        # unchanged for callers that never passed it).
        super(BenchmarkVIModel, self).__init__(base, subspace, init_inv_softplus_sigma=init_inv_softplus_sigma,
                                               prior_log_sigma=prior_log_sigma, eps=eps, with_mu=with_mu, *args, **kwargs)
        self.use_cuda = use_cuda
        self.loader = loader
        self.criterion = criterion
        #print("Num Samples ELBO:", num_samples)
        self.optimizer = torch.optim.Adam([param for param in self.parameters()], lr=lr)
        self.elbo = ELBO(self.criterion, num_samples, temperature=temperature)
        self.epochs = epochs
    def fit(self, *args, **kwargs):
        """Optimize the ELBO over `self.loader` for `self.epochs` epochs."""
        for epoch in range(self.epochs):
            train_res = train_epoch(self.loader, self, self.elbo, self.optimizer, regression=True, cuda = self.use_cuda)
            # Collected for optional progress reporting (printing is disabled).
            values = ['%d/%d' % (epoch + 1, self.epochs), train_res['accuracy'], train_res['loss'],
                      train_res['stats']['kl'], train_res['stats']['nll']]
            #print(values)
            #with torch.no_grad():
            #    print("sigma:", torch.nn.functional.softplus(self.inv_softplus_sigma.cpu()))
            #    if self.with_mu:
            #        print("mu:", self.mu.cpu())
| 4,676 | 38.635593 | 139 | py |
ActiveVisionManipulation | ActiveVisionManipulation-master/HER/envs/fakercnn_pusher.py | from HER.envs import bb_pusher
import numpy as np
from HER.rcnn import renderer
import random
from keras import backend as K
from HER.rcnn import load_rcnn
import tensorflow as tf
from HER.envs.pusher import _tuple
from ipdb import set_trace as st
class BaxterEnv(bb_pusher.BaxterEnv):
    """Pusher environment with a simulated ("fake") object detector.

    During training, detections come from ground-truth render masks with a
    success probability tied to how occluded the object is; during test,
    detections come from a real Mask R-CNN running in its own TF session.
    """
    def __init__(self, *args, aux_rwd = False, test = False, fake_scheme = 'default', **kwargs):
        # aux_rwd: add a small bonus reward for successful detections (train only).
        # fake_scheme: how detection probability is derived from occlusion.
        self.aux_rwd = aux_rwd
        self.fake_scheme = fake_scheme
        self.test = test
        super().__init__(*args, **kwargs)
        if test:
            self.initialize_rcnn()
    def reset_model(self):
        """Reset the episode and clear the cached visualization box."""
        #we can't use last_mask or last_box BECAUSE it's misleading once the camera starts moving
        self.last_box_vis = None
        # if hasattr(self, 'last_mask'):
        #     del self.last_mask #in case we fail at detection
        # if self.test:
        #     self.last_box = None
        return super().reset_model()
    def calc_reward(self, state, return_success = False):
        """Base pusher reward plus (train-time only) the detection bonus."""
        rwd, succ = super().calc_reward(state, True)
        if self.aux_rwd and not self.test:
            rwd += state.aux
        if return_success:
            return rwd, succ
        else:
            return rwd
    def _get_obs(self):
        """Base observation, annotated with the last detection bonus at train time."""
        obs = super()._get_obs()
        if self.aux_rwd and not self.test:
            obs.aux = self.last_success
        return obs
    #we need to dig up some old code here...
    def initialize_rcnn(self):
        """Load the Mask R-CNN into a dedicated TF graph/session."""
        #need to mess around with a different graph/session because the rcnn
        #and the HER code runs on separate graphs/sessions
        self.rcnn_graph = tf.Graph()
        self.rcnn_session = tf.Session(graph = self.rcnn_graph)
        K.set_session(self.rcnn_session)
        with self.rcnn_session.as_default():
            with self.rcnn_graph.as_default():
                self.rcnn = load_rcnn.load_rcnn()
    def _get_obj_bbox_from_rcnn(self):
        """Run the real detector on the current render; return (x1, x2, y1, y2)
        for the most confident detection, or None below the 0.98 threshold."""
        self.renderer.render_rgb() #throwaway
        rgb = self.renderer.render_rgb()
        #####
        K.set_session(self.rcnn_session)
        with self.rcnn_session.as_default():
            with self.rcnn_graph.as_default():
                rcnn_out = self.rcnn.detect([rgb], verbose = 0)[0]
        rois = rcnn_out['rois']
        #from ipdb import set_trace as st
        #st()
        DEBUG_RCNN = True
        if len(rois) == 0 or rcnn_out['scores'][0] < 0.98:
            if DEBUG_RCNN:
                print("rcnn detected nothing")
            box = None
        else:
            #rois are sorted by score, so we take the most confident detection
            y1, x1, y2, x2 = rois[0]
            box = (x1, x2, y1, y2)
            if DEBUG_RCNN:
                print("rcnn detection at", box)
        # self.last_box = box
        self.last_box_vis = box
        # return self.last_box
        return box
    def _get_obj_bbox(self):
        """Return the object bounding box, or None on a (simulated) miss.

        Train time: derive a stochastic detection from ground-truth masks,
        with success probability a function of the visible (unoccluded)
        fraction. Test time: defer to the real detector.
        """
        #if self.test and self.num_step > 1:
        if self.test:
            return self._get_obj_bbox_from_rcnn()
        self.renderer.render_rgb() #throwaway
        mask = self.renderer.render_box()
        modal_mask = self.renderer.render_box(override_amodal = False)
        # Visible fraction: modal (visible) pixels over amodal (full) pixels.
        unocc = np.sum(modal_mask) / (np.sum(mask) + 1E-9)
        #complete unocc = 100% success. 75% occluded means 50% detection chance
        if self.num_step == 1: #let's say we always see it on ep 0
            chance_success = 1.0
        else:
            #actually this seems a bit too strict
            if self.fake_scheme == 'default':
                chance_success = np.sqrt(unocc)
            elif self.fake_scheme == 'piecewise':
                if unocc >= 0.2:
                    chance_success = unocc*0.1+0.9 #so 0.92 at 0.2
                else:
                    chance_success = unocc*0.92*5.0
            else:
                raise Exception('unknown fake_scheme')
        # `bar` is a lazily-created namespace object holding running detection stats.
        if not hasattr(self, 'bar'):
            self.bar = lambda: None
            self.bar.count = 0
            self.bar.succ = 0
            self.bar.unocc = 0.0
        self.bar.count += 1
        self.bar.unocc += unocc
        self.last_success = 0
        if random.random() < chance_success: #if we succeeded
            #self.last_mask = mask
            # 0.25 is the auxiliary reward granted for a successful detection.
            self.last_success = 0.25
            self.bar.succ += 1
        else: #if we failed
            return None
        #elif not hasattr(self, 'last_mask'): #if we failed but don't have last mask
            #self.last_mask = np.zeros((self.img_params.full_imgH, self.img_params.full_imgW))
        if self.debug:
            print('detect rate', self.bar.succ/self.bar.count)
            print('unocc', self.bar.unocc/self.bar.count)
        #return self._get_bbox_from_mask(self.last_mask, unocc)
        return self._get_bbox_from_mask(mask, unocc)
| 4,861 | 32.531034 | 97 | py |
ActiveVisionManipulation | ActiveVisionManipulation-master/HER/rcnn/Mask_RCNN/parallel_model.py | """
Mask R-CNN
Multi-GPU Support for Keras.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
Ideas and a small code snippets from these sources:
https://github.com/fchollet/keras/issues/2436
https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012
https://github.com/avolkov1/keras_experiments/blob/master/keras_exp/multigpu/
https://github.com/fchollet/keras/blob/master/keras/utils/training_utils.py
"""
import tensorflow as tf
import keras.backend as K
import keras.layers as KL
import keras.models as KM
class ParallelModel(KM.Model):
    """Subclasses the standard Keras Model and adds multi-GPU support.
    It works by creating a copy of the model on each GPU. Then it slices
    the inputs and sends a slice to each copy of the model, and then
    merges the outputs together and applies the loss on the combined
    outputs.
    """
    def __init__(self, keras_model, gpu_count):
        """Class constructor.
        keras_model: The Keras model to parallelize
        gpu_count: Number of GPUs. Must be > 1
        """
        self.inner_model = keras_model
        self.gpu_count = gpu_count
        merged_outputs = self.make_parallel()
        super(ParallelModel, self).__init__(inputs=self.inner_model.inputs,
                                            outputs=merged_outputs)
    def __getattribute__(self, attrname):
        """Redirect loading and saving methods to the inner model. That's where
        the weights are stored."""
        # Matches any attribute with 'load'/'save' in its name (load_weights,
        # save_weights, ...), so checkpoints hold the single-GPU weights.
        if 'load' in attrname or 'save' in attrname:
            return getattr(self.inner_model, attrname)
        return super(ParallelModel, self).__getattribute__(attrname)
    def summary(self, *args, **kwargs):
        """Override summary() to display summaries of both, the wrapper
        and inner models."""
        super(ParallelModel, self).summary(*args, **kwargs)
        self.inner_model.summary(*args, **kwargs)
    def make_parallel(self):
        """Creates a new wrapper model that consists of multiple replicas of
        the original model placed on different GPUs.
        """
        # Slice inputs. Slice inputs on the CPU to avoid sending a copy
        # of the full inputs to all GPUs. Saves on bandwidth and memory.
        input_slices = {name: tf.split(x, self.gpu_count)
                        for name, x in zip(self.inner_model.input_names,
                                           self.inner_model.inputs)}
        output_names = self.inner_model.output_names
        outputs_all = []
        for i in range(len(self.inner_model.outputs)):
            outputs_all.append([])
        # Run the model call() on each GPU to place the ops there
        for i in range(self.gpu_count):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i):
                    # Run a slice of inputs through this replica
                    zipped_inputs = zip(self.inner_model.input_names,
                                        self.inner_model.inputs)
                    # NOTE(review): the lambdas close over loop variables
                    # `name`/`i`; this works because Keras evaluates the Lambda
                    # immediately when called here, but would late-bind if the
                    # layer were ever re-invoked — confirm before refactoring.
                    inputs = [
                        KL.Lambda(lambda s: input_slices[name][i],
                                  output_shape=lambda s: (None,) + s[1:])(tensor)
                        for name, tensor in zipped_inputs]
                    # Create the model replica and get the outputs
                    outputs = self.inner_model(inputs)
                    if not isinstance(outputs, list):
                        outputs = [outputs]
                    # Save the outputs for merging back together later
                    for l, o in enumerate(outputs):
                        outputs_all[l].append(o)
        # Merge outputs on CPU
        with tf.device('/cpu:0'):
            merged = []
            for outputs, name in zip(outputs_all, output_names):
                # If outputs are numbers without dimensions, add a batch dim.
                def add_dim(tensor):
                    """Add a dimension to tensors that don't have any."""
                    if K.int_shape(tensor) == ():
                        return KL.Lambda(lambda t: K.reshape(t, [1, 1]))(tensor)
                    return tensor
                outputs = list(map(add_dim, outputs))
                # Concatenate
                merged.append(KL.Concatenate(axis=0, name=name)(outputs))
        return merged
if __name__ == "__main__":
    # Testing code below. It creates a simple model to train on MNIST and
    # tries to run it on 2 GPUs. It saves the graph so it can be viewed
    # in TensorBoard. Run it as:
    #
    # python3 parallel_model.py
    import os
    import numpy as np
    import keras.optimizers
    from keras.datasets import mnist
    from keras.preprocessing.image import ImageDataGenerator
    GPU_COUNT = 2
    # Root directory of the project
    ROOT_DIR = os.getcwd()
    # Directory to save logs and trained model
    MODEL_DIR = os.path.join(ROOT_DIR, "logs/parallel")
    def build_model(x_train, num_classes):
        """Build a small conv net classifier sized to the MNIST input shape."""
        # Reset default graph. Keras leaves old ops in the graph,
        # which are ignored for execution but clutter graph
        # visualization in TensorBoard.
        tf.reset_default_graph()
        inputs = KL.Input(shape=x_train.shape[1:], name="input_image")
        x = KL.Conv2D(32, (3, 3), activation='relu', padding="same",
                      name="conv1")(inputs)
        x = KL.Conv2D(64, (3, 3), activation='relu', padding="same",
                      name="conv2")(x)
        x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x)
        x = KL.Flatten(name="flat1")(x)
        x = KL.Dense(128, activation='relu', name="dense1")(x)
        x = KL.Dense(num_classes, activation='softmax', name="dense2")(x)
        return KM.Model(inputs, x, "digit_classifier_model")
    # Load MNIST Data; add a channel axis and scale pixels to [0, 1].
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = np.expand_dims(x_train, -1).astype('float32') / 255
    x_test = np.expand_dims(x_test, -1).astype('float32') / 255
    print('x_train shape:', x_train.shape)
    print('x_test shape:', x_test.shape)
    # Build data generator and model
    datagen = ImageDataGenerator()
    model = build_model(x_train, 10)
    # Add multi-GPU support.
    model = ParallelModel(model, GPU_COUNT)
    optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=5.0)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer, metrics=['accuracy'])
    model.summary()
    # Train
    model.fit_generator(
        datagen.flow(x_train, y_train, batch_size=64),
        steps_per_epoch=50, epochs=10, verbose=1,
        validation_data=(x_test, y_test),
        callbacks=[keras.callbacks.TensorBoard(log_dir=MODEL_DIR,
                                               write_graph=True)]
    )
| 6,863 | 38.448276 | 95 | py |
ActiveVisionManipulation | ActiveVisionManipulation-master/HER/rcnn/Mask_RCNN/model.py | """
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import sys
import glob
import random
import math
import datetime
import itertools
import json
import re
import logging
from collections import OrderedDict
import numpy as np
import scipy.misc
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.initializers as KI
import keras.engine as KE
import keras.models as KM
import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
    """Prints a text message. And, optionally, if a Numpy array is provided it
    prints its shape, min, and max values.
    """
    if array is not None:
        text = text.ljust(25)
        if array.size:
            text += ("shape: {:20}  min: {:10.5f}  max: {:10.5f}".format(
                str(array.shape),
                array.min(),
                array.max()))
        else:
            # BUG FIX: the original fed "" into the {:10.5f} float slots for an
            # empty array, which raises ValueError. Use plain string slots here.
            text += "shape: {:20}  min: {:>10}  max: {:>10}".format(
                str(array.shape), "", "")
    print(text)
class BatchNorm(KL.BatchNormalization):
    """Batch Normalization class. Subclasses the Keras BN class and
    hardcodes training=False so the BN layer doesn't update
    during training.
    Batch normalization has a negative effect on training if batches are small
    so we disable it here.
    """
    def call(self, inputs, training=None):
        # Force inference mode: use the (frozen) moving mean/variance and do
        # not update the running statistics, regardless of the caller's flag.
        return super(self.__class__, self).call(inputs, training=False)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    """
    filters1, filters2, filters3 = filters
    conv_prefix = 'res{}{}_branch'.format(stage, block)
    bn_prefix = 'bn{}{}_branch'.format(stage, block)

    # Main path: 1x1 reduce -> kxk conv -> 1x1 expand (bottleneck design).
    x = KL.Conv2D(filters1, (1, 1), name=conv_prefix + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(axis=3, name=bn_prefix + '2a')(x)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(filters2, (kernel_size, kernel_size), padding='same',
                  name=conv_prefix + '2b', use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_prefix + '2b')(x)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(filters3, (1, 1), name=conv_prefix + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_prefix + '2c')(x)

    # Residual connection: add the input unchanged (shapes already match).
    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res{}{}_out'.format(stage, block))(x)
    return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    filters1, filters2, filters3 = filters
    conv_prefix = 'res{}{}_branch'.format(stage, block)
    bn_prefix = 'bn{}{}_branch'.format(stage, block)

    # Main path: strided 1x1 reduce -> kxk conv -> 1x1 expand.
    x = KL.Conv2D(filters1, (1, 1), strides=strides,
                  name=conv_prefix + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(axis=3, name=bn_prefix + '2a')(x)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(filters2, (kernel_size, kernel_size), padding='same',
                  name=conv_prefix + '2b', use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_prefix + '2b')(x)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(filters3, (1, 1), name=conv_prefix + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_prefix + '2c')(x)

    # Shortcut path: strided 1x1 projection so shapes match for the addition.
    shortcut = KL.Conv2D(filters3, (1, 1), strides=strides,
                         name=conv_prefix + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(axis=3, name=bn_prefix + '1')(shortcut)

    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res{}{}_out'.format(stage, block))(x)
    return x
def resnet_graph(input_image, architecture, stage5=False):
    """Build a ResNet-50/101 backbone graph.

    input_image: input image tensor
    architecture: "resnet50" or "resnet101"
    stage5: whether to build stage 5 of the network (C5 is None otherwise)

    Returns [C1, C2, C3, C4, C5], the feature maps at the end of each stage.
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1: 7x7 conv, BN, ReLU, 3x3 max pool.
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(axis=3, name='bn_conv1')(x)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2: no extra spatial downsampling (strides=(1, 1)).
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    for blk in ('b', 'c'):
        x = identity_block(x, 3, [64, 64, 256], stage=2, block=blk)
    C2 = x
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    for blk in ('b', 'c', 'd'):
        x = identity_block(x, 3, [128, 128, 512], stage=3, block=blk)
    C3 = x
    # Stage 4: the identity-block count is the only difference between
    # the two supported depths.
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))
    C4 = x
    # Stage 5 (optional)
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
    """Apply the given refinement deltas to the given boxes.

    boxes: [N, 4] where each row is y1, x1, y2, x2
    deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]
    """
    # Convert corner coordinates to center/size form.
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h
    cx = boxes[:, 1] + 0.5 * w
    # Shift the center by a fraction of the (pre-scaling) size, then scale
    # the size exponentially; deltas hold log-space height/width factors.
    cy = cy + deltas[:, 0] * h
    cx = cx + deltas[:, 1] * w
    h = h * tf.exp(deltas[:, 2])
    w = w * tf.exp(deltas[:, 3])
    # Convert back to corner coordinates.
    y1 = cy - 0.5 * h
    x1 = cx - 0.5 * w
    y2 = y1 + h
    x2 = x1 + w
    return tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
def clip_boxes_graph(boxes, window):
    """Clip boxes so they lie inside a window.

    boxes: [N, 4] each row is y1, x1, y2, x2
    window: [4] in the form y1, x1, y2, x2
    """
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)

    # Clamp each coordinate into [lo, hi] on its axis.
    def clamp(v, lo, hi):
        return tf.maximum(tf.minimum(v, hi), lo)

    clipped = tf.concat([clamp(y1, wy1, wy2),
                         clamp(x1, wx1, wx2),
                         clamp(y2, wy1, wy2),
                         clamp(x2, wx1, wx2)], axis=1, name="clipped_boxes")
    clipped.set_shape((clipped.shape[0], 4))
    return clipped
class ProposalLayer(KE.Layer):
    """Receives anchor scores and selects a subset to pass as proposals
    to the second stage. Filtering is done based on anchor scores and
    non-max suppression to remove overlaps. It also applies bounding
    box refinement deltas to anchors.

    Inputs:
        rpn_probs: [batch, anchors, (bg prob, fg prob)]
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]

    Returns:
        Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
    """

    def __init__(self, proposal_count, nms_threshold, anchors,
                 config=None, **kwargs):
        """
        proposal_count: max number of proposals to output; results are
            zero-padded up to exactly this many rows.
        nms_threshold: IoU threshold used by non-max suppression.
        anchors: [N, (y1, x1, y2, x2)] anchors defined in image coordinates
        config: model config object (read for RPN_BBOX_STD_DEV,
            IMAGES_PER_GPU and IMAGE_SHAPE).
        """
        super(ProposalLayer, self).__init__(**kwargs)
        self.config = config
        self.proposal_count = proposal_count
        self.nms_threshold = nms_threshold
        # Cast once to float32 so deltas can be applied without further casts.
        self.anchors = anchors.astype(np.float32)

    def call(self, inputs):
        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
        scores = inputs[0][:, :, 1]
        # Box deltas [batch, num_rois, 4]
        deltas = inputs[1]
        # Undo the std-dev normalization applied to the regression targets.
        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
        # Base anchors
        anchors = self.anchors
        # Improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset.
        pre_nms_limit = min(6000, self.anchors.shape[0])
        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
                         name="top_anchors").indices
        # batch_slice applies the gather per batch item (see utils).
        scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        anchors = utils.batch_slice(ix, lambda x: tf.gather(anchors, x),
                                    self.config.IMAGES_PER_GPU,
                                    names=["pre_nms_anchors"])
        # Apply deltas to anchors to get refined anchors.
        # [batch, N, (y1, x1, y2, x2)]
        boxes = utils.batch_slice([anchors, deltas],
                                  lambda x, y: apply_box_deltas_graph(x, y),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors"])
        # Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
        height, width = self.config.IMAGE_SHAPE[:2]
        window = np.array([0, 0, height, width]).astype(np.float32)
        boxes = utils.batch_slice(boxes,
                                  lambda x: clip_boxes_graph(x, window),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors_clipped"])
        # Filter out small boxes
        # According to Xinlei Chen's paper, this reduces detection accuracy
        # for small objects, so we're skipping it.
        # Normalize dimensions to range of 0 to 1.
        normalized_boxes = boxes / np.array([[height, width, height, width]])

        # Non-max suppression
        def nms(normalized_boxes, scores):
            indices = tf.image.non_max_suppression(
                normalized_boxes, scores, self.proposal_count,
                self.nms_threshold, name="rpn_non_max_suppression")
            proposals = tf.gather(normalized_boxes, indices)
            # Pad with zero rows if NMS kept fewer than proposal_count boxes,
            # so the output shape is fixed.
            padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
            proposals = tf.pad(proposals, [(0, padding), (0, 0)])
            return proposals
        proposals = utils.batch_slice([normalized_boxes, scores], nms,
                                      self.config.IMAGES_PER_GPU)
        return proposals

    def compute_output_shape(self, input_shape):
        # Fixed-size output: proposal_count boxes of 4 coords per batch item.
        return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
    """Implementation of log2. TF doesn't have a native implementation."""
    # Change of base: log2(x) = ln(x) / ln(2).
    numerator = tf.log(x)
    denominator = tf.log(2.0)
    return numerator / denominator
class PyramidROIAlign(KE.Layer):
    """Implements ROI Pooling on multiple levels of the feature pyramid.

    Params:
    - pool_shape: [height, width] of the output pooled regions. Usually [7, 7]
    - image_shape: [height, width, channels]. Shape of input image in pixels

    Inputs:
    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
             coordinates. Possibly padded with zeros if not enough
             boxes to fill the array.
    - Feature maps: List of feature maps from different levels of the pyramid.
                    Each is [batch, height, width, channels]

    Output:
    Pooled regions in the shape: [batch, num_boxes, height, width, channels].
    The width and height are those specified in the pool_shape in the layer
    constructor.
    """

    def __init__(self, pool_shape, image_shape, **kwargs):
        super(PyramidROIAlign, self).__init__(**kwargs)
        # Stored as tuples so compute_output_shape can concatenate them.
        self.pool_shape = tuple(pool_shape)
        self.image_shape = tuple(image_shape)

    def call(self, inputs):
        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
        boxes = inputs[0]
        # Feature Maps. List of feature maps from different level of the
        # feature pyramid. Each is [batch, height, width, channels]
        feature_maps = inputs[1:]

        # Assign each ROI to a level in the pyramid based on the ROI area.
        y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
        h = y2 - y1
        w = x2 - x1
        # Equation 1 in the Feature Pyramid Networks paper. Account for
        # the fact that our coordinates are normalized here.
        # e.g. a 224x224 ROI (in pixels) maps to P4
        image_area = tf.cast(
            self.image_shape[0] * self.image_shape[1], tf.float32)
        roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
        # Clamp to the available pyramid levels P2..P5.
        roi_level = tf.minimum(5, tf.maximum(
            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
        roi_level = tf.squeeze(roi_level, 2)

        # Loop through levels and apply ROI pooling to each. P2 to P5.
        pooled = []
        box_to_level = []
        for i, level in enumerate(range(2, 6)):
            # ix holds (batch_index, box_index) pairs of boxes at this level.
            ix = tf.where(tf.equal(roi_level, level))
            level_boxes = tf.gather_nd(boxes, ix)
            # Box indices for crop_and_resize: which batch image each box
            # should be cropped from.
            box_indices = tf.cast(ix[:, 0], tf.int32)
            # Keep track of which box is mapped to which level
            box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
            level_boxes = tf.stop_gradient(level_boxes)
            box_indices = tf.stop_gradient(box_indices)
            # Crop and Resize
            # From Mask R-CNN paper: "We sample four regular locations, so
            # that we can evaluate either max or average pooling. In fact,
            # interpolating only a single value at each bin center (without
            # pooling) is nearly as effective."
            #
            # Here we use the simplified approach of a single value per bin,
            # which is how it's done in tf.crop_and_resize()
            # Result: [batch * num_boxes, pool_height, pool_width, channels]
            pooled.append(tf.image.crop_and_resize(
                feature_maps[i], level_boxes, box_indices, self.pool_shape,
                method="bilinear"))

        # Pack pooled features into one tensor
        pooled = tf.concat(pooled, axis=0)

        # Pack box_to_level mapping into one array and add another
        # column representing the order of pooled boxes
        box_to_level = tf.concat(box_to_level, axis=0)
        box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
        box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
                                 axis=1)

        # Rearrange pooled features to match the order of the original boxes
        # Sort box_to_level by batch then box index
        # TF doesn't have a way to sort by two columns, so merge them and sort.
        # NOTE: the 100000 factor assumes fewer than 100k boxes per image.
        sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
        # top_k sorts descending; [::-1] restores ascending order.
        ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
            box_to_level)[0]).indices[::-1]
        ix = tf.gather(box_to_level[:, 2], ix)
        pooled = tf.gather(pooled, ix)

        # Re-add the batch dimension
        pooled = tf.expand_dims(pooled, 0)
        return pooled

    def compute_output_shape(self, input_shape):
        # [batch, num_boxes] + pool shape + channel count of the feature maps.
        return input_shape[0][:2] + self.pool_shape + (input_shape[1][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
    """Compute IoU overlaps between two sets of boxes.

    boxes1, boxes2: [N, (y1, x1, y2, x2)].
    Returns an overlaps matrix of shape [len(boxes1), len(boxes2)].
    """
    n1 = tf.shape(boxes1)[0]
    n2 = tf.shape(boxes2)[0]
    # 1. Repeat boxes1 and tile boxes2 so every pairing appears exactly once,
    # without loops. TF has no np.repeat() equivalent, so emulate it with
    # tf.tile() + tf.reshape().
    b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1), [1, 1, n2]), [-1, 4])
    b2 = tf.tile(boxes2, [n1, 1])
    # 2. Intersection areas.
    b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
    b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
    y1 = tf.maximum(b1_y1, b2_y1)
    x1 = tf.maximum(b1_x1, b2_x1)
    y2 = tf.minimum(b1_y2, b2_y2)
    x2 = tf.minimum(b1_x2, b2_x2)
    intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
    # 3. Union areas.
    area1 = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
    area2 = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
    union = area1 + area2 - intersection
    # 4. IoU, reshaped into the [boxes1, boxes2] matrix.
    iou = intersection / union
    return tf.reshape(iou, [n1, n2])
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
    """Generates detection targets for one image. Subsamples proposals and
    generates target class IDs, bounding box deltas, and masks for each.

    Inputs:
    proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [MAX_GT_INSTANCES] int class IDs
    gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
    gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
    deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
            bbox refinements normalized by BBOX_STD_DEV.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox
           boundaries and resized to neural network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """
    # Assertions
    asserts = [
        tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
                  name="roi_assertion"),
    ]
    with tf.control_dependencies(asserts):
        proposals = tf.identity(proposals)
    # Remove zero padding
    proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
    gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
    # Keep class IDs and masks aligned with the surviving (non-zero) boxes.
    gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
                                   name="trim_gt_class_ids")
    gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
                         name="trim_gt_masks")
    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
    non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
    crowd_boxes = tf.gather(gt_boxes, crowd_ix)
    crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)
    gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
    gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
    gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
    # Compute overlaps matrix [proposals, gt_boxes]
    overlaps = overlaps_graph(proposals, gt_boxes)
    # Compute overlaps with crowd boxes [anchors, crowds]
    crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
    crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
    # Proposals far from any crowd box are eligible as negatives.
    no_crowd_bool = (crowd_iou_max < 0.001)
    # Determine positive and negative ROIs
    roi_iou_max = tf.reduce_max(overlaps, axis=1)
    # 1. Positive ROIs are those with >= 0.5 IoU with a GT box
    positive_roi_bool = (roi_iou_max >= 0.5)
    positive_indices = tf.where(positive_roi_bool)[:, 0]
    # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
    negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
    # Subsample ROIs. Aim for 33% positive
    # Positive ROIs
    positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
                         config.ROI_POSITIVE_RATIO)
    positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
    positive_count = tf.shape(positive_indices)[0]
    # Negative ROIs. Add enough to maintain positive:negative ratio.
    # r = 1/ratio, so negatives = positives * (1 - ratio) / ratio.
    r = 1.0 / config.ROI_POSITIVE_RATIO
    negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
    negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
    # Gather selected ROIs
    positive_rois = tf.gather(proposals, positive_indices)
    negative_rois = tf.gather(proposals, negative_indices)
    # Assign positive ROIs to GT boxes.
    positive_overlaps = tf.gather(overlaps, positive_indices)
    roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)
    roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
    roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
    # Compute bbox refinement for positive ROIs
    deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
    # Normalize deltas the same way the network's regression targets are.
    deltas /= config.BBOX_STD_DEV
    # Assign positive ROIs to GT masks
    # Permute masks to [N, height, width, 1]
    transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
    # Pick the right mask for each ROI
    roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
    # Compute mask targets
    boxes = positive_rois
    if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
        # to normalized mini-mask space.
        y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
        gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
        gt_h = gt_y2 - gt_y1
        gt_w = gt_x2 - gt_x1
        y1 = (y1 - gt_y1) / gt_h
        x1 = (x1 - gt_x1) / gt_w
        y2 = (y2 - gt_y1) / gt_h
        x2 = (x2 - gt_x1) / gt_w
        boxes = tf.concat([y1, x1, y2, x2], 1)
    box_ids = tf.range(0, tf.shape(roi_masks)[0])
    # Bilinearly resample each GT mask to the fixed MASK_SHAPE target size.
    masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
                                     box_ids,
                                     config.MASK_SHAPE)
    # Remove the extra dimension from masks.
    masks = tf.squeeze(masks, axis=3)
    # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
    # binary cross entropy loss.
    masks = tf.round(masks)
    # Append negative ROIs and pad bbox deltas and masks that
    # are not used for negative ROIs with zeros.
    rois = tf.concat([positive_rois, negative_rois], axis=0)
    N = tf.shape(negative_rois)[0]
    P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
    rois = tf.pad(rois, [(0, P), (0, 0)])
    # Targets exist only for positives, so pad by negatives + fill (N + P).
    roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
    roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
    deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
    masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
    return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
    """Subsamples proposals and generates target box refinement, class_ids,
    and masks for each.

    Inputs:
    proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
    gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
              coordinates.
    gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
          coordinates
    target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
                   bbox refinements (see detection_targets_graph and
                   compute_output_shape below; the deltas are per-ROI,
                   not per-class).
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
                 Masks cropped to bbox boundaries and resized to neural
                 network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """

    def __init__(self, config, **kwargs):
        super(DetectionTargetLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        proposals = inputs[0]
        gt_class_ids = inputs[1]
        gt_boxes = inputs[2]
        gt_masks = inputs[3]
        # Slice the batch and run a graph for each slice
        # TODO: Rename target_bbox to target_deltas for clarity
        names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
        outputs = utils.batch_slice(
            [proposals, gt_class_ids, gt_boxes, gt_masks],
            lambda w, x, y, z: detection_targets_graph(
                w, x, y, z, self.config),
            self.config.IMAGES_PER_GPU, names=names)
        return outputs

    def compute_output_shape(self, input_shape):
        return [
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # rois
            (None, 1),  # class_ids
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # deltas
            (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
             self.config.MASK_SHAPE[1])  # masks
        ]

    def compute_mask(self, inputs, mask=None):
        # No Keras masking is propagated through this layer.
        return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def clip_to_window(window, boxes):
    """Clip boxes (in place) so they lie inside a window.

    window: (y1, x1, y2, x2). The window in the image we want to clip to.
    boxes: [N, (y1, x1, y2, x2)]. Modified in place and also returned.
    """
    wy1, wx1, wy2, wx2 = window[0], window[1], window[2], window[3]
    # Each coordinate column is clamped to its axis's window bounds.
    for col, lo, hi in ((0, wy1, wy2), (1, wx1, wx2),
                        (2, wy1, wy2), (3, wx1, wx2)):
        boxes[:, col] = np.maximum(np.minimum(boxes[:, col], hi), lo)
    return boxes
def refine_detections_graph(rois, probs, deltas, window, config):
    """Refine classified proposals and filter overlaps and return final
    detections.

    Inputs:
        rois: [N, (y1, x1, y2, x2)] in normalized coordinates
        probs: [N, num_classes]. Class probabilities.
        deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
                bounding box deltas.
        window: (y1, x1, y2, x2) in image coordinates. The part of the image
            that contains the image excluding the padding.

    Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where
        coordinates are in image domain.
    """
    # Class IDs per ROI
    class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
    # Class probability of the top class of each ROI
    indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
    class_scores = tf.gather_nd(probs, indices)
    # Class-specific bounding box deltas
    deltas_specific = tf.gather_nd(deltas, indices)
    # Apply bounding box deltas
    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
    refined_rois = apply_box_deltas_graph(
        rois, deltas_specific * config.BBOX_STD_DEV)
    # Convert coordinates to image domain
    # TODO: better to keep them normalized until later
    height, width = config.IMAGE_SHAPE[:2]
    refined_rois *= tf.constant([height, width, height, width], dtype=tf.float32)
    # Clip boxes to image window
    refined_rois = clip_boxes_graph(refined_rois, window)
    # Round and cast to int since we're dealing with pixels now
    refined_rois = tf.to_int32(tf.rint(refined_rois))
    # TODO: Filter out boxes with zero area
    # Filter out background boxes (class 0 is background by convention here).
    keep = tf.where(class_ids > 0)[:, 0]
    # Filter out low confidence boxes
    if config.DETECTION_MIN_CONFIDENCE:
        conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
        # Intersect the two keep sets via a sparse set operation.
        keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                        tf.expand_dims(conf_keep, 0))
        keep = tf.sparse_tensor_to_dense(keep)[0]
    # Apply per-class NMS
    # 1. Prepare variables
    pre_nms_class_ids = tf.gather(class_ids, keep)
    pre_nms_scores = tf.gather(class_scores, keep)
    pre_nms_rois = tf.gather(refined_rois, keep)
    unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]

    def nms_keep_map(class_id):
        """Apply Non-Maximum Suppression on ROIs of the given class."""
        # Indices of ROIs of the given class
        ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
        # Apply NMS
        class_keep = tf.image.non_max_suppression(
            tf.to_float(tf.gather(pre_nms_rois, ixs)),
            tf.gather(pre_nms_scores, ixs),
            max_output_size=config.DETECTION_MAX_INSTANCES,
            iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices back into the `keep` index space.
        class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
        # Pad with -1 so returned tensors have the same shape
        gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
        class_keep = tf.pad(class_keep, [(0, gap)],
                            mode='CONSTANT', constant_values=-1)
        # Set shape so map_fn() can infer result shape
        class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
        return class_keep
    # 2. Map over class IDs
    nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
                         dtype=tf.int64)
    # 3. Merge results into one list, and remove -1 padding
    nms_keep = tf.reshape(nms_keep, [-1])
    nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
    # 4. Compute intersection between keep and nms_keep
    keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                    tf.expand_dims(nms_keep, 0))
    keep = tf.sparse_tensor_to_dense(keep)[0]
    # Keep top detections
    roi_count = config.DETECTION_MAX_INSTANCES
    class_scores_keep = tf.gather(class_scores, keep)
    num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
    top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
    keep = tf.gather(keep, top_ids)
    # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
    # Coordinates are in image domain.
    detections = tf.concat([
        tf.to_float(tf.gather(refined_rois, keep)),
        tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
        tf.gather(class_scores, keep)[..., tf.newaxis]
        ], axis=1)
    # Pad with zeros if detections < DETECTION_MAX_INSTANCES
    gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
    detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
    return detections
class DetectionLayer(KE.Layer):
    """Takes classified proposal boxes and their bounding box deltas and
    returns the final detection boxes.

    Returns:
    [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
    coordinates are in image domain
    """

    def __init__(self, config=None, **kwargs):
        super(DetectionLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        rois, mrcnn_class, mrcnn_bbox, image_meta = inputs
        # The image window (area excluding padding) comes from the image meta.
        _, _, window, _ = parse_image_meta_graph(image_meta)
        # Run detection refinement graph on each item in the batch.
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
            self.config.IMAGES_PER_GPU)
        # Reshape to [batch, num_detections, (y1, x1, y2, x2, class_id, score)]
        # with coordinates in pixels.
        out_shape = [self.config.BATCH_SIZE,
                     self.config.DETECTION_MAX_INSTANCES, 6]
        return tf.reshape(detections_batch, out_shape)

    def compute_output_shape(self, input_shape):
        return (None, self.config.DETECTION_MAX_INSTANCES, 6)
# Region Proposal Network (RPN)
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of Region Proposal Network.

    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).

    Returns:
        rpn_class_logits: [batch, anchors, 2] Anchor classifier logits
                          (before softmax)
        rpn_probs: [batch, anchors, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the featuremap
    # is not even.
    # Shared convolutional base of the RPN.
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride,
                       name='rpn_conv_shared')(feature_map)

    # Anchor scores: [batch, height, width, anchors per location * 2],
    # reshaped to [batch, anchors, 2] and softmaxed over BG/FG.
    class_raw = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                          activation='linear', name='rpn_class_raw')(shared)
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(class_raw)
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)

    # Bounding box refinement: [batch, height, width, anchors per location * 4]
    # where the last dim holds the deltas, reshaped to [batch, anchors, 4].
    bbox_raw = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                         activation='linear', name='rpn_bbox_pred')(shared)
    rpn_bbox = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(bbox_raw)

    return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    """Builds a Keras model of the Region Proposal Network.
    It wraps the RPN graph so it can be used multiple times with shared
    weights.

    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).
    depth: Depth of the backbone feature map.

    Returns a Keras Model object. The model outputs, when called, are:
        rpn_class_logits: [batch, anchors, 2] Anchor classifier logits
                          (before softmax)
        rpn_probs: [batch, anchors, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    feature_map_input = KL.Input(shape=[None, None, depth],
                                 name="input_rpn_feature_map")
    rpn_outputs = rpn_graph(feature_map_input, anchors_per_location,
                            anchor_stride)
    return KM.Model([feature_map_input], rpn_outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps,
                         image_shape, pool_size, num_classes):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_shape: [height, width, depth]
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results

    Returns:
        logits: [N, NUM_CLASSES] classifier logits (before softmax)
        probs: [N, NUM_CLASSES] classifier probabilities
        bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to
                     proposal boxes
    """
    # ROI Pooling: [batch, num_boxes, pool_height, pool_width, channels]
    pooled = PyramidROIAlign([pool_size, pool_size], image_shape,
                             name="roi_align_classifier")([rois] + feature_maps)
    # Two 1024-wide fully connected layers, implemented as Conv2D for
    # consistency with the rest of the graph.
    net = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
                             name="mrcnn_class_conv1")(pooled)
    net = KL.TimeDistributed(BatchNorm(axis=3), name='mrcnn_class_bn1')(net)
    net = KL.Activation('relu')(net)
    net = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
                             name="mrcnn_class_conv2")(net)
    net = KL.TimeDistributed(BatchNorm(axis=3),
                             name='mrcnn_class_bn2')(net)
    net = KL.Activation('relu')(net)
    # Collapse the 1x1 spatial dims: [batch, num_boxes, 1024]
    shared = KL.Lambda(lambda t: K.squeeze(K.squeeze(t, 3), 2),
                       name="pool_squeeze")(net)

    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # BBox head: [batch, num_boxes, num_classes * 4], reshaped to
    # [batch, num_boxes, num_classes, (dy, dx, log(dh), log(dw))]
    bbox_fc = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                                 name='mrcnn_bbox_fc')(shared)
    fc_shape = K.int_shape(bbox_fc)
    mrcnn_bbox = KL.Reshape((fc_shape[1], num_classes, 4),
                            name="mrcnn_bbox")(bbox_fc)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps,
                         image_shape, pool_size, num_classes):
    """Builds the computation graph of the mask head of Feature Pyramid Network.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_shape: [height, width, depth]
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results

    Returns: Masks [batch, roi_count, height, width, num_classes]
    """
    # ROI Pooling: [batch, boxes, pool_height, pool_width, channels]
    x = PyramidROIAlign([pool_size, pool_size], image_shape,
                        name="roi_align_mask")([rois] + feature_maps)
    # Four identical 3x3 conv + BN + ReLU stages (mrcnn_mask_conv1..4).
    for i in range(1, 5):
        x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                               name="mrcnn_mask_conv%d" % i)(x)
        x = KL.TimeDistributed(BatchNorm(axis=3),
                               name='mrcnn_mask_bn%d' % i)(x)
        x = KL.Activation('relu')(x)
    # Upsample 2x, then predict one sigmoid mask per class.
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                           name="mrcnn_mask")(x)
    return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
    """Implements Smooth-L1 loss.
    y_true and y_pred are typically: [N, 4], but could be any shape.
    """
    diff = K.abs(y_true - y_pred)
    # Elementwise switch: quadratic below a difference of 1, linear above.
    is_small = K.cast(K.less(diff, 1.0), "float32")
    quadratic = 0.5 * diff**2
    linear = diff - 0.5
    return is_small * quadratic + (1 - is_small) * linear
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Drop the trailing singleton dim to simplify indexing.
    rpn_match = tf.squeeze(rpn_match, -1)
    # Convert the -1/+1 match values to 0/1 class labels (FG == 1).
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Only positive and negative anchors contribute to the loss;
    # neutral anchors (match value = 0) are excluded.
    contributing = tf.where(K.not_equal(rpn_match, 0))
    logits = tf.gather_nd(rpn_class_logits, contributing)
    labels = tf.gather_nd(anchor_class, contributing)
    # Crossentropy loss; guard against an empty selection.
    loss = K.sparse_categorical_crossentropy(target=labels,
                                             output=logits,
                                             from_logits=True)
    return K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))
    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)
    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)
    # Delegate to the shared smooth_l1_loss() helper instead of duplicating
    # the Smooth-L1 formula here (formula is identical element-wise).
    loss = smooth_l1_loss(y_true=target_bbox, y_pred=rpn_bbox)
    # If there are no positive anchors the mean would be undefined; return 0.
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.

    Returns a scalar loss tensor.
    """
    target_class_ids = tf.cast(target_class_ids, 'int64')
    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    #       images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)
    # Per-ROI cross-entropy loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)
    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active
    # Compute loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    # NOTE(review): if no prediction belongs to an active class this is 0/0
    # and yields NaN — presumably rare in practice, but worth confirming.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Flatten batch and ROI dimensions into one for easier indexing.
    class_ids_flat = K.reshape(target_class_ids, (-1,))
    gt_deltas = K.reshape(target_bbox, (-1, 4))
    num_classes = K.int_shape(pred_bbox)[2]
    pred_deltas = K.reshape(pred_bbox, (-1, num_classes, 4))

    # Only positive ROIs contribute to the loss, and for each one only
    # the deltas predicted for its ground-truth class.
    pos_ix = tf.where(class_ids_flat > 0)[:, 0]
    pos_class_ids = tf.cast(tf.gather(class_ids_flat, pos_ix), tf.int64)
    gather_ix = tf.stack([pos_ix, pos_class_ids], axis=1)

    # Select the contributing (true, predicted) delta pairs.
    gt_deltas = tf.gather(gt_deltas, pos_ix)
    pred_deltas = tf.gather_nd(pred_deltas, gather_ix)

    # Smooth-L1 loss; zero when there are no positive ROIs.
    loss = K.switch(tf.size(gt_deltas) > 0,
                    smooth_l1_loss(y_true=gt_deltas, y_pred=pred_deltas),
                    tf.constant(0.0))
    # Reduce to a scalar, reshaped to [1, 1] as the training graph expects.
    return K.reshape(K.mean(loss), [1, 1])
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
        with values from 0 to 1.
    """
    # Collapse batch and ROI dimensions into one.
    class_ids = K.reshape(target_class_ids, (-1,))
    true_shape = tf.shape(target_masks)
    true_masks = K.reshape(target_masks,
                           (-1, true_shape[2], true_shape[3]))
    pred_shape = tf.shape(pred_masks)
    flat_pred = K.reshape(pred_masks,
                          (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
    # Permute predicted masks to [N, num_classes, height, width]
    flat_pred = tf.transpose(flat_pred, [0, 3, 1, 2])

    # Only positive ROIs contribute to the loss, and for each one only
    # the mask of its ground-truth class.
    pos_ix = tf.where(class_ids > 0)[:, 0]
    pos_class_ids = tf.cast(tf.gather(class_ids, pos_ix), tf.int64)
    gather_ix = tf.stack([pos_ix, pos_class_ids], axis=1)

    # Select the contributing (true, predicted) mask pairs.
    y_true = tf.gather(true_masks, pos_ix)
    y_pred = tf.gather_nd(flat_pred, gather_ix)

    # Binary cross-entropy; zero when there are no positive ROIs.
    loss = K.switch(tf.size(y_true) > 0,
                    K.binary_crossentropy(target=y_true, output=y_pred),
                    tf.constant(0.0))
    # Reduce to a scalar, reshaped to [1, 1] as the training graph expects.
    return K.reshape(K.mean(loss), [1, 1])
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False,
                  use_mini_mask=False):
    """Load and return ground truth data for an image (image, mask, bounding boxes).

    augment: If true, apply random image augmentation. Currently, only
        horizontal flipping is offered.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
        224x224 and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.

    Returns:
    image: [height, width, 3]
    shape: the original shape of the image before resizing and cropping.
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
    # Load image and mask
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    # Remember the pre-resize shape; it goes into the image meta.
    shape = image.shape
    # Resize image, then apply the same scale and padding to the mask so
    # boxes derived from the mask stay aligned with the image.
    image, window, scale, padding = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        max_dim=config.IMAGE_MAX_DIM,
        padding=config.IMAGE_PADDING)
    mask = utils.resize_mask(mask, scale, padding)

    # Random horizontal flips. Image and mask are flipped together.
    if augment:
        if random.randint(0, 1):
            image = np.fliplr(image)
            mask = np.fliplr(mask)

    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    # Boxes are recomputed from the (possibly flipped) mask rather than
    # transformed, so they always match the mask exactly.
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage.
    # Must happen after extract_bboxes(), which needs full-size masks.
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, shape, window, active_class_ids)

    return image, image_meta, class_ids, bbox, mask
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
    """Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.

    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.

    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped
           to bbox boundaries and resized to neural network output size.
    """
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)

    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to XinLei Chen's paper, it doesn't help.

    # Trim empty padding in gt_boxes and gt_masks parts
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]

    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])

    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)

    # Assign ROIs to GT boxes: each ROI is matched to its best-overlapping GT.
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]

    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]

    # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
    # TODO: To hard example mine or not to hard example mine, that's the question
    # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
    bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]

    # Subsample ROIs. Aim for 33% foreground.
    # FG
    fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
    if fg_ids.shape[0] > fg_roi_count:
        keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
    else:
        keep_fg_ids = fg_ids
    # BG
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
    if bg_ids.shape[0] > remaining:
        keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
    else:
        keep_bg_ids = bg_ids
    # Combine indicies of ROIs to keep
    keep = np.concatenate([keep_fg_ids, keep_bg_ids])
    # Need more?
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
    if remaining > 0:
        # Looks like we don't have enough samples to maintain the desired
        # balance. Reduce requirements and fill in the rest. This is
        # likely different from the Mask RCNN paper.

        # There is a small chance we have neither fg nor bg samples.
        if keep.shape[0] == 0:
            # Pick bg regions with easier IoU threshold
            # NOTE(review): the comment above says "easier IoU threshold" but
            # the threshold is the same 0.5 used earlier — confirm whether a
            # lower cutoff was intended here.
            bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
            assert bg_ids.shape[0] >= remaining
            keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
            assert keep_bg_ids.shape[0] == remaining
            keep = np.concatenate([keep, keep_bg_ids])
        else:
            # Fill the rest with repeated bg rois.
            keep_extra_ids = np.random.choice(
                keep_bg_ids, remaining, replace=True)
            keep = np.concatenate([keep, keep_extra_ids])
    assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
        "keep doesn't match ROI batch size {}, {}".format(
            keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)

    # Reset the gt boxes assigned to BG ROIs.
    rpn_roi_gt_boxes[keep_bg_ids, :] = 0
    rpn_roi_gt_class_ids[keep_bg_ids] = 0

    # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
    rois = rpn_rois[keep]
    roi_gt_boxes = rpn_roi_gt_boxes[keep]
    roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
    roi_gt_assignment = rpn_roi_iou_argmax[keep]

    # Class-aware bbox deltas. [y, x, log(h), log(w)]
    # Only the slot of the ROI's ground-truth class is filled; other classes
    # stay zero.
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
                       config.NUM_CLASSES, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV

    # Generate class-specific target masks.
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]

        if config.USE_MINI_MASK:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 —
            # this code requires an older SciPy; confirm pinned version.
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(scipy.misc.imresize(class_mask.astype(float), (gt_h, gt_w),
                                             interp='nearest') / 255.0).astype(bool)
            # Place the mini batch in the placeholder
            class_mask = placeholder

        # Pick part of the mask and resize it
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = scipy.misc.imresize(
            m.astype(float), config.MASK_SHAPE, interp='nearest') / 255.0
        masks[i, :, :, class_id] = mask

    return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
    """Given the anchors and GT boxes, compute overlaps and identify positive
    anchors and deltas to refine them to match their corresponding GT boxes.

    anchors: [num_anchors, (y1, x1, y2, x2)]
    gt_class_ids: [num_gt_boxes] Integer class IDs.
    gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]

    Returns:
    rpn_match: [N] (int32) matches between anchors and GT boxes.
               1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    """
    # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
    # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    # Note rpn_bbox is indexed densely by positive-anchor order, not by
    # anchor index; unused rows stay zero.
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))

    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = np.where(gt_class_ids < 0)[0]
    if crowd_ix.shape[0] > 0:
        # Filter out crowds from ground truth class IDs and boxes
        non_crowd_ix = np.where(gt_class_ids > 0)[0]
        crowd_boxes = gt_boxes[crowd_ix]
        gt_class_ids = gt_class_ids[non_crowd_ix]
        gt_boxes = gt_boxes[non_crowd_ix]
        # Compute overlaps with crowd boxes [anchors, crowds]
        crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
        crowd_iou_max = np.amax(crowd_overlaps, axis=1)
        # Anchors touching a crowd box are excluded from the negative set.
        no_crowd_bool = (crowd_iou_max < 0.001)
    else:
        # All anchors don't intersect a crowd
        no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)

    # Compute overlaps [num_anchors, num_gt_boxes]
    overlaps = utils.compute_overlaps(anchors, gt_boxes)

    # Match anchors to GT Boxes
    # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
    # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
    # Neutral anchors are those that don't match the conditions above,
    # and they don't influence the loss function.
    # However, don't keep any GT box unmatched (rare, but happens). Instead,
    # match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # TODO: If multiple anchors have the same IoU match all of them
    gt_iou_argmax = np.argmax(overlaps, axis=0)
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1

    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0

    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]

        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w

        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1

    return rpn_match, rpn_bbox
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels. May be
        None or empty, in which case all ROIs are sampled uniformly over
        the whole image (previously this crashed with a division by zero).

    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
    """
    # placeholder
    rois = np.zeros((count, 4), dtype=np.int32)

    if gt_boxes is not None and gt_boxes.shape[0] > 0:
        # Generate random ROIs around GT boxes (90% of count)
        rois_per_box = int(0.9 * count / gt_boxes.shape[0])
        for i in range(gt_boxes.shape[0]):
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
            h = gt_y2 - gt_y1
            w = gt_x2 - gt_x1
            # random boundaries: sample within one box-size of the GT box,
            # clipped to the image.
            r_y1 = max(gt_y1 - h, 0)
            r_y2 = min(gt_y2 + h, image_shape[0])
            r_x1 = max(gt_x1 - w, 0)
            r_x2 = min(gt_x2 + w, image_shape[1])

            # To avoid generating boxes with zero area, we generate double what
            # we need and filter out the extra. If we get fewer valid boxes
            # than we need, we loop and try again.
            while True:
                y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
                x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
                # Filter out zero area boxes
                threshold = 1
                y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                            threshold][:rois_per_box]
                x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                            threshold][:rois_per_box]
                if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
                    break

            # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
            # into y1, x1, y2, x2 order
            x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
            y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
            box_rois = np.hstack([y1, x1, y2, x2])
            rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois

        # Generate random ROIs anywhere in the image (10% of count)
        remaining_count = count - (rois_per_box * gt_boxes.shape[0])
    else:
        # No ground truth boxes: sample every ROI uniformly over the image.
        remaining_count = count

    # To avoid generating boxes with zero area, we generate double what
    # we need and filter out the extra. If we get fewer valid boxes
    # than we need, we loop and try again.
    while True:
        y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
        x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
        # Filter out zero area boxes
        threshold = 1
        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                    threshold][:remaining_count]
        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                    threshold][:remaining_count]
        if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
            break

    # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
    # into y1, x1, y2, x2 order
    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    global_rois = np.hstack([y1, x1, y2, x2])
    rois[-remaining_count:] = global_rois
    return rois
def data_generator(dataset, config, shuffle=True, augment=True, random_rois=0,
                   batch_size=1, detection_targets=False):
    """A generator that returns images and corresponding target class ids,
    bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: If True, applies image augmentation to images (currently only
             horizontal flips are supported)
    random_rois: If > 0 then generate proposals to be used to train the
                 network classifier and mask heads. Useful if training
                 the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.

    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differs depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, size of image meta]
    - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
    - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
    - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
    - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
                are those of the image unless use_mini_mask is True, in which
                case they are defined in MINI_MASK_SHAPE.

    outputs list: Usually empty in regular training. But if detection_targets
        is True then the outputs list contains target class_ids, bbox deltas,
        and masks.
    """
    b = 0  # batch item index
    image_index = -1
    image_ids = np.copy(dataset.image_ids)
    # Counts consecutive per-image failures; aborts after more than 5.
    error_count = 0

    # Anchors
    # [anchor_count, (y1, x1, y2, x2)]
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             config.BACKBONE_SHAPES,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)

    # Keras requires a generator to run indefinitely.
    while True:
        try:
            # Increment index to pick next image. Shuffle if at the start of an epoch.
            image_index = (image_index + 1) % len(image_ids)
            if shuffle and image_index == 0:
                np.random.shuffle(image_ids)

            # Get GT bounding boxes and masks for image.
            image_id = image_ids[image_index]
            image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
                load_image_gt(dataset, config, image_id, augment=augment,
                              use_mini_mask=config.USE_MINI_MASK)

            # Skip images that have no instances. This can happen in cases
            # where we train on a subset of classes and the image doesn't
            # have any of the classes we care about.
            if not np.any(gt_class_ids > 0):
                continue

            # RPN Targets
            rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
                                                    gt_class_ids, gt_boxes, config)

            # Mask R-CNN Targets
            if random_rois:
                rpn_rois = generate_random_rois(
                    image.shape, random_rois, gt_class_ids, gt_boxes)
                if detection_targets:
                    rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
                        build_detection_targets(
                            rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)

            # Init batch arrays. Allocated lazily on the first item so the
            # shapes can be taken from the first loaded sample.
            if b == 0:
                batch_image_meta = np.zeros(
                    (batch_size,) + image_meta.shape, dtype=image_meta.dtype)
                batch_rpn_match = np.zeros(
                    [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
                batch_rpn_bbox = np.zeros(
                    [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
                batch_images = np.zeros(
                    (batch_size,) + image.shape, dtype=np.float32)
                batch_gt_class_ids = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
                batch_gt_boxes = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
                if config.USE_MINI_MASK:
                    batch_gt_masks = np.zeros((batch_size, config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1],
                                               config.MAX_GT_INSTANCES))
                else:
                    batch_gt_masks = np.zeros(
                        (batch_size, image.shape[0], image.shape[1], config.MAX_GT_INSTANCES))
                if random_rois:
                    batch_rpn_rois = np.zeros(
                        (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
                    if detection_targets:
                        batch_rois = np.zeros(
                            (batch_size,) + rois.shape, dtype=rois.dtype)
                        batch_mrcnn_class_ids = np.zeros(
                            (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
                        batch_mrcnn_bbox = np.zeros(
                            (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
                        batch_mrcnn_mask = np.zeros(
                            (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)

            # If more instances than fits in the array, sub-sample from them.
            if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
                ids = np.random.choice(
                    np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
                gt_class_ids = gt_class_ids[ids]
                gt_boxes = gt_boxes[ids]
                gt_masks = gt_masks[:, :, ids]

            # Add to batch
            batch_image_meta[b] = image_meta
            batch_rpn_match[b] = rpn_match[:, np.newaxis]
            batch_rpn_bbox[b] = rpn_bbox
            batch_images[b] = mold_image(image.astype(np.float32), config)
            batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
            batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
            batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
            if random_rois:
                batch_rpn_rois[b] = rpn_rois
                if detection_targets:
                    batch_rois[b] = rois
                    batch_mrcnn_class_ids[b] = mrcnn_class_ids
                    batch_mrcnn_bbox[b] = mrcnn_bbox
                    batch_mrcnn_mask[b] = mrcnn_mask
            b += 1

            # Batch full?
            if b >= batch_size:
                inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
                          batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
                outputs = []

                if random_rois:
                    inputs.extend([batch_rpn_rois])
                    if detection_targets:
                        inputs.extend([batch_rois])
                        # Keras requires that output and targets have the same number of dimensions
                        batch_mrcnn_class_ids = np.expand_dims(
                            batch_mrcnn_class_ids, -1)
                        outputs.extend(
                            [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])

                yield inputs, outputs

                # start a new batch
                b = 0
        except (GeneratorExit, KeyboardInterrupt):
            raise
        except:
            # Log it and skip the image. Deliberate best-effort behavior:
            # a single bad image should not kill a long training run.
            logging.exception("Error processing image {}".format(
                dataset.image_info[image_id]))
            error_count += 1
            if error_count > 5:
                raise
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
    """
    mode: Either "training" or "inference"
    config: A Sub-class of the Config class
    model_dir: Directory to save training logs and trained weights
    """
    assert mode in ['training', 'inference']
    self.mode = mode
    self.config = config
    self.model_dir = model_dir
    # Initialize the log directory before building the Keras graph.
    # NOTE(review): set_log_dir is defined outside this view — confirm it
    # only sets paths/epoch counters and has no other side effects.
    self.set_log_dir()
    self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
    """Build Mask R-CNN architecture.

    mode: Either "training" or "inference". The inputs and
        outputs of the model differ accordingly.
    config: A Sub-class of the Config class.

    Returns the assembled Keras model (wrapped in ParallelModel when
    config.GPU_COUNT > 1).
    """
    assert mode in ['training', 'inference']

    # Image size must be dividable by 2 multiple times
    # (the FPN downscales by 2 six times; fractional sizes would break the
    # upsample/add alignment).
    h, w = config.IMAGE_SHAPE[:2]
    if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
        raise Exception("Image size must be dividable by 2 at least 6 times "
                        "to avoid fractions when downscaling and upscaling."
                        "For example, use 256, 320, 384, 448, 512, ... etc. ")

    # Inputs
    input_image = KL.Input(
        shape=config.IMAGE_SHAPE.tolist(), name="input_image")
    input_image_meta = KL.Input(shape=[None], name="input_image_meta")
    if mode == "training":
        # RPN GT
        input_rpn_match = KL.Input(
            shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
        input_rpn_bbox = KL.Input(
            shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)

        # Detection GT (class IDs, bounding boxes, and masks)
        # 1. GT Class IDs (zero padded)
        input_gt_class_ids = KL.Input(
            shape=[None], name="input_gt_class_ids", dtype=tf.int32)
        # 2. GT Boxes in pixels (zero padded)
        # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
        input_gt_boxes = KL.Input(
            shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
        # Normalize coordinates to the 0-1 range used inside the graph.
        h, w = K.shape(input_image)[1], K.shape(input_image)[2]
        image_scale = K.cast(K.stack([h, w, h, w], axis=0), tf.float32)
        gt_boxes = KL.Lambda(lambda x: x / image_scale)(input_gt_boxes)
        # 3. GT Masks (zero padded)
        # [batch, height, width, MAX_GT_INSTANCES]
        if config.USE_MINI_MASK:
            input_gt_masks = KL.Input(
                shape=[config.MINI_MASK_SHAPE[0],
                       config.MINI_MASK_SHAPE[1], None],
                name="input_gt_masks", dtype=bool)
        else:
            input_gt_masks = KL.Input(
                shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                name="input_gt_masks", dtype=bool)

    # Build the shared convolutional layers.
    # Bottom-up Layers
    # Returns a list of the last layers of each stage, 5 in total.
    # NOTE(review): this comment said "Don't create the thead (stage 5)" but
    # stage5=True is passed and C5 is used — the comment appears stale.
    _, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True)
    # Top-down Layers
    # TODO: add assert to verify feature map sizes match what's in config
    P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)
    P4 = KL.Add(name="fpn_p4add")([
        KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
        KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])
    P3 = KL.Add(name="fpn_p3add")([
        KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
        KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])
    P2 = KL.Add(name="fpn_p2add")([
        KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
        KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])
    # Attach 3x3 conv to all P layers to get the final feature maps.
    P2 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p2")(P2)
    P3 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p3")(P3)
    P4 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p4")(P4)
    P5 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p5")(P5)
    # P6 is used for the 5th anchor scale in RPN. Generated by
    # subsampling from P5 with stride of 2.
    P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)

    # Note that P6 is used in RPN, but not in the classifier heads.
    rpn_feature_maps = [P2, P3, P4, P5, P6]
    mrcnn_feature_maps = [P2, P3, P4, P5]

    # Generate Anchors
    self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                                  config.RPN_ANCHOR_RATIOS,
                                                  config.BACKBONE_SHAPES,
                                                  config.BACKBONE_STRIDES,
                                                  config.RPN_ANCHOR_STRIDE)

    # RPN Model: one shared model applied to every pyramid level.
    rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                          len(config.RPN_ANCHOR_RATIOS), 256)
    # Loop through pyramid layers
    layer_outputs = []  # list of lists
    for p in rpn_feature_maps:
        layer_outputs.append(rpn([p]))
    # Concatenate layer outputs
    # Convert from list of lists of level outputs to list of lists
    # of outputs across levels.
    # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
    output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
    outputs = list(zip(*layer_outputs))
    outputs = [KL.Concatenate(axis=1, name=n)(list(o))
               for o, n in zip(outputs, output_names)]

    rpn_class_logits, rpn_class, rpn_bbox = outputs

    # Generate proposals
    # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
    # and zero padded.
    proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
        else config.POST_NMS_ROIS_INFERENCE
    rpn_rois = ProposalLayer(proposal_count=proposal_count,
                             nms_threshold=config.RPN_NMS_THRESHOLD,
                             name="ROI",
                             anchors=self.anchors,
                             config=config)([rpn_class, rpn_bbox])

    if mode == "training":
        # Class ID mask to mark class IDs supported by the dataset the image
        # came from.
        _, _, _, active_class_ids = KL.Lambda(lambda x: parse_image_meta_graph(x),
                                              mask=[None, None, None, None])(input_image_meta)

        if not config.USE_RPN_ROIS:
            # Ignore predicted ROIs and use ROIs provided as an input.
            input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
                                  name="input_roi", dtype=np.int32)
            # Normalize coordinates to 0-1 range.
            target_rois = KL.Lambda(lambda x: K.cast(
                x, tf.float32) / image_scale[:4])(input_rois)
        else:
            target_rois = rpn_rois

        # Generate detection targets
        # Subsamples proposals and generates target outputs for training
        # Note that proposal class IDs, gt_boxes, and gt_masks are zero
        # padded. Equally, returned rois and targets are zero padded.
        rois, target_class_ids, target_bbox, target_mask =\
            DetectionTargetLayer(config, name="proposal_targets")([
                target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])

        # Network Heads
        # TODO: verify that this handles zero padded ROIs
        mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
            fpn_classifier_graph(rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
                                 config.POOL_SIZE, config.NUM_CLASSES)

        mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
                                          config.IMAGE_SHAPE,
                                          config.MASK_POOL_SIZE,
                                          config.NUM_CLASSES)

        # TODO: clean up (use tf.identify if necessary)
        output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)

        # Losses
        rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
            [input_rpn_match, rpn_class_logits])
        rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
            [input_rpn_bbox, input_rpn_match, rpn_bbox])
        class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
            [target_class_ids, mrcnn_class_logits, active_class_ids])
        bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
            [target_bbox, target_class_ids, mrcnn_bbox])
        mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
            [target_mask, target_class_ids, mrcnn_mask])

        # Model
        inputs = [input_image, input_image_meta,
                  input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
        if not config.USE_RPN_ROIS:
            inputs.append(input_rois)
        outputs = [rpn_class_logits, rpn_class, rpn_bbox,
                   mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
                   rpn_rois, output_rois,
                   rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
        model = KM.Model(inputs, outputs, name='mask_rcnn')
    else:
        # Network Heads
        # Proposal classifier and BBox regressor heads
        mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
            fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
                                 config.POOL_SIZE, config.NUM_CLASSES)

        # Detections
        # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in image coordinates
        detections = DetectionLayer(config, name="mrcnn_detection")(
            [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])

        # Convert boxes to normalized coordinates
        # TODO: let DetectionLayer return normalized coordinates to avoid
        #       unnecessary conversions
        h, w = config.IMAGE_SHAPE[:2]
        detection_boxes = KL.Lambda(
            lambda x: x[..., :4] / np.array([h, w, h, w]))(detections)

        # Create masks for detections
        mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
                                          config.IMAGE_SHAPE,
                                          config.MASK_POOL_SIZE,
                                          config.NUM_CLASSES)

        model = KM.Model([input_image, input_image_meta],
                         [detections, mrcnn_class, mrcnn_bbox,
                          mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
                         name='mask_rcnn')

    # Add multi-GPU support.
    if config.GPU_COUNT > 1:
        from parallel_model import ParallelModel
        model = ParallelModel(model, config.GPU_COUNT)

    return model
def find_last(self):
    """Locate the newest checkpoint of the most recently trained model.

    Returns:
        log_dir: Directory of the latest run (events + weights), or None
            if no run directory matches the config name.
        checkpoint_path: Path to the newest checkpoint file, or None if
            the run directory holds no checkpoints yet.
    """
    # Each sub-directory of model_dir is one training run, prefixed with
    # the lower-cased config name.
    prefix = self.config.NAME.lower()
    run_dirs = sorted(d for d in next(os.walk(self.model_dir))[1]
                      if d.startswith(prefix))
    if not run_dirs:
        return None, None
    # Lexicographic order matches chronological order for the timestamped names.
    latest_dir = os.path.join(self.model_dir, run_dirs[-1])
    # Checkpoint files inside a run are named "mask_rcnn...".
    ckpts = sorted(f for f in next(os.walk(latest_dir))[2]
                   if f.startswith("mask_rcnn"))
    if not ckpts:
        return latest_dir, None
    return latest_dir, os.path.join(latest_dir, ckpts[-1])
def load_weights(self, filepath, by_name=False, exclude=None):
    """Modified version of the corresponding Keras function with
    the addition of multi-GPU support and the ability to exclude
    some layers from loading.

    filepath: Path to an HDF5 weights file.
    by_name: If True, load weights by matching layer names. Forced to
        True whenever `exclude` is given.
    exclude: list of layer names to exclude from loading.
    """
    import h5py
    from keras.engine import topology

    if exclude:
        by_name = True

    if h5py is None:
        raise ImportError('`load_weights` requires h5py.')
    f = h5py.File(filepath, mode='r')
    # Full-model HDF5 files nest the weights under 'model_weights'.
    if 'layer_names' not in f.attrs and 'model_weights' in f:
        f = f['model_weights']

    # In multi-GPU training, we wrap the model. Get layers
    # of the inner model because they have the weights.
    keras_model = self.keras_model
    layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
        else keras_model.layers

    # Exclude some layers
    if exclude:
        layers = filter(lambda l: l.name not in exclude, layers)

    if by_name:
        topology.load_weights_from_hdf5_group_by_name(f, layers)
    else:
        topology.load_weights_from_hdf5_group(f, layers)
    if hasattr(f, 'close'):
        f.close()

    # Update the log directory so training resumes at the right epoch.
    self.set_log_dir(filepath)
def get_imagenet_weights(self):
    """Download (or fetch from the local Keras cache) ResNet50 weights
    pre-trained on ImageNet, without the top classification layers.

    Returns the local filesystem path to the weights file.
    """
    from keras.utils.data_utils import get_file
    weights_url = ('https://github.com/fchollet/deep-learning-models/'
                   'releases/download/v0.2/'
                   'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
    # get_file caches the download under ~/.keras/models and verifies the hash.
    return get_file(
        'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
        weights_url,
        cache_subdir='models',
        md5_hash='a268eb855778b3df3c7506639542a6af')
def compile(self, learning_rate, momentum):
    """Gets the model ready for training. Adds losses, regularization, and
    metrics. Then calls the Keras compile() function.

    learning_rate: SGD learning rate.
    momentum: SGD momentum.
    """
    # Optimizer object. clipnorm guards against exploding gradients.
    optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,
                                     clipnorm=5.0)
    # Add Losses
    # First, clear previously set losses to avoid duplication
    # (compile() may be called more than once, e.g. per training stage).
    self.keras_model._losses = []
    self.keras_model._per_input_losses = {}
    loss_names = ["rpn_class_loss", "rpn_bbox_loss",
                  "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
    for name in loss_names:
        layer = self.keras_model.get_layer(name)
        # Skip losses that were already registered.
        if layer.output in self.keras_model.losses:
            continue
        # NOTE: keep_dims is the TF 1.x spelling (renamed keepdims later);
        # this code targets the TF version the repo pins.
        self.keras_model.add_loss(
            tf.reduce_mean(layer.output, keep_dims=True))
    # Add L2 Regularization
    # Skip gamma and beta weights of batch normalization layers.
    # Each term is normalized by the weight tensor's size.
    reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
                  for w in self.keras_model.trainable_weights
                  if 'gamma' not in w.name and 'beta' not in w.name]
    self.keras_model.add_loss(tf.add_n(reg_losses))
    # Compile. The outputs have no targets, so every output gets a None
    # loss; training is driven by the add_loss() terms above.
    self.keras_model.compile(optimizer=optimizer, loss=[
        None] * len(self.keras_model.outputs))
    # Add metrics for losses so Keras reports each one by name.
    for name in loss_names:
        if name in self.keras_model.metrics_names:
            continue
        layer = self.keras_model.get_layer(name)
        self.keras_model.metrics_names.append(name)
        self.keras_model.metrics_tensors.append(tf.reduce_mean(
            layer.output, keep_dims=True))
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
    """Mark layers trainable iff their name fully matches `layer_regex`.

    Recurses into nested models; `keras_model` and `indent` are used
    internally for that recursion.
    """
    # Announce only on the outermost call.
    if verbose > 0 and keras_model is None:
        log("Selecting layers to train")

    keras_model = keras_model or self.keras_model

    # Multi-GPU wrappers keep the weight-carrying layers on the inner model.
    if hasattr(keras_model, "inner_model"):
        candidates = keras_model.inner_model.layers
    else:
        candidates = keras_model.layers

    for candidate in candidates:
        if candidate.__class__.__name__ == 'Model':
            # Nested model: recurse instead of toggling it directly.
            print("In model: ", candidate.name)
            self.set_trainable(
                layer_regex, keras_model=candidate, indent=indent + 4)
            continue

        # Layers without weights have nothing to train.
        if not candidate.weights:
            continue

        trainable = bool(re.fullmatch(layer_regex, candidate.name))

        # TimeDistributed wraps the layer that actually owns the weights.
        if candidate.__class__.__name__ == 'TimeDistributed':
            candidate.layer.trainable = trainable
        else:
            candidate.trainable = trainable

        if trainable and verbose > 0:
            log("{}{:20} ({})".format(" " * indent, candidate.name,
                                      candidate.__class__.__name__))
def set_log_dir(self, model_path=None):
    """Sets the model log directory and epoch counter.

    model_path: If None, or a format different from what this code uses
        then set a new log directory and start epochs from 0. Otherwise,
        extract the log directory and the epoch counter from the file
        name.
    """
    # Set date and epoch counter as if starting a new model.
    self.epoch = 0
    now = datetime.datetime.now()

    # If we have a model path with date and epochs use them.
    if model_path:
        # Continue from where we left off. Get epoch and date from the file name.
        # A sample model path might look like:
        # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
        regex = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.h5"
        # BUG FIX: the regex assumes "/" separators; normalize Windows
        # backslashes first so resuming works on all platforms (previously
        # the match silently failed on Windows and training restarted at 0).
        m = re.match(regex, model_path.replace("\\", "/"))
        if m:
            now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
                                    int(m.group(4)), int(m.group(5)))
            # The checkpoint holds the state *after* its epoch; resume at the next one.
            self.epoch = int(m.group(6)) + 1

    # Directory for training logs.
    self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
        self.config.NAME.lower(), now))

    # Path to save after each epoch. Include placeholders that get filled by Keras.
    self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
        self.config.NAME.lower()))
    self.checkpoint_path = self.checkpoint_path.replace(
        "*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers):
    """Train the model.

    train_dataset, val_dataset: Training and validation Dataset objects.
    learning_rate: The learning rate to train with
    epochs: Number of training epochs. Note that previous training epochs
        are considered to be done already, so this actually determines
        the epochs to train in total rather than in this particular
        call.
    layers: Allows selecting which layers to train. It can be:
        - A regular expression to match layer names to train
        - One of these predefined values:
          heads: The RPN, classifier and mask heads of the network
          all: All the layers
          3+: Train Resnet stage 3 and up
          4+: Train Resnet stage 4 and up
          5+: Train Resnet stage 5 and up
    """
    assert self.mode == "training", "Create model in training mode."

    # Pre-defined layer regular expressions
    layer_regex = {
        # all layers but the backbone
        "heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
        # From a specific Resnet stage and up
        "3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
        "4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
        "5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
        # All layers
        "all": ".*",
    }
    if layers in layer_regex.keys():
        layers = layer_regex[layers]

    # Data generators
    train_generator = data_generator(train_dataset, self.config, shuffle=True,
                                     batch_size=self.config.BATCH_SIZE)
    val_generator = data_generator(val_dataset, self.config, shuffle=True,
                                   batch_size=self.config.BATCH_SIZE,
                                   augment=False)

    # Callbacks
    callbacks = [
        keras.callbacks.TensorBoard(log_dir=self.log_dir,
                                    histogram_freq=0, write_graph=True, write_images=False),
        keras.callbacks.ModelCheckpoint(self.checkpoint_path,
                                        verbose=0, save_weights_only=True),
    ]

    # Train
    log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
    log("Checkpoint Path: {}".format(self.checkpoint_path))
    self.set_trainable(layers)
    self.compile(learning_rate, self.config.LEARNING_MOMENTUM)

    # Work-around for Windows: Keras fails on Windows when using
    # multiprocessing workers. See discussion here:
    # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
    # BUG FIX: was `os.name is 'nt'` — identity comparison against a string
    # literal is implementation-defined; use equality.
    if os.name == 'nt':
        workers = 0
    else:
        workers = max(self.config.BATCH_SIZE // 2, 2)

    self.keras_model.fit_generator(
        train_generator,
        initial_epoch=self.epoch,
        epochs=epochs,
        steps_per_epoch=self.config.STEPS_PER_EPOCH,
        callbacks=callbacks,
        # BUG FIX: was `next(val_generator)`, which evaluated a single fixed
        # batch every epoch. `validation_steps` is only honored when
        # validation_data is a generator, so pass the generator itself.
        validation_data=val_generator,
        validation_steps=self.config.VALIDATION_STEPS,
        max_queue_size=100,
        workers=workers,
        use_multiprocessing=True,
    )
    self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
    """Takes a list of images and modifies them to the format expected
    as an input to the neural network.

    images: List of image matrices [height, width, depth]. Images can have
        different sizes.

    Returns 3 Numpy matrices:
    molded_images: [N, h, w, 3]. Images resized and normalized.
    image_metas: [N, length of meta data]. Details about each image.
    windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
        original image (padding excluded).
    """
    molded_list, meta_list, window_list = [], [], []
    for img in images:
        # Resize to the model's expected dimensions.
        # TODO: move resizing to mold_image()
        resized, window, scale, padding = utils.resize_image(
            img,
            min_dim=self.config.IMAGE_MIN_DIM,
            max_dim=self.config.IMAGE_MAX_DIM,
            padding=self.config.IMAGE_PADDING)
        normalized = mold_image(resized, self.config)
        # Image id 0 and an empty active-class vector: inference doesn't use them.
        meta = compose_image_meta(
            0, img.shape, window,
            np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
        molded_list.append(normalized)
        window_list.append(window)
        meta_list.append(meta)
    # Pack into batch arrays.
    return np.stack(molded_list), np.stack(meta_list), np.stack(window_list)
def unmold_detections(self, detections, mrcnn_mask, image_shape, window):
    """Reformats the detections of one image from the format of the neural
    network output to a format suitable for use in the rest of the
    application.

    detections: [N, (y1, x1, y2, x2, class_id, score)]
    mrcnn_mask: [N, height, width, num_classes]
    image_shape: [height, width, depth] Original size of the image before resizing
    window: [y1, x1, y2, x2] Box in the image where the real image is
        excluding the padding.

    Returns:
    boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
    class_ids: [N] Integer class IDs for each bounding box
    scores: [N] Float probability scores of the class_id
    masks: [height, width, num_instances] Instance masks
    """
    # How many detections do we have?
    # Detections array is padded with zeros. Find the first class_id == 0.
    zero_ix = np.where(detections[:, 4] == 0)[0]
    N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]

    # Extract boxes, class_ids, scores, and class-specific masks
    boxes = detections[:N, :4]
    class_ids = detections[:N, 4].astype(np.int32)
    scores = detections[:N, 5]
    # For each detection, keep only the mask channel of its predicted class.
    masks = mrcnn_mask[np.arange(N), :, :, class_ids]

    # Compute scale and shift to translate coordinates from the molded
    # (resized + padded) image back to the original image domain.
    h_scale = image_shape[0] / (window[2] - window[0])
    w_scale = image_shape[1] / (window[3] - window[1])
    scale = min(h_scale, w_scale)
    shift = window[:2]  # y, x
    scales = np.array([scale, scale, scale, scale])
    shifts = np.array([shift[0], shift[1], shift[0], shift[1]])

    # Translate bounding boxes to image domain
    boxes = np.multiply(boxes - shifts, scales).astype(np.int32)

    # Filter out detections with zero area. Often only happens in early
    # stages of training when the network weights are still a bit random.
    exclude_ix = np.where(
        (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
    if exclude_ix.shape[0] > 0:
        boxes = np.delete(boxes, exclude_ix, axis=0)
        class_ids = np.delete(class_ids, exclude_ix, axis=0)
        scores = np.delete(scores, exclude_ix, axis=0)
        masks = np.delete(masks, exclude_ix, axis=0)
        N = class_ids.shape[0]

    # Resize masks to original image size and set boundary threshold.
    full_masks = []
    for i in range(N):
        # Convert neural network mask to full size mask
        full_mask = utils.unmold_mask(masks[i], boxes[i], image_shape)
        full_masks.append(full_mask)
    # Zero-instance result keeps a well-formed (h, w, 0)-style shape.
    full_masks = np.stack(full_masks, axis=-1)\
        if full_masks else np.empty((0,) + masks.shape[1:3])

    return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
    """Runs the detection pipeline.

    images: List of images, potentially of different sizes.
    verbose: If non-zero, log inputs and intermediate arrays.

    Returns a list of dicts, one dict per image. The dict contains:
    rois: [N, (y1, x1, y2, x2)] detection bounding boxes
    class_ids: [N] int class IDs
    scores: [N] float probability scores for the class IDs
    masks: [H, W, N] instance binary masks
    """
    assert self.mode == "inference", "Create model in inference mode."
    # The network graph is built for a fixed batch size.
    assert len(
        images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"

    if verbose:
        log("Processing {} images".format(len(images)))
        for image in images:
            log("image", image)
    # Mold inputs to format expected by the neural network
    molded_images, image_metas, windows = self.mold_inputs(images)
    if verbose:
        log("molded_images", molded_images)
        log("image_metas", image_metas)
    # Run object detection
    detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, \
        rois, rpn_class, rpn_bbox =\
        self.keras_model.predict([molded_images, image_metas], verbose=0)
    # Process detections: translate each image's outputs back to the
    # original image coordinates and unpad the masks.
    results = []
    for i, image in enumerate(images):
        final_rois, final_class_ids, final_scores, final_masks =\
            self.unmold_detections(detections[i], mrcnn_mask[i],
                                   image.shape, windows[i])
        results.append({
            "rois": final_rois,
            "class_ids": final_class_ids,
            "scores": final_scores,
            "masks": final_masks,
        })
    return results
def ancestor(self, tensor, name, checked=None):
    """Finds the ancestor of a TF tensor in the computation graph.

    tensor: TensorFlow symbolic tensor.
    name: Name of ancestor tensor to find
    checked: For internal use. A list of tensors that were already
        searched to avoid loops in traversing the graph.
    """
    if checked is None:
        checked = []
    # Depth cap so pathological graphs don't loop for very long.
    if len(checked) > 500:
        return None

    # Convert name to a regex and allow matching a number prefix
    # because Keras adds them automatically.
    if isinstance(name, str):
        name = re.compile(name.replace("/", r"(\_\d+)*/"))

    # Depth-first search through the op's inputs.
    for parent in tensor.op.inputs:
        if parent in checked:
            continue
        if bool(re.fullmatch(name, parent.name)):
            return parent
        checked.append(parent)
        found = self.ancestor(parent, name, checked)
        if found is not None:
            return found
    return None
def find_trainable_layer(self, layer):
    """If a layer is encapsulated by another layer, dig through the
    encapsulation and return the layer that holds the weights.
    """
    # Unwrap nested TimeDistributed wrappers iteratively.
    while layer.__class__.__name__ == 'TimeDistributed':
        layer = layer.layer
    return layer
def get_trainable_layers(self):
    """Returns a list of layers that have weights."""
    # Unwrap wrapper layers first, then keep only those carrying weights.
    unwrapped = (self.find_trainable_layer(l) for l in self.keras_model.layers)
    return [layer for layer in unwrapped if layer.get_weights()]
def run_graph(self, images, outputs):
    """Runs a sub-set of the computation graph that computes the given
    outputs.

    images: List of images to feed through the graph.
    outputs: List of tuples (name, tensor) to compute. The tensors are
        symbolic TensorFlow tensors and the names are for easy tracking.

    Returns an ordered dict of results. Keys are the names received in the
    input and values are Numpy arrays.
    """
    model = self.keras_model

    # Organize desired outputs into an ordered dict
    outputs = OrderedDict(outputs)
    for o in outputs.values():
        assert o is not None

    # Build a Keras function to run parts of the computation graph
    inputs = model.inputs
    if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
        inputs += [K.learning_phase()]
    # NOTE(review): `inputs` is extended above but `model.inputs` is passed
    # here — works only because `inputs` aliases the same list; confirm
    # this is intentional rather than a latent bug.
    kf = K.function(model.inputs, list(outputs.values()))

    # Run inference
    molded_images, image_metas, windows = self.mold_inputs(images)
    # TODO: support training mode?
    # if TEST_MODE == "training":
    #     model_in = [molded_images, image_metas,
    #                 target_rpn_match, target_rpn_bbox,
    #                 gt_boxes, gt_masks]
    #     if not config.USE_RPN_ROIS:
    #         model_in.append(target_rois)
    #     if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    #         model_in.append(1.)
    #     outputs_np = kf(model_in)
    # else:
    model_in = [molded_images, image_metas]
    # Feed 0. for the learning phase: run in inference mode.
    if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
        model_in.append(0.)
    outputs_np = kf(model_in)

    # Pack the generated Numpy arrays into a a dict and log the results.
    outputs_np = OrderedDict([(k, v)
                              for k, v in zip(outputs.keys(), outputs_np)])
    for k, v in outputs_np.items():
        log(k, v)
    return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, image_shape, window, active_class_ids):
    """Takes attributes of an image and puts them in one 1D array.

    image_id: An int ID of the image. Useful for debugging.
    image_shape: [height, width, channels]
    window: (y1, x1, y2, x2) in pixels. The area of the image where the real
        image is (excluding the padding)
    active_class_ids: List of class_ids available in the dataset from which
        the image came. Useful if training on images from multiple datasets
        where not all classes are present in all datasets.
    """
    components = [image_id]              # size=1
    components.extend(image_shape)       # size=3
    components.extend(window)            # size=4 (y1, x1, y2, x2) in image coordinates
    components.extend(active_class_ids)  # size=num_classes
    return np.array(components)
def parse_image_meta_graph(meta):
    """Parses a tensor that contains image attributes to its components.
    See compose_image_meta() for more details.

    meta: [batch, meta length] where meta length depends on NUM_CLASSES
    """
    return [
        meta[:, 0],    # image_id
        meta[:, 1:4],  # image_shape (height, width, channels)
        meta[:, 4:8],  # window (y1, x1, y2, x2) of image in pixels
        meta[:, 8:],   # active_class_ids
    ]
def mold_image(images, config):
    """Takes RGB images with 0-255 values, converts to float, and subtracts
    the dataset mean pixel. Expects image colors in RGB order.
    """
    as_float = images.astype(np.float32)
    return as_float - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
    """Takes an image normalized with mold() and returns the original."""
    restored = normalized_images + config.MEAN_PIXEL
    return restored.astype(np.uint8)
############################################################
# Miscellenous Graph Functions
############################################################
def trim_zeros_graph(boxes, name=None):
    """Often boxes are represented with matrices of shape [N, 4] and
    are padded with zeros. This removes zero boxes.

    boxes: [N, 4] matrix of boxes.
    non_zeros: [N] a 1D boolean mask identifying the rows to keep
    """
    # A row is kept iff at least one of its coordinates is non-zero.
    row_mass = tf.reduce_sum(tf.abs(boxes), axis=1)
    non_zeros = tf.cast(row_mass, tf.bool)
    trimmed = tf.boolean_mask(boxes, non_zeros, name=name)
    return trimmed, non_zeros
def batch_pack_graph(x, counts, num_rows):
    """Picks different number of values from each row
    in x depending on the values in counts.
    """
    # Slice the first counts[row] elements of each row, then flatten.
    picked = [x[row, :counts[row]] for row in range(num_rows)]
    return tf.concat(picked, axis=0)
| 111,322 | 42.553599 | 115 | py |
SimPer | SimPer-main/src/simper.py | """
Minimal SimPer implementation & example training loops.
"""
import tensorflow as tf
from networks import Featurizer, Classifier
@tf.function
def _max_cross_corr(feats_1, feats_2):
    """Maximum normalized cross-correlation of feats_1 against each row of
    feats_2, computed via FFT."""
    # feats_1: 1 x T(# time stamp)
    # feats_2: M(# aug) x T(# time stamp)
    feats_2 = tf.cast(feats_2, feats_1.dtype)
    # Mean-center each series so correlation measures shape, not offset.
    feats_1 = feats_1 - tf.math.reduce_mean(feats_1, axis=-1, keepdims=True)
    feats_2 = feats_2 - tf.math.reduce_mean(feats_2, axis=-1, keepdims=True)

    min_N = min(feats_1.shape[-1], feats_2.shape[-1])
    # Zero-pad to twice the longer length so circular FFT correlation
    # behaves like linear correlation.
    padded_N = max(feats_1.shape[-1], feats_2.shape[-1]) * 2
    feats_1_pad = tf.pad(feats_1, tf.constant([[0, 0], [0, padded_N - feats_1.shape[-1]]]))
    feats_2_pad = tf.pad(feats_2, tf.constant([[0, 0], [0, padded_N - feats_2.shape[-1]]]))

    feats_1_fft = tf.signal.rfft(feats_1_pad)
    feats_2_fft = tf.signal.rfft(feats_2_pad)
    # Cross-power spectrum; irfft of this is the cross-correlation over lags.
    X = feats_1_fft * tf.math.conj(feats_2_fft)

    # Normalize by the product of standard deviations (Pearson-style).
    power_norm = tf.cast(
        tf.math.reduce_std(feats_1, axis=-1, keepdims=True) *
        tf.math.reduce_std(feats_2, axis=-1, keepdims=True),
        X.dtype)
    # Guard against division by zero for constant series.
    power_norm = tf.where(
        tf.equal(power_norm, 0), tf.ones_like(power_norm), power_norm)
    X = X / power_norm

    cc = tf.signal.irfft(X) / (min_N - 1)
    # Best alignment: maximum correlation over all lags.
    max_cc = tf.math.reduce_max(cc, axis=-1)
    return max_cc
@tf.function
def batched_max_cross_corr(x, y):
    """Pairwise max cross-correlation matrix between rows of x and rows of y.

    x: M(# aug) x T(# time stamp)
    y: M(# aug) x T(# time stamp)
    Returns an M x M matrix: entry (i, j) is the max cross-correlation of
    x[i] against y[j].
    """
    # Calculate distance for a single row of x.
    per_x_dist = lambda i: _max_cross_corr(x[i:(i + 1), :], y)

    # Compute and stack distances for all rows of x.
    # fn_output_signature tells map_fn the output dtype differs from the
    # int64 index elements.
    dist = tf.map_fn(fn=per_x_dist,
                     elems=tf.range(tf.shape(x)[0], dtype=tf.int64),
                     fn_output_signature=x.dtype)
    return dist
@tf.function
def normed_psd(x, fps, zero_pad=0, high_pass=0.25, low_pass=15):
    """Band-limited, L2-normalized power spectral density.

    x: M(# aug) x T(# time stamp)
    fps: sampling rate (frames per second) used to map FFT bins to Hz.
    zero_pad: fraction of the series length to pad on each side (finer
        frequency resolution).
    high_pass / low_pass: keep only frequencies in [high_pass, low_pass] Hz.
    """
    # Remove DC component before the FFT.
    x = x - tf.math.reduce_mean(x, axis=-1, keepdims=True)
    if zero_pad > 0:
        L = x.shape[-1]
        x = tf.pad(x, tf.constant([[int(zero_pad / 2 * L), int(zero_pad / 2 * L)]]))

    # Power spectrum = squared magnitude of the real FFT.
    x = tf.abs(tf.signal.rfft(x)) ** 2

    # Nyquist frequency; rfft bins span [0, Fn].
    Fn = fps / 2
    freqs = tf.linspace(0., Fn, x.shape[-1])
    use_freqs = tf.math.logical_and(freqs >= high_pass, freqs <= low_pass)
    use_freqs = tf.repeat(tf.expand_dims(use_freqs, 0), x.shape[0], axis=0)
    # Boolean-mask then reshape back to per-row vectors (every row keeps
    # the same number of bins, so the reshape is valid).
    x = tf.reshape(x[use_freqs], (x.shape[0], -1))

    # Normalize PSD to unit L2 norm per row.
    denom = tf.math.reduce_euclidean_norm(x, axis=-1, keepdims=True)
    # Guard against all-zero spectra.
    denom = tf.where(tf.equal(denom, 0), tf.ones_like(denom), denom)
    x = x / denom
    return x
@tf.function
def batched_normed_psd(x, y):
    """Pairwise inner products of normalized PSDs: entry (i, j) is the dot
    product of normed_psd(x)[i] and normed_psd(y)[j].

    x: M(# aug) x T(# time stamp)
    y: M(# aug) x T(# time stamp)
    """
    # NOTE(review): normed_psd() declares `fps` as a required positional
    # argument, so these calls would raise TypeError as written — a sampling
    # rate needs to be supplied (or defaulted in normed_psd). Confirm the
    # intended fps before use.
    return tf.matmul(normed_psd(x), normed_psd(y), transpose_b=True)
def label_distance(labels_1, labels_2, dist_fn='l1', label_temperature=0.1):
    """Soft pseudo-label targets from pairwise label distances.

    labels: bsz x M(#augs)
    output: bsz x M(#augs) x M(#augs) — softmax over negative distances,
    sharpened by `label_temperature`.
    """
    gap = tf.math.abs(labels_1[:, :, None] - labels_2[:, None, :])
    # Each distance variant is |a - b| raised to a different power.
    if dist_fn == 'l1':
        dist_mat = -gap
    elif dist_fn == 'l2':
        dist_mat = -(gap ** 2)
    elif dist_fn == 'sqrt':
        dist_mat = -(gap ** 0.5)
    else:
        raise NotImplementedError(f"`{dist_fn}` not implemented.")

    # Temperature-scaled softmax over the second augmentation set.
    return tf.nn.softmax(dist_mat / label_temperature, axis=-1)
class SimPer(tf.keras.Model):
    """SimPer self-supervised training wrapper: a featurizer trained with a
    contrastive loss over speed-augmented views, plus a linear regressor
    head for downstream use."""

    def __init__(self, hparams):
        super(SimPer, self).__init__()
        self.hparams = hparams
        # NOTE(review): hparams["n_frames"] is passed as Featurizer's
        # `n_outputs` — confirm this naming is intentional.
        self.featurizer = Featurizer(self.hparams["n_frames"])
        self.regressor = Classifier(self.featurizer.n_outputs, 1, False)
        self.network = tf.keras.Sequential([self.featurizer, self.regressor])
        self.optimizer = tf.keras.optimizers.Adam(lr=self.hparams["lr"])

    def update(self, minibatches):
        """One SimPer training step on a batch of augmented clips.

        minibatches: (all_x, all_speed) where
            all_x: [bsz, 2*M, SSL_FRAMES, H, W, C] — 2*M speed-augmented
                views per sample (two halves of M views each)
            all_speed: [bsz, 2*M] — the augmentation speeds (pseudo labels)
        Returns the scalar training loss.
        """
        all_x, all_speed = minibatches

        # all_x: [bsz, 2*M, SSL_FRAMES, H, W, C]
        batch_size, num_augments = all_x.shape[0], all_x.shape[1]
        # Flatten sample and augmentation dims for a single featurizer pass.
        all_x = tf.reshape(all_x, [batch_size * num_augments] + all_x.shape[2:].as_list())

        # [bsz, 2*M] -> [bsz, M, M]: soft targets from pairwise speed
        # distances between the two halves of the augmentations.
        all_labels = label_distance(all_speed[:, :num_augments // 2],
                                    all_speed[:, num_augments // 2:],
                                    self.hparams["label_dist_fn"],
                                    self.hparams["label_temperature"])

        criterion = tf.keras.losses.CategoricalCrossentropy(from_logits=True)

        with tf.GradientTape() as tape:
            all_z = self.featurizer(all_x)
            # Restore per-sample grouping of the feature vectors.
            all_z = tf.reshape(all_z, [batch_size, num_augments, -1])

            loss = 0
            # Per sample: feature similarity between the two augmentation
            # halves (via the configured distance fn), matched against the
            # label-derived soft targets.
            for feats, labels in zip(all_z, all_labels):
                feat_dist = globals()[self.hparams["feat_dist_fn"]](
                    feats[:num_augments // 2], feats[num_augments // 2:])
                loss += criterion(y_pred=feat_dist, y_true=labels)
            loss /= batch_size

        # Only the featurizer is updated by the SSL objective.
        gradients = tape.gradient(loss, self.featurizer.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.featurizer.trainable_variables))

        return loss

    def predict(self, x, training: bool):
        """Return featurizer embeddings for x (no regressor head)."""
        return self.featurizer(x, training=training)
| 5,269 | 35.344828 | 91 | py |
SimPer | SimPer-main/src/networks.py | """
Example network architectures:
- Featurizer (for representation learning)
- Classifier (for downstream tasks)
"""
import tensorflow as tf
from tensorflow.keras.layers import (Conv2D, Conv3D, Dense, Flatten, BatchNormalization,
TimeDistributed, MaxPool2D, GlobalAveragePooling2D)
class Featurizer(tf.keras.Model):
    """3D-conv video encoder: per-frame spatial pooling between conv blocks,
    global average pooling per frame, then flattened to one feature vector.
    """

    def __init__(self, n_outputs):
        super(Featurizer, self).__init__()
        self.conv0 = Conv3D(64, (5, 3, 3), padding='same')
        self.conv1 = Conv3D(128, (5, 3, 3), padding='same')
        self.conv2 = Conv3D(128, (5, 3, 3), padding='same')
        # 1x1x1 conv collapses channels to a single map per frame.
        self.conv3 = Conv3D(1, (1, 1, 1))

        self.bn0 = BatchNormalization()
        self.bn1 = BatchNormalization()
        self.bn2 = BatchNormalization()
        self.bn3 = BatchNormalization()

        # TimeDistributed applies 2D pooling frame-by-frame.
        self.pool0 = TimeDistributed(MaxPool2D((2, 2)))
        self.pool1 = TimeDistributed(MaxPool2D((2, 2)))
        self.pool2 = TimeDistributed(MaxPool2D((2, 2)))
        self.pool3 = TimeDistributed(GlobalAveragePooling2D())

        self.flatten = Flatten()
        # Exposed so downstream heads can size their input.
        self.n_outputs = n_outputs

    def call(self, x):
        # Block 0: conv -> BN -> ReLU -> spatial downsample.
        x = self.conv0(x)
        x = self.bn0(x)
        x = tf.nn.relu(x)
        x = self.pool0(x)

        # Block 1.
        x = self.conv1(x)
        x = self.bn1(x)
        x = tf.nn.relu(x)
        x = self.pool1(x)

        # Block 2 intentionally disabled (kept for experimentation).
        # x = self.conv2(x)
        # x = self.bn2(x)
        # x = tf.nn.relu(x)
        # x = self.pool2(x)

        # Channel collapse + per-frame global average pooling.
        x = self.conv3(x)
        x = self.bn3(x)
        x = tf.nn.relu(x)
        x = self.pool3(x)

        x = self.flatten(x)
        return x
class MLP(tf.keras.Model):
    """Small MLP head: input layer -> ReLU -> output layer, each of width
    n_outputs. The middle hidden layer is currently disabled."""

    def __init__(self, n_outputs):
        super(MLP, self).__init__()
        self.inputs = Dense(n_outputs)
        self.hidden = Dense(n_outputs)
        self.outputs = Dense(n_outputs)

    def call(self, x):
        x = self.inputs(x)
        x = tf.nn.relu(x)
        # Hidden layer intentionally disabled (kept for experimentation).
        # x = self.hidden(x)
        # x = tf.nn.relu(x)
        x = self.outputs(x)
        return x
def Classifier(in_features, out_features, nonlinear=False):
    """Build a prediction head: a single Dense layer, or a small MLP with
    halving hidden widths when nonlinear=True.
    """
    if not nonlinear:
        return Dense(out_features)
    # Two ReLU hidden layers shrinking from in_features, then the output.
    hidden = [Dense(width, activation=tf.nn.relu)
              for width in (in_features // 2, in_features // 4)]
    return tf.keras.Sequential(hidden + [Dense(out_features)])
| 2,324 | 26.678571 | 88 | py |
sampling_cf | sampling_cf-main/main.py | import os
import time
import importlib
import datetime as dt
from tqdm import tqdm
from utils import file_write, log_end_epoch, INF, valid_hyper_params
from data_path_constants import get_log_file_path, get_model_file_path
# NOTE: No global-level torch imports as the GPU-ID is set through code
def train(model, criterion, optimizer, reader, hyper_params, forgetting_events, track_events):
    """Train `model` for one epoch over `reader`.

    forgetting_events: per-interaction accumulator tensor (or None when
        track_events is False); updated in place across epochs.
    track_events: if True, record a per-interaction difficulty signal —
        the raw loss for explicit feedback, or the count of negatives
        ranked above the positive for implicit feedback.

    Returns (metrics, forgetting_events). `metrics` is currently left empty.
    """
    import torch
    model.train()

    # Initializing metrics since we will calculate MSE on the train set on the fly
    metrics = {}

    # Initializations
    at = 0  # running offset of the current batch within the epoch's interactions

    # Train for one epoch, batch-by-batch
    loop = tqdm(reader)
    for data, y in loop:
        # Empty the gradients
        model.zero_grad()
        optimizer.zero_grad()

        # Forward pass
        output = model(data)

        # Compute per-interaction loss (no reduction, so events can be tracked)
        loss = criterion(output, y, return_mean = False)
        # Anneal by the fraction of total training steps completed per batch.
        criterion.anneal(1.0 / float(len(reader) * hyper_params['epochs']))
        # loop.set_description("Loss: {}".format(round(float(loss), 4)))

        # Track forgetting events
        if track_events:
            with torch.no_grad():
                if hyper_params['task'] == 'explicit': forgetting_events[at : at+data[0].shape[0]] += loss.data
                else:
                    # Implicit feedback: count negatives scored above the positive.
                    pos_output, neg_output = output
                    pos_output = pos_output.repeat(1, neg_output.shape[1])
                    num_incorrect = torch.sum((neg_output > pos_output).float(), -1)
                    forgetting_events[at : at+data[0].shape[0]] += num_incorrect.data

            at += data[0].shape[0]

        # Backward pass
        loss = torch.mean(loss)
        loss.backward()
        optimizer.step()

    return metrics, forgetting_events
def train_complete(hyper_params, train_reader, val_reader, model, model_class, track_events):
    """Full training loop with validation, best-model checkpointing, and
    optional early stopping.

    model_class: class used to re-instantiate the model when reloading the
        best checkpoint at the end.
    track_events: forwarded to train(); enables forgetting-event tracking.

    Returns (best_model, forgetting_events) where forgetting_events is the
    per-interaction accumulator averaged over epochs (numpy), or None.
    """
    import torch
    from loss import CustomLoss
    from eval import evaluate
    from torch_utils import is_cuda_available

    criterion = CustomLoss(hyper_params)

    optimizer = torch.optim.Adam(
        model.parameters(), lr=hyper_params['lr'], betas=(0.9, 0.98),
        weight_decay=hyper_params['weight_decay']
    )

    file_write(hyper_params['log_file'], str(model))
    file_write(hyper_params['log_file'], "\nModel Built!\nStarting Training...\n")

    try:
        # Best-so-far validation scores; model is checkpointed on improvement.
        best_MSE = float(INF)
        best_AUC = -float(INF)
        best_HR = -float(INF)
        decreasing_streak = 0  # validation rounds since last improvement

        forgetting_events = None
        if track_events:
            forgetting_events = torch.zeros(train_reader.num_interactions).float()
            if is_cuda_available: forgetting_events = forgetting_events.cuda()

        for epoch in range(1, hyper_params['epochs'] + 1):
            epoch_start_time = time.time()

            # Training for one epoch
            metrics, local_forgetted_count = train(
                model, criterion, optimizer, train_reader, hyper_params,
                forgetting_events, track_events
            )

            # Calulating the metrics on the validation set
            if (epoch % hyper_params['validate_every'] == 0) or (epoch == 1):
                metrics = evaluate(model, criterion, val_reader, hyper_params, train_reader.item_propensity)
                metrics['dataset'] = hyper_params['dataset']
                decreasing_streak += 1

                # Save best model on validation set. The tracked metric
                # depends on the task: MSE (explicit), else AUC, else HR@10.
                if hyper_params['task'] == 'explicit' and metrics['MSE'] < best_MSE:
                    print("Saving model...")
                    torch.save(model.state_dict(), hyper_params['model_path'])
                    decreasing_streak, best_MSE = 0, metrics['MSE']
                elif hyper_params['task'] != 'explicit' and metrics['AUC'] > best_AUC:
                    print("Saving model...")
                    torch.save(model.state_dict(), hyper_params['model_path'])
                    decreasing_streak, best_AUC = 0, metrics['AUC']
                elif hyper_params['task'] != 'explicit' and metrics['HR@10'] > best_HR:
                    print("Saving model...")
                    torch.save(model.state_dict(), hyper_params['model_path'])
                    decreasing_streak, best_HR = 0, metrics['HR@10']

            log_end_epoch(hyper_params, metrics, epoch, time.time() - epoch_start_time, metrics_on = '(VAL)')

            # Check if need to early-stop
            if 'early_stop' in hyper_params and decreasing_streak >= hyper_params['early_stop']:
                file_write(hyper_params['log_file'], "Early stopping..")
                break

    except KeyboardInterrupt: print('Exiting from training early')

    # Load best model and return it for evaluation on test-set
    if os.path.exists(hyper_params['model_path']):
        model = model_class(hyper_params)
        if is_cuda_available: model = model.cuda()
        model.load_state_dict(torch.load(hyper_params['model_path']))
        model.eval()

    # Average the accumulated events over epochs.
    if track_events: forgetting_events = forgetting_events.cpu().numpy() / float(hyper_params['epochs'])

    return model, forgetting_events
def train_neumf(hyper_params, train_reader, val_reader, track_events):
    """Three-stage NeuMF training: pre-train the GMF and MLP branches
    separately, then initialize NeuMF from both and fine-tune it.

    Returns (model, forgetting_events) from the final NeuMF stage.
    """
    from pytorch_models.NeuMF import GMF, MLP, NeuMF
    from torch_utils import is_cuda_available, xavier_init

    base_path = hyper_params['model_path']
    gmf_path = base_path[:-3] + "_gmf.pt"
    mlp_path = base_path[:-3] + "_mlp.pt"

    # Stage 1: pre-train the GMF branch.
    hyper_params['model_path'] = gmf_path
    gmf_model = GMF(hyper_params)
    if is_cuda_available: gmf_model = gmf_model.cuda()
    xavier_init(gmf_model)
    gmf_model, _ = train_complete(hyper_params, train_reader, val_reader, gmf_model, GMF, track_events)

    # Stage 2: pre-train the MLP branch.
    hyper_params['model_path'] = mlp_path
    mlp_model = MLP(hyper_params)
    if is_cuda_available: mlp_model = mlp_model.cuda()
    xavier_init(mlp_model)
    mlp_model, _ = train_complete(hyper_params, train_reader, val_reader, mlp_model, MLP, track_events)

    # Stage 3: initialize NeuMF from both pre-trained branches and train it.
    hyper_params['model_path'] = base_path
    model = NeuMF(hyper_params)
    if is_cuda_available: model = model.cuda()
    model.init(gmf_model, mlp_model)
    model, forgetting_events = train_complete(hyper_params, train_reader, val_reader, model, NeuMF, track_events)

    # The branch checkpoints are no longer needed once NeuMF is trained.
    os.remove(mlp_path) ; os.remove(gmf_path)

    return model, forgetting_events
def main_pytorch(hyper_params, track_events = False, eval_full = True):
    """End-to-end experiment driver: load data, train the configured model,
    optionally evaluate on the test set, and clean up the saved checkpoint.

    track_events: if True, collect per-interaction forgetting events.
    eval_full: if True, evaluate the best model on the test set.

    Returns (metrics, forgetting_events); metrics is empty when
    eval_full is False, and nothing is returned for invalid configs.
    """
    from load_data import load_data
    from eval import evaluate
    from torch_utils import is_cuda_available, xavier_init, get_model_class
    from loss import CustomLoss

    # Guard against unsupported task/model combinations.
    if not valid_hyper_params(hyper_params):
        print("Invalid task combination specified, exiting.")
        return

    # Load the data readers
    train_reader, test_reader, val_reader, hyper_params = load_data(hyper_params, track_events = track_events)

    file_write(hyper_params['log_file'], "\n\nSimulation run on: " + str(dt.datetime.now()) + "\n\n")
    file_write(hyper_params['log_file'], "Data reading complete!")
    file_write(hyper_params['log_file'], "Number of train batches: {:4d}".format(len(train_reader)))
    file_write(hyper_params['log_file'], "Number of validation batches: {:4d}".format(len(val_reader)))
    file_write(hyper_params['log_file'], "Number of test batches: {:4d}".format(len(test_reader)))

    # Initialize & train the model. NeuMF needs its multi-stage routine.
    start_time = time.time()
    if hyper_params['model_type'] == 'NeuMF':
        model, forgetting_events = train_neumf(hyper_params, train_reader, val_reader, track_events)
    else:
        model = get_model_class(hyper_params)(hyper_params)
        if is_cuda_available: model = model.cuda()
        xavier_init(model)
        model, forgetting_events = train_complete(
            hyper_params, train_reader, val_reader, model, get_model_class(hyper_params), track_events
        )

    metrics = {}
    if eval_full:
        # Calculating MSE on test-set
        criterion = CustomLoss(hyper_params)
        metrics = evaluate(model, criterion, test_reader, hyper_params, train_reader.item_propensity, test = True)

        log_end_epoch(hyper_params, metrics, 'final', time.time() - start_time, metrics_on = '(TEST)')

    # We have no space left for storing the models
    os.remove(hyper_params['model_path'])

    del model, train_reader, test_reader, val_reader
    return metrics, forgetting_events
def main_pop_rec(hyper_params):
    """Evaluate the non-parametric popularity-recommender baseline.

    PopRec needs no gradient training: it is built directly from the train
    reader's item-count map and evaluated once on the test set.

    Returns:
        (metrics, None) -- None stands in for forgetting_events so the
        return shape matches `main_pytorch`.
    """
    from load_data import load_data
    from eval import evaluate
    from loss import CustomLoss
    from pytorch_models.pop_rec import PopRec
    # Load the data readers
    train_reader, test_reader, val_reader, hyper_params = load_data(hyper_params)
    file_write(hyper_params['log_file'], "\n\nSimulation run on: " + str(dt.datetime.now()) + "\n\n")
    file_write(hyper_params['log_file'], "Data reading complete!")
    file_write(hyper_params['log_file'], "Number of test batches: {:4d}\n\n".format(len(test_reader)))
    # Make the model
    start_time = time.time()
    model = PopRec(hyper_params, train_reader.get_item_count_map())
    # Calculating MSE on test-set
    criterion = CustomLoss(hyper_params)
    metrics = evaluate(model, criterion, test_reader, hyper_params, train_reader.item_propensity, test = True)
    log_end_epoch(hyper_params, metrics, 'final', time.time() - start_time, metrics_on = '(TEST)')
    del model, train_reader, test_reader, val_reader
    return metrics, None
def main(hyper_params, gpu_id = None):
    """Top-level experiment entry point.

    Validates the configuration, pins the GPU, merges dataset-specific
    hyper-parameters, picks a model-specific default learning rate, resolves
    log/checkpoint paths, and dispatches to the appropriate driver.
    """
    if not valid_hyper_params(hyper_params):
        print("Invalid task combination specified, exiting.")
        return
    # Setting GPU ID for running entire code ## Very Very Imp.
    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        # torch.cuda.set_device(int(gpu_id))
        # torch.cuda.empty_cache()
    # Set dataset specific hyper-params (module name derived from the dataset name)
    hyper_params.update(
        importlib.import_module('data_hyperparams.{}'.format(hyper_params['dataset'])).hyper_params
    )
    # Learning rate is "highly" dataset AND model specific; only set a default
    # if the caller did not provide one explicitly.
    if 'lr' not in hyper_params:
        if hyper_params['model_type'] == 'SASRec': hyper_params['lr'] = 0.006
        elif hyper_params['model_type'] == 'SVAE': hyper_params['lr'] = 0.02
        elif hyper_params['model_type'] == 'MVAE': hyper_params['lr'] = 0.01
        else: hyper_params['lr'] = 0.008
    hyper_params['log_file'] = get_log_file_path(hyper_params)
    hyper_params['model_path'] = get_model_file_path(hyper_params)
    # pop_rec has no trainable parameters and uses its own driver
    if hyper_params['model_type'] == 'pop_rec': main_pop_rec(hyper_params)
    else: main_pytorch(hyper_params)
    # torch.cuda.empty_cache()
if __name__ == '__main__':
    # Script entry point: run one experiment with the static config from hyper_params.py.
    from hyper_params import hyper_params
    main(hyper_params)
| 10,944 | 39.238971 | 114 | py |
sampling_cf | sampling_cf-main/data_genie.py | from data_genie.data_genie_config import *
from data_genie.data_genie_trainers import *
from data_genie.data_genie_data import OracleData
from data_genie.data_genie_model import PointwiseDataGenie, PairwiseDataGenie
# NOTE: Please edit the config in `data_genie/data_genie_config.py` before \
# running this trainer script
print("Datasets:", ", ".join(datasets))
# Grid search over dataset-embedding types; for each one, train every
# Data-Genie baseline (linear/logistic regression, XGBoost) and the neural
# Data-Genie models, logging results to their respective log files.
for embedding_type in [
    'handcrafted',
    'unsupervised_gcn_dim_8_layers_1',
    'unsupervised_gcn_dim_8_layers_3',
    'unsupervised_gcn_dim_16_layers_1',
    'unsupervised_gcn_dim_16_layers_3',
    'unsupervised_gcn_dim_32_layers_1',
]:
    # If you want to try out different combinations of handcrafted features
    options = [
        # [0, 1],
        # [0, 1, 3],
        # [0, 1, 4],
        # [0, 1, 5],
        [0, 1, 3, 4, 5],
        # [0, 1, 2, 3, 4, 5, 6],
    ] if embedding_type == 'handcrafted' else [ None ]
    # Create model
    for feats_to_keep in options:
        # Load data once per configuration; pointwise and pairwise variants
        # share the same underlying oracle labels.
        print("\n\n{} Using {} embedding {}\n\n".format("="*30, embedding_type, "="*30))
        pointwise_data = OracleData(datasets, feats_to_keep, embedding_type, bsz = 128, pointwise = True)
        pairwise_data = OracleData(datasets, feats_to_keep, embedding_type, bsz = 128, pointwise = False)
        ########### Linear Regression
        print("\n\n{} Linear Regression {}\n\n".format("="*30, "="*30))
        for C in [ 1e-4, 1e-2, 1, 1e2, 1e4 ]: train_linear_regression(pointwise_data, embedding_type, feats_to_keep, C = float(C))
        ########### Logistic Regression
        print("\n\n{} Logistic Regression {}\n\n".format("="*30, "="*30))
        for C in [ 1e-2, 1, 1e2, 1e4 ]: train_logistic_regression(pairwise_data, embedding_type, feats_to_keep, C = float(C))
        ########### XGBoost
        print("\n\n{} XGBoost Regression {}\n\n".format("="*30, "="*30))
        for max_depth in [ 2, 4, 6, 8, 10 ]:
            train_xgboost_regression(pointwise_data, embedding_type, feats_to_keep, max_depth = max_depth)
        print("\n\n{} XGBoost Classification {}\n\n".format("="*30, "="*30))
        for max_depth in [ 2, 4, 6, 8, 10 ]:
            train_xgboost_bce(pairwise_data, embedding_type, feats_to_keep, max_depth = max_depth)
        ########## Neural network
        print("\n\n{} Data-Genie {}\n\n".format("="*30, "="*30))
        for Analyzer in [
            PointwiseDataGenie,
            PairwiseDataGenie
        ]:
            print("\nPointwise:" if Analyzer == PointwiseDataGenie else "\nPairwise:")
            # NOTE: These hyper-parameters were estimated using a basic grid-search
            LR = 0.001
            WD = float(1e-6)
            DIM = 64
            DROPOUT = 0.2
            GRAPH_DIM = 64
            GCN_LAYERS = 2
            # Handcrafted features converge much faster, hence fewer epochs
            EPOCHS = 200 if embedding_type != 'handcrafted' else 20
            train_pytorch(
                pointwise_data if Analyzer == PointwiseDataGenie else pairwise_data,
                Analyzer, feats_to_keep, LR, WD, DIM,
                DROPOUT, embedding_type, GRAPH_DIM, GCN_LAYERS,
                EPOCHS = EPOCHS, VALIDATE_EVERY = 1
            )
| 2,771 | 34.088608 | 124 | py |
sampling_cf | sampling_cf-main/loss.py | import torch
import torch.nn.functional as F
from torch_utils import is_cuda_available
class CustomLoss(torch.nn.Module):
    """Task-aware loss dispatcher.

    The effective `forward` is selected once at construction time from the
    task (explicit -> MSE, implicit/sequential -> BPR), then overridden for
    model families that need a specialized objective (MVAE/SVAE -> annealed
    ELBO, SASRec -> masked BCE).
    """
    def __init__(self, hyper_params):
        super(CustomLoss, self).__init__()
        self.forward = {
            'explicit': self.mse,
            'implicit': self.bpr,
            'sequential': self.bpr,
        }[hyper_params['task']]
        if hyper_params['model_type'] == "MVAE": self.forward = self.vae_loss
        if hyper_params['model_type'] == "SVAE": self.forward = self.svae_loss
        if hyper_params['model_type'] == "SASRec": self.forward = self.bce_sasrec
        self.torch_bce = torch.nn.BCEWithLogitsLoss()
        self.anneal_val = 0.0  # KL-annealing coefficient, grown via `anneal()`
        self.hyper_params = hyper_params
    def mse(self, output, y, return_mean = True):
        """Squared error; mean-reduced unless `return_mean` is False."""
        mse = torch.pow(output - y, 2)
        if return_mean: return torch.mean(mse)
        return mse
    def bce_sasrec(self, output, pos, return_mean = True):
        """Masked binary cross-entropy over (positive, negative) logit pairs.

        Positions where `pos` equals the padding id (`total_items`) are excluded.
        """
        pos_logits, neg_logits = output
        pos_labels, neg_labels = torch.ones(pos_logits.shape), torch.zeros(neg_logits.shape)
        if is_cuda_available: pos_labels, neg_labels = pos_labels.cuda(), neg_labels.cuda()
        indices = pos != self.hyper_params['total_items']
        loss = self.torch_bce(pos_logits[indices], pos_labels[indices])
        loss += self.torch_bce(neg_logits[indices], neg_labels[indices])
        return loss
    def bpr(self, output, y, return_mean = True):
        """Bayesian Personalized Ranking: each positive score is compared
        against every sampled negative for the same user."""
        pos_output, neg_output = output
        pos_output = pos_output.repeat(1, neg_output.shape[1]).view(-1)
        neg_output = neg_output.view(-1)
        loss = -F.logsigmoid(pos_output - neg_output)
        if return_mean: return torch.mean(loss)
        return loss
    def anneal(self, step_size):
        """Increase the KL-annealing coefficient linearly, capped at 0.2.

        BUGFIX: the original used `max(self.anneal_val, 0.2)`, which makes the
        coefficient jump straight to 0.2 on the first call and then grow
        without bound. Standard KL annealing (cf. Mult-VAE) increases linearly
        up to a fixed cap, i.e. `min`.
        """
        self.anneal_val += step_size
        self.anneal_val = min(self.anneal_val, 0.2)
    def vae_loss(self, output, y_true_s, return_mean = True):
        """Mult-VAE ELBO: annealed KL divergence + multinomial log-likelihood."""
        decoder_output, mu_q, logvar_q = output
        # Calculate KL Divergence loss
        kld = torch.mean(torch.sum(0.5 * (-logvar_q + torch.exp(logvar_q) + mu_q**2 - 1), -1))
        # Calculate Likelihood
        decoder_output = F.log_softmax(decoder_output, -1)
        likelihood = torch.sum(-1.0 * y_true_s * decoder_output, -1)
        final = (self.anneal_val * kld) + (likelihood)
        if return_mean: return torch.mean(final)
        return final
    def svae_loss(self, output, y, return_mean = True):
        """SVAE ELBO: like `vae_loss`, but per-timestep and padding-masked."""
        decoder_output, mu_q, logvar_q = output
        dec_shape = decoder_output.shape # [batch_size x seq_len x total_items]
        # Calculate KL Divergence loss
        kld = torch.mean(torch.sum(0.5 * (-logvar_q + torch.exp(logvar_q) + mu_q**2 - 1), -1))
        # Don't compute loss on padded items
        y_true_s, y_indices = y
        keep_indices = y_indices != self.hyper_params['total_items']
        y_true_s = y_true_s[keep_indices]
        decoder_output = decoder_output[keep_indices]
        # Calculate Likelihood
        decoder_output = F.log_softmax(decoder_output, -1)
        likelihood = torch.sum(-1.0 * y_true_s * decoder_output)
        likelihood = likelihood / float(dec_shape[0] * self.hyper_params['num_next'])
        final = (self.anneal_val * kld) + (likelihood)
        if return_mean: return torch.mean(final)
        return final
| 3,395 | 36.318681 | 94 | py |
sampling_cf | sampling_cf-main/torch_utils.py | import torch
# Probe CUDA once at import time and alias the tensor constructors so that
# call-sites stay device-agnostic.
is_cuda_available = torch.cuda.is_available()
if is_cuda_available:
    print("Using CUDA...\n")
    LongTensor = torch.cuda.LongTensor
    FloatTensor = torch.cuda.FloatTensor
    BoolTensor = torch.cuda.BoolTensor
else:
    LongTensor = torch.LongTensor
    FloatTensor = torch.FloatTensor
    BoolTensor = torch.BoolTensor
def get_model_class(hyper_params):
    """Resolve the configured `model_type` string to its model class.

    All MF-style variants (bias_only / MF_dot / MF) share the same class;
    an unknown model type raises KeyError.
    """
    from pytorch_models import MF, MVAE, SASRec, SVAE
    model_registry = {
        "bias_only": MF.MF,
        "MF_dot": MF.MF,
        "MF": MF.MF,
        "MVAE": MVAE.MVAE,
        "SVAE": SVAE.SVAE,
        "SASRec": SASRec.SASRec,
    }
    return model_registry[hyper_params['model_type']]
def xavier_init(model):
    """Apply Xavier/Glorot-uniform initialization to every parameter that supports it.

    `torch.nn.init.xavier_uniform_` is only defined for tensors with at least
    two dimensions, so 1-D parameters (biases, norm scales, ...) are
    deliberately left at their default initialization.
    """
    for _, param in model.named_parameters():
        try:
            torch.nn.init.xavier_uniform_(param.data)
        except ValueError:
            # Raised for < 2-D tensors (fan-in/fan-out undefined). The bare
            # `except` this replaces also hid unrelated errors.
            pass
| 827 | 25.709677 | 59 | py |
sampling_cf | sampling_cf-main/eval.py | import torch
import numpy as np
from numba import jit, float32, float64, int64
from utils import INF
def evaluate(model, criterion, reader, hyper_params, item_propensity, topk = [ 10, 100 ], test = False):
    """Evaluate `model` over all batches of `reader`.

    Explicit-feedback tasks report MSE; otherwise HR@k / NDCG@k / PSP@k are
    accumulated per batch (normalized to percentages at the end) plus a global
    AUC. For large datasets the validation set can be scored against sampled
    negatives ("partial" evaluation) instead of the full item space.

    NOTE(review): `topk` is a mutable default argument; safe here because it
    is never mutated, but worth keeping in mind.
    """
    metrics = {}
    # Do a negative sampled item-space evaluation (only on the validation set)
    # if the dataset is too big
    partial_eval = (not test) and hyper_params['partial_eval']
    partial_eval = partial_eval and (hyper_params['model_type'] not in [ 'MVAE', 'SVAE', 'pop_rec' ])
    if partial_eval: metrics['eval'] = 'partial'
    if hyper_params['task'] == 'explicit': metrics['MSE'] = 0.0
    else:
        preds, y_binary = [], []  # flat score / binary-label lists for AUC
        for kind in [ 'HR', 'NDCG', 'PSP' ]:
            for k in topk:
                metrics['{}@{}'.format(kind, k)] = 0.0
    model.eval()
    with torch.no_grad():
        for data, y in reader:
            output = model(data, eval = True)
            # VAE-family models return (reconstruction, mu, logvar); keep the reconstruction
            if hyper_params['model_type'] in [ 'MVAE', 'SVAE' ]: output, _, _ = output
            # SVAE predicts per-timestep; only the last position is evaluated
            if hyper_params['model_type'] == 'SVAE': output = output[:, -1, :]
            if hyper_params['task'] == 'explicit':
                metrics['MSE'] += torch.sum(criterion(output, y, return_mean = False).data)
            else:
                function = evaluate_batch_partial if partial_eval else evaluate_batch
                metrics, temp_preds, temp_y = function(data, output, y, item_propensity, topk, metrics)
                preds += temp_preds
                y_binary += temp_y
    if hyper_params['task'] == 'explicit':
        metrics['MSE'] = round(float(metrics['MSE']) / reader.num_interactions, 4)
    else:
        # NOTE: sklearn's `roc_auc_score` is suuuuper slow
        metrics['AUC'] = round(fast_auc(np.array(y_binary), np.array(preds)), 4)
        # Convert accumulated per-interaction sums into percentages
        for kind in [ 'HR', 'NDCG', 'PSP' ]:
            for k in topk:
                metrics['{}@{}'.format(kind, k)] = round(
                    float(100.0 * metrics['{}@{}'.format(kind, k)]) / reader.num_interactions, 4
                )
    return metrics
def evaluate_batch(data, output_batch, y, item_propensity, topk, metrics):
    """Accumulate HR/NDCG/PSP for one batch under full item-space evaluation.

    `y` carries (train_positive, test_positive_set) per user; train-set items
    are masked to -INF before ranking so they cannot appear in the top-k.
    Returns the updated `metrics` plus flat score/label lists for AUC.
    """
    # Y
    train_positive, test_positive_set = y
    # Data
    _, _, auc_negatives = data
    # AUC Stuff: collect scores of test positives (label 1) and sampled negatives (label 0)
    temp_preds, temp_y = [], []
    logits_cpu = output_batch.cpu().numpy()
    for b in range(len(output_batch)):
        # Validation set could have 0 positive interactions
        if len(test_positive_set[b]) == 0: continue
        temp_preds += np.take(logits_cpu[b], np.array(list(test_positive_set[b]))).tolist()
        temp_y += [ 1.0 for _ in range(len(test_positive_set[b])) ]
        temp_preds += np.take(logits_cpu[b], auc_negatives[b]).tolist()
        temp_y += [ 0.0 for _ in range(len(auc_negatives[b])) ]
    # Marking train-set consumed items as negative INF
    for b in range(len(output_batch)): output_batch[b][ train_positive[b] ] = -INF
    _, indices = torch.topk(output_batch, min(item_propensity.shape[0], max(topk)), sorted = True)
    indices = indices.cpu().numpy().tolist()
    for k in topk:
        for b in range(len(output_batch)):
            num_pos = float(len(test_positive_set[b]))
            # Validation set could have 0 positive interactions after sampling
            if num_pos == 0: continue
            metrics['HR@{}'.format(k)] += float(len(set(indices[b][:k]) & test_positive_set[b])) / float(min(num_pos, k))
            # Ideal PSP order: highest-propensity positives first
            test_positive_sorted_psp = sorted([ item_propensity[x] for x in test_positive_set[b] ])[::-1]
            dcg, idcg, psp, max_psp = 0.0, 0.0, 0.0, 0.0
            for at, pred in enumerate(indices[b][:k]):
                if pred in test_positive_set[b]:
                    dcg += 1.0 / np.log2(at + 2)
                    psp += float(item_propensity[pred]) / float(min(num_pos, k))
                if at < num_pos:
                    # Ideal gains assume all positives ranked at the top
                    idcg += 1.0 / np.log2(at + 2)
                    max_psp += test_positive_sorted_psp[at]
            metrics['NDCG@{}'.format(k)] += dcg / idcg
            metrics['PSP@{}'.format(k)] += psp / max_psp
    return metrics, temp_preds, temp_y
def evaluate_batch_partial(data, output, y, item_propensity, topk, metrics):
    """Accumulate HR/NDCG/PSP for one batch under negative-sampled evaluation.

    Converts the tensors to numpy and defers the heavy lifting to the
    numba-compiled `evaluate_batch_partial_jit`; returns the updated metrics
    plus flat prediction/label lists for AUC computation.
    """
    _, batch_pos_items, _ = data
    batch_pos_items = batch_pos_items.cpu().numpy()
    scores_pos, scores_neg = output
    scores_pos = scores_pos.cpu().numpy()
    scores_neg = scores_neg.cpu().numpy()
    preds_flat, labels_flat, hr_at, ndcg_at, psp_at = evaluate_batch_partial_jit(
        scores_pos, scores_neg, batch_pos_items, np.array(item_propensity), np.array(topk)
    )
    for k_idx, k in enumerate(topk):
        for name, values in (('HR', hr_at), ('NDCG', ndcg_at), ('PSP', psp_at)):
            metrics['{}@{}'.format(name, k)] += values[k_idx]
    return metrics, preds_flat.tolist(), labels_flat.tolist()
@jit('Tuple((float32[:], float32[:], float32[:], float32[:], float32[:]))(float32[:,:], float32[:,:], int64[:,:], float64[:], int64[:])')
def evaluate_batch_partial_jit(pos_score, neg_score, test_pos_items, item_propensity, topk):
    """Numba-compiled core of negative-sampled evaluation.

    For each user row: un-pad the positive scores, collect them (with labels)
    for AUC, rank positives against the sampled negatives, and accumulate
    HR/NDCG/PSP at each cutoff in `topk`.

    Returns (flat_preds, flat_labels, hr_per_k, ndcg_per_k, psp_per_k).
    """
    # Pre-allocate worst-case AUC buffers; trimmed to `at_preds` on return.
    temp_preds = np.zeros(
        ((pos_score.shape[0] * pos_score.shape[1]) + (neg_score.shape[0] * neg_score.shape[1])),
        dtype = np.float32
    )
    temp_y = np.zeros(temp_preds.shape, dtype = np.float32)
    at_preds = 0
    hr_arr = np.zeros((len(topk)), dtype = np.float32)
    ndcg_arr = np.zeros((len(topk)), dtype = np.float32)
    psp_arr = np.zeros((len(topk)), dtype = np.float32)
    for b in range(len(pos_score)):
        pos, neg = pos_score[b, :], neg_score[b, :]
        # pos will be padded, un-pad it
        # (padding duplicates the last real score, so trim trailing repeats)
        last_index = len(pos) - 1
        while last_index > 0 and pos[last_index] == pos[last_index - 1]: last_index -= 1
        pos = pos[:last_index + 1]
        # Add to AUC
        temp_preds[at_preds:at_preds+len(pos)] = pos
        temp_y[at_preds:at_preds+len(pos)] = 1
        at_preds += len(pos)
        temp_preds[at_preds:at_preds+len(neg)] = neg
        temp_y[at_preds:at_preds+len(neg)] = 0
        at_preds += len(neg)
        # get rank of all elements in pos
        temp_ranks = np.argsort(- np.concatenate((pos, neg)))
        # To maintain order
        pos_ranks = np.zeros(len(pos))
        for at, r in enumerate(temp_ranks):
            if r < len(pos): pos_ranks[r] = at + 1
        # Ideal PSP order: highest-propensity positives first
        test_positive_sorted_psp = sorted([ item_propensity[x] for x in test_pos_items[b] ])[::-1]
        for at_k, k in enumerate(topk):
            num_pos = float(len(pos))
            hr_arr[at_k] += np.sum(pos_ranks <= k) / float(min(num_pos, k))
            dcg, idcg, psp, max_psp = 0.0, 0.0, 0.0, 0.0
            for at, rank in enumerate(pos_ranks):
                if rank <= k:
                    dcg += 1.0 / np.log2(rank + 1) # 1-based indexing
                    psp += item_propensity[test_pos_items[b][at]] / float(min(num_pos, k))
                # NOTE(review): unlike `evaluate_batch`, the ideal gains here sum
                # over ALL positives rather than the top min(k, num_pos) -- confirm intended.
                idcg += 1.0 / np.log2(at + 2)
                max_psp += test_positive_sorted_psp[at]
            ndcg_arr[at_k] += dcg / idcg
            psp_arr[at_k] += psp / max_psp
    return temp_preds[:at_preds], temp_y[:at_preds], hr_arr, ndcg_arr, psp_arr
@jit(float64(float64[:], float64[:]))
def fast_auc(y_true, y_prob):
    """Numba-compiled AUC: single pass over labels sorted by predicted score."""
    sorted_labels = y_true[np.argsort(y_prob)]
    negatives_seen, rank_sum = 0, 0
    for label in sorted_labels:
        negatives_seen += (1 - label)
        rank_sum += label * negatives_seen
    positives = len(sorted_labels) - negatives_seen
    return rank_sum / (negatives_seen * positives)
sampling_cf | sampling_cf-main/svp_handler.py | import numpy as np
from collections import defaultdict
from main import main_pytorch
from data_path_constants import get_svp_log_file_path, get_svp_model_file_path
class SVPHandler:
    """Selection-Via-Proxy (SVP) data-sampling handler.

    Trains a cheap proxy model once (tracking per-interaction "forgetting
    events" during training) and then exposes several strategies for choosing
    which `percent` of the training data to discard, based on how often each
    interaction / user was forgotten by the proxy.

    Convention for `index`: one entry per (user, item, rating, time)
    interaction in `data`; 0 means "currently kept", the strategies flip
    entries to -1 to mark them removed and return the updated array.
    """
    def __init__(self, model_type, loss_type, hyper_params):
        # Proxy-model configuration: deliberately small/fast settings.
        hyper_params['model_type'] = model_type
        hyper_params['task'] = loss_type
        hyper_params['num_train_negs'] = 1
        hyper_params['num_test_negs'] = 100
        hyper_params['latent_size'] = 10
        hyper_params['dropout'] = 0.3
        hyper_params['weight_decay'] = float(1e-6)
        hyper_params['lr'] = 0.006
        hyper_params['epochs'] = 50
        hyper_params['validate_every'] = 5000
        hyper_params['batch_size'] = 1024
        self.hyper_params = hyper_params
        self.hyper_params['log_file'] = self.log_file
        self.hyper_params['model_path'] = self.model_file
        # Train the proxy immediately; populates `self.forgetted_count`.
        self.train_model()
    def train_model(self):
        """Train the proxy model, keeping only its per-interaction forgetting counts."""
        _, self.forgetted_count = main_pytorch(self.hyper_params, track_events = True, eval_full = False)
    def forgetting_events(self, percent, data, index):
        """Remove the `percent` of kept interactions with the fewest forgetting events."""
        # Keep those points which have the maximum forgetted count
        # => Remove those points which have the minimum forgetted count
        index_map = []
        for at, i in enumerate(index):
            if i == 0: index_map.append(at)
        split_point = int(float(len(self.forgetted_count)) * (float(percent) / 100.0))
        order = np.argsort(self.forgetted_count)
        # Map positions in the kept-only training order back to global index positions
        order = list(map(lambda x: index_map[x], order))
        remove_indices = order[:split_point] # If greedy
        for i in remove_indices: index[i] = -1 # Remove
        return index
    def forgetting_events_user(self, percent, data, index):
        """Remove entire users (lowest mean forgetting count first) until ~`percent` of interactions are dropped."""
        # Keep those users which have the maximum forgetted count
        # Remove those users which have the minimum forgetted count
        index_map, user_map, hist, at, total = [], [], {}, 0, 0
        for u in range(len(data)):
            for i, r, t in data[u]:
                if index[at] == 0:
                    index_map.append(at)
                    user_map.append(u)
                    if u not in hist: hist[u] = 0
                    hist[u] += 1
                    total += 1
                at += 1
        user_forgetted_count = defaultdict(list)
        for train_at, cnt in enumerate(self.forgetted_count):
            user_forgetted_count[user_map[train_at]].append(cnt)
        # Users sorted by ascending mean forgetting count (least forgettable first)
        user_forgetted_count = sorted(list(dict(user_forgetted_count).items()), key = lambda x: np.mean(x[1]))
        interactions_to_remove, removed, users_to_remove = total * (float(percent) / 100.0), 0, set()
        for u, _ in user_forgetted_count:
            if removed >= interactions_to_remove: break
            users_to_remove.add(u)
            removed += hist[u]
        for train_at in range(len(user_map)):
            if user_map[train_at] in users_to_remove: index[index_map[train_at]] = -1
        return index
    def compute_freq(self, data, index, freq_type):
        """Count kept interactions per user (freq_type == 0) or per item (freq_type == 1).

        Returns (counts, entity-id -> dense-position map) for entities that occur.
        """
        freq, at = defaultdict(int), 0
        for u in range(len(data)):
            for i, r, t in data[u]:
                if index[at] == 0:
                    to_count = [ u, i ][freq_type]
                    freq[to_count] += 1
                at += 1
        valid_users = list(freq.keys())
        return list(map(lambda x: freq[x], valid_users)), dict(zip(valid_users, list(range(len(freq)))))
    def compute_prop(self, freq_vector, num_instances, A = 0.55, B = 1.5):
        """Estimate inverse-propensity weights from frequencies.

        Uses the frequency-based propensity model common in extreme
        classification; A/B are its shape parameters.
        """
        C = (np.log(num_instances)-1)*np.power(B+1, A)
        wts = 1.0 + C*np.power(np.array(freq_vector)+B, -A)
        return np.ravel(wts)
    def forgetting_events_propensity(self, percent, data, index, pooling_method = 'max'):
        """Like `forgetting_events`, but counts are re-weighted by pooled user/item inverse propensity."""
        # Keep those points which have the maximum forgetted count
        # Remove those points which have the minimum forgetted count
        num_interactions = len(self.forgetted_count)
        user_freq, user_map = self.compute_freq(data, index, 0)
        user_propensity_vector = self.compute_prop(user_freq, num_interactions)
        item_freq, item_map = self.compute_freq(data, index, 1)
        item_propensity_vector = self.compute_prop(item_freq, num_interactions)
        interaction_propensity, at = [], 0
        # NOTE(review): `freq` below is never used and `at` was already reset on
        # the previous line -- this looks like leftover dead code.
        freq, at = defaultdict(int), 0
        def pool(prop_u, prop_i):
            # How to combine the user- and item-level inverse propensities
            if pooling_method == 'sum': return prop_u + prop_i
            elif pooling_method == 'max': return max(prop_u, prop_i)
        for u in range(len(data)):
            for i, r, t in data[u]:
                if index[at] == 0:
                    interaction_propensity.append(
                        pool(user_propensity_vector[user_map[u]], item_propensity_vector[item_map[i]])
                    )
                at += 1
        assert len(interaction_propensity) == num_interactions
        # interaction_propensity actually estimates the `inverse` propensity, hence multiply
        updated_count = np.array(self.forgetted_count) * np.array(interaction_propensity)
        index_map = []
        for at, i in enumerate(index):
            if i == 0: index_map.append(at)
        split_point = int(float(len(updated_count)) * (float(percent) / 100.0))
        order = np.argsort(updated_count)
        order = list(map(lambda x: index_map[x], order))
        remove_indices = order[:split_point] # If greedy
        for i in remove_indices: index[i] = -1 # Remove
        return index
    def forgetting_events_user_propensity(self, percent, data, index):
        """Like `forgetting_events_user`, but each user's counts are weighted by their inverse propensity."""
        # Keep those users which have the maximum forgetted count
        # Keep those users which have the maximum propensity --> minimum frequency
        # Remove those users which have the minimum forgetted count
        num_interactions = len(self.forgetted_count)
        user_freq, user_index_map = self.compute_freq(data, index, 0)
        user_propensity_vector = self.compute_prop(user_freq, num_interactions)
        index_map, user_map, hist, at, total = [], [], {}, 0, 0
        for u in range(len(data)):
            for i, r, t in data[u]:
                if index[at] == 0:
                    index_map.append(at)
                    user_map.append(u)
                    if u not in hist: hist[u] = 0
                    hist[u] += 1
                    total += 1
                at += 1
        user_forgetted_count = defaultdict(list)
        for train_at, cnt in enumerate(self.forgetted_count):
            u = user_map[train_at]
            user_forgetted_count[u].append(cnt * user_propensity_vector[user_index_map[u]])
        user_forgetted_count = sorted(list(dict(user_forgetted_count).items()), key = lambda x: np.mean(x[1]))
        interactions_to_remove, removed, users_to_remove = total * (float(percent) / 100.0), 0, set()
        for u, _ in user_forgetted_count:
            if removed >= interactions_to_remove: break
            users_to_remove.add(u)
            removed += hist[u]
        for train_at in range(len(user_map)):
            if user_map[train_at] in users_to_remove: index[index_map[train_at]] = -1
        return index
    @property
    def model_file(self):
        # Checkpoint path for the proxy model
        return get_svp_model_file_path(self.hyper_params)
    @property
    def log_file(self):
        # Log-file path for the proxy training run
        return get_svp_log_file_path(self.hyper_params)
| 7,281 | 40.375 | 110 | py |
sampling_cf | sampling_cf-main/data_genie/data_genie_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class PointwiseLoss(nn.Module):
    """Squared-error regression loss for pointwise Data-Genie training."""
    def __init__(self):
        super(PointwiseLoss, self).__init__()

    def forward(self, output, y, return_mean = True):
        """Return the mean squared error, or per-element squared errors when `return_mean` is False."""
        squared_error = (output - y) ** 2
        return torch.mean(squared_error) if return_mean else squared_error
class PairwiseLoss(nn.Module):
    """BPR-style pairwise ranking loss: -log sigmoid(positive - negative)."""
    def __init__(self):
        super(PairwiseLoss, self).__init__()

    def forward(self, pos_output, neg_output, return_mean = True):
        """Return the mean pairwise loss, or per-pair losses when `return_mean` is False."""
        pairwise = -F.logsigmoid(pos_output - neg_output)
        return torch.mean(pairwise) if return_mean else pairwise
| 560 | 27.05 | 63 | py |
sampling_cf | sampling_cf-main/data_genie/data_genie_trainers.py | import time
import torch
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from sklearn.feature_selection import RFE
from sklearn.metrics import roc_auc_score
from xgboost import XGBClassifier, XGBRegressor
from torch.utils.tensorboard import SummaryWriter
from sklearn.linear_model import Ridge, LogisticRegression
from data_genie.data_genie_config import *
from data_genie.data_genie_model import PointwiseDataGenie
from data_genie.data_genie_utils import LOGS_BASE, TENSORBOARD_BASE, MODELS_BASE
from torch_utils import xavier_init
from utils import log_end_epoch, INF
def get_metrics(output, y, pointwise):
    """Score sampler predictions against ground-truth Kendall's Tau values.

    `output` / `y` are flat lists laid out as consecutive groups of
    `len(all_samplers)` entries (one group per dataset/task/metric triple).
    Ground-truth entries outside [-1, 1] are treated as invalid and dropped;
    groups with fewer than 5 valid samplers are skipped for the P@1 metrics.
    """
    n_samplers = len(all_samplers)
    pred_groups = np.array(output).reshape([ len(output) // n_samplers, n_samplers ])
    tau_groups = np.array(y).reshape([ len(y) // n_samplers, n_samplers ])
    metrics = defaultdict(list)
    for group_at in range(len(pred_groups)):
        pred, true = np.array(pred_groups[group_at]), np.array(tau_groups[group_at])
        valid = np.array([ j for j in range(len(true)) if -1 <= true[j] <= 1 ])
        if len(valid) == 0: continue
        pred, true = pred[valid], true[valid]
        if pointwise: metrics['MSE'] += ((pred - true) ** 2).tolist()
        if len(valid) < 5: continue
        # Compare against the max tau, since many samplers can tie at the best value
        metrics['P@1'].append(float(true[np.argmax(pred)] == np.max(true)))
        metrics['Rand P@1'].append(float(true[np.random.randint(0, len(true))] == np.max(true)))
    return { k: round(100.0 * np.mean(metrics[k]), 2) if k != 'MSE' else round(np.mean(metrics[k]), 4) for k in metrics }
def validate(model, data, which_data, pointwise):
    """Run the Data-Genie model over `which_data` in eval mode and score it.

    Pointwise scores are squashed through tanh so they match the [-1, 1]
    range of Kendall's Tau before metric computation.
    """
    model.model.eval()
    all_preds, all_taus = [], []
    with torch.no_grad():
        for complete, subset, task, metric, tau in data.test_iter(which_data):
            batch_preds = model.model(complete, subset, task, metric)
            if pointwise:
                batch_preds = torch.tanh(batch_preds)
            all_preds.extend(batch_preds.cpu().numpy().tolist())
            all_taus.extend(tau)
    return get_metrics(all_preds, all_taus, pointwise)
def train_one_epoch(model, data):
    """Run one optimization pass over the training split; return the epoch's train metrics."""
    model.model.train()
    running_metric, running_total = 0.0, 0.0
    for batch in data.train_iter():
        # Reset gradients
        model.model.zero_grad()
        model.optimizer.zero_grad()
        # Forward pass
        loss, batch_metric, batch_total = model.step(batch)
        running_metric += batch_metric
        running_total += batch_total
        # Backward pass + parameter update
        loss.backward()
        model.optimizer.step()
    return model.get_metric_dict(running_metric, running_total)
def train_pytorch(data, Analyzer, feats_to_keep, lr, wd, dim, dropout, embedding_type, graph_dim, gcn_layers, EPOCHS, VALIDATE_EVERY):
    """Train one neural Data-Genie model with TensorBoard logging and checkpointing.

    A unique `common_path` derived from the configuration names the
    TensorBoard run, log file, and model checkpoint. The best checkpoint
    (by test-set P@1) is reloaded for the final evaluation.

    NOTE(review): model selection uses TEST-set P@1, and `avg_p1` is divided
    by EPOCHS although it is only accumulated on validation epochs -- correct
    only while VALIDATE_EVERY == 1; confirm both are intended.
    """
    common_path = "{}_{}_dim_{}_drop_{}_lr_{}_wd_{}".format(
        "pointwise" if Analyzer == PointwiseDataGenie else "pairwise",
        embedding_type, dim, dropout, lr, wd
    )
    if feats_to_keep is not None: common_path += "_feats_" + "-".join(map(str, feats_to_keep))
    if embedding_type != "handcrafted": common_path += "_graph_dim_{}_gcn_layers_{}".format(graph_dim, gcn_layers)
    writer = SummaryWriter(log_dir = TENSORBOARD_BASE + common_path)
    model = Analyzer({
        'lr': lr,
        'weight_decay': float(wd),
        'dim': dim,
        'dropout': dropout,
        # Only handcrafted embeddings have a fixed feature width; None otherwise
        'feats': 5 + (len(feats_to_keep) * NUM_SAMPLES) if feats_to_keep is not None else None,
        'log_file': LOGS_BASE + '{}.txt'.format(common_path),
        'model_file': MODELS_BASE + '{}.pt'.format(common_path),
        'embedding_type': embedding_type,
        'graph_dim': graph_dim,
        'gcn_layers': gcn_layers
    }, writer, xavier_init)
    # Train model
    start_time, best_metric = time.time(), -INF
    avg_p1 = 0.0
    for epoch in tqdm(range(1, EPOCHS + 1)):
        epoch_start_time = time.time()
        train_metrics = train_one_epoch(model, data)
        for m in train_metrics: writer.add_scalar('Train/' + m, train_metrics[m], epoch)
        log_end_epoch(model.hyper_params, train_metrics, epoch, time.time() - epoch_start_time, metrics_on = '(TRAIN)', dont_print = True)
        if epoch % VALIDATE_EVERY == 0:
            val_metrics = validate(model, data, data.val, pointwise = Analyzer == PointwiseDataGenie)
            for m in val_metrics: writer.add_scalar('Validation/' + m, val_metrics[m], epoch)
            log_end_epoch(model.hyper_params, val_metrics, epoch, time.time() - epoch_start_time, metrics_on = '(VAL)', dont_print = True)
            test_metrics = validate(model, data, data.test, pointwise = Analyzer == PointwiseDataGenie)
            for m in test_metrics: writer.add_scalar('Test/' + m, test_metrics[m], epoch)
            log_end_epoch(model.hyper_params, test_metrics, epoch, time.time() - epoch_start_time, metrics_on = '(TEST)', dont_print = True)
            avg_p1 += test_metrics['Rand P@1']
            # Checkpoint on best test-set P@1
            if test_metrics["P@1"] > best_metric:
                model.save() ; best_metric = test_metrics["P@1"]
    # Reload the best checkpoint for the final report
    model.load()
    test_metrics = validate(model, data, data.test, pointwise = Analyzer == PointwiseDataGenie)
    test_metrics['Rand P@1'] = round(avg_p1 / float(EPOCHS), 2)
    for m in test_metrics: writer.add_scalar('Test/' + m, test_metrics[m], EPOCHS + 1)
    log_end_epoch(model.hyper_params, test_metrics, "final", time.time() - start_time, metrics_on = '(TEST)')
def train_linear_regression(data, embedding_type, feats_to_keep, C):
    """Pointwise baseline: ridge regression (with RFE feature selection) on sampler features.

    Predicts Kendall's Tau directly; train MSE/variance and final test
    metrics are appended to a config-specific log file.

    NOTE(review): `Ridge(normalize = True)` was removed in scikit-learn 1.2;
    this pins the project to an older sklearn -- confirm the environment.
    """
    start_time = time.time()
    log_file = LOGS_BASE + "linear_regression_{}_C_{}".format(embedding_type, C)
    if feats_to_keep is not None: log_file += "_feats_" + "-".join(map(str, feats_to_keep))
    log_file += '.txt'
    x, y = data.sklearn_regression_data
    ########## Initially
    # model = Ridge(alpha = C).fit(x, y)
    ########## Backward selection
    model = RFE(Ridge(alpha = C, normalize = True), n_features_to_select = 10, step = 1).fit(x, y)
    train_mse = round(np.mean((y - model.predict(x)) ** 2), 4)
    train_var = round(np.var(y), 4)  # variance of targets, for reference against MSE
    output, y = [], []
    for complete, subset, task, metric, tau in data.test_iter(data.test):
        output += model.predict(
            data.sklearn_regression_feature(complete, subset, task.unsqueeze(-1), metric.unsqueeze(-1))
        ).tolist()
        y += tau
    test_metrics = get_metrics(output, y, pointwise = True)
    test_metrics['Train MSE'] = train_mse
    test_metrics['Train Var'] = train_var
    log_end_epoch({ 'log_file': log_file }, test_metrics, "final", time.time() - start_time, metrics_on = '(TEST)')
def train_logistic_regression(data, embedding_type, feats_to_keep, C):
    """Pairwise baseline: logistic regression on sampler-pair preference features.

    Trained on pairwise preference labels; at test time the positive-class
    probability serves as the sampler score.
    """
    start_time = time.time()
    log_file = LOGS_BASE + "logistic_regression_{}_C_{}".format(embedding_type, C)
    if feats_to_keep is not None: log_file += "_feats_" + "-".join(map(str, feats_to_keep))
    log_file += '.txt'
    x, y = data.sklearn_bce_data
    model = LogisticRegression(C = C, max_iter = 3000).fit(x, y)
    train_auc = round(roc_auc_score(y, model.predict_proba(x)[:, 1]), 4)
    output, y = [], []
    for complete, subset, task, metric, tau in data.test_iter(data.test):
        # NOTE(review): `subset` is passed twice -- presumably pairing a
        # candidate with itself to obtain a pointwise score; confirm intended.
        output += model.predict_proba(
            data.sklearn_bce_feature(complete, subset, subset, task.unsqueeze(-1), metric.unsqueeze(-1))
        )[:, 1].tolist()
        y += tau
    test_metrics = get_metrics(output, y, pointwise = False)
    test_metrics['Train AUC'] = train_auc
    log_end_epoch({ 'log_file': log_file }, test_metrics, "final", time.time() - start_time, metrics_on = '(TEST)')
def train_xgboost_regression(data, embedding_type, feats_to_keep, max_depth):
    """Pointwise baseline: XGBoost regressor predicting Kendall's Tau from sampler features."""
    start_time = time.time()
    common_path = "xgboost_regression_{}_depth_{}".format(embedding_type, max_depth)
    if feats_to_keep is not None: common_path += "_feats_" + "-".join(map(str, feats_to_keep))
    x, y = data.sklearn_regression_data
    model = XGBRegressor(max_depth = max_depth).fit(x, y)
    train_mse = round(np.mean((y - model.predict(x)) ** 2), 4)
    train_var = round(np.var(y), 4)  # variance of targets, for reference against MSE
    output, y = [], []
    for complete, subset, task, metric, tau in data.test_iter(data.test):
        output += model.predict(
            data.sklearn_regression_feature(complete, subset, task.unsqueeze(-1), metric.unsqueeze(-1))
        ).tolist()
        y += tau
    test_metrics = get_metrics(output, y, pointwise = True)
    test_metrics['Train MSE'] = train_mse
    test_metrics['Train Var'] = train_var
    log_end_epoch({ 'log_file': LOGS_BASE + "{}.txt".format(common_path) }, test_metrics, "final", time.time() - start_time, metrics_on = '(TEST)')
def train_xgboost_bce(data, embedding_type, feats_to_keep, max_depth):
    """Pairwise baseline: XGBoost classifier on sampler-pair preference labels.

    At test time the hard class prediction is used as the sampler score.
    """
    start_time = time.time()
    common_path = "xgboost_bce_{}_depth_{}".format(embedding_type, max_depth)
    if feats_to_keep is not None: common_path += "_feats_" + "-".join(map(str, feats_to_keep))
    x, y = data.sklearn_bce_data
    model = XGBClassifier(max_depth = max_depth, use_label_encoder=False, eval_metric = "logloss").fit(x, y)
    train_auc = round(roc_auc_score(y, model.predict(x)), 4)
    output, y = [], []
    for complete, subset, task, metric, tau in data.test_iter(data.test):
        # NOTE(review): `subset` is passed twice (self-pairing) -- same pattern
        # as train_logistic_regression; confirm intended.
        output += model.predict(
            data.sklearn_bce_feature(complete, subset, subset, task.unsqueeze(-1), metric.unsqueeze(-1))
        ).tolist()
        y += tau
    test_metrics = get_metrics(output, y, pointwise = False)
    test_metrics['Train AUC'] = train_auc
    log_end_epoch({ 'log_file': LOGS_BASE + "{}.txt".format(common_path) }, test_metrics, "final", time.time() - start_time, metrics_on = '(TEST)')
| 8,933 | 38.883929 | 144 | py |
sampling_cf | sampling_cf-main/data_genie/get_embeddings.py | import gc
import os
import dgl
import snap
import torch
import numpy as np
from tqdm import tqdm
import networkx as nx
from collections import defaultdict
from data_genie.data_genie_config import *
from data_genie.data_genie_utils import save_numpy, load_numpy
from data_genie.data_genie_utils import EMBEDDINGS_PATH_GCN, EMBEDDINGS_PATH_HANDCRAFTED, INFOGRAPH_MODEL_PATH
from load_data import DataHolder
from data_path_constants import get_data_path, get_index_path
from data_genie.InfoGraph.infograph_model import InfoGraph
from data_genie.InfoGraph.infograph_dataset import SyntheticDataset
from data_genie.InfoGraph.train_infograph import train_infograph, argument
def get_embeddings(dataset, embedding_type):
    """Dispatch to the right embedding loader for `dataset`.

    The first 11 characters of `embedding_type` select the family:
    'unsupervise...' -> InfoGraph/GCN embeddings, 'handcrafted' -> handcrafted
    graph features.  Unknown prefixes raise KeyError (as the original dict
    lookup did).
    """
    family = embedding_type[:11]
    if family == 'unsupervise':
        loader = get_embeddings_gcn
    elif family == 'handcrafted':
        loader = get_embeddings_handcrafted
    else:
        raise KeyError(family)
    return loader(dataset, embedding_type)
def get_embeddings_gcn(dataset, model_file):
    """Load (computing on a cache miss) the InfoGraph/GCN embeddings for `dataset`.

    `model_file` encodes the InfoGraph configuration, e.g. contains
    "..._dim_32_..._layers_2_...": the tokens following "dim" and "layers"
    identify which trained model / cache file to use.
    """
    dim, layers = None, None
    tokens = model_file.split("_")
    for pos, token in enumerate(tokens):
        if token == "dim":
            dim = int(tokens[pos + 1])
        elif token == "layers":
            layers = int(tokens[pos + 1])
    cache_path = EMBEDDINGS_PATH_GCN(dataset, dim, layers)
    if not os.path.exists(cache_path + ".npy"):
        prep_gcn_embeddings(dim, layers)
    return load_numpy(cache_path)
def prep_gcn_embeddings(dim, layers):
    """Compute and cache InfoGraph (GCN) embeddings for ALL datasets at once.

    Trains the InfoGraph model first if the requested (dim, layers)
    configuration has no checkpoint, then embeds every cached interaction
    graph and scatters the rows into one array per dataset, saved under
    EMBEDDINGS_PATH_GCN.  Each row is
    [ InfoGraph graph embedding (dim * layers) | 5 basic dataset stats ].
    """
    # Step-1: Check if the unsupervised GCN Model has been trained?
    if not os.path.exists(INFOGRAPH_MODEL_PATH(dim, layers)):
        print("Specified InfoGraph configuration not trained yet, training now..")
        infograph_args = argument()
        infograph_args.n_layers = layers
        infograph_args.hid_dim = dim
        train_infograph(infograph_args)
    print("Loading best InfoGraph model..")
    model = InfoGraph(dim, layers)
    model.load_state_dict(torch.load(INFOGRAPH_MODEL_PATH(dim, layers)))
    model.eval()
    print("Loading data..")
    # Keep the dimension of node features fixed to a reasonable value.
    # If you want to change this, please also change at `InfoGraph/infograph_model.py` line 134
    dataset = SyntheticDataset(feature_dimension = 32)
    graphs, _ = map(list, zip(*dataset))
    num_graphs = len(graphs)
    # Embeddings
    print("Getting GCN embeddings..")
    print(len(graphs))
    BSZ = 32 # Batch-size for predicting dataset embeddings
    emb = np.zeros([ len(graphs), (dim * layers) + 5 ])
    for b in tqdm(range(0, len(graphs), BSZ)):
        wholegraph = dgl.batch(graphs[b:b+BSZ])
        wholegraph.ndata['attr'] = wholegraph.ndata['attr'].to(torch.float32)
        emb[b:b+BSZ] = np.hstack([
            model.get_embedding(wholegraph).cpu().numpy(),
            np.array(dataset.basic_data_stats_features[b:b+BSZ], dtype = np.float32)
        ])
    # Free the (large) graph list before allocating the per-dataset arrays.
    del graphs, dataset, model
    gc.collect()
    # NOTE: Since we'll anyways be training on ALL datasets, we'll prep embeddings for ALL datasets at once
    # `at` walks `emb` in the exact order SingleDataset.process() generated the
    # graphs, so the nested loops below must stay in sync with that method.
    # NOTE(review): SyntheticDataset drops graphs with < 50 nodes; if any were
    # dropped this indexing misaligns -- the final assert guards that. Confirm.
    at = 0
    for d in datasets:
        final = np.zeros([ total_embeddings, (dim * layers) + 5 ])
        for task, metrics in scenarios:
            final[get_embedding_id(task, 'complete_data', 0)] = emb[at] ; at += 1
            for sampling_percent in percent_rns_options:
                for sampling in all_samplers:
                    final[get_embedding_id(task, sampling, sampling_percent)] = emb[at] ; at += 1
        save_numpy(EMBEDDINGS_PATH_GCN(d, dim, layers), final)
    assert at == num_graphs
def get_embeddings_handcrafted(dataset, model_file):
    """Load (computing on a cache miss) the handcrafted feature embeddings.

    `model_file` is unused; it is accepted only so this loader has the same
    signature as `get_embeddings_gcn` for dispatching.
    """
    cache_path = EMBEDDINGS_PATH_HANDCRAFTED(dataset)
    if not os.path.exists(cache_path + ".npy"):
        prep_handcrafted_embeddings(dataset, cache_path)
    return load_numpy(cache_path)
def prep_handcrafted_embeddings(dataset, save_path):
    """Compute handcrafted feature vectors for every (task, sampler, percent)
    combination of `dataset` and save them as one array at `save_path`.

    Row positions are assigned via get_embedding_id, matching the layout the
    rest of Data-Genie expects.
    """
    final = np.zeros([ total_embeddings, NUM_FEAUTRES ])
    print("Getting handcrafted embeddings..")
    loop = tqdm(total = len(all_samplers) * len(scenarios) * len(percent_rns_options))
    for task, metrics in scenarios:
        # Row for the un-sampled (100%) dataset.
        final[get_embedding_id(task, 'complete_data', 0)] = get_single_feature(dataset, task, 'complete_data', 0)
        for sampling_percent in percent_rns_options:
            # Plain sampling strategies.
            for sampling in sampling_kinds:
                final[get_embedding_id(task, sampling, sampling_percent)] = get_single_feature(dataset, task, sampling, sampling_percent)
                loop.update(1)
            # Selection-via-proxy (SVP): one row per (proxy method, sampler).
            for svp_method in svp_methods:
                for sampling in sampling_svp:
                    name = "svp_{}_{}".format(svp_method, sampling)
                    final[get_embedding_id(task, name, sampling_percent)] = get_single_feature(
                        dataset, task, "svp_{}".format(svp_method), sampling_percent, svp = sampling
                    )
                    loop.update(1)
    loop.close()
    save_numpy(save_path, final)
def get_single_feature(dataset, task, sampling, sampling_percent, svp = None):
    """Handcrafted feature vector for one (dataset, task, sampler, percent) combo.

    Loads the corresponding sampled dataset and concatenates:
    five basic size statistics (scaled to millions), the sampled user/item
    degree distributions, and graph-level characteristics.
    """
    hyper_params = {
        'dataset': dataset,
        'task': task,
        'sampling': sampling,
        'sampling_percent': sampling_percent,
        'sampling_svp': svp
    }
    data = DataHolder(get_data_path(hyper_params), get_index_path(hyper_params))
    MIL = float(1e6)
    # Basic size statistics, scaled to millions.
    features = [
        float(data.num_users) / MIL,
        float(data.num_items) / MIL,
        float(data.num_train_interactions) / MIL,
        float(data.num_val_interactions) / MIL,
        float(data.num_test_interactions) / MIL,
    ]
    features += degree_distribution(data)
    features += graph_characteristics(data)
    return features
def degree_distribution(data):
    """Sampled user- and item-degree distributions over the kept interactions.

    Interactions whose `data.index` entry is -1 are treated as dropped by the
    sampler and ignored.  Returns the concatenation of the two sampled
    (ascending) degree distributions.
    """
    user_degree = defaultdict(int)
    item_degree = defaultdict(int)
    for position, (user, item, _rating) in enumerate(data.data):
        if data.index[position] == -1:
            continue
        user_degree[user] += 1
        item_degree[item] += 1
    sorted_user_degrees = sorted(user_degree.values())
    sorted_item_degrees = sorted(item_degree.values())
    return sample_distribution(sorted_user_degrees) + sample_distribution(sorted_item_degrees)
def graph_characteristics(data):
    """Graph-level features of the user-item interaction graph: sampled
    distributions of node degrees, connected-component sizes, hop-plot counts,
    the top-100 adjacency eigenvalues (descending), and clustering coefficients.
    """
    g, snap_g = create_nx_graph(data)
    return \
    sample_distribution([ g.degree(n) for n in g.nodes() ]) + \
    sample_distribution([ len(c) for c in nx.connected_components(g) ]) + \
    sample_distribution(hop_plot(snap_g)) + \
    sample_distribution(snap_g.GetEigVals(100), reverse = True) + \
    sample_distribution(clustering_coefficient(snap_g))
def hop_plot(g):
    """Hop-plot values of the SNAP graph (one float per hop count).

    SNAP's PlotHops writes its results to side files (hop.temp.{plt,tab,png});
    we parse the second tab-separated column of the .tab file (after its 4
    header lines) and remove all three side files afterwards.
    """
    g.PlotHops("temp", "Random Graph hop plot", False, 128)
    # FIX: use a context manager for the file handle -- the original left the
    # handle variable `f` to be shadowed by the cleanup loop below.
    with open("hop.temp.tab", 'r') as tab_file:
        lines = tab_file.readlines()
    for side_file in [ "hop.temp.png", "hop.temp.tab", "hop.temp.plt" ]: os.remove(side_file)
    return list(map(lambda x: float(x.strip().split("\t")[1]), lines[4:]))
def clustering_coefficient(g):
    """Clustering-coefficient profile of the SNAP graph.

    GetClustCf(True, -1) returns the average coefficient plus a vector of
    pairs -- presumably (degree, avg coefficient at that degree); we keep only
    the second value of each pair.  TODO confirm pair semantics vs. SNAP docs.
    """
    Cf, CfVec = g.GetClustCf(True, -1)
    return [ pair.GetVal2() for pair in CfVec ]
def sample_distribution(distribution, reverse = False):
    """Down-sample a distribution to NUM_SAMPLES values.

    Sorts `distribution` (descending when `reverse`) and picks NUM_SAMPLES
    evenly spaced positions, endpoints included.  Assumes `distribution` is
    non-empty (linspace over [0, -1] would otherwise yield negative indices).
    """
    distribution = sorted(distribution, reverse = reverse)
    # FIX: dropped the unused local `n` the original computed on this line.
    to_pick = np.round(np.linspace(0, len(distribution) - 1, NUM_SAMPLES)).astype(int)
    return [ distribution[i] for i in to_pick ]
def create_nx_graph(data):
    """Build the undirected bipartite user-item interaction graph twice:
    once as a networkx graph and once as a SNAP TUNGraph (SNAP is used for the
    heavier statistics: hop plots, eigenvalues, clustering coefficients).

    Only interactions whose `data.index` entry is not -1 (kept by the sampler)
    become edges.  Users and items share one contiguous node-id space, and the
    two graphs use identical node ids.  Returns (networkx_graph, snap_graph).
    """
    g = nx.Graph()
    snap_g = snap.TUNGraph.New()
    # Add nodes & edges
    user_map, item_map, node_num = {}, {}, 0
    for at, (u, i, r) in enumerate(data.data):
        if data.index[at] != -1:
            if u not in user_map:
                user_map[u] = node_num
                g.add_node(node_num)
                snap_g.AddNode(node_num)
                node_num += 1
            if i not in item_map:
                item_map[i] = node_num
                g.add_node(node_num)
                snap_g.AddNode(node_num)
                node_num += 1
            g.add_edge(user_map[u], item_map[i])
            snap_g.AddEdge(user_map[u], item_map[i])
    assert node_num == g.number_of_nodes()
    return g, snap_g
| 7,131 | 33.960784 | 125 | py |
sampling_cf | sampling_cf-main/data_genie/data_genie_data.py | import torch
import numpy as np
from torch_utils import LongTensor, FloatTensor, is_cuda_available
from data_genie.data_genie_config import *
from data_genie.get_data import get_data_pointwise, get_data_pairwise
from data_genie.get_embeddings import get_embeddings
from data_genie.InfoGraph.infograph_dataset import SyntheticDataset
# NOTE: Helper functions
def sel(d, i):
    """Project element `i` out of every indexable row of `d`."""
    return [row[i] for row in d]
def sel2(d, i):
    """Like `sel`, but one level deeper: project `i` out of every row of every group."""
    return [[row[i] for row in group] for group in d]
# Main class for manipulating and iterating over training data for DataGenie
class OracleData:
    """Train/val/test container for Data-Genie.

    Joins per-dataset embeddings (GCN or handcrafted) with the oracle targets
    (Kendall's Tau between algorithm rankings on full vs. sampled data), and
    exposes shuffled batch iterators plus sklearn-style flattened views.
    """
    def __init__(self, datasets, feats_to_keep, embedding_type, bsz, pointwise):
        self.feats_to_keep, self.bsz, self.embedding_type = feats_to_keep, bsz, embedding_type
        # Pointwise training regresses tau directly; pairwise compares subsets.
        get_final_data = get_data_pointwise if pointwise else get_data_pairwise
        # Get all embeddings
        self.embeddings = {}
        for dataset in datasets: self.embeddings[dataset] = get_embeddings(dataset, embedding_type)
        self.embeddings = self.normalize_embeddings(self.embeddings)
        # Create data
        self.dataset_indices, train_data, val_data, test_data = [], None, None, None
        for dataset in datasets:
            # NOTE(review): after the first dataset, len(train_data) is always 5
            # (the number of fields) -- len(train_data[0]) looks intended. Confirm.
            self.dataset_indices.append(0 if train_data is None else len(train_data))
            this_dataset = self.join(get_final_data(dataset), self.embeddings[dataset], pointwise)
            if train_data is None: train_data, val_data, test_data = this_dataset
            else:
                # Append this dataset's 5 field-lists onto the accumulated splits.
                for at, container in enumerate([ train_data, val_data, test_data ]):
                    for i in range(5): container[i] += this_dataset[at][i]
        print("\n{} data:".format("Pointwise" if pointwise else "Pairwise"))
        print("# of training points:", len(train_data[0]))
        print("# of validation points:", len(val_data[0]) // len(all_samplers))
        print("# of testing points:", len(test_data[0]) // len(all_samplers))
        # NOTE: Each entry inside `train_data`, `val_data`, `test_data` is a 5-tuple:
        # 0. Full-data embedding ID
        # 1. Subset embedding ID
        # 2. Task ID
        # 3. Metric ID
        # 4. Y: Kendall's Tau between the ranked list of recommendation algorithms \
        # trained on full vs. the data-subset (what we want to predict w/ Data-Genie)
        # Move data to GPU
        self.val = [ FloatTensor(val_data[0]), FloatTensor(val_data[1]), LongTensor(val_data[2]), LongTensor(val_data[3]), val_data[4] ]
        self.test = [ FloatTensor(test_data[0]), FloatTensor(test_data[1]), LongTensor(test_data[2]), LongTensor(test_data[3]), test_data[4] ]
        if pointwise:
            self.train = [
                FloatTensor(train_data[0]), FloatTensor(train_data[1]), LongTensor(train_data[2]),
                LongTensor(train_data[3]), FloatTensor(train_data[4])
            ]
        else:
            # Pairwise layout: [ complete, pos-subset, neg-subset, task, metric ].
            self.train = [
                FloatTensor(train_data[0]), FloatTensor(train_data[1]), FloatTensor(train_data[2]),
                LongTensor(train_data[3]), LongTensor(train_data[4])
            ]
    def normalize_embeddings(self, embeddings):
        """Z-normalize handcrafted embeddings (past the 5 basic stats); GCN
        embeddings (feats_to_keep is None) are returned untouched."""
        combined_embeddings = np.concatenate(list(embeddings.values()), axis=0)
        # GCN-embeddings
        if self.feats_to_keep is None: pass
        # Handcrafted embeddings
        else:
            mean, std = self.get_mean_std(combined_embeddings)
            for dataset in embeddings:
                embeddings[dataset] = self.select_and_norm(embeddings[dataset], mean, std)
        return embeddings
    def join(self, all_data, embeddings, pointwise):
        """Replace embedding-IDs (fields 0/1, and 2 for pairwise train) with
        the actual embedding vectors."""
        train, val, test = all_data
        def join(data, indices):
            for i in indices: data[i] = list(map(lambda x: embeddings[x], data[i]))
            return data
        test, val = map(lambda x: join(x, [ 0, 1 ]), [ test, val ])
        if pointwise: train = join(train, [ 0, 1 ])
        else: train = join(train, [ 0, 1, 2 ])
        return [ train, val, test ]
    def shuffle_train(self):
        """Shuffle all five train tensors with one shared permutation (in place)."""
        num_train_interactions = len(self.train[0])
        rand_indices = np.arange(num_train_interactions) ; np.random.shuffle(rand_indices)
        rand_indices_tensor = LongTensor(rand_indices)
        for i in range(len(self.train)):
            self.train[i] = self.train[i][rand_indices_tensor]
        return self.train
    # Convert task/metric index to one-hot vector
    def one_hot(self, index, total):
        # NOTE(review): the else-branch expects a 1-D index of shape (1,); a
        # batched (1, 1) index with batch-size 1 would hit it and fail. Confirm.
        if index.shape[0] > 1:
            ret = torch.zeros(index.shape[0], total)
            if is_cuda_available: ret = ret.cuda()
            ret.scatter_(1, index, 1.0)
            return ret
        ret = torch.zeros(total)
        if is_cuda_available: ret = ret.cuda()
        ret.scatter_(0, index, 1.0)
        return ret
    def sklearn_regression_feature(self, complete, subset, task, metric):
        """Flatten one (complete, subset, task, metric) example to a numpy row."""
        return torch.cat([ complete, subset, self.one_hot(task, 3), self.one_hot(metric, 4) ], axis = -1).cpu().numpy()
    def sklearn_bce_feature(self, complete, pos, neg, task, metric):
        """Flatten one (complete, pos-subset, neg-subset, task, metric) example."""
        return torch.cat([ complete, pos, neg, self.one_hot(task, 3), self.one_hot(metric, 4) ], axis = -1).cpu().numpy()
    @property
    def sklearn_regression_data(self):
        """Shuffled (x, y) numpy arrays for sklearn-style tau regression."""
        shuffled_train = self.shuffle_train()
        x, y = [], []
        for b in range(shuffled_train[0].shape[0]):
            x.append(self.sklearn_regression_feature(
                shuffled_train[0][b], shuffled_train[1][b],
                shuffled_train[2][b].unsqueeze(-1), shuffled_train[3][b].unsqueeze(-1)
            ))
            y.append(float(shuffled_train[4][b]))
        return np.asarray(x), np.asarray(y)
    @property
    def sklearn_bce_data(self):
        """Shuffled (x, y) numpy arrays for sklearn-style pairwise classification:
        each pair is randomly emitted as (pos, neg, label 1) or (neg, pos, label 0)."""
        shuffled_train = self.shuffle_train()
        x, y = [], []
        for b in range(shuffled_train[0].shape[0]):
            pos_or_neg = np.random.uniform()
            if pos_or_neg > 0.5:
                x.append(self.sklearn_bce_feature(
                    shuffled_train[0][b], shuffled_train[1][b], shuffled_train[2][b],
                    shuffled_train[3][b].unsqueeze(-1), shuffled_train[4][b].unsqueeze(-1)
                ))
                y.append(1)
            else:
                x.append(self.sklearn_bce_feature(
                    shuffled_train[0][b], shuffled_train[2][b], shuffled_train[1][b],
                    shuffled_train[3][b].unsqueeze(-1), shuffled_train[4][b].unsqueeze(-1)
                ))
                y.append(0)
        return np.asarray(x), np.asarray(y)
    def train_iter(self):
        """Yield shuffled train mini-batches of size self.bsz (5 tensors each)."""
        num_train_interactions = len(self.train[0])
        shuffled_train = self.shuffle_train()
        for b in range(0, num_train_interactions, self.bsz):
            l, r = b, b + self.bsz
            yield shuffled_train[0][l:r], shuffled_train[1][l:r], shuffled_train[2][l:r], \
            shuffled_train[3][l:r], shuffled_train[4][l:r]
    def test_iter(self, data):
        """Yield mini-batches of `data` (val or test split) without shuffling."""
        for b in range(0, len(data[0]), self.bsz):
            l, r = b, b + self.bsz
            yield data[0][l:r], data[1][l:r], data[2][l:r], \
            data[3][l:r], data[4][l:r]
    def select_and_norm(self, data, mean, std):
        """Keep the 5 basic stats plus the NUM_SAMPLES-wide chunks for the
        requested feature groups, then z-normalize everything past column 5."""
        data = np.asarray(data)
        assert data.shape[-1] == NUM_FEAUTRES
        indices = []
        for f in self.feats_to_keep: indices += list(range(NUM_SAMPLES * f, NUM_SAMPLES * (f+1)))
        mean = mean[np.array(indices)] ; std = std[np.array(indices)]
        # The first 5 columns (basic stats) are always kept and never normalized.
        indices = list(range(5)) + list(map(lambda x: x+5, indices))
        indices = np.array(indices)
        if len(data.shape) == 2:
            data = data[:, indices]
            data[:, 5:] -= mean
            data[:, 5:] /= std
        else:
            data = data[:, :, indices]
            data[:, :, 5:] -= mean
            data[:, :, 5:] /= std
        return data
    def get_mean_std(self, combined_data):
        """Column-wise mean/std over all embeddings (stats columns only);
        std is floored at 1e-6 to avoid division by zero."""
        assert combined_data.shape[1] == NUM_FEAUTRES
        temp_data = combined_data[:, 5:NUM_FEAUTRES]
        std = np.array(list(map(lambda x: max(x, float(1e-6)), np.std(temp_data, axis = 0))))
        return np.mean(temp_data, axis = 0), std
| 7,077 | 33.526829 | 136 | py |
sampling_cf | sampling_cf-main/data_genie/data_genie_model.py | import dgl
import torch
import torch.nn as nn
from torch_utils import is_cuda_available
from data_genie.data_genie_loss import PointwiseLoss, PairwiseLoss
# NOTE: Below two are the training classes for data-genie: pointwise/pairwise
class PointwiseDataGenie:
    """Training wrapper for Data-Genie in the *pointwise* setting: the network
    regresses the Kendall's-Tau target directly (output squashed to [-1, 1])."""
    def __init__(self, hyper_params, writer, xavier_init):
        self.hyper_params = hyper_params
        self.writer = writer
        self.model = DataGenie(hyper_params)
        if is_cuda_available: self.model.cuda()
        xavier_init(self.model) ; print(self.model)
        self.criterion = PointwiseLoss()
        self.optimizer = torch.optim.Adam(
            self.model.parameters(), lr=hyper_params['lr'], betas=(0.9, 0.98),
            weight_decay=hyper_params['weight_decay']
        )
    def step(self, data_batch):
        """One forward pass; returns (loss to backprop, summed loss, batch size)."""
        complete, subset, task, metric, y = data_batch
        # Forward pass
        output = torch.tanh(self.model(complete, subset, task, metric)) # Since in range [ -1, 1 ]
        # NOTE(review): return_mean=True suggests `loss` is already a scalar mean,
        # yet it is re-reduced with mean/sum below as if per-sample; combined with
        # get_metric_dict dividing by `total`, confirm PointwiseLoss semantics.
        loss = self.criterion(output, y, return_mean = True)
        return torch.mean(loss), float(torch.sum(loss)), float(output.shape[0])
    def get_metric_dict(self, mse, total): return { 'MSE': round(mse / total, 4) }
    def save(self): torch.save(self.model.state_dict(), self.hyper_params['model_file'])
    def load(self): self.model.load_state_dict(torch.load(self.hyper_params['model_file']))
class PairwiseDataGenie:
    """Training wrapper for Data-Genie in the *pairwise* setting: the network
    scores two subsets of the same dataset and is trained to rank the better
    one (pos) above the other (neg)."""
    def __init__(self, hyper_params, writer, xavier_init):
        self.hyper_params = hyper_params
        self.writer = writer
        self.model = DataGenie(hyper_params)
        if is_cuda_available: self.model.cuda()
        xavier_init(self.model) ; print(self.model)
        self.criterion = PairwiseLoss()
        self.optimizer = torch.optim.Adam(
            self.model.parameters(), lr=hyper_params['lr'], betas=(0.9, 0.98),
            weight_decay=hyper_params['weight_decay']
        )
    def step(self, data_batch):
        """One forward pass; returns (loss, #correctly ordered pairs, batch size)."""
        complete, pos, neg, task, metric = data_batch
        # Forward pass
        pos_output = self.model(complete, pos, task, metric)
        neg_output = self.model(complete, neg, task, metric)
        loss = self.criterion(pos_output, neg_output, return_mean = True)
        # Metric: count of pairs the model orders correctly (feeds batch AUC).
        auc = float(torch.sum(pos_output > neg_output))
        return loss, auc, float(pos_output.shape[0])
    def get_metric_dict(self, auc, total): return { 'AUC': round(auc / total, 4) }
    def save(self): torch.save(self.model.state_dict(), self.hyper_params['model_file'])
    def load(self): self.model.load_state_dict(torch.load(self.hyper_params['model_file']))
# NOTE: Below is the actual data-genie pytorch model
class DataGenie(nn.Module):
    """Data-Genie scoring network: concatenates (full-data features, subset
    features, task embedding, metric embedding) and maps them through a small
    MLP to one scalar score per example."""
    def __init__(self, hyper_params):
        super(DataGenie, self).__init__()
        self.feats = hyper_params['feats']
        if self.feats is None:
            # GCN embeddings: infer the feature width from the embedding-type
            # string, e.g. "..._dim_32_..._layers_2_..." -> (32 * 2) + 5 stats.
            dim, layers = None, None
            splitted = hyper_params['embedding_type'].split("_")
            for i, word in enumerate(splitted):
                if word == "dim": dim = int(splitted[i+1])
                if word == "layers": layers = int(splitted[i+1])
            self.feats = (dim * layers) + 5
        # 3 tasks and 4 metrics, each given a small learned embedding.
        self.task_embedding = nn.Embedding(3, 3)
        self.metric_embedding = nn.Embedding(4, 4)
        self.final = nn.Sequential(
            nn.Linear((2 * self.feats) + 3 + 4, hyper_params['dim']),
            nn.Dropout(hyper_params['dropout']),
            nn.ReLU(),
            nn.Linear(hyper_params['dim'], 1),
        )
    def forward(self, complete_feats, subset_feats, task, metric):
        # Returns a 1-D tensor of scores (the trailing singleton dim is dropped).
        return self.final(torch.cat([
            complete_feats,
            subset_feats,
            self.task_embedding(task), self.metric_embedding(metric)
        ], axis = -1))[:, 0]
| 3,352 | 31.240385 | 92 | py |
sampling_cf | sampling_cf-main/data_genie/InfoGraph/infograph_dataset.py | from dgl import save_graphs, load_graphs
from dgl.data import DGLDataset
from tqdm import tqdm
import numpy as np
import networkx as nx
import torch
import dgl
import os
from load_data import DataHolder
from data_path_constants import get_data_path, get_index_path
from data_genie.data_genie_config import *
from data_genie.data_genie_utils import save_numpy, load_numpy
from data_genie.data_genie_utils import INFOGRAPH_CACHED_GRAPHS, INFOGRAPH_CACHED_DATA_STATS
'''
This is a wrapper class which contains ALL the CF datasets (and their sampled subsets)
that we want to train Data-Genie on
'''
class SyntheticDataset(DGLDataset):
    """Concatenation of every CF dataset's interaction graphs (full data plus
    all sampled subsets), built from one SingleDataset per dataset.  Graphs
    too small to keep (None placeholders) are filtered out, with the original
    count preserved in `orig_total`."""
    def __init__(self, feature_dimension):
        # Width of the random node-feature vectors attached to every graph.
        self.feature_dimension = feature_dimension
        super().__init__(name='synthetic')
    def process(self):
        self.graphs = []
        self.basic_data_stats_features = []
        for dataset in datasets:
            single_dataset = SingleDataset(self.feature_dimension, dataset)
            self.graphs += single_dataset.graphs
            self.basic_data_stats_features += single_dataset.basic_data_stats_features
        self.orig_total = len(self.graphs) # Including None
        assert len(self.basic_data_stats_features) == len(self.graphs)
        # Remove very small subsets (SingleDataset recorded them as None).
        self.graphs = list(filter(lambda x: x is not None, self.graphs))
        self.basic_data_stats_features = list(filter(lambda x: x is not None, self.basic_data_stats_features))
        assert len(self.basic_data_stats_features) == len(self.graphs)
    def __getitem__(self, i):
        # NOTE: Return 0 as the graph-label
        # We don't care about the label since this is an unsupervised task
        return self.graphs[i], 0
    def __len__(self): return len(self.graphs)
class SingleDataset(DGLDataset):
    """DGL dataset of ONE CF dataset's user-item interaction graphs: the full
    data plus every (task, sampler, percent) subset.  Graphs and the matching
    5-value basic-stats vectors are cached on disk after the first build."""
    def __init__(self, feature_dimension, dataset):
        self.feature_dimension = feature_dimension
        self.dataset = dataset
        super().__init__(name='synthetic')
    # Randomly init all node features (no implicit representation of user/item nodes)
    def get_random_node_features(self, graph):
        graph.ndata['attr'] = torch.empty(graph.num_nodes(), self.feature_dimension)
        torch.nn.init.xavier_uniform_(graph.ndata['attr'], gain=torch.nn.init.calculate_gain('relu'))
        return graph
    # Make the user-item interaction graph of the given CF dataset
    def add_graph(self, hyper_params):
        data = DataHolder(get_data_path(hyper_params), get_index_path(hyper_params))
        g = nx.Graph()
        # Add nodes & edges; users and items share one contiguous node-id space,
        # and interactions with index == -1 were dropped by the sampler.
        user_map, item_map, node_num = {}, {}, 0
        for at, (u, i, r) in enumerate(data.data):
            if data.index[at] != -1:
                if u not in user_map:
                    user_map[u] = node_num
                    g.add_node(node_num)
                    node_num += 1
                if i not in item_map:
                    item_map[i] = node_num
                    g.add_node(node_num)
                    node_num += 1
                g.add_edge(user_map[u], item_map[i])
        assert node_num == g.number_of_nodes()
        # If nodes are too less return None (placeholder keeps positions aligned)
        if node_num < 50:
            self.graphs.append(None)
            self.basic_data_stats_features.append(None)
            return None
        self.graphs.append(self.get_random_node_features(dgl.from_networkx(g)))
        MIL = float(1e6)
        # Basic size statistics, scaled to millions.
        self.basic_data_stats_features.append([
            float(data.num_users) / MIL,
            float(data.num_items) / MIL,
            float(data.num_train_interactions) / MIL,
            float(data.num_val_interactions) / MIL,
            float(data.num_test_interactions) / MIL
        ])
    def process(self):
        # NOTE: the generation order below must stay in sync with the scatter
        # loop in prep_gcn_embeddings (data_genie/get_embeddings.py).
        self.graphs = []
        self.basic_data_stats_features = []
        total_samplers = (len(svp_methods) * len(sampling_svp)) + len(sampling_kinds)
        loop = tqdm(total = len(scenarios) * (1 + (len(percent_rns_options) * total_samplers)))
        for task, metrics in scenarios:
            # Full dataset
            self.add_graph({
                'dataset': self.dataset,
                'task': task,
                'sampling': 'complete_data',
            }) ; loop.update(1)
            # Sub-sampled dataset
            for sampling_percent in percent_rns_options:
                for sampling in sampling_kinds:
                    self.add_graph({
                        'dataset': self.dataset,
                        'task': task,
                        'sampling': sampling,
                        'sampling_percent': sampling_percent,
                    }) ; loop.update(1)
                for svp_method in svp_methods:
                    for sampling in sampling_svp:
                        self.add_graph({
                            'dataset': self.dataset,
                            'task': task,
                            'sampling': "svp_{}".format(svp_method),
                            'sampling_svp': sampling,
                            'sampling_percent': sampling_percent,
                        }) ; loop.update(1)
        loop.close()
        assert len(self.graphs) == len(self.basic_data_stats_features)
    def save(self):
        # Persist graphs (DGL format) and the aligned stats vectors (numpy).
        save_graphs(self.graph_path, self.graphs)
        save_numpy(
            self.basic_data_stats_features_path,
            np.array(self.basic_data_stats_features)
        )
    def load(self):
        self.graphs, _ = load_graphs(self.graph_path)
        self.basic_data_stats_features = load_numpy(self.basic_data_stats_features_path).tolist()
    def has_cache(self): return os.path.exists(self.graph_path)
    @property
    def graph_path(self): return INFOGRAPH_CACHED_GRAPHS(self.dataset)
    @property
    def basic_data_stats_features_path(self): return INFOGRAPH_CACHED_DATA_STATS(self.dataset)
| 4,944 | 30.698718 | 104 | py |
sampling_cf | sampling_cf-main/data_genie/InfoGraph/infograph_model.py | ''' Credit https://github.com/hengruizhang98/InfoGraph & https://github.com/fanyun-sun/InfoGraph '''
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential, ModuleList, Linear, ReLU, BatchNorm1d
from dgl.nn import GINConv
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, SortPooling
from data_genie.InfoGraph.infograph_utils import local_global_loss_
''' Feedforward neural network'''
class FeedforwardNetwork(nn.Module):
    '''
    Three Linear+ReLU layers plus a linear skip ("jumping") connection.

    Parameters
    -----------
    in_dim: int, Input feature size.
    hid_dim: int, Hidden (and output) feature size.

    Functions
    -----------
    forward(feat):
        feat: Tensor, [N * D], input features
    '''
    def __init__(self, in_dim, hid_dim):
        super(FeedforwardNetwork, self).__init__()
        # Main trunk: Linear -> ReLU stacked three times.
        self.block = Sequential(
            Linear(in_dim, hid_dim),
            ReLU(),
            Linear(hid_dim, hid_dim),
            ReLU(),
            Linear(hid_dim, hid_dim),
            ReLU()
        )
        # Skip connection projecting the input straight to the output width.
        self.jump_con = Linear(in_dim, hid_dim)

    def forward(self, feat):
        # Trunk output plus the linear skip projection of the raw input.
        return self.block(feat) + self.jump_con(feat)
''' Unsupervised Setting '''
class GINEncoder(nn.Module):
    '''
    Encoder based on dgl.nn.GINConv & dgl.nn.SortPooling

    Parameters
    -----------
    in_dim: int, Input feature size.
    hid_dim: int, Hidden feature size.
    n_layer: int, number of GIN layers.

    Functions
    -----------
    forward(graph, feat):
        graph: dgl.Graph,
        feat: Tensor, [N * D], node features

    Returns (global_emb, local_emb): graph-level embeddings from pooling and
    per-node embeddings, both of width hid_dim * n_layer (layer outputs are
    concatenated).
    '''
    def __init__(self, in_dim, hid_dim, n_layer):
        super(GINEncoder, self).__init__()

        self.n_layer = n_layer

        self.convs = ModuleList()
        self.bns = ModuleList()

        for i in range(n_layer):
            # First layer maps in_dim -> hid_dim; the rest hid_dim -> hid_dim.
            if i == 0: n_in = in_dim
            else: n_in = hid_dim
            n_out = hid_dim
            # Per-layer MLP used inside GINConv ('sum' neighbor aggregation).
            block = Sequential(
                Linear(n_in, n_out),
                ReLU(),
                Linear(hid_dim, hid_dim)
            )

            conv = GINConv(block, 'sum')
            bn = BatchNorm1d(hid_dim)

            self.convs.append(conv)
            self.bns.append(bn)

        # Pooling (alternatives left by the authors for experimentation)
        # self.pool = SumPooling()
        # self.pool = AvgPooling()
        self.pool = SortPooling(1)

    def forward(self, graph, feat):
        xs = []
        x = feat
        for i in range(self.n_layer):
            x = F.relu(self.convs[i](graph, x))
            x = self.bns[i](x)
            xs.append(x)

        local_emb = th.cat(xs, 1)  # patch-level embedding
        global_emb = self.pool(graph, local_emb)  # graph-level embedding

        return global_emb, local_emb
class InfoGraph(nn.Module):
    r"""
    InfoGraph model for unsupervised setting

    Maximizes mutual information between graph-level and node-level
    representations produced by a shared GIN encoder (JSD estimator).

    Parameters
    -----------
    hid_dim: int
        Hidden feature size per GIN layer (embedding width is hid_dim * n_layer).
    n_layer: int
        Number of GIN layers.

    Functions
    -----------
    forward(graph):
        graph: dgl.Graph (batched; must carry 'attr' and 'graph_id' node data)
    """
    def __init__(self, hid_dim, n_layer):
        super(InfoGraph, self).__init__()

        self.in_dim = 32 # Keep it fixed to a reasonable value for our experiments
        self.hid_dim = hid_dim
        self.n_layer = n_layer
        embedding_dim = hid_dim * n_layer

        self.encoder = GINEncoder(self.in_dim, hid_dim, n_layer)

        self.local_d = FeedforwardNetwork(embedding_dim, embedding_dim)  # local discriminator (node-level)
        self.global_d = FeedforwardNetwork(embedding_dim, embedding_dim)  # global discriminator (graph-level)

    # get_embedding function for evaluation the learned embeddings
    def get_embedding(self, graph):
        # Inference only: no gradients needed.
        with th.no_grad():
            feat = graph.ndata['attr']
            global_emb, _ = self.encoder(graph, feat)

            return global_emb

    def forward(self, graph):
        feat = graph.ndata['attr']
        graph_id = graph.ndata['graph_id']

        global_emb, local_emb = self.encoder(graph, feat)

        global_h = self.global_d(global_emb)  # global hidden representation
        local_h = self.local_d(local_emb)  # local hidden representation

        # Jensen-Shannon MI estimator between node and graph representations.
        measure = 'JSD'
        loss = local_global_loss_(local_h, global_h, graph_id, measure)

        return loss
| 4,413 | 25.590361 | 110 | py |
sampling_cf | sampling_cf-main/data_genie/InfoGraph/train_infograph.py | import dgl
import time
import argparse
import torch as th
from dgl.dataloading import GraphDataLoader
from tqdm import tqdm
from data_genie.data_genie_utils import INFOGRAPH_MODEL_PATH
from data_genie.InfoGraph.infograph_model import InfoGraph
from data_genie.InfoGraph.infograph_dataset import SyntheticDataset
def argument():
    """Parse InfoGraph command-line arguments and resolve `args.device`
    ('cuda:<gpu>' when a GPU index is given and CUDA is available, else 'cpu')."""
    parser = argparse.ArgumentParser(description='InfoGraph')
    # training params
    parser.add_argument('--gpu', type=int, default=-1, help='GPU index, default:-1, using CPU.')
    parser.add_argument('--epochs', type=int, default=20, help='Training epochs.')
    parser.add_argument('--batch_size', type=int, default=128, help='Training batch size.')
    parser.add_argument('--lr', type=float, default=0.01, help='Learning rate.')
    # model params
    parser.add_argument('--n_layers', type=int, default=2, help='Number of graph convolution layers before each pooling')
    parser.add_argument('--hid_dim', type=int, default=32, help='Hidden layer dimensionalities')

    parsed = parser.parse_args()

    # check cuda
    use_cuda = parsed.gpu != -1 and th.cuda.is_available()
    parsed.device = 'cuda:{}'.format(parsed.gpu) if use_cuda else 'cpu'

    return parsed
def collate(samples):
    '''Collate (graph, label) samples into one batched DGL graph.

    Every node of the batched graph gets a 'graph_id' feature identifying
    which original graph it came from.  Returns (batched_graph, #graphs);
    labels are ignored (unsupervised training).
    '''
    graphs, _labels = map(list, zip(*samples))

    # generate batched graphs
    batched_graph = dgl.batch(graphs)

    # generate graph_id for each node within the batch
    graph_id = th.zeros(batched_graph.num_nodes()).long()
    offset = 0
    for idx, graph in enumerate(graphs):
        upto = offset + graph.num_nodes()
        graph_id[offset:upto] = idx
        offset = upto

    batched_graph.ndata['graph_id'] = graph_id

    return batched_graph, len(graphs)
def train_infograph(args):
    """Train InfoGraph over all cached interaction graphs, checkpointing the
    model whenever the mean epoch loss improves.

    `args` comes from `argument()` (fields n_layers / hid_dim may have been
    overridden by the caller, see prep_gcn_embeddings).
    """
    # Step 1: Prepare graph data ===================================== #
    dataset = SyntheticDataset(feature_dimension = 32)
    print("Total # of graphs:", len(dataset.graphs), "/", dataset.orig_total, "\n")

    # creata dataloader for batch training
    dataloader = GraphDataLoader(
        dataset,
        batch_size=args.batch_size, collate_fn=collate,
        drop_last=False, shuffle=True
    )

    # Step 2: Create model =================================================================== #
    print("DIMENSION:", args.hid_dim, "; GCN Layers:", args.n_layers)
    model = InfoGraph(args.hid_dim, args.n_layers)
    model = model.to(args.device)

    # Step 3: Create training components ===================================================== #
    optimizer = th.optim.Adam(model.parameters(), lr=args.lr)

    print('===== Before training ======')

    # Step 4: training epoches =============================================================== #
    best_loss = float(1e10)
    # NOTE(review): range(1, args.epochs) runs args.epochs - 1 epochs -- confirm intended.
    for epoch in range(1, args.epochs):
        loss_all = 0
        model.train()
        start_time = time.time()

        for graph, n_graph in tqdm(dataloader):
            graph = graph.to(args.device)
            optimizer.zero_grad()
            loss = model(graph)
            loss.backward()
            optimizer.step()
            loss_all += loss.item() * n_graph

        # NOTE(review): loss_all sums loss * batch-size, but is divided by the
        # number of batches (len(dataloader)), not the number of graphs -- confirm.
        mean_loss = loss_all / len(dataloader)
        print('Epoch {}, Loss {:.4f}, Time {:.4f}'.format(epoch, mean_loss, time.time() - start_time))

        if mean_loss < best_loss:
            print("Saving...")
            th.save(model.state_dict(), INFOGRAPH_MODEL_PATH(args.hid_dim, args.n_layers))
            best_loss = mean_loss
# Script entry point: parse CLI arguments and train InfoGraph.
if __name__ == '__main__':
    args = argument() ; print(args)
    train_infograph(args)
| 3,615 | 33.438095 | 121 | py |
sampling_cf | sampling_cf-main/data_genie/InfoGraph/infograph_utils.py | ''' Credit: https://github.com/fanyun-sun/InfoGraph '''
import torch
import torch as th
import torch.nn.functional as F
import math
def local_global_loss_(l_enc, g_enc, graph_id, measure):
    """InfoGraph local-global MI loss.

    Scores every (node, graph) pair via a dot product, treats a node paired
    with its own graph as positive and with every other graph as negative,
    and returns E_neg - E_pos under the chosen divergence `measure`.
    """
    num_graphs = g_enc.shape[0]
    num_nodes = l_enc.shape[0]
    device = g_enc.device

    # Membership masks: pos_mask[n, g] = 1 iff node n belongs to graph g.
    pos_mask = th.zeros((num_nodes, num_graphs)).to(device)
    neg_mask = th.ones((num_nodes, num_graphs)).to(device)
    for node_idx, graph_idx in enumerate(graph_id):
        pos_mask[node_idx][graph_idx] = 1.
        neg_mask[node_idx][graph_idx] = 0.

    scores = th.mm(l_enc, g_enc.t())

    E_pos = get_positive_expectation(scores * pos_mask, measure, average=False).sum() / num_nodes
    E_neg = get_negative_expectation(scores * neg_mask, measure, average=False).sum() / (num_nodes * (num_graphs - 1))

    return E_neg - E_pos
def log_sum_exp(x, axis=None):
    """Numerically stable log-sum-exp along `axis`.

    Args:
        x: Input tensor.
        axis: Axis over which to perform the sum.  NOTE(review): the default
            `None` is unusable here (torch.max needs an int dim); all callers
            pass an explicit axis.

    Returns:
        torch.Tensor: log(sum(exp(x))) along `axis`.
    """
    m = torch.max(x, axis)[0]
    return torch.log(torch.exp(x - m).sum(axis)) + m
def raise_measure_error(measure):
    """Raise NotImplementedError naming `measure` and the supported divergence measures."""
    supported_measures = ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1']
    message = 'Measure `{}` not supported. Supported: {}'.format(measure, supported_measures)
    raise NotImplementedError(message)
def get_positive_expectation(p_samples, measure, average=True):
    """Computes the positive part of a divergence / difference.

    Args:
        p_samples: Positive samples.
        measure: Divergence name (one of GAN/JSD/X2/KL/RKL/DV/H2/W1).
        average: Average the result over samples.

    Returns:
        torch.Tensor
    """
    log_2 = math.log(2.)

    # Lazily-evaluated formula per measure: only the selected one runs.
    formulas = {
        'GAN': lambda: - F.softplus(-p_samples),
        'JSD': lambda: log_2 - F.softplus(- p_samples),
        'X2': lambda: p_samples ** 2,
        'KL': lambda: p_samples + 1.,
        'RKL': lambda: -torch.exp(-p_samples),
        'DV': lambda: p_samples,
        'H2': lambda: 1. - torch.exp(-p_samples),
        'W1': lambda: p_samples,
    }
    if measure not in formulas:
        raise_measure_error(measure)
    Ep = formulas[measure]()

    return Ep.mean() if average else Ep
def get_negative_expectation(q_samples, measure, average=True):
    """Computes the negative part of a divergence / difference.

    Args:
        q_samples: Negative samples.
        measure: Divergence name (one of GAN/JSD/X2/KL/RKL/DV/H2/W1).
        average: Average the result over samples.

    Returns:
        torch.Tensor
    """
    log_2 = math.log(2.)

    # Lazily-evaluated formula per measure: only the selected one runs.
    formulas = {
        'GAN': lambda: F.softplus(-q_samples) + q_samples,
        'JSD': lambda: F.softplus(-q_samples) + q_samples - log_2,
        'X2': lambda: -0.5 * ((torch.sqrt(q_samples ** 2) + 1.) ** 2),
        'KL': lambda: torch.exp(q_samples),
        'RKL': lambda: q_samples - 1.,
        'DV': lambda: log_sum_exp(q_samples, 0) - math.log(q_samples.size(0)),
        'H2': lambda: torch.exp(q_samples) - 1.,
        'W1': lambda: q_samples,
    }
    if measure not in formulas:
        raise_measure_error(measure)
    Eq = formulas[measure]()

    return Eq.mean() if average else Eq
| 3,376 | 26.680328 | 82 | py |
sampling_cf | sampling_cf-main/pytorch_models/SASRec.py | import torch
import numpy as np
import torch.nn as nn
from torch_utils import LongTensor, BoolTensor, is_cuda_available
class PointWiseFeedForward(nn.Module):
def __init__(self, hidden_units, dropout_rate):
super(PointWiseFeedForward, self).__init__()
self.conv1 = nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout1 = nn.Dropout(p=dropout_rate)
self.relu = nn.ReLU()
self.conv2 = nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout2 = nn.Dropout(p=dropout_rate)
def forward(self, inputs):
outputs = self.dropout2(self.conv2(self.relu(self.dropout1(self.conv1(inputs.transpose(-1, -2))))))
outputs = outputs.transpose(-1, -2) # as Conv1D requires (N, C, Length)
outputs += inputs
return outputs
class SASRec(torch.nn.Module):
    """Self-attentive sequential recommender (SASRec-style).

    Embeds an item-id sequence plus learned positional embeddings, runs it
    through ``num_blocks`` causal self-attention + point-wise feed-forward
    blocks, and scores candidate items by dot product with the resulting
    sequence features.  Item id ``total_items`` is reserved as padding.
    """
    def __init__(self, hyper_params):
        super(SASRec, self).__init__()
        self.hyper_params = hyper_params
        self.item_num = hyper_params['total_items']
        # One extra embedding row for the padding item (index item_num),
        # which embeds to zeros via padding_idx.
        self.item_emb = torch.nn.Embedding(self.item_num+1, hyper_params['latent_size'], padding_idx=self.item_num)
        self.pos_emb = torch.nn.Embedding(hyper_params['max_seq_len'], hyper_params['latent_size']) # TO IMPROVE
        self.emb_dropout = torch.nn.Dropout(p=hyper_params['dropout'])
        self.attention_layernorms = torch.nn.ModuleList() # to be Q for self-attention
        self.attention_layers = torch.nn.ModuleList()
        self.forward_layernorms = torch.nn.ModuleList()
        self.forward_layers = torch.nn.ModuleList()
        self.last_layernorm = torch.nn.LayerNorm(hyper_params['latent_size'], eps=1e-8)
        # One (LayerNorm, MultiheadAttention, LayerNorm, FFN) stack per block.
        for _ in range(hyper_params['num_blocks']):
            new_attn_layernorm = torch.nn.LayerNorm(hyper_params['latent_size'], eps=1e-8)
            self.attention_layernorms.append(new_attn_layernorm)
            new_attn_layer = torch.nn.MultiheadAttention(
                hyper_params['latent_size'],
                hyper_params['num_heads'],
                hyper_params['dropout']
            )
            self.attention_layers.append(new_attn_layer)
            new_fwd_layernorm = torch.nn.LayerNorm(hyper_params['latent_size'], eps=1e-8)
            self.forward_layernorms.append(new_fwd_layernorm)
            new_fwd_layer = PointWiseFeedForward(hyper_params['latent_size'], hyper_params['dropout'])
            self.forward_layers.append(new_fwd_layer)
    def log2feats(self, log_seqs):
        """Encode a batch of item-id sequences into per-position features.

        Args:
            log_seqs: LongTensor [batch x seq_len] of item ids, padded with
                ``self.item_num``.
        Returns:
            FloatTensor [batch x seq_len x latent_size].
        """
        seqs = self.item_emb(log_seqs)
        # Scale embeddings by sqrt(d), as in the Transformer.
        seqs *= self.item_emb.embedding_dim ** 0.5
        positions = np.tile(np.array(range(log_seqs.shape[1])), [log_seqs.shape[0], 1])
        seqs += self.pos_emb(LongTensor(positions))
        seqs = self.emb_dropout(seqs)
        # True at positions holding the padding item; used to zero them out.
        timeline_mask = BoolTensor(log_seqs == self.item_num)
        seqs *= ~timeline_mask.unsqueeze(-1) # broadcast in last dim
        tl = seqs.shape[1] # time dim len for enforce causality
        temp = torch.ones((tl, tl), dtype=torch.bool)
        if is_cuda_available: temp = temp.cuda()
        # Upper-triangular True entries block attention to future positions.
        attention_mask = ~torch.tril(temp)
        for i in range(len(self.attention_layers)):
            # nn.MultiheadAttention expects (seq_len, batch, dim) by default.
            seqs = torch.transpose(seqs, 0, 1)
            Q = self.attention_layernorms[i](seqs)
            mha_outputs, _ = self.attention_layers[i](Q, seqs, seqs,
                                            attn_mask=attention_mask)
                                            # key_padding_mask=timeline_mask
                                            # need_weights=False) this arg do not work?
            # Residual connection around the attention sublayer.
            seqs = Q + mha_outputs
            seqs = torch.transpose(seqs, 0, 1)
            seqs = self.forward_layernorms[i](seqs)
            seqs = self.forward_layers[i](seqs)
            # Re-zero padded positions after each block.
            seqs *= ~timeline_mask.unsqueeze(-1)
        log_feats = self.last_layernorm(seqs) # (U, T, C) -> (U, -1, C)
        return log_feats
    def get_score(self, log_feats, items):
        """Dot product between sequence features and the items' embeddings."""
        embs = self.item_emb(items)
        return (log_feats * embs).sum(dim=-1)
    def forward(self, data, eval = False):
        """Score items against the encoded sequence.

        With ``eval=True``: uses only the last position's features; if
        ``pos_seqs`` is None, ranks the full catalog (padding column
        dropped), otherwise scores the given positive and sampled negative
        items.  With ``eval=False``: per-position training scores for
        positives and negatives.
        """
        log_seqs, pos_seqs, neg_seqs = data
        # Embed sequence
        log_feats = self.log2feats(log_seqs) # user_ids hasn't been used yet
        if eval:
            # Keep only the representation of the last sequence position.
            log_feats = log_feats[:, -1, :]
            # Rank all items
            if pos_seqs is None:
                # Score against the whole item table; [:, :-1] drops the
                # last column, which belongs to the padding item.
                return torch.matmul(
                    log_feats,
                    self.item_emb.weight.transpose(0, 1)
                )[:, :-1]
            # Sampled evaluation
            orig_shape = neg_seqs.shape
            return self.get_score(log_feats.unsqueeze(1), pos_seqs), self.get_score(
                log_feats.unsqueeze(1).repeat(1, orig_shape[1], 1), neg_seqs
            ).view(orig_shape)
        # Dot product
        orig_shape = neg_seqs.shape
        return self.get_score(log_feats, pos_seqs).unsqueeze(-1).repeat(1, 1, orig_shape[2]), \
            self.get_score(
                log_feats.unsqueeze(2).repeat(1, 1, orig_shape[2], 1).view(orig_shape[0], orig_shape[1] * orig_shape[2], -1),
                neg_seqs.view(orig_shape[0], orig_shape[1] * orig_shape[2])
            ).view(orig_shape)
| 5,147 | 40.853659 | 122 | py |
sampling_cf | sampling_cf-main/pytorch_models/NeuMF.py | import torch
import torch.nn as nn
from pytorch_models.MF import BaseMF
class GMF(BaseMF):
    """Generalized Matrix Factorization: element-wise product of user and
    item embeddings projected to a scalar, plus FM-style bias terms."""

    def __init__(self, hyper_params):
        super(GMF, self).__init__(hyper_params)
        self.final = nn.Linear(hyper_params['latent_size'], 1)
        self.dropout = nn.Dropout(hyper_params['dropout'])

    def get_score(self, user_id, item_id):
        """Score each (user, item) pair; inputs may be any matching shape."""
        flat_users = user_id.view(-1)
        flat_items = item_id.view(-1)
        # Scalar bias terms, reshaped back to the input shape.
        u_bias = self.user_bias.gather(0, flat_users).view(user_id.shape)
        i_bias = self.item_bias.gather(0, flat_items).view(item_id.shape)
        # Latent factors with dropout applied.
        u_vec = self.dropout(self.user_embedding(flat_users))
        i_vec = self.dropout(self.item_embedding(flat_items))
        # GMF interaction: element-wise product -> linear head -> scalar.
        interaction = self.final(u_vec * i_vec)[:, 0].view(user_id.shape)
        return u_bias + i_bias + self.global_bias + interaction
class MLP(BaseMF):
    """MLP-based factorization: concatenated user/item embeddings fed
    through a small feed-forward tower, plus FM-style bias terms."""

    def __init__(self, hyper_params):
        super(MLP, self).__init__(hyper_params)
        self.project = nn.Sequential(
            nn.Dropout(hyper_params['dropout']),
            nn.Linear(2 * hyper_params['latent_size'], hyper_params['latent_size']),
            nn.ReLU(),
            nn.Linear(hyper_params['latent_size'], hyper_params['latent_size'])
        )
        self.final = nn.Linear(hyper_params['latent_size'], 1)
        self.dropout = nn.Dropout(hyper_params['dropout'])

    def get_score(self, user_id, item_id):
        """Score each (user, item) pair; inputs may be any matching shape."""
        flat_users = user_id.view(-1)
        flat_items = item_id.view(-1)
        # Scalar bias terms, reshaped back to the input shape.
        u_bias = self.user_bias.gather(0, flat_users).view(user_id.shape)
        i_bias = self.item_bias.gather(0, flat_items).view(item_id.shape)
        # Embed both ids, concatenate, and project through the MLP tower.
        u_vec = self.dropout(self.user_embedding(flat_users))
        i_vec = self.dropout(self.item_embedding(flat_items))
        hidden = self.project(torch.cat([ u_vec, i_vec ], dim = -1))
        rating = self.final(hidden)[:, 0].view(user_id.shape)
        return u_bias + i_bias + self.global_bias + rating
class NeuMF(BaseMF):
    """Neural Matrix Factorization (NeuMF).

    Combines a GMF branch (element-wise product of user/item embeddings)
    and an MLP branch (projection of the concatenated embeddings); the two
    branch vectors are concatenated and mapped to a scalar by ``final``,
    then added to the FM-style bias terms from BaseMF.
    """
    def __init__(self, hyper_params):
        super(NeuMF, self).__init__(hyper_params, keep_gamma = False)
        # Each branch keeps its own embedding tables.
        self.gmf_user_embedding = nn.Embedding(hyper_params['total_users'], hyper_params['latent_size'])
        self.gmf_item_embedding = nn.Embedding(hyper_params['total_items'], hyper_params['latent_size'])
        self.mlp_user_embedding = nn.Embedding(hyper_params['total_users'], hyper_params['latent_size'])
        self.mlp_item_embedding = nn.Embedding(hyper_params['total_items'], hyper_params['latent_size'])
        self.project = nn.Sequential(
            nn.Dropout(hyper_params['dropout']),
            nn.Linear(2 * hyper_params['latent_size'], hyper_params['latent_size']),
            nn.ReLU(),
            nn.Linear(hyper_params['latent_size'], hyper_params['latent_size'])
        )
        # Scores the concatenation [gmf_vector ; mlp_vector].
        self.final = nn.Linear(2 * hyper_params['latent_size'], 1)
        self.dropout = nn.Dropout(hyper_params['dropout'])
    def init(self, gmf_model, mlp_model):
        """Warm-start NeuMF from pre-trained GMF and MLP models.

        Copies both models' embedding tables and the MLP projection weights,
        concatenates the two final layers' weights, and averages their
        biases as well as the user/item bias vectors.
        """
        with torch.no_grad():
            self.gmf_user_embedding.weight.data = gmf_model.user_embedding.weight.data
            self.gmf_item_embedding.weight.data = gmf_model.item_embedding.weight.data
            self.mlp_user_embedding.weight.data = mlp_model.user_embedding.weight.data
            self.mlp_item_embedding.weight.data = mlp_model.item_embedding.weight.data
            for i in range(len(self.project)):
                # Only Linear layers carry parameters; Dropout/ReLU do not.
                # (Replaces a bare `except: pass`, which would also have
                # hidden genuine errors such as shape mismatches.)
                if isinstance(self.project[i], nn.Linear):
                    self.project[i].weight.data = mlp_model.project[i].weight.data
                    self.project[i].bias.data = mlp_model.project[i].bias.data
            self.final.weight.data = torch.cat([ gmf_model.final.weight.data, mlp_model.final.weight.data ], dim = -1)
            self.final.bias.data = 0.5 * (gmf_model.final.bias.data + mlp_model.final.bias.data)
            self.user_bias.data = 0.5 * (gmf_model.user_bias.data + mlp_model.user_bias.data)
            self.item_bias.data = 0.5 * (gmf_model.item_bias.data + mlp_model.item_bias.data)
    def get_score(self, user_id, item_id):
        """Return a score per (user, item) pair, shaped like ``user_id``."""
        # For the FM
        user_bias = self.user_bias.gather(0, user_id.view(-1)).view(user_id.shape)
        item_bias = self.item_bias.gather(0, item_id.view(-1)).view(item_id.shape)
        # GMF Part
        user = self.dropout(self.gmf_user_embedding(user_id.view(-1))) # [bsz x latent]
        item = self.dropout(self.gmf_item_embedding(item_id.view(-1))) # [bsz x latent]
        gmf_joint = user * item
        # MLP Part
        user = self.dropout(self.mlp_user_embedding(user_id.view(-1))) # [bsz x latent]
        item = self.dropout(self.mlp_item_embedding(item_id.view(-1))) # [bsz x latent]
        mlp_joint = torch.cat([ user, item ], dim = -1)
        mlp_joint = self.project(mlp_joint)
        # NeuMF: merge both branch vectors and score.
        final = torch.cat([ gmf_joint, mlp_joint ], dim = -1)
        rating = self.final(final)[:, 0].view(user_id.shape) # [bsz]
        return user_bias + item_bias + self.global_bias + rating
| 5,009 | 43.732143 | 118 | py |
sampling_cf | sampling_cf-main/pytorch_models/SVAE.py | import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from torch_utils import is_cuda_available
class Encoder(nn.Module):
    """Single-layer encoder: a Xavier-initialized Linear map (latent ->
    latent) followed by Tanh."""

    def __init__(self, hyper_params):
        super(Encoder, self).__init__()
        size = hyper_params['latent_size']
        self.linear1 = nn.Linear(
            size, size
        )
        nn.init.xavier_normal_(self.linear1.weight)
        self.activation = nn.Tanh()

    def forward(self, x):
        return self.activation(self.linear1(x))
class Decoder(nn.Module):
    """Two-layer decoder: latent -> latent (Tanh) -> logits over all items,
    with Xavier-initialized weights."""

    def __init__(self, hyper_params):
        super(Decoder, self).__init__()
        self.linear1 = nn.Linear(hyper_params['latent_size'], hyper_params['latent_size'])
        self.linear2 = nn.Linear(hyper_params['latent_size'], hyper_params['total_items'])
        nn.init.xavier_normal_(self.linear1.weight)
        nn.init.xavier_normal_(self.linear2.weight)
        self.activation = nn.Tanh()

    def forward(self, x):
        hidden = self.activation(self.linear1(x))
        return self.linear2(hidden)
class SVAE(nn.Module):
    """Sequential Variational Autoencoder.

    Embeds an item-id sequence, runs a single-layer GRU over it, and for
    every timestep samples a latent vector via the reparameterization trick,
    which the decoder maps to logits over all items.  Item id
    ``total_items`` is reserved as padding.
    """
    def __init__(self, hyper_params):
        super(SVAE, self).__init__()
        self.hyper_params = hyper_params
        self.encoder = Encoder(hyper_params)
        self.decoder = Decoder(hyper_params)
        # One extra embedding row for the padding item.
        self.item_embed = nn.Embedding(
            hyper_params['total_items'] + 1, hyper_params['latent_size'],
            padding_idx=hyper_params['total_items']
        )
        self.gru = nn.GRU(
            hyper_params['latent_size'], hyper_params['latent_size'],
            batch_first = True, num_layers = 1
        )
        # Projects encoder output to [mu ; log_sigma] (hence 2x latent_size).
        self.linear1 = nn.Linear(hyper_params['latent_size'], 2 * hyper_params['latent_size'])
        nn.init.xavier_normal_(self.linear1.weight)
        self.tanh = nn.Tanh()
    def sample_latent(self, h_enc):
        """Sample z ~ N(mu, sigma^2) with the reparameterization trick.

        Returns (sampled_z, mu, log_sigma).  Noise is drawn from numpy's
        global RNG (not torch's), then moved to GPU when available.
        """
        temp_out = self.linear1(h_enc)
        # First half of the projection is mu, second half is log_sigma.
        mu = temp_out[:, :self.hyper_params['latent_size']]
        log_sigma = temp_out[:, self.hyper_params['latent_size']:]
        sigma = torch.exp(log_sigma)
        std_z = torch.from_numpy(np.random.normal(0, 1, size=sigma.size())).float()
        if is_cuda_available: std_z = std_z.cuda()
        return mu + sigma * Variable(std_z, requires_grad=False), mu, log_sigma # Reparameterization trick
    def forward(self, data, eval = False):
        """Return (dec_out, z_mean, z_log_sigma).

        ``dec_out`` is [bsz x seq_len x total_items]; the latent stats are
        flattened to [bsz * seq_len x latent_size].  Only the first element
        of ``data`` is used; ``eval`` is unused here.
        """
        x, _, _ = data
        in_shape = x.shape # [bsz x seq_len]
        x = self.item_embed(x) # [bsz x seq_len x latent_size]
        rnn_out, _ = self.gru(x) # [bsz x seq_len x latent_size]
        # Flatten timesteps so each position gets its own latent sample.
        rnn_out = rnn_out.contiguous().view(in_shape[0] * in_shape[1], -1) # [bsz * seq_len x latent_size]
        enc_out = self.encoder(rnn_out) # [bsz * seq_len x latent_size]
        sampled_z, z_mean, z_log_sigma = self.sample_latent(enc_out) # [bsz * seq_len x latent_size]
        dec_out = self.decoder(sampled_z) # [bsz * seq_len x total_items]
        dec_out = dec_out.view(in_shape[0], in_shape[1], -1) # [bsz x seq_len x total_items]
        return dec_out, z_mean, z_log_sigma
| 3,388 | 38.406977 | 107 | py |
sampling_cf | sampling_cf-main/pytorch_models/MVAE.py | import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from torch_utils import is_cuda_available
class Encoder(nn.Module):
    """Maps a bag-of-items vector (length total_items) into the latent space
    via one Xavier-initialized Linear layer followed by Tanh."""

    def __init__(self, hyper_params):
        super(Encoder, self).__init__()
        self.linear1 = nn.Linear(
            hyper_params['total_items'], hyper_params['latent_size']
        )
        nn.init.xavier_normal_(self.linear1.weight)
        self.activation = nn.Tanh()

    def forward(self, x):
        return self.activation(self.linear1(x))
class Decoder(nn.Module):
    """Maps a latent vector back to item-score space: Linear -> Tanh ->
    Linear, with Xavier-initialized weights."""

    def __init__(self, hyper_params):
        super(Decoder, self).__init__()
        self.linear1 = nn.Linear(hyper_params['latent_size'], hyper_params['latent_size'])
        self.linear2 = nn.Linear(hyper_params['latent_size'], hyper_params['total_items'])
        nn.init.xavier_normal_(self.linear1.weight)
        nn.init.xavier_normal_(self.linear2.weight)
        self.activation = nn.Tanh()

    def forward(self, x):
        hidden = self.activation(self.linear1(x))
        return self.linear2(hidden)
class MVAE(nn.Module):
    """Variational Autoencoder over a user's bag-of-items vector.

    The encoder compresses the (length ``total_items``) input vector, a
    latent code is sampled via the reparameterization trick, and the
    decoder maps it back to logits over all items.
    """
    def __init__(self, hyper_params):
        super(MVAE, self).__init__()
        self.hyper_params = hyper_params
        self.encoder = Encoder(hyper_params)
        self.decoder = Decoder(hyper_params)
        # Projects encoder output to [mu ; log_sigma] (hence 2x latent_size).
        self.linear1 = nn.Linear(hyper_params['latent_size'], 2 * hyper_params['latent_size'])
        nn.init.xavier_normal_(self.linear1.weight)
        self.tanh = nn.Tanh()
    def sample_latent(self, h_enc):
        """Sample z ~ N(mu, sigma^2) with the reparameterization trick.

        Returns (sampled_z, mu, log_sigma).  Noise is drawn from numpy's
        global RNG (not torch's), then moved to GPU when available.
        """
        temp_out = self.linear1(h_enc)
        # First half of the projection is mu, second half is log_sigma.
        mu = temp_out[:, :self.hyper_params['latent_size']]
        log_sigma = temp_out[:, self.hyper_params['latent_size']:]
        sigma = torch.exp(log_sigma)
        std_z = torch.from_numpy(np.random.normal(0, 1, size=sigma.size())).float()
        if is_cuda_available: std_z = std_z.cuda()
        return mu + sigma * Variable(std_z, requires_grad=False), mu, log_sigma # Reparameterization trick
    def forward(self, data, eval = False):
        """Return (dec_out, z_mean, z_log_sigma) for the input bag-of-items
        vector.  Only the first element of ``data`` is used; ``eval`` is
        unused here."""
        x, _, _ = data
        enc_out = self.encoder(x)
        sampled_z, z_mean, z_log_sigma = self.sample_latent(enc_out)
        dec_out = self.decoder(sampled_z)
        return dec_out, z_mean, z_log_sigma
| 2,331 | 32.797101 | 107 | py |
sampling_cf | sampling_cf-main/pytorch_models/MF.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_utils import LongTensor, FloatTensor
class BaseMF(nn.Module):
    """Base class for matrix-factorization models.

    Owns the bias terms (global, per-user, per-item) and, when
    ``keep_gamma`` is True, the user/item embedding tables.  Sub-classes
    implement ``get_score``; ``forward`` dispatches between full ranking,
    explicit feedback, and implicit feedback depending on which parts of
    the input tuple are None.
    """
    def __init__(self, hyper_params, keep_gamma = True):
        super(BaseMF, self).__init__()
        self.hyper_params = hyper_params
        # Declaring alpha, beta, gamma
        # Global bias initialized to 4.0 for the explicit-rating task,
        # 0.5 otherwise.
        self.global_bias = nn.Parameter(FloatTensor([ 4.0 if hyper_params['task'] == 'explicit' else 0.5 ]))
        self.user_bias = nn.Parameter(FloatTensor([ 0.0 for _ in range(hyper_params['total_users']) ]))
        self.item_bias = nn.Parameter(FloatTensor([ 0.0 for _ in range(hyper_params['total_items']) ]))
        if keep_gamma:
            self.user_embedding = nn.Embedding(hyper_params['total_users'], hyper_params['latent_size'])
            self.item_embedding = nn.Embedding(hyper_params['total_items'], hyper_params['latent_size'])
        # For faster evaluation
        # Precomputed [0 .. total_items-1] id vector used to rank everything.
        self.all_items_vector = LongTensor(
            list(range(hyper_params['total_items']))
        )
    def get_score(self, data):
        # NOTE(review): virtual method; sub-classes implement it with the
        # signature get_score(user_id, item_id) rather than get_score(data).
        pass # Virtual function, implement in all sub-classes
    def forward(self, data, eval = False):
        """Dispatch scoring based on ``data = (user_id, pos_item_id, neg_items)``.

        - pos_item_id is None: rank all items per user -> [bsz x total_items].
        - neg_items is None: explicit feedback, score the given pairs.
        - otherwise: implicit feedback, return (pos_scores, neg_scores).
        ``eval`` is unused here; the branch is chosen by the None checks.
        """
        user_id, pos_item_id, neg_items = data
        # Evaluation -- Rank all items
        if pos_item_id is None:
            # One user at a time: pair the user id with every item id.
            ret = []
            for b in range(user_id.shape[0]):
                ret.append(self.get_score(
                    user_id[b].unsqueeze(-1).repeat(1, self.hyper_params['total_items']).view(-1),
                    self.all_items_vector.view(-1)
                ).view(1, -1))
            return torch.cat(ret)
        # Explicit feedback
        if neg_items is None: return self.get_score(user_id, pos_item_id.squeeze(-1))
        # Implicit feedback
        # Repeat each user id across its positives / negatives, score the
        # flattened pairs, then restore the original shapes.
        return self.get_score(
            user_id.unsqueeze(-1).repeat(1, pos_item_id.shape[1]).view(-1),
            pos_item_id.view(-1)
        ).view(pos_item_id.shape), self.get_score(
            user_id.unsqueeze(-1).repeat(1, neg_items.shape[1]).view(-1),
            neg_items.view(-1)
        ).view(neg_items.shape)
class MF(BaseMF):
    """Matrix-factorization scorer with three variants selected by
    hyper_params['model_type']: 'bias_only' (biases alone), 'MF_dot'
    (dot product of embeddings), and 'MF' (GMF vector + MLP over the
    concatenation, merged by a final linear head)."""

    def __init__(self, hyper_params):
        keep_gamma = hyper_params['model_type'] != 'bias_only'
        super(MF, self).__init__(hyper_params, keep_gamma = keep_gamma)
        if keep_gamma: self.dropout = nn.Dropout(hyper_params['dropout'])
        if hyper_params['model_type'] == 'MF':
            latent_size = hyper_params['latent_size']
            self.projection = nn.Sequential(
                nn.Dropout(hyper_params['dropout']),
                nn.Linear(2 * latent_size, latent_size),
                nn.ReLU(),
                nn.Linear(latent_size, latent_size)
            )
            # Xavier-init only the Linear layers inside the tower.
            for m in self.projection:
                if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight)
            self.final = nn.Linear(2 * latent_size, 1)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def get_score(self, user_id, item_id):
        """Return a score per (user, item) pair, shaped like ``user_id``."""
        variant = self.hyper_params['model_type']
        # FM-style scalar biases, reshaped back to the input shape.
        u_bias = self.user_bias.gather(0, user_id.view(-1)).view(user_id.shape)
        i_bias = self.item_bias.gather(0, item_id.view(-1)).view(item_id.shape)
        if variant == 'bias_only':
            return u_bias + i_bias + self.global_bias
        # Latent factors with dropout.
        u_vec = self.dropout(self.user_embedding(user_id.view(-1)))
        i_vec = self.dropout(self.item_embedding(item_id.view(-1)))
        if variant == 'MF_dot':
            # Plain dot-product interaction.
            dot = torch.sum(u_vec * i_vec, dim = -1).view(user_id.shape)
            return u_bias + i_bias + self.global_bias + dot
        # NeuMF-style: element-wise (GMF) vector plus MLP over the
        # concatenation, both fed to the final linear scoring head.
        gmf_vector = u_vec * i_vec
        mlp_vector = self.projection(torch.cat([ u_vec, i_vec ], dim = -1))
        combined = torch.cat([ mlp_vector, gmf_vector ], dim = -1)
        rating = self.final(combined)[:, 0].view(user_id.shape)
        return u_bias + i_bias + self.global_bias + rating
| 4,075 | 38.960784 | 108 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.