id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
15,379 | import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings
import torch
from pytorch_lightning import seed_everything
import platform
def get_has_ddp_rank():
    """Return True if any DDP-related environment variable is set.

    The PyTorch Lightning DDP launcher sets MASTER_PORT, NODE_RANK,
    LOCAL_RANK and WORLD_SIZE in worker processes; their presence
    identifies a worker process.
    """
    ddp_env_vars = ('MASTER_PORT', 'NODE_RANK', 'LOCAL_RANK', 'WORLD_SIZE')
    return any(os.environ.get(name) is not None for name in ddp_env_vars)
def handle_ddp_subprocess():
    """Decorator factory for a hydra main function that may run as a DDP worker.

    The DDP launcher re-executes the script in worker processes (setting
    MASTER_PORT, NODE_RANK, LOCAL_RANK, WORLD_SIZE).  Workers must reuse the
    parent's hydra working directory, which the parent publishes via the
    TRAINING_PARENT_WORK_DIR environment variable (set in
    handle_ddp_parent_process after hydra initialization).
    """
    def main_decorator(main_func):
        @functools.wraps(main_func)
        def new_main(*args, **kwargs):
            # Trainer sets MASTER_PORT, NODE_RANK, LOCAL_RANK, WORLD_SIZE
            parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None)
            has_parent = parent_cwd is not None
            has_rank = get_has_ddp_rank()
            assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}'

            if has_parent:
                # we are in the worker: point hydra at the parent's work dir
                sys.argv.extend([
                    f'hydra.run.dir={parent_cwd}',
                    # 'hydra/hydra_logging=disabled',
                    # 'hydra/job_logging=disabled'
                ])
            # do nothing if this is a top-level process
            # TRAINING_PARENT_WORK_DIR is set in handle_ddp_parent_process after hydra initialization

            # bug fix: propagate the wrapped function's return value
            return main_func(*args, **kwargs)
        return new_main
    return main_decorator
15,380 | import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings
import torch
from pytorch_lightning import seed_everything
import platform
def get_has_ddp_rank():
def handle_ddp_parent_process():
    """Publish the work dir for DDP workers; return True when run as a worker.

    In the top-level (parent) process this stores the current working
    directory in TRAINING_PARENT_WORK_DIR so that re-executed DDP worker
    processes can pick it up.
    """
    parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None)
    has_parent = parent_cwd is not None
    has_rank = get_has_ddp_rank()
    assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}'

    if not has_parent:
        # top-level process: record our cwd for future worker processes
        os.environ['TRAINING_PARENT_WORK_DIR'] = os.getcwd()

    return has_parent
15,381 | import collections
from functools import partial
import functools
import logging
from collections import defaultdict
import numpy as np
import torch.nn as nn
from saicinpainting.training.modules.base import BaseDiscriminator, deconv_factory, get_conv_block_ctor, get_norm_layer, get_activation
from saicinpainting.training.modules.ffc import FFCResnetBlock
from saicinpainting.training.modules.multidilated_conv import MultidilatedConv
class ResnetBlock(nn.Module):
    """Residual block of two 3x3 convs with optional 1x1 input projection.

    The skip connection uses the projected input (when ``in_dim`` is given),
    while the conv branch consumes the original, unprojected input.
    """

    def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False, conv_kind='default',
                 dilation=1, in_dim=None, groups=1, second_dilation=None):
        super(ResnetBlock, self).__init__()
        self.in_dim = in_dim
        self.dim = dim
        second_dilation = dilation if second_dilation is None else second_dilation
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout,
                                                conv_kind=conv_kind, dilation=dilation, in_dim=in_dim, groups=groups,
                                                second_dilation=second_dilation)

        if self.in_dim is not None:
            # 1x1 projection so the residual sum has matching channel counts
            self.input_conv = nn.Conv2d(in_dim, dim, 1)

        # NOTE: keeps the original (misspelled) public attribute name
        self.out_channnels = dim

    def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout, conv_kind='default',
                         dilation=1, in_dim=None, groups=1, second_dilation=1):
        conv_layer = get_conv_block_ctor(conv_kind)
        if in_dim is None:
            in_dim = dim

        def explicit_pad(dil):
            # Returns (explicit padding modules, conv `padding` argument).
            if padding_type == 'reflect':
                return [nn.ReflectionPad2d(dil)], 0
            if padding_type == 'replicate':
                return [nn.ReplicationPad2d(dil)], 0
            if padding_type == 'zero':
                return [], dil
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        pad_modules, conv_padding = explicit_pad(dilation)
        layers = list(pad_modules)
        layers += [conv_layer(in_dim, dim, kernel_size=3, padding=conv_padding, dilation=dilation),
                   norm_layer(dim),
                   activation]
        if use_dropout:
            layers.append(nn.Dropout(0.5))

        pad_modules, conv_padding = explicit_pad(second_dilation)
        layers += pad_modules
        layers += [conv_layer(dim, dim, kernel_size=3, padding=conv_padding, dilation=second_dilation, groups=groups),
                   norm_layer(dim)]

        return nn.Sequential(*layers)

    def forward(self, x):
        residual_input = x
        if self.in_dim is not None:
            x = self.input_conv(x)
        # conv branch sees the ORIGINAL input; skip uses the projected one
        return x + self.conv_block(residual_input)
class MultidilatedResnetBlock(nn.Module):
    """Residual block built from a caller-supplied (multi-dilated) conv ctor."""

    def __init__(self, dim, padding_type, conv_layer, norm_layer, activation=nn.ReLU(True), use_dropout=False):
        super().__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, conv_layer, norm_layer, activation, use_dropout)

    def build_conv_block(self, dim, padding_type, conv_layer, norm_layer, activation, use_dropout, dilation=1):
        layers = [conv_layer(dim, dim, kernel_size=3, padding_mode=padding_type),
                  norm_layer(dim),
                  activation]
        if use_dropout:
            layers.append(nn.Dropout(0.5))
        layers.extend([conv_layer(dim, dim, kernel_size=3, padding_mode=padding_type),
                       norm_layer(dim)])
        return nn.Sequential(*layers)

    def forward(self, x):
        return x + self.conv_block(x)
def make_dil_blocks(dilated_blocks_n, dilation_block_kind, dilated_block_kwargs):
    """Build ``dilated_blocks_n`` residual blocks.

    'simple' blocks get exponentially growing dilation (2, 4, 8, ...);
    'multi' blocks handle multiple dilations internally.
    """
    blocks = []
    for block_i in range(dilated_blocks_n):
        if dilation_block_kind == 'simple':
            new_block = ResnetBlock(**dilated_block_kwargs, dilation=2 ** (block_i + 1))
        elif dilation_block_kind == 'multi':
            new_block = MultidilatedResnetBlock(**dilated_block_kwargs)
        else:
            raise ValueError(f'dilation_block_kind could not be "{dilation_block_kind}"')
        blocks.append(new_block)
    return blocks
15,382 | import abc
from typing import Tuple, List
import torch
import torch.nn as nn
from saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv
from saicinpainting.training.modules.multidilated_conv import MultidilatedConv
class DepthWiseSeperableConv(nn.Module):
    """Depthwise-separable conv: per-channel conv followed by a 1x1 conv."""

    def __init__(self, in_dim, out_dim, *args, **kwargs):
        super().__init__()
        # a depthwise conv is already grouped per input channel, so any
        # caller-supplied `groups` argument is dropped, not forwarded
        kwargs.pop('groups', None)
        self.depthwise = nn.Conv2d(in_dim, in_dim, *args, groups=in_dim, **kwargs)
        self.pointwise = nn.Conv2d(in_dim, out_dim, kernel_size=1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))
class MultidilatedConv(nn.Module):
    """Several parallel convolutions with exponentially growing dilations.

    Branch ``i`` uses dilation ``min_dilation * 2**i``.  Branch outputs are
    combined according to ``comb_mode``:
      - 'sum':      all branches see the full input, outputs are summed;
      - 'cat_in':   input channels are split between branches, outputs summed;
      - 'cat_out':  branches see the full input, outputs are channel-interleaved;
      - 'cat_both': both splitting and interleaving.
    """
    def __init__(self, in_dim, out_dim, kernel_size, dilation_num=3, comb_mode='sum', equal_dim=True,
                 shared_weights=False, padding=1, min_dilation=1, shuffle_in_channels=False, use_depthwise=False, **kwargs):
        super().__init__()
        convs = []
        self.equal_dim = equal_dim
        assert comb_mode in ('cat_out', 'sum', 'cat_in', 'cat_both'), comb_mode
        if comb_mode in ('cat_out', 'cat_both'):
            self.cat_out = True
            if equal_dim:
                assert out_dim % dilation_num == 0
                out_dims = [out_dim // dilation_num] * dilation_num
                # interleave: channel k of branch j lands at position k*dilation_num + j
                self.index = sum([[i + j * (out_dims[0]) for j in range(dilation_num)] for i in range(out_dims[0])], [])
            else:
                out_dims = [out_dim // 2 ** (i + 1) for i in range(dilation_num - 1)]
                out_dims.append(out_dim - sum(out_dims))
                index = []
                # NOTE(review): `starts` looks like it should be the cumulative
                # sums of out_dims (channel offsets into the concatenated
                # output); `[0] + out_dims[:-1]` is not cumulative for
                # dilation_num > 2.  Preserved as-is -- verify before changing.
                starts = [0] + out_dims[:-1]
                lengths = [out_dims[i] // out_dims[-1] for i in range(dilation_num)]
                for i in range(out_dims[-1]):
                    for j in range(dilation_num):
                        index += list(range(starts[j], starts[j] + lengths[j]))
                        starts[j] += lengths[j]
                self.index = index
                assert len(index) == out_dim
            self.out_dims = out_dims
        else:
            self.cat_out = False
            self.out_dims = [out_dim] * dilation_num

        if comb_mode in ('cat_in', 'cat_both'):
            if equal_dim:
                assert in_dim % dilation_num == 0
                in_dims = [in_dim // dilation_num] * dilation_num
            else:
                in_dims = [in_dim // 2 ** (i + 1) for i in range(dilation_num - 1)]
                in_dims.append(in_dim - sum(in_dims))
            self.in_dims = in_dims
            self.cat_in = True
        else:
            self.cat_in = False
            self.in_dims = [in_dim] * dilation_num

        conv_type = DepthWiseSeperableConv if use_depthwise else nn.Conv2d
        dilation = min_dilation
        for i in range(dilation_num):
            if isinstance(padding, int):
                # scale padding with dilation so the spatial size is preserved
                cur_padding = padding * dilation
            else:
                cur_padding = padding[i]
            convs.append(conv_type(
                self.in_dims[i], self.out_dims[i], kernel_size, padding=cur_padding, dilation=dilation, **kwargs
            ))
            if i > 0 and shared_weights:
                # tie all branches to the first branch's parameters
                convs[-1].weight = convs[0].weight
                convs[-1].bias = convs[0].bias
            dilation *= 2
        self.convs = nn.ModuleList(convs)

        self.shuffle_in_channels = shuffle_in_channels
        if self.shuffle_in_channels:
            import random  # bug fix: `random` was used without being imported at module level
            # shuffle list as shuffling of tensors is nondeterministic
            in_channels_permute = list(range(in_dim))
            random.shuffle(in_channels_permute)
            # save as buffer so it is saved and loaded with checkpoint
            self.register_buffer('in_channels_permute', torch.tensor(in_channels_permute))

    def forward(self, x):
        if self.shuffle_in_channels:
            x = x[:, self.in_channels_permute]

        outs = []
        if self.cat_in:
            if self.equal_dim:
                x = x.chunk(len(self.convs), dim=1)
            else:
                new_x = []
                start = 0
                for dim in self.in_dims:
                    new_x.append(x[:, start:start + dim])
                    start += dim
                x = new_x

        for i, conv in enumerate(self.convs):
            # renamed from `input` to avoid shadowing the builtin
            conv_input = x[i] if self.cat_in else x
            outs.append(conv(conv_input))

        if self.cat_out:
            out = torch.cat(outs, dim=1)[:, self.index]
        else:
            out = sum(outs)
        return out
def get_conv_block_ctor(kind='default'):
    """Resolve a conv-layer constructor from its name.

    Non-string `kind` values are assumed to already be constructors and are
    returned unchanged.
    """
    if not isinstance(kind, str):
        return kind
    if kind == 'default':
        return nn.Conv2d
    elif kind == 'depthwise':
        return DepthWiseSeperableConv
    elif kind == 'multidilated':
        return MultidilatedConv
    raise ValueError(f'Unknown convolutional block kind {kind}')
15,383 | import abc
from typing import Tuple, List
import torch
import torch.nn as nn
from saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv
from saicinpainting.training.modules.multidilated_conv import MultidilatedConv
def get_norm_layer(kind='bn'):
    """Resolve a 2D normalization layer class from its name.

    Non-string `kind` values are assumed to already be layer classes and are
    returned unchanged.
    """
    if not isinstance(kind, str):
        return kind
    norm_layers = {'bn': nn.BatchNorm2d, 'in': nn.InstanceNorm2d}
    if kind in norm_layers:
        return norm_layers[kind]
    raise ValueError(f'Unknown norm block kind {kind}')
15,384 | import abc
from typing import Tuple, List
import torch
import torch.nn as nn
from saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv
from saicinpainting.training.modules.multidilated_conv import MultidilatedConv
def get_activation(kind='tanh'):
    """Build the output activation module; ``kind=False`` disables it."""
    activations = {'tanh': nn.Tanh, 'sigmoid': nn.Sigmoid}
    if isinstance(kind, str) and kind in activations:
        return activations[kind]()
    # identity check on purpose: the config uses the literal `False`
    if kind is False:
        return nn.Identity()
    raise ValueError(f'Unknown activation kind {kind}')
15,385 | import abc
from typing import Tuple, List
import torch
import torch.nn as nn
from saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv
from saicinpainting.training.modules.multidilated_conv import MultidilatedConv
class DepthWiseSeperableConv(nn.Module):
    """Depthwise conv (groups == in channels) followed by a pointwise 1x1 conv."""

    def __init__(self, in_dim, out_dim, *args, **kwargs):
        super().__init__()
        if 'groups' in kwargs:
            # the depthwise conv fixes groups=in_dim; drop a caller-supplied value
            del kwargs['groups']

        self.depthwise = nn.Conv2d(in_dim, in_dim, *args, groups=in_dim, **kwargs)
        self.pointwise = nn.Conv2d(in_dim, out_dim, kernel_size=1)

    def forward(self, x):
        features = self.depthwise(x)
        return self.pointwise(features)
def deconv_factory(kind, ngf, mult, norm_layer, activation, max_features):
    """Build one x2 upsampling stage as a list of layers.

    Channel counts go ``ngf * mult`` -> ``ngf * mult / 2``, both capped at
    ``max_features``.

    :param kind: 'convtranspose' (strided transposed conv) or 'bilinear'
        (upsample followed by a depthwise-separable conv)
    :raise ValueError: on an unknown ``kind``
    """
    in_channels = min(max_features, ngf * mult)
    out_channels = min(max_features, int(ngf * mult / 2))
    if kind == 'convtranspose':
        return [nn.ConvTranspose2d(in_channels, out_channels,
                                   kernel_size=3, stride=2, padding=1, output_padding=1),
                norm_layer(out_channels), activation]
    elif kind == 'bilinear':
        return [nn.Upsample(scale_factor=2, mode='bilinear'),
                DepthWiseSeperableConv(in_channels, out_channels,
                                       kernel_size=3, stride=1, padding=1),
                norm_layer(out_channels), activation]
    else:
        # consistency fix: raise ValueError like the sibling factory helpers
        # (get_conv_block_ctor / get_norm_layer / get_activation); ValueError
        # is still an Exception, so existing callers keep working
        raise ValueError(f"Invalid deconv kind: {kind}")
15,386 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from saicinpainting.training.losses.perceptual import IMAGENET_STD, IMAGENET_MEAN
def get_gauss_kernel(kernel_size, width_factor=1):
    """Return a normalized (kernel_size x kernel_size) Gaussian-like kernel.

    Larger `width_factor` makes the kernel flatter.
    """
    grid = torch.stack(torch.meshgrid(torch.arange(kernel_size),
                                      torch.arange(kernel_size)),
                       dim=0).float()
    center = kernel_size // 2
    kernel = torch.exp(-((grid - center) ** 2).sum(0) / kernel_size / width_factor)
    # normalize so the kernel sums to one
    kernel = kernel / kernel.sum()
    return kernel
15,387 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from saicinpainting.training.losses.perceptual import IMAGENET_STD, IMAGENET_MEAN
def dummy_distance_weighter(real_img, pred_img, mask):
    """No-op weighter: the mask itself is the per-pixel weight."""
    return mask
class BlurMask(nn.Module):
    """Weight mask pixels by a Gaussian blur of the mask, kept inside the mask."""

    def __init__(self, kernel_size=5, width_factor=1):
        super().__init__()
        self.filter = nn.Conv2d(1, 1, kernel_size, padding=kernel_size // 2, padding_mode='replicate', bias=False)
        # fixed Gaussian weights; broadcast into the (1, 1, k, k) conv weight
        self.filter.weight.data.copy_(get_gauss_kernel(kernel_size, width_factor=width_factor))

    def forward(self, real_img, pred_img, mask):
        # pure weighting, no gradient flow needed
        with torch.no_grad():
            return self.filter(mask) * mask
class EmulatedEDTMask(nn.Module):
    """Approximate distance-transform weighting via dilation + blur.

    The known region is dilated with a box filter, then the boundary of the
    remaining unknown region is softened with a Gaussian blur.
    """

    def __init__(self, dilate_kernel_size=5, blur_kernel_size=5, width_factor=1):
        super().__init__()
        self.dilate_filter = nn.Conv2d(1, 1, dilate_kernel_size, padding=dilate_kernel_size // 2,
                                       padding_mode='replicate', bias=False)
        self.dilate_filter.weight.data.copy_(torch.ones(1, 1, dilate_kernel_size, dilate_kernel_size,
                                                        dtype=torch.float))
        self.blur_filter = nn.Conv2d(1, 1, blur_kernel_size, padding=blur_kernel_size // 2,
                                     padding_mode='replicate', bias=False)
        self.blur_filter.weight.data.copy_(get_gauss_kernel(blur_kernel_size, width_factor=width_factor))

    def forward(self, real_img, pred_img, mask):
        with torch.no_grad():
            known_region = 1 - mask
            # a pixel counts as known only if more than one known pixel hit it
            dilated_known = (self.dilate_filter(known_region) > 1).float()
            return self.blur_filter(1 - dilated_known) * mask
class PropagatePerceptualSim(nn.Module):
    """Weight mask pixels by propagating "knownness" along perceptually similar pixels.

    VGG19 features of the real image (truncated after `level` ReLU layers,
    with max-pooling replaced by average pooling) define a similarity between
    adjacent positions; knownness then spreads from the unmasked region for
    `max_iters` steps, and the final weight is min(mask, 1 - knownness).
    """
    def __init__(self, level=2, max_iters=10, temperature=500, erode_mask_size=3):
        super().__init__()
        vgg = torchvision.models.vgg19(pretrained=True).features
        vgg_avg_pooling = []

        # VGG is a frozen feature extractor
        for weights in vgg.parameters():
            weights.requires_grad = False

        cur_level_i = 0
        for module in vgg.modules():
            if module.__class__.__name__ == 'Sequential':
                continue
            elif module.__class__.__name__ == 'MaxPool2d':
                # swap max-pool for avg-pool (smoother feature maps)
                vgg_avg_pooling.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0))
            else:
                vgg_avg_pooling.append(module)
            # stop once the requested number of ReLU levels has been collected
            if module.__class__.__name__ == 'ReLU':
                cur_level_i += 1
                if cur_level_i == level:
                    break

        self.features = nn.Sequential(*vgg_avg_pooling)

        self.max_iters = max_iters
        self.temperature = temperature
        # erosion of the downscaled mask is optional (erode_mask_size <= 0 disables)
        self.do_erode = erode_mask_size > 0
        if self.do_erode:
            # box filter acting as a binary erosion once thresholded
            self.erode_mask = nn.Conv2d(1, 1, erode_mask_size, padding=erode_mask_size // 2, bias=False)
            self.erode_mask.weight.data.fill_(1)

    def forward(self, real_img, pred_img, mask):
        with torch.no_grad():
            real_img = (real_img - IMAGENET_MEAN.to(real_img)) / IMAGENET_STD.to(real_img)
            real_feats = self.features(real_img)

            # similarity between vertically / horizontally adjacent feature pixels
            vertical_sim = torch.exp(-(real_feats[:, :, 1:] - real_feats[:, :, :-1]).pow(2).sum(1, keepdim=True)
                                     / self.temperature)
            horizontal_sim = torch.exp(-(real_feats[:, :, :, 1:] - real_feats[:, :, :, :-1]).pow(2).sum(1, keepdim=True)
                                       / self.temperature)

            mask_scaled = F.interpolate(mask, size=real_feats.shape[-2:], mode='bilinear', align_corners=False)
            if self.do_erode:
                # keep a pixel masked only if more than one masked pixel hit it
                mask_scaled = (self.erode_mask(mask_scaled) > 1).float()

            # knownness starts as the inverse of the (scaled) mask
            cur_knowness = 1 - mask_scaled

            for iter_i in range(self.max_iters):
                # push knownness into each of the four neighbors, gated by similarity
                new_top_knowness = F.pad(cur_knowness[:, :, :-1] * vertical_sim, (0, 0, 1, 0), mode='replicate')
                new_bottom_knowness = F.pad(cur_knowness[:, :, 1:] * vertical_sim, (0, 0, 0, 1), mode='replicate')

                new_left_knowness = F.pad(cur_knowness[:, :, :, :-1] * horizontal_sim, (1, 0, 0, 0), mode='replicate')
                new_right_knowness = F.pad(cur_knowness[:, :, :, 1:] * horizontal_sim, (0, 1, 0, 0), mode='replicate')

                new_knowness = torch.stack([new_top_knowness, new_bottom_knowness,
                                            new_left_knowness, new_right_knowness],
                                           dim=0).max(0).values

                # knownness only grows, never shrinks
                cur_knowness = torch.max(cur_knowness, new_knowness)

            cur_knowness = F.interpolate(cur_knowness, size=mask.shape[-2:], mode='bilinear')
            result = torch.min(mask, 1 - cur_knowness)

        return result
def make_mask_distance_weighter(kind='none', **kwargs):
    """Factory for the mask distance weighters used by the masked losses."""
    if kind == 'none':
        return dummy_distance_weighter
    weighter_classes = {
        'blur': BlurMask,
        'edt': EmulatedEDTMask,
        'pps': PropagatePerceptualSim,
    }
    if kind in weighter_classes:
        return weighter_classes[kind](**kwargs)
    raise ValueError(f'Unknown mask distance weighter kind {kind}')
15,388 | from typing import List
import torch
import torch.nn.functional as F
def masked_l2_loss(pred, target, mask, weight_known, weight_missing):
    """Mean squared error with different weights inside/outside the mask.

    `mask == 1` marks missing (inpainted) pixels, weighted by
    `weight_missing`; known pixels get `weight_known`.
    """
    weights = weight_missing * mask + weight_known * (1 - mask)
    return (weights * F.mse_loss(pred, target, reduction='none')).mean()
15,389 | from typing import List
import torch
import torch.nn.functional as F
def masked_l1_loss(pred, target, mask, weight_known, weight_missing):
    """Mean absolute error with different weights inside/outside the mask.

    `mask == 1` marks missing (inpainted) pixels, weighted by
    `weight_missing`; known pixels get `weight_known`.
    """
    weights = weight_missing * mask + weight_known * (1 - mask)
    return (weights * F.l1_loss(pred, target, reduction='none')).mean()
15,390 | from typing import List
import torch
import torch.nn.functional as F
def feature_matching_loss(fake_features: List[torch.Tensor], target_features: List[torch.Tensor], mask=None):
    """Average MSE between corresponding feature maps.

    With a mask, the error is weighted by "knownness" (1 - mask), where the
    mask is bilinearly resized to each feature map.
    """
    if mask is None:
        per_layer = [F.mse_loss(fake_feat, target_feat)
                     for fake_feat, target_feat in zip(fake_features, target_features)]
        return torch.stack(per_layer).mean()

    total = 0
    layer_count = 0
    for fake_feat, target_feat in zip(fake_features, target_features):
        cur_mask = F.interpolate(mask, size=fake_feat.shape[-2:], mode='bilinear', align_corners=False)
        # only the known (unmasked) region contributes
        total = total + ((fake_feat - target_feat).pow(2) * (1 - cur_mask)).mean()
        layer_count += 1
    return total / layer_count
15,391 | from typing import Tuple, Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
def make_r1_gp(discr_real_pred, real_batch):
    """R1 gradient penalty: mean squared gradient norm of D(real) w.r.t. reals.

    Returns 0 when gradients are globally disabled (e.g. during validation).
    Side effect: clears ``requires_grad`` on `real_batch` afterwards.
    """
    if torch.is_grad_enabled():
        (grad_real,) = torch.autograd.grad(outputs=discr_real_pred.sum(), inputs=real_batch, create_graph=True)
        # per-sample squared L2 norm of the flattened gradient, then batch mean
        grad_penalty = grad_real.view(grad_real.shape[0], -1).norm(2, dim=1).pow(2).mean()
    else:
        grad_penalty = 0
    real_batch.requires_grad = False

    return grad_penalty
15,392 | from typing import Tuple, Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
class NonSaturatingWithR1(BaseAdversarialLoss):
    """Non-saturating GAN loss with an R1 gradient penalty on real images.

    The inpainting mask can alter the fake-side weighting: masked pixels may
    get extra weight for the generator, be the only supervised region, or be
    used directly as the discriminator's per-pixel fake target.
    """
    def __init__(self, gp_coef=5, weight=1, mask_as_fake_target=False, allow_scale_mask=False,
                 mask_scale_mode='nearest', extra_mask_weight_for_gen=0,
                 use_unmasked_for_gen=True, use_unmasked_for_discr=True):
        self.gp_coef = gp_coef
        self.weight = weight
        # use for discr => use for gen;
        # otherwise we teach only the discr to pay attention to very small difference
        assert use_unmasked_for_gen or (not use_unmasked_for_discr)
        # mask as target => use unmasked for discr:
        # if we don't care about unmasked regions at all
        # then it doesn't matter if the value of mask_as_fake_target is true or false
        assert use_unmasked_for_discr or (not mask_as_fake_target)
        self.use_unmasked_for_gen = use_unmasked_for_gen
        self.use_unmasked_for_discr = use_unmasked_for_discr
        self.mask_as_fake_target = mask_as_fake_target
        self.allow_scale_mask = allow_scale_mask
        self.mask_scale_mode = mask_scale_mode
        self.extra_mask_weight_for_gen = extra_mask_weight_for_gen

    def generator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                       discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor,
                       mask=None) \
            -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        # non-saturating generator objective: softplus(-D(fake)) == -log sigmoid(D(fake))
        fake_loss = F.softplus(-discr_fake_pred)
        if (self.mask_as_fake_target and self.extra_mask_weight_for_gen > 0) or \
                not self.use_unmasked_for_gen:  # == if masked region should be treated differently
            # NOTE(review): interpolate_mask is presumably inherited from
            # BaseAdversarialLoss (not visible in this file section)
            mask = self.interpolate_mask(mask, discr_fake_pred.shape[-2:])
            if not self.use_unmasked_for_gen:
                # supervise the generator only inside the masked region
                fake_loss = fake_loss * mask
            else:
                # extra weight for the masked region, base weight elsewhere
                pixel_weights = 1 + mask * self.extra_mask_weight_for_gen
                fake_loss = fake_loss * pixel_weights

        return fake_loss.mean() * self.weight, dict()

    def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                               generator: nn.Module, discriminator: nn.Module):
        # gradients w.r.t. the real batch are required for the R1 penalty
        real_batch.requires_grad = True

    def discriminator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                           discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor,
                           mask=None) \
            -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        real_loss = F.softplus(-discr_real_pred)
        # make_r1_gp also clears requires_grad on real_batch again
        grad_penalty = make_r1_gp(discr_real_pred, real_batch) * self.gp_coef
        fake_loss = F.softplus(discr_fake_pred)

        if not self.use_unmasked_for_discr or self.mask_as_fake_target:
            # == if masked region should be treated differently
            mask = self.interpolate_mask(mask, discr_fake_pred.shape[-2:])
            # use_unmasked_for_discr=False only makes sense for fakes;
            # for reals there is no difference between two regions
            fake_loss = fake_loss * mask
            if self.mask_as_fake_target:
                # outside the mask, fake pixels are real content -> real target
                fake_loss = fake_loss + (1 - mask) * F.softplus(-discr_fake_pred)

        sum_discr_loss = real_loss + grad_penalty + fake_loss
        metrics = dict(discr_real_out=discr_real_pred.mean(),
                       discr_fake_out=discr_fake_pred.mean(),
                       discr_real_gp=grad_penalty)
        return sum_discr_loss.mean(), metrics
class BCELoss(BaseAdversarialLoss):
    """Adversarial loss where the discriminator predicts a per-pixel mask.

    Real pixels target 0; for fakes, the discriminator targets the
    inpainting mask itself.
    """

    def __init__(self, weight):
        self.weight = weight
        self.bce_loss = nn.BCEWithLogitsLoss()

    def generator_loss(self, discr_fake_pred: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        # the generator wants fakes to look fully real, i.e. an all-zero mask
        real_mask_gt = torch.zeros(discr_fake_pred.shape).to(discr_fake_pred.device)
        return self.bce_loss(discr_fake_pred, real_mask_gt) * self.weight, dict()

    def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                               generator: nn.Module, discriminator: nn.Module):
        real_batch.requires_grad = True

    def discriminator_loss(self,
                           mask: torch.Tensor,
                           discr_real_pred: torch.Tensor,
                           discr_fake_pred: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        real_mask_gt = torch.zeros(discr_real_pred.shape).to(discr_real_pred.device)
        # reals target an all-zero mask; fakes target the inpainting mask
        sum_discr_loss = (self.bce_loss(discr_real_pred, real_mask_gt) + self.bce_loss(discr_fake_pred, mask)) / 2
        metrics = dict(discr_real_out=discr_real_pred.mean(),
                       discr_fake_out=discr_fake_pred.mean(),
                       discr_real_gp=0)
        return sum_discr_loss, metrics
def make_discrim_loss(kind, **kwargs):
    """Factory for adversarial losses: 'r1' or 'bce'."""
    if kind == 'r1':
        return NonSaturatingWithR1(**kwargs)
    if kind == 'bce':
        return BCELoss(**kwargs)
    raise ValueError(f'Unknown adversarial loss kind {kind}')
15,393 | import math
import random
import hashlib
import logging
from enum import Enum
import cv2
import numpy as np
from saicinpainting.evaluation.masks.mask import SegmentationMask
from saicinpainting.utils import LinearRamp
class DrawMethod(Enum):
    """Primitive used by make_random_irregular_mask to render each stroke."""
    LINE = 'line'
    CIRCLE = 'circle'
    SQUARE = 'square'
def make_random_irregular_mask(shape, max_angle=4, max_len=60, max_width=20, min_times=0, max_times=10,
                               draw_method=DrawMethod.LINE):
    """Generate a (1, H, W) float32 mask of random thick strokes.

    Draws between `min_times` and `max_times` poly-strokes; each stroke is a
    chain of 1-5 segments with random angle, length and brush width, rendered
    as lines, circles or squares depending on `draw_method`.
    NOTE: uses the global numpy RNG; the draw order of the random values is
    part of the reproducible behavior.
    """
    draw_method = DrawMethod(draw_method)

    height, width = shape
    mask = np.zeros((height, width), np.float32)
    times = np.random.randint(min_times, max_times + 1)
    for i in range(times):
        start_x = np.random.randint(width)
        start_y = np.random.randint(height)
        # each stroke is a chain of 1..5 segments
        for j in range(1 + np.random.randint(5)):
            angle = 0.01 + np.random.randint(max_angle)
            if i % 2 == 0:
                # mirror the direction on every other stroke
                angle = 2 * 3.1415926 - angle
            length = 10 + np.random.randint(max_len)
            brush_w = 5 + np.random.randint(max_width)
            # segment end point, clamped to the image bounds
            end_x = np.clip((start_x + length * np.sin(angle)).astype(np.int32), 0, width)
            end_y = np.clip((start_y + length * np.cos(angle)).astype(np.int32), 0, height)
            if draw_method == DrawMethod.LINE:
                cv2.line(mask, (start_x, start_y), (end_x, end_y), 1.0, brush_w)
            elif draw_method == DrawMethod.CIRCLE:
                cv2.circle(mask, (start_x, start_y), radius=brush_w, color=1., thickness=-1)
            elif draw_method == DrawMethod.SQUARE:
                radius = brush_w // 2
                mask[start_y - radius:start_y + radius, start_x - radius:start_x + radius] = 1
            # next segment continues where this one ended
            start_x, start_y = end_x, end_y
    return mask[None, ...]
15,394 | import math
import random
import hashlib
import logging
from enum import Enum
import cv2
import numpy as np
from saicinpainting.evaluation.masks.mask import SegmentationMask
from saicinpainting.utils import LinearRamp
def make_random_rectangle_mask(shape, margin=10, bbox_min_size=30, bbox_max_size=100, min_times=0, max_times=3):
    """Generate a (1, H, W) float32 mask with random filled rectangles.

    Rectangles stay `margin` pixels away from the borders; their count is
    uniform in [min_times, max_times].
    NOTE: uses the global numpy RNG; the draw order of the random values is
    part of the reproducible behavior -- do not reorder the calls below.
    """
    height, width = shape
    mask = np.zeros((height, width), np.float32)
    # cap the box size so a box always fits inside the margins
    bbox_max_size = min(bbox_max_size, height - margin * 2, width - margin * 2)
    n_boxes = np.random.randint(min_times, max_times + 1)
    for _ in range(n_boxes):
        box_w = np.random.randint(bbox_min_size, bbox_max_size)
        box_h = np.random.randint(bbox_min_size, bbox_max_size)
        left = np.random.randint(margin, width - margin - box_w + 1)
        top = np.random.randint(margin, height - margin - box_h + 1)
        mask[top:top + box_h, left:left + box_w] = 1
    return mask[None, ...]
15,395 | import math
import random
import hashlib
import logging
from enum import Enum
import cv2
import numpy as np
from saicinpainting.evaluation.masks.mask import SegmentationMask
from saicinpainting.utils import LinearRamp
def make_random_superres_mask(shape, min_step=2, max_step=4, min_width=1, max_width=3):
    """Generate a (1, H, W) mask of a regular grid of rows and columns.

    Per axis, a random stride, stripe width and offset are drawn, emulating a
    super-resolution sampling pattern.
    NOTE: uses the global numpy RNG; the draw order (x-axis first, then
    y-axis) is part of the reproducible behavior.
    """
    height, width = shape
    mask = np.zeros((height, width), np.float32)
    step_x = np.random.randint(min_step, max_step + 1)
    width_x = np.random.randint(min_width, min(step_x, max_width + 1))
    offset_x = np.random.randint(0, step_x)

    step_y = np.random.randint(min_step, max_step + 1)
    width_y = np.random.randint(min_width, min(step_y, max_width + 1))
    offset_y = np.random.randint(0, step_y)

    for row_shift in range(width_y):
        mask[offset_y + row_shift::step_y] = 1
    for col_shift in range(width_x):
        mask[:, offset_x + col_shift::step_x] = 1
    return mask[None, ...]
15,396 | import glob
import logging
import os
import random
import albumentations as A
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import webdataset
from omegaconf import open_dict, OmegaConf
from skimage.feature import canny
from skimage.transform import rescale, resize
from torch.utils.data import Dataset, IterableDataset, DataLoader, DistributedSampler, ConcatDataset
from saicinpainting.evaluation.data import InpaintingDataset as InpaintingEvaluationDataset, \
OurInpaintingDataset as OurInpaintingEvaluationDataset, ceil_modulo, InpaintingEvalOnlineDataset
from saicinpainting.training.data.aug import IAAAffine2, IAAPerspective2
from saicinpainting.training.data.masks import get_mask_generator
LOGGER = logging.getLogger(__name__)
class InpaintingTrainDataset(Dataset):
    """Map-style dataset over all *.jpg under `indir` (recursive).

    Each sample is an augmented CHW image paired with a generated mask.
    """

    def __init__(self, indir, mask_generator, transform):
        self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True))
        self.mask_generator = mask_generator
        self.transform = transform
        # running sample counter fed to the mask generator; note it is
        # per-process, so each DataLoader worker keeps its own counter
        self.iter_i = 0

    def __len__(self):
        return len(self.in_files)

    def __getitem__(self, item):
        img = cv2.imread(self.in_files[item])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = self.transform(image=img)['image']
        img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
        # TODO: maybe generate mask before augmentations? slower, but better for segmentation-based masks
        mask = self.mask_generator(img, iter_i=self.iter_i)
        self.iter_i += 1
        return dict(image=img,
                    mask=mask)
class InpaintingTrainWebDataset(IterableDataset):
    """Iterable dataset over a webdataset tar stream of jpg images."""

    def __init__(self, indir, mask_generator, transform, shuffle_buffer=200):
        self.impl = webdataset.Dataset(indir).shuffle(shuffle_buffer).decode('rgb').to_tuple('jpg')
        self.mask_generator = mask_generator
        self.transform = transform

    def __iter__(self):
        for iter_i, (img,) in enumerate(self.impl):
            # webdataset decodes to float [0, 1]; go back to uint8 for the augs
            img = np.clip(img * 255, 0, 255).astype('uint8')
            img = self.transform(image=img)['image']
            img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
            yield dict(image=img,
                       mask=self.mask_generator(img, iter_i=iter_i))
class ImgSegmentationDataset(Dataset):
    """Dataset yielding an image, an inpainting mask and its semantic
    segmentation (one-hot map + class-index map)."""

    def __init__(self, indir, mask_generator, transform, out_size, segm_indir, semantic_seg_n_classes):
        self.indir = indir
        self.segm_indir = segm_indir
        self.mask_generator = mask_generator
        self.transform = transform
        self.out_size = out_size
        self.semantic_seg_n_classes = semantic_seg_n_classes
        self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True))

    def __len__(self):
        return len(self.in_files)

    def __getitem__(self, item):
        path = self.in_files[item]
        img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (self.out_size, self.out_size))
        img = self.transform(image=img)['image']
        img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
        # keep the call order: mask first, then segmentation (RNG stream)
        mask = self.mask_generator(img)
        segm, segm_classes = self.load_semantic_segm(path)
        return dict(image=img,
                    mask=mask,
                    segm=segm,
                    segm_classes=segm_classes)

    def load_semantic_segm(self, img_path):
        # segmentation maps mirror the image tree, stored as .png
        segm_path = img_path.replace(self.indir, self.segm_indir).replace(".jpg", ".png")
        raw = cv2.imread(segm_path, cv2.IMREAD_GRAYSCALE)
        raw = cv2.resize(raw, (self.out_size, self.out_size))
        # labels are stored 1-based; shift to 0-based class indices
        classes = torch.from_numpy(np.clip(raw.astype(int) - 1, 0, None))
        ohe = F.one_hot(classes.long(), num_classes=self.semantic_seg_n_classes)  # w x h x n_classes
        return ohe.permute(2, 0, 1).float(), classes.unsqueeze(0)
def get_transforms(transform_variant, out_size):
    """Build the albumentations augmentation pipeline for training.

    All variants end with conversion to float; the spatial variants also
    pad/crop to `out_size`.  They differ mainly in how aggressive the
    geometric distortions are.

    :raise ValueError: on an unknown `transform_variant`
    """
    if transform_variant == 'default':
        # mild scale jitter + standard color augmentations
        transform = A.Compose([
            A.RandomScale(scale_limit=0.2),  # +/- 20%
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
            A.CLAHE(),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
            A.ToFloat()
        ])
    elif transform_variant == 'distortions':
        # perspective + affine + optical distortion on top of the color augs
        transform = A.Compose([
            IAAPerspective2(scale=(0.0, 0.06)),
            IAAAffine2(scale=(0.7, 1.3),
                       rotate=(-40, 40),
                       shear=(-0.1, 0.1)),
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.OpticalDistortion(),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
            A.CLAHE(),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
            A.ToFloat()
        ])
    elif transform_variant == 'distortions_scale05_1':
        # like 'distortions', but always downscaling (0.5-1.0)
        transform = A.Compose([
            IAAPerspective2(scale=(0.0, 0.06)),
            IAAAffine2(scale=(0.5, 1.0),
                       rotate=(-40, 40),
                       shear=(-0.1, 0.1),
                       p=1),
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.OpticalDistortion(),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
            A.CLAHE(),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
            A.ToFloat()
        ])
    elif transform_variant == 'distortions_scale03_12':
        # wider scale range (0.3-1.2), always applied
        transform = A.Compose([
            IAAPerspective2(scale=(0.0, 0.06)),
            IAAAffine2(scale=(0.3, 1.2),
                       rotate=(-40, 40),
                       shear=(-0.1, 0.1),
                       p=1),
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.OpticalDistortion(),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
            A.CLAHE(),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
            A.ToFloat()
        ])
    elif transform_variant == 'distortions_scale03_07':
        transform = A.Compose([
            IAAPerspective2(scale=(0.0, 0.06)),
            IAAAffine2(scale=(0.3, 0.7),  # scale 512 to 256 in average
                       rotate=(-40, 40),
                       shear=(-0.1, 0.1),
                       p=1),
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.OpticalDistortion(),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
            A.CLAHE(),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
            A.ToFloat()
        ])
    elif transform_variant == 'distortions_light':
        # gentler geometric distortions, no optical distortion
        transform = A.Compose([
            IAAPerspective2(scale=(0.0, 0.02)),
            IAAAffine2(scale=(0.8, 1.8),
                       rotate=(-20, 20),
                       shear=(-0.03, 0.03)),
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
            A.CLAHE(),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
            A.ToFloat()
        ])
    elif transform_variant == 'non_space_transform':
        # color-only augmentations (no geometric changes, no crop)
        transform = A.Compose([
            A.CLAHE(),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
            A.ToFloat()
        ])
    elif transform_variant == 'no_augs':
        transform = A.Compose([
            A.ToFloat()
        ])
    else:
        raise ValueError(f'Unexpected transform_variant {transform_variant}')
    return transform
def get_mask_generator(kind, kwargs):
    """Instantiate a training mask generator by name.

    :param kind: 'mixed' (default when None), 'outpainting' or 'dumb'
    :param kwargs: constructor keyword arguments; None means no extra arguments
    :return: a constructed mask generator instance
    :raises NotImplementedError: for an unrecognized kind
    """
    generator_classes = {
        "mixed": MixedMaskGenerator,
        "outpainting": OutpaintingMaskGenerator,
        "dumb": DumbAreaMaskGenerator,
    }
    normalized_kind = "mixed" if kind is None else kind
    if normalized_kind not in generator_classes:
        raise NotImplementedError(f"No such generator kind = {normalized_kind}")
    ctor_kwargs = {} if kwargs is None else kwargs
    return generator_classes[normalized_kind](**ctor_kwargs)
def make_default_train_dataloader(indir, kind='default', out_size=512, mask_gen_kwargs=None, transform_variant='default',
                                  mask_generator_kind="mixed", dataloader_kwargs=None, ddp_kwargs=None, **kwargs):
    """Build the training DataLoader: dataset (chosen by `kind`) + mask generator + augmentations.

    :param indir: root directory (or webdataset source for 'default_web') with training images
    :param kind: dataset flavor: 'default', 'default_web' (iterable), or 'img_with_segm'
    :param out_size: output crop size passed to the augmentation pipeline (and to the
        segmentation dataset, which also needs it directly)
    :param mask_gen_kwargs: constructor kwargs for the mask generator
    :param transform_variant: name of the augmentation preset (see get_transforms)
    :param mask_generator_kind: 'mixed', 'outpainting' or 'dumb'
    :param dataloader_kwargs: kwargs forwarded to torch DataLoader (may be an OmegaConf node)
    :param ddp_kwargs: if not None, wrap the dataset in a DistributedSampler built with these kwargs
    :param kwargs: extra kwargs forwarded to the dataset constructor
    :return: a torch DataLoader over the configured dataset
    """
    LOGGER.info(f'Make train dataloader {kind} from {indir}. Using mask generator={mask_generator_kind}')
    mask_generator = get_mask_generator(kind=mask_generator_kind, kwargs=mask_gen_kwargs)
    transform = get_transforms(transform_variant, out_size)
    if kind == 'default':
        dataset = InpaintingTrainDataset(indir=indir,
                                         mask_generator=mask_generator,
                                         transform=transform,
                                         **kwargs)
    elif kind == 'default_web':
        dataset = InpaintingTrainWebDataset(indir=indir,
                                            mask_generator=mask_generator,
                                            transform=transform,
                                            **kwargs)
    elif kind == 'img_with_segm':
        dataset = ImgSegmentationDataset(indir=indir,
                                         mask_generator=mask_generator,
                                         transform=transform,
                                         out_size=out_size,
                                         **kwargs)
    else:
        raise ValueError(f'Unknown train dataset kind {kind}')
    if dataloader_kwargs is None:
        dataloader_kwargs = {}
    # iterable (webdataset) datasets cannot be shuffled or sampled by the DataLoader itself
    is_dataset_only_iterable = kind in ('default_web',)
    if ddp_kwargs is not None and not is_dataset_only_iterable:
        # under DDP the sampler does the shuffling, so DataLoader-level shuffle must be off
        dataloader_kwargs['shuffle'] = False
        dataloader_kwargs['sampler'] = DistributedSampler(dataset, **ddp_kwargs)
    if is_dataset_only_iterable and 'shuffle' in dataloader_kwargs:
        # dataloader_kwargs may be a struct-mode OmegaConf node; open_dict lifts
        # struct mode so the key incompatible with IterableDataset can be deleted
        with open_dict(dataloader_kwargs):
            del dataloader_kwargs['shuffle']
    dataloader = DataLoader(dataset, **dataloader_kwargs)
    return dataloader
15,397 | import glob
import logging
import os
import random
import albumentations as A
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import webdataset
from omegaconf import open_dict, OmegaConf
from skimage.feature import canny
from skimage.transform import rescale, resize
from torch.utils.data import Dataset, IterableDataset, DataLoader, DistributedSampler, ConcatDataset
from saicinpainting.evaluation.data import InpaintingDataset as InpaintingEvaluationDataset, \
OurInpaintingDataset as OurInpaintingEvaluationDataset, ceil_modulo, InpaintingEvalOnlineDataset
from saicinpainting.training.data.aug import IAAAffine2, IAAPerspective2
from saicinpainting.training.data.masks import get_mask_generator
def make_default_val_dataset(indir, kind='default', out_size=512, transform_variant='default', **kwargs):
    """Build a validation dataset; a list of input dirs becomes a ConcatDataset.

    :param indir: image directory, or a list/tuple of them (handled recursively)
    :param kind: 'default', 'our_eval', 'img_with_segm' or 'online'
    :param out_size: output size for the kinds that crop/resize ('img_with_segm', 'online')
    :param transform_variant: augmentation preset name, or None to skip building transforms
    :param kwargs: extra dataset kwargs; may also carry 'mask_generator_kind'/'mask_gen_kwargs'
    :return: the constructed dataset
    """
    if OmegaConf.is_list(indir) or isinstance(indir, (tuple, list)):
        # recurse per directory and concatenate the resulting datasets
        return ConcatDataset([
          make_default_val_dataset(idir, kind=kind, out_size=out_size, transform_variant=transform_variant, **kwargs) for idir in indir 
        ])
    LOGGER.info(f'Make val dataloader {kind} from {indir}')
    mask_generator = get_mask_generator(kind=kwargs.get("mask_generator_kind"), kwargs=kwargs.get("mask_gen_kwargs"))
    # NOTE(review): `transform` stays undefined when transform_variant is None;
    # the 'img_with_segm'/'online' branches would then raise NameError — confirm
    # callers always pass a transform_variant for those kinds.
    if transform_variant is not None:
        transform = get_transforms(transform_variant, out_size)
    if kind == 'default':
        dataset = InpaintingEvaluationDataset(indir, **kwargs)
    elif kind == 'our_eval':
        dataset = OurInpaintingEvaluationDataset(indir, **kwargs)
    elif kind == 'img_with_segm':
        # NOTE(review): **kwargs here still contains 'mask_generator_kind'/'mask_gen_kwargs'
        # if the caller supplied them; verify the dataset constructor tolerates that.
        dataset = ImgSegmentationDataset(indir=indir,
                                         mask_generator=mask_generator,
                                         transform=transform,
                                         out_size=out_size,
                                         **kwargs)
    elif kind == 'online':
        dataset = InpaintingEvalOnlineDataset(indir=indir,
                                              mask_generator=mask_generator,
                                              transform=transform,
                                              out_size=out_size,
                                              **kwargs)
    else:
        raise ValueError(f'Unknown val dataset kind {kind}')
    return dataset
def make_default_val_dataloader(*args, dataloader_kwargs=None, **kwargs):
    """Build a validation DataLoader on top of make_default_val_dataset.

    Everything except dataloader_kwargs is forwarded to make_default_val_dataset;
    dataloader_kwargs (if any) configure the torch DataLoader itself.
    """
    loader_opts = {} if dataloader_kwargs is None else dataloader_kwargs
    return DataLoader(make_default_val_dataset(*args, **kwargs), **loader_opts)
15,398 | import abc
from typing import Dict, List
import numpy as np
import torch
from skimage import color
from skimage.segmentation import mark_boundaries
from . import colors
def visualize_mask_and_images(images_dict: Dict[str, np.ndarray], keys: List[str],
                              last_without_mask=True, rescale_keys=None, mask_only_first=None,
                              black_mask=False) -> np.ndarray:
    """Render the named images of one sample side by side, outlining the mask.

    Each entry of images_dict is CHW; the result is a single HWC image with
    the requested keys concatenated horizontally. The binarized mask
    (threshold 0.5) is drawn as a red boundary with a white outline on every
    panel except, optionally, the last one — or only on the first panel when
    mask_only_first is truthy. Keys listed in rescale_keys are min-max
    normalized before display; multi-channel score maps (>3 channels) are
    collapsed to a per-pixel class-label image.
    """
    mask = images_dict['mask'] > 0.5
    panels = []
    n_keys = len(keys)
    for idx, key in enumerate(keys):
        panel = np.transpose(images_dict[key], (1, 2, 0))
        if rescale_keys is not None and key in rescale_keys:
            # min-max normalize into [0, 1); small epsilon guards a flat image
            shifted = panel - panel.min()
            panel = shifted / (shifted.max() + 1e-5)
        if panel.ndim == 2:
            panel = panel[..., None]
        if panel.shape[2] == 1:
            panel = np.repeat(panel, 3, axis=2)
        elif panel.shape[2] > 3:
            # multi-channel score map: argmax to class labels, then colorize
            panel = color.label2rgb(panel.argmax(2), colors=COLORS)
        if mask_only_first:
            draw_boundary = (idx == 0)
        else:
            draw_boundary = (idx < n_keys - 1) or (not last_without_mask)
        if draw_boundary:
            if black_mask:
                panel = panel * (1 - mask[0][..., None])
            panel = mark_boundaries(panel,
                                    mask[0],
                                    color=(1., 0., 0.),
                                    outline_color=(1., 1., 1.),
                                    mode='thick')
        panels.append(panel)
    return np.concatenate(panels, axis=1)
def visualize_mask_and_images_batch(batch: Dict[str, torch.Tensor], keys: List[str], max_items=10,
                                    last_without_mask=True, rescale_keys=None) -> np.ndarray:
    """Visualize up to max_items samples of a batch, stacked vertically.

    Only tensors named in keys (plus 'mask') are used; each sample's row is
    produced by visualize_mask_and_images and the rows are concatenated
    along the vertical axis.
    """
    arrays = {name: tensor.detach().cpu().numpy()
              for name, tensor in batch.items()
              if name in keys or name == 'mask'}
    n_items = min(next(iter(arrays.values())).shape[0], max_items)
    rows = [
        visualize_mask_and_images({name: arr[i] for name, arr in arrays.items()},
                                  keys, last_without_mask=last_without_mask,
                                  rescale_keys=rescale_keys)
        for i in range(n_items)
    ]
    return np.concatenate(rows, axis=0)
15,399 | import random
import colorsys
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
The provided code snippet includes necessary dependencies for implementing the `generate_colors` function. Write a Python function `def generate_colors(nlabels, type='bright', first_color_black=False, last_color_black=True, verbose=False)` to solve the following problem:
Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks :param nlabels: Number of labels (size of colormap) :param type: 'bright' for strong colors, 'soft' for pastel colors :param first_color_black: Option to use first color as black, True or False :param last_color_black: Option to use last color as black, True or False :param verbose: Prints the number of labels and shows the colormap. True or False :return: colormap for matplotlib
Here is the function:
def generate_colors(nlabels, type='bright', first_color_black=False, last_color_black=True, verbose=False):
    # https://stackoverflow.com/questions/14720331/how-to-generate-random-colors-in-matplotlib
    """
    Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks
    :param nlabels: Number of labels (size of colormap)
    :param type: 'bright' for strong colors, 'soft' for pastel colors
    :param first_color_black: Option to use first color as black, True or False
    :param last_color_black: Option to use last color as black, True or False
    :param verbose: Prints the number of labels and shows the colormap. True or False
    :return: colormap for matplotlib
    """
    if type not in ('bright', 'soft'):
        print('Please choose "bright" or "soft" for type')
        return

    if verbose:
        print('Number of labels: ' + str(nlabels))

    randRGBcolors = None
    random_colormap = None

    if type == 'bright':
        # draw HSV triples (free hue, medium-to-full saturation, near-max value)
        # and convert each one to RGB
        randRGBcolors = [
            colorsys.hsv_to_rgb(np.random.uniform(low=0.0, high=1),
                                np.random.uniform(low=0.2, high=1),
                                np.random.uniform(low=0.9, high=1))
            for _ in range(nlabels)
        ]
        if first_color_black:
            randRGBcolors[0] = [0, 0, 0]
        if last_color_black:
            randRGBcolors[-1] = [0, 0, 0]
        random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)

    if type == 'soft':
        # pastel palette: confine every RGB channel to a bright, low-contrast band
        low, high = 0.6, 0.95
        randRGBcolors = [(np.random.uniform(low=low, high=high),
                          np.random.uniform(low=low, high=high),
                          np.random.uniform(low=low, high=high)) for _ in range(nlabels)]
        if first_color_black:
            randRGBcolors[0] = [0, 0, 0]
        if last_color_black:
            randRGBcolors[-1] = [0, 0, 0]
        random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)

    if verbose:
        # render the colormap as a horizontal colorbar for visual inspection
        from matplotlib import colors, colorbar
        from matplotlib import pyplot as plt
        fig, ax = plt.subplots(1, 1, figsize=(15, 0.5))
        bounds = np.linspace(0, nlabels, nlabels + 1)
        norm = colors.BoundaryNorm(bounds, nlabels)
        cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional', ticks=None,
                                   boundaries=bounds, format='%1i', orientation=u'horizontal')

    return randRGBcolors, random_colormap
15,400 | import logging
import torch
import torch.nn.functional as F
from omegaconf import OmegaConf
from saicinpainting.training.data.datasets import make_constant_area_crop_params
from saicinpainting.training.losses.distance_weighting import make_mask_distance_weighter
from saicinpainting.training.losses.feature_matching import feature_matching_loss, masked_l1_loss
from saicinpainting.training.modules.fake_fakes import FakeFakesGenerator
from saicinpainting.training.trainers.base import BaseInpaintingTrainingModule, make_multiscale_noise
from saicinpainting.utils import add_prefix_to_keys, get_ramp
def make_constant_area_crop_params(img_height, img_width, min_size=128, max_size=512, area=256*256, round_to_mod=16):
    """Sample a random crop of roughly constant area within an image.

    One side length is drawn uniformly from [min_size, max_size] (both clamped
    to the image extent) and rounded up to a multiple of round_to_mod; the
    other side is derived from the target area. A coin flip decides which
    side is drawn first.

    :return: (start_y, start_x, out_height, out_width)
    """
    min_size = min(img_height, img_width, min_size)
    max_size = min(img_height, img_width, max_size)

    def _rounded(value):
        # round up to the grid, but never exceed the clamped maximum
        return min(max_size, ceil_modulo(value, round_to_mod))

    if random.random() < 0.5:
        crop_h = _rounded(random.randint(min_size, max_size))
        crop_w = _rounded(area // crop_h)
    else:
        crop_w = _rounded(random.randint(min_size, max_size))
        crop_h = _rounded(area // crop_w)

    top = random.randint(0, img_height - crop_h)
    left = random.randint(0, img_width - crop_w)
    return (top, left, crop_h, crop_w)
def make_constant_area_crop_batch(batch, **kwargs):
    """Apply one shared random constant-area crop to batch['image'] and batch['mask'].

    The crop window is sampled once (see make_constant_area_crop_params) and
    applied to both NCHW tensors; the batch dict is modified and returned.
    """
    top, left, crop_h, crop_w = make_constant_area_crop_params(img_height=batch['image'].shape[2],
                                                               img_width=batch['image'].shape[3],
                                                               **kwargs)
    rows = slice(top, top + crop_h)
    cols = slice(left, left + crop_w)
    batch['image'] = batch['image'][:, :, rows, cols]
    batch['mask'] = batch['mask'][:, :, rows, cols]
    return batch
15,401 | import copy
import logging
from typing import Dict, Tuple
import pandas as pd
import pytorch_lightning as ptl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DistributedSampler
from saicinpainting.evaluation import make_evaluator
from saicinpainting.training.data.datasets import make_default_train_dataloader, make_default_val_dataloader
from saicinpainting.training.losses.adversarial import make_discrim_loss
from saicinpainting.training.losses.perceptual import PerceptualLoss, ResNetPL
from saicinpainting.training.modules import make_generator, make_discriminator
from saicinpainting.training.visualizers import make_visualizer
from saicinpainting.utils import add_prefix_to_keys, average_dicts, set_requires_grad, flatten_dict, \
get_has_ddp_rank
def make_optimizer(parameters, kind='adamw', **kwargs):
    """Construct a torch optimizer over `parameters`.

    :param parameters: iterable of parameters (or param groups) to optimize
    :param kind: 'adam' or 'adamw' (default)
    :param kwargs: forwarded to the optimizer constructor (lr, betas, ...)
    :raises ValueError: for any other kind
    """
    optimizer_classes = {
        'adam': torch.optim.Adam,
        'adamw': torch.optim.AdamW,
    }
    if kind not in optimizer_classes:
        raise ValueError(f'Unknown optimizer kind {kind}')
    return optimizer_classes[kind](parameters, **kwargs)
15,402 | import copy
import logging
from typing import Dict, Tuple
import pandas as pd
import pytorch_lightning as ptl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DistributedSampler
from saicinpainting.evaluation import make_evaluator
from saicinpainting.training.data.datasets import make_default_train_dataloader, make_default_val_dataloader
from saicinpainting.training.losses.adversarial import make_discrim_loss
from saicinpainting.training.losses.perceptual import PerceptualLoss, ResNetPL
from saicinpainting.training.modules import make_generator, make_discriminator
from saicinpainting.training.visualizers import make_visualizer
from saicinpainting.utils import add_prefix_to_keys, average_dicts, set_requires_grad, flatten_dict, \
get_has_ddp_rank
def update_running_average(result: nn.Module, new_iterate_model: nn.Module, decay=0.999):
    """Exponential moving average of parameters: result <- decay*result + (1-decay)*new.

    Updates `result`'s parameters in place from `new_iterate_model` (matched
    by parameter name). Buffers are not averaged.
    """
    with torch.no_grad():
        avg_params = dict(result.named_parameters())
        src_params = dict(new_iterate_model.named_parameters())
        for name in avg_params:
            avg_params[name].data.mul_(decay).add_(src_params[name].data, alpha=1 - decay)
15,403 | import copy
import logging
from typing import Dict, Tuple
import pandas as pd
import pytorch_lightning as ptl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DistributedSampler
from saicinpainting.evaluation import make_evaluator
from saicinpainting.training.data.datasets import make_default_train_dataloader, make_default_val_dataloader
from saicinpainting.training.losses.adversarial import make_discrim_loss
from saicinpainting.training.losses.perceptual import PerceptualLoss, ResNetPL
from saicinpainting.training.modules import make_generator, make_discriminator
from saicinpainting.training.visualizers import make_visualizer
from saicinpainting.utils import add_prefix_to_keys, average_dicts, set_requires_grad, flatten_dict, \
get_has_ddp_rank
def make_multiscale_noise(base_tensor, scales=6, scale_mode='bilinear'):
    """Sample `scales` single-channel noise maps at progressively halved
    resolutions, upsample each back to the input's spatial size, and stack
    them along the channel axis.

    :param base_tensor: (B, C, H, W) tensor used only for its shape and device
    :param scales: number of noise octaves to generate
    :param scale_mode: interpolation mode for the upsampling
    :return: (B, scales, H, W) tensor of multi-scale noise
    """
    batch_size, _, height, width = base_tensor.shape
    # align_corners is only a valid argument for these interpolation modes
    corners = False if scale_mode in ('bilinear', 'bicubic') else None
    noise_maps = []
    cur_h, cur_w = height, width
    for _ in range(scales):
        raw = torch.randn(batch_size, 1, cur_h, cur_w, device=base_tensor.device)
        noise_maps.append(F.interpolate(raw, size=(height, width), mode=scale_mode,
                                        align_corners=corners))
        cur_h //= 2
        cur_w //= 2
    return torch.cat(noise_maps, dim=1)
15,404 | import logging
from abc import abstractmethod, ABC
import numpy as np
import sklearn
import sklearn.svm
import torch
import torch.nn as nn
import torch.nn.functional as F
from joblib import Parallel, delayed
from scipy import linalg
from models.ade20k import SegmentationModule, NUM_CLASS, segm_options
from .fid.inception import InceptionV3
from .lpips import PerceptualLoss
from .ssim import SSIM
The provided code snippet includes necessary dependencies for implementing the `get_groupings` function. Write a Python function `def get_groupings(groups)` to solve the following problem:
:param groups: group numbers for respective elements :return: dict of kind {group_idx: indices of the corresponding group elements}
Here is the function:
def get_groupings(groups):
    """
    :param groups: group numbers for respective elements
    :return: dict of kind {group_idx: indices of the corresponding group elements}
    """
    labels, counts = np.unique(groups, return_counts=True)
    order = np.argsort(groups)
    # cumulative counts give the [start, end) slice of each label in `order`
    boundaries = np.concatenate([[0], np.cumsum(counts)])
    return {label: order[start:end]
            for label, start, end in zip(labels, boundaries[:-1], boundaries[1:])}
15,405 | import logging
from abc import abstractmethod, ABC
import numpy as np
import sklearn
import sklearn.svm
import torch
import torch.nn as nn
import torch.nn.functional as F
from joblib import Parallel, delayed
from scipy import linalg
from models.ade20k import SegmentationModule, NUM_CLASS, segm_options
from .fid.inception import InceptionV3
from .lpips import PerceptualLoss
from .ssim import SSIM
def distribute_values_to_classes(target_class_freq_by_image_mask, values, idx2name):
    """Average per-image metric `values` over classes, weighted by how often
    each class occurs in each image's mask.

    :param target_class_freq_by_image_mask: (n_images, n_classes) class-frequency matrix
    :param values: (n_images,) per-image metric values
    :param idx2name: mapping from class index to class name
    :return: {class_name: weighted value} restricted to classes that actually occur
    """
    assert target_class_freq_by_image_mask.ndim == 2 and target_class_freq_by_image_mask.shape[0] == values.shape[0]
    class_totals = target_class_freq_by_image_mask.sum(0)
    # epsilon keeps the division finite for classes with zero total frequency
    weighted = (target_class_freq_by_image_mask * values[..., None]).sum(0) / (class_totals + 1e-3)
    return {idx2name[idx]: val for idx, val in enumerate(weighted) if class_totals[idx] > 0}
15,406 | import logging
from abc import abstractmethod, ABC
import numpy as np
import sklearn
import sklearn.svm
import torch
import torch.nn as nn
import torch.nn.functional as F
from joblib import Parallel, delayed
from scipy import linalg
from models.ade20k import SegmentationModule, NUM_CLASS, segm_options
from .fid.inception import InceptionV3
from .lpips import PerceptualLoss
from .ssim import SSIM
def get_segmentation_idx2name():
    """Map zero-based class index -> class name from the ADE20k `segm_options` table.

    The table's 'Idx' column is one-based, hence the i - 1 shift.
    """
    name_by_table_idx = segm_options['classes'].set_index('Idx', drop=True)['Name'].to_dict()
    return {table_idx - 1: class_name for table_idx, class_name in name_by_table_idx.items()}
15,407 | import logging
from abc import abstractmethod, ABC
import numpy as np
import sklearn
import sklearn.svm
import torch
import torch.nn as nn
import torch.nn.functional as F
from joblib import Parallel, delayed
from scipy import linalg
from models.ade20k import SegmentationModule, NUM_CLASS, segm_options
from .fid.inception import InceptionV3
from .lpips import PerceptualLoss
from .ssim import SSIM
def calculate_frechet_distance(activations_pred, activations_target, eps=1e-6):
    """FID between two activation sets: fit a Gaussian (mu, sigma) to each and
    compute d^2 = ||mu1 - mu2||^2 + Tr(C1 + C2 - 2*sqrt(C1*C2)).

    :param activations_pred: activations of predictions, one row per sample
    :param activations_target: activations of targets, same layout
    :param eps: diagonal jitter applied when the covariance product is singular
    :return: scalar Frechet distance
    """
    mu1, sigma1 = fid_calculate_activation_statistics(activations_pred)
    mu2, sigma2 = fid_calculate_activation_statistics(activations_target)
    diff = mu1 - mu2
    # Product might be almost singular
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        # retry with jittered diagonals so sqrtm yields a finite result
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        LOGGER.warning(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    # Numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        # tolerance was loosened from 1e-3 to 1e-2 (see commented-out line)
        # if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-2):
            m = np.max(np.abs(covmean.imag))
            raise ValueError('Imaginary component {}'.format(m))
        covmean = covmean.real
    tr_covmean = np.trace(covmean)
    return (diff.dot(diff) + np.trace(sigma1) +
            np.trace(sigma2) - 2 * tr_covmean)
def calculade_fid_no_img(img_i, activations_pred, activations_target, eps=1e-6):
    """FID recomputed with sample img_i's predicted activation replaced by its
    target activation — a leave-one-out sensitivity measure for that image.
    (Name typo 'calculade' kept for caller compatibility.)
    """
    swapped_pred = activations_pred.copy()
    swapped_pred[img_i] = activations_target[img_i]
    return calculate_frechet_distance(swapped_pred, activations_target, eps=eps)
15,408 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'
LOGGER = logging.getLogger(__name__)
class FIDInceptionA(models.inception.InceptionA):
    """InceptionA block patched for FID computation"""
    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))
        out_3x3dbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        # Patch vs torchvision: TensorFlow's average pooling excludes the zero
        # padding from the divisor, hence count_include_pad=False.
        out_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False))
        return torch.cat([out_1x1, out_5x5, out_3x3dbl, out_pool], 1)
class FIDInceptionC(models.inception.InceptionC):
    """InceptionC block patched for FID computation"""
    def __init__(self, in_channels, channels_7x7):
        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_7x7 = self.branch7x7_3(self.branch7x7_2(self.branch7x7_1(x)))
        out_7x7dbl = self.branch7x7dbl_1(x)
        out_7x7dbl = self.branch7x7dbl_2(out_7x7dbl)
        out_7x7dbl = self.branch7x7dbl_3(out_7x7dbl)
        out_7x7dbl = self.branch7x7dbl_4(out_7x7dbl)
        out_7x7dbl = self.branch7x7dbl_5(out_7x7dbl)
        # Patch vs torchvision: TensorFlow's average pooling excludes the zero
        # padding from the divisor, hence count_include_pad=False.
        out_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False))
        return torch.cat([out_1x1, out_7x7, out_7x7dbl, out_pool], 1)
class FIDInceptionE_1(models.inception.InceptionE):
    """First InceptionE block patched for FID computation"""
    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = torch.cat([self.branch3x3_2a(stem_3x3),
                             self.branch3x3_2b(stem_3x3)], 1)
        stem_3x3dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_3x3dbl = torch.cat([self.branch3x3dbl_3a(stem_3x3dbl),
                                self.branch3x3dbl_3b(stem_3x3dbl)], 1)
        # Patch vs torchvision: TensorFlow's average pooling excludes the zero
        # padding from the divisor, hence count_include_pad=False.
        out_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False))
        return torch.cat([out_1x1, out_3x3, out_3x3dbl, out_pool], 1)
class FIDInceptionE_2(models.inception.InceptionE):
    """Second InceptionE block patched for FID computation"""
    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = torch.cat([self.branch3x3_2a(stem_3x3),
                             self.branch3x3_2b(stem_3x3)], 1)
        stem_3x3dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_3x3dbl = torch.cat([self.branch3x3dbl_3a(stem_3x3dbl),
                                self.branch3x3dbl_3b(stem_3x3dbl)], 1)
        # Patch vs torchvision: the FID Inception model uses MAX pooling here
        # instead of average pooling. This is likely an error in that specific
        # Inception implementation (other Inception models, and the paper,
        # use average pooling), but it is reproduced for weight compatibility.
        out_pool = self.branch_pool(
            F.max_pool2d(x, kernel_size=3, stride=1, padding=1))
        return torch.cat([out_1x1, out_3x3, out_3x3dbl, out_pool], 1)
The provided code snippet includes necessary dependencies for implementing the `fid_inception_v3` function. Write a Python function `def fid_inception_v3()` to solve the following problem:
Build pretrained Inception model for FID computation The Inception model for FID computation uses a different set of weights and has a slightly different structure than torchvision's Inception. This method first constructs torchvision's Inception and then patches the necessary parts that are different in the FID Inception model.
Here is the function:
def fid_inception_v3():
    """Build pretrained Inception model for FID computation
    The Inception model for FID computation uses a different set of weights
    and has a slightly different structure than torchvision's Inception.
    This method first constructs torchvision's Inception and then patches the
    necessary parts that are different in the FID Inception model.
    """
    LOGGER.info('fid_inception_v3 called')
    # num_classes=1008 matches the TF-FID checkpoint's classifier head
    # NOTE(review): `pretrained=` is deprecated in newer torchvision
    # (replaced by `weights=`) — confirm the pinned torchvision accepts it.
    inception = models.inception_v3(num_classes=1008,
                                    aux_logits=False,
                                    pretrained=False)
    LOGGER.info('models.inception_v3 done')
    # swap in the FID-compatible (TF-style pooling) block implementations
    inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
    inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
    inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
    inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
    inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
    inception.Mixed_7b = FIDInceptionE_1(1280)
    inception.Mixed_7c = FIDInceptionE_2(2048)
    LOGGER.info('fid_inception_v3 patching done')
    # NOTE(review): load_state_dict_from_url is not imported in this snippet —
    # presumably `from torch.hub import load_state_dict_from_url`; confirm.
    state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
    LOGGER.info('fid_inception_v3 weights downloaded')
    inception.load_state_dict(state_dict)
    LOGGER.info('fid_inception_v3 weights loaded into model')
    return inception
15,409 | import os
import pathlib
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import numpy as np
import torch
from imageio import imread
from PIL import Image, JpegImagePlugin
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians
    X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    :param mu1: mean activations for generated samples
    :param sigma1: covariance of activations for generated samples
    :param mu2: precalculated mean over a representative data set
    :param sigma2: precalculated covariance over a representative data set
    :param eps: diagonal jitter added when the covariance product is singular
    :return: the Frechet distance (a float)
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    mean_diff = mu1 - mu2

    # sqrtm of a near-singular product can fail; retry with jittered diagonals
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # numerical error can leave a tiny imaginary component; drop it if small
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-2):
            raise ValueError('Imaginary component {}'.format(np.max(np.abs(covmean.imag))))
        covmean = covmean.real

    return (mean_diff.dot(mean_diff) + np.trace(sigma1)
            + np.trace(sigma2) - 2 * np.trace(covmean))
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
m, s = calculate_activation_statistics(files, model, batch_size,
dims, cuda)
return m, s
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""
    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3
    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,   # First max pooling features
        192: 1,  # Second max pooling features
        768: 2,  # Pre-aux classifier features
        2048: 3  # Final average pooling features
    }
    # NOTE(review): mutable default is safe here (the list is only read), but
    # a tuple would be cleaner.
    def __init__(self,
                 output_blocks=[DEFAULT_BLOCK_INDEX],
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3
        Parameters
        ----------
        output_blocks : list of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()
        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)
        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'
        # blocks are registered cumulatively (0..last_needed_block), so
        # `self.blocks[i]` always corresponds to output block index i
        self.blocks = nn.ModuleList()
        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)
        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))
        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1,
                inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))
        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))
        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a,
                inception.Mixed_7b,
                inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))
        for param in self.parameters():
            param.requires_grad = requires_grad
    def forward(self, inp):
        """Get Inception feature maps
        Parameters
        ----------
        inp : torch.autograd.Variable
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)
        Returns
        -------
        List of torch.autograd.Variable, corresponding to the selected output
        block, sorted ascending by index
        """
        outp = []
        x = inp
        if self.resize_input:
            x = F.interpolate(x,
                              size=(299, 299),
                              mode='bilinear',
                              align_corners=False)
        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)
        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)
            # stop early once every requested block has been collected
            if idx == self.last_needed_block:
                break
        return outp
The provided code snippet includes necessary dependencies for implementing the `calculate_fid_given_paths` function. Write a Python function `def calculate_fid_given_paths(paths, batch_size, cuda, dims)` to solve the following problem:
Calculates the FID of two paths
Here is the function:
def calculate_fid_given_paths(paths, batch_size, cuda, dims):
    """Calculates the FID between the two image directories in `paths`."""
    # Fail fast on any path that does not exist on disk.
    for path in paths:
        if not os.path.exists(path):
            raise RuntimeError('Invalid path: %s' % path)

    model = InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[dims]])
    if cuda:
        model.cuda()

    # Activation statistics (mean vector, covariance matrix) per path.
    m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size, dims, cuda)
    m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size, dims, cuda)
    return calculate_frechet_distance(m1, s1, m2, s2)
15,410 | import os
import pathlib
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import numpy as np
import torch
from imageio import imread
from PIL import Image, JpegImagePlugin
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet distance between two Gaussians.

    For X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2):
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1, mu2       : mean activation vectors of the two sample sets.
    -- sigma1, sigma2 : covariance matrices of the two sample sets.
    -- eps            : diagonal jitter used when the covariance product
                        is nearly singular.
    Returns:
    -- The Frechet Distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    diff = mu1 - mu2

    # sqrtm of an almost-singular product may contain non-finite entries;
    # retry with a small diagonal offset in that case.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error can introduce a tiny imaginary component.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-2):
            m = np.max(np.abs(covmean.imag))
            raise ValueError('Imaginary component {}'.format(m))
        covmean = covmean.real

    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
def _compute_statistics_of_images(images, model, batch_size, dims, cuda, keep_size=False):
if isinstance(images, list): # exact paths to files are provided
m, s = calculate_activation_statistics(images, model, batch_size,
dims, cuda, keep_size=keep_size)
return m, s
else:
raise ValueError
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""

    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3

    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,    # First max pooling features
        192: 1,   # Second max pooling features
        768: 2,   # Pre-aux classifier features
        2048: 3   # Final average pooling features
    }

    def __init__(self,
                 output_blocks=(DEFAULT_BLOCK_INDEX,),
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3

        Parameters
        ----------
        output_blocks : sequence of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
            BUG FIX: the default used to be the mutable list
            [DEFAULT_BLOCK_INDEX]; a tuple avoids the shared
            mutable-default-argument pitfall and is fully backward
            compatible (only sorted()/max() are applied to it).
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'

        self.blocks = nn.ModuleList()

        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1,
                inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a,
                inception.Mixed_7b,
                inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Get Inception feature maps

        Parameters
        ----------
        inp : torch.autograd.Variable
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)

        Returns
        -------
        List of torch.autograd.Variable, corresponding to the selected output
        block, sorted ascending by index
        """
        outp = []
        x = inp

        if self.resize_input:
            x = F.interpolate(x,
                              size=(299, 299),
                              mode='bilinear',
                              align_corners=False)

        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)

        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)

            # Blocks past the last requested one were never constructed.
            if idx == self.last_needed_block:
                break

        return outp
def calculate_fid_given_images(images, batch_size, cuda, dims, use_globals=False, keep_size=False):
    """Compute FID between two in-memory lists of PIL images.

    Params:
    -- images      : pair (list_a, list_b) of lists of PIL.Image objects.
    -- batch_size  : inference batch size for the Inception model.
    -- cuda        : if True, run the Inception model on GPU.
    -- dims        : Inception feature dimensionality (key of
                     InceptionV3.BLOCK_INDEX_BY_DIM).
    -- use_globals : cache the Inception model in the module-level global
                     FID_MODEL so multiprocessing workers can reuse it.
    -- keep_size   : forwarded to the activation-statistics computation.
    Returns:
    -- the FID value (float).
    """
    if use_globals:
        global FID_MODEL  # for multiprocessing

    for imgs in images:
        if not (isinstance(imgs, list)
                and isinstance(imgs[0], (Image.Image, JpegImagePlugin.JpegImageFile))):
            raise RuntimeError('Invalid images')

    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]

    if 'FID_MODEL' not in globals() or not use_globals:
        model = InceptionV3([block_idx])
        if cuda:
            model.cuda()
        if use_globals:
            FID_MODEL = model
    else:
        model = FID_MODEL

    # BUG FIX: keep_size was previously hard-coded to False in both calls,
    # silently ignoring the caller's keep_size argument.
    m1, s1 = _compute_statistics_of_images(images[0], model, batch_size,
                                           dims, cuda, keep_size=keep_size)
    m2, s2 = _compute_statistics_of_images(images[1], model, batch_size,
                                           dims, cuda, keep_size=keep_size)
    fid_value = calculate_frechet_distance(m1, s1, m2, s2)
    return fid_value
15,411 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
def normalize_tensor(in_feat, eps=1e-10):
    """L2-normalize a feature map along its channel dimension (dim 1)."""
    channel_norm = torch.sum(in_feat ** 2, dim=1, keepdim=True).sqrt()
    return in_feat / (channel_norm + eps)
15,412 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
def l2(p0, p1, range=255.):
    """Half the mean squared error between two images rescaled to [0, 1]."""
    diff = p0 / range - p1 / range
    return 0.5 * np.mean(diff ** 2)
15,413 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
def psnr(p0, p1, peak=255.):
    """Peak signal-to-noise ratio (in dB) between two images."""
    mse = np.mean((1. * p0 - 1. * p1) ** 2)
    return 10 * np.log10(peak ** 2 / mse)
15,414 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
def dssim(p0, p1, range=255.):
    """Structural dissimilarity: (1 - SSIM) / 2, in [0, 1].

    BUG FIX: this previously called the undefined name `compare_ssim`
    (a NameError at call time); the module imports
    `structural_similarity` from skimage.metrics.
    """
    return (1 - structural_similarity(p0, p1, data_range=range, multichannel=True)) / 2.
15,415 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
def rgb2lab(in_img, mean_cent=False):
    """Convert an RGB image to CIELAB; optionally center L at zero (L - 50)."""
    from skimage import color
    lab = color.rgb2lab(in_img)
    if mean_cent:
        lab[:, :, 0] -= 50
    return lab
def np2tensor(np_obj):
    """Convert an HxWxC numpy image into a 1xCxHxW torch tensor."""
    expanded = np_obj[:, :, :, np.newaxis]  # HxWxCx1
    return torch.Tensor(expanded.transpose((3, 2, 0, 1)))
def rgb2lab(input):
    """Convert an RGB image with values in [0, 255] to CIELAB."""
    from skimage import color
    normalized = input / 255.
    return color.rgb2lab(normalized)
def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.):
    """Map a 1xCxHxW tensor in [-1, 1] to an HxWxC uint8 image in [0, 255]."""
    chw = image_tensor[0].cpu().float().numpy()
    hwc = np.transpose(chw, (1, 2, 0))
    return ((hwc + cent) * factor).astype(imtype)
def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.):
    """Map a 1xCxHxW tensor in [-1, 1] to an HxWxC uint8 image in [0, 255]."""
    arr = image_tensor[0].cpu().float().numpy()
    arr = (arr.transpose((1, 2, 0)) + cent) * factor
    return arr.astype(imtype)
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
def tensor2tensorlab(image_tensor, to_norm=True, mc_only=False):
    """Convert an RGB image tensor to a LAB tensor.

    mc_only mean-centers L only; to_norm additionally scales LAB by 1/100.
    """
    from skimage import color
    lab = color.rgb2lab(tensor2im(image_tensor))
    if mc_only:
        lab[:, :, 0] -= 50
    if to_norm and not mc_only:
        lab[:, :, 0] -= 50
        lab = lab / 100.
    return np2tensor(lab)
15,416 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
def rgb2lab(in_img, mean_cent=False):
def tensor2np(tensor_obj):
def np2tensor(np_obj):
def rgb2lab(input):
def im2tensor(image, imtype=np.uint8, cent=1., factor=255. / 2.):
def im2tensor(image, imtype=np.uint8, cent=1., factor=255. / 2.):
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
def tensorlab2tensor(lab_tensor, return_inbnd=False):
    """Convert a normalized LAB tensor back to an RGB image tensor.

    When return_inbnd is True, also return a mask of pixels whose LAB
    values survive the LAB->RGB->LAB round trip (stayed in RGB gamut).
    """
    from skimage import color
    import warnings
    warnings.filterwarnings("ignore")

    lab_img = tensor2np(lab_tensor) * 100.
    lab_img[:, :, 0] += 50
    rgb_img = 255. * np.clip(color.lab2rgb(lab_img.astype('float')), 0, 1)
    if not return_inbnd:
        return im2tensor(rgb_img)
    # Round-trip back to LAB and flag the pixels that changed noticeably.
    lab_roundtrip = color.rgb2lab(rgb_img.astype('uint8'))
    in_gamut = 1. * np.isclose(lab_roundtrip, lab_img, atol=2.)
    mask = np2tensor(np.prod(in_gamut, axis=2)[:, :, np.newaxis])
    return (im2tensor(rgb_img), mask)
15,417 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
def tensor2vec(vector_tensor):
    """Collapse an N x C x 1 x 1 tensor into an N x C numpy array."""
    as_numpy = vector_tensor.data.cpu().numpy()
    return as_numpy[:, :, 0, 0]
15,418 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
The provided code snippet includes necessary dependencies for implementing the `score_2afc_dataset` function. Write a Python function `def score_2afc_dataset(data_loader, func, name='')` to solve the following problem:
Function computes Two Alternative Forced Choice (2AFC) score using distance function 'func' in dataset 'data_loader' INPUTS data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside func - callable distance function - calling d=func(in0,in1) should take 2 pytorch tensors with shape Nx3xXxY, and return numpy array of length N OUTPUTS [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators [1] - dictionary with following elements d0s,d1s - N arrays containing distances between reference patch to perturbed patches gts - N array in [0,1], preferred patch selected by human evaluators (closer to "0" for left patch p0, "1" for right patch p1, "0.6" means 60pct people preferred right patch, 40pct preferred left) scores - N array in [0,1], corresponding to what percentage function agreed with humans CONSTS N - number of test triplets in data_loader
Here is the function:
def score_2afc_dataset(data_loader, func, name=''):
    ''' Compute the Two Alternative Forced Choice (2AFC) score of distance
    function `func` over the triplets in `data_loader`.
    INPUTS
        data_loader - CustomDatasetDataLoader object wrapping a TwoAFCDataset
        func - callable distance function; d = func(in0, in1) takes two
            Nx3xXxY pytorch tensors and returns a length-N array
    OUTPUTS
        [0] - 2AFC score in [0,1]: fraction of triplets on which func agrees
            with the human evaluators
        [1] - dict with d0s, d1s (distances from the reference patch to the
            two perturbed patches), gts (human preference in [0,1], closer to
            "1" meaning the right patch p1 was preferred) and per-triplet
            scores (agreement fraction with humans)
    CONSTS
        N - number of test triplets in data_loader
    '''
    dist_p0, dist_p1, judges = [], [], []
    for batch in tqdm(data_loader.load_data(), desc=name):
        dist_p0 += func(batch['ref'], batch['p0']).data.cpu().numpy().flatten().tolist()
        dist_p1 += func(batch['ref'], batch['p1']).data.cpu().numpy().flatten().tolist()
        judges += batch['judge'].cpu().numpy().flatten().tolist()

    d0s = np.array(dist_p0)
    d1s = np.array(dist_p1)
    gts = np.array(judges)
    # Agreement: credit (1 - gt) when p0 is judged closer, gt when p1 is,
    # and half credit on exact ties.
    scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5

    return (np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores))
15,419 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC Average Precision given recall and precision arrays.
    If use_07_metric is true, uses the VOC 07 11 point method
    (default: False, the exact area under the PR curve).
    """
    if use_07_metric:
        # 11-point interpolation: average the max precision reached at
        # recall thresholds 0.0, 0.1, ..., 1.0.
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            p = 0 if np.sum(rec >= t) == 0 else np.max(prec[rec >= t])
            ap += p / 11.
        return ap

    # Exact AP: append sentinel values, take the precision envelope, then
    # integrate precision over the points where recall changes.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
The provided code snippet includes necessary dependencies for implementing the `score_jnd_dataset` function. Write a Python function `def score_jnd_dataset(data_loader, func, name='')` to solve the following problem:
Function computes JND score using distance function 'func' in dataset 'data_loader' INPUTS data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside func - callable distance function - calling d=func(in0,in1) should take 2 pytorch tensors with shape Nx3xXxY, and return pytorch array of length N OUTPUTS [0] - JND score in [0,1], mAP score (area under precision-recall curve) [1] - dictionary with following elements ds - N array containing distances between two patches shown to human evaluator sames - N array containing fraction of people who thought the two patches were identical CONSTS N - number of test triplets in data_loader
Here is the function:
def score_jnd_dataset(data_loader, func, name=''):
    ''' Compute the Just Noticeable Difference (JND) score — mAP, the area
    under the precision-recall curve — of distance function `func` over the
    patch pairs in `data_loader`.
    INPUTS
        data_loader - CustomDatasetDataLoader object wrapping a JNDDataset
        func - callable distance function; d = func(in0, in1) takes two
            Nx3xXxY pytorch tensors and returns a length-N pytorch array
    OUTPUTS
        [0] - JND score in [0,1] (mAP)
        [1] - dict with ds (distances between the two patches shown to the
            human evaluator) and sames (fraction of people who judged the
            two patches identical)
    CONSTS
        N - number of test triplets in data_loader
    '''
    distances, votes = [], []
    for batch in tqdm(data_loader.load_data(), desc=name):
        distances += func(batch['p0'], batch['p1']).data.cpu().numpy().tolist()
        votes += batch['same'].cpu().numpy().flatten().tolist()

    ds = np.array(distances)
    sames = np.array(votes)

    # Sort by distance and sweep a threshold to build the PR curve.
    order = np.argsort(ds)
    sames_sorted = sames[order]

    TPs = np.cumsum(sames_sorted)
    FPs = np.cumsum(1 - sames_sorted)
    FNs = np.sum(sames_sorted) - TPs

    precs = TPs / (TPs + FPs)
    recs = TPs / (TPs + FNs)
    score = voc_ap(recs, precs)

    return (score, dict(ds=ds, sames=sames))
15,420 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
def spatial_average(in_tens, keepdim=True):
    """Average a NCHW tensor over its spatial (H, W) dimensions."""
    spatial_dims = [2, 3]
    return in_tens.mean(spatial_dims, keepdim=keepdim)
15,421 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
def upsample(in_tens, out_H=64):
    """Bilinearly resize a NCHW tensor so its height becomes out_H.

    Assumes the same scale factor applies to H and W.
    """
    scale = 1. * out_H / in_tens.shape[2]
    resizer = nn.Upsample(scale_factor=scale, mode='bilinear', align_corners=False)
    return resizer(in_tens)
15,422 | import numpy as np
from skimage.metrics import structural_similarity
import torch
from saicinpainting.utils import get_shape
import os
from collections import OrderedDict
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from collections import namedtuple
import torch
from torchvision import models as tv
def print_network(net):
    """Print a network's structure and its total parameter count."""
    total = sum(param.numel() for param in net.parameters())
    print('Network', net)
    print('Total number of parameters: %d' % total)
15,423 | import logging
import math
from typing import Dict
import numpy as np
import torch
import torch.nn as nn
import tqdm
from torch.utils.data import DataLoader
from saicinpainting.evaluation.utils import move_to_device
def ssim_fid100_f1(metrics, fid_scale=100):
    """F1-style combination of mean SSIM and a rescaled FID.

    FID is mapped into [0, 1] via max(0, fid_scale - fid) / fid_scale so
    that bigger is better, then combined with SSIM harmonically
    (1e-3 guards against a zero denominator).
    """
    ssim_mean = metrics[('ssim', 'total')]['mean']
    fid_mean = metrics[('fid', 'total')]['mean']
    fid_goodness = max(0, fid_scale - fid_mean) / fid_scale
    return 2 * ssim_mean * fid_goodness / (ssim_mean + fid_goodness + 1e-3)
15,424 | import logging
import math
from typing import Dict
import numpy as np
import torch
import torch.nn as nn
import tqdm
from torch.utils.data import DataLoader
from saicinpainting.evaluation.utils import move_to_device
def lpips_fid100_f1(metrics, fid_scale=100):
    """F1-style combination of inverted mean LPIPS and a rescaled FID.

    Both quantities are mapped so that bigger is better before being
    combined harmonically (1e-3 guards against a zero denominator).
    """
    lpips_goodness = 1 - metrics[('lpips', 'total')]['mean']  # invert, so bigger is better
    fid_goodness = max(0, fid_scale - metrics[('fid', 'total')]['mean']) / fid_scale
    return 2 * lpips_goodness * fid_goodness / (lpips_goodness + fid_goodness + 1e-3)
15,425 | from enum import Enum
import yaml
from easydict import EasyDict as edict
import torch.nn as nn
import torch
def load_yaml(path):
    """Load a YAML file into an attribute-accessible EasyDict."""
    with open(path, 'r') as stream:
        return edict(yaml.safe_load(stream))
15,426 | import glob
import os
import cv2
import PIL.Image as Image
import numpy as np
from torch.utils.data import Dataset
import torch.nn.functional as F
def ceil_modulo(x, mod):
def pad_img_to_modulo(img, mod):
    """Symmetrically pad a CxHxW image so H and W become multiples of mod."""
    channels, height, width = img.shape
    padded_h = ceil_modulo(height, mod)
    padded_w = ceil_modulo(width, mod)
    pad_spec = ((0, 0), (0, padded_h - height), (0, padded_w - width))
    return np.pad(img, pad_spec, mode='symmetric')
15,427 | import glob
import os
import cv2
import PIL.Image as Image
import numpy as np
from torch.utils.data import Dataset
import torch.nn.functional as F
def scale_image(img, factor, interpolation=cv2.INTER_AREA):
    """Rescale a CxHxW image by `factor`, preserving the CxHxW layout.

    Single-channel inputs are resized as 2D arrays; multi-channel inputs
    go through the HxWxC layout cv2 expects and back.
    """
    single_channel = img.shape[0] == 1
    hw_img = img[0] if single_channel else np.transpose(img, (1, 2, 0))
    resized = cv2.resize(hw_img, dsize=None, fx=factor, fy=factor,
                         interpolation=interpolation)
    if resized.ndim == 2:
        return resized[None, ...]
    return np.transpose(resized, (2, 0, 1))
15,428 | from six.moves import range
from PIL import Image
import numpy as np
import io
import time
import math
import random
import sys
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from functools import reduce
from tqdm import tqdm
from memory_profiler import profile
The provided code snippet includes necessary dependencies for implementing the `countless5` function. Write a Python function `def countless5(a,b,c,d,e)` to solve the following problem:
First stage of generalizing from countless2d. You have five slots: A, B, C, D, E. You can decide if something is the winner by first checking for matches of three, then matches of two, then picking just one if the other two tries fail. In countless2d, you just check for matches of two and then pick one of them otherwise. Unfortunately, you need to check ABC, ABD, ABE, BCD, BDE, & CDE. Then you need to check AB, AC, AD, BC, BD. We skip checking E because if none of these match, we pick E. We can skip checking AE, BE, CE, DE since if any of those match, E is our boy so it's redundant. So countless grows combinatorially in complexity.
Here is the function:
def countless5(a, b, c, d, e):
    """Majority-style vote over five values (first generalization of countless2d).

    With five slots A, B, C, D, E, the winner is the first value found in
    a matching triple, else in a matching pair, else E itself. Pairs that
    include E are never tested: if nothing else matches, E wins by default,
    so those checks would be redundant. The number of comparisons grows
    combinatorially with the slot count.
    """
    slots = [a, b, c, d, e]
    match2 = lambda q, r: q * (q == r)                   # q if q == r else 0
    match3 = lambda q, r, s: q * ((q == r) & (r == s))   # q if q == r == s else 0
    first_nonzero = lambda x, y: x + (x == 0) * y

    triples = reduce(first_nonzero,
                     (match3(x, y, z) for x, y, z in combinations(slots, 3)))
    pairs = reduce(first_nonzero,
                   (match2(x, y) for x, y in combinations(slots[:-1], 2)))
    return reduce(first_nonzero, (triples, pairs, e))
15,429 | from six.moves import range
from PIL import Image
import numpy as np
import io
import time
import math
import random
import sys
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from functools import reduce
from tqdm import tqdm
from memory_profiler import profile
The provided code snippet includes necessary dependencies for implementing the `countless8` function. Write a Python function `def countless8(a,b,c,d,e,f,g,h)` to solve the following problem:
Extend countless5 to countless8. Same deal, except we also need to check for matches of length 4.
Here is the function:
def countless8(a, b, c, d, e, f, g, h):
    """Extend countless5 to eight slots; also checks matches of length 4."""
    slots = [a, b, c, d, e, f, g, h]
    match2 = lambda q, r: q * (q == r)
    match3 = lambda q, r, s: q * ((q == r) & (r == s))
    match4 = lambda p, q, r, s: p * ((p == q) & (q == r) & (r == s))
    first_nonzero = lambda x, y: x + (x == 0) * y

    quads = reduce(first_nonzero,
                   (match4(w, x, y, z) for w, x, y, z in combinations(slots, 4)))
    triples = reduce(first_nonzero,
                     (match3(x, y, z) for x, y, z in combinations(slots, 3)))
    # The last slot is skipped for pairs: if nothing else matches,
    # h wins by default anyway (the usual N-choose-2 shortcut).
    pairs = reduce(first_nonzero,
                   (match2(x, y) for x, y in combinations(slots[:-1], 2)))
    return reduce(first_nonzero, [quads, triples, pairs, h])
15,430 | from six.moves import range
from PIL import Image
import numpy as np
import io
import time
import math
import random
import sys
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from functools import reduce
from tqdm import tqdm
from memory_profiler import profile
The provided code snippet includes necessary dependencies for implementing the `dynamic_countless3d` function. Write a Python function `def dynamic_countless3d(data)` to solve the following problem:
countless8 + dynamic programming. ~2x faster
Here is the function:
def dynamic_countless3d(data):
    """countless8 + dynamic programming. ~2x faster

    2x downsampling of a 3D array taking the most frequent value of each
    2x2x2 cell; pairwise match results are memoized and reused by the
    triple and quadruple checks.
    NOTE: `data` is temporarily incremented in place (so zeros don't break
    the ==/* trick) and restored before returning.
    """
    # Shift zeros up one so they don't interfere with the bitwise operators;
    # we'll shift down at the end.
    data += 1

    # Split the volume into eight stride-2 sub-arrays, one per corner
    # offset of the 2x2x2 downsampling cell.
    factor = (2, 2, 2)
    sections = [data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
                for offset in np.ndindex(factor)]

    pick = lambda a, b: a * (a == b)
    lor = lambda x, y: x + (x == 0) * y

    # Pairwise matches; the last section is skipped (it wins by default).
    subproblems2 = {}
    results2 = None
    for x, y in combinations(range(7), 2):
        res = pick(sections[x], sections[y])
        subproblems2[(x, y)] = res
        if results2 is None:
            results2 = res
        else:
            results2 += (results2 == 0) * res

    # Triple matches reuse the memoized pairs.
    subproblems3 = {}
    results3 = None
    for x, y, z in combinations(range(8), 3):
        res = pick(subproblems2[(x, y)], sections[z])
        if z != 7:
            subproblems3[(x, y, z)] = res
        if results3 is None:
            results3 = res
        else:
            results3 += (results3 == 0) * res

    results3 = reduce(lor, (results3, results2, sections[-1]))

    # Free the pair-level intermediates before allocating the quads.
    results2 = None
    subproblems2 = None
    res = None

    results4 = (pick(subproblems3[(x, y, z)], sections[w])
                for x, y, z, w in combinations(range(8), 4))
    results4 = reduce(lor, results4)
    subproblems3 = None  # free memory

    final_result = lor(results4, results3) - 1
    data -= 1
    return final_result
15,431 | from six.moves import range
from PIL import Image
import numpy as np
import io
import time
import math
import random
import sys
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from functools import reduce
from tqdm import tqdm
from memory_profiler import profile
The provided code snippet includes necessary dependencies for implementing the `countless3d` function. Write a Python function `def countless3d(data)` to solve the following problem:
Now write countless8 in such a way that it could be used to process an image.
Here is the function:
def countless3d(data):
  """Downsample a 3D label volume by (2,2,2) using the COUNTLESS
  mode-finding trick: for every 2x2x2 block, emit a value that agrees
  with at least one other voxel in the block (largest agreeing group
  wins), falling back to the last corner when all eight disagree.

  data: 3D numpy integer array with even dimensions.  It is shifted by
  +1 in place during the computation and restored before returning.
  """
  # Shift away from zero so 0 can act as the "no match" sentinel for
  # the arithmetic logical-OR below; undone before returning.
  data += 1

  stride = (2, 2, 2)
  # Eight strided views, one per corner offset of the 2x2x2 block.
  corners = [
    data[tuple(np.s_[o::s] for o, s in zip(off, stride))]
    for off in np.ndindex(stride)
  ]

  def match2(q, r):
    # q where q == r, else 0
    return q * (q == r)

  def match3(q, r, s):
    return q * ((q == r) & (r == s))

  def match4(p, q, r, s):
    return p * ((p == q) & (q == r) & (r == s))

  def first_nonzero(x, y):
    # logical "x or y" on the shifted labels (0 means "no match")
    return x + (x == 0) * y

  quads = reduce(first_nonzero,
                 (match4(a, b, c, d) for a, b, c, d in combinations(corners, 4)))
  triples = reduce(first_nonzero,
                   (match3(a, b, c) for a, b, c in combinations(corners, 3)))
  pairs = reduce(first_nonzero,
                 (match2(a, b) for a, b in combinations(corners[:-1], 2)))

  # Prefer the largest agreeing group; last corner is the final fallback.
  mode = reduce(first_nonzero, (quads, triples, pairs, corners[-1])) - 1
  data -= 1  # restore the caller's array
  return mode
15,432 | from six.moves import range
from PIL import Image
import numpy as np
import io
import time
import math
import random
import sys
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from functools import reduce
from tqdm import tqdm
from memory_profiler import profile
The provided code snippet includes necessary dependencies for implementing the `downsample_with_averaging` function. Write a Python function `def downsample_with_averaging(array)` to solve the following problem:
Downsample x by factor using averaging. @return: The downsampled array, of the same type as x.
Here is the function:
def downsample_with_averaging(array):
  """
  Downsample `array` by a factor of (2,2,2) using averaging.

  Ragged (odd-sized) edges are averaged over only the samples that
  actually exist, tracked by the per-cell `counts` accumulator.

  @return: The downsampled array, of the same dtype as `array`.
  """
  factor = (2, 2, 2)
  if np.array_equal(factor[:3], np.array([1, 1, 1])):
    return array
  output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor))
  temp = np.zeros(output_shape, float)
  # Builtin `int` here: the `np.int` alias was removed in NumPy 1.24.
  counts = np.zeros(output_shape, int)
  for offset in np.ndindex(factor):
    part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    indexing_expr = tuple(np.s_[:s] for s in part.shape)
    temp[indexing_expr] += part
    counts[indexing_expr] += 1
  # `np.cast[dtype](x)` was removed in NumPy 2.0; astype is the
  # supported spelling of the same unsafe cast.
  return (temp / counts).astype(array.dtype)
15,433 | from six.moves import range
from PIL import Image
import numpy as np
import io
import time
import math
import random
import sys
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from functools import reduce
from tqdm import tqdm
from memory_profiler import profile
def downsample_with_max_pooling(array):
  """2x2x2 max pooling: each output voxel is the maximum over the
  corresponding 2x2x2 block of `array` (assumes even dimensions so the
  strided views all line up)."""
  factor = (2, 2, 2)
  views = [
    array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    for offset in np.ndindex(factor)
  ]
  pooled = views[0].copy()
  for view in views[1:]:
    np.maximum(pooled, view, out=pooled)
  return pooled
15,434 | from six.moves import range
from PIL import Image
import numpy as np
import io
import time
import math
import random
import sys
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from functools import reduce
from tqdm import tqdm
from memory_profiler import profile
The provided code snippet includes necessary dependencies for implementing the `striding` function. Write a Python function `def striding(array)` to solve the following problem:
Downsample x by factor using striding. @return: The downsampled array, of the same type as x.
Here is the function:
def striding(array):
  """Downsample `array` by keeping every 2nd element along each of the
  first three axes (nearest-neighbor downsampling by striding).

  @return: The downsampled array, of the same type as `array`.
  """
  factor = (2, 2, 2)
  if np.all(np.array(factor, int) == 1):
    return array
  # slice(None, None, f) is the spelled-out form of np.s_[::f]
  return array[tuple(slice(None, None, f) for f in factor)]
15,435 | from six.moves import range
from PIL import Image
import numpy as np
import io
import time
import math
import random
import sys
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from functools import reduce
from tqdm import tqdm
from memory_profiler import profile
# NOTE(review): the two `def` lines below are signature stubs whose bodies
# were lost when this snippet was extracted -- TODO restore them from the
# original countless3d source before running this file.
def countless_generalized(data, factor):
def dynamic_countless_generalized(data, factor):
# Script entry point (appears before the `benchmark` definition in this extract).
if __name__ == '__main__':
benchmark()
# Times each enabled downsampling method over a 256^3 uint8 volume and prints
# a tab-separated table suitable for pasting into a spreadsheet.
def benchmark():
def countless3d_generalized(img):
return countless_generalized(img, (2,8,1))
def countless3d_dynamic_generalized(img):
return dynamic_countless_generalized(img, (8,8,1))
# Commented entries are alternative methods; uncomment to benchmark them.
methods = [
# countless3d,
# dynamic_countless3d,
countless3d_generalized,
# countless3d_dynamic_generalized,
# striding,
# downsample_with_averaging,
# downsample_with_max_pooling
]
# A constant-valued volume: 256^3 voxels of 1s.
data = np.zeros(shape=(16**2, 16**2, 16**2), dtype=np.uint8) + 1
N = 5
print('Algorithm\tMPx\tMB/sec\tSec\tN=%d' % N)
for fn in methods:
start = time.time()
for _ in range(N):
result = fn(data)
end = time.time()
total_time = (end - start)
# NOTE(review): `mpx` is megapixels *per second* despite the plain "MPx"
# column header above.
mpx = N * float(data.shape[0] * data.shape[1] * data.shape[2]) / total_time / 1024.0 / 1024.0
mbytes = mpx * np.dtype(data.dtype).itemsize
# Output in tab separated format to enable copy-paste into excel/numbers
print("%s\t%.3f\t%.3f\t%.2f" % (fn.__name__, mpx, mbytes, total_time)) | null |
15,436 | from __future__ import print_function, division
import six
from six.moves import range
from collections import defaultdict
from functools import reduce
import operator
import io
import os
from PIL import Image
import math
import numpy as np
import random
import sys
import time
from tqdm import tqdm
from scipy import ndimage
def quick_countless(data):
  """
  Vectorized 2x2 COUNTLESS downsample of a 2D label image: per block,
  emit any value occurring at least twice, else the bottom-right value.
  `data` must have even dimensions; zero labels are not supported here
  (0 is used internally to mean "no match" -- see the zero-corrected
  variants elsewhere in this file).
  """
  stride = (2, 2)
  # Four strided views: the A, B, C, D corners of every 2x2 block.
  a, b, c, d = (
    data[tuple(np.s_[o::s] for o, s in zip(offset, stride))]
    for offset in np.ndindex(stride)
  )
  ab_ac = a * ((a == b) | (a == c))  # PICK(A,B) || PICK(A,C), fused
  bc = b * (b == c)                  # PICK(B,C)
  winner = ab_ac | bc                # bitwise OR is safe: losers are zeroed
  return winner + (winner == 0) * d  # fall back to D when nothing matched
def countless(data):
"""
Vectorized implementation of downsampling a 2D
image by 2 on each side using the COUNTLESS algorithm.
data is a 2D numpy array with even dimensions.
"""
# allows us to prevent losing 1/2 a bit of information
# at the top end by using a bigger type. Without this 255 is handled incorrectly.
# NOTE(review): upgrade_type/downgrade_type are defined elsewhere in this
# file -- presumably upgrade_type widens the dtype (returning a copy and
# True) so the +1 shift below cannot overflow at the dtype maximum, and
# downgrade_type undoes it.  Confirm at their definitions.
data, upgraded = upgrade_type(data)
# offset from zero, raw countless doesn't handle 0 correctly
# we'll remove the extra 1 at the end.
data += 1
sections = []
# This loop splits the 2D array apart into four arrays that are
# all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
# and (1,1) representing the A, B, C, and D positions from Figure 1.
factor = (2,2)
for offset in np.ndindex(factor):
part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
sections.append(part)
a, b, c, d = sections
ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization
ab_ac |= b * (b == c) # PICK(B,C)
result = ab_ac + (ab_ac == 0) * d - 1 # (matches or d) - 1
if upgraded:
return downgrade_type(result)
# only need to reset data if we weren't upgraded
# b/c no copy was made in that case
data -= 1
return result
def countless_extreme(data):
# Dispatch to the cheapest correct COUNTLESS variant:
#  - if no zeros are present, the zero-offset pass is unnecessary;
#  - if additionally no value sits at the dtype maximum (for unsigned
#    dtypes, data + 1 wraps the max to 0, so count_nonzero(data+1) == N
#    presumably detects that -- confirm for the intended dtypes), the
#    overflow-protecting type upgrade is also unnecessary;
#  - otherwise pay for the full zero-corrected `countless`.
nonzeros = np.count_nonzero(data)
# print("nonzeros", nonzeros)
N = reduce(operator.mul, data.shape)
if nonzeros == N:
print("quick")
return quick_countless(data)
elif np.count_nonzero(data + 1) == N:
print("quick")
# print("upper", nonzeros)
return quick_countless(data)
else:
return countless(data) | null |
15,437 | from __future__ import print_function, division
import six
from six.moves import range
from collections import defaultdict
from functools import reduce
import operator
import io
import os
from PIL import Image
import math
import numpy as np
import random
import sys
import time
from tqdm import tqdm
from scipy import ndimage
The provided code snippet includes necessary dependencies for implementing the `odd_to_even` function. Write a Python function `def odd_to_even(image)` to solve the following problem:
To facilitate 2x2 downsampling segmentation, change an odd sized image into an even sized one. Works by mirroring the starting 1 pixel edge of the image on odd shaped sides. e.g. turn a 3x3x5 image into a 4x4x5 (the x and y are what are getting downsampled) For example: [ 3, 2, 4 ] => [ 3, 3, 2, 4 ] which is now easy to downsample.
Here is the function:
def odd_to_even(image):
  """
  To facilitate 2x2 downsampling, grow an odd-sized image into an even
  one by mirroring the leading one-pixel edge on each odd-shaped side.
  e.g. a 3x3x5 image becomes 4x4x5 (only x and y get downsampled):
  [ 3, 2, 4 ] => [ 3, 3, 2, 4 ] which is now easy to downsample.

  Returns `image` itself (no copy) when both leading axes are even.
  """
  shape = np.array(image.shape)

  offset = (shape % 2)[:2]  # x,y parity: 1 where that axis is odd

  # detect if we're dealing with an even
  # image. if so it's fine, just return.
  if not np.any(offset):
    return image

  oddshape = image.shape[:2] + offset
  oddshape = np.append(oddshape, shape[2:])
  oddshape = oddshape.astype(int)

  newimg = np.empty(shape=oddshape, dtype=image.dtype)

  ox, oy = offset
  # Bug fix: unpack only the two spatial axes -- for 3D (multi-channel)
  # input, oddshape has 3 entries and `sx, sy = oddshape` raised
  # "too many values to unpack".
  sx, sy = oddshape[:2]

  newimg[0, 0] = image[0, 0]      # corner
  newimg[ox:sx, 0] = image[:, 0]  # mirrored x axis line
  newimg[0, oy:sy] = image[0, :]  # mirrored y axis line
  # Bug fix: copy the image body.  `newimg` comes from np.empty, so
  # without this the interior was left as uninitialized memory.
  newimg[ox:sx, oy:sy] = image
  return newimg
15,438 | from __future__ import print_function, division
import six
from six.moves import range
from collections import defaultdict
from functools import reduce
import operator
import io
import os
from PIL import Image
import math
import numpy as np
import random
import sys
import time
from tqdm import tqdm
from scipy import ndimage
# Brute-force "ground truth" downsampler: per 2x2 (x,y) block, count every
# label and keep the most frequent one (the mode).  Much slower than the
# vectorized COUNTLESS variants; used as a correctness/speed baseline.
def counting(array):
factor = (2, 2, 1)
shape = array.shape
# Pad the rank up to 4 (x, y, z, channel) so the loops below are uniform.
while len(shape) < 4:
array = np.expand_dims(array, axis=-1)
shape = array.shape
# NOTE(review): zip truncates the 4-d shape against the 3-tuple factor, so
# `output` is 3-d and the write below indexes it with `chan` where a z index
# would be expected.  That is only consistent when z == 1 (the expanded-2D
# case); confirm the intent for true 3D multi-channel inputs.
output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(shape, factor))
output = np.zeros(output_shape, dtype=array.dtype)
for chan in range(0, shape[3]):
for z in range(0, shape[2]):
for x in range(0, shape[0], 2):
for y in range(0, shape[1], 2):
block = array[ x:x+2, y:y+2, z, chan ] # 2x2 block
hashtable = defaultdict(int)
for subx, suby in np.ndindex(block.shape[0], block.shape[1]):
hashtable[block[subx, suby]] += 1
# Strict `<` means ties keep the first-counted label.
best = (0, 0)
for segid, val in six.iteritems(hashtable):
if best[1] < val:
best = (segid, val)
output[ x // 2, y // 2, chan ] = best[0]
return output | null |
15,439 | from __future__ import print_function, division
import six
from six.moves import range
from collections import defaultdict
from functools import reduce
import operator
import io
import os
from PIL import Image
import math
import numpy as np
import random
import sys
import time
from tqdm import tqdm
from scipy import ndimage
def countless_if(array):
  """Scalar (non-vectorized) COUNTLESS baseline: for every 2x2 block,
  choose a value that appears at least twice -- preferring A, then B --
  and fall back to the bottom-right corner D when all four disagree.
  Works per channel; 2D input gets a temporary channel axis that is
  squeezed away again on return.  Assumes even x,y dimensions."""
  factor = (2, 2, 1)
  shape = array.shape
  if len(shape) < 3:
    array = array[:, :, np.newaxis]
    shape = array.shape
  out_shape = tuple(int(math.ceil(s / f)) for s, f in zip(shape, factor))
  result = np.zeros(out_shape, dtype=array.dtype)
  for chan in range(shape[2]):
    for x in range(0, shape[0], 2):
      for y in range(0, shape[1], 2):
        # A, B, C, D corners of the 2x2 block
        a = array[x, y, chan]
        b = array[x + 1, y, chan]
        c = array[x, y + 1, chan]
        d = array[x + 1, y + 1, chan]
        if a == b or a == c:
          winner = a
        elif b == c:
          winner = b
        else:
          winner = d
        result[x // 2, y // 2, chan] = winner
  return np.squeeze(result)
15,440 | from __future__ import print_function, division
import six
from six.moves import range
from collections import defaultdict
from functools import reduce
import operator
import io
import os
from PIL import Image
import math
import numpy as np
import random
import sys
import time
from tqdm import tqdm
from scipy import ndimage
def simplest_countless(data):
  """
  The most direct vectorized 2x2 COUNTLESS downsample of a 2D label
  image: test each corner pair separately, merge with bitwise OR, and
  fall back to the bottom-right corner D.  Even dimensions required;
  zero labels are not handled (see zero_corrected_countless).
  """
  quadrants = [
    data[tuple(np.s_[o::s] for o, s in zip(offset, (2, 2)))]
    for offset in np.ndindex((2, 2))
  ]
  a, b, c, d = quadrants
  pick_ab = a * (a == b)
  pick_ac = a * (a == c)
  pick_bc = b * (b == c)
  # Non-matches are zero, so bitwise OR safely merges the candidates.
  merged = pick_ab | pick_ac | pick_bc
  return merged + (merged == 0) * d
def quick_countless(data):
  """
  Vectorized 2x2 COUNTLESS downsample of a 2D label image, with the
  PICK(A,B) and PICK(A,C) comparisons fused into one multiply.
  Even dimensions required; 0 is reserved internally as "no match".
  """
  # A, B, C, D corner views of every 2x2 block (ndindex order).
  a, b, c, d = [data[i::2, j::2] for i, j in np.ndindex((2, 2))]
  fused = a * ((a == b) | (a == c))    # PICK(A,B) || PICK(A,C)
  candidate = fused | (b * (b == c))   # ... || PICK(B,C)
  return candidate + (candidate == 0) * d
def quickest_countless(data):
  """
  The fastest 2x2 COUNTLESS variant: accumulates the match candidates
  in place with |= to avoid an extra temporary.  Even dimensions
  required; zero labels unsupported (0 means "no match" internally).
  """
  a, b, c, d = [data[i::2, j::2] for i, j in np.ndindex((2, 2))]
  picked = a * ((a == b) | (a == c))  # PICK(A,B) || PICK(A,C)
  picked |= b * (b == c)              # PICK(B,C)
  return picked + (picked == 0) * d   # fall back to D
def quick_countless_xor(data):
"""
Vectorized implementation of downsampling a 2D
image by 2 on each side using the COUNTLESS algorithm.
data is a 2D numpy array with even dimensions.
"""
sections = []
# This loop splits the 2D array apart into four arrays that are
# all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
# and (1,1) representing the A, B, C, and D positions from Figure 1.
factor = (2,2)
for offset in np.ndindex(factor):
part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
sections.append(part)
a, b, c, d = sections
# NOTE(review): the xor pairs below cancel (a ^ (a ^ b) == b), so each step
# is really arithmetic selection: `ab` starts as b, becomes c where b != a,
# then becomes d where the current pick equals c.  The additive updates rely
# on modular wraparound for unsigned dtypes -- confirm against the other
# countless variants before reusing this one.
ab = a ^ (a ^ b) # a or b
ab += (ab != a) * ((ab ^ (ab ^ c)) - b) # b or c
ab += (ab == c) * ((ab ^ (ab ^ d)) - c) # c or d
return ab
def stippled_countless(data):
  """
  2x2 COUNTLESS downsample that treats 0 as background: when a block
  has no matching pair, prefer any nonzero corner (D first, then A, B,
  C) so isolated foreground pixels are inflated rather than vanishing.
  data is a 2D numpy array with even dimensions.
  """
  a, b, c, d = [data[i::2, j::2] for i, j in np.ndindex((2, 2))]

  matched = a * ((a == b) | (a == c))  # PICK(A,B) || PICK(A,C)
  matched |= b * (b == c)              # PICK(B,C)

  # First nonzero among A, B, C -- the stippling fallback.
  first_fg = a + (a == 0) * (b + (b == 0) * c)

  # A match wins, else D, else the first nonzero corner.
  return matched + (matched == 0) * (d + (d == 0) * first_fg)
def zero_corrected_countless(data):
"""
Vectorized implementation of downsampling a 2D
image by 2 on each side using the COUNTLESS algorithm.
data is a 2D numpy array with even dimensions.
"""
# allows us to prevent losing 1/2 a bit of information
# at the top end by using a bigger type. Without this 255 is handled incorrectly.
# NOTE(review): upgrade_type/downgrade_type live elsewhere in this file --
# presumably upgrade_type widens the dtype (returning a copy and True) so
# the +1 shift below cannot overflow at the dtype maximum; confirm there.
data, upgraded = upgrade_type(data)
# offset from zero, raw countless doesn't handle 0 correctly
# we'll remove the extra 1 at the end.
data += 1
sections = []
# This loop splits the 2D array apart into four arrays that are
# all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
# and (1,1) representing the A, B, C, and D positions from Figure 1.
factor = (2,2)
for offset in np.ndindex(factor):
part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
sections.append(part)
a, b, c, d = sections
ab = a * (a == b) # PICK(A,B)
ac = a * (a == c) # PICK(A,C)
bc = b * (b == c) # PICK(B,C)
a = ab | ac | bc # Bitwise OR, safe b/c non-matches are zeroed
result = a + (a == 0) * d - 1 # a or d - 1
if upgraded:
return downgrade_type(result)
# only need to reset data if we weren't upgraded
# b/c no copy was made in that case
data -= 1
return result
def countless(data):
"""
Vectorized implementation of downsampling a 2D
image by 2 on each side using the COUNTLESS algorithm.
data is a 2D numpy array with even dimensions.
"""
# allows us to prevent losing 1/2 a bit of information
# at the top end by using a bigger type. Without this 255 is handled incorrectly.
# NOTE(review): upgrade_type/downgrade_type are defined elsewhere in this
# file -- presumably they widen/narrow the dtype around the +1 shift so the
# top-of-range value cannot overflow; confirm at their definitions.
data, upgraded = upgrade_type(data)
# offset from zero, raw countless doesn't handle 0 correctly
# we'll remove the extra 1 at the end.
data += 1
sections = []
# This loop splits the 2D array apart into four arrays that are
# all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
# and (1,1) representing the A, B, C, and D positions from Figure 1.
factor = (2,2)
for offset in np.ndindex(factor):
part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
sections.append(part)
a, b, c, d = sections
ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization
ab_ac |= b * (b == c) # PICK(B,C)
result = ab_ac + (ab_ac == 0) * d - 1 # (matches or d) - 1
if upgraded:
return downgrade_type(result)
# only need to reset data if we weren't upgraded
# b/c no copy was made in that case
data -= 1
return result
def ndzoom(array):
  """Downsample by 2 in x and y (z/channels untouched) using linear
  (order=1) spline interpolation via scipy."""
  if len(array.shape) == 3:
    ratio = (1 / 2.0, 1 / 2.0, 1.0)
  else:
    ratio = (1 / 2.0, 1 / 2.0)
  # `scipy.ndimage.interpolation` is a long-deprecated private-namespace
  # alias that recent SciPy releases removed; call ndimage.zoom directly.
  return ndimage.zoom(array, ratio, order=1)
def downsample_with_averaging(array):
  """
  Downsample `array` by 2 in x and y (channels untouched) using
  averaging.  Ragged (odd-sized) edges contribute only their existing
  samples via the `counts` accumulator.

  @return: The downsampled array, of the same dtype as `array`.
  """
  if len(array.shape) == 3:
    factor = (2, 2, 1)
  else:
    factor = (2, 2)
  if np.array_equal(factor[:3], np.array([1, 1, 1])):
    return array
  output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor))
  temp = np.zeros(output_shape, float)
  # Builtin `int` here: the `np.int` alias was removed in NumPy 1.24.
  counts = np.zeros(output_shape, int)
  for offset in np.ndindex(factor):
    part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    indexing_expr = tuple(np.s_[:s] for s in part.shape)
    temp[indexing_expr] += part
    counts[indexing_expr] += 1
  # `np.cast[dtype](x)` was removed in NumPy 2.0; astype is the
  # supported spelling of the same unsafe cast.
  return (temp / counts).astype(array.dtype)
def downsample_with_max_pooling(array):
  """2x2 max pooling over x,y: each output pixel is the maximum of its
  2x2 block (assumes even spatial dimensions)."""
  factor = (2, 2)
  if np.all(np.array(factor, int) == 1):
    return array
  pieces = [
    array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
    for offset in np.ndindex(factor)
  ]
  result = pieces[0].copy()
  for piece in pieces[1:]:
    np.maximum(result, piece, out=result)
  return result
def striding(array):
  """Downsample by keeping every 2nd element along x and y
  (nearest-neighbor downsampling by striding)."""
  factor = (2, 2)
  if np.all(np.array(factor, int) == 1):
    return array
  # slice(None, None, f) is the spelled-out form of np.s_[::f]
  return array[tuple(slice(None, None, f) for f in factor)]
# Script entry point (appears before the `benchmark` definition in this extract).
if __name__ == '__main__':
benchmark()
# Times every 2D downsampling method on the image named on the command line
# and writes each method's output image under ./results/.
def benchmark():
filename = sys.argv[1]
img = Image.open(filename)
data = np.array(img.getdata(), dtype=np.uint8)
# Normalize PIL's flat pixel list into (H, W) or (H, W, C<=3).
if len(data.shape) == 1:
n_channels = 1
reshape = (img.height, img.width)
else:
n_channels = min(data.shape[1], 3)
data = data[:, :n_channels]
reshape = (img.height, img.width, n_channels)
data = data.reshape(reshape).astype(np.uint8)
methods = [
simplest_countless,
quick_countless,
quick_countless_xor,
quickest_countless,
stippled_countless,
zero_corrected_countless,
countless,
downsample_with_averaging,
downsample_with_max_pooling,
ndzoom,
striding,
# countless_if,
# counting,
]
# PIL mode per channel count.  NOTE(review): no entry for 2 channels, so an
# LA (gray+alpha) input would raise KeyError at the save step below.
formats = {
1: 'L',
3: 'RGB',
4: 'RGBA'
}
if not os.path.exists('./results'):
os.mkdir('./results')
N = 500
img_size = float(img.width * img.height) / 1024.0 / 1024.0
print("N = %d, %dx%d (%.2f MPx) %d chan, %s" % (N, img.width, img.height, img_size, n_channels, filename))
print("Algorithm\tMPx/sec\tMB/sec\tSec")
for fn in methods:
print(fn.__name__, end='')
sys.stdout.flush()
start = time.time()
# tqdm is here to show you what's going on the first time you run it.
# Feel free to remove it to get slightly more accurate timing results.
for _ in tqdm(range(N), desc=fn.__name__, disable=True):
result = fn(data)
end = time.time()
print("\r", end='')
total_time = (end - start)
mpx = N * img_size / total_time
mbytes = N * img_size * n_channels / total_time
# Output in tab separated format to enable copy-paste into excel/numbers
print("%s\t%.3f\t%.3f\t%.2f" % (fn.__name__, mpx, mbytes, total_time))
outimg = Image.fromarray(np.squeeze(result), formats[n_channels])
outimg.save('./results/{}.png'.format(fn.__name__, "PNG")) | null |
15,441 | import torch
import torch.nn as nn
from torch.optim import Adam, SGD
from kornia.filters import gaussian_blur2d
from kornia.geometry.transform import resize
from kornia.morphology import erosion
from torch.nn import functional as F
import numpy as np
import cv2
from saicinpainting.evaluation.data import pad_tensor_to_modulo
from saicinpainting.evaluation.utils import move_to_device
from saicinpainting.training.modules.ffc import FFCResnetBlock
from saicinpainting.training.modules.pix2pixhd import ResnetBlock
from tqdm import tqdm
def _infer(
image : torch.Tensor, mask : torch.Tensor,
forward_front : nn.Module, forward_rears : nn.Module,
ref_lower_res : torch.Tensor, orig_shape : tuple, devices : list,
scale_ind : int, n_iters : int=15, lr : float=0.002):
"""Performs inference with refinement at a given scale.
Parameters
----------
image : torch.Tensor
input image to be inpainted, of size (1,3,H,W)
mask : torch.Tensor
input inpainting mask, of size (1,1,H,W)
forward_front : nn.Module
the front part of the inpainting network
forward_rears : nn.Module
the rear part of the inpainting network
ref_lower_res : torch.Tensor
the inpainting at previous scale, used as reference image
orig_shape : tuple
shape of the original input image before padding
devices : list
list of available devices
scale_ind : int
the scale index
n_iters : int, optional
number of iterations of refinement, by default 15
lr : float, optional
learning rate, by default 0.002
Returns
-------
torch.Tensor
inpainted image
"""
# Zero out the hole region and append the mask as an extra input channel.
masked_image = image * (1 - mask)
masked_image = torch.cat([masked_image, mask], dim=1)
mask = mask.repeat(1,3,1,1)
if ref_lower_res is not None:
ref_lower_res = ref_lower_res.detach()
# The front of the network is run once, without gradients.
with torch.no_grad():
z1,z2 = forward_front(masked_image)
# Inference
mask = mask.to(devices[-1])
# 15x15 elliptical structuring element used to erode the downscaled mask.
ekernel = torch.from_numpy(cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(15,15)).astype(bool)).float()
ekernel = ekernel.to(devices[-1])
image = image.to(devices[-1])
# Optimize the intermediate features z1, z2 -- not the network weights.
z1, z2 = z1.detach().to(devices[0]), z2.detach().to(devices[0])
z1.requires_grad, z2.requires_grad = True, True
optimizer = Adam([z1,z2], lr=lr)
pbar = tqdm(range(n_iters), leave=False)
for idi in pbar:
optimizer.zero_grad()
input_feat = (z1,z2)
# The rear of the network is split across `devices`; hand features over
# to the next device between segments.
for idd, forward_rear in enumerate(forward_rears):
output_feat = forward_rear(input_feat)
if idd < len(devices) - 1:
midz1, midz2 = output_feat
midz1, midz2 = midz1.to(devices[idd+1]), midz2.to(devices[idd+1])
input_feat = (midz1, midz2)
else:
pred = output_feat
# Coarsest scale has no reference: a single forward pass suffices.
if ref_lower_res is None:
break
losses = {}
######################### multi-scale #############################
# scaled loss with downsampler
# NOTE(review): _pyrdown/_pyrdown_mask/_erode_mask/_l1_loss are helpers
# defined elsewhere in this module; presumably they downscale by 2 to
# match ref_lower_res -- confirm at their definitions.
pred_downscaled = _pyrdown(pred[:,:,:orig_shape[0],:orig_shape[1]])
mask_downscaled = _pyrdown_mask(mask[:,:1,:orig_shape[0],:orig_shape[1]], blur_mask=False, round_up=False)
mask_downscaled = _erode_mask(mask_downscaled, ekernel=ekernel)
mask_downscaled = mask_downscaled.repeat(1,3,1,1)
losses["ms_l1"] = _l1_loss(pred, pred_downscaled, ref_lower_res, mask, mask_downscaled, image, on_pred=True)
loss = sum(losses.values())
pbar.set_description("Refining scale {} using scale {} ...current loss: {:.4f}".format(scale_ind+1, scale_ind, loss.item()))
# Skip the update on the final iteration so `pred` reflects the last step.
if idi < n_iters - 1:
loss.backward()
optimizer.step()
del pred_downscaled
del loss
del pred
# "pred" is the prediction after Plug-n-Play module
# Composite: network output inside the hole, original image outside.
inpainted = mask * pred + (1 - mask) * image
inpainted = inpainted.detach().cpu()
return inpainted
def _get_image_mask_pyramid(batch : dict, min_side : int, max_scales : int, px_budget : int):
"""Build the image mask pyramid
Parameters
----------
batch : dict
batch containing image, mask, etc
min_side : int
minimum side length to limit the number of scales of the pyramid
max_scales : int
maximum number of scales allowed
px_budget : int
the product H*W cannot exceed this budget, because of resource constraints
Returns
-------
tuple
image-mask pyramid in the form of list of images and list of masks
"""
assert batch['image'].shape[0] == 1, "refiner works on only batches of size 1!"
# Strip the padding that was added upstream, using the recorded size.
h, w = batch['unpad_to_size']
h, w = h[0].item(), w[0].item()
image = batch['image'][...,:h,:w]
mask = batch['mask'][...,:h,:w]
if h*w > px_budget:
#resize
# Scale both sides by the same ratio so h*w lands on the budget.
ratio = np.sqrt(px_budget / float(h*w))
h_orig, w_orig = h, w
h,w = int(h*ratio), int(w*ratio)
print(f"Original image too large for refinement! Resizing {(h_orig,w_orig)} to {(h,w)}...")
image = resize(image, (h,w),interpolation='bilinear', align_corners=False)
mask = resize(mask, (h,w),interpolation='bilinear', align_corners=False)
# Re-binarize the mask after bilinear interpolation.
mask[mask>1e-8] = 1
breadth = min(h,w)
# Halve until the shorter side would drop below min_side, capped at max_scales.
n_scales = min(1 + int(round(max(0,np.log2(breadth / min_side)))), max_scales)
ls_images = []
ls_masks = []
ls_images.append(image)
ls_masks.append(mask)
# NOTE(review): _pyrdown/_pyrdown_mask are module-level helpers defined
# elsewhere; presumably each halves the resolution -- confirm there.
for _ in range(n_scales - 1):
image_p = _pyrdown(ls_images[-1])
mask_p = _pyrdown_mask(ls_masks[-1])
ls_images.append(image_p)
ls_masks.append(mask_p)
# reverse the lists because we want the lowest resolution image as index 0
return ls_images[::-1], ls_masks[::-1]
def pad_tensor_to_modulo(img, mod):
    """Reflect-pad a (B,C,H,W) tensor on the bottom/right so that both
    spatial dimensions become multiples of `mod`."""
    _batch, _channels, height, width = img.shape
    pad_h = ceil_modulo(height, mod) - height
    pad_w = ceil_modulo(width, mod) - width
    # F.pad's last-dim-first convention: (left, right, top, bottom)
    return F.pad(img, pad=(0, pad_w, 0, pad_h), mode='reflect')
def move_to_device(obj, device):
    """Recursively move every tensor/module inside `obj` to `device`.

    Containers are rebuilt: dicts stay dicts, but tuples come back as
    lists (matching the original behavior).  Any other type raises
    ValueError.
    """
    if isinstance(obj, nn.Module) or torch.is_tensor(obj):
        return obj.to(device)
    if isinstance(obj, (tuple, list)):
        # NOTE: tuples are deliberately flattened to lists here
        return [move_to_device(item, device) for item in obj]
    if isinstance(obj, dict):
        return {key: move_to_device(value, device) for key, value in obj.items()}
    raise ValueError(f'Unexpected type {type(obj)}')
# Residual block built from two FFC (Fast Fourier Convolution) conv layers.
# Activations are carried as a (local, global) tensor pair; `inline=True`
# accepts/returns them concatenated along the channel axis instead.
class FFCResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, activation_layer=nn.ReLU, dilation=1,
spatial_transform_kwargs=None, inline=False, **conv_kwargs):
super().__init__()
# NOTE(review): FFC_BN_ACT and LearnableSpatialTransformWrapper are
# defined elsewhere in this package -- presumably conv + norm + activation
# over the (local, global) pair; confirm at their definitions.
self.conv1 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation,
norm_layer=norm_layer,
activation_layer=activation_layer,
padding_type=padding_type,
**conv_kwargs)
self.conv2 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation,
norm_layer=norm_layer,
activation_layer=activation_layer,
padding_type=padding_type,
**conv_kwargs)
if spatial_transform_kwargs is not None:
self.conv1 = LearnableSpatialTransformWrapper(self.conv1, **spatial_transform_kwargs)
self.conv2 = LearnableSpatialTransformWrapper(self.conv2, **spatial_transform_kwargs)
self.inline = inline
def forward(self, x):
if self.inline:
# Split the concatenated input back into (local, global) channels.
x_l, x_g = x[:, :-self.conv1.ffc.global_in_num], x[:, -self.conv1.ffc.global_in_num:]
else:
# Non-tuple input means there is no global branch; 0 is its identity.
x_l, x_g = x if type(x) is tuple else (x, 0)
id_l, id_g = x_l, x_g
x_l, x_g = self.conv1((x_l, x_g))
x_l, x_g = self.conv2((x_l, x_g))
# Residual connection on both branches.
x_l, x_g = id_l + x_l, id_g + x_g
out = x_l, x_g
if self.inline:
out = torch.cat(out, dim=1)
return out
# Standard pix2pixHD-style residual block: two 3x3 conv+norm layers with a
# skip connection.  An optional 1x1 `input_conv` adapts `in_dim` channels to
# `dim` for the residual path.
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False, conv_kind='default',
dilation=1, in_dim=None, groups=1, second_dilation=None):
super(ResnetBlock, self).__init__()
self.in_dim = in_dim
self.dim = dim
if second_dilation is None:
second_dilation = dilation
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout,
conv_kind=conv_kind, dilation=dilation, in_dim=in_dim, groups=groups,
second_dilation=second_dilation)
if self.in_dim is not None:
self.input_conv = nn.Conv2d(in_dim, dim, 1)
# NOTE(review): attribute name "out_channnels" has a typo (triple n) --
# kept as-is since callers elsewhere may reference it by this spelling.
self.out_channnels = dim
def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout, conv_kind='default',
dilation=1, in_dim=None, groups=1, second_dilation=1):
# NOTE(review): get_conv_block_ctor is defined elsewhere in this module --
# presumably it maps conv_kind to a Conv2d-like constructor; confirm there.
conv_layer = get_conv_block_ctor(conv_kind)
conv_block = []
# Explicit padding layer for reflect/replicate; conv-internal for 'zero'.
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(dilation)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(dilation)]
elif padding_type == 'zero':
p = dilation
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if in_dim is None:
in_dim = dim
conv_block += [conv_layer(in_dim, dim, kernel_size=3, padding=p, dilation=dilation),
norm_layer(dim),
activation]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
# Second conv, possibly with a different dilation, no trailing activation.
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(second_dilation)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(second_dilation)]
elif padding_type == 'zero':
p = second_dilation
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [conv_layer(dim, dim, kernel_size=3, padding=p, dilation=second_dilation, groups=groups),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
x_before = x
# Adapt channel count for the skip path when in_dim was given.
if self.in_dim is not None:
x = self.input_conv(x)
out = x + self.conv_block(x_before)
return out
The provided code snippet includes necessary dependencies for implementing the `refine_predict` function. Write a Python function `def refine_predict( batch : dict, inpainter : nn.Module, gpu_ids : str, modulo : int, n_iters : int, lr : float, min_side : int, max_scales : int, px_budget : int )` to solve the following problem:
Refines the inpainting of the network Parameters ---------- batch : dict image-mask batch, currently we assume the batchsize to be 1 inpainter : nn.Module the inpainting neural network gpu_ids : str the GPU ids of the machine to use. If only single GPU, use: "0," modulo : int pad the image to ensure dimension % modulo == 0 n_iters : int number of iterations of refinement for each scale lr : float learning rate min_side : int all sides of image on all scales should be >= min_side / sqrt(2) max_scales : int max number of downscaling scales for the image-mask pyramid px_budget : int pixels budget. Any image will be resized to satisfy height*width <= px_budget Returns ------- torch.Tensor inpainted image of size (1,3,H,W)
Here is the function:
def refine_predict(
        batch : dict, inpainter : nn.Module, gpu_ids : str,
        modulo : int, n_iters : int, lr : float, min_side : int,
        max_scales : int, px_budget : int
):
    """Refines the inpainting of the network

    Parameters
    ----------
    batch : dict
        image-mask batch, currently we assume the batchsize to be 1
    inpainter : nn.Module
        the inpainting neural network
    gpu_ids : str
        the GPU ids of the machine to use. If only single GPU, use: "0,"
    modulo : int
        pad the image to ensure dimension % modulo == 0
    n_iters : int
        number of iterations of refinement for each scale
    lr : float
        learning rate
    min_side : int
        all sides of image on all scales should be >= min_side / sqrt(2)
    max_scales : int
        max number of downscaling scales for the image-mask pyramid
    px_budget : int
        pixels budget. Any image will be resized to satisfy height*width <= px_budget

    Returns
    -------
    torch.Tensor
        inpainted image of size (1,3,H,W)
    """
    # Refinement assumes: eval mode, no noise injection, and mask concatenated to the input.
    assert not inpainter.training
    assert not inpainter.add_noise_kwargs
    assert inpainter.concat_mask
    # Parse "0,1,2"-style id string into torch device strings.
    gpu_ids = [f'cuda:{gpuid}' for gpuid in gpu_ids.replace(" ","").split(",") if gpuid.isdigit()]
    # Count the resnet blocks in the generator and find where the first one starts,
    # so the model can be split between the GPUs at resnet-block boundaries.
    n_resnet_blocks = 0
    first_resblock_ind = 0
    found_first_resblock = False
    for idl in range(len(inpainter.generator.model)):
        if isinstance(inpainter.generator.model[idl], FFCResnetBlock) or isinstance(inpainter.generator.model[idl], ResnetBlock):
            n_resnet_blocks += 1
            found_first_resblock = True
        elif not found_first_resblock:
            first_resblock_ind += 1
    resblocks_per_gpu = n_resnet_blocks // len(gpu_ids)
    devices = [torch.device(gpu_id) for gpu_id in gpu_ids]
    # split the model into front, and rear parts
    # (front = layers before the first resnet block, pinned to the first GPU)
    forward_front = inpainter.generator.model[0:first_resblock_ind]
    forward_front.to(devices[0])
    forward_rears = []
    for idd in range(len(gpu_ids)):
        if idd < len(gpu_ids) - 1:
            # each intermediate GPU gets an equal slice of resnet blocks
            forward_rears.append(inpainter.generator.model[first_resblock_ind + resblocks_per_gpu*(idd):first_resblock_ind+resblocks_per_gpu*(idd+1)])
        else:
            # the last GPU also gets the remainder blocks and the decoder tail
            forward_rears.append(inpainter.generator.model[first_resblock_ind + resblocks_per_gpu*(idd):])
        forward_rears[idd].to(devices[idd])
    # Build a coarse-to-fine pyramid of (image, mask) pairs.
    ls_images, ls_masks = _get_image_mask_pyramid(
        batch,
        min_side,
        max_scales,
        px_budget
    )
    image_inpainted = None
    for ids, (image, mask) in enumerate(zip(ls_images, ls_masks)):
        orig_shape = image.shape[2:]
        image = pad_tensor_to_modulo(image, modulo)
        mask = pad_tensor_to_modulo(mask, modulo)
        # binarize the mask after padding
        mask[mask >= 1e-8] = 1.0
        mask[mask < 1e-8] = 0.0
        image, mask = move_to_device(image, devices[0]), move_to_device(mask, devices[0])
        if image_inpainted is not None:
            # previous (coarser) result guides the current scale; it is consumed on the last device
            image_inpainted = move_to_device(image_inpainted, devices[-1])
        image_inpainted = _infer(image, mask, forward_front, forward_rears, image_inpainted, orig_shape, devices, ids, n_iters, lr)
        # crop the modulo padding back off
        image_inpainted = image_inpainted[:,:,:orig_shape[0], :orig_shape[1]]
        # detach everything to save resources
        image = image.detach().cpu()
        mask = mask.detach().cpu()
    return image_inpainted
import numpy as np
from skimage import io
from skimage.segmentation import mark_boundaries
def save_item_for_vis(item, out_file):
    """Save a visualization of `item`: the image with the mask boundary drawn,
    optionally concatenated side-by-side with the inpainted result."""
    bin_mask = item['mask'] > 0.5
    if bin_mask.ndim == 3:
        bin_mask = bin_mask[0]
    vis = mark_boundaries(np.transpose(item['image'], (1, 2, 0)),
                          bin_mask,
                          color=(1., 0., 0.),
                          outline_color=(1., 1., 1.),
                          mode='thick')

    if 'inpainted' in item:
        inp_vis = mark_boundaries(np.transpose(item['inpainted'], (1, 2, 0)),
                                  bin_mask,
                                  color=(1., 0., 0.),
                                  mode='outer')
        vis = np.concatenate((vis, inp_vis), axis=1)

    io.imsave(out_file, np.clip(vis * 255, 0, 255).astype('uint8'))
import numpy as np
from skimage import io
from skimage.segmentation import mark_boundaries
def save_mask_for_sidebyside(item, out_file):
    """Write the raw mask (intentionally not thresholded) as an 8-bit grayscale image."""
    m = item['mask']
    if m.ndim == 3:
        m = m[0]
    io.imsave(out_file, np.clip(m * 255, 0, 255).astype('uint8'))
import numpy as np
from skimage import io
from skimage.segmentation import mark_boundaries
def save_img_for_sidebyside(item, out_file):
    """Write the CHW float image of `item` as an 8-bit HWC image file."""
    hwc = np.transpose(item['image'], (1, 2, 0))
    io.imsave(out_file, np.clip(hwc * 255, 0, 255).astype('uint8'))
import glob
import os
import shutil
import traceback
import hydra
from omegaconf import OmegaConf
import PIL.Image as Image
import numpy as np
from joblib import Parallel, delayed
from saicinpainting.evaluation.masks.mask import SegmentationMask, propose_random_square_crop
from saicinpainting.evaluation.utils import load_yaml, SmallMode
from saicinpainting.training.data.masks import MixedMaskGenerator
class MakeManyMasksWrapper:
    """Adapts a single-mask generator to produce several mask variants per image."""

    def __init__(self, impl, variants_n=2):
        # impl: callable(img_chw) -> sequence whose first element is a mask
        self.impl = impl
        self.variants_n = variants_n

    def get_masks(self, img):
        """Return `variants_n` independently sampled masks for `img` (converted to CHW)."""
        chw = np.transpose(np.array(img), (2, 0, 1))
        return [self.impl(chw)[0] for _ in range(self.variants_n)]
class SegmentationMask:
    """Generates inpainting masks by shifting/rescaling panoptic-segmentation object masks."""

    def __init__(self, confidence_threshold=0.5, rigidness_mode=RigidnessMode.rigid,
                 max_object_area=0.3, min_mask_area=0.02, downsample_levels=6, num_variants_per_mask=4,
                 max_mask_intersection=0.5, max_foreground_coverage=0.5, max_foreground_intersection=0.5,
                 max_hidden_area=0.2, max_scale_change=0.25, horizontal_flip=True,
                 max_vertical_shift=0.1, position_shuffle=True):
        """
        :param confidence_threshold: float; threshold for confidence of the panoptic segmentator to allow for
        the instance.
        :param rigidness_mode: RigidnessMode object
            when soft, checks intersection only with the object from which the mask_object was produced
            when rigid, checks intersection with any foreground class object
        :param max_object_area: float; allowed upper bound for to be considered as mask_object.
        :param min_mask_area: float; lower bound for mask to be considered valid
        :param downsample_levels: int; defines width of the resized segmentation to obtain shifted masks;
        :param num_variants_per_mask: int; maximal number of the masks for the same object;
        :param max_mask_intersection: float; maximum allowed area fraction of intersection for 2 masks
        produced by horizontal shift of the same mask_object; higher value -> more diversity
        :param max_foreground_coverage: float; maximum allowed area fraction of intersection for foreground object to be
        covered by mask; lower value -> less the objects are covered
        :param max_foreground_intersection: float; maximum allowed area of intersection for the mask with foreground
        object; lower value -> mask is more on the background than on the objects
        :param max_hidden_area: upper bound on part of the object hidden by shifting object outside the screen area;
        :param max_scale_change: allowed scale change for the mask_object;
        :param horizontal_flip: if horizontal flips are allowed;
        :param max_vertical_shift: amount of vertical movement allowed;
        :param position_shuffle: shuffle
        """
        assert DETECTRON_INSTALLED, 'Cannot use SegmentationMask without detectron2'
        self.cfg = get_cfg()
        self.cfg.merge_from_file(model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"))
        self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")
        self.cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = confidence_threshold
        self.predictor = DefaultPredictor(self.cfg)

        self.rigidness_mode = RigidnessMode(rigidness_mode)
        self.max_object_area = max_object_area
        self.min_mask_area = min_mask_area
        self.downsample_levels = downsample_levels
        self.num_variants_per_mask = num_variants_per_mask
        self.max_mask_intersection = max_mask_intersection
        self.max_foreground_coverage = max_foreground_coverage
        self.max_foreground_intersection = max_foreground_intersection
        self.max_hidden_area = max_hidden_area
        self.position_shuffle = position_shuffle

        self.max_scale_change = max_scale_change
        self.horizontal_flip = horizontal_flip
        self.max_vertical_shift = max_vertical_shift

    def get_segmentation(self, img):
        """Run the panoptic predictor; returns (panoptic_seg tensor, segment_info list)."""
        im = img_as_ubyte(img)
        panoptic_seg, segment_info = self.predictor(im)["panoptic_seg"]
        return panoptic_seg, segment_info

    @staticmethod
    def _is_power_of_two(n):
        # BUGFIX: restored @staticmethod. This is invoked as self._is_power_of_two(x);
        # without the decorator the bound call passes `self` as `n` and raises TypeError.
        return (n != 0) and (n & (n-1) == 0)

    def identify_candidates(self, panoptic_seg, segments_info):
        """Return ids of "thing" segments small enough (area < max_object_area) to use as masks."""
        potential_mask_ids = []
        for segment in segments_info:
            if not segment["isthing"]:
                continue
            mask = (panoptic_seg == segment["id"]).int().detach().cpu().numpy()
            area = mask.sum().item() / np.prod(panoptic_seg.shape)
            if area >= self.max_object_area:
                continue
            potential_mask_ids.append(segment["id"])
        return potential_mask_ids

    def downsample_mask(self, mask):
        """Countless-downsample a power-of-two-sized mask until width == 2**downsample_levels."""
        height, width = mask.shape
        if not (self._is_power_of_two(height) and self._is_power_of_two(width)):
            raise ValueError("Image sides are not power of 2.")

        num_iterations = width.bit_length() - 1 - self.downsample_levels
        if num_iterations < 0:
            raise ValueError(f"Width is lower than 2^{self.downsample_levels}.")

        if height.bit_length() - 1 < num_iterations:
            raise ValueError("Height is too low to perform downsampling")

        downsampled = mask
        for _ in range(num_iterations):
            downsampled = zero_corrected_countless(downsampled)

        return downsampled

    def _augmentation_params(self):
        """Sample random scaling / flip / vertical-shift parameters for one mask variant."""
        scaling_factor = np.random.uniform(1 - self.max_scale_change, 1 + self.max_scale_change)
        if self.horizontal_flip:
            horizontal_flip = bool(np.random.choice(2))
        else:
            horizontal_flip = False
        vertical_shift = np.random.uniform(-self.max_vertical_shift, self.max_vertical_shift)

        return {
            "scaling_factor": scaling_factor,
            "horizontal_flip": horizontal_flip,
            "vertical_shift": vertical_shift
        }

    def _get_intersection(self, mask_array, mask_object):
        """Boolean intersection between a full-size mask array and an ObjectMask's bounding box."""
        intersection = mask_array[
            mask_object.up:mask_object.down, mask_object.left:mask_object.right
        ] & mask_object.mask
        return intersection

    def _check_masks_intersection(self, aug_mask, total_mask_area, prev_masks):
        """True iff aug_mask overlaps every previously chosen mask by at most max_mask_intersection."""
        for existing_mask in prev_masks:
            intersection_area = self._get_intersection(existing_mask, aug_mask).sum()
            intersection_existing = intersection_area / existing_mask.sum()
            intersection_current = 1 - (aug_mask.area() - intersection_area) / total_mask_area
            if (intersection_existing > self.max_mask_intersection) or \
               (intersection_current > self.max_mask_intersection):
                return False
        return True

    def _check_foreground_intersection(self, aug_mask, foreground):
        """True iff aug_mask neither covers nor lies on foreground objects beyond the thresholds."""
        for existing_mask in foreground:
            intersection_area = self._get_intersection(existing_mask, aug_mask).sum()
            intersection_existing = intersection_area / existing_mask.sum()
            if intersection_existing > self.max_foreground_coverage:
                return False
            intersection_mask = intersection_area / aug_mask.area()
            if intersection_mask > self.max_foreground_intersection:
                return False
        return True

    def _move_mask(self, mask, foreground):
        """Sample up to num_variants_per_mask augmentation parameter sets for `mask`
        whose shifted/rescaled copies satisfy the intersection constraints.
        Returns the list of accepted parameter dicts (masks themselves are rebuilt later)."""
        # Obtaining properties of the original mask_object:
        orig_mask = ObjectMask(mask)

        chosen_masks = []
        chosen_parameters = []
        # to fix the case when resizing gives mask_object consisting only of False
        scaling_factor_lower_bound = 0.

        for var_idx in range(self.num_variants_per_mask):
            # Obtaining augmentation parameters and applying them to the downscaled mask_object
            augmentation_params = self._augmentation_params()
            augmentation_params["scaling_factor"] = min([
                augmentation_params["scaling_factor"],
                2 * min(orig_mask.up, orig_mask.height - orig_mask.down) / orig_mask.height + 1.,
                2 * min(orig_mask.left, orig_mask.width - orig_mask.right) / orig_mask.width + 1.
            ])
            augmentation_params["scaling_factor"] = max([
                augmentation_params["scaling_factor"], scaling_factor_lower_bound
            ])

            aug_mask = deepcopy(orig_mask)
            aug_mask.rescale(augmentation_params["scaling_factor"], inplace=True)
            if augmentation_params["horizontal_flip"]:
                aug_mask.horizontal_flip(inplace=True)
            total_aug_area = aug_mask.area()
            if total_aug_area == 0:
                scaling_factor_lower_bound = 1.
                continue

            # Fix if the element vertical shift is too strong and shown area is too small:
            vertical_area = aug_mask.mask.sum(axis=1) / total_aug_area  # share of area taken by rows
            # number of rows which are allowed to be hidden from upper and lower parts of image respectively
            max_hidden_up = np.searchsorted(vertical_area.cumsum(), self.max_hidden_area)
            max_hidden_down = np.searchsorted(vertical_area[::-1].cumsum(), self.max_hidden_area)
            # correcting vertical shift, so not too much area will be hidden
            augmentation_params["vertical_shift"] = np.clip(
                augmentation_params["vertical_shift"],
                -(aug_mask.up + max_hidden_up) / aug_mask.height,
                (aug_mask.height - aug_mask.down + max_hidden_down) / aug_mask.height
            )
            # Applying vertical shift:
            vertical_shift = int(round(aug_mask.height * augmentation_params["vertical_shift"]))
            aug_mask.shift(vertical=vertical_shift, inplace=True)
            aug_mask.crop_to_canvas(vertical=True, horizontal=False, inplace=True)

            # Choosing horizontal shift:
            max_hidden_area = self.max_hidden_area - (1 - aug_mask.area() / total_aug_area)
            horizontal_area = aug_mask.mask.sum(axis=0) / total_aug_area
            max_hidden_left = np.searchsorted(horizontal_area.cumsum(), max_hidden_area)
            max_hidden_right = np.searchsorted(horizontal_area[::-1].cumsum(), max_hidden_area)
            allowed_shifts = np.arange(-max_hidden_left, aug_mask.width -
                                       (aug_mask.right - aug_mask.left) + max_hidden_right + 1)
            allowed_shifts = - (aug_mask.left - allowed_shifts)

            if self.position_shuffle:
                np.random.shuffle(allowed_shifts)

            mask_is_found = False
            for horizontal_shift in allowed_shifts:
                aug_mask_left = deepcopy(aug_mask)
                aug_mask_left.shift(horizontal=horizontal_shift, inplace=True)
                aug_mask_left.crop_to_canvas(inplace=True)

                prev_masks = [mask] + chosen_masks
                is_mask_suitable = self._check_masks_intersection(aug_mask_left, total_aug_area, prev_masks) & \
                                   self._check_foreground_intersection(aug_mask_left, foreground)
                if is_mask_suitable:
                    aug_draw = aug_mask_left.restore_full_mask()
                    chosen_masks.append(aug_draw)
                    augmentation_params["horizontal_shift"] = horizontal_shift / aug_mask_left.width
                    chosen_parameters.append(augmentation_params)
                    mask_is_found = True
                    break

            if not mask_is_found:
                break

        return chosen_parameters

    def _prepare_mask(self, mask):
        """Resize a mask so both sides become powers of two (nearest-neighbour, int32 output)."""
        height, width = mask.shape
        target_width = width if self._is_power_of_two(width) else (1 << width.bit_length())
        target_height = height if self._is_power_of_two(height) else (1 << height.bit_length())

        return resize(mask.astype('float32'), (target_height, target_width), order=0, mode='edge').round().astype('int32')

    def get_masks(self, im, return_panoptic=False):
        """Produce a list of candidate inpainting masks for image `im`;
        optionally also return the raw panoptic segmentation."""
        panoptic_seg, segments_info = self.get_segmentation(im)
        potential_mask_ids = self.identify_candidates(panoptic_seg, segments_info)

        panoptic_seg_scaled = self._prepare_mask(panoptic_seg.detach().cpu().numpy())
        downsampled = self.downsample_mask(panoptic_seg_scaled)
        scene_objects = []
        for segment in segments_info:
            if not segment["isthing"]:
                continue
            mask = downsampled == segment["id"]
            if not np.any(mask):
                continue
            scene_objects.append(mask)

        mask_set = []
        for mask_id in potential_mask_ids:
            mask = downsampled == mask_id
            if not np.any(mask):
                continue

            if self.rigidness_mode is RigidnessMode.soft:
                foreground = [mask]
            elif self.rigidness_mode is RigidnessMode.rigid:
                foreground = scene_objects
            else:
                # BUGFIX: was f'... {rigidness_mode}', which raised NameError instead of
                # the intended ValueError (no local named rigidness_mode in this scope).
                raise ValueError(f'Unexpected rigidness_mode: {self.rigidness_mode}')

            masks_params = self._move_mask(mask, foreground)

            full_mask = ObjectMask((panoptic_seg == mask_id).detach().cpu().numpy())

            for params in masks_params:
                aug_mask = deepcopy(full_mask)
                aug_mask.rescale(params["scaling_factor"], inplace=True)
                if params["horizontal_flip"]:
                    aug_mask.horizontal_flip(inplace=True)

                vertical_shift = int(round(aug_mask.height * params["vertical_shift"]))
                horizontal_shift = int(round(aug_mask.width * params["horizontal_shift"]))
                aug_mask.shift(vertical=vertical_shift, horizontal=horizontal_shift, inplace=True)
                aug_mask = aug_mask.restore_full_mask().astype('uint8')
                if aug_mask.mean() <= self.min_mask_area:
                    continue
                mask_set.append(aug_mask)

        if return_panoptic:
            return mask_set, panoptic_seg.detach().cpu().numpy()
        else:
            return mask_set
def propose_random_square_crop(mask, min_overlap=0.5):
    """Propose a square crop (x0, y0, x1, y1) along the longer image axis that
    keeps at least `min_overlap` of the masked object's extent inside the crop.
    Convention: mask==0 is the known fragment, mask==1 is the missing region."""
    height, width = mask.shape
    ys, xs = np.where(mask > 0.5)  # mask==0 is known fragment and mask==1 is missing
    crop_size = min(height, width)

    def _sample_start(long_dim, coords):
        # sample the crop origin so the crop still covers >= min_overlap of the object span
        lo_obj, hi_obj = coords.min(), coords.max()
        anchor = lo_obj + (hi_obj - lo_obj) * min_overlap
        lower = max(0, min(long_dim - crop_size - 1, anchor - crop_size))
        upper = max(lower + 1, min(long_dim - crop_size, anchor))
        return np.random.randint(lower, upper)

    if height < width:
        x0 = _sample_start(width, xs)
        return x0, 0, x0 + crop_size, height
    y0 = _sample_start(height, ys)
    return 0, y0, width, y0 + crop_size
class SmallMode(Enum):
    # Strategy for images smaller than the configured minimum output size:
    # DROP skips the image entirely, UPSCALE resizes it up to the minimum.
    DROP = "drop"
    UPSCALE = "upscale"
class MixedMaskGenerator:
    """Randomly dispatches each call to one of several mask generators; the per-generator
    probabilities are normalized over the generators that are actually enabled (proba > 0).
    NOTE: a duplicate of this definition appears later in this file (dataset-dump artifact).
    """
    def __init__(self, irregular_proba=1/3, irregular_kwargs=None,
                 box_proba=1/3, box_kwargs=None,
                 segm_proba=1/3, segm_kwargs=None,
                 squares_proba=0, squares_kwargs=None,
                 superres_proba=0, superres_kwargs=None,
                 outpainting_proba=0, outpainting_kwargs=None,
                 invert_proba=0):
        # probas[i] is the (unnormalized) weight of gens[i]; both lists stay aligned.
        self.probas = []
        self.gens = []

        if irregular_proba > 0:
            self.probas.append(irregular_proba)
            if irregular_kwargs is None:
                irregular_kwargs = {}
            else:
                irregular_kwargs = dict(irregular_kwargs)
                # NOTE(review): draw_method is only forced when kwargs were provided;
                # with kwargs=None the generator falls back to its own default — confirm intended.
                irregular_kwargs['draw_method'] = DrawMethod.LINE
            self.gens.append(RandomIrregularMaskGenerator(**irregular_kwargs))

        if box_proba > 0:
            self.probas.append(box_proba)
            if box_kwargs is None:
                box_kwargs = {}
            self.gens.append(RandomRectangleMaskGenerator(**box_kwargs))

        if segm_proba > 0:
            self.probas.append(segm_proba)
            if segm_kwargs is None:
                segm_kwargs = {}
            self.gens.append(RandomSegmentationMaskGenerator(**segm_kwargs))

        if squares_proba > 0:
            self.probas.append(squares_proba)
            if squares_kwargs is None:
                squares_kwargs = {}
            else:
                squares_kwargs = dict(squares_kwargs)
                squares_kwargs['draw_method'] = DrawMethod.SQUARE
            self.gens.append(RandomIrregularMaskGenerator(**squares_kwargs))

        if superres_proba > 0:
            self.probas.append(superres_proba)
            if superres_kwargs is None:
                superres_kwargs = {}
            self.gens.append(RandomSuperresMaskGenerator(**superres_kwargs))

        if outpainting_proba > 0:
            self.probas.append(outpainting_proba)
            if outpainting_kwargs is None:
                outpainting_kwargs = {}
            self.gens.append(OutpaintingMaskGenerator(**outpainting_kwargs))

        # normalize the enabled weights into a probability distribution
        self.probas = np.array(self.probas, dtype='float32')
        self.probas /= self.probas.sum()
        self.invert_proba = invert_proba

    def __call__(self, img, iter_i=None, raw_image=None):
        """Pick a generator according to self.probas and produce one mask;
        optionally invert the result with probability invert_proba."""
        kind = np.random.choice(len(self.probas), p=self.probas)
        gen = self.gens[kind]
        result = gen(img, iter_i=iter_i, raw_image=raw_image)
        if self.invert_proba > 0 and random.random() < self.invert_proba:
            result = 1 - result
        return result
def process_images(src_images, indir, outdir, config):
    """Generate mask/image crop pairs for every image in `src_images` and save them under
    `outdir`, mirroring the relative layout of `indir`. Errors on individual images are
    logged and skipped; KeyboardInterrupt stops the whole loop."""
    # choose the mask source: panoptic-segmentation-driven or purely random masks
    if config.generator_kind == 'segmentation':
        mask_generator = SegmentationMask(**config.mask_generator_kwargs)
    elif config.generator_kind == 'random':
        mask_generator_kwargs = OmegaConf.to_container(config.mask_generator_kwargs, resolve=True)
        variants_n = mask_generator_kwargs.pop('variants_n', 2)
        mask_generator = MakeManyMasksWrapper(MixedMaskGenerator(**mask_generator_kwargs),
                                              variants_n=variants_n)
    else:
        raise ValueError(f'Unexpected generator kind: {config.generator_kind}')

    max_tamper_area = config.get('max_tamper_area', 1)

    for infile in src_images:
        try:
            file_relpath = infile[len(indir):]
            img_outpath = os.path.join(outdir, file_relpath)
            os.makedirs(os.path.dirname(img_outpath), exist_ok=True)

            image = Image.open(infile).convert('RGB')

            # scale input image to output resolution and filter smaller images
            if min(image.size) < config.cropping.out_min_size:
                handle_small_mode = SmallMode(config.cropping.handle_small_mode)
                if handle_small_mode == SmallMode.DROP:
                    continue
                elif handle_small_mode == SmallMode.UPSCALE:
                    factor = config.cropping.out_min_size / min(image.size)
                    out_size = (np.array(image.size) * factor).round().astype('uint32')
                    image = image.resize(out_size, resample=Image.BICUBIC)
            else:
                # image is large enough: downscale so the shorter side equals out_min_size
                factor = config.cropping.out_min_size / min(image.size)
                out_size = (np.array(image.size) * factor).round().astype('uint32')
                image = image.resize(out_size, resample=Image.BICUBIC)

            # generate and select masks
            src_masks = mask_generator.get_masks(image)

            filtered_image_mask_pairs = []
            for cur_mask in src_masks:
                if config.cropping.out_square_crop:
                    (crop_left,
                     crop_top,
                     crop_right,
                     crop_bottom) = propose_random_square_crop(cur_mask,
                                                               min_overlap=config.cropping.crop_min_overlap)
                    cur_mask = cur_mask[crop_top:crop_bottom, crop_left:crop_right]
                    cur_image = image.copy().crop((crop_left, crop_top, crop_right, crop_bottom))
                else:
                    cur_image = image

                # NOTE(review): len(np.unique(...)) == 0 only holds for an empty array —
                # presumably meant to reject degenerate (constant) masks; verify upstream intent.
                if len(np.unique(cur_mask)) == 0 or cur_mask.mean() > max_tamper_area:
                    continue

                filtered_image_mask_pairs.append((cur_image, cur_mask))

            # keep at most max_masks_per_image random pairs
            mask_indices = np.random.choice(len(filtered_image_mask_pairs),
                                            size=min(len(filtered_image_mask_pairs), config.max_masks_per_image),
                                            replace=False)

            # crop masks; save masks together with input image
            mask_basename = os.path.join(outdir, os.path.splitext(file_relpath)[0])
            for i, idx in enumerate(mask_indices):
                cur_image, cur_mask = filtered_image_mask_pairs[idx]
                cur_basename = mask_basename + f'_crop{i:03d}'
                Image.fromarray(np.clip(cur_mask * 255, 0, 255).astype('uint8'),
                                mode='L').save(cur_basename + f'_mask{i:03d}.png')
                cur_image.save(cur_basename + '.png')
        except KeyboardInterrupt:
            return
        except Exception as ex:
            print(f'Could not make masks for {infile} due to {ex}:\n{traceback.format_exc()}')
import os
import shutil
import torch
def get_checkpoint_files(s):
    """Map a checkpoint spec to filename(s): 'last' -> 'last.ckpt', anything else -> '<s>.ckpt'.
    A comma-separated spec yields a list with one entry per chunk (whitespace-tolerant)."""
    spec = s.strip()
    if ',' not in spec:
        return 'last.ckpt' if spec == 'last' else f'{spec}.ckpt'
    return [get_checkpoint_files(part) for part in spec.split(',')]
import math
import os
import random
import braceexpand
import webdataset as wds
def is_good_key(key, cats):
    """Return True iff `key` contains at least one of the category substrings in `cats`."""
    for cat in cats:
        if cat in key:
            return True
    return False
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings
import torch
from pytorch_lightning import seed_everything
LOGGER = logging.getLogger(__name__)
import platform
def print_traceback_handler(sig, frame):
    """Signal handler: log the current stack trace of the process.

    NOTE: the body was truncated in this copy (bare `def` line — a syntax error);
    restored to the conventional stack-dumping handler this registration implies.
    """
    LOGGER.warning(f'Received signal {sig}')
    bt = ''.join(traceback.format_stack(frame))
    LOGGER.warning(f'Requested stack trace:\n{bt}')


def register_debug_signal_handlers(sig=signal.SIGUSR1, handler=print_traceback_handler):
    """Install `handler` for `sig` (default SIGUSR1) so a live process can be asked to dump stacks."""
    LOGGER.warning(f'Setting signal {sig} handler {handler}')
    signal.signal(sig, handler)
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings
import torch
from pytorch_lightning import seed_everything
import platform
def get_has_ddp_rank():
    """Return True when any DDP env var set by the PyTorch Lightning Trainer is present.

    NOTE: the body was truncated in this copy (bare `def` line — a syntax error);
    restored verbatim from the complete definition of the same function later in this file.
    """
    master_port = os.environ.get('MASTER_PORT', None)
    node_rank = os.environ.get('NODE_RANK', None)
    local_rank = os.environ.get('LOCAL_RANK', None)
    world_size = os.environ.get('WORLD_SIZE', None)
    has_rank = master_port is not None or node_rank is not None or local_rank is not None or world_size is not None
    return has_rank


def handle_ddp_subprocess():
    """Decorator factory for a hydra main: DDP worker subprocesses are redirected
    to the parent's working directory via hydra.run.dir; top-level processes pass through."""
    def main_decorator(main_func):
        @functools.wraps(main_func)
        def new_main(*args, **kwargs):
            # Trainer sets MASTER_PORT, NODE_RANK, LOCAL_RANK, WORLD_SIZE
            parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None)
            has_parent = parent_cwd is not None
            has_rank = get_has_ddp_rank()
            assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}'

            if has_parent:
                # we are in the worker: point hydra at the parent's run dir
                sys.argv.extend([
                    f'hydra.run.dir={parent_cwd}',
                ])
            # do nothing if this is a top-level process
            # TRAINING_PARENT_WORK_DIR is set in handle_ddp_parent_process after hydra initialization

            main_func(*args, **kwargs)

        return new_main
    return main_decorator
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings
import torch
from pytorch_lightning import seed_everything
import platform
def get_has_ddp_rank():
    """Return True when any DDP-related env var (set by the PL Trainer) is present."""
    ddp_vars = ('MASTER_PORT', 'NODE_RANK', 'LOCAL_RANK', 'WORLD_SIZE')
    return any(os.environ.get(name) is not None for name in ddp_vars)


def handle_ddp_parent_process():
    """In the top-level process, record cwd in TRAINING_PARENT_WORK_DIR for DDP workers.
    Returns True when running inside a worker (parent dir already recorded)."""
    parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None)
    has_parent = parent_cwd is not None
    has_rank = get_has_ddp_rank()
    assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}'

    if parent_cwd is None:
        os.environ['TRAINING_PARENT_WORK_DIR'] = os.getcwd()

    return has_parent
import math
import random
import hashlib
import logging
from enum import Enum
import cv2
import numpy as np
from saicinpainting.evaluation.masks.mask import SegmentationMask
from saicinpainting.utils import LinearRamp
class DrawMethod(Enum):
    # Members restored: this copy had a bare class header (syntax error); the three
    # values are established by the LINE/CIRCLE/SQUARE branches used below.
    LINE = 'line'
    CIRCLE = 'circle'
    SQUARE = 'square'


def make_random_irregular_mask(shape, max_angle=4, max_len=60, max_width=20, min_times=0, max_times=10,
                               draw_method=DrawMethod.LINE):
    """Draw a random irregular binary mask of the given (height, width) `shape`.

    Strokes a random number of poly-lines / circles / squares (per `draw_method`)
    with random angles, lengths and brush widths. Returns a float32 array of
    shape (1, height, width) with values in {0, 1}.
    """
    draw_method = DrawMethod(draw_method)

    height, width = shape
    mask = np.zeros((height, width), np.float32)
    times = np.random.randint(min_times, max_times + 1)
    for i in range(times):
        start_x = np.random.randint(width)
        start_y = np.random.randint(height)
        for j in range(1 + np.random.randint(5)):
            angle = 0.01 + np.random.randint(max_angle)
            if i % 2 == 0:
                # alternate stroke direction between outer iterations
                angle = 2 * 3.1415926 - angle
            length = 10 + np.random.randint(max_len)
            brush_w = 5 + np.random.randint(max_width)
            end_x = np.clip((start_x + length * np.sin(angle)).astype(np.int32), 0, width)
            end_y = np.clip((start_y + length * np.cos(angle)).astype(np.int32), 0, height)
            if draw_method == DrawMethod.LINE:
                cv2.line(mask, (start_x, start_y), (end_x, end_y), 1.0, brush_w)
            elif draw_method == DrawMethod.CIRCLE:
                cv2.circle(mask, (start_x, start_y), radius=brush_w, color=1., thickness=-1)
            elif draw_method == DrawMethod.SQUARE:
                radius = brush_w // 2
                mask[start_y - radius:start_y + radius, start_x - radius:start_x + radius] = 1
            start_x, start_y = end_x, end_y
    return mask[None, ...]
import math
import random
import hashlib
import logging
from enum import Enum
import cv2
import numpy as np
from saicinpainting.evaluation.masks.mask import SegmentationMask
from saicinpainting.utils import LinearRamp
class DumbAreaMaskGenerator:
    """Rectangular mask generator: random rectangle when training, centered square otherwise."""
    min_ratio = 0.1       # min masked-area fraction (training mode)
    max_ratio = 0.35      # max masked-area fraction (training mode)
    default_ratio = 0.225 # fixed masked-area fraction (eval mode, centered square)

    def __init__(self, is_training):
        # Parameters:
        #    is_training(bool): If true - random rectangular mask, if false - central square mask
        self.is_training = is_training

    def _random_vector(self, dimension):
        """Return (lo, hi) bounds of the masked span along an axis of length `dimension`."""
        if self.is_training:
            lower_limit = math.sqrt(self.min_ratio)
            upper_limit = math.sqrt(self.max_ratio)
            mask_side = round((random.random() * (upper_limit - lower_limit) + lower_limit) * dimension)
            u = random.randint(0, dimension - mask_side - 1)
            v = u + mask_side
        else:
            margin = (math.sqrt(self.default_ratio) / 2) * dimension
            u = round(dimension / 2 - margin)
            v = round(dimension / 2 + margin)
        return u, v

    def __call__(self, img, iter_i=None, raw_image=None):
        """Return a (1, H, W) float32 mask for a CHW image."""
        c, height, width = img.shape
        mask = np.zeros((height, width), np.float32)
        x1, x2 = self._random_vector(width)
        y1, y2 = self._random_vector(height)
        # BUGFIX: rows must be indexed by the height-derived span and columns by the
        # width-derived one. The original wrote mask[x1:x2, y1:y2], which on
        # non-square images silently clipped/misplaced the masked rectangle.
        mask[y1:y2, x1:x2] = 1
        return mask[None, ...]
class OutpaintingMaskGenerator:
    """Masks one or more image borders (left/top/right/bottom) to simulate outpainting."""

    def __init__(self, min_padding_percent:float=0.04, max_padding_percent:float=0.25, left_padding_prob:float=0.5, top_padding_prob:float=0.5,
                 right_padding_prob:float=0.5, bottom_padding_prob:float=0.5, is_fixed_randomness:bool=False):
        """
        is_fixed_randomness - get identical paddings for the same image if args are the same
        """
        # NOTE: max_padding_percent annotation fixed from int to float (default is 0.25).
        self.min_padding_percent = min_padding_percent
        self.max_padding_percent = max_padding_percent
        self.probs = [left_padding_prob, top_padding_prob, right_padding_prob, bottom_padding_prob]
        self.is_fixed_randomness = is_fixed_randomness

        assert self.min_padding_percent <= self.max_padding_percent
        assert self.max_padding_percent > 0
        assert len([x for x in [self.min_padding_percent, self.max_padding_percent] if (x>=0 and x<=1)]) == 2, f"Padding percentage should be in [0,1]"
        assert sum(self.probs) > 0, f"At least one of the padding probs should be greater than 0 - {self.probs}"
        assert len([x for x in self.probs if (x >= 0) and (x <= 1)]) == 4, f"At least one of padding probs is not in [0,1] - {self.probs}"
        if len([x for x in self.probs if x > 0]) == 1:
            LOGGER.warning(f"Only one padding prob is greater than zero - {self.probs}. That means that the outpainting masks will be always on the same side")

    def apply_padding(self, mask, coord):
        """Set to 1 the rectangle given by relative coords [(y0,x0),(y1,x1)]."""
        mask[int(coord[0][0]*self.img_h):int(coord[1][0]*self.img_h),
             int(coord[0][1]*self.img_w):int(coord[1][1]*self.img_w)] = 1
        return mask

    def get_padding(self, size):
        """Sample a padding fraction in [min_padding_percent, max_padding_percent) of `size`."""
        n1 = int(self.min_padding_percent*size)
        n2 = int(self.max_padding_percent*size)
        return self.rnd.randint(n1, n2) / size

    @staticmethod
    def _img2rs(img):
        # BUGFIX: restored @staticmethod — this is called as self._img2rs(raw_image),
        # which without the decorator would pass `self` as `img` and raise TypeError.
        # Derives a 32-bit seed from the image content for reproducible padding.
        arr = np.ascontiguousarray(img.astype(np.uint8))
        str_hash = hashlib.sha1(arr).hexdigest()
        res = hash(str_hash)%(2**32)
        return res

    def __call__(self, img, iter_i=None, raw_image=None):
        """Return a (1, H, W) float32 border mask for a CHW image."""
        c, self.img_h, self.img_w = img.shape
        mask = np.zeros((self.img_h, self.img_w), np.float32)
        at_least_one_mask_applied = False

        if self.is_fixed_randomness:
            assert raw_image is not None, f"Cant calculate hash on raw_image=None"
            rs = self._img2rs(raw_image)
            self.rnd = np.random.RandomState(rs)
        else:
            self.rnd = np.random

        # candidate border rectangles in relative coords: top, left, bottom, right
        coords = [[
                   (0,0),
                   (1,self.get_padding(size=self.img_h))
                  ],
                  [
                   (0,0),
                   (self.get_padding(size=self.img_w),1)
                  ],
                  [
                   (0,1-self.get_padding(size=self.img_h)),
                   (1,1)
                  ],
                  [
                   (1-self.get_padding(size=self.img_w),0),
                   (1,1)
                  ]]

        for pp, coord in zip(self.probs, coords):
            if self.rnd.random() < pp:
                at_least_one_mask_applied = True
                mask = self.apply_padding(mask=mask, coord=coord)

        if not at_least_one_mask_applied:
            # guarantee at least one border is masked, weighted by the configured probs
            idx = self.rnd.choice(range(len(coords)), p=np.array(self.probs)/sum(self.probs))
            mask = self.apply_padding(mask=mask, coord=coords[idx])
        return mask[None, ...]
class MixedMaskGenerator:
    """Dispatches each call to one of several mask generators, chosen with
    probabilities normalized over the generators that are enabled (proba > 0)."""

    def __init__(self, irregular_proba=1/3, irregular_kwargs=None,
                 box_proba=1/3, box_kwargs=None,
                 segm_proba=1/3, segm_kwargs=None,
                 squares_proba=0, squares_kwargs=None,
                 superres_proba=0, superres_kwargs=None,
                 outpainting_proba=0, outpainting_kwargs=None,
                 invert_proba=0):
        weights = []
        generators = []

        def _enable(weight, generator):
            weights.append(weight)
            generators.append(generator)

        if irregular_proba > 0:
            if irregular_kwargs is None:
                kw = {}
            else:
                kw = dict(irregular_kwargs)
                kw['draw_method'] = DrawMethod.LINE
            _enable(irregular_proba, RandomIrregularMaskGenerator(**kw))

        if box_proba > 0:
            _enable(box_proba, RandomRectangleMaskGenerator(**(box_kwargs if box_kwargs is not None else {})))

        if segm_proba > 0:
            _enable(segm_proba, RandomSegmentationMaskGenerator(**(segm_kwargs if segm_kwargs is not None else {})))

        if squares_proba > 0:
            if squares_kwargs is None:
                kw = {}
            else:
                kw = dict(squares_kwargs)
                kw['draw_method'] = DrawMethod.SQUARE
            _enable(squares_proba, RandomIrregularMaskGenerator(**kw))

        if superres_proba > 0:
            _enable(superres_proba, RandomSuperresMaskGenerator(**(superres_kwargs if superres_kwargs is not None else {})))

        if outpainting_proba > 0:
            _enable(outpainting_proba, OutpaintingMaskGenerator(**(outpainting_kwargs if outpainting_kwargs is not None else {})))

        # normalize the enabled weights into a probability distribution
        self.probas = np.array(weights, dtype='float32')
        self.probas /= self.probas.sum()
        self.gens = generators
        self.invert_proba = invert_proba

    def __call__(self, img, iter_i=None, raw_image=None):
        """Sample a generator per self.probas, produce a mask, optionally invert it."""
        chosen = np.random.choice(len(self.probas), p=self.probas)
        mask = self.gens[chosen](img, iter_i=iter_i, raw_image=raw_image)
        if self.invert_proba > 0 and random.random() < self.invert_proba:
            mask = 1 - mask
        return mask
def get_mask_generator(kind, kwargs):
    """Factory for mask generators.

    `kind` of None defaults to "mixed"; `kwargs` of None defaults to {}.
    Raises NotImplementedError for an unknown kind.
    """
    if kind is None:
        kind = "mixed"
    if kwargs is None:
        kwargs = {}

    registry = {
        "mixed": MixedMaskGenerator,
        "outpainting": OutpaintingMaskGenerator,
        "dumb": DumbAreaMaskGenerator,
    }
    if kind not in registry:
        raise NotImplementedError(f"No such generator kind = {kind}")
    return registry[kind](**kwargs)
15,477 | import glob
import logging
import os
import random
import albumentations as A
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import webdataset
from omegaconf import open_dict, OmegaConf
from skimage.feature import canny
from skimage.transform import rescale, resize
from torch.utils.data import Dataset, IterableDataset, DataLoader, DistributedSampler, ConcatDataset
from saicinpainting.evaluation.data import InpaintingDataset as InpaintingEvaluationDataset, \
OurInpaintingDataset as OurInpaintingEvaluationDataset, ceil_modulo, InpaintingEvalOnlineDataset
from saicinpainting.training.data.aug import IAAAffine2, IAAPerspective2
from saicinpainting.training.data.masks import get_mask_generator
def make_constant_area_crop_params(img_height, img_width, min_size=128, max_size=512, area=256*256, round_to_mod=16):
    """Sample a random crop of roughly constant area.

    One side is drawn uniformly in [min_size, max_size] (rounded up to a
    multiple of round_to_mod, clamped to the image) and the other side is
    derived from `area`. Returns (start_y, start_x, out_height, out_width).
    """
    lo = min(img_height, img_width, min_size)
    hi = min(img_height, img_width, max_size)

    # randomly decide which side is sampled first, the other follows from area
    if random.random() < 0.5:
        out_height = min(hi, ceil_modulo(random.randint(lo, hi), round_to_mod))
        out_width = min(hi, ceil_modulo(area // out_height, round_to_mod))
    else:
        out_width = min(hi, ceil_modulo(random.randint(lo, hi), round_to_mod))
        out_height = min(hi, ceil_modulo(area // out_width, round_to_mod))

    start_y = random.randint(0, img_height - out_height)
    start_x = random.randint(0, img_width - out_width)
    return (start_y, start_x, out_height, out_width)
15,489 | import os
import pathlib
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import numpy as np
import torch
from imageio import imread
from PIL import Image, JpegImagePlugin
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians
    X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1   : Mean of activations for generated samples.
    -- sigma1: Covariance of activations for generated samples.
    -- mu2   : Mean of activations, precomputed on a representative data set.
    -- sigma2: Covariance of activations, precomputed on a representative
               data set.
    -- eps   : Diagonal jitter added when the covariance product is singular.

    Returns:
    --       : The Frechet Distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    mean_diff = mu1 - mu2

    # The product of the covariances might be almost singular.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error might give a slight imaginary component.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-2):
            m = np.max(np.abs(covmean.imag))
            raise ValueError('Imaginary component {}'.format(m))
        covmean = covmean.real

    return (mean_diff.dot(mean_diff) + np.trace(sigma1)
            + np.trace(sigma2) - 2 * np.trace(covmean))
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
m, s = calculate_activation_statistics(files, model, batch_size,
dims, cuda)
return m, s
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""

    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3

    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,   # First max pooling features
        192: 1,  # Second max pooling features
        768: 2,  # Pre-aux classifier features
        2048: 3  # Final average pooling features
    }

    def __init__(self,
                 output_blocks=(DEFAULT_BLOCK_INDEX,),
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3

        Parameters
        ----------
        output_blocks : sequence of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        # BUGFIX: the default used to be a mutable list ([DEFAULT_BLOCK_INDEX]),
        # the shared-mutable-default-argument pitfall; an immutable tuple is
        # safe and backward-compatible (any sequence of indices is accepted).
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'

        self.blocks = nn.ModuleList()

        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1,
                inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a,
                inception.Mixed_7b,
                inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Get Inception feature maps

        Parameters
        ----------
        inp : torch.autograd.Variable
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)

        Returns
        -------
        List of torch.autograd.Variable, corresponding to the selected output
        block, sorted ascending by index
        """
        outp = []
        x = inp

        if self.resize_input:
            x = F.interpolate(x,
                              size=(299, 299),
                              mode='bilinear',
                              align_corners=False)

        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)

        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)

            if idx == self.last_needed_block:
                break

        return outp
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""

    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3

    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,   # First max pooling features
        192: 1,  # Second max pooling features
        768: 2,  # Pre-aux classifier features
        2048: 3  # Final average pooling features
    }

    def __init__(self,
                 output_blocks=(DEFAULT_BLOCK_INDEX,),
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3

        Parameters
        ----------
        output_blocks : sequence of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        # BUGFIX: the default used to be a mutable list ([DEFAULT_BLOCK_INDEX]),
        # the shared-mutable-default-argument pitfall; an immutable tuple is
        # safe and backward-compatible (any sequence of indices is accepted).
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'

        self.blocks = nn.ModuleList()

        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1,
                inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a,
                inception.Mixed_7b,
                inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Get Inception feature maps

        Parameters
        ----------
        inp : torch.autograd.Variable
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)

        Returns
        -------
        List of torch.autograd.Variable, corresponding to the selected output
        block, sorted ascending by index
        """
        outp = []
        x = inp

        if self.resize_input:
            x = F.interpolate(x,
                              size=(299, 299),
                              mode='bilinear',
                              align_corners=False)

        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)

        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)

            if idx == self.last_needed_block:
                break

        return outp
The provided code snippet includes necessary dependencies for implementing the `calculate_fid_given_paths` function. Write a Python function `def calculate_fid_given_paths(paths, batch_size, cuda, dims)` to solve the following problem:
Calculates the FID of two paths
Here is the function:
def calculate_fid_given_paths(paths, batch_size, cuda, dims):
    """Calculates the FID of two paths (directories of images or .npz stats)."""
    for candidate in paths:
        if not os.path.exists(candidate):
            raise RuntimeError('Invalid path: %s' % candidate)

    model = InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[dims]])
    if cuda:
        model.cuda()

    m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size, dims, cuda)
    m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size, dims, cuda)
    return calculate_frechet_distance(m1, s1, m2, s2)
15,490 | import os
import pathlib
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import numpy as np
import torch
from imageio import imread
from PIL import Image, JpegImagePlugin
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians
    X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1   : Mean of activations for generated samples.
    -- sigma1: Covariance of activations for generated samples.
    -- mu2   : Mean of activations, precomputed on a representative data set.
    -- sigma2: Covariance of activations, precomputed on a representative
               data set.
    -- eps   : Diagonal jitter added when the covariance product is singular.

    Returns:
    --       : The Frechet Distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    mean_diff = mu1 - mu2

    # The product of the covariances might be almost singular.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error might give a slight imaginary component.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-2):
            m = np.max(np.abs(covmean.imag))
            raise ValueError('Imaginary component {}'.format(m))
        covmean = covmean.real

    return (mean_diff.dot(mean_diff) + np.trace(sigma1)
            + np.trace(sigma2) - 2 * np.trace(covmean))
def _compute_statistics_of_images(images, model, batch_size, dims, cuda, keep_size=False):
if isinstance(images, list): # exact paths to files are provided
m, s = calculate_activation_statistics(images, model, batch_size,
dims, cuda, keep_size=keep_size)
return m, s
else:
raise ValueError
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""

    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3

    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,   # First max pooling features
        192: 1,  # Second max pooling features
        768: 2,  # Pre-aux classifier features
        2048: 3  # Final average pooling features
    }

    def __init__(self,
                 output_blocks=(DEFAULT_BLOCK_INDEX,),
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3

        Parameters
        ----------
        output_blocks : sequence of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        # BUGFIX: the default used to be a mutable list ([DEFAULT_BLOCK_INDEX]),
        # the shared-mutable-default-argument pitfall; an immutable tuple is
        # safe and backward-compatible (any sequence of indices is accepted).
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'

        self.blocks = nn.ModuleList()

        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1,
                inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a,
                inception.Mixed_7b,
                inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Get Inception feature maps

        Parameters
        ----------
        inp : torch.autograd.Variable
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)

        Returns
        -------
        List of torch.autograd.Variable, corresponding to the selected output
        block, sorted ascending by index
        """
        outp = []
        x = inp

        if self.resize_input:
            x = F.interpolate(x,
                              size=(299, 299),
                              mode='bilinear',
                              align_corners=False)

        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)

        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)

            if idx == self.last_needed_block:
                break

        return outp
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""

    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3

    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,   # First max pooling features
        192: 1,  # Second max pooling features
        768: 2,  # Pre-aux classifier features
        2048: 3  # Final average pooling features
    }

    def __init__(self,
                 output_blocks=(DEFAULT_BLOCK_INDEX,),
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3

        Parameters
        ----------
        output_blocks : sequence of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        # BUGFIX: the default used to be a mutable list ([DEFAULT_BLOCK_INDEX]),
        # the shared-mutable-default-argument pitfall; an immutable tuple is
        # safe and backward-compatible (any sequence of indices is accepted).
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'

        self.blocks = nn.ModuleList()

        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1,
                inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a,
                inception.Mixed_7b,
                inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Get Inception feature maps

        Parameters
        ----------
        inp : torch.autograd.Variable
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)

        Returns
        -------
        List of torch.autograd.Variable, corresponding to the selected output
        block, sorted ascending by index
        """
        outp = []
        x = inp

        if self.resize_input:
            x = F.interpolate(x,
                              size=(299, 299),
                              mode='bilinear',
                              align_corners=False)

        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)

        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)

            if idx == self.last_needed_block:
                break

        return outp
def calculate_fid_given_images(images, batch_size, cuda, dims, use_globals=False, keep_size=False):
    """Calculate FID between two in-memory collections of PIL images.

    Parameters
    ----------
    images : pair of lists of PIL images ([reals], [fakes])
    batch_size : int, inference batch size
    cuda : bool, run the Inception model on GPU
    dims : int, feature dimensionality (key of InceptionV3.BLOCK_INDEX_BY_DIM)
    use_globals : bool, cache the model in a module-level global
        (useful with multiprocessing workers)
    keep_size : bool, forwarded to the statistics computation

    Raises
    ------
    RuntimeError if either element of `images` is not a list of PIL images.
    """
    if use_globals:
        global FID_MODEL  # for multiprocessing

    for imgs in images:
        if not (isinstance(imgs, list)
                and isinstance(imgs[0], (Image.Image, JpegImagePlugin.JpegImageFile))):
            raise RuntimeError('Invalid images')

    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]

    if 'FID_MODEL' not in globals() or not use_globals:
        model = InceptionV3([block_idx])
        if cuda:
            model.cuda()
        if use_globals:
            FID_MODEL = model
    else:
        model = FID_MODEL

    # BUGFIX: keep_size was previously hard-coded to False in both calls,
    # silently ignoring the caller's argument.
    m1, s1 = _compute_statistics_of_images(images[0], model, batch_size,
                                           dims, cuda, keep_size=keep_size)
    m2, s2 = _compute_statistics_of_images(images[1], model, batch_size,
                                           dims, cuda, keep_size=keep_size)
    fid_value = calculate_frechet_distance(m1, s1, m2, s2)
    return fid_value
15,506 | from enum import Enum
import yaml
from easydict import EasyDict as edict
import torch.nn as nn
import torch
def move_to_device(obj, device):
    """Recursively move tensors/modules in `obj` to `device`.

    Modules and tensors are moved via .to(); tuples and lists are returned
    as lists of moved elements; dicts keep their keys. Any other type raises
    ValueError.
    """
    if isinstance(obj, nn.Module) or torch.is_tensor(obj):
        return obj.to(device)
    if isinstance(obj, (tuple, list)):
        return [move_to_device(item, device) for item in obj]
    if isinstance(obj, dict):
        return {key: move_to_device(value, device) for key, value in obj.items()}
    raise ValueError(f'Unexpected type {type(obj)}')
15,507 | import glob
import os
import cv2
import PIL.Image as Image
import numpy as np
from torch.utils.data import Dataset
import torch.nn.functional as F
def load_image(fname, mode='RGB', return_orig=False):
    """Read an image file into a float32 CHW array scaled to [0, 1].

    When return_orig is True, also return the raw uint8 array (CHW for
    color, HW for grayscale).
    """
    raw = np.array(Image.open(fname).convert(mode))
    if raw.ndim == 3:
        raw = np.transpose(raw, (2, 0, 1))  # HWC -> CHW
    normalized = raw.astype('float32') / 255
    return (normalized, raw) if return_orig else normalized
15,508 | import glob
import os
import cv2
import PIL.Image as Image
import numpy as np
from torch.utils.data import Dataset
import torch.nn.functional as F
def ceil_modulo(x, mod):
    """Round `x` up to the nearest multiple of `mod`."""
    remainder = x % mod
    if remainder == 0:
        return x
    return x + (mod - remainder)
def pad_img_to_modulo(img, mod):
    """Symmetrically pad a CHW image on the bottom/right so that its height
    and width become multiples of `mod`."""
    _, height, width = img.shape
    pad_bottom = ceil_modulo(height, mod) - height
    pad_right = ceil_modulo(width, mod) - width
    return np.pad(img, ((0, 0), (0, pad_bottom), (0, pad_right)), mode='symmetric')
15,509 | import glob
import os
import cv2
import PIL.Image as Image
import numpy as np
from torch.utils.data import Dataset
import torch.nn.functional as F
def ceil_modulo(x, mod):
    """Round `x` up to the nearest multiple of `mod`."""
    remainder = x % mod
    if remainder == 0:
        return x
    return x + (mod - remainder)
def pad_tensor_to_modulo(img, mod):
    """Reflect-pad a BCHW tensor on the bottom/right so that H and W become
    multiples of `mod`."""
    _, _, height, width = img.shape
    target_h = ceil_modulo(height, mod)
    target_w = ceil_modulo(width, mod)
    return F.pad(img, pad=(0, target_w - width, 0, target_h - height), mode='reflect')
15,511 | import enum
from copy import deepcopy
import numpy as np
from skimage import img_as_ubyte
from skimage.transform import rescale, resize
from .countless.countless2d import zero_corrected_countless
def propose_random_square_crop(mask, min_overlap=0.5):
    """Propose a random square crop (left, top, right, bottom) with side
    min(H, W) that overlaps the masked (missing) region by at least
    `min_overlap` of the hole's extent along the longer image axis.
    """
    height, width = mask.shape
    mask_ys, mask_xs = np.where(mask > 0.5)  # mask==0 is known fragment and mask==1 is missing

    if height < width:
        # landscape image: crop is full-height, slide horizontally
        crop_size = height
        obj_left, obj_right = mask_xs.min(), mask_xs.max()
        obj_width = obj_right - obj_left
        # NOTE(review): borders are floats here; np.random.randint truncates
        # them — confirm the resulting off-by-one behaviour is acceptable.
        left_border = max(0, min(width - crop_size - 1, obj_left + obj_width * min_overlap - crop_size))
        right_border = max(left_border + 1, min(width - crop_size, obj_left + obj_width * min_overlap))
        start_x = np.random.randint(left_border, right_border)
        return start_x, 0, start_x + crop_size, height
    else:
        # portrait / square image: crop is full-width, slide vertically
        crop_size = width
        obj_top, obj_bottom = mask_ys.min(), mask_ys.max()
        obj_height = obj_bottom - obj_top
        top_border = max(0, min(height - crop_size - 1, obj_top + obj_height * min_overlap - crop_size))
        bottom_border = max(top_border + 1, min(height - crop_size, obj_top + obj_height * min_overlap))
        start_y = np.random.randint(top_border, bottom_border)
        return 0, start_y, width, start_y + crop_size
15,520 | from __future__ import print_function, division
import six
from six.moves import range
from collections import defaultdict
from functools import reduce
import operator
import io
import os
from PIL import Image
import math
import numpy as np
import random
import sys
import time
from tqdm import tqdm
from scipy import ndimage
def quick_countless(data):
def countless(data):
def countless_extreme(data):
    """Dispatch to the fastest applicable COUNTLESS downsampling variant.

    NOTE(review): presumably `quick_countless` is only valid when the array
    contains no sentinel values (0, or -1 after the +1 shift) — confirm
    against the countless2d implementations.
    """
    nonzeros = np.count_nonzero(data)
    # print("nonzeros", nonzeros)
    N = reduce(operator.mul, data.shape)  # total number of elements

    if nonzeros == N:
        # every element is non-zero
        print("quick")
        return quick_countless(data)
    elif np.count_nonzero(data + 1) == N:
        # no element equals -1 (data + 1 has no zeros)
        print("quick")
        # print("upper", nonzeros)
        return quick_countless(data)
    else:
        return countless(data)
15,529 | import os
import numpy as np
import tqdm
from skimage import io
from skimage.segmentation import mark_boundaries
from saicinpainting.evaluation.data import InpaintingDataset
from saicinpainting.evaluation.vis import save_item_for_vis
def save_mask_for_sidebyside(item, out_file):
    """Write the sample's mask as an 8-bit grayscale image file."""
    mask = item['mask']
    if mask.ndim == 3:
        mask = mask[0]  # drop the leading channel axis
    mask_u8 = np.clip(mask * 255, 0, 255).astype('uint8')
    io.imsave(out_file, mask_u8)
15,530 | import os
import numpy as np
import tqdm
from skimage import io
from skimage.segmentation import mark_boundaries
from saicinpainting.evaluation.data import InpaintingDataset
from saicinpainting.evaluation.vis import save_item_for_vis
def save_img_for_sidebyside(item, out_file):
    """Write the sample's image (CHW float in [0, 1]) as an 8-bit file."""
    hwc = np.transpose(item['image'], (1, 2, 0))  # CHW -> HWC
    io.imsave(out_file, np.clip(hwc * 255, 0, 255).astype('uint8'))
15,531 | import os
import numpy as np
import tqdm
from skimage import io
from skimage.segmentation import mark_boundaries
from saicinpainting.evaluation.data import InpaintingDataset
from saicinpainting.evaluation.vis import save_item_for_vis
def save_masked_img_for_sidebyside(item, out_file):
    """Write the image with masked regions painted white (mask==1)."""
    mask = item['mask']
    masked = item['image'] * (1 - mask) + mask  # blend: white where masked
    masked = np.transpose(masked, (1, 2, 0))    # CHW -> HWC
    io.imsave(out_file, np.clip(masked * 255, 0, 255).astype('uint8'))
15,532 | import cv2
import numpy as np
import sklearn
import torch
import os
import pickle
import pandas as pd
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset, load_image
from saicinpainting.evaluation.losses.fid.inception import InceptionV3
from saicinpainting.evaluation.utils import load_yaml
from saicinpainting.training.visualizers.base import visualize_mask_and_images
def draw_score(img, score):
    """Overlay `score` (two decimals, green) onto a CHW float image and
    return it in CHW layout."""
    # CHW -> HWC so OpenCV can draw on it
    img = np.transpose(img, (1, 2, 0))
    # NOTE(review): cv2.putText draws in place and np.transpose returns a
    # view, so this presumably also mutates the caller's array — confirm.
    cv2.putText(img, f'{score:.2f}',
                (40, 40),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (0, 1, 0),
                thickness=3)
    img = np.transpose(img, (2, 0, 1))
    return img
def load_image(fname, mode='RGB', return_orig=False):
    """Read an image file into a float32 CHW array scaled to [0, 1].

    When return_orig is True, also return the raw uint8 array (CHW for
    color, HW for grayscale).
    """
    raw = np.array(Image.open(fname).convert(mode))
    if raw.ndim == 3:
        raw = np.transpose(raw, (2, 0, 1))  # HWC -> CHW
    normalized = raw.astype('float32') / 255
    return (normalized, raw) if return_orig else normalized
def visualize_mask_and_images(images_dict: Dict[str, np.ndarray], keys: List[str],
                              last_without_mask=True, rescale_keys=None, mask_only_first=None,
                              black_mask=False) -> np.ndarray:
    """Build a horizontal side-by-side panel (HWC) of the images under `keys`,
    drawing the binarized mask boundary on selected panels.

    last_without_mask: skip the boundary overlay on the last panel.
    rescale_keys: keys whose images are min-max normalized for display.
    mask_only_first: if truthy, only the first panel gets the boundary.
    black_mask: additionally zero out the masked area before drawing.
    """
    mask = images_dict['mask'] > 0.5  # binarize; assumes shape (1, H, W) — TODO confirm
    result = []
    for i, k in enumerate(keys):
        img = images_dict[k]
        img = np.transpose(img, (1, 2, 0))  # CHW -> HWC
        if rescale_keys is not None and k in rescale_keys:
            # min-max normalize to roughly [0, 1] for visualization
            img = img - img.min()
            img /= img.max() + 1e-5
        if len(img.shape) == 2:
            img = np.expand_dims(img, 2)
        if img.shape[2] == 1:
            img = np.repeat(img, 3, axis=2)  # grayscale -> RGB
        elif (img.shape[2] > 3):
            # >3 channels: treat as per-class scores and colorize the argmax labels
            img_classes = img.argmax(2)
            img = color.label2rgb(img_classes, colors=COLORS)
        if mask_only_first:
            need_mark_boundaries = i == 0
        else:
            need_mark_boundaries = i < len(keys) - 1 or not last_without_mask
        if need_mark_boundaries:
            if black_mask:
                img = img * (1 - mask[0][..., None])  # zero out the masked area
            img = mark_boundaries(img,
                                  mask[0],
                                  color=(1., 0., 0.),
                                  outline_color=(1., 1., 1.),
                                  mode='thick')
        result.append(img)
    return np.concatenate(result, axis=1)  # stitch panels horizontally
def save_global_samples(global_mask_fnames, mask2real_fname, mask2fake_fname, out_dir, real_scores_by_fname, fake_scores_by_fname):
    """Render real/inpainted pairs (with their scores overlaid) side by side
    and write each grid to `out_dir` as a .jpg named after the mask file."""
    for mask_fname in global_mask_fnames:
        real_fname = mask2real_fname[mask_fname]
        real_img = load_image(real_fname, mode='RGB')
        # crop the fake to the real image's spatial extent
        fake_img = load_image(mask2fake_fname[mask_fname], mode='RGB')[:, :real_img.shape[1], :real_img.shape[2]]
        mask = load_image(mask_fname, mode='L')[None, ...]
        draw_score(real_img, real_scores_by_fname.loc[real_fname, 'real_score'])
        draw_score(fake_img, fake_scores_by_fname.loc[mask_fname, 'fake_score'])
        grid = visualize_mask_and_images(dict(image=real_img, mask=mask, fake=fake_img),
                                         keys=['image', 'fake'],
                                         last_without_mask=True)
        grid = np.clip(grid * 255, 0, 255).astype('uint8')
        grid = cv2.cvtColor(grid, cv2.COLOR_RGB2BGR)
        out_name = os.path.splitext(os.path.basename(mask_fname))[0] + '.jpg'
        cv2.imwrite(os.path.join(out_dir, out_name), grid)
15,533 | import cv2
import numpy as np
import sklearn
import torch
import os
import pickle
import pandas as pd
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset, load_image
from saicinpainting.evaluation.losses.fid.inception import InceptionV3
from saicinpainting.evaluation.utils import load_yaml
from saicinpainting.training.visualizers.base import visualize_mask_and_images
def draw_score(img, score):
    """Draw `score` (formatted to two decimals) in green near the top-left of `img`.

    `img` is a CHW float image; drawing happens on an HWC transposed view and
    the CHW view of the same data is returned.
    NOTE(review): np.transpose returns a view, so cv2.putText writes into the
    caller's array in place — callers in this file discard the return value
    and rely on that. Confirm that the cv2 version in use accepts the
    non-contiguous transposed view as an output array.
    """
    img = np.transpose(img, (1, 2, 0))
    cv2.putText(img, f'{score:.2f}',
                (40, 40),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (0, 1, 0),  # green in the [0, 1] float range
                thickness=3)
    img = np.transpose(img, (2, 0, 1))
    return img
def load_image(fname, mode='RGB', return_orig=False):
    """Read an image file into a float32 array scaled to [0, 1].

    Color images are returned channel-first (CHW). When `return_orig` is
    True, the raw uint8 array is returned alongside the normalized one.
    """
    raw = np.array(Image.open(fname).convert(mode))
    if raw.ndim == 3:
        raw = np.transpose(raw, (2, 0, 1))  # HWC -> CHW
    normalized = raw.astype('float32') / 255
    if return_orig:
        return normalized, raw
    return normalized
def visualize_mask_and_images(images_dict: Dict[str, np.ndarray], keys: List[str],
                              last_without_mask=True, rescale_keys=None, mask_only_first=None,
                              black_mask=False) -> np.ndarray:
    """Concatenate the CHW images selected by `keys` into one HWC strip.

    The binary mask (images_dict['mask'] > 0.5) is outlined on every panel
    except, when `last_without_mask` is True, the final one (or only on the
    first panel when `mask_only_first` is set). Keys in `rescale_keys` are
    min/max-normalized for display; `black_mask` blanks the masked region
    before outlining.
    """
    mask = images_dict['mask'] > 0.5
    result = []
    for i, k in enumerate(keys):
        img = images_dict[k]
        img = np.transpose(img, (1, 2, 0))  # CHW -> HWC
        if rescale_keys is not None and k in rescale_keys:
            # min/max-normalize for visualization only
            img = img - img.min()
            img /= img.max() + 1e-5
        if len(img.shape) == 2:
            img = np.expand_dims(img, 2)
        if img.shape[2] == 1:
            # grayscale -> 3-channel
            img = np.repeat(img, 3, axis=2)
        elif (img.shape[2] > 3):
            # multi-channel score map -> argmax labels rendered as colors
            img_classes = img.argmax(2)
            img = color.label2rgb(img_classes, colors=COLORS)
        if mask_only_first:
            need_mark_boundaries = i == 0
        else:
            need_mark_boundaries = i < len(keys) - 1 or not last_without_mask
        if need_mark_boundaries:
            if black_mask:
                img = img * (1 - mask[0][..., None])
            img = mark_boundaries(img,
                                  mask[0],
                                  color=(1., 0., 0.),
                                  outline_color=(1., 1., 1.),
                                  mode='thick')
        result.append(img)
    return np.concatenate(result, axis=1)
def save_samples_by_real(worst_best_by_real, mask2fake_fname, fake_info, out_dir):
    """For each real image, save a worst/best-fake grid plus score histograms.

    :param worst_best_by_real: table (pandas-style .loc) indexed by real image
        path with columns 'worst'/'best' (mask paths) and
        'real_score'/'worst_score'/'best_score'
    :param mask2fake_fname: dict mapping mask path -> fake (inpainted) image path
    :param fake_info: table with per-fake rows, filtered by its 'real_fname' column
    :param out_dir: output directory; writes '<stem>.jpg' grids and
        '<stem>_scores.png' histogram figures
    """
    for real_fname in worst_best_by_real.index:
        worst_mask_path = worst_best_by_real.loc[real_fname, 'worst']
        best_mask_path = worst_best_by_real.loc[real_fname, 'best']
        orig_img = load_image(real_fname, mode='RGB')
        worst_mask_img = load_image(worst_mask_path, mode='L')[None, ...]
        # fake images may be padded; crop to the real image's spatial size
        worst_fake_img = load_image(mask2fake_fname[worst_mask_path], mode='RGB')[:, :orig_img.shape[1], :orig_img.shape[2]]
        best_mask_img = load_image(best_mask_path, mode='L')[None, ...]
        best_fake_img = load_image(mask2fake_fname[best_mask_path], mode='RGB')[:, :orig_img.shape[1], :orig_img.shape[2]]
        # NOTE(review): draw_score appears to mutate its input in place via a
        # transposed view; the return values are deliberately ignored — confirm.
        draw_score(orig_img, worst_best_by_real.loc[real_fname, 'real_score'])
        draw_score(worst_fake_img, worst_best_by_real.loc[real_fname, 'worst_score'])
        draw_score(best_fake_img, worst_best_by_real.loc[real_fname, 'best_score'])
        cur_grid = visualize_mask_and_images(dict(image=orig_img, mask=np.zeros_like(worst_mask_img),
                                                  worst_mask=worst_mask_img, worst_img=worst_fake_img,
                                                  best_mask=best_mask_img, best_img=best_fake_img),
                                             keys=['image', 'worst_mask', 'worst_img', 'best_mask', 'best_img'],
                                             rescale_keys=['worst_mask', 'best_mask'],
                                             last_without_mask=True)
        # [0, 1] float RGB -> uint8 BGR for OpenCV's imwrite
        cur_grid = np.clip(cur_grid * 255, 0, 255).astype('uint8')
        cur_grid = cv2.cvtColor(cur_grid, cv2.COLOR_RGB2BGR)
        cv2.imwrite(os.path.join(out_dir,
                                 os.path.splitext(os.path.basename(real_fname))[0] + '.jpg'),
                    cur_grid)
        # side-by-side histograms of fake and real scores for this real image
        fig, (ax1, ax2) = plt.subplots(1, 2)
        cur_stat = fake_info[fake_info['real_fname'] == real_fname]
        cur_stat['fake_score'].hist(ax=ax1)
        cur_stat['real_score'].hist(ax=ax2)
        fig.tight_layout()
        fig.savefig(os.path.join(out_dir,
                                 os.path.splitext(os.path.basename(real_fname))[0] + '_scores.png'))
        plt.close(fig)
15,534 | import cv2
import numpy as np
import sklearn
import torch
import os
import pickle
import pandas as pd
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset, load_image
from saicinpainting.evaluation.losses.fid.inception import InceptionV3
from saicinpainting.evaluation.utils import load_yaml
from saicinpainting.training.visualizers.base import visualize_mask_and_images
def load_image(fname, mode='RGB', return_orig=False):
def extract_overlapping_masks(mask_fnames, cur_i, fake_scores_table, max_overlaps_n=2):
    """Find up to `max_overlaps_n` later masks that spatially overlap mask `cur_i`.

    Returns (pairs, score_deltas): pairs of (anchor_fname, other_fname) and,
    for each pair, fake_score(other) - fake_score(anchor) taken from
    `fake_scores_table` (indexed by mask filename, column 'fake_score').
    """
    pairs = []
    score_deltas = []
    anchor_fname = mask_fnames[cur_i]
    anchor_mask = load_image(anchor_fname, mode='L')[None, ...] > 0.5
    anchor_score = fake_scores_table.loc[anchor_fname, 'fake_score']
    for other_fname in mask_fnames[cur_i + 1:]:
        other_mask = load_image(other_fname, mode='L')[None, ...] > 0.5
        if np.any(anchor_mask & other_mask):
            pairs.append((anchor_fname, other_fname))
            score_deltas.append(fake_scores_table.loc[other_fname, 'fake_score'] - anchor_score)
            if len(pairs) >= max_overlaps_n:
                break
    return pairs, score_deltas
15,535 | import glob
import os
import shutil
import traceback
import PIL.Image as Image
import numpy as np
from joblib import Parallel, delayed
from saicinpainting.evaluation.masks.mask import SegmentationMask, propose_random_square_crop
from saicinpainting.evaluation.utils import load_yaml, SmallMode
from saicinpainting.training.data.masks import MixedMaskGenerator
class MakeManyMasksWrapper:
    """Adapter that samples several masks from a single-mask generator."""

    def __init__(self, impl, variants_n=2):
        self.impl = impl              # callable: CHW array -> indexable whose [0] is a mask
        self.variants_n = variants_n  # number of masks to sample per image

    def get_masks(self, img):
        """Sample `variants_n` masks for `img` (PIL image or HWC array)."""
        chw_img = np.transpose(np.array(img), (2, 0, 1))
        return [self.impl(chw_img)[0] for _ in range(self.variants_n)]
class SegmentationMask:
    """Proposes inpainting masks by moving panoptic-segmentation instances.

    A detectron2 panoptic FPN segments the image; "thing" instances that are
    not too large become mask candidates, and each candidate is rescaled,
    optionally flipped, and shifted to produce several mask variants while
    limiting overlap with other masks and with foreground objects.
    """

    def __init__(self, confidence_threshold=0.5, rigidness_mode=RigidnessMode.rigid,
                 max_object_area=0.3, min_mask_area=0.02, downsample_levels=6, num_variants_per_mask=4,
                 max_mask_intersection=0.5, max_foreground_coverage=0.5, max_foreground_intersection=0.5,
                 max_hidden_area=0.2, max_scale_change=0.25, horizontal_flip=True,
                 max_vertical_shift=0.1, position_shuffle=True):
        """
        :param confidence_threshold: float; threshold for confidence of the panoptic segmentator to allow for
            the instance.
        :param rigidness_mode: RigidnessMode object
            when soft, checks intersection only with the object from which the mask_object was produced
            when rigid, checks intersection with any foreground class object
        :param max_object_area: float; allowed upper bound for to be considered as mask_object.
        :param min_mask_area: float; lower bound for mask to be considered valid
        :param downsample_levels: int; defines width of the resized segmentation to obtain shifted masks;
        :param num_variants_per_mask: int; maximal number of the masks for the same object;
        :param max_mask_intersection: float; maximum allowed area fraction of intersection for 2 masks
            produced by horizontal shift of the same mask_object; higher value -> more diversity
        :param max_foreground_coverage: float; maximum allowed area fraction of intersection for foreground object to be
            covered by mask; lower value -> less the objects are covered
        :param max_foreground_intersection: float; maximum allowed area of intersection for the mask with foreground
            object; lower value -> mask is more on the background than on the objects
        :param max_hidden_area: upper bound on part of the object hidden by shifting object outside the screen area;
        :param max_scale_change: allowed scale change for the mask_object;
        :param horizontal_flip: if horizontal flips are allowed;
        :param max_vertical_shift: amount of vertical movement allowed;
        :param position_shuffle: shuffle
        """
        assert DETECTRON_INSTALLED, 'Cannot use SegmentationMask without detectron2'
        self.cfg = get_cfg()
        self.cfg.merge_from_file(model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"))
        self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")
        self.cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = confidence_threshold
        self.predictor = DefaultPredictor(self.cfg)

        self.rigidness_mode = RigidnessMode(rigidness_mode)
        self.max_object_area = max_object_area
        self.min_mask_area = min_mask_area
        self.downsample_levels = downsample_levels
        self.num_variants_per_mask = num_variants_per_mask
        self.max_mask_intersection = max_mask_intersection
        self.max_foreground_coverage = max_foreground_coverage
        self.max_foreground_intersection = max_foreground_intersection
        self.max_hidden_area = max_hidden_area
        self.position_shuffle = position_shuffle
        self.max_scale_change = max_scale_change
        self.horizontal_flip = horizontal_flip
        self.max_vertical_shift = max_vertical_shift

    def get_segmentation(self, img):
        """Run the panoptic predictor; returns (panoptic_seg, segments_info)."""
        im = img_as_ubyte(img)
        panoptic_seg, segment_info = self.predictor(im)["panoptic_seg"]
        return panoptic_seg, segment_info

    @staticmethod
    def _is_power_of_two(n):
        """Return True if integer `n` is a positive power of two."""
        # BUGFIX: this helper is invoked as self._is_power_of_two(x); without
        # @staticmethod (or a `self` parameter) the instance was bound to `n`
        # and every call raised TypeError.
        return (n != 0) and (n & (n-1) == 0)

    def identify_candidates(self, panoptic_seg, segments_info):
        """Collect ids of "thing" segments whose area is below max_object_area."""
        potential_mask_ids = []
        for segment in segments_info:
            if not segment["isthing"]:
                continue
            mask = (panoptic_seg == segment["id"]).int().detach().cpu().numpy()
            area = mask.sum().item() / np.prod(panoptic_seg.shape)
            if area >= self.max_object_area:
                continue
            potential_mask_ids.append(segment["id"])
        return potential_mask_ids

    def downsample_mask(self, mask):
        """Repeatedly halve the power-of-two-sized mask until its width is 2**downsample_levels."""
        height, width = mask.shape
        if not (self._is_power_of_two(height) and self._is_power_of_two(width)):
            raise ValueError("Image sides are not power of 2.")

        num_iterations = width.bit_length() - 1 - self.downsample_levels
        if num_iterations < 0:
            raise ValueError(f"Width is lower than 2^{self.downsample_levels}.")

        if height.bit_length() - 1 < num_iterations:
            raise ValueError("Height is too low to perform downsampling")

        downsampled = mask
        for _ in range(num_iterations):
            downsampled = zero_corrected_countless(downsampled)

        return downsampled

    def _augmentation_params(self):
        """Draw random scale / flip / vertical-shift parameters for one mask variant."""
        scaling_factor = np.random.uniform(1 - self.max_scale_change, 1 + self.max_scale_change)
        if self.horizontal_flip:
            horizontal_flip = bool(np.random.choice(2))
        else:
            horizontal_flip = False
        vertical_shift = np.random.uniform(-self.max_vertical_shift, self.max_vertical_shift)

        return {
            "scaling_factor": scaling_factor,
            "horizontal_flip": horizontal_flip,
            "vertical_shift": vertical_shift
        }

    def _get_intersection(self, mask_array, mask_object):
        """Boolean AND of a full-size mask array with an ObjectMask's bounding-box patch."""
        intersection = mask_array[
            mask_object.up:mask_object.down, mask_object.left:mask_object.right
        ] & mask_object.mask
        return intersection

    def _check_masks_intersection(self, aug_mask, total_mask_area, prev_masks):
        """True if aug_mask overlaps every earlier mask by at most max_mask_intersection."""
        for existing_mask in prev_masks:
            intersection_area = self._get_intersection(existing_mask, aug_mask).sum()
            intersection_existing = intersection_area / existing_mask.sum()
            intersection_current = 1 - (aug_mask.area() - intersection_area) / total_mask_area
            if (intersection_existing > self.max_mask_intersection) or \
               (intersection_current > self.max_mask_intersection):
                return False
        return True

    def _check_foreground_intersection(self, aug_mask, foreground):
        """True if aug_mask neither covers nor lies on foreground objects beyond the configured limits."""
        for existing_mask in foreground:
            intersection_area = self._get_intersection(existing_mask, aug_mask).sum()
            intersection_existing = intersection_area / existing_mask.sum()
            if intersection_existing > self.max_foreground_coverage:
                return False
            intersection_mask = intersection_area / aug_mask.area()
            if intersection_mask > self.max_foreground_intersection:
                return False
        return True

    def _move_mask(self, mask, foreground):
        """Search for up to num_variants_per_mask augmented placements of `mask`.

        Returns a list of augmentation-parameter dicts (scaling_factor,
        horizontal_flip, vertical_shift, horizontal_shift) for the placements
        that passed the intersection checks.
        """
        # Obtaining properties of the original mask_object:
        orig_mask = ObjectMask(mask)

        chosen_masks = []
        chosen_parameters = []
        # to fix the case when resizing gives mask_object consisting only of False
        scaling_factor_lower_bound = 0.

        for var_idx in range(self.num_variants_per_mask):
            # Obtaining augmentation parameters and applying them to the downscaled mask_object
            augmentation_params = self._augmentation_params()
            augmentation_params["scaling_factor"] = min([
                augmentation_params["scaling_factor"],
                2 * min(orig_mask.up, orig_mask.height - orig_mask.down) / orig_mask.height + 1.,
                2 * min(orig_mask.left, orig_mask.width - orig_mask.right) / orig_mask.width + 1.
            ])
            augmentation_params["scaling_factor"] = max([
                augmentation_params["scaling_factor"], scaling_factor_lower_bound
            ])

            aug_mask = deepcopy(orig_mask)
            aug_mask.rescale(augmentation_params["scaling_factor"], inplace=True)
            if augmentation_params["horizontal_flip"]:
                aug_mask.horizontal_flip(inplace=True)
            total_aug_area = aug_mask.area()
            if total_aug_area == 0:
                scaling_factor_lower_bound = 1.
                continue

            # Fix if the element vertical shift is too strong and shown area is too small:
            vertical_area = aug_mask.mask.sum(axis=1) / total_aug_area  # share of area taken by rows
            # number of rows which are allowed to be hidden from upper and lower parts of image respectively
            max_hidden_up = np.searchsorted(vertical_area.cumsum(), self.max_hidden_area)
            max_hidden_down = np.searchsorted(vertical_area[::-1].cumsum(), self.max_hidden_area)
            # correcting vertical shift, so not too much area will be hidden
            augmentation_params["vertical_shift"] = np.clip(
                augmentation_params["vertical_shift"],
                -(aug_mask.up + max_hidden_up) / aug_mask.height,
                (aug_mask.height - aug_mask.down + max_hidden_down) / aug_mask.height
            )

            # Applying vertical shift:
            vertical_shift = int(round(aug_mask.height * augmentation_params["vertical_shift"]))
            aug_mask.shift(vertical=vertical_shift, inplace=True)
            aug_mask.crop_to_canvas(vertical=True, horizontal=False, inplace=True)

            # Choosing horizontal shift:
            max_hidden_area = self.max_hidden_area - (1 - aug_mask.area() / total_aug_area)
            horizontal_area = aug_mask.mask.sum(axis=0) / total_aug_area
            max_hidden_left = np.searchsorted(horizontal_area.cumsum(), max_hidden_area)
            max_hidden_right = np.searchsorted(horizontal_area[::-1].cumsum(), max_hidden_area)
            allowed_shifts = np.arange(-max_hidden_left, aug_mask.width -
                                       (aug_mask.right - aug_mask.left) + max_hidden_right + 1)
            allowed_shifts = - (aug_mask.left - allowed_shifts)

            if self.position_shuffle:
                np.random.shuffle(allowed_shifts)

            mask_is_found = False
            for horizontal_shift in allowed_shifts:
                aug_mask_left = deepcopy(aug_mask)
                aug_mask_left.shift(horizontal=horizontal_shift, inplace=True)
                aug_mask_left.crop_to_canvas(inplace=True)

                prev_masks = [mask] + chosen_masks
                is_mask_suitable = self._check_masks_intersection(aug_mask_left, total_aug_area, prev_masks) & \
                                   self._check_foreground_intersection(aug_mask_left, foreground)
                if is_mask_suitable:
                    aug_draw = aug_mask_left.restore_full_mask()
                    chosen_masks.append(aug_draw)
                    augmentation_params["horizontal_shift"] = horizontal_shift / aug_mask_left.width
                    chosen_parameters.append(augmentation_params)
                    mask_is_found = True
                    break

            if not mask_is_found:
                break

        return chosen_parameters

    def _prepare_mask(self, mask):
        """Resize `mask` (nearest-neighbour) so both sides become powers of two."""
        height, width = mask.shape
        target_width = width if self._is_power_of_two(width) else (1 << width.bit_length())
        target_height = height if self._is_power_of_two(height) else (1 << height.bit_length())

        return resize(mask.astype('float32'), (target_height, target_width), order=0, mode='edge').round().astype('int32')

    def get_masks(self, im, return_panoptic=False):
        """Generate mask variants for image `im`; optionally also return the panoptic map."""
        panoptic_seg, segments_info = self.get_segmentation(im)
        potential_mask_ids = self.identify_candidates(panoptic_seg, segments_info)

        panoptic_seg_scaled = self._prepare_mask(panoptic_seg.detach().cpu().numpy())
        downsampled = self.downsample_mask(panoptic_seg_scaled)
        scene_objects = []
        for segment in segments_info:
            if not segment["isthing"]:
                continue
            mask = downsampled == segment["id"]
            if not np.any(mask):
                continue
            scene_objects.append(mask)

        mask_set = []
        for mask_id in potential_mask_ids:
            mask = downsampled == mask_id
            if not np.any(mask):
                continue

            if self.rigidness_mode is RigidnessMode.soft:
                foreground = [mask]
            elif self.rigidness_mode is RigidnessMode.rigid:
                foreground = scene_objects
            else:
                # BUGFIX: the original f-string referenced the undefined name
                # `rigidness_mode` and would have raised NameError here.
                raise ValueError(f'Unexpected rigidness_mode: {self.rigidness_mode}')

            masks_params = self._move_mask(mask, foreground)

            full_mask = ObjectMask((panoptic_seg == mask_id).detach().cpu().numpy())

            for params in masks_params:
                aug_mask = deepcopy(full_mask)
                aug_mask.rescale(params["scaling_factor"], inplace=True)
                if params["horizontal_flip"]:
                    aug_mask.horizontal_flip(inplace=True)

                vertical_shift = int(round(aug_mask.height * params["vertical_shift"]))
                horizontal_shift = int(round(aug_mask.width * params["horizontal_shift"]))
                aug_mask.shift(vertical=vertical_shift, horizontal=horizontal_shift, inplace=True)
                aug_mask = aug_mask.restore_full_mask().astype('uint8')
                if aug_mask.mean() <= self.min_mask_area:
                    continue
                mask_set.append(aug_mask)

        if return_panoptic:
            return mask_set, panoptic_seg.detach().cpu().numpy()
        else:
            return mask_set
def propose_random_square_crop(mask, min_overlap=0.5):
    """Pick a random square crop (left, top, right, bottom) of the image.

    The square's side equals the image's shorter side, and its position is
    sampled so the crop overlaps the masked region (mask > 0.5) by at least
    `min_overlap` of the mask's extent along the longer axis.
    """
    height, width = mask.shape
    mask_ys, mask_xs = np.where(mask > 0.5)  # mask==0 is known fragment and mask==1 is missing

    if width <= height:
        # portrait / square image: full width, slide the crop vertically
        crop_size = width
        obj_top, obj_bottom = mask_ys.min(), mask_ys.max()
        obj_height = obj_bottom - obj_top
        top_border = max(0, min(height - crop_size - 1, obj_top + obj_height * min_overlap - crop_size))
        bottom_border = max(top_border + 1, min(height - crop_size, obj_top + obj_height * min_overlap))
        start_y = np.random.randint(top_border, bottom_border)
        return 0, start_y, width, start_y + crop_size

    # landscape image: full height, slide the crop horizontally
    crop_size = height
    obj_left, obj_right = mask_xs.min(), mask_xs.max()
    obj_width = obj_right - obj_left
    left_border = max(0, min(width - crop_size - 1, obj_left + obj_width * min_overlap - crop_size))
    right_border = max(left_border + 1, min(width - crop_size, obj_left + obj_width * min_overlap))
    start_x = np.random.randint(left_border, right_border)
    return start_x, 0, start_x + crop_size, height
class SmallMode(Enum):
    # Policy for source images smaller than the configured minimum size:
    # DROP skips them, UPSCALE resizes them up before mask generation.
    DROP = "drop"
    UPSCALE = "upscale"
class MixedMaskGenerator:
    """Samples each mask from one of several sub-generators, picked at random.

    Every *_proba argument is the (unnormalized) weight of the corresponding
    generator; only generators with positive weight are instantiated, and the
    weights are renormalized over those. With probability `invert_proba` the
    sampled mask is inverted.
    """
    def __init__(self, irregular_proba=1/3, irregular_kwargs=None,
                 box_proba=1/3, box_kwargs=None,
                 segm_proba=1/3, segm_kwargs=None,
                 squares_proba=0, squares_kwargs=None,
                 superres_proba=0, superres_kwargs=None,
                 outpainting_proba=0, outpainting_kwargs=None,
                 invert_proba=0):
        self.probas = []
        self.gens = []

        if irregular_proba > 0:
            self.probas.append(irregular_proba)
            if irregular_kwargs is None:
                irregular_kwargs = {}
            else:
                # copy before mutating so the caller's dict is untouched
                irregular_kwargs = dict(irregular_kwargs)
            irregular_kwargs['draw_method'] = DrawMethod.LINE
            self.gens.append(RandomIrregularMaskGenerator(**irregular_kwargs))

        if box_proba > 0:
            self.probas.append(box_proba)
            if box_kwargs is None:
                box_kwargs = {}
            self.gens.append(RandomRectangleMaskGenerator(**box_kwargs))

        if segm_proba > 0:
            self.probas.append(segm_proba)
            if segm_kwargs is None:
                segm_kwargs = {}
            self.gens.append(RandomSegmentationMaskGenerator(**segm_kwargs))

        if squares_proba > 0:
            self.probas.append(squares_proba)
            if squares_kwargs is None:
                squares_kwargs = {}
            else:
                # copy before mutating so the caller's dict is untouched
                squares_kwargs = dict(squares_kwargs)
            squares_kwargs['draw_method'] = DrawMethod.SQUARE
            self.gens.append(RandomIrregularMaskGenerator(**squares_kwargs))

        if superres_proba > 0:
            self.probas.append(superres_proba)
            if superres_kwargs is None:
                superres_kwargs = {}
            self.gens.append(RandomSuperresMaskGenerator(**superres_kwargs))

        if outpainting_proba > 0:
            self.probas.append(outpainting_proba)
            if outpainting_kwargs is None:
                outpainting_kwargs = {}
            self.gens.append(OutpaintingMaskGenerator(**outpainting_kwargs))

        # normalize the enabled weights into a probability distribution
        self.probas = np.array(self.probas, dtype='float32')
        self.probas /= self.probas.sum()
        self.invert_proba = invert_proba

    def __call__(self, img, iter_i=None, raw_image=None):
        """Sample one mask for `img`, delegating to a randomly chosen generator."""
        kind = np.random.choice(len(self.probas), p=self.probas)
        gen = self.gens[kind]
        result = gen(img, iter_i=iter_i, raw_image=raw_image)
        if self.invert_proba > 0 and random.random() < self.invert_proba:
            result = 1 - result
        return result
def process_images(src_images, indir, outdir, config):
    """Generate a mask dataset: propose masks per image, crop, and save pairs.

    For each path in `src_images` (living under `indir`), builds a mask
    generator per `config.generator_kind` ('segmentation' or 'random'),
    drops or upscales too-small images, rescales the rest so the short side
    matches `config.cropping.out_min_size`, proposes masks, filters them by
    tampered-area fraction, optionally square-crops around each mask, and
    writes '<stem>_cropNNN.png' images with matching '..._maskNNN.png'
    masks under `outdir`. Per-file errors are printed and the file skipped;
    KeyboardInterrupt aborts the whole loop.
    """
    if config.generator_kind == 'segmentation':
        mask_generator = SegmentationMask(**config.mask_generator_kwargs)
    elif config.generator_kind == 'random':
        variants_n = config.mask_generator_kwargs.pop('variants_n', 2)
        mask_generator = MakeManyMasksWrapper(MixedMaskGenerator(**config.mask_generator_kwargs),
                                              variants_n=variants_n)
    else:
        raise ValueError(f'Unexpected generator kind: {config.generator_kind}')

    max_tamper_area = config.get('max_tamper_area', 1)

    for infile in src_images:
        try:
            file_relpath = infile[len(indir):]
            img_outpath = os.path.join(outdir, file_relpath)
            os.makedirs(os.path.dirname(img_outpath), exist_ok=True)

            image = Image.open(infile).convert('RGB')

            # scale input image to output resolution and filter smaller images
            if min(image.size) < config.cropping.out_min_size:
                handle_small_mode = SmallMode(config.cropping.handle_small_mode)
                if handle_small_mode == SmallMode.DROP:
                    continue
                elif handle_small_mode == SmallMode.UPSCALE:
                    factor = config.cropping.out_min_size / min(image.size)
                    out_size = (np.array(image.size) * factor).round().astype('uint32')
                    image = image.resize(out_size, resample=Image.BICUBIC)
            else:
                # large enough: downscale so the short side equals out_min_size
                factor = config.cropping.out_min_size / min(image.size)
                out_size = (np.array(image.size) * factor).round().astype('uint32')
                image = image.resize(out_size, resample=Image.BICUBIC)

            # generate and select masks
            src_masks = mask_generator.get_masks(image)

            filtered_image_mask_pairs = []
            for cur_mask in src_masks:
                if config.cropping.out_square_crop:
                    (crop_left,
                     crop_top,
                     crop_right,
                     crop_bottom) = propose_random_square_crop(cur_mask,
                                                               min_overlap=config.cropping.crop_min_overlap)
                    cur_mask = cur_mask[crop_top:crop_bottom, crop_left:crop_right]
                    cur_image = image.copy().crop((crop_left, crop_top, crop_right, crop_bottom))
                else:
                    cur_image = image

                # NOTE(review): len(np.unique(...)) == 0 can never be true for
                # a non-empty array, so this half of the check is dead — it
                # likely meant to skip all-zero masks. Confirm intent.
                if len(np.unique(cur_mask)) == 0 or cur_mask.mean() > max_tamper_area:
                    continue

                filtered_image_mask_pairs.append((cur_image, cur_mask))

            # NOTE(review): behavior when no pairs survive depends on the
            # numpy version (choice over an empty range); the broad except
            # below would turn a ValueError here into a logged skip.
            mask_indices = np.random.choice(len(filtered_image_mask_pairs),
                                            size=min(len(filtered_image_mask_pairs), config.max_masks_per_image),
                                            replace=False)

            # crop masks; save masks together with input image
            mask_basename = os.path.join(outdir, os.path.splitext(file_relpath)[0])
            for i, idx in enumerate(mask_indices):
                cur_image, cur_mask = filtered_image_mask_pairs[idx]
                cur_basename = mask_basename + f'_crop{i:03d}'
                Image.fromarray(np.clip(cur_mask * 255, 0, 255).astype('uint8'),
                                mode='L').save(cur_basename + f'_mask{i:03d}.png')
                cur_image.save(cur_basename + '.png')
        except KeyboardInterrupt:
            return
        except Exception as ex:
            print(f'Could not make masks for {infile} due to {ex}:\n{traceback.format_exc()}')
15,536 | import os
from argparse import ArgumentParser
def ssim_fid100_f1(metrics, fid_scale=100):
    """F1-style blend of SSIM and inverted, rescaled FID.

    FID is mapped to [0, 1] via max(0, fid_scale - fid) / fid_scale (lower
    FID -> higher value), then combined with the mean SSIM as a harmonic
    mean with a small epsilon in the denominator.

    :param metrics: table with .loc['total', 'ssim']['mean'] and
        .loc['total', 'fid']['mean'] entries (pandas-style)
    :param fid_scale: FID value mapped to a score of 0
    """
    ssim_mean = metrics.loc['total', 'ssim']['mean']
    fid_mean = metrics.loc['total', 'fid']['mean']
    inverted_fid = max(0, fid_scale - fid_mean) / fid_scale
    return 2 * ssim_mean * inverted_fid / (ssim_mean + inverted_fid + 1e-3)
15,537 | import os
from argparse import ArgumentParser
def find_best_checkpoint(model_list, models_dir):
    """Scan each model's train.log for validation F1 and record the best epoch/step.

    Reads model names (one per line) from `model_list`, then parses each
    '<models_dir>/<model>/train.log'. Lines containing
    'Validation metrics after epoch' are expected to carry '#<epoch>,' and
    'total <step>' tokens, with a 'total ...' metrics row (whose last column
    is F1) five lines below — inferred from the parsing here; confirm against
    the actual log format. Writes tab-separated
    '<model>  <best_epoch>  <best_step>  <best_f1>' rows to '<model_list>_best'.
    """
    with open(model_list) as f:
        models = [m.strip() for m in f.readlines()]
    with open(f'{model_list}_best', 'w') as f:
        for model in models:
            print(model)
            best_f1 = 0
            best_epoch = 0
            best_step = 0
            with open(os.path.join(models_dir, model, 'train.log')) as fm:
                lines = fm.readlines()
                for line_index in range(len(lines)):
                    line = lines[line_index]
                    if 'Validation metrics after epoch' in line:
                        # epoch number sits between '#' and the following ','
                        sharp_index = line.index('#')
                        cur_ep = line[sharp_index + 1:]
                        comma_index = cur_ep.index(',')
                        cur_ep = int(cur_ep[:comma_index])
                        # global step is the token right after 'total' on the same line
                        total_index = line.index('total ')
                        step = int(line[total_index:].split()[1].strip())
                        # the aggregated metrics row is expected 5 lines below;
                        # NOTE(review): raises IndexError if the log is truncated
                        total_line = lines[line_index + 5]
                        if not total_line.startswith('total'):
                            continue
                        words = total_line.strip().split()
                        # F1 is the last column of the 'total' row
                        f1 = float(words[-1])
                        print(f'\tEpoch: {cur_ep}, f1={f1}')
                        if f1 > best_f1:
                            best_f1 = f1
                            best_epoch = cur_ep
                            best_step = step
            f.write(f'{model}\t{best_epoch}\t{best_step}\t{best_f1}\n')
15,538 | import glob
import os
import re
import tensorflow as tf
from torch.utils.tensorboard import SummaryWriter
# Scalar tags matching any of these patterns are skipped (see need_drop).
DROP_RULES = [
    re.compile(r'_std$', re.I)
]


def need_drop(tag):
    """Return True when `tag` matches at least one pattern in DROP_RULES."""
    return any(rule.search(tag) for rule in DROP_RULES)
15,539 | import glob
import os
import re
import tensorflow as tf
from torch.utils.tensorboard import SummaryWriter
# Patterns that split a scalar tag into a (group, title) pair,
# e.g. 'train_loss' -> ('train', 'loss').
GROUPING_RULES = [
    re.compile(r'^(?P<group>train|test|val|extra_val_.*?(256|512))_(?P<title>.*)', re.I)
]


def get_group_and_title(tag):
    """Split `tag` via GROUPING_RULES; return (None, None) when no rule matches."""
    for pattern in GROUPING_RULES:
        found = pattern.search(tag)
        if found is not None:
            return found.group('group'), found.group('title')
    return None, None
15,540 | import os
import cv2
import numpy as np
import torch
from skimage import io
from skimage.transform import resize
from torch.utils.data import Dataset
from saicinpainting.evaluation.evaluator import InpaintingEvaluator
from saicinpainting.evaluation.losses.base_loss import SSIMScore, LPIPSScore, FIDScore
def create_rectangle_mask(height, width):
mask = np.ones((height, width))
up_left_corner = width // 4, height // 4
down_right_corner = (width - up_left_corner[0] - 1, height - up_left_corner[1] - 1)
cv2.rectangle(mask, up_left_corner, down_right_corner, (0, 0, 0), thickness=cv2.FILLED)
return mask | null |
15,541 | import os
import sys
import numpy as np
import torch
def color_encode(labelmap, colors, mode='RGB'):
    """Render an integer label map as a uint8 color image.

    `colors` maps label index -> (r, g, b); negative labels stay black.
    With mode='BGR' the channel order of the result is reversed.
    """
    labelmap = labelmap.astype('int')
    h, w = labelmap.shape[0], labelmap.shape[1]
    encoded = np.zeros((h, w, 3), dtype=np.uint8)
    for label in np.unique(labelmap):
        if label < 0:
            continue
        region = (labelmap == label)[:, :, np.newaxis]
        encoded += region * np.tile(colors[label], (h, w, 1))

    if mode == 'BGR':
        return encoded[:, :, ::-1]
    return encoded
15,542 | import torch.nn as nn
import math
from .utils import load_url
from .segm_lib.nn import SynchronizedBatchNorm2d
# Use the synchronized (cross-GPU) batch-norm implementation everywhere below.
BatchNorm2d = SynchronizedBatchNorm2d


def conv_bn(inp, oup, stride):
    """3x3 conv (given stride, padding 1, no bias) -> BatchNorm -> ReLU6."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
15,543 | import torch.nn as nn
import math
from .utils import load_url
from .segm_lib.nn import SynchronizedBatchNorm2d
# Use the synchronized (cross-GPU) batch-norm implementation everywhere below.
BatchNorm2d = SynchronizedBatchNorm2d


def conv_1x1_bn(inp, oup):
    """1x1 pointwise conv (stride 1, no bias) -> BatchNorm -> ReLU6."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
15,544 | import torch.nn as nn
import math
from .utils import load_url
from .segm_lib.nn import SynchronizedBatchNorm2d
# Pretrained checkpoint URLs keyed by architecture name (used by mobilenetv2()).
model_urls = {
    'mobilenetv2': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/mobilenet_v2.pth.tar',
}
class MobileNetV2(nn.Module):
    """MobileNetV2 classifier built from inverted-residual blocks.

    Uses the module-level BatchNorm2d alias (synchronized batch norm) via the
    conv_bn / conv_1x1_bn helpers and the InvertedResidual block.

    :param n_class: size of the final linear classifier
    :param input_size: input resolution, must be divisible by 32
    :param width_mult: multiplier applied to the channel count of every stage
    """
    def __init__(self, n_class=1000, input_size=224, width_mult=1.):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        # stage table: t = expansion ratio, c = output channels,
        # n = number of blocks in the stage, s = stride of its first block
        interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        # building first layer
        assert input_size % 32 == 0
        input_channel = int(input_channel * width_mult)
        self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]
        # building inverted residual blocks
        for t, c, n, s in interverted_residual_setting:
            output_channel = int(c * width_mult)
            for i in range(n):
                if i == 0:
                    # only the first block of a stage applies the stage stride
                    self.features.append(block(input_channel, output_channel, s, expand_ratio=t))
                else:
                    self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)

        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, n_class),
        )

        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        # global average pooling over the two spatial dimensions
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # He-style normal init for convs, unit/zero for BN, small-normal linear
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)  # NOTE(review): computed but unused
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def load_url(url, model_dir='./pretrained', map_location=None):
    """Download `url` into `model_dir` (unless already cached) and torch.load it.

    :param url: checkpoint URL; its basename becomes the cache filename
    :param model_dir: local cache directory, created if missing
    :param map_location: forwarded to torch.load
    :return: the deserialized checkpoint object
    """
    # exist_ok avoids the check-then-create race of the previous
    # `if not exists: makedirs` pattern
    os.makedirs(model_dir, exist_ok=True)
    filename = url.split('/')[-1]
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        urlretrieve(url, cached_file)
    # NOTE(review): torch.load un-pickles arbitrary objects; only load
    # checkpoints from trusted sources.
    return torch.load(cached_file, map_location=map_location)
The provided code snippet includes necessary dependencies for implementing the `mobilenetv2` function. Write a Python function `def mobilenetv2(pretrained=False, **kwargs)` to solve the following problem:
Constructs a MobileNet_V2 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def mobilenetv2(pretrained=False, **kwargs):
    """Constructs a MobileNet_V2 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = MobileNetV2(n_class=1000, **kwargs)
    if pretrained:
        # strict=False tolerates key mismatches between checkpoint and model
        state_dict = load_url(model_urls['mobilenetv2'])
        net.load_state_dict(state_dict, strict=False)
    return net
15,548 | import torch.cuda as cuda
import torch.nn as nn
import torch
import collections
from torch.nn.parallel._functions import Gather
The provided code snippet includes necessary dependencies for implementing the `dict_gather` function. Write a Python function `def dict_gather(outputs, target_device, dim=0)` to solve the following problem:
Gathers variables from different GPUs on a specified device (-1 means the CPU), with dictionary support.
Here is the function:
def dict_gather(outputs, target_device, dim=0):
    """
    Gathers variables from different GPUs on a specified device
    (-1 means the CPU), with dictionary support.
    """
    # BUGFIX: collections.Mapping / collections.Sequence aliases were removed
    # in Python 3.10; the abstract base classes live in collections.abc.
    from collections.abc import Mapping, Sequence

    def gather_map(outputs):
        out = outputs[0]
        if torch.is_tensor(out):
            # MJY(20180330) HACK:: force nr_dims > 0
            if out.dim() == 0:
                outputs = [o.unsqueeze(0) for o in outputs]
            return Gather.apply(target_device, dim, *outputs)
        elif out is None:
            return None
        elif isinstance(out, Mapping):
            # gather each key independently, preserving the dict structure
            return {k: gather_map([o[k] for o in outputs]) for k in out}
        elif isinstance(out, Sequence):
            # NOTE(review): str is also a Sequence, and any unrecognized leaf
            # type recurses forever in the fallthrough below — callers are
            # expected to pass tensors/dicts/lists/tuples/None only.
            return type(out)(map(gather_map, zip(*outputs)))
        return gather_map(outputs)
    return gather_map(outputs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.